vdso2c.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This file is included twice from vdso2c.c. It generates code for 32-bit
 * and 64-bit vDSOs. We need both for 64-bit builds, since 32-bit vDSOs
 * are built for 32-bit userspace.
 */
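/*
 * Sketch for illustration only: vdso2c.c is expected to include this header
 * once per word size, roughly along the lines shown below (the exact macro
 * definitions live in vdso2c.c).  BITSFUNC(), ELF(), INT_BITS and GET_LE()
 * used throughout are assumed to be provided there, selecting 32-/64-bit
 * ELF types and names and reading fields as little-endian regardless of the
 * host byte order:
 *
 *	#define ELF_BITS 64
 *	#include "vdso2c.h"
 *	#undef ELF_BITS
 *
 *	#define ELF_BITS 32
 *	#include "vdso2c.h"
 *	#undef ELF_BITS
 */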
static void BITSFUNC(copy)(FILE *outfile, const unsigned char *data, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if (i % 10 == 0)
			fprintf(outfile, "\n\t");
		fprintf(outfile, "0x%02X, ", (int)(data)[i]);
	}
}

/*
 * Extract a section from the input data into a standalone blob. Used to
 * capture kernel-only data that needs to persist indefinitely, e.g. the
 * exception fixup tables, but only in the kernel, i.e. the section can
 * be stripped from the final vDSO image.
 */
static void BITSFUNC(extract)(const unsigned char *data, size_t data_len,
			      FILE *outfile, ELF(Shdr) *sec, const char *name)
{
	unsigned long offset;
	size_t len;

	offset = (unsigned long)GET_LE(&sec->sh_offset);
	len = (size_t)GET_LE(&sec->sh_size);

	if (offset + len > data_len)
		fail("section to extract overruns input data");

	fprintf(outfile, "static const unsigned char %s[%zu] = {", name, len);
	BITSFUNC(copy)(outfile, data + offset, len);
	fprintf(outfile, "\n};\n\n");
}
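/*
 * For illustration only (byte values made up): extracting an 8-byte section
 * under the name "extable" would make extract() emit output roughly like:
 *
 *	static const unsigned char extable[8] = {
 *		0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
 *	};
 */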
static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
			 void *stripped_addr, size_t stripped_len,
			 FILE *outfile, const char *image_name)
{
	int found_load = 0;
	unsigned long load_size = -1;  /* Work around bogus warning */
	unsigned long mapping_size;
	ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
	unsigned long i, syms_nr;
	ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
		*alt_sec = NULL, *extable_sec = NULL;
	ELF(Dyn) *dyn = 0, *dyn_end = 0;
	const char *secstrings;
	INT_BITS syms[NSYMS] = {};

	ELF(Phdr) *pt = (ELF(Phdr) *)(raw_addr + GET_LE(&hdr->e_phoff));

	if (GET_LE(&hdr->e_type) != ET_DYN)
		fail("input is not a shared object\n");

	/* Walk the segment table. */
	for (i = 0; i < GET_LE(&hdr->e_phnum); i++) {
		if (GET_LE(&pt[i].p_type) == PT_LOAD) {
			if (found_load)
				fail("multiple PT_LOAD segs\n");

			if (GET_LE(&pt[i].p_offset) != 0 ||
			    GET_LE(&pt[i].p_vaddr) != 0)
				fail("PT_LOAD in wrong place\n");

			if (GET_LE(&pt[i].p_memsz) != GET_LE(&pt[i].p_filesz))
				fail("cannot handle memsz != filesz\n");

			load_size = GET_LE(&pt[i].p_memsz);
			found_load = 1;
		} else if (GET_LE(&pt[i].p_type) == PT_DYNAMIC) {
			dyn = raw_addr + GET_LE(&pt[i].p_offset);
			dyn_end = raw_addr + GET_LE(&pt[i].p_offset) +
				GET_LE(&pt[i].p_memsz);
		}
	}
	if (!found_load)
		fail("no PT_LOAD seg\n");

	if (stripped_len < load_size)
		fail("stripped input is too short\n");

	if (!dyn)
		fail("input has no PT_DYNAMIC section -- your toolchain is buggy\n");

	/* Walk the dynamic table */
	for (i = 0; dyn + i < dyn_end &&
		     GET_LE(&dyn[i].d_tag) != DT_NULL; i++) {
		typeof(dyn[i].d_tag) tag = GET_LE(&dyn[i].d_tag);
		if (tag == DT_REL || tag == DT_RELSZ || tag == DT_RELA ||
		    tag == DT_RELENT || tag == DT_TEXTREL)
			fail("vdso image contains dynamic relocations\n");
	}

	/* Walk the section table */
	secstrings_hdr = raw_addr + GET_LE(&hdr->e_shoff) +
		GET_LE(&hdr->e_shentsize)*GET_LE(&hdr->e_shstrndx);
	secstrings = raw_addr + GET_LE(&secstrings_hdr->sh_offset);
	for (i = 0; i < GET_LE(&hdr->e_shnum); i++) {
		ELF(Shdr) *sh = raw_addr + GET_LE(&hdr->e_shoff) +
			GET_LE(&hdr->e_shentsize) * i;
		if (GET_LE(&sh->sh_type) == SHT_SYMTAB)
			symtab_hdr = sh;

		if (!strcmp(secstrings + GET_LE(&sh->sh_name),
			    ".altinstructions"))
			alt_sec = sh;
		if (!strcmp(secstrings + GET_LE(&sh->sh_name), "__ex_table"))
			extable_sec = sh;
	}

	if (!symtab_hdr)
		fail("no symbol table\n");

	strtab_hdr = raw_addr + GET_LE(&hdr->e_shoff) +
		GET_LE(&hdr->e_shentsize) * GET_LE(&symtab_hdr->sh_link);

	syms_nr = GET_LE(&symtab_hdr->sh_size) / GET_LE(&symtab_hdr->sh_entsize);

	/* Walk the symbol table */
	for (i = 0; i < syms_nr; i++) {
		unsigned int k;
		ELF(Sym) *sym = raw_addr + GET_LE(&symtab_hdr->sh_offset) +
			GET_LE(&symtab_hdr->sh_entsize) * i;
		const char *sym_name = raw_addr +
			GET_LE(&strtab_hdr->sh_offset) +
			GET_LE(&sym->st_name);

		for (k = 0; k < NSYMS; k++) {
			if (!strcmp(sym_name, required_syms[k].name)) {
				if (syms[k]) {
					fail("duplicate symbol %s\n",
					     required_syms[k].name);
				}

				/*
				 * Careful: we use negative addresses, but
				 * st_value is unsigned, so we rely
				 * on syms[k] being a signed type of the
				 * correct width.
				 */
				syms[k] = GET_LE(&sym->st_value);
			}
		}
	}

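	/*
	 * The special pages live at negative offsets, below the vDSO text
	 * that is loaded at address 0: each used mapping must be page
	 * aligned, must not dip below vvar_start, and must end at or before
	 * address 0.  The checks below enforce this.
	 */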
	/* Validate mapping addresses. */
	for (i = 0; i < sizeof(special_pages) / sizeof(special_pages[0]); i++) {
		INT_BITS symval = syms[special_pages[i]];

		if (!symval)
			continue;  /* The mapping isn't used; ignore it. */

		if (symval % 4096)
			fail("%s must be a multiple of 4096\n",
			     required_syms[i].name);
		if (symval + 4096 < syms[sym_vvar_start])
			fail("%s underruns vvar_start\n",
			     required_syms[i].name);
		if (symval + 4096 > 0)
			fail("%s is on the wrong side of the vdso text\n",
			     required_syms[i].name);
	}
	if (syms[sym_vvar_start] % 4096)
		fail("vvar_begin must be a multiple of 4096\n");

	if (!image_name) {
		fwrite(stripped_addr, stripped_len, 1, outfile);
		return;
	}

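	/*
	 * Round the stripped image size up to a whole number of 4096-byte
	 * pages; the generated raw_data array below is sized and
	 * page-aligned to this rounded value.
	 */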
	mapping_size = (stripped_len + 4095) / 4096 * 4096;

	fprintf(outfile, "/* AUTOMATICALLY GENERATED -- DO NOT EDIT */\n\n");
	fprintf(outfile, "#include <linux/linkage.h>\n");
	fprintf(outfile, "#include <asm/page_types.h>\n");
	fprintf(outfile, "#include <asm/vdso.h>\n");
	fprintf(outfile, "\n");

	fprintf(outfile,
		"static unsigned char raw_data[%lu] __ro_after_init __aligned(PAGE_SIZE) = {",
		mapping_size);
	for (i = 0; i < stripped_len; i++) {
		if (i % 10 == 0)
			fprintf(outfile, "\n\t");
		fprintf(outfile, "0x%02X, ",
			(int)((unsigned char *)stripped_addr)[i]);
	}
	fprintf(outfile, "\n};\n\n");

	if (extable_sec)
		BITSFUNC(extract)(raw_addr, raw_len, outfile,
				  extable_sec, "extable");

	fprintf(outfile, "const struct vdso_image %s = {\n", image_name);
	fprintf(outfile, "\t.data = raw_data,\n");
	fprintf(outfile, "\t.size = %lu,\n", mapping_size);
	if (alt_sec) {
		fprintf(outfile, "\t.alt = %lu,\n",
			(unsigned long)GET_LE(&alt_sec->sh_offset));
		fprintf(outfile, "\t.alt_len = %lu,\n",
			(unsigned long)GET_LE(&alt_sec->sh_size));
	}
	if (extable_sec) {
		fprintf(outfile, "\t.extable_base = %lu,\n",
			(unsigned long)GET_LE(&extable_sec->sh_offset));
		fprintf(outfile, "\t.extable_len = %lu,\n",
			(unsigned long)GET_LE(&extable_sec->sh_size));
		fprintf(outfile, "\t.extable = extable,\n");
	}

	for (i = 0; i < NSYMS; i++) {
		if (required_syms[i].export && syms[i])
			fprintf(outfile, "\t.sym_%s = %" PRIi64 ",\n",
				required_syms[i].name, (int64_t)syms[i]);
	}

	fprintf(outfile, "};\n");
}
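/*
 * Illustrative sketch only (image name, sizes, offsets and symbol values
 * are made up): for an image passed in as "vdso_image_64", the C file
 * emitted by go() above would look roughly like this:
 *
 *	#include <linux/linkage.h>
 *	#include <asm/page_types.h>
 *	#include <asm/vdso.h>
 *
 *	static unsigned char raw_data[8192] __ro_after_init __aligned(PAGE_SIZE) = {
 *		0x7F, 0x45, 0x4C, 0x46, ...
 *	};
 *
 *	const struct vdso_image vdso_image_64 = {
 *		.data = raw_data,
 *		.size = 8192,
 *		.alt = 2469,
 *		.alt_len = 91,
 *		.sym_vvar_start = -16384,
 *		...
 *	};
 */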