// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015 Josh Poimboeuf <[email protected]>
 */

#include <stdio.h>
#include <stdlib.h>

#define unlikely(cond) (cond)
#include <asm/insn.h>
#include "../../../arch/x86/lib/inat.c"
#include "../../../arch/x86/lib/insn.c"

#define CONFIG_64BIT 1
#include <asm/nops.h>

#include <asm/orc_types.h>
#include <objtool/check.h>
#include <objtool/elf.h>
#include <objtool/arch.h>
#include <objtool/warn.h>
#include <objtool/endianness.h>
#include <objtool/builtin.h>
#include <arch/elf.h>

static int is_x86_64(const struct elf *elf)
{
	switch (elf->ehdr.e_machine) {
	case EM_X86_64:
		return 1;
	case EM_386:
		return 0;
	default:
		WARN("unexpected ELF machine type %d", elf->ehdr.e_machine);
		return -1;
	}
}

bool arch_callee_saved_reg(unsigned char reg)
{
	switch (reg) {
	case CFI_BP:
	case CFI_BX:
	case CFI_R12:
	case CFI_R13:
	case CFI_R14:
	case CFI_R15:
		return true;

	case CFI_AX:
	case CFI_CX:
	case CFI_DX:
	case CFI_SI:
	case CFI_DI:
	case CFI_SP:
	case CFI_R8:
	case CFI_R9:
	case CFI_R10:
	case CFI_R11:
	case CFI_RA:
	default:
		return false;
	}
}
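
/*
 * The PC32/PLT32 relocation in a call/jmp immediate is computed relative to
 * the next instruction, four bytes past the relocation site, so its addend
 * carries an extra -4; adding 4 recovers the intended destination offset.
 */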
unsigned long arch_dest_reloc_offset(int addend)
{
	return addend + 4;
}
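
/*
 * Relative jumps and calls are encoded relative to the end of the
 * instruction: destination = instruction offset + length + signed immediate.
 */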
unsigned long arch_jump_destination(struct instruction *insn)
{
	return insn->offset + insn->len + insn->immediate;
}
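
/*
 * Allocate a zeroed stack_op, queue it on ops_list, and hand it to the block
 * that follows the macro invocation; the "else for (...; op; op = NULL)"
 * trick makes that block run exactly once with 'op' in scope.
 */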
#define ADD_OP(op) \
	if (!(op = calloc(1, sizeof(*op)))) \
		return -1; \
	else for (list_add_tail(&op->list, ops_list); op; op = NULL)

/*
 * Helpers to decode ModRM/SIB:
 *
 *      r/m| AX  CX  DX  BX |  SP |  BP |  SI  DI |
 *         | R8  R9 R10 R11 | R12 | R13 | R14 R15 |
 * Mod     +----------------+-----+-----+---------+
 *  00     |      [r/m]     |[SIB]|[IP+]|  [r/m]  |
 *  01     |   [r/m + d8]   |[S+d]|  [r/m + d8]   |
 *  10     |  [r/m + d32]   |[S+D]|  [r/m + d32]  |
 *  11     |                 r/ m                 |
 */
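
/*
 * Example: in "48 89 e5" (mov %rsp,%rbp), REX.W=1 and ModRM 0xe5 decodes as
 * mod=3 (register direct), reg=4 (%rsp), rm=5 (%rbp).
 */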
#define mod_is_mem()	(modrm_mod != 3)
#define mod_is_reg()	(modrm_mod == 3)

#define is_RIP()	((modrm_rm & 7) == CFI_BP && modrm_mod == 0)
#define have_SIB()	((modrm_rm & 7) == CFI_SP && mod_is_mem())

#define rm_is(reg)	(have_SIB() ? \
			 sib_base == (reg) && sib_index == CFI_SP : \
			 modrm_rm == (reg))

#define rm_is_mem(reg)	(mod_is_mem() && !is_RIP() && rm_is(reg))
#define rm_is_reg(reg)	(mod_is_reg() && modrm_rm == (reg))
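
/*
 * 0x3e is the DS segment-override byte, reused as the NOTRACK prefix for
 * indirect branches when CET/IBT is enabled; it only needs to be detected
 * here so the decoder can warn about it on indirect calls/jumps.
 */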
static bool has_notrack_prefix(struct insn *insn)
{
	int i;

	for (i = 0; i < insn->prefixes.nbytes; i++) {
		if (insn->prefixes.bytes[i] == 0x3e)
			return true;
	}

	return false;
}

int arch_decode_instruction(struct objtool_file *file, const struct section *sec,
			    unsigned long offset, unsigned int maxlen,
			    unsigned int *len, enum insn_type *type,
			    unsigned long *immediate,
			    struct list_head *ops_list)
{
	const struct elf *elf = file->elf;
	struct insn insn;
	int x86_64, ret;
	unsigned char op1, op2, op3, prefix,
		      rex = 0, rex_b = 0, rex_r = 0, rex_w = 0, rex_x = 0,
		      modrm = 0, modrm_mod = 0, modrm_rm = 0, modrm_reg = 0,
		      sib = 0, /* sib_scale = 0, */ sib_index = 0, sib_base = 0;
	struct stack_op *op = NULL;
	struct symbol *sym;
	u64 imm;

	x86_64 = is_x86_64(elf);
	if (x86_64 == -1)
		return -1;

	ret = insn_decode(&insn, sec->data->d_buf + offset, maxlen,
			  x86_64 ? INSN_MODE_64 : INSN_MODE_32);
	if (ret < 0) {
		WARN("can't decode instruction at %s:0x%lx", sec->name, offset);
		return -1;
	}

	*len = insn.length;
	*type = INSN_OTHER;

	if (insn.vex_prefix.nbytes)
		return 0;

	prefix = insn.prefixes.bytes[0];

	op1 = insn.opcode.bytes[0];
	op2 = insn.opcode.bytes[1];
	op3 = insn.opcode.bytes[2];

	if (insn.rex_prefix.nbytes) {
		rex = insn.rex_prefix.bytes[0];
		rex_w = X86_REX_W(rex) >> 3;
		rex_r = X86_REX_R(rex) >> 2;
		rex_x = X86_REX_X(rex) >> 1;
		rex_b = X86_REX_B(rex);
	}

	if (insn.modrm.nbytes) {
		modrm = insn.modrm.bytes[0];
		modrm_mod = X86_MODRM_MOD(modrm);
		modrm_reg = X86_MODRM_REG(modrm) + 8*rex_r;
		modrm_rm = X86_MODRM_RM(modrm) + 8*rex_b;
	}

	if (insn.sib.nbytes) {
		sib = insn.sib.bytes[0];
		/* sib_scale = X86_SIB_SCALE(sib); */
		sib_index = X86_SIB_INDEX(sib) + 8*rex_x;
		sib_base = X86_SIB_BASE(sib) + 8*rex_b;
	}
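
	/*
	 * Classify the instruction and, for anything that touches the stack
	 * or frame pointer, record the effect as one or more stack_ops for
	 * the CFI checker; everything else stays INSN_OTHER.
	 */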
	switch (op1) {

	case 0x1:
	case 0x29:
		if (rex_w && rm_is_reg(CFI_SP)) {
			/* add/sub reg, %rsp */
			ADD_OP(op) {
				op->src.type = OP_SRC_ADD;
				op->src.reg = modrm_reg;
				op->dest.type = OP_DEST_REG;
				op->dest.reg = CFI_SP;
			}
		}
		break;
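
	/*
	 * 0x50+r / 0x58+r encode the pushed/popped register in the low three
	 * opcode bits, extended by REX.B: 0x55 is "push %rbp", "41 54" is
	 * "push %r12".
	 */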
	case 0x50 ... 0x57:
		/* push reg */
		ADD_OP(op) {
			op->src.type = OP_SRC_REG;
			op->src.reg = (op1 & 0x7) + 8*rex_b;
			op->dest.type = OP_DEST_PUSH;
		}
		break;

	case 0x58 ... 0x5f:
		/* pop reg */
		ADD_OP(op) {
			op->src.type = OP_SRC_POP;
			op->dest.type = OP_DEST_REG;
			op->dest.reg = (op1 & 0x7) + 8*rex_b;
		}
		break;

	case 0x68:
	case 0x6a:
		/* push immediate */
		ADD_OP(op) {
			op->src.type = OP_SRC_CONST;
			op->dest.type = OP_DEST_PUSH;
		}
		break;

	case 0x70 ... 0x7f:
		*type = INSN_JUMP_CONDITIONAL;
		break;

	case 0x80 ... 0x83:
		/*
		 * 1000 00sw : mod OP r/m : immediate
		 *
		 * s - sign extend immediate
		 * w - imm8 / imm32
		 *
		 * OP: 000 ADD    100 AND
		 *     001 OR     101 SUB
		 *     010 ADC    110 XOR
		 *     011 SBB    111 CMP
		 */
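
		/*
		 * A typical frame adjustment is "48 83 ec 28" (sub $0x28,%rsp):
		 * REX.W, opcode 0x83 (sign-extended imm8), ModRM 0xec with
		 * mod=3, reg=5 (SUB), rm=4 (%rsp).  It is recorded below as an
		 * OP_SRC_ADD of -0x28 onto CFI_SP.
		 */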
		/* 64bit only */
		if (!rex_w)
			break;

		/* %rsp target only */
		if (!rm_is_reg(CFI_SP))
			break;

		imm = insn.immediate.value;
		if (op1 & 2) { /* sign extend */
			if (op1 & 1) { /* imm32 */
				imm <<= 32;
				imm = (s64)imm >> 32;
			} else { /* imm8 */
				imm <<= 56;
				imm = (s64)imm >> 56;
			}
		}

		switch (modrm_reg & 7) {
		case 5:
			imm = -imm;
			/* fallthrough */
		case 0:
			/* add/sub imm, %rsp */
			ADD_OP(op) {
				op->src.type = OP_SRC_ADD;
				op->src.reg = CFI_SP;
				op->src.offset = imm;
				op->dest.type = OP_DEST_REG;
				op->dest.reg = CFI_SP;
			}
			break;

		case 4:
			/* and imm, %rsp */
			ADD_OP(op) {
				op->src.type = OP_SRC_AND;
				op->src.reg = CFI_SP;
				op->src.offset = insn.immediate.value;
				op->dest.type = OP_DEST_REG;
				op->dest.reg = CFI_SP;
			}
			break;

		default:
			/* WARN ? */
			break;
		}

		break;
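
	/*
	 * MOV (0x88/0x89/0x8b) is only tracked when it copies %rsp to or from
	 * a register, or spills/reloads a register through %rbp- or
	 * %rsp-based memory; anything else is irrelevant to stack state.
	 */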
	case 0x89:
		if (!rex_w)
			break;

		if (modrm_reg == CFI_SP) {

			if (mod_is_reg()) {
				/* mov %rsp, reg */
				ADD_OP(op) {
					op->src.type = OP_SRC_REG;
					op->src.reg = CFI_SP;
					op->dest.type = OP_DEST_REG;
					op->dest.reg = modrm_rm;
				}
				break;

			} else {
				/* skip RIP relative displacement */
				if (is_RIP())
					break;

				/* skip nontrivial SIB */
				if (have_SIB()) {
					modrm_rm = sib_base;
					if (sib_index != CFI_SP)
						break;
				}

				/* mov %rsp, disp(%reg) */
				ADD_OP(op) {
					op->src.type = OP_SRC_REG;
					op->src.reg = CFI_SP;
					op->dest.type = OP_DEST_REG_INDIRECT;
					op->dest.reg = modrm_rm;
					op->dest.offset = insn.displacement.value;
				}
				break;
			}

			break;
		}

		if (rm_is_reg(CFI_SP)) {
			/* mov reg, %rsp */
			ADD_OP(op) {
				op->src.type = OP_SRC_REG;
				op->src.reg = modrm_reg;
				op->dest.type = OP_DEST_REG;
				op->dest.reg = CFI_SP;
			}
			break;
		}

		/* fallthrough */
	case 0x88:
		if (!rex_w)
			break;

		if (rm_is_mem(CFI_BP)) {
			/* mov reg, disp(%rbp) */
			ADD_OP(op) {
				op->src.type = OP_SRC_REG;
				op->src.reg = modrm_reg;
				op->dest.type = OP_DEST_REG_INDIRECT;
				op->dest.reg = CFI_BP;
				op->dest.offset = insn.displacement.value;
			}
			break;
		}

		if (rm_is_mem(CFI_SP)) {
			/* mov reg, disp(%rsp) */
			ADD_OP(op) {
				op->src.type = OP_SRC_REG;
				op->src.reg = modrm_reg;
				op->dest.type = OP_DEST_REG_INDIRECT;
				op->dest.reg = CFI_SP;
				op->dest.offset = insn.displacement.value;
			}
			break;
		}
		break;

	case 0x8b:
		if (!rex_w)
			break;

		if (rm_is_mem(CFI_BP)) {
			/* mov disp(%rbp), reg */
			ADD_OP(op) {
				op->src.type = OP_SRC_REG_INDIRECT;
				op->src.reg = CFI_BP;
				op->src.offset = insn.displacement.value;
				op->dest.type = OP_DEST_REG;
				op->dest.reg = modrm_reg;
			}
			break;
		}

		if (rm_is_mem(CFI_SP)) {
			/* mov disp(%rsp), reg */
			ADD_OP(op) {
				op->src.type = OP_SRC_REG_INDIRECT;
				op->src.reg = CFI_SP;
				op->src.offset = insn.displacement.value;
				op->dest.type = OP_DEST_REG;
				op->dest.reg = modrm_reg;
			}
			break;
		}
		break;
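
	/*
	 * LEA commonly appears in frame teardown, e.g. "48 8d 65 f0"
	 * (lea -0x10(%rbp),%rsp): ModRM 0x65 gives mod=1, reg=4 (%rsp),
	 * rm=5 (%rbp) with an 8-bit displacement of -16, modeled below as
	 * %rsp = %rbp + (-16).
	 */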
	case 0x8d:
		if (mod_is_reg()) {
			WARN("invalid LEA encoding at %s:0x%lx", sec->name, offset);
			break;
		}

		/* skip non 64bit ops */
		if (!rex_w)
			break;

		/* skip RIP relative displacement */
		if (is_RIP())
			break;

		/* skip nontrivial SIB */
		if (have_SIB()) {
			modrm_rm = sib_base;
			if (sib_index != CFI_SP)
				break;
		}

		/* lea disp(%src), %dst */
		ADD_OP(op) {
			op->src.offset = insn.displacement.value;
			if (!op->src.offset) {
				/* lea (%src), %dst */
				op->src.type = OP_SRC_REG;
			} else {
				/* lea disp(%src), %dst */
				op->src.type = OP_SRC_ADD;
			}
			op->src.reg = modrm_rm;
			op->dest.type = OP_DEST_REG;
			op->dest.reg = modrm_reg;
		}
		break;

	case 0x8f:
		/* pop to mem */
		ADD_OP(op) {
			op->src.type = OP_SRC_POP;
			op->dest.type = OP_DEST_MEM;
		}
		break;

	case 0x90:
		*type = INSN_NOP;
		break;

	case 0x9c:
		/* pushf */
		ADD_OP(op) {
			op->src.type = OP_SRC_CONST;
			op->dest.type = OP_DEST_PUSHF;
		}
		break;

	case 0x9d:
		/* popf */
		ADD_OP(op) {
			op->src.type = OP_SRC_POPF;
			op->dest.type = OP_DEST_MEM;
		}
		break;

	case 0x0f:
		if (op2 == 0x01) {
			if (modrm == 0xca)
				*type = INSN_CLAC;
			else if (modrm == 0xcb)
				*type = INSN_STAC;
		} else if (op2 >= 0x80 && op2 <= 0x8f) {
			*type = INSN_JUMP_CONDITIONAL;
		} else if (op2 == 0x05 || op2 == 0x07 || op2 == 0x34 ||
			   op2 == 0x35) {
			/* sysenter, sysret */
			*type = INSN_CONTEXT_SWITCH;
		} else if (op2 == 0x0b || op2 == 0xb9) {
			/* ud2 */
			*type = INSN_BUG;
		} else if (op2 == 0x0d || op2 == 0x1f) {
			/* nopl/nopw */
			*type = INSN_NOP;
		} else if (op2 == 0x1e) {
			if (prefix == 0xf3 && (modrm == 0xfa || modrm == 0xfb))
				*type = INSN_ENDBR;
		} else if (op2 == 0x38 && op3 == 0xf8) {
			if (insn.prefixes.nbytes == 1 &&
			    insn.prefixes.bytes[0] == 0xf2) {
				/* ENQCMD cannot be used in the kernel. */
				WARN("ENQCMD instruction at %s:%lx", sec->name,
				     offset);
			}
		} else if (op2 == 0xa0 || op2 == 0xa8) {
			/* push fs/gs */
			ADD_OP(op) {
				op->src.type = OP_SRC_CONST;
				op->dest.type = OP_DEST_PUSH;
			}
		} else if (op2 == 0xa1 || op2 == 0xa9) {
			/* pop fs/gs */
			ADD_OP(op) {
				op->src.type = OP_SRC_POP;
				op->dest.type = OP_DEST_MEM;
			}
		}
		break;

	case 0xc9:
		/*
		 * leave
		 *
		 * equivalent to:
		 * mov bp, sp
		 * pop bp
		 */
		ADD_OP(op) {
			op->src.type = OP_SRC_REG;
			op->src.reg = CFI_BP;
			op->dest.type = OP_DEST_REG;
			op->dest.reg = CFI_SP;
		}
		ADD_OP(op) {
			op->src.type = OP_SRC_POP;
			op->dest.type = OP_DEST_REG;
			op->dest.reg = CFI_BP;
		}
		break;

	case 0xcc:
		/* int3 */
		*type = INSN_TRAP;
		break;

	case 0xe3:
		/* jecxz/jrcxz */
		*type = INSN_JUMP_CONDITIONAL;
		break;

	case 0xe9:
	case 0xeb:
		*type = INSN_JUMP_UNCONDITIONAL;
		break;

	case 0xc2:
	case 0xc3:
		*type = INSN_RETURN;
		break;
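
	/*
	 * With --noinstr, "mov $imm32, disp32(%rip)" stores in .init.text are
	 * matched because the paravirt init code assigns pv_ops[] entries this
	 * way: the instruction's relocations identify the pv_ops[] slot and
	 * the function being installed, and the pair is recorded via
	 * objtool_pv_add() so indirect calls through pv_ops can be resolved
	 * later.
	 */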
	case 0xc7: /* mov imm, r/m */
		if (!opts.noinstr)
			break;

		if (insn.length == 3+4+4 && !strncmp(sec->name, ".init.text", 10)) {
			struct reloc *immr, *disp;
			struct symbol *func;
			int idx;

			immr = find_reloc_by_dest(elf, (void *)sec, offset+3);
			disp = find_reloc_by_dest(elf, (void *)sec, offset+7);

			if (!immr || strcmp(immr->sym->name, "pv_ops"))
				break;

			idx = (immr->addend + 8) / sizeof(void *);

			func = disp->sym;
			if (disp->sym->type == STT_SECTION)
				func = find_symbol_by_offset(disp->sym->sec, disp->addend);
			if (!func) {
				WARN("no func for pv_ops[]");
				return -1;
			}

			objtool_pv_add(file, idx, func);
		}
		break;

	case 0xcf: /* iret */
		/*
		 * Handle sync_core(), which has an IRET to self.
		 * All other IRET are in STT_NONE entry code.
		 */
		sym = find_symbol_containing(sec, offset);
		if (sym && sym->type == STT_FUNC) {
			ADD_OP(op) {
				/* add $40, %rsp */
				op->src.type = OP_SRC_ADD;
				op->src.reg = CFI_SP;
				op->src.offset = 5*8;
				op->dest.type = OP_DEST_REG;
				op->dest.reg = CFI_SP;
			}
			break;
		}

		/* fallthrough */
	case 0xca: /* retf */
	case 0xcb: /* retf */
		*type = INSN_CONTEXT_SWITCH;
		break;

	case 0xe0: /* loopne */
	case 0xe1: /* loope */
	case 0xe2: /* loop */
		*type = INSN_JUMP_CONDITIONAL;
		break;

	case 0xe8:
		*type = INSN_CALL;
		/*
		 * For the impact on the stack, a CALL behaves like
		 * a PUSH of an immediate value (the return address).
		 */
		ADD_OP(op) {
			op->src.type = OP_SRC_CONST;
			op->dest.type = OP_DEST_PUSH;
		}
		break;

	case 0xfc:
		*type = INSN_CLD;
		break;

	case 0xfd:
		*type = INSN_STD;
		break;

	case 0xff:
		if (modrm_reg == 2 || modrm_reg == 3) {
			*type = INSN_CALL_DYNAMIC;
			if (has_notrack_prefix(&insn))
				WARN("notrack prefix found at %s:0x%lx", sec->name, offset);
		} else if (modrm_reg == 4) {
			*type = INSN_JUMP_DYNAMIC;
			if (has_notrack_prefix(&insn))
				WARN("notrack prefix found at %s:0x%lx", sec->name, offset);
		} else if (modrm_reg == 5) {
			/* jmpf */
			*type = INSN_CONTEXT_SWITCH;
		} else if (modrm_reg == 6) {
			/* push from mem */
			ADD_OP(op) {
				op->src.type = OP_SRC_CONST;
				op->dest.type = OP_DEST_PUSH;
			}
		}
		break;

	default:
		break;
	}

	*immediate = insn.immediate.nbytes ? insn.immediate.value : 0;

	return 0;
}
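
/*
 * At function entry the CFA is %rsp + 8 (the CALL pushed the return address)
 * and the return address itself lives at CFA - 8; every other register's
 * location is unknown until the function body says otherwise.
 */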
void arch_initial_func_cfi_state(struct cfi_init_state *state)
{
	int i;

	for (i = 0; i < CFI_NUM_REGS; i++) {
		state->regs[i].base = CFI_UNDEFINED;
		state->regs[i].offset = 0;
	}

	/* initial CFA (call frame address) */
	state->cfa.base = CFI_SP;
	state->cfa.offset = 8;

	/* initial RA (return address) */
	state->regs[CFI_RA].base = CFI_CFA;
	state->regs[CFI_RA].offset = -8;
}

const char *arch_nop_insn(int len)
{
	static const char nops[5][5] = {
		{ BYTES_NOP1 },
		{ BYTES_NOP2 },
		{ BYTES_NOP3 },
		{ BYTES_NOP4 },
		{ BYTES_NOP5 },
	};

	if (len < 1 || len > 5) {
		WARN("invalid NOP size: %d\n", len);
		return NULL;
	}

	return nops[len-1];
}
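
/*
 * RET replacement sequences: a one-byte RET, an INT3 (which also acts as a
 * speculation trap after the RET), and, for lengths above two, a single NOP
 * of the remaining size.
 */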
#define BYTE_RET	0xC3

const char *arch_ret_insn(int len)
{
	static const char ret[5][5] = {
		{ BYTE_RET },
		{ BYTE_RET, 0xcc },
		{ BYTE_RET, 0xcc, BYTES_NOP1 },
		{ BYTE_RET, 0xcc, BYTES_NOP2 },
		{ BYTE_RET, 0xcc, BYTES_NOP3 },
	};

	if (len < 1 || len > 5) {
		WARN("invalid RET size: %d\n", len);
		return NULL;
	}

	return ret[len-1];
}

int arch_decode_hint_reg(u8 sp_reg, int *base)
{
	switch (sp_reg) {
	case ORC_REG_UNDEFINED:
		*base = CFI_UNDEFINED;
		break;
	case ORC_REG_SP:
		*base = CFI_SP;
		break;
	case ORC_REG_BP:
		*base = CFI_BP;
		break;
	case ORC_REG_SP_INDIRECT:
		*base = CFI_SP_INDIRECT;
		break;
	case ORC_REG_R10:
		*base = CFI_R10;
		break;
	case ORC_REG_R13:
		*base = CFI_R13;
		break;
	case ORC_REG_DI:
		*base = CFI_DI;
		break;
	case ORC_REG_DX:
		*base = CFI_DX;
		break;
	default:
		return -1;
	}

	return 0;
}

bool arch_is_retpoline(struct symbol *sym)
{
	return !strncmp(sym->name, "__x86_indirect_", 15);
}

bool arch_is_rethunk(struct symbol *sym)
{
	return !strcmp(sym->name, "__x86_return_thunk");
}

bool arch_is_embedded_insn(struct symbol *sym)
{
	return !strcmp(sym->name, "retbleed_return_thunk") ||
	       !strcmp(sym->name, "srso_safe_ret");