unwind.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/kernel/unwind.c
 *
 * Copyright (C) 2008 ARM Limited
 *
 * Stack unwinding support for ARM
 *
 * An ARM EABI version of gcc is required to generate the unwind
 * tables. For information about the structure of the unwind tables,
 * see "Exception Handling ABI for the ARM Architecture" at:
 *
 * http://infocenter.arm.com/help/topic/com.arm.doc.subset.swdev.abi/index.html
 */

#ifndef __CHECKER__
#if !defined (__ARM_EABI__)
#warning Your compiler does not have EABI support.
#warning ARM unwind is known to compile only with EABI compilers.
#warning Change compiler or disable ARM_UNWIND option.
#endif
#endif /* __CHECKER__ */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>

#include <asm/stacktrace.h>
#include <asm/traps.h>
#include <asm/unwind.h>

#include "reboot.h"

/* Dummy functions to avoid linker complaints */
void __aeabi_unwind_cpp_pr0(void)
{
};
EXPORT_SYMBOL(__aeabi_unwind_cpp_pr0);

void __aeabi_unwind_cpp_pr1(void)
{
};
EXPORT_SYMBOL(__aeabi_unwind_cpp_pr1);

void __aeabi_unwind_cpp_pr2(void)
{
};
EXPORT_SYMBOL(__aeabi_unwind_cpp_pr2);

struct unwind_ctrl_block {
        unsigned long vrs[16];          /* virtual register set */
        const unsigned long *insn;      /* pointer to the current instructions word */
        unsigned long sp_high;          /* highest value of sp allowed */
        unsigned long *lr_addr;         /* address of LR value on the stack */
        /*
         * 1 : check for stack overflow for each register pop.
         * 0 : save overhead if there is plenty of stack remaining.
         */
        int check_each_pop;
        int entries;                    /* number of entries left to interpret */
        int byte;                       /* current byte number in the instructions word */
};

enum regs {
#ifdef CONFIG_THUMB2_KERNEL
        FP = 7,
#else
        FP = 11,
#endif
        SP = 13,
        LR = 14,
        PC = 15
};

extern const struct unwind_idx __start_unwind_idx[];
static const struct unwind_idx *__origin_unwind_idx;
extern const struct unwind_idx __stop_unwind_idx[];

static DEFINE_RAW_SPINLOCK(unwind_lock);
static LIST_HEAD(unwind_tables);

/* Convert a prel31 symbol to an absolute address */
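/*
 * For example, if the prel31 word stored at 0xc0100000 reads 0x7ffffff0, it
 * sign-extends to -16, so the macro yields 0xc0100000 - 16 = 0xc00ffff0.
 * (Hypothetical addresses, for illustration only.)
 */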
#define prel31_to_addr(ptr)                             \
({                                                      \
        /* sign-extend to 32 bits */                    \
        long offset = (((long)*(ptr)) << 1) >> 1;       \
        (unsigned long)(ptr) + offset;                  \
})

/*
 * Binary search in the unwind index. The entries are
 * guaranteed to be sorted in ascending order by the linker.
 *
 * start = first entry
 * origin = first entry with positive offset (or stop if there is no such entry)
 * stop - 1 = last entry
 */
static const struct unwind_idx *search_index(unsigned long addr,
                                             const struct unwind_idx *start,
                                             const struct unwind_idx *origin,
                                             const struct unwind_idx *stop)
{
        unsigned long addr_prel31;

        pr_debug("%s(%08lx, %p, %p, %p)\n",
                 __func__, addr, start, origin, stop);

        /*
         * only search in the section with the matching sign. This way the
         * prel31 numbers can be compared as unsigned longs.
         */
        if (addr < (unsigned long)start)
                /* negative offsets: [start; origin) */
                stop = origin;
        else
                /* positive offsets: [origin; stop) */
                start = origin;

        /* prel31 for address relative to start */
        addr_prel31 = (addr - (unsigned long)start) & 0x7fffffff;

        while (start < stop - 1) {
                const struct unwind_idx *mid = start + ((stop - start) >> 1);

                /*
                 * As addr_prel31 is relative to start an offset is needed to
                 * make it relative to mid.
                 */
                if (addr_prel31 - ((unsigned long)mid - (unsigned long)start) <
                                mid->addr_offset)
                        stop = mid;
                else {
                        /* keep addr_prel31 relative to start */
                        addr_prel31 -= ((unsigned long)mid -
                                        (unsigned long)start);
                        start = mid;
                }
        }

        if (likely(start->addr_offset <= addr_prel31))
                return start;
        else {
                pr_warn("unwind: Unknown symbol address %08lx\n", addr);
                return NULL;
        }
}

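/*
 * Find the "origin" of an unwind index: the first entry whose prel31
 * addr_offset is non-negative, i.e. whose target address lies at or above
 * the entry itself. A prel31 value with bit 30 set (>= 0x40000000) is
 * negative once sign-extended, which is what the comparison below exploits.
 */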
static const struct unwind_idx *unwind_find_origin(
                const struct unwind_idx *start, const struct unwind_idx *stop)
{
        pr_debug("%s(%p, %p)\n", __func__, start, stop);
        while (start < stop) {
                const struct unwind_idx *mid = start + ((stop - start) >> 1);

                if (mid->addr_offset >= 0x40000000)
                        /* negative offset */
                        start = mid + 1;
                else
                        /* positive offset */
                        stop = mid;
        }
        pr_debug("%s -> %p\n", __func__, stop);
        return stop;
}

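/*
 * Look up the unwind index entry covering @addr: the main kernel table for
 * core kernel text, otherwise one of the module tables registered with
 * unwind_table_add(). Returns NULL if no entry covers @addr.
 */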
static const struct unwind_idx *unwind_find_idx(unsigned long addr)
{
        const struct unwind_idx *idx = NULL;
        unsigned long flags;

        pr_debug("%s(%08lx)\n", __func__, addr);

        if (core_kernel_text(addr)) {
                if (unlikely(!__origin_unwind_idx))
                        __origin_unwind_idx =
                                unwind_find_origin(__start_unwind_idx,
                                                   __stop_unwind_idx);

                /* main unwind table */
                idx = search_index(addr, __start_unwind_idx,
                                   __origin_unwind_idx,
                                   __stop_unwind_idx);
        } else {
                /* module unwind tables */
                struct unwind_table *table;

                raw_spin_lock_irqsave(&unwind_lock, flags);
                list_for_each_entry(table, &unwind_tables, list) {
                        if (addr >= table->begin_addr &&
                            addr < table->end_addr) {
                                idx = search_index(addr, table->start,
                                                   table->origin,
                                                   table->stop);
                                /* Move-to-front to exploit common traces */
                                list_move(&table->list, &unwind_tables);
                                break;
                        }
                }
                raw_spin_unlock_irqrestore(&unwind_lock, flags);
        }

        pr_debug("%s: idx = %p\n", __func__, idx);
        return idx;
}

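/*
 * Fetch the next unwind opcode byte. Opcode bytes are consumed from the
 * high-order byte downwards within each 32-bit word of the unwind table;
 * once byte 0 has been read, move on to the next word and decrement the
 * count of words left to interpret (ctrl->entries).
 */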
static unsigned long unwind_get_byte(struct unwind_ctrl_block *ctrl)
{
        unsigned long ret;

        if (ctrl->entries <= 0) {
                pr_warn("unwind: Corrupt unwind table\n");
                return 0;
        }

        ret = (*ctrl->insn >> (ctrl->byte * 8)) & 0xff;

        if (ctrl->byte == 0) {
                ctrl->insn++;
                ctrl->entries--;
                ctrl->byte = 3;
        } else
                ctrl->byte--;

        return ret;
}

/* Before popping a register, check whether it is feasible or not */
static int unwind_pop_register(struct unwind_ctrl_block *ctrl,
                               unsigned long **vsp, unsigned int reg)
{
        if (unlikely(ctrl->check_each_pop))
                if (*vsp >= (unsigned long *)ctrl->sp_high)
                        return -URC_FAILURE;

        /* Use READ_ONCE_NOCHECK here to avoid this memory access
         * from being tracked by KASAN.
         */
        ctrl->vrs[reg] = READ_ONCE_NOCHECK(*(*vsp));
        if (reg == 14)
                ctrl->lr_addr = *vsp;
        (*vsp)++;
        return URC_OK;
}

/* Helper functions to execute the instructions */

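/*
 * Pop the subset of registers r4-r15 selected by @mask (bit 0 = r4, bit 11 =
 * r15), as used by the 0x80xx encoding. If SP (r13) is itself in the mask,
 * the popped value becomes the new SP; otherwise SP is simply advanced past
 * the popped words.
 */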
static int unwind_exec_pop_subset_r4_to_r13(struct unwind_ctrl_block *ctrl,
                                            unsigned long mask)
{
        unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
        int load_sp, reg = 4;

        load_sp = mask & (1 << (13 - 4));
        while (mask) {
                if (mask & 1)
                        if (unwind_pop_register(ctrl, &vsp, reg))
                                return -URC_FAILURE;
                mask >>= 1;
                reg++;
        }
        if (!load_sp) {
                ctrl->vrs[SP] = (unsigned long)vsp;
        }

        return URC_OK;
}

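/*
 * Pop the contiguous range r4-r[4+(insn & 7)] for the 0xa0-0xaf encodings;
 * bit 3 of @insn additionally requests a pop of r14 (LR).
 */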
static int unwind_exec_pop_r4_to_rN(struct unwind_ctrl_block *ctrl,
                                    unsigned long insn)
{
        unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
        int reg;

        /* pop R4-R[4+bbb] */
        for (reg = 4; reg <= 4 + (insn & 7); reg++)
                if (unwind_pop_register(ctrl, &vsp, reg))
                        return -URC_FAILURE;

        if (insn & 0x8)
                if (unwind_pop_register(ctrl, &vsp, 14))
                        return -URC_FAILURE;

        ctrl->vrs[SP] = (unsigned long)vsp;

        return URC_OK;
}

static int unwind_exec_pop_subset_r0_to_r3(struct unwind_ctrl_block *ctrl,
                                           unsigned long mask)
{
        unsigned long *vsp = (unsigned long *)ctrl->vrs[SP];
        int reg = 0;

        /* pop R0-R3 according to mask */
        while (mask) {
                if (mask & 1)
                        if (unwind_pop_register(ctrl, &vsp, reg))
                                return -URC_FAILURE;
                mask >>= 1;
                reg++;
        }
        ctrl->vrs[SP] = (unsigned long)vsp;

        return URC_OK;
}

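/*
 * Decode the ULEB128-encoded operand that follows the 0xb2
 * "vsp = vsp + 0x204 + (uleb128 << 2)" opcode.
 */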
static unsigned long unwind_decode_uleb128(struct unwind_ctrl_block *ctrl)
{
        unsigned long bytes = 0;
        unsigned long insn;
        unsigned long result = 0;

        /*
         * unwind_get_byte() will advance `ctrl` one instruction at a time, so
         * loop until we get an instruction byte where bit 7 is not set.
         *
         * Note: This decodes a maximum of 4 bytes to output 28 bits of data
         * where the max is 0xfffffff: that covers a vsp increment of
         * 1073742336, hence it is sufficient for unwinding the stack.
         */
        do {
                insn = unwind_get_byte(ctrl);
                result |= (insn & 0x7f) << (bytes * 7);
                bytes++;
        } while (!!(insn & 0x80) && (bytes != sizeof(result)));

        return result;
}

/*
 * Execute the current unwind instruction.
 */
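/*
 * Summary of the ARM EHABI unwind opcodes handled below:
 *   00-3f        vsp += (insn & 0x3f) * 4 + 4
 *   40-7f        vsp -= (insn & 0x3f) * 4 + 4
 *   80-8f xx     pop r4-r15 under a 12-bit mask (mask 0 = refuse to unwind)
 *   90-9f        vsp = r[insn & 0x0f]  (9d and 9f are reserved)
 *   a0-a7        pop r4-r[4+(insn & 7)]
 *   a8-af        pop r4-r[4+(insn & 7)] and r14
 *   b0           finish (fall back to LR if PC was never restored)
 *   b1 xx        pop r0-r3 under mask
 *   b2 uleb128   vsp += 0x204 + (uleb128 << 2)
 * Anything else is reported as unhandled and fails the unwind.
 */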
static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
{
        unsigned long insn = unwind_get_byte(ctrl);
        int ret = URC_OK;

        pr_debug("%s: insn = %08lx\n", __func__, insn);

        if ((insn & 0xc0) == 0x00)
                ctrl->vrs[SP] += ((insn & 0x3f) << 2) + 4;
        else if ((insn & 0xc0) == 0x40) {
                ctrl->vrs[SP] -= ((insn & 0x3f) << 2) + 4;
        } else if ((insn & 0xf0) == 0x80) {
                unsigned long mask;

                insn = (insn << 8) | unwind_get_byte(ctrl);
                mask = insn & 0x0fff;
                if (mask == 0) {
                        pr_warn("unwind: 'Refuse to unwind' instruction %04lx\n",
                                insn);
                        return -URC_FAILURE;
                }

                ret = unwind_exec_pop_subset_r4_to_r13(ctrl, mask);
                if (ret)
                        goto error;
        } else if ((insn & 0xf0) == 0x90 &&
                   (insn & 0x0d) != 0x0d) {
                ctrl->vrs[SP] = ctrl->vrs[insn & 0x0f];
        } else if ((insn & 0xf0) == 0xa0) {
                ret = unwind_exec_pop_r4_to_rN(ctrl, insn);
                if (ret)
                        goto error;
        } else if (insn == 0xb0) {
                if (ctrl->vrs[PC] == 0)
                        ctrl->vrs[PC] = ctrl->vrs[LR];
                /* no further processing */
                ctrl->entries = 0;
        } else if (insn == 0xb1) {
                unsigned long mask = unwind_get_byte(ctrl);

                if (mask == 0 || mask & 0xf0) {
                        pr_warn("unwind: Spare encoding %04lx\n",
                                (insn << 8) | mask);
                        return -URC_FAILURE;
                }

                ret = unwind_exec_pop_subset_r0_to_r3(ctrl, mask);
                if (ret)
                        goto error;
        } else if (insn == 0xb2) {
                unsigned long uleb128 = unwind_decode_uleb128(ctrl);

                ctrl->vrs[SP] += 0x204 + (uleb128 << 2);
        } else {
                pr_warn("unwind: Unhandled instruction %02lx\n", insn);
                return -URC_FAILURE;
        }

        pr_debug("%s: fp = %08lx sp = %08lx lr = %08lx pc = %08lx\n", __func__,
                 ctrl->vrs[FP], ctrl->vrs[SP], ctrl->vrs[LR], ctrl->vrs[PC]);

error:
        return ret;
}

/*
 * Unwind a single frame from the state described by *frame, updating the
 * frame with the caller's register values on success.
 */
int unwind_frame(struct stackframe *frame)
{
        const struct unwind_idx *idx;
        struct unwind_ctrl_block ctrl;
        unsigned long sp_low;

        /* store the highest address on the stack to avoid crossing it */
        sp_low = frame->sp;
        ctrl.sp_high = ALIGN(sp_low - THREAD_SIZE, THREAD_ALIGN)
                       + THREAD_SIZE;

        pr_debug("%s(pc = %08lx lr = %08lx sp = %08lx)\n", __func__,
                 frame->pc, frame->lr, frame->sp);

        idx = unwind_find_idx(frame->pc);
        if (!idx) {
                if (frame->pc && kernel_text_address(frame->pc))
                        pr_warn("unwind: Index not found %08lx\n", frame->pc);
                return -URC_FAILURE;
        }

        ctrl.vrs[FP] = frame->fp;
        ctrl.vrs[SP] = frame->sp;
        ctrl.vrs[LR] = frame->lr;
        ctrl.vrs[PC] = 0;

        if (idx->insn == 1)
                /* can't unwind */
                return -URC_FAILURE;
        else if (frame->pc == prel31_to_addr(&idx->addr_offset)) {
                /*
                 * Unwinding is tricky when we're halfway through the prologue,
                 * since the stack frame that the unwinder expects may not be
                 * fully set up yet. However, one thing we do know for sure is
                 * that if we are unwinding from the very first instruction of
                 * a function, we are still effectively in the stack frame of
                 * the caller, and the unwind info has no relevance yet.
                 */
                if (frame->pc == frame->lr)
                        return -URC_FAILURE;
                frame->pc = frame->lr;
                return URC_OK;
        } else if ((idx->insn & 0x80000000) == 0)
                /* prel31 to the unwind table */
                ctrl.insn = (unsigned long *)prel31_to_addr(&idx->insn);
        else if ((idx->insn & 0xff000000) == 0x80000000)
                /* only personality routine 0 supported in the index */
                ctrl.insn = &idx->insn;
        else {
                pr_warn("unwind: Unsupported personality routine %08lx in the index at %p\n",
                        idx->insn, idx);
                return -URC_FAILURE;
        }

        /* check the personality routine */
        if ((*ctrl.insn & 0xff000000) == 0x80000000) {
                ctrl.byte = 2;
                ctrl.entries = 1;
        } else if ((*ctrl.insn & 0xff000000) == 0x81000000) {
                ctrl.byte = 1;
                ctrl.entries = 1 + ((*ctrl.insn & 0x00ff0000) >> 16);
        } else {
                pr_warn("unwind: Unsupported personality routine %08lx at %p\n",
                        *ctrl.insn, ctrl.insn);
                return -URC_FAILURE;
        }

        ctrl.check_each_pop = 0;

        if (prel31_to_addr(&idx->addr_offset) == (u32)&call_with_stack) {
                /*
                 * call_with_stack() is the only place where we permit SP to
                 * jump from one stack to another, and since we know it is
                 * guaranteed to happen, set up the SP bounds accordingly.
                 */
                sp_low = frame->fp;
                ctrl.sp_high = ALIGN(frame->fp, THREAD_SIZE);
        }

        while (ctrl.entries > 0) {
                int urc;

                if ((ctrl.sp_high - ctrl.vrs[SP]) < sizeof(ctrl.vrs))
                        ctrl.check_each_pop = 1;
                urc = unwind_exec_insn(&ctrl);
                if (urc < 0)
                        return urc;
                if (ctrl.vrs[SP] < sp_low || ctrl.vrs[SP] > ctrl.sp_high)
                        return -URC_FAILURE;
        }

        if (ctrl.vrs[PC] == 0)
                ctrl.vrs[PC] = ctrl.vrs[LR];

        /* check for infinite loop */
        if (frame->pc == ctrl.vrs[PC] && frame->sp == ctrl.vrs[SP])
                return -URC_FAILURE;

        frame->fp = ctrl.vrs[FP];
        frame->sp = ctrl.vrs[SP];
        frame->lr = ctrl.vrs[LR];
        frame->pc = ctrl.vrs[PC];
        frame->lr_addr = ctrl.lr_addr;

        return URC_OK;
}

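/*
 * Walk and print the call stack of @tsk, or of the context described by
 * @regs when @regs is non-NULL, emitting one dump_backtrace_entry() line
 * per frame at the given @loglvl.
 */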
void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk,
                      const char *loglvl)
{
        struct stackframe frame;

        pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

        if (!tsk)
                tsk = current;

        if (regs) {
                arm_get_current_stackframe(regs, &frame);
                /* PC might be corrupted, use LR in that case. */
                if (!kernel_text_address(regs->ARM_pc))
                        frame.pc = regs->ARM_lr;
        } else if (tsk == current) {
                frame.fp = (unsigned long)__builtin_frame_address(0);
                frame.sp = current_stack_pointer;
                frame.lr = (unsigned long)__builtin_return_address(0);
                /* We are saving the stack and execution state at this
                 * point, so we should ensure that frame.pc is within
                 * this block of code.
                 */
here:
                frame.pc = (unsigned long)&&here;
        } else {
                /* task blocked in __switch_to */
                frame.fp = thread_saved_fp(tsk);
                frame.sp = thread_saved_sp(tsk);
                /*
                 * The function calling __switch_to cannot be a leaf function
                 * so LR is recovered from the stack.
                 */
                frame.lr = 0;
                frame.pc = thread_saved_pc(tsk);
        }

        while (1) {
                int urc;
                unsigned long where = frame.pc;

                urc = unwind_frame(&frame);
                if (urc < 0)
                        break;
                dump_backtrace_entry(where, frame.pc, frame.sp - 4, loglvl);
        }
}

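/*
 * Register an additional unwind index table (used for module unwind info):
 * @start/@size describe the index section, @text_addr/@text_size the code
 * range it covers. Returns a handle to pass to unwind_table_del(), or NULL
 * if the table could not be allocated.
 */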
struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
                                      unsigned long text_addr,
                                      unsigned long text_size)
{
        unsigned long flags;
        struct unwind_table *tab = kmalloc(sizeof(*tab), GFP_KERNEL);

        pr_debug("%s(%08lx, %08lx, %08lx, %08lx)\n", __func__, start, size,
                 text_addr, text_size);

        if (!tab)
                return tab;

        tab->start = (const struct unwind_idx *)start;
        tab->stop = (const struct unwind_idx *)(start + size);
        tab->origin = unwind_find_origin(tab->start, tab->stop);
        tab->begin_addr = text_addr;
        tab->end_addr = text_addr + text_size;

        raw_spin_lock_irqsave(&unwind_lock, flags);
        list_add_tail(&tab->list, &unwind_tables);
        raw_spin_unlock_irqrestore(&unwind_lock, flags);

        return tab;
}

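/*
 * Unregister and free a table previously returned by unwind_table_add().
 */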
void unwind_table_del(struct unwind_table *tab)
{
        unsigned long flags;

        if (!tab)
                return;

        raw_spin_lock_irqsave(&unwind_lock, flags);
        list_del(&tab->list);
        raw_spin_unlock_irqrestore(&unwind_lock, flags);

        kfree(tab);
}