perf_callchain.c

// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/perf_event.h>
#include <linux/uaccess.h>
/* Kernel callchain */
struct stackframe {
	unsigned long fp;
	unsigned long lr;
};
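
/*
 * Frame layout assumed by the unwinders below: the frame pointer points at
 * a pair of words holding the previous frame pointer and the return address,
 * so dereferencing fp as a struct stackframe steps to the caller's frame.
 */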
static int unwind_frame_kernel(struct stackframe *frame)
{
	unsigned long low = (unsigned long)task_stack_page(current);
	unsigned long high = low + THREAD_SIZE;

	if (unlikely(frame->fp < low || frame->fp > high))
		return -EPERM;

	if (kstack_end((void *)frame->fp) || frame->fp & 0x3)
		return -EPERM;

	*frame = *(struct stackframe *)frame->fp;

	if (__kernel_text_address(frame->lr)) {
		int graph = 0;

		frame->lr = ftrace_graph_ret_addr(NULL, &graph, frame->lr,
						  NULL);
	}
	return 0;
}
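
/*
 * Record return addresses for the whole kernel stack: store the current
 * lr, then keep unwinding until unwind_frame_kernel() reports an error.
 */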
static void notrace walk_stackframe(struct stackframe *fr,
				    struct perf_callchain_entry_ctx *entry)
{
	do {
		perf_callchain_store(entry, fr->lr);
	} while (unwind_frame_kernel(fr) >= 0);
}
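
/*
 * User callchain: user frames follow the same {fp, lr} layout, but they
 * live in user memory and must be copied in with the user-access helpers.
 */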
/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
				    unsigned long fp, unsigned long reg_lr)
{
	struct stackframe buftail;
	unsigned long lr = 0;
	unsigned long __user *user_frame_tail = (unsigned long __user *)fp;

	/* Check accessibility of one struct stackframe beyond fp */
	if (!access_ok(user_frame_tail, sizeof(buftail)))
		return 0;
	if (__copy_from_user_inatomic(&buftail, user_frame_tail,
				      sizeof(buftail)))
		return 0;

	if (reg_lr != 0)
		lr = reg_lr;
	else
		lr = buftail.lr;

	fp = buftail.fp;
	perf_callchain_store(entry, lr);

	return fp;
}
/*
 * This will be called when the target is in user mode.
 * This function will only be called when we use
 * "PERF_SAMPLE_CALLCHAIN" in
 * kernel/events/core.c:perf_prepare_sample()
 *
 * How to trigger perf_callchain_[user/kernel]:
 * $ perf record -e cpu-clock --call-graph fp ./program
 * $ perf report --call-graph
 *
 * On the C-SKY platform, the program being sampled and the C library
 * need to be compiled with -mbacktrace, otherwise the user
 * stack will not contain function frames.
 */
void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
			 struct pt_regs *regs)
{
	unsigned long fp = 0;

	fp = regs->regs[4];
	perf_callchain_store(entry, regs->pc);

	/*
	 * When backtracing from a leaf function, lr is normally
	 * not saved inside the frame on C-SKY, so get lr from pt_regs
	 * at the sample point. However, the lr value can be incorrect if
	 * lr is used as a temporary register.
	 */
	fp = user_backtrace(entry, fp, regs->lr);

	while (fp && !(fp & 0x3) && entry->nr < entry->max_stack)
		fp = user_backtrace(entry, fp, 0);
}
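
/*
 * Kernel-mode counterpart: seed the first frame from the sampled pt_regs
 * (the same register used as fp above, plus lr) and walk the kernel stack.
 */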
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
			   struct pt_regs *regs)
{
	struct stackframe fr;

	fr.fp = regs->regs[4];
	fr.lr = regs->lr;
	walk_stackframe(&fr, entry);
}