// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Performance counter callchain support - powerpc architecture code
 *
 * Copyright © 2009 Paul Mackerras, IBM Corporation.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <asm/ptrace.h>
#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <asm/vdso.h>
#include <asm/pte-walk.h>

#include "callchain.h"
  19. /*
  20. * Is sp valid as the address of the next kernel stack frame after prev_sp?
  21. * The next frame may be in a different stack area but should not go
  22. * back down in the same stack area.
  23. */
  24. static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
  25. {
  26. if (sp & 0xf)
  27. return 0; /* must be 16-byte aligned */
  28. if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
  29. return 0;
  30. if (sp >= prev_sp + STACK_FRAME_MIN_SIZE)
  31. return 1;
  32. /*
  33. * sp could decrease when we jump off an interrupt stack
  34. * back to the regular process stack.
  35. */
  36. if ((sp & ~(THREAD_SIZE - 1)) != (prev_sp & ~(THREAD_SIZE - 1)))
  37. return 1;
  38. return 0;
  39. }
  40. void __no_sanitize_address
  41. perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
  42. {
  43. unsigned long sp, next_sp;
  44. unsigned long next_ip;
  45. unsigned long lr;
  46. long level = 0;
  47. unsigned long *fp;
  48. lr = regs->link;
  49. sp = regs->gpr[1];
  50. perf_callchain_store(entry, perf_instruction_pointer(regs));
  51. if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
  52. return;
  53. for (;;) {
  54. fp = (unsigned long *) sp;
  55. next_sp = fp[0];
  56. if (next_sp == sp + STACK_INT_FRAME_SIZE &&
  57. validate_sp(sp, current, STACK_INT_FRAME_SIZE) &&
  58. fp[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
  59. /*
  60. * This looks like an interrupt frame for an
  61. * interrupt that occurred in the kernel
  62. */
  63. regs = (struct pt_regs *)(sp + STACK_FRAME_OVERHEAD);
  64. next_ip = regs->nip;
  65. lr = regs->link;
  66. level = 0;
  67. perf_callchain_store_context(entry, PERF_CONTEXT_KERNEL);
  68. } else {
  69. if (level == 0)
  70. next_ip = lr;
  71. else
  72. next_ip = fp[STACK_FRAME_LR_SAVE];
  73. /*
  74. * We can't tell which of the first two addresses
  75. * we get are valid, but we can filter out the
  76. * obviously bogus ones here. We replace them
  77. * with 0 rather than removing them entirely so
  78. * that userspace can tell which is which.
  79. */
  80. if ((level == 1 && next_ip == lr) ||
  81. (level <= 1 && !kernel_text_address(next_ip)))
  82. next_ip = 0;
  83. ++level;
  84. }
  85. perf_callchain_store(entry, next_ip);
  86. if (!valid_next_sp(next_sp, sp))
  87. return;
  88. sp = next_sp;
  89. }
  90. }
  91. void
  92. perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
  93. {
  94. if (!is_32bit_task())
  95. perf_callchain_user_64(entry, regs);
  96. else
  97. perf_callchain_user_32(entry, regs);
  98. }