callchain_64.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Performance counter callchain support - powerpc architecture code
 *
 * Copyright © 2009 Paul Mackerras, IBM Corporation.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <asm/ptrace.h>
#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <asm/vdso.h>
#include <asm/pte-walk.h>

#include "callchain.h"
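
/*
 * Read one word from the 64-bit user stack via the shared
 * __read_user_stack() helper; returns non-zero if the access fails.
 */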
static int read_user_stack_64(const unsigned long __user *ptr, unsigned long *ret)
{
	return __read_user_stack(ptr, ret, sizeof(*ret));
}

/*
 * 64-bit user processes use the same stack frame for RT and non-RT signals.
 */
struct signal_frame_64 {
	char		dummy[__SIGNAL_FRAMESIZE];
	struct ucontext	uc;
	unsigned long	unused[2];
	unsigned int	tramp[6];
	struct siginfo	*pinfo;
	void		*puc;
	struct siginfo	info;
	char		abigap[288];
};
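
/*
 * Return 1 if nip points at a sigreturn trampoline: either the one
 * embedded in the signal frame at fp, or the rt sigreturn trampoline
 * in the vDSO.
 */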
static int is_sigreturn_64_address(unsigned long nip, unsigned long fp)
{
	if (nip == fp + offsetof(struct signal_frame_64, tramp))
		return 1;
	if (current->mm->context.vdso &&
	    nip == VDSO64_SYMBOL(current->mm->context.vdso, sigtramp_rt64))
		return 1;
	return 0;
}

/*
 * Do some sanity checking on the signal frame pointed to by sp.
 * We check the pinfo and puc pointers in the frame.
 */
static int sane_signal_64_frame(unsigned long sp)
{
	struct signal_frame_64 __user *sf;
	unsigned long pinfo, puc;

	sf = (struct signal_frame_64 __user *) sp;
	if (read_user_stack_64((unsigned long __user *) &sf->pinfo, &pinfo) ||
	    read_user_stack_64((unsigned long __user *) &sf->puc, &puc))
		return 0;
	return pinfo == (unsigned long) &sf->info &&
		puc == (unsigned long) &sf->uc;
}
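
/*
 * Walk the user stack, storing one callchain entry per frame.  When a
 * frame turns out to be a signal frame, restart the walk from the
 * registers saved in it so the interrupted context is unwound too.
 */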
void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
			    struct pt_regs *regs)
{
	unsigned long sp, next_sp;
	unsigned long next_ip;
	unsigned long lr;
	long level = 0;
	struct signal_frame_64 __user *sigframe;
	unsigned long __user *fp, *uregs;

	next_ip = perf_instruction_pointer(regs);
	lr = regs->link;
	sp = regs->gpr[1];
	perf_callchain_store(entry, next_ip);

	while (entry->nr < entry->max_stack) {
		fp = (unsigned long __user *) sp;
		if (invalid_user_sp(sp) || read_user_stack_64(fp, &next_sp))
			return;
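		/*
		 * fp[2] is the LR save slot (sp + 16) in the 64-bit
		 * ABI stack frame, i.e. this frame's return address.
		 */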
		if (level > 0 && read_user_stack_64(&fp[2], &next_ip))
			return;

		/*
		 * Note: the next_sp - sp >= signal frame size check
		 * is true when next_sp < sp, which can happen when
		 * transitioning from an alternate signal stack to the
		 * normal stack.
		 */
		if (next_sp - sp >= sizeof(struct signal_frame_64) &&
		    (is_sigreturn_64_address(next_ip, sp) ||
		     (level <= 1 && is_sigreturn_64_address(lr, sp))) &&
		    sane_signal_64_frame(sp)) {
			/*
			 * This looks like a signal frame
			 */
			sigframe = (struct signal_frame_64 __user *) sp;
			uregs = sigframe->uc.uc_mcontext.gp_regs;
			if (read_user_stack_64(&uregs[PT_NIP], &next_ip) ||
			    read_user_stack_64(&uregs[PT_LNK], &lr) ||
			    read_user_stack_64(&uregs[PT_R1], &sp))
				return;
			level = 0;
			perf_callchain_store_context(entry, PERF_CONTEXT_USER);
			perf_callchain_store(entry, next_ip);
			continue;
		}
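
		/*
		 * The topmost frame has not saved LR on the stack yet,
		 * so take the return address from the link register.
		 */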
		if (level == 0)
			next_ip = lr;
		perf_callchain_store(entry, next_ip);
		++level;
		sp = next_sp;
	}
}