/* arch/powerpc/perf/perf_regs.c */
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Copyright 2016 Anju T, IBM Corporation.
  4. */
  5. #include <linux/errno.h>
  6. #include <linux/kernel.h>
  7. #include <linux/sched.h>
  8. #include <linux/sched/task_stack.h>
  9. #include <linux/perf_event.h>
  10. #include <linux/bug.h>
  11. #include <linux/stddef.h>
  12. #include <asm/ptrace.h>
  13. #include <asm/perf_regs.h>
/*
 * Mask of the extended (PMU) sample registers supported on this CPU.
 * Declared in asm/perf_regs.h; presumably filled in by the platform PMU
 * init code at boot — TODO confirm against the per-CPU pmu init paths.
 */
u64 PERF_REG_EXTENDED_MASK;

/* Map a PERF_REG_POWERPC_* index to the offset of field 'r' in struct pt_regs. */
#define PT_REGS_OFFSET(id, r) [id] = offsetof(struct pt_regs, r)

/* Bits of a sample-regs mask that name no supported register. */
#define REG_RESERVED (~(PERF_REG_EXTENDED_MASK | PERF_REG_PMU_MASK))
/*
 * Lookup table: PERF_REG_POWERPC_* sample-register index -> byte offset
 * of the backing field inside struct pt_regs.  perf_reg_value() indexes
 * this table to pull register contents out of the sampled regs.
 */
static unsigned int pt_regs_offset[PERF_REG_POWERPC_MAX] = {
	PT_REGS_OFFSET(PERF_REG_POWERPC_R0, gpr[0]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R1, gpr[1]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R2, gpr[2]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R3, gpr[3]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R4, gpr[4]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R5, gpr[5]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R6, gpr[6]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R7, gpr[7]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R8, gpr[8]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R9, gpr[9]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R10, gpr[10]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R11, gpr[11]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R12, gpr[12]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R13, gpr[13]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R14, gpr[14]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R15, gpr[15]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R16, gpr[16]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R17, gpr[17]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R18, gpr[18]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R19, gpr[19]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R20, gpr[20]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R21, gpr[21]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R22, gpr[22]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R23, gpr[23]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R24, gpr[24]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R25, gpr[25]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R26, gpr[26]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R27, gpr[27]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R28, gpr[28]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R29, gpr[29]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R30, gpr[30]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_R31, gpr[31]),
	PT_REGS_OFFSET(PERF_REG_POWERPC_NIP, nip),
	PT_REGS_OFFSET(PERF_REG_POWERPC_MSR, msr),
	PT_REGS_OFFSET(PERF_REG_POWERPC_ORIG_R3, orig_gpr3),
	PT_REGS_OFFSET(PERF_REG_POWERPC_CTR, ctr),
	PT_REGS_OFFSET(PERF_REG_POWERPC_LINK, link),
	PT_REGS_OFFSET(PERF_REG_POWERPC_XER, xer),
	PT_REGS_OFFSET(PERF_REG_POWERPC_CCR, ccr),
#ifdef CONFIG_PPC64
	PT_REGS_OFFSET(PERF_REG_POWERPC_SOFTE, softe),
#else
	/*
	 * 32-bit pt_regs has no softe field; mq fills the slot so the
	 * index stays valid — NOTE(review): value is presumably not
	 * meaningful on 32-bit, confirm against the 32-bit pt_regs layout.
	 */
	PT_REGS_OFFSET(PERF_REG_POWERPC_SOFTE, mq),
#endif
	PT_REGS_OFFSET(PERF_REG_POWERPC_TRAP, trap),
	PT_REGS_OFFSET(PERF_REG_POWERPC_DAR, dar),
	PT_REGS_OFFSET(PERF_REG_POWERPC_DSISR, dsisr),
	/*
	 * SIER and MMCRA deliberately alias dar/dsisr: the powerpc perf
	 * core stashes the sampled SIER/MMCRA values in those pt_regs
	 * slots before the sample reaches us — presumably in
	 * perf_read_regs(); verify against core-book3s.c.  This is NOT
	 * a copy/paste mistake.
	 */
	PT_REGS_OFFSET(PERF_REG_POWERPC_SIER, dar),
	PT_REGS_OFFSET(PERF_REG_POWERPC_MMCRA, dsisr),
};
/*
 * Return the current value of an extended sample register (one with
 * index >= PERF_REG_POWERPC_MAX).  These are read live from the PMU
 * SPRs rather than from the sampled pt_regs.  Unknown indices yield 0.
 */
static u64 get_ext_regs_value(int idx)
{
	switch (idx) {
	case PERF_REG_POWERPC_PMC1 ... PERF_REG_POWERPC_PMC6:
		/* PMCs go through a helper; index is rebased to PMC1 == 0. */
		return get_pmcs_ext_regs(idx - PERF_REG_POWERPC_PMC1);
	case PERF_REG_POWERPC_MMCR0:
		return mfspr(SPRN_MMCR0);
	case PERF_REG_POWERPC_MMCR1:
		return mfspr(SPRN_MMCR1);
	case PERF_REG_POWERPC_MMCR2:
		return mfspr(SPRN_MMCR2);
#ifdef CONFIG_PPC64
	/* These SPRs only exist on 64-bit parts. */
	case PERF_REG_POWERPC_MMCR3:
		return mfspr(SPRN_MMCR3);
	case PERF_REG_POWERPC_SIER2:
		return mfspr(SPRN_SIER2);
	case PERF_REG_POWERPC_SIER3:
		return mfspr(SPRN_SIER3);
	case PERF_REG_POWERPC_SDAR:
		return mfspr(SPRN_SDAR);
#endif
	case PERF_REG_POWERPC_SIAR:
		return mfspr(SPRN_SIAR);
	default: return 0;
	}
}
  95. u64 perf_reg_value(struct pt_regs *regs, int idx)
  96. {
  97. if (idx == PERF_REG_POWERPC_SIER &&
  98. (IS_ENABLED(CONFIG_FSL_EMB_PERF_EVENT) ||
  99. IS_ENABLED(CONFIG_PPC32) ||
  100. !is_sier_available()))
  101. return 0;
  102. if (idx == PERF_REG_POWERPC_MMCRA &&
  103. (IS_ENABLED(CONFIG_FSL_EMB_PERF_EVENT) ||
  104. IS_ENABLED(CONFIG_PPC32)))
  105. return 0;
  106. if (idx >= PERF_REG_POWERPC_MAX && idx < PERF_REG_EXTENDED_MAX)
  107. return get_ext_regs_value(idx);
  108. /*
  109. * If the idx is referring to value beyond the
  110. * supported registers, return 0 with a warning
  111. */
  112. if (WARN_ON_ONCE(idx >= PERF_REG_EXTENDED_MAX))
  113. return 0;
  114. return regs_get_register(regs, pt_regs_offset[idx]);
  115. }
  116. int perf_reg_validate(u64 mask)
  117. {
  118. if (!mask || mask & REG_RESERVED)
  119. return -EINVAL;
  120. return 0;
  121. }
  122. u64 perf_reg_abi(struct task_struct *task)
  123. {
  124. if (is_tsk_32bit_task(task))
  125. return PERF_SAMPLE_REGS_ABI_32;
  126. else
  127. return PERF_SAMPLE_REGS_ABI_64;
  128. }
  129. void perf_get_regs_user(struct perf_regs *regs_user,
  130. struct pt_regs *regs)
  131. {
  132. regs_user->regs = task_pt_regs(current);
  133. regs_user->abi = (regs_user->regs) ? perf_reg_abi(current) :
  134. PERF_SAMPLE_REGS_ABI_NONE;
  135. }