perf_event.c
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Performance event support for s390x
  4. *
  5. * Copyright IBM Corp. 2012, 2013
  6. * Author(s): Hendrik Brueckner <[email protected]>
  7. */
  8. #define KMSG_COMPONENT "perf"
  9. #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  10. #include <linux/kernel.h>
  11. #include <linux/perf_event.h>
  12. #include <linux/kvm_host.h>
  13. #include <linux/percpu.h>
  14. #include <linux/export.h>
  15. #include <linux/seq_file.h>
  16. #include <linux/spinlock.h>
  17. #include <linux/sysfs.h>
  18. #include <asm/irq.h>
  19. #include <asm/cpu_mf.h>
  20. #include <asm/lowcore.h>
  21. #include <asm/processor.h>
  22. #include <asm/sysinfo.h>
  23. #include <asm/unwind.h>
  24. static struct kvm_s390_sie_block *sie_block(struct pt_regs *regs)
  25. {
  26. struct stack_frame *stack = (struct stack_frame *) regs->gprs[15];
  27. if (!stack)
  28. return NULL;
  29. return (struct kvm_s390_sie_block *)stack->sie_control_block;
  30. }
  31. static bool is_in_guest(struct pt_regs *regs)
  32. {
  33. if (user_mode(regs))
  34. return false;
  35. #if IS_ENABLED(CONFIG_KVM)
  36. return instruction_pointer(regs) == (unsigned long) &sie_exit;
  37. #else
  38. return false;
  39. #endif
  40. }
  41. static unsigned long guest_is_user_mode(struct pt_regs *regs)
  42. {
  43. return sie_block(regs)->gpsw.mask & PSW_MASK_PSTATE;
  44. }
  45. static unsigned long instruction_pointer_guest(struct pt_regs *regs)
  46. {
  47. return sie_block(regs)->gpsw.addr;
  48. }
  49. unsigned long perf_instruction_pointer(struct pt_regs *regs)
  50. {
  51. return is_in_guest(regs) ? instruction_pointer_guest(regs)
  52. : instruction_pointer(regs);
  53. }
  54. static unsigned long perf_misc_guest_flags(struct pt_regs *regs)
  55. {
  56. return guest_is_user_mode(regs) ? PERF_RECORD_MISC_GUEST_USER
  57. : PERF_RECORD_MISC_GUEST_KERNEL;
  58. }
  59. static unsigned long perf_misc_flags_sf(struct pt_regs *regs)
  60. {
  61. struct perf_sf_sde_regs *sde_regs;
  62. unsigned long flags;
  63. sde_regs = (struct perf_sf_sde_regs *) &regs->int_parm_long;
  64. if (sde_regs->in_guest)
  65. flags = user_mode(regs) ? PERF_RECORD_MISC_GUEST_USER
  66. : PERF_RECORD_MISC_GUEST_KERNEL;
  67. else
  68. flags = user_mode(regs) ? PERF_RECORD_MISC_USER
  69. : PERF_RECORD_MISC_KERNEL;
  70. return flags;
  71. }
  72. unsigned long perf_misc_flags(struct pt_regs *regs)
  73. {
  74. /* Check if the cpum_sf PMU has created the pt_regs structure.
  75. * In this case, perf misc flags can be easily extracted. Otherwise,
  76. * do regular checks on the pt_regs content.
  77. */
  78. if (regs->int_code == 0x1407 && regs->int_parm == CPU_MF_INT_SF_PRA)
  79. if (!regs->gprs[15])
  80. return perf_misc_flags_sf(regs);
  81. if (is_in_guest(regs))
  82. return perf_misc_guest_flags(regs);
  83. return user_mode(regs) ? PERF_RECORD_MISC_USER
  84. : PERF_RECORD_MISC_KERNEL;
  85. }
  86. static void print_debug_cf(void)
  87. {
  88. struct cpumf_ctr_info cf_info;
  89. int cpu = smp_processor_id();
  90. memset(&cf_info, 0, sizeof(cf_info));
  91. if (!qctri(&cf_info))
  92. pr_info("CPU[%i] CPUM_CF: ver=%u.%u A=%04x E=%04x C=%04x\n",
  93. cpu, cf_info.cfvn, cf_info.csvn,
  94. cf_info.auth_ctl, cf_info.enable_ctl, cf_info.act_ctl);
  95. }
  96. static void print_debug_sf(void)
  97. {
  98. struct hws_qsi_info_block si;
  99. int cpu = smp_processor_id();
  100. memset(&si, 0, sizeof(si));
  101. if (qsi(&si))
  102. return;
  103. pr_info("CPU[%i] CPUM_SF: basic=%i diag=%i min=%lu max=%lu cpu_speed=%u\n",
  104. cpu, si.as, si.ad, si.min_sampl_rate, si.max_sampl_rate,
  105. si.cpu_speed);
  106. if (si.as)
  107. pr_info("CPU[%i] CPUM_SF: Basic-sampling: a=%i e=%i c=%i"
  108. " bsdes=%i tear=%016lx dear=%016lx\n", cpu,
  109. si.as, si.es, si.cs, si.bsdes, si.tear, si.dear);
  110. if (si.ad)
  111. pr_info("CPU[%i] CPUM_SF: Diagnostic-sampling: a=%i e=%i c=%i"
  112. " dsdes=%i tear=%016lx dear=%016lx\n", cpu,
  113. si.ad, si.ed, si.cd, si.dsdes, si.tear, si.dear);
  114. }
/* Dump CPU-MF counter and sampling facility state with IRQs disabled. */
void perf_event_print_debug(void)
{
	unsigned long flags;

	/* Disable interrupts so the queries report a consistent CPU state. */
	local_irq_save(flags);
	if (cpum_cf_avail())
		print_debug_cf();
	if (cpum_sf_avail())
		print_debug_sf();
	local_irq_restore(flags);
}
  125. /* Service level infrastructure */
  126. static void sl_print_counter(struct seq_file *m)
  127. {
  128. struct cpumf_ctr_info ci;
  129. memset(&ci, 0, sizeof(ci));
  130. if (qctri(&ci))
  131. return;
  132. seq_printf(m, "CPU-MF: Counter facility: version=%u.%u "
  133. "authorization=%04x\n", ci.cfvn, ci.csvn, ci.auth_ctl);
  134. }
  135. static void sl_print_sampling(struct seq_file *m)
  136. {
  137. struct hws_qsi_info_block si;
  138. memset(&si, 0, sizeof(si));
  139. if (qsi(&si))
  140. return;
  141. if (!si.as && !si.ad)
  142. return;
  143. seq_printf(m, "CPU-MF: Sampling facility: min_rate=%lu max_rate=%lu"
  144. " cpu_speed=%u\n", si.min_sampl_rate, si.max_sampl_rate,
  145. si.cpu_speed);
  146. if (si.as)
  147. seq_printf(m, "CPU-MF: Sampling facility: mode=basic"
  148. " sample_size=%u\n", si.bsdes);
  149. if (si.ad)
  150. seq_printf(m, "CPU-MF: Sampling facility: mode=diagnostic"
  151. " sample_size=%u\n", si.dsdes);
  152. }
/* service_level seq_print callback: report available CPU-MF facilities. */
static void service_level_perf_print(struct seq_file *m,
				     struct service_level *sl)
{
	if (cpum_cf_avail())
		sl_print_counter(m);
	if (cpum_sf_avail())
		sl_print_sampling(m);
}
/* Hook into the service level reporting infrastructure. */
static struct service_level service_level_perf = {
	.seq_print = service_level_perf_print,
};

/* Register the CPU-MF service level reporter at boot. */
static int __init service_level_perf_register(void)
{
	return register_service_level(&service_level_perf);
}
arch_initcall(service_level_perf_register);
  169. void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
  170. struct pt_regs *regs)
  171. {
  172. struct unwind_state state;
  173. unsigned long addr;
  174. unwind_for_each_frame(&state, current, regs, 0) {
  175. addr = unwind_get_return_address(&state);
  176. if (!addr || perf_callchain_store(entry, addr))
  177. return;
  178. }
  179. }
  180. /* Perf definitions for PMU event attributes in sysfs */
  181. ssize_t cpumf_events_sysfs_show(struct device *dev,
  182. struct device_attribute *attr, char *page)
  183. {
  184. struct perf_pmu_events_attr *pmu_attr;
  185. pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
  186. return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
  187. }