pmu.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Arm Limited
 * Author: Andrew Murray <[email protected]>
 */

#include <linux/kvm_host.h>
#include <linux/perf_event.h>
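
/*
 * Per-CPU record of the counters that must be switched at guest
 * entry/exit: events_host and events_guest are bitmaps of counter
 * indices, filled in by kvm_set_pmu_events() below.
 */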
static DEFINE_PER_CPU(struct kvm_pmu_events, kvm_pmu_events);

/*
 * Given the perf event attributes and system type, determine
 * whether we will need to switch counters at guest entry/exit.
 */
static bool kvm_pmu_switch_needed(struct perf_event_attr *attr)
{
        /*
         * With VHE the guest kernel runs at EL1 and the host at EL2;
         * if user (EL0) is excluded then we have no reason to switch
         * counters.
         */
        if (has_vhe() && attr->exclude_user)
                return false;

        /* Only switch if the host and guest attributes differ. */
        return (attr->exclude_host != attr->exclude_guest);
}
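
/*
 * Return this CPU's event tracking state. The pointer is only stable
 * while preemption is disabled, so callers are expected to run in a
 * non-preemptible context (as kvm_vcpu_pmu_restore_guest() below
 * arranges explicitly).
 */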
struct kvm_pmu_events *kvm_get_pmu_events(void)
{
        return this_cpu_ptr(&kvm_pmu_events);
}

/*
 * Add events to track that we may want to switch at guest entry/exit
 * time.
 */
void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
{
        struct kvm_pmu_events *pmu = kvm_get_pmu_events();

        if (!kvm_arm_support_pmu_v3() || !pmu || !kvm_pmu_switch_needed(attr))
                return;

        if (!attr->exclude_host)
                pmu->events_host |= set;
        if (!attr->exclude_guest)
                pmu->events_guest |= set;
}

/*
 * Stop tracking events.
 */
void kvm_clr_pmu_events(u32 clr)
{
        struct kvm_pmu_events *pmu = kvm_get_pmu_events();

        if (!kvm_arm_support_pmu_v3() || !pmu)
                return;

        pmu->events_host &= ~clr;
        pmu->events_guest &= ~clr;
}
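
/*
 * read_sysreg()/write_sysreg() need the register name as a
 * compile-time token, so a runtime counter index cannot be passed to
 * them directly. The macros below build a switch-based case table that
 * maps an index to the matching PMEVTYPER<n>_EL0 access.
 */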

#define PMEVTYPER_READ_CASE(idx)                                \
        case idx:                                               \
                return read_sysreg(pmevtyper##idx##_el0)

#define PMEVTYPER_WRITE_CASE(idx)                               \
        case idx:                                               \
                write_sysreg(val, pmevtyper##idx##_el0);        \
                break

#define PMEVTYPER_CASES(readwrite)                              \
        PMEVTYPER_##readwrite##_CASE(0);                        \
        PMEVTYPER_##readwrite##_CASE(1);                        \
        PMEVTYPER_##readwrite##_CASE(2);                        \
        PMEVTYPER_##readwrite##_CASE(3);                        \
        PMEVTYPER_##readwrite##_CASE(4);                        \
        PMEVTYPER_##readwrite##_CASE(5);                        \
        PMEVTYPER_##readwrite##_CASE(6);                        \
        PMEVTYPER_##readwrite##_CASE(7);                        \
        PMEVTYPER_##readwrite##_CASE(8);                        \
        PMEVTYPER_##readwrite##_CASE(9);                        \
        PMEVTYPER_##readwrite##_CASE(10);                       \
        PMEVTYPER_##readwrite##_CASE(11);                       \
        PMEVTYPER_##readwrite##_CASE(12);                       \
        PMEVTYPER_##readwrite##_CASE(13);                       \
        PMEVTYPER_##readwrite##_CASE(14);                       \
        PMEVTYPER_##readwrite##_CASE(15);                       \
        PMEVTYPER_##readwrite##_CASE(16);                       \
        PMEVTYPER_##readwrite##_CASE(17);                       \
        PMEVTYPER_##readwrite##_CASE(18);                       \
        PMEVTYPER_##readwrite##_CASE(19);                       \
        PMEVTYPER_##readwrite##_CASE(20);                       \
        PMEVTYPER_##readwrite##_CASE(21);                       \
        PMEVTYPER_##readwrite##_CASE(22);                       \
        PMEVTYPER_##readwrite##_CASE(23);                       \
        PMEVTYPER_##readwrite##_CASE(24);                       \
        PMEVTYPER_##readwrite##_CASE(25);                       \
        PMEVTYPER_##readwrite##_CASE(26);                       \
        PMEVTYPER_##readwrite##_CASE(27);                       \
        PMEVTYPER_##readwrite##_CASE(28);                       \
        PMEVTYPER_##readwrite##_CASE(29);                       \
        PMEVTYPER_##readwrite##_CASE(30)
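
/*
 * For illustration, PMEVTYPER_CASES(READ) expands to a case table of
 * the form:
 *
 *      case 0:
 *              return read_sysreg(pmevtyper0_el0);
 *      case 1:
 *              return read_sysreg(pmevtyper1_el0);
 *      ...
 */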

/*
 * Read a value direct from PMEVTYPER<idx> where idx is 0-30
 * or PMCCFILTR_EL0 where idx is ARMV8_PMU_CYCLE_IDX (31).
 */
static u64 kvm_vcpu_pmu_read_evtype_direct(int idx)
{
        switch (idx) {
        PMEVTYPER_CASES(READ);
        case ARMV8_PMU_CYCLE_IDX:
                return read_sysreg(pmccfiltr_el0);
        default:
                WARN_ON(1);
        }

        return 0;
}

/*
 * Write a value direct to PMEVTYPER<idx> where idx is 0-30
 * or PMCCFILTR_EL0 where idx is ARMV8_PMU_CYCLE_IDX (31).
 */
static void kvm_vcpu_pmu_write_evtype_direct(int idx, u32 val)
{
        switch (idx) {
        PMEVTYPER_CASES(WRITE);
        case ARMV8_PMU_CYCLE_IDX:
                write_sysreg(val, pmccfiltr_el0);
                break;
        default:
                WARN_ON(1);
        }
}
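
/*
 * ARMV8_PMU_EXCLUDE_EL0 is the PMEVTYPER<n>_EL0.U filter bit: while it
 * is set the counter does not count at EL0. The two helpers below
 * clear or set it on every counter named in the events bitmap.
 */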

/*
 * Modify ARMv8 PMU events to include EL0 counting.
 */
static void kvm_vcpu_pmu_enable_el0(unsigned long events)
{
        u64 typer;
        u32 counter;

        for_each_set_bit(counter, &events, 32) {
                typer = kvm_vcpu_pmu_read_evtype_direct(counter);
                typer &= ~ARMV8_PMU_EXCLUDE_EL0;
                kvm_vcpu_pmu_write_evtype_direct(counter, typer);
        }
}

/*
 * Modify ARMv8 PMU events to exclude EL0 counting.
 */
static void kvm_vcpu_pmu_disable_el0(unsigned long events)
{
        u64 typer;
        u32 counter;

        for_each_set_bit(counter, &events, 32) {
                typer = kvm_vcpu_pmu_read_evtype_direct(counter);
                typer |= ARMV8_PMU_EXCLUDE_EL0;
                kvm_vcpu_pmu_write_evtype_direct(counter, typer);
        }
}

/*
 * On VHE ensure that only guest events have EL0 counting enabled.
 * This is called from both vcpu_{load,put} and the sysreg handling.
 * Since the latter is preemptible, special care must be taken to
 * disable preemption.
 */
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu_events *pmu;
        u32 events_guest, events_host;

        if (!kvm_arm_support_pmu_v3() || !has_vhe())
                return;

        preempt_disable();
        pmu = kvm_get_pmu_events();
        events_guest = pmu->events_guest;
        events_host = pmu->events_host;

        kvm_vcpu_pmu_enable_el0(events_guest);
        kvm_vcpu_pmu_disable_el0(events_host);
        preempt_enable();
}

/*
 * On VHE ensure that only host events have EL0 counting enabled.
 */
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu_events *pmu;
        u32 events_guest, events_host;

        if (!kvm_arm_support_pmu_v3() || !has_vhe())
                return;

        pmu = kvm_get_pmu_events();
        events_guest = pmu->events_guest;
        events_host = pmu->events_host;

        kvm_vcpu_pmu_enable_el0(events_host);
        kvm_vcpu_pmu_disable_el0(events_guest);
}
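
/*
 * Illustrative usage (a sketch, not part of this file): the arm64 perf
 * driver is expected to tag counters as it programs them, roughly
 *
 *      kvm_set_pmu_events(BIT(idx), &event->attr);     // counter enable
 *      kvm_clr_pmu_events(BIT(idx));                   // counter disable
 *
 * so that kvm_vcpu_pmu_restore_{guest,host}() can flip the EL0 filter
 * on exactly those counters around guest entry/exit.
 */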