pmu.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for AMD
 *
 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *   Wei Huang <[email protected]>
 *
 * Implementation is based on pmu_intel.c file
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"
#include "svm.h"

enum pmu_type {
	PMU_TYPE_COUNTER = 0,
	PMU_TYPE_EVNTSEL,
};
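
/*
 * Map a PMC index to the corresponding entry in the vCPU's general-purpose
 * counter array.  The (guest-influenced) index is clamped with
 * array_index_nospec() so it can't drive a speculative out-of-bounds load.
 */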
static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	unsigned int num_counters = pmu->nr_arch_gp_counters;

	if (pmc_idx >= num_counters)
		return NULL;

	return &pmu->gp_counters[array_index_nospec(pmc_idx, num_counters)];
}
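
/*
 * Look up the GP counter backing a counter or event-select MSR.  Both the
 * legacy K7 MSRs and the PerfCtrExtCore MSRs are handled; for the latter,
 * CTLn/CTRn pairs sit at consecutive even/odd addresses, e.g.
 * MSR_F15H_PERF_CTL1 (0xc0010202) and MSR_F15H_PERF_CTR1 (0xc0010203) both
 * map to counter 1.  Returns NULL if the PMU is disabled for the VM or the
 * MSR doesn't match the requested type.
 */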
static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
					     enum pmu_type type)
{
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
	unsigned int idx;

	if (!vcpu->kvm->arch.enable_pmu)
		return NULL;

	switch (msr) {
	case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
		if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
			return NULL;
		/*
		 * Each PMU counter has a pair of CTL and CTR MSRs. CTLn
		 * MSRs (accessed via EVNTSEL) are even, CTRn MSRs are odd.
		 */
		idx = (unsigned int)((msr - MSR_F15H_PERF_CTL0) / 2);
		if (!(msr & 0x1) != (type == PMU_TYPE_EVNTSEL))
			return NULL;
		break;
	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
		if (type != PMU_TYPE_EVNTSEL)
			return NULL;
		idx = msr - MSR_K7_EVNTSEL0;
		break;
	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
		if (type != PMU_TYPE_COUNTER)
			return NULL;
		idx = msr - MSR_K7_PERFCTR0;
		break;
	default:
		return NULL;
	}

	return amd_pmc_idx_to_pmc(pmu, idx);
}
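
/*
 * AMD has no CPUID enumeration of unavailable architectural events, so every
 * hardware event the guest programs is treated as available.
 */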
static bool amd_hw_event_available(struct kvm_pmc *pmc)
{
	return true;
}

/* check if a PMC is enabled by comparing it against global_ctrl bits. Because
 * AMD CPU doesn't have global_ctrl MSR, all PMCs are enabled (return TRUE).
 */
static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
{
	return true;
}
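
/*
 * Validate the ECX value of a guest RDPMC.  The top two bits of ECX are flag
 * bits rather than part of the counter index, so they are masked off before
 * the index is range-checked against the number of GP counters.
 */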
static bool amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	idx &= ~(3u << 30);

	return idx < pmu->nr_arch_gp_counters;
}

/* idx is the ECX register of RDPMC instruction */
static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
					    unsigned int idx, u64 *mask)
{
	return amd_pmc_idx_to_pmc(vcpu_to_pmu(vcpu), idx & ~(3u << 30));
}

static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	/* All MSRs refer to exactly one PMC, so msr_idx_to_pmc is enough. */
	return false;
}
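
/*
 * Resolve an MSR index to its PMC, trying the counter MSR ranges first and
 * falling back to the event-select ranges.
 */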
static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	pmc = pmc ? pmc : get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);

	return pmc;
}
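
/*
 * Read handler for the PMU MSRs: counter MSRs return the current counter
 * value, event-select MSRs return the cached eventsel.  Returns 1 for MSRs
 * the vPMU doesn't own.
 */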
static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		msr_info->data = pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		msr_info->data = pmc->eventsel;
		return 0;
	}

	return 1;
}
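
/*
 * Write handler for the PMU MSRs.  Writes to a counter update the counter
 * value and its sampling period; writes to an event selector strip the
 * reserved bits and reprogram the backing perf event only when the value
 * actually changes.
 */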
static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		pmc_write_counter(pmc, data);
		pmc_update_sample_period(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		data &= ~pmu->reserved_bits;
		if (data != pmc->eventsel) {
			pmc->eventsel = data;
			reprogram_counter(pmc);
		}
		return 0;
	}

	return 1;
}
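
/*
 * Refresh the vPMU model from guest CPUID: PerfCtrExtCore exposes six GP
 * counters, the legacy PMU four.  Counters are 48 bits wide; the fixed
 * counter and global status state that only exists on Intel is cleared.
 */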
static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
	else
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;

	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
	pmu->reserved_bits = 0xfffffff000280000ull;
	pmu->raw_event_mask = AMD64_RAW_EVENT_MASK;
	pmu->version = 1;
	/* not applicable to AMD; but clean them to prevent any fall out */
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->global_status = 0;
	bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
}
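
/*
 * One-time init of the per-vCPU counter array.  The BUILD_BUG_ONs keep the
 * AMD counter limit within both the hardware core-PMU counter count and the
 * gp_counters[] storage shared with the common PMU code.
 */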
static void amd_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	BUILD_BUG_ON(KVM_AMD_PMC_MAX_GENERIC > AMD64_NUM_COUNTERS_CORE);
	BUILD_BUG_ON(KVM_AMD_PMC_MAX_GENERIC > INTEL_PMC_MAX_GENERIC);

	for (i = 0; i < KVM_AMD_PMC_MAX_GENERIC; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}
}
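
/*
 * Reset the vPMU: stop any backing perf events and zero the counter values
 * and event selectors.
 */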
static void amd_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	for (i = 0; i < KVM_AMD_PMC_MAX_GENERIC; i++) {
		struct kvm_pmc *pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}
}
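
/*
 * AMD callbacks plugged into the vendor-neutral kvm_pmu_ops; the common x86
 * PMU code dispatches through this table.
 */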
struct kvm_pmu_ops amd_pmu_ops __initdata = {
	.hw_event_available = amd_hw_event_available,
	.pmc_is_enabled = amd_pmc_is_enabled,
	.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
	.rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
	.is_valid_rdpmc_ecx = amd_is_valid_rdpmc_ecx,
	.is_valid_msr = amd_is_valid_msr,
	.get_msr = amd_pmu_get_msr,
	.set_msr = amd_pmu_set_msr,
	.refresh = amd_pmu_refresh,
	.init = amd_pmu_init,
	.reset = amd_pmu_reset,
};