arm_pmu.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <[email protected]>
 */

#ifndef __ASM_ARM_KVM_PMU_H
#define __ASM_ARM_KVM_PMU_H

#include <linux/perf_event.h>
#include <asm/perf_event.h>

#define ARMV8_PMU_CYCLE_IDX		(ARMV8_PMU_MAX_COUNTERS - 1)

#ifdef CONFIG_HW_PERF_EVENTS
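
/*
 * Note: with CONFIG_HW_PERF_EVENTS=y the declarations below provide the
 * real virtual PMUv3 support; the #else branch further down replaces
 * them with empty stubs so that callers compile unchanged when the host
 * has no perf/PMU support.
 */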

struct kvm_pmc {
	u8 idx;	/* index into the pmu->pmc array */
	struct perf_event *perf_event;
};

struct kvm_pmu_events {
	u32 events_host;
	u32 events_guest;
};

struct kvm_pmu {
	struct irq_work overflow_work;
	struct kvm_pmu_events events;
	struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
	int irq_num;
	bool created;
	bool irq_level;
};

struct arm_pmu_entry {
	struct list_head entry;
	struct arm_pmu *arm_pmu;
};
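
/*
 * kvm_arm_pmu_available is a static key, flipped on elsewhere once a
 * hardware PMU is known to be present; querying it through a static
 * branch makes kvm_arm_support_pmu_v3() a patched jump rather than a
 * memory load on hot paths.
 */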
DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);

static __always_inline bool kvm_arm_support_pmu_v3(void)
{
	return static_branch_likely(&kvm_arm_pmu_available);
}

#define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_pmu_update_run(struct kvm_vcpu *vcpu);
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx);
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
struct kvm_pmu_events *kvm_get_pmu_events(void);
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);

#define kvm_vcpu_has_pmu(vcpu)					\
	(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))

/*
 * Updates the vcpu's view of the pmu events for this cpu.
 * Must be called before every vcpu run after disabling interrupts, to ensure
 * that an interrupt cannot fire and update the structure.
 */
#define kvm_pmu_update_vcpu_events(vcpu)				\
	do {								\
		if (!has_vhe() && kvm_vcpu_has_pmu(vcpu))		\
			vcpu->arch.pmu.events = *kvm_get_pmu_events();	\
	} while (0)

#else

struct kvm_pmu {
};

static inline bool kvm_arm_support_pmu_v3(void)
{
	return false;
}

#define kvm_arm_pmu_irq_initialized(v)	(false)
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
					    u64 select_idx)
{
	return 0;
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
					     u64 select_idx, u64 val) {}
static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
						  u64 data, u64 select_idx) {}
static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
	return 0;
}
#define kvm_vcpu_has_pmu(vcpu)		({ false; })
static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}

#endif

#endif
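
The comment above kvm_pmu_update_vcpu_events() pins down its calling contract: before every vcpu run, with interrupts disabled. A minimal, hypothetical caller sketch follows; it is not part of this header, and the run-loop shape (function name, surrounding helpers) is assumed for illustration rather than taken from the actual KVM run loop.

/* Hypothetical sketch only: illustrates the intended calling order. */
static void vcpu_pmu_run_sketch(struct kvm_vcpu *vcpu)
{
	local_irq_disable();

	/* Snapshot the host/guest event split before entering the guest. */
	kvm_pmu_update_vcpu_events(vcpu);

	/* Update the virtual PMU overflow interrupt state before entry. */
	kvm_pmu_flush_hwstate(vcpu);

	/* ... enter and run the guest here ... */

	/* Update the overflow interrupt state again after the guest exits. */
	kvm_pmu_sync_hwstate(vcpu);

	local_irq_enable();

	/* If userspace needs to see a PMU overflow update, report it. */
	if (kvm_pmu_should_notify_user(vcpu))
		kvm_pmu_update_run(vcpu);
}

With CONFIG_HW_PERF_EVENTS disabled, the same sketch still compiles: every call resolves to one of the stubs in the #else branch and the whole function folds away.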