perfctr-watchdog.c 4.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * local apic based NMI watchdog for various CPUs.
  4. *
  5. * This file also handles reservation of performance counters for coordination
  6. * with other users.
  7. *
  8. * Note that these events normally don't tick when the CPU idles. This means
  9. * the frequency varies with CPU load.
  10. *
  11. * Original code for K7/P6 written by Keith Owens
  12. *
  13. */
  14. #include <linux/percpu.h>
  15. #include <linux/export.h>
  16. #include <linux/kernel.h>
  17. #include <linux/bitops.h>
  18. #include <linux/smp.h>
  19. #include <asm/nmi.h>
  20. #include <linux/kprobes.h>
  21. #include <asm/apic.h>
  22. #include <asm/perf_event.h>
  23. /*
  24. * this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's
  25. * offset from MSR_P4_BSU_ESCR0.
  26. *
  27. * It will be the max for all platforms (for now)
  28. */
  29. #define NMI_MAX_COUNTER_BITS 66
  30. /*
  31. * perfctr_nmi_owner tracks the ownership of the perfctr registers:
 * evntsel_nmi_owner tracks the ownership of the event selection
  33. * - different performance counters/ event selection may be reserved for
  34. * different subsystems this reservation system just tries to coordinate
  35. * things a little
  36. */
  37. static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS);
  38. static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS);
  39. /* converts an msr to an appropriate reservation bit */
  40. static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
  41. {
  42. /* returns the bit offset of the performance counter register */
  43. switch (boot_cpu_data.x86_vendor) {
  44. case X86_VENDOR_HYGON:
  45. case X86_VENDOR_AMD:
  46. if (msr >= MSR_F15H_PERF_CTR)
  47. return (msr - MSR_F15H_PERF_CTR) >> 1;
  48. return msr - MSR_K7_PERFCTR0;
  49. case X86_VENDOR_INTEL:
  50. if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
  51. return msr - MSR_ARCH_PERFMON_PERFCTR0;
  52. switch (boot_cpu_data.x86) {
  53. case 6:
  54. return msr - MSR_P6_PERFCTR0;
  55. case 11:
  56. return msr - MSR_KNC_PERFCTR0;
  57. case 15:
  58. return msr - MSR_P4_BPU_PERFCTR0;
  59. }
  60. break;
  61. case X86_VENDOR_ZHAOXIN:
  62. case X86_VENDOR_CENTAUR:
  63. return msr - MSR_ARCH_PERFMON_PERFCTR0;
  64. }
  65. return 0;
  66. }
  67. /*
  68. * converts an msr to an appropriate reservation bit
  69. * returns the bit offset of the event selection register
  70. */
  71. static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
  72. {
  73. /* returns the bit offset of the event selection register */
  74. switch (boot_cpu_data.x86_vendor) {
  75. case X86_VENDOR_HYGON:
  76. case X86_VENDOR_AMD:
  77. if (msr >= MSR_F15H_PERF_CTL)
  78. return (msr - MSR_F15H_PERF_CTL) >> 1;
  79. return msr - MSR_K7_EVNTSEL0;
  80. case X86_VENDOR_INTEL:
  81. if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
  82. return msr - MSR_ARCH_PERFMON_EVENTSEL0;
  83. switch (boot_cpu_data.x86) {
  84. case 6:
  85. return msr - MSR_P6_EVNTSEL0;
  86. case 11:
  87. return msr - MSR_KNC_EVNTSEL0;
  88. case 15:
  89. return msr - MSR_P4_BSU_ESCR0;
  90. }
  91. break;
  92. case X86_VENDOR_ZHAOXIN:
  93. case X86_VENDOR_CENTAUR:
  94. return msr - MSR_ARCH_PERFMON_EVENTSEL0;
  95. }
  96. return 0;
  97. }
  98. int reserve_perfctr_nmi(unsigned int msr)
  99. {
  100. unsigned int counter;
  101. counter = nmi_perfctr_msr_to_bit(msr);
  102. /* register not managed by the allocator? */
  103. if (counter > NMI_MAX_COUNTER_BITS)
  104. return 1;
  105. if (!test_and_set_bit(counter, perfctr_nmi_owner))
  106. return 1;
  107. return 0;
  108. }
  109. EXPORT_SYMBOL(reserve_perfctr_nmi);
  110. void release_perfctr_nmi(unsigned int msr)
  111. {
  112. unsigned int counter;
  113. counter = nmi_perfctr_msr_to_bit(msr);
  114. /* register not managed by the allocator? */
  115. if (counter > NMI_MAX_COUNTER_BITS)
  116. return;
  117. clear_bit(counter, perfctr_nmi_owner);
  118. }
  119. EXPORT_SYMBOL(release_perfctr_nmi);
  120. int reserve_evntsel_nmi(unsigned int msr)
  121. {
  122. unsigned int counter;
  123. counter = nmi_evntsel_msr_to_bit(msr);
  124. /* register not managed by the allocator? */
  125. if (counter > NMI_MAX_COUNTER_BITS)
  126. return 1;
  127. if (!test_and_set_bit(counter, evntsel_nmi_owner))
  128. return 1;
  129. return 0;
  130. }
  131. EXPORT_SYMBOL(reserve_evntsel_nmi);
  132. void release_evntsel_nmi(unsigned int msr)
  133. {
  134. unsigned int counter;
  135. counter = nmi_evntsel_msr_to_bit(msr);
  136. /* register not managed by the allocator? */
  137. if (counter > NMI_MAX_COUNTER_BITS)
  138. return;
  139. clear_bit(counter, evntsel_nmi_owner);
  140. }
  141. EXPORT_SYMBOL(release_evntsel_nmi);