qspinlock_stat.h

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Authors: Waiman Long <[email protected]>
 */

#include "lock_events.h"

#ifdef CONFIG_LOCK_EVENT_COUNTS
#ifdef CONFIG_PARAVIRT_SPINLOCKS

/*
 * Collect pvqspinlock locking event counts
 */
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/fs.h>
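
/*
 * EVENT_COUNT(ev) pastes the LOCKEVENT_ prefix onto an event name to form
 * the enum index into the per-cpu lockevents[] array.
 */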
#define EVENT_COUNT(ev)	lockevents[LOCKEVENT_ ## ev]

/*
 * PV specific per-cpu counter
 */
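/*
 * pv_kick_time holds, per CPU, the sched_clock() timestamp taken just
 * before that CPU was kicked: __pv_kick() stamps it on the target CPU and
 * __pv_wait() consumes it to compute the wakeup latency.
 */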
static DEFINE_PER_CPU(u64, pv_kick_time);

/*
 * Function to read and return the PV qspinlock counts.
 *
 * The following counters are handled specially:
 * 1. pv_latency_kick
 *    Average kick latency (ns) = pv_latency_kick/pv_kick_unlock
 * 2. pv_latency_wake
 *    Average wake latency (ns) = pv_latency_wake/pv_kick_wake
 * 3. pv_hash_hops
 *    Average hops/hash = pv_hash_hops/pv_kick_unlock
 */
ssize_t lockevent_read(struct file *file, char __user *user_buf,
		       size_t count, loff_t *ppos)
{
	char buf[64];
	int cpu, id, len;
	u64 sum = 0, kicks = 0;

	/*
	 * Get the counter ID stored in file->f_inode->i_private
	 */
	id = (long)file_inode(file)->i_private;

	if (id >= lockevent_num)
		return -EBADF;

	for_each_possible_cpu(cpu) {
		sum += per_cpu(lockevents[id], cpu);
		/*
		 * Need to sum additional counters for some of them
		 */
		switch (id) {

		case LOCKEVENT_pv_latency_kick:
		case LOCKEVENT_pv_hash_hops:
			kicks += per_cpu(EVENT_COUNT(pv_kick_unlock), cpu);
			break;

		case LOCKEVENT_pv_latency_wake:
			kicks += per_cpu(EVENT_COUNT(pv_kick_wake), cpu);
			break;
		}
	}
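
	/*
	 * pv_hash_hops is reported as the average number of hash hops per
	 * pv_kick_unlock, printed as a X.XX decimal: do_div() leaves the
	 * integer quotient in sum and returns the remainder, which is then
	 * scaled to hundredths and rounded.
	 */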
	if (id == LOCKEVENT_pv_hash_hops) {
		u64 frac = 0;

		if (kicks) {
			frac = 100ULL * do_div(sum, kicks);
			frac = DIV_ROUND_CLOSEST_ULL(frac, kicks);
		}

		/*
		 * Return a X.XX decimal number
		 */
		len = snprintf(buf, sizeof(buf) - 1, "%llu.%02llu\n",
			       sum, frac);
	} else {
		/*
		 * Round to the nearest ns
		 */
		if ((id == LOCKEVENT_pv_latency_kick) ||
		    (id == LOCKEVENT_pv_latency_wake)) {
			if (kicks)
				sum = DIV_ROUND_CLOSEST_ULL(sum, kicks);
		}
		len = snprintf(buf, sizeof(buf) - 1, "%llu\n", sum);
	}

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

/*
 * PV hash hop count
 */
static inline void lockevent_pv_hop(int hopcnt)
{
	this_cpu_add(EVENT_COUNT(pv_hash_hops), hopcnt);
}

/*
 * Replacement function for pv_kick()
 */
static inline void __pv_kick(int cpu)
{
	u64 start = sched_clock();

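	/*
	 * Stamp the target CPU's pv_kick_time slot so that the kicked CPU
	 * can compute its wakeup latency in __pv_wait().
	 */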
	per_cpu(pv_kick_time, cpu) = start;
	pv_kick(cpu);
	this_cpu_add(EVENT_COUNT(pv_latency_kick), sched_clock() - start);
}

/*
 * Replacement function for pv_wait()
 */
static inline void __pv_wait(u8 *ptr, u8 val)
{
	u64 *pkick_time = this_cpu_ptr(&pv_kick_time);

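	/*
	 * Clear our pv_kick_time before sleeping; a non-zero value after
	 * pv_wait() returns means __pv_kick() has stamped it, i.e. this
	 * CPU was actually kicked and the wake latency can be recorded.
	 */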
	*pkick_time = 0;
	pv_wait(ptr, val);
	if (*pkick_time) {
		this_cpu_add(EVENT_COUNT(pv_latency_wake),
			     sched_clock() - *pkick_time);
		lockevent_inc(pv_kick_wake);
	}
}
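
/*
 * Shadow pv_kick()/pv_wait() so that paravirt qspinlock code compiled
 * after this header is included calls the instrumented wrappers above.
 */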
#define pv_kick(c)	__pv_kick(c)
#define pv_wait(p, v)	__pv_wait(p, v)

#endif /* CONFIG_PARAVIRT_SPINLOCKS */

#else /* CONFIG_LOCK_EVENT_COUNTS */

static inline void lockevent_pv_hop(int hopcnt)	{ }

#endif /* CONFIG_LOCK_EVENT_COUNTS */