// SPDX-License-Identifier: GPL-2.0-only
/*
 *  drivers/cpufreq/cpufreq_stats.c
 *
 *  Copyright (C) 2003-2004 Venkatesh Pallipadi <[email protected]>.
 *  (C) 2004 Zou Nan hai <[email protected]>.
 */

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/sched/clock.h>
#include <linux/slab.h>

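/*
 * Per-policy statistics: time spent in each frequency, the number of
 * frequency transitions and a max_state x max_state transition table,
 * exposed through the "stats" sysfs group of the policy.
 */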
struct cpufreq_stats {
	unsigned int total_trans;
	unsigned long long last_time;
	unsigned int max_state;
	unsigned int state_num;
	unsigned int last_index;
	u64 *time_in_state;
	unsigned int *freq_table;
	unsigned int *trans_table;

	/* Deferred reset */
	unsigned int reset_pending;
	unsigned long long reset_time;
};

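/* Charge the time elapsed since @time to the frequency at last_index. */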
static void cpufreq_stats_update(struct cpufreq_stats *stats,
				 unsigned long long time)
{
	unsigned long long cur_time = local_clock();

	stats->time_in_state[stats->last_index] += cur_time - time;
	stats->last_time = cur_time;
}

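/*
 * Perform a deferred reset: zero the counters and tables, then charge the
 * time elapsed since the reset was requested to the current frequency.
 */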
static void cpufreq_stats_reset_table(struct cpufreq_stats *stats)
{
	unsigned int count = stats->max_state;

	memset(stats->time_in_state, 0, count * sizeof(u64));
	memset(stats->trans_table, 0, count * count * sizeof(int));
	stats->last_time = local_clock();
	stats->total_trans = 0;

	/* Adjust for the time elapsed since reset was requested */
	WRITE_ONCE(stats->reset_pending, 0);
	/*
	 * Prevent the reset_time read from being reordered before the
	 * reset_pending accesses in cpufreq_stats_record_transition().
	 */
	smp_rmb();
	cpufreq_stats_update(stats, READ_ONCE(stats->reset_time));
}

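/* sysfs "total_trans": total number of frequency transitions so far. */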
static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
{
	struct cpufreq_stats *stats = policy->stats;

	if (READ_ONCE(stats->reset_pending))
		return sprintf(buf, "%d\n", 0);
	else
		return sprintf(buf, "%u\n", stats->total_trans);
}
cpufreq_freq_attr_ro(total_trans);

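/*
 * sysfs "time_in_state": one "<frequency> <time>" line per state, with the
 * time converted from nanoseconds by nsec_to_clock_t().
 */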
static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
{
	struct cpufreq_stats *stats = policy->stats;
	bool pending = READ_ONCE(stats->reset_pending);
	unsigned long long time;
	ssize_t len = 0;
	int i;

	for (i = 0; i < stats->state_num; i++) {
		if (pending) {
			if (i == stats->last_index) {
				/*
				 * Prevent the reset_time read from occurring
				 * before the reset_pending read above.
				 */
				smp_rmb();
				time = local_clock() - READ_ONCE(stats->reset_time);
			} else {
				time = 0;
			}
		} else {
			time = stats->time_in_state[i];
			if (i == stats->last_index)
				time += local_clock() - stats->last_time;
		}

		len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i],
			       nsec_to_clock_t(time));
	}
	return len;
}
cpufreq_freq_attr_ro(time_in_state);

/* We don't care what is written to the attribute */
static ssize_t store_reset(struct cpufreq_policy *policy, const char *buf,
			   size_t count)
{
	struct cpufreq_stats *stats = policy->stats;

	/*
	 * Defer resetting of stats to cpufreq_stats_record_transition() to
	 * avoid races.
	 */
	WRITE_ONCE(stats->reset_time, local_clock());
	/*
	 * The memory barrier below is to prevent the readers of reset_time from
	 * seeing a stale or partially updated value.
	 */
	smp_wmb();
	WRITE_ONCE(stats->reset_pending, 1);

	return count;
}
cpufreq_freq_attr_wo(reset);

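/*
 * sysfs "trans_table": matrix of transition counts, with rows indexed by
 * the previous frequency and columns by the new one. Returns -EFBIG once
 * the table no longer fits in a single PAGE_SIZE buffer.
 */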
static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
{
	struct cpufreq_stats *stats = policy->stats;
	bool pending = READ_ONCE(stats->reset_pending);
	ssize_t len = 0;
	int i, j, count;

	len += scnprintf(buf + len, PAGE_SIZE - len, "   From  :    To\n");
	len += scnprintf(buf + len, PAGE_SIZE - len, "         : ");
	for (i = 0; i < stats->state_num; i++) {
		if (len >= PAGE_SIZE - 1)
			break;
		len += scnprintf(buf + len, PAGE_SIZE - len, "%9u ",
				 stats->freq_table[i]);
	}
	if (len >= PAGE_SIZE - 1)
		return PAGE_SIZE - 1;
	len += scnprintf(buf + len, PAGE_SIZE - len, "\n");

	for (i = 0; i < stats->state_num; i++) {
		if (len >= PAGE_SIZE - 1)
			break;

		len += scnprintf(buf + len, PAGE_SIZE - len, "%9u: ",
				 stats->freq_table[i]);

		for (j = 0; j < stats->state_num; j++) {
			if (len >= PAGE_SIZE - 1)
				break;

			if (pending)
				count = 0;
			else
				count = stats->trans_table[i * stats->max_state + j];

			len += scnprintf(buf + len, PAGE_SIZE - len, "%9u ", count);
		}
		if (len >= PAGE_SIZE - 1)
			break;
		len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
	}

	if (len >= PAGE_SIZE - 1) {
		pr_warn_once("cpufreq transition table exceeds PAGE_SIZE. Disabling\n");
		return -EFBIG;
	}
	return len;
}
cpufreq_freq_attr_ro(trans_table);

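/* Attributes grouped under the "stats" directory of each policy. */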
static struct attribute *default_attrs[] = {
	&total_trans.attr,
	&time_in_state.attr,
	&reset.attr,
	&trans_table.attr,
	NULL
};
static const struct attribute_group stats_attr_group = {
	.attrs = default_attrs,
	.name = "stats"
};

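/* Map @freq to its index in stats->freq_table, or return -1 if it is absent. */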
static int freq_table_get_index(struct cpufreq_stats *stats, unsigned int freq)
{
	int index;

	for (index = 0; index < stats->max_state; index++)
		if (stats->freq_table[index] == freq)
			return index;
	return -1;
}

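/* Remove the "stats" sysfs group of @policy and free its statistics. */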
void cpufreq_stats_free_table(struct cpufreq_policy *policy)
{
	struct cpufreq_stats *stats = policy->stats;

	/* Already freed */
	if (!stats)
		return;

	pr_debug("%s: Free stats table\n", __func__);

	sysfs_remove_group(&policy->kobj, &stats_attr_group);
	kfree(stats->time_in_state);
	kfree(stats);
	policy->stats = NULL;
}

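/*
 * Allocate the statistics of @policy (time_in_state, freq_table and
 * trans_table share a single allocation) and create the "stats" sysfs group.
 */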
void cpufreq_stats_create_table(struct cpufreq_policy *policy)
{
	unsigned int i = 0, count;
	struct cpufreq_stats *stats;
	unsigned int alloc_size;
	struct cpufreq_frequency_table *pos;

	count = cpufreq_table_count_valid_entries(policy);
	if (!count)
		return;

	/* stats already initialized */
	if (policy->stats)
		return;

	stats = kzalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return;

	alloc_size = count * sizeof(int) + count * sizeof(u64);
	alloc_size += count * count * sizeof(int);

	/* Allocate memory for time_in_state/freq_table/trans_table in one go */
	stats->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
	if (!stats->time_in_state)
		goto free_stat;

	stats->freq_table = (unsigned int *)(stats->time_in_state + count);
	stats->trans_table = stats->freq_table + count;

	stats->max_state = count;

	/* Find valid-unique entries */
	cpufreq_for_each_valid_entry(pos, policy->freq_table)
		if (freq_table_get_index(stats, pos->frequency) == -1)
			stats->freq_table[i++] = pos->frequency;

	stats->state_num = i;
	stats->last_time = local_clock();
	stats->last_index = freq_table_get_index(stats, policy->cur);

	policy->stats = stats;
	if (!sysfs_create_group(&policy->kobj, &stats_attr_group))
		return;

	/* We failed, release resources */
	policy->stats = NULL;
	kfree(stats->time_in_state);
free_stat:
	kfree(stats);
}

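/*
 * Record a transition to @new_freq: charge the time spent at the previous
 * frequency and increment the matching trans_table entry and total_trans.
 */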
void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
				     unsigned int new_freq)
{
	struct cpufreq_stats *stats = policy->stats;
	int old_index, new_index;

	if (unlikely(!stats))
		return;

	if (unlikely(READ_ONCE(stats->reset_pending)))
		cpufreq_stats_reset_table(stats);

	old_index = stats->last_index;
	new_index = freq_table_get_index(stats, new_freq);

	/* We can't do stats->time_in_state[-1]= .. */
	if (unlikely(old_index == -1 || new_index == -1 || old_index == new_index))
		return;

	cpufreq_stats_update(stats, stats->last_time);

	stats->last_index = new_index;
	stats->trans_table[old_index * stats->max_state + new_index]++;
	stats->total_trans++;
}