dtpm_cpu.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2020 Linaro Limited
 *
 * Author: Daniel Lezcano <[email protected]>
 *
 * The DTPM CPU is based on the energy model. It hooks the CPU into the
 * DTPM tree, which in turn updates the power numbers by propagating
 * them from the CPU energy model information up to the parents.
 *
 * The association between the power and the performance states allows
 * setting the power of the CPU at the OPP granularity.
 *
 * CPU hotplug is supported and the power numbers will be updated when
 * a CPU is hot plugged or unplugged.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/cpuhotplug.h>
#include <linux/dtpm.h>
#include <linux/energy_model.h>
#include <linux/of.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
struct dtpm_cpu {
        struct dtpm dtpm;
        struct freq_qos_request qos_req;
        int cpu;
};

static DEFINE_PER_CPU(struct dtpm_cpu *, dtpm_per_cpu);

static struct dtpm_cpu *to_dtpm_cpu(struct dtpm *dtpm)
{
        return container_of(dtpm, struct dtpm_cpu, dtpm);
}
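
/*
 * Walk the energy model table to find the highest performance state
 * whose aggregate power (per-state power times the number of online
 * CPUs in the perf domain) still fits within the requested limit, cap
 * the frequency there through the freq QoS request and return the
 * power limit actually granted.
 */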
static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
{
        struct dtpm_cpu *dtpm_cpu = to_dtpm_cpu(dtpm);
        struct em_perf_domain *pd = em_cpu_get(dtpm_cpu->cpu);
        struct cpumask cpus;
        unsigned long freq;
        u64 power;
        int i, nr_cpus;

        cpumask_and(&cpus, cpu_online_mask, to_cpumask(pd->cpus));
        nr_cpus = cpumask_weight(&cpus);

        for (i = 0; i < pd->nr_perf_states; i++) {
                power = pd->table[i].power * nr_cpus;
                if (power > power_limit)
                        break;
        }

        freq = pd->table[i - 1].frequency;

        freq_qos_update_request(&dtpm_cpu->qos_req, freq);

        power_limit = pd->table[i - 1].power * nr_cpus;

        return power_limit;
}
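
/*
 * Scale the perf state power by the ratio of the aggregate utilization
 * of the online CPUs in the perf domain to the CPU capacity, using a
 * 10-bit fixed point factor to avoid floating point arithmetic.
 */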
static u64 scale_pd_power_uw(struct cpumask *pd_mask, u64 power)
{
        unsigned long max, sum_util = 0;
        int cpu;

        /*
         * The capacity is the same for all the CPUs belonging to
         * the same perf domain.
         */
        max = arch_scale_cpu_capacity(cpumask_first(pd_mask));

        for_each_cpu_and(cpu, pd_mask, cpu_online_mask)
                sum_util += sched_cpu_util(cpu);

        return (power * ((sum_util << 10) / max)) >> 10;
}
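
/*
 * Compute the current power consumption of the perf domain: find the
 * first perf state whose frequency is not below the current one and
 * return its power scaled by the domain utilization.
 */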
static u64 get_pd_power_uw(struct dtpm *dtpm)
{
        struct dtpm_cpu *dtpm_cpu = to_dtpm_cpu(dtpm);
        struct em_perf_domain *pd;
        struct cpumask *pd_mask;
        unsigned long freq;
        int i;

        pd = em_cpu_get(dtpm_cpu->cpu);

        pd_mask = em_span_cpus(pd);

        freq = cpufreq_quick_get(dtpm_cpu->cpu);

        for (i = 0; i < pd->nr_perf_states; i++) {

                if (pd->table[i].frequency < freq)
                        continue;

                return scale_pd_power_uw(pd_mask, pd->table[i].power);
        }

        return 0;
}
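
/*
 * Refresh the node's power_min / power_max bounds from the first and
 * last energy model table entries, scaled by the number of online CPUs
 * in the perf domain.
 */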
static int update_pd_power_uw(struct dtpm *dtpm)
{
        struct dtpm_cpu *dtpm_cpu = to_dtpm_cpu(dtpm);
        struct em_perf_domain *em = em_cpu_get(dtpm_cpu->cpu);
        struct cpumask cpus;
        int nr_cpus;

        cpumask_and(&cpus, cpu_online_mask, to_cpumask(em->cpus));
        nr_cpus = cpumask_weight(&cpus);

        dtpm->power_min = em->table[0].power;
        dtpm->power_min *= nr_cpus;

        dtpm->power_max = em->table[em->nr_perf_states - 1].power;
        dtpm->power_max *= nr_cpus;

        return 0;
}
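
/*
 * Release callback: drop the freq QoS request if it is still active,
 * clear the per-CPU pointers for every CPU sharing the cpufreq policy
 * (dtpm_cpu->cpu can be reused as the iterator because the structure
 * is freed right after) and free the dtpm_cpu structure.
 */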
static void pd_release(struct dtpm *dtpm)
{
        struct dtpm_cpu *dtpm_cpu = to_dtpm_cpu(dtpm);
        struct cpufreq_policy *policy;

        if (freq_qos_request_active(&dtpm_cpu->qos_req))
                freq_qos_remove_request(&dtpm_cpu->qos_req);

        policy = cpufreq_cpu_get(dtpm_cpu->cpu);
        if (policy) {
                for_each_cpu(dtpm_cpu->cpu, policy->related_cpus)
                        per_cpu(dtpm_per_cpu, dtpm_cpu->cpu) = NULL;

                cpufreq_cpu_put(policy);
        }

        kfree(dtpm_cpu);
}
static struct dtpm_ops dtpm_ops = {
        .set_power_uw = set_pd_power_limit,
        .get_power_uw = get_pd_power_uw,
        .update_power_uw = update_pd_power_uw,
        .release = pd_release,
};
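
/*
 * CPU hotplug callbacks: re-evaluate the power numbers of the DTPM
 * node attached to the CPU when it goes offline or comes online. See
 * the comment in dtpm_cpu_init() for why two different hotplug states
 * are used.
 */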
static int cpuhp_dtpm_cpu_offline(unsigned int cpu)
{
        struct dtpm_cpu *dtpm_cpu;

        dtpm_cpu = per_cpu(dtpm_per_cpu, cpu);
        if (dtpm_cpu)
                dtpm_update_power(&dtpm_cpu->dtpm);

        return 0;
}

static int cpuhp_dtpm_cpu_online(unsigned int cpu)
{
        struct dtpm_cpu *dtpm_cpu;

        dtpm_cpu = per_cpu(dtpm_per_cpu, cpu);
        if (dtpm_cpu)
                return dtpm_update_power(&dtpm_cpu->dtpm);

        return 0;
}
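
/*
 * Create the DTPM node for the perf domain @cpu belongs to: allocate a
 * dtpm_cpu structure, publish it in the per-CPU pointers of all CPUs
 * sharing the cpufreq policy, register the node under @parent and add
 * a maximum frequency QoS request initialized to the highest perf
 * state. Nothing to do if the node already exists or if there is no
 * cpufreq policy for the CPU yet.
 */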
static int __dtpm_cpu_setup(int cpu, struct dtpm *parent)
{
        struct dtpm_cpu *dtpm_cpu;
        struct cpufreq_policy *policy;
        struct em_perf_domain *pd;
        char name[CPUFREQ_NAME_LEN];
        int ret = -ENOMEM;

        dtpm_cpu = per_cpu(dtpm_per_cpu, cpu);
        if (dtpm_cpu)
                return 0;

        policy = cpufreq_cpu_get(cpu);
        if (!policy)
                return 0;

        pd = em_cpu_get(cpu);
        if (!pd || em_is_artificial(pd)) {
                ret = -EINVAL;
                goto release_policy;
        }

        dtpm_cpu = kzalloc(sizeof(*dtpm_cpu), GFP_KERNEL);
        if (!dtpm_cpu) {
                ret = -ENOMEM;
                goto release_policy;
        }

        dtpm_init(&dtpm_cpu->dtpm, &dtpm_ops);
        dtpm_cpu->cpu = cpu;

        for_each_cpu(cpu, policy->related_cpus)
                per_cpu(dtpm_per_cpu, cpu) = dtpm_cpu;

        snprintf(name, sizeof(name), "cpu%d-cpufreq", dtpm_cpu->cpu);

        ret = dtpm_register(name, &dtpm_cpu->dtpm, parent);
        if (ret)
                goto out_kfree_dtpm_cpu;

        ret = freq_qos_add_request(&policy->constraints,
                                   &dtpm_cpu->qos_req, FREQ_QOS_MAX,
                                   pd->table[pd->nr_perf_states - 1].frequency);
        if (ret)
                goto out_dtpm_unregister;

        cpufreq_cpu_put(policy);

        return 0;

out_dtpm_unregister:
        dtpm_unregister(&dtpm_cpu->dtpm);
        dtpm_cpu = NULL;

out_kfree_dtpm_cpu:
        for_each_cpu(cpu, policy->related_cpus)
                per_cpu(dtpm_per_cpu, cpu) = NULL;
        kfree(dtpm_cpu);

release_policy:
        cpufreq_cpu_put(policy);
        return ret;
}
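
/*
 * DTPM subsystem setup callback: map the device tree CPU node to a
 * logical CPU number and hook it into the DTPM tree under @dtpm.
 */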
static int dtpm_cpu_setup(struct dtpm *dtpm, struct device_node *np)
{
        int cpu;

        cpu = of_cpu_node_to_id(np);
        if (cpu < 0)
                return 0;

        return __dtpm_cpu_setup(cpu, dtpm);
}
static int dtpm_cpu_init(void)
{
        int ret;

        /*
         * The callbacks at CPU hotplug time are calling
         * dtpm_update_power(), which in turn calls
         * update_pd_power_uw().
         *
         * The function update_pd_power_uw() uses the online mask to
         * figure out the power consumption limits.
         *
         * At CPUHP_AP_ONLINE_DYN, the CPU is present in the CPU
         * online mask when the cpuhp_dtpm_cpu_online function is
         * called, but the CPU is still in the online mask for the
         * tear down callback. So the power cannot be updated when
         * the CPU is unplugged.
         *
         * At CPUHP_AP_DTPM_CPU_DEAD, the situation is the opposite of
         * the above: the CPU online mask is not up to date when the
         * CPU is plugged in.
         *
         * For this reason, we need to call the online and offline
         * callbacks at different moments, when the CPU online mask is
         * consistent with the power numbers we want to update.
         */
        ret = cpuhp_setup_state(CPUHP_AP_DTPM_CPU_DEAD, "dtpm_cpu:offline",
                                NULL, cpuhp_dtpm_cpu_offline);
        if (ret < 0)
                return ret;

        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "dtpm_cpu:online",
                                cpuhp_dtpm_cpu_online, NULL);
        if (ret < 0)
                return ret;

        return 0;
}
static void dtpm_cpu_exit(void)
{
        cpuhp_remove_state_nocalls(CPUHP_AP_ONLINE_DYN);
        cpuhp_remove_state_nocalls(CPUHP_AP_DTPM_CPU_DEAD);
}
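
/*
 * Subsystem descriptor picked up by the DTPM core to initialize, tear
 * down and populate the CPU part of the DTPM tree.
 */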
struct dtpm_subsys_ops dtpm_cpu_ops = {
        .name = KBUILD_MODNAME,
        .init = dtpm_cpu_init,
        .exit = dtpm_cpu_exit,
        .setup = dtpm_cpu_setup,
};