processor_thermal.c 7.2 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * processor_thermal.c - Passive cooling submodule of the ACPI processor driver
  4. *
  5. * Copyright (C) 2001, 2002 Andy Grover <[email protected]>
  6. * Copyright (C) 2001, 2002 Paul Diefenbaugh <[email protected]>
  7. * Copyright (C) 2004 Dominik Brodowski <[email protected]>
  8. * Copyright (C) 2004 Anil S Keshavamurthy <[email protected]>
  9. * - Added processor hotplug support
  10. */
  11. #include <linux/kernel.h>
  12. #include <linux/module.h>
  13. #include <linux/init.h>
  14. #include <linux/cpufreq.h>
  15. #include <linux/acpi.h>
  16. #include <acpi/processor.h>
  17. #include <linux/uaccess.h>
  18. #ifdef CONFIG_CPU_FREQ
  19. /* If a passive cooling situation is detected, primarily CPUfreq is used, as it
  20. * offers (in most cases) voltage scaling in addition to frequency scaling, and
  21. * thus a cubic (instead of linear) reduction of energy. Also, we allow for
  22. * _any_ cpufreq driver and not only the acpi-cpufreq driver.
  23. */
  24. #define CPUFREQ_THERMAL_MIN_STEP 0
  25. #define CPUFREQ_THERMAL_MAX_STEP 3
  26. static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);
  27. #define reduction_pctg(cpu) \
  28. per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu))
  29. /*
  30. * Emulate "per package data" using per cpu data (which should really be
  31. * provided elsewhere)
  32. *
  33. * Note we can lose a CPU on cpu hotunplug, in this case we forget the state
  34. * temporarily. Fortunately that's not a big issue here (I hope)
  35. */
  36. static int phys_package_first_cpu(int cpu)
  37. {
  38. int i;
  39. int id = topology_physical_package_id(cpu);
  40. for_each_online_cpu(i)
  41. if (topology_physical_package_id(i) == id)
  42. return i;
  43. return 0;
  44. }
  45. static int cpu_has_cpufreq(unsigned int cpu)
  46. {
  47. struct cpufreq_policy *policy;
  48. if (!acpi_processor_cpufreq_init)
  49. return 0;
  50. policy = cpufreq_cpu_get(cpu);
  51. if (policy) {
  52. cpufreq_cpu_put(policy);
  53. return 1;
  54. }
  55. return 0;
  56. }
  57. static int cpufreq_get_max_state(unsigned int cpu)
  58. {
  59. if (!cpu_has_cpufreq(cpu))
  60. return 0;
  61. return CPUFREQ_THERMAL_MAX_STEP;
  62. }
  63. static int cpufreq_get_cur_state(unsigned int cpu)
  64. {
  65. if (!cpu_has_cpufreq(cpu))
  66. return 0;
  67. return reduction_pctg(cpu);
  68. }
  69. static int cpufreq_set_cur_state(unsigned int cpu, int state)
  70. {
  71. struct cpufreq_policy *policy;
  72. struct acpi_processor *pr;
  73. unsigned long max_freq;
  74. int i, ret;
  75. if (!cpu_has_cpufreq(cpu))
  76. return 0;
  77. reduction_pctg(cpu) = state;
  78. /*
  79. * Update all the CPUs in the same package because they all
  80. * contribute to the temperature and often share the same
  81. * frequency.
  82. */
  83. for_each_online_cpu(i) {
  84. if (topology_physical_package_id(i) !=
  85. topology_physical_package_id(cpu))
  86. continue;
  87. pr = per_cpu(processors, i);
  88. if (unlikely(!freq_qos_request_active(&pr->thermal_req)))
  89. continue;
  90. policy = cpufreq_cpu_get(i);
  91. if (!policy)
  92. return -EINVAL;
  93. max_freq = (policy->cpuinfo.max_freq * (100 - reduction_pctg(i) * 20)) / 100;
  94. cpufreq_cpu_put(policy);
  95. ret = freq_qos_update_request(&pr->thermal_req, max_freq);
  96. if (ret < 0) {
  97. pr_warn("Failed to update thermal freq constraint: CPU%d (%d)\n",
  98. pr->id, ret);
  99. }
  100. }
  101. return 0;
  102. }
  103. void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy)
  104. {
  105. unsigned int cpu;
  106. for_each_cpu(cpu, policy->related_cpus) {
  107. struct acpi_processor *pr = per_cpu(processors, cpu);
  108. int ret;
  109. if (!pr)
  110. continue;
  111. ret = freq_qos_add_request(&policy->constraints,
  112. &pr->thermal_req,
  113. FREQ_QOS_MAX, INT_MAX);
  114. if (ret < 0)
  115. pr_err("Failed to add freq constraint for CPU%d (%d)\n",
  116. cpu, ret);
  117. }
  118. }
  119. void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
  120. {
  121. unsigned int cpu;
  122. for_each_cpu(cpu, policy->related_cpus) {
  123. struct acpi_processor *pr = per_cpu(processors, cpu);
  124. if (pr)
  125. freq_qos_remove_request(&pr->thermal_req);
  126. }
  127. }
  128. #else /* ! CONFIG_CPU_FREQ */
/*
 * Without CONFIG_CPU_FREQ, frequency-based passive cooling is unavailable:
 * report zero states and accept (and ignore) any state change.
 */
static int cpufreq_get_max_state(unsigned int cpu)
{
	return 0;
}

static int cpufreq_get_cur_state(unsigned int cpu)
{
	return 0;
}

static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
	return 0;
}
  141. #endif
  142. /* thermal cooling device callbacks */
  143. static int acpi_processor_max_state(struct acpi_processor *pr)
  144. {
  145. int max_state = 0;
  146. /*
  147. * There exists four states according to
  148. * cpufreq_thermal_reduction_pctg. 0, 1, 2, 3
  149. */
  150. max_state += cpufreq_get_max_state(pr->id);
  151. if (pr->flags.throttling)
  152. max_state += (pr->throttling.state_count -1);
  153. return max_state;
  154. }
  155. static int
  156. processor_get_max_state(struct thermal_cooling_device *cdev,
  157. unsigned long *state)
  158. {
  159. struct acpi_device *device = cdev->devdata;
  160. struct acpi_processor *pr;
  161. if (!device)
  162. return -EINVAL;
  163. pr = acpi_driver_data(device);
  164. if (!pr)
  165. return -EINVAL;
  166. *state = acpi_processor_max_state(pr);
  167. return 0;
  168. }
  169. static int
  170. processor_get_cur_state(struct thermal_cooling_device *cdev,
  171. unsigned long *cur_state)
  172. {
  173. struct acpi_device *device = cdev->devdata;
  174. struct acpi_processor *pr;
  175. if (!device)
  176. return -EINVAL;
  177. pr = acpi_driver_data(device);
  178. if (!pr)
  179. return -EINVAL;
  180. *cur_state = cpufreq_get_cur_state(pr->id);
  181. if (pr->flags.throttling)
  182. *cur_state += pr->throttling.state;
  183. return 0;
  184. }
  185. static int
  186. processor_set_cur_state(struct thermal_cooling_device *cdev,
  187. unsigned long state)
  188. {
  189. struct acpi_device *device = cdev->devdata;
  190. struct acpi_processor *pr;
  191. int result = 0;
  192. int max_pstate;
  193. if (!device)
  194. return -EINVAL;
  195. pr = acpi_driver_data(device);
  196. if (!pr)
  197. return -EINVAL;
  198. max_pstate = cpufreq_get_max_state(pr->id);
  199. if (state > acpi_processor_max_state(pr))
  200. return -EINVAL;
  201. if (state <= max_pstate) {
  202. if (pr->flags.throttling && pr->throttling.state)
  203. result = acpi_processor_set_throttling(pr, 0, false);
  204. cpufreq_set_cur_state(pr->id, state);
  205. } else {
  206. cpufreq_set_cur_state(pr->id, max_pstate);
  207. result = acpi_processor_set_throttling(pr,
  208. state - max_pstate, false);
  209. }
  210. return result;
  211. }
/* Callbacks hooking the processor into the generic thermal framework. */
const struct thermal_cooling_device_ops processor_cooling_ops = {
	.get_max_state = processor_get_max_state,
	.get_cur_state = processor_get_cur_state,
	.set_cur_state = processor_set_cur_state,
};
  217. int acpi_processor_thermal_init(struct acpi_processor *pr,
  218. struct acpi_device *device)
  219. {
  220. int result = 0;
  221. pr->cdev = thermal_cooling_device_register("Processor", device,
  222. &processor_cooling_ops);
  223. if (IS_ERR(pr->cdev)) {
  224. result = PTR_ERR(pr->cdev);
  225. return result;
  226. }
  227. dev_dbg(&device->dev, "registered as cooling_device%d\n",
  228. pr->cdev->id);
  229. result = sysfs_create_link(&device->dev.kobj,
  230. &pr->cdev->device.kobj,
  231. "thermal_cooling");
  232. if (result) {
  233. dev_err(&device->dev,
  234. "Failed to create sysfs link 'thermal_cooling'\n");
  235. goto err_thermal_unregister;
  236. }
  237. result = sysfs_create_link(&pr->cdev->device.kobj,
  238. &device->dev.kobj,
  239. "device");
  240. if (result) {
  241. dev_err(&pr->cdev->device,
  242. "Failed to create sysfs link 'device'\n");
  243. goto err_remove_sysfs_thermal;
  244. }
  245. return 0;
  246. err_remove_sysfs_thermal:
  247. sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
  248. err_thermal_unregister:
  249. thermal_cooling_device_unregister(pr->cdev);
  250. return result;
  251. }
  252. void acpi_processor_thermal_exit(struct acpi_processor *pr,
  253. struct acpi_device *device)
  254. {
  255. if (pr->cdev) {
  256. sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
  257. sysfs_remove_link(&pr->cdev->device.kobj, "device");
  258. thermal_cooling_device_unregister(pr->cdev);
  259. pr->cdev = NULL;
  260. }
  261. }