// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/devfreq/governor_passive.c
 *
 * Copyright (C) 2016 Samsung Electronics
 * Author: Chanwoo Choi <[email protected]>
 * Author: MyungJoo Ham <[email protected]>
 */

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/devfreq.h>
#include <linux/units.h>
#include "governor.h"

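/*
 * Usage sketch (illustrative, not part of this file): a consumer driver
 * would typically fill a struct devfreq_passive_data and hand it to the
 * devfreq core together with the "passive" governor name, e.g.:
 *
 *	static struct devfreq_passive_data passive_data = {
 *		.parent = parent_devfreq,		// devfreq device to follow
 *		.parent_type = DEVFREQ_PARENT_DEV,	// or CPUFREQ_PARENT_DEV
 *	};
 *
 *	devfreq = devm_devfreq_add_device(dev, &profile,
 *					  DEVFREQ_GOV_PASSIVE, &passive_data);
 *
 * Here "dev", "profile" and "parent_devfreq" are placeholders supplied by
 * the consumer driver.
 */
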
static struct devfreq_cpu_data *
get_parent_cpu_data(struct devfreq_passive_data *p_data,
		    struct cpufreq_policy *policy)
{
	struct devfreq_cpu_data *parent_cpu_data;

	if (!p_data || !policy)
		return NULL;

	list_for_each_entry(parent_cpu_data, &p_data->cpu_data_list, node)
		if (parent_cpu_data->first_cpu == cpumask_first(policy->related_cpus))
			return parent_cpu_data;

	return NULL;
}

static void delete_parent_cpu_data(struct devfreq_passive_data *p_data)
{
	struct devfreq_cpu_data *parent_cpu_data, *tmp;

	list_for_each_entry_safe(parent_cpu_data, tmp, &p_data->cpu_data_list, node) {
		list_del(&parent_cpu_data->node);

		if (parent_cpu_data->opp_table)
			dev_pm_opp_put_opp_table(parent_cpu_data->opp_table);

		kfree(parent_cpu_data);
	}
}

static unsigned long get_target_freq_by_required_opp(struct device *p_dev,
						struct opp_table *p_opp_table,
						struct opp_table *opp_table,
						unsigned long *freq)
{
	struct dev_pm_opp *opp = NULL, *p_opp = NULL;
	unsigned long target_freq;

	if (!p_dev || !p_opp_table || !opp_table || !freq)
		return 0;

	p_opp = devfreq_recommended_opp(p_dev, freq, 0);
	if (IS_ERR(p_opp))
		return 0;

	opp = dev_pm_opp_xlate_required_opp(p_opp_table, opp_table, p_opp);
	dev_pm_opp_put(p_opp);

	if (IS_ERR(opp))
		return 0;

	target_freq = dev_pm_opp_get_freq(opp);
	dev_pm_opp_put(opp);

	return target_freq;
}

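/*
 * For a cpufreq parent, the highest demand across all online CPUs wins.
 * Each CPU's current frequency is first translated through required-opps;
 * if that mapping is missing, the code below falls back to linear
 * interpolation. Illustrative numbers (not from this file): a CPU running
 * at 1250 MHz within a 500-2000 MHz range gives cpu_percent = 50, so a
 * device spanning 100-400 MHz is asked for
 * 100 + (400 - 100) * 50 / 100 = 250 MHz.
 */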
static int get_target_freq_with_cpufreq(struct devfreq *devfreq,
					unsigned long *target_freq)
{
	struct devfreq_passive_data *p_data =
				(struct devfreq_passive_data *)devfreq->data;
	struct devfreq_cpu_data *parent_cpu_data;
	struct cpufreq_policy *policy;
	unsigned long cpu, cpu_cur, cpu_min, cpu_max, cpu_percent;
	unsigned long dev_min, dev_max;
	unsigned long freq = 0;
	int ret = 0;

	for_each_online_cpu(cpu) {
		policy = cpufreq_cpu_get(cpu);
		if (!policy) {
			ret = -EINVAL;
			continue;
		}

		parent_cpu_data = get_parent_cpu_data(p_data, policy);
		if (!parent_cpu_data) {
			cpufreq_cpu_put(policy);
			continue;
		}

		/* Get target freq via required opps */
		cpu_cur = parent_cpu_data->cur_freq * HZ_PER_KHZ;
		freq = get_target_freq_by_required_opp(parent_cpu_data->dev,
					parent_cpu_data->opp_table,
					devfreq->opp_table, &cpu_cur);
		if (freq) {
			*target_freq = max(freq, *target_freq);
			cpufreq_cpu_put(policy);
			continue;
		}

		/* Use interpolation if required opps are not available */
		devfreq_get_freq_range(devfreq, &dev_min, &dev_max);

		cpu_min = parent_cpu_data->min_freq;
		cpu_max = parent_cpu_data->max_freq;
		cpu_cur = parent_cpu_data->cur_freq;

		cpu_percent = ((cpu_cur - cpu_min) * 100) / (cpu_max - cpu_min);
		freq = dev_min + mult_frac(dev_max - dev_min, cpu_percent, 100);

		*target_freq = max(freq, *target_freq);
		cpufreq_cpu_put(policy);
	}

	return ret;
}

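/*
 * For a devfreq parent, the requested parent frequency is translated into
 * a child frequency via required-opps when possible; otherwise the parent
 * frequency's index in the parent freq_table is reused as an index into
 * the child freq_table, clamped to the last entry of the child's table.
 */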
static int get_target_freq_with_devfreq(struct devfreq *devfreq,
					unsigned long *freq)
{
	struct devfreq_passive_data *p_data
			= (struct devfreq_passive_data *)devfreq->data;
	struct devfreq *parent_devfreq = (struct devfreq *)p_data->parent;
	unsigned long child_freq = ULONG_MAX;
	int i, count;

	/* Get target freq via required opps */
	child_freq = get_target_freq_by_required_opp(parent_devfreq->dev.parent,
						parent_devfreq->opp_table,
						devfreq->opp_table, freq);
	if (child_freq)
		goto out;

	/* Use interpolation if required opps are not available */
	for (i = 0; i < parent_devfreq->max_state; i++)
		if (parent_devfreq->freq_table[i] == *freq)
			break;

	if (i == parent_devfreq->max_state)
		return -EINVAL;

	if (i < devfreq->max_state) {
		child_freq = devfreq->freq_table[i];
	} else {
		count = devfreq->max_state;
		child_freq = devfreq->freq_table[count - 1];
	}

out:
	*freq = child_freq;

	return 0;
}

static int devfreq_passive_get_target_freq(struct devfreq *devfreq,
					   unsigned long *freq)
{
	struct devfreq_passive_data *p_data =
				(struct devfreq_passive_data *)devfreq->data;
	int ret;

	if (!p_data)
		return -EINVAL;

	/*
	 * If the devfreq device using the passive governor provides its own
	 * method to determine the next frequency, use the get_target_freq()
	 * callback of struct devfreq_passive_data.
	 */
	if (p_data->get_target_freq)
		return p_data->get_target_freq(devfreq, freq);

	switch (p_data->parent_type) {
	case DEVFREQ_PARENT_DEV:
		ret = get_target_freq_with_devfreq(devfreq, freq);
		break;
	case CPUFREQ_PARENT_DEV:
		ret = get_target_freq_with_cpufreq(devfreq, freq);
		break;
	default:
		ret = -EINVAL;
		dev_err(&devfreq->dev, "Invalid parent type\n");
		break;
	}

	return ret;
}

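/*
 * cpufreq transition notifier: on CPUFREQ_POSTCHANGE, cache the new CPU
 * frequency for the affected policy and re-evaluate the passive device's
 * target under devfreq->lock; the cached value is rolled back if the
 * update fails.
 */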
static int cpufreq_passive_notifier_call(struct notifier_block *nb,
					 unsigned long event, void *ptr)
{
	struct devfreq_passive_data *p_data =
			container_of(nb, struct devfreq_passive_data, nb);
	struct devfreq *devfreq = (struct devfreq *)p_data->this;
	struct devfreq_cpu_data *parent_cpu_data;
	struct cpufreq_freqs *freqs = ptr;
	unsigned int cur_freq;
	int ret;

	if (event != CPUFREQ_POSTCHANGE || !freqs)
		return 0;

	parent_cpu_data = get_parent_cpu_data(p_data, freqs->policy);
	if (!parent_cpu_data || parent_cpu_data->cur_freq == freqs->new)
		return 0;

	cur_freq = parent_cpu_data->cur_freq;
	parent_cpu_data->cur_freq = freqs->new;

	mutex_lock(&devfreq->lock);
	ret = devfreq_update_target(devfreq, freqs->new);
	mutex_unlock(&devfreq->lock);
	if (ret) {
		parent_cpu_data->cur_freq = cur_freq;
		dev_err(&devfreq->dev, "failed to update the frequency.\n");
		return ret;
	}

	return 0;
}

static int cpufreq_passive_unregister_notifier(struct devfreq *devfreq)
{
	struct devfreq_passive_data *p_data
			= (struct devfreq_passive_data *)devfreq->data;
	int ret;

	if (p_data->nb.notifier_call) {
		ret = cpufreq_unregister_notifier(&p_data->nb,
						  CPUFREQ_TRANSITION_NOTIFIER);
		if (ret < 0)
			return ret;
	}

	delete_parent_cpu_data(p_data);

	return 0;
}

static int cpufreq_passive_register_notifier(struct devfreq *devfreq)
{
	struct devfreq_passive_data *p_data
			= (struct devfreq_passive_data *)devfreq->data;
	struct device *dev = devfreq->dev.parent;
	struct opp_table *opp_table = NULL;
	struct devfreq_cpu_data *parent_cpu_data;
	struct cpufreq_policy *policy;
	struct device *cpu_dev;
	unsigned int cpu;
	int ret;

	p_data->cpu_data_list
		= (struct list_head)LIST_HEAD_INIT(p_data->cpu_data_list);

	p_data->nb.notifier_call = cpufreq_passive_notifier_call;
	ret = cpufreq_register_notifier(&p_data->nb, CPUFREQ_TRANSITION_NOTIFIER);
	if (ret) {
		dev_err(dev, "failed to register cpufreq notifier\n");
		p_data->nb.notifier_call = NULL;
		goto err;
	}

	for_each_possible_cpu(cpu) {
		policy = cpufreq_cpu_get(cpu);
		if (!policy) {
			ret = -EPROBE_DEFER;
			goto err;
		}

		parent_cpu_data = get_parent_cpu_data(p_data, policy);
		if (parent_cpu_data) {
			cpufreq_cpu_put(policy);
			continue;
		}

		parent_cpu_data = kzalloc(sizeof(*parent_cpu_data),
					  GFP_KERNEL);
		if (!parent_cpu_data) {
			ret = -ENOMEM;
			goto err_put_policy;
		}

		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			dev_err(dev, "failed to get cpu device\n");
			ret = -ENODEV;
			goto err_free_cpu_data;
		}

		opp_table = dev_pm_opp_get_opp_table(cpu_dev);
		if (IS_ERR(opp_table)) {
			dev_err(dev, "failed to get opp_table of cpu%d\n", cpu);
			ret = PTR_ERR(opp_table);
			goto err_free_cpu_data;
		}

		parent_cpu_data->dev = cpu_dev;
		parent_cpu_data->opp_table = opp_table;
		parent_cpu_data->first_cpu = cpumask_first(policy->related_cpus);
		parent_cpu_data->cur_freq = policy->cur;
		parent_cpu_data->min_freq = policy->cpuinfo.min_freq;
		parent_cpu_data->max_freq = policy->cpuinfo.max_freq;

		list_add_tail(&parent_cpu_data->node, &p_data->cpu_data_list);
		cpufreq_cpu_put(policy);
	}

	mutex_lock(&devfreq->lock);
	ret = devfreq_update_target(devfreq, 0L);
	mutex_unlock(&devfreq->lock);
	if (ret)
		dev_err(dev, "failed to update the frequency\n");

	return ret;

err_free_cpu_data:
	kfree(parent_cpu_data);
err_put_policy:
	cpufreq_cpu_put(policy);
err:
	return ret;
}

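/*
 * devfreq transition notifier: the child device is updated on
 * DEVFREQ_PRECHANGE only when the parent is about to scale down, and on
 * DEVFREQ_POSTCHANGE only when the parent has just scaled up.
 */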
static int devfreq_passive_notifier_call(struct notifier_block *nb,
					 unsigned long event, void *ptr)
{
	struct devfreq_passive_data *data
			= container_of(nb, struct devfreq_passive_data, nb);
	struct devfreq *devfreq = (struct devfreq *)data->this;
	struct devfreq *parent = (struct devfreq *)data->parent;
	struct devfreq_freqs *freqs = (struct devfreq_freqs *)ptr;
	unsigned long freq = freqs->new;
	int ret = 0;

	mutex_lock_nested(&devfreq->lock, SINGLE_DEPTH_NESTING);
	switch (event) {
	case DEVFREQ_PRECHANGE:
		if (parent->previous_freq > freq)
			ret = devfreq_update_target(devfreq, freq);
		break;
	case DEVFREQ_POSTCHANGE:
		if (parent->previous_freq < freq)
			ret = devfreq_update_target(devfreq, freq);
		break;
	}
	mutex_unlock(&devfreq->lock);

	if (ret < 0)
		dev_warn(&devfreq->dev,
			"failed to update devfreq using passive governor\n");

	return NOTIFY_DONE;
}

static int devfreq_passive_unregister_notifier(struct devfreq *devfreq)
{
	struct devfreq_passive_data *p_data
			= (struct devfreq_passive_data *)devfreq->data;
	struct devfreq *parent = (struct devfreq *)p_data->parent;
	struct notifier_block *nb = &p_data->nb;

	return devfreq_unregister_notifier(parent, nb, DEVFREQ_TRANSITION_NOTIFIER);
}

static int devfreq_passive_register_notifier(struct devfreq *devfreq)
{
	struct devfreq_passive_data *p_data
			= (struct devfreq_passive_data *)devfreq->data;
	struct devfreq *parent = (struct devfreq *)p_data->parent;
	struct notifier_block *nb = &p_data->nb;

	if (!parent)
		return -EPROBE_DEFER;

	nb->notifier_call = devfreq_passive_notifier_call;
	return devfreq_register_notifier(parent, nb, DEVFREQ_TRANSITION_NOTIFIER);
}

static int devfreq_passive_event_handler(struct devfreq *devfreq,
				unsigned int event, void *data)
{
	struct devfreq_passive_data *p_data
			= (struct devfreq_passive_data *)devfreq->data;
	int ret = 0;

	if (!p_data)
		return -EINVAL;

	p_data->this = devfreq;

	switch (event) {
	case DEVFREQ_GOV_START:
		if (p_data->parent_type == DEVFREQ_PARENT_DEV)
			ret = devfreq_passive_register_notifier(devfreq);
		else if (p_data->parent_type == CPUFREQ_PARENT_DEV)
			ret = cpufreq_passive_register_notifier(devfreq);
		break;
	case DEVFREQ_GOV_STOP:
		if (p_data->parent_type == DEVFREQ_PARENT_DEV)
			WARN_ON(devfreq_passive_unregister_notifier(devfreq));
		else if (p_data->parent_type == CPUFREQ_PARENT_DEV)
			WARN_ON(cpufreq_passive_unregister_notifier(devfreq));
		break;
	default:
		break;
	}

	return ret;
}

static struct devfreq_governor devfreq_passive = {
	.name = DEVFREQ_GOV_PASSIVE,
	.flags = DEVFREQ_GOV_FLAG_IMMUTABLE,
	.get_target_freq = devfreq_passive_get_target_freq,
	.event_handler = devfreq_passive_event_handler,
};

static int __init devfreq_passive_init(void)
{
	return devfreq_add_governor(&devfreq_passive);
}
subsys_initcall(devfreq_passive_init);

static void __exit devfreq_passive_exit(void)
{
	int ret;

	ret = devfreq_remove_governor(&devfreq_passive);
	if (ret)
		pr_err("%s: failed remove governor %d\n", __func__, ret);
}
module_exit(devfreq_passive_exit);

MODULE_AUTHOR("Chanwoo Choi <[email protected]>");
MODULE_AUTHOR("MyungJoo Ham <[email protected]>");
MODULE_DESCRIPTION("DEVFREQ Passive governor");
MODULE_LICENSE("GPL v2");