// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic OPP helper interface for CPU device
 *
 * Copyright (C) 2009-2014 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/slab.h>

#include "opp.h"
#ifdef CONFIG_CPU_FREQ

/**
 * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device
 * @dev: device for which we do this operation
 * @opp_table: cpufreq table returned back to caller
 *
 * Generate a cpufreq table for a provided device - this assumes that the
 * OPP table is already initialized and ready for usage.
 *
 * This function allocates required memory for the cpufreq table. It is
 * expected that the caller does the required maintenance such as freeing
 * the table as required.
 *
 * Returns -EINVAL for bad pointers, -ENODEV if the device is not found,
 * -ENOMEM if no memory is available for the operation (table is not
 * populated), and 0 if successful (table is populated).
 *
 * WARNING: Callers must refresh their copy of the table if the OPP table
 * is modified in the interim.
 */
int dev_pm_opp_init_cpufreq_table(struct device *dev,
				  struct cpufreq_frequency_table **opp_table)
{
	struct dev_pm_opp *opp;
	struct cpufreq_frequency_table *freq_table = NULL;
	int i, max_opps, ret = 0;
	unsigned long rate;

	max_opps = dev_pm_opp_get_opp_count(dev);
	if (max_opps <= 0)
		return max_opps ? max_opps : -ENODATA;

	freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_KERNEL);
	if (!freq_table)
		return -ENOMEM;

	for (i = 0, rate = 0; i < max_opps; i++, rate++) {
		/* find next rate */
		opp = dev_pm_opp_find_freq_ceil(dev, &rate);
		if (IS_ERR(opp)) {
			ret = PTR_ERR(opp);
			goto out;
		}
		freq_table[i].driver_data = i;
		freq_table[i].frequency = rate / 1000;

		/* Is Boost/turbo opp ? */
		if (dev_pm_opp_is_turbo(opp))
			freq_table[i].flags = CPUFREQ_BOOST_FREQ;

		dev_pm_opp_put(opp);
	}

	freq_table[i].driver_data = i;
	freq_table[i].frequency = CPUFREQ_TABLE_END;

	*opp_table = &freq_table[0];

out:
	if (ret)
		kfree(freq_table);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
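
/*
 * Illustrative sketch (not part of the upstream file): how a cpufreq
 * driver's ->init() callback might consume the helper above. The callback
 * name and the policy handling are hypothetical; only the
 * dev_pm_opp_init_cpufreq_table() call reflects this file's API.
 */
static int __maybe_unused example_cpufreq_driver_init(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);
	struct cpufreq_frequency_table *freq_table;
	int ret;

	if (!cpu_dev)
		return -ENODEV;

	/* Build the frequency table from the OPPs registered for this CPU */
	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
	if (ret)
		return ret;

	policy->freq_table = freq_table;
	return 0;
}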

/**
 * dev_pm_opp_free_cpufreq_table() - free the cpufreq table
 * @dev: device for which we do this operation
 * @opp_table: table to free
 *
 * Free up the table allocated by dev_pm_opp_init_cpufreq_table().
 */
void dev_pm_opp_free_cpufreq_table(struct device *dev,
				   struct cpufreq_frequency_table **opp_table)
{
	if (!opp_table)
		return;

	kfree(*opp_table);
	*opp_table = NULL;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
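
/*
 * Illustrative sketch (not part of the upstream file): the matching
 * ->exit() path frees the table built in the sketch above. The callback
 * name is hypothetical.
 */
static int __maybe_unused example_cpufreq_driver_exit(struct cpufreq_policy *policy)
{
	/* Frees the table and clears policy->freq_table */
	dev_pm_opp_free_cpufreq_table(get_cpu_device(policy->cpu),
				      &policy->freq_table);
	return 0;
}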

#endif	/* CONFIG_CPU_FREQ */

void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask,
				      int last_cpu)
{
	struct device *cpu_dev;
	int cpu;

	WARN_ON(cpumask_empty(cpumask));

	for_each_cpu(cpu, cpumask) {
		if (cpu == last_cpu)
			break;

		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			pr_err("%s: failed to get cpu%d device\n", __func__,
			       cpu);
			continue;
		}

		dev_pm_opp_remove_table(cpu_dev);
	}
}

/**
 * dev_pm_opp_cpumask_remove_table() - Removes OPP table for @cpumask
 * @cpumask: cpumask for which OPP table needs to be removed
 *
 * This removes the OPP tables for CPUs present in the @cpumask.
 * This should be used to remove all the OPP entries associated with
 * the CPUs in @cpumask.
 */
void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask)
{
	_dev_pm_opp_cpumask_remove_table(cpumask, -1);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_cpumask_remove_table);
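
/*
 * Illustrative sketch (not part of the upstream file): a platform driver's
 * teardown path might drop the OPPs it registered for every CPU governed by
 * a policy. The helper name is hypothetical.
 */
static void __maybe_unused example_remove_policy_opps(struct cpufreq_policy *policy)
{
	/* Removes the OPP table of each CPU present in policy->related_cpus */
	dev_pm_opp_cpumask_remove_table(policy->related_cpus);
}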

/**
 * dev_pm_opp_set_sharing_cpus() - Mark OPP table as shared by a set of CPUs
 * @cpu_dev: CPU device for which we do this operation
 * @cpumask: cpumask of the CPUs which share the OPP table with @cpu_dev
 *
 * This marks the OPP table of @cpu_dev as shared by the CPUs present in
 * @cpumask.
 *
 * Returns -ENODEV if the OPP table isn't already present.
 */
int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev,
				const struct cpumask *cpumask)
{
	struct opp_device *opp_dev;
	struct opp_table *opp_table;
	struct device *dev;
	int cpu, ret = 0;

	opp_table = _find_opp_table(cpu_dev);
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	for_each_cpu(cpu, cpumask) {
		if (cpu == cpu_dev->id)
			continue;

		dev = get_cpu_device(cpu);
		if (!dev) {
			dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
				__func__, cpu);
			continue;
		}

		opp_dev = _add_opp_dev(dev, opp_table);
		if (!opp_dev) {
			dev_err(dev, "%s: failed to add opp-dev for cpu%d device\n",
				__func__, cpu);
			continue;
		}

		/* Mark opp-table as multiple CPUs are sharing it now */
		opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
	}

	dev_pm_opp_put_opp_table(opp_table);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus);
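
/*
 * Illustrative sketch (not part of the upstream file): after registering
 * OPPs for the policy's first CPU only, a driver may mark the remaining
 * CPUs of the policy as sharing that table. The helper name is
 * hypothetical.
 */
static int __maybe_unused example_mark_opps_shared(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);

	if (!cpu_dev)
		return -ENODEV;

	/* Every CPU in policy->cpus reuses cpu_dev's OPP table */
	return dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
}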

/**
 * dev_pm_opp_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with @cpu_dev
 * @cpu_dev: CPU device for which we do this operation
 * @cpumask: cpumask to update with information of sharing CPUs
 *
 * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
 *
 * Returns -ENODEV if the OPP table isn't already present and -EINVAL if the
 * OPP table's status is access-unknown.
 */
int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
{
	struct opp_device *opp_dev;
	struct opp_table *opp_table;
	int ret = 0;

	opp_table = _find_opp_table(cpu_dev);
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	if (opp_table->shared_opp == OPP_TABLE_ACCESS_UNKNOWN) {
		ret = -EINVAL;
		goto put_opp_table;
	}

	cpumask_clear(cpumask);

	if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) {
		mutex_lock(&opp_table->lock);
		list_for_each_entry(opp_dev, &opp_table->dev_list, node)
			cpumask_set_cpu(opp_dev->dev->id, cpumask);
		mutex_unlock(&opp_table->lock);
	} else {
		cpumask_set_cpu(cpu_dev->id, cpumask);
	}

put_opp_table:
	dev_pm_opp_put_opp_table(opp_table);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_sharing_cpus);
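
/*
 * Illustrative sketch (not part of the upstream file): a driver that does
 * not know the sharing topology up front can query it and seed the policy's
 * cpumask from the result. The helper name is hypothetical.
 */
static int __maybe_unused example_fill_policy_cpus(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);

	if (!cpu_dev)
		return -ENODEV;

	/*
	 * On success policy->cpus holds every CPU sharing OPPs with
	 * policy->cpu, including policy->cpu itself.
	 */
	return dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus);
}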