of.c 40 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Generic OPP OF helpers
  4. *
  5. * Copyright (C) 2009-2010 Texas Instruments Incorporated.
  6. * Nishanth Menon
  7. * Romit Dasgupta
  8. * Kevin Hilman
  9. */
  10. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  11. #include <linux/cpu.h>
  12. #include <linux/errno.h>
  13. #include <linux/device.h>
  14. #include <linux/of_device.h>
  15. #include <linux/pm_domain.h>
  16. #include <linux/slab.h>
  17. #include <linux/export.h>
  18. #include <linux/energy_model.h>
  19. #include "opp.h"
/*
 * Returns the OPP descriptor node referenced by a device node, or NULL if
 * the "operating-points-v2" property is absent. The caller owns the returned
 * node and must drop it with of_node_put().
 */
static struct device_node *_opp_of_get_opp_desc_node(struct device_node *np,
						     int index)
{
	/* "operating-points-v2" can be an array for power domain providers */
	return of_parse_phandle(np, "operating-points-v2", index);
}
/* Returns opp descriptor node for a device, caller must do of_node_put() */
struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
{
	/* Index 0: consumer devices reference a single OPP table */
	return _opp_of_get_opp_desc_node(dev->of_node, 0);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node);
/*
 * _managed_opp() - Find an already-registered OPP table for @dev's DT node.
 * @dev: device whose "operating-points-v2" phandle is looked up
 * @index: index into the "operating-points-v2" phandle array
 *
 * Returns the matching table with an elevated kref (caller must put it), or
 * NULL when no table exists for the node or the existing table is exclusive.
 *
 * NOTE(review): walks the global opp_tables list without taking
 * opp_table_lock here - presumably the caller holds it; verify at call sites.
 */
struct opp_table *_managed_opp(struct device *dev, int index)
{
	struct opp_table *opp_table, *managed_table = NULL;
	struct device_node *np;

	np = _opp_of_get_opp_desc_node(dev->of_node, index);
	if (!np)
		return NULL;

	list_for_each_entry(opp_table, &opp_tables, node) {
		if (opp_table->np == np) {
			/*
			 * Multiple devices can point to the same OPP table and
			 * so will have same node-pointer, np.
			 *
			 * But the OPPs will be considered as shared only if the
			 * OPP table contains a "opp-shared" property.
			 */
			if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) {
				/* Hand out a reference along with the table */
				_get_opp_table_kref(opp_table);
				managed_table = opp_table;
			}

			break;
		}
	}

	of_node_put(np);

	return managed_table;
}
  62. /* The caller must call dev_pm_opp_put() after the OPP is used */
  63. static struct dev_pm_opp *_find_opp_of_np(struct opp_table *opp_table,
  64. struct device_node *opp_np)
  65. {
  66. struct dev_pm_opp *opp;
  67. mutex_lock(&opp_table->lock);
  68. list_for_each_entry(opp, &opp_table->opp_list, node) {
  69. if (opp->np == opp_np) {
  70. dev_pm_opp_get(opp);
  71. mutex_unlock(&opp_table->lock);
  72. return opp;
  73. }
  74. }
  75. mutex_unlock(&opp_table->lock);
  76. return NULL;
  77. }
/*
 * Fetch the @index-th phandle from an OPP node's "required-opps" list.
 * Returns NULL when the entry does not exist; the caller must of_node_put()
 * a non-NULL result.
 */
static struct device_node *of_parse_required_opp(struct device_node *np,
						 int index)
{
	return of_parse_phandle(np, "required-opps", index);
}
/* The caller must call dev_pm_opp_put_opp_table() after the table is used */
static struct opp_table *_find_table_of_opp_np(struct device_node *opp_np)
{
	struct opp_table *opp_table;
	struct device_node *opp_table_np;

	/* The OPP table node is the parent of the individual OPP node */
	opp_table_np = of_get_parent(opp_np);
	if (!opp_table_np)
		goto err;

	/* It is safe to put the node now as all we need now is its address */
	of_node_put(opp_table_np);

	mutex_lock(&opp_table_lock);
	list_for_each_entry(opp_table, &opp_tables, node) {
		if (opp_table_np == opp_table->np) {
			/* Elevate the refcount before releasing the lock */
			_get_opp_table_kref(opp_table);
			mutex_unlock(&opp_table_lock);
			return opp_table;
		}
	}
	mutex_unlock(&opp_table_lock);

err:
	/* No table registered for this OPP node (yet) */
	return ERR_PTR(-ENODEV);
}
/* Free resources previously acquired by _opp_table_alloc_required_tables() */
static void _opp_table_free_required_tables(struct opp_table *opp_table)
{
	struct opp_table **required_opp_tables = opp_table->required_opp_tables;
	int i;

	if (!required_opp_tables)
		return;

	for (i = 0; i < opp_table->required_opp_count; i++) {
		/* Lazily-linked slots may still hold ERR_PTR/NULL: no ref taken */
		if (IS_ERR_OR_NULL(required_opp_tables[i]))
			continue;

		dev_pm_opp_put_opp_table(required_opp_tables[i]);
	}

	kfree(required_opp_tables);

	opp_table->required_opp_count = 0;
	opp_table->required_opp_tables = NULL;
	/* Drop this table from the lazy-linking list, if it was queued there */
	list_del(&opp_table->lazy);
}
/*
 * Populate all devices and opp tables which are part of "required-opps" list.
 * Checking only the first OPP node should be enough.
 *
 * Tables that are not yet registered are recorded as ERR_PTR entries and the
 * OPP table is queued on lazy_opp_tables for later linking. Failures here are
 * not fatal: the table simply ends up without required-opp information.
 */
static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
					     struct device *dev,
					     struct device_node *opp_np)
{
	struct opp_table **required_opp_tables;
	struct device_node *required_np, *np;
	bool lazy = false;
	int count, i;

	/* Traversing the first OPP node is all we need */
	np = of_get_next_available_child(opp_np, NULL);
	if (!np) {
		dev_warn(dev, "Empty OPP table\n");
		return;
	}

	count = of_count_phandle_with_args(np, "required-opps", NULL);
	if (count <= 0)
		goto put_np;

	required_opp_tables = kcalloc(count, sizeof(*required_opp_tables),
				      GFP_KERNEL);
	if (!required_opp_tables)
		goto put_np;

	opp_table->required_opp_tables = required_opp_tables;
	opp_table->required_opp_count = count;

	for (i = 0; i < count; i++) {
		required_np = of_parse_required_opp(np, i);
		if (!required_np)
			goto free_required_tables;

		/* May return ERR_PTR(-ENODEV) if the table isn't added yet */
		required_opp_tables[i] = _find_table_of_opp_np(required_np);
		of_node_put(required_np);

		if (IS_ERR(required_opp_tables[i]))
			lazy = true;
	}

	/* Let's do the linking later on */
	if (lazy)
		list_add(&opp_table->lazy, &lazy_opp_tables);

	goto put_np;

free_required_tables:
	_opp_table_free_required_tables(opp_table);
put_np:
	of_node_put(np);
}
/*
 * _of_init_opp_table() - Initialize DT-derived fields of a new OPP table.
 * @opp_table: table being initialized
 * @dev: device the table belongs to
 * @index: index into the "operating-points-v2" phandle array
 *
 * Reads v1 compatibility properties, records whether the device is a genpd
 * provider, stores the OPP table node (reference held until
 * _of_clear_opp_table()) and sets shared/exclusive access mode.
 */
void _of_init_opp_table(struct opp_table *opp_table, struct device *dev,
			int index)
{
	struct device_node *np, *opp_np;
	u32 val;

	/*
	 * Only required for backward compatibility with v1 bindings, but isn't
	 * harmful for other cases. And so we do it unconditionally.
	 */
	np = of_node_get(dev->of_node);
	if (!np)
		return;

	if (!of_property_read_u32(np, "clock-latency", &val))
		opp_table->clock_latency_ns_max = val;
	/* Optional; leaves voltage_tolerance_v1 untouched when absent */
	of_property_read_u32(np, "voltage-tolerance",
			     &opp_table->voltage_tolerance_v1);

	if (of_find_property(np, "#power-domain-cells", NULL))
		opp_table->is_genpd = true;

	/* Get OPP table node */
	opp_np = _opp_of_get_opp_desc_node(np, index);
	of_node_put(np);

	if (!opp_np)
		return;

	if (of_property_read_bool(opp_np, "opp-shared"))
		opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
	else
		opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE;

	/* Keep the node reference; dropped in _of_clear_opp_table() */
	opp_table->np = opp_np;

	_opp_table_alloc_required_tables(opp_table, dev, opp_np);
}
/* Undo everything _of_init_opp_table() acquired for @opp_table */
void _of_clear_opp_table(struct opp_table *opp_table)
{
	_opp_table_free_required_tables(opp_table);
	/* Balances the reference taken via _opp_of_get_opp_desc_node() */
	of_node_put(opp_table->np);
}
/*
 * Release all resources previously acquired with a call to
 * _of_opp_alloc_required_opps().
 */
static void _of_opp_free_required_opps(struct opp_table *opp_table,
				       struct dev_pm_opp *opp)
{
	struct dev_pm_opp **required_opps = opp->required_opps;
	int i;

	if (!required_opps)
		return;

	for (i = 0; i < opp_table->required_opp_count; i++) {
		/* Slots are NULL when linking was deferred or failed part-way */
		if (!required_opps[i])
			continue;

		/* Put the reference back */
		dev_pm_opp_put(required_opps[i]);
	}

	opp->required_opps = NULL;
	kfree(required_opps);
}
/* Release per-OPP DT resources: required-opps references and the OPP node */
void _of_clear_opp(struct opp_table *opp_table, struct dev_pm_opp *opp)
{
	_of_opp_free_required_opps(opp_table, opp);
	of_node_put(opp->np);
}
/*
 * Populate all required OPPs which are part of "required-opps" list.
 *
 * For each required table already registered, resolves the corresponding OPP
 * and takes a reference to it. Entries whose table is not yet available are
 * left NULL and linked later by lazy_link_required_opps(). Returns 0 on
 * success or a negative errno; on failure all acquired references are dropped.
 */
static int _of_opp_alloc_required_opps(struct opp_table *opp_table,
				       struct dev_pm_opp *opp)
{
	struct dev_pm_opp **required_opps;
	struct opp_table *required_table;
	struct device_node *np;
	int i, ret, count = opp_table->required_opp_count;

	if (!count)
		return 0;

	required_opps = kcalloc(count, sizeof(*required_opps), GFP_KERNEL);
	if (!required_opps)
		return -ENOMEM;

	opp->required_opps = required_opps;

	for (i = 0; i < count; i++) {
		required_table = opp_table->required_opp_tables[i];

		/* Required table not added yet, we will link later */
		if (IS_ERR_OR_NULL(required_table))
			continue;

		np = of_parse_required_opp(opp->np, i);
		if (unlikely(!np)) {
			ret = -ENODEV;
			goto free_required_opps;
		}

		required_opps[i] = _find_opp_of_np(required_table, np);
		of_node_put(np);

		if (!required_opps[i]) {
			pr_err("%s: Unable to find required OPP node: %pOF (%d)\n",
			       __func__, opp->np, i);
			ret = -ENODEV;
			goto free_required_opps;
		}
	}

	return 0;

free_required_opps:
	_of_opp_free_required_opps(opp_table, opp);

	return ret;
}
/*
 * Link required OPPs for an individual OPP.
 *
 * Resolves slot @index of every OPP in @opp_table against @new_table, taking
 * a reference per resolved OPP. Returns 0 on success, -ENODEV if any OPP's
 * required node cannot be resolved (earlier slots keep their references).
 */
static int lazy_link_required_opps(struct opp_table *opp_table,
				   struct opp_table *new_table, int index)
{
	struct device_node *required_np;
	struct dev_pm_opp *opp;

	list_for_each_entry(opp, &opp_table->opp_list, node) {
		required_np = of_parse_required_opp(opp->np, index);
		if (unlikely(!required_np))
			return -ENODEV;

		opp->required_opps[index] = _find_opp_of_np(new_table, required_np);
		of_node_put(required_np);

		if (!opp->required_opps[index]) {
			pr_err("%s: Unable to find required OPP node: %pOF (%d)\n",
			       __func__, opp->np, index);
			return -ENODEV;
		}
	}

	return 0;
}
/*
 * Link required OPPs for all OPPs of the newly added OPP table.
 *
 * Walks every table still waiting on lazy_opp_tables and fills in any
 * required-table slot that @new_table satisfies. A table whose slots are all
 * resolved is taken off the lazy list and its OPPs are marked available.
 */
static void lazy_link_required_opp_table(struct opp_table *new_table)
{
	struct opp_table *opp_table, *temp, **required_opp_tables;
	struct device_node *required_np, *opp_np, *required_table_np;
	struct dev_pm_opp *opp;
	int i, ret;

	mutex_lock(&opp_table_lock);

	list_for_each_entry_safe(opp_table, temp, &lazy_opp_tables, lazy) {
		bool lazy = false;

		/* opp_np can't be invalid here */
		opp_np = of_get_next_available_child(opp_table->np, NULL);

		for (i = 0; i < opp_table->required_opp_count; i++) {
			required_opp_tables = opp_table->required_opp_tables;

			/* Required opp-table is already parsed */
			if (!IS_ERR(required_opp_tables[i]))
				continue;

			/* required_np can't be invalid here */
			required_np = of_parse_required_opp(opp_np, i);
			required_table_np = of_get_parent(required_np);

			/*
			 * Both nodes are put right away: only their addresses
			 * are compared below, never dereferenced.
			 */
			of_node_put(required_table_np);
			of_node_put(required_np);

			/*
			 * Newly added table isn't the required opp-table for
			 * opp_table.
			 */
			if (required_table_np != new_table->np) {
				lazy = true;
				continue;
			}

			required_opp_tables[i] = new_table;
			_get_opp_table_kref(new_table);

			/* Link OPPs now */
			ret = lazy_link_required_opps(opp_table, new_table, i);
			if (ret) {
				/* The OPPs will be marked unusable */
				lazy = false;
				break;
			}
		}

		of_node_put(opp_np);

		/* All required opp-tables found, remove from lazy list */
		if (!lazy) {
			list_del_init(&opp_table->lazy);

			list_for_each_entry(opp, &opp_table->opp_list, node)
				_required_opps_available(opp, opp_table->required_opp_count);
		}
	}

	mutex_unlock(&opp_table_lock);
}
/*
 * _bandwidth_supported() - Does the OPP table carry bandwidth values?
 * @dev: device to look up when @opp_table is NULL
 * @opp_table: optional table whose node is checked directly
 *
 * Return: 1 if the first OPP node has "opp-peak-kBps", 0 if it doesn't
 * (or the device only has opp-v1 bindings), -EINVAL for an empty table,
 * -ENODEV when the device node is missing.
 */
static int _bandwidth_supported(struct device *dev, struct opp_table *opp_table)
{
	struct device_node *np, *opp_np;
	struct property *prop;

	if (!opp_table) {
		np = of_node_get(dev->of_node);
		if (!np)
			return -ENODEV;

		opp_np = _opp_of_get_opp_desc_node(np, 0);
		of_node_put(np);
	} else {
		opp_np = of_node_get(opp_table->np);
	}

	/* Lets not fail in case we are parsing opp-v1 bindings */
	if (!opp_np)
		return 0;

	/* Checking only first OPP is sufficient */
	np = of_get_next_available_child(opp_np, NULL);
	of_node_put(opp_np);
	if (!np) {
		dev_err(dev, "OPP table empty\n");
		return -EINVAL;
	}

	prop = of_find_property(np, "opp-peak-kBps", NULL);
	of_node_put(np);

	if (!prop || !prop->length)
		return 0;

	return 1;
}
/*
 * dev_pm_opp_of_find_icc_paths() - Acquire interconnect paths for the device.
 * @dev: device whose "interconnects" property is parsed
 * @opp_table: optional table in which the paths are stored
 *
 * Each path takes two phandle entries, so num_paths = count / 2. When
 * @opp_table is NULL, the function only validates that paths can be acquired:
 * it then deliberately falls through to the err path to release them again
 * and returns 0.
 */
int dev_pm_opp_of_find_icc_paths(struct device *dev,
				 struct opp_table *opp_table)
{
	struct device_node *np;
	int ret, i, count, num_paths;
	struct icc_path **paths;

	ret = _bandwidth_supported(dev, opp_table);
	if (ret == -EINVAL)
		return 0; /* Empty OPP table is a valid corner-case, let's not fail */
	else if (ret <= 0)
		return ret;

	ret = 0;

	np = of_node_get(dev->of_node);
	if (!np)
		return 0;

	count = of_count_phandle_with_args(np, "interconnects",
					   "#interconnect-cells");
	of_node_put(np);
	if (count < 0)
		return 0;

	/* two phandles when #interconnect-cells = <1> */
	if (count % 2) {
		dev_err(dev, "%s: Invalid interconnects values\n", __func__);
		return -EINVAL;
	}

	num_paths = count / 2;
	paths = kcalloc(num_paths, sizeof(*paths), GFP_KERNEL);
	if (!paths)
		return -ENOMEM;

	for (i = 0; i < num_paths; i++) {
		paths[i] = of_icc_get_by_index(dev, i);
		if (IS_ERR(paths[i])) {
			ret = PTR_ERR(paths[i]);
			if (ret != -EPROBE_DEFER) {
				dev_err(dev, "%s: Unable to get path%d: %d\n",
					__func__, i, ret);
			}
			goto err;
		}
	}

	if (opp_table) {
		opp_table->paths = paths;
		opp_table->path_count = num_paths;
		return 0;
	}

err:
	/* Release the i paths acquired so far (all of them when validating) */
	while (i--)
		icc_put(paths[i]);

	kfree(paths);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_find_icc_paths);
/*
 * _opp_is_supported() - Match an OPP's "opp-supported-hw" against the platform.
 * @dev: device for diagnostics
 * @opp_table: supplies supported_hw[] and supported_hw_count set by platform
 * @np: the OPP node being checked
 *
 * The property holds one or more versions, each of supported_hw_count levels.
 * The OPP is supported if every level of at least one version ANDs non-zero
 * with the platform's supported_hw mask.
 */
static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
			      struct device_node *np)
{
	unsigned int levels = opp_table->supported_hw_count;
	int count, versions, ret, i, j;
	u32 val;

	if (!opp_table->supported_hw) {
		/*
		 * In the case that no supported_hw has been set by the
		 * platform but there is an opp-supported-hw value set for
		 * an OPP then the OPP should not be enabled as there is
		 * no way to see if the hardware supports it.
		 */
		if (of_find_property(np, "opp-supported-hw", NULL))
			return false;
		else
			return true;
	}

	count = of_property_count_u32_elems(np, "opp-supported-hw");
	if (count <= 0 || count % levels) {
		dev_err(dev, "%s: Invalid opp-supported-hw property (%d)\n",
			__func__, count);
		return false;
	}

	versions = count / levels;

	/* All levels in at least one of the versions should match */
	for (i = 0; i < versions; i++) {
		bool supported = true;

		for (j = 0; j < levels; j++) {
			ret = of_property_read_u32_index(np, "opp-supported-hw",
							 i * levels + j, &val);
			if (ret) {
				dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
					 __func__, i * levels + j, ret);
				return false;
			}

			/* Check if the level is supported */
			if (!(val & opp_table->supported_hw[j])) {
				supported = false;
				break;
			}
		}

		if (supported)
			return true;
	}

	return false;
}
  463. static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
  464. struct opp_table *opp_table)
  465. {
  466. u32 *microvolt, *microamp = NULL, *microwatt = NULL;
  467. int supplies = opp_table->regulator_count;
  468. int vcount, icount, pcount, ret, i, j;
  469. struct property *prop = NULL;
  470. char name[NAME_MAX];
  471. /* Search for "opp-microvolt-<name>" */
  472. if (opp_table->prop_name) {
  473. snprintf(name, sizeof(name), "opp-microvolt-%s",
  474. opp_table->prop_name);
  475. prop = of_find_property(opp->np, name, NULL);
  476. }
  477. if (!prop) {
  478. /* Search for "opp-microvolt" */
  479. sprintf(name, "opp-microvolt");
  480. prop = of_find_property(opp->np, name, NULL);
  481. /* Missing property isn't a problem, but an invalid entry is */
  482. if (!prop) {
  483. if (unlikely(supplies == -1)) {
  484. /* Initialize regulator_count */
  485. opp_table->regulator_count = 0;
  486. return 0;
  487. }
  488. if (!supplies)
  489. return 0;
  490. dev_err(dev, "%s: opp-microvolt missing although OPP managing regulators\n",
  491. __func__);
  492. return -EINVAL;
  493. }
  494. }
  495. if (unlikely(supplies == -1)) {
  496. /* Initialize regulator_count */
  497. supplies = opp_table->regulator_count = 1;
  498. } else if (unlikely(!supplies)) {
  499. dev_err(dev, "%s: opp-microvolt wasn't expected\n", __func__);
  500. return -EINVAL;
  501. }
  502. vcount = of_property_count_u32_elems(opp->np, name);
  503. if (vcount < 0) {
  504. dev_err(dev, "%s: Invalid %s property (%d)\n",
  505. __func__, name, vcount);
  506. return vcount;
  507. }
  508. /* There can be one or three elements per supply */
  509. if (vcount != supplies && vcount != supplies * 3) {
  510. dev_err(dev, "%s: Invalid number of elements in %s property (%d) with supplies (%d)\n",
  511. __func__, name, vcount, supplies);
  512. return -EINVAL;
  513. }
  514. microvolt = kmalloc_array(vcount, sizeof(*microvolt), GFP_KERNEL);
  515. if (!microvolt)
  516. return -ENOMEM;
  517. ret = of_property_read_u32_array(opp->np, name, microvolt, vcount);
  518. if (ret) {
  519. dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
  520. ret = -EINVAL;
  521. goto free_microvolt;
  522. }
  523. /* Search for "opp-microamp-<name>" */
  524. prop = NULL;
  525. if (opp_table->prop_name) {
  526. snprintf(name, sizeof(name), "opp-microamp-%s",
  527. opp_table->prop_name);
  528. prop = of_find_property(opp->np, name, NULL);
  529. }
  530. if (!prop) {
  531. /* Search for "opp-microamp" */
  532. sprintf(name, "opp-microamp");
  533. prop = of_find_property(opp->np, name, NULL);
  534. }
  535. if (prop) {
  536. icount = of_property_count_u32_elems(opp->np, name);
  537. if (icount < 0) {
  538. dev_err(dev, "%s: Invalid %s property (%d)\n", __func__,
  539. name, icount);
  540. ret = icount;
  541. goto free_microvolt;
  542. }
  543. if (icount != supplies) {
  544. dev_err(dev, "%s: Invalid number of elements in %s property (%d) with supplies (%d)\n",
  545. __func__, name, icount, supplies);
  546. ret = -EINVAL;
  547. goto free_microvolt;
  548. }
  549. microamp = kmalloc_array(icount, sizeof(*microamp), GFP_KERNEL);
  550. if (!microamp) {
  551. ret = -EINVAL;
  552. goto free_microvolt;
  553. }
  554. ret = of_property_read_u32_array(opp->np, name, microamp,
  555. icount);
  556. if (ret) {
  557. dev_err(dev, "%s: error parsing %s: %d\n", __func__,
  558. name, ret);
  559. ret = -EINVAL;
  560. goto free_microamp;
  561. }
  562. }
  563. /* Search for "opp-microwatt" */
  564. sprintf(name, "opp-microwatt");
  565. prop = of_find_property(opp->np, name, NULL);
  566. if (prop) {
  567. pcount = of_property_count_u32_elems(opp->np, name);
  568. if (pcount < 0) {
  569. dev_err(dev, "%s: Invalid %s property (%d)\n", __func__,
  570. name, pcount);
  571. ret = pcount;
  572. goto free_microamp;
  573. }
  574. if (pcount != supplies) {
  575. dev_err(dev, "%s: Invalid number of elements in %s property (%d) with supplies (%d)\n",
  576. __func__, name, pcount, supplies);
  577. ret = -EINVAL;
  578. goto free_microamp;
  579. }
  580. microwatt = kmalloc_array(pcount, sizeof(*microwatt),
  581. GFP_KERNEL);
  582. if (!microwatt) {
  583. ret = -EINVAL;
  584. goto free_microamp;
  585. }
  586. ret = of_property_read_u32_array(opp->np, name, microwatt,
  587. pcount);
  588. if (ret) {
  589. dev_err(dev, "%s: error parsing %s: %d\n", __func__,
  590. name, ret);
  591. ret = -EINVAL;
  592. goto free_microwatt;
  593. }
  594. }
  595. for (i = 0, j = 0; i < supplies; i++) {
  596. opp->supplies[i].u_volt = microvolt[j++];
  597. if (vcount == supplies) {
  598. opp->supplies[i].u_volt_min = opp->supplies[i].u_volt;
  599. opp->supplies[i].u_volt_max = opp->supplies[i].u_volt;
  600. } else {
  601. opp->supplies[i].u_volt_min = microvolt[j++];
  602. opp->supplies[i].u_volt_max = microvolt[j++];
  603. }
  604. if (microamp)
  605. opp->supplies[i].u_amp = microamp[i];
  606. if (microwatt)
  607. opp->supplies[i].u_watt = microwatt[i];
  608. }
  609. free_microwatt:
  610. kfree(microwatt);
  611. free_microamp:
  612. kfree(microamp);
  613. free_microvolt:
  614. kfree(microvolt);
  615. return ret;
  616. }
/**
 * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
 * entries
 * @dev: device pointer used to lookup OPP table.
 *
 * Free OPPs created using static entries present in DT. Thin wrapper around
 * dev_pm_opp_remove_table(), kept for a DT-flavoured API name.
 */
void dev_pm_opp_of_remove_table(struct device *dev)
{
	dev_pm_opp_remove_table(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
/*
 * _read_rate() - Parse an OPP node's "opp-hz" into new_opp->rates[].
 *
 * The element count must equal opp_table->clk_count. Returns 0 on success,
 * -ENODEV when "opp-hz" is absent, or a negative errno on parse failure.
 */
static int _read_rate(struct dev_pm_opp *new_opp, struct opp_table *opp_table,
		      struct device_node *np)
{
	struct property *prop;
	int i, count, ret;
	u64 *rates;

	prop = of_find_property(np, "opp-hz", NULL);
	if (!prop)
		return -ENODEV;

	count = prop->length / sizeof(u64);
	if (opp_table->clk_count != count) {
		pr_err("%s: Count mismatch between opp-hz and clk_count (%d %d)\n",
		       __func__, count, opp_table->clk_count);
		return -EINVAL;
	}

	rates = kmalloc_array(count, sizeof(*rates), GFP_KERNEL);
	if (!rates)
		return -ENOMEM;

	ret = of_property_read_u64_array(np, "opp-hz", rates, count);
	if (ret) {
		pr_err("%s: Error parsing opp-hz: %d\n", __func__, ret);
	} else {
		/*
		 * Rate is defined as an unsigned long in clk API, and so
		 * casting explicitly to its type. Must be fixed once rate is 64
		 * bit guaranteed in clk API.
		 */
		for (i = 0; i < count; i++) {
			new_opp->rates[i] = (unsigned long)rates[i];

			/* This will happen for frequencies > 4.29 GHz */
			WARN_ON(new_opp->rates[i] != rates[i]);
		}
	}

	kfree(rates);

	return ret;
}
  665. static int _read_bw(struct dev_pm_opp *new_opp, struct opp_table *opp_table,
  666. struct device_node *np, bool peak)
  667. {
  668. const char *name = peak ? "opp-peak-kBps" : "opp-avg-kBps";
  669. struct property *prop;
  670. int i, count, ret;
  671. u32 *bw;
  672. prop = of_find_property(np, name, NULL);
  673. if (!prop)
  674. return -ENODEV;
  675. count = prop->length / sizeof(u32);
  676. if (opp_table->path_count != count) {
  677. pr_err("%s: Mismatch between %s and paths (%d %d)\n",
  678. __func__, name, count, opp_table->path_count);
  679. return -EINVAL;
  680. }
  681. bw = kmalloc_array(count, sizeof(*bw), GFP_KERNEL);
  682. if (!bw)
  683. return -ENOMEM;
  684. ret = of_property_read_u32_array(np, name, bw, count);
  685. if (ret) {
  686. pr_err("%s: Error parsing %s: %d\n", __func__, name, ret);
  687. goto out;
  688. }
  689. for (i = 0; i < count; i++) {
  690. if (peak)
  691. new_opp->bandwidth[i].peak = kBps_to_icc(bw[i]);
  692. else
  693. new_opp->bandwidth[i].avg = kBps_to_icc(bw[i]);
  694. }
  695. out:
  696. kfree(bw);
  697. return ret;
  698. }
/*
 * _read_opp_key() - Parse the key identifying an OPP: rate, bandwidth, level.
 *
 * At least one of "opp-hz", "opp-peak-kBps" or "opp-level" must be present;
 * -ENODEV from the individual readers merely means "property absent" and is
 * tolerated as long as some key was found. Returns 0 when a key was parsed,
 * otherwise the last error.
 */
static int _read_opp_key(struct dev_pm_opp *new_opp,
			 struct opp_table *opp_table, struct device_node *np)
{
	bool found = false;
	int ret;

	ret = _read_rate(new_opp, opp_table, np);
	if (!ret)
		found = true;
	else if (ret != -ENODEV)
		return ret;

	/*
	 * Bandwidth consists of peak and average (optional) values:
	 * opp-peak-kBps = <path1_value path2_value>;
	 * opp-avg-kBps = <path1_value path2_value>;
	 */
	ret = _read_bw(new_opp, opp_table, np, true);
	if (!ret) {
		found = true;
		/* Average bandwidth only makes sense alongside a peak value */
		ret = _read_bw(new_opp, opp_table, np, false);
	}

	/* The properties were found but we failed to parse them */
	if (ret && ret != -ENODEV)
		return ret;

	if (!of_property_read_u32(np, "opp-level", &new_opp->level))
		found = true;

	if (found)
		return 0;

	return ret;
}
  728. /**
  729. * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
  730. * @opp_table: OPP table
  731. * @dev: device for which we do this operation
  732. * @np: device node
  733. *
  734. * This function adds an opp definition to the opp table and returns status. The
  735. * opp can be controlled using dev_pm_opp_enable/disable functions and may be
  736. * removed by dev_pm_opp_remove.
  737. *
  738. * Return:
  739. * Valid OPP pointer:
  740. * On success
  741. * NULL:
  742. * Duplicate OPPs (both freq and volt are same) and opp->available
  743. * OR if the OPP is not supported by hardware.
  744. * ERR_PTR(-EEXIST):
  745. * Freq are same and volt are different OR
  746. * Duplicate OPPs (both freq and volt are same) and !opp->available
  747. * ERR_PTR(-ENOMEM):
  748. * Memory allocation failure
  749. * ERR_PTR(-EINVAL):
  750. * Failed parsing the OPP node
  751. */
static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
		struct device *dev, struct device_node *np)
{
	struct dev_pm_opp *new_opp;
	u32 val;
	int ret;

	new_opp = _opp_allocate(opp_table);
	if (!new_opp)
		return ERR_PTR(-ENOMEM);

	/* Parse the OPP's key (rate/level/bandwidth); mandatory for each node */
	ret = _read_opp_key(new_opp, opp_table, np);
	if (ret < 0) {
		dev_err(dev, "%s: opp key field not found\n", __func__);
		goto free_opp;
	}

	/* Check if the OPP supports hardware's hierarchy of versions or not */
	if (!_opp_is_supported(dev, opp_table, np)) {
		dev_dbg(dev, "OPP not supported by hardware: %s\n",
			of_node_full_name(np));
		/* ret is 0 here, so the tail returns NULL (skip, not error) */
		goto free_opp;
	}

	new_opp->turbo = of_property_read_bool(np, "turbo-mode");

	/* Hold a reference on the DT node for the lifetime of the OPP */
	new_opp->np = of_node_get(np);
	new_opp->dynamic = false;
	new_opp->available = true;

	ret = _of_opp_alloc_required_opps(opp_table, new_opp);
	if (ret)
		goto free_opp;

	if (!of_property_read_u32(np, "clock-latency-ns", &val))
		new_opp->clock_latency_ns = val;

	ret = opp_parse_supplies(new_opp, dev, opp_table);
	if (ret)
		goto free_required_opps;

	if (opp_table->is_genpd)
		new_opp->pstate = pm_genpd_opp_to_performance_state(dev, new_opp);

	ret = _opp_add(dev, new_opp, opp_table);
	if (ret) {
		/* Don't return error for duplicate OPPs */
		if (ret == -EBUSY)
			ret = 0;
		goto free_required_opps;
	}

	/* OPP to select on device suspend */
	if (of_property_read_bool(np, "opp-suspend")) {
		if (opp_table->suspend_opp) {
			/* Pick the OPP with higher rate/bw/level as suspend OPP */
			if (_opp_compare_key(opp_table, new_opp, opp_table->suspend_opp) == 1) {
				opp_table->suspend_opp->suspend = false;
				new_opp->suspend = true;
				opp_table->suspend_opp = new_opp;
			}
		} else {
			new_opp->suspend = true;
			opp_table->suspend_opp = new_opp;
		}
	}

	/* Track the worst-case clock switch latency across the whole table */
	if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
		opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;

	pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu level:%u\n",
		 __func__, new_opp->turbo, new_opp->rates[0],
		 new_opp->supplies[0].u_volt, new_opp->supplies[0].u_volt_min,
		 new_opp->supplies[0].u_volt_max, new_opp->clock_latency_ns,
		 new_opp->level);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp);
	return new_opp;

free_required_opps:
	_of_opp_free_required_opps(opp_table, new_opp);
free_opp:
	_opp_free(new_opp);

	/* ret == 0 means "benign skip" (unsupported/duplicate): report NULL */
	return ret ? ERR_PTR(ret) : NULL;
}
  826. /* Initializes OPP tables based on new bindings */
static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
{
	struct device_node *np;
	int ret, count = 0;
	struct dev_pm_opp *opp;

	/*
	 * OPP table is already initialized for the device:
	 * parsed_static_opps counts the users of the static OPPs, so just
	 * bump it and return.
	 */
	mutex_lock(&opp_table->lock);
	if (opp_table->parsed_static_opps) {
		opp_table->parsed_static_opps++;
		mutex_unlock(&opp_table->lock);
		return 0;
	}

	opp_table->parsed_static_opps = 1;
	mutex_unlock(&opp_table->lock);

	/* We have opp-table node now, iterate over it and add OPPs */
	for_each_available_child_of_node(opp_table->np, np) {
		opp = _opp_add_static_v2(opp_table, dev, np);
		if (IS_ERR(opp)) {
			ret = PTR_ERR(opp);
			dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
				ret);
			/* Drop the iterator's reference on np before bailing */
			of_node_put(np);
			goto remove_static_opp;
		} else if (opp) {
			/* NULL (skipped/duplicate OPP) is not counted */
			count++;
		}
	}

	/* There should be one or more OPPs defined */
	if (!count) {
		dev_err(dev, "%s: no supported OPPs", __func__);
		ret = -ENOENT;
		goto remove_static_opp;
	}

	list_for_each_entry(opp, &opp_table->opp_list, node) {
		/* Any non-zero performance state would enable the feature */
		if (opp->pstate) {
			opp_table->genpd_performance_state = true;
			break;
		}
	}

	lazy_link_required_opp_table(opp_table);

	return 0;

remove_static_opp:
	_opp_remove_all_static(opp_table);

	return ret;
}
  873. /* Initializes OPP tables based on old-deprecated bindings */
  874. static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table)
  875. {
  876. const struct property *prop;
  877. const __be32 *val;
  878. int nr, ret = 0;
  879. mutex_lock(&opp_table->lock);
  880. if (opp_table->parsed_static_opps) {
  881. opp_table->parsed_static_opps++;
  882. mutex_unlock(&opp_table->lock);
  883. return 0;
  884. }
  885. opp_table->parsed_static_opps = 1;
  886. mutex_unlock(&opp_table->lock);
  887. prop = of_find_property(dev->of_node, "operating-points", NULL);
  888. if (!prop) {
  889. ret = -ENODEV;
  890. goto remove_static_opp;
  891. }
  892. if (!prop->value) {
  893. ret = -ENODATA;
  894. goto remove_static_opp;
  895. }
  896. /*
  897. * Each OPP is a set of tuples consisting of frequency and
  898. * voltage like <freq-kHz vol-uV>.
  899. */
  900. nr = prop->length / sizeof(u32);
  901. if (nr % 2) {
  902. dev_err(dev, "%s: Invalid OPP table\n", __func__);
  903. ret = -EINVAL;
  904. goto remove_static_opp;
  905. }
  906. val = prop->value;
  907. while (nr) {
  908. unsigned long freq = be32_to_cpup(val++) * 1000;
  909. unsigned long volt = be32_to_cpup(val++);
  910. ret = _opp_add_v1(opp_table, dev, freq, volt, false);
  911. if (ret) {
  912. dev_err(dev, "%s: Failed to add OPP %ld (%d)\n",
  913. __func__, freq, ret);
  914. goto remove_static_opp;
  915. }
  916. nr -= 2;
  917. }
  918. return 0;
  919. remove_static_opp:
  920. _opp_remove_all_static(opp_table);
  921. return ret;
  922. }
  923. static int _of_add_table_indexed(struct device *dev, int index)
  924. {
  925. struct opp_table *opp_table;
  926. int ret, count;
  927. if (index) {
  928. /*
  929. * If only one phandle is present, then the same OPP table
  930. * applies for all index requests.
  931. */
  932. count = of_count_phandle_with_args(dev->of_node,
  933. "operating-points-v2", NULL);
  934. if (count == 1)
  935. index = 0;
  936. }
  937. opp_table = _add_opp_table_indexed(dev, index, true);
  938. if (IS_ERR(opp_table))
  939. return PTR_ERR(opp_table);
  940. /*
  941. * OPPs have two version of bindings now. Also try the old (v1)
  942. * bindings for backward compatibility with older dtbs.
  943. */
  944. if (opp_table->np)
  945. ret = _of_add_opp_table_v2(dev, opp_table);
  946. else
  947. ret = _of_add_opp_table_v1(dev, opp_table);
  948. if (ret)
  949. dev_pm_opp_put_opp_table(opp_table);
  950. return ret;
  951. }
  952. static void devm_pm_opp_of_table_release(void *data)
  953. {
  954. dev_pm_opp_of_remove_table(data);
  955. }
  956. static int _devm_of_add_table_indexed(struct device *dev, int index)
  957. {
  958. int ret;
  959. ret = _of_add_table_indexed(dev, index);
  960. if (ret)
  961. return ret;
  962. return devm_add_action_or_reset(dev, devm_pm_opp_of_table_release, dev);
  963. }
  964. /**
  965. * devm_pm_opp_of_add_table() - Initialize opp table from device tree
  966. * @dev: device pointer used to lookup OPP table.
  967. *
  968. * Register the initial OPP table with the OPP library for given device.
  969. *
  970. * The opp_table structure will be freed after the device is destroyed.
  971. *
  972. * Return:
  973. * 0 On success OR
  974. * Duplicate OPPs (both freq and volt are same) and opp->available
  975. * -EEXIST Freq are same and volt are different OR
  976. * Duplicate OPPs (both freq and volt are same) and !opp->available
  977. * -ENOMEM Memory allocation failure
  978. * -ENODEV when 'operating-points' property is not found or is invalid data
  979. * in device node.
  980. * -ENODATA when empty 'operating-points' property is found
  981. * -EINVAL when invalid entries are found in opp-v2 table
  982. */
int devm_pm_opp_of_add_table(struct device *dev)
{
	/* Index 0: the first (or only) OPP table referenced by @dev. */
	return _devm_of_add_table_indexed(dev, 0);
}
  987. EXPORT_SYMBOL_GPL(devm_pm_opp_of_add_table);
  988. /**
  989. * dev_pm_opp_of_add_table() - Initialize opp table from device tree
  990. * @dev: device pointer used to lookup OPP table.
  991. *
  992. * Register the initial OPP table with the OPP library for given device.
  993. *
  994. * Return:
  995. * 0 On success OR
  996. * Duplicate OPPs (both freq and volt are same) and opp->available
  997. * -EEXIST Freq are same and volt are different OR
  998. * Duplicate OPPs (both freq and volt are same) and !opp->available
  999. * -ENOMEM Memory allocation failure
  1000. * -ENODEV when 'operating-points' property is not found or is invalid data
  1001. * in device node.
  1002. * -ENODATA when empty 'operating-points' property is found
  1003. * -EINVAL when invalid entries are found in opp-v2 table
  1004. */
int dev_pm_opp_of_add_table(struct device *dev)
{
	/* Index 0: the first (or only) OPP table referenced by @dev. */
	return _of_add_table_indexed(dev, 0);
}
  1009. EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
  1010. /**
  1011. * dev_pm_opp_of_add_table_indexed() - Initialize indexed opp table from device tree
  1012. * @dev: device pointer used to lookup OPP table.
  1013. * @index: Index number.
  1014. *
  1015. * Register the initial OPP table with the OPP library for given device only
  1016. * using the "operating-points-v2" property.
  1017. *
  1018. * Return: Refer to dev_pm_opp_of_add_table() for return values.
  1019. */
int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
{
	/* Parse the table at @index among @dev's "operating-points-v2" phandles. */
	return _of_add_table_indexed(dev, index);
}
  1024. EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table_indexed);
  1025. /**
  1026. * devm_pm_opp_of_add_table_indexed() - Initialize indexed opp table from device tree
  1027. * @dev: device pointer used to lookup OPP table.
  1028. * @index: Index number.
  1029. *
  1030. * This is a resource-managed variant of dev_pm_opp_of_add_table_indexed().
  1031. */
int devm_pm_opp_of_add_table_indexed(struct device *dev, int index)
{
	/* Same as dev_pm_opp_of_add_table_indexed(), removal managed via devm. */
	return _devm_of_add_table_indexed(dev, index);
}
  1036. EXPORT_SYMBOL_GPL(devm_pm_opp_of_add_table_indexed);
  1037. /* CPU device specific helpers */
  1038. /**
  1039. * dev_pm_opp_of_cpumask_remove_table() - Removes OPP table for @cpumask
  1040. * @cpumask: cpumask for which OPP table needs to be removed
  1041. *
  1042. * This removes the OPP tables for CPUs present in the @cpumask.
  1043. * This should be used only to remove static entries created from DT.
  1044. */
void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask)
{
	/*
	 * Second argument appears to be a "last CPU" sentinel; -1 presumably
	 * means remove the table for every CPU in @cpumask (cf. the partial
	 * cleanup in dev_pm_opp_of_cpumask_add_table()) — TODO confirm against
	 * _dev_pm_opp_cpumask_remove_table().
	 */
	_dev_pm_opp_cpumask_remove_table(cpumask, -1);
}
  1049. EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);
  1050. /**
  1051. * dev_pm_opp_of_cpumask_add_table() - Adds OPP table for @cpumask
  1052. * @cpumask: cpumask for which OPP table needs to be added.
  1053. *
  1054. * This adds the OPP tables for CPUs present in the @cpumask.
  1055. */
  1056. int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
  1057. {
  1058. struct device *cpu_dev;
  1059. int cpu, ret;
  1060. if (WARN_ON(cpumask_empty(cpumask)))
  1061. return -ENODEV;
  1062. for_each_cpu(cpu, cpumask) {
  1063. cpu_dev = get_cpu_device(cpu);
  1064. if (!cpu_dev) {
  1065. pr_err("%s: failed to get cpu%d device\n", __func__,
  1066. cpu);
  1067. ret = -ENODEV;
  1068. goto remove_table;
  1069. }
  1070. ret = dev_pm_opp_of_add_table(cpu_dev);
  1071. if (ret) {
  1072. /*
  1073. * OPP may get registered dynamically, don't print error
  1074. * message here.
  1075. */
  1076. pr_debug("%s: couldn't find opp table for cpu:%d, %d\n",
  1077. __func__, cpu, ret);
  1078. goto remove_table;
  1079. }
  1080. }
  1081. return 0;
  1082. remove_table:
  1083. /* Free all other OPPs */
  1084. _dev_pm_opp_cpumask_remove_table(cpumask, cpu);
  1085. return ret;
  1086. }
  1087. EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
  1088. /*
  1089. * Works only for OPP v2 bindings.
  1090. *
  1091. * Returns -ENOENT if operating-points-v2 bindings aren't supported.
  1092. */
  1093. /**
  1094. * dev_pm_opp_of_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with
  1095. * @cpu_dev using operating-points-v2
  1096. * bindings.
  1097. *
  1098. * @cpu_dev: CPU device for which we do this operation
  1099. * @cpumask: cpumask to update with information of sharing CPUs
  1100. *
  1101. * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
  1102. *
  1103. * Returns -ENOENT if operating-points-v2 isn't present for @cpu_dev.
  1104. */
  1105. int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
  1106. struct cpumask *cpumask)
  1107. {
  1108. struct device_node *np, *tmp_np, *cpu_np;
  1109. int cpu, ret = 0;
  1110. /* Get OPP descriptor node */
  1111. np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
  1112. if (!np) {
  1113. dev_dbg(cpu_dev, "%s: Couldn't find opp node.\n", __func__);
  1114. return -ENOENT;
  1115. }
  1116. cpumask_set_cpu(cpu_dev->id, cpumask);
  1117. /* OPPs are shared ? */
  1118. if (!of_property_read_bool(np, "opp-shared"))
  1119. goto put_cpu_node;
  1120. for_each_possible_cpu(cpu) {
  1121. if (cpu == cpu_dev->id)
  1122. continue;
  1123. cpu_np = of_cpu_device_node_get(cpu);
  1124. if (!cpu_np) {
  1125. dev_err(cpu_dev, "%s: failed to get cpu%d node\n",
  1126. __func__, cpu);
  1127. ret = -ENOENT;
  1128. goto put_cpu_node;
  1129. }
  1130. /* Get OPP descriptor node */
  1131. tmp_np = _opp_of_get_opp_desc_node(cpu_np, 0);
  1132. of_node_put(cpu_np);
  1133. if (!tmp_np) {
  1134. pr_err("%pOF: Couldn't find opp node\n", cpu_np);
  1135. ret = -ENOENT;
  1136. goto put_cpu_node;
  1137. }
  1138. /* CPUs are sharing opp node */
  1139. if (np == tmp_np)
  1140. cpumask_set_cpu(cpu, cpumask);
  1141. of_node_put(tmp_np);
  1142. }
  1143. put_cpu_node:
  1144. of_node_put(np);
  1145. return ret;
  1146. }
  1147. EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
  1148. /**
  1149. * of_get_required_opp_performance_state() - Search for required OPP and return its performance state.
  1150. * @np: Node that contains the "required-opps" property.
  1151. * @index: Index of the phandle to parse.
  1152. *
  1153. * Returns the performance state of the OPP pointed out by the "required-opps"
  1154. * property at @index in @np.
  1155. *
  1156. * Return: Zero or positive performance state on success, otherwise negative
  1157. * value on errors.
  1158. */
  1159. int of_get_required_opp_performance_state(struct device_node *np, int index)
  1160. {
  1161. struct dev_pm_opp *opp;
  1162. struct device_node *required_np;
  1163. struct opp_table *opp_table;
  1164. int pstate = -EINVAL;
  1165. required_np = of_parse_required_opp(np, index);
  1166. if (!required_np)
  1167. return -ENODEV;
  1168. opp_table = _find_table_of_opp_np(required_np);
  1169. if (IS_ERR(opp_table)) {
  1170. pr_err("%s: Failed to find required OPP table %pOF: %ld\n",
  1171. __func__, np, PTR_ERR(opp_table));
  1172. goto put_required_np;
  1173. }
  1174. opp = _find_opp_of_np(opp_table, required_np);
  1175. if (opp) {
  1176. pstate = opp->pstate;
  1177. dev_pm_opp_put(opp);
  1178. }
  1179. dev_pm_opp_put_opp_table(opp_table);
  1180. put_required_np:
  1181. of_node_put(required_np);
  1182. return pstate;
  1183. }
  1184. EXPORT_SYMBOL_GPL(of_get_required_opp_performance_state);
  1185. /**
  1186. * dev_pm_opp_get_of_node() - Gets the DT node corresponding to an opp
  1187. * @opp: opp for which DT node has to be returned for
  1188. *
 * Return: DT node corresponding to the opp on success, NULL otherwise.
  1190. *
  1191. * The caller needs to put the node with of_node_put() after using it.
  1192. */
  1193. struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp)
  1194. {
  1195. if (IS_ERR_OR_NULL(opp)) {
  1196. pr_err("%s: Invalid parameters\n", __func__);
  1197. return NULL;
  1198. }
  1199. return of_node_get(opp->np);
  1200. }
  1201. EXPORT_SYMBOL_GPL(dev_pm_opp_get_of_node);
  1202. /*
  1203. * Callback function provided to the Energy Model framework upon registration.
  1204. * It provides the power used by @dev at @kHz if it is the frequency of an
  1205. * existing OPP, or at the frequency of the first OPP above @kHz otherwise
  1206. * (see dev_pm_opp_find_freq_ceil()). This function updates @kHz to the ceiled
  1207. * frequency and @uW to the associated power.
  1208. *
  1209. * Returns 0 on success or a proper -EINVAL value in case of error.
  1210. */
  1211. static int __maybe_unused
  1212. _get_dt_power(struct device *dev, unsigned long *uW, unsigned long *kHz)
  1213. {
  1214. struct dev_pm_opp *opp;
  1215. unsigned long opp_freq, opp_power;
  1216. /* Find the right frequency and related OPP */
  1217. opp_freq = *kHz * 1000;
  1218. opp = dev_pm_opp_find_freq_ceil(dev, &opp_freq);
  1219. if (IS_ERR(opp))
  1220. return -EINVAL;
  1221. opp_power = dev_pm_opp_get_power(opp);
  1222. dev_pm_opp_put(opp);
  1223. if (!opp_power)
  1224. return -EINVAL;
  1225. *kHz = opp_freq / 1000;
  1226. *uW = opp_power;
  1227. return 0;
  1228. }
  1229. /*
  1230. * Callback function provided to the Energy Model framework upon registration.
  1231. * This computes the power estimated by @dev at @kHz if it is the frequency
  1232. * of an existing OPP, or at the frequency of the first OPP above @kHz otherwise
  1233. * (see dev_pm_opp_find_freq_ceil()). This function updates @kHz to the ceiled
  1234. * frequency and @uW to the associated power. The power is estimated as
  1235. * P = C * V^2 * f with C being the device's capacitance and V and f
  1236. * respectively the voltage and frequency of the OPP.
  1237. *
  1238. * Returns -EINVAL if the power calculation failed because of missing
  1239. * parameters, 0 otherwise.
  1240. */
  1241. static int __maybe_unused _get_power(struct device *dev, unsigned long *uW,
  1242. unsigned long *kHz)
  1243. {
  1244. struct dev_pm_opp *opp;
  1245. struct device_node *np;
  1246. unsigned long mV, Hz;
  1247. u32 cap;
  1248. u64 tmp;
  1249. int ret;
  1250. np = of_node_get(dev->of_node);
  1251. if (!np)
  1252. return -EINVAL;
  1253. ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap);
  1254. of_node_put(np);
  1255. if (ret)
  1256. return -EINVAL;
  1257. Hz = *kHz * 1000;
  1258. opp = dev_pm_opp_find_freq_ceil(dev, &Hz);
  1259. if (IS_ERR(opp))
  1260. return -EINVAL;
  1261. mV = dev_pm_opp_get_voltage(opp) / 1000;
  1262. dev_pm_opp_put(opp);
  1263. if (!mV)
  1264. return -EINVAL;
  1265. tmp = (u64)cap * mV * mV * (Hz / 1000000);
  1266. /* Provide power in micro-Watts */
  1267. do_div(tmp, 1000000);
  1268. *uW = (unsigned long)tmp;
  1269. *kHz = Hz / 1000;
  1270. return 0;
  1271. }
  1272. static bool _of_has_opp_microwatt_property(struct device *dev)
  1273. {
  1274. unsigned long power, freq = 0;
  1275. struct dev_pm_opp *opp;
  1276. /* Check if at least one OPP has needed property */
  1277. opp = dev_pm_opp_find_freq_ceil(dev, &freq);
  1278. if (IS_ERR(opp))
  1279. return false;
  1280. power = dev_pm_opp_get_power(opp);
  1281. dev_pm_opp_put(opp);
  1282. if (!power)
  1283. return false;
  1284. return true;
  1285. }
  1286. /**
  1287. * dev_pm_opp_of_register_em() - Attempt to register an Energy Model
  1288. * @dev : Device for which an Energy Model has to be registered
  1289. * @cpus : CPUs for which an Energy Model has to be registered. For
  1290. * other type of devices it should be set to NULL.
  1291. *
  1292. * This checks whether the "dynamic-power-coefficient" devicetree property has
  1293. * been specified, and tries to register an Energy Model with it if it has.
  1294. * Having this property means the voltages are known for OPPs and the EM
  1295. * might be calculated.
  1296. */
int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus)
{
	struct em_data_callback em_cb;
	struct device_node *np;
	int ret, nr_opp;
	u32 cap;

	if (IS_ERR_OR_NULL(dev)) {
		ret = -EINVAL;
		goto failed;
	}

	/* The EM needs at least one OPP to describe the perf domain */
	nr_opp = dev_pm_opp_get_opp_count(dev);
	if (nr_opp <= 0) {
		ret = -EINVAL;
		goto failed;
	}

	/* First, try to find more precised Energy Model in DT */
	if (_of_has_opp_microwatt_property(dev)) {
		/* OPPs carry explicit power values: use them directly */
		EM_SET_ACTIVE_POWER_CB(em_cb, _get_dt_power);
		goto register_em;
	}

	np = of_node_get(dev->of_node);
	if (!np) {
		ret = -EINVAL;
		goto failed;
	}

	/*
	 * Register an EM only if the 'dynamic-power-coefficient' property is
	 * set in devicetree. It is assumed the voltage values are known if that
	 * property is set since it is useless otherwise. If voltages are not
	 * known, just let the EM registration fail with an error to alert the
	 * user about the inconsistent configuration.
	 */
	ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap);
	of_node_put(np);
	if (ret || !cap) {
		dev_dbg(dev, "Couldn't find proper 'dynamic-power-coefficient' in DT\n");
		ret = -EINVAL;
		goto failed;
	}

	/* Fall back to the P = C * V^2 * f estimate (see _get_power()) */
	EM_SET_ACTIVE_POWER_CB(em_cb, _get_power);

register_em:
	ret = em_dev_register_perf_domain(dev, nr_opp, &em_cb, cpus, true);
	if (ret)
		goto failed;

	return 0;

failed:
	/*
	 * NOTE(review): dev may be NULL or an ERR_PTR() when we get here from
	 * the first check — dev_dbg() on such a pointer looks unsafe if
	 * dynamic debug is enabled; confirm.
	 */
	dev_dbg(dev, "Couldn't register Energy Model %d\n", ret);
	return ret;
}
  1346. EXPORT_SYMBOL_GPL(dev_pm_opp_of_register_em);