msm_cvp_res_parse.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/of_reserved_mem.h>

#include "msm_cvp_debug.h"
#include "msm_cvp_resources.h"
#include "msm_cvp_res_parse.h"
#include "cvp_core_hfi.h"
#include "soc/qcom/secure_buffer.h"

enum clock_properties {
	CLOCK_PROP_HAS_SCALING = 1 << 0,
	CLOCK_PROP_HAS_MEM_RETENTION = 1 << 1,
};
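
/*
 * Illustrative only (an assumed devicetree shape, not taken from a shipping
 * DT): msm_cvp_load_clock_table() below reads one bitmask of the flags above
 * per entry in "clock-names" from "qcom,clock-configs", e.g.
 *
 *	clock-names        = "core_clk", "axi_clk";
 *	qcom,clock-configs = <0x1 0x0>;    (core_clk scalable, axi_clk not)
 */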

#define PERF_GOV "performance"

static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
{
	return NULL;
}

static size_t get_u32_array_num_elements(struct device_node *np,
					char *name)
{
	int len;
	size_t num_elements = 0;

	if (!of_get_property(np, name, &len)) {
		dprintk(CVP_ERR, "Failed to read %s from device tree\n",
			name);
		goto fail_read;
	}

	num_elements = len / sizeof(u32);
	if (num_elements <= 0) {
		dprintk(CVP_ERR, "%s not specified in device tree\n",
			name);
		goto fail_read;
	}

	return num_elements;

fail_read:
	return 0;
}

static inline void msm_cvp_free_allowed_clocks_table(
		struct msm_cvp_platform_resources *res)
{
	res->allowed_clks_tbl = NULL;
}

static inline void msm_cvp_free_cycles_per_mb_table(
		struct msm_cvp_platform_resources *res)
{
	res->clock_freq_tbl.clk_prof_entries = NULL;
}

static inline void msm_cvp_free_reg_table(
		struct msm_cvp_platform_resources *res)
{
	res->reg_set.reg_tbl = NULL;
}

static inline void msm_cvp_free_qdss_addr_table(
		struct msm_cvp_platform_resources *res)
{
	res->qdss_addr_set.addr_tbl = NULL;
}

static inline void msm_cvp_free_bus_vectors(
		struct msm_cvp_platform_resources *res)
{
	kfree(res->bus_set.bus_tbl);
	res->bus_set.bus_tbl = NULL;
	res->bus_set.count = 0;
}

static inline void msm_cvp_free_regulator_table(
		struct msm_cvp_platform_resources *res)
{
	int c = 0;

	for (c = 0; c < res->regulator_set.count; ++c) {
		struct regulator_info *rinfo =
			&res->regulator_set.regulator_tbl[c];

		rinfo->name = NULL;
	}

	res->regulator_set.regulator_tbl = NULL;
	res->regulator_set.count = 0;
}

static inline void msm_cvp_free_clock_table(
		struct msm_cvp_platform_resources *res)
{
	res->clock_set.clock_tbl = NULL;
	res->clock_set.count = 0;
}

void msm_cvp_free_platform_resources(
		struct msm_cvp_platform_resources *res)
{
	msm_cvp_free_clock_table(res);
	msm_cvp_free_regulator_table(res);
	msm_cvp_free_allowed_clocks_table(res);
	msm_cvp_free_reg_table(res);
	msm_cvp_free_qdss_addr_table(res);
	msm_cvp_free_bus_vectors(res);
}

static int msm_cvp_load_ipcc_regs(struct msm_cvp_platform_resources *res)
{
	int ret = 0;
	unsigned int reg_config[2];
	struct platform_device *pdev = res->pdev;

	ret = of_property_read_u32_array(pdev->dev.of_node, "qcom,ipcc-reg",
			reg_config, 2);
	if (ret) {
		dprintk(CVP_ERR, "Failed to read ipcc reg: %d\n", ret);
		return ret;
	}

	res->ipcc_reg_base = reg_config[0];
	res->ipcc_reg_size = reg_config[1];

	dprintk(CVP_CORE,
		"ipcc reg_base = %x, reg_size = %x\n",
		res->ipcc_reg_base,
		res->ipcc_reg_size);

	return ret;
}

static int msm_cvp_load_regspace_mapping(struct msm_cvp_platform_resources *res)
{
	int ret = 0;
	unsigned int ipclite_mapping_config[3] = {0};
	unsigned int hwmutex_mapping_config[3] = {0};
	unsigned int aon_mapping_config[3] = {0};
	unsigned int timer_config[3] = {0};
	struct platform_device *pdev = res->pdev;

	ret = of_property_read_u32_array(pdev->dev.of_node, "ipclite_mappings",
			ipclite_mapping_config, 3);
	if (ret) {
		dprintk(CVP_ERR, "Failed to read ipclite reg: %d\n", ret);
		return ret;
	}
	res->reg_mappings.ipclite_iova = ipclite_mapping_config[0];
	res->reg_mappings.ipclite_size = ipclite_mapping_config[1];
	res->reg_mappings.ipclite_phyaddr = ipclite_mapping_config[2];

	ret = of_property_read_u32_array(pdev->dev.of_node, "hwmutex_mappings",
			hwmutex_mapping_config, 3);
	if (ret) {
		dprintk(CVP_ERR, "Failed to read hwmutex reg: %d\n", ret);
		return ret;
	}
	res->reg_mappings.hwmutex_iova = hwmutex_mapping_config[0];
	res->reg_mappings.hwmutex_size = hwmutex_mapping_config[1];
	res->reg_mappings.hwmutex_phyaddr = hwmutex_mapping_config[2];

	ret = of_property_read_u32_array(pdev->dev.of_node, "aon_mappings",
			aon_mapping_config, 3);
	if (ret) {
		dprintk(CVP_ERR, "Failed to read aon reg: %d\n", ret);
		return ret;
	}
	res->reg_mappings.aon_iova = aon_mapping_config[0];
	res->reg_mappings.aon_size = aon_mapping_config[1];
	res->reg_mappings.aon_phyaddr = aon_mapping_config[2];

	ret = of_property_read_u32_array(pdev->dev.of_node,
			"aon_timer_mappings", timer_config, 3);
	if (ret) {
		dprintk(CVP_ERR, "Failed to read timer reg: %d\n", ret);
		return ret;
	}
	res->reg_mappings.timer_iova = timer_config[0];
	res->reg_mappings.timer_size = timer_config[1];
	res->reg_mappings.timer_phyaddr = timer_config[2];

	dprintk(CVP_CORE,
		"reg mappings %#x %#x %#x %#x %#x %#x %#x %#x %#x %#x %#x %#x\n",
		res->reg_mappings.ipclite_iova, res->reg_mappings.ipclite_size,
		res->reg_mappings.ipclite_phyaddr, res->reg_mappings.hwmutex_iova,
		res->reg_mappings.hwmutex_size, res->reg_mappings.hwmutex_phyaddr,
		res->reg_mappings.aon_iova, res->reg_mappings.aon_size,
		res->reg_mappings.aon_phyaddr, res->reg_mappings.timer_iova,
		res->reg_mappings.timer_size, res->reg_mappings.timer_phyaddr);

	return ret;
}

static int msm_cvp_load_gcc_regs(struct msm_cvp_platform_resources *res)
{
	int ret = 0;
	unsigned int reg_config[2];
	struct platform_device *pdev = res->pdev;

	ret = of_property_read_u32_array(pdev->dev.of_node, "qcom,gcc-reg",
			reg_config, 2);
	if (ret) {
		dprintk(CVP_WARN, "No gcc reg configured: %d\n", ret);
		return ret;
	}

	res->gcc_reg_base = reg_config[0];
	res->gcc_reg_size = reg_config[1];

	return ret;
}

static int msm_cvp_load_reg_table(struct msm_cvp_platform_resources *res)
{
	struct reg_set *reg_set;
	struct platform_device *pdev = res->pdev;
	int i;
	int rc = 0;

	if (!of_find_property(pdev->dev.of_node, "qcom,reg-presets", NULL)) {
		/*
		 * qcom,reg-presets is an optional property. It likely won't be
		 * present if we don't have any register settings to program
		 */
		dprintk(CVP_CORE, "qcom,reg-presets not found\n");
		return 0;
	}

	reg_set = &res->reg_set;
	reg_set->count = get_u32_array_num_elements(pdev->dev.of_node,
			"qcom,reg-presets");
	reg_set->count /= sizeof(*reg_set->reg_tbl) / sizeof(u32);
	if (!reg_set->count) {
		dprintk(CVP_CORE, "no elements in reg set\n");
		return rc;
	}

	reg_set->reg_tbl = devm_kzalloc(&pdev->dev, reg_set->count *
			sizeof(*(reg_set->reg_tbl)), GFP_KERNEL);
	if (!reg_set->reg_tbl) {
		dprintk(CVP_ERR, "%s Failed to alloc register table\n",
			__func__);
		return -ENOMEM;
	}

	if (of_property_read_u32_array(pdev->dev.of_node, "qcom,reg-presets",
			(u32 *)reg_set->reg_tbl, reg_set->count * 2)) {
		dprintk(CVP_ERR, "Failed to read register table\n");
		msm_cvp_free_reg_table(res);
		return -EINVAL;
	}

	for (i = 0; i < reg_set->count; i++) {
		dprintk(CVP_CORE,
			"reg = %x, value = %x\n",
			reg_set->reg_tbl[i].reg,
			reg_set->reg_tbl[i].value);
	}

	return rc;
}

static int msm_cvp_load_qdss_table(struct msm_cvp_platform_resources *res)
{
	struct addr_set *qdss_addr_set;
	struct platform_device *pdev = res->pdev;
	int i;
	int rc = 0;

	if (!of_find_property(pdev->dev.of_node, "qcom,qdss-presets", NULL)) {
		/*
		 * qcom,qdss-presets is an optional property. It likely won't be
		 * present if we don't have any register settings to program
		 */
		dprintk(CVP_CORE, "qcom,qdss-presets not found\n");
		return rc;
	}

	qdss_addr_set = &res->qdss_addr_set;
	qdss_addr_set->count = get_u32_array_num_elements(pdev->dev.of_node,
			"qcom,qdss-presets");
	qdss_addr_set->count /= sizeof(*qdss_addr_set->addr_tbl) / sizeof(u32);
	if (!qdss_addr_set->count) {
		dprintk(CVP_CORE, "no elements in qdss reg set\n");
		return rc;
	}

	qdss_addr_set->addr_tbl = devm_kzalloc(&pdev->dev,
			qdss_addr_set->count * sizeof(*qdss_addr_set->addr_tbl),
			GFP_KERNEL);
	if (!qdss_addr_set->addr_tbl) {
		dprintk(CVP_ERR, "%s Failed to alloc register table\n",
			__func__);
		rc = -ENOMEM;
		goto err_qdss_addr_tbl;
	}

	rc = of_property_read_u32_array(pdev->dev.of_node, "qcom,qdss-presets",
			(u32 *)qdss_addr_set->addr_tbl, qdss_addr_set->count * 2);
	if (rc) {
		dprintk(CVP_ERR, "Failed to read qdss address table\n");
		msm_cvp_free_qdss_addr_table(res);
		rc = -EINVAL;
		goto err_qdss_addr_tbl;
	}

	for (i = 0; i < qdss_addr_set->count; i++) {
		dprintk(CVP_CORE, "qdss addr = %x, value = %x\n",
			qdss_addr_set->addr_tbl[i].start,
			qdss_addr_set->addr_tbl[i].size);
	}

err_qdss_addr_tbl:
	return rc;
}

static int msm_cvp_load_fw_name(struct msm_cvp_platform_resources *res)
{
	struct platform_device *pdev = res->pdev;

	return of_property_read_string_index(pdev->dev.of_node,
			"cvp,firmware-name", 0, &res->fw_name);
}

static int msm_cvp_load_subcache_info(struct msm_cvp_platform_resources *res)
{
	int rc = 0, num_subcaches = 0, c;
	struct platform_device *pdev = res->pdev;
	struct subcache_set *subcaches = &res->subcache_set;

	num_subcaches = of_property_count_strings(pdev->dev.of_node,
			"cache-slice-names");
	if (num_subcaches <= 0) {
		dprintk(CVP_CORE, "No subcaches found\n");
		goto err_load_subcache_table_fail;
	}

	subcaches->subcache_tbl = devm_kzalloc(&pdev->dev,
			sizeof(*subcaches->subcache_tbl) * num_subcaches, GFP_KERNEL);
	if (!subcaches->subcache_tbl) {
		dprintk(CVP_ERR,
			"Failed to allocate memory for subcache tbl\n");
		rc = -ENOMEM;
		goto err_load_subcache_table_fail;
	}

	subcaches->count = num_subcaches;
	dprintk(CVP_CORE, "Found %d subcaches\n", num_subcaches);

	for (c = 0; c < num_subcaches; ++c) {
		struct subcache_info *vsc = &res->subcache_set.subcache_tbl[c];

		of_property_read_string_index(pdev->dev.of_node,
				"cache-slice-names", c, &vsc->name);
	}

	res->sys_cache_present = true;

	return 0;

err_load_subcache_table_fail:
	res->sys_cache_present = false;
	subcaches->count = 0;
	subcaches->subcache_tbl = NULL;

	return rc;
}

/**
 * msm_cvp_load_u32_table() - load dtsi table entries
 * @pdev: A pointer to the platform device.
 * @of_node: A pointer to the device node.
 * @table_name: A pointer to the dtsi table entry name.
 * @struct_size: The size of a single entry in the dtsi table.
 * @table: A pointer to the table pointer which needs to be
 *         filled with the dtsi table entries.
 * @num_elements: A pointer which needs to be filled with the
 *                number of elements in the table.
 *
 * This is a generic implementation for loading single or multiple array
 * tables from dtsi. The array elements should each be of size u32.
 *
 * Return: '0' on success, else an appropriate error value.
 */
int msm_cvp_load_u32_table(struct platform_device *pdev,
		struct device_node *of_node, char *table_name, int struct_size,
		u32 **table, u32 *num_elements)
{
	int rc = 0, num_elemts = 0;
	u32 *ptbl = NULL;

	if (!of_find_property(of_node, table_name, NULL)) {
		dprintk(CVP_CORE, "%s not found\n", table_name);
		return 0;
	}

	num_elemts = get_u32_array_num_elements(of_node, table_name);
	if (!num_elemts) {
		dprintk(CVP_ERR, "no elements in %s\n", table_name);
		return 0;
	}
	num_elemts /= struct_size / sizeof(u32);

	ptbl = devm_kzalloc(&pdev->dev, num_elemts * struct_size, GFP_KERNEL);
	if (!ptbl) {
		dprintk(CVP_ERR, "Failed to alloc table %s\n", table_name);
		return -ENOMEM;
	}

	if (of_property_read_u32_array(of_node, table_name, ptbl,
			num_elemts * struct_size / sizeof(u32))) {
		dprintk(CVP_ERR, "Failed to read %s\n", table_name);
		return -EINVAL;
	}

	*table = ptbl;
	if (num_elements)
		*num_elements = num_elemts;

	return rc;
}
EXPORT_SYMBOL(msm_cvp_load_u32_table);
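
/*
 * Illustrative only: a minimal sketch of how the generic loader above might
 * be used by a caller. The entry struct, the "qcom,example-table" property
 * name and the wrapper function are assumptions added for documentation;
 * they are not part of this driver.
 */
struct example_tbl_entry {
	u32 key;
	u32 value;
};

static int __maybe_unused example_load_tbl(struct platform_device *pdev,
		struct example_tbl_entry **tbl, u32 *count)
{
	/* struct_size tells the loader how many u32s make up one entry */
	return msm_cvp_load_u32_table(pdev, pdev->dev.of_node,
			"qcom,example-table", sizeof(**tbl),
			(u32 **)tbl, count);
}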

/* A comparator for sort(): orders allowed clock table entries by clock rate */
static int cmp(const void *a, const void *b)
{
	return ((struct allowed_clock_rates_table *)a)->clock_rate -
		((struct allowed_clock_rates_table *)b)->clock_rate;
}

static int msm_cvp_load_allowed_clocks_table(
		struct msm_cvp_platform_resources *res)
{
	int rc = 0;
	struct platform_device *pdev = res->pdev;

	if (!of_find_property(pdev->dev.of_node,
			"qcom,allowed-clock-rates", NULL)) {
		dprintk(CVP_CORE, "qcom,allowed-clock-rates not found\n");
		return 0;
	}

	rc = msm_cvp_load_u32_table(pdev, pdev->dev.of_node,
			"qcom,allowed-clock-rates",
			sizeof(*res->allowed_clks_tbl),
			(u32 **)&res->allowed_clks_tbl,
			&res->allowed_clks_tbl_size);
	if (rc) {
		dprintk(CVP_ERR,
			"%s: failed to read allowed clocks table\n", __func__);
		return rc;
	}

	sort(res->allowed_clks_tbl, res->allowed_clks_tbl_size,
		sizeof(*res->allowed_clks_tbl), cmp, NULL);

	return 0;
}

static int msm_cvp_populate_mem_cdsp(struct device *dev,
		struct msm_cvp_platform_resources *res)
{
	struct device_node *mem_node;
	int ret;

	mem_node = of_parse_phandle(dev->of_node, "memory-region", 0);
	if (mem_node) {
		ret = of_reserved_mem_device_init_by_idx(dev,
				dev->of_node, 0);
		/* drop the reference taken by of_parse_phandle() */
		of_node_put(mem_node);
		if (ret) {
			dprintk(CVP_ERR,
				"Failed to initialize reserved mem, ret %d\n",
				ret);
			return ret;
		}
	}
	res->mem_cdsp.dev = dev;

	return 0;
}

static int msm_cvp_populate_bus(struct device *dev,
		struct msm_cvp_platform_resources *res)
{
	struct bus_set *buses = &res->bus_set;
	const char *temp_name = NULL;
	struct bus_info *bus = NULL, *temp_table;
	u32 range[2];
	int rc = 0;

	temp_table = krealloc(buses->bus_tbl, sizeof(*temp_table) *
			(buses->count + 1), GFP_KERNEL);
	if (!temp_table) {
		dprintk(CVP_ERR, "%s: Failed to allocate memory\n", __func__);
		rc = -ENOMEM;
		goto err_bus;
	}

	buses->bus_tbl = temp_table;
	bus = &buses->bus_tbl[buses->count];

	memset(bus, 0x0, sizeof(struct bus_info));

	rc = of_property_read_string(dev->of_node, "label", &temp_name);
	if (rc) {
		dprintk(CVP_ERR, "'label' not found in node\n");
		goto err_bus;
	}
	/* need a non-const version of name, hence copying it over */
	bus->name = devm_kstrdup(dev, temp_name, GFP_KERNEL);
	if (!bus->name) {
		rc = -ENOMEM;
		goto err_bus;
	}

	rc = of_property_read_u32(dev->of_node, "qcom,bus-master",
			&bus->master);
	if (rc) {
		dprintk(CVP_ERR, "'qcom,bus-master' not found in node\n");
		goto err_bus;
	}

	rc = of_property_read_u32(dev->of_node, "qcom,bus-slave", &bus->slave);
	if (rc) {
		dprintk(CVP_ERR, "'qcom,bus-slave' not found in node\n");
		goto err_bus;
	}

	rc = of_property_read_string(dev->of_node, "qcom,bus-governor",
			&bus->governor);
	if (rc) {
		rc = 0;
		dprintk(CVP_CORE,
			"'qcom,bus-governor' not found, default to performance governor\n");
		bus->governor = PERF_GOV;
	}

	if (!strcmp(bus->governor, PERF_GOV))
		bus->is_prfm_gov_used = true;

	rc = of_property_read_u32_array(dev->of_node, "qcom,bus-range-kbps",
			range, ARRAY_SIZE(range));
	if (rc) {
		rc = 0;
		dprintk(CVP_CORE,
			"'qcom,bus-range-kbps' not found, defaulting to <0 INT_MAX>\n");
		range[0] = 0;
		range[1] = INT_MAX;
	}

	bus->range[0] = range[0]; /* min */
	bus->range[1] = range[1]; /* max */

	buses->count++;
	bus->dev = dev;
	dprintk(CVP_CORE, "Found bus %s [%d->%d] with governor %s\n",
			bus->name, bus->master, bus->slave, bus->governor);

err_bus:
	return rc;
}

static int msm_cvp_load_regulator_table(
		struct msm_cvp_platform_resources *res)
{
	int rc = 0;
	struct platform_device *pdev = res->pdev;
	struct regulator_set *regulators = &res->regulator_set;
	struct device_node *domains_parent_node = NULL;
	struct property *domains_property = NULL;
	int reg_count = 0;

	regulators->count = 0;
	regulators->regulator_tbl = NULL;

	domains_parent_node = pdev->dev.of_node;
	for_each_property_of_node(domains_parent_node, domains_property) {
		const char *search_string = "-supply";
		char *supply;
		bool matched = false;

		/* check if current property is possibly a regulator */
		supply = strnstr(domains_property->name, search_string,
				strlen(domains_property->name) + 1);
		matched = supply && (*(supply + strlen(search_string)) == '\0');
		if (!matched)
			continue;
		reg_count++;
	}

	regulators->regulator_tbl = devm_kzalloc(&pdev->dev,
			sizeof(*regulators->regulator_tbl) *
			reg_count, GFP_KERNEL);
	if (!regulators->regulator_tbl) {
		rc = -ENOMEM;
		dprintk(CVP_ERR,
			"Failed to alloc memory for regulator table\n");
		goto err_reg_tbl_alloc;
	}

	for_each_property_of_node(domains_parent_node, domains_property) {
		const char *search_string = "-supply";
		char *supply;
		bool matched = false;
		struct device_node *regulator_node = NULL;
		struct regulator_info *rinfo = NULL;

		/* check if current property is possibly a regulator */
		supply = strnstr(domains_property->name, search_string,
				strlen(domains_property->name) + 1);
		matched = supply && (supply[strlen(search_string)] == '\0');
		if (!matched)
			continue;

		/* make sure prop isn't being misused */
		regulator_node = of_parse_phandle(domains_parent_node,
				domains_property->name, 0);
		if (!regulator_node) {
			dprintk(CVP_WARN, "%s is not a phandle\n",
				domains_property->name);
			continue;
		}

		regulators->count++;

		/* populate regulator info */
		rinfo = &regulators->regulator_tbl[regulators->count - 1];
		rinfo->name = devm_kzalloc(&pdev->dev,
				(supply - domains_property->name) + 1, GFP_KERNEL);
		if (!rinfo->name) {
			rc = -ENOMEM;
			dprintk(CVP_ERR,
				"Failed to alloc memory for regulator name\n");
			goto err_reg_name_alloc;
		}
		strlcpy(rinfo->name, domains_property->name,
			(supply - domains_property->name) + 1);

		rinfo->has_hw_power_collapse = of_property_read_bool(
				regulator_node, "qcom,support-hw-trigger");

		dprintk(CVP_CORE, "Found regulator %s: h/w collapse = %s\n",
			rinfo->name,
			rinfo->has_hw_power_collapse ? "yes" : "no");
	}

	if (!regulators->count)
		dprintk(CVP_CORE, "No regulators found\n");

	return 0;

err_reg_name_alloc:
err_reg_tbl_alloc:
	msm_cvp_free_regulator_table(res);
	return rc;
}

static int msm_cvp_load_clock_table(
		struct msm_cvp_platform_resources *res)
{
	int rc = 0, num_clocks = 0, c = 0;
	struct platform_device *pdev = res->pdev;
	int *clock_ids = NULL;
	int *clock_props = NULL;
	struct clock_set *clocks = &res->clock_set;

	num_clocks = of_property_count_strings(pdev->dev.of_node,
			"clock-names");
	if (num_clocks <= 0) {
		dprintk(CVP_CORE, "No clocks found\n");
		clocks->count = 0;
		rc = 0;
		goto err_load_clk_table_fail;
	}

	clock_ids = devm_kzalloc(&pdev->dev, num_clocks *
			sizeof(*clock_ids), GFP_KERNEL);
	if (!clock_ids) {
		dprintk(CVP_ERR, "No memory to read clock ids\n");
		rc = -ENOMEM;
		goto err_load_clk_table_fail;
	}

	rc = of_property_read_u32_array(pdev->dev.of_node,
			"clock-ids", clock_ids,
			num_clocks);
	if (rc) {
		dprintk(CVP_CORE, "Failed to read clock ids: %d\n", rc);
		msm_cvp_mmrm_enabled = false;
		dprintk(CVP_CORE, "flag msm_cvp_mmrm_enabled disabled\n");
	}

	clock_props = devm_kzalloc(&pdev->dev, num_clocks *
			sizeof(*clock_props), GFP_KERNEL);
	if (!clock_props) {
		dprintk(CVP_ERR, "No memory to read clock properties\n");
		rc = -ENOMEM;
		goto err_load_clk_table_fail;
	}

	rc = of_property_read_u32_array(pdev->dev.of_node,
			"qcom,clock-configs", clock_props,
			num_clocks);
	if (rc) {
		dprintk(CVP_ERR, "Failed to read clock properties: %d\n", rc);
		goto err_load_clk_prop_fail;
	}

	clocks->clock_tbl = devm_kzalloc(&pdev->dev, sizeof(*clocks->clock_tbl)
			* num_clocks, GFP_KERNEL);
	if (!clocks->clock_tbl) {
		dprintk(CVP_ERR, "Failed to allocate memory for clock tbl\n");
		rc = -ENOMEM;
		goto err_load_clk_prop_fail;
	}

	clocks->count = num_clocks;
	dprintk(CVP_CORE, "Found %d clocks\n", num_clocks);

	for (c = 0; c < num_clocks; ++c) {
		struct clock_info *vc = &res->clock_set.clock_tbl[c];

		of_property_read_string_index(pdev->dev.of_node,
				"clock-names", c, &vc->name);

		if (msm_cvp_mmrm_enabled)
			vc->clk_id = clock_ids[c];

		if (clock_props[c] & CLOCK_PROP_HAS_SCALING) {
			vc->has_scaling = true;
		} else {
			vc->count = 0;
			vc->has_scaling = false;
		}

		if (clock_props[c] & CLOCK_PROP_HAS_MEM_RETENTION)
			vc->has_mem_retention = true;
		else
			vc->has_mem_retention = false;

		dprintk(CVP_CORE, "Found clock %s id %d: scale-able = %s\n",
			vc->name, vc->clk_id, vc->has_scaling ? "yes" : "no");
	}

	return 0;

err_load_clk_prop_fail:
err_load_clk_table_fail:
	return rc;
}

#define MAX_CLK_RESETS 5

static int msm_cvp_load_reset_table(
		struct msm_cvp_platform_resources *res)
{
	struct platform_device *pdev = res->pdev;
	struct reset_set *rst = &res->reset_set;
	int num_clocks = 0, c = 0, ret = 0;
	int pwr_stats[MAX_CLK_RESETS];

	num_clocks = of_property_count_strings(pdev->dev.of_node,
			"reset-names");
	if (num_clocks <= 0 || num_clocks > MAX_CLK_RESETS) {
		dprintk(CVP_ERR, "Num reset clocks out of range\n");
		rst->count = 0;
		return 0;
	}

	rst->reset_tbl = devm_kcalloc(&pdev->dev, num_clocks,
			sizeof(*rst->reset_tbl), GFP_KERNEL);
	if (!rst->reset_tbl)
		return -ENOMEM;

	rst->count = num_clocks;
	dprintk(CVP_CORE, "Found %d reset clocks\n", num_clocks);

	ret = of_property_read_u32_array(pdev->dev.of_node,
			"reset-power-status", pwr_stats,
			num_clocks);
	if (ret) {
		dprintk(CVP_ERR, "Failed to read reset pwr state: %d\n", ret);
		devm_kfree(&pdev->dev, rst->reset_tbl);
		return ret;
	}

	for (c = 0; c < num_clocks; ++c) {
		struct reset_info *rc = &res->reset_set.reset_tbl[c];

		of_property_read_string_index(pdev->dev.of_node,
				"reset-names", c, &rc->name);
		rc->required_stage = pwr_stats[c];
	}

	return 0;
}
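
/*
 * Illustrative only (assumed devicetree shape, not taken from a shipping DT):
 * msm_cvp_load_reset_table() above pairs each "reset-names" entry with the
 * u32 at the same index in "reset-power-status", stored as that reset's
 * required_stage, e.g.
 *
 *	reset-names        = "cvp_axi_reset", "cvp_core_reset";
 *	reset-power-status = <0x1 0x1>;
 */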

static int find_key_value(struct msm_cvp_platform_data *platform_data,
		const char *key)
{
	int i = 0;
	struct msm_cvp_common_data *common_data = platform_data->common_data;
	int size = platform_data->common_data_length;

	for (i = 0; i < size; i++) {
		if (!strcmp(common_data[i].key, key))
			return common_data[i].value;
	}

	return 0;
}

int cvp_read_platform_resources_from_drv_data(
		struct msm_cvp_core *core)
{
	struct msm_cvp_platform_data *platform_data;
	struct msm_cvp_platform_resources *res;
	int rc = 0, i;

	if (!core || !core->platform_data) {
		dprintk(CVP_ERR, "%s Invalid data\n", __func__);
		return -ENOENT;
	}
	platform_data = core->platform_data;
	res = &core->resources;

	res->sku_version = platform_data->sku_version;

	res->dsp_enabled = find_key_value(platform_data,
			"qcom,dsp-enabled");
	res->max_ssr_allowed = find_key_value(platform_data,
			"qcom,max-ssr-allowed");
	res->sw_power_collapsible = find_key_value(platform_data,
			"qcom,sw-power-collapse");
	res->debug_timeout = find_key_value(platform_data,
			"qcom,debug-timeout");

	res->pm_qos.latency_us = find_key_value(platform_data,
			"qcom,pm-qos-latency-us");
	res->pm_qos.silver_count = 0;
	for (i = 0; i < MAX_SILVER_CORE_NUM; i++) {
		if (topology_cluster_id(i) == 0)
			res->pm_qos.silver_count++;
		else
			break;
	}
	for (i = 0; i < res->pm_qos.silver_count; i++)
		res->pm_qos.silver_cores[i] = i;
	res->pm_qos.off_vote_cnt = 0;
	spin_lock_init(&res->pm_qos.lock);

	res->max_secure_inst_count = find_key_value(platform_data,
			"qcom,max-secure-instances");
	res->thermal_mitigable = find_key_value(platform_data,
			"qcom,enable-thermal-mitigation");
	res->msm_cvp_pwr_collapse_delay = find_key_value(platform_data,
			"qcom,power-collapse-delay");
	res->msm_cvp_hw_rsp_timeout = find_key_value(platform_data,
			"qcom,hw-resp-timeout");
	res->msm_cvp_dsp_rsp_timeout = find_key_value(platform_data,
			"qcom,dsp-resp-timeout");
	res->non_fatal_pagefaults = find_key_value(platform_data,
			"qcom,domain-attr-non-fatal-faults");

	res->vpu_ver = platform_data->vpu_ver;
	res->ubwc_config = platform_data->ubwc_config;
	res->fatal_ssr = false;

	return rc;
}

int cvp_read_platform_resources_from_dt(
		struct msm_cvp_platform_resources *res)
{
	struct platform_device *pdev = res->pdev;
	struct resource *kres = NULL;
	int rc = 0;
	uint32_t firmware_base = 0;

	if (!pdev->dev.of_node) {
		dprintk(CVP_ERR, "DT node not found\n");
		return -ENOENT;
	}

	INIT_LIST_HEAD(&res->context_banks);

	res->firmware_base = (phys_addr_t)firmware_base;

	kres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	res->register_base = kres ? kres->start : -1;
	res->register_size = kres ? (kres->end + 1 - kres->start) : -1;

	res->irq = platform_get_irq(pdev, 0);
	dprintk(CVP_CORE, "%s: res->irq:%d\n",
		__func__, res->irq);

	/* Parsing for WD interrupt */
	res->irq_wd = platform_get_irq(pdev, 1);
	dprintk(CVP_CORE, "%s: res->irq_wd:%d\n",
		__func__, res->irq_wd);

	rc = msm_cvp_load_fw_name(res);
	dprintk(CVP_CORE, "Firmware filename: %s\n", res->fw_name);
	if (rc)
		dprintk(CVP_WARN, "Failed to load fw name info: %d\n", rc);

	rc = msm_cvp_load_subcache_info(res);
	if (rc)
		dprintk(CVP_WARN, "Failed to load subcache info: %d\n", rc);

	rc = msm_cvp_load_qdss_table(res);
	if (rc)
		dprintk(CVP_WARN, "Failed to load qdss reg table: %d\n", rc);

	rc = msm_cvp_load_reg_table(res);
	if (rc) {
		dprintk(CVP_ERR, "Failed to load reg table: %d\n", rc);
		goto err_load_reg_table;
	}

	rc = msm_cvp_load_ipcc_regs(res);
	if (rc)
		dprintk(CVP_ERR, "Failed to load IPCC regs: %d\n", rc);

	rc = msm_cvp_load_regspace_mapping(res);
	if (rc)
		dprintk(CVP_ERR, "Failed to load reg space mapping: %d\n", rc);

	rc = msm_cvp_load_gcc_regs(res);

	rc = msm_cvp_load_regulator_table(res);
	if (rc) {
		dprintk(CVP_ERR, "Failed to load list of regulators %d\n", rc);
		goto err_load_regulator_table;
	}

	rc = msm_cvp_load_clock_table(res);
	if (rc) {
		dprintk(CVP_ERR,
			"Failed to load clock table: %d\n", rc);
		goto err_load_clock_table;
	}

	rc = msm_cvp_load_allowed_clocks_table(res);
	if (rc) {
		dprintk(CVP_ERR,
			"Failed to load allowed clocks table: %d\n", rc);
		goto err_load_allowed_clocks_table;
	}

	rc = msm_cvp_load_reset_table(res);
	if (rc) {
		dprintk(CVP_ERR,
			"Failed to load reset table: %d\n", rc);
		goto err_load_reset_table;
	}

	res->use_non_secure_pil = of_property_read_bool(pdev->dev.of_node,
			"qcom,use-non-secure-pil");

	if (res->use_non_secure_pil || !is_iommu_present(res)) {
		of_property_read_u32(pdev->dev.of_node, "qcom,fw-bias",
				&firmware_base);
		res->firmware_base = (phys_addr_t)firmware_base;
		dprintk(CVP_CORE,
			"Using fw-bias : %pa", &res->firmware_base);
	}

	return rc;

err_load_reset_table:
	msm_cvp_free_allowed_clocks_table(res);
err_load_allowed_clocks_table:
	msm_cvp_free_clock_table(res);
err_load_clock_table:
	msm_cvp_free_regulator_table(res);
err_load_regulator_table:
	msm_cvp_free_reg_table(res);
err_load_reg_table:
	return rc;
}

static int msm_cvp_setup_context_bank(struct msm_cvp_platform_resources *res,
		struct context_bank_info *cb, struct device *dev)
{
	int rc = 0;
	struct bus_type *bus;

	if (!dev || !cb || !res) {
		dprintk(CVP_ERR,
			"%s: Invalid Input params\n", __func__);
		return -EINVAL;
	}
	cb->dev = dev;

	bus = cb->dev->bus;
	if (IS_ERR_OR_NULL(bus)) {
		dprintk(CVP_ERR, "%s - failed to get bus type\n", __func__);
		rc = PTR_ERR(bus) ?: -ENODEV;
		goto remove_cb;
	}

	/*
	 * configure device segment size and segment boundary to ensure
	 * iommu mapping returns one mapping (which is required for partial
	 * cache operations)
	 */
	if (!dev->dma_parms)
		dev->dma_parms =
			devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
	dma_set_seg_boundary(dev, DMA_BIT_MASK(64));

	dprintk(CVP_CORE, "Attached %s and created mapping\n", dev_name(dev));
	dprintk(CVP_CORE,
		"Context bank name:%s, buffer_type: %#x, is_secure: %d, address range start: %#x, size: %#x, dev: %pK",
		cb->name, cb->buffer_type, cb->is_secure, cb->addr_range.start,
		cb->addr_range.size, cb->dev);

	return rc;

remove_cb:
	return rc;
}

int msm_cvp_smmu_fault_handler(struct iommu_domain *domain,
		struct device *dev, unsigned long iova, int flags, void *token)
{
	struct msm_cvp_core *core = token;
	struct iris_hfi_device *hdev;
	struct msm_cvp_inst *inst;
	bool log = false;

	if (!domain || !core) {
		dprintk(CVP_ERR, "%s - invalid param %pK %pK\n",
			__func__, domain, core);
		return -EINVAL;
	}

	dprintk(CVP_ERR, "%s - faulting address: %lx fault cnt %d\n",
		__func__, iova, core->smmu_fault_count);
	if (core->smmu_fault_count > 0) {
		core->smmu_fault_count++;
		return -ENOSYS;
	}

	mutex_lock(&core->lock);
	core->smmu_fault_count++;
	if (!core->last_fault_addr)
		core->last_fault_addr = iova;

	log = (core->log.snapshot_index > 0) ? false : true;
	list_for_each_entry(inst, &core->instances, list) {
		cvp_print_inst(CVP_ERR, inst);
		msm_cvp_print_inst_bufs(inst, log);
	}
	hdev = core->dev_ops->hfi_device_data;
	if (hdev) {
		hdev->error = CVP_ERR_NOC_ERROR;
		call_hfi_op(core->dev_ops, debug_hook, hdev);
	}
	mutex_unlock(&core->lock);

	/*
	 * Return -EINVAL to elicit the default behaviour of smmu driver.
	 * If we return -ENOSYS, then smmu driver assumes page fault handler
	 * is not installed and prints a list of useful debug information like
	 * FAR, SID etc. This information is not printed if we return 0.
	 */
	return -ENOSYS;
}

static int msm_cvp_populate_context_bank(struct device *dev,
		struct msm_cvp_core *core)
{
	int rc = 0;
	struct context_bank_info *cb = NULL;
	struct device_node *np = NULL;

	if (!dev || !core) {
		dprintk(CVP_ERR, "%s - invalid inputs\n", __func__);
		return -EINVAL;
	}

	np = dev->of_node;
	cb = devm_kzalloc(dev, sizeof(*cb), GFP_KERNEL);
	if (!cb) {
		dprintk(CVP_ERR, "%s - Failed to allocate cb\n", __func__);
		return -ENOMEM;
	}

	rc = of_property_read_string(np, "label", &cb->name);
	if (rc) {
		dprintk(CVP_CORE,
			"Failed to read cb label from device tree\n");
		rc = 0;
	}

	INIT_LIST_HEAD(&cb->list);
	list_add_tail(&cb->list, &core->resources.context_banks);

	dprintk(CVP_CORE, "%s: context bank has name %s\n", __func__, cb->name);
	if (!strcmp(cb->name, "cvp_camera")) {
		cb->is_secure = true;
		rc = msm_cvp_setup_context_bank(&core->resources, cb, dev);
		if (rc) {
			dprintk(CVP_ERR, "Cannot setup context bank %s %d\n",
				cb->name, rc);
			goto err_setup_cb;
		}
		return 0;
	}

	rc = of_property_read_u32_array(np, "qcom,iommu-dma-addr-pool",
			(u32 *)&cb->addr_range, 2);
	if (rc) {
		dprintk(CVP_CORE,
			"Could not read addr pool for context bank : %s %d\n",
			cb->name, rc);
	}

	cb->is_secure = of_property_read_bool(np, "qcom,iommu-vmid");
	dprintk(CVP_CORE, "context bank %s : secure = %d\n",
		cb->name, cb->is_secure);

	/* setup buffer type for each sub device */
	rc = of_property_read_u32(np, "buffer-types", &cb->buffer_type);
	if (rc) {
		dprintk(CVP_ERR, "failed to load buffer_type info %d\n", rc);
		rc = -ENOENT;
		goto err_setup_cb;
	}
	dprintk(CVP_CORE,
		"context bank %s address start = %x address size = %x buffer_type = %x\n",
		cb->name, cb->addr_range.start,
		cb->addr_range.size, cb->buffer_type);

	cb->domain = iommu_get_domain_for_dev(dev);
	if (IS_ERR_OR_NULL(cb->domain)) {
		dprintk(CVP_ERR, "Create domain failed\n");
		rc = -ENODEV;
		goto err_setup_cb;
	}

	rc = msm_cvp_setup_context_bank(&core->resources, cb, dev);
	if (rc) {
		dprintk(CVP_ERR, "Cannot setup context bank %d\n", rc);
		goto err_setup_cb;
	}

	iommu_set_fault_handler(cb->domain,
		msm_cvp_smmu_fault_handler, (void *)core);

	return 0;

err_setup_cb:
	list_del(&cb->list);
	return rc;
}

int cvp_read_context_bank_resources_from_dt(struct platform_device *pdev)
{
	struct msm_cvp_core *core;
	int rc = 0;

	if (!pdev) {
		dprintk(CVP_ERR, "Invalid platform device\n");
		return -EINVAL;
	} else if (!pdev->dev.parent) {
		dprintk(CVP_ERR, "Failed to find a parent for %s\n",
			dev_name(&pdev->dev));
		return -ENODEV;
	}

	core = dev_get_drvdata(pdev->dev.parent);
	if (!core) {
		dprintk(CVP_ERR, "Failed to find cookie in parent device %s",
			dev_name(pdev->dev.parent));
		return -EINVAL;
	}

	rc = msm_cvp_populate_context_bank(&pdev->dev, core);
	if (rc)
		dprintk(CVP_ERR, "Failed to probe context bank\n");
	else
		dprintk(CVP_CORE, "Successfully probed context bank\n");

	return rc;
}

int cvp_read_bus_resources_from_dt(struct platform_device *pdev)
{
	struct msm_cvp_core *core;

	if (!pdev) {
		dprintk(CVP_ERR, "Invalid platform device\n");
		return -EINVAL;
	} else if (!pdev->dev.parent) {
		dprintk(CVP_ERR, "Failed to find a parent for %s\n",
			dev_name(&pdev->dev));
		return -ENODEV;
	}

	core = dev_get_drvdata(pdev->dev.parent);
	if (!core) {
		dprintk(CVP_ERR, "Failed to find cookie in parent device %s",
			dev_name(pdev->dev.parent));
		return -EINVAL;
	}

	return msm_cvp_populate_bus(&pdev->dev, &core->resources);
}

int cvp_read_mem_cdsp_resources_from_dt(struct platform_device *pdev)
{
	struct msm_cvp_core *core;

	if (!pdev) {
		dprintk(CVP_ERR, "%s: invalid platform device\n", __func__);
		return -EINVAL;
	} else if (!pdev->dev.parent) {
		dprintk(CVP_ERR, "Failed to find a parent for %s\n",
			dev_name(&pdev->dev));
		return -ENODEV;
	}

	core = dev_get_drvdata(pdev->dev.parent);
	if (!core) {
		dprintk(CVP_ERR, "Failed to find cookie in parent device %s",
			dev_name(pdev->dev.parent));
		return -EINVAL;
	}

	return msm_cvp_populate_mem_cdsp(&pdev->dev, &core->resources);
}