msm_cvp_res_parse.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
 */

#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/of_reserved_mem.h>
#include "msm_cvp_debug.h"
#include "msm_cvp_resources.h"
#include "msm_cvp_res_parse.h"
#include "cvp_core_hfi.h"
#include "soc/qcom/secure_buffer.h"

enum clock_properties {
	CLOCK_PROP_HAS_SCALING = 1 << 0,
	CLOCK_PROP_HAS_MEM_RETENTION = 1 << 1,
};

#define PERF_GOV "performance"

static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
{
	return NULL;
}

static size_t get_u32_array_num_elements(struct device_node *np,
					 char *name)
{
	int len;
	size_t num_elements = 0;

	if (!of_get_property(np, name, &len)) {
		dprintk(CVP_ERR, "Failed to read %s from device tree\n",
			name);
		goto fail_read;
	}

	num_elements = len / sizeof(u32);
	if (num_elements <= 0) {
		dprintk(CVP_ERR, "%s not specified in device tree\n",
			name);
		goto fail_read;
	}

	return num_elements;

fail_read:
	return 0;
}

static inline void msm_cvp_free_allowed_clocks_table(
		struct msm_cvp_platform_resources *res)
{
	res->allowed_clks_tbl = NULL;
}

static inline void msm_cvp_free_cycles_per_mb_table(
		struct msm_cvp_platform_resources *res)
{
	res->clock_freq_tbl.clk_prof_entries = NULL;
}

static inline void msm_cvp_free_reg_table(
		struct msm_cvp_platform_resources *res)
{
	res->reg_set.reg_tbl = NULL;
}

static inline void msm_cvp_free_qdss_addr_table(
		struct msm_cvp_platform_resources *res)
{
	res->qdss_addr_set.addr_tbl = NULL;
}

static inline void msm_cvp_free_bus_vectors(
		struct msm_cvp_platform_resources *res)
{
	kfree(res->bus_set.bus_tbl);
	res->bus_set.bus_tbl = NULL;
	res->bus_set.count = 0;
}

static inline void msm_cvp_free_regulator_table(
		struct msm_cvp_platform_resources *res)
{
	int c = 0;

	for (c = 0; c < res->regulator_set.count; ++c) {
		struct regulator_info *rinfo =
			&res->regulator_set.regulator_tbl[c];

		rinfo->name = NULL;
	}

	res->regulator_set.regulator_tbl = NULL;
	res->regulator_set.count = 0;
}

static inline void msm_cvp_free_clock_table(
		struct msm_cvp_platform_resources *res)
{
	res->clock_set.clock_tbl = NULL;
	res->clock_set.count = 0;
}

void msm_cvp_free_platform_resources(
		struct msm_cvp_platform_resources *res)
{
	msm_cvp_free_clock_table(res);
	msm_cvp_free_regulator_table(res);
	msm_cvp_free_allowed_clocks_table(res);
	msm_cvp_free_reg_table(res);
	msm_cvp_free_qdss_addr_table(res);
	msm_cvp_free_bus_vectors(res);
}

static int msm_cvp_load_ipcc_regs(struct msm_cvp_platform_resources *res)
{
	int ret = 0;
	unsigned int reg_config[2];
	struct platform_device *pdev = res->pdev;

	ret = of_property_read_u32_array(pdev->dev.of_node, "qcom,ipcc-reg",
				reg_config, 2);
	if (ret) {
		dprintk(CVP_ERR, "Failed to read ipcc reg: %d\n", ret);
		return ret;
	}

	res->ipcc_reg_base = reg_config[0];
	res->ipcc_reg_size = reg_config[1];

	dprintk(CVP_CORE,
		"ipcc reg_base = %x, reg_size = %x\n",
		res->ipcc_reg_base,
		res->ipcc_reg_size);

	return ret;
}
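
/*
 * Illustrative only: a hypothetical device-tree fragment that the parser
 * above would accept. The property name comes from the code; the <base size>
 * values are placeholders, not taken from any real board file.
 *
 *	cvp: qcom,msm-cvp {
 *		qcom,ipcc-reg = <0x00400000 0x10000>;
 *	};
 */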

static int msm_cvp_load_regspace_mapping(struct msm_cvp_platform_resources *res)
{
	int ret = 0;
	unsigned int ipclite_mapping_config[3] = {0};
	unsigned int hwmutex_mapping_config[3] = {0};
	unsigned int aon_mapping_config[3] = {0};
	unsigned int timer_config[3] = {0};
	struct platform_device *pdev = res->pdev;

	ret = of_property_read_u32_array(pdev->dev.of_node, "ipclite_mappings",
			ipclite_mapping_config, 3);
	if (ret) {
		dprintk(CVP_ERR, "Failed to read ipclite reg: %d\n", ret);
		return ret;
	}
	res->reg_mappings.ipclite_iova = ipclite_mapping_config[0];
	res->reg_mappings.ipclite_size = ipclite_mapping_config[1];
	res->reg_mappings.ipclite_phyaddr = ipclite_mapping_config[2];

	ret = of_property_read_u32_array(pdev->dev.of_node, "hwmutex_mappings",
			hwmutex_mapping_config, 3);
	if (ret) {
		dprintk(CVP_ERR, "Failed to read hwmutex reg: %d\n", ret);
		return ret;
	}
	res->reg_mappings.hwmutex_iova = hwmutex_mapping_config[0];
	res->reg_mappings.hwmutex_size = hwmutex_mapping_config[1];
	res->reg_mappings.hwmutex_phyaddr = hwmutex_mapping_config[2];

	ret = of_property_read_u32_array(pdev->dev.of_node, "aon_mappings",
			aon_mapping_config, 3);
	if (ret) {
		dprintk(CVP_ERR, "Failed to read aon reg: %d\n", ret);
		return ret;
	}
	res->reg_mappings.aon_iova = aon_mapping_config[0];
	res->reg_mappings.aon_size = aon_mapping_config[1];
	res->reg_mappings.aon_phyaddr = aon_mapping_config[2];

	ret = of_property_read_u32_array(pdev->dev.of_node,
			"aon_timer_mappings", timer_config, 3);
	if (ret) {
		dprintk(CVP_ERR, "Failed to read timer reg: %d\n", ret);
		return ret;
	}
	res->reg_mappings.timer_iova = timer_config[0];
	res->reg_mappings.timer_size = timer_config[1];
	res->reg_mappings.timer_phyaddr = timer_config[2];

	dprintk(CVP_CORE,
		"reg mappings %#x %#x %#x %#x %#x %#X %#x %#x %#x %#x %#x %#x\n",
		res->reg_mappings.ipclite_iova, res->reg_mappings.ipclite_size,
		res->reg_mappings.ipclite_phyaddr, res->reg_mappings.hwmutex_iova,
		res->reg_mappings.hwmutex_size, res->reg_mappings.hwmutex_phyaddr,
		res->reg_mappings.aon_iova, res->reg_mappings.aon_size,
		res->reg_mappings.aon_phyaddr, res->reg_mappings.timer_iova,
		res->reg_mappings.timer_size, res->reg_mappings.timer_phyaddr);

	return ret;
}
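
/*
 * Illustrative only: a hypothetical fragment showing the <iova size phys>
 * triplet layout that msm_cvp_load_regspace_mapping() expects for each
 * property. The property names come from the code; every address and size
 * below is a made-up placeholder.
 *
 *	ipclite_mappings   = <0xfe500000 0x100000 0x82600000>;
 *	hwmutex_mappings   = <0xfe600000 0x8000   0x01f40000>;
 *	aon_mappings       = <0xfe700000 0x1000   0x0ab00000>;
 *	aon_timer_mappings = <0xfe708000 0x1000   0x0ab01000>;
 */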

static int msm_cvp_load_gcc_regs(struct msm_cvp_platform_resources *res)
{
	int ret = 0;
	unsigned int reg_config[2];
	struct platform_device *pdev = res->pdev;

	ret = of_property_read_u32_array(pdev->dev.of_node, "qcom,gcc-reg",
				reg_config, 2);
	if (ret) {
		dprintk(CVP_WARN, "No gcc reg configured: %d\n", ret);
		return ret;
	}

	res->gcc_reg_base = reg_config[0];
	res->gcc_reg_size = reg_config[1];

	return ret;
}

static int msm_cvp_load_reg_table(struct msm_cvp_platform_resources *res)
{
	struct reg_set *reg_set;
	struct platform_device *pdev = res->pdev;
	int i;
	int rc = 0;

	if (!of_find_property(pdev->dev.of_node, "qcom,reg-presets", NULL)) {
		/*
		 * qcom,reg-presets is an optional property. It likely won't be
		 * present if we don't have any register settings to program
		 */
		dprintk(CVP_CORE, "qcom,reg-presets not found\n");
		return 0;
	}

	reg_set = &res->reg_set;
	reg_set->count = get_u32_array_num_elements(pdev->dev.of_node,
			"qcom,reg-presets");
	reg_set->count /= sizeof(*reg_set->reg_tbl) / sizeof(u32);

	if (!reg_set->count) {
		dprintk(CVP_CORE, "no elements in reg set\n");
		return rc;
	}

	reg_set->reg_tbl = devm_kzalloc(&pdev->dev, reg_set->count *
			sizeof(*(reg_set->reg_tbl)), GFP_KERNEL);
	if (!reg_set->reg_tbl) {
		dprintk(CVP_ERR, "%s Failed to alloc register table\n",
			__func__);
		return -ENOMEM;
	}

	if (of_property_read_u32_array(pdev->dev.of_node, "qcom,reg-presets",
		(u32 *)reg_set->reg_tbl, reg_set->count * 2)) {
		dprintk(CVP_ERR, "Failed to read register table\n");
		msm_cvp_free_reg_table(res);
		return -EINVAL;
	}

	for (i = 0; i < reg_set->count; i++) {
		dprintk(CVP_CORE,
			"reg = %x, value = %x\n",
			reg_set->reg_tbl[i].reg,
			reg_set->reg_tbl[i].value);
	}

	return rc;
}
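
/*
 * Illustrative only: qcom,reg-presets is consumed as <register value> pairs
 * (reg_set->count is divided by the per-entry size above). A hypothetical
 * fragment with made-up offsets and values:
 *
 *	qcom,reg-presets = <0x80124 0x00000003>,
 *			   <0x80128 0x00000001>;
 */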

static int msm_cvp_load_qdss_table(struct msm_cvp_platform_resources *res)
{
	struct addr_set *qdss_addr_set;
	struct platform_device *pdev = res->pdev;
	int i;
	int rc = 0;

	if (!of_find_property(pdev->dev.of_node, "qcom,qdss-presets", NULL)) {
		/*
		 * qcom,qdss-presets is an optional property. It likely won't be
		 * present if we don't have any register settings to program
		 */
		dprintk(CVP_CORE, "qcom,qdss-presets not found\n");
		return rc;
	}

	qdss_addr_set = &res->qdss_addr_set;
	qdss_addr_set->count = get_u32_array_num_elements(pdev->dev.of_node,
			"qcom,qdss-presets");
	qdss_addr_set->count /= sizeof(*qdss_addr_set->addr_tbl) / sizeof(u32);

	if (!qdss_addr_set->count) {
		dprintk(CVP_CORE, "no elements in qdss reg set\n");
		return rc;
	}

	qdss_addr_set->addr_tbl = devm_kzalloc(&pdev->dev,
			qdss_addr_set->count * sizeof(*qdss_addr_set->addr_tbl),
			GFP_KERNEL);
	if (!qdss_addr_set->addr_tbl) {
		dprintk(CVP_ERR, "%s Failed to alloc register table\n",
			__func__);
		rc = -ENOMEM;
		goto err_qdss_addr_tbl;
	}

	rc = of_property_read_u32_array(pdev->dev.of_node, "qcom,qdss-presets",
		(u32 *)qdss_addr_set->addr_tbl, qdss_addr_set->count * 2);
	if (rc) {
		dprintk(CVP_ERR, "Failed to read qdss address table\n");
		msm_cvp_free_qdss_addr_table(res);
		rc = -EINVAL;
		goto err_qdss_addr_tbl;
	}

	for (i = 0; i < qdss_addr_set->count; i++) {
		dprintk(CVP_CORE, "qdss addr = %x, value = %x\n",
				qdss_addr_set->addr_tbl[i].start,
				qdss_addr_set->addr_tbl[i].size);
	}

err_qdss_addr_tbl:
	return rc;
}
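
/*
 * Illustrative only: qcom,qdss-presets is parsed the same way, as
 * <start size> pairs. The values below are hypothetical:
 *
 *	qcom,qdss-presets = <0x06900000 0x1000>,
 *			    <0x06908000 0x1000>;
 */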

static int msm_cvp_load_subcache_info(struct msm_cvp_platform_resources *res)
{
	int rc = 0, num_subcaches = 0, c;
	struct platform_device *pdev = res->pdev;
	struct subcache_set *subcaches = &res->subcache_set;

	num_subcaches = of_property_count_strings(pdev->dev.of_node,
			"cache-slice-names");
	if (num_subcaches <= 0) {
		dprintk(CVP_CORE, "No subcaches found\n");
		goto err_load_subcache_table_fail;
	}

	subcaches->subcache_tbl = devm_kzalloc(&pdev->dev,
		sizeof(*subcaches->subcache_tbl) * num_subcaches, GFP_KERNEL);
	if (!subcaches->subcache_tbl) {
		dprintk(CVP_ERR,
			"Failed to allocate memory for subcache tbl\n");
		rc = -ENOMEM;
		goto err_load_subcache_table_fail;
	}

	subcaches->count = num_subcaches;
	dprintk(CVP_CORE, "Found %d subcaches\n", num_subcaches);

	for (c = 0; c < num_subcaches; ++c) {
		struct subcache_info *vsc = &res->subcache_set.subcache_tbl[c];

		of_property_read_string_index(pdev->dev.of_node,
			"cache-slice-names", c, &vsc->name);
	}

	res->sys_cache_present = true;

	return 0;

err_load_subcache_table_fail:
	res->sys_cache_present = false;
	subcaches->count = 0;
	subcaches->subcache_tbl = NULL;

	return rc;
}
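
/*
 * Illustrative only: the subcache table is driven purely by the string list
 * below; the slice names are hypothetical and depend on the target's LLCC
 * configuration.
 *
 *	cache-slice-names = "cvp", "cvp_1";
 */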

/**
 * msm_cvp_load_u32_table() - load dtsi table entries
 * @pdev: A pointer to the platform device.
 * @of_node: A pointer to the device node.
 * @table_name: A pointer to the dtsi table entry name.
 * @struct_size: The size of a single entry in the dtsi table.
 * @table: A pointer to the table pointer which is to be filled
 *	with the dtsi table entries.
 * @num_elements: A pointer which is filled with the number of
 *	elements in the table.
 *
 * This is a generic implementation to load single or multiple array
 * tables from dtsi. The array elements should be of size equal to u32.
 *
 * Return: Return '0' for success else appropriate error value.
 */
int msm_cvp_load_u32_table(struct platform_device *pdev,
		struct device_node *of_node, char *table_name, int struct_size,
		u32 **table, u32 *num_elements)
{
	int rc = 0, num_elemts = 0;
	u32 *ptbl = NULL;

	if (!of_find_property(of_node, table_name, NULL)) {
		dprintk(CVP_CORE, "%s not found\n", table_name);
		return 0;
	}

	num_elemts = get_u32_array_num_elements(of_node, table_name);
	if (!num_elemts) {
		dprintk(CVP_ERR, "no elements in %s\n", table_name);
		return 0;
	}
	num_elemts /= struct_size / sizeof(u32);

	ptbl = devm_kzalloc(&pdev->dev, num_elemts * struct_size, GFP_KERNEL);
	if (!ptbl) {
		dprintk(CVP_ERR, "Failed to alloc table %s\n", table_name);
		return -ENOMEM;
	}

	if (of_property_read_u32_array(of_node, table_name, ptbl,
			num_elemts * struct_size / sizeof(u32))) {
		dprintk(CVP_ERR, "Failed to read %s\n", table_name);
		return -EINVAL;
	}

	*table = ptbl;
	if (num_elements)
		*num_elements = num_elemts;

	return rc;
}
EXPORT_SYMBOL(msm_cvp_load_u32_table);
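
/*
 * Usage sketch (assumptions only): given a dtsi property laid out as pairs,
 * e.g. qcom,example-pairs = <a b>, <c d>, and a matching two-u32 struct, the
 * helper above loads the whole table in one call. The property name and the
 * struct below are hypothetical, not part of this driver.
 *
 *	struct example_pair { u32 key; u32 val; } *tbl = NULL;
 *	u32 n = 0;
 *
 *	rc = msm_cvp_load_u32_table(pdev, pdev->dev.of_node,
 *			"qcom,example-pairs", sizeof(*tbl),
 *			(u32 **)&tbl, &n);
 */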

/* A comparator to compare loads (needed later on) */
static int cmp(const void *a, const void *b)
{
	return ((struct allowed_clock_rates_table *)a)->clock_rate -
		((struct allowed_clock_rates_table *)b)->clock_rate;
}

static int msm_cvp_load_allowed_clocks_table(
		struct msm_cvp_platform_resources *res)
{
	int rc = 0;
	struct platform_device *pdev = res->pdev;

	if (!of_find_property(pdev->dev.of_node,
			"qcom,allowed-clock-rates", NULL)) {
		dprintk(CVP_CORE, "qcom,allowed-clock-rates not found\n");
		return 0;
	}

	rc = msm_cvp_load_u32_table(pdev, pdev->dev.of_node,
			"qcom,allowed-clock-rates",
			sizeof(*res->allowed_clks_tbl),
			(u32 **)&res->allowed_clks_tbl,
			&res->allowed_clks_tbl_size);
	if (rc) {
		dprintk(CVP_ERR,
			"%s: failed to read allowed clocks table\n", __func__);
		return rc;
	}

	sort(res->allowed_clks_tbl, res->allowed_clks_tbl_size,
		sizeof(*res->allowed_clks_tbl), cmp, NULL);

	return 0;
}
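
/*
 * Illustrative only: qcom,allowed-clock-rates is a flat u32 list that is
 * sorted ascending by cmp() after loading. The rates below are hypothetical
 * placeholders:
 *
 *	qcom,allowed-clock-rates = <239999999 338999999 366000000 444000000>;
 */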

static int msm_cvp_populate_mem_cdsp(struct device *dev,
		struct msm_cvp_platform_resources *res)
{
	struct device_node *mem_node;
	int ret;

	mem_node = of_parse_phandle(dev->of_node, "memory-region", 0);
	if (mem_node) {
		ret = of_reserved_mem_device_init_by_idx(dev,
				dev->of_node, 0);
		of_node_put(mem_node);
		if (ret) {
			dprintk(CVP_ERR,
				"Failed to initialize reserved mem, ret %d\n",
				ret);
			return ret;
		}
	}
	res->mem_cdsp.dev = dev;

	return 0;
}
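
/*
 * Illustrative only: the CDSP memory is picked up from index 0 of a standard
 * memory-region phandle list; the node label below is a placeholder.
 *
 *	memory-region = <&cvp_cdsp_mem>;
 */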

static int msm_cvp_populate_bus(struct device *dev,
		struct msm_cvp_platform_resources *res)
{
	struct bus_set *buses = &res->bus_set;
	const char *temp_name = NULL;
	struct bus_info *bus = NULL, *temp_table;
	u32 range[2];
	int rc = 0;

	temp_table = krealloc(buses->bus_tbl, sizeof(*temp_table) *
			(buses->count + 1), GFP_KERNEL);
	if (!temp_table) {
		dprintk(CVP_ERR, "%s: Failed to allocate memory", __func__);
		rc = -ENOMEM;
		goto err_bus;
	}

	buses->bus_tbl = temp_table;
	bus = &buses->bus_tbl[buses->count];

	memset(bus, 0x0, sizeof(struct bus_info));

	rc = of_property_read_string(dev->of_node, "label", &temp_name);
	if (rc) {
		dprintk(CVP_ERR, "'label' not found in node\n");
		goto err_bus;
	}
	/* need a non-const version of name, hence copying it over */
	bus->name = devm_kstrdup(dev, temp_name, GFP_KERNEL);
	if (!bus->name) {
		rc = -ENOMEM;
		goto err_bus;
	}

	rc = of_property_read_u32(dev->of_node, "qcom,bus-master",
			&bus->master);
	if (rc) {
		dprintk(CVP_ERR, "'qcom,bus-master' not found in node\n");
		goto err_bus;
	}

	rc = of_property_read_u32(dev->of_node, "qcom,bus-slave", &bus->slave);
	if (rc) {
		dprintk(CVP_ERR, "'qcom,bus-slave' not found in node\n");
		goto err_bus;
	}

	rc = of_property_read_string(dev->of_node, "qcom,bus-governor",
			&bus->governor);
	if (rc) {
		rc = 0;
		dprintk(CVP_CORE,
			"'qcom,bus-governor' not found, default to performance governor\n");
		bus->governor = PERF_GOV;
	}

	if (!strcmp(bus->governor, PERF_GOV))
		bus->is_prfm_gov_used = true;

	rc = of_property_read_u32_array(dev->of_node, "qcom,bus-range-kbps",
			range, ARRAY_SIZE(range));
	if (rc) {
		rc = 0;
		dprintk(CVP_CORE,
			"'qcom,bus-range-kbps' not found, defaulting to <0 INT_MAX>\n");
		range[0] = 0;
		range[1] = INT_MAX;
	}

	bus->range[0] = range[0]; /* min */
	bus->range[1] = range[1]; /* max */

	buses->count++;
	bus->dev = dev;
	dprintk(CVP_CORE, "Found bus %s [%d->%d] with governor %s\n",
			bus->name, bus->master, bus->slave, bus->governor);

err_bus:
	return rc;
}
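
/*
 * Illustrative only: a hypothetical bus child node matching the properties
 * read above. The master/slave IDs and kbps range are placeholders;
 * qcom,bus-governor falls back to "performance" when absent.
 *
 *	cvp_bus_ddr {
 *		label = "cvp-ddr";
 *		qcom,bus-master = <1>;
 *		qcom,bus-slave = <512>;
 *		qcom,bus-governor = "performance";
 *		qcom,bus-range-kbps = <1000 6533000>;
 *	};
 */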

static int msm_cvp_load_regulator_table(
		struct msm_cvp_platform_resources *res)
{
	int rc = 0;
	struct platform_device *pdev = res->pdev;
	struct regulator_set *regulators = &res->regulator_set;
	struct device_node *domains_parent_node = NULL;
	struct property *domains_property = NULL;
	int reg_count = 0;

	regulators->count = 0;
	regulators->regulator_tbl = NULL;

	domains_parent_node = pdev->dev.of_node;
	for_each_property_of_node(domains_parent_node, domains_property) {
		const char *search_string = "-supply";
		char *supply;
		bool matched = false;

		/* check if current property is possibly a regulator */
		supply = strnstr(domains_property->name, search_string,
				strlen(domains_property->name) + 1);
		matched = supply && (*(supply + strlen(search_string)) == '\0');
		if (!matched)
			continue;

		reg_count++;
	}

	regulators->regulator_tbl = devm_kzalloc(&pdev->dev,
			sizeof(*regulators->regulator_tbl) *
			reg_count, GFP_KERNEL);
	if (!regulators->regulator_tbl) {
		rc = -ENOMEM;
		dprintk(CVP_ERR,
			"Failed to alloc memory for regulator table\n");
		goto err_reg_tbl_alloc;
	}

	for_each_property_of_node(domains_parent_node, domains_property) {
		const char *search_string = "-supply";
		char *supply;
		bool matched = false;
		struct device_node *regulator_node = NULL;
		struct regulator_info *rinfo = NULL;

		/* check if current property is possibly a regulator */
		supply = strnstr(domains_property->name, search_string,
				strlen(domains_property->name) + 1);
		matched = supply && (supply[strlen(search_string)] == '\0');
		if (!matched)
			continue;

		/* make sure prop isn't being misused */
		regulator_node = of_parse_phandle(domains_parent_node,
				domains_property->name, 0);
		if (IS_ERR(regulator_node)) {
			dprintk(CVP_WARN, "%s is not a phandle\n",
					domains_property->name);
			continue;
		}

		regulators->count++;

		/* populate regulator info */
		rinfo = &regulators->regulator_tbl[regulators->count - 1];
		rinfo->name = devm_kzalloc(&pdev->dev,
				(supply - domains_property->name) + 1,
				GFP_KERNEL);
		if (!rinfo->name) {
			rc = -ENOMEM;
			dprintk(CVP_ERR,
				"Failed to alloc memory for regulator name\n");
			goto err_reg_name_alloc;
		}
		strlcpy(rinfo->name, domains_property->name,
			(supply - domains_property->name) + 1);

		rinfo->has_hw_power_collapse = of_property_read_bool(
			regulator_node, "qcom,support-hw-trigger");

		dprintk(CVP_CORE, "Found regulator %s: h/w collapse = %s\n",
				rinfo->name,
				rinfo->has_hw_power_collapse ? "yes" : "no");
	}

	if (!regulators->count)
		dprintk(CVP_CORE, "No regulators found");

	return 0;

err_reg_name_alloc:
err_reg_tbl_alloc:
	msm_cvp_free_regulator_table(res);
	return rc;
}
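
/*
 * Illustrative only: any "*-supply" phandle in the CVP node is treated as a
 * regulator, and the prefix before "-supply" becomes rinfo->name. The names
 * below are hypothetical.
 *
 *	cvp-supply = <&cvp_gdsc>;
 *	cvp-core-supply = <&cvp_core_gdsc>;
 *
 * If the referenced regulator node carries "qcom,support-hw-trigger",
 * has_hw_power_collapse is reported as "yes".
 */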

static int msm_cvp_load_clock_table(
		struct msm_cvp_platform_resources *res)
{
	int rc = 0, num_clocks = 0, c = 0;
	struct platform_device *pdev = res->pdev;
	int *clock_ids = NULL;
	int *clock_props = NULL;
	struct clock_set *clocks = &res->clock_set;

	num_clocks = of_property_count_strings(pdev->dev.of_node,
				"clock-names");
	if (num_clocks <= 0) {
		dprintk(CVP_CORE, "No clocks found\n");
		clocks->count = 0;
		rc = 0;
		goto err_load_clk_table_fail;
	}

	clock_ids = devm_kzalloc(&pdev->dev, num_clocks *
			sizeof(*clock_ids), GFP_KERNEL);
	if (!clock_ids) {
		dprintk(CVP_ERR, "No memory to read clock ids\n");
		rc = -ENOMEM;
		goto err_load_clk_table_fail;
	}

	rc = of_property_read_u32_array(pdev->dev.of_node,
				"clock-ids", clock_ids,
				num_clocks);
	if (rc) {
		dprintk(CVP_CORE, "Failed to read clock ids: %d\n", rc);
		msm_cvp_mmrm_enabled = false;
		dprintk(CVP_CORE, "flag msm_cvp_mmrm_enabled disabled\n");
	}

	clock_props = devm_kzalloc(&pdev->dev, num_clocks *
			sizeof(*clock_props), GFP_KERNEL);
	if (!clock_props) {
		dprintk(CVP_ERR, "No memory to read clock properties\n");
		rc = -ENOMEM;
		goto err_load_clk_table_fail;
	}

	rc = of_property_read_u32_array(pdev->dev.of_node,
				"qcom,clock-configs", clock_props,
				num_clocks);
	if (rc) {
		dprintk(CVP_ERR, "Failed to read clock properties: %d\n", rc);
		goto err_load_clk_prop_fail;
	}

	clocks->clock_tbl = devm_kzalloc(&pdev->dev, sizeof(*clocks->clock_tbl)
			* num_clocks, GFP_KERNEL);
	if (!clocks->clock_tbl) {
		dprintk(CVP_ERR, "Failed to allocate memory for clock tbl\n");
		rc = -ENOMEM;
		goto err_load_clk_prop_fail;
	}

	clocks->count = num_clocks;
	dprintk(CVP_CORE, "Found %d clocks\n", num_clocks);

	for (c = 0; c < num_clocks; ++c) {
		struct clock_info *vc = &res->clock_set.clock_tbl[c];

		of_property_read_string_index(pdev->dev.of_node,
				"clock-names", c, &vc->name);

		if (msm_cvp_mmrm_enabled == true)
			vc->clk_id = clock_ids[c];

		if (clock_props[c] & CLOCK_PROP_HAS_SCALING) {
			vc->has_scaling = true;
		} else {
			vc->count = 0;
			vc->has_scaling = false;
		}

		if (clock_props[c] & CLOCK_PROP_HAS_MEM_RETENTION)
			vc->has_mem_retention = true;
		else
			vc->has_mem_retention = false;

		dprintk(CVP_CORE, "Found clock %s id %d: scale-able = %s\n",
			vc->name, vc->clk_id, vc->count ? "yes" : "no");
	}

	return 0;

err_load_clk_prop_fail:
err_load_clk_table_fail:
	return rc;
}
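
/*
 * Illustrative only: the three clock properties are parallel arrays indexed
 * by clock position. The names, IDs and config bits below are placeholders;
 * bit 0 is CLOCK_PROP_HAS_SCALING and bit 1 is CLOCK_PROP_HAS_MEM_RETENTION.
 *
 *	clock-names = "core_clk", "ahb_clk";
 *	clock-ids = <0x41 0x42>;
 *	qcom,clock-configs = <0x1 0x0>;
 */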

#define MAX_CLK_RESETS 5

static int msm_cvp_load_reset_table(
		struct msm_cvp_platform_resources *res)
{
	struct platform_device *pdev = res->pdev;
	struct reset_set *rst = &res->reset_set;
	int num_clocks = 0, c = 0, ret = 0;
	int pwr_stats[MAX_CLK_RESETS];

	num_clocks = of_property_count_strings(pdev->dev.of_node,
				"reset-names");
	if (num_clocks <= 0 || num_clocks > MAX_CLK_RESETS) {
		dprintk(CVP_ERR, "Num reset clocks out of range\n");
		rst->count = 0;
		return 0;
	}

	rst->reset_tbl = devm_kcalloc(&pdev->dev, num_clocks,
			sizeof(*rst->reset_tbl), GFP_KERNEL);
	if (!rst->reset_tbl)
		return -ENOMEM;

	rst->count = num_clocks;
	dprintk(CVP_CORE, "Found %d reset clocks\n", num_clocks);

	ret = of_property_read_u32_array(pdev->dev.of_node,
				"reset-power-status", pwr_stats,
				num_clocks);
	if (ret) {
		dprintk(CVP_ERR, "Failed to read reset pwr state: %d\n", ret);
		devm_kfree(&pdev->dev, rst->reset_tbl);
		return ret;
	}

	for (c = 0; c < num_clocks; ++c) {
		struct reset_info *rc = &res->reset_set.reset_tbl[c];

		of_property_read_string_index(pdev->dev.of_node,
				"reset-names", c, &rc->name);
		rc->required_stage = pwr_stats[c];
	}

	return 0;
}
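
/*
 * Illustrative only: reset-names and reset-power-status are parallel arrays;
 * each status value lands in reset_tbl[i].required_stage. The entries below
 * are placeholders (at most MAX_CLK_RESETS of them).
 *
 *	reset-names = "cvp_axi_reset", "cvp_core_reset";
 *	reset-power-status = <0x1 0x0>;
 */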

static int find_key_value(struct msm_cvp_platform_data *platform_data,
	const char *key)
{
	int i = 0;
	struct msm_cvp_common_data *common_data = platform_data->common_data;
	int size = platform_data->common_data_length;

	for (i = 0; i < size; i++) {
		if (!strcmp(common_data[i].key, key))
			return common_data[i].value;
	}

	return 0;
}

int cvp_read_platform_resources_from_drv_data(
		struct msm_cvp_core *core)
{
	struct msm_cvp_platform_data *platform_data;
	struct msm_cvp_platform_resources *res;
	int rc = 0, i;

	if (!core || !core->platform_data) {
		dprintk(CVP_ERR, "%s Invalid data\n", __func__);
		return -ENOENT;
	}
	platform_data = core->platform_data;
	res = &core->resources;
	res->sku_version = platform_data->sku_version;

	res->fw_name = "evass";
	dprintk(CVP_CORE, "Firmware filename: %s\n", res->fw_name);

	res->dsp_enabled = find_key_value(platform_data,
			"qcom,dsp-enabled");
	res->max_ssr_allowed = find_key_value(platform_data,
			"qcom,max-ssr-allowed");
	res->sw_power_collapsible = find_key_value(platform_data,
			"qcom,sw-power-collapse");
	res->debug_timeout = find_key_value(platform_data,
			"qcom,debug-timeout");
	res->pm_qos.latency_us = find_key_value(platform_data,
			"qcom,pm-qos-latency-us");
	res->pm_qos.silver_count = 4;
	for (i = 0; i < res->pm_qos.silver_count; i++)
		res->pm_qos.silver_cores[i] = i;
	res->pm_qos.off_vote_cnt = 0;
	spin_lock_init(&res->pm_qos.lock);
	res->max_secure_inst_count = find_key_value(platform_data,
			"qcom,max-secure-instances");
	res->thermal_mitigable = find_key_value(platform_data,
			"qcom,enable-thermal-mitigation");
	res->msm_cvp_pwr_collapse_delay = find_key_value(platform_data,
			"qcom,power-collapse-delay");
	res->msm_cvp_firmware_unload_delay = find_key_value(platform_data,
			"qcom,fw-unload-delay");
	res->msm_cvp_hw_rsp_timeout = find_key_value(platform_data,
			"qcom,hw-resp-timeout");
	res->msm_cvp_dsp_rsp_timeout = find_key_value(platform_data,
			"qcom,dsp-resp-timeout");
	res->non_fatal_pagefaults = find_key_value(platform_data,
			"qcom,domain-attr-non-fatal-faults");

	res->vpu_ver = platform_data->vpu_ver;
	res->ubwc_config = platform_data->ubwc_config;
	res->fatal_ssr = false;

	return rc;
}

int cvp_read_platform_resources_from_dt(
		struct msm_cvp_platform_resources *res)
{
	struct platform_device *pdev = res->pdev;
	struct resource *kres = NULL;
	int rc = 0;
	uint32_t firmware_base = 0;

	if (!pdev->dev.of_node) {
		dprintk(CVP_ERR, "DT node not found\n");
		return -ENOENT;
	}

	INIT_LIST_HEAD(&res->context_banks);

	res->firmware_base = (phys_addr_t)firmware_base;

	kres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	res->register_base = kres ? kres->start : -1;
	res->register_size = kres ? (kres->end + 1 - kres->start) : -1;

	res->irq = platform_get_irq(pdev, 0);
	dprintk(CVP_CORE, "%s: res->irq:%d \n",
		__func__, res->irq);

	rc = msm_cvp_load_subcache_info(res);
	if (rc)
		dprintk(CVP_WARN, "Failed to load subcache info: %d\n", rc);

	rc = msm_cvp_load_qdss_table(res);
	if (rc)
		dprintk(CVP_WARN, "Failed to load qdss reg table: %d\n", rc);

	rc = msm_cvp_load_reg_table(res);
	if (rc) {
		dprintk(CVP_ERR, "Failed to load reg table: %d\n", rc);
		goto err_load_reg_table;
	}

	rc = msm_cvp_load_ipcc_regs(res);
	if (rc)
		dprintk(CVP_ERR, "Failed to load IPCC regs: %d\n", rc);

	rc = msm_cvp_load_regspace_mapping(res);
	if (rc)
		dprintk(CVP_ERR, "Failed to load reg space mapping: %d\n", rc);

	rc = msm_cvp_load_gcc_regs(res);

	rc = msm_cvp_load_regulator_table(res);
	if (rc) {
		dprintk(CVP_ERR, "Failed to load list of regulators %d\n", rc);
		goto err_load_regulator_table;
	}

	rc = msm_cvp_load_clock_table(res);
	if (rc) {
		dprintk(CVP_ERR,
			"Failed to load clock table: %d\n", rc);
		goto err_load_clock_table;
	}

	rc = msm_cvp_load_allowed_clocks_table(res);
	if (rc) {
		dprintk(CVP_ERR,
			"Failed to load allowed clocks table: %d\n", rc);
		goto err_load_allowed_clocks_table;
	}

	rc = msm_cvp_load_reset_table(res);
	if (rc) {
		dprintk(CVP_ERR,
			"Failed to load reset table: %d\n", rc);
		goto err_load_reset_table;
	}

	res->use_non_secure_pil = of_property_read_bool(pdev->dev.of_node,
			"qcom,use-non-secure-pil");

	if (res->use_non_secure_pil || !is_iommu_present(res)) {
		of_property_read_u32(pdev->dev.of_node, "qcom,fw-bias",
				&firmware_base);
		res->firmware_base = (phys_addr_t)firmware_base;
		dprintk(CVP_CORE,
			"Using fw-bias : %pa", &res->firmware_base);
	}

	return rc;

err_load_reset_table:
	msm_cvp_free_allowed_clocks_table(res);
err_load_allowed_clocks_table:
	msm_cvp_free_clock_table(res);
err_load_clock_table:
	msm_cvp_free_regulator_table(res);
err_load_regulator_table:
	msm_cvp_free_reg_table(res);
err_load_reg_table:
	return rc;
}

static int msm_cvp_setup_context_bank(struct msm_cvp_platform_resources *res,
		struct context_bank_info *cb, struct device *dev)
{
	int rc = 0;
	struct bus_type *bus;

	if (!dev || !cb || !res) {
		dprintk(CVP_ERR,
			"%s: Invalid Input params\n", __func__);
		return -EINVAL;
	}
	cb->dev = dev;

	bus = cb->dev->bus;
	if (IS_ERR_OR_NULL(bus)) {
		dprintk(CVP_ERR, "%s - failed to get bus type\n", __func__);
		rc = PTR_ERR(bus) ?: -ENODEV;
		goto remove_cb;
	}

	/*
	 * configure device segment size and segment boundary to ensure
	 * iommu mapping returns one mapping (which is required for partial
	 * cache operations)
	 */
	if (!dev->dma_parms)
		dev->dma_parms =
			devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
	dma_set_seg_boundary(dev, DMA_BIT_MASK(64));

	dprintk(CVP_CORE, "Attached %s and created mapping\n", dev_name(dev));
	dprintk(CVP_CORE,
		"Context bank name:%s, buffer_type: %#x, is_secure: %d, address range start: %#x, size: %#x, dev: %pK",
		cb->name, cb->buffer_type, cb->is_secure, cb->addr_range.start,
		cb->addr_range.size, cb->dev);

	return rc;

remove_cb:
	return rc;
}

int msm_cvp_smmu_fault_handler(struct iommu_domain *domain,
		struct device *dev, unsigned long iova, int flags, void *token)
{
	struct msm_cvp_core *core = token;
	struct iris_hfi_device *hdev;
	struct msm_cvp_inst *inst;
	bool log = false;

	if (!domain || !core) {
		dprintk(CVP_ERR, "%s - invalid param %pK %pK\n",
			__func__, domain, core);
		return -EINVAL;
	}

	core->smmu_fault_count++;
	if (!core->last_fault_addr)
		core->last_fault_addr = iova;

	dprintk(CVP_ERR, "%s - faulting address: %lx, %d\n",
		__func__, iova, core->smmu_fault_count);

	mutex_lock(&core->lock);
	log = (core->log.snapshot_index > 0) ? false : true;
	list_for_each_entry(inst, &core->instances, list) {
		cvp_print_inst(CVP_ERR, inst);
		msm_cvp_print_inst_bufs(inst, log);
	}
	hdev = core->device->hfi_device_data;
	if (hdev) {
		hdev->error = CVP_ERR_NOC_ERROR;
		/* call_hfi_op(core->device, debug_hook, hdev); */
	}
	mutex_unlock(&core->lock);

	/*
	 * Return -ENOSYS to elicit the desired behaviour of the smmu driver.
	 * If we return -ENOSYS, then the smmu driver assumes a page fault
	 * handler is not installed and prints a list of useful debug
	 * information like FAR, SID etc. This information is not printed if
	 * we return 0.
	 */
	return -ENOSYS;
}

static int msm_cvp_populate_context_bank(struct device *dev,
		struct msm_cvp_core *core)
{
	int rc = 0;
	struct context_bank_info *cb = NULL;
	struct device_node *np = NULL;

	if (!dev || !core) {
		dprintk(CVP_ERR, "%s - invalid inputs\n", __func__);
		return -EINVAL;
	}

	np = dev->of_node;
	cb = devm_kzalloc(dev, sizeof(*cb), GFP_KERNEL);
	if (!cb) {
		dprintk(CVP_ERR, "%s - Failed to allocate cb\n", __func__);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&cb->list);
	list_add_tail(&cb->list, &core->resources.context_banks);

	rc = of_property_read_string(np, "label", &cb->name);
	if (rc) {
		dprintk(CVP_CORE,
			"Failed to read cb label from device tree\n");
		rc = 0;
	}

	dprintk(CVP_CORE, "%s: context bank has name %s\n", __func__, cb->name);
	if (!strcmp(cb->name, "cvp_camera")) {
		cb->is_secure = true;
		rc = msm_cvp_setup_context_bank(&core->resources, cb, dev);
		if (rc) {
			dprintk(CVP_ERR, "Cannot setup context bank %s %d\n",
				cb->name, rc);
			goto err_setup_cb;
		}
		return 0;
	}

	rc = of_property_read_u32_array(np, "qcom,iommu-dma-addr-pool",
			(u32 *)&cb->addr_range, 2);
	if (rc) {
		dprintk(CVP_ERR,
			"Could not read addr pool for context bank : %s %d\n",
			cb->name, rc);
		goto err_setup_cb;
	}

	cb->is_secure = of_property_read_bool(np, "qcom,iommu-vmid");
	dprintk(CVP_CORE, "context bank %s : secure = %d\n",
			cb->name, cb->is_secure);

	/* setup buffer type for each sub device */
	rc = of_property_read_u32(np, "buffer-types", &cb->buffer_type);
	if (rc) {
		dprintk(CVP_ERR, "failed to load buffer_type info %d\n", rc);
		rc = -ENOENT;
		goto err_setup_cb;
	}
	dprintk(CVP_CORE,
		"context bank %s address start = %x address size = %x buffer_type = %x\n",
		cb->name, cb->addr_range.start,
		cb->addr_range.size, cb->buffer_type);

	cb->domain = iommu_get_domain_for_dev(dev);
	if (IS_ERR_OR_NULL(cb->domain)) {
		dprintk(CVP_ERR, "Create domain failed\n");
		rc = -ENODEV;
		goto err_setup_cb;
	}

	rc = msm_cvp_setup_context_bank(&core->resources, cb, dev);
	if (rc) {
		dprintk(CVP_ERR, "Cannot setup context bank %d\n", rc);
		goto err_setup_cb;
	}

	iommu_set_fault_handler(cb->domain,
		msm_cvp_smmu_fault_handler, (void *)core);

	return 0;

err_setup_cb:
	list_del(&cb->list);
	return rc;
}
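
/*
 * Illustrative only: a hypothetical non-secure context-bank node matching the
 * properties parsed above. The label "cvp_camera" is special-cased as secure
 * and skips the DMA address pool; all values below are placeholders.
 *
 *	cvp_non_secure_cb {
 *		label = "cvp_hlos";
 *		buffer-types = <0xfff>;
 *		qcom,iommu-dma-addr-pool = <0x4b000000 0x90000000>;
 *	};
 */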

int cvp_read_context_bank_resources_from_dt(struct platform_device *pdev)
{
	struct msm_cvp_core *core;
	int rc = 0;

	if (!pdev) {
		dprintk(CVP_ERR, "Invalid platform device\n");
		return -EINVAL;
	} else if (!pdev->dev.parent) {
		dprintk(CVP_ERR, "Failed to find a parent for %s\n",
				dev_name(&pdev->dev));
		return -ENODEV;
	}

	core = dev_get_drvdata(pdev->dev.parent);
	if (!core) {
		dprintk(CVP_ERR, "Failed to find cookie in parent device %s",
				dev_name(pdev->dev.parent));
		return -EINVAL;
	}

	rc = msm_cvp_populate_context_bank(&pdev->dev, core);
	if (rc)
		dprintk(CVP_ERR, "Failed to probe context bank\n");
	else
		dprintk(CVP_CORE, "Successfully probed context bank\n");

	return rc;
}

int cvp_read_bus_resources_from_dt(struct platform_device *pdev)
{
	struct msm_cvp_core *core;

	if (!pdev) {
		dprintk(CVP_ERR, "Invalid platform device\n");
		return -EINVAL;
	} else if (!pdev->dev.parent) {
		dprintk(CVP_ERR, "Failed to find a parent for %s\n",
				dev_name(&pdev->dev));
		return -ENODEV;
	}

	core = dev_get_drvdata(pdev->dev.parent);
	if (!core) {
		dprintk(CVP_ERR, "Failed to find cookie in parent device %s",
				dev_name(pdev->dev.parent));
		return -EINVAL;
	}

	return msm_cvp_populate_bus(&pdev->dev, &core->resources);
}

int cvp_read_mem_cdsp_resources_from_dt(struct platform_device *pdev)
{
	struct msm_cvp_core *core;

	if (!pdev) {
		dprintk(CVP_ERR, "%s: invalid platform device\n", __func__);
		return -EINVAL;
	} else if (!pdev->dev.parent) {
		dprintk(CVP_ERR, "Failed to find a parent for %s\n",
				dev_name(&pdev->dev));
		return -ENODEV;
	}

	core = dev_get_drvdata(pdev->dev.parent);
	if (!core) {
		dprintk(CVP_ERR, "Failed to find cookie in parent device %s",
				dev_name(pdev->dev.parent));
		return -EINVAL;
	}

	return msm_cvp_populate_mem_cdsp(&pdev->dev, &core->resources);
}