msm_cvp_res_parse.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
 */

#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/of_reserved_mem.h>
#include "msm_cvp_debug.h"
#include "msm_cvp_resources.h"
#include "msm_cvp_res_parse.h"
#include "cvp_core_hfi.h"
#include "soc/qcom/secure_buffer.h"

enum clock_properties {
	CLOCK_PROP_HAS_SCALING = 1 << 0,
	CLOCK_PROP_HAS_MEM_RETENTION = 1 << 1,
};

#define PERF_GOV "performance"

static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
{
	return NULL;
}

static size_t get_u32_array_num_elements(struct device_node *np,
					 char *name)
{
	int len;
	size_t num_elements = 0;

	if (!of_get_property(np, name, &len)) {
		dprintk(CVP_ERR, "Failed to read %s from device tree\n",
			name);
		goto fail_read;
	}

	num_elements = len / sizeof(u32);
	if (num_elements <= 0) {
		dprintk(CVP_ERR, "%s not specified in device tree\n",
			name);
		goto fail_read;
	}

	return num_elements;

fail_read:
	return 0;
}
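
/*
 * Hypothetical illustration (not part of the driver): callers treat the
 * value returned above as a count of u32 cells and divide it by the number
 * of u32 cells per table entry. For a table whose entries hold two u32
 * values (for example a <reg value> pair), and a made-up property name
 * "example,reg-presets", the conversion looks roughly like this:
 *
 *	struct example_pair { u32 reg; u32 value; };
 *	size_t cells = get_u32_array_num_elements(np, "example,reg-presets");
 *	size_t entries = cells / (sizeof(struct example_pair) / sizeof(u32));
 */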

static inline void msm_cvp_free_allowed_clocks_table(
		struct msm_cvp_platform_resources *res)
{
	res->allowed_clks_tbl = NULL;
}

static inline void msm_cvp_free_cycles_per_mb_table(
		struct msm_cvp_platform_resources *res)
{
	res->clock_freq_tbl.clk_prof_entries = NULL;
}

static inline void msm_cvp_free_reg_table(
		struct msm_cvp_platform_resources *res)
{
	res->reg_set.reg_tbl = NULL;
}

static inline void msm_cvp_free_qdss_addr_table(
		struct msm_cvp_platform_resources *res)
{
	res->qdss_addr_set.addr_tbl = NULL;
}

static inline void msm_cvp_free_bus_vectors(
		struct msm_cvp_platform_resources *res)
{
	kfree(res->bus_set.bus_tbl);
	res->bus_set.bus_tbl = NULL;
	res->bus_set.count = 0;
}

static inline void msm_cvp_free_regulator_table(
		struct msm_cvp_platform_resources *res)
{
	int c = 0;

	for (c = 0; c < res->regulator_set.count; ++c) {
		struct regulator_info *rinfo =
			&res->regulator_set.regulator_tbl[c];

		rinfo->name = NULL;
	}

	res->regulator_set.regulator_tbl = NULL;
	res->regulator_set.count = 0;
}

static inline void msm_cvp_free_clock_table(
		struct msm_cvp_platform_resources *res)
{
	res->clock_set.clock_tbl = NULL;
	res->clock_set.count = 0;
}

void msm_cvp_free_platform_resources(
		struct msm_cvp_platform_resources *res)
{
	msm_cvp_free_clock_table(res);
	msm_cvp_free_regulator_table(res);
	msm_cvp_free_allowed_clocks_table(res);
	msm_cvp_free_reg_table(res);
	msm_cvp_free_qdss_addr_table(res);
	msm_cvp_free_bus_vectors(res);
}

static int msm_cvp_load_ipcc_regs(struct msm_cvp_platform_resources *res)
{
	int ret = 0;
	unsigned int reg_config[2];
	struct platform_device *pdev = res->pdev;

	ret = of_property_read_u32_array(pdev->dev.of_node, "qcom,ipcc-reg",
			reg_config, 2);
	if (ret) {
		dprintk(CVP_ERR, "Failed to read ipcc reg: %d\n", ret);
		return ret;
	}

	res->ipcc_reg_base = reg_config[0];
	res->ipcc_reg_size = reg_config[1];

	dprintk(CVP_CORE,
		"ipcc reg_base = %x, reg_size = %x\n",
		res->ipcc_reg_base,
		res->ipcc_reg_size);

	return ret;
}
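
/*
 * Hypothetical illustration (not part of the driver): the base/size pair
 * parsed above describes a register window, which a consumer could map
 * with the standard devm helper, e.g.:
 *
 *	void __iomem *ipcc = devm_ioremap(&pdev->dev, res->ipcc_reg_base,
 *					  res->ipcc_reg_size);
 *	if (!ipcc)
 *		return -ENOMEM;
 */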

static int msm_cvp_load_regspace_mapping(struct msm_cvp_platform_resources *res)
{
	int ret = 0;
	unsigned int ipclite_mapping_config[3];
	unsigned int hwmutex_mapping_config[3];
	unsigned int aon_mapping_config[3];
	struct platform_device *pdev = res->pdev;

	ret = of_property_read_u32_array(pdev->dev.of_node, "ipclite_mappings",
			ipclite_mapping_config, 3);
	if (ret) {
		dprintk(CVP_ERR, "Failed to read ipclite reg: %d\n", ret);
		return ret;
	}
	res->ipclite_iova = ipclite_mapping_config[0];
	res->ipclite_size = ipclite_mapping_config[1];
	res->ipclite_phyaddr = ipclite_mapping_config[2];

	ret = of_property_read_u32_array(pdev->dev.of_node, "hwmutex_mappings",
			hwmutex_mapping_config, 3);
	if (ret) {
		dprintk(CVP_ERR, "Failed to read hwmutex reg: %d\n", ret);
		return ret;
	}
	res->hwmutex_iova = hwmutex_mapping_config[0];
	res->hwmutex_size = hwmutex_mapping_config[1];
	res->hwmutex_phyaddr = hwmutex_mapping_config[2];

	dprintk(CVP_CORE, "ipclite %#x %#x %#x hwmutex %#x %#x %#x\n",
		res->ipclite_iova, res->ipclite_phyaddr, res->ipclite_size,
		res->hwmutex_iova, res->hwmutex_phyaddr, res->hwmutex_size);

	ret = of_property_read_u32_array(pdev->dev.of_node, "aon_mappings",
			aon_mapping_config, 3);
	if (ret) {
		dprintk(CVP_ERR, "Failed to read aon reg: %d\n", ret);
		return ret;
	}
	res->aon_iova = aon_mapping_config[0];
	res->aon_size = aon_mapping_config[1];
	res->aon_phyaddr = aon_mapping_config[2];
	dprintk(CVP_CORE, "aon %#x %#x %#x\n",
		res->aon_iova, res->aon_phyaddr, res->aon_size);
	return ret;
}

static int msm_cvp_load_gcc_regs(struct msm_cvp_platform_resources *res)
{
	int ret = 0;
	unsigned int reg_config[2];
	struct platform_device *pdev = res->pdev;

	ret = of_property_read_u32_array(pdev->dev.of_node, "qcom,gcc-reg",
			reg_config, 2);
	if (ret) {
		dprintk(CVP_WARN, "No gcc reg configured: %d\n", ret);
		return ret;
	}

	res->gcc_reg_base = reg_config[0];
	res->gcc_reg_size = reg_config[1];

	return ret;
}

static int msm_cvp_load_reg_table(struct msm_cvp_platform_resources *res)
{
	struct reg_set *reg_set;
	struct platform_device *pdev = res->pdev;
	int i;
	int rc = 0;

	if (!of_find_property(pdev->dev.of_node, "qcom,reg-presets", NULL)) {
		/*
		 * qcom,reg-presets is an optional property. It likely won't be
		 * present if we don't have any register settings to program
		 */
		dprintk(CVP_CORE, "qcom,reg-presets not found\n");
		return 0;
	}

	reg_set = &res->reg_set;
	reg_set->count = get_u32_array_num_elements(pdev->dev.of_node,
			"qcom,reg-presets");
	reg_set->count /= sizeof(*reg_set->reg_tbl) / sizeof(u32);
	if (!reg_set->count) {
		dprintk(CVP_CORE, "no elements in reg set\n");
		return rc;
	}

	reg_set->reg_tbl = devm_kzalloc(&pdev->dev, reg_set->count *
			sizeof(*(reg_set->reg_tbl)), GFP_KERNEL);
	if (!reg_set->reg_tbl) {
		dprintk(CVP_ERR, "%s Failed to alloc register table\n",
			__func__);
		return -ENOMEM;
	}

	if (of_property_read_u32_array(pdev->dev.of_node, "qcom,reg-presets",
			(u32 *)reg_set->reg_tbl, reg_set->count * 2)) {
		dprintk(CVP_ERR, "Failed to read register table\n");
		msm_cvp_free_reg_table(res);
		return -EINVAL;
	}

	for (i = 0; i < reg_set->count; i++) {
		dprintk(CVP_CORE, "reg = %x, value = %x\n",
			reg_set->reg_tbl[i].reg,
			reg_set->reg_tbl[i].value);
	}
	return rc;
}

static int msm_cvp_load_qdss_table(struct msm_cvp_platform_resources *res)
{
	struct addr_set *qdss_addr_set;
	struct platform_device *pdev = res->pdev;
	int i;
	int rc = 0;

	if (!of_find_property(pdev->dev.of_node, "qcom,qdss-presets", NULL)) {
		/*
		 * qcom,qdss-presets is an optional property. It likely won't be
		 * present if we don't have any register settings to program
		 */
		dprintk(CVP_CORE, "qcom,qdss-presets not found\n");
		return rc;
	}

	qdss_addr_set = &res->qdss_addr_set;
	qdss_addr_set->count = get_u32_array_num_elements(pdev->dev.of_node,
			"qcom,qdss-presets");
	qdss_addr_set->count /= sizeof(*qdss_addr_set->addr_tbl) / sizeof(u32);
	if (!qdss_addr_set->count) {
		dprintk(CVP_CORE, "no elements in qdss reg set\n");
		return rc;
	}

	qdss_addr_set->addr_tbl = devm_kzalloc(&pdev->dev,
			qdss_addr_set->count * sizeof(*qdss_addr_set->addr_tbl),
			GFP_KERNEL);
	if (!qdss_addr_set->addr_tbl) {
		dprintk(CVP_ERR, "%s Failed to alloc register table\n",
			__func__);
		rc = -ENOMEM;
		goto err_qdss_addr_tbl;
	}

	rc = of_property_read_u32_array(pdev->dev.of_node, "qcom,qdss-presets",
			(u32 *)qdss_addr_set->addr_tbl, qdss_addr_set->count * 2);
	if (rc) {
		dprintk(CVP_ERR, "Failed to read qdss address table\n");
		msm_cvp_free_qdss_addr_table(res);
		rc = -EINVAL;
		goto err_qdss_addr_tbl;
	}

	for (i = 0; i < qdss_addr_set->count; i++) {
		dprintk(CVP_CORE, "qdss addr = %x, value = %x\n",
			qdss_addr_set->addr_tbl[i].start,
			qdss_addr_set->addr_tbl[i].size);
	}
err_qdss_addr_tbl:
	return rc;
}

static int msm_cvp_load_subcache_info(struct msm_cvp_platform_resources *res)
{
	int rc = 0, num_subcaches = 0, c;
	struct platform_device *pdev = res->pdev;
	struct subcache_set *subcaches = &res->subcache_set;

	num_subcaches = of_property_count_strings(pdev->dev.of_node,
			"cache-slice-names");
	if (num_subcaches <= 0) {
		dprintk(CVP_CORE, "No subcaches found\n");
		goto err_load_subcache_table_fail;
	}

	subcaches->subcache_tbl = devm_kzalloc(&pdev->dev,
			sizeof(*subcaches->subcache_tbl) * num_subcaches, GFP_KERNEL);
	if (!subcaches->subcache_tbl) {
		dprintk(CVP_ERR,
			"Failed to allocate memory for subcache tbl\n");
		rc = -ENOMEM;
		goto err_load_subcache_table_fail;
	}

	subcaches->count = num_subcaches;
	dprintk(CVP_CORE, "Found %d subcaches\n", num_subcaches);

	for (c = 0; c < num_subcaches; ++c) {
		struct subcache_info *vsc = &res->subcache_set.subcache_tbl[c];

		of_property_read_string_index(pdev->dev.of_node,
				"cache-slice-names", c, &vsc->name);
	}

	res->sys_cache_present = true;

	return 0;

err_load_subcache_table_fail:
	res->sys_cache_present = false;
	subcaches->count = 0;
	subcaches->subcache_tbl = NULL;

	return rc;
}

/**
 * msm_cvp_load_u32_table() - load dtsi table entries
 * @pdev: A pointer to the platform device.
 * @of_node: A pointer to the device node.
 * @table_name: A pointer to the dtsi table entry name.
 * @struct_size: The size of the structure which is nothing but
 *               a single entry in the dtsi table.
 * @table: A pointer to the table pointer which needs to be
 *         filled by the dtsi table entries.
 * @num_elements: Number of elements pointer which needs to be filled
 *                with the number of elements in the table.
 *
 * This is a generic implementation to load single or multiple array
 * table from dtsi. The array elements should be of size equal to u32.
 *
 * Return: Return '0' for success else appropriate error value.
 */
int msm_cvp_load_u32_table(struct platform_device *pdev,
		struct device_node *of_node, char *table_name, int struct_size,
		u32 **table, u32 *num_elements)
{
	int rc = 0, num_elemts = 0;
	u32 *ptbl = NULL;

	if (!of_find_property(of_node, table_name, NULL)) {
		dprintk(CVP_CORE, "%s not found\n", table_name);
		return 0;
	}

	num_elemts = get_u32_array_num_elements(of_node, table_name);
	if (!num_elemts) {
		dprintk(CVP_ERR, "no elements in %s\n", table_name);
		return 0;
	}
	num_elemts /= struct_size / sizeof(u32);

	ptbl = devm_kzalloc(&pdev->dev, num_elemts * struct_size, GFP_KERNEL);
	if (!ptbl) {
		dprintk(CVP_ERR, "Failed to alloc table %s\n", table_name);
		return -ENOMEM;
	}

	if (of_property_read_u32_array(of_node, table_name, ptbl,
			num_elemts * struct_size / sizeof(u32))) {
		dprintk(CVP_ERR, "Failed to read %s\n", table_name);
		return -EINVAL;
	}

	*table = ptbl;
	if (num_elements)
		*num_elements = num_elemts;

	return rc;
}
EXPORT_SYMBOL(msm_cvp_load_u32_table);
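
/*
 * Hypothetical usage sketch (not part of the driver): with a two-u32 entry
 * such as { u32 load; u32 freq; } and a made-up property name
 * "example,load-freq-tbl", a caller could pull the whole table in one shot
 * and receive the entry count back:
 *
 *	struct example_entry { u32 load; u32 freq; };
 *	struct example_entry *tbl = NULL;
 *	u32 cnt = 0;
 *	int rc = msm_cvp_load_u32_table(pdev, pdev->dev.of_node,
 *			"example,load-freq-tbl", sizeof(*tbl),
 *			(u32 **)&tbl, &cnt);
 *
 * The table memory is devm-allocated against &pdev->dev, so it is released
 * automatically when the device is unbound.
 */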

/* A comparator used to sort the allowed-clock-rates table (needed later on) */
static int cmp(const void *a, const void *b)
{
	return ((struct allowed_clock_rates_table *)a)->clock_rate -
		((struct allowed_clock_rates_table *)b)->clock_rate;
}

static int msm_cvp_load_allowed_clocks_table(
		struct msm_cvp_platform_resources *res)
{
	int rc = 0;
	struct platform_device *pdev = res->pdev;

	if (!of_find_property(pdev->dev.of_node,
			"qcom,allowed-clock-rates", NULL)) {
		dprintk(CVP_CORE, "qcom,allowed-clock-rates not found\n");
		return 0;
	}

	rc = msm_cvp_load_u32_table(pdev, pdev->dev.of_node,
			"qcom,allowed-clock-rates",
			sizeof(*res->allowed_clks_tbl),
			(u32 **)&res->allowed_clks_tbl,
			&res->allowed_clks_tbl_size);
	if (rc) {
		dprintk(CVP_ERR,
			"%s: failed to read allowed clocks table\n", __func__);
		return rc;
	}

	sort(res->allowed_clks_tbl, res->allowed_clks_tbl_size,
		sizeof(*res->allowed_clks_tbl), cmp, NULL);

	return 0;
}

static int msm_cvp_populate_mem_cdsp(struct device *dev,
		struct msm_cvp_platform_resources *res)
{
	struct device_node *mem_node;
	int ret;

	mem_node = of_parse_phandle(dev->of_node, "memory-region", 0);
	if (mem_node) {
		ret = of_reserved_mem_device_init_by_idx(dev,
				dev->of_node, 0);
		/* drop the reference taken by of_parse_phandle() */
		of_node_put(mem_node);
		if (ret) {
			dprintk(CVP_ERR,
				"Failed to initialize reserved mem, ret %d\n",
				ret);
			return ret;
		}
	}
	res->mem_cdsp.dev = dev;

	return 0;
}

static int msm_cvp_populate_bus(struct device *dev,
		struct msm_cvp_platform_resources *res)
{
	struct bus_set *buses = &res->bus_set;
	const char *temp_name = NULL;
	struct bus_info *bus = NULL, *temp_table;
	u32 range[2];
	int rc = 0;

	temp_table = krealloc(buses->bus_tbl, sizeof(*temp_table) *
			(buses->count + 1), GFP_KERNEL);
	if (!temp_table) {
		dprintk(CVP_ERR, "%s: Failed to allocate memory", __func__);
		rc = -ENOMEM;
		goto err_bus;
	}

	buses->bus_tbl = temp_table;
	bus = &buses->bus_tbl[buses->count];
	memset(bus, 0x0, sizeof(struct bus_info));

	rc = of_property_read_string(dev->of_node, "label", &temp_name);
	if (rc) {
		dprintk(CVP_ERR, "'label' not found in node\n");
		goto err_bus;
	}
	/* need a non-const version of name, hence copying it over */
	bus->name = devm_kstrdup(dev, temp_name, GFP_KERNEL);
	if (!bus->name) {
		rc = -ENOMEM;
		goto err_bus;
	}

	rc = of_property_read_u32(dev->of_node, "qcom,bus-master",
			&bus->master);
	if (rc) {
		dprintk(CVP_ERR, "'qcom,bus-master' not found in node\n");
		goto err_bus;
	}

	rc = of_property_read_u32(dev->of_node, "qcom,bus-slave", &bus->slave);
	if (rc) {
		dprintk(CVP_ERR, "'qcom,bus-slave' not found in node\n");
		goto err_bus;
	}

	rc = of_property_read_string(dev->of_node, "qcom,bus-governor",
			&bus->governor);
	if (rc) {
		rc = 0;
		dprintk(CVP_CORE,
			"'qcom,bus-governor' not found, default to performance governor\n");
		bus->governor = PERF_GOV;
	}
	if (!strcmp(bus->governor, PERF_GOV))
		bus->is_prfm_gov_used = true;

	rc = of_property_read_u32_array(dev->of_node, "qcom,bus-range-kbps",
			range, ARRAY_SIZE(range));
	if (rc) {
		rc = 0;
		dprintk(CVP_CORE,
			"'qcom,bus-range-kbps' not found, defaulting to <0 INT_MAX>\n");
		range[0] = 0;
		range[1] = INT_MAX;
	}
	bus->range[0] = range[0]; /* min */
	bus->range[1] = range[1]; /* max */

	buses->count++;
	bus->dev = dev;
	dprintk(CVP_CORE, "Found bus %s [%d->%d] with governor %s\n",
		bus->name, bus->master, bus->slave, bus->governor);
err_bus:
	return rc;
}

static int msm_cvp_load_regulator_table(
		struct msm_cvp_platform_resources *res)
{
	int rc = 0;
	struct platform_device *pdev = res->pdev;
	struct regulator_set *regulators = &res->regulator_set;
	struct device_node *domains_parent_node = NULL;
	struct property *domains_property = NULL;
	int reg_count = 0;

	regulators->count = 0;
	regulators->regulator_tbl = NULL;

	domains_parent_node = pdev->dev.of_node;
	for_each_property_of_node(domains_parent_node, domains_property) {
		const char *search_string = "-supply";
		char *supply;
		bool matched = false;

		/* check if current property is possibly a regulator */
		supply = strnstr(domains_property->name, search_string,
				strlen(domains_property->name) + 1);
		matched = supply && (*(supply + strlen(search_string)) == '\0');
		if (!matched)
			continue;
		reg_count++;
	}

	regulators->regulator_tbl = devm_kzalloc(&pdev->dev,
			sizeof(*regulators->regulator_tbl) *
			reg_count, GFP_KERNEL);
	if (!regulators->regulator_tbl) {
		rc = -ENOMEM;
		dprintk(CVP_ERR,
			"Failed to alloc memory for regulator table\n");
		goto err_reg_tbl_alloc;
	}

	for_each_property_of_node(domains_parent_node, domains_property) {
		const char *search_string = "-supply";
		char *supply;
		bool matched = false;
		struct device_node *regulator_node = NULL;
		struct regulator_info *rinfo = NULL;

		/* check if current property is possibly a regulator */
		supply = strnstr(domains_property->name, search_string,
				strlen(domains_property->name) + 1);
		matched = supply && (supply[strlen(search_string)] == '\0');
		if (!matched)
			continue;

		/* make sure prop isn't being misused */
		regulator_node = of_parse_phandle(domains_parent_node,
				domains_property->name, 0);
		if (!regulator_node) {
			/* of_parse_phandle() returns NULL on failure */
			dprintk(CVP_WARN, "%s is not a phandle\n",
				domains_property->name);
			continue;
		}

		regulators->count++;
		/* populate regulator info */
		rinfo = &regulators->regulator_tbl[regulators->count - 1];
		rinfo->name = devm_kzalloc(&pdev->dev,
			(supply - domains_property->name) + 1, GFP_KERNEL);
		if (!rinfo->name) {
			rc = -ENOMEM;
			dprintk(CVP_ERR,
				"Failed to alloc memory for regulator name\n");
			goto err_reg_name_alloc;
		}
		strlcpy(rinfo->name, domains_property->name,
			(supply - domains_property->name) + 1);

		rinfo->has_hw_power_collapse = of_property_read_bool(
			regulator_node, "qcom,support-hw-trigger");

		dprintk(CVP_CORE, "Found regulator %s: h/w collapse = %s\n",
			rinfo->name,
			rinfo->has_hw_power_collapse ? "yes" : "no");
	}

	if (!regulators->count)
		dprintk(CVP_CORE, "No regulators found\n");

	return 0;

err_reg_name_alloc:
err_reg_tbl_alloc:
	msm_cvp_free_regulator_table(res);
	return rc;
}
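
/*
 * Illustration (hypothetical, not part of the driver): the matching above
 * only accepts properties that end in "-supply". With the pointer
 * arithmetic used when allocating rinfo->name, a property named
 * "cvp-core-supply" would yield the regulator name "cvp-core":
 *
 *	const char *prop = "cvp-core-supply";
 *	char *supply = strnstr(prop, "-supply", strlen(prop) + 1);
 *	bool matched = supply && supply[strlen("-supply")] == '\0';
 *	size_t name_len = supply - prop;	// 8 -> "cvp-core"
 *
 * A property such as "cvp-core-supply-names" fails the trailing NUL check
 * and is skipped.
 */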

static int msm_cvp_load_clock_table(
		struct msm_cvp_platform_resources *res)
{
	int rc = 0, num_clocks = 0, c = 0;
	struct platform_device *pdev = res->pdev;
	int *clock_ids = NULL;
	int *clock_props = NULL;
	struct clock_set *clocks = &res->clock_set;

	num_clocks = of_property_count_strings(pdev->dev.of_node,
			"clock-names");
	if (num_clocks <= 0) {
		dprintk(CVP_CORE, "No clocks found\n");
		clocks->count = 0;
		rc = 0;
		goto err_load_clk_table_fail;
	}

	clock_ids = devm_kzalloc(&pdev->dev, num_clocks *
			sizeof(*clock_ids), GFP_KERNEL);
	if (!clock_ids) {
		dprintk(CVP_ERR, "No memory to read clock ids\n");
		rc = -ENOMEM;
		goto err_load_clk_table_fail;
	}

	rc = of_property_read_u32_array(pdev->dev.of_node,
			"clock-ids", clock_ids,
			num_clocks);
	if (rc) {
		dprintk(CVP_CORE, "Failed to read clock ids: %d\n", rc);
		msm_cvp_mmrm_enabled = false;
		dprintk(CVP_CORE, "flag msm_cvp_mmrm_enabled disabled\n");
	}

	clock_props = devm_kzalloc(&pdev->dev, num_clocks *
			sizeof(*clock_props), GFP_KERNEL);
	if (!clock_props) {
		dprintk(CVP_ERR, "No memory to read clock properties\n");
		rc = -ENOMEM;
		goto err_load_clk_table_fail;
	}

	rc = of_property_read_u32_array(pdev->dev.of_node,
			"qcom,clock-configs", clock_props,
			num_clocks);
	if (rc) {
		dprintk(CVP_ERR, "Failed to read clock properties: %d\n", rc);
		goto err_load_clk_prop_fail;
	}

	clocks->clock_tbl = devm_kzalloc(&pdev->dev, sizeof(*clocks->clock_tbl)
			* num_clocks, GFP_KERNEL);
	if (!clocks->clock_tbl) {
		dprintk(CVP_ERR, "Failed to allocate memory for clock tbl\n");
		rc = -ENOMEM;
		goto err_load_clk_prop_fail;
	}

	clocks->count = num_clocks;
	dprintk(CVP_CORE, "Found %d clocks\n", num_clocks);

	for (c = 0; c < num_clocks; ++c) {
		struct clock_info *vc = &res->clock_set.clock_tbl[c];

		of_property_read_string_index(pdev->dev.of_node,
				"clock-names", c, &vc->name);

		if (msm_cvp_mmrm_enabled == true)
			vc->clk_id = clock_ids[c];

		if (clock_props[c] & CLOCK_PROP_HAS_SCALING) {
			vc->has_scaling = true;
		} else {
			vc->count = 0;
			vc->has_scaling = false;
		}

		if (clock_props[c] & CLOCK_PROP_HAS_MEM_RETENTION)
			vc->has_mem_retention = true;
		else
			vc->has_mem_retention = false;
		dprintk(CVP_CORE, "Found clock %s id %d: scale-able = %s\n",
			vc->name, vc->clk_id,
			vc->has_scaling ? "yes" : "no");
	}

	return 0;

err_load_clk_prop_fail:
err_load_clk_table_fail:
	return rc;
}

#define MAX_CLK_RESETS 5

static int msm_cvp_load_reset_table(
		struct msm_cvp_platform_resources *res)
{
	struct platform_device *pdev = res->pdev;
	struct reset_set *rst = &res->reset_set;
	int num_clocks = 0, c = 0, ret = 0;
	int pwr_stats[MAX_CLK_RESETS];

	num_clocks = of_property_count_strings(pdev->dev.of_node,
			"reset-names");
	if (num_clocks <= 0 || num_clocks > MAX_CLK_RESETS) {
		dprintk(CVP_ERR, "Num reset clocks out of range\n");
		rst->count = 0;
		return 0;
	}

	rst->reset_tbl = devm_kcalloc(&pdev->dev, num_clocks,
			sizeof(*rst->reset_tbl), GFP_KERNEL);
	if (!rst->reset_tbl)
		return -ENOMEM;

	rst->count = num_clocks;
	dprintk(CVP_CORE, "Found %d reset clocks\n", num_clocks);

	ret = of_property_read_u32_array(pdev->dev.of_node,
			"reset-power-status", pwr_stats,
			num_clocks);
	if (ret) {
		dprintk(CVP_ERR, "Failed to read reset pwr state: %d\n", ret);
		devm_kfree(&pdev->dev, rst->reset_tbl);
		return ret;
	}

	for (c = 0; c < num_clocks; ++c) {
		struct reset_info *rc = &res->reset_set.reset_tbl[c];

		of_property_read_string_index(pdev->dev.of_node,
				"reset-names", c, &rc->name);
		rc->required_stage = pwr_stats[c];
	}

	return 0;
}

static int find_key_value(struct msm_cvp_platform_data *platform_data,
		const char *key)
{
	int i = 0;
	struct msm_cvp_common_data *common_data = platform_data->common_data;
	int size = platform_data->common_data_length;

	for (i = 0; i < size; i++) {
		if (!strcmp(common_data[i].key, key))
			return common_data[i].value;
	}
	return 0;
}
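
/*
 * Hypothetical illustration (not part of the driver): the lookup above
 * walks a key/value array supplied by the platform data. Assuming the
 * entries pair a string key with an integer value, a platform table
 * consumed by find_key_value() would look roughly like:
 *
 *	static struct msm_cvp_common_data example_common_data[] = {
 *		{ .key = "qcom,dsp-enabled", .value = 1 },
 *		{ .key = "qcom,hw-resp-timeout", .value = 2000 },
 *	};
 *
 * A key that is absent from the table silently resolves to 0.
 */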

int cvp_read_platform_resources_from_drv_data(
		struct msm_cvp_core *core)
{
	struct msm_cvp_platform_data *platform_data;
	struct msm_cvp_platform_resources *res;
	int rc = 0, i;

	if (!core || !core->platform_data) {
		dprintk(CVP_ERR, "%s Invalid data\n", __func__);
		return -ENOENT;
	}
	platform_data = core->platform_data;
	res = &core->resources;

	res->sku_version = platform_data->sku_version;

	res->fw_name = "evass";
	dprintk(CVP_CORE, "Firmware filename: %s\n", res->fw_name);

	res->dsp_enabled = find_key_value(platform_data,
			"qcom,dsp-enabled");
	res->max_ssr_allowed = find_key_value(platform_data,
			"qcom,max-ssr-allowed");
	res->sw_power_collapsible = find_key_value(platform_data,
			"qcom,sw-power-collapse");
	res->debug_timeout = find_key_value(platform_data,
			"qcom,debug-timeout");
	res->pm_qos.latency_us = find_key_value(platform_data,
			"qcom,pm-qos-latency-us");
	res->pm_qos.silver_count = 4;
	for (i = 0; i < res->pm_qos.silver_count; i++)
		res->pm_qos.silver_cores[i] = i;
	res->pm_qos.off_vote_cnt = 0;
	spin_lock_init(&res->pm_qos.lock);
	res->max_secure_inst_count = find_key_value(platform_data,
			"qcom,max-secure-instances");
	res->thermal_mitigable = find_key_value(platform_data,
			"qcom,enable-thermal-mitigation");
	res->msm_cvp_pwr_collapse_delay = find_key_value(platform_data,
			"qcom,power-collapse-delay");
	res->msm_cvp_firmware_unload_delay = find_key_value(platform_data,
			"qcom,fw-unload-delay");
	res->msm_cvp_hw_rsp_timeout = find_key_value(platform_data,
			"qcom,hw-resp-timeout");
	res->msm_cvp_dsp_rsp_timeout = find_key_value(platform_data,
			"qcom,dsp-resp-timeout");
	res->non_fatal_pagefaults = find_key_value(platform_data,
			"qcom,domain-attr-non-fatal-faults");

	res->vpu_ver = platform_data->vpu_ver;
	res->ubwc_config = platform_data->ubwc_config;
	res->fatal_ssr = false;
	return rc;
}

int cvp_read_platform_resources_from_dt(
		struct msm_cvp_platform_resources *res)
{
	struct platform_device *pdev = res->pdev;
	struct resource *kres = NULL;
	int rc = 0;
	uint32_t firmware_base = 0;

	if (!pdev->dev.of_node) {
		dprintk(CVP_ERR, "DT node not found\n");
		return -ENOENT;
	}

	INIT_LIST_HEAD(&res->context_banks);

	res->firmware_base = (phys_addr_t)firmware_base;

	kres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	res->register_base = kres ? kres->start : -1;
	res->register_size = kres ? (kres->end + 1 - kres->start) : -1;

	res->irq = platform_get_irq(pdev, 0);
	dprintk(CVP_CORE, "%s: res->irq:%d\n",
		__func__, res->irq);

	rc = msm_cvp_load_subcache_info(res);
	if (rc)
		dprintk(CVP_WARN, "Failed to load subcache info: %d\n", rc);

	rc = msm_cvp_load_qdss_table(res);
	if (rc)
		dprintk(CVP_WARN, "Failed to load qdss reg table: %d\n", rc);

	rc = msm_cvp_load_reg_table(res);
	if (rc) {
		dprintk(CVP_ERR, "Failed to load reg table: %d\n", rc);
		goto err_load_reg_table;
	}

	rc = msm_cvp_load_ipcc_regs(res);
	if (rc)
		dprintk(CVP_ERR, "Failed to load IPCC regs: %d\n", rc);

	rc = msm_cvp_load_regspace_mapping(res);
	if (rc)
		dprintk(CVP_ERR, "Failed to load reg space mapping: %d\n", rc);

	rc = msm_cvp_load_gcc_regs(res);

	rc = msm_cvp_load_regulator_table(res);
	if (rc) {
		dprintk(CVP_ERR, "Failed to load list of regulators %d\n", rc);
		goto err_load_regulator_table;
	}

	rc = msm_cvp_load_clock_table(res);
	if (rc) {
		dprintk(CVP_ERR,
			"Failed to load clock table: %d\n", rc);
		goto err_load_clock_table;
	}

	rc = msm_cvp_load_allowed_clocks_table(res);
	if (rc) {
		dprintk(CVP_ERR,
			"Failed to load allowed clocks table: %d\n", rc);
		goto err_load_allowed_clocks_table;
	}

	rc = msm_cvp_load_reset_table(res);
	if (rc) {
		dprintk(CVP_ERR,
			"Failed to load reset table: %d\n", rc);
		goto err_load_reset_table;
	}

	res->use_non_secure_pil = of_property_read_bool(pdev->dev.of_node,
			"qcom,use-non-secure-pil");

	if (res->use_non_secure_pil || !is_iommu_present(res)) {
		of_property_read_u32(pdev->dev.of_node, "qcom,fw-bias",
				&firmware_base);
		res->firmware_base = (phys_addr_t)firmware_base;
		dprintk(CVP_CORE,
			"Using fw-bias : %pa", &res->firmware_base);
	}

	return rc;

err_load_reset_table:
	msm_cvp_free_allowed_clocks_table(res);
err_load_allowed_clocks_table:
	msm_cvp_free_clock_table(res);
err_load_clock_table:
	msm_cvp_free_regulator_table(res);
err_load_regulator_table:
	msm_cvp_free_reg_table(res);
err_load_reg_table:
	return rc;
}

static int msm_cvp_setup_context_bank(struct msm_cvp_platform_resources *res,
		struct context_bank_info *cb, struct device *dev)
{
	int rc = 0;
	struct bus_type *bus;

	if (!dev || !cb || !res) {
		dprintk(CVP_ERR,
			"%s: Invalid Input params\n", __func__);
		return -EINVAL;
	}
	cb->dev = dev;

	bus = cb->dev->bus;
	if (IS_ERR_OR_NULL(bus)) {
		dprintk(CVP_ERR, "%s - failed to get bus type\n", __func__);
		rc = PTR_ERR(bus) ?: -ENODEV;
		goto remove_cb;
	}

	/*
	 * configure device segment size and segment boundary to ensure
	 * iommu mapping returns one mapping (which is required for partial
	 * cache operations)
	 */
	if (!dev->dma_parms)
		dev->dma_parms =
			devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
	dma_set_seg_boundary(dev, DMA_BIT_MASK(64));

	dprintk(CVP_CORE, "Attached %s and created mapping\n", dev_name(dev));
	dprintk(CVP_CORE,
		"Context bank name:%s, buffer_type: %#x, is_secure: %d, address range start: %#x, size: %#x, dev: %pK",
		cb->name, cb->buffer_type, cb->is_secure, cb->addr_range.start,
		cb->addr_range.size, cb->dev);

	return rc;

remove_cb:
	return rc;
}

int msm_cvp_smmu_fault_handler(struct iommu_domain *domain,
		struct device *dev, unsigned long iova, int flags, void *token)
{
	struct msm_cvp_core *core = token;
	struct iris_hfi_device *hdev;
	struct msm_cvp_inst *inst;
	bool log = false;

	if (!domain || !core) {
		dprintk(CVP_ERR, "%s - invalid param %pK %pK\n",
			__func__, domain, core);
		return -EINVAL;
	}

	core->smmu_fault_count++;
	if (!core->last_fault_addr)
		core->last_fault_addr = iova;

	dprintk(CVP_ERR, "%s - faulting address: %lx, %d\n",
		__func__, iova, core->smmu_fault_count);

	mutex_lock(&core->lock);
	log = (core->log.snapshot_index > 0) ? false : true;
	list_for_each_entry(inst, &core->instances, list) {
		cvp_print_inst(CVP_ERR, inst);
		msm_cvp_print_inst_bufs(inst, log);
	}
	hdev = core->device->hfi_device_data;
	if (hdev) {
		hdev->error = CVP_ERR_NOC_ERROR;
		/* call_hfi_op(core->device, debug_hook, hdev); */
	}
	mutex_unlock(&core->lock);
	/*
	 * Return -ENOSYS to elicit the default behaviour of the smmu driver.
	 * If we return -ENOSYS, the smmu driver assumes no page fault handler
	 * is installed and prints a list of useful debug information like
	 * FAR, SID etc. This information is not printed if we return 0.
	 */
	return -ENOSYS;
}

static int msm_cvp_populate_context_bank(struct device *dev,
		struct msm_cvp_core *core)
{
	int rc = 0;
	struct context_bank_info *cb = NULL;
	struct device_node *np = NULL;

	if (!dev || !core) {
		dprintk(CVP_ERR, "%s - invalid inputs\n", __func__);
		return -EINVAL;
	}

	np = dev->of_node;
	cb = devm_kzalloc(dev, sizeof(*cb), GFP_KERNEL);
	if (!cb) {
		dprintk(CVP_ERR, "%s - Failed to allocate cb\n", __func__);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&cb->list);
	list_add_tail(&cb->list, &core->resources.context_banks);

	rc = of_property_read_string(np, "label", &cb->name);
	if (rc) {
		dprintk(CVP_CORE,
			"Failed to read cb label from device tree\n");
		rc = 0;
	}

	dprintk(CVP_CORE, "%s: context bank has name %s\n", __func__, cb->name);
	if (!strcmp(cb->name, "cvp_dummy"))
		goto success_setup_cb;

	rc = of_property_read_u32_array(np, "qcom,iommu-dma-addr-pool",
			(u32 *)&cb->addr_range, 2);
	if (rc) {
		dprintk(CVP_ERR,
			"Could not read addr pool for context bank : %s %d\n",
			cb->name, rc);
		goto err_setup_cb;
	}

	cb->is_secure = of_property_read_bool(np, "qcom,iommu-vmid");
	dprintk(CVP_CORE, "context bank %s : secure = %d\n",
		cb->name, cb->is_secure);

	/* setup buffer type for each sub device */
	rc = of_property_read_u32(np, "buffer-types", &cb->buffer_type);
	if (rc) {
		dprintk(CVP_ERR, "failed to load buffer_type info %d\n", rc);
		rc = -ENOENT;
		goto err_setup_cb;
	}
	dprintk(CVP_CORE,
		"context bank %s address start = %x address size = %x buffer_type = %x\n",
		cb->name, cb->addr_range.start,
		cb->addr_range.size, cb->buffer_type);

success_setup_cb:
	cb->domain = iommu_get_domain_for_dev(dev);
	if (IS_ERR_OR_NULL(cb->domain)) {
		dprintk(CVP_ERR, "Create domain failed\n");
		rc = -ENODEV;
		goto err_setup_cb;
	}

	rc = msm_cvp_setup_context_bank(&core->resources, cb, dev);
	if (rc) {
		dprintk(CVP_ERR, "Cannot setup context bank %d\n", rc);
		goto err_setup_cb;
	}

	iommu_set_fault_handler(cb->domain,
		msm_cvp_smmu_fault_handler, (void *)core);

	return 0;

err_setup_cb:
	list_del(&cb->list);
	return rc;
}

int cvp_read_context_bank_resources_from_dt(struct platform_device *pdev)
{
	struct msm_cvp_core *core;
	int rc = 0;

	if (!pdev) {
		dprintk(CVP_ERR, "Invalid platform device\n");
		return -EINVAL;
	} else if (!pdev->dev.parent) {
		dprintk(CVP_ERR, "Failed to find a parent for %s\n",
			dev_name(&pdev->dev));
		return -ENODEV;
	}

	core = dev_get_drvdata(pdev->dev.parent);
	if (!core) {
		dprintk(CVP_ERR, "Failed to find cookie in parent device %s",
			dev_name(pdev->dev.parent));
		return -EINVAL;
	}

	rc = msm_cvp_populate_context_bank(&pdev->dev, core);
	if (rc)
		dprintk(CVP_ERR, "Failed to probe context bank\n");
	else
		dprintk(CVP_CORE, "Successfully probed context bank\n");

	return rc;
}

int cvp_read_bus_resources_from_dt(struct platform_device *pdev)
{
	struct msm_cvp_core *core;

	if (!pdev) {
		dprintk(CVP_ERR, "Invalid platform device\n");
		return -EINVAL;
	} else if (!pdev->dev.parent) {
		dprintk(CVP_ERR, "Failed to find a parent for %s\n",
			dev_name(&pdev->dev));
		return -ENODEV;
	}

	core = dev_get_drvdata(pdev->dev.parent);
	if (!core) {
		dprintk(CVP_ERR, "Failed to find cookie in parent device %s",
			dev_name(pdev->dev.parent));
		return -EINVAL;
	}

	return msm_cvp_populate_bus(&pdev->dev, &core->resources);
}

int cvp_read_mem_cdsp_resources_from_dt(struct platform_device *pdev)
{
	struct msm_cvp_core *core;

	if (!pdev) {
		dprintk(CVP_ERR, "%s: invalid platform device\n", __func__);
		return -EINVAL;
	} else if (!pdev->dev.parent) {
		dprintk(CVP_ERR, "Failed to find a parent for %s\n",
			dev_name(&pdev->dev));
		return -ENODEV;
	}

	core = dev_get_drvdata(pdev->dev.parent);
	if (!core) {
		dprintk(CVP_ERR, "Failed to find cookie in parent device %s",
			dev_name(pdev->dev.parent));
		return -EINVAL;
	}

	return msm_cvp_populate_mem_cdsp(&pdev->dev, &core->resources);
}