msm_vidc_dt.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 */

#include <linux/iommu.h>
#include <linux/dma-iommu.h>
#include <linux/of.h>
#include <linux/sort.h>

#include "msm_vidc_debug.h"
#include "msm_vidc_dt.h"
#include "msm_vidc_internal.h"
#include "msm_vidc_core.h"
#include "msm_vidc_driver.h"

static size_t get_u32_array_num_elements(struct device_node *np,
					 char *name)
{
	int len;
	size_t num_elements = 0;

	if (!of_get_property(np, name, &len)) {
		d_vpr_e("Failed to read %s from device tree\n", name);
		goto fail_read;
	}

	num_elements = len / sizeof(u32);
	if (num_elements <= 0) {
		d_vpr_e("%s not specified in device tree\n", name);
		goto fail_read;
	}

	return num_elements;

fail_read:
	return 0;
}

/**
 * msm_vidc_load_u32_table() - load dtsi table entries
 * @pdev: A pointer to the platform device.
 * @of_node: A pointer to the device node.
 * @table_name: A pointer to the dtsi table entry name.
 * @struct_size: The size of a single entry in the dtsi table.
 * @table: A pointer to the table pointer, to be filled with the dtsi
 *         table entries.
 * @num_elements: Pointer to be filled with the number of elements in
 *                the table (may be NULL).
 *
 * Generic helper to load a single- or multi-column table from dtsi.
 * Each field of a table entry must be of size u32.
 *
 * Return: '0' on success, else an appropriate error value.
 */
static int msm_vidc_load_u32_table(struct platform_device *pdev,
		struct device_node *of_node, char *table_name, int struct_size,
		u32 **table, u32 *num_elements)
{
	int rc = 0, num_elemts = 0;
	u32 *ptbl = NULL;

	if (!of_find_property(of_node, table_name, NULL)) {
		d_vpr_h("%s not found\n", table_name);
		return 0;
	}

	num_elemts = get_u32_array_num_elements(of_node, table_name);
	if (!num_elemts) {
		d_vpr_e("no elements in %s\n", table_name);
		return 0;
	}
	num_elemts /= struct_size / sizeof(u32);

	ptbl = devm_kzalloc(&pdev->dev, num_elemts * struct_size, GFP_KERNEL);
	if (!ptbl) {
		d_vpr_e("Failed to alloc table %s\n", table_name);
		return -ENOMEM;
	}

	if (of_property_read_u32_array(of_node, table_name, ptbl,
			num_elemts * struct_size / sizeof(u32))) {
		d_vpr_e("Failed to read %s\n", table_name);
		return -EINVAL;
	}

	*table = ptbl;
	if (num_elements)
		*num_elements = num_elemts;

	return rc;
}
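
/*
 * Illustrative sketch only (not taken from any target's dtsi): this helper is
 * what msm_vidc_load_allowed_clocks_table() below uses, so an entry such as
 *
 *     qcom,allowed-clock-rates = <240000000 338000000 444000000>;
 *
 * with struct_size == sizeof(struct allowed_clock_rates_table), assuming that
 * struct carries a single u32 clock_rate (as the debug print further down
 * suggests), would yield a three-entry table and *num_elements == 3. The rate
 * values shown here are made up for illustration.
 */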

static void msm_vidc_free_allowed_clocks_table(struct msm_vidc_dt *dt)
{
	dt->allowed_clks_tbl = NULL;
}

static void msm_vidc_free_reg_table(struct msm_vidc_dt *dt)
{
	dt->reg_set.reg_tbl = NULL;
}

static void msm_vidc_free_qdss_addr_table(struct msm_vidc_dt *dt)
{
	dt->qdss_addr_set.addr_tbl = NULL;
}

static void msm_vidc_free_bus_table(struct msm_vidc_dt *dt)
{
	dt->bus_set.bus_tbl = NULL;
	dt->bus_set.count = 0;
}

static void msm_vidc_free_buffer_usage_table(struct msm_vidc_dt *dt)
{
	dt->buffer_usage_set.buffer_usage_tbl = NULL;
}

static void msm_vidc_free_regulator_table(struct msm_vidc_dt *dt)
{
	int c = 0;

	for (c = 0; c < dt->regulator_set.count; ++c) {
		struct regulator_info *rinfo =
			&dt->regulator_set.regulator_tbl[c];

		rinfo->name = NULL;
	}

	dt->regulator_set.regulator_tbl = NULL;
	dt->regulator_set.count = 0;
}

static void msm_vidc_free_clock_table(struct msm_vidc_dt *dt)
{
	dt->clock_set.clock_tbl = NULL;
	dt->clock_set.count = 0;
}

static int msm_vidc_load_fw_name(struct msm_vidc_core *core)
{
	struct platform_device *pdev = core->pdev;

	return of_property_read_string_index(pdev->dev.of_node,
			"vidc,firmware-name", 0, &core->dt->fw_name);
}
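
/*
 * Illustrative sketch only: the firmware name is a single string property,
 * e.g.
 *
 *     vidc,firmware-name = "vpu20_4v";
 *
 * The name shown is a placeholder; the real value is target specific and,
 * when the property is absent, the caller falls back to a default firmware.
 */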

static int msm_vidc_load_reg_table(struct msm_vidc_core *core)
{
	struct reg_set *reg_set;
	struct platform_device *pdev = core->pdev;
	struct msm_vidc_dt *dt = core->dt;
	int i;
	int rc = 0;

	if (!of_find_property(pdev->dev.of_node, "qcom,reg-presets", NULL)) {
		/*
		 * qcom,reg-presets is an optional property. It likely won't be
		 * present if we don't have any register settings to program
		 */
		d_vpr_h("reg-presets not found\n");
		return 0;
	}

	reg_set = &dt->reg_set;
	reg_set->count = get_u32_array_num_elements(pdev->dev.of_node,
			"qcom,reg-presets");
	reg_set->count /= sizeof(*reg_set->reg_tbl) / sizeof(u32);
	if (!reg_set->count) {
		d_vpr_h("no elements in reg set\n");
		return rc;
	}

	reg_set->reg_tbl = devm_kzalloc(&pdev->dev, reg_set->count *
			sizeof(*(reg_set->reg_tbl)), GFP_KERNEL);
	if (!reg_set->reg_tbl) {
		d_vpr_e("%s: Failed to alloc register table\n", __func__);
		return -ENOMEM;
	}

	if (of_property_read_u32_array(pdev->dev.of_node, "qcom,reg-presets",
			(u32 *)reg_set->reg_tbl, reg_set->count * 3)) {
		d_vpr_e("Failed to read register table\n");
		msm_vidc_free_reg_table(core->dt);
		return -EINVAL;
	}

	for (i = 0; i < reg_set->count; i++) {
		d_vpr_h("reg = %#x, value = %#x, mask = %#x\n",
			reg_set->reg_tbl[i].reg, reg_set->reg_tbl[i].value,
			reg_set->reg_tbl[i].mask);
	}

	return rc;
}
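
/*
 * Illustrative sketch only: the read above consumes three u32s per entry
 * (register offset, value, mask), so a dtsi entry would look roughly like
 *
 *     qcom,reg-presets = <0x80124 0x00000003 0xffffffff>,
 *                        <0x80128 0x00000001 0x00000001>;
 *
 * The offsets and values here are invented for illustration and do not come
 * from any real target.
 */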

static int msm_vidc_load_qdss_table(struct msm_vidc_core *core)
{
	struct addr_set *qdss_addr_set;
	struct platform_device *pdev = core->pdev;
	struct msm_vidc_dt *dt = core->dt;
	int i;
	int rc = 0;

	if (!of_find_property(pdev->dev.of_node, "qcom,qdss-presets", NULL)) {
		/*
		 * qcom,qdss-presets is an optional property. It likely won't be
		 * present if we don't have any register settings to program
		 */
		d_vpr_h("qdss-presets not found\n");
		return rc;
	}

	qdss_addr_set = &dt->qdss_addr_set;
	qdss_addr_set->count = get_u32_array_num_elements(pdev->dev.of_node,
			"qcom,qdss-presets");
	qdss_addr_set->count /= sizeof(*qdss_addr_set->addr_tbl) / sizeof(u32);
	if (!qdss_addr_set->count) {
		d_vpr_h("no elements in qdss reg set\n");
		return rc;
	}

	qdss_addr_set->addr_tbl = devm_kzalloc(&pdev->dev,
			qdss_addr_set->count * sizeof(*qdss_addr_set->addr_tbl),
			GFP_KERNEL);
	if (!qdss_addr_set->addr_tbl) {
		d_vpr_e("%s: Failed to alloc register table\n", __func__);
		rc = -ENOMEM;
		goto err_qdss_addr_tbl;
	}

	rc = of_property_read_u32_array(pdev->dev.of_node, "qcom,qdss-presets",
			(u32 *)qdss_addr_set->addr_tbl, qdss_addr_set->count * 2);
	if (rc) {
		d_vpr_e("Failed to read qdss address table\n");
		msm_vidc_free_qdss_addr_table(core->dt);
		rc = -EINVAL;
		goto err_qdss_addr_tbl;
	}

	for (i = 0; i < qdss_addr_set->count; i++) {
		d_vpr_h("qdss addr = %x, value = %x\n",
			qdss_addr_set->addr_tbl[i].start,
			qdss_addr_set->addr_tbl[i].size);
	}

err_qdss_addr_tbl:
	return rc;
}
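
/*
 * Illustrative sketch only: each qdss entry is a <start size> pair of u32s,
 * e.g.
 *
 *     qcom,qdss-presets = <0x06000000 0x1000>,
 *                         <0x06068000 0x1000>;
 *
 * These addresses are placeholders, not values from a real board file.
 */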

static int msm_vidc_load_subcache_info(struct msm_vidc_core *core)
{
	int rc = 0, num_subcaches = 0, c;
	struct platform_device *pdev = core->pdev;
	struct msm_vidc_dt *dt = core->dt;
	struct subcache_set *subcaches = &dt->subcache_set;

	num_subcaches = of_property_count_strings(pdev->dev.of_node,
			"cache-slice-names");
	if (num_subcaches <= 0) {
		d_vpr_h("No subcaches found\n");
		goto err_load_subcache_table_fail;
	}

	subcaches->subcache_tbl = devm_kzalloc(&pdev->dev,
			sizeof(*subcaches->subcache_tbl) * num_subcaches, GFP_KERNEL);
	if (!subcaches->subcache_tbl) {
		d_vpr_e("Failed to allocate memory for subcache tbl\n");
		rc = -ENOMEM;
		goto err_load_subcache_table_fail;
	}

	subcaches->count = num_subcaches;
	d_vpr_h("Found %d subcaches\n", num_subcaches);

	for (c = 0; c < num_subcaches; ++c) {
		struct subcache_info *vsc = &dt->subcache_set.subcache_tbl[c];

		of_property_read_string_index(pdev->dev.of_node,
				"cache-slice-names", c, &vsc->name);
	}

	dt->sys_cache_present = true;

	return 0;

err_load_subcache_table_fail:
	dt->sys_cache_present = false;
	subcaches->count = 0;
	subcaches->subcache_tbl = NULL;

	return rc;
}
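
/*
 * Illustrative sketch only: the names are free-form strings that a target's
 * dtsi pairs with llcc slice handles, along the lines of
 *
 *     cache-slice-names = "vidsc0", "vidsc1";
 *     cache-slices = <&llcc 2>, <&llcc 3>;
 *
 * Only "cache-slice-names" is parsed here; the slice handles are looked up
 * elsewhere in the driver. The exact names and indices are placeholders.
 */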

static int msm_vidc_load_allowed_clocks_table(
		struct msm_vidc_core *core)
{
	int rc = 0;
	struct platform_device *pdev = core->pdev;
	struct msm_vidc_dt *dt = core->dt;
	int i;

	if (!of_find_property(pdev->dev.of_node,
			"qcom,allowed-clock-rates", NULL)) {
		d_vpr_h("allowed-clock-rates not found\n");
		return 0;
	}

	rc = msm_vidc_load_u32_table(pdev, pdev->dev.of_node,
			"qcom,allowed-clock-rates",
			sizeof(*dt->allowed_clks_tbl),
			(u32 **)&dt->allowed_clks_tbl,
			&dt->allowed_clks_tbl_size);
	if (rc) {
		d_vpr_e("%s: failed to read allowed clocks table\n", __func__);
		return rc;
	}

	sort(dt->allowed_clks_tbl, dt->allowed_clks_tbl_size,
		sizeof(*dt->allowed_clks_tbl), cmp, NULL);

	d_vpr_h("Found allowed clock rates\n");
	for (i = 0; i < dt->allowed_clks_tbl_size; i++)
		d_vpr_h(" %d\n", dt->allowed_clks_tbl[i].clock_rate);

	return 0;
}

static int msm_vidc_load_bus_table(struct msm_vidc_core *core)
{
	int rc = 0;
	struct platform_device *pdev = core->pdev;
	struct msm_vidc_dt *dt = core->dt;
	struct bus_set *buses = &dt->bus_set;
	int c = 0, num_buses = 0;
	u32 *bus_ranges = NULL;

	num_buses = of_property_count_strings(pdev->dev.of_node,
			"interconnect-names");
	if (num_buses <= 0) {
		d_vpr_e("No buses found\n");
		return -EINVAL;
	}

	buses->count = num_buses;
	d_vpr_h("Found %d bus interconnects\n", num_buses);

	bus_ranges = kzalloc(2 * num_buses * sizeof(*bus_ranges), GFP_KERNEL);
	if (!bus_ranges) {
		d_vpr_e("No memory to read bus ranges\n");
		return -ENOMEM;
	}

	rc = of_property_read_u32_array(pdev->dev.of_node,
			"qcom,bus-range-kbps", bus_ranges,
			num_buses * 2);
	if (rc) {
		d_vpr_e(
			"Failed to read bus ranges: defaulting to <0 INT_MAX>\n");
		for (c = 0; c < num_buses; c++) {
			bus_ranges[c * 2] = 0;
			bus_ranges[c * 2 + 1] = INT_MAX;
		}
	}

	buses->bus_tbl = devm_kzalloc(&pdev->dev, num_buses *
			sizeof(*buses->bus_tbl), GFP_KERNEL);
	if (!buses->bus_tbl) {
		d_vpr_e("No memory for bus table\n");
		rc = -ENOMEM;
		goto exit;
	}

	for (c = 0; c < num_buses; c++) {
		struct bus_info *bus = &dt->bus_set.bus_tbl[c];

		of_property_read_string_index(pdev->dev.of_node,
				"interconnect-names", c, &bus->name);

		bus->dev = &pdev->dev;
		bus->range[0] = bus_ranges[c * 2];
		bus->range[1] = bus_ranges[c * 2 + 1];

		d_vpr_h("Found bus %s, range [%d %d]\n", bus->name,
			bus->range[0], bus->range[1]);
	}

exit:
	kfree(bus_ranges);
	return rc;
}
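
/*
 * Illustrative sketch only: each name in "interconnect-names" is paired with
 * a <min max> kbps range from "qcom,bus-range-kbps", roughly
 *
 *     interconnect-names = "venus-cnoc", "venus-ddr";
 *     qcom,bus-range-kbps = <1000 1000>, <1000 15000000>;
 *
 * The interconnect names and bandwidth numbers are placeholders, not taken
 * from a real target.
 */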

/* TODO: move this to platform data */
static int msm_vidc_load_buffer_usage_table(struct msm_vidc_core *core)
{
	int rc = 0;
	struct platform_device *pdev = core->pdev;
	struct msm_vidc_dt *dt = core->dt;
	struct buffer_usage_set *buffer_usage_set = &dt->buffer_usage_set;

	if (!of_find_property(pdev->dev.of_node,
			"qcom,buffer-type-tz-usage-table", NULL)) {
		/*
		 * qcom,buffer-type-tz-usage-table is an optional property. It
		 * likely won't be present if the core doesn't support content
		 * protection
		 */
		d_vpr_h("buffer-type-tz-usage-table not found\n");
		return 0;
	}

	buffer_usage_set->count = get_u32_array_num_elements(
			pdev->dev.of_node, "qcom,buffer-type-tz-usage-table");
	buffer_usage_set->count /=
		sizeof(*buffer_usage_set->buffer_usage_tbl) / sizeof(u32);
	if (!buffer_usage_set->count) {
		d_vpr_h("no elements in buffer usage set\n");
		return 0;
	}

	buffer_usage_set->buffer_usage_tbl = devm_kzalloc(&pdev->dev,
			buffer_usage_set->count *
			sizeof(*buffer_usage_set->buffer_usage_tbl),
			GFP_KERNEL);
	if (!buffer_usage_set->buffer_usage_tbl) {
		d_vpr_e("%s: Failed to alloc buffer usage table\n",
			__func__);
		rc = -ENOMEM;
		goto err_load_buf_usage;
	}

	rc = of_property_read_u32_array(pdev->dev.of_node,
			"qcom,buffer-type-tz-usage-table",
			(u32 *)buffer_usage_set->buffer_usage_tbl,
			buffer_usage_set->count *
			sizeof(*buffer_usage_set->buffer_usage_tbl) / sizeof(u32));
	if (rc) {
		d_vpr_e("Failed to read buffer usage table\n");
		goto err_load_buf_usage;
	}

	return 0;

err_load_buf_usage:
	msm_vidc_free_buffer_usage_table(core->dt);
	return rc;
}

static int msm_vidc_load_regulator_table(struct msm_vidc_core *core)
{
	int rc = 0;
	struct platform_device *pdev = core->pdev;
	struct msm_vidc_dt *dt = core->dt;
	struct regulator_set *regulators = &dt->regulator_set;
	struct device_node *domains_parent_node = NULL;
	struct property *domains_property = NULL;
	int reg_count = 0;

	regulators->count = 0;
	regulators->regulator_tbl = NULL;

	domains_parent_node = pdev->dev.of_node;
	for_each_property_of_node(domains_parent_node, domains_property) {
		const char *search_string = "-supply";
		char *supply;
		bool matched = false;

		/* check if current property is possibly a regulator */
		supply = strnstr(domains_property->name, search_string,
				strlen(domains_property->name) + 1);
		matched = supply && (*(supply + strlen(search_string)) == '\0');
		if (!matched)
			continue;

		reg_count++;
	}

	regulators->regulator_tbl = devm_kzalloc(&pdev->dev,
			sizeof(*regulators->regulator_tbl) *
			reg_count, GFP_KERNEL);
	if (!regulators->regulator_tbl) {
		rc = -ENOMEM;
		d_vpr_e("Failed to alloc memory for regulator table\n");
		goto err_reg_tbl_alloc;
	}

	for_each_property_of_node(domains_parent_node, domains_property) {
		const char *search_string = "-supply";
		char *supply;
		bool matched = false;
		struct device_node *regulator_node = NULL;
		struct regulator_info *rinfo = NULL;

		/* check if current property is possibly a regulator */
		supply = strnstr(domains_property->name, search_string,
				strlen(domains_property->name) + 1);
		matched = supply && (supply[strlen(search_string)] == '\0');
		if (!matched)
			continue;

		/* make sure prop isn't being misused */
		regulator_node = of_parse_phandle(domains_parent_node,
				domains_property->name, 0);
		if (IS_ERR(regulator_node)) {
			d_vpr_e("%s is not a phandle\n",
				domains_property->name);
			continue;
		}
		regulators->count++;

		/* populate regulator info */
		rinfo = &regulators->regulator_tbl[regulators->count - 1];
		rinfo->name = devm_kzalloc(&pdev->dev,
				(supply - domains_property->name) + 1, GFP_KERNEL);
		if (!rinfo->name) {
			rc = -ENOMEM;
			d_vpr_e("Failed to alloc memory for regulator name\n");
			goto err_reg_name_alloc;
		}
		strlcpy(rinfo->name, domains_property->name,
			(supply - domains_property->name) + 1);

		rinfo->has_hw_power_collapse = of_property_read_bool(
				regulator_node, "qcom,support-hw-trigger");

		d_vpr_h("Found regulator %s: h/w collapse = %s\n",
			rinfo->name,
			rinfo->has_hw_power_collapse ? "yes" : "no");
	}

	if (!regulators->count)
		d_vpr_h("No regulators found");

	return 0;

err_reg_name_alloc:
err_reg_tbl_alloc:
	msm_vidc_free_regulator_table(core->dt);
	return rc;
}
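
/*
 * Illustrative sketch only: the loops above key off the "-supply" suffix, so
 * a node containing, for example,
 *
 *     iris-ctl-supply = <&video_cc_mvs0c_gdsc>;
 *
 * would be recorded as a regulator named "iris-ctl", with
 * has_hw_power_collapse taken from "qcom,support-hw-trigger" on the node the
 * phandle points to. The property and phandle names are placeholders.
 */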

static int msm_vidc_load_clock_table(struct msm_vidc_core *core)
{
	int rc = 0, num_clocks = 0, c = 0;
	struct platform_device *pdev = core->pdev;
	struct msm_vidc_dt *dt = core->dt;
	int *clock_ids = NULL;
	int *clock_props = NULL;
	struct clock_set *clocks = &dt->clock_set;

	num_clocks = of_property_count_strings(pdev->dev.of_node,
			"clock-names");
	if (num_clocks <= 0) {
		d_vpr_h("No clocks found\n");
		clocks->count = 0;
		rc = 0;
		goto err_load_clk_table_fail;
	}

	clock_ids = devm_kzalloc(&pdev->dev, num_clocks *
			sizeof(*clock_ids), GFP_KERNEL);
	if (!clock_ids) {
		d_vpr_e("No memory to read clock ids\n");
		rc = -ENOMEM;
		goto err_load_clk_table_fail;
	}

	rc = of_property_read_u32_array(pdev->dev.of_node,
			"clock-ids", clock_ids,
			num_clocks);
	if (rc) {
		d_vpr_e("Failed to read clock ids: %d\n", rc);
		goto err_load_clk_prop_fail;
	}

	clock_props = devm_kzalloc(&pdev->dev, num_clocks *
			sizeof(*clock_props), GFP_KERNEL);
	if (!clock_props) {
		d_vpr_e("No memory to read clock properties\n");
		rc = -ENOMEM;
		goto err_load_clk_table_fail;
	}

	rc = of_property_read_u32_array(pdev->dev.of_node,
			"qcom,clock-configs", clock_props,
			num_clocks);
	if (rc) {
		d_vpr_e("Failed to read clock properties: %d\n", rc);
		goto err_load_clk_prop_fail;
	}

	clocks->clock_tbl = devm_kzalloc(&pdev->dev, sizeof(*clocks->clock_tbl)
			* num_clocks, GFP_KERNEL);
	if (!clocks->clock_tbl) {
		d_vpr_e("Failed to allocate memory for clock tbl\n");
		rc = -ENOMEM;
		goto err_load_clk_prop_fail;
	}

	clocks->count = num_clocks;
	d_vpr_h("Found %d clocks\n", num_clocks);

	for (c = 0; c < num_clocks; ++c) {
		struct clock_info *vc = &dt->clock_set.clock_tbl[c];

		of_property_read_string_index(pdev->dev.of_node,
				"clock-names", c, &vc->name);

		vc->clk_id = clock_ids[c];

		if (clock_props[c] & CLOCK_PROP_HAS_SCALING)
			vc->has_scaling = true;
		else
			vc->has_scaling = false;

		if (clock_props[c] & CLOCK_PROP_HAS_MEM_RETENTION)
			vc->has_mem_retention = true;
		else
			vc->has_mem_retention = false;

		d_vpr_h("Found clock %s: scale-able = %s\n", vc->name,
			vc->has_scaling ? "yes" : "no");
	}

	return 0;

err_load_clk_prop_fail:
err_load_clk_table_fail:
	return rc;
}
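
/*
 * Illustrative sketch only: "clock-names", "clock-ids" and
 * "qcom,clock-configs" are parallel arrays with one entry per clock, e.g.
 *
 *     clock-names = "core_clk", "vcodec_clk";
 *     clock-ids = <VIDEO_CC_MVS0C_CLK VIDEO_CC_MVS0_CLK>;
 *     qcom,clock-configs = <0x0 0x1>;
 *
 * Each config word is a bitmask tested against CLOCK_PROP_HAS_SCALING and
 * CLOCK_PROP_HAS_MEM_RETENTION (defined in the driver headers). The names,
 * ids and bit values shown are placeholders.
 */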

static int msm_vidc_load_reset_table(struct msm_vidc_core *core)
{
	struct platform_device *pdev = core->pdev;
	struct msm_vidc_dt *dt = core->dt;
	struct reset_set *rst = &dt->reset_set;
	int num_clocks = 0, c = 0;

	num_clocks = of_property_count_strings(pdev->dev.of_node,
			"reset-names");
	if (num_clocks <= 0) {
		d_vpr_h("No reset clocks found\n");
		rst->count = 0;
		return 0;
	}

	rst->reset_tbl = devm_kcalloc(&pdev->dev, num_clocks,
			sizeof(*rst->reset_tbl), GFP_KERNEL);
	if (!rst->reset_tbl)
		return -ENOMEM;

	rst->count = num_clocks;
	d_vpr_h("Found %d reset clocks\n", num_clocks);

	for (c = 0; c < num_clocks; ++c) {
		struct reset_info *rc = &dt->reset_set.reset_tbl[c];

		of_property_read_string_index(pdev->dev.of_node,
				"reset-names", c, &rc->name);
	}

	return 0;
}
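
/*
 * Illustrative sketch only: reset controls are picked up by name, e.g.
 *
 *     resets = <&videocc VIDEO_CC_MVS0C_CLK_ARES>;
 *     reset-names = "video_axi_reset";
 *
 * Only "reset-names" is parsed here; the reset handles themselves are
 * acquired elsewhere in the driver. Names and phandles are placeholders.
 */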

static int msm_vidc_read_resources_from_dt(struct platform_device *pdev)
{
	int rc = 0;
	struct msm_vidc_core *core;
	struct msm_vidc_dt *dt;
	struct resource *kres;

	if (!pdev) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	core = dev_get_drvdata(&pdev->dev);
	if (!core || !core->dt) {
		d_vpr_e("%s: core not found in device %s",
			__func__, dev_name(&pdev->dev));
		return -EINVAL;
	}
	dt = core->dt;

	INIT_LIST_HEAD(&dt->context_banks);

	kres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dt->register_base = kres ? kres->start : -1;
	dt->register_size = kres ? (kres->end + 1 - kres->start) : -1;
	d_vpr_h("%s: register base %pa, size %#x\n",
		__func__, &dt->register_base, dt->register_size);

	kres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	dt->irq = kres ? kres->start : -1;
	d_vpr_h("%s: irq %d\n", __func__, dt->irq);

	rc = msm_vidc_load_fw_name(core);
	if (rc)
		d_vpr_e("%s: failed to load fw name, rc %d, using default fw\n",
			__func__, rc);

	rc = msm_vidc_load_subcache_info(core);
	if (rc)
		d_vpr_e("Failed to load subcache info: %d\n", rc);

	rc = msm_vidc_load_qdss_table(core);
	if (rc)
		d_vpr_e("Failed to load qdss reg table: %d\n", rc);

	rc = msm_vidc_load_reg_table(core);
	if (rc) {
		d_vpr_e("Failed to load reg table: %d\n", rc);
		goto err_load_reg_table;
	}

	// TODO: move this table to platform
	rc = msm_vidc_load_buffer_usage_table(core);
	if (rc) {
		d_vpr_e("Failed to load buffer usage table: %d\n", rc);
		goto err_load_buffer_usage_table;
	}

	rc = msm_vidc_load_regulator_table(core);
	if (rc) {
		d_vpr_e("Failed to load list of regulators %d\n", rc);
		goto err_load_regulator_table;
	}

	rc = msm_vidc_load_bus_table(core);
	if (rc) {
		d_vpr_e("Failed to load bus table: %d\n", rc);
		goto err_load_bus_table;
	}

	rc = msm_vidc_load_clock_table(core);
	if (rc) {
		d_vpr_e("Failed to load clock table: %d\n", rc);
		goto err_load_clock_table;
	}

	// TODO: move this table to platform
	rc = msm_vidc_load_allowed_clocks_table(core);
	if (rc) {
		d_vpr_e("Failed to load allowed clocks table: %d\n", rc);
		goto err_load_allowed_clocks_table;
	}

	rc = msm_vidc_load_reset_table(core);
	if (rc) {
		d_vpr_e("Failed to load reset table: %d\n", rc);
		goto err_load_reset_table;
	}

	return rc;

err_load_reset_table:
	msm_vidc_free_allowed_clocks_table(core->dt);
err_load_allowed_clocks_table:
	msm_vidc_free_clock_table(core->dt);
err_load_clock_table:
	msm_vidc_free_bus_table(core->dt);
err_load_bus_table:
	msm_vidc_free_regulator_table(core->dt);
err_load_regulator_table:
	msm_vidc_free_buffer_usage_table(core->dt);
err_load_buffer_usage_table:
	msm_vidc_free_reg_table(core->dt);
err_load_reg_table:
	return rc;
}

static int msm_vidc_setup_context_bank(struct msm_vidc_core *core,
		struct context_bank_info *cb, struct device *dev)
{
	int rc = 0;
	struct bus_type *bus;

	if (!core || !dev || !cb) {
		d_vpr_e("%s: Invalid Input params\n", __func__);
		return -EINVAL;
	}
	cb->dev = dev;

	bus = cb->dev->bus;
	if (IS_ERR_OR_NULL(bus)) {
		d_vpr_e("%s: failed to get bus type\n", __func__);
		rc = PTR_ERR(bus) ? PTR_ERR(bus) : -ENODEV;
		goto remove_cb;
	}

	cb->domain = iommu_get_domain_for_dev(cb->dev);

	/*
	 * When memory is fragmented, the configuration below increases the
	 * possibility of getting a mapping for a buffer in the configured CB.
	 */
	/*
	 * TBD: iommu_dma_enable_best_fit_algo() is commented out temporarily
	 * to allow dlkm compilation until the headers are available, once the
	 * GKI branch for Kailua (android13-5.next) branches out.
	 */
	/* iommu_dma_enable_best_fit_algo(cb->dev); */

	/*
	 * Configure the device segment size and segment boundary to ensure
	 * the iommu mapping returns one mapping (which is required for
	 * partial cache operations).
	 */
	if (!dev->dma_parms)
		dev->dma_parms =
			devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
	dma_set_max_seg_size(dev, (unsigned int)DMA_BIT_MASK(32));
	dma_set_seg_boundary(dev, (unsigned long)DMA_BIT_MASK(64));

	d_vpr_h("Attached %s and created mapping\n", dev_name(dev));
	d_vpr_h(
		"Context bank: %s, is_secure: %d, address range start: %#x, size: %#x, dev: %pK, domain: %pK",
		cb->name, cb->is_secure, cb->addr_range.start,
		cb->addr_range.size, cb->dev, cb->domain);

remove_cb:
	return rc;
}

static int msm_vidc_populate_context_bank(struct device *dev,
		struct msm_vidc_core *core)
{
	int rc = 0;
	struct context_bank_info *cb = NULL;
	struct device_node *np = NULL;

	if (!dev || !core || !core->dt) {
		d_vpr_e("%s: invalid inputs\n", __func__);
		return -EINVAL;
	}

	np = dev->of_node;
	cb = devm_kzalloc(dev, sizeof(*cb), GFP_KERNEL);
	if (!cb) {
		d_vpr_e("%s: Failed to allocate cb\n", __func__);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&cb->list);
	list_add_tail(&cb->list, &core->dt->context_banks);

	rc = of_property_read_string(np, "label", &cb->name);
	if (rc) {
		d_vpr_h("Failed to read cb label from device tree\n");
		rc = 0;
	}

	d_vpr_h("%s: context bank has name %s\n", __func__, cb->name);
	rc = of_property_read_u32_array(np, "virtual-addr-pool",
			(u32 *)&cb->addr_range, 2);
	if (rc) {
		d_vpr_e("Could not read addr pool: context bank: %s %d\n",
			cb->name, rc);
		goto err_setup_cb;
	}

	cb->is_secure = of_property_read_bool(np, "qcom,secure-context-bank");
	d_vpr_h("context bank %s: secure = %d\n",
		cb->name, cb->is_secure);
	d_vpr_h("context bank %s address start %x size %x\n",
		cb->name, cb->addr_range.start,
		cb->addr_range.size);

	rc = msm_vidc_setup_context_bank(core, cb, dev);
	if (rc) {
		d_vpr_e("Cannot setup context bank %d\n", rc);
		goto err_setup_cb;
	}

	iommu_set_fault_handler(cb->domain,
		msm_vidc_smmu_fault_handler, (void *)core);

	return 0;

err_setup_cb:
	list_del(&cb->list);
	return rc;
}
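
/*
 * Illustrative sketch only: a context bank child node carries a label and a
 * <start size> virtual address pool, along the lines of
 *
 *     non_secure_cb {
 *         label = "venus_ns";
 *         virtual-addr-pool = <0x25800000 0xba800000>;
 *     };
 *
 * The node name, label and address range are placeholders; only "label",
 * "virtual-addr-pool" and "qcom,secure-context-bank" are parsed in this file.
 */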

int msm_vidc_read_context_bank_resources_from_dt(struct platform_device *pdev)
{
	struct msm_vidc_core *core;
	int rc = 0;

	if (!pdev) {
		d_vpr_e("Invalid platform device\n");
		return -EINVAL;
	} else if (!pdev->dev.parent) {
		d_vpr_e("Failed to find a parent for %s\n",
			dev_name(&pdev->dev));
		return -ENODEV;
	}

	core = dev_get_drvdata(pdev->dev.parent);
	if (!core) {
		d_vpr_e("Failed to find cookie in parent device %s",
			dev_name(pdev->dev.parent));
		return -EINVAL;
	}

	rc = msm_vidc_populate_context_bank(&pdev->dev, core);
	if (rc)
		d_vpr_e("Failed to probe context bank\n");
	else
		d_vpr_h("Successfully probed context bank\n");

	return rc;
}

void msm_vidc_deinit_dt(struct platform_device *pdev)
{
	struct msm_vidc_core *core;

	if (!pdev) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}

	core = dev_get_drvdata(&pdev->dev);
	if (!core) {
		d_vpr_e("%s: core not found in device %s",
			__func__, dev_name(&pdev->dev));
		return;
	} else if (!core->dt) {
		d_vpr_e("%s: invalid dt in device %s",
			__func__, dev_name(&pdev->dev));
		return;
	}

	msm_vidc_free_clock_table(core->dt);
	msm_vidc_free_regulator_table(core->dt);
	msm_vidc_free_allowed_clocks_table(core->dt);
	msm_vidc_free_reg_table(core->dt);
	msm_vidc_free_qdss_addr_table(core->dt);
	msm_vidc_free_bus_table(core->dt);
	msm_vidc_free_buffer_usage_table(core->dt);
}

int msm_vidc_init_dt(struct platform_device *pdev)
{
	int rc = 0;
	struct msm_vidc_dt *dt;
	struct msm_vidc_core *core;

	if (!pdev) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	core = dev_get_drvdata(&pdev->dev);
	if (!core) {
		d_vpr_e("%s: core not found in device %s",
			__func__, dev_name(&pdev->dev));
		return -EINVAL;
	}

	dt = kzalloc(sizeof(struct msm_vidc_dt), GFP_KERNEL);
	if (!dt)
		return -ENOMEM;

	core->dt = dt;
	dt->core = core;

	rc = msm_vidc_read_resources_from_dt(pdev);
	if (rc)
		return rc;

	return 0;
}