msm_vidc_dt.c 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/iommu.h>
  6. #include <linux/dma-iommu.h>
  7. #include <linux/of.h>
  8. #include <linux/sort.h>
  9. #include "msm_vidc_dt.h"
  10. #include "msm_vidc_internal.h"
  11. #include "msm_vidc_core.h"
  12. #include "msm_vidc_debug.h"
  13. #include "msm_vidc_driver.h"
  14. static size_t get_u32_array_num_elements(struct device_node *np,
  15. char *name)
  16. {
  17. int len;
  18. size_t num_elements = 0;
  19. if (!of_get_property(np, name, &len)) {
  20. d_vpr_e("Failed to read %s from device tree\n", name);
  21. goto fail_read;
  22. }
  23. num_elements = len / sizeof(u32);
  24. if (num_elements <= 0) {
  25. d_vpr_e("%s not specified in device tree\n", name);
  26. goto fail_read;
  27. }
  28. return num_elements;
  29. fail_read:
  30. return 0;
  31. }
  32. /**
  33. * msm_vidc_load_u32_table() - load dtsi table entries
  34. * @pdev: A pointer to the platform device.
  35. * @of_node: A pointer to the device node.
  36. * @table_name: A pointer to the dtsi table entry name.
  37. * @struct_size: The size of the structure which is nothing but
  38. * a single entry in the dtsi table.
  39. * @table: A pointer to the table pointer which needs to be
  40. * filled by the dtsi table entries.
  41. * @num_elements: Number of elements pointer which needs to be filled
  42. * with the number of elements in the table.
  43. *
  44. * This is a generic implementation to load single or multiple array
  45. * table from dtsi. The array elements should be of size equal to u32.
  46. *
  47. * Return: Return '0' for success else appropriate error value.
  48. */
  49. static int msm_vidc_load_u32_table(struct platform_device *pdev,
  50. struct device_node *of_node, char *table_name, int struct_size,
  51. u32 **table, u32 *num_elements)
  52. {
  53. int rc = 0, num_elemts = 0;
  54. u32 *ptbl = NULL;
  55. if (!of_find_property(of_node, table_name, NULL)) {
  56. d_vpr_h("%s not found\n", table_name);
  57. return 0;
  58. }
  59. num_elemts = get_u32_array_num_elements(of_node, table_name);
  60. if (!num_elemts) {
  61. d_vpr_e("no elements in %s\n", table_name);
  62. return 0;
  63. }
  64. num_elemts /= struct_size / sizeof(u32);
  65. ptbl = devm_kzalloc(&pdev->dev, num_elemts * struct_size, GFP_KERNEL);
  66. if (!ptbl) {
  67. d_vpr_e("Failed to alloc table %s\n", table_name);
  68. return -ENOMEM;
  69. }
  70. if (of_property_read_u32_array(of_node, table_name, ptbl,
  71. num_elemts * struct_size / sizeof(u32))) {
  72. d_vpr_e("Failed to read %s\n", table_name);
  73. return -EINVAL;
  74. }
  75. *table = ptbl;
  76. if (num_elements)
  77. *num_elements = num_elemts;
  78. return rc;
  79. }
  80. /* A comparator to compare loads (needed later on) */
  81. static int cmp(const void *a, const void *b)
  82. {
  83. /* want to sort in reverse so flip the comparison */
  84. return ((struct allowed_clock_rates_table *)b)->clock_rate -
  85. ((struct allowed_clock_rates_table *)a)->clock_rate;
  86. }
/* Drop the allowed-clock-rates table reference (memory is devm-managed). */
static void msm_vidc_free_allowed_clocks_table(struct msm_vidc_dt *dt)
{
	dt->allowed_clks_tbl = NULL;
}
/* Drop the register-preset table reference (memory is devm-managed). */
static void msm_vidc_free_reg_table(struct msm_vidc_dt *dt)
{
	dt->reg_set.reg_tbl = NULL;
}
/* Drop the QDSS address table reference (memory is devm-managed). */
static void msm_vidc_free_qdss_addr_table(struct msm_vidc_dt *dt)
{
	dt->qdss_addr_set.addr_tbl = NULL;
}
/* Reset the bus table pointer and count (memory is devm-managed). */
static void msm_vidc_free_bus_table(struct msm_vidc_dt *dt)
{
	dt->bus_set.bus_tbl = NULL;
	dt->bus_set.count = 0;
}
/* Drop the buffer-usage table reference (memory is devm-managed). */
static void msm_vidc_free_buffer_usage_table(struct msm_vidc_dt *dt)
{
	dt->buffer_usage_set.buffer_usage_tbl = NULL;
}
  108. static void msm_vidc_free_regulator_table(struct msm_vidc_dt *dt)
  109. {
  110. int c = 0;
  111. for (c = 0; c < dt->regulator_set.count; ++c) {
  112. struct regulator_info *rinfo =
  113. &dt->regulator_set.regulator_tbl[c];
  114. rinfo->name = NULL;
  115. }
  116. dt->regulator_set.regulator_tbl = NULL;
  117. dt->regulator_set.count = 0;
  118. }
/* Reset the clock table pointer and count (memory is devm-managed). */
static void msm_vidc_free_clock_table(struct msm_vidc_dt *dt)
{
	dt->clock_set.clock_tbl = NULL;
	dt->clock_set.count = 0;
}
  124. static int msm_vidc_load_fw_name(struct msm_vidc_core *core)
  125. {
  126. struct platform_device *pdev = core->pdev;
  127. return of_property_read_string_index(pdev->dev.of_node,
  128. "vidc,firmware-name", 0, &core->dt->fw_name);
  129. }
  130. static int msm_vidc_load_reg_table(struct msm_vidc_core *core)
  131. {
  132. struct reg_set *reg_set;
  133. struct platform_device *pdev = core->pdev;
  134. struct msm_vidc_dt *dt = core->dt;
  135. int i;
  136. int rc = 0;
  137. if (!of_find_property(pdev->dev.of_node, "qcom,reg-presets", NULL)) {
  138. /*
  139. * qcom,reg-presets is an optional property. It likely won't be
  140. * present if we don't have any register settings to program
  141. */
  142. d_vpr_h("reg-presets not found\n");
  143. return 0;
  144. }
  145. reg_set = &dt->reg_set;
  146. reg_set->count = get_u32_array_num_elements(pdev->dev.of_node,
  147. "qcom,reg-presets");
  148. reg_set->count /= sizeof(*reg_set->reg_tbl) / sizeof(u32);
  149. if (!reg_set->count) {
  150. d_vpr_h("no elements in reg set\n");
  151. return rc;
  152. }
  153. reg_set->reg_tbl = devm_kzalloc(&pdev->dev, reg_set->count *
  154. sizeof(*(reg_set->reg_tbl)), GFP_KERNEL);
  155. if (!reg_set->reg_tbl) {
  156. d_vpr_e("%s: Failed to alloc register table\n", __func__);
  157. return -ENOMEM;
  158. }
  159. if (of_property_read_u32_array(pdev->dev.of_node, "qcom,reg-presets",
  160. (u32 *)reg_set->reg_tbl, reg_set->count * 3)) {
  161. d_vpr_e("Failed to read register table\n");
  162. msm_vidc_free_reg_table(core->dt);
  163. return -EINVAL;
  164. }
  165. for (i = 0; i < reg_set->count; i++) {
  166. d_vpr_h("reg = %#x, value = %#x, mask = %#x\n",
  167. reg_set->reg_tbl[i].reg, reg_set->reg_tbl[i].value,
  168. reg_set->reg_tbl[i].mask);
  169. }
  170. return rc;
  171. }
  172. static int msm_vidc_load_qdss_table(struct msm_vidc_core *core)
  173. {
  174. struct addr_set *qdss_addr_set;
  175. struct platform_device *pdev = core->pdev;
  176. struct msm_vidc_dt *dt = core->dt;
  177. int i;
  178. int rc = 0;
  179. if (!of_find_property(pdev->dev.of_node, "qcom,qdss-presets", NULL)) {
  180. /*
  181. * qcom,qdss-presets is an optional property. It likely won't be
  182. * present if we don't have any register settings to program
  183. */
  184. d_vpr_h("qdss-presets not found\n");
  185. return rc;
  186. }
  187. qdss_addr_set = &dt->qdss_addr_set;
  188. qdss_addr_set->count = get_u32_array_num_elements(pdev->dev.of_node,
  189. "qcom,qdss-presets");
  190. qdss_addr_set->count /= sizeof(*qdss_addr_set->addr_tbl) / sizeof(u32);
  191. if (!qdss_addr_set->count) {
  192. d_vpr_h("no elements in qdss reg set\n");
  193. return rc;
  194. }
  195. qdss_addr_set->addr_tbl = devm_kzalloc(&pdev->dev,
  196. qdss_addr_set->count * sizeof(*qdss_addr_set->addr_tbl),
  197. GFP_KERNEL);
  198. if (!qdss_addr_set->addr_tbl) {
  199. d_vpr_e("%s: Failed to alloc register table\n", __func__);
  200. rc = -ENOMEM;
  201. goto err_qdss_addr_tbl;
  202. }
  203. rc = of_property_read_u32_array(pdev->dev.of_node, "qcom,qdss-presets",
  204. (u32 *)qdss_addr_set->addr_tbl, qdss_addr_set->count * 2);
  205. if (rc) {
  206. d_vpr_e("Failed to read qdss address table\n");
  207. msm_vidc_free_qdss_addr_table(core->dt);
  208. rc = -EINVAL;
  209. goto err_qdss_addr_tbl;
  210. }
  211. for (i = 0; i < qdss_addr_set->count; i++) {
  212. d_vpr_h("qdss addr = %x, value = %x\n",
  213. qdss_addr_set->addr_tbl[i].start,
  214. qdss_addr_set->addr_tbl[i].size);
  215. }
  216. err_qdss_addr_tbl:
  217. return rc;
  218. }
  219. static int msm_vidc_load_subcache_info(struct msm_vidc_core *core)
  220. {
  221. int rc = 0, num_subcaches = 0, c;
  222. struct platform_device *pdev = core->pdev;
  223. struct msm_vidc_dt *dt = core->dt;
  224. struct subcache_set *subcaches = &dt->subcache_set;
  225. num_subcaches = of_property_count_strings(pdev->dev.of_node,
  226. "cache-slice-names");
  227. if (num_subcaches <= 0) {
  228. d_vpr_h("No subcaches found\n");
  229. goto err_load_subcache_table_fail;
  230. }
  231. subcaches->subcache_tbl = devm_kzalloc(&pdev->dev,
  232. sizeof(*subcaches->subcache_tbl) * num_subcaches, GFP_KERNEL);
  233. if (!subcaches->subcache_tbl) {
  234. d_vpr_e("Failed to allocate memory for subcache tbl\n");
  235. rc = -ENOMEM;
  236. goto err_load_subcache_table_fail;
  237. }
  238. subcaches->count = num_subcaches;
  239. d_vpr_h("Found %d subcaches\n", num_subcaches);
  240. for (c = 0; c < num_subcaches; ++c) {
  241. struct subcache_info *vsc = &dt->subcache_set.subcache_tbl[c];
  242. of_property_read_string_index(pdev->dev.of_node,
  243. "cache-slice-names", c, &vsc->name);
  244. }
  245. dt->sys_cache_present = true;
  246. return 0;
  247. err_load_subcache_table_fail:
  248. dt->sys_cache_present = false;
  249. subcaches->count = 0;
  250. subcaches->subcache_tbl = NULL;
  251. return rc;
  252. }
  253. static int msm_vidc_load_allowed_clocks_table(
  254. struct msm_vidc_core *core)
  255. {
  256. int rc = 0;
  257. struct platform_device *pdev = core->pdev;
  258. struct msm_vidc_dt *dt = core->dt;
  259. int i;
  260. if (!of_find_property(pdev->dev.of_node,
  261. "qcom,allowed-clock-rates", NULL)) {
  262. d_vpr_h("allowed-clock-rates not found\n");
  263. return 0;
  264. }
  265. rc = msm_vidc_load_u32_table(pdev, pdev->dev.of_node,
  266. "qcom,allowed-clock-rates",
  267. sizeof(*dt->allowed_clks_tbl),
  268. (u32 **)&dt->allowed_clks_tbl,
  269. &dt->allowed_clks_tbl_size);
  270. if (rc) {
  271. d_vpr_e("%s: failed to read allowed clocks table\n", __func__);
  272. return rc;
  273. }
  274. sort(dt->allowed_clks_tbl, dt->allowed_clks_tbl_size,
  275. sizeof(*dt->allowed_clks_tbl), cmp, NULL);
  276. d_vpr_h("Found allowed clock rates\n");
  277. for (i = 0; i < dt->allowed_clks_tbl_size; i++)
  278. d_vpr_h(" %d\n", dt->allowed_clks_tbl[i]);
  279. return 0;
  280. }
  281. static int msm_vidc_load_bus_table(struct msm_vidc_core *core)
  282. {
  283. int rc = 0;
  284. struct platform_device *pdev = core->pdev;
  285. struct msm_vidc_dt *dt = core->dt;
  286. struct bus_set *buses = &dt->bus_set;
  287. int c = 0, num_buses = 0;
  288. u32 *bus_ranges = NULL;
  289. num_buses = of_property_count_strings(pdev->dev.of_node,
  290. "interconnect-names");
  291. if (num_buses <= 0) {
  292. d_vpr_e("No buses found\n");
  293. return -EINVAL;
  294. }
  295. buses->count = num_buses;
  296. d_vpr_h("Found %d bus interconnects\n", num_buses);
  297. bus_ranges = kzalloc(2 * num_buses * sizeof(*bus_ranges), GFP_KERNEL);
  298. if (!bus_ranges) {
  299. d_vpr_e("No memory to read bus ranges\n");
  300. return -ENOMEM;
  301. }
  302. rc = of_property_read_u32_array(pdev->dev.of_node,
  303. "qcom,bus-range-kbps", bus_ranges,
  304. num_buses * 2);
  305. if (rc) {
  306. d_vpr_e(
  307. "Failed to read bus ranges: defaulting to <0 INT_MAX>\n");
  308. for (c = 0; c < num_buses; c++) {
  309. bus_ranges[c * 2] = 0;
  310. bus_ranges[c * 2 + 1] = INT_MAX;
  311. }
  312. }
  313. buses->bus_tbl = devm_kzalloc(&pdev->dev, num_buses *
  314. sizeof(*buses->bus_tbl), GFP_KERNEL);
  315. if (!buses->bus_tbl) {
  316. d_vpr_e("No memory for bus table\n");
  317. rc = -ENOMEM;
  318. goto exit;
  319. }
  320. for (c = 0; c < num_buses; c++) {
  321. struct bus_info *bus = &dt->bus_set.bus_tbl[c];
  322. of_property_read_string_index(pdev->dev.of_node,
  323. "interconnect-names", c, &bus->name);
  324. bus->dev = &pdev->dev;
  325. bus->range[0] = bus_ranges[c * 2];
  326. bus->range[1] = bus_ranges[c * 2 + 1];
  327. d_vpr_h("Found bus %s, range [%d %d]\n", bus->name,
  328. bus->range[0], bus->range[1]);
  329. }
  330. exit:
  331. kfree(bus_ranges);
  332. return rc;
  333. }
  334. /* TODO: move this to platform data */
  335. static int msm_vidc_load_buffer_usage_table(struct msm_vidc_core *core)
  336. {
  337. int rc = 0;
  338. struct platform_device *pdev = core->pdev;
  339. struct msm_vidc_dt *dt = core->dt;
  340. struct buffer_usage_set *buffer_usage_set = &dt->buffer_usage_set;
  341. if (!of_find_property(pdev->dev.of_node,
  342. "qcom,buffer-type-tz-usage-table", NULL)) {
  343. /*
  344. * qcom,buffer-type-tz-usage-table is an optional property. It
  345. * likely won't be present if the core doesn't support content
  346. * protection
  347. */
  348. d_vpr_h("buffer-type-tz-usage-table not found\n");
  349. return 0;
  350. }
  351. buffer_usage_set->count = get_u32_array_num_elements(
  352. pdev->dev.of_node, "qcom,buffer-type-tz-usage-table");
  353. buffer_usage_set->count /=
  354. sizeof(*buffer_usage_set->buffer_usage_tbl) / sizeof(u32);
  355. if (!buffer_usage_set->count) {
  356. d_vpr_h("no elements in buffer usage set\n");
  357. return 0;
  358. }
  359. buffer_usage_set->buffer_usage_tbl = devm_kzalloc(&pdev->dev,
  360. buffer_usage_set->count *
  361. sizeof(*buffer_usage_set->buffer_usage_tbl),
  362. GFP_KERNEL);
  363. if (!buffer_usage_set->buffer_usage_tbl) {
  364. d_vpr_e("%s: Failed to alloc buffer usage table\n",
  365. __func__);
  366. rc = -ENOMEM;
  367. goto err_load_buf_usage;
  368. }
  369. rc = of_property_read_u32_array(pdev->dev.of_node,
  370. "qcom,buffer-type-tz-usage-table",
  371. (u32 *)buffer_usage_set->buffer_usage_tbl,
  372. buffer_usage_set->count *
  373. sizeof(*buffer_usage_set->buffer_usage_tbl) / sizeof(u32));
  374. if (rc) {
  375. d_vpr_e("Failed to read buffer usage table\n");
  376. goto err_load_buf_usage;
  377. }
  378. return 0;
  379. err_load_buf_usage:
  380. msm_vidc_free_buffer_usage_table(core->dt);
  381. return rc;
  382. }
  383. static int msm_vidc_load_regulator_table(struct msm_vidc_core *core)
  384. {
  385. int rc = 0;
  386. struct platform_device *pdev = core->pdev;
  387. struct msm_vidc_dt *dt = core->dt;
  388. struct regulator_set *regulators = &dt->regulator_set;
  389. struct device_node *domains_parent_node = NULL;
  390. struct property *domains_property = NULL;
  391. int reg_count = 0;
  392. regulators->count = 0;
  393. regulators->regulator_tbl = NULL;
  394. domains_parent_node = pdev->dev.of_node;
  395. for_each_property_of_node(domains_parent_node, domains_property) {
  396. const char *search_string = "-supply";
  397. char *supply;
  398. bool matched = false;
  399. /* check if current property is possibly a regulator */
  400. supply = strnstr(domains_property->name, search_string,
  401. strlen(domains_property->name) + 1);
  402. matched = supply && (*(supply + strlen(search_string)) == '\0');
  403. if (!matched)
  404. continue;
  405. reg_count++;
  406. }
  407. regulators->regulator_tbl = devm_kzalloc(&pdev->dev,
  408. sizeof(*regulators->regulator_tbl) *
  409. reg_count, GFP_KERNEL);
  410. if (!regulators->regulator_tbl) {
  411. rc = -ENOMEM;
  412. d_vpr_e("Failed to alloc memory for regulator table\n");
  413. goto err_reg_tbl_alloc;
  414. }
  415. for_each_property_of_node(domains_parent_node, domains_property) {
  416. const char *search_string = "-supply";
  417. char *supply;
  418. bool matched = false;
  419. struct device_node *regulator_node = NULL;
  420. struct regulator_info *rinfo = NULL;
  421. /* check if current property is possibly a regulator */
  422. supply = strnstr(domains_property->name, search_string,
  423. strlen(domains_property->name) + 1);
  424. matched = supply && (supply[strlen(search_string)] == '\0');
  425. if (!matched)
  426. continue;
  427. /* make sure prop isn't being misused */
  428. regulator_node = of_parse_phandle(domains_parent_node,
  429. domains_property->name, 0);
  430. if (IS_ERR(regulator_node)) {
  431. d_vpr_e("%s is not a phandle\n",
  432. domains_property->name);
  433. continue;
  434. }
  435. regulators->count++;
  436. /* populate regulator info */
  437. rinfo = &regulators->regulator_tbl[regulators->count - 1];
  438. rinfo->name = devm_kzalloc(&pdev->dev,
  439. (supply - domains_property->name) + 1, GFP_KERNEL);
  440. if (!rinfo->name) {
  441. rc = -ENOMEM;
  442. d_vpr_e("Failed to alloc memory for regulator name\n");
  443. goto err_reg_name_alloc;
  444. }
  445. strlcpy(rinfo->name, domains_property->name,
  446. (supply - domains_property->name) + 1);
  447. rinfo->has_hw_power_collapse = of_property_read_bool(
  448. regulator_node, "qcom,support-hw-trigger");
  449. d_vpr_h("Found regulator %s: h/w collapse = %s\n",
  450. rinfo->name,
  451. rinfo->has_hw_power_collapse ? "yes" : "no");
  452. }
  453. if (!regulators->count)
  454. d_vpr_h("No regulators found");
  455. return 0;
  456. err_reg_name_alloc:
  457. err_reg_tbl_alloc:
  458. msm_vidc_free_regulator_table(core->dt);
  459. return rc;
  460. }
  461. static int msm_vidc_load_clock_table(struct msm_vidc_core *core)
  462. {
  463. int rc = 0, num_clocks = 0, c = 0;
  464. struct platform_device *pdev = core->pdev;
  465. struct msm_vidc_dt *dt = core->dt;
  466. int *clock_ids = NULL;
  467. int *clock_props = NULL;
  468. struct clock_set *clocks = &dt->clock_set;
  469. num_clocks = of_property_count_strings(pdev->dev.of_node,
  470. "clock-names");
  471. if (num_clocks <= 0) {
  472. d_vpr_h("No clocks found\n");
  473. clocks->count = 0;
  474. rc = 0;
  475. goto err_load_clk_table_fail;
  476. }
  477. clock_ids = devm_kzalloc(&pdev->dev, num_clocks *
  478. sizeof(*clock_ids), GFP_KERNEL);
  479. if (!clock_ids) {
  480. d_vpr_e("No memory to read clock ids\n");
  481. rc = -ENOMEM;
  482. goto err_load_clk_table_fail;
  483. }
  484. rc = of_property_read_u32_array(pdev->dev.of_node,
  485. "clock-ids", clock_ids,
  486. num_clocks);
  487. if (rc) {
  488. d_vpr_e("Failed to read clock ids: %d\n", rc);
  489. goto err_load_clk_prop_fail;
  490. }
  491. clock_props = devm_kzalloc(&pdev->dev, num_clocks *
  492. sizeof(*clock_props), GFP_KERNEL);
  493. if (!clock_props) {
  494. d_vpr_e("No memory to read clock properties\n");
  495. rc = -ENOMEM;
  496. goto err_load_clk_table_fail;
  497. }
  498. rc = of_property_read_u32_array(pdev->dev.of_node,
  499. "qcom,clock-configs", clock_props,
  500. num_clocks);
  501. if (rc) {
  502. d_vpr_e("Failed to read clock properties: %d\n", rc);
  503. goto err_load_clk_prop_fail;
  504. }
  505. clocks->clock_tbl = devm_kzalloc(&pdev->dev, sizeof(*clocks->clock_tbl)
  506. * num_clocks, GFP_KERNEL);
  507. if (!clocks->clock_tbl) {
  508. d_vpr_e("Failed to allocate memory for clock tbl\n");
  509. rc = -ENOMEM;
  510. goto err_load_clk_prop_fail;
  511. }
  512. clocks->count = num_clocks;
  513. d_vpr_h("Found %d clocks\n", num_clocks);
  514. for (c = 0; c < num_clocks; ++c) {
  515. struct clock_info *vc = &dt->clock_set.clock_tbl[c];
  516. of_property_read_string_index(pdev->dev.of_node,
  517. "clock-names", c, &vc->name);
  518. vc->clk_id = clock_ids[c];
  519. if (clock_props[c] & CLOCK_PROP_HAS_SCALING) {
  520. vc->has_scaling = true;
  521. } else {
  522. vc->has_scaling = false;
  523. }
  524. if (clock_props[c] & CLOCK_PROP_HAS_MEM_RETENTION)
  525. vc->has_mem_retention = true;
  526. else
  527. vc->has_mem_retention = false;
  528. d_vpr_h("Found clock %s: scale-able = %s\n", vc->name,
  529. vc->has_scaling ? "yes" : "no");
  530. }
  531. return 0;
  532. err_load_clk_prop_fail:
  533. err_load_clk_table_fail:
  534. return rc;
  535. }
  536. static int msm_vidc_load_reset_table(struct msm_vidc_core *core)
  537. {
  538. struct platform_device *pdev = core->pdev;
  539. struct msm_vidc_dt *dt = core->dt;
  540. struct reset_set *rst = &dt->reset_set;
  541. int num_clocks = 0, c = 0;
  542. num_clocks = of_property_count_strings(pdev->dev.of_node,
  543. "reset-names");
  544. if (num_clocks <= 0) {
  545. d_vpr_h("No reset clocks found\n");
  546. rst->count = 0;
  547. return 0;
  548. }
  549. rst->reset_tbl = devm_kcalloc(&pdev->dev, num_clocks,
  550. sizeof(*rst->reset_tbl), GFP_KERNEL);
  551. if (!rst->reset_tbl)
  552. return -ENOMEM;
  553. rst->count = num_clocks;
  554. d_vpr_h("Found %d reset clocks\n", num_clocks);
  555. for (c = 0; c < num_clocks; ++c) {
  556. struct reset_info *rc = &dt->reset_set.reset_tbl[c];
  557. of_property_read_string_index(pdev->dev.of_node,
  558. "reset-names", c, &rc->name);
  559. }
  560. return 0;
  561. }
  562. static int msm_decide_dt_node(struct msm_vidc_core *core)
  563. {
  564. int rc = 0;
  565. struct platform_device *pdev = core->pdev;
  566. u32 sku_index = 0;
  567. rc = of_property_read_u32(pdev->dev.of_node, "sku-index",
  568. &sku_index);
  569. if (rc) {
  570. d_vpr_h("'sku_index' not found in node\n");
  571. return 0;
  572. }
  573. return 0;
  574. }
  575. static int msm_vidc_read_resources_from_dt(struct platform_device *pdev)
  576. {
  577. int rc = 0;
  578. struct msm_vidc_core *core;
  579. struct msm_vidc_dt *dt;
  580. struct resource *kres;
  581. if (!pdev) {
  582. d_vpr_e("%s: invalid params\n", __func__);
  583. return -EINVAL;
  584. }
  585. core = dev_get_drvdata(&pdev->dev);
  586. if (!core || !core->dt) {
  587. d_vpr_e("%s: core not found in device %s",
  588. dev_name(&pdev->dev));
  589. return -EINVAL;
  590. }
  591. dt = core->dt;
  592. rc = msm_decide_dt_node(core);
  593. if (rc)
  594. return rc;
  595. INIT_LIST_HEAD(&dt->context_banks);
  596. kres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  597. dt->register_base = kres ? kres->start : -1;
  598. dt->register_size = kres ? (kres->end + 1 - kres->start) : -1;
  599. d_vpr_h("%s: register base %pa, size %#x\n",
  600. __func__, &dt->register_base, dt->register_size);
  601. kres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
  602. dt->irq = kres ? kres->start : -1;
  603. d_vpr_h("%s: irq %d\n", __func__, dt->irq);
  604. rc = msm_vidc_load_fw_name(core);
  605. if (rc)
  606. d_vpr_e("%s: failed to load fw name, rc %d, using default fw\n",
  607. __func__, rc);
  608. rc = msm_vidc_load_subcache_info(core);
  609. if (rc)
  610. d_vpr_e("Failed to load subcache info: %d\n", rc);
  611. rc = msm_vidc_load_qdss_table(core);
  612. if (rc)
  613. d_vpr_e("Failed to load qdss reg table: %d\n", rc);
  614. rc = msm_vidc_load_reg_table(core);
  615. if (rc) {
  616. d_vpr_e("Failed to load reg table: %d\n", rc);
  617. goto err_load_reg_table;
  618. }
  619. // TODO: move this table to platform
  620. rc = msm_vidc_load_buffer_usage_table(core);
  621. if (rc) {
  622. d_vpr_e("Failed to load buffer usage table: %d\n", rc);
  623. goto err_load_buffer_usage_table;
  624. }
  625. rc = msm_vidc_load_regulator_table(core);
  626. if (rc) {
  627. d_vpr_e("Failed to load list of regulators %d\n", rc);
  628. goto err_load_regulator_table;
  629. }
  630. rc = msm_vidc_load_bus_table(core);
  631. if (rc) {
  632. d_vpr_e("Failed to load bus table: %d\n", rc);
  633. goto err_load_bus_table;
  634. }
  635. rc = msm_vidc_load_clock_table(core);
  636. if (rc) {
  637. d_vpr_e("Failed to load clock table: %d\n", rc);
  638. goto err_load_clock_table;
  639. }
  640. // TODO: move this table to platform
  641. rc = msm_vidc_load_allowed_clocks_table(core);
  642. if (rc) {
  643. d_vpr_e("Failed to load allowed clocks table: %d\n", rc);
  644. goto err_load_allowed_clocks_table;
  645. }
  646. rc = msm_vidc_load_reset_table(core);
  647. if (rc) {
  648. d_vpr_e("Failed to load reset table: %d\n", rc);
  649. goto err_load_reset_table;
  650. }
  651. return rc;
  652. err_load_reset_table:
  653. msm_vidc_free_allowed_clocks_table(core->dt);
  654. err_load_allowed_clocks_table:
  655. msm_vidc_free_clock_table(core->dt);
  656. err_load_clock_table:
  657. msm_vidc_free_bus_table(core->dt);
  658. err_load_bus_table:
  659. msm_vidc_free_regulator_table(core->dt);
  660. err_load_regulator_table:
  661. msm_vidc_free_buffer_usage_table(core->dt);
  662. err_load_buffer_usage_table:
  663. msm_vidc_free_reg_table(core->dt);
  664. err_load_reg_table:
  665. return rc;
  666. }
  667. static int msm_vidc_setup_context_bank(struct msm_vidc_core *core,
  668. struct context_bank_info *cb, struct device *dev)
  669. {
  670. int rc = 0;
  671. struct bus_type *bus;
  672. if (!core || !dev || !cb) {
  673. d_vpr_e("%s: Invalid Input params\n", __func__);
  674. return -EINVAL;
  675. }
  676. cb->dev = dev;
  677. bus = cb->dev->bus;
  678. if (IS_ERR_OR_NULL(bus)) {
  679. d_vpr_e("%s: failed to get bus type\n", __func__);
  680. rc = PTR_ERR(bus) ? PTR_ERR(bus) : -ENODEV;
  681. goto remove_cb;
  682. }
  683. cb->domain = iommu_get_domain_for_dev(cb->dev);
  684. /*
  685. * When memory is fragmented, below configuration increases the
  686. * possibility to get a mapping for buffer in the configured CB.
  687. */
  688. iommu_dma_enable_best_fit_algo(cb->dev);
  689. /*
  690. * configure device segment size and segment boundary to ensure
  691. * iommu mapping returns one mapping (which is required for partial
  692. * cache operations)
  693. */
  694. if (!dev->dma_parms)
  695. dev->dma_parms =
  696. devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
  697. dma_set_max_seg_size(dev, (unsigned int)DMA_BIT_MASK(32));
  698. dma_set_seg_boundary(dev, (unsigned long)DMA_BIT_MASK(64));
  699. d_vpr_h("Attached %s and created mapping\n", dev_name(dev));
  700. d_vpr_h(
  701. "Context bank: %s, is_secure: %d, address range start: %#x, size: %#x, dev: %pK, domain: %pK",
  702. cb->name, cb->is_secure, cb->addr_range.start,
  703. cb->addr_range.size, cb->dev, cb->domain);
  704. remove_cb:
  705. return rc;
  706. }
/*
 * msm_vidc_populate_context_bank() - allocate one context bank from its
 * DT node, link it into core->dt->context_banks, configure it via
 * msm_vidc_setup_context_bank() and register an SMMU fault handler.
 * Returns 0 on success, negative errno otherwise.
 */
static int msm_vidc_populate_context_bank(struct device *dev,
	struct msm_vidc_core *core)
{
	int rc = 0;
	struct context_bank_info *cb = NULL;
	struct device_node *np = NULL;

	if (!dev || !core || !core->dt) {
		d_vpr_e("%s: invalid inputs\n", __func__);
		return -EINVAL;
	}
	np = dev->of_node;
	cb = devm_kzalloc(dev, sizeof(*cb), GFP_KERNEL);
	if (!cb) {
		d_vpr_e("%s: Failed to allocate cb\n", __func__);
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&cb->list);
	/* linked before setup; any later failure must list_del() it again */
	list_add_tail(&cb->list, &core->dt->context_banks);
	/* "label" is optional: a missing name is logged but not fatal */
	rc = of_property_read_string(np, "label", &cb->name);
	if (rc) {
		d_vpr_h("Failed to read cb label from device tree\n");
		rc = 0;
	}
	d_vpr_h("%s: context bank has name %s\n", __func__, cb->name);
	/* "virtual-addr-pool" is mandatory: two u32s fill cb->addr_range */
	rc = of_property_read_u32_array(np, "virtual-addr-pool",
			(u32 *)&cb->addr_range, 2);
	if (rc) {
		d_vpr_e("Could not read addr pool: context bank: %s %d\n",
			cb->name, rc);
		goto err_setup_cb;
	}
	cb->is_secure = of_property_read_bool(np, "qcom,secure-context-bank");
	d_vpr_h("context bank %s: secure = %d\n",
		cb->name, cb->is_secure);
	d_vpr_h("context bank %s address start %x size %x\n",
		cb->name, cb->addr_range.start,
		cb->addr_range.size);
	rc = msm_vidc_setup_context_bank(core, cb, dev);
	if (rc) {
		d_vpr_e("Cannot setup context bank %d\n", rc);
		goto err_setup_cb;
	}
	iommu_set_fault_handler(cb->domain,
		msm_vidc_smmu_fault_handler, (void *)core);
	return 0;

err_setup_cb:
	/* unlink; the cb allocation itself is devm-managed */
	list_del(&cb->list);
	return rc;
}
  756. int msm_vidc_read_context_bank_resources_from_dt(struct platform_device *pdev)
  757. {
  758. struct msm_vidc_core *core;
  759. int rc = 0;
  760. if (!pdev) {
  761. d_vpr_e("Invalid platform device\n");
  762. return -EINVAL;
  763. } else if (!pdev->dev.parent) {
  764. d_vpr_e("Failed to find a parent for %s\n",
  765. dev_name(&pdev->dev));
  766. return -ENODEV;
  767. }
  768. core = dev_get_drvdata(pdev->dev.parent);
  769. if (!core) {
  770. d_vpr_e("Failed to find cookie in parent device %s",
  771. dev_name(pdev->dev.parent));
  772. return -EINVAL;
  773. }
  774. rc = msm_vidc_populate_context_bank(&pdev->dev, core);
  775. if (rc)
  776. d_vpr_e("Failed to probe context bank\n");
  777. else
  778. d_vpr_h("Successfully probed context bank\n");
  779. return rc;
  780. }
  781. void msm_vidc_deinit_dt(struct platform_device *pdev)
  782. {
  783. struct msm_vidc_core *core;
  784. if (!pdev) {
  785. d_vpr_e("%s: invalid params\n", __func__);
  786. return;
  787. }
  788. core = dev_get_drvdata(&pdev->dev);
  789. if (!core) {
  790. d_vpr_e("%s: core not found in device %s",
  791. dev_name(&pdev->dev));
  792. return;
  793. } else if (!core->dt) {
  794. d_vpr_e("%s: invalid dt in device %s",
  795. dev_name(&pdev->dev));
  796. return;
  797. }
  798. msm_vidc_free_clock_table(core->dt);
  799. msm_vidc_free_regulator_table(core->dt);
  800. msm_vidc_free_allowed_clocks_table(core->dt);
  801. msm_vidc_free_reg_table(core->dt);
  802. msm_vidc_free_qdss_addr_table(core->dt);
  803. msm_vidc_free_bus_table(core->dt);
  804. msm_vidc_free_buffer_usage_table(core->dt);
  805. }
  806. int msm_vidc_init_dt(struct platform_device *pdev)
  807. {
  808. int rc = 0;
  809. struct msm_vidc_dt *dt;
  810. struct msm_vidc_core *core;
  811. if (!pdev) {
  812. d_vpr_e("%s: invalid params\n", __func__);
  813. return -EINVAL;
  814. }
  815. core = dev_get_drvdata(&pdev->dev);
  816. if (!core) {
  817. d_vpr_e("%s: core not found in device %s",
  818. dev_name(&pdev->dev));
  819. return -EINVAL;
  820. }
  821. dt = kzalloc(sizeof(struct msm_vidc_dt), GFP_KERNEL);
  822. if (!dt)
  823. return -ENOMEM;
  824. core->dt = dt;
  825. dt->core = core;
  826. rc = msm_vidc_read_resources_from_dt(pdev);
  827. if (rc)
  828. return rc;
  829. return 0;
  830. }