// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/sort.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <linux/reset.h>
#include <linux/interconnect.h>
#include <linux/soc/qcom/llcc-qcom.h>
#ifdef CONFIG_MSM_MMRM
#include <linux/soc/qcom/msm_mmrm.h>
#endif

#include "msm_vidc_core.h"
#include "msm_vidc_power.h"
#include "msm_vidc_debug.h"
#include "msm_vidc_driver.h"
#include "msm_vidc_platform.h"
#include "venus_hfi.h"

/* Less than 50 MBps (50000 kBps) is treated as a trivial BW change */
#define TRIVIAL_BW_THRESHOLD 50000
#define TRIVIAL_BW_CHANGE(a, b) \
	((a) > (b) ? (a) - (b) < TRIVIAL_BW_THRESHOLD : \
		(b) - (a) < TRIVIAL_BW_THRESHOLD)
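/*
 * Worked example (illustration only): with the previous DDR vote cached at
 * 1,000,000 kBps, a new request of 1,040,000 kBps differs by 40,000 kBps
 * (40 MBps) < TRIVIAL_BW_THRESHOLD, so TRIVIAL_BW_CHANGE() evaluates true
 * and __vote_buses() below skips re-voting; a request of 1,060,000 kBps
 * (60 MBps delta) would be voted normally.
 */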
static struct clock_residency *get_residency_stats(struct clock_info *cl, u64 rate);
static int __update_residency_stats(struct msm_vidc_core *core,
				    struct clock_info *cl, u64 rate);

enum reset_state {
	INIT = 1,
	ASSERT,
	DEASSERT,
};

/* A comparator to compare loads (needed later on) */
static inline int cmp(const void *a, const void *b)
{
	/* want to sort in reverse so flip the comparison */
	return ((struct freq_table *)b)->freq -
		((struct freq_table *)a)->freq;
}
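/*
 * Example (illustration only): sort()ing a freq_table of
 * { 240000000, 366000000, 338000000 } with cmp() yields the descending
 * order { 366000000, 338000000, 240000000 }, so index 0 always holds the
 * highest operating frequency.
 */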
static void __fatal_error(bool fatal)
{
	WARN_ON(fatal);
}

static void devm_llcc_release(void *res)
{
	d_vpr_h("%s()\n", __func__);
	llcc_slice_putd((struct llcc_slice_desc *)res);
}

static struct llcc_slice_desc *devm_llcc_get(struct device *dev, u32 id)
{
	struct llcc_slice_desc *llcc = NULL;
	int rc = 0;

	llcc = llcc_slice_getd(id);
	if (!llcc)
		return NULL;

	/*
	 * Register a release callback with devm so that, when the device
	 * goes out of scope (during the remove sequence), devm takes care
	 * of deregistration by invoking the release callback.
	 */
	rc = devm_add_action_or_reset(dev, devm_llcc_release, (void *)llcc);
	if (rc)
		return NULL;

	return llcc;
}
#ifdef CONFIG_MSM_MMRM
static void devm_mmrm_release(void *res)
{
	d_vpr_h("%s()\n", __func__);
	mmrm_client_deregister((struct mmrm_client *)res);
}

static struct mmrm_client *devm_mmrm_get(struct device *dev, struct mmrm_client_desc *desc)
{
	struct mmrm_client *mmrm = NULL;
	int rc = 0;

	mmrm = mmrm_client_register(desc);
	if (!mmrm)
		return NULL;

	/*
	 * Register a release callback with devm so that, when the device
	 * goes out of scope (during the remove sequence), devm takes care
	 * of deregistration by invoking the release callback.
	 */
	rc = devm_add_action_or_reset(dev, devm_mmrm_release, (void *)mmrm);
	if (rc)
		return NULL;

	return mmrm;
}
#endif
static void devm_pd_release(void *res)
{
	struct device *pd = (struct device *)res;

	d_vpr_h("%s(): %s\n", __func__, dev_name(pd));
	dev_pm_domain_detach(pd, true);
}

static struct device *devm_pd_get(struct device *dev, const char *name)
{
	struct device *pd = NULL;
	int rc = 0;

	pd = dev_pm_domain_attach_by_name(dev, name);
	if (!pd) {
		d_vpr_e("%s: pm domain attach failed %s\n", __func__, name);
		return NULL;
	}

	rc = devm_add_action_or_reset(dev, devm_pd_release, (void *)pd);
	if (rc) {
		d_vpr_e("%s: add action or reset failed %s\n", __func__, name);
		return NULL;
	}

	return pd;
}

static void devm_opp_dl_release(void *res)
{
	struct device_link *link = (struct device_link *)res;

	d_vpr_h("%s(): %s\n", __func__, dev_name(&link->link_dev));
	device_link_del(link);
}

static int devm_opp_dl_get(struct device *dev, struct device *supplier)
{
	u32 flag = DL_FLAG_RPM_ACTIVE | DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS;
	struct device_link *link = NULL;
	int rc = 0;

	link = device_link_add(dev, supplier, flag);
	if (!link) {
		d_vpr_e("%s: device link add failed\n", __func__);
		return -EINVAL;
	}

	rc = devm_add_action_or_reset(dev, devm_opp_dl_release, (void *)link);
	if (rc) {
		d_vpr_e("%s: add action or reset failed\n", __func__);
		return rc;
	}

	return rc;
}

static void devm_pm_runtime_put_sync(void *res)
{
	struct device *dev = (struct device *)res;

	d_vpr_h("%s(): %s\n", __func__, dev_name(dev));
	pm_runtime_put_sync(dev);
}

static int devm_pm_runtime_get_sync(struct device *dev)
{
	int rc = 0;

	rc = pm_runtime_get_sync(dev);
	if (rc < 0) {
		d_vpr_e("%s: pm domain get sync failed\n", __func__);
		return rc;
	}

	rc = devm_add_action_or_reset(dev, devm_pm_runtime_put_sync, (void *)dev);
	if (rc) {
		d_vpr_e("%s: add action or reset failed\n", __func__);
		return rc;
	}

	return rc;
}
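/*
 * All of the devm_*_get() helpers above follow the same pattern: acquire a
 * resource (LLCC slice, MMRM client, power domain, device link, runtime-PM
 * reference), then hand its release routine to devm_add_action_or_reset().
 * On driver remove, or if registration itself fails, devm invokes the
 * release callback, so the rest of this file needs no explicit
 * put/detach/deregister calls on error or teardown paths.
 */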
static int __opp_set_rate(struct msm_vidc_core *core, u64 freq)
{
	unsigned long opp_freq = 0;
	struct dev_pm_opp *opp;
	int rc = 0;

	opp_freq = freq;

	/* find the ceiling freq in the opp table; fall back to the floor */
	opp = dev_pm_opp_find_freq_ceil(&core->pdev->dev, &opp_freq);
	if (IS_ERR(opp)) {
		opp = dev_pm_opp_find_freq_floor(&core->pdev->dev, &opp_freq);
		if (IS_ERR(opp)) {
			d_vpr_e("%s: unable to find freq %llu in opp table\n", __func__, freq);
			return -EINVAL;
		}
	}
	dev_pm_opp_put(opp);

	/* print freq value */
	d_vpr_h("%s: set rate %lu (requested %llu)\n",
		__func__, opp_freq, freq);

	/* scale freq to power up mxc & mmcx */
	rc = dev_pm_opp_set_rate(&core->pdev->dev, opp_freq);
	if (rc) {
		d_vpr_e("%s: failed to set rate\n", __func__);
		return rc;
	}

	return rc;
}
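/*
 * Example (illustration only): with an opp table of { 240, 338, 366 } MHz,
 * a request of 300 MHz resolves to the 338 MHz ceiling entry; a request of
 * 400 MHz has no ceiling, so the floor lookup picks 366 MHz; ULONG_MAX
 * (used by __enable_power_domains() below) likewise lands on the highest
 * entry, and 0 lands on the lowest.
 */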
static int __init_register_base(struct msm_vidc_core *core)
{
	struct msm_vidc_resource *res;

	res = core->resource;

	res->register_base_addr = devm_platform_ioremap_resource(core->pdev, 0);
	if (IS_ERR(res->register_base_addr)) {
		d_vpr_e("%s: map reg addr failed %ld\n",
			__func__, PTR_ERR(res->register_base_addr));
		return -EINVAL;
	}
	d_vpr_h("%s: reg_base %pK\n", __func__, res->register_base_addr);

	return 0;
}
static int __init_irq(struct msm_vidc_core *core)
{
	struct msm_vidc_resource *res;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0))
	struct resource *kres;
#endif
	int rc = 0;

	res = core->resource;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0))
	res->irq = platform_get_irq(core->pdev, 0);
#else
	kres = platform_get_resource(core->pdev, IORESOURCE_IRQ, 0);
	res->irq = kres ? kres->start : -1;
#endif
	if (res->irq < 0) {
		d_vpr_e("%s: get irq failed, %d\n", __func__, res->irq);
		return res->irq;
	}
	d_vpr_h("%s: irq %d\n", __func__, res->irq);

	rc = devm_request_threaded_irq(&core->pdev->dev, res->irq, venus_hfi_isr,
				       venus_hfi_isr_handler, IRQF_TRIGGER_HIGH, "msm-vidc", core);
	if (rc) {
		d_vpr_e("%s: Failed to allocate venus IRQ\n", __func__);
		return rc;
	}
	disable_irq_nosync(res->irq);

	return rc;
}
static int __init_bus(struct msm_vidc_core *core)
{
	const struct bw_table *bus_tbl;
	struct bus_set *interconnects;
	struct bus_info *binfo = NULL;
	u32 bus_count = 0, cnt = 0;
	int rc = 0;

	interconnects = &core->resource->bus_set;

	bus_tbl = core->platform->data.bw_tbl;
	bus_count = core->platform->data.bw_tbl_size;

	if (!bus_tbl || !bus_count) {
		d_vpr_e("%s: invalid bus tbl %pK or count %u\n",
			__func__, bus_tbl, bus_count);
		return -EINVAL;
	}

	/* allocate bus_set */
	interconnects->bus_tbl = devm_kzalloc(&core->pdev->dev,
					      sizeof(*interconnects->bus_tbl) * bus_count, GFP_KERNEL);
	if (!interconnects->bus_tbl) {
		d_vpr_e("%s: failed to alloc memory for bus table\n", __func__);
		return -ENOMEM;
	}
	interconnects->count = bus_count;

	/* populate bus fields from platform data */
	for (cnt = 0; cnt < interconnects->count; cnt++) {
		interconnects->bus_tbl[cnt].name = bus_tbl[cnt].name;
		interconnects->bus_tbl[cnt].min_kbps = bus_tbl[cnt].min_kbps;
		interconnects->bus_tbl[cnt].max_kbps = bus_tbl[cnt].max_kbps;
	}

	/* print bus fields */
	venus_hfi_for_each_bus(core, binfo) {
		d_vpr_h("%s: name %s min_kbps %u max_kbps %u\n",
			__func__, binfo->name, binfo->min_kbps, binfo->max_kbps);
	}

	/* get interconnect handle */
	venus_hfi_for_each_bus(core, binfo) {
		if (!strcmp(binfo->name, "venus-llcc")) {
			if (msm_vidc_syscache_disable) {
				d_vpr_h("%s: skipping LLC bus init: %s\n", __func__,
					binfo->name);
				continue;
			}
		}
		binfo->icc = devm_of_icc_get(&core->pdev->dev, binfo->name);
		if (IS_ERR_OR_NULL(binfo->icc)) {
			d_vpr_e("%s: failed to get bus: %s\n", __func__, binfo->name);
			rc = PTR_ERR(binfo->icc) ?
				PTR_ERR(binfo->icc) : -EBADHANDLE;
			binfo->icc = NULL;
			return rc;
		}
	}

	return rc;
}
static int __init_power_domains(struct msm_vidc_core *core)
{
	struct power_domain_info *pdinfo = NULL;
	const struct pd_table *pd_tbl;
	struct power_domain_set *pds;
	struct device **opp_vdevs = NULL;
	const char * const *opp_tbl;
	u32 pd_count = 0, opp_count = 0, cnt = 0;
	int rc = 0;

	pds = &core->resource->power_domain_set;

	pd_tbl = core->platform->data.pd_tbl;
	pd_count = core->platform->data.pd_tbl_size;

	/* skip init if power domain not supported */
	if (!pd_count) {
		d_vpr_h("%s: power domain entries not available in db\n", __func__);
		return 0;
	}

	/* sanitize power domain table */
	if (!pd_tbl) {
		d_vpr_e("%s: invalid power domain tbl\n", __func__);
		return -EINVAL;
	}

	/* allocate power_domain_set */
	pds->power_domain_tbl = devm_kzalloc(&core->pdev->dev,
					     sizeof(*pds->power_domain_tbl) * pd_count, GFP_KERNEL);
	if (!pds->power_domain_tbl) {
		d_vpr_e("%s: failed to alloc memory for pd table\n", __func__);
		return -ENOMEM;
	}
	pds->count = pd_count;

	/* populate power domain fields */
	for (cnt = 0; cnt < pds->count; cnt++)
		pds->power_domain_tbl[cnt].name = pd_tbl[cnt].name;

	/* print power domain fields */
	venus_hfi_for_each_power_domain(core, pdinfo)
		d_vpr_h("%s: pd name %s\n", __func__, pdinfo->name);

	/* get power domain handle */
	venus_hfi_for_each_power_domain(core, pdinfo) {
		pdinfo->genpd_dev = devm_pd_get(&core->pdev->dev, pdinfo->name);
		if (IS_ERR_OR_NULL(pdinfo->genpd_dev)) {
			rc = PTR_ERR(pdinfo->genpd_dev) ?
				PTR_ERR(pdinfo->genpd_dev) : -EBADHANDLE;
			d_vpr_e("%s: failed to get pd: %s\n", __func__, pdinfo->name);
			pdinfo->genpd_dev = NULL;
			return rc;
		}
	}

	opp_tbl = core->platform->data.opp_tbl;
	opp_count = core->platform->data.opp_tbl_size;

	/* skip init if opp not supported */
	if (opp_count < 2) {
		d_vpr_h("%s: opp entries not available\n", __func__);
		return 0;
	}

	/* sanitize opp table */
	if (!opp_tbl) {
		d_vpr_e("%s: invalid opp table\n", __func__);
		return -EINVAL;
	}

	/* ignore NULL entry at the end of table */
	opp_count -= 1;

	/* print opp table entries */
	for (cnt = 0; cnt < opp_count; cnt++)
		d_vpr_h("%s: opp name %s\n", __func__, opp_tbl[cnt]);

	/* populate opp power domains (for rails) */
	/* rc = devm_pm_opp_attach_genpd(&core->pdev->dev, opp_tbl, &opp_vdevs); */
	rc = -EINVAL;
	if (rc)
		return rc;

	/* create device_links b/w consumer(dev) and multiple suppliers(mx, mmcx) */
	for (cnt = 0; cnt < opp_count; cnt++) {
		rc = devm_opp_dl_get(&core->pdev->dev, opp_vdevs[cnt]);
		if (rc) {
			d_vpr_e("%s: failed to create dl: %s\n",
				__func__, dev_name(opp_vdevs[cnt]));
			return rc;
		}
	}

	/* initialize opp table from device tree */
	rc = devm_pm_opp_of_add_table(&core->pdev->dev);
	if (rc) {
		d_vpr_e("%s: failed to add opp table\n", __func__);
		return rc;
	}

	/*
	 * Power-up ordering:
	 * 1. power up mx & mmcx supply for the RCG (video_cc_mvs0_clk_src)
	 * 2. power up gdsc0c for the mvs0c branch clk
	 * 3. power up gdsc0 for the mvs0 branch clk
	 */
	/* enable runtime pm */
	rc = devm_pm_runtime_enable(&core->pdev->dev);
	if (rc) {
		d_vpr_e("%s: failed to enable runtime pm\n", __func__);
		return rc;
	}
	/* power up rails (mxc & mmcx) */
	rc = devm_pm_runtime_get_sync(&core->pdev->dev);
	if (rc) {
		d_vpr_e("%s: failed to get sync runtime pm\n", __func__);
		return rc;
	}

	return rc;
}
static int __init_clocks(struct msm_vidc_core *core)
{
	struct clock_residency *residency = NULL;
	const struct clk_table *clk_tbl;
	struct freq_table *freq_tbl;
	struct clock_set *clocks;
	struct clock_info *cinfo = NULL;
	u32 clk_count = 0, freq_count = 0;
	int fcnt = 0, cnt = 0, rc = 0;

	clocks = &core->resource->clock_set;

	clk_tbl = core->platform->data.clk_tbl;
	clk_count = core->platform->data.clk_tbl_size;

	if (!clk_tbl || !clk_count) {
		d_vpr_e("%s: invalid clock tbl %pK or count %u\n",
			__func__, clk_tbl, clk_count);
		return -EINVAL;
	}

	/* allocate clock_set */
	clocks->clock_tbl = devm_kzalloc(&core->pdev->dev,
					 sizeof(*clocks->clock_tbl) * clk_count, GFP_KERNEL);
	if (!clocks->clock_tbl) {
		d_vpr_e("%s: failed to alloc memory for clock table\n", __func__);
		return -ENOMEM;
	}
	clocks->count = clk_count;

	/* populate clock fields from platform data */
	for (cnt = 0; cnt < clocks->count; cnt++) {
		clocks->clock_tbl[cnt].name = clk_tbl[cnt].name;
		clocks->clock_tbl[cnt].clk_id = clk_tbl[cnt].clk_id;
		clocks->clock_tbl[cnt].has_scaling = clk_tbl[cnt].scaling;
	}

	freq_tbl = core->platform->data.freq_tbl;
	freq_count = core->platform->data.freq_tbl_size;

	/* sanitize freq table before building the residency stats */
	if (freq_count && !freq_tbl) {
		d_vpr_e("%s: invalid freq tbl %pK\n", __func__, freq_tbl);
		return -EINVAL;
	}

	/* populate clk residency stats table */
	for (cnt = 0; cnt < clocks->count; cnt++) {
		/* initialize residency_list */
		INIT_LIST_HEAD(&clocks->clock_tbl[cnt].residency_list);

		/* skip if scaling not supported */
		if (!clocks->clock_tbl[cnt].has_scaling)
			continue;

		for (fcnt = 0; fcnt < freq_count; fcnt++) {
			residency = devm_kzalloc(&core->pdev->dev,
						 sizeof(struct clock_residency), GFP_KERNEL);
			if (!residency) {
				d_vpr_e("%s: failed to alloc clk residency stat node\n", __func__);
				return -ENOMEM;
			}

			/* update residency node */
			residency->rate = freq_tbl[fcnt].freq;
			residency->start_time_us = 0;
			residency->total_time_us = 0;
			INIT_LIST_HEAD(&residency->list);

			/* add entry into residency_list */
			list_add_tail(&residency->list, &clocks->clock_tbl[cnt].residency_list);
		}
	}

	/* print clock fields */
	venus_hfi_for_each_clock(core, cinfo) {
		d_vpr_h("%s: clock name %s clock id %#x scaling %d\n",
			__func__, cinfo->name, cinfo->clk_id, cinfo->has_scaling);
	}

	/* get clock handle */
	venus_hfi_for_each_clock(core, cinfo) {
		cinfo->clk = devm_clk_get(&core->pdev->dev, cinfo->name);
		if (IS_ERR_OR_NULL(cinfo->clk)) {
			d_vpr_e("%s: failed to get clock: %s\n", __func__, cinfo->name);
			rc = PTR_ERR(cinfo->clk) ?
				PTR_ERR(cinfo->clk) : -EINVAL;
			cinfo->clk = NULL;
			return rc;
		}
	}

	return rc;
}
static int __init_reset_clocks(struct msm_vidc_core *core)
{
	const struct clk_rst_table *rst_tbl;
	struct reset_set *rsts;
	struct reset_info *rinfo = NULL;
	u32 rst_count = 0, cnt = 0;
	int rc = 0;

	rsts = &core->resource->reset_set;

	rst_tbl = core->platform->data.clk_rst_tbl;
	rst_count = core->platform->data.clk_rst_tbl_size;

	if (!rst_tbl || !rst_count) {
		d_vpr_e("%s: invalid reset tbl %pK or count %u\n",
			__func__, rst_tbl, rst_count);
		return -EINVAL;
	}

	/* allocate reset_set */
	rsts->reset_tbl = devm_kzalloc(&core->pdev->dev,
				       sizeof(*rsts->reset_tbl) * rst_count, GFP_KERNEL);
	if (!rsts->reset_tbl) {
		d_vpr_e("%s: failed to alloc memory for reset table\n", __func__);
		return -ENOMEM;
	}
	rsts->count = rst_count;

	/* populate reset clock fields from platform data */
	for (cnt = 0; cnt < rsts->count; cnt++) {
		rsts->reset_tbl[cnt].name = rst_tbl[cnt].name;
		rsts->reset_tbl[cnt].exclusive_release = rst_tbl[cnt].exclusive_release;
	}

	/* print reset clock fields */
	venus_hfi_for_each_reset_clock(core, rinfo) {
		d_vpr_h("%s: reset clk %s, exclusive %d\n",
			__func__, rinfo->name, rinfo->exclusive_release);
	}

	/* get reset clock handle */
	venus_hfi_for_each_reset_clock(core, rinfo) {
		if (rinfo->exclusive_release)
			rinfo->rst = devm_reset_control_get_exclusive_released(
					&core->pdev->dev, rinfo->name);
		else
			rinfo->rst = devm_reset_control_get(&core->pdev->dev, rinfo->name);
		if (IS_ERR_OR_NULL(rinfo->rst)) {
			d_vpr_e("%s: failed to get reset clock: %s\n", __func__, rinfo->name);
			rc = PTR_ERR(rinfo->rst) ?
				PTR_ERR(rinfo->rst) : -EINVAL;
			rinfo->rst = NULL;
			return rc;
		}
	}

	return rc;
}
static int __init_subcaches(struct msm_vidc_core *core)
{
	const struct subcache_table *llcc_tbl;
	struct subcache_set *caches;
	struct subcache_info *sinfo = NULL;
	u32 llcc_count = 0, cnt = 0;
	int rc = 0;

	caches = &core->resource->subcache_set;

	/* skip init if subcache not available */
	if (!is_sys_cache_present(core))
		return 0;

	llcc_tbl = core->platform->data.subcache_tbl;
	llcc_count = core->platform->data.subcache_tbl_size;

	if (!llcc_tbl || !llcc_count) {
		d_vpr_e("%s: invalid llcc tbl %pK or count %u\n",
			__func__, llcc_tbl, llcc_count);
		return -EINVAL;
	}

	/* allocate subcache_set */
	caches->subcache_tbl = devm_kzalloc(&core->pdev->dev,
					    sizeof(*caches->subcache_tbl) * llcc_count, GFP_KERNEL);
	if (!caches->subcache_tbl) {
		d_vpr_e("%s: failed to alloc memory for subcache table\n", __func__);
		return -ENOMEM;
	}
	caches->count = llcc_count;

	/* populate subcache fields from platform data */
	for (cnt = 0; cnt < caches->count; cnt++) {
		caches->subcache_tbl[cnt].name = llcc_tbl[cnt].name;
		caches->subcache_tbl[cnt].llcc_id = llcc_tbl[cnt].llcc_id;
	}

	/* print subcache fields */
	venus_hfi_for_each_subcache(core, sinfo) {
		d_vpr_h("%s: name %s subcache id %d\n",
			__func__, sinfo->name, sinfo->llcc_id);
	}

	/* get subcache/llcc handle */
	venus_hfi_for_each_subcache(core, sinfo) {
		sinfo->subcache = devm_llcc_get(&core->pdev->dev, sinfo->llcc_id);
		if (IS_ERR_OR_NULL(sinfo->subcache)) {
			d_vpr_e("%s: failed to get subcache: %d\n", __func__, sinfo->llcc_id);
			rc = PTR_ERR(sinfo->subcache) ?
				PTR_ERR(sinfo->subcache) : -EBADHANDLE;
			sinfo->subcache = NULL;
			return rc;
		}
	}

	return rc;
}
static int __init_freq_table(struct msm_vidc_core *core)
{
	struct freq_table *freq_tbl;
	struct freq_set *clks;
	u32 freq_count = 0, cnt = 0;
	int rc = 0;

	clks = &core->resource->freq_set;

	freq_tbl = core->platform->data.freq_tbl;
	freq_count = core->platform->data.freq_tbl_size;

	if (!freq_tbl || !freq_count) {
		d_vpr_e("%s: invalid freq tbl %pK or count %u\n",
			__func__, freq_tbl, freq_count);
		return -EINVAL;
	}

	/* allocate freq_set */
	clks->freq_tbl = devm_kzalloc(&core->pdev->dev,
				      sizeof(*clks->freq_tbl) * freq_count, GFP_KERNEL);
	if (!clks->freq_tbl) {
		d_vpr_e("%s: failed to alloc memory for freq table\n", __func__);
		return -ENOMEM;
	}
	clks->count = freq_count;

	/* populate freq fields from platform data */
	for (cnt = 0; cnt < clks->count; cnt++)
		clks->freq_tbl[cnt].freq = freq_tbl[cnt].freq;

	/* sort freq table in descending order */
	sort(clks->freq_tbl, clks->count, sizeof(*clks->freq_tbl), cmp, NULL);

	/* print sorted freq_set */
	d_vpr_h("%s: updated freq table\n", __func__);
	for (cnt = 0; cnt < clks->count; cnt++)
		d_vpr_h("%s:\t %lu\n", __func__, clks->freq_tbl[cnt].freq);

	return rc;
}
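/*
 * Example (illustration only): a platform freq_tbl of
 * { 240000000, 338000000, 366000000 } is stored here as
 * { 366000000, 338000000, 240000000 }; after this point the rest of the
 * driver can rely on freq_tbl[0] being the highest supported rate and
 * freq_tbl[count - 1] the lowest.
 */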
static int __init_context_banks(struct msm_vidc_core *core)
{
	const struct context_bank_table *cb_tbl;
	struct context_bank_set *cbs;
	struct context_bank_info *cbinfo = NULL;
	u32 cb_count = 0, cnt = 0;
	int rc = 0;

	cbs = &core->resource->context_bank_set;

	cb_tbl = core->platform->data.context_bank_tbl;
	cb_count = core->platform->data.context_bank_tbl_size;

	if (!cb_tbl || !cb_count) {
		d_vpr_e("%s: invalid context bank tbl %pK or count %u\n",
			__func__, cb_tbl, cb_count);
		return -EINVAL;
	}

	/* allocate context_bank table */
	cbs->context_bank_tbl = devm_kzalloc(&core->pdev->dev,
					     sizeof(*cbs->context_bank_tbl) * cb_count, GFP_KERNEL);
	if (!cbs->context_bank_tbl) {
		d_vpr_e("%s: failed to alloc memory for context_bank table\n", __func__);
		return -ENOMEM;
	}
	cbs->count = cb_count;

	/*
	 * Populate context bank fields from platform data, except dev &
	 * domain, which are assigned as part of the context bank probe
	 * sequence.
	 */
	for (cnt = 0; cnt < cbs->count; cnt++) {
		cbs->context_bank_tbl[cnt].name = cb_tbl[cnt].name;
		cbs->context_bank_tbl[cnt].addr_range.start = cb_tbl[cnt].start;
		cbs->context_bank_tbl[cnt].addr_range.size = cb_tbl[cnt].size;
		cbs->context_bank_tbl[cnt].secure = cb_tbl[cnt].secure;
		cbs->context_bank_tbl[cnt].dma_coherant = cb_tbl[cnt].dma_coherant;
		cbs->context_bank_tbl[cnt].region = cb_tbl[cnt].region;
		cbs->context_bank_tbl[cnt].dma_mask = cb_tbl[cnt].dma_mask;
	}

	/* print context_bank fields */
	venus_hfi_for_each_context_bank(core, cbinfo) {
		d_vpr_h("%s: name %s addr start %#x size %#x secure %d "
			"coherant %d region %d dma_mask %llu\n",
			__func__, cbinfo->name, cbinfo->addr_range.start,
			cbinfo->addr_range.size, cbinfo->secure,
			cbinfo->dma_coherant, cbinfo->region, cbinfo->dma_mask);
	}

	return rc;
}
static int __init_device_region(struct msm_vidc_core *core)
{
	const struct device_region_table *dev_reg_tbl;
	struct device_region_set *dev_set;
	struct device_region_info *dev_reg_info;
	u32 dev_reg_count = 0, cnt = 0;
	int rc = 0;

	dev_set = &core->resource->device_region_set;

	dev_reg_tbl = core->platform->data.dev_reg_tbl;
	dev_reg_count = core->platform->data.dev_reg_tbl_size;

	if (!dev_reg_tbl || !dev_reg_count) {
		d_vpr_h("%s: device regions not available\n", __func__);
		return 0;
	}

	/* allocate device region table */
	dev_set->device_region_tbl = devm_kzalloc(&core->pdev->dev,
						  sizeof(*dev_set->device_region_tbl) * dev_reg_count, GFP_KERNEL);
	if (!dev_set->device_region_tbl) {
		d_vpr_e("%s: failed to alloc memory for device region table\n", __func__);
		return -ENOMEM;
	}
	dev_set->count = dev_reg_count;

	/* populate device region fields from platform data */
	for (cnt = 0; cnt < dev_set->count; cnt++) {
		dev_set->device_region_tbl[cnt].name = dev_reg_tbl[cnt].name;
		dev_set->device_region_tbl[cnt].phy_addr = dev_reg_tbl[cnt].phy_addr;
		dev_set->device_region_tbl[cnt].size = dev_reg_tbl[cnt].size;
		dev_set->device_region_tbl[cnt].dev_addr = dev_reg_tbl[cnt].dev_addr;
		dev_set->device_region_tbl[cnt].region = dev_reg_tbl[cnt].region;
	}

	/* print device region fields */
	venus_hfi_for_each_device_region(core, dev_reg_info) {
		d_vpr_h("%s: name %s phy_addr %#x size %#x dev_addr %#x dev_region %d\n",
			__func__, dev_reg_info->name, dev_reg_info->phy_addr, dev_reg_info->size,
			dev_reg_info->dev_addr, dev_reg_info->region);
	}

	return rc;
}
#ifdef CONFIG_MSM_MMRM
static int __register_mmrm(struct msm_vidc_core *core)
{
	int rc = 0;
	struct clock_info *cl;

	/* skip if platform does not support mmrm */
	if (!is_mmrm_supported(core)) {
		d_vpr_h("%s: MMRM not supported\n", __func__);
		return 0;
	}

	/* get mmrm handle for each clock source */
	venus_hfi_for_each_clock(core, cl) {
		struct mmrm_client_desc desc;
		char *name = (char *)desc.client_info.desc.name;
		/* TODO: set notifier data vals */
		struct mmrm_client_notifier_data notifier_data = {
			MMRM_CLIENT_RESOURCE_VALUE_CHANGE,
			{{0, 0}},
			NULL};
		/* TODO: add callback fn */
		desc.notifier_callback_fn = NULL;

		if (!cl->has_scaling)
			continue;

		if (IS_ERR_OR_NULL(cl->clk)) {
			d_vpr_e("%s: Invalid clock: %s\n", __func__, cl->name);
			return PTR_ERR(cl->clk) ? PTR_ERR(cl->clk) : -EINVAL;
		}

		desc.client_type = MMRM_CLIENT_CLOCK;
		desc.client_info.desc.client_domain = MMRM_CLIENT_DOMAIN_VIDEO;
		desc.client_info.desc.client_id = cl->clk_id;
		strscpy(name, cl->name, sizeof(desc.client_info.desc.name));
		desc.client_info.desc.clk = cl->clk;
		desc.priority = MMRM_CLIENT_PRIOR_LOW;
		desc.pvt_data = notifier_data.pvt_data;

		d_vpr_h("%s: domain(%d) cid(%d) name(%s) clk(%pK)\n",
			__func__,
			desc.client_info.desc.client_domain,
			desc.client_info.desc.client_id,
			desc.client_info.desc.name,
			desc.client_info.desc.clk);
		d_vpr_h("%s: type(%d) pri(%d) pvt(%pK) notifier(%pK)\n",
			__func__,
			desc.client_type,
			desc.priority,
			desc.pvt_data,
			desc.notifier_callback_fn);

		cl->mmrm_client = devm_mmrm_get(&core->pdev->dev, &desc);
		if (!cl->mmrm_client) {
			d_vpr_e("%s: Failed to register clk(%s): %d\n",
				__func__, cl->name, rc);
			return -EINVAL;
		}
	}

	return rc;
}
#else
static int __register_mmrm(struct msm_vidc_core *core)
{
	return 0;
}
#endif
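/*
 * A minimal sketch (illustration only, not part of the driver) of what
 * could replace the two TODOs in __register_mmrm() above. It assumes the
 * MMRM notifier callback takes the struct mmrm_client_notifier_data that
 * is populated there; the callback name and body are hypothetical:
 *
 *	static int venus_mmrm_notifier(struct mmrm_client_notifier_data *data)
 *	{
 *		// react to MMRM_CLIENT_RESOURCE_VALUE_CHANGE here, e.g.
 *		// re-validate the current clock vote for the affected client
 *		return 0;
 *	}
 *
 * with desc.notifier_callback_fn = venus_mmrm_notifier; set in the loop.
 */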
static int __enable_power_domains(struct msm_vidc_core *core, const char *name)
{
	struct power_domain_info *pdinfo = NULL;
	int rc = 0;

	/* power up rails (mxc & mmcx) to enable RCG (video_cc_mvs0_clk_src) */
	rc = __opp_set_rate(core, ULONG_MAX);
	if (rc) {
		d_vpr_e("%s: opp setrate failed\n", __func__);
		return rc;
	}

	/* power up (gdsc0/gdsc0c) to enable (mvs0/mvs0c) branch clock */
	venus_hfi_for_each_power_domain(core, pdinfo) {
		if (strcmp(pdinfo->name, name))
			continue;

		rc = pm_runtime_get_sync(pdinfo->genpd_dev);
		if (rc < 0) {
			d_vpr_e("%s: failed to get sync: %s\n", __func__, pdinfo->name);
			return rc;
		}
		d_vpr_h("%s: enabled power domain %s\n", __func__, pdinfo->name);
	}

	return rc;
}

static int __disable_power_domains(struct msm_vidc_core *core, const char *name)
{
	struct power_domain_info *pdinfo = NULL;
	int rc = 0;

	/* power down (gdsc0/gdsc0c) to disable (mvs0/mvs0c) branch clock */
	venus_hfi_for_each_power_domain(core, pdinfo) {
		if (strcmp(pdinfo->name, name))
			continue;

		rc = pm_runtime_put_sync(pdinfo->genpd_dev);
		if (rc) {
			d_vpr_e("%s: failed to put sync: %s\n", __func__, pdinfo->name);
			return rc;
		}
		d_vpr_h("%s: disabled power domain %s\n", __func__, pdinfo->name);
	}

	/* power down rails (mxc & mmcx) to disable RCG (video_cc_mvs0_clk_src) */
	rc = __opp_set_rate(core, 0);
	if (rc) {
		d_vpr_e("%s: opp setrate failed\n", __func__);
		return rc;
	}
	msm_vidc_change_core_sub_state(core, CORE_SUBSTATE_GDSC_HANDOFF, 0, __func__);

	return rc;
}
static int __hand_off_power_domains(struct msm_vidc_core *core)
{
	msm_vidc_change_core_sub_state(core, 0, CORE_SUBSTATE_GDSC_HANDOFF, __func__);
	return 0;
}

static int __acquire_power_domains(struct msm_vidc_core *core)
{
	msm_vidc_change_core_sub_state(core, CORE_SUBSTATE_GDSC_HANDOFF, 0, __func__);
	return 0;
}
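/*
 * Note: the hand-off/acquire pair above does not touch the GDSCs directly;
 * it only toggles the CORE_SUBSTATE_GDSC_HANDOFF flag (set when control is
 * handed off as the gdsc_hw_ctrl op, cleared when software re-acquires it
 * as gdsc_sw_ctrl), and __disable_power_domains() clears the same flag on
 * the way down.
 */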
static int __disable_subcaches(struct msm_vidc_core *core)
{
	struct subcache_info *sinfo;
	int rc = 0;

	if (msm_vidc_syscache_disable || !is_sys_cache_present(core))
		return 0;

	/* De-activate subcaches */
	venus_hfi_for_each_subcache_reverse(core, sinfo) {
		if (!sinfo->isactive)
			continue;

		d_vpr_h("%s: De-activate subcache %s\n", __func__, sinfo->name);
		rc = llcc_slice_deactivate(sinfo->subcache);
		if (rc) {
			d_vpr_e("Failed to de-activate %s: %d\n",
				sinfo->name, rc);
		}
		sinfo->isactive = false;
	}

	return 0;
}

static int __enable_subcaches(struct msm_vidc_core *core)
{
	int rc = 0;
	u32 c = 0;
	struct subcache_info *sinfo;

	if (msm_vidc_syscache_disable || !is_sys_cache_present(core))
		return 0;

	/* Activate subcaches */
	venus_hfi_for_each_subcache(core, sinfo) {
		rc = llcc_slice_activate(sinfo->subcache);
		if (rc) {
			d_vpr_e("Failed to activate %s: %d\n", sinfo->name, rc);
			__fatal_error(true);
			goto err_activate_fail;
		}
		sinfo->isactive = true;
		d_vpr_h("Activated subcache %s\n", sinfo->name);
		c++;
	}

	d_vpr_h("Activated %d Subcaches to Venus\n", c);

	return 0;

err_activate_fail:
	__disable_subcaches(core);
	return rc;
}

static int llcc_enable(struct msm_vidc_core *core, bool enable)
{
	int ret;

	if (enable)
		ret = __enable_subcaches(core);
	else
		ret = __disable_subcaches(core);

	return ret;
}
static int __vote_bandwidth(struct bus_info *bus, unsigned long bw_kbps)
{
	int rc = 0;

	if (!bus->icc) {
		d_vpr_e("%s: invalid bus\n", __func__);
		return -EINVAL;
	}

	d_vpr_p("Voting bus %s to ab %lu kBps\n", bus->name, bw_kbps);

	rc = icc_set_bw(bus->icc, bw_kbps, 0);
	if (rc)
		d_vpr_e("Failed voting bus %s to ab %lu, rc=%d\n",
			bus->name, bw_kbps, rc);

	return rc;
}

static int __unvote_buses(struct msm_vidc_core *core)
{
	int rc = 0;
	struct bus_info *bus = NULL;

	core->power.bw_ddr = 0;
	core->power.bw_llcc = 0;

	venus_hfi_for_each_bus(core, bus) {
		rc = __vote_bandwidth(bus, 0);
		if (rc)
			goto err_unknown_device;
	}

err_unknown_device:
	return rc;
}

static int __vote_buses(struct msm_vidc_core *core,
			unsigned long bw_ddr, unsigned long bw_llcc)
{
	int rc = 0;
	struct bus_info *bus = NULL;
	unsigned long bw_kbps = 0, bw_prev = 0;
	enum vidc_bus_type type;

	venus_hfi_for_each_bus(core, bus) {
		if (bus && bus->icc) {
			type = get_type_frm_name(bus->name);

			if (type == DDR) {
				bw_kbps = bw_ddr;
				bw_prev = core->power.bw_ddr;
			} else if (type == LLCC) {
				bw_kbps = bw_llcc;
				bw_prev = core->power.bw_llcc;
			} else {
				bw_kbps = bus->max_kbps;
				bw_prev = core->power.bw_ddr ?
					bw_kbps : 0;
			}

			/* ensure the vote is within the bus limits */
			bw_kbps = clamp_t(typeof(bw_kbps), bw_kbps,
					  bus->min_kbps, bus->max_kbps);

			if (TRIVIAL_BW_CHANGE(bw_kbps, bw_prev) && bw_prev) {
				d_vpr_l("Skip voting bus %s to %lu kBps\n",
					bus->name, bw_kbps);
				continue;
			}

			rc = __vote_bandwidth(bus, bw_kbps);

			if (type == DDR)
				core->power.bw_ddr = bw_kbps;
			else if (type == LLCC)
				core->power.bw_llcc = bw_kbps;
		} else {
			d_vpr_e("No BUS to Vote\n");
		}
	}

	return rc;
}

static int set_bw(struct msm_vidc_core *core, unsigned long bw_ddr,
		  unsigned long bw_llcc)
{
	if (!bw_ddr && !bw_llcc)
		return __unvote_buses(core);

	return __vote_buses(core, bw_ddr, bw_llcc);
}
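/*
 * Example (illustration only): set_bw(core, 1500000, 800000) requests
 * 1.5 GBps on the DDR path and 0.8 GBps on the LLCC path; each value is
 * first clamped to the bus's [min_kbps, max_kbps] window, and a repeat
 * call whose clamped values differ from the cached votes by less than
 * TRIVIAL_BW_THRESHOLD is skipped entirely. set_bw(core, 0, 0) drops every
 * vote to zero via __unvote_buses().
 */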
static int print_residency_stats(struct msm_vidc_core *core, struct clock_info *cl)
{
	struct clock_residency *residency = NULL;
	u64 total_time_us = 0;
	int rc = 0;

	/* skip if scaling not supported */
	if (!cl->has_scaling)
		return 0;

	/* grand total residency time */
	list_for_each_entry(residency, &cl->residency_list, list)
		total_time_us += residency->total_time_us;

	/* sanity check to avoid divide by 0 */
	total_time_us = (total_time_us > 0) ? total_time_us : 1;

	/* print residency percent for each clock rate */
	list_for_each_entry(residency, &cl->residency_list, list) {
		d_vpr_hs("%s: %s clock rate [%llu] total %lluus residency %llu%%\n",
			 __func__, cl->name, residency->rate, residency->total_time_us,
			 (residency->total_time_us * 100 + total_time_us / 2) / total_time_us);
	}

	return rc;
}
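/*
 * Worked example (illustration only): if a clock spent 1,100,000 us at one
 * rate out of a 4,000,000 us grand total, the printed percentage is
 * (1100000 * 100 + 2000000) / 4000000 = 28; the "+ total/2" term rounds
 * 27.5% to the nearest integer instead of truncating it to 27.
 */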
static int reset_residency_stats(struct msm_vidc_core *core, struct clock_info *cl)
{
	struct clock_residency *residency = NULL;
	int rc = 0;

	/* skip if scaling not supported */
	if (!cl->has_scaling)
		return 0;

	d_vpr_h("%s: reset %s residency stats\n", __func__, cl->name);

	/* reset clock residency stats */
	list_for_each_entry(residency, &cl->residency_list, list) {
		residency->start_time_us = 0;
		residency->total_time_us = 0;
	}

	/*
	 * During a reset, also update the start time of the clock's previous
	 * frequency: the previous rate may be non-zero, and if the next
	 * session starts voting at that same rate, its residency would
	 * otherwise never show up in the stats.
	 */
	residency = get_residency_stats(cl, cl->prev);
	if (residency)
		residency->start_time_us = ktime_get_ns() / 1000;

	return rc;
}
static struct clock_residency *get_residency_stats(struct clock_info *cl, u64 rate)
{
	struct clock_residency *residency = NULL;
	bool found = false;

	if (!cl) {
		d_vpr_e("%s: invalid params\n", __func__);
		return NULL;
	}

	list_for_each_entry(residency, &cl->residency_list, list) {
		if (residency->rate == rate) {
			found = true;
			break;
		}
	}

	return found ? residency : NULL;
}
static int __update_residency_stats(struct msm_vidc_core *core,
				    struct clock_info *cl, u64 rate)
{
	struct clock_residency *cur_residency = NULL, *prev_residency = NULL;
	u64 cur_time_us = 0;
	int rc = 0;

	/* skip update if high or stats logs not enabled */
	if (!(msm_vidc_debug & (VIDC_HIGH | VIDC_STAT)))
		return 0;

	/* skip update if scaling not supported */
	if (!cl->has_scaling)
		return 0;

	/* skip update if rate not changed */
	if (rate == cl->prev)
		return 0;

	/* get current time in us */
	cur_time_us = ktime_get_ns() / 1000;

	/* update previous rate residency end or total time */
	prev_residency = get_residency_stats(cl, cl->prev);
	if (prev_residency) {
		if (prev_residency->start_time_us)
			prev_residency->total_time_us += cur_time_us -
				prev_residency->start_time_us;

		/* reset start time us */
		prev_residency->start_time_us = 0;
	}

	/* clk disable case - no need to update new entry */
	if (rate == 0)
		return 0;

	/* check if rate entry is present */
	cur_residency = get_residency_stats(cl, rate);
	if (!cur_residency) {
		d_vpr_e("%s: entry not found. rate %llu\n", __func__, rate);
		return -EINVAL;
	}

	/* update residency start time for current rate/freq */
	cur_residency->start_time_us = cur_time_us;

	return rc;
}
static int __set_clk_rate(struct msm_vidc_core *core, struct clock_info *cl,
			  u64 rate)
{
	int rc = 0;

	/* update clock residency stats */
	__update_residency_stats(core, cl, rate);

	/* bail early if requested clk rate is not changed */
	if (rate == cl->prev)
		return 0;

	d_vpr_p("Scaling clock %s to %llu, prev %llu\n",
		cl->name, rate, cl->prev);

	rc = clk_set_rate(cl->clk, rate);
	if (rc) {
		d_vpr_e("%s: Failed to set clock rate %llu %s: %d\n",
			__func__, rate, cl->name, rc);
		return rc;
	}

	cl->prev = rate;

	return rc;
}
static int __set_clocks(struct msm_vidc_core *core, u64 freq)
{
	struct clock_info *cl;
	int rc = 0;

	/* scale mxc & mmcx rails */
	rc = __opp_set_rate(core, freq);
	if (rc) {
		d_vpr_e("%s: opp setrate failed %llu\n", __func__, freq);
		return rc;
	}

	venus_hfi_for_each_clock(core, cl) {
		if (cl->has_scaling) {
			rc = __set_clk_rate(core, cl, freq);
			if (rc)
				return rc;
		}
	}

	return 0;
}
static int __disable_unprepare_clock(struct msm_vidc_core *core,
				     const char *clk_name)
{
	int rc = 0;
	struct clock_info *cl;
	bool found;

	found = false;
	venus_hfi_for_each_clock(core, cl) {
		if (!cl->clk) {
			d_vpr_e("%s: invalid clock %s\n", __func__, cl->name);
			return -EINVAL;
		}
		if (strcmp(cl->name, clk_name))
			continue;

		found = true;
		clk_disable_unprepare(cl->clk);
		if (cl->has_scaling)
			__set_clk_rate(core, cl, 0);
		cl->prev = 0;
		d_vpr_h("%s: clock %s disable unprepared\n", __func__, cl->name);
		break;
	}
	if (!found) {
		d_vpr_e("%s: clock %s not found\n", __func__, clk_name);
		return -EINVAL;
	}

	return rc;
}
static int __prepare_enable_clock(struct msm_vidc_core *core,
				  const char *clk_name)
{
	int rc = 0;
	struct clock_info *cl;
	bool found;
	u64 rate = 0;

	found = false;
	venus_hfi_for_each_clock(core, cl) {
		if (!cl->clk) {
			d_vpr_e("%s: invalid clock\n", __func__);
			return -EINVAL;
		}
		if (strcmp(cl->name, clk_name))
			continue;

		found = true;
		/*
		 * For the clocks we control, set the rate prior to preparing
		 * them. Since we don't really have a load at this point,
		 * scale it to the lowest frequency possible.
		 */
		if (cl->has_scaling) {
			/* reset clk residency stats */
			reset_residency_stats(core, cl);

			rate = clk_round_rate(cl->clk, 0);
			/*
			 * The source clock rate is already multiplied by the
			 * scaling ratio, and __set_clk_rate() attempts to
			 * multiply it again. So divide by the scaling ratio
			 * before calling __set_clk_rate().
			 */
			rate = rate / MSM_VIDC_CLOCK_SOURCE_SCALING_RATIO;
			__set_clk_rate(core, cl, rate);
		}

		rc = clk_prepare_enable(cl->clk);
		if (rc) {
			d_vpr_e("%s: failed to enable clock %s\n",
				__func__, cl->name);
			return rc;
		}
		if (!__clk_is_enabled(cl->clk)) {
			d_vpr_e("%s: clock %s not enabled\n",
				__func__, cl->name);
			clk_disable_unprepare(cl->clk);
			if (cl->has_scaling)
				__set_clk_rate(core, cl, 0);
			return -EINVAL;
		}
		d_vpr_h("%s: clock %s prepare enabled\n", __func__, cl->name);
		break;
	}
	if (!found) {
		d_vpr_e("%s: clock %s not found\n", __func__, clk_name);
		return -EINVAL;
	}

	return rc;
}
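/*
 * Worked example (illustration only, with a hypothetical scaling ratio of
 * 3): if clk_round_rate(cl->clk, 0) reports 720 MHz for the source clock,
 * the rate handed to __set_clk_rate() is 720 / 3 = 240 MHz, so the ratio
 * ends up applied exactly once on the way back down to the hardware.
 */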
static int __init_resources(struct msm_vidc_core *core)
{
	int rc = 0;

	rc = __init_register_base(core);
	if (rc)
		return rc;

	rc = __init_irq(core);
	if (rc)
		return rc;

	rc = __init_bus(core);
	if (rc)
		return rc;

	rc = call_res_op(core, gdsc_init, core);
	if (rc)
		return rc;

	rc = __init_clocks(core);
	if (rc)
		return rc;

	rc = __init_reset_clocks(core);
	if (rc)
		return rc;

	rc = __init_subcaches(core);
	if (rc)
		return rc;

	rc = __init_freq_table(core);
	if (rc)
		return rc;

	rc = __init_context_banks(core);
	if (rc)
		return rc;

	rc = __init_device_region(core);
	if (rc)
		return rc;

	rc = __register_mmrm(core);
	if (rc)
		return rc;

	return rc;
}
static int __reset_control_acquire_name(struct msm_vidc_core *core,
					const char *name)
{
	struct reset_info *rcinfo = NULL;
	int rc = 0;
	bool found = false;

	venus_hfi_for_each_reset_clock(core, rcinfo) {
		if (strcmp(rcinfo->name, name))
			continue;

		/* this function is valid only for exclusive_release reset clocks */
		if (!rcinfo->exclusive_release) {
			d_vpr_e("%s: unsupported reset control (%s), exclusive %d\n",
				__func__, name, rcinfo->exclusive_release);
			return -EINVAL;
		}

		found = true;
		/* reset_control_acquire is exposed in kernel version 6 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 0, 0))
		rc = reset_control_acquire(rcinfo->rst);
#else
		rc = -EINVAL;
#endif
		if (rc)
			d_vpr_e("%s: failed to acquire reset control (%s), rc = %d\n",
				__func__, rcinfo->name, rc);
		else
			d_vpr_h("%s: acquire reset control (%s)\n",
				__func__, rcinfo->name);
		break;
	}
	if (!found) {
		d_vpr_e("%s: reset control (%s) not found\n", __func__, name);
		rc = -EINVAL;
	}

	return rc;
}
static int __reset_control_release_name(struct msm_vidc_core *core,
					const char *name)
{
	struct reset_info *rcinfo = NULL;
	int rc = 0;
	bool found = false;

	venus_hfi_for_each_reset_clock(core, rcinfo) {
		if (strcmp(rcinfo->name, name))
			continue;

		/* this function is valid only for exclusive_release reset clocks */
		if (!rcinfo->exclusive_release) {
			d_vpr_e("%s: unsupported reset control (%s), exclusive %d\n",
				__func__, name, rcinfo->exclusive_release);
			return -EINVAL;
		}

		found = true;
		/* reset_control_release is exposed in kernel version 6 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 0, 0))
		reset_control_release(rcinfo->rst);
#else
		rc = -EINVAL;
#endif
		if (rc)
			d_vpr_e("%s: release reset control (%s) failed\n",
				__func__, rcinfo->name);
		else
			d_vpr_h("%s: release reset control (%s) done\n",
				__func__, rcinfo->name);
		break;
	}
	if (!found) {
		d_vpr_e("%s: reset control (%s) not found\n", __func__, name);
		rc = -EINVAL;
	}

	return rc;
}
static int __reset_control_assert_name(struct msm_vidc_core *core,
				       const char *name)
{
	struct reset_info *rcinfo = NULL;
	int rc = 0;
	bool found = false;

	venus_hfi_for_each_reset_clock(core, rcinfo) {
		if (strcmp(rcinfo->name, name))
			continue;

		found = true;
		rc = reset_control_assert(rcinfo->rst);
		if (rc)
			d_vpr_e("%s: failed to assert reset control (%s), rc = %d\n",
				__func__, rcinfo->name, rc);
		else
			d_vpr_h("%s: assert reset control (%s)\n",
				__func__, rcinfo->name);
		break;
	}
	if (!found) {
		d_vpr_e("%s: reset control (%s) not found\n", __func__, name);
		rc = -EINVAL;
	}

	return rc;
}

static int __reset_control_deassert_name(struct msm_vidc_core *core,
					 const char *name)
{
	struct reset_info *rcinfo = NULL;
	int rc = 0;
	bool found = false;

	venus_hfi_for_each_reset_clock(core, rcinfo) {
		if (strcmp(rcinfo->name, name))
			continue;

		found = true;
		rc = reset_control_deassert(rcinfo->rst);
		if (rc)
			d_vpr_e("%s: deassert reset control for (%s) failed, rc %d\n",
				__func__, rcinfo->name, rc);
		else
			d_vpr_h("%s: deassert reset control (%s)\n",
				__func__, rcinfo->name);
		break;
	}
	if (!found) {
		d_vpr_e("%s: reset control (%s) not found\n", __func__, name);
		rc = -EINVAL;
	}

	return rc;
}
static int __reset_control_deassert(struct msm_vidc_core *core)
{
	struct reset_info *rcinfo = NULL;
	int rc = 0;

	venus_hfi_for_each_reset_clock(core, rcinfo) {
		rc = reset_control_deassert(rcinfo->rst);
		if (rc) {
			d_vpr_e("%s: deassert reset control failed. rc = %d\n", __func__, rc);
			continue;
		}
		d_vpr_h("%s: deassert reset control %s\n", __func__, rcinfo->name);
	}

	return rc;
}

static int __reset_control_assert(struct msm_vidc_core *core)
{
	struct reset_info *rcinfo = NULL;
	int rc = 0, cnt = 0;

	venus_hfi_for_each_reset_clock(core, rcinfo) {
		if (!rcinfo->rst) {
			d_vpr_e("%s: invalid reset clock %s\n",
				__func__, rcinfo->name);
			return -EINVAL;
		}
		rc = reset_control_assert(rcinfo->rst);
		if (rc) {
			d_vpr_e("%s: failed to assert reset control %s, rc = %d\n",
				__func__, rcinfo->name, rc);
			goto deassert_reset_control;
		}
		cnt++;
		d_vpr_h("%s: assert reset control %s, count %d\n", __func__, rcinfo->name, cnt);

		usleep_range(1000, 1100);
	}

	return rc;

deassert_reset_control:
	venus_hfi_for_each_reset_clock_reverse_continue(core, rcinfo, cnt) {
		d_vpr_e("%s: deassert reset control %s\n", __func__, rcinfo->name);
		reset_control_deassert(rcinfo->rst);
	}

	return rc;
}
static int __reset_ahb2axi_bridge(struct msm_vidc_core *core)
{
	int rc = 0;

	rc = __reset_control_assert(core);
	if (rc)
		return rc;

	rc = __reset_control_deassert(core);
	if (rc)
		return rc;

	return rc;
}
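/*
 * Note: the bridge reset is a full assert-then-deassert cycle across every
 * reset control in the table. __reset_control_assert() above sleeps ~1 ms
 * after each assert and unwinds (deasserts in reverse order) if any assert
 * fails, so a partially asserted bridge is never left behind.
 */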
static int __print_clock_residency_stats(struct msm_vidc_core *core)
{
	struct clock_info *cl;
	int rc = 0;

	venus_hfi_for_each_clock(core, cl) {
		/* skip if scaling not supported */
		if (!cl->has_scaling)
			continue;

		/*
		 * Residency for the last clock corner is accounted in the
		 * stats only if __update_residency_stats() is called with
		 * rate 0, which closes out the currently running interval.
		 */
		__update_residency_stats(core, cl, 0);

		/* print clock residency stats */
		print_residency_stats(core, cl);
	}

	return rc;
}

static int __reset_clock_residency_stats(struct msm_vidc_core *core)
{
	struct clock_info *cl;
	int rc = 0;

	venus_hfi_for_each_clock(core, cl) {
		/* skip if scaling not supported */
		if (!cl->has_scaling)
			continue;

		/* reset clock residency stats */
		reset_residency_stats(core, cl);
	}

	return rc;
}
static const struct msm_vidc_resources_ops res_ops = {
	.init = __init_resources,
	.reset_bridge = __reset_ahb2axi_bridge,
	.reset_control_acquire = __reset_control_acquire_name,
	.reset_control_release = __reset_control_release_name,
	.reset_control_assert = __reset_control_assert_name,
	.reset_control_deassert = __reset_control_deassert_name,
	.gdsc_init = __init_power_domains,
	.gdsc_on = __enable_power_domains,
	.gdsc_off = __disable_power_domains,
	.gdsc_hw_ctrl = __hand_off_power_domains,
	.gdsc_sw_ctrl = __acquire_power_domains,
	.llcc = llcc_enable,
	.set_bw = set_bw,
	.set_clks = __set_clocks,
	.clk_enable = __prepare_enable_clock,
	.clk_disable = __disable_unprepare_clock,
	.clk_print_residency_stats = __print_clock_residency_stats,
	.clk_reset_residency_stats = __reset_clock_residency_stats,
	.clk_update_residency_stats = __update_residency_stats,
};

const struct msm_vidc_resources_ops *get_resources_ops(void)
{
	return &res_ops;
}
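/*
 * Usage sketch (illustration only): callers reach these ops through the
 * core handle rather than calling the static functions directly, in the
 * same style as the call_res_op() invocation in __init_resources() above.
 * Assuming the core is wired to get_resources_ops() during probe, a
 * power-on path might look like (power domain and clock names are
 * hypothetical):
 *
 *	rc = call_res_op(core, gdsc_on, core, "venus");
 *	if (!rc)
 *		rc = call_res_op(core, clk_enable, core, "core_clk");
 *	if (!rc)
 *		rc = call_res_op(core, set_clks, core, 240000000);
 */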