resources.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/sort.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <linux/reset.h>
#include <linux/interconnect.h>
#include <linux/soc/qcom/llcc-qcom.h>
#ifdef CONFIG_MSM_MMRM
#include <linux/soc/qcom/msm_mmrm.h>
#endif

#include "msm_vidc_core.h"
#include "msm_vidc_power.h"
#include "msm_vidc_debug.h"
#include "msm_vidc_driver.h"
#include "msm_vidc_platform.h"
#include "venus_hfi.h"

/* Less than 50MBps is treated as trivial BW change */
#define TRIVIAL_BW_THRESHOLD 50000
#define TRIVIAL_BW_CHANGE(a, b) \
        ((a) > (b) ? (a) - (b) < TRIVIAL_BW_THRESHOLD : \
                (b) - (a) < TRIVIAL_BW_THRESHOLD)

static struct clock_residency *get_residency_stats(struct clock_info *cl, u64 rate);
static int __update_residency_stats(struct msm_vidc_core *core,
                                    struct clock_info *cl, u64 rate);

enum reset_state {
        INIT = 1,
        ASSERT,
        DEASSERT,
};

/* A comparator to compare loads (needed later on) */
static inline int cmp(const void *a, const void *b)
{
        /* want to sort in reverse so flip the comparison */
        return ((struct freq_table *)b)->freq -
                ((struct freq_table *)a)->freq;
}

static void __fatal_error(bool fatal)
{
        WARN_ON(fatal);
}

static void devm_llcc_release(void *res)
{
        d_vpr_h("%s()\n", __func__);
        llcc_slice_putd((struct llcc_slice_desc *)res);
}

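/*
 * devm-managed wrapper around llcc_slice_getd(): the slice is put back
 * automatically via the registered release action when the device is
 * removed, so callers never invoke llcc_slice_putd() directly.
 */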
static struct llcc_slice_desc *devm_llcc_get(struct device *dev, u32 id)
{
        struct llcc_slice_desc *llcc = NULL;
        int rc = 0;

        llcc = llcc_slice_getd(id);
        if (!llcc)
                return NULL;

        /**
         * register release callback with devm, so that when device goes
         * out of scope (during remove sequence), devm will take care of
         * the de-register part by invoking the release callback.
         */
        rc = devm_add_action_or_reset(dev, devm_llcc_release, (void *)llcc);
        if (rc)
                return NULL;

        return llcc;
}

#ifdef CONFIG_MSM_MMRM
static void devm_mmrm_release(void *res)
{
        d_vpr_h("%s()\n", __func__);
        mmrm_client_deregister((struct mmrm_client *)res);
}

static struct mmrm_client *devm_mmrm_get(struct device *dev, struct mmrm_client_desc *desc)
{
        struct mmrm_client *mmrm = NULL;
        int rc = 0;

        mmrm = mmrm_client_register(desc);
        if (!mmrm)
                return NULL;

        /**
         * register release callback with devm, so that when device goes
         * out of scope (during remove sequence), devm will take care of
         * the de-register part by invoking the release callback.
         */
        rc = devm_add_action_or_reset(dev, devm_mmrm_release, (void *)mmrm);
        if (rc)
                return NULL;

        return mmrm;
}
#endif

static void devm_pd_release(void *res)
{
        struct device *pd = (struct device *)res;

        d_vpr_h("%s(): %s\n", __func__, dev_name(pd));
        dev_pm_domain_detach(pd, true);
}

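/*
 * devm-managed power-domain attach: the domain obtained via
 * dev_pm_domain_attach_by_name() is detached automatically on remove.
 */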
static struct device *devm_pd_get(struct device *dev, const char *name)
{
        struct device *pd = NULL;
        int rc = 0;

        pd = dev_pm_domain_attach_by_name(dev, name);
        if (!pd) {
                d_vpr_e("%s: pm domain attach failed %s\n", __func__, name);
                return NULL;
        }

        rc = devm_add_action_or_reset(dev, devm_pd_release, (void *)pd);
        if (rc) {
                d_vpr_e("%s: add action or reset failed %s\n", __func__, name);
                return NULL;
        }

        return pd;
}

static void devm_opp_dl_release(void *res)
{
        struct device_link *link = (struct device_link *)res;

        d_vpr_h("%s(): %s\n", __func__, dev_name(&link->link_dev));
        device_link_del(link);
}

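/*
 * Create a stateless, runtime-PM-managed device link from the consumer
 * to an OPP supplier device, and register a devm action to delete the
 * link on remove.
 */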
static int devm_opp_dl_get(struct device *dev, struct device *supplier)
{
        u32 flag = DL_FLAG_RPM_ACTIVE | DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS;
        struct device_link *link = NULL;
        int rc = 0;

        link = device_link_add(dev, supplier, flag);
        if (!link) {
                d_vpr_e("%s: device link add failed\n", __func__);
                return -EINVAL;
        }

        rc = devm_add_action_or_reset(dev, devm_opp_dl_release, (void *)link);
        if (rc) {
                d_vpr_e("%s: add action or reset failed\n", __func__);
                return rc;
        }

        return rc;
}

static void devm_pm_runtime_put_sync(void *res)
{
        struct device *dev = (struct device *)res;

        d_vpr_h("%s(): %s\n", __func__, dev_name(dev));
        pm_runtime_put_sync(dev);
}

static int devm_pm_runtime_get_sync(struct device *dev)
{
        int rc = 0;

        rc = pm_runtime_get_sync(dev);
        if (rc < 0) {
                d_vpr_e("%s: pm domain get sync failed\n", __func__);
                return rc;
        }

        rc = devm_add_action_or_reset(dev, devm_pm_runtime_put_sync, (void *)dev);
        if (rc) {
                d_vpr_e("%s: add action or reset failed\n", __func__);
                return rc;
        }

        return rc;
}

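/*
 * Set the device OPP rate: look up the ceiling entry for the requested
 * frequency (falling back to the floor entry when the request is above
 * the table), then let the OPP framework scale the mxc & mmcx rails.
 */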
static int __opp_set_rate(struct msm_vidc_core *core, u64 freq)
{
        unsigned long opp_freq = 0;
        struct dev_pm_opp *opp;
        int rc = 0;

        opp_freq = freq;

        /* find max(ceil) freq from opp table */
        opp = dev_pm_opp_find_freq_ceil(&core->pdev->dev, &opp_freq);
        if (IS_ERR(opp)) {
                opp = dev_pm_opp_find_freq_floor(&core->pdev->dev, &opp_freq);
                if (IS_ERR(opp)) {
                        d_vpr_e("%s: unable to find freq %lld in opp table\n", __func__, freq);
                        return -EINVAL;
                }
        }
        dev_pm_opp_put(opp);

        /* print freq value */
        d_vpr_h("%s: set rate %lld (requested %lld)\n",
                __func__, opp_freq, freq);

        /* scale freq to power up mxc & mmcx */
        rc = dev_pm_opp_set_rate(&core->pdev->dev, opp_freq);
        if (rc) {
                d_vpr_e("%s: failed to set rate\n", __func__);
                return rc;
        }

        return rc;
}

static int __init_register_base(struct msm_vidc_core *core)
{
        struct msm_vidc_resource *res;

        res = core->resource;

        res->register_base_addr = devm_platform_ioremap_resource(core->pdev, 0);
        if (IS_ERR(res->register_base_addr)) {
                d_vpr_e("%s: map reg addr failed %ld\n",
                        __func__, PTR_ERR(res->register_base_addr));
                return -EINVAL;
        }
        d_vpr_h("%s: reg_base %#x\n", __func__, res->register_base_addr);

        return 0;
}

static int __init_irq(struct msm_vidc_core *core)
{
        struct msm_vidc_resource *res;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0))
        struct resource *kres;
#endif
        int rc = 0;

        res = core->resource;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0))
        res->irq = platform_get_irq(core->pdev, 0);
#else
        kres = platform_get_resource(core->pdev, IORESOURCE_IRQ, 0);
        res->irq = kres ? kres->start : -1;
#endif
        if (res->irq < 0)
                d_vpr_e("%s: get irq failed, %d\n", __func__, res->irq);

        d_vpr_h("%s: irq %d\n", __func__, res->irq);

        rc = devm_request_threaded_irq(&core->pdev->dev, res->irq, venus_hfi_isr,
                                       venus_hfi_isr_handler, IRQF_TRIGGER_HIGH,
                                       "msm-vidc", core);
        if (rc) {
                d_vpr_e("%s: Failed to allocate venus IRQ\n", __func__);
                return rc;
        }
        disable_irq_nosync(res->irq);

        return rc;
}

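/*
 * Populate the bus (interconnect) set from platform data and acquire an
 * ICC path handle for each entry; the "venus-llcc" path is skipped when
 * system-cache usage is disabled via msm_vidc_syscache_disable.
 */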
static int __init_bus(struct msm_vidc_core *core)
{
        const struct bw_table *bus_tbl;
        struct bus_set *interconnects;
        struct bus_info *binfo = NULL;
        u32 bus_count = 0, cnt = 0;
        int rc = 0;

        interconnects = &core->resource->bus_set;

        bus_tbl = core->platform->data.bw_tbl;
        bus_count = core->platform->data.bw_tbl_size;

        if (!bus_tbl || !bus_count) {
                d_vpr_e("%s: invalid bus tbl %#x or count %d\n",
                        __func__, bus_tbl, bus_count);
                return -EINVAL;
        }

        /* allocate bus_set */
        interconnects->bus_tbl = devm_kzalloc(&core->pdev->dev,
                sizeof(*interconnects->bus_tbl) * bus_count, GFP_KERNEL);
        if (!interconnects->bus_tbl) {
                d_vpr_e("%s: failed to alloc memory for bus table\n", __func__);
                return -ENOMEM;
        }
        interconnects->count = bus_count;

        /* populate bus fields from platform data */
        for (cnt = 0; cnt < interconnects->count; cnt++) {
                interconnects->bus_tbl[cnt].name = bus_tbl[cnt].name;
                interconnects->bus_tbl[cnt].min_kbps = bus_tbl[cnt].min_kbps;
                interconnects->bus_tbl[cnt].max_kbps = bus_tbl[cnt].max_kbps;
        }

        /* print bus fields */
        venus_hfi_for_each_bus(core, binfo) {
                d_vpr_h("%s: name %s min_kbps %u max_kbps %u\n",
                        __func__, binfo->name, binfo->min_kbps, binfo->max_kbps);
        }

        /* get interconnect handle */
        venus_hfi_for_each_bus(core, binfo) {
                if (!strcmp(binfo->name, "venus-llcc")) {
                        if (msm_vidc_syscache_disable) {
                                d_vpr_h("%s: skipping LLC bus init: %s\n", __func__,
                                        binfo->name);
                                continue;
                        }
                }
                binfo->icc = devm_of_icc_get(&core->pdev->dev, binfo->name);
                if (IS_ERR_OR_NULL(binfo->icc)) {
                        d_vpr_e("%s: failed to get bus: %s\n", __func__, binfo->name);
                        rc = PTR_ERR(binfo->icc) ?
                                PTR_ERR(binfo->icc) : -EBADHANDLE;
                        binfo->icc = NULL;
                        return rc;
                }
        }

        return rc;
}

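/*
 * Acquire the GDSC power domains listed in platform data, set up the
 * OPP power domains (rails) and device links, register the OPP table
 * from DT, and enable runtime PM so the rails can be scaled through
 * dev_pm_opp_set_rate(). Note: the genpd attach call is currently
 * commented out below, so OPP setup bails out early with -EINVAL.
 */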
static int __init_power_domains(struct msm_vidc_core *core)
{
        struct power_domain_info *pdinfo = NULL;
        const struct pd_table *pd_tbl;
        struct power_domain_set *pds;
        struct device **opp_vdevs = NULL;
        const char * const *opp_tbl;
        u32 pd_count = 0, opp_count = 0, cnt = 0;
        int rc = 0;

        pds = &core->resource->power_domain_set;

        pd_tbl = core->platform->data.pd_tbl;
        pd_count = core->platform->data.pd_tbl_size;

        /* skip init if power domain not supported */
        if (!pd_count) {
                d_vpr_h("%s: power domain entries not available in db\n", __func__);
                return 0;
        }

        /* sanitize power domain table */
        if (!pd_tbl) {
                d_vpr_e("%s: invalid power domain tbl\n", __func__);
                return -EINVAL;
        }

        /* allocate power_domain_set */
        pds->power_domain_tbl = devm_kzalloc(&core->pdev->dev,
                sizeof(*pds->power_domain_tbl) * pd_count, GFP_KERNEL);
        if (!pds->power_domain_tbl) {
                d_vpr_e("%s: failed to alloc memory for pd table\n", __func__);
                return -ENOMEM;
        }
        pds->count = pd_count;

        /* populate power domain fields */
        for (cnt = 0; cnt < pds->count; cnt++)
                pds->power_domain_tbl[cnt].name = pd_tbl[cnt].name;

        /* print power domain fields */
        venus_hfi_for_each_power_domain(core, pdinfo)
                d_vpr_h("%s: pd name %s\n", __func__, pdinfo->name);

        /* get power domain handle */
        venus_hfi_for_each_power_domain(core, pdinfo) {
                pdinfo->genpd_dev = devm_pd_get(&core->pdev->dev, pdinfo->name);
                if (IS_ERR_OR_NULL(pdinfo->genpd_dev)) {
                        rc = PTR_ERR(pdinfo->genpd_dev) ?
                                PTR_ERR(pdinfo->genpd_dev) : -EBADHANDLE;
                        d_vpr_e("%s: failed to get pd: %s\n", __func__, pdinfo->name);
                        pdinfo->genpd_dev = NULL;
                        return rc;
                }
        }

        opp_tbl = core->platform->data.opp_tbl;
        opp_count = core->platform->data.opp_tbl_size;

        /* skip init if opp not supported */
        if (opp_count < 2) {
                d_vpr_h("%s: opp entries not available\n", __func__);
                return 0;
        }

        /* sanitize opp table */
        if (!opp_tbl) {
                d_vpr_e("%s: invalid opp table\n", __func__);
                return -EINVAL;
        }

        /* ignore NULL entry at the end of table */
        opp_count -= 1;

        /* print opp table entries */
        for (cnt = 0; cnt < opp_count; cnt++)
                d_vpr_h("%s: opp name %s\n", __func__, opp_tbl[cnt]);

        /*
         * populate opp power domains (for rails); the attach call is
         * disabled, so this path currently fails with -EINVAL.
         */
        //rc = devm_pm_opp_attach_genpd(&core->pdev->dev, opp_tbl, &opp_vdevs);
        rc = -EINVAL;
        if (rc)
                return rc;

        /* create device_links b/w consumer(dev) and multiple suppliers(mx, mmcx) */
        for (cnt = 0; cnt < opp_count; cnt++) {
                rc = devm_opp_dl_get(&core->pdev->dev, opp_vdevs[cnt]);
                if (rc) {
                        d_vpr_e("%s: failed to create dl: %s\n",
                                __func__, dev_name(opp_vdevs[cnt]));
                        return rc;
                }
        }

        /* initialize opp table from device tree */
        rc = devm_pm_opp_of_add_table(&core->pdev->dev);
        if (rc) {
                d_vpr_e("%s: failed to add opp table\n", __func__);
                return rc;
        }

        /**
         * Power-up sequence:
         * 1. power up mx & mmcx supply for RCG(video_cc_mvs0_clk_src)
         * 2. power up gdsc0c for mvs0c branch clk
         * 3. power up gdsc0 for mvs0 branch clk
         */
        /* enable runtime pm */
        rc = devm_pm_runtime_enable(&core->pdev->dev);
        if (rc) {
                d_vpr_e("%s: failed to enable runtime pm\n", __func__);
                return rc;
        }
        /* power up rails(mxc & mmcx) */
        rc = devm_pm_runtime_get_sync(&core->pdev->dev);
        if (rc) {
                d_vpr_e("%s: failed to get sync runtime pm\n", __func__);
                return rc;
        }

        return rc;
}

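/*
 * Build the clock set from platform data, pre-allocate a residency-stats
 * node per (scalable clock, frequency) pair, and acquire the clock
 * handles via devm_clk_get().
 */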
static int __init_clocks(struct msm_vidc_core *core)
{
        struct clock_residency *residency = NULL;
        const struct clk_table *clk_tbl;
        struct freq_table *freq_tbl;
        struct clock_set *clocks;
        struct clock_info *cinfo = NULL;
        u32 clk_count = 0, freq_count = 0;
        int fcnt = 0, cnt = 0, rc = 0;

        clocks = &core->resource->clock_set;

        clk_tbl = core->platform->data.clk_tbl;
        clk_count = core->platform->data.clk_tbl_size;

        if (!clk_tbl || !clk_count) {
                d_vpr_e("%s: invalid clock tbl %#x or count %d\n",
                        __func__, clk_tbl, clk_count);
                return -EINVAL;
        }

        /* allocate clock_set */
        clocks->clock_tbl = devm_kzalloc(&core->pdev->dev,
                sizeof(*clocks->clock_tbl) * clk_count, GFP_KERNEL);
        if (!clocks->clock_tbl) {
                d_vpr_e("%s: failed to alloc memory for clock table\n", __func__);
                return -ENOMEM;
        }
        clocks->count = clk_count;

        /* populate clock fields from platform data */
        for (cnt = 0; cnt < clocks->count; cnt++) {
                clocks->clock_tbl[cnt].name = clk_tbl[cnt].name;
                clocks->clock_tbl[cnt].clk_id = clk_tbl[cnt].clk_id;
                clocks->clock_tbl[cnt].has_scaling = clk_tbl[cnt].scaling;
        }

        freq_tbl = core->platform->data.freq_tbl;
        freq_count = core->platform->data.freq_tbl_size;

        /* populate clk residency stats table */
        for (cnt = 0; cnt < clocks->count; cnt++) {
                /* initialize residency_list */
                INIT_LIST_HEAD(&clocks->clock_tbl[cnt].residency_list);

                /* skip if scaling not supported */
                if (!clocks->clock_tbl[cnt].has_scaling)
                        continue;

                for (fcnt = 0; fcnt < freq_count; fcnt++) {
                        residency = devm_kzalloc(&core->pdev->dev,
                                sizeof(struct clock_residency), GFP_KERNEL);
                        if (!residency) {
                                d_vpr_e("%s: failed to alloc clk residency stat node\n",
                                        __func__);
                                return -ENOMEM;
                        }

                        if (!freq_tbl) {
                                d_vpr_e("%s: invalid freq tbl %#x\n", __func__, freq_tbl);
                                return -EINVAL;
                        }

                        /* update residency node */
                        residency->rate = freq_tbl[fcnt].freq;
                        residency->start_time_us = 0;
                        residency->total_time_us = 0;
                        INIT_LIST_HEAD(&residency->list);

                        /* add entry into residency_list */
                        list_add_tail(&residency->list,
                                      &clocks->clock_tbl[cnt].residency_list);
                }
        }

        /* print clock fields */
        venus_hfi_for_each_clock(core, cinfo) {
                d_vpr_h("%s: clock name %s clock id %#x scaling %d\n",
                        __func__, cinfo->name, cinfo->clk_id, cinfo->has_scaling);
        }

        /* get clock handle */
        venus_hfi_for_each_clock(core, cinfo) {
                cinfo->clk = devm_clk_get(&core->pdev->dev, cinfo->name);
                if (IS_ERR_OR_NULL(cinfo->clk)) {
                        d_vpr_e("%s: failed to get clock: %s\n", __func__, cinfo->name);
                        rc = PTR_ERR(cinfo->clk) ?
                                PTR_ERR(cinfo->clk) : -EINVAL;
                        cinfo->clk = NULL;
                        return rc;
                }
        }

        return rc;
}

static int __init_reset_clocks(struct msm_vidc_core *core)
{
        const struct clk_rst_table *rst_tbl;
        struct reset_set *rsts;
        struct reset_info *rinfo = NULL;
        u32 rst_count = 0, cnt = 0;
        int rc = 0;

        rsts = &core->resource->reset_set;

        rst_tbl = core->platform->data.clk_rst_tbl;
        rst_count = core->platform->data.clk_rst_tbl_size;

        if (!rst_tbl || !rst_count) {
                d_vpr_e("%s: invalid reset tbl %#x or count %d\n",
                        __func__, rst_tbl, rst_count);
                return -EINVAL;
        }

        /* allocate reset_set */
        rsts->reset_tbl = devm_kzalloc(&core->pdev->dev,
                sizeof(*rsts->reset_tbl) * rst_count, GFP_KERNEL);
        if (!rsts->reset_tbl) {
                d_vpr_e("%s: failed to alloc memory for reset table\n", __func__);
                return -ENOMEM;
        }
        rsts->count = rst_count;

        /* populate reset clock fields from platform data */
        for (cnt = 0; cnt < rsts->count; cnt++) {
                rsts->reset_tbl[cnt].name = rst_tbl[cnt].name;
                rsts->reset_tbl[cnt].exclusive_release = rst_tbl[cnt].exclusive_release;
        }

        /* print reset clock fields */
        venus_hfi_for_each_reset_clock(core, rinfo) {
                d_vpr_h("%s: reset clk %s, exclusive %d\n",
                        __func__, rinfo->name, rinfo->exclusive_release);
        }

        /* get reset clock handle */
        venus_hfi_for_each_reset_clock(core, rinfo) {
                if (rinfo->exclusive_release)
                        rinfo->rst = devm_reset_control_get_exclusive_released(
                                &core->pdev->dev, rinfo->name);
                else
                        rinfo->rst = devm_reset_control_get(&core->pdev->dev, rinfo->name);
                if (IS_ERR_OR_NULL(rinfo->rst)) {
                        d_vpr_e("%s: failed to get reset clock: %s\n", __func__, rinfo->name);
                        rc = PTR_ERR(rinfo->rst) ?
                                PTR_ERR(rinfo->rst) : -EINVAL;
                        rinfo->rst = NULL;
                        return rc;
                }
        }

        return rc;
}

static int __init_subcaches(struct msm_vidc_core *core)
{
        const struct subcache_table *llcc_tbl;
        struct subcache_set *caches;
        struct subcache_info *sinfo = NULL;
        u32 llcc_count = 0, cnt = 0;
        int rc = 0;

        caches = &core->resource->subcache_set;

        /* skip init if subcache not available */
        if (!is_sys_cache_present(core))
                return 0;

        llcc_tbl = core->platform->data.subcache_tbl;
        llcc_count = core->platform->data.subcache_tbl_size;

        if (!llcc_tbl || !llcc_count) {
                d_vpr_e("%s: invalid llcc tbl %#x or count %d\n",
                        __func__, llcc_tbl, llcc_count);
                return -EINVAL;
        }

        /* allocate subcache_set */
        caches->subcache_tbl = devm_kzalloc(&core->pdev->dev,
                sizeof(*caches->subcache_tbl) * llcc_count, GFP_KERNEL);
        if (!caches->subcache_tbl) {
                d_vpr_e("%s: failed to alloc memory for subcache table\n", __func__);
                return -ENOMEM;
        }
        caches->count = llcc_count;

        /* populate subcache fields from platform data */
        for (cnt = 0; cnt < caches->count; cnt++) {
                caches->subcache_tbl[cnt].name = llcc_tbl[cnt].name;
                caches->subcache_tbl[cnt].llcc_id = llcc_tbl[cnt].llcc_id;
        }

        /* print subcache fields */
        venus_hfi_for_each_subcache(core, sinfo) {
                d_vpr_h("%s: name %s subcache id %d\n",
                        __func__, sinfo->name, sinfo->llcc_id);
        }

        /* get subcache/llcc handle */
        venus_hfi_for_each_subcache(core, sinfo) {
                sinfo->subcache = devm_llcc_get(&core->pdev->dev, sinfo->llcc_id);
                if (IS_ERR_OR_NULL(sinfo->subcache)) {
                        d_vpr_e("%s: failed to get subcache: %d\n", __func__, sinfo->llcc_id);
                        rc = PTR_ERR(sinfo->subcache) ?
                                PTR_ERR(sinfo->subcache) : -EBADHANDLE;
                        sinfo->subcache = NULL;
                        return rc;
                }
        }

        return rc;
}

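/*
 * Copy the frequency table from platform data and sort it in descending
 * order (see cmp() above), so index 0 always holds the highest rate.
 */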
static int __init_freq_table(struct msm_vidc_core *core)
{
        struct freq_table *freq_tbl;
        struct freq_set *clks;
        u32 freq_count = 0, cnt = 0;
        int rc = 0;

        clks = &core->resource->freq_set;

        freq_tbl = core->platform->data.freq_tbl;
        freq_count = core->platform->data.freq_tbl_size;

        if (!freq_tbl || !freq_count) {
                d_vpr_e("%s: invalid freq tbl %#x or count %d\n",
                        __func__, freq_tbl, freq_count);
                return -EINVAL;
        }

        /* allocate freq_set */
        clks->freq_tbl = devm_kzalloc(&core->pdev->dev,
                sizeof(*clks->freq_tbl) * freq_count, GFP_KERNEL);
        if (!clks->freq_tbl) {
                d_vpr_e("%s: failed to alloc memory for freq table\n", __func__);
                return -ENOMEM;
        }
        clks->count = freq_count;

        /* populate freq fields from platform data */
        for (cnt = 0; cnt < clks->count; cnt++)
                clks->freq_tbl[cnt].freq = freq_tbl[cnt].freq;

        /* sort freq table in descending order */
        sort(clks->freq_tbl, clks->count, sizeof(*clks->freq_tbl), cmp, NULL);

        /* print sorted freq table */
        d_vpr_h("%s: updated freq table\n", __func__);
        for (cnt = 0; cnt < clks->count; cnt++)
                d_vpr_h("%s:\t %lu\n", __func__, clks->freq_tbl[cnt].freq);

        return rc;
}

static int __init_context_banks(struct msm_vidc_core *core)
{
        const struct context_bank_table *cb_tbl;
        struct context_bank_set *cbs;
        struct context_bank_info *cbinfo = NULL;
        u32 cb_count = 0, cnt = 0;
        int rc = 0;

        cbs = &core->resource->context_bank_set;

        cb_tbl = core->platform->data.context_bank_tbl;
        cb_count = core->platform->data.context_bank_tbl_size;

        if (!cb_tbl || !cb_count) {
                d_vpr_e("%s: invalid context bank tbl %#x or count %d\n",
                        __func__, cb_tbl, cb_count);
                return -EINVAL;
        }

        /* allocate context_bank table */
        cbs->context_bank_tbl = devm_kzalloc(&core->pdev->dev,
                sizeof(*cbs->context_bank_tbl) * cb_count, GFP_KERNEL);
        if (!cbs->context_bank_tbl) {
                d_vpr_e("%s: failed to alloc memory for context_bank table\n", __func__);
                return -ENOMEM;
        }
        cbs->count = cb_count;

        /**
         * populate context bank fields from platform data except
         * dev & domain, which are assigned as part of the context bank
         * probe sequence
         */
        for (cnt = 0; cnt < cbs->count; cnt++) {
                cbs->context_bank_tbl[cnt].name = cb_tbl[cnt].name;
                cbs->context_bank_tbl[cnt].addr_range.start = cb_tbl[cnt].start;
                cbs->context_bank_tbl[cnt].addr_range.size = cb_tbl[cnt].size;
                cbs->context_bank_tbl[cnt].secure = cb_tbl[cnt].secure;
                cbs->context_bank_tbl[cnt].dma_coherant = cb_tbl[cnt].dma_coherant;
                cbs->context_bank_tbl[cnt].region = cb_tbl[cnt].region;
                cbs->context_bank_tbl[cnt].dma_mask = cb_tbl[cnt].dma_mask;
        }

        /* print context_bank fields */
        venus_hfi_for_each_context_bank(core, cbinfo) {
                d_vpr_h("%s: name %s addr start %#x size %#x secure %d "
                        "coherant %d region %d dma_mask %llu\n",
                        __func__, cbinfo->name, cbinfo->addr_range.start,
                        cbinfo->addr_range.size, cbinfo->secure,
                        cbinfo->dma_coherant, cbinfo->region, cbinfo->dma_mask);
        }

        return rc;
}

static int __init_device_region(struct msm_vidc_core *core)
{
        const struct device_region_table *dev_reg_tbl;
        struct device_region_set *dev_set;
        struct device_region_info *dev_reg_info;
        u32 dev_reg_count = 0, cnt = 0;
        int rc = 0;

        dev_set = &core->resource->device_region_set;

        dev_reg_tbl = core->platform->data.dev_reg_tbl;
        dev_reg_count = core->platform->data.dev_reg_tbl_size;

        if (!dev_reg_tbl || !dev_reg_count) {
                d_vpr_h("%s: device regions not available\n", __func__);
                return 0;
        }

        /* allocate device region table */
        dev_set->device_region_tbl = devm_kzalloc(&core->pdev->dev,
                sizeof(*dev_set->device_region_tbl) * dev_reg_count, GFP_KERNEL);
        if (!dev_set->device_region_tbl) {
                d_vpr_e("%s: failed to alloc memory for device region table\n", __func__);
                return -ENOMEM;
        }
        dev_set->count = dev_reg_count;

        /* populate device region fields from platform data */
        for (cnt = 0; cnt < dev_set->count; cnt++) {
                dev_set->device_region_tbl[cnt].name = dev_reg_tbl[cnt].name;
                dev_set->device_region_tbl[cnt].phy_addr = dev_reg_tbl[cnt].phy_addr;
                dev_set->device_region_tbl[cnt].size = dev_reg_tbl[cnt].size;
                dev_set->device_region_tbl[cnt].dev_addr = dev_reg_tbl[cnt].dev_addr;
                dev_set->device_region_tbl[cnt].region = dev_reg_tbl[cnt].region;
        }

        /* print device region fields */
        venus_hfi_for_each_device_region(core, dev_reg_info) {
                d_vpr_h("%s: name %s phy_addr %#x size %#x dev_addr %#x dev_region %d\n",
                        __func__, dev_reg_info->name, dev_reg_info->phy_addr,
                        dev_reg_info->size, dev_reg_info->dev_addr, dev_reg_info->region);
        }

        return rc;
}

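/*
 * Register each scalable clock with MMRM (the multimedia resource
 * manager) so the aggregate multimedia clock demand can be managed
 * centrally; compiled out when CONFIG_MSM_MMRM is not set.
 */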
#ifdef CONFIG_MSM_MMRM
static int __register_mmrm(struct msm_vidc_core *core)
{
        int rc = 0;
        struct clock_info *cl;

        /* skip if platform does not support mmrm */
        if (!is_mmrm_supported(core)) {
                d_vpr_h("%s: MMRM not supported\n", __func__);
                return 0;
        }

        /* get mmrm handle for each clock source */
        venus_hfi_for_each_clock(core, cl) {
                struct mmrm_client_desc desc;
                char *name = (char *)desc.client_info.desc.name;

                // TODO: set notifier data vals
                struct mmrm_client_notifier_data notifier_data = {
                        MMRM_CLIENT_RESOURCE_VALUE_CHANGE,
                        {{0, 0}},
                        NULL};

                // TODO: add callback fn
                desc.notifier_callback_fn = NULL;

                if (!cl->has_scaling)
                        continue;

                if (IS_ERR_OR_NULL(cl->clk)) {
                        d_vpr_e("%s: Invalid clock: %s\n", __func__, cl->name);
                        return PTR_ERR(cl->clk) ? PTR_ERR(cl->clk) : -EINVAL;
                }

                desc.client_type = MMRM_CLIENT_CLOCK;
                desc.client_info.desc.client_domain = MMRM_CLIENT_DOMAIN_VIDEO;
                desc.client_info.desc.client_id = cl->clk_id;
                strscpy(name, cl->name, sizeof(desc.client_info.desc.name));
                desc.client_info.desc.clk = cl->clk;
                desc.priority = MMRM_CLIENT_PRIOR_LOW;
                desc.pvt_data = notifier_data.pvt_data;

                d_vpr_h("%s: domain(%d) cid(%d) name(%s) clk(%pK)\n",
                        __func__,
                        desc.client_info.desc.client_domain,
                        desc.client_info.desc.client_id,
                        desc.client_info.desc.name,
                        desc.client_info.desc.clk);
                d_vpr_h("%s: type(%d) pri(%d) pvt(%pK) notifier(%pK)\n",
                        __func__,
                        desc.client_type,
                        desc.priority,
                        desc.pvt_data,
                        desc.notifier_callback_fn);

                cl->mmrm_client = devm_mmrm_get(&core->pdev->dev, &desc);
                if (!cl->mmrm_client) {
                        d_vpr_e("%s: Failed to register clk(%s): %d\n",
                                __func__, cl->name, rc);
                        return -EINVAL;
                }
        }

        return rc;
}
#else
static int __register_mmrm(struct msm_vidc_core *core)
{
        return 0;
}
#endif

static int __enable_power_domains(struct msm_vidc_core *core, const char *name)
{
        struct power_domain_info *pdinfo = NULL;
        int rc = 0;

        /* power up rails(mxc & mmcx) to enable RCG(video_cc_mvs0_clk_src) */
        rc = __opp_set_rate(core, ULONG_MAX);
        if (rc) {
                d_vpr_e("%s: opp setrate failed\n", __func__);
                return rc;
        }

        /* power up (gdsc0/gdsc0c) to enable (mvs0/mvs0c) branch clock */
        venus_hfi_for_each_power_domain(core, pdinfo) {
                if (strcmp(pdinfo->name, name))
                        continue;

                rc = pm_runtime_get_sync(pdinfo->genpd_dev);
                if (rc < 0) {
                        d_vpr_e("%s: failed to get sync: %s\n", __func__, pdinfo->name);
                        return rc;
                }
                d_vpr_h("%s: enabled power domain %s\n", __func__, pdinfo->name);
        }

        return rc;
}

static int __disable_power_domains(struct msm_vidc_core *core, const char *name)
{
        struct power_domain_info *pdinfo = NULL;
        int rc = 0;

        /* power down (gdsc0/gdsc0c) to disable (mvs0/mvs0c) branch clock */
        venus_hfi_for_each_power_domain(core, pdinfo) {
                if (strcmp(pdinfo->name, name))
                        continue;

                rc = pm_runtime_put_sync(pdinfo->genpd_dev);
                if (rc) {
                        d_vpr_e("%s: failed to put sync: %s\n", __func__, pdinfo->name);
                        return rc;
                }
                d_vpr_h("%s: disabled power domain %s\n", __func__, pdinfo->name);
        }

        /* power down rails(mxc & mmcx) to disable RCG(video_cc_mvs0_clk_src) */
        rc = __opp_set_rate(core, 0);
        if (rc) {
                d_vpr_e("%s: opp setrate failed\n", __func__);
                return rc;
        }
        msm_vidc_change_core_sub_state(core, CORE_SUBSTATE_GDSC_HANDOFF, 0, __func__);

        return rc;
}

static int __hand_off_power_domains(struct msm_vidc_core *core)
{
        msm_vidc_change_core_sub_state(core, 0, CORE_SUBSTATE_GDSC_HANDOFF, __func__);

        return 0;
}

static int __acquire_power_domains(struct msm_vidc_core *core)
{
        msm_vidc_change_core_sub_state(core, CORE_SUBSTATE_GDSC_HANDOFF, 0, __func__);

        return 0;
}

static int __disable_subcaches(struct msm_vidc_core *core)
{
        struct subcache_info *sinfo;
        int rc = 0;

        if (msm_vidc_syscache_disable || !is_sys_cache_present(core))
                return 0;

        /* De-activate subcaches */
        venus_hfi_for_each_subcache_reverse(core, sinfo) {
                if (!sinfo->isactive)
                        continue;

                d_vpr_h("%s: De-activate subcache %s\n", __func__, sinfo->name);
                rc = llcc_slice_deactivate(sinfo->subcache);
                if (rc) {
                        d_vpr_e("Failed to de-activate %s: %d\n",
                                sinfo->name, rc);
                }
                sinfo->isactive = false;
        }

        return 0;
}

static int __enable_subcaches(struct msm_vidc_core *core)
{
        int rc = 0;
        u32 c = 0;
        struct subcache_info *sinfo;

        if (msm_vidc_syscache_disable || !is_sys_cache_present(core))
                return 0;

        /* Activate subcaches */
        venus_hfi_for_each_subcache(core, sinfo) {
                rc = llcc_slice_activate(sinfo->subcache);
                if (rc) {
                        d_vpr_e("Failed to activate %s: %d\n", sinfo->name, rc);
                        __fatal_error(true);
                        goto err_activate_fail;
                }
                sinfo->isactive = true;
                d_vpr_h("Activated subcache %s\n", sinfo->name);
                c++;
        }

        d_vpr_h("Activated %d Subcaches to Venus\n", c);

        return 0;

err_activate_fail:
        __disable_subcaches(core);
        return rc;
}

static int llcc_enable(struct msm_vidc_core *core, bool enable)
{
        int ret;

        if (enable)
                ret = __enable_subcaches(core);
        else
                ret = __disable_subcaches(core);

        return ret;
}

static int __vote_bandwidth(struct bus_info *bus, unsigned long bw_kbps)
{
        int rc = 0;

        if (!bus->icc) {
                d_vpr_e("%s: invalid bus\n", __func__);
                return -EINVAL;
        }

        d_vpr_p("Voting bus %s to ab %lu kBps\n", bus->name, bw_kbps);

        rc = icc_set_bw(bus->icc, bw_kbps, 0);
        if (rc)
                d_vpr_e("Failed voting bus %s to ab %lu, rc=%d\n",
                        bus->name, bw_kbps, rc);

        return rc;
}

static int __unvote_buses(struct msm_vidc_core *core)
{
        int rc = 0;
        struct bus_info *bus = NULL;

        core->power.bw_ddr = 0;
        core->power.bw_llcc = 0;

        venus_hfi_for_each_bus(core, bus) {
                rc = __vote_bandwidth(bus, 0);
                if (rc)
                        goto err_unknown_device;
        }

err_unknown_device:
        return rc;
}

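/*
 * Vote aggregated DDR/LLCC bandwidth on every interconnect path. The
 * requested value is clamped to the per-bus [min_kbps, max_kbps] range,
 * and a change smaller than TRIVIAL_BW_THRESHOLD against the previous
 * vote is skipped to avoid needless interconnect transactions.
 */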
static int __vote_buses(struct msm_vidc_core *core,
                        unsigned long bw_ddr, unsigned long bw_llcc)
{
        int rc = 0;
        struct bus_info *bus = NULL;
        unsigned long bw_kbps = 0, bw_prev = 0;
        enum vidc_bus_type type;

        venus_hfi_for_each_bus(core, bus) {
                if (bus && bus->icc) {
                        type = get_type_frm_name(bus->name);

                        if (type == DDR) {
                                bw_kbps = bw_ddr;
                                bw_prev = core->power.bw_ddr;
                        } else if (type == LLCC) {
                                bw_kbps = bw_llcc;
                                bw_prev = core->power.bw_llcc;
                        } else {
                                bw_kbps = bus->max_kbps;
                                bw_prev = core->power.bw_ddr ?
                                        bw_kbps : 0;
                        }

                        /* ensure freq is within limits */
                        bw_kbps = clamp_t(typeof(bw_kbps), bw_kbps,
                                          bus->min_kbps, bus->max_kbps);

                        if (TRIVIAL_BW_CHANGE(bw_kbps, bw_prev) && bw_prev) {
                                d_vpr_l("Skip voting bus %s to %lu kBps\n",
                                        bus->name, bw_kbps);
                                continue;
                        }

                        rc = __vote_bandwidth(bus, bw_kbps);

                        if (type == DDR)
                                core->power.bw_ddr = bw_kbps;
                        else if (type == LLCC)
                                core->power.bw_llcc = bw_kbps;
                } else {
                        d_vpr_e("No BUS to Vote\n");
                }
        }

        return rc;
}

static int set_bw(struct msm_vidc_core *core, unsigned long bw_ddr,
                  unsigned long bw_llcc)
{
        if (!bw_ddr && !bw_llcc)
                return __unvote_buses(core);

        return __vote_buses(core, bw_ddr, bw_llcc);
}

static int print_residency_stats(struct msm_vidc_core *core, struct clock_info *cl)
{
        struct clock_residency *residency = NULL;
        u64 total_time_us = 0;
        int rc = 0;

        /* skip if scaling not supported */
        if (!cl->has_scaling)
                return 0;

        /* grand total residency time */
        list_for_each_entry(residency, &cl->residency_list, list)
                total_time_us += residency->total_time_us;

        /* sanity check to avoid divide by 0 */
        total_time_us = (total_time_us > 0) ? total_time_us : 1;

        /* print residency percent for each clock rate */
        list_for_each_entry(residency, &cl->residency_list, list) {
                d_vpr_hs("%s: %s clock rate [%llu] total %lluus residency %llu%%\n",
                         __func__, cl->name, residency->rate, residency->total_time_us,
                         (residency->total_time_us * 100 + total_time_us / 2) /
                                total_time_us);
        }

        return rc;
}

static int reset_residency_stats(struct msm_vidc_core *core, struct clock_info *cl)
{
        struct clock_residency *residency = NULL;
        int rc = 0;

        /* skip if scaling not supported */
        if (!cl->has_scaling)
                return 0;

        d_vpr_h("%s: reset %s residency stats\n", __func__, cl->name);

        /* reset clock residency stats */
        list_for_each_entry(residency, &cl->residency_list, list) {
                residency->start_time_us = 0;
                residency->total_time_us = 0;
        }

        /*
         * During the reset, make sure to update the start time of the
         * previous clk rate: it might not be 0, and when the next session
         * starts voting from that rate, its residency would otherwise be
         * missing from the stats.
         */
        residency = get_residency_stats(cl, cl->prev);
        if (residency)
                residency->start_time_us = ktime_get_ns() / 1000;

        return rc;
}

static struct clock_residency *get_residency_stats(struct clock_info *cl, u64 rate)
{
        struct clock_residency *residency = NULL;
        bool found = false;

        if (!cl) {
                d_vpr_e("%s: invalid params\n", __func__);
                return NULL;
        }

        list_for_each_entry(residency, &cl->residency_list, list) {
                if (residency->rate == rate) {
                        found = true;
                        break;
                }
        }

        return found ? residency : NULL;
}

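/*
 * Close the residency interval of the outgoing rate (accumulating its
 * elapsed time) and open a new interval for the incoming rate. A rate
 * of 0 (clock disable) only closes the previous interval. Times are
 * kept in microseconds, derived from ktime_get_ns().
 */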
static int __update_residency_stats(struct msm_vidc_core *core,
                                    struct clock_info *cl, u64 rate)
{
        struct clock_residency *cur_residency = NULL, *prev_residency = NULL;
        u64 cur_time_us = 0;
        int rc = 0;

        /* skip update if high or stats logs not enabled */
        if (!(msm_vidc_debug & (VIDC_HIGH | VIDC_STAT)))
                return 0;

        /* skip update if scaling not supported */
        if (!cl->has_scaling)
                return 0;

        /* skip update if rate not changed */
        if (rate == cl->prev)
                return 0;

        /* get current time in us */
        cur_time_us = ktime_get_ns() / 1000;

        /* update previous rate residency end or total time */
        prev_residency = get_residency_stats(cl, cl->prev);
        if (prev_residency) {
                if (prev_residency->start_time_us)
                        prev_residency->total_time_us +=
                                cur_time_us - prev_residency->start_time_us;

                /* reset start time us */
                prev_residency->start_time_us = 0;
        }

        /* clk disable case - no need to update new entry */
        if (rate == 0)
                return 0;

        /* check if rate entry is present */
        cur_residency = get_residency_stats(cl, rate);
        if (!cur_residency) {
                d_vpr_e("%s: entry not found. rate %llu\n", __func__, rate);
                return -EINVAL;
        }

        /* update residency start time for current rate/freq */
        cur_residency->start_time_us = cur_time_us;

        return rc;
}

static int __set_clk_rate(struct msm_vidc_core *core, struct clock_info *cl,
                          u64 rate)
{
        int rc = 0;

        /* update clock residency stats */
        __update_residency_stats(core, cl, rate);

        /* bail early if requested clk rate is not changed */
        if (rate == cl->prev)
                return 0;

        d_vpr_p("Scaling clock %s to %llu, prev %llu\n",
                cl->name, rate, cl->prev);

        rc = clk_set_rate(cl->clk, rate);
        if (rc) {
                d_vpr_e("%s: Failed to set clock rate %llu %s: %d\n",
                        __func__, rate, cl->name, rc);
                return rc;
        }

        cl->prev = rate;

        return rc;
}

static int __set_clocks(struct msm_vidc_core *core, u64 freq)
{
        struct clock_info *cl;
        int rc = 0;

        /* scale mxc & mmcx rails */
        rc = __opp_set_rate(core, freq);
        if (rc) {
                d_vpr_e("%s: opp setrate failed %lld\n", __func__, freq);
                return rc;
        }

        venus_hfi_for_each_clock(core, cl) {
                if (cl->has_scaling) {
                        rc = __set_clk_rate(core, cl, freq);
                        if (rc)
                                return rc;
                }
        }

        return 0;
}

static int __disable_unprepare_clock(struct msm_vidc_core *core,
                                     const char *clk_name)
{
        int rc = 0;
        struct clock_info *cl;
        bool found;

        found = false;

        venus_hfi_for_each_clock(core, cl) {
                if (!cl->clk) {
                        d_vpr_e("%s: invalid clock %s\n", __func__, cl->name);
                        return -EINVAL;
                }
                if (strcmp(cl->name, clk_name))
                        continue;

                found = true;
                clk_disable_unprepare(cl->clk);
                if (cl->has_scaling)
                        __set_clk_rate(core, cl, 0);
                cl->prev = 0;
                d_vpr_h("%s: clock %s disable unprepared\n", __func__, cl->name);
                break;
        }
        if (!found) {
                d_vpr_e("%s: clock %s not found\n", __func__, clk_name);
                return -EINVAL;
        }

        return rc;
}

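/*
 * Prepare and enable a clock by name. For scalable clocks the rate is
 * first set to the lowest rate the clock supports (divided by the
 * source-clock scaling ratio), and the residency stats are reset so the
 * new session starts with a clean slate.
 */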
static int __prepare_enable_clock(struct msm_vidc_core *core,
                                  const char *clk_name)
{
        int rc = 0;
        struct clock_info *cl;
        bool found;
        u64 rate = 0;

        found = false;

        venus_hfi_for_each_clock(core, cl) {
                if (!cl->clk) {
                        d_vpr_e("%s: invalid clock\n", __func__);
                        return -EINVAL;
                }
                if (strcmp(cl->name, clk_name))
                        continue;

                found = true;
                /*
                 * For the clocks we control, set the rate prior to preparing
                 * them. Since we don't really have a load at this point, scale
                 * it to the lowest frequency possible
                 */
                if (cl->has_scaling) {
                        /* reset clk residency stats */
                        reset_residency_stats(core, cl);

                        rate = clk_round_rate(cl->clk, 0);
                        /**
                         * source clock is already multiplied with the scaling
                         * ratio and __set_clk_rate attempts to multiply again.
                         * So divide by the scaling ratio before calling
                         * __set_clk_rate.
                         */
                        rate = rate / MSM_VIDC_CLOCK_SOURCE_SCALING_RATIO;
                        __set_clk_rate(core, cl, rate);
                }

                rc = clk_prepare_enable(cl->clk);
                if (rc) {
                        d_vpr_e("%s: failed to enable clock %s\n",
                                __func__, cl->name);
                        return rc;
                }
                if (!__clk_is_enabled(cl->clk)) {
                        d_vpr_e("%s: clock %s not enabled\n",
                                __func__, cl->name);
                        clk_disable_unprepare(cl->clk);
                        if (cl->has_scaling)
                                __set_clk_rate(core, cl, 0);
                        return -EINVAL;
                }
                d_vpr_h("%s: clock %s prepare enabled\n", __func__, cl->name);
                break;
        }
        if (!found) {
                d_vpr_e("%s: clock %s not found\n", __func__, clk_name);
                return -EINVAL;
        }

        return rc;
}

static int __init_resources(struct msm_vidc_core *core)
{
        int rc = 0;

        rc = __init_register_base(core);
        if (rc)
                return rc;

        rc = __init_irq(core);
        if (rc)
                return rc;

        rc = __init_bus(core);
        if (rc)
                return rc;

        rc = call_res_op(core, gdsc_init, core);
        if (rc)
                return rc;

        rc = __init_clocks(core);
        if (rc)
                return rc;

        rc = __init_reset_clocks(core);
        if (rc)
                return rc;

        rc = __init_subcaches(core);
        if (rc)
                return rc;

        rc = __init_freq_table(core);
        if (rc)
                return rc;

        rc = __init_context_banks(core);
        if (rc)
                return rc;

        rc = __init_device_region(core);
        if (rc)
                return rc;

        rc = __register_mmrm(core);
        if (rc)
                return rc;

        return rc;
}

static int __reset_control_acquire_name(struct msm_vidc_core *core,
                                        const char *name)
{
        struct reset_info *rcinfo = NULL;
        int rc = 0;
        bool found = false;

        venus_hfi_for_each_reset_clock(core, rcinfo) {
                if (strcmp(rcinfo->name, name))
                        continue;

                /* this function is valid only for exclusive_release reset clocks */
                if (!rcinfo->exclusive_release) {
                        d_vpr_e("%s: unsupported reset control (%s), exclusive %d\n",
                                __func__, name, rcinfo->exclusive_release);
                        return -EINVAL;
                }

                found = true;
                /* reset_control_acquire is exposed in kernel version 6 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 0, 0))
                rc = reset_control_acquire(rcinfo->rst);
#else
                rc = -EINVAL;
#endif
                if (rc)
                        d_vpr_e("%s: failed to acquire reset control (%s), rc = %d\n",
                                __func__, rcinfo->name, rc);
                else
                        d_vpr_h("%s: acquire reset control (%s)\n",
                                __func__, rcinfo->name);
                break;
        }
        if (!found) {
                d_vpr_e("%s: reset control (%s) not found\n", __func__, name);
                rc = -EINVAL;
        }

        return rc;
}

static int __reset_control_release_name(struct msm_vidc_core *core,
                                        const char *name)
{
        struct reset_info *rcinfo = NULL;
        int rc = 0;
        bool found = false;

        venus_hfi_for_each_reset_clock(core, rcinfo) {
                if (strcmp(rcinfo->name, name))
                        continue;

                /* this function is valid only for exclusive_release reset clocks */
                if (!rcinfo->exclusive_release) {
                        d_vpr_e("%s: unsupported reset control (%s), exclusive %d\n",
                                __func__, name, rcinfo->exclusive_release);
                        return -EINVAL;
                }

                found = true;
                /* reset_control_release exposed in kernel version 6 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 0, 0))
                reset_control_release(rcinfo->rst);
#else
                rc = -EINVAL;
#endif
                if (rc)
                        d_vpr_e("%s: release reset control (%s) failed\n",
                                __func__, rcinfo->name);
                else
                        d_vpr_h("%s: release reset control (%s) done\n",
                                __func__, rcinfo->name);
                break;
        }
        if (!found) {
                d_vpr_e("%s: reset control (%s) not found\n", __func__, name);
                rc = -EINVAL;
        }

        return rc;
}

static int __reset_control_assert_name(struct msm_vidc_core *core,
                                       const char *name)
{
        struct reset_info *rcinfo = NULL;
        int rc = 0;
        bool found = false;

        venus_hfi_for_each_reset_clock(core, rcinfo) {
                if (strcmp(rcinfo->name, name))
                        continue;

                found = true;
                rc = reset_control_assert(rcinfo->rst);
                if (rc)
                        d_vpr_e("%s: failed to assert reset control (%s), rc = %d\n",
                                __func__, rcinfo->name, rc);
                else
                        d_vpr_h("%s: assert reset control (%s)\n",
                                __func__, rcinfo->name);
                break;
        }
        if (!found) {
                d_vpr_e("%s: reset control (%s) not found\n", __func__, name);
                rc = -EINVAL;
        }

        return rc;
}

static int __reset_control_deassert_name(struct msm_vidc_core *core,
                                         const char *name)
{
        struct reset_info *rcinfo = NULL;
        int rc = 0;
        bool found = false;

        venus_hfi_for_each_reset_clock(core, rcinfo) {
                if (strcmp(rcinfo->name, name))
                        continue;

                found = true;
                rc = reset_control_deassert(rcinfo->rst);
                if (rc)
                        d_vpr_e("%s: deassert reset control for (%s) failed, rc %d\n",
                                __func__, rcinfo->name, rc);
                else
                        d_vpr_h("%s: deassert reset control (%s)\n",
                                __func__, rcinfo->name);
                break;
        }
        if (!found) {
                d_vpr_e("%s: reset control (%s) not found\n", __func__, name);
                rc = -EINVAL;
        }

        return rc;
}

static int __reset_control_deassert(struct msm_vidc_core *core)
{
        struct reset_info *rcinfo = NULL;
        int rc = 0;

        venus_hfi_for_each_reset_clock(core, rcinfo) {
                rc = reset_control_deassert(rcinfo->rst);
                if (rc) {
                        d_vpr_e("%s: deassert reset control failed. rc = %d\n",
                                __func__, rc);
                        continue;
                }
                d_vpr_h("%s: deassert reset control %s\n", __func__, rcinfo->name);
        }

        return rc;
}

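/*
 * Assert every reset control in table order, pausing ~1 ms after each
 * assert; on failure, deassert the controls asserted so far in reverse
 * order so the bridge is left in a consistent state.
 */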
static int __reset_control_assert(struct msm_vidc_core *core)
{
        struct reset_info *rcinfo = NULL;
        int rc = 0, cnt = 0;

        venus_hfi_for_each_reset_clock(core, rcinfo) {
                if (!rcinfo->rst) {
                        d_vpr_e("%s: invalid reset clock %s\n",
                                __func__, rcinfo->name);
                        return -EINVAL;
                }
                rc = reset_control_assert(rcinfo->rst);
                if (rc) {
                        d_vpr_e("%s: failed to assert reset control %s, rc = %d\n",
                                __func__, rcinfo->name, rc);
                        goto deassert_reset_control;
                }
                cnt++;
                d_vpr_h("%s: assert reset control %s, count %d\n",
                        __func__, rcinfo->name, cnt);

                usleep_range(1000, 1100);
        }

        return rc;

deassert_reset_control:
        venus_hfi_for_each_reset_clock_reverse_continue(core, rcinfo, cnt) {
                d_vpr_e("%s: deassert reset control %s\n", __func__, rcinfo->name);
                reset_control_deassert(rcinfo->rst);
        }

        return rc;
}

static int __reset_ahb2axi_bridge(struct msm_vidc_core *core)
{
        int rc = 0;

        rc = __reset_control_assert(core);
        if (rc)
                return rc;

        rc = __reset_control_deassert(core);
        if (rc)
                return rc;

        return rc;
}

static int __print_clock_residency_stats(struct msm_vidc_core *core)
{
        struct clock_info *cl;
        int rc = 0;

        venus_hfi_for_each_clock(core, cl) {
                /* skip if scaling not supported */
                if (!cl->has_scaling)
                        continue;

                /*
                 * residency for the last clk corner entry will be updated in stats
                 * only if we call update residency with rate 0
                 */
                __update_residency_stats(core, cl, 0);

                /* print clock residency stats */
                print_residency_stats(core, cl);
        }

        return rc;
}

static int __reset_clock_residency_stats(struct msm_vidc_core *core)
{
        struct clock_info *cl;
        int rc = 0;

        venus_hfi_for_each_clock(core, cl) {
                /* skip if scaling not supported */
                if (!cl->has_scaling)
                        continue;

                /* reset clock residency stats */
                reset_residency_stats(core, cl);
        }

        return rc;
}

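/*
 * Resource ops vtable; consumers reach these helpers through
 * call_res_op() after fetching the table via get_resources_ops().
 */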
static const struct msm_vidc_resources_ops res_ops = {
        .init = __init_resources,
        .reset_bridge = __reset_ahb2axi_bridge,
        .reset_control_acquire = __reset_control_acquire_name,
        .reset_control_release = __reset_control_release_name,
        .reset_control_assert = __reset_control_assert_name,
        .reset_control_deassert = __reset_control_deassert_name,
        .gdsc_init = __init_power_domains,
        .gdsc_on = __enable_power_domains,
        .gdsc_off = __disable_power_domains,
        .gdsc_hw_ctrl = __hand_off_power_domains,
        .gdsc_sw_ctrl = __acquire_power_domains,
        .llcc = llcc_enable,
        .set_bw = set_bw,
        .set_clks = __set_clocks,
        .clk_enable = __prepare_enable_clock,
        .clk_disable = __disable_unprepare_clock,
        .clk_print_residency_stats = __print_clock_residency_stats,
        .clk_reset_residency_stats = __reset_clock_residency_stats,
        .clk_update_residency_stats = __update_residency_stats,
};

const struct msm_vidc_resources_ops *get_resources_ops(void)
{
        return &res_ops;
}