venus_hfi.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/clk-provider.h>
#include <linux/iommu.h>
#include <linux/qcom_scm.h>
#include <linux/soc/qcom/smem.h>
#include <linux/irqreturn.h>
#include <linux/reset.h>
#include <linux/interconnect.h>
#include <soc/qcom/subsystem_restart.h>
#include <linux/of_address.h>
#include <linux/firmware.h>
#include <linux/soc/qcom/mdt_loader.h>

#include "venus_hfi.h"
#include "msm_vidc_core.h"
#include "msm_vidc_power.h"
#include "msm_vidc_dt.h"
#include "msm_vidc_platform.h"
#include "msm_vidc_memory.h"
#include "msm_vidc_driver.h"
#include "msm_vidc_debug.h"
#include "hfi_packet.h"
#include "venus_hfi_response.h"

#define MIN_PAYLOAD_SIZE 3
#define MAX_FIRMWARE_NAME_SIZE 128

#define update_offset(offset, val) ((offset) += (val))
#define update_timestamp(ts, val) \
	do { \
		do_div((ts), NSEC_PER_USEC); \
		(ts) += (val); \
		(ts) *= NSEC_PER_USEC; \
	} while (0)
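/*
 * Note on update_timestamp(): do_div() reduces ts from nanoseconds to
 * microseconds in place, val (in microseconds) is added, and the sum is
 * scaled back to nanoseconds. E.g. ts = 2500 ns, val = 3 us yields
 * 5000 ns; sub-microsecond precision of ts is dropped in the process.
 */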
extern struct msm_vidc_core *g_core;

static int __resume(struct msm_vidc_core *core);
static int __suspend(struct msm_vidc_core *core);

struct tzbsp_memprot {
	u32 cp_start;
	u32 cp_size;
	u32 cp_nonpixel_start;
	u32 cp_nonpixel_size;
};

enum tzbsp_video_state {
	TZBSP_VIDEO_STATE_SUSPEND = 0,
	TZBSP_VIDEO_STATE_RESUME = 1,
	TZBSP_VIDEO_STATE_RESTORE_THRESHOLD = 2,
};

enum reset_state {
	INIT = 1,
	ASSERT,
	DEASSERT,
};

/* Less than 50 MBps is treated as a trivial BW change */
#define TRIVIAL_BW_THRESHOLD 50000
#define TRIVIAL_BW_CHANGE(a, b) \
	((a) > (b) ? (a) - (b) < TRIVIAL_BW_THRESHOLD : \
		(b) - (a) < TRIVIAL_BW_THRESHOLD)
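/*
 * Example: with bandwidths in kBps, TRIVIAL_BW_CHANGE(1000000, 1040000)
 * evaluates true (the 40 MBps delta is below the 50 MBps threshold), so
 * __vote_buses() below skips re-voting for such a change.
 */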
/*
 * Look up the power/bus LUT entry for a given resolution and frame rate:
 * returns the first entry that covers both the frame size and fps
 * (entries are assumed to be sorted by ascending capacity), falling back
 * to the largest entry.
 */
struct lut const *__lut(int width, int height, int fps)
{
	int frame_size = height * width, c = 0;

	do {
		if (LUT[c].frame_size >= frame_size && LUT[c].frame_rate >= fps)
			return &LUT[c];
	} while (++c < ARRAY_SIZE(LUT));

	return &LUT[ARRAY_SIZE(LUT) - 1];
}
fp_t __compression_ratio(struct lut const *entry, int bpp)
{
	int c = 0;

	for (c = 0; c < COMPRESSION_RATIO_MAX; ++c) {
		if (entry->compression_ratio[c].bpp == bpp)
			return entry->compression_ratio[c].ratio;
	}

	WARN(true, "Shouldn't be here, LUT possibly corrupted?\n");
	return FP_ZERO; /* impossible */
}
void __dump(struct dump dump[], int len)
{
	int c = 0;

	for (c = 0; c < len; ++c) {
		char format_line[128] = "", formatted_line[128] = "";

		if (dump[c].val == DUMP_HEADER_MAGIC) {
			snprintf(formatted_line, sizeof(formatted_line), "%s\n",
				dump[c].key);
		} else {
			bool fp_format = !strcmp(dump[c].format, DUMP_FP_FMT);

			if (!fp_format) {
				snprintf(format_line, sizeof(format_line),
					" %-35s: %s\n", dump[c].key,
					dump[c].format);
				snprintf(formatted_line, sizeof(formatted_line),
					format_line, dump[c].val);
			} else {
				size_t integer_part, fractional_part;

				integer_part = fp_int(dump[c].val);
				fractional_part = fp_frac(dump[c].val);
				snprintf(formatted_line, sizeof(formatted_line),
					" %-35s: %zd + %zd/%zd\n",
					dump[c].key, integer_part,
					fractional_part,
					fp_frac_base());
			}
		}
		d_vpr_b("%s", formatted_line);
	}
}
static void __dump_packet(u8 *packet, const char *function, void *qinfo)
{
	u32 c = 0, session_id, packet_size = *(u32 *)packet;
	const int row_size = 32;
	struct msm_vidc_inst *inst = NULL;
	bool matches = false;
	/*
	 * row must contain enough space for 0xdeadbaad * 8 to be converted
	 * into "de ad ba ad " * 8 + '\0'
	 */
	char row[3 * 32];

	session_id = *((u32 *)packet + 1);

	list_for_each_entry(inst, &g_core->instances, list) {
		if (inst->session_id == session_id) {
			matches = true;
			break;
		}
	}

	if (matches)
		i_vpr_t(inst, "%s: %pK\n", function, qinfo);
	else
		d_vpr_t("%s: %pK\n", function, qinfo);

	for (c = 0; c * row_size < packet_size; ++c) {
		int bytes_to_read = ((c + 1) * row_size > packet_size) ?
			packet_size % row_size : row_size;

		hex_dump_to_buffer(packet + c * row_size, bytes_to_read,
				row_size, 4, row, sizeof(row), false);
		if (matches)
			i_vpr_t(inst, "%s\n", row);
		else
			d_vpr_t("%s\n", row);
	}
}
static void __fatal_error(struct msm_vidc_core *core, bool fatal)
{
	/* NOTE: the early return below leaves fatal-error handling disabled */
	return;
	fatal &= core->capabilities[HW_RESPONSE_TIMEOUT].value;
	MSM_VIDC_ERROR(fatal);
}
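/*
 * __strict_check() is a debug guard: it expects core->lock to be held by
 * the caller and escalates a violation through __fatal_error() (currently
 * a no-op while the early return above is in place).
 */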
static void __strict_check(struct msm_vidc_core *core)
{
	__fatal_error(core, !mutex_is_locked(&core->lock));
}

bool __core_in_valid_state(struct msm_vidc_core *core)
{
	return core->state == MSM_VIDC_CORE_INIT;
}

bool is_sys_cache_present(struct msm_vidc_core *core)
{
	return core->dt->sys_cache_present;
}

static bool __valdiate_session(struct msm_vidc_core *core,
		struct msm_vidc_inst *inst, const char *func)
{
	bool valid = false;
	struct msm_vidc_inst *temp;

	if (!core || !inst)
		return false;

	__strict_check(core);

	list_for_each_entry(temp, &core->instances, list) {
		if (temp == inst) {
			valid = true;
			break;
		}
	}
	if (!valid)
		i_vpr_e(inst, "%s: invalid session\n", func);

	return valid;
}
void __write_register(struct msm_vidc_core *core,
		u32 reg, u32 value)
{
	u32 hwiosymaddr = reg;
	u8 *base_addr;

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}

	__strict_check(core);

	if (!core->power_enabled) {
		d_vpr_e("HFI register write failed: power is off\n");
		__fatal_error(core, true);
		return;
	}

	base_addr = core->register_base_addr;
	d_vpr_l("Base addr: %pK, writing to: %#x, value: %#x...\n",
		base_addr, hwiosymaddr, value);
	base_addr += hwiosymaddr;
	writel_relaxed(value, base_addr);

	/* Memory barrier to make sure value is written into the register */
	wmb();
}
/*
 * The mask argument selects which bits to update: with mask = 0x11 only
 * bits 0 and 4 are updated with the corresponding bits from value. To
 * update the entire register, pass mask = 0xFFFFFFFF.
 */
void __write_register_masked(struct msm_vidc_core *core,
		u32 reg, u32 value, u32 mask)
{
	u32 prev_val, new_val;
	u8 *base_addr;

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}

	__strict_check(core);

	if (!core->power_enabled) {
		d_vpr_e("%s: register write failed, power is off\n",
			__func__);
		__fatal_error(core, true);
		return;
	}

	base_addr = core->register_base_addr;
	base_addr += reg;

	prev_val = readl_relaxed(base_addr);
	/* Memory barrier to ensure the register read is correct */
	rmb();

	new_val = (prev_val & ~mask) | (value & mask);
	d_vpr_l(
		"Base addr: %pK, writing to: %#x, previous-value: %#x, value: %#x, mask: %#x, new-value: %#x...\n",
		base_addr, reg, prev_val, value, mask, new_val);
	writel_relaxed(new_val, base_addr);
	/* Memory barrier to make sure value is written into the register */
	wmb();
}
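/*
 * Example: __write_register_masked(core, reg, 0x01, 0x11) sets bit 0,
 * clears bit 4, and leaves every other bit of the register untouched.
 */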
int __read_register(struct msm_vidc_core *core, u32 reg)
{
	int rc = 0;
	u8 *base_addr;

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	__strict_check(core);

	if (!core->power_enabled) {
		d_vpr_e("HFI register read failed: power is off\n");
		__fatal_error(core, true);
		return -EINVAL;
	}

	base_addr = core->register_base_addr;

	rc = readl_relaxed(base_addr + reg);
	/* Memory barrier to make sure value is read correctly from the register */
	rmb();
	d_vpr_l("Base addr: %pK, read from: %#x, value: %#x...\n",
		base_addr, reg, rc);

	return rc;
}
static void __schedule_power_collapse_work(struct msm_vidc_core *core)
{
	if (!core || !core->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}

	if (!core->capabilities[SW_PC].value) {
		d_vpr_l("software power collapse not enabled\n");
		return;
	}

	cancel_delayed_work(&core->pm_work);
	if (!queue_delayed_work(core->pm_workq,
			&core->pm_work, msecs_to_jiffies(
			core->capabilities[SW_PC_DELAY].value))) {
		d_vpr_e("power collapse already scheduled\n");
	} else {
		d_vpr_l("power collapse scheduled for %d ms\n",
			core->capabilities[SW_PC_DELAY].value);
	}
}

static void __cancel_power_collapse_work(struct msm_vidc_core *core)
{
	if (!core || !core->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}

	if (!core->capabilities[SW_PC].value)
		return;

	cancel_delayed_work(&core->pm_work);
}
static int __acquire_regulator(struct msm_vidc_core *core,
		struct regulator_info *rinfo)
{
	int rc = 0;

	if (rinfo->has_hw_power_collapse) {
		if (!rinfo->regulator) {
			d_vpr_e("%s: invalid regulator\n", __func__);
			rc = -EINVAL;
			goto exit;
		}

		if (regulator_get_mode(rinfo->regulator) ==
				REGULATOR_MODE_NORMAL) {
			d_vpr_h("Skip acquire regulator %s\n", rinfo->name);
			goto exit;
		}

		rc = regulator_set_mode(rinfo->regulator,
				REGULATOR_MODE_NORMAL);
		if (rc) {
			/*
			 * This is somewhat fatal, but nothing we can do
			 * about it. We can't disable the regulator w/o
			 * getting it back under s/w control.
			 */
			d_vpr_e("Failed to acquire regulator control: %s\n",
				rinfo->name);
			goto exit;
		} else {
			d_vpr_h("Acquired regulator control from HW: %s\n",
				rinfo->name);
		}

		if (!regulator_is_enabled(rinfo->regulator)) {
			d_vpr_e("%s: Regulator is not enabled %s\n",
				__func__, rinfo->name);
			__fatal_error(core, true);
		}
	}

exit:
	return rc;
}
static int __hand_off_regulator(struct msm_vidc_core *core,
		struct regulator_info *rinfo)
{
	int rc = 0;

	if (rinfo->has_hw_power_collapse) {
		if (!rinfo->regulator) {
			d_vpr_e("%s: invalid regulator\n", __func__);
			return -EINVAL;
		}

		rc = regulator_set_mode(rinfo->regulator,
				REGULATOR_MODE_FAST);
		if (rc) {
			core->handoff_done = 0;
			d_vpr_e("Failed to hand off regulator control: %s\n",
				rinfo->name);
			return rc;
		} else {
			core->handoff_done = 1;
			d_vpr_h("Hand off regulator control to HW: %s\n",
				rinfo->name);
		}

		if (!regulator_is_enabled(rinfo->regulator)) {
			d_vpr_e("%s: Regulator is not enabled %s\n",
				__func__, rinfo->name);
			__fatal_error(core, true);
		}
	}

	return rc;
}
static int __hand_off_regulators(struct msm_vidc_core *core)
{
	struct regulator_info *rinfo;
	int rc = 0, c = 0;

	venus_hfi_for_each_regulator(core, rinfo) {
		rc = __hand_off_regulator(core, rinfo);
		/*
		 * If one regulator hand-off fails, the driver must take
		 * back control of the regulators handed off so far.
		 */
		if (rc)
			goto err_reg_handoff_failed;
		c++;
	}

	return rc;
err_reg_handoff_failed:
	venus_hfi_for_each_regulator_reverse_continue(core, rinfo, c)
		__acquire_regulator(core, rinfo);

	return rc;
}
static void __set_registers(struct msm_vidc_core *core)
{
	struct reg_set *reg_set;
	int i;

	if (!core || !core->dt) {
		d_vpr_e("core resources null, cannot set registers\n");
		return;
	}

	reg_set = &core->dt->reg_set;
	for (i = 0; i < reg_set->count; i++) {
		__write_register_masked(core, reg_set->reg_tbl[i].reg,
				reg_set->reg_tbl[i].value,
				reg_set->reg_tbl[i].mask);
	}
}
static int __vote_bandwidth(struct bus_info *bus,
		unsigned long bw_kbps)
{
	int rc = 0;

	if (!bus->path) {
		d_vpr_e("%s: invalid bus\n", __func__);
		return -EINVAL;
	}

	d_vpr_p("Voting bus %s to ab %lu kBps\n", bus->name, bw_kbps);
	rc = icc_set_bw(bus->path, bw_kbps, 0);
	if (rc)
		d_vpr_e("Failed voting bus %s to ab %lu, rc=%d\n",
			bus->name, bw_kbps, rc);

	return rc;
}
int __unvote_buses(struct msm_vidc_core *core)
{
	int rc = 0;
	struct bus_info *bus = NULL;

	core->power.bw_ddr = 0;
	core->power.bw_llcc = 0;

	venus_hfi_for_each_bus(core, bus) {
		rc = __vote_bandwidth(bus, 0);
		if (rc)
			goto err_unknown_device;
	}

err_unknown_device:
	return rc;
}
static int __vote_buses(struct msm_vidc_core *core,
		unsigned long bw_ddr, unsigned long bw_llcc)
{
	int rc = 0;
	struct bus_info *bus = NULL;
	unsigned long bw_kbps = 0, bw_prev = 0;
	enum vidc_bus_type type;

	venus_hfi_for_each_bus(core, bus) {
		if (bus && bus->path) {
			type = get_type_frm_name(bus->name);

			if (type == DDR) {
				bw_kbps = bw_ddr;
				bw_prev = core->power.bw_ddr;
			} else if (type == LLCC) {
				bw_kbps = bw_llcc;
				bw_prev = core->power.bw_llcc;
			} else {
				bw_kbps = bus->range[1];
				bw_prev = core->power.bw_ddr ?
						bw_kbps : 0;
			}

			/* ensure the vote is within the bus limits */
			bw_kbps = clamp_t(typeof(bw_kbps), bw_kbps,
					bus->range[0], bus->range[1]);

			if (TRIVIAL_BW_CHANGE(bw_kbps, bw_prev) && bw_prev) {
				d_vpr_l("Skip voting bus %s to %lu kBps\n",
					bus->name, bw_kbps);
				continue;
			}

			rc = __vote_bandwidth(bus, bw_kbps);

			if (type == DDR)
				core->power.bw_ddr = bw_kbps;
			else if (type == LLCC)
				core->power.bw_llcc = bw_kbps;
		} else {
			d_vpr_e("No bus to vote\n");
		}
	}

	return rc;
}
static int __tzbsp_set_video_state(enum tzbsp_video_state state)
{
	int tzbsp_rsp = qcom_scm_set_remote_state(state, 0);

	d_vpr_l("Set state %d, resp %d\n", state, tzbsp_rsp);
	if (tzbsp_rsp) {
		d_vpr_e("Failed to set video core state %d: %d\n",
			state, tzbsp_rsp);
		return -EINVAL;
	}

	return 0;
}
static int __set_clk_rate(struct msm_vidc_core *core,
		struct clock_info *cl, u64 rate)
{
	int rc = 0;
	struct mmrm_client_data client_data;
	struct mmrm_client *client = cl->mmrm_client;

	/* not registered */
	if (!client)
		return -EINVAL;

	/* bail early if the requested clk rate is unchanged */
	if (rate == cl->prev)
		return 0;

	d_vpr_p("Scaling clock %s to %llu, prev %llu\n",
		cl->name, rate, cl->prev);

	// TODO: set num_hw_blocks based on encoder or decoder
	memset(&client_data, 0, sizeof(client_data));
	rc = mmrm_client_set_value(client, &client_data, rate);
	if (rc) {
		d_vpr_e("%s: Failed to set clock rate %llu %s: %d\n",
			__func__, rate, cl->name, rc);
		return rc;
	}
	cl->prev = rate;

	return rc;
}
static int __set_clocks(struct msm_vidc_core *core, u32 freq)
{
	int rc = 0;
	struct clock_info *cl;

	venus_hfi_for_each_clock(core, cl) {
		if (cl->has_scaling) {
			rc = __set_clk_rate(core, cl, freq);
			if (rc)
				return rc;
		}
	}

	return 0;
}
static int __scale_clocks(struct msm_vidc_core *core)
{
	int rc = 0;
	struct allowed_clock_rates_table *allowed_clks_tbl;
	u32 freq = 0;

	allowed_clks_tbl = core->dt->allowed_clks_tbl;
	freq = core->power.clk_freq ? core->power.clk_freq :
		allowed_clks_tbl[0].clock_rate;

	rc = __set_clocks(core, freq);
	if (rc)
		return rc;

	core->power.clk_freq = freq;

	return 0;
}
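/*
 * When no frequency has been voted yet (clk_freq == 0, e.g. right after
 * init), the first entry of allowed_clks_tbl is used as the default;
 * the table is assumed to list the platform's allowed rates with the
 * preferred default first.
 */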
static int __write_queue(struct msm_vidc_iface_q_info *qinfo, u8 *packet,
	bool *rx_req_is_set)
{
	struct hfi_queue_header *queue;
	u32 packet_size_in_words, new_write_idx;
	u32 empty_space, read_idx, write_idx;
	u32 *write_ptr;

	if (!qinfo || !packet) {
		d_vpr_e("%s: invalid params %pK %pK\n",
			__func__, qinfo, packet);
		return -EINVAL;
	} else if (!qinfo->q_array.align_virtual_addr) {
		d_vpr_e("Queues have already been freed\n");
		return -EINVAL;
	}

	queue = (struct hfi_queue_header *)qinfo->q_hdr;
	if (!queue) {
		d_vpr_e("queue not present\n");
		return -ENOENT;
	}

	if (msm_vidc_debug & VIDC_PKT)
		__dump_packet(packet, __func__, qinfo);

	// TODO: handle writing packet
	//d_vpr_e("skip writing packet\n");
	//return 0;

	packet_size_in_words = (*(u32 *)packet) >> 2;
	if (!packet_size_in_words || packet_size_in_words >
			qinfo->q_array.mem_size >> 2) {
		d_vpr_e("Invalid packet size\n");
		return -ENODATA;
	}

	read_idx = queue->qhdr_read_idx;
	write_idx = queue->qhdr_write_idx;

	empty_space = (write_idx >= read_idx) ?
		((qinfo->q_array.mem_size >> 2) - (write_idx - read_idx)) :
		(read_idx - write_idx);
	if (empty_space <= packet_size_in_words) {
		queue->qhdr_tx_req = 1;
		d_vpr_e("Insufficient size (%d) to write (%d)\n",
			empty_space, packet_size_in_words);
		return -ENOTEMPTY;
	}

	queue->qhdr_tx_req = 0;

	new_write_idx = write_idx + packet_size_in_words;
	write_ptr = (u32 *)((qinfo->q_array.align_virtual_addr) +
			(write_idx << 2));
	if (write_ptr < (u32 *)qinfo->q_array.align_virtual_addr ||
			write_ptr > (u32 *)(qinfo->q_array.align_virtual_addr +
			qinfo->q_array.mem_size)) {
		d_vpr_e("Invalid write index\n");
		return -ENODATA;
	}

	if (new_write_idx < (qinfo->q_array.mem_size >> 2)) {
		memcpy(write_ptr, packet, packet_size_in_words << 2);
	} else {
		new_write_idx -= qinfo->q_array.mem_size >> 2;
		memcpy(write_ptr, packet, (packet_size_in_words -
			new_write_idx) << 2);
		memcpy((void *)qinfo->q_array.align_virtual_addr,
			packet + ((packet_size_in_words - new_write_idx) << 2),
			new_write_idx << 2);
	}

	/*
	 * Memory barrier to make sure packet is written before updating the
	 * write index
	 */
	mb();
	queue->qhdr_write_idx = new_write_idx;
	if (rx_req_is_set)
		*rx_req_is_set = queue->qhdr_rx_req == 1;
	/*
	 * Memory barrier to make sure write index is updated before an
	 * interrupt is raised on venus.
	 */
	mb();

	return 0;
}
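/*
 * Both __write_queue() above and __read_queue() below treat q_array as a
 * ring of 32-bit words. A packet that crosses the end of the ring is
 * copied in two parts: e.g. with a 1024-word ring, write_idx = 1020 and
 * an 8-word packet, words 0-3 land at index 1020, words 4-7 wrap to
 * index 0, and the new write index becomes 4.
 */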
static int __read_queue(struct msm_vidc_iface_q_info *qinfo, u8 *packet,
	u32 *pb_tx_req_is_set)
{
	struct hfi_queue_header *queue;
	u32 packet_size_in_words, new_read_idx;
	u32 *read_ptr;
	u32 receive_request = 0;
	u32 read_idx, write_idx;
	int rc = 0;

	if (!qinfo || !packet || !pb_tx_req_is_set) {
		d_vpr_e("%s: invalid params %pK %pK %pK\n",
			__func__, qinfo, packet, pb_tx_req_is_set);
		return -EINVAL;
	} else if (!qinfo->q_array.align_virtual_addr) {
		d_vpr_e("Queues have already been freed\n");
		return -EINVAL;
	}

	/* Memory barrier to make sure data is valid before reading it */
	mb();

	queue = (struct hfi_queue_header *)qinfo->q_hdr;
	if (!queue) {
		d_vpr_e("Queue memory is not allocated\n");
		return -ENOMEM;
	}

	/*
	 * Do not set the receive request for the debug queue: if set,
	 * Venus generates an interrupt for debug messages even when
	 * there is no response message available. In general the debug
	 * queue does not become full, since it is emptied on every
	 * interrupt from Venus; Venus will anyway generate an interrupt
	 * if it does fill up.
	 */
	if (queue->qhdr_type & HFI_Q_ID_CTRL_TO_HOST_MSG_Q)
		receive_request = 1;

	read_idx = queue->qhdr_read_idx;
	write_idx = queue->qhdr_write_idx;

	if (read_idx == write_idx) {
		queue->qhdr_rx_req = receive_request;
		/*
		 * mb() to ensure qhdr is updated in main memory
		 * so that venus reads the updated header values
		 */
		mb();
		*pb_tx_req_is_set = 0;
		d_vpr_l(
			"%s queue is empty, rx_req = %u, tx_req = %u, read_idx = %u\n",
			receive_request ? "message" : "debug",
			queue->qhdr_rx_req, queue->qhdr_tx_req,
			queue->qhdr_read_idx);
		return -ENODATA;
	}

	read_ptr = (u32 *)((qinfo->q_array.align_virtual_addr) +
			(read_idx << 2));
	if (read_ptr < (u32 *)qinfo->q_array.align_virtual_addr ||
			read_ptr > (u32 *)(qinfo->q_array.align_virtual_addr +
			qinfo->q_array.mem_size - sizeof(*read_ptr))) {
		d_vpr_e("Invalid read index\n");
		return -ENODATA;
	}

	packet_size_in_words = (*read_ptr) >> 2;
	if (!packet_size_in_words) {
		d_vpr_e("Zero packet size\n");
		return -ENODATA;
	}

	new_read_idx = read_idx + packet_size_in_words;
	if (((packet_size_in_words << 2) <= VIDC_IFACEQ_VAR_HUGE_PKT_SIZE) &&
			read_idx <= (qinfo->q_array.mem_size >> 2)) {
		if (new_read_idx < (qinfo->q_array.mem_size >> 2)) {
			memcpy(packet, read_ptr,
				packet_size_in_words << 2);
		} else {
			new_read_idx -= (qinfo->q_array.mem_size >> 2);
			memcpy(packet, read_ptr,
				(packet_size_in_words - new_read_idx) << 2);
			memcpy(packet + ((packet_size_in_words -
				new_read_idx) << 2),
				(u8 *)qinfo->q_array.align_virtual_addr,
				new_read_idx << 2);
		}
	} else {
		d_vpr_e("BAD packet received, read_idx: %#x, pkt_size: %d\n",
			read_idx, packet_size_in_words << 2);
		d_vpr_e("Dropping this packet\n");
		new_read_idx = write_idx;
		rc = -ENODATA;
	}

	if (new_read_idx != write_idx)
		queue->qhdr_rx_req = 0;
	else
		queue->qhdr_rx_req = receive_request;

	queue->qhdr_read_idx = new_read_idx;
	/*
	 * mb() to ensure qhdr is updated in main memory
	 * so that venus reads the updated header values
	 */
	mb();

	*pb_tx_req_is_set = (queue->qhdr_tx_req == 1) ? 1 : 0;

	if ((msm_vidc_debug & VIDC_PKT) &&
			!(queue->qhdr_type & HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q)) {
		__dump_packet(packet, __func__, qinfo);
	}

	return rc;
}
/* Writes into the cmdq without raising an interrupt */
static int __iface_cmdq_write_relaxed(struct msm_vidc_core *core,
	void *pkt, bool *requires_interrupt)
{
	struct msm_vidc_iface_q_info *q_info;
	//struct vidc_hal_cmd_pkt_hdr *cmd_packet;
	int result = -E2BIG;

	if (!core || !pkt) {
		d_vpr_e("%s: invalid params %pK %pK\n",
			__func__, core, pkt);
		return -EINVAL;
	}

	__strict_check(core);

	if (!__core_in_valid_state(core)) {
		d_vpr_e("%s: fw not in init state\n", __func__);
		result = -EINVAL;
		goto err_q_null;
	}

	//cmd_packet = (struct vidc_hal_cmd_pkt_hdr *)pkt;
	//core->last_packet_type = cmd_packet->packet_type;

	q_info = &core->iface_queues[VIDC_IFACEQ_CMDQ_IDX];
	if (!q_info) {
		d_vpr_e("cannot write to shared Q's\n");
		goto err_q_null;
	}

	if (!q_info->q_array.align_virtual_addr) {
		d_vpr_e("cannot write to shared CMD Q's\n");
		result = -ENODATA;
		goto err_q_null;
	}

	if (__resume(core)) {
		d_vpr_e("%s: Power on failed\n", __func__);
		goto err_q_write;
	}

	if (!__write_queue(q_info, (u8 *)pkt, requires_interrupt)) {
		__schedule_power_collapse_work(core);
		result = 0;
	} else {
		d_vpr_e("__iface_cmdq_write: queue full\n");
	}

err_q_write:
err_q_null:
	return result;
}
int __iface_cmdq_write(struct msm_vidc_core *core,
	void *pkt)
{
	bool needs_interrupt = false;
	int rc = __iface_cmdq_write_relaxed(core, pkt, &needs_interrupt);

	if (!rc && needs_interrupt)
		call_venus_op(core, raise_interrupt, core);

	return rc;
}

static int __iface_cmdq_write_intr(struct msm_vidc_core *core,
	void *pkt, bool allow)
{
	bool needs_interrupt = false;
	int rc = __iface_cmdq_write_relaxed(core, pkt, &needs_interrupt);

	if (!rc && allow && needs_interrupt)
		call_venus_op(core, raise_interrupt, core);

	return rc;
}
int __iface_msgq_read(struct msm_vidc_core *core, void *pkt)
{
	u32 tx_req_is_set = 0;
	int rc = 0;
	struct msm_vidc_iface_q_info *q_info;

	/* core is dereferenced below, so validate it along with pkt */
	if (!core || !pkt) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	__strict_check(core);

	if (!__core_in_valid_state(core)) {
		d_vpr_e("%s: fw not in init state\n", __func__);
		rc = -EINVAL;
		goto read_error_null;
	}

	q_info = &core->iface_queues[VIDC_IFACEQ_MSGQ_IDX];
	if (!q_info->q_array.align_virtual_addr) {
		d_vpr_e("cannot read from shared MSG Q's\n");
		rc = -ENODATA;
		goto read_error_null;
	}

	if (!__read_queue(q_info, (u8 *)pkt, &tx_req_is_set)) {
		if (tx_req_is_set) {
			//call_venus_op(core, raise_interrupt, core);
			d_vpr_e("%s: queue is full\n", __func__);
			rc = -EINVAL;
			goto read_error_null;
		}
		rc = 0;
	} else {
		rc = -ENODATA;
	}

read_error_null:
	return rc;
}
int __iface_dbgq_read(struct msm_vidc_core *core, void *pkt)
{
	u32 tx_req_is_set = 0;
	int rc = 0;
	struct msm_vidc_iface_q_info *q_info;

	/* core is dereferenced below, so validate it along with pkt */
	if (!core || !pkt) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	__strict_check(core);

	q_info = &core->iface_queues[VIDC_IFACEQ_DBGQ_IDX];
	if (!q_info->q_array.align_virtual_addr) {
		d_vpr_e("cannot read from shared DBG Q's\n");
		rc = -ENODATA;
		goto dbg_error_null;
	}

	if (!__read_queue(q_info, (u8 *)pkt, &tx_req_is_set)) {
		if (tx_req_is_set) {
			d_vpr_e("%s: queue is full\n", __func__);
			//call_venus_op(core, raise_interrupt, core);
			rc = -EINVAL;
			goto dbg_error_null;
		}
		rc = 0;
	} else {
		rc = -ENODATA;
	}

dbg_error_null:
	return rc;
}
static void __flush_debug_queue(struct msm_vidc_core *core,
	u8 *packet, u32 packet_size)
{
	u8 *log;
	struct hfi_debug_header *pkt;
	bool local_packet = false;
	enum vidc_msg_prio log_level = msm_vidc_debug;

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}

	if (!packet || !packet_size) {
		packet = kzalloc(VIDC_IFACEQ_VAR_HUGE_PKT_SIZE, GFP_KERNEL);
		if (!packet) {
			d_vpr_e("%s: fail to allocate\n", __func__);
			return;
		}
		packet_size = VIDC_IFACEQ_VAR_HUGE_PKT_SIZE;
		local_packet = true;

		/*
		 * A local packet is only used when an error has occurred,
		 * so it is good to print these logs via printk as well.
		 */
		log_level |= FW_PRINTK;
	}

	while (!__iface_dbgq_read(core, packet)) {
		pkt = (struct hfi_debug_header *)packet;

		if (pkt->size < sizeof(struct hfi_debug_header)) {
			d_vpr_e("%s: invalid pkt size %d\n",
				__func__, pkt->size);
			continue;
		}

		if (pkt->size >= packet_size) {
			d_vpr_e("%s: pkt size[%d] >= packet_size[%d]\n",
				__func__, pkt->size, packet_size);
			continue;
		}

		packet[pkt->size] = '\0';
		/*
		 * All fw messages start with a new-line character, which
		 * makes dprintk split the message across two lines in the
		 * kernel log. Skipping the first character prints the
		 * message on a single line.
		 */
		log = (u8 *)packet + sizeof(struct hfi_debug_header) + 1;
		dprintk_firmware(log_level, "%s", log);
	}

	if (local_packet)
		kfree(packet);
}
static int __sys_set_debug(struct msm_vidc_core *core, u32 debug)
{
	int rc = 0;

	rc = hfi_packet_sys_debug_config(core, core->packet,
			core->packet_size, debug);
	if (rc) {
		d_vpr_e("Debug mode setting to FW failed\n");
		return -ENOTEMPTY;
	}

	if (__iface_cmdq_write(core, core->packet))
		return -ENOTEMPTY;

	return 0;
}
/*
static int __sys_set_coverage(struct msm_vidc_core *core,
	u32 mode)
{
	int rc = 0;

	//rc = call_hfi_pkt_op(core, sys_coverage_config, pkt, mode);
	if (rc) {
		d_vpr_e("Coverage mode setting to FW failed\n");
		return -ENOTEMPTY;
	}

	//if (__iface_cmdq_write(core, pkt, sid)) {
	//	d_vpr_e("Failed to send coverage pkt to f/w\n");
	//	return -ENOTEMPTY;
	//}

	return 0;
}
*/
static int __sys_set_power_control(struct msm_vidc_core *core, bool enable)
{
	int rc = 0;

	if (!core->handoff_done)
		return rc;

	rc = hfi_packet_sys_intraframe_powercollapse(core,
			core->packet, core->packet_size, enable);
	if (rc)
		return rc;

	return __iface_cmdq_write(core, core->packet);
}
int __prepare_pc(struct msm_vidc_core *core)
{
	int rc = 0;

	rc = hfi_packet_sys_pc_prep(core, core->packet, core->packet_size);
	if (rc) {
		d_vpr_e("Failed to create sys pc prep pkt\n");
		goto err_pc_prep;
	}

	if (__iface_cmdq_write(core, core->packet))
		rc = -ENOTEMPTY;
	if (rc)
		d_vpr_e("Failed to prepare venus for power off\n");
err_pc_prep:
	return rc;
}
static int __power_collapse(struct msm_vidc_core *core, bool force)
{
	int rc = 0;

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	if (!core->power_enabled) {
		d_vpr_h("%s: Power already disabled\n", __func__);
		goto exit;
	}

	if (!__core_in_valid_state(core)) {
		d_vpr_e("%s: Core not in init state\n", __func__);
		return -EINVAL;
	}

	rc = call_venus_op(core, prepare_pc, core);
	if (rc)
		goto skip_power_off;

	__flush_debug_queue(core, core->packet, core->packet_size);

	rc = __suspend(core);
	if (rc)
		d_vpr_e("Failed __suspend\n");

exit:
	return rc;

skip_power_off:
	d_vpr_e("%s: skipped\n", __func__);
	return -EAGAIN;
}
static int __protect_cp_mem(struct msm_vidc_core *core)
{
	struct tzbsp_memprot memprot;
	int rc = 0;
	struct context_bank_info *cb;

	if (!core)
		return -EINVAL;

	memprot.cp_start = 0x0;
	memprot.cp_size = 0x0;
	memprot.cp_nonpixel_start = 0x0;
	memprot.cp_nonpixel_size = 0x0;
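	/*
	 * The content-protected (CP) region is assumed to span from
	 * cp_start (0) up to the base of the non-secure "venus_ns"
	 * context bank, so that bank's start address doubles as the
	 * CP region size below.
	 */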
	list_for_each_entry(cb, &core->dt->context_banks, list) {
		if (!strcmp(cb->name, "venus_ns")) {
			memprot.cp_size = cb->addr_range.start;
			d_vpr_h("%s: memprot.cp_size: %#x\n",
				__func__, memprot.cp_size);
		}

		if (!strcmp(cb->name, "venus_sec_non_pixel")) {
			memprot.cp_nonpixel_start = cb->addr_range.start;
			memprot.cp_nonpixel_size = cb->addr_range.size;
			d_vpr_h("%s: cp_nonpixel_start: %#x size: %#x\n",
				__func__, memprot.cp_nonpixel_start,
				memprot.cp_nonpixel_size);
		}
	}

	rc = qcom_scm_mem_protect_video_var(memprot.cp_start, memprot.cp_size,
			memprot.cp_nonpixel_start, memprot.cp_nonpixel_size);
	if (rc)
		d_vpr_e("Failed to protect memory(%d)\n", rc);

	return rc;
}
#if 0 // TODO
static int __core_set_resource(struct msm_vidc_core *core,
		struct vidc_resource_hdr *resource_hdr, void *resource_value)
{
	int rc = 0;

	if (!core || !resource_hdr || !resource_value) {
		d_vpr_e("%s: invalid params %pK %pK %pK\n", __func__,
			core, resource_hdr, resource_value);
		return -EINVAL;
	}

	//rc = hfi_packet_sys_set_resource(core, core->packet, core->packet_size,
	//		resource_hdr, resource_value);
	if (rc) {
		d_vpr_e("set_res: failed to create packet\n");
		goto err_create_pkt;
	}

	//rc = __iface_cmdq_write(core, core->packet);
	if (rc)
		rc = -ENOTEMPTY;

err_create_pkt:
	return rc;
}

static int __core_release_resource(struct msm_vidc_core *core,
		struct vidc_resource_hdr *resource_hdr)
{
	int rc = 0;

	if (!core || !resource_hdr) {
		d_vpr_e("%s: invalid params %pK %pK\n",
			__func__, core, resource_hdr);
		return -EINVAL;
	}

	//rc = hfi_packet_sys_release_resource(core, core->packet, core->packet_size, resource_hdr);
	if (rc) {
		d_vpr_e("release_res: failed to create packet\n");
		goto err_create_pkt;
	}

	//rc = __iface_cmdq_write(core, core->packet);
	if (rc)
		rc = -ENOTEMPTY;

err_create_pkt:
	return rc;
}
#endif
static void __deinit_clocks(struct msm_vidc_core *core)
{
	struct clock_info *cl;

	core->power.clk_freq = 0;
	venus_hfi_for_each_clock_reverse(core, cl) {
		if (cl->clk) {
			clk_put(cl->clk);
			cl->clk = NULL;
		}
	}
}

static int __init_clocks(struct msm_vidc_core *core)
{
	int rc = 0;
	struct clock_info *cl = NULL;

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	venus_hfi_for_each_clock(core, cl) {
		d_vpr_h("%s: scalable? %d, count %d\n",
			cl->name, cl->has_scaling, cl->count);
	}

	venus_hfi_for_each_clock(core, cl) {
		if (!cl->clk) {
			cl->clk = clk_get(&core->pdev->dev, cl->name);
			if (IS_ERR_OR_NULL(cl->clk)) {
				d_vpr_e("Failed to get clock: %s\n", cl->name);
				rc = PTR_ERR(cl->clk) ?
					PTR_ERR(cl->clk) : -EINVAL;
				cl->clk = NULL;
				goto err_clk_get;
			}
		}
	}
	core->power.clk_freq = 0;

	return 0;

err_clk_get:
	__deinit_clocks(core);
	return rc;
}
static void __deregister_mmrm(struct msm_vidc_core *core)
{
	struct clock_info *cl;

	venus_hfi_for_each_clock(core, cl) {
		if (cl->has_scaling && cl->mmrm_client) {
			mmrm_client_deregister(cl->mmrm_client);
			cl->mmrm_client = NULL;
		}
	}
}
static int __register_mmrm(struct msm_vidc_core *core)
{
	int rc = 0;
	struct clock_info *cl;

	venus_hfi_for_each_clock(core, cl) {
		struct mmrm_client_desc desc;
		char *name = (char *)desc.client_info.desc.name;
		// TODO: set notifier data vals
		struct mmrm_client_notifier_data notifier_data = {
			MMRM_CLIENT_RESOURCE_VALUE_CHANGE,
			{{0, 0}},
			NULL};
		// TODO: add callback fn
		desc.notifier_callback_fn = NULL;

		if (!cl->has_scaling)
			continue;

		if (IS_ERR_OR_NULL(cl->clk)) {
			d_vpr_e("%s: Invalid clock: %s\n", __func__, cl->name);
			rc = PTR_ERR(cl->clk) ? PTR_ERR(cl->clk) : -EINVAL;
			goto err_register_mmrm;
		}

		desc.client_type = MMRM_CLIENT_CLOCK;
		desc.client_info.desc.client_domain = MMRM_CLIENT_DOMAIN_VIDEO;
		desc.client_info.desc.client_id = cl->clk_id;
		strlcpy(name, cl->name, sizeof(desc.client_info.desc.name));
		desc.client_info.desc.clk = cl->clk;
		desc.priority = MMRM_CLIENT_PRIOR_LOW;
		desc.pvt_data = notifier_data.pvt_data;

		d_vpr_h("%s: domain(%d) cid(%d) name(%s) clk(%pK)\n",
			__func__,
			desc.client_info.desc.client_domain,
			desc.client_info.desc.client_id,
			desc.client_info.desc.name,
			desc.client_info.desc.clk);
		d_vpr_h("%s: type(%d) pri(%d) pvt(%pK) notifier(%pK)\n",
			__func__,
			desc.client_type,
			desc.priority,
			desc.pvt_data,
			desc.notifier_callback_fn);

		cl->mmrm_client = mmrm_client_register(&desc);
		if (!cl->mmrm_client) {
			/* set rc before logging so a real error is printed */
			rc = -EINVAL;
			d_vpr_e("%s: Failed to register clk(%s): %d\n",
				__func__, cl->name, rc);
			goto err_register_mmrm;
		}
	}

	return 0;

err_register_mmrm:
	__deregister_mmrm(core);
	return rc;
}
static int __handle_reset_clk(struct msm_vidc_core *core,
		int reset_index, enum reset_state state)
{
	int rc = 0;
	struct msm_vidc_dt *dt = core->dt;
	struct reset_control *rst;
	struct reset_set *rst_set = &dt->reset_set;

	if (!rst_set->reset_tbl)
		return 0;

	rst = rst_set->reset_tbl[reset_index].rst;
	d_vpr_h("reset_clk: name %s reset_state %d rst %pK\n",
		rst_set->reset_tbl[reset_index].name, state, rst);

	switch (state) {
	case INIT:
		if (rst)
			goto skip_reset_init;

		rst = devm_reset_control_get(&core->pdev->dev,
				rst_set->reset_tbl[reset_index].name);
		if (IS_ERR(rst))
			rc = PTR_ERR(rst);

		rst_set->reset_tbl[reset_index].rst = rst;
		break;
	case ASSERT:
		if (!rst) {
			/* PTR_ERR(NULL) would be 0; return a real error */
			rc = -EINVAL;
			goto failed_to_reset;
		}

		rc = reset_control_assert(rst);
		break;
	case DEASSERT:
		if (!rst) {
			rc = -EINVAL;
			goto failed_to_reset;
		}
		rc = reset_control_deassert(rst);
		break;
	default:
		d_vpr_e("%s: invalid reset request\n", __func__);
		rc = -EINVAL;
		goto failed_to_reset;
	}

	return 0;

skip_reset_init:
failed_to_reset:
	return rc;
}
void __disable_unprepare_clks(struct msm_vidc_core *core)
{
	struct clock_info *cl;

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}

	venus_hfi_for_each_clock_reverse(core, cl) {
		if (!cl->clk)
			continue;
		d_vpr_h("Clock: %s disable and unprepare\n",
			cl->name);
		if (!__clk_is_enabled(cl->clk))
			d_vpr_e("%s: clock %s already disabled\n",
				__func__, cl->name);
		clk_disable_unprepare(cl->clk);
		cl->prev = 0;
	}
}
int __reset_ahb2axi_bridge(struct msm_vidc_core *core)
{
	int rc, i;

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < core->dt->reset_set.count; i++) {
		rc = __handle_reset_clk(core, i, ASSERT);
		if (rc) {
			d_vpr_e("failed to assert reset clocks\n");
			goto failed_to_reset;
		}

		/* hold the reset asserted briefly before deasserting */
		usleep_range(1000, 1100);
	}

	for (i = 0; i < core->dt->reset_set.count; i++) {
		rc = __handle_reset_clk(core, i, DEASSERT);
		if (rc) {
			d_vpr_e("failed to deassert reset clocks\n");
			goto failed_to_reset;
		}
	}

	return 0;

failed_to_reset:
	return rc;
}
static int __prepare_enable_clks(struct msm_vidc_core *core)
{
	struct clock_info *cl = NULL;
	int rc = 0, c = 0;

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	venus_hfi_for_each_clock(core, cl) {
		if (!cl->clk) {
			d_vpr_e("%s: invalid clock\n", __func__);
			rc = -EINVAL;
			goto fail_clk_enable;
		}
		/*
		 * For the clocks we control, set the rate prior to preparing
		 * them. Since we don't really have a load at this point,
		 * scale it to the lowest frequency possible.
		 */
		if (cl->has_scaling)
			__set_clk_rate(core, cl,
				clk_round_rate(cl->clk, 0));

		rc = clk_prepare_enable(cl->clk);
		if (rc) {
			d_vpr_e("Failed to enable clocks\n");
			goto fail_clk_enable;
		}
		if (!__clk_is_enabled(cl->clk))
			d_vpr_e("%s: clock %s not enabled\n",
				__func__, cl->name);

		c++;
		d_vpr_h("Clock: %s prepared and enabled\n", cl->name);
	}

	call_venus_op(core, clock_config_on_enable, core);
	return rc;

fail_clk_enable:
	venus_hfi_for_each_clock_reverse_continue(core, cl, c) {
		if (!cl->clk)
			continue;
		d_vpr_e("Clock: %s disable and unprepare\n",
			cl->name);
		clk_disable_unprepare(cl->clk);
		cl->prev = 0;
	}

	return rc;
}
static void __deinit_bus(struct msm_vidc_core *core)
{
	struct bus_info *bus = NULL;

	if (!core)
		return;

	core->power.bw_ddr = 0;
	core->power.bw_llcc = 0;

	venus_hfi_for_each_bus_reverse(core, bus) {
		if (!bus->path)
			continue;
		icc_put(bus->path);
		bus->path = NULL;
	}
}

static int __init_bus(struct msm_vidc_core *core)
{
	struct bus_info *bus = NULL;
	int rc = 0;

	if (!core) {
		d_vpr_e("%s: invalid param\n", __func__);
		return -EINVAL;
	}

	venus_hfi_for_each_bus(core, bus) {
		if (!strcmp(bus->name, "venus-llcc")) {
			if (msm_vidc_syscache_disable) {
				d_vpr_h("Skipping LLC bus init: %s\n",
					bus->name);
				continue;
			}
		}

		bus->path = of_icc_get(bus->dev, bus->name);
		if (IS_ERR_OR_NULL(bus->path)) {
			rc = PTR_ERR(bus->path) ?
				PTR_ERR(bus->path) : -EBADHANDLE;
			d_vpr_e("Failed to register bus %s: %d\n",
				bus->name, rc);
			bus->path = NULL;
			goto err_add_dev;
		}
	}

	return 0;

err_add_dev:
	__deinit_bus(core);
	return rc;
}
static void __deinit_regulators(struct msm_vidc_core *core)
{
	struct regulator_info *rinfo = NULL;

	venus_hfi_for_each_regulator_reverse(core, rinfo) {
		if (rinfo->regulator) {
			regulator_put(rinfo->regulator);
			rinfo->regulator = NULL;
		}
	}
}

static int __init_regulators(struct msm_vidc_core *core)
{
	int rc = 0;
	struct regulator_info *rinfo = NULL;

	venus_hfi_for_each_regulator(core, rinfo) {
		rinfo->regulator = regulator_get(&core->pdev->dev,
				rinfo->name);
		if (IS_ERR_OR_NULL(rinfo->regulator)) {
			rc = PTR_ERR(rinfo->regulator) ?
				PTR_ERR(rinfo->regulator) : -EBADHANDLE;
			d_vpr_e("Failed to get regulator: %s\n", rinfo->name);
			rinfo->regulator = NULL;
			goto err_reg_get;
		}
	}

	return 0;

err_reg_get:
	__deinit_regulators(core);
	return rc;
}
  1376. static void __deinit_subcaches(struct msm_vidc_core *core)
  1377. {
  1378. struct subcache_info *sinfo = NULL;
  1379. if (!core) {
  1380. d_vpr_e("%s: invalid params\n", __func__);
  1381. goto exit;
  1382. }
  1383. if (!is_sys_cache_present(core))
  1384. goto exit;
  1385. venus_hfi_for_each_subcache_reverse(core, sinfo) {
  1386. if (sinfo->subcache) {
  1387. d_vpr_h("deinit_subcaches: %s\n", sinfo->name);
  1388. llcc_slice_putd(sinfo->subcache);
  1389. sinfo->subcache = NULL;
  1390. }
  1391. }
  1392. exit:
  1393. return;
  1394. }
  1395. static int __init_subcaches(struct msm_vidc_core *core)
  1396. {
  1397. int rc = 0;
  1398. struct subcache_info *sinfo = NULL;
  1399. if (!core) {
  1400. d_vpr_e("%s: invalid params\n", __func__);
  1401. return -EINVAL;
  1402. }
  1403. if (!is_sys_cache_present(core))
  1404. return 0;
  1405. venus_hfi_for_each_subcache(core, sinfo) {
  1406. if (!strcmp("vidsc0", sinfo->name)) {
  1407. sinfo->subcache = llcc_slice_getd(LLCC_VIDSC0);
  1408. } else if (!strcmp("vidsc1", sinfo->name)) {
  1409. sinfo->subcache = llcc_slice_getd(LLCC_VIDSC1);
  1410. } else if (!strcmp("vidscfw", sinfo->name)) {
  1411. sinfo->subcache = llcc_slice_getd(LLCC_VIDFW);
  1412. } else {
  1413. d_vpr_e("Invalid subcache name %s\n",
  1414. sinfo->name);
  1415. }
  1416. if (IS_ERR_OR_NULL(sinfo->subcache)) {
  1417. rc = PTR_ERR(sinfo->subcache) ?
  1418. PTR_ERR(sinfo->subcache) : -EBADHANDLE;
  1419. d_vpr_e("init_subcaches: invalid subcache: %s rc %d\n",
  1420. sinfo->name, rc);
  1421. sinfo->subcache = NULL;
  1422. goto err_subcache_get;
  1423. }
  1424. d_vpr_h("init_subcaches: %s\n", sinfo->name);
  1425. }
  1426. return 0;
  1427. err_subcache_get:
  1428. __deinit_subcaches(core);
  1429. return rc;
  1430. }
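/*
 * __init_resources() - bring up all platform resources in order:
 * regulators, clocks, mmrm registration, reset clocks, buses and
 * subcaches. A subcache failure is logged and its status returned
 * without unwinding the earlier steps; every other failure unwinds
 * the resources initialized before the failing step.
 */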
  1431. static int __init_resources(struct msm_vidc_core *core)
  1432. {
  1433. int i, rc = 0;
  1434. rc = __init_regulators(core);
  1435. if (rc) {
  1436. d_vpr_e("Failed to get all regulators\n");
  1437. return -ENODEV;
  1438. }
  1439. rc = __init_clocks(core);
  1440. if (rc) {
  1441. d_vpr_e("Failed to init clocks\n");
  1442. rc = -ENODEV;
  1443. goto err_init_clocks;
  1444. }
  1445. rc = __register_mmrm(core);
  1446. if (rc) {
  1447. d_vpr_e("Failed to register mmrm\n");
  1448. rc = -ENODEV;
  1449. goto err_init_mmrm;
  1450. }
  1451. for (i = 0; i < core->dt->reset_set.count; i++) {
  1452. rc = __handle_reset_clk(core, i, INIT);
  1453. if (rc) {
  1454. d_vpr_e("Failed to init reset clocks\n");
  1455. rc = -ENODEV;
  1456. goto err_init_reset_clk;
  1457. }
  1458. }
  1459. rc = __init_bus(core);
  1460. if (rc) {
  1461. d_vpr_e("Failed to init bus: %d\n", rc);
  1462. goto err_init_bus;
  1463. }
  1464. rc = __init_subcaches(core);
  1465. if (rc)
  1466. d_vpr_e("Failed to init subcaches: %d\n", rc);
  1467. return rc;
  1468. err_init_reset_clk:
  1469. err_init_bus:
  1470. __deregister_mmrm(core);
  1471. err_init_mmrm:
  1472. __deinit_clocks(core);
  1473. err_init_clocks:
  1474. __deinit_regulators(core);
  1475. return rc;
  1476. }
  1477. static void __deinit_resources(struct msm_vidc_core *core)
  1478. {
  1479. __deinit_subcaches(core);
  1480. __deinit_bus(core);
  1481. __deregister_mmrm(core);
  1482. __deinit_clocks(core);
  1483. __deinit_regulators(core);
  1484. }
  1485. static int __disable_regulator(struct regulator_info *rinfo,
  1486. struct msm_vidc_core *core)
  1487. {
  1488. int rc = 0;
  1489. if (!rinfo->regulator) {
  1490. d_vpr_e("%s: invalid regulator\n", __func__);
  1491. return -EINVAL;
  1492. }
  1493. d_vpr_h("Disabling regulator %s\n", rinfo->name);
1494. /*
1495. * This call is needed. The driver needs to acquire control back
1496. * from hardware in order to disable the regulator; otherwise the
1497. * behavior is undefined.
1498. */
  1499. rc = __acquire_regulator(core, rinfo);
  1500. if (rc) {
  1501. /*
  1502. * This is somewhat fatal, but nothing we can do
  1503. * about it. We can't disable the regulator w/o
  1504. * getting it back under s/w control
  1505. */
  1506. d_vpr_e("Failed to acquire control on %s\n",
  1507. rinfo->name);
  1508. goto disable_regulator_failed;
  1509. }
  1510. core->handoff_done = 0;
  1511. if (!regulator_is_enabled(rinfo->regulator))
  1512. d_vpr_e("%s: regulator %s already disabled\n",
  1513. __func__, rinfo->name);
  1514. rc = regulator_disable(rinfo->regulator);
  1515. if (rc) {
  1516. d_vpr_e("Failed to disable %s: %d\n",
  1517. rinfo->name, rc);
  1518. goto disable_regulator_failed;
  1519. }
  1520. return 0;
  1521. disable_regulator_failed:
  1522. /* Bring attention to this issue */
  1523. __fatal_error(core, true);
  1524. return rc;
  1525. }
  1526. static int __enable_hw_power_collapse(struct msm_vidc_core *core)
  1527. {
  1528. int rc = 0;
  1529. rc = __hand_off_regulators(core);
  1530. if (rc)
  1531. d_vpr_e("%s: Failed to enable HW power collapse %d\n",
  1532. __func__, rc);
  1533. return rc;
  1534. }
  1535. static int __enable_regulators(struct msm_vidc_core *core)
  1536. {
  1537. int rc = 0, c = 0;
  1538. struct regulator_info *rinfo;
  1539. d_vpr_h("Enabling regulators\n");
  1540. venus_hfi_for_each_regulator(core, rinfo) {
  1541. if (!rinfo->regulator) {
  1542. d_vpr_e("%s: invalid regulator\n", __func__);
  1543. rc = -EINVAL;
  1544. goto err_reg_enable_failed;
  1545. }
  1546. rc = regulator_enable(rinfo->regulator);
  1547. if (rc) {
  1548. d_vpr_e("Failed to enable %s: %d\n",
  1549. rinfo->name, rc);
  1550. goto err_reg_enable_failed;
  1551. }
  1552. if (!regulator_is_enabled(rinfo->regulator))
  1553. d_vpr_e("%s: regulator %s not enabled\n",
  1554. __func__, rinfo->name);
  1555. d_vpr_h("Enabled regulator %s\n",
  1556. rinfo->name);
  1557. c++;
  1558. }
  1559. return 0;
  1560. err_reg_enable_failed:
  1561. venus_hfi_for_each_regulator_reverse_continue(core, rinfo, c) {
  1562. if (!rinfo->regulator)
  1563. continue;
  1564. __disable_regulator(rinfo, core);
  1565. }
  1566. return rc;
  1567. }
  1568. int __disable_regulators(struct msm_vidc_core *core)
  1569. {
  1570. struct regulator_info *rinfo;
  1571. d_vpr_h("Disabling regulators\n");
  1572. venus_hfi_for_each_regulator_reverse(core, rinfo)
  1573. __disable_regulator(rinfo, core);
  1574. return 0;
  1575. }
  1576. static int __release_subcaches(struct msm_vidc_core *core)
  1577. {
  1578. int rc = 0;
1579. struct subcache_info *sinfo;
  1580. struct hfi_buffer buf;
  1581. if (msm_vidc_syscache_disable || !is_sys_cache_present(core))
  1582. return 0;
  1583. if (!core->dt->sys_cache_res_set) {
  1584. d_vpr_h("Subcaches not set to Venus\n");
  1585. return 0;
  1586. }
  1587. rc = hfi_create_header(core->packet, core->packet_size,
  1588. 0, core->header_id++);
  1589. if (rc)
  1590. return rc;
  1591. memset(&buf, 0, sizeof(struct hfi_buffer));
  1592. buf.type = HFI_BUFFER_SUBCACHE;
  1593. buf.flags = HFI_BUF_HOST_FLAG_RELEASE;
  1594. venus_hfi_for_each_subcache_reverse(core, sinfo) {
  1595. if (sinfo->isactive) {
  1596. buf.index = sinfo->subcache->slice_id;
  1597. buf.buffer_size = sinfo->subcache->slice_size;
  1598. rc = hfi_create_packet(core->packet,
  1599. core->packet_size,
  1600. HFI_CMD_BUFFER,
  1601. HFI_BUF_HOST_FLAG_NONE,
  1602. HFI_PAYLOAD_STRUCTURE,
  1603. HFI_PORT_NONE,
  1604. core->packet_id++,
  1605. &buf,
  1606. sizeof(buf));
  1607. if (rc)
  1608. return rc;
  1609. }
  1610. }
1611. /* Release the activated subcaches from Venus */
  1612. rc = __iface_cmdq_write(core, core->packet);
  1613. if (rc)
  1614. return rc;
  1615. venus_hfi_for_each_subcache_reverse(core, sinfo) {
  1616. if (sinfo->isactive) {
  1617. sinfo->isset = false;
  1618. d_vpr_h("Release Subcache id %d size %d done\n",
  1619. sinfo->subcache->slice_id,
  1620. sinfo->subcache->slice_size);
  1621. }
  1622. }
  1623. core->dt->sys_cache_res_set = false;
  1624. return 0;
  1625. }
  1626. static int __disable_subcaches(struct msm_vidc_core *core)
  1627. {
  1628. struct subcache_info *sinfo;
  1629. int rc = 0;
  1630. if (msm_vidc_syscache_disable || !is_sys_cache_present(core))
  1631. return 0;
  1632. /* De-activate subcaches */
  1633. venus_hfi_for_each_subcache_reverse(core, sinfo) {
  1634. if (sinfo->isactive) {
  1635. d_vpr_h("De-activate subcache %s\n",
  1636. sinfo->name);
  1637. rc = llcc_slice_deactivate(sinfo->subcache);
  1638. if (rc) {
  1639. d_vpr_e("Failed to de-activate %s: %d\n",
  1640. sinfo->name, rc);
  1641. }
  1642. sinfo->isactive = false;
  1643. }
  1644. }
  1645. return 0;
  1646. }
  1647. static int __enable_subcaches(struct msm_vidc_core *core)
  1648. {
  1649. int rc = 0;
  1650. u32 c = 0;
  1651. struct subcache_info *sinfo;
  1652. if (msm_vidc_syscache_disable || !is_sys_cache_present(core))
  1653. return 0;
  1654. /* Activate subcaches */
  1655. venus_hfi_for_each_subcache(core, sinfo) {
  1656. rc = llcc_slice_activate(sinfo->subcache);
  1657. if (rc) {
  1658. d_vpr_e("Failed to activate %s: %d\n",
  1659. sinfo->name, rc);
  1660. __fatal_error(core, true);
  1661. goto err_activate_fail;
  1662. }
  1663. sinfo->isactive = true;
  1664. d_vpr_h("Activated subcache %s\n", sinfo->name);
  1665. c++;
  1666. }
  1667. d_vpr_h("Activated %d Subcaches to Venus\n", c);
  1668. return 0;
  1669. err_activate_fail:
  1670. __release_subcaches(core);
  1671. __disable_subcaches(core);
  1672. return rc;
  1673. }
  1674. static int __set_subcaches(struct msm_vidc_core *core)
  1675. {
  1676. int rc = 0;
  1677. struct subcache_info *sinfo;
  1678. struct hfi_buffer buf;
  1679. if (msm_vidc_syscache_disable ||
  1680. !is_sys_cache_present(core)) {
  1681. return 0;
  1682. }
  1683. if (core->dt->sys_cache_res_set) {
  1684. d_vpr_h("Subcaches already set to Venus\n");
  1685. return 0;
  1686. }
  1687. rc = hfi_create_header(core->packet, core->packet_size,
  1688. 0, core->header_id++);
  1689. if (rc)
1690. goto err_fail_set_subcaches;
  1691. memset(&buf, 0, sizeof(struct hfi_buffer));
  1692. buf.type = HFI_BUFFER_SUBCACHE;
  1693. buf.flags = HFI_BUF_HOST_FLAG_NONE;
  1694. venus_hfi_for_each_subcache(core, sinfo) {
  1695. if (sinfo->isactive) {
  1696. buf.index = sinfo->subcache->slice_id;
  1697. buf.buffer_size = sinfo->subcache->slice_size;
  1698. rc = hfi_create_packet(core->packet,
  1699. core->packet_size,
  1700. HFI_CMD_BUFFER,
  1701. HFI_BUF_HOST_FLAG_NONE,
  1702. HFI_PAYLOAD_STRUCTURE,
  1703. HFI_PORT_NONE,
  1704. core->packet_id++,
  1705. &buf,
  1706. sizeof(buf));
  1707. if (rc)
1708. goto err_fail_set_subcaches;
  1709. }
  1710. }
  1711. /* Set resource to Venus for activated subcaches */
  1712. rc = __iface_cmdq_write(core, core->packet);
  1713. if (rc)
1714. goto err_fail_set_subcaches;
  1715. venus_hfi_for_each_subcache(core, sinfo) {
  1716. if (sinfo->isactive) {
  1717. sinfo->isset = true;
  1718. d_vpr_h("Set Subcache id %d size %d done\n",
  1719. sinfo->subcache->slice_id,
  1720. sinfo->subcache->slice_size);
  1721. }
  1722. }
  1723. core->dt->sys_cache_res_set = true;
  1724. return 0;
1725. err_fail_set_subcaches:
  1726. __disable_subcaches(core);
  1727. return rc;
  1728. }
  1729. /*
  1730. static int __set_ubwc_config(struct msm_vidc_core *core)
  1731. {
  1732. int rc = 0;
  1733. if (!core->platform->data.ubwc_config) {
  1734. d_vpr_h("%s: invalid ubwc config\n", __func__);
  1735. return -EINVAL;
  1736. }
  1737. //rc = hfi_packet_sys_ubwc_config(core, core->packet, core->packet_size);
  1738. if (rc)
  1739. return rc;
  1740. //rc = __iface_cmdq_write(core, core->packet));
  1741. if (rc)
  1742. return rc;
  1743. d_vpr_h("Configured UBWC Config\n");
  1744. return rc;
  1745. }
  1746. */
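/*
 * __venus_power_off()/__venus_power_on() - toggle core power.
 *
 * Power-on votes buses at INT_MAX bandwidth, enables the GDSC
 * regulators, resets the ahb2axi bridge, enables clocks at a minimal
 * rate, reprograms the registers lost across the regulator cycle and
 * finally re-enables the interrupt. Both track core->power_enabled so
 * repeated calls are no-ops.
 */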
1747. static int __venus_power_off(struct msm_vidc_core *core)
  1748. {
  1749. int rc = 0;
  1750. if (!core->power_enabled)
  1751. return 0;
  1752. rc = call_venus_op(core, power_off, core);
  1753. if (rc) {
  1754. d_vpr_e("Failed to power off, err: %d\n", rc);
  1755. return rc;
  1756. }
  1757. core->power_enabled = false;
  1758. return rc;
  1759. }
  1760. static int __venus_power_on(struct msm_vidc_core *core)
  1761. {
  1762. int rc = 0;
  1763. if (core->power_enabled) {
  1764. d_vpr_e("%s: Skip power on, core already enabled.\n", __func__);
  1765. return 0;
  1766. }
  1767. core->power_enabled = true;
  1768. /* Vote for all hardware resources */
  1769. rc = __vote_buses(core, INT_MAX, INT_MAX);
  1770. if (rc) {
  1771. d_vpr_e("Failed to vote buses, err: %d\n", rc);
  1772. goto fail_vote_buses;
  1773. }
  1774. rc = __enable_regulators(core);
  1775. if (rc) {
  1776. d_vpr_e("Failed to enable GDSC, err = %d\n", rc);
  1777. goto fail_enable_gdsc;
  1778. }
  1779. rc = call_venus_op(core, reset_ahb2axi_bridge, core);
  1780. if (rc) {
  1781. d_vpr_e("Failed to reset ahb2axi: %d\n", rc);
  1782. goto fail_enable_clks;
  1783. }
  1784. rc = __prepare_enable_clks(core);
  1785. if (rc) {
  1786. d_vpr_e("Failed to enable clocks: %d\n", rc);
  1787. goto fail_enable_clks;
  1788. }
  1789. rc = __scale_clocks(core);
  1790. if (rc) {
  1791. d_vpr_e("Failed to scale clocks, performance might be affected\n");
  1792. rc = 0;
  1793. }
  1794. /*
  1795. * Re-program all of the registers that get reset as a result of
  1796. * regulator_disable() and _enable()
  1797. */
  1798. __set_registers(core);
  1799. call_venus_op(core, interrupt_init, core);
  1800. core->intr_status = 0;
  1801. enable_irq(core->dt->irq);
  1802. return rc;
  1803. fail_enable_clks:
  1804. __disable_regulators(core);
  1805. fail_enable_gdsc:
  1806. __unvote_buses(core);
  1807. fail_vote_buses:
  1808. core->power_enabled = false;
  1809. return rc;
  1810. }
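/*
 * __suspend() - power collapse the core.
 *
 * Moves TrustZone to TZBSP_VIDEO_STATE_SUSPEND, de-activates the
 * subcaches and powers the hardware off. The matching __resume() path
 * below reverses these steps and re-boots the firmware.
 */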
  1811. static int __suspend(struct msm_vidc_core *core)
  1812. {
  1813. int rc = 0;
  1814. if (!core) {
  1815. d_vpr_e("%s: invalid params\n", __func__);
  1816. return -EINVAL;
  1817. } else if (!core->power_enabled) {
  1818. d_vpr_h("Power already disabled\n");
  1819. return 0;
  1820. }
  1821. d_vpr_h("Entering suspend\n");
  1822. rc = __tzbsp_set_video_state(TZBSP_VIDEO_STATE_SUSPEND);
  1823. if (rc) {
  1824. d_vpr_e("Failed to suspend video core %d\n", rc);
  1825. goto err_tzbsp_suspend;
  1826. }
  1827. __disable_subcaches(core);
  1828. call_venus_op(core, power_off, core);
  1829. d_vpr_h("Venus power off\n");
  1830. return rc;
  1831. err_tzbsp_suspend:
  1832. return rc;
  1833. }
  1834. static int __resume(struct msm_vidc_core *core)
  1835. {
  1836. int rc = 0;
  1837. if (!core) {
  1838. d_vpr_e("%s: invalid params\n", __func__);
  1839. return -EINVAL;
  1840. } else if (core->power_enabled) {
  1841. goto exit;
  1842. } else if (!__core_in_valid_state(core)) {
  1843. d_vpr_e("%s: core not in valid state\n", __func__);
  1844. return -EINVAL;
  1845. }
  1846. d_vpr_h("Resuming from power collapse\n");
  1847. rc = __venus_power_on(core);
  1848. if (rc) {
  1849. d_vpr_e("Failed to power on venus\n");
  1850. goto err_venus_power_on;
  1851. }
  1852. /* Reboot the firmware */
  1853. rc = __tzbsp_set_video_state(TZBSP_VIDEO_STATE_RESUME);
  1854. if (rc) {
  1855. d_vpr_e("Failed to resume video core %d\n", rc);
  1856. goto err_set_video_state;
  1857. }
  1858. /*
  1859. * Hand off control of regulators to h/w _after_ loading fw.
  1860. * Note that the GDSC will turn off when switching from normal
  1861. * (s/w triggered) to fast (HW triggered) unless the h/w vote is
  1862. * present.
  1863. */
  1864. if (__enable_hw_power_collapse(core))
  1865. d_vpr_e("Failed to enabled inter-frame PC\n");
  1866. call_venus_op(core, setup_ucregion_memmap, core);
  1867. /* Wait for boot completion */
  1868. rc = call_venus_op(core, boot_firmware, core);
  1869. if (rc) {
  1870. d_vpr_e("Failed to reset venus core\n");
  1871. goto err_reset_core;
  1872. }
  1873. __sys_set_debug(core, (msm_vidc_debug & FW_LOGMASK) >> FW_LOGSHIFT);
  1874. rc = __enable_subcaches(core);
  1875. if (rc) {
  1876. d_vpr_e("Failed to activate subcache\n");
  1877. goto err_reset_core;
  1878. }
  1879. __set_subcaches(core);
  1880. __sys_set_power_control(core, true);
  1881. d_vpr_h("Resumed from power collapse\n");
  1882. exit:
  1883. /* Don't reset skip_pc_count for SYS_PC_PREP cmd */
  1884. //if (core->last_packet_type != HFI_CMD_SYS_PC_PREP)
  1885. // core->skip_pc_count = 0;
  1886. return rc;
  1887. err_reset_core:
  1888. __tzbsp_set_video_state(TZBSP_VIDEO_STATE_SUSPEND);
  1889. err_set_video_state:
  1890. call_venus_op(core, power_off, core);
  1891. err_venus_power_on:
  1892. d_vpr_e("Failed to resume from power collapse\n");
  1893. return rc;
  1894. }
  1895. static void __set_queue_hdr_defaults(struct hfi_queue_header *q_hdr)
  1896. {
  1897. q_hdr->qhdr_status = 0x1;
  1898. q_hdr->qhdr_type = VIDC_IFACEQ_DFLT_QHDR;
  1899. q_hdr->qhdr_q_size = VIDC_IFACEQ_QUEUE_SIZE / 4;
  1900. q_hdr->qhdr_pkt_size = 0;
  1901. q_hdr->qhdr_rx_wm = 0x1;
  1902. q_hdr->qhdr_tx_wm = 0x1;
  1903. q_hdr->qhdr_rx_req = 0x1;
  1904. q_hdr->qhdr_tx_req = 0x0;
  1905. q_hdr->qhdr_rx_irq_status = 0x0;
  1906. q_hdr->qhdr_tx_irq_status = 0x0;
  1907. q_hdr->qhdr_read_idx = 0x0;
  1908. q_hdr->qhdr_write_idx = 0x0;
  1909. }
  1910. static void __interface_queues_deinit(struct msm_vidc_core *core)
  1911. {
  1912. int i;
  1913. d_vpr_h("%s()\n", __func__);
  1914. msm_vidc_memory_unmap(core, &core->iface_q_table.map);
  1915. msm_vidc_memory_free(core, &core->iface_q_table.alloc);
  1916. msm_vidc_memory_unmap(core, &core->sfr.map);
  1917. msm_vidc_memory_free(core, &core->sfr.alloc);
  1918. for (i = 0; i < VIDC_IFACEQ_NUMQ; i++) {
  1919. core->iface_queues[i].q_hdr = NULL;
  1920. core->iface_queues[i].q_array.align_virtual_addr = NULL;
  1921. core->iface_queues[i].q_array.align_device_addr = 0;
  1922. }
  1923. core->iface_q_table.align_virtual_addr = NULL;
  1924. core->iface_q_table.align_device_addr = 0;
  1925. core->sfr.align_virtual_addr = NULL;
  1926. core->sfr.align_device_addr = 0;
  1927. }
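/*
 * __interface_queues_init() - allocate and map the shared HFI memory.
 *
 * A single non-secure allocation holds the queue table followed by
 * the VIDC_IFACEQ_NUMQ queues (command, message and debug); the SFR
 * buffer is a separate allocation whose size is written to its first
 * word. The debug queue clears qhdr_rx_req since no interrupt is
 * needed for firmware log messages.
 */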
  1928. static int __interface_queues_init(struct msm_vidc_core *core)
  1929. {
  1930. int rc = 0;
  1931. struct hfi_queue_table_header *q_tbl_hdr;
  1932. struct hfi_queue_header *q_hdr;
  1933. struct msm_vidc_iface_q_info *iface_q;
  1934. struct msm_vidc_alloc alloc;
  1935. struct msm_vidc_map map;
  1936. int offset = 0;
  1937. u32 q_size;
  1938. u32 i;
  1939. d_vpr_h("%s()\n", __func__);
  1940. q_size = SHARED_QSIZE - ALIGNED_SFR_SIZE - ALIGNED_QDSS_SIZE;
  1941. memset(&alloc, 0, sizeof(alloc));
  1942. alloc.type = MSM_VIDC_BUF_QUEUE;
  1943. alloc.region = MSM_VIDC_NON_SECURE;
  1944. alloc.size = q_size;
  1945. alloc.secure = false;
  1946. alloc.map_kernel = true;
  1947. rc = msm_vidc_memory_alloc(core, &alloc);
  1948. if (rc) {
  1949. d_vpr_e("%s: alloc failed\n", __func__);
  1950. goto fail_alloc_queue;
  1951. }
  1952. memset(&map, 0, sizeof(map));
  1953. map.type = alloc.type;
  1954. map.region = alloc.region;
  1955. map.dmabuf = alloc.dmabuf;
  1956. rc = msm_vidc_memory_map(core, &map);
  1957. if (rc) {
  1958. d_vpr_e("%s: alloc failed\n", __func__);
  1959. goto fail_alloc_queue;
  1960. }
  1961. core->iface_q_table.align_virtual_addr = alloc.kvaddr;
  1962. core->iface_q_table.align_device_addr = map.device_addr;
  1963. core->iface_q_table.mem_size = VIDC_IFACEQ_TABLE_SIZE;
  1964. core->iface_q_table.alloc = alloc;
  1965. core->iface_q_table.map = map;
  1966. offset += core->iface_q_table.mem_size;
  1967. for (i = 0; i < VIDC_IFACEQ_NUMQ; i++) {
  1968. iface_q = &core->iface_queues[i];
  1969. iface_q->q_array.align_device_addr = map.device_addr + offset;
1970. iface_q->q_array.align_virtual_addr = (void *)((char *)alloc.kvaddr + offset);
  1971. iface_q->q_array.mem_size = VIDC_IFACEQ_QUEUE_SIZE;
  1972. offset += iface_q->q_array.mem_size;
  1973. iface_q->q_hdr = VIDC_IFACEQ_GET_QHDR_START_ADDR(
  1974. core->iface_q_table.align_virtual_addr, i);
  1975. __set_queue_hdr_defaults(iface_q->q_hdr);
  1976. }
  1977. q_tbl_hdr = (struct hfi_queue_table_header *)
  1978. core->iface_q_table.align_virtual_addr;
  1979. q_tbl_hdr->qtbl_version = 0;
  1980. q_tbl_hdr->device_addr = (void *)core;
  1981. strlcpy(q_tbl_hdr->name, "msm_v4l2_vidc", sizeof(q_tbl_hdr->name));
  1982. q_tbl_hdr->qtbl_size = VIDC_IFACEQ_TABLE_SIZE;
  1983. q_tbl_hdr->qtbl_qhdr0_offset = sizeof(struct hfi_queue_table_header);
  1984. q_tbl_hdr->qtbl_qhdr_size = sizeof(struct hfi_queue_header);
  1985. q_tbl_hdr->qtbl_num_q = VIDC_IFACEQ_NUMQ;
  1986. q_tbl_hdr->qtbl_num_active_q = VIDC_IFACEQ_NUMQ;
  1987. iface_q = &core->iface_queues[VIDC_IFACEQ_CMDQ_IDX];
  1988. q_hdr = iface_q->q_hdr;
  1989. q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
  1990. q_hdr->qhdr_type |= HFI_Q_ID_HOST_TO_CTRL_CMD_Q;
  1991. iface_q = &core->iface_queues[VIDC_IFACEQ_MSGQ_IDX];
  1992. q_hdr = iface_q->q_hdr;
  1993. q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
  1994. q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_MSG_Q;
  1995. iface_q = &core->iface_queues[VIDC_IFACEQ_DBGQ_IDX];
  1996. q_hdr = iface_q->q_hdr;
  1997. q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
  1998. q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q;
  1999. /*
  2000. * Set receive request to zero on debug queue as there is no
  2001. * need of interrupt from video hardware for debug messages
  2002. */
  2003. q_hdr->qhdr_rx_req = 0;
  2004. /* sfr buffer */
  2005. memset(&alloc, 0, sizeof(alloc));
  2006. alloc.type = MSM_VIDC_BUF_QUEUE;
  2007. alloc.region = MSM_VIDC_NON_SECURE;
  2008. alloc.size = ALIGNED_SFR_SIZE;
  2009. alloc.secure = false;
  2010. alloc.map_kernel = true;
  2011. rc = msm_vidc_memory_alloc(core, &alloc);
  2012. if (rc) {
  2013. d_vpr_e("%s: sfr alloc failed\n", __func__);
  2014. goto fail_alloc_queue;
  2015. }
  2016. memset(&map, 0, sizeof(map));
  2017. map.type = alloc.type;
  2018. map.region = alloc.region;
  2019. map.dmabuf = alloc.dmabuf;
  2020. rc = msm_vidc_memory_map(core, &map);
  2021. if (rc) {
  2022. d_vpr_e("%s: sfr map failed\n", __func__);
  2023. goto fail_alloc_queue;
  2024. }
  2025. core->sfr.align_device_addr = map.device_addr;
  2026. core->sfr.align_virtual_addr = alloc.kvaddr;
  2027. core->sfr.mem_size = ALIGNED_SFR_SIZE;
  2028. core->sfr.alloc = alloc;
  2029. core->sfr.map = map;
  2030. /* write sfr buffer size in first word */
  2031. *((u32 *)core->sfr.align_virtual_addr) = ALIGNED_SFR_SIZE;
  2032. rc = call_venus_op(core, setup_ucregion_memmap, core);
  2033. if (rc)
  2034. return rc;
  2035. return 0;
  2036. fail_alloc_queue:
  2037. return -ENOMEM;
  2038. }
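/*
 * __load_fw_to_memory() - load and authenticate the firmware image.
 *
 * Resolves the reserved "memory-region" from DT, requests the
 * "<fw_name>.mdt" image, validates that it fits the region, loads it
 * with qcom_mdt_load() and authenticates it through
 * qcom_scm_pas_auth_and_reset(). Returns the pas-id on success or a
 * negative error code.
 */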
  2039. static int __load_fw_to_memory(struct platform_device *pdev,
  2040. const char *fw_name)
  2041. {
  2042. int rc = 0;
  2043. const struct firmware *firmware = NULL;
  2044. char firmware_name[MAX_FIRMWARE_NAME_SIZE] = { 0 };
  2045. struct device_node *node = NULL;
  2046. struct resource res = { 0 };
  2047. phys_addr_t phys = 0;
  2048. size_t res_size = 0;
  2049. ssize_t fw_size = 0;
  2050. void *virt = NULL;
  2051. int pas_id = 0;
  2052. if (!fw_name || !(*fw_name) || !pdev) {
  2053. d_vpr_e("%s: Invalid inputs\n", __func__);
  2054. return -EINVAL;
  2055. }
  2056. if (strlen(fw_name) >= MAX_FIRMWARE_NAME_SIZE - 4) {
  2057. d_vpr_e("%s: Invalid fw name\n", __func__);
  2058. return -EINVAL;
  2059. }
  2060. scnprintf(firmware_name, ARRAY_SIZE(firmware_name), "%s.mdt", fw_name);
  2061. rc = of_property_read_u32(pdev->dev.of_node, "pas-id", &pas_id);
  2062. if (rc) {
  2063. d_vpr_e("%s: failed to read \"pas-id\". error %d\n",
  2064. __func__, rc);
  2065. goto exit;
  2066. }
  2067. node = of_parse_phandle(pdev->dev.of_node, "memory-region", 0);
  2068. if (!node) {
  2069. d_vpr_e("%s: failed to read \"memory-region\"\n",
  2070. __func__);
  2071. return -EINVAL;
  2072. }
  2073. rc = of_address_to_resource(node, 0, &res);
  2074. if (rc) {
  2075. d_vpr_e("%s: failed to read \"memory-region\", error %d\n",
  2076. __func__, rc);
  2077. goto exit;
  2078. }
  2079. phys = res.start;
  2080. res_size = (size_t)resource_size(&res);
  2081. rc = request_firmware(&firmware, firmware_name, &pdev->dev);
  2082. if (rc) {
  2083. d_vpr_e("%s: failed to request fw \"%s\", error %d\n",
2084. __func__, firmware_name, rc);
  2085. goto exit;
  2086. }
  2087. fw_size = qcom_mdt_get_size(firmware);
  2088. if (fw_size < 0 || res_size < (size_t)fw_size) {
  2089. rc = -EINVAL;
  2090. d_vpr_e("%s: out of bound fw image fw size: %ld, res_size: %lu",
  2091. __func__, fw_size, res_size);
  2092. goto exit;
  2093. }
  2094. virt = memremap(phys, res_size, MEMREMAP_WC);
  2095. if (!virt) {
  2096. d_vpr_e("%s: failed to remap fw memory phys %pa[p]\n",
  2097. __func__, phys);
  2098. return -ENOMEM;
  2099. }
  2100. rc = qcom_mdt_load(&pdev->dev, firmware, firmware_name,
  2101. pas_id, virt, phys, res_size, NULL);
  2102. if (rc) {
  2103. d_vpr_e("%s: error %d loading fw \"%s\"\n",
  2104. __func__, rc, firmware_name);
  2105. goto exit;
  2106. }
  2107. rc = qcom_scm_pas_auth_and_reset(pas_id);
  2108. if (rc) {
  2109. d_vpr_e("%s: error %d authenticating fw \"%s\"\n",
  2110. __func__, rc, firmware_name);
  2111. goto exit;
  2112. }
  2113. memunmap(virt);
  2114. release_firmware(firmware);
  2115. d_vpr_h("%s: firmware \"%s\" loaded successfully\n",
  2116. __func__, firmware_name);
  2117. return pas_id;
  2118. exit:
  2119. if (virt)
  2120. memunmap(virt);
  2121. if (firmware)
  2122. release_firmware(firmware);
  2123. return rc;
  2124. }
  2125. int __load_fw(struct msm_vidc_core *core)
  2126. {
  2127. int rc = 0;
  2128. rc = __init_resources(core);
  2129. if (rc) {
  2130. d_vpr_e("%s: Failed to init resources: %d\n", __func__, rc);
  2131. goto fail_init_res;
  2132. }
  2133. rc = __venus_power_on(core);
  2134. if (rc) {
  2135. d_vpr_e("%s: power on failed\n", __func__);
  2136. goto fail_venus_power_on;
  2137. }
  2138. if (!core->dt->fw_cookie) {
  2139. core->dt->fw_cookie = __load_fw_to_memory(core->pdev,
  2140. core->dt->fw_name);
  2141. if (core->dt->fw_cookie <= 0) {
  2142. d_vpr_e("%s: firmware download failed %d\n",
  2143. __func__, core->dt->fw_cookie);
  2144. core->dt->fw_cookie = 0;
  2145. rc = -ENOMEM;
  2146. goto fail_load_fw;
  2147. }
  2148. }
  2149. rc = __protect_cp_mem(core);
  2150. if (rc) {
  2151. d_vpr_e("%s: protect memory failed\n", __func__);
  2152. goto fail_protect_mem;
  2153. }
  2154. /*
  2155. * Hand off control of regulators to h/w _after_ loading fw.
  2156. * Note that the GDSC will turn off when switching from normal
  2157. * (s/w triggered) to fast (HW triggered) unless the h/w vote is
  2158. * present.
  2159. */
  2160. __enable_hw_power_collapse(core);
  2161. return rc;
  2162. fail_protect_mem:
  2163. if (core->dt->fw_cookie)
  2164. qcom_scm_pas_shutdown(core->dt->fw_cookie);
  2165. core->dt->fw_cookie = 0;
  2166. fail_load_fw:
  2167. call_venus_op(core, power_off, core);
  2168. fail_venus_power_on:
  2169. __deinit_resources(core);
  2170. fail_init_res:
  2171. return rc;
  2172. }
  2173. void __unload_fw(struct msm_vidc_core *core)
  2174. {
  2175. int rc = 0;
  2176. if (!core->dt->fw_cookie)
  2177. return;
  2178. cancel_delayed_work(&core->pm_work);
  2179. if (core->state != MSM_VIDC_CORE_DEINIT)
  2180. flush_workqueue(core->pm_workq);
  2181. rc = qcom_scm_pas_shutdown(core->dt->fw_cookie);
  2182. if (rc)
  2183. d_vpr_e("Firmware unload failed rc=%d\n", rc);
  2184. core->dt->fw_cookie = 0;
  2185. __venus_power_off(core);
  2186. __deinit_resources(core);
  2187. d_vpr_h("%s done\n", __func__);
  2188. }
  2189. static int __response_handler(struct msm_vidc_core *core)
  2190. {
  2191. int rc = 0;
  2192. if (call_venus_op(core, watchdog, core, core->intr_status))
  2193. return handle_system_error(core, NULL);
  2194. memset(core->response_packet, 0, core->packet_size);
  2195. while (!__iface_msgq_read(core, core->response_packet)) {
  2196. rc = handle_response(core, core->response_packet);
  2197. if (rc)
  2198. continue;
  2199. /* check for system error */
  2200. if (core->state != MSM_VIDC_CORE_INIT)
  2201. break;
  2202. memset(core->response_packet, 0, core->packet_size);
  2203. }
  2204. __schedule_power_collapse_work(core);
  2205. __flush_debug_queue(core, core->response_packet, core->packet_size);
  2206. return rc;
  2207. }
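/*
 * venus_hfi_isr() - top half of the Venus interrupt.
 *
 * Only disables the irq and queues device_work; all queue processing
 * happens in venus_hfi_work_handler(), which re-enables the irq once
 * responses are drained, unless the watchdog op reports a bite.
 */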
  2208. irqreturn_t venus_hfi_isr(int irq, void *data)
  2209. {
  2210. struct msm_vidc_core *core = data;
  2211. disable_irq_nosync(irq);
  2212. queue_work(core->device_workq, &core->device_work);
  2213. return IRQ_HANDLED;
  2214. }
  2215. void venus_hfi_work_handler(struct work_struct *work)
  2216. {
  2217. struct msm_vidc_core *core;
  2218. int num_responses = 0;
  2219. d_vpr_l("%s()\n", __func__);
  2220. core = container_of(work, struct msm_vidc_core, device_work);
  2221. if (!core) {
  2222. d_vpr_e("%s: invalid params\n", __func__);
  2223. return;
  2224. }
  2225. core_lock(core, __func__);
  2226. if (__resume(core)) {
  2227. d_vpr_e("%s: Power on failed\n", __func__);
  2228. core_unlock(core, __func__);
  2229. goto err_no_work;
  2230. }
  2231. call_venus_op(core, clear_interrupt, core);
  2232. core_unlock(core, __func__);
  2233. num_responses = __response_handler(core);
  2234. err_no_work:
  2235. if (!call_venus_op(core, watchdog, core, core->intr_status))
  2236. enable_irq(core->dt->irq);
  2237. }
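/*
 * venus_hfi_pm_work_handler() - delayed power-collapse attempt.
 *
 * -EBUSY (DSP busy) and -EAGAIN both reschedule the work; -EAGAIN
 * additionally bumps skip_pc_count, and VIDC_MAX_PC_SKIP_COUNT
 * consecutive skips abort further attempts until the count is reset.
 */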
  2238. void venus_hfi_pm_work_handler(struct work_struct *work)
  2239. {
  2240. int rc = 0;
  2241. struct msm_vidc_core *core;
  2242. core = container_of(work, struct msm_vidc_core, pm_work.work);
  2243. if (!core) {
  2244. d_vpr_e("%s: invalid params\n", __func__);
  2245. return;
  2246. }
  2247. /*
  2248. * It is ok to check this variable outside the lock since
  2249. * it is being updated in this context only
  2250. */
  2251. if (core->skip_pc_count >= VIDC_MAX_PC_SKIP_COUNT) {
  2252. d_vpr_e("Failed to PC for %d times\n",
  2253. core->skip_pc_count);
  2254. core->skip_pc_count = 0;
  2255. //__process_fatal_error(core);
  2256. return;
  2257. }
  2258. core_lock(core, __func__);
  2259. rc = __power_collapse(core, false);
  2260. switch (rc) {
  2261. case 0:
  2262. core->skip_pc_count = 0;
  2263. /* Cancel pending delayed works if any */
  2264. __cancel_power_collapse_work(core);
  2265. d_vpr_h("%s: power collapse successful!\n", __func__);
  2266. break;
  2267. case -EBUSY:
  2268. core->skip_pc_count = 0;
  2269. d_vpr_h("%s: retry PC as dsp is busy\n", __func__);
  2270. __schedule_power_collapse_work(core);
  2271. break;
  2272. case -EAGAIN:
  2273. core->skip_pc_count++;
  2274. d_vpr_e("%s: retry power collapse (count %d)\n",
  2275. __func__, core->skip_pc_count);
  2276. __schedule_power_collapse_work(core);
  2277. break;
  2278. default:
  2279. d_vpr_e("%s: power collapse failed\n", __func__);
  2280. break;
  2281. }
  2282. core_unlock(core, __func__);
  2283. }
  2284. static int __sys_init(struct msm_vidc_core *core)
  2285. {
  2286. int rc = 0;
  2287. rc = hfi_packet_sys_init(core, core->packet, core->packet_size);
  2288. if (rc)
  2289. return rc;
  2290. rc = __iface_cmdq_write(core, core->packet);
  2291. if (rc)
  2292. return rc;
  2293. return 0;
  2294. }
  2295. static int __sys_image_version(struct msm_vidc_core *core)
  2296. {
  2297. int rc = 0;
  2298. rc = hfi_packet_image_version(core, core->packet, core->packet_size);
  2299. if (rc)
  2300. return rc;
  2301. rc = __iface_cmdq_write(core, core->packet);
  2302. if (rc)
  2303. return rc;
  2304. return 0;
  2305. }
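/*
 * venus_hfi_core_init() - cold-boot sequence for the core.
 *
 * Order matters: load firmware, set up the interface queues, boot the
 * firmware, activate subcaches, then issue SYS_INIT, query the image
 * version, set the firmware debug level, hand the subcaches to
 * firmware and finally enable firmware power control.
 */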
  2306. int venus_hfi_core_init(struct msm_vidc_core *core)
  2307. {
  2308. int rc = 0;
  2309. if (!core) {
  2310. d_vpr_e("%s: invalid params\n", __func__);
  2311. return -EINVAL;
  2312. }
  2313. d_vpr_h("%s(): core %pK\n", __func__, core);
  2314. __strict_check(core);
  2315. core->handoff_done = 0;
  2316. rc = __load_fw(core);
  2317. if (rc)
  2318. goto error;
  2319. rc = __interface_queues_init(core);
  2320. if (rc)
  2321. goto error;
  2322. rc = call_venus_op(core, boot_firmware, core);
  2323. if (rc)
  2324. goto error;
  2325. rc = __enable_subcaches(core);
  2326. if (rc)
  2327. goto error;
  2328. rc = __sys_init(core);
  2329. if (rc)
  2330. goto error;
  2331. rc = __sys_image_version(core);
  2332. if (rc)
  2333. goto error;
  2334. rc = __sys_set_debug(core, (msm_vidc_debug & FW_LOGMASK) >> FW_LOGSHIFT);
  2335. if (rc)
  2336. goto error;
  2337. rc = __set_subcaches(core);
  2338. if (rc)
  2339. goto error;
  2340. __sys_set_power_control(core, true);
  2341. d_vpr_h("%s(): successful\n", __func__);
  2342. return 0;
  2343. error:
  2344. d_vpr_e("%s(): failed\n", __func__);
  2345. return rc;
  2346. }
  2347. int venus_hfi_core_deinit(struct msm_vidc_core *core)
  2348. {
  2349. if (!core) {
  2350. d_vpr_h("%s(): invalid params\n", __func__);
  2351. return -EINVAL;
  2352. }
  2353. d_vpr_h("%s(): core %pK\n", __func__, core);
  2354. __strict_check(core);
  2355. if (core->state == MSM_VIDC_CORE_DEINIT)
  2356. return 0;
  2357. __resume(core);
  2358. __flush_debug_queue(core, core->packet, core->packet_size);
  2359. __disable_subcaches(core);
  2360. __interface_queues_deinit(core);
  2361. __unload_fw(core);
  2362. return 0;
  2363. }
  2364. int venus_hfi_noc_error_info(struct msm_vidc_core *core)
  2365. {
  2366. if (!core || !core->capabilities) {
  2367. d_vpr_e("%s: Invalid parameters: %pK\n",
  2368. __func__, core);
  2369. return -EINVAL;
  2370. }
  2371. if (!core->capabilities[NON_FATAL_FAULTS].value)
  2372. return 0;
  2373. core_lock(core, __func__);
  2374. if (core->state == MSM_VIDC_CORE_DEINIT)
  2375. goto unlock;
  2376. /* resume venus before accessing noc registers */
  2377. if (__resume(core)) {
  2378. d_vpr_e("%s: Power on failed\n", __func__);
  2379. goto unlock;
  2380. }
  2381. call_venus_op(core, noc_error_info, core);
  2382. unlock:
  2383. core_unlock(core, __func__);
  2384. return 0;
  2385. }
  2386. int venus_hfi_suspend(struct msm_vidc_core *core)
  2387. {
  2388. int rc = 0;
  2389. if (!core) {
  2390. d_vpr_e("%s: invalid params\n", __func__);
  2391. return -EINVAL;
  2392. }
  2393. core_lock(core, __func__);
  2394. d_vpr_h("Suspending Venus\n");
  2395. rc = __power_collapse(core, true);
  2396. if (!rc) {
  2397. /* Cancel pending delayed works if any */
  2398. __cancel_power_collapse_work(core);
  2399. } else {
  2400. d_vpr_e("%s: Venus is busy\n", __func__);
  2401. rc = -EBUSY;
  2402. }
  2403. core_unlock(core, __func__);
  2404. return rc;
  2405. }
  2406. int venus_hfi_trigger_ssr(struct msm_vidc_core *core, u32 type,
  2407. u32 client_id, u32 addr)
  2408. {
  2409. int rc = 0;
  2410. u32 payload[2];
  2411. if (!core || !core->packet) {
  2412. d_vpr_e("%s: Invalid params\n", __func__);
  2413. return -EINVAL;
  2414. }
  2415. payload[0] = client_id << 4 | type;
  2416. payload[1] = addr;
  2417. rc = hfi_create_header(core->packet, core->packet_size,
  2418. 0 /*session_id*/,
  2419. core->header_id++);
  2420. if (rc)
  2421. goto err_ssr_pkt;
  2422. /* HFI_CMD_SSR */
  2423. rc = hfi_create_packet(core->packet, core->packet_size,
  2424. HFI_CMD_SSR,
  2425. HFI_HOST_FLAGS_RESPONSE_REQUIRED |
  2426. HFI_HOST_FLAGS_INTR_REQUIRED,
  2427. HFI_PAYLOAD_U64,
  2428. HFI_PORT_NONE,
  2429. core->packet_id++,
  2430. &payload, sizeof(u64));
  2431. if (rc)
  2432. goto err_ssr_pkt;
  2433. rc = __iface_cmdq_write(core, core->packet);
  2434. if (rc)
  2435. return rc;
  2436. return 0;
  2437. err_ssr_pkt:
  2438. d_vpr_e("%s: create packet failed\n", __func__);
  2439. return rc;
  2440. }
  2441. int venus_hfi_session_open(struct msm_vidc_inst *inst)
  2442. {
  2443. int rc = 0;
  2444. struct msm_vidc_core *core;
  2445. if (!inst || !inst->core || !inst->packet) {
  2446. d_vpr_e("%s: invalid params\n", __func__);
  2447. return -EINVAL;
  2448. }
  2449. core = inst->core;
  2450. core_lock(core, __func__);
  2451. if (!__valdiate_session(core, inst, __func__)) {
  2452. rc = -EINVAL;
  2453. goto unlock;
  2454. }
  2455. __sys_set_debug(core,
  2456. (msm_vidc_debug & FW_LOGMASK) >> FW_LOGSHIFT);
  2457. rc = hfi_packet_session_command(inst,
  2458. HFI_CMD_OPEN,
  2459. (HFI_HOST_FLAGS_RESPONSE_REQUIRED |
  2460. HFI_HOST_FLAGS_INTR_REQUIRED),
  2461. HFI_PORT_NONE,
  2462. 0, /* session_id */
  2463. HFI_PAYLOAD_U32,
  2464. &inst->session_id, /* payload */
  2465. sizeof(u32));
  2466. if (rc)
  2467. goto unlock;
  2468. rc = __iface_cmdq_write(inst->core, inst->packet);
  2469. if (rc)
  2470. goto unlock;
  2471. unlock:
  2472. core_unlock(core, __func__);
2473. return rc;
  2474. }
  2475. int venus_hfi_session_set_codec(struct msm_vidc_inst *inst)
  2476. {
  2477. int rc = 0;
  2478. struct msm_vidc_core *core;
  2479. u32 codec;
  2480. if (!inst || !inst->core || !inst->packet) {
  2481. d_vpr_e("%s: invalid params\n", __func__);
  2482. return -EINVAL;
  2483. }
  2484. core = inst->core;
  2485. core_lock(core, __func__);
  2486. if (!__valdiate_session(core, inst, __func__)) {
  2487. rc = -EINVAL;
  2488. goto unlock;
  2489. }
  2490. rc = hfi_create_header(inst->packet, inst->packet_size,
  2491. inst->session_id, core->header_id++);
  2492. if (rc)
  2493. goto unlock;
  2494. codec = get_hfi_codec(inst);
  2495. rc = hfi_create_packet(inst->packet, inst->packet_size,
  2496. HFI_PROP_CODEC,
  2497. HFI_HOST_FLAGS_NONE,
  2498. HFI_PAYLOAD_U32_ENUM,
  2499. HFI_PORT_NONE,
  2500. core->packet_id++,
  2501. &codec,
  2502. sizeof(u32));
  2503. if (rc)
  2504. goto unlock;
  2505. rc = __iface_cmdq_write(inst->core, inst->packet);
  2506. if (rc)
  2507. goto unlock;
  2508. unlock:
  2509. core_unlock(core, __func__);
  2510. return rc;
  2511. }
  2512. int venus_hfi_session_property(struct msm_vidc_inst *inst,
  2513. u32 pkt_type, u32 flags, u32 port, u32 payload_type,
  2514. void *payload, u32 payload_size)
  2515. {
  2516. int rc = 0;
  2517. struct msm_vidc_core *core;
  2518. if (!inst || !inst->core || !inst->packet) {
  2519. d_vpr_e("%s: invalid params\n", __func__);
  2520. return -EINVAL;
  2521. }
  2522. core = inst->core;
  2523. core_lock(core, __func__);
  2524. if (!__valdiate_session(core, inst, __func__)) {
  2525. rc = -EINVAL;
  2526. goto unlock;
  2527. }
  2528. rc = hfi_create_header(inst->packet, inst->packet_size,
  2529. inst->session_id, core->header_id++);
  2530. if (rc)
  2531. goto unlock;
  2532. rc = hfi_create_packet(inst->packet, inst->packet_size,
  2533. pkt_type,
  2534. flags,
  2535. payload_type,
  2536. port,
  2537. core->packet_id++,
  2538. payload,
  2539. payload_size);
  2540. if (rc)
  2541. goto unlock;
  2542. rc = __iface_cmdq_write(inst->core, inst->packet);
  2543. if (rc)
  2544. goto unlock;
  2545. unlock:
  2546. core_unlock(core, __func__);
  2547. return rc;
  2548. }
  2549. int venus_hfi_session_close(struct msm_vidc_inst *inst)
  2550. {
  2551. int rc = 0;
2552. struct msm_vidc_core *core;
  2553. if (!inst || !inst->packet) {
  2554. d_vpr_e("%s: invalid params\n", __func__);
  2555. return -EINVAL;
  2556. }
  2557. core = inst->core;
  2558. __strict_check(core);
  2559. if (!__valdiate_session(core, inst, __func__))
  2560. return -EINVAL;
  2561. rc = hfi_packet_session_command(inst,
  2562. HFI_CMD_CLOSE,
  2563. (HFI_HOST_FLAGS_RESPONSE_REQUIRED |
  2564. HFI_HOST_FLAGS_INTR_REQUIRED |
  2565. HFI_HOST_FLAGS_NON_DISCARDABLE),
  2566. HFI_PORT_NONE,
  2567. inst->session_id,
  2568. HFI_PAYLOAD_NONE,
  2569. NULL,
  2570. 0);
  2571. if (!rc)
  2572. rc = __iface_cmdq_write(inst->core, inst->packet);
  2573. return rc;
  2574. }
  2575. int venus_hfi_start(struct msm_vidc_inst *inst, enum msm_vidc_port_type port)
  2576. {
  2577. int rc = 0;
2578. struct msm_vidc_core *core;
  2579. if (!inst || !inst->core || !inst->packet) {
  2580. d_vpr_e("%s: invalid params\n", __func__);
  2581. return -EINVAL;
  2582. }
  2583. core = inst->core;
  2584. core_lock(core, __func__);
  2585. if (!__valdiate_session(core, inst, __func__)) {
  2586. rc = -EINVAL;
  2587. goto unlock;
  2588. }
  2589. if (port != INPUT_PORT && port != OUTPUT_PORT) {
  2590. i_vpr_e(inst, "%s: invalid port %d\n", __func__, port);
  2591. goto unlock;
  2592. }
  2593. rc = hfi_packet_session_command(inst,
  2594. HFI_CMD_START,
  2595. (HFI_HOST_FLAGS_RESPONSE_REQUIRED |
  2596. HFI_HOST_FLAGS_INTR_REQUIRED),
  2597. get_hfi_port(inst, port),
  2598. inst->session_id,
  2599. HFI_PAYLOAD_NONE,
  2600. NULL,
  2601. 0);
  2602. if (rc)
  2603. goto unlock;
  2604. rc = __iface_cmdq_write(inst->core, inst->packet);
  2605. if (rc)
  2606. goto unlock;
  2607. unlock:
  2608. core_unlock(core, __func__);
  2609. return rc;
  2610. }
  2611. int venus_hfi_stop(struct msm_vidc_inst *inst, enum msm_vidc_port_type port)
  2612. {
  2613. int rc = 0;
2614. struct msm_vidc_core *core;
  2615. if (!inst || !inst->core || !inst->packet) {
  2616. d_vpr_e("%s: invalid params\n", __func__);
  2617. return -EINVAL;
  2618. }
  2619. core = inst->core;
  2620. core_lock(core, __func__);
  2621. if (!__valdiate_session(core, inst, __func__)) {
  2622. rc = -EINVAL;
  2623. goto unlock;
  2624. }
  2625. if (port != INPUT_PORT && port != OUTPUT_PORT) {
  2626. i_vpr_e(inst, "%s: invalid port %d\n", __func__, port);
  2627. goto unlock;
  2628. }
  2629. rc = hfi_packet_session_command(inst,
  2630. HFI_CMD_STOP,
  2631. (HFI_HOST_FLAGS_RESPONSE_REQUIRED |
  2632. HFI_HOST_FLAGS_INTR_REQUIRED |
  2633. HFI_HOST_FLAGS_NON_DISCARDABLE),
  2634. get_hfi_port(inst, port),
  2635. inst->session_id,
  2636. HFI_PAYLOAD_NONE,
  2637. NULL,
  2638. 0);
  2639. if (rc)
  2640. goto unlock;
  2641. rc = __iface_cmdq_write(inst->core, inst->packet);
  2642. if (rc)
  2643. goto unlock;
  2644. unlock:
  2645. core_unlock(core, __func__);
  2646. return rc;
  2647. }
  2648. int venus_hfi_session_command(struct msm_vidc_inst *inst,
  2649. u32 cmd, enum msm_vidc_port_type port, u32 payload_type,
  2650. void *payload, u32 payload_size)
  2651. {
  2652. int rc = 0;
  2653. struct msm_vidc_core *core;
  2654. if (!inst || !inst->core || !inst->packet) {
  2655. d_vpr_e("%s: invalid params\n", __func__);
  2656. return -EINVAL;
  2657. }
  2658. core = inst->core;
  2659. core_lock(core, __func__);
  2660. if (!__valdiate_session(core, inst, __func__)) {
  2661. rc = -EINVAL;
  2662. goto unlock;
  2663. }
  2664. rc = hfi_create_header(inst->packet, inst->packet_size,
  2665. inst->session_id,
  2666. core->header_id++);
  2667. if (rc)
  2668. goto unlock;
  2669. rc = hfi_create_packet(inst->packet, inst->packet_size,
  2670. cmd,
  2671. (HFI_HOST_FLAGS_RESPONSE_REQUIRED |
  2672. HFI_HOST_FLAGS_INTR_REQUIRED),
  2673. payload_type,
  2674. get_hfi_port(inst, port),
  2675. core->packet_id++,
  2676. payload,
  2677. payload_size);
  2678. if (rc)
  2679. goto unlock;
  2680. rc = __iface_cmdq_write(inst->core, inst->packet);
  2681. if (rc)
  2682. goto unlock;
  2683. unlock:
  2684. core_unlock(core, __func__);
  2685. return rc;
  2686. }
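/*
 * venus_hfi_queue_super_buffer() - split a super (batched) buffer.
 *
 * A super buffer carries SUPER_FRAME frames back to back, so one HFI
 * buffer packet is queued per frame with addr_offset advanced by the
 * frame size and the timestamp by the frame interval. Only the last
 * packet in the batch requests an interrupt.
 */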
  2687. int venus_hfi_queue_super_buffer(struct msm_vidc_inst *inst,
  2688. struct msm_vidc_buffer *buffer, struct msm_vidc_buffer *metabuf)
  2689. {
  2690. int rc = 0;
  2691. struct msm_vidc_core *core;
  2692. struct hfi_buffer hfi_buffer;
  2693. struct hfi_buffer hfi_meta_buffer;
  2694. struct msm_vidc_inst_capability *capability;
  2695. u32 frame_size, meta_size, batch_size, cnt = 0;
  2696. u64 ts_delta_us;
  2697. if (!inst || !inst->core || !inst->capabilities || !inst->packet) {
  2698. d_vpr_e("%s: invalid params\n", __func__);
  2699. return -EINVAL;
  2700. }
  2701. core = inst->core;
  2702. capability = inst->capabilities;
  2703. core_lock(core, __func__);
  2704. if (!__valdiate_session(core, inst, __func__)) {
  2705. rc = -EINVAL;
  2706. goto unlock;
  2707. }
  2708. /* Get super yuv buffer */
  2709. rc = get_hfi_buffer(inst, buffer, &hfi_buffer);
  2710. if (rc)
  2711. goto unlock;
  2712. /* Get super meta buffer */
  2713. if (metabuf) {
  2714. rc = get_hfi_buffer(inst, metabuf, &hfi_meta_buffer);
  2715. if (rc)
  2716. goto unlock;
  2717. }
  2718. batch_size = capability->cap[SUPER_FRAME].value;
  2719. frame_size = call_session_op(core, buffer_size, inst, MSM_VIDC_BUF_INPUT);
  2720. meta_size = call_session_op(core, buffer_size, inst, MSM_VIDC_BUF_INPUT_META);
  2721. ts_delta_us = 1000000 / (capability->cap[FRAME_RATE].value >> 16);
  2722. /* Sanitize super yuv buffer */
  2723. if (frame_size * batch_size != buffer->buffer_size) {
  2724. i_vpr_e(inst, "%s: invalid super yuv buffer. frame %u, batch %u, buffer size %u\n",
2725. __func__, frame_size, batch_size, buffer->buffer_size);
rc = -EINVAL;
2726. goto unlock;
  2727. }
  2728. /* Sanitize super meta buffer */
  2729. if (metabuf && meta_size * batch_size != metabuf->buffer_size) {
  2730. i_vpr_e(inst, "%s: invalid super meta buffer. meta %u, batch %u, buffer size %u\n",
2731. __func__, meta_size, batch_size, metabuf->buffer_size);
rc = -EINVAL;
2732. goto unlock;
  2733. }
  2734. /* Initialize yuv buffer */
  2735. hfi_buffer.data_size = frame_size;
  2736. hfi_buffer.addr_offset = 0;
  2737. /* Initialize meta buffer */
  2738. if (metabuf) {
  2739. hfi_meta_buffer.data_size = meta_size;
  2740. hfi_meta_buffer.addr_offset = 0;
  2741. }
  2742. while (cnt < batch_size) {
  2743. /* Create header */
  2744. rc = hfi_create_header(inst->packet, inst->packet_size,
  2745. inst->session_id, core->header_id++);
  2746. if (rc)
  2747. goto unlock;
  2748. /* Create yuv packet */
  2749. update_offset(hfi_buffer.addr_offset, (cnt ? frame_size : 0u));
  2750. update_timestamp(hfi_buffer.timestamp, (cnt ? ts_delta_us : 0u));
  2751. rc = hfi_create_packet(inst->packet,
  2752. inst->packet_size,
  2753. HFI_CMD_BUFFER,
  2754. HFI_HOST_FLAGS_INTR_REQUIRED,
  2755. HFI_PAYLOAD_STRUCTURE,
  2756. get_hfi_port_from_buffer_type(inst, buffer->type),
  2757. core->packet_id++,
  2758. &hfi_buffer,
  2759. sizeof(hfi_buffer));
  2760. if (rc)
  2761. goto unlock;
  2762. /* Create meta packet */
  2763. if (metabuf) {
  2764. update_offset(hfi_meta_buffer.addr_offset, (cnt ? meta_size : 0u));
  2765. update_timestamp(hfi_meta_buffer.timestamp, (cnt ? ts_delta_us : 0u));
  2766. rc = hfi_create_packet(inst->packet,
  2767. inst->packet_size,
  2768. HFI_CMD_BUFFER,
  2769. HFI_HOST_FLAGS_INTR_REQUIRED,
  2770. HFI_PAYLOAD_STRUCTURE,
  2771. get_hfi_port_from_buffer_type(inst, metabuf->type),
  2772. core->packet_id++,
  2773. &hfi_meta_buffer,
  2774. sizeof(hfi_meta_buffer));
  2775. if (rc)
  2776. goto unlock;
  2777. }
  2778. /* Raise interrupt only for last pkt in the batch */
  2779. rc = __iface_cmdq_write_intr(inst->core, inst->packet, (cnt == batch_size - 1));
  2780. if (rc)
  2781. goto unlock;
  2782. cnt++;
  2783. }
  2784. unlock:
  2785. core_unlock(core, __func__);
  2786. if (rc)
  2787. i_vpr_e(inst, "%s: queue super buffer failed: %d\n", __func__, rc);
  2788. return rc;
  2789. }
  2790. int venus_hfi_queue_buffer(struct msm_vidc_inst *inst,
  2791. struct msm_vidc_buffer *buffer, struct msm_vidc_buffer *metabuf)
  2792. {
  2793. int rc = 0;
  2794. struct msm_vidc_core *core;
  2795. struct hfi_buffer hfi_buffer;
  2796. if (!inst || !inst->core || !inst->packet) {
  2797. d_vpr_e("%s: invalid params\n", __func__);
  2798. return -EINVAL;
  2799. }
  2800. core = inst->core;
  2801. core_lock(core, __func__);
  2802. if (!__valdiate_session(core, inst, __func__)) {
  2803. rc = -EINVAL;
  2804. goto unlock;
  2805. }
  2806. rc = get_hfi_buffer(inst, buffer, &hfi_buffer);
  2807. if (rc)
  2808. goto unlock;
  2809. rc = hfi_create_header(inst->packet, inst->packet_size,
  2810. inst->session_id, core->header_id++);
  2811. if (rc)
  2812. goto unlock;
  2813. rc = hfi_create_packet(inst->packet,
  2814. inst->packet_size,
  2815. HFI_CMD_BUFFER,
  2816. HFI_HOST_FLAGS_INTR_REQUIRED,
  2817. HFI_PAYLOAD_STRUCTURE,
  2818. get_hfi_port_from_buffer_type(inst, buffer->type),
  2819. core->packet_id++,
  2820. &hfi_buffer,
  2821. sizeof(hfi_buffer));
  2822. if (rc)
  2823. goto unlock;
  2824. if (metabuf) {
  2825. rc = get_hfi_buffer(inst, metabuf, &hfi_buffer);
  2826. if (rc)
  2827. goto unlock;
  2828. rc = hfi_create_packet(inst->packet,
  2829. inst->packet_size,
  2830. HFI_CMD_BUFFER,
  2831. HFI_HOST_FLAGS_INTR_REQUIRED,
  2832. HFI_PAYLOAD_STRUCTURE,
  2833. get_hfi_port_from_buffer_type(inst, metabuf->type),
  2834. core->packet_id++,
  2835. &hfi_buffer,
  2836. sizeof(hfi_buffer));
  2837. if (rc)
  2838. goto unlock;
  2839. }
  2840. rc = __iface_cmdq_write(inst->core, inst->packet);
  2841. if (rc)
  2842. goto unlock;
  2843. unlock:
  2844. core_unlock(core, __func__);
  2845. return rc;
  2846. }
  2847. int venus_hfi_release_buffer(struct msm_vidc_inst *inst,
  2848. struct msm_vidc_buffer *buffer)
  2849. {
  2850. int rc = 0;
  2851. struct msm_vidc_core *core;
  2852. struct hfi_buffer hfi_buffer;
  2853. if (!inst || !inst->core || !inst->packet || !buffer) {
  2854. d_vpr_e("%s: invalid params\n", __func__);
  2855. return -EINVAL;
  2856. }
  2857. core = inst->core;
  2858. core_lock(core, __func__);
  2859. if (!__valdiate_session(core, inst, __func__)) {
  2860. rc = -EINVAL;
  2861. goto unlock;
  2862. }
  2863. rc = get_hfi_buffer(inst, buffer, &hfi_buffer);
  2864. if (rc)
  2865. goto unlock;
  2866. /* add release flag */
  2867. hfi_buffer.flags |= HFI_BUF_HOST_FLAG_RELEASE;
  2868. rc = hfi_create_header(inst->packet, inst->packet_size,
  2869. inst->session_id, core->header_id++);
  2870. if (rc)
  2871. goto unlock;
  2872. rc = hfi_create_packet(inst->packet,
  2873. inst->packet_size,
  2874. HFI_CMD_BUFFER,
  2875. (HFI_HOST_FLAGS_RESPONSE_REQUIRED |
  2876. HFI_HOST_FLAGS_INTR_REQUIRED),
  2877. HFI_PAYLOAD_STRUCTURE,
  2878. get_hfi_port_from_buffer_type(inst, buffer->type),
  2879. core->packet_id++,
  2880. &hfi_buffer,
  2881. sizeof(hfi_buffer));
  2882. if (rc)
  2883. goto unlock;
  2884. rc = __iface_cmdq_write(inst->core, inst->packet);
  2885. if (rc)
  2886. goto unlock;
  2887. unlock:
  2888. core_unlock(core, __func__);
  2889. return rc;
  2890. }
  2891. int venus_hfi_scale_clocks(struct msm_vidc_inst* inst, u64 freq)
  2892. {
  2893. int rc = 0;
2894. struct msm_vidc_core *core;
  2895. if (!inst || !inst->core) {
  2896. d_vpr_e("%s: invalid params\n", __func__);
  2897. return -EINVAL;
  2898. }
  2899. core = inst->core;
  2900. core_lock(core, __func__);
  2901. if (__resume(core)) {
  2902. i_vpr_e(inst, "Resume from power collapse failed\n");
  2903. rc = -EINVAL;
  2904. goto exit;
  2905. }
  2906. rc = __set_clocks(core, freq);
  2907. exit:
  2908. core_unlock(core, __func__);
  2909. return rc;
  2910. }
  2911. int venus_hfi_scale_buses(struct msm_vidc_inst *inst, u64 bw_ddr, u64 bw_llcc)
  2912. {
  2913. int rc = 0;
2914. struct msm_vidc_core *core;
  2915. if (!inst || !inst->core) {
  2916. d_vpr_e("%s: invalid params\n", __func__);
  2917. return -EINVAL;
  2918. }
  2919. core = inst->core;
  2920. core_lock(core, __func__);
  2921. rc = __vote_buses(core, bw_ddr, bw_llcc);
  2922. core_unlock(core, __func__);
  2923. return rc;
  2924. }