// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 */

#include <linux/iommu.h>
#include <linux/qcom_scm.h>
#include <linux/soc/qcom/smem.h>
#include <linux/irqreturn.h>
#include <linux/reset.h>
#include <linux/interconnect.h>
#include <linux/of_address.h>
#include <linux/firmware.h>
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/iopoll.h>

#include "venus_hfi.h"
#include "msm_vidc_core.h"
#include "msm_vidc_power.h"
#include "msm_vidc_platform.h"
#include "msm_vidc_memory.h"
#include "msm_vidc_driver.h"
#include "msm_vidc_debug.h"
#include "hfi_packet.h"
#include "venus_hfi_response.h"
#include "msm_vidc_events.h"

#define MIN_PAYLOAD_SIZE 3
#define MAX_FIRMWARE_NAME_SIZE 128

#define update_offset(offset, val)	((offset) += (val))
#define update_timestamp(ts, val) \
	do { \
		do_div((ts), NSEC_PER_USEC); \
		(ts) += (val); \
		(ts) *= NSEC_PER_USEC; \
	} while (0)
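
/*
 * Illustrative example (not part of the original source): update_timestamp()
 * truncates a nanosecond timestamp to microsecond precision, applies an
 * adjustment expressed in microseconds, then converts back to nanoseconds:
 *
 *	u64 ts = 1000500;		// 1000.5 us, in ns
 *	update_timestamp(ts, 2);	// ts == (1000 + 2) * 1000 == 1002000 ns
 */
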
extern struct msm_vidc_core *g_core;

static int __resume(struct msm_vidc_core *core);
static int __suspend(struct msm_vidc_core *core);

struct tzbsp_memprot {
	u32 cp_start;
	u32 cp_size;
	u32 cp_nonpixel_start;
	u32 cp_nonpixel_size;
};

enum tzbsp_video_state {
	TZBSP_VIDEO_STATE_SUSPEND = 0,
	TZBSP_VIDEO_STATE_RESUME = 1,
	TZBSP_VIDEO_STATE_RESTORE_THRESHOLD = 2,
};

enum reset_state {
	INIT = 1,
	ASSERT,
	DEASSERT,
};

/* Less than 50 MBps is treated as a trivial BW change */
#define TRIVIAL_BW_THRESHOLD 50000
#define TRIVIAL_BW_CHANGE(a, b) \
	((a) > (b) ? (a) - (b) < TRIVIAL_BW_THRESHOLD : \
		(b) - (a) < TRIVIAL_BW_THRESHOLD)
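
/*
 * Example (illustrative): moving a DDR vote from 1000000 kBps to
 * 1040000 kBps is a 40 MBps change, below TRIVIAL_BW_THRESHOLD, so
 * __vote_buses() skips re-voting; a 60 MBps change is voted normally.
 */
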
/*
 * Look up the first LUT entry large enough for the given resolution and
 * frame rate; fall back to the last (largest) entry if none matches.
 */
struct lut const *__lut(int width, int height, int fps)
{
	int frame_size = height * width, c = 0;

	do {
		if (LUT[c].frame_size >= frame_size && LUT[c].frame_rate >= fps)
			return &LUT[c];
	} while (++c < ARRAY_SIZE(LUT));

	return &LUT[ARRAY_SIZE(LUT) - 1];
}
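
/*
 * Example (illustrative): __lut(1920, 1080, 30) returns the first entry
 * whose frame_size covers 1920 * 1080 == 2073600 pixels and whose
 * frame_rate is at least 30, falling back to the largest entry when
 * nothing matches.
 */
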
fp_t __compression_ratio(struct lut const *entry, int bpp)
{
	int c = 0;

	for (c = 0; c < COMPRESSION_RATIO_MAX; ++c) {
		if (entry->compression_ratio[c].bpp == bpp)
			return entry->compression_ratio[c].ratio;
	}

	WARN(true, "Shouldn't be here, LUT possibly corrupted?\n");
	return FP_ZERO; /* impossible */
}

void __dump(struct dump dump[], int len)
{
	int c = 0;

	for (c = 0; c < len; ++c) {
		char format_line[128] = "", formatted_line[128] = "";

		if (dump[c].val == DUMP_HEADER_MAGIC) {
			snprintf(formatted_line, sizeof(formatted_line), "%s\n",
				 dump[c].key);
		} else {
			bool fp_format = !strcmp(dump[c].format, DUMP_FP_FMT);

			if (!fp_format) {
				snprintf(format_line, sizeof(format_line),
					 " %-35s: %s\n", dump[c].key,
					 dump[c].format);
				snprintf(formatted_line, sizeof(formatted_line),
					 format_line, dump[c].val);
			} else {
				size_t integer_part, fractional_part;

				integer_part = fp_int(dump[c].val);
				fractional_part = fp_frac(dump[c].val);
				snprintf(formatted_line, sizeof(formatted_line),
					 " %-35s: %zd + %zd/%zd\n",
					 dump[c].key, integer_part,
					 fractional_part,
					 fp_frac_base());
			}
		}
		d_vpr_b("%s", formatted_line);
	}
}

static void __dump_packet(u8 *packet, const char *function, void *qinfo)
{
	u32 c = 0, session_id, packet_size = *(u32 *)packet;
	const int row_size = 32;
	struct msm_vidc_inst *inst = NULL;
	bool matches = false;
	/*
	 * row must contain enough space for 0xdeadbaad * 8 to be converted
	 * into "de ad ba ad " * 8 + '\0'
	 */
	char row[3 * 32];

	session_id = *((u32 *)packet + 1);

	list_for_each_entry(inst, &g_core->instances, list) {
		if (inst->session_id == session_id) {
			matches = true;
			break;
		}
	}

	if (matches)
		i_vpr_t(inst, "%s: %pK\n", function, qinfo);
	else
		d_vpr_t("%s: %pK\n", function, qinfo);

	for (c = 0; c * row_size < packet_size; ++c) {
		int bytes_to_read = ((c + 1) * row_size > packet_size) ?
			packet_size % row_size : row_size;
		hex_dump_to_buffer(packet + c * row_size, bytes_to_read,
				   row_size, 4, row, sizeof(row), false);
		if (matches)
			i_vpr_t(inst, "%s\n", row);
		else
			d_vpr_t("%s\n", row);
	}
}

static void __fatal_error(bool fatal)
{
	WARN_ON(fatal);
}

static int __strict_check(struct msm_vidc_core *core, const char *function)
{
	bool fatal = !mutex_is_locked(&core->lock);

	__fatal_error(fatal);

	if (fatal)
		d_vpr_e("%s: strict check failed\n", function);

	return fatal ? -EINVAL : 0;
}

bool __core_in_valid_state(struct msm_vidc_core *core)
{
	return core->state != MSM_VIDC_CORE_DEINIT;
}

bool is_sys_cache_present(struct msm_vidc_core *core)
{
	return core->dt->sys_cache_present;
}

static bool __valdiate_session(struct msm_vidc_core *core,
			       struct msm_vidc_inst *inst, const char *func)
{
	bool valid = false;
	struct msm_vidc_inst *temp;
	int rc = 0;

	if (!core || !inst)
		return false;

	rc = __strict_check(core, __func__);
	if (rc)
		return false;

	list_for_each_entry(temp, &core->instances, list) {
		if (temp == inst) {
			valid = true;
			break;
		}
	}
	if (!valid)
		i_vpr_e(inst, "%s: invalid session\n", func);

	return valid;
}

int __write_register(struct msm_vidc_core *core, u32 reg, u32 value)
{
	u32 hwiosymaddr = reg;
	u8 *base_addr;
	int rc = 0;

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	rc = __strict_check(core, __func__);
	if (rc)
		return rc;

	if (!core->power_enabled) {
		d_vpr_e("HFI register write failed: power is off\n");
		return -EINVAL;
	}

	base_addr = core->register_base_addr;
	d_vpr_l("regwrite(%pK + %#x) = %#x\n", base_addr, hwiosymaddr, value);
	base_addr += hwiosymaddr;
	writel_relaxed(value, base_addr);

	/* Memory barrier to make sure value is written into the register */
	wmb();

	return rc;
}

/*
 * The mask argument selects which bits to update. For example, if mask is
 * 0x11, only bits 0 and 4 are updated with the corresponding bits from
 * value. To update the entire register, pass mask = 0xFFFFFFFF.
 */
int __write_register_masked(struct msm_vidc_core *core,
			    u32 reg, u32 value, u32 mask)
{
	u32 prev_val, new_val;
	u8 *base_addr;
	int rc = 0;

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	rc = __strict_check(core, __func__);
	if (rc)
		return rc;

	if (!core->power_enabled) {
		d_vpr_e("%s: register write failed, power is off\n",
			__func__);
		return -EINVAL;
	}

	base_addr = core->register_base_addr;
	base_addr += reg;

	prev_val = readl_relaxed(base_addr);
	/* Memory barrier to ensure the register read is correct */
	rmb();

	new_val = (prev_val & ~mask) | (value & mask);
	d_vpr_l(
		"Base addr: %pK, writing to: %#x, previous-value: %#x, value: %#x, mask: %#x, new-value: %#x...\n",
		base_addr, reg, prev_val, value, mask, new_val);
	writel_relaxed(new_val, base_addr);
	/* Memory barrier to make sure value is written into the register */
	wmb();

	return rc;
}
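
/*
 * Example usage (illustrative; HYP_REG_CTRL is a hypothetical register):
 * set bit 0 and clear bit 4 while leaving all other bits untouched:
 *
 *	rc = __write_register_masked(core, HYP_REG_CTRL, 0x01, 0x11);
 */
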
int __read_register(struct msm_vidc_core *core, u32 reg, u32 *value)
{
	int rc = 0;
	u8 *base_addr;

	if (!core || !value) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (!core->power_enabled) {
		d_vpr_e("HFI register read failed: power is off\n");
		return -EINVAL;
	}

	base_addr = core->register_base_addr;

	*value = readl_relaxed(base_addr + reg);
	/*
	 * Memory barrier to make sure value is read correctly from the
	 * register.
	 */
	rmb();
	d_vpr_l("regread(%pK + %#x) = %#x\n", base_addr, reg, *value);

	return rc;
}

int __read_register_with_poll_timeout(struct msm_vidc_core *core,
				      u32 reg, u32 mask, u32 exp_val,
				      u32 sleep_us, u32 timeout_us)
{
	int rc = 0;
	u32 val = 0;
	u8 *addr;

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (!core->power_enabled) {
		d_vpr_e("%s failed: power is off\n", __func__);
		return -EINVAL;
	}

	addr = (u8 *)core->register_base_addr + reg;

	rc = readl_relaxed_poll_timeout(addr, val, ((val & mask) == exp_val),
					sleep_us, timeout_us);
	/*
	 * Memory barrier to make sure value is read correctly from the
	 * register.
	 */
	rmb();
	d_vpr_l(
		"regread(%pK + %#x) = %#x. rc %d, mask %#x, exp_val %#x, cond %u, sleep %u, timeout %u\n",
		core->register_base_addr, reg, val, rc, mask, exp_val,
		((val & mask) == exp_val), sleep_us, timeout_us);

	return rc;
}
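
/*
 * Example usage (illustrative; HYP_REG_STATUS and the idle bit are
 * hypothetical): poll every 100 us, for at most 2000 us, until bit 0
 * of a status register goes high; rc is -ETIMEDOUT on timeout:
 *
 *	rc = __read_register_with_poll_timeout(core, HYP_REG_STATUS,
 *					       BIT(0), BIT(0), 100, 2000);
 */
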
static void __schedule_power_collapse_work(struct msm_vidc_core *core)
{
	if (!core || !core->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}

	if (!core->capabilities[SW_PC].value) {
		d_vpr_l("software power collapse not enabled\n");
		return;
	}

	if (!mod_delayed_work(core->pm_workq, &core->pm_work,
			      msecs_to_jiffies(core->capabilities[SW_PC_DELAY].value))) {
		d_vpr_h("power collapse already scheduled\n");
	} else {
		d_vpr_l("power collapse scheduled for %d ms\n",
			core->capabilities[SW_PC_DELAY].value);
	}
}

static void __cancel_power_collapse_work(struct msm_vidc_core *core)
{
	if (!core || !core->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}

	if (!core->capabilities[SW_PC].value)
		return;

	cancel_delayed_work(&core->pm_work);
}

int __acquire_regulator(struct msm_vidc_core *core,
			struct regulator_info *rinfo)
{
	int rc = 0;

	if (!core || !rinfo) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (rinfo->has_hw_power_collapse) {
		if (!rinfo->regulator) {
			d_vpr_e("%s: invalid regulator\n", __func__);
			rc = -EINVAL;
			goto exit;
		}

		if (regulator_get_mode(rinfo->regulator) ==
		    REGULATOR_MODE_NORMAL) {
			core->handoff_done = false;
			d_vpr_h("Skip acquire regulator %s\n", rinfo->name);
			goto exit;
		}

		rc = regulator_set_mode(rinfo->regulator,
					REGULATOR_MODE_NORMAL);
		if (rc) {
			/*
			 * This is somewhat fatal, but nothing we can do
			 * about it. We can't disable the regulator w/o
			 * getting it back under s/w control.
			 */
			d_vpr_e("Failed to acquire regulator control: %s\n",
				rinfo->name);
			goto exit;
		} else {
			core->handoff_done = false;
			d_vpr_h("Acquired regulator control from HW: %s\n",
				rinfo->name);
		}

		if (!regulator_is_enabled(rinfo->regulator)) {
			d_vpr_e("%s: regulator %s is not enabled\n",
				__func__, rinfo->name);
			__fatal_error(true);
		}
	}

exit:
	return rc;
}

static int __acquire_regulators(struct msm_vidc_core *core)
{
	int rc = 0;
	struct regulator_info *rinfo;

	venus_hfi_for_each_regulator(core, rinfo)
		__acquire_regulator(core, rinfo);

	return rc;
}

static int __hand_off_regulator(struct msm_vidc_core *core,
				struct regulator_info *rinfo)
{
	int rc = 0;

	if (rinfo->has_hw_power_collapse) {
		if (!rinfo->regulator) {
			d_vpr_e("%s: invalid regulator\n", __func__);
			return -EINVAL;
		}

		rc = regulator_set_mode(rinfo->regulator,
					REGULATOR_MODE_FAST);
		if (rc) {
			d_vpr_e("Failed to hand off regulator control: %s\n",
				rinfo->name);
			return rc;
		} else {
			core->handoff_done = true;
			d_vpr_h("Hand off regulator control to HW: %s\n",
				rinfo->name);
		}

		if (!regulator_is_enabled(rinfo->regulator)) {
			d_vpr_e("%s: regulator %s is not enabled\n",
				__func__, rinfo->name);
			__fatal_error(true);
		}
	}

	return rc;
}

static int __hand_off_regulators(struct msm_vidc_core *core)
{
	struct regulator_info *rinfo;
	int rc = 0, c = 0;

	venus_hfi_for_each_regulator(core, rinfo) {
		rc = __hand_off_regulator(core, rinfo);
		/*
		 * If one regulator hand-off fails, the driver must take
		 * back control of the regulators already handed off.
		 */
		if (rc)
			goto err_reg_handoff_failed;
		c++;
	}

	return rc;

err_reg_handoff_failed:
	venus_hfi_for_each_regulator_reverse_continue(core, rinfo, c)
		__acquire_regulator(core, rinfo);

	return rc;
}

int __set_registers(struct msm_vidc_core *core)
{
	struct reg_set *reg_set;
	int i, rc = 0;

	if (!core || !core->dt) {
		d_vpr_e("core resources null, cannot set registers\n");
		return -EINVAL;
	}

	reg_set = &core->dt->reg_set;
	for (i = 0; i < reg_set->count; i++) {
		rc = __write_register_masked(core, reg_set->reg_tbl[i].reg,
					     reg_set->reg_tbl[i].value,
					     reg_set->reg_tbl[i].mask);
		if (rc)
			return rc;
	}

	return rc;
}

static int __vote_bandwidth(struct bus_info *bus, unsigned long bw_kbps)
{
	int rc = 0;

	if (!bus->path) {
		d_vpr_e("%s: invalid bus\n", __func__);
		return -EINVAL;
	}

	d_vpr_p("Voting bus %s to ab %lu kBps\n", bus->name, bw_kbps);

	rc = icc_set_bw(bus->path, bw_kbps, 0);
	if (rc)
		d_vpr_e("Failed voting bus %s to ab %lu, rc=%d\n",
			bus->name, bw_kbps, rc);

	return rc;
}

int __unvote_buses(struct msm_vidc_core *core)
{
	int rc = 0;
	struct bus_info *bus = NULL;

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	core->power.bw_ddr = 0;
	core->power.bw_llcc = 0;

	venus_hfi_for_each_bus(core, bus) {
		rc = __vote_bandwidth(bus, 0);
		if (rc)
			goto err_unknown_device;
	}

err_unknown_device:
	return rc;
}

int __vote_buses(struct msm_vidc_core *core,
		 unsigned long bw_ddr, unsigned long bw_llcc)
{
	int rc = 0;
	struct bus_info *bus = NULL;
	unsigned long bw_kbps = 0, bw_prev = 0;
	enum vidc_bus_type type;

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	venus_hfi_for_each_bus(core, bus) {
		if (bus && bus->path) {
			type = get_type_frm_name(bus->name);

			if (type == DDR) {
				bw_kbps = bw_ddr;
				bw_prev = core->power.bw_ddr;
			} else if (type == LLCC) {
				bw_kbps = bw_llcc;
				bw_prev = core->power.bw_llcc;
			} else {
				bw_kbps = bus->range[1];
				bw_prev = core->power.bw_ddr ?
						bw_kbps : 0;
			}

			/* ensure the requested BW is within limits */
			bw_kbps = clamp_t(typeof(bw_kbps), bw_kbps,
					  bus->range[0], bus->range[1]);

			if (TRIVIAL_BW_CHANGE(bw_kbps, bw_prev) && bw_prev) {
				d_vpr_l("Skip voting bus %s to %lu kBps\n",
					bus->name, bw_kbps);
				continue;
			}

			rc = __vote_bandwidth(bus, bw_kbps);

			if (type == DDR)
				core->power.bw_ddr = bw_kbps;
			else if (type == LLCC)
				core->power.bw_llcc = bw_kbps;
		} else {
			d_vpr_e("No bus to vote\n");
		}
	}

	return rc;
}
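
/*
 * Example (illustrative): vote ~1 GBps on DDR and ~0.5 GBps on LLCC;
 * values are in kBps and are clamped to each bus's DT range:
 *
 *	rc = __vote_buses(core, 1000000, 500000);
 */
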
static int __tzbsp_set_video_state(enum tzbsp_video_state state)
{
	int tzbsp_rsp = qcom_scm_set_remote_state(state, 0);

	d_vpr_l("Set state %d, resp %d\n", state, tzbsp_rsp);
	if (tzbsp_rsp) {
		d_vpr_e("Failed to set video core state %d: %d\n",
			state, tzbsp_rsp);
		return -EINVAL;
	}

	return 0;
}

int __set_clk_rate(struct msm_vidc_core *core,
		   struct clock_info *cl, u64 rate)
{
	int rc = 0;
	struct mmrm_client_data client_data;
	struct mmrm_client *client;

	if (!core || !cl || !core->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	/* not registered */
	if (core->capabilities[MMRM].value && !cl->mmrm_client) {
		d_vpr_e("%s: invalid mmrm client\n", __func__);
		return -EINVAL;
	}

	/*
	 * This conversion is necessary since we are scaling clock values
	 * based on the branch clock. However, the mmrm driver expects the
	 * source clock to be registered and used for scaling.
	 * TODO: Remove this scaling if using the source clock instead of
	 * the branch clock.
	 */
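	/*
	 * Illustrative example (the actual ratio is platform-defined):
	 * with a source/branch scaling ratio of 3, a 240 MHz branch-clock
	 * request is programmed as 720 MHz on the source clock below.
	 */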
	rate = rate * MSM_VIDC_CLOCK_SOURCE_SCALING_RATIO;

	/* bail out early if the requested clk rate has not changed */
	if (rate == cl->prev)
		return 0;

	d_vpr_p("Scaling clock %s to %llu, prev %llu\n",
		cl->name, rate, cl->prev);

	if (core->capabilities[MMRM].value) {
		/* set clock rate through the mmrm driver */
		client = cl->mmrm_client;
		memset(&client_data, 0, sizeof(client_data));
		client_data.num_hw_blocks = 1;
		rc = mmrm_client_set_value(client, &client_data, rate);
		if (rc) {
			d_vpr_e("%s: Failed to set mmrm clock rate %llu %s: %d\n",
				__func__, rate, cl->name, rc);
			return rc;
		}
	} else {
		/* set clock rate through the clock driver */
		rc = clk_set_rate(cl->clk, rate);
		if (rc) {
			d_vpr_e("%s: Failed to set clock rate %llu %s: %d\n",
				__func__, rate, cl->name, rc);
			return rc;
		}
	}
	cl->prev = rate;

	return rc;
}

int __set_clocks(struct msm_vidc_core *core, u32 freq)
{
	int rc = 0;
	struct clock_info *cl;

	venus_hfi_for_each_clock(core, cl) {
		if (cl->has_scaling) {
			rc = __set_clk_rate(core, cl, freq);
			if (rc)
				return rc;
		}
	}

	return 0;
}

int __scale_clocks(struct msm_vidc_core *core)
{
	int rc = 0;
	struct allowed_clock_rates_table *allowed_clks_tbl;
	u32 freq = 0;

	if (!core || !core->dt) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	allowed_clks_tbl = core->dt->allowed_clks_tbl;
	freq = core->power.clk_freq ? core->power.clk_freq :
		allowed_clks_tbl[0].clock_rate;

	rc = __set_clocks(core, freq);
	if (rc)
		return rc;

	core->power.clk_freq = freq;

	return 0;
}

static int __write_queue(struct msm_vidc_iface_q_info *qinfo, u8 *packet,
			 bool *rx_req_is_set)
{
	struct hfi_queue_header *queue;
	u32 packet_size_in_words, new_write_idx;
	u32 empty_space, read_idx, write_idx;
	u32 *write_ptr;

	if (!qinfo || !packet) {
		d_vpr_e("%s: invalid params %pK %pK\n",
			__func__, qinfo, packet);
		return -EINVAL;
	} else if (!qinfo->q_array.align_virtual_addr) {
		d_vpr_e("Queues have already been freed\n");
		return -EINVAL;
	}

	queue = (struct hfi_queue_header *)qinfo->q_hdr;
	if (!queue) {
		d_vpr_e("queue not present\n");
		return -ENOENT;
	}

	if (msm_vidc_debug & VIDC_PKT)
		__dump_packet(packet, __func__, qinfo);

	// TODO: handle writing packet
	//d_vpr_e("skip writing packet\n");
	//return 0;

	packet_size_in_words = (*(u32 *)packet) >> 2;
	if (!packet_size_in_words || packet_size_in_words >
	    qinfo->q_array.mem_size >> 2) {
		d_vpr_e("Invalid packet size\n");
		return -ENODATA;
	}

	read_idx = queue->qhdr_read_idx;
	write_idx = queue->qhdr_write_idx;

	empty_space = (write_idx >= read_idx) ?
		((qinfo->q_array.mem_size >> 2) - (write_idx - read_idx)) :
		(read_idx - write_idx);
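	/*
	 * Worked example (illustrative): with a queue of
	 * mem_size >> 2 == 1024 words, read_idx == 10 and write_idx == 1020
	 * leave 1024 - (1020 - 10) == 14 free words; a 14-word packet is
	 * rejected below because one word is kept unused to distinguish a
	 * full queue from an empty one.
	 */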
	if (empty_space <= packet_size_in_words) {
		queue->qhdr_tx_req = 1;
		d_vpr_e("Insufficient size (%d) to write (%d)\n",
			empty_space, packet_size_in_words);
		return -ENOTEMPTY;
	}

	queue->qhdr_tx_req = 0;

	new_write_idx = write_idx + packet_size_in_words;
	write_ptr = (u32 *)((qinfo->q_array.align_virtual_addr) +
			    (write_idx << 2));
	if (write_ptr < (u32 *)qinfo->q_array.align_virtual_addr ||
	    write_ptr > (u32 *)(qinfo->q_array.align_virtual_addr +
				qinfo->q_array.mem_size)) {
		d_vpr_e("Invalid write index\n");
		return -ENODATA;
	}

	if (new_write_idx < (qinfo->q_array.mem_size >> 2)) {
		memcpy(write_ptr, packet, packet_size_in_words << 2);
	} else {
		new_write_idx -= qinfo->q_array.mem_size >> 2;
		memcpy(write_ptr, packet, (packet_size_in_words -
					   new_write_idx) << 2);
		memcpy((void *)qinfo->q_array.align_virtual_addr,
		       packet + ((packet_size_in_words - new_write_idx) << 2),
		       new_write_idx << 2);
	}

	/*
	 * Memory barrier to make sure packet is written before updating the
	 * write index.
	 */
	mb();
	queue->qhdr_write_idx = new_write_idx;
	if (rx_req_is_set)
		*rx_req_is_set = true;
	/*
	 * Memory barrier to make sure write index is updated before an
	 * interrupt is raised on venus.
	 */
	mb();

	return 0;
}

static int __read_queue(struct msm_vidc_iface_q_info *qinfo, u8 *packet,
			u32 *pb_tx_req_is_set)
{
	struct hfi_queue_header *queue;
	u32 packet_size_in_words, new_read_idx;
	u32 *read_ptr;
	u32 receive_request = 0;
	u32 read_idx, write_idx;
	int rc = 0;

	if (!qinfo || !packet || !pb_tx_req_is_set) {
		d_vpr_e("%s: invalid params %pK %pK %pK\n",
			__func__, qinfo, packet, pb_tx_req_is_set);
		return -EINVAL;
	} else if (!qinfo->q_array.align_virtual_addr) {
		d_vpr_e("Queues have already been freed\n");
		return -EINVAL;
	}

	/* Memory barrier to make sure data is valid before reading it */
	mb();

	queue = (struct hfi_queue_header *)qinfo->q_hdr;
	if (!queue) {
		d_vpr_e("Queue memory is not allocated\n");
		return -ENOMEM;
	}

	/*
	 * Do not set the receive request for the debug queue: if set,
	 * Venus generates an interrupt for debug messages even when there
	 * is no response message available. In general the debug queue
	 * does not become full, as it is emptied on every interrupt from
	 * Venus; Venus generates an interrupt anyway if it does fill up.
	 */
	if (queue->qhdr_type & HFI_Q_ID_CTRL_TO_HOST_MSG_Q)
		receive_request = 1;

	read_idx = queue->qhdr_read_idx;
	write_idx = queue->qhdr_write_idx;

	if (read_idx == write_idx) {
		queue->qhdr_rx_req = receive_request;
		/*
		 * mb() to ensure qhdr is updated in main memory so that
		 * venus reads the updated header values.
		 */
		mb();
		*pb_tx_req_is_set = 0;
		d_vpr_l(
			"%s queue is empty, rx_req = %u, tx_req = %u, read_idx = %u\n",
			receive_request ? "message" : "debug",
			queue->qhdr_rx_req, queue->qhdr_tx_req,
			queue->qhdr_read_idx);
		return -ENODATA;
	}

	read_ptr = (u32 *)((qinfo->q_array.align_virtual_addr) +
			   (read_idx << 2));
	if (read_ptr < (u32 *)qinfo->q_array.align_virtual_addr ||
	    read_ptr > (u32 *)(qinfo->q_array.align_virtual_addr +
			       qinfo->q_array.mem_size - sizeof(*read_ptr))) {
		d_vpr_e("Invalid read index\n");
		return -ENODATA;
	}

	packet_size_in_words = (*read_ptr) >> 2;
	if (!packet_size_in_words) {
		d_vpr_e("Zero packet size\n");
		return -ENODATA;
	}

	new_read_idx = read_idx + packet_size_in_words;
	if (((packet_size_in_words << 2) <= VIDC_IFACEQ_VAR_HUGE_PKT_SIZE) &&
	    read_idx <= (qinfo->q_array.mem_size >> 2)) {
		if (new_read_idx < (qinfo->q_array.mem_size >> 2)) {
			memcpy(packet, read_ptr,
			       packet_size_in_words << 2);
		} else {
			new_read_idx -= (qinfo->q_array.mem_size >> 2);
			memcpy(packet, read_ptr,
			       (packet_size_in_words - new_read_idx) << 2);
			memcpy(packet + ((packet_size_in_words -
					  new_read_idx) << 2),
			       (u8 *)qinfo->q_array.align_virtual_addr,
			       new_read_idx << 2);
		}
	} else {
		d_vpr_e("BAD packet received, read_idx: %#x, pkt_size: %d\n",
			read_idx, packet_size_in_words << 2);
		d_vpr_e("Dropping this packet\n");
		new_read_idx = write_idx;
		rc = -ENODATA;
	}

	queue->qhdr_rx_req = receive_request;

	queue->qhdr_read_idx = new_read_idx;
	/*
	 * mb() to ensure qhdr is updated in main memory so that venus
	 * reads the updated header values.
	 */
	mb();

	*pb_tx_req_is_set = (queue->qhdr_tx_req == 1) ? 1 : 0;

	if ((msm_vidc_debug & VIDC_PKT) &&
	    !(queue->qhdr_type & HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q))
		__dump_packet(packet, __func__, qinfo);

	return rc;
}

/* Writes into cmdq without raising an interrupt */
static int __iface_cmdq_write_relaxed(struct msm_vidc_core *core,
				      void *pkt, bool *requires_interrupt)
{
	struct msm_vidc_iface_q_info *q_info;
	//struct vidc_hal_cmd_pkt_hdr *cmd_packet;
	int rc = -E2BIG;

	if (!core || !pkt) {
		d_vpr_e("%s: invalid params %pK %pK\n",
			__func__, core, pkt);
		return -EINVAL;
	}

	rc = __strict_check(core, __func__);
	if (rc)
		return rc;

	if (!__core_in_valid_state(core)) {
		d_vpr_e("%s: fw not in init state\n", __func__);
		rc = -EINVAL;
		goto err_q_null;
	}

	//cmd_packet = (struct vidc_hal_cmd_pkt_hdr *)pkt;
	//core->last_packet_type = cmd_packet->packet_type;

	q_info = &core->iface_queues[VIDC_IFACEQ_CMDQ_IDX];
	if (!q_info) {
		d_vpr_e("cannot write to shared Q's\n");
		goto err_q_null;
	}

	if (!q_info->q_array.align_virtual_addr) {
		d_vpr_e("cannot write to shared CMD Q's\n");
		rc = -ENODATA;
		goto err_q_null;
	}

	rc = __resume(core);
	if (rc) {
		d_vpr_e("%s: Power on failed\n", __func__);
		goto err_q_write;
	}

	if (!__write_queue(q_info, (u8 *)pkt, requires_interrupt)) {
		__schedule_power_collapse_work(core);
		rc = 0;
	} else {
		d_vpr_e("__iface_cmdq_write: queue full\n");
	}

err_q_write:
err_q_null:
	return rc;
}

int __iface_cmdq_write(struct msm_vidc_core *core, void *pkt)
{
	bool needs_interrupt = false;
	int rc = __iface_cmdq_write_relaxed(core, pkt, &needs_interrupt);

	if (!rc && needs_interrupt)
		call_venus_op(core, raise_interrupt, core);

	return rc;
}

static int __iface_cmdq_write_intr(struct msm_vidc_core *core,
				   void *pkt, bool allow)
{
	bool needs_interrupt = false;
	int rc = __iface_cmdq_write_relaxed(core, pkt, &needs_interrupt);

	if (!rc && allow && needs_interrupt)
		call_venus_op(core, raise_interrupt, core);

	return rc;
}

int __iface_msgq_read(struct msm_vidc_core *core, void *pkt)
{
	u32 tx_req_is_set = 0;
	int rc = 0;
	struct msm_vidc_iface_q_info *q_info;

	if (!pkt) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (!__core_in_valid_state(core)) {
		d_vpr_e("%s: fw not in init state\n", __func__);
		rc = -EINVAL;
		goto read_error_null;
	}

	q_info = &core->iface_queues[VIDC_IFACEQ_MSGQ_IDX];
	if (!q_info->q_array.align_virtual_addr) {
		d_vpr_e("cannot read from shared MSG Q's\n");
		rc = -ENODATA;
		goto read_error_null;
	}

	if (!__read_queue(q_info, (u8 *)pkt, &tx_req_is_set)) {
		if (tx_req_is_set) {
			//call_venus_op(core, raise_interrupt, core);
			d_vpr_e("%s: queue is full\n", __func__);
			rc = -EINVAL;
			goto read_error_null;
		}
		rc = 0;
	} else {
		rc = -ENODATA;
	}

read_error_null:
	return rc;
}

int __iface_dbgq_read(struct msm_vidc_core *core, void *pkt)
{
	u32 tx_req_is_set = 0;
	int rc = 0;
	struct msm_vidc_iface_q_info *q_info;

	if (!pkt) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	q_info = &core->iface_queues[VIDC_IFACEQ_DBGQ_IDX];
	if (!q_info->q_array.align_virtual_addr) {
		d_vpr_e("cannot read from shared DBG Q's\n");
		rc = -ENODATA;
		goto dbg_error_null;
	}

	if (!__read_queue(q_info, (u8 *)pkt, &tx_req_is_set)) {
		if (tx_req_is_set) {
			d_vpr_e("%s: queue is full\n", __func__);
			//call_venus_op(core, raise_interrupt, core);
			rc = -EINVAL;
			goto dbg_error_null;
		}
		rc = 0;
	} else {
		rc = -ENODATA;
	}

dbg_error_null:
	return rc;
}

static void __flush_debug_queue(struct msm_vidc_core *core,
				u8 *packet, u32 packet_size)
{
	u8 *log;
	struct hfi_debug_header *pkt;
	bool local_packet = false;
	enum vidc_msg_prio log_level = msm_vidc_debug;

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}

	if (!packet || !packet_size) {
		packet = kzalloc(VIDC_IFACEQ_VAR_HUGE_PKT_SIZE, GFP_KERNEL);
		if (!packet) {
			d_vpr_e("%s: fail to allocate\n", __func__);
			return;
		}
		packet_size = VIDC_IFACEQ_VAR_HUGE_PKT_SIZE;
		local_packet = true;

		/*
		 * A local packet is used when an error has occurred.
		 * It is good to print these logs to printk as well.
		 */
		log_level |= FW_PRINTK;
	}

	while (!__iface_dbgq_read(core, packet)) {
		pkt = (struct hfi_debug_header *)packet;

		if (pkt->size < sizeof(struct hfi_debug_header)) {
			d_vpr_e("%s: invalid pkt size %d\n",
				__func__, pkt->size);
			continue;
		}

		if (pkt->size >= packet_size) {
			d_vpr_e("%s: pkt size[%d] >= packet_size[%d]\n",
				__func__, pkt->size, packet_size);
			continue;
		}

		packet[pkt->size] = '\0';
		/*
		 * All fw messages start with a newline character, which
		 * makes dprintk print the message on two lines in the
		 * kernel log. Skipping the first character of the message
		 * keeps it on a single line.
		 */
		log = (u8 *)packet + sizeof(struct hfi_debug_header) + 1;
		dprintk_firmware(log_level, "%s", log);
	}

	if (local_packet)
		kfree(packet);
}

static int __sys_set_debug(struct msm_vidc_core *core, u32 debug)
{
	int rc = 0;

	rc = hfi_packet_sys_debug_config(core, core->packet,
					 core->packet_size, debug);
	if (rc)
		goto exit;

	rc = __iface_cmdq_write(core, core->packet);
	if (rc)
		goto exit;

exit:
	if (rc)
		d_vpr_e("Debug mode setting to FW failed\n");

	return rc;
}

/*
static int __sys_set_coverage(struct msm_vidc_core *core, u32 mode)
{
	int rc = 0;

	//rc = call_hfi_pkt_op(core, sys_coverage_config, pkt, mode);
	if (rc) {
		d_vpr_e("Coverage mode setting to FW failed\n");
		return -ENOTEMPTY;
	}

	//if (__iface_cmdq_write(core, pkt, sid)) {
	//	d_vpr_e("Failed to send coverage pkt to f/w\n");
	//	return -ENOTEMPTY;
	//}

	return 0;
}
*/

static int __sys_set_power_control(struct msm_vidc_core *core, bool enable)
{
	int rc = 0;

	if (!core->handoff_done) {
		d_vpr_e("%s: skipping as power control handoff was not done\n",
			__func__);
		return rc;
	}

	rc = hfi_packet_sys_intraframe_powercollapse(core,
						     core->packet,
						     core->packet_size, enable);
	if (rc)
		return rc;

	rc = __iface_cmdq_write(core, core->packet);
	if (rc)
		return rc;

	core->hw_power_control = true;
	d_vpr_h("%s: set hardware power control successful\n", __func__);

	return rc;
}

int __prepare_pc(struct msm_vidc_core *core)
{
	int rc = 0;

	rc = hfi_packet_sys_pc_prep(core, core->packet, core->packet_size);
	if (rc) {
		d_vpr_e("Failed to create sys pc prep pkt\n");
		goto err_pc_prep;
	}

	if (__iface_cmdq_write(core, core->packet))
		rc = -ENOTEMPTY;
	if (rc)
		d_vpr_e("Failed to prepare venus for power off\n");

err_pc_prep:
	return rc;
}

static int __power_collapse(struct msm_vidc_core *core, bool force)
{
	int rc = 0;

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (!core->power_enabled) {
		d_vpr_h("%s: power already disabled\n", __func__);
		goto exit;
	}

	if (!__core_in_valid_state(core)) {
		d_vpr_e("%s: core not in init state\n", __func__);
		return -EINVAL;
	}

	__flush_debug_queue(core, (!force ? core->packet : NULL),
			    core->packet_size);

	rc = call_venus_op(core, prepare_pc, core);
	if (rc)
		goto skip_power_off;

	rc = __suspend(core);
	if (rc)
		d_vpr_e("Failed __suspend\n");

exit:
	return rc;

skip_power_off:
	d_vpr_e("%s: skipped\n", __func__);
	return -EAGAIN;
}

static int __protect_cp_mem(struct msm_vidc_core *core)
{
	struct tzbsp_memprot memprot;
	int rc = 0;
	struct context_bank_info *cb;

	if (!core)
		return -EINVAL;

	memprot.cp_start = 0x0;
	memprot.cp_size = 0x0;
	memprot.cp_nonpixel_start = 0x0;
	memprot.cp_nonpixel_size = 0x0;

	list_for_each_entry(cb, &core->dt->context_banks, list) {
		if (!strcmp(cb->name, "venus_ns")) {
			memprot.cp_size = cb->addr_range.start;
			d_vpr_h("%s: memprot.cp_size: %#x\n",
				__func__, memprot.cp_size);
		}

		if (!strcmp(cb->name, "venus_sec_non_pixel")) {
			memprot.cp_nonpixel_start = cb->addr_range.start;
			memprot.cp_nonpixel_size = cb->addr_range.size;
			d_vpr_h("%s: cp_nonpixel_start: %#x size: %#x\n",
				__func__, memprot.cp_nonpixel_start,
				memprot.cp_nonpixel_size);
		}
	}

	rc = qcom_scm_mem_protect_video_var(memprot.cp_start, memprot.cp_size,
					    memprot.cp_nonpixel_start,
					    memprot.cp_nonpixel_size);
	if (rc)
		d_vpr_e("Failed to protect memory(%d)\n", rc);

	trace_venus_hfi_var_done(memprot.cp_start, memprot.cp_size,
				 memprot.cp_nonpixel_start,
				 memprot.cp_nonpixel_size);

	return rc;
}

#if 0 // TODO
static int __core_set_resource(struct msm_vidc_core *core,
			       struct vidc_resource_hdr *resource_hdr,
			       void *resource_value)
{
	int rc = 0;

	if (!core || !resource_hdr || !resource_value) {
		d_vpr_e("%s: invalid params %pK %pK %pK\n", __func__,
			core, resource_hdr, resource_value);
		return -EINVAL;
	}

	//rc = hfi_packet_sys_set_resource(core, core->packet, core->packet_size,
	//				 resource_hdr, resource_value);
	if (rc) {
		d_vpr_e("set_res: failed to create packet\n");
		goto err_create_pkt;
	}

	//rc = __iface_cmdq_write(core, core->packet);
	if (rc)
		rc = -ENOTEMPTY;

err_create_pkt:
	return rc;
}

static int __core_release_resource(struct msm_vidc_core *core,
				   struct vidc_resource_hdr *resource_hdr)
{
	int rc = 0;

	if (!core || !resource_hdr) {
		d_vpr_e("%s: invalid params %pK %pK\n",
			__func__, core, resource_hdr);
		return -EINVAL;
	}

	//rc = hfi_packet_sys_release_resource(core, core->packet, core->packet_size, resource_hdr);
	if (rc) {
		d_vpr_e("release_res: failed to create packet\n");
		goto err_create_pkt;
	}

	//rc = __iface_cmdq_write(core, core->packet);
	if (rc)
		rc = -ENOTEMPTY;

err_create_pkt:
	return rc;
}
#endif

static void __deinit_clocks(struct msm_vidc_core *core)
{
	struct clock_info *cl;

	core->power.clk_freq = 0;
	venus_hfi_for_each_clock_reverse(core, cl) {
		if (cl->clk) {
			clk_put(cl->clk);
			cl->clk = NULL;
		}
	}
}

static int __init_clocks(struct msm_vidc_core *core)
{
	int rc = 0;
	struct clock_info *cl = NULL;

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	venus_hfi_for_each_clock(core, cl) {
		d_vpr_h("%s: scalable? %d, count %d\n",
			cl->name, cl->has_scaling, cl->count);
	}

	venus_hfi_for_each_clock(core, cl) {
		if (!cl->clk) {
			cl->clk = clk_get(&core->pdev->dev, cl->name);
			if (IS_ERR_OR_NULL(cl->clk)) {
				d_vpr_e("Failed to get clock: %s\n", cl->name);
				rc = PTR_ERR(cl->clk) ?
					PTR_ERR(cl->clk) : -EINVAL;
				cl->clk = NULL;
				goto err_clk_get;
			}
		}
	}
	core->power.clk_freq = 0;

	return 0;

err_clk_get:
	__deinit_clocks(core);
	return rc;
}

static void __deregister_mmrm(struct msm_vidc_core *core)
{
	struct clock_info *cl;

	if (!core || !core->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}

	if (!core->capabilities[MMRM].value) {
		d_vpr_h("%s: MMRM not supported\n", __func__);
		return;
	}

	venus_hfi_for_each_clock(core, cl) {
		if (cl->has_scaling && cl->mmrm_client) {
			mmrm_client_deregister(cl->mmrm_client);
			cl->mmrm_client = NULL;
		}
	}
}

static int __register_mmrm(struct msm_vidc_core *core)
{
	int rc = 0;
	struct clock_info *cl;

	if (!core || !core->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (!core->capabilities[MMRM].value) {
		d_vpr_h("%s: MMRM not supported\n", __func__);
		return 0;
	}

	venus_hfi_for_each_clock(core, cl) {
		struct mmrm_client_desc desc;
		char *name = (char *)desc.client_info.desc.name;
		// TODO: set notifier data vals
		struct mmrm_client_notifier_data notifier_data = {
			MMRM_CLIENT_RESOURCE_VALUE_CHANGE,
			{{0, 0}},
			NULL};
		// TODO: add callback fn
		desc.notifier_callback_fn = NULL;

		if (!cl->has_scaling)
			continue;

		if (IS_ERR_OR_NULL(cl->clk)) {
			d_vpr_e("%s: invalid clock: %s\n", __func__, cl->name);
			rc = PTR_ERR(cl->clk) ? PTR_ERR(cl->clk) : -EINVAL;
			goto err_register_mmrm;
		}

		desc.client_type = MMRM_CLIENT_CLOCK;
		desc.client_info.desc.client_domain = MMRM_CLIENT_DOMAIN_VIDEO;
		desc.client_info.desc.client_id = cl->clk_id;
		strlcpy(name, cl->name, sizeof(desc.client_info.desc.name));
		desc.client_info.desc.clk = cl->clk;
		desc.priority = MMRM_CLIENT_PRIOR_LOW;
		desc.pvt_data = notifier_data.pvt_data;

		d_vpr_h("%s: domain(%d) cid(%d) name(%s) clk(%pK)\n",
			__func__,
			desc.client_info.desc.client_domain,
			desc.client_info.desc.client_id,
			desc.client_info.desc.name,
			desc.client_info.desc.clk);
		d_vpr_h("%s: type(%d) pri(%d) pvt(%pK) notifier(%pK)\n",
			__func__,
			desc.client_type,
			desc.priority,
			desc.pvt_data,
			desc.notifier_callback_fn);

		cl->mmrm_client = mmrm_client_register(&desc);
		if (!cl->mmrm_client) {
			rc = -EINVAL;
			d_vpr_e("%s: failed to register clk(%s): %d\n",
				__func__, cl->name, rc);
			goto err_register_mmrm;
		}
	}

	return 0;

err_register_mmrm:
	__deregister_mmrm(core);
	return rc;
}

static int __handle_reset_clk(struct msm_vidc_core *core,
			      int reset_index, enum reset_state state)
{
	int rc = 0;
	struct msm_vidc_dt *dt = core->dt;
	struct reset_control *rst;
	struct reset_set *rst_set = &dt->reset_set;

	if (!rst_set->reset_tbl)
		return 0;

	rst = rst_set->reset_tbl[reset_index].rst;
	d_vpr_h("reset_clk: name %s reset_state %d rst %pK\n",
		rst_set->reset_tbl[reset_index].name, state, rst);

	switch (state) {
	case INIT:
		if (rst)
			goto skip_reset_init;

		rst = devm_reset_control_get(&core->pdev->dev,
					     rst_set->reset_tbl[reset_index].name);
		if (IS_ERR(rst))
			rc = PTR_ERR(rst);

		rst_set->reset_tbl[reset_index].rst = rst;
		break;
	case ASSERT:
		if (!rst) {
			/* PTR_ERR(NULL) is 0, so return an explicit error */
			rc = -EINVAL;
			goto failed_to_reset;
		}

		rc = reset_control_assert(rst);
		break;
	case DEASSERT:
		if (!rst) {
			rc = -EINVAL;
			goto failed_to_reset;
		}
		rc = reset_control_deassert(rst);
		break;
	default:
		d_vpr_e("%s: invalid reset request\n", __func__);
		rc = -EINVAL;
		goto failed_to_reset;
	}

	return 0;

skip_reset_init:
failed_to_reset:
	return rc;
}

void __disable_unprepare_clks(struct msm_vidc_core *core)
{
	struct clock_info *cl;

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}

	venus_hfi_for_each_clock_reverse(core, cl) {
		if (!cl->clk)
			continue;
		d_vpr_h("Clock: %s disable and unprepare\n", cl->name);
		if (!__clk_is_enabled(cl->clk))
			d_vpr_e("%s: clock %s already disabled\n",
				__func__, cl->name);
		clk_disable_unprepare(cl->clk);
		if (cl->has_scaling)
			__set_clk_rate(core, cl, 0);
		cl->prev = 0;
	}
}

int __reset_ahb2axi_bridge(struct msm_vidc_core *core)
{
	int rc, i;

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < core->dt->reset_set.count; i++) {
		rc = __handle_reset_clk(core, i, ASSERT);
		if (rc) {
			d_vpr_e("failed to assert reset clocks\n");
			goto failed_to_reset;
		}
		/* let the reset settle while asserted, before deassert */
		usleep_range(1000, 1100);
	}

	for (i = 0; i < core->dt->reset_set.count; i++) {
		rc = __handle_reset_clk(core, i, DEASSERT);
		if (rc) {
			d_vpr_e("failed to deassert reset clocks\n");
			goto failed_to_reset;
		}
	}

	return 0;

failed_to_reset:
	return rc;
}

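/*
 * Bring up all core clocks. For scalable clocks the rate is programmed
 * before clk_prepare_enable() so the clock never runs faster than needed:
 * clk_round_rate(clk, 0) asks the clock framework for the lowest rate the
 * clock supports. Worked example with illustrative values (not taken from
 * this driver): if clk_round_rate() returns 1200000000 and the source
 * scaling ratio is 2, __set_clk_rate() is called with 600000000, which it
 * scales back up internally.
 */
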
int __prepare_enable_clks(struct msm_vidc_core *core)
{
	struct clock_info *cl = NULL;
	int rc = 0, c = 0;
	u64 rate = 0;

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	venus_hfi_for_each_clock(core, cl) {
		if (!cl->clk) {
			d_vpr_e("%s: invalid clock\n", __func__);
			rc = -EINVAL;
			goto fail_clk_enable;
		}
		/*
		 * For the clocks we control, set the rate prior to preparing
		 * them. Since we don't really have a load at this point,
		 * scale it to the lowest frequency possible.
		 */
		if (cl->has_scaling) {
			rate = clk_round_rate(cl->clk, 0);
			/*
			 * The source clock rate is already multiplied by the
			 * scaling ratio and __set_clk_rate attempts to
			 * multiply again, so divide by the scaling ratio
			 * before calling __set_clk_rate.
			 */
			rate = rate / MSM_VIDC_CLOCK_SOURCE_SCALING_RATIO;
			__set_clk_rate(core, cl, rate);
		}
		rc = clk_prepare_enable(cl->clk);
		if (rc) {
			d_vpr_e("Failed to enable clocks\n");
			goto fail_clk_enable;
		}
		if (!__clk_is_enabled(cl->clk))
			d_vpr_e("%s: clock %s not enabled\n",
				__func__, cl->name);
		c++;
		d_vpr_h("Clock: %s prepared and enabled\n", cl->name);
	}

	call_venus_op(core, clock_config_on_enable, core);
	return rc;

fail_clk_enable:
	venus_hfi_for_each_clock_reverse_continue(core, cl, c) {
		if (!cl->clk)
			continue;
		d_vpr_e("Clock: %s disable and unprepare\n", cl->name);
		clk_disable_unprepare(cl->clk);
		if (cl->has_scaling)
			__set_clk_rate(core, cl, 0);
		cl->prev = 0;
	}
	return rc;
}

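/*
 * Bus bandwidth is managed through the interconnect (icc) framework: each
 * bus described in the device tree is resolved to an icc path with
 * of_icc_get() and released with icc_put(). The actual bandwidth votes
 * (bw_ddr/bw_llcc) are cast elsewhere in this file via __vote_buses(),
 * presumably ending in icc_set_bw() on these paths.
 */
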
static void __deinit_bus(struct msm_vidc_core *core)
{
	struct bus_info *bus = NULL;

	if (!core)
		return;

	core->power.bw_ddr = 0;
	core->power.bw_llcc = 0;

	venus_hfi_for_each_bus_reverse(core, bus) {
		if (!bus->path)
			continue;
		icc_put(bus->path);
		bus->path = NULL;
	}
}

static int __init_bus(struct msm_vidc_core *core)
{
	struct bus_info *bus = NULL;
	int rc = 0;

	if (!core) {
		d_vpr_e("%s: invalid param\n", __func__);
		return -EINVAL;
	}

	venus_hfi_for_each_bus(core, bus) {
		if (!strcmp(bus->name, "venus-llcc")) {
			if (msm_vidc_syscache_disable) {
				d_vpr_h("Skipping LLC bus init: %s\n",
					bus->name);
				continue;
			}
		}
		bus->path = of_icc_get(bus->dev, bus->name);
		if (IS_ERR_OR_NULL(bus->path)) {
			rc = PTR_ERR(bus->path) ?
				PTR_ERR(bus->path) : -EBADHANDLE;
			d_vpr_e("Failed to register bus %s: %d\n",
				bus->name, rc);
			bus->path = NULL;
			goto err_add_dev;
		}
	}

	return 0;

err_add_dev:
	__deinit_bus(core);
	return rc;
}

static void __deinit_regulators(struct msm_vidc_core *core)
{
	struct regulator_info *rinfo = NULL;

	venus_hfi_for_each_regulator_reverse(core, rinfo) {
		if (rinfo->regulator) {
			regulator_put(rinfo->regulator);
			rinfo->regulator = NULL;
		}
	}
}

static int __init_regulators(struct msm_vidc_core *core)
{
	int rc = 0;
	struct regulator_info *rinfo = NULL;

	venus_hfi_for_each_regulator(core, rinfo) {
		rinfo->regulator = regulator_get(&core->pdev->dev,
				rinfo->name);
		if (IS_ERR_OR_NULL(rinfo->regulator)) {
			rc = PTR_ERR(rinfo->regulator) ?
				PTR_ERR(rinfo->regulator) : -EBADHANDLE;
			d_vpr_e("Failed to get regulator: %s\n", rinfo->name);
			rinfo->regulator = NULL;
			goto err_reg_get;
		}
	}

	return 0;

err_reg_get:
	__deinit_regulators(core);
	return rc;
}

static void __deinit_subcaches(struct msm_vidc_core *core)
{
	struct subcache_info *sinfo = NULL;

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		goto exit;
	}

	if (!is_sys_cache_present(core))
		goto exit;

	venus_hfi_for_each_subcache_reverse(core, sinfo) {
		if (sinfo->subcache) {
			d_vpr_h("deinit_subcaches: %s\n", sinfo->name);
			llcc_slice_putd(sinfo->subcache);
			sinfo->subcache = NULL;
		}
	}

exit:
	return;
}

static int __init_subcaches(struct msm_vidc_core *core)
{
	int rc = 0;
	struct subcache_info *sinfo = NULL;

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (!is_sys_cache_present(core))
		return 0;

	venus_hfi_for_each_subcache(core, sinfo) {
		if (!strcmp("vidsc0", sinfo->name)) {
			sinfo->subcache = llcc_slice_getd(LLCC_VIDSC0);
		} else if (!strcmp("vidsc1", sinfo->name)) {
			sinfo->subcache = llcc_slice_getd(LLCC_VIDSC1);
		} else if (!strcmp("vidscfw", sinfo->name)) {
			sinfo->subcache = llcc_slice_getd(LLCC_VIDFW);
		} else {
			d_vpr_e("Invalid subcache name %s\n", sinfo->name);
		}
		if (IS_ERR_OR_NULL(sinfo->subcache)) {
			rc = PTR_ERR(sinfo->subcache) ?
				PTR_ERR(sinfo->subcache) : -EBADHANDLE;
			d_vpr_e("init_subcaches: invalid subcache: %s rc %d\n",
				sinfo->name, rc);
			sinfo->subcache = NULL;
			goto err_subcache_get;
		}
		d_vpr_h("init_subcaches: %s\n", sinfo->name);
	}

	return 0;

err_subcache_get:
	__deinit_subcaches(core);
	return rc;
}

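/*
 * Resource bring-up order matters: regulators first (power), then clocks,
 * then MMRM registration (which references those clocks), then reset
 * lines, buses and finally the LLCC subcaches. The error labels below
 * unwind in exactly the reverse order, and __deinit_resources() mirrors
 * the same sequence for the normal teardown path.
 */
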
static int __init_resources(struct msm_vidc_core *core)
{
	int i, rc = 0;

	rc = __init_regulators(core);
	if (rc) {
		d_vpr_e("Failed to get all regulators\n");
		return -ENODEV;
	}

	rc = __init_clocks(core);
	if (rc) {
		d_vpr_e("Failed to init clocks\n");
		rc = -ENODEV;
		goto err_init_clocks;
	}

	rc = __register_mmrm(core);
	if (rc) {
		d_vpr_e("Failed to register mmrm\n");
		rc = -ENODEV;
		goto err_init_mmrm;
	}

	for (i = 0; i < core->dt->reset_set.count; i++) {
		rc = __handle_reset_clk(core, i, INIT);
		if (rc) {
			d_vpr_e("Failed to init reset clocks\n");
			rc = -ENODEV;
			goto err_init_reset_clk;
		}
	}

	rc = __init_bus(core);
	if (rc) {
		d_vpr_e("Failed to init bus: %d\n", rc);
		goto err_init_bus;
	}

	rc = __init_subcaches(core);
	if (rc)
		d_vpr_e("Failed to init subcaches: %d\n", rc);

	return rc;

err_init_reset_clk:
err_init_bus:
	__deregister_mmrm(core);
err_init_mmrm:
	__deinit_clocks(core);
err_init_clocks:
	__deinit_regulators(core);
	return rc;
}

static void __deinit_resources(struct msm_vidc_core *core)
{
	__deinit_subcaches(core);
	__deinit_bus(core);
	__deregister_mmrm(core);
	__deinit_clocks(core);
	__deinit_regulators(core);
}

static int __disable_regulator(struct regulator_info *rinfo,
		struct msm_vidc_core *core)
{
	int rc = 0;

	if (!rinfo->regulator) {
		d_vpr_e("%s: invalid regulator\n", __func__);
		return -EINVAL;
	}

	d_vpr_h("Disabling regulator %s\n", rinfo->name);

	/*
	 * This call is needed. The driver must acquire control back
	 * from the hardware before it can disable the regulator;
	 * otherwise the behavior is undefined.
	 */
	rc = __acquire_regulator(core, rinfo);
	if (rc) {
		/*
		 * This is somewhat fatal, but nothing we can do
		 * about it. We can't disable the regulator without
		 * getting it back under s/w control.
		 */
		d_vpr_e("Failed to acquire control on %s\n", rinfo->name);
		goto disable_regulator_failed;
	}

	if (!regulator_is_enabled(rinfo->regulator))
		d_vpr_e("%s: regulator %s already disabled\n",
			__func__, rinfo->name);

	rc = regulator_disable(rinfo->regulator);
	if (rc) {
		d_vpr_e("Failed to disable %s: %d\n", rinfo->name, rc);
		goto disable_regulator_failed;
	}

	return 0;

disable_regulator_failed:
	/* Bring attention to this issue */
	__fatal_error(true);
	return rc;
}

int __enable_regulators(struct msm_vidc_core *core)
{
	int rc = 0, c = 0;
	struct regulator_info *rinfo;

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	d_vpr_h("Enabling regulators\n");

	venus_hfi_for_each_regulator(core, rinfo) {
		if (!rinfo->regulator) {
			d_vpr_e("%s: invalid regulator\n", __func__);
			rc = -EINVAL;
			goto err_reg_enable_failed;
		}
		rc = regulator_enable(rinfo->regulator);
		if (rc) {
			d_vpr_e("Failed to enable %s: %d\n", rinfo->name, rc);
			goto err_reg_enable_failed;
		}
		if (!regulator_is_enabled(rinfo->regulator))
			d_vpr_e("%s: regulator %s not enabled\n",
				__func__, rinfo->name);
		d_vpr_h("Enabled regulator %s\n", rinfo->name);
		c++;
	}

	return 0;

err_reg_enable_failed:
	venus_hfi_for_each_regulator_reverse_continue(core, rinfo, c) {
		if (!rinfo->regulator)
			continue;
		__disable_regulator(rinfo, core);
	}
	return rc;
}

int __disable_regulators(struct msm_vidc_core *core)
{
	struct regulator_info *rinfo;

	d_vpr_h("Disabling regulators\n");

	venus_hfi_for_each_regulator_reverse(core, rinfo)
		__disable_regulator(rinfo, core);

	return 0;
}

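/*
 * The subcache helpers below talk to two different entities: the LLCC
 * driver (llcc_slice_activate/deactivate) owns the system-cache slices on
 * the host side, while HFI_BUFFER_SUBCACHE command packets tell the Venus
 * firmware which activated slices it may use. "isactive" tracks the LLCC
 * state and "isset" tracks whether firmware has been told about a slice.
 */
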
static int __release_subcaches(struct msm_vidc_core *core)
{
	int rc = 0;
	struct subcache_info *sinfo;
	struct hfi_buffer buf;

	if (msm_vidc_syscache_disable || !is_sys_cache_present(core))
		return 0;

	if (!core->dt->sys_cache_res_set) {
		d_vpr_h("Subcaches not set to Venus\n");
		return 0;
	}

	rc = hfi_create_header(core->packet, core->packet_size,
		0, core->header_id++);
	if (rc)
		return rc;

	memset(&buf, 0, sizeof(struct hfi_buffer));
	buf.type = HFI_BUFFER_SUBCACHE;
	buf.flags = HFI_BUF_HOST_FLAG_RELEASE;

	venus_hfi_for_each_subcache_reverse(core, sinfo) {
		if (sinfo->isactive) {
			buf.index = sinfo->subcache->slice_id;
			buf.buffer_size = sinfo->subcache->slice_size;
			rc = hfi_create_packet(core->packet,
				core->packet_size,
				HFI_CMD_BUFFER,
				HFI_BUF_HOST_FLAG_NONE,
				HFI_PAYLOAD_STRUCTURE,
				HFI_PORT_NONE,
				core->packet_id++,
				&buf,
				sizeof(buf));
			if (rc)
				return rc;
		}
	}

	/* Release the subcache resources from Venus */
	rc = __iface_cmdq_write(core, core->packet);
	if (rc)
		return rc;

	venus_hfi_for_each_subcache_reverse(core, sinfo) {
		if (sinfo->isactive) {
			sinfo->isset = false;
			d_vpr_h("Release Subcache id %d size %d done\n",
				sinfo->subcache->slice_id,
				sinfo->subcache->slice_size);
		}
	}

	core->dt->sys_cache_res_set = false;

	return 0;
}

static int __disable_subcaches(struct msm_vidc_core *core)
{
	struct subcache_info *sinfo;
	int rc = 0;

	if (msm_vidc_syscache_disable || !is_sys_cache_present(core))
		return 0;

	/* De-activate subcaches */
	venus_hfi_for_each_subcache_reverse(core, sinfo) {
		if (sinfo->isactive) {
			d_vpr_h("De-activate subcache %s\n", sinfo->name);
			rc = llcc_slice_deactivate(sinfo->subcache);
			if (rc) {
				d_vpr_e("Failed to de-activate %s: %d\n",
					sinfo->name, rc);
			}
			sinfo->isactive = false;
		}
	}

	return 0;
}

static int __enable_subcaches(struct msm_vidc_core *core)
{
	int rc = 0;
	u32 c = 0;
	struct subcache_info *sinfo;

	if (msm_vidc_syscache_disable || !is_sys_cache_present(core))
		return 0;

	/* Activate subcaches */
	venus_hfi_for_each_subcache(core, sinfo) {
		rc = llcc_slice_activate(sinfo->subcache);
		if (rc) {
			d_vpr_e("Failed to activate %s: %d\n",
				sinfo->name, rc);
			__fatal_error(true);
			goto err_activate_fail;
		}
		sinfo->isactive = true;
		d_vpr_h("Activated subcache %s\n", sinfo->name);
		c++;
	}

	d_vpr_h("Activated %d Subcaches to Venus\n", c);

	return 0;

err_activate_fail:
	__release_subcaches(core);
	__disable_subcaches(core);
	return rc;
}

static int __set_subcaches(struct msm_vidc_core *core)
{
	int rc = 0;
	struct subcache_info *sinfo;
	struct hfi_buffer buf;

	if (msm_vidc_syscache_disable ||
		!is_sys_cache_present(core)) {
		return 0;
	}

	if (core->dt->sys_cache_res_set) {
		d_vpr_h("Subcaches already set to Venus\n");
		return 0;
	}

	rc = hfi_create_header(core->packet, core->packet_size,
		0, core->header_id++);
	if (rc)
		goto err_fail_set_subcaches;

	memset(&buf, 0, sizeof(struct hfi_buffer));
	buf.type = HFI_BUFFER_SUBCACHE;
	buf.flags = HFI_BUF_HOST_FLAG_NONE;

	venus_hfi_for_each_subcache(core, sinfo) {
		if (sinfo->isactive) {
			buf.index = sinfo->subcache->slice_id;
			buf.buffer_size = sinfo->subcache->slice_size;
			rc = hfi_create_packet(core->packet,
				core->packet_size,
				HFI_CMD_BUFFER,
				HFI_BUF_HOST_FLAG_NONE,
				HFI_PAYLOAD_STRUCTURE,
				HFI_PORT_NONE,
				core->packet_id++,
				&buf,
				sizeof(buf));
			if (rc)
				goto err_fail_set_subcaches;
		}
	}

	/* Set resource to Venus for activated subcaches */
	rc = __iface_cmdq_write(core, core->packet);
	if (rc)
		goto err_fail_set_subcaches;

	venus_hfi_for_each_subcache(core, sinfo) {
		if (sinfo->isactive) {
			sinfo->isset = true;
			d_vpr_h("Set Subcache id %d size %d done\n",
				sinfo->subcache->slice_id,
				sinfo->subcache->slice_size);
		}
	}

	core->dt->sys_cache_res_set = true;

	return 0;

err_fail_set_subcaches:
	__disable_subcaches(core);
	return rc;
}

/*
static int __set_ubwc_config(struct msm_vidc_core *core)
{
	int rc = 0;

	if (!core->platform->data.ubwc_config) {
		d_vpr_h("%s: invalid ubwc config\n", __func__);
		return -EINVAL;
	}

	//rc = hfi_packet_sys_ubwc_config(core, core->packet, core->packet_size);
	if (rc)
		return rc;

	//rc = __iface_cmdq_write(core, core->packet));
	if (rc)
		return rc;

	d_vpr_h("Configured UBWC Config\n");
	return rc;
}
*/

static int __venus_power_off(struct msm_vidc_core *core)
{
	int rc = 0;

	if (!core->power_enabled)
		return 0;

	rc = call_venus_op(core, power_off, core);
	if (rc) {
		d_vpr_e("Failed to power off, err: %d\n", rc);
		return rc;
	}
	core->power_enabled = false;

	return rc;
}

static int __venus_power_on(struct msm_vidc_core *core)
{
	int rc = 0;

	if (core->power_enabled)
		return 0;

	rc = call_venus_op(core, power_on, core);
	if (rc) {
		d_vpr_e("Failed to power on, err: %d\n", rc);
		return rc;
	}
	core->power_enabled = true;

	return rc;
}

static int __suspend(struct msm_vidc_core *core)
{
	int rc = 0;

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	} else if (!core->power_enabled) {
		d_vpr_h("Power already disabled\n");
		return 0;
	}

	rc = __strict_check(core, __func__);
	if (rc)
		return rc;

	d_vpr_h("Entering suspend\n");

	rc = __tzbsp_set_video_state(TZBSP_VIDEO_STATE_SUSPEND);
	if (rc) {
		d_vpr_e("Failed to suspend video core %d\n", rc);
		goto err_tzbsp_suspend;
	}

	__disable_subcaches(core);
	__venus_power_off(core);
	d_vpr_h("Venus power off\n");
	return rc;

err_tzbsp_suspend:
	return rc;
}

static int __resume(struct msm_vidc_core *core)
{
	int rc = 0;

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	} else if (core->power_enabled) {
		goto exit;
	} else if (!__core_in_valid_state(core)) {
		d_vpr_e("%s: core not in valid state\n", __func__);
		return -EINVAL;
	}

	rc = __strict_check(core, __func__);
	if (rc)
		return rc;

	d_vpr_h("Resuming from power collapse\n");
	core->handoff_done = false;
	core->hw_power_control = false;

	rc = __venus_power_on(core);
	if (rc) {
		d_vpr_e("Failed to power on venus\n");
		goto err_venus_power_on;
	}

	/* Reboot the firmware */
	rc = __tzbsp_set_video_state(TZBSP_VIDEO_STATE_RESUME);
	if (rc) {
		d_vpr_e("Failed to resume video core %d\n", rc);
		goto err_set_video_state;
	}

	/*
	 * Hand off control of regulators to h/w _after_ loading fw.
	 * Note that the GDSC will turn off when switching from normal
	 * (s/w triggered) to fast (HW triggered) unless the h/w vote is
	 * present.
	 */
	__hand_off_regulators(core);

	call_venus_op(core, setup_ucregion_memmap, core);

	/* Wait for boot completion */
	rc = call_venus_op(core, boot_firmware, core);
	if (rc) {
		d_vpr_e("Failed to reset venus core\n");
		goto err_reset_core;
	}

	__sys_set_debug(core, (msm_vidc_debug & FW_LOGMASK) >> FW_LOGSHIFT);

	rc = __enable_subcaches(core);
	if (rc) {
		d_vpr_e("Failed to activate subcache\n");
		goto err_reset_core;
	}
	__set_subcaches(core);

	rc = __sys_set_power_control(core, true);
	if (rc) {
		d_vpr_e("%s: set power control failed\n", __func__);
		__acquire_regulators(core);
		rc = 0;
	}

	d_vpr_h("Resumed from power collapse\n");
exit:
	/* Don't reset skip_pc_count for SYS_PC_PREP cmd */
	//if (core->last_packet_type != HFI_CMD_SYS_PC_PREP)
	//	core->skip_pc_count = 0;
	return rc;

err_reset_core:
	__tzbsp_set_video_state(TZBSP_VIDEO_STATE_SUSPEND);
err_set_video_state:
	__venus_power_off(core);
err_venus_power_on:
	d_vpr_e("Failed to resume from power collapse\n");
	return rc;
}

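/*
 * Each host/firmware interface queue is a circular buffer in shared
 * memory described by one hfi_queue_header. Indices and the queue size
 * are kept in 32-bit words, hence qhdr_q_size = VIDC_IFACEQ_QUEUE_SIZE / 4.
 * The rx/tx watermarks and *_req fields govern when the other side raises
 * an interrupt; rx_req = 1 asks the peer to interrupt us when it writes
 * into an empty queue (the debug queue clears it below for that reason).
 */
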
static void __set_queue_hdr_defaults(struct hfi_queue_header *q_hdr)
{
	q_hdr->qhdr_status = 0x1;
	q_hdr->qhdr_type = VIDC_IFACEQ_DFLT_QHDR;
	q_hdr->qhdr_q_size = VIDC_IFACEQ_QUEUE_SIZE / 4;
	q_hdr->qhdr_pkt_size = 0;
	q_hdr->qhdr_rx_wm = 0x1;
	q_hdr->qhdr_tx_wm = 0x1;
	q_hdr->qhdr_rx_req = 0x1;
	q_hdr->qhdr_tx_req = 0x0;
	q_hdr->qhdr_rx_irq_status = 0x0;
	q_hdr->qhdr_tx_irq_status = 0x0;
	q_hdr->qhdr_read_idx = 0x0;
	q_hdr->qhdr_write_idx = 0x0;
}

static void __interface_queues_deinit(struct msm_vidc_core *core)
{
	int i;

	d_vpr_h("%s()\n", __func__);

	msm_vidc_memory_unmap(core, &core->iface_q_table.map);
	msm_vidc_memory_free(core, &core->iface_q_table.alloc);
	msm_vidc_memory_unmap(core, &core->sfr.map);
	msm_vidc_memory_free(core, &core->sfr.alloc);

	for (i = 0; i < VIDC_IFACEQ_NUMQ; i++) {
		core->iface_queues[i].q_hdr = NULL;
		core->iface_queues[i].q_array.align_virtual_addr = NULL;
		core->iface_queues[i].q_array.align_device_addr = 0;
	}

	core->iface_q_table.align_virtual_addr = NULL;
	core->iface_q_table.align_device_addr = 0;

	core->sfr.align_virtual_addr = NULL;
	core->sfr.align_device_addr = 0;
}

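/*
 * All interface queues live in one non-secure allocation laid out as the
 * queue table header followed by the three queue payload areas. With a
 * table of size T (VIDC_IFACEQ_TABLE_SIZE) and queues of size Q
 * (VIDC_IFACEQ_QUEUE_SIZE), the offset accumulation below produces:
 *
 *	offset 0       : hfi_queue_table_header (+ per-queue headers)
 *	offset T       : CMDQ payload (host -> firmware commands)
 *	offset T + Q   : MSGQ payload (firmware -> host responses)
 *	offset T + 2*Q : DBGQ payload (firmware -> host debug strings)
 *
 * The device address of the same region is then programmed into the UC
 * region registers via setup_ucregion_memmap so firmware can find it.
 */
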
static int __interface_queues_init(struct msm_vidc_core *core)
{
	int rc = 0;
	struct hfi_queue_table_header *q_tbl_hdr;
	struct hfi_queue_header *q_hdr;
	struct msm_vidc_iface_q_info *iface_q;
	struct msm_vidc_alloc alloc;
	struct msm_vidc_map map;
	int offset = 0;
	u32 i;

	d_vpr_h("%s()\n", __func__);

	memset(&alloc, 0, sizeof(alloc));
	alloc.type = MSM_VIDC_BUF_QUEUE;
	alloc.region = MSM_VIDC_NON_SECURE;
	alloc.size = TOTAL_QSIZE;
	alloc.secure = false;
	alloc.map_kernel = true;
	rc = msm_vidc_memory_alloc(core, &alloc);
	if (rc) {
		d_vpr_e("%s: alloc failed\n", __func__);
		goto fail_alloc_queue;
	}

	memset(&map, 0, sizeof(map));
	map.type = alloc.type;
	map.region = alloc.region;
	map.dmabuf = alloc.dmabuf;
	rc = msm_vidc_memory_map(core, &map);
	if (rc) {
		d_vpr_e("%s: map failed\n", __func__);
		goto fail_alloc_queue;
	}

	core->iface_q_table.align_virtual_addr = alloc.kvaddr;
	core->iface_q_table.align_device_addr = map.device_addr;
	core->iface_q_table.mem_size = VIDC_IFACEQ_TABLE_SIZE;
	core->iface_q_table.alloc = alloc;
	core->iface_q_table.map = map;
	offset += core->iface_q_table.mem_size;

	for (i = 0; i < VIDC_IFACEQ_NUMQ; i++) {
		iface_q = &core->iface_queues[i];
		iface_q->q_array.align_device_addr = map.device_addr + offset;
		iface_q->q_array.align_virtual_addr =
			(void *)((char *)alloc.kvaddr + offset);
		iface_q->q_array.mem_size = VIDC_IFACEQ_QUEUE_SIZE;
		offset += iface_q->q_array.mem_size;
		iface_q->q_hdr = VIDC_IFACEQ_GET_QHDR_START_ADDR(
			core->iface_q_table.align_virtual_addr, i);
		__set_queue_hdr_defaults(iface_q->q_hdr);
	}

	q_tbl_hdr = (struct hfi_queue_table_header *)
		core->iface_q_table.align_virtual_addr;
	q_tbl_hdr->qtbl_version = 0;
	q_tbl_hdr->device_addr = (void *)core;
	strlcpy(q_tbl_hdr->name, "msm_v4l2_vidc", sizeof(q_tbl_hdr->name));
	q_tbl_hdr->qtbl_size = VIDC_IFACEQ_TABLE_SIZE;
	q_tbl_hdr->qtbl_qhdr0_offset = sizeof(struct hfi_queue_table_header);
	q_tbl_hdr->qtbl_qhdr_size = sizeof(struct hfi_queue_header);
	q_tbl_hdr->qtbl_num_q = VIDC_IFACEQ_NUMQ;
	q_tbl_hdr->qtbl_num_active_q = VIDC_IFACEQ_NUMQ;

	iface_q = &core->iface_queues[VIDC_IFACEQ_CMDQ_IDX];
	q_hdr = iface_q->q_hdr;
	q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
	q_hdr->qhdr_type |= HFI_Q_ID_HOST_TO_CTRL_CMD_Q;

	iface_q = &core->iface_queues[VIDC_IFACEQ_MSGQ_IDX];
	q_hdr = iface_q->q_hdr;
	q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
	q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_MSG_Q;

	iface_q = &core->iface_queues[VIDC_IFACEQ_DBGQ_IDX];
	q_hdr = iface_q->q_hdr;
	q_hdr->qhdr_start_addr = iface_q->q_array.align_device_addr;
	q_hdr->qhdr_type |= HFI_Q_ID_CTRL_TO_HOST_DEBUG_Q;
	/*
	 * Set receive request to zero on debug queue as there is no
	 * need of interrupt from video hardware for debug messages
	 */
	q_hdr->qhdr_rx_req = 0;

	/* sfr buffer */
	memset(&alloc, 0, sizeof(alloc));
	alloc.type = MSM_VIDC_BUF_QUEUE;
	alloc.region = MSM_VIDC_NON_SECURE;
	alloc.size = ALIGNED_SFR_SIZE;
	alloc.secure = false;
	alloc.map_kernel = true;
	rc = msm_vidc_memory_alloc(core, &alloc);
	if (rc) {
		d_vpr_e("%s: sfr alloc failed\n", __func__);
		goto fail_alloc_queue;
	}
	memset(&map, 0, sizeof(map));
	map.type = alloc.type;
	map.region = alloc.region;
	map.dmabuf = alloc.dmabuf;
	rc = msm_vidc_memory_map(core, &map);
	if (rc) {
		d_vpr_e("%s: sfr map failed\n", __func__);
		goto fail_alloc_queue;
	}
	core->sfr.align_device_addr = map.device_addr;
	core->sfr.align_virtual_addr = alloc.kvaddr;
	core->sfr.mem_size = ALIGNED_SFR_SIZE;
	core->sfr.alloc = alloc;
	core->sfr.map = map;
	/* write sfr buffer size in first word */
	*((u32 *)core->sfr.align_virtual_addr) = ALIGNED_SFR_SIZE;

	rc = call_venus_op(core, setup_ucregion_memmap, core);
	if (rc)
		return rc;

	return 0;

fail_alloc_queue:
	return -ENOMEM;
}

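/*
 * Firmware loading follows the usual Qualcomm PAS (Peripheral
 * Authentication Service) flow: request_firmware() pulls the signed .mbn
 * image from the filesystem, qcom_mdt_load() parses it and copies the
 * segments into the reserved "memory-region" carved out in the device
 * tree, and qcom_scm_pas_auth_and_reset() asks TrustZone to authenticate
 * the image and release the processor from reset. The returned pas-id
 * doubles as the cookie used later for qcom_scm_pas_shutdown().
 */
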
static int __load_fw_to_memory(struct platform_device *pdev,
		const char *fw_name)
{
	int rc = 0;
	const struct firmware *firmware = NULL;
	char firmware_name[MAX_FIRMWARE_NAME_SIZE] = { 0 };
	struct device_node *node = NULL;
	struct resource res = { 0 };
	phys_addr_t phys = 0;
	size_t res_size = 0;
	ssize_t fw_size = 0;
	void *virt = NULL;
	int pas_id = 0;

	if (!fw_name || !(*fw_name) || !pdev) {
		d_vpr_e("%s: Invalid inputs\n", __func__);
		return -EINVAL;
	}
	if (strlen(fw_name) >= MAX_FIRMWARE_NAME_SIZE - 4) {
		d_vpr_e("%s: Invalid fw name\n", __func__);
		return -EINVAL;
	}

	scnprintf(firmware_name, ARRAY_SIZE(firmware_name), "%s.mbn", fw_name);

	rc = of_property_read_u32(pdev->dev.of_node, "pas-id", &pas_id);
	if (rc) {
		d_vpr_e("%s: failed to read \"pas-id\". error %d\n",
			__func__, rc);
		goto exit;
	}

	node = of_parse_phandle(pdev->dev.of_node, "memory-region", 0);
	if (!node) {
		d_vpr_e("%s: failed to read \"memory-region\"\n", __func__);
		return -EINVAL;
	}
	rc = of_address_to_resource(node, 0, &res);
	/* drop the reference taken by of_parse_phandle() */
	of_node_put(node);
	if (rc) {
		d_vpr_e("%s: failed to read \"memory-region\", error %d\n",
			__func__, rc);
		goto exit;
	}
	phys = res.start;
	res_size = (size_t)resource_size(&res);

	rc = request_firmware(&firmware, firmware_name, &pdev->dev);
	if (rc) {
		d_vpr_e("%s: failed to request fw \"%s\", error %d\n",
			__func__, firmware_name, rc);
		goto exit;
	}

	fw_size = qcom_mdt_get_size(firmware);
	if (fw_size < 0 || res_size < (size_t)fw_size) {
		rc = -EINVAL;
		d_vpr_e("%s: out of bound fw image fw size: %zd, res_size: %zu\n",
			__func__, fw_size, res_size);
		goto exit;
	}

	virt = memremap(phys, res_size, MEMREMAP_WC);
	if (!virt) {
		d_vpr_e("%s: failed to remap fw memory phys %pa[p]\n",
			__func__, &phys);
		/* go through exit so the requested firmware is released */
		rc = -ENOMEM;
		goto exit;
	}

	/* prevent system suspend during fw_load */
	pm_stay_awake(pdev->dev.parent);
	rc = qcom_mdt_load(&pdev->dev, firmware, firmware_name,
			pas_id, virt, phys, res_size, NULL);
	pm_relax(pdev->dev.parent);
	if (rc) {
		d_vpr_e("%s: error %d loading fw \"%s\"\n",
			__func__, rc, firmware_name);
		goto exit;
	}
	rc = qcom_scm_pas_auth_and_reset(pas_id);
	if (rc) {
		d_vpr_e("%s: error %d authenticating fw \"%s\"\n",
			__func__, rc, firmware_name);
		goto exit;
	}

	memunmap(virt);
	release_firmware(firmware);
	d_vpr_h("%s: firmware \"%s\" loaded successfully\n",
		__func__, firmware_name);

	return pas_id;

exit:
	if (virt)
		memunmap(virt);
	if (firmware)
		release_firmware(firmware);

	return rc;
}

int __load_fw(struct msm_vidc_core *core)
{
	int rc = 0;

	d_vpr_h("%s\n", __func__);
	core->handoff_done = false;
	core->hw_power_control = false;

	trace_msm_v4l2_vidc_fw_load("START");
	rc = __init_resources(core);
	if (rc) {
		d_vpr_e("%s: Failed to init resources: %d\n", __func__, rc);
		goto fail_init_res;
	}

	rc = __venus_power_on(core);
	if (rc) {
		d_vpr_e("%s: power on failed\n", __func__);
		goto fail_venus_power_on;
	}

	if (!core->dt->fw_cookie) {
		core->dt->fw_cookie = __load_fw_to_memory(core->pdev,
				core->dt->fw_name);
		if (core->dt->fw_cookie <= 0) {
			d_vpr_e("%s: firmware download failed %d\n",
				__func__, core->dt->fw_cookie);
			core->dt->fw_cookie = 0;
			rc = -ENOMEM;
			goto fail_load_fw;
		}
	}

	rc = __protect_cp_mem(core);
	if (rc) {
		d_vpr_e("%s: protect memory failed\n", __func__);
		goto fail_protect_mem;
	}

	/*
	 * Hand off control of regulators to h/w _after_ loading fw.
	 * Note that the GDSC will turn off when switching from normal
	 * (s/w triggered) to fast (HW triggered) unless the h/w vote is
	 * present.
	 */
	__hand_off_regulators(core);
	trace_msm_v4l2_vidc_fw_load("END");

	return rc;

fail_protect_mem:
	if (core->dt->fw_cookie)
		qcom_scm_pas_shutdown(core->dt->fw_cookie);
	core->dt->fw_cookie = 0;
fail_load_fw:
	__venus_power_off(core);
fail_venus_power_on:
	__deinit_resources(core);
fail_init_res:
	trace_msm_v4l2_vidc_fw_load("END");
	return rc;
}

void __unload_fw(struct msm_vidc_core *core)
{
	int rc = 0;

	if (!core->dt->fw_cookie)
		return;

	cancel_delayed_work(&core->pm_work);
	rc = qcom_scm_pas_shutdown(core->dt->fw_cookie);
	if (rc)
		d_vpr_e("Firmware unload failed rc=%d\n", rc);
	core->dt->fw_cookie = 0;

	__venus_power_off(core);
	__deinit_resources(core);

	d_vpr_h("%s done\n", __func__);
}

static int __response_handler(struct msm_vidc_core *core)
{
	int rc = 0;

	if (call_venus_op(core, watchdog, core, core->intr_status)) {
		struct hfi_packet pkt = {.type = HFI_SYS_ERROR_WD_TIMEOUT};

		return handle_system_error(core, &pkt);
	}

	memset(core->response_packet, 0, core->packet_size);
	while (!__iface_msgq_read(core, core->response_packet)) {
		rc = handle_response(core, core->response_packet);
		if (rc)
			continue;
		/* check for system error */
		if (core->state != MSM_VIDC_CORE_INIT)
			break;
		memset(core->response_packet, 0, core->packet_size);
	}

	__schedule_power_collapse_work(core);
	__flush_debug_queue(core, core->response_packet, core->packet_size);

	return rc;
}

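/*
 * Interrupt handling uses the threaded-IRQ split: the hard handler below
 * only masks the line and wakes the thread; the thread resumes the core
 * if it was power collapsed, clears and processes the interrupt, and
 * re-enables the line unless the watchdog fired. Registration is expected
 * to look roughly like this (a sketch, assuming the probe code elsewhere
 * in this driver):
 *
 *	request_threaded_irq(irq, venus_hfi_isr, venus_hfi_isr_handler,
 *			IRQF_TRIGGER_HIGH, "msm_vidc", core);
 */
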
irqreturn_t venus_hfi_isr(int irq, void *data)
{
	disable_irq_nosync(irq);
	return IRQ_WAKE_THREAD;
}

irqreturn_t venus_hfi_isr_handler(int irq, void *data)
{
	struct msm_vidc_core *core = data;
	int num_responses = 0, rc = 0;

	d_vpr_l("%s()\n", __func__);

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return IRQ_NONE;
	}

	core_lock(core, __func__);
	rc = __resume(core);
	if (rc) {
		d_vpr_e("%s: Power on failed\n", __func__);
		core_unlock(core, __func__);
		goto exit;
	}
	call_venus_op(core, clear_interrupt, core);
	core_unlock(core, __func__);

	num_responses = __response_handler(core);

exit:
	if (!call_venus_op(core, watchdog, core, core->intr_status))
		enable_irq(irq);

	return IRQ_HANDLED;
}

void venus_hfi_pm_work_handler(struct work_struct *work)
{
	int rc = 0;
	struct msm_vidc_core *core;

	core = container_of(work, struct msm_vidc_core, pm_work.work);
	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}

	d_vpr_h("%s: try power collapse\n", __func__);
	/*
	 * It is ok to check this variable outside the lock since
	 * it is being updated in this context only
	 */
	if (core->skip_pc_count >= VIDC_MAX_PC_SKIP_COUNT) {
		d_vpr_e("Failed to PC for %d times\n", core->skip_pc_count);
		core->skip_pc_count = 0;
		msm_vidc_core_deinit(core, true);
		return;
	}

	core_lock(core, __func__);
	/* core already deinited - skip power collapse */
	if (core->state == MSM_VIDC_CORE_DEINIT) {
		d_vpr_e("%s: core is already de-inited\n", __func__);
		goto unlock;
	}

	rc = __power_collapse(core, false);
	switch (rc) {
	case 0:
		core->skip_pc_count = 0;
		/* Cancel pending delayed works if any */
		__cancel_power_collapse_work(core);
		d_vpr_h("%s: power collapse successful!\n", __func__);
		break;
	case -EBUSY:
		core->skip_pc_count = 0;
		d_vpr_h("%s: retry PC as dsp is busy\n", __func__);
		__schedule_power_collapse_work(core);
		break;
	case -EAGAIN:
		core->skip_pc_count++;
		d_vpr_e("%s: retry power collapse (count %d)\n",
			__func__, core->skip_pc_count);
		__schedule_power_collapse_work(core);
		break;
	default:
		d_vpr_e("%s: power collapse failed\n", __func__);
		break;
	}

unlock:
	core_unlock(core, __func__);
}

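/*
 * Every command sent to firmware follows the same three-step pattern used
 * throughout the rest of this file: hfi_create_header() starts a packet
 * buffer for a session (session id 0 is used for system-level packets),
 * one or more hfi_create_packet() calls append typed payloads, and
 * __iface_cmdq_write() copies the finished buffer into the shared command
 * queue and (presumably) signals the firmware. __sys_init() below is the
 * smallest instance of the pattern, with hfi_packet_sys_init() wrapping
 * the first two steps.
 */
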
static int __sys_init(struct msm_vidc_core *core)
{
	int rc = 0;

	rc = hfi_packet_sys_init(core, core->packet, core->packet_size);
	if (rc)
		return rc;

	rc = __iface_cmdq_write(core, core->packet);
	if (rc)
		return rc;

	return 0;
}

static int __sys_image_version(struct msm_vidc_core *core)
{
	int rc = 0;

	rc = hfi_packet_image_version(core, core->packet, core->packet_size);
	if (rc)
		return rc;

	rc = __iface_cmdq_write(core, core->packet);
	if (rc)
		return rc;

	return 0;
}

int venus_hfi_core_init(struct msm_vidc_core *core)
{
	int rc = 0;

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	d_vpr_h("%s(): core %pK\n", __func__, core);

	rc = __strict_check(core, __func__);
	if (rc)
		return rc;

	rc = __load_fw(core);
	if (rc)
		goto error;

	rc = __interface_queues_init(core);
	if (rc)
		goto error;

	rc = call_venus_op(core, boot_firmware, core);
	if (rc)
		goto error;

	rc = __enable_subcaches(core);
	if (rc)
		goto error;

	rc = __sys_init(core);
	if (rc)
		goto error;

	rc = __sys_image_version(core);
	if (rc)
		goto error;

	rc = __sys_set_debug(core, (msm_vidc_debug & FW_LOGMASK) >> FW_LOGSHIFT);
	if (rc)
		goto error;

	rc = __set_subcaches(core);
	if (rc)
		goto error;

	rc = __sys_set_power_control(core, true);
	if (rc) {
		d_vpr_e("%s: set power control failed\n", __func__);
		__acquire_regulators(core);
		rc = 0;
	}

	d_vpr_h("%s(): successful\n", __func__);
	return 0;

error:
	d_vpr_e("%s(): failed\n", __func__);
	return rc;
}

int venus_hfi_core_deinit(struct msm_vidc_core *core, bool force)
{
	int rc = 0;

	if (!core) {
		d_vpr_h("%s(): invalid params\n", __func__);
		return -EINVAL;
	}
	d_vpr_h("%s(): core %pK\n", __func__, core);

	rc = __strict_check(core, __func__);
	if (rc)
		return rc;

	if (core->state == MSM_VIDC_CORE_DEINIT)
		return 0;

	__resume(core);
	__flush_debug_queue(core, (!force ? core->packet : NULL), core->packet_size);
	__disable_subcaches(core);
	__unload_fw(core);
	/*
	 * Coredump must be collected after the firmware is unloaded. It
	 * also copies the queue memory, so it must run before the queues
	 * are deinitialized.
	 */
	if (msm_vidc_fw_dump)
		fw_coredump(core);
	__interface_queues_deinit(core);

	return 0;
}

int venus_hfi_noc_error_info(struct msm_vidc_core *core)
{
	int rc = 0;

	if (!core || !core->capabilities) {
		d_vpr_e("%s: Invalid parameters: %pK\n", __func__, core);
		return -EINVAL;
	}

	if (!core->capabilities[NON_FATAL_FAULTS].value)
		return 0;

	core_lock(core, __func__);
	if (core->state == MSM_VIDC_CORE_DEINIT)
		goto unlock;

	/* resume venus before accessing noc registers */
	rc = __resume(core);
	if (rc) {
		d_vpr_e("%s: Power on failed\n", __func__);
		goto unlock;
	}

	call_venus_op(core, noc_error_info, core);

unlock:
	core_unlock(core, __func__);
	return rc;
}

int venus_hfi_suspend(struct msm_vidc_core *core)
{
	int rc = 0;

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	core_lock(core, __func__);
	d_vpr_h("Suspending Venus\n");
	rc = __power_collapse(core, true);
	if (!rc) {
		/* Cancel pending delayed works if any */
		__cancel_power_collapse_work(core);
	} else {
		d_vpr_e("%s: Venus is busy\n", __func__);
		rc = -EBUSY;
	}
	core_unlock(core, __func__);

	return rc;
}

int venus_hfi_trigger_ssr(struct msm_vidc_core *core, u32 type,
	u32 client_id, u32 addr)
{
	int rc = 0;
	u32 payload[2];

	if (!core || !core->packet) {
		d_vpr_e("%s: Invalid params\n", __func__);
		return -EINVAL;
	}

	payload[0] = client_id << 4 | type;
	payload[1] = addr;

	rc = hfi_create_header(core->packet, core->packet_size,
			0 /*session_id*/,
			core->header_id++);
	if (rc)
		goto exit;

	/* HFI_CMD_SSR */
	rc = hfi_create_packet(core->packet, core->packet_size,
			HFI_CMD_SSR,
			HFI_HOST_FLAGS_RESPONSE_REQUIRED |
			HFI_HOST_FLAGS_INTR_REQUIRED,
			HFI_PAYLOAD_U64,
			HFI_PORT_NONE,
			core->packet_id++,
			&payload, sizeof(u64));
	if (rc)
		goto exit;

	rc = __iface_cmdq_write(core, core->packet);
	if (rc)
		goto exit;

exit:
	if (rc)
		d_vpr_e("%s(): failed\n", __func__);

	return rc;
}

int venus_hfi_session_open(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct msm_vidc_core *core;

	if (!inst || !inst->core || !inst->packet) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;
	core_lock(core, __func__);

	if (!__valdiate_session(core, inst, __func__)) {
		rc = -EINVAL;
		goto unlock;
	}

	__sys_set_debug(core, (msm_vidc_debug & FW_LOGMASK) >> FW_LOGSHIFT);

	rc = hfi_packet_session_command(inst,
				HFI_CMD_OPEN,
				(HFI_HOST_FLAGS_RESPONSE_REQUIRED |
				HFI_HOST_FLAGS_INTR_REQUIRED),
				HFI_PORT_NONE,
				0, /* session_id */
				HFI_PAYLOAD_U32,
				&inst->session_id, /* payload */
				sizeof(u32));
	if (rc)
		goto unlock;

	rc = __iface_cmdq_write(inst->core, inst->packet);
	if (rc)
		goto unlock;

unlock:
	core_unlock(core, __func__);
	return rc;
}

int venus_hfi_session_set_codec(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct msm_vidc_core *core;
	u32 codec;

	if (!inst || !inst->core || !inst->packet) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;
	core_lock(core, __func__);

	if (!__valdiate_session(core, inst, __func__)) {
		rc = -EINVAL;
		goto unlock;
	}

	rc = hfi_create_header(inst->packet, inst->packet_size,
			inst->session_id, core->header_id++);
	if (rc)
		goto unlock;

	codec = get_hfi_codec(inst);
	rc = hfi_create_packet(inst->packet, inst->packet_size,
			HFI_PROP_CODEC,
			HFI_HOST_FLAGS_NONE,
			HFI_PAYLOAD_U32_ENUM,
			HFI_PORT_NONE,
			core->packet_id++,
			&codec,
			sizeof(u32));
	if (rc)
		goto unlock;

	rc = __iface_cmdq_write(inst->core, inst->packet);
	if (rc)
		goto unlock;

unlock:
	core_unlock(core, __func__);
	return rc;
}

int venus_hfi_session_set_secure_mode(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct msm_vidc_core *core;
	u32 secure_mode;

	/* capabilities is dereferenced below, so validate it as well */
	if (!inst || !inst->core || !inst->capabilities || !inst->packet) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;
	core_lock(core, __func__);

	if (!__valdiate_session(core, inst, __func__)) {
		rc = -EINVAL;
		goto unlock;
	}

	rc = hfi_create_header(inst->packet, inst->packet_size,
			inst->session_id, core->header_id++);
	if (rc)
		goto unlock;

	secure_mode = inst->capabilities->cap[SECURE_MODE].value;
	rc = hfi_create_packet(inst->packet, inst->packet_size,
			HFI_PROP_SECURE,
			HFI_HOST_FLAGS_NONE,
			HFI_PAYLOAD_U32,
			HFI_PORT_NONE,
			core->packet_id++,
			&secure_mode,
			sizeof(u32));
	if (rc)
		goto unlock;

	rc = __iface_cmdq_write(inst->core, inst->packet);
	if (rc)
		goto unlock;

unlock:
	core_unlock(core, __func__);
	return rc;
}

int venus_hfi_session_property(struct msm_vidc_inst *inst,
	u32 pkt_type, u32 flags, u32 port, u32 payload_type,
	void *payload, u32 payload_size)
{
	int rc = 0;
	struct msm_vidc_core *core;

	if (!inst || !inst->core || !inst->packet) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;
	core_lock(core, __func__);

	if (!__valdiate_session(core, inst, __func__)) {
		rc = -EINVAL;
		goto unlock;
	}

	rc = hfi_create_header(inst->packet, inst->packet_size,
			inst->session_id, core->header_id++);
	if (rc)
		goto unlock;
	rc = hfi_create_packet(inst->packet, inst->packet_size,
			pkt_type,
			flags,
			payload_type,
			port,
			core->packet_id++,
			payload,
			payload_size);
	if (rc)
		goto unlock;

	rc = __iface_cmdq_write(inst->core, inst->packet);
	if (rc)
		goto unlock;

unlock:
	core_unlock(core, __func__);
	return rc;
}

int venus_hfi_session_close(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct msm_vidc_core *core;

	/* inst->core is dereferenced below, so validate it too */
	if (!inst || !inst->core || !inst->packet) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;
	core_lock(core, __func__);

	if (!__valdiate_session(core, inst, __func__)) {
		rc = -EINVAL;
		goto unlock;
	}

	rc = hfi_packet_session_command(inst,
				HFI_CMD_CLOSE,
				(HFI_HOST_FLAGS_RESPONSE_REQUIRED |
				HFI_HOST_FLAGS_INTR_REQUIRED |
				HFI_HOST_FLAGS_NON_DISCARDABLE),
				HFI_PORT_NONE,
				inst->session_id,
				HFI_PAYLOAD_NONE,
				NULL,
				0);
	if (rc)
		goto unlock;

	rc = __iface_cmdq_write(inst->core, inst->packet);
	if (rc)
		goto unlock;

unlock:
	core_unlock(core, __func__);
	return rc;
}

int venus_hfi_start(struct msm_vidc_inst *inst, enum msm_vidc_port_type port)
{
	int rc = 0;
	struct msm_vidc_core *core;

	if (!inst || !inst->core || !inst->packet) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;
	core_lock(core, __func__);

	if (!__valdiate_session(core, inst, __func__)) {
		rc = -EINVAL;
		goto unlock;
	}

	if (port != INPUT_PORT && port != OUTPUT_PORT) {
		i_vpr_e(inst, "%s: invalid port %d\n", __func__, port);
		/* return an error instead of silently succeeding */
		rc = -EINVAL;
		goto unlock;
	}

	rc = hfi_packet_session_command(inst,
				HFI_CMD_START,
				(HFI_HOST_FLAGS_RESPONSE_REQUIRED |
				HFI_HOST_FLAGS_INTR_REQUIRED),
				get_hfi_port(inst, port),
				inst->session_id,
				HFI_PAYLOAD_NONE,
				NULL,
				0);
	if (rc)
		goto unlock;

	rc = __iface_cmdq_write(inst->core, inst->packet);
	if (rc)
		goto unlock;

unlock:
	core_unlock(core, __func__);
	return rc;
}

int venus_hfi_stop(struct msm_vidc_inst *inst, enum msm_vidc_port_type port)
{
	int rc = 0;
	struct msm_vidc_core *core;

	if (!inst || !inst->core || !inst->packet) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;
	core_lock(core, __func__);

	if (!__valdiate_session(core, inst, __func__)) {
		rc = -EINVAL;
		goto unlock;
	}

	if (port != INPUT_PORT && port != OUTPUT_PORT) {
		i_vpr_e(inst, "%s: invalid port %d\n", __func__, port);
		rc = -EINVAL;
		goto unlock;
	}

	rc = hfi_packet_session_command(inst,
				HFI_CMD_STOP,
				(HFI_HOST_FLAGS_RESPONSE_REQUIRED |
				HFI_HOST_FLAGS_INTR_REQUIRED |
				HFI_HOST_FLAGS_NON_DISCARDABLE),
				get_hfi_port(inst, port),
				inst->session_id,
				HFI_PAYLOAD_NONE,
				NULL,
				0);
	if (rc)
		goto unlock;

	rc = __iface_cmdq_write(inst->core, inst->packet);
	if (rc)
		goto unlock;

unlock:
	core_unlock(core, __func__);
	return rc;
}

int venus_hfi_session_command(struct msm_vidc_inst *inst,
	u32 cmd, enum msm_vidc_port_type port, u32 payload_type,
	void *payload, u32 payload_size)
{
	int rc = 0;
	struct msm_vidc_core *core;

	if (!inst || !inst->core || !inst->packet) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;
	core_lock(core, __func__);

	if (!__valdiate_session(core, inst, __func__)) {
		rc = -EINVAL;
		goto unlock;
	}

	rc = hfi_create_header(inst->packet, inst->packet_size,
			inst->session_id,
			core->header_id++);
	if (rc)
		goto unlock;

	rc = hfi_create_packet(inst->packet, inst->packet_size,
			cmd,
			(HFI_HOST_FLAGS_RESPONSE_REQUIRED |
			HFI_HOST_FLAGS_INTR_REQUIRED),
			payload_type,
			get_hfi_port(inst, port),
			core->packet_id++,
			payload,
			payload_size);
	if (rc)
		goto unlock;

	rc = __iface_cmdq_write(inst->core, inst->packet);
	if (rc)
		goto unlock;

unlock:
	core_unlock(core, __func__);
	return rc;
}

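/*
 * Super (batched) buffers pack batch_size frames into one contiguous
 * allocation. One HFI_CMD_BUFFER packet is queued per frame, with
 * addr_offset advancing by frame_size and the timestamp by the frame
 * period; only the last packet of the batch requests an interrupt.
 * Worked example: batch_size = 4, frame_size = F and FRAME_RATE = 30 fps
 * (stored Q16, so value >> 16 = 30) gives ts_delta_us = 1000000 / 30 =
 * 33333, offsets 0, F, 2F, 3F and timestamps t, t+33333, t+66666,
 * t+99999.
 */
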
int venus_hfi_queue_super_buffer(struct msm_vidc_inst *inst,
	struct msm_vidc_buffer *buffer, struct msm_vidc_buffer *metabuf)
{
	int rc = 0;
	struct msm_vidc_core *core;
	struct hfi_buffer hfi_buffer;
	struct hfi_buffer hfi_meta_buffer;
	struct msm_vidc_inst_capability *capability;
	u32 frame_size, meta_size, batch_size, cnt = 0;
	u64 ts_delta_us;

	if (!inst || !inst->core || !inst->capabilities || !inst->packet) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;
	capability = inst->capabilities;
	core_lock(core, __func__);

	if (!__valdiate_session(core, inst, __func__)) {
		rc = -EINVAL;
		goto unlock;
	}

	/* Get super yuv buffer */
	rc = get_hfi_buffer(inst, buffer, &hfi_buffer);
	if (rc)
		goto unlock;

	/* Get super meta buffer */
	if (metabuf) {
		rc = get_hfi_buffer(inst, metabuf, &hfi_meta_buffer);
		if (rc)
			goto unlock;
	}

	batch_size = capability->cap[SUPER_FRAME].value;
	frame_size = call_session_op(core, buffer_size, inst, MSM_VIDC_BUF_INPUT);
	meta_size = call_session_op(core, buffer_size, inst, MSM_VIDC_BUF_INPUT_META);
	ts_delta_us = 1000000 / (capability->cap[FRAME_RATE].value >> 16);

	/* Sanitize super yuv buffer */
	if (frame_size * batch_size != buffer->buffer_size) {
		i_vpr_e(inst, "%s: invalid super yuv buffer. frame %u, batch %u, buffer size %u\n",
			__func__, frame_size, batch_size, buffer->buffer_size);
		/* fail explicitly instead of returning success */
		rc = -EINVAL;
		goto unlock;
	}

	/* Sanitize super meta buffer */
	if (metabuf && meta_size * batch_size != metabuf->buffer_size) {
		i_vpr_e(inst, "%s: invalid super meta buffer. meta %u, batch %u, buffer size %u\n",
			__func__, meta_size, batch_size, metabuf->buffer_size);
		rc = -EINVAL;
		goto unlock;
	}

	/* Initialize yuv buffer */
	hfi_buffer.data_size = frame_size;
	hfi_buffer.addr_offset = 0;

	/* Initialize meta buffer */
	if (metabuf) {
		hfi_meta_buffer.data_size = meta_size;
		hfi_meta_buffer.addr_offset = 0;
	}

	while (cnt < batch_size) {
		/* Create header */
		rc = hfi_create_header(inst->packet, inst->packet_size,
			inst->session_id, core->header_id++);
		if (rc)
			goto unlock;

		/* Create yuv packet */
		update_offset(hfi_buffer.addr_offset, (cnt ? frame_size : 0u));
		update_timestamp(hfi_buffer.timestamp, (cnt ? ts_delta_us : 0u));
		rc = hfi_create_packet(inst->packet,
			inst->packet_size,
			HFI_CMD_BUFFER,
			HFI_HOST_FLAGS_INTR_REQUIRED,
			HFI_PAYLOAD_STRUCTURE,
			get_hfi_port_from_buffer_type(inst, buffer->type),
			core->packet_id++,
			&hfi_buffer,
			sizeof(hfi_buffer));
		if (rc)
			goto unlock;

		/* Create meta packet */
		if (metabuf) {
			update_offset(hfi_meta_buffer.addr_offset, (cnt ? meta_size : 0u));
			update_timestamp(hfi_meta_buffer.timestamp, (cnt ? ts_delta_us : 0u));
			rc = hfi_create_packet(inst->packet,
				inst->packet_size,
				HFI_CMD_BUFFER,
				HFI_HOST_FLAGS_INTR_REQUIRED,
				HFI_PAYLOAD_STRUCTURE,
				get_hfi_port_from_buffer_type(inst, metabuf->type),
				core->packet_id++,
				&hfi_meta_buffer,
				sizeof(hfi_meta_buffer));
			if (rc)
				goto unlock;
		}

		/* Raise interrupt only for last pkt in the batch */
		rc = __iface_cmdq_write_intr(inst->core, inst->packet, (cnt == batch_size - 1));
		if (rc)
			goto unlock;

		cnt++;
	}

unlock:
	core_unlock(core, __func__);
	if (rc)
		i_vpr_e(inst, "%s: queue super buffer failed: %d\n", __func__, rc);

	return rc;
}

int venus_hfi_queue_buffer(struct msm_vidc_inst *inst,
	struct msm_vidc_buffer *buffer, struct msm_vidc_buffer *metabuf)
{
	int rc = 0;
	struct msm_vidc_core *core;
	struct hfi_buffer hfi_buffer;

	if (!inst || !inst->core || !inst->packet) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;
	core_lock(core, __func__);

	if (!__valdiate_session(core, inst, __func__)) {
		rc = -EINVAL;
		goto unlock;
	}

	rc = get_hfi_buffer(inst, buffer, &hfi_buffer);
	if (rc)
		goto unlock;

	rc = hfi_create_header(inst->packet, inst->packet_size,
			inst->session_id, core->header_id++);
	if (rc)
		goto unlock;

	rc = hfi_create_packet(inst->packet,
			inst->packet_size,
			HFI_CMD_BUFFER,
			HFI_HOST_FLAGS_INTR_REQUIRED,
			HFI_PAYLOAD_STRUCTURE,
			get_hfi_port_from_buffer_type(inst, buffer->type),
			core->packet_id++,
			&hfi_buffer,
			sizeof(hfi_buffer));
	if (rc)
		goto unlock;

	if (metabuf) {
		rc = get_hfi_buffer(inst, metabuf, &hfi_buffer);
		if (rc)
			goto unlock;
		rc = hfi_create_packet(inst->packet,
				inst->packet_size,
				HFI_CMD_BUFFER,
				HFI_HOST_FLAGS_INTR_REQUIRED,
				HFI_PAYLOAD_STRUCTURE,
				get_hfi_port_from_buffer_type(inst, metabuf->type),
				core->packet_id++,
				&hfi_buffer,
				sizeof(hfi_buffer));
		if (rc)
			goto unlock;
	}

	rc = __iface_cmdq_write(inst->core, inst->packet);
	if (rc)
		goto unlock;

unlock:
	core_unlock(core, __func__);
	return rc;
}

int venus_hfi_release_buffer(struct msm_vidc_inst *inst,
	struct msm_vidc_buffer *buffer)
{
	int rc = 0;
	struct msm_vidc_core *core;
	struct hfi_buffer hfi_buffer;

	if (!inst || !inst->core || !inst->packet || !buffer) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;
	core_lock(core, __func__);

	if (!__valdiate_session(core, inst, __func__)) {
		rc = -EINVAL;
		goto unlock;
	}

	rc = get_hfi_buffer(inst, buffer, &hfi_buffer);
	if (rc)
		goto unlock;

	/* add release flag */
	hfi_buffer.flags |= HFI_BUF_HOST_FLAG_RELEASE;

	rc = hfi_create_header(inst->packet, inst->packet_size,
			inst->session_id, core->header_id++);
	if (rc)
		goto unlock;

	rc = hfi_create_packet(inst->packet,
			inst->packet_size,
			HFI_CMD_BUFFER,
			(HFI_HOST_FLAGS_RESPONSE_REQUIRED |
			HFI_HOST_FLAGS_INTR_REQUIRED),
			HFI_PAYLOAD_STRUCTURE,
			get_hfi_port_from_buffer_type(inst, buffer->type),
			core->packet_id++,
			&hfi_buffer,
			sizeof(hfi_buffer));
	if (rc)
		goto unlock;

	rc = __iface_cmdq_write(inst->core, inst->packet);
	if (rc)
		goto unlock;

unlock:
	core_unlock(core, __func__);
	return rc;
}

int venus_hfi_scale_clocks(struct msm_vidc_inst *inst, u64 freq)
{
	int rc = 0;
	struct msm_vidc_core *core;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;

	core_lock(core, __func__);
	rc = __resume(core);
	if (rc) {
		i_vpr_e(inst, "%s: Resume from power collapse failed\n", __func__);
		goto exit;
	}
	rc = __set_clocks(core, freq);
	if (rc)
		goto exit;

exit:
	core_unlock(core, __func__);

	return rc;
}

int venus_hfi_scale_buses(struct msm_vidc_inst *inst, u64 bw_ddr, u64 bw_llcc)
{
	int rc = 0;
	struct msm_vidc_core *core;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;

	core_lock(core, __func__);
	rc = __resume(core);
	if (rc) {
		i_vpr_e(inst, "%s: Resume from power collapse failed\n", __func__);
		goto exit;
	}
	rc = __vote_buses(core, bw_ddr, bw_llcc);
	if (rc)
		goto exit;

exit:
	core_unlock(core, __func__);

	return rc;
}