/* msm_vidc_driver.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2020, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/iommu.h>
  6. #include <linux/workqueue.h>
  7. #include <linux/hash.h>
  8. #include <media/v4l2_vidc_extensions.h>
  9. #include "msm_media_info.h"
  10. #include "msm_vidc_driver.h"
  11. #include "msm_vidc_platform.h"
  12. #include "msm_vidc_internal.h"
  13. #include "msm_vidc_memory.h"
  14. #include "msm_vidc_debug.h"
  15. #include "msm_vidc_power.h"
  16. #include "msm_vidc.h"
  17. #include "msm_vdec.h"
  18. #include "msm_venc.h"
  19. #include "venus_hfi.h"
  20. #include "venus_hfi_response.h"
  21. #define COUNT_BITS(a, out) { \
  22. while ((a) >= 1) { \
  23. (out) += (a) & (1); \
  24. (a) >>= (1); \
  25. } \
  26. }
  27. void print_vidc_buffer(u32 tag, const char *str, struct msm_vidc_inst *inst,
  28. struct msm_vidc_buffer *vbuf)
  29. {
  30. if (!(tag & msm_vidc_debug) || !inst || !vbuf)
  31. return;
  32. if (vbuf->type == MSM_VIDC_BUF_INPUT || vbuf->type == MSM_VIDC_BUF_OUTPUT) {
  33. dprintk(tag, inst->sid,
  34. "%s: %s: idx %2d fd %3d off %d daddr %#llx size %d filled %d flags %#x ts %lld attr %#x\n",
  35. str, vbuf->type == MSM_VIDC_BUF_INPUT ? "INPUT" : "OUTPUT",
  36. vbuf->index, vbuf->fd, vbuf->data_offset,
  37. vbuf->device_addr, vbuf->buffer_size, vbuf->data_size,
  38. vbuf->flags, vbuf->timestamp, vbuf->attr);
  39. } else if (vbuf->type == MSM_VIDC_BUF_INPUT_META ||
  40. vbuf->type == MSM_VIDC_BUF_OUTPUT_META) {
  41. dprintk(tag, inst->sid,
  42. "%s: %s: idx %2d fd %3d off %d daddr %#llx size %d filled %d flags %#x ts %lld attr %#x\n",
  43. str, vbuf->type == MSM_VIDC_BUF_INPUT_META ? "INPUT_META" : "OUTPUT_META",
  44. vbuf->index, vbuf->fd, vbuf->data_offset,
  45. vbuf->device_addr, vbuf->buffer_size, vbuf->data_size,
  46. vbuf->flags, vbuf->timestamp, vbuf->attr);
  47. }
  48. }
/*
 * print_vb2_buffer() - log plane-0 details (index, fd, data offset,
 * length, bytesused) of a vb2 buffer at error verbosity, tagged with
 * the instance's session id. Assumes plane 0 exists for both
 * INPUT_MPLANE and OUTPUT_MPLANE queues.
 */
void print_vb2_buffer(const char *str, struct msm_vidc_inst *inst,
	struct vb2_buffer *vb2)
{
	if (!inst || !vb2)
		return;
	s_vpr_e(inst->sid,
		"%s: %s: idx %2d fd %d off %d size %d filled %d\n",
		str, vb2->type == INPUT_MPLANE ? "INPUT" : "OUTPUT",
		vb2->index, vb2->planes[0].m.fd,
		vb2->planes[0].data_offset, vb2->planes[0].length,
		vb2->planes[0].bytesused);
}
  61. enum msm_vidc_buffer_type v4l2_type_to_driver(u32 type, const char *func)
  62. {
  63. enum msm_vidc_buffer_type buffer_type = 0;
  64. switch (type) {
  65. case INPUT_MPLANE:
  66. buffer_type = MSM_VIDC_BUF_INPUT;
  67. break;
  68. case OUTPUT_MPLANE:
  69. buffer_type = MSM_VIDC_BUF_OUTPUT;
  70. break;
  71. case INPUT_META_PLANE:
  72. buffer_type = MSM_VIDC_BUF_INPUT_META;
  73. break;
  74. case OUTPUT_META_PLANE:
  75. buffer_type = MSM_VIDC_BUF_OUTPUT_META;
  76. break;
  77. default:
  78. d_vpr_e("%s: invalid v4l2 buffer type %#x\n", func, type);
  79. break;
  80. }
  81. return buffer_type;
  82. }
  83. u32 v4l2_type_from_driver(enum msm_vidc_buffer_type buffer_type,
  84. const char *func)
  85. {
  86. u32 type = 0;
  87. switch (buffer_type) {
  88. case MSM_VIDC_BUF_INPUT:
  89. type = INPUT_MPLANE;
  90. break;
  91. case MSM_VIDC_BUF_OUTPUT:
  92. type = OUTPUT_MPLANE;
  93. break;
  94. case MSM_VIDC_BUF_INPUT_META:
  95. type = INPUT_META_PLANE;
  96. break;
  97. case MSM_VIDC_BUF_OUTPUT_META:
  98. type = OUTPUT_META_PLANE;
  99. break;
  100. default:
  101. d_vpr_e("%s: invalid driver buffer type %d\n",
  102. func, buffer_type);
  103. break;
  104. }
  105. return type;
  106. }
  107. enum msm_vidc_codec_type v4l2_codec_to_driver(u32 v4l2_codec, const char *func)
  108. {
  109. enum msm_vidc_codec_type codec = 0;
  110. switch (v4l2_codec) {
  111. case V4L2_PIX_FMT_H264:
  112. codec = MSM_VIDC_H264;
  113. break;
  114. case V4L2_PIX_FMT_HEVC:
  115. codec = MSM_VIDC_HEVC;
  116. break;
  117. case V4L2_PIX_FMT_VP9:
  118. codec = MSM_VIDC_VP9;
  119. break;
  120. default:
  121. d_vpr_e("%s: invalid v4l2 codec %#x\n", func, v4l2_codec);
  122. break;
  123. }
  124. return codec;
  125. }
  126. u32 v4l2_codec_from_driver(enum msm_vidc_codec_type codec, const char *func)
  127. {
  128. u32 v4l2_codec = 0;
  129. switch (codec) {
  130. case MSM_VIDC_H264:
  131. v4l2_codec = V4L2_PIX_FMT_H264;
  132. break;
  133. case MSM_VIDC_HEVC:
  134. v4l2_codec = V4L2_PIX_FMT_HEVC;
  135. break;
  136. case MSM_VIDC_VP9:
  137. v4l2_codec = V4L2_PIX_FMT_VP9;
  138. break;
  139. default:
  140. d_vpr_e("%s: invalid driver codec %#x\n", func, codec);
  141. break;
  142. }
  143. return v4l2_codec;
  144. }
  145. enum msm_vidc_colorformat_type v4l2_colorformat_to_driver(u32 v4l2_colorformat,
  146. const char *func)
  147. {
  148. enum msm_vidc_colorformat_type colorformat = 0;
  149. switch (v4l2_colorformat) {
  150. case V4L2_PIX_FMT_NV12:
  151. colorformat = MSM_VIDC_FMT_NV12;
  152. break;
  153. case V4L2_PIX_FMT_NV21:
  154. colorformat = MSM_VIDC_FMT_NV21;
  155. break;
  156. case V4L2_PIX_FMT_VIDC_NV12C:
  157. colorformat = MSM_VIDC_FMT_NV12C;
  158. break;
  159. case V4L2_PIX_FMT_VIDC_TP10C:
  160. colorformat = MSM_VIDC_FMT_TP10C;
  161. break;
  162. case V4L2_PIX_FMT_VIDC_ARGB32C:
  163. colorformat = MSM_VIDC_FMT_RGBA8888C;
  164. break;
  165. case V4L2_PIX_FMT_VIDC_P010:
  166. colorformat = MSM_VIDC_FMT_P010;
  167. break;
  168. default:
  169. d_vpr_e("%s: invalid v4l2 color format %#x\n",
  170. func, v4l2_colorformat);
  171. break;
  172. }
  173. return colorformat;
  174. }
  175. u32 v4l2_colorformat_from_driver(enum msm_vidc_colorformat_type colorformat,
  176. const char *func)
  177. {
  178. u32 v4l2_colorformat = 0;
  179. switch (colorformat) {
  180. case MSM_VIDC_FMT_NV12:
  181. v4l2_colorformat = V4L2_PIX_FMT_NV12;
  182. break;
  183. case MSM_VIDC_FMT_NV21:
  184. v4l2_colorformat = V4L2_PIX_FMT_NV21;
  185. break;
  186. case MSM_VIDC_FMT_NV12C:
  187. v4l2_colorformat = V4L2_PIX_FMT_VIDC_NV12C;
  188. break;
  189. case MSM_VIDC_FMT_TP10C:
  190. v4l2_colorformat = V4L2_PIX_FMT_VIDC_TP10C;
  191. break;
  192. case MSM_VIDC_FMT_RGBA8888C:
  193. v4l2_colorformat = V4L2_PIX_FMT_VIDC_ARGB32C;
  194. break;
  195. case MSM_VIDC_FMT_P010:
  196. v4l2_colorformat = V4L2_PIX_FMT_VIDC_P010;
  197. break;
  198. default:
  199. d_vpr_e("%s: invalid driver color format %#x\n",
  200. func, colorformat);
  201. break;
  202. }
  203. return v4l2_colorformat;
  204. }
  205. u32 v4l2_colorformat_to_media(u32 v4l2_fmt, const char *func)
  206. {
  207. switch (v4l2_fmt) {
  208. case V4L2_PIX_FMT_NV12:
  209. return COLOR_FMT_NV12;
  210. case V4L2_PIX_FMT_NV21:
  211. return COLOR_FMT_NV21;
  212. case V4L2_PIX_FMT_VIDC_P010:
  213. return COLOR_FMT_P010;
  214. case V4L2_PIX_FMT_VIDC_NV12C:
  215. return COLOR_FMT_NV12_UBWC;
  216. case V4L2_PIX_FMT_VIDC_TP10C:
  217. return COLOR_FMT_NV12_BPP10_UBWC;
  218. case V4L2_PIX_FMT_VIDC_ARGB32C:
  219. return COLOR_FMT_RGBA8888_UBWC;
  220. default:
  221. d_vpr_e("%s: invalid v4l2 color fmt: %#x, set default (NV12)",
  222. func, v4l2_fmt);
  223. return COLOR_FMT_NV12;
  224. }
  225. }
  226. int v4l2_type_to_driver_port(struct msm_vidc_inst *inst, u32 type,
  227. const char *func)
  228. {
  229. int port;
  230. if (type == INPUT_MPLANE) {
  231. port = INPUT_PORT;
  232. } else if (type == INPUT_META_PLANE) {
  233. port = INPUT_META_PORT;
  234. } else if (type == OUTPUT_MPLANE) {
  235. port = OUTPUT_PORT;
  236. } else if (type == OUTPUT_META_PLANE) {
  237. port = OUTPUT_META_PORT;
  238. } else {
  239. s_vpr_e(inst->sid, "%s: port not found for v4l2 type %d\n",
  240. func, type);
  241. port = -EINVAL;
  242. }
  243. return port;
  244. }
/*
 * msm_vidc_get_buffer_region() - pick the memory region a buffer of
 * @buffer_type must be mapped into for this session.
 *
 * Non-secure sessions always get MSM_VIDC_NON_SECURE, except for ARP
 * buffers, which always take the secure-region mapping below.
 * For secure sessions, bitstream vs pixel regions swap between encode
 * and decode: an encoder's input is raw pixels and its output is
 * bitstream; a decoder is the reverse.
 */
u32 msm_vidc_get_buffer_region(struct msm_vidc_inst *inst,
	enum msm_vidc_buffer_type buffer_type, const char *func)
{
	u32 region = MSM_VIDC_NON_SECURE;

	if (!is_secure_session(inst) &&
		buffer_type != MSM_VIDC_BUF_ARP) {
		return region;
	}
	switch (buffer_type) {
	case MSM_VIDC_BUF_INPUT:
		if (is_encode_session(inst))
			region = MSM_VIDC_SECURE_PIXEL;
		else
			region = MSM_VIDC_SECURE_BITSTREAM;
		break;
	case MSM_VIDC_BUF_OUTPUT:
		if (is_encode_session(inst))
			region = MSM_VIDC_SECURE_BITSTREAM;
		else
			region = MSM_VIDC_SECURE_PIXEL;
		break;
	case MSM_VIDC_BUF_INPUT_META:
	case MSM_VIDC_BUF_OUTPUT_META:
		/* metadata buffers stay non-secure even in secure sessions */
		region = MSM_VIDC_NON_SECURE;
		break;
	case MSM_VIDC_BUF_BIN:
		region = MSM_VIDC_SECURE_BITSTREAM;
		break;
	case MSM_VIDC_BUF_COMV:
	case MSM_VIDC_BUF_NON_COMV:
	case MSM_VIDC_BUF_LINE:
		region = MSM_VIDC_SECURE_NONPIXEL;
		break;
	case MSM_VIDC_BUF_DPB:
		region = MSM_VIDC_SECURE_PIXEL;
		break;
	case MSM_VIDC_BUF_PERSIST:
		/* intentional fallthrough: PERSIST shares ARP's region */
		// TODO: Need to revisit for ARP
	case MSM_VIDC_BUF_ARP:
		region = MSM_VIDC_SECURE_NONPIXEL;
		break;
	default:
		s_vpr_e(inst->sid, "%s: invalid driver buffer type %d\n",
			func, buffer_type);
	}
	return region;
}
/*
 * msm_vidc_get_buffers() - return the per-instance buffer list that
 * tracks buffers of @buffer_type.
 *
 * Returns NULL (and logs, attributed to @func) for types that have no
 * list in inst->buffers.
 */
struct msm_vidc_buffers *msm_vidc_get_buffers(
	struct msm_vidc_inst *inst, enum msm_vidc_buffer_type buffer_type,
	const char *func)
{
	switch (buffer_type) {
	case MSM_VIDC_BUF_INPUT:
		return &inst->buffers.input;
	case MSM_VIDC_BUF_INPUT_META:
		return &inst->buffers.input_meta;
	case MSM_VIDC_BUF_OUTPUT:
		return &inst->buffers.output;
	case MSM_VIDC_BUF_OUTPUT_META:
		return &inst->buffers.output_meta;
	case MSM_VIDC_BUF_BIN:
		return &inst->buffers.bin;
	case MSM_VIDC_BUF_ARP:
		return &inst->buffers.arp;
	case MSM_VIDC_BUF_COMV:
		return &inst->buffers.comv;
	case MSM_VIDC_BUF_NON_COMV:
		return &inst->buffers.non_comv;
	case MSM_VIDC_BUF_LINE:
		return &inst->buffers.line;
	case MSM_VIDC_BUF_DPB:
		return &inst->buffers.dpb;
	case MSM_VIDC_BUF_PERSIST:
		return &inst->buffers.persist;
	case MSM_VIDC_BUF_VPSS:
		return &inst->buffers.vpss;
	default:
		s_vpr_e(inst->sid, "%s: invalid driver buffer type %d\n",
			func, buffer_type);
		return NULL;
	}
}
/*
 * msm_vidc_get_mappings() - return the per-instance mapping list for
 * @buffer_type (parallel structure to msm_vidc_get_buffers()).
 *
 * Returns NULL (and logs, attributed to @func) for unknown types.
 */
struct msm_vidc_mappings *msm_vidc_get_mappings(
	struct msm_vidc_inst *inst, enum msm_vidc_buffer_type buffer_type,
	const char *func)
{
	switch (buffer_type) {
	case MSM_VIDC_BUF_INPUT:
		return &inst->mappings.input;
	case MSM_VIDC_BUF_INPUT_META:
		return &inst->mappings.input_meta;
	case MSM_VIDC_BUF_OUTPUT:
		return &inst->mappings.output;
	case MSM_VIDC_BUF_OUTPUT_META:
		return &inst->mappings.output_meta;
	case MSM_VIDC_BUF_BIN:
		return &inst->mappings.bin;
	case MSM_VIDC_BUF_ARP:
		return &inst->mappings.arp;
	case MSM_VIDC_BUF_COMV:
		return &inst->mappings.comv;
	case MSM_VIDC_BUF_NON_COMV:
		return &inst->mappings.non_comv;
	case MSM_VIDC_BUF_LINE:
		return &inst->mappings.line;
	case MSM_VIDC_BUF_DPB:
		return &inst->mappings.dpb;
	case MSM_VIDC_BUF_PERSIST:
		return &inst->mappings.persist;
	case MSM_VIDC_BUF_VPSS:
		return &inst->mappings.vpss;
	default:
		s_vpr_e(inst->sid, "%s: invalid driver buffer type %d\n",
			func, buffer_type);
		return NULL;
	}
}
/*
 * msm_vidc_get_allocations() - return the per-instance allocation list
 * for @buffer_type. Only internal (driver-allocated) buffer types have
 * allocation lists; input/output/meta buffers are client-owned and are
 * not handled here.
 *
 * Returns NULL (and logs, attributed to @func) for unknown types.
 */
struct msm_vidc_allocations *msm_vidc_get_allocations(
	struct msm_vidc_inst *inst, enum msm_vidc_buffer_type buffer_type,
	const char *func)
{
	switch (buffer_type) {
	case MSM_VIDC_BUF_BIN:
		return &inst->allocations.bin;
	case MSM_VIDC_BUF_ARP:
		return &inst->allocations.arp;
	case MSM_VIDC_BUF_COMV:
		return &inst->allocations.comv;
	case MSM_VIDC_BUF_NON_COMV:
		return &inst->allocations.non_comv;
	case MSM_VIDC_BUF_LINE:
		return &inst->allocations.line;
	case MSM_VIDC_BUF_DPB:
		return &inst->allocations.dpb;
	case MSM_VIDC_BUF_PERSIST:
		return &inst->allocations.persist;
	case MSM_VIDC_BUF_VPSS:
		return &inst->allocations.vpss;
	default:
		s_vpr_e(inst->sid, "%s: invalid driver buffer type %d\n",
			func, buffer_type);
		return NULL;
	}
}
  389. const char *core_state_name(enum msm_vidc_core_state state)
  390. {
  391. const char* name = "UNKNOWN";
  392. switch (state) {
  393. case MSM_VIDC_CORE_INIT:
  394. name = "CORE_INIT";
  395. break;
  396. case MSM_VIDC_CORE_DEINIT:
  397. name = "CORE_DEINIT";
  398. break;
  399. default:
  400. name = "UNKNOWN";
  401. break;
  402. }
  403. return name;
  404. }
  405. int msm_vidc_change_core_state(struct msm_vidc_core *core,
  406. enum msm_vidc_core_state request_state, const char *func)
  407. {
  408. if (!core) {
  409. d_vpr_e("%s: invalid params\n", __func__);
  410. return -EINVAL;
  411. }
  412. d_vpr_h("%s: core state changed from %s to %s\n",
  413. func, core_state_name(core->state),
  414. core_state_name(request_state));
  415. core->state = request_state;
  416. return 0;
  417. }
  418. const char *state_name(enum msm_vidc_inst_state state)
  419. {
  420. const char *name = "UNKNOWN";
  421. switch (state) {
  422. case MSM_VIDC_OPEN:
  423. name = "OPEN";
  424. break;
  425. case MSM_VIDC_START_INPUT:
  426. name = "START_INPUT";
  427. break;
  428. case MSM_VIDC_START_OUTPUT:
  429. name = "START_OUTPUT";
  430. break;
  431. case MSM_VIDC_START:
  432. name = "START";
  433. break;
  434. case MSM_VIDC_DRC:
  435. name = "DRC";
  436. break;
  437. case MSM_VIDC_DRC_LAST_FLAG:
  438. name = "DRC_LAST_FLAG";
  439. break;
  440. case MSM_VIDC_DRAIN:
  441. name = "DRAIN";
  442. break;
  443. case MSM_VIDC_DRAIN_LAST_FLAG:
  444. name = "DRAIN_LAST_FLAG";
  445. break;
  446. case MSM_VIDC_DRC_DRAIN:
  447. name = "DRC_DRAIN";
  448. break;
  449. case MSM_VIDC_DRC_DRAIN_LAST_FLAG:
  450. name = "DRC_DRAIN_LAST_FLAG";
  451. break;
  452. case MSM_VIDC_DRAIN_START_INPUT:
  453. name = "DRAIN_START_INPUT";
  454. break;
  455. case MSM_VIDC_ERROR:
  456. name = "ERROR";
  457. break;
  458. default:
  459. name = "UNKNOWN";
  460. break;
  461. }
  462. return name;
  463. }
  464. int msm_vidc_change_inst_state(struct msm_vidc_inst *inst,
  465. enum msm_vidc_inst_state request_state, const char *func)
  466. {
  467. if (!inst) {
  468. d_vpr_e("%s: invalid params\n", __func__);
  469. return -EINVAL;
  470. }
  471. if (!request_state) {
  472. s_vpr_e(inst->sid, "%s: invalid request state\n", func);
  473. return -EINVAL;
  474. }
  475. if (inst->state == MSM_VIDC_ERROR) {
  476. s_vpr_h(inst->sid,
  477. "%s: inst is in bad state, can not change state to %s\n",
  478. func, state_name(request_state));
  479. return 0;
  480. }
  481. s_vpr_h(inst->sid, "%s: state changed from %s to %s\n",
  482. func, state_name(inst->state), state_name(request_state));
  483. inst->state = request_state;
  484. return 0;
  485. }
  486. bool msm_vidc_allow_s_fmt(struct msm_vidc_inst *inst, u32 type)
  487. {
  488. bool allow = false;
  489. if (!inst) {
  490. d_vpr_e("%s: invalid params\n", __func__);
  491. return false;
  492. }
  493. if (inst->state == MSM_VIDC_OPEN) {
  494. allow = true;
  495. goto exit;
  496. }
  497. if (inst->state == MSM_VIDC_START_INPUT) {
  498. if (type == OUTPUT_MPLANE || type == OUTPUT_META_PLANE) {
  499. allow = true;
  500. goto exit;
  501. }
  502. }
  503. if (inst->state == MSM_VIDC_START_OUTPUT) {
  504. if (type == INPUT_MPLANE || type == INPUT_META_PLANE) {
  505. allow = true;
  506. goto exit;
  507. }
  508. }
  509. exit:
  510. if (!allow)
  511. s_vpr_e(inst->sid, "%s: type %d not allowed in state %s\n",
  512. __func__, type, state_name(inst->state));
  513. return allow;
  514. }
  515. bool msm_vidc_allow_s_ctrl(struct msm_vidc_inst *inst, u32 id)
  516. {
  517. bool allow = false;
  518. if (!inst) {
  519. d_vpr_e("%s: invalid params\n", __func__);
  520. return false;
  521. }
  522. if (inst->state == MSM_VIDC_OPEN) {
  523. allow = true;
  524. goto exit;
  525. }
  526. if (is_decode_session(inst)) {
  527. if (!inst->vb2q[INPUT_PORT].streaming) {
  528. allow = true;
  529. goto exit;
  530. }
  531. if (inst->vb2q[INPUT_PORT].streaming) {
  532. switch (id) {
  533. case V4L2_CID_MPEG_VIDC_CODEC_CONFIG:
  534. allow = true;
  535. break;
  536. default:
  537. allow = false;
  538. break;
  539. }
  540. }
  541. } else if (is_encode_session(inst)) {
  542. if (inst->state == MSM_VIDC_START || inst->state == MSM_VIDC_START_OUTPUT) {
  543. switch (id) {
  544. case V4L2_CID_MPEG_VIDEO_BITRATE:
  545. case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
  546. case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME:
  547. case V4L2_CID_HFLIP:
  548. case V4L2_CID_VFLIP:
  549. case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP:
  550. case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER:
  551. case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR:
  552. case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR:
  553. case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR:
  554. case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR:
  555. case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR:
  556. case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR:
  557. case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L6_BR:
  558. case V4L2_CID_MPEG_VIDC_BASELAYER_PRIORITY:
  559. case V4L2_CID_MPEG_VIDC_USELTRFRAME:
  560. case V4L2_CID_MPEG_VIDC_MARKLTRFRAME:
  561. case V4L2_CID_MPEG_VIDC_VIDEO_BLUR_TYPES:
  562. case V4L2_CID_MPEG_VIDC_VIDEO_BLUR_RESOLUTION:
  563. allow = true;
  564. break;
  565. default:
  566. allow = false;
  567. break;
  568. }
  569. }
  570. }
  571. exit:
  572. if (!allow)
  573. s_vpr_e(inst->sid, "%s: id %d not allowed in state %s\n",
  574. __func__, id, state_name(inst->state));
  575. return allow;
  576. }
  577. bool msm_vidc_allow_reqbufs(struct msm_vidc_inst *inst, u32 type)
  578. {
  579. bool allow = false;
  580. if (!inst) {
  581. d_vpr_e("%s: invalid params\n", __func__);
  582. return false;
  583. }
  584. if (inst->state == MSM_VIDC_OPEN) {
  585. allow = true;
  586. goto exit;
  587. }
  588. if (inst->state == MSM_VIDC_START_INPUT) {
  589. if (type == OUTPUT_MPLANE || type == OUTPUT_META_PLANE) {
  590. allow = true;
  591. goto exit;
  592. }
  593. }
  594. if (inst->state == MSM_VIDC_START_OUTPUT) {
  595. if (type == INPUT_MPLANE || type == INPUT_META_PLANE) {
  596. allow = true;
  597. goto exit;
  598. }
  599. }
  600. exit:
  601. if (!allow)
  602. s_vpr_e(inst->sid, "%s: type %d not allowed in state %s\n",
  603. __func__, type, state_name(inst->state));
  604. return allow;
  605. }
  606. enum msm_vidc_allow msm_vidc_allow_stop(struct msm_vidc_inst *inst)
  607. {
  608. enum msm_vidc_allow allow = MSM_VIDC_DISALLOW;
  609. if (!inst) {
  610. d_vpr_e("%s: invalid params\n", __func__);
  611. return allow;
  612. }
  613. if (inst->state == MSM_VIDC_START ||
  614. inst->state == MSM_VIDC_DRC ||
  615. inst->state == MSM_VIDC_DRC_LAST_FLAG ||
  616. inst->state == MSM_VIDC_DRC_DRAIN) {
  617. allow = MSM_VIDC_ALLOW;
  618. } else if (inst->state == MSM_VIDC_START_INPUT) {
  619. allow = MSM_VIDC_IGNORE;
  620. s_vpr_e(inst->sid, "%s: stop ignored in state %s\n",
  621. __func__, state_name(inst->state));
  622. } else {
  623. s_vpr_e(inst->sid, "%s: stop not allowed in state %s\n",
  624. __func__, state_name(inst->state));
  625. }
  626. return allow;
  627. }
  628. bool msm_vidc_allow_start(struct msm_vidc_inst *inst)
  629. {
  630. if (!inst) {
  631. d_vpr_e("%s: invalid params\n", __func__);
  632. return false;
  633. }
  634. if (inst->state == MSM_VIDC_DRAIN_LAST_FLAG ||
  635. inst->state == MSM_VIDC_DRC_LAST_FLAG ||
  636. inst->state == MSM_VIDC_DRC_DRAIN_LAST_FLAG)
  637. return true;
  638. s_vpr_e(inst->sid, "%s: not allowed in state %s\n",
  639. __func__, state_name(inst->state));
  640. return false;
  641. }
  642. bool msm_vidc_allow_streamon(struct msm_vidc_inst *inst, u32 type)
  643. {
  644. if (!inst) {
  645. d_vpr_e("%s: invalid params\n", __func__);
  646. return false;
  647. }
  648. if (type == INPUT_MPLANE || type == INPUT_META_PLANE) {
  649. if (inst->state == MSM_VIDC_OPEN ||
  650. inst->state == MSM_VIDC_START_OUTPUT)
  651. return true;
  652. } else if (type == OUTPUT_MPLANE || type == OUTPUT_META_PLANE) {
  653. if (inst->state == MSM_VIDC_OPEN ||
  654. inst->state == MSM_VIDC_START_INPUT ||
  655. inst->state == MSM_VIDC_DRAIN_START_INPUT)
  656. return true;
  657. }
  658. s_vpr_e(inst->sid, "%s: type %d not allowed in state %s\n",
  659. __func__, type, state_name(inst->state));
  660. return false;
  661. }
  662. bool msm_vidc_allow_streamoff(struct msm_vidc_inst *inst, u32 type)
  663. {
  664. bool allow = true;
  665. if (!inst) {
  666. d_vpr_e("%s: invalid params\n", __func__);
  667. return false;
  668. }
  669. if (type == INPUT_MPLANE) {
  670. if (inst->state == MSM_VIDC_OPEN ||
  671. inst->state == MSM_VIDC_START_OUTPUT)
  672. allow = false;
  673. } else if (type == INPUT_META_PLANE) {
  674. if (inst->state == MSM_VIDC_START_INPUT)
  675. allow = false;
  676. } else if (type == OUTPUT_MPLANE) {
  677. if (inst->state == MSM_VIDC_OPEN ||
  678. inst->state == MSM_VIDC_START_INPUT)
  679. allow = false;
  680. } else if (type == OUTPUT_META_PLANE) {
  681. if (inst->state == MSM_VIDC_START_OUTPUT)
  682. allow = false;
  683. }
  684. if (!allow)
  685. s_vpr_e(inst->sid, "%s: type %d not allowed in state %s\n",
  686. __func__, type, state_name(inst->state));
  687. return allow;
  688. }
  689. bool msm_vidc_allow_qbuf(struct msm_vidc_inst *inst)
  690. {
  691. if (!inst) {
  692. d_vpr_e("%s: invalid params\n", __func__);
  693. return false;
  694. }
  695. if (inst->state == MSM_VIDC_ERROR) {
  696. s_vpr_e(inst->sid, "%s: inst in error state\n", __func__);
  697. return false;
  698. } else {
  699. return true;
  700. }
  701. }
  702. enum msm_vidc_allow msm_vidc_allow_input_psc(struct msm_vidc_inst *inst)
  703. {
  704. enum msm_vidc_allow allow = MSM_VIDC_DISALLOW;
  705. if (!inst) {
  706. d_vpr_e("%s: invalid params\n", __func__);
  707. return MSM_VIDC_DISALLOW;
  708. }
  709. if (inst->state == MSM_VIDC_START ||
  710. inst->state == MSM_VIDC_START_INPUT ||
  711. inst->state == MSM_VIDC_DRAIN) {
  712. allow = MSM_VIDC_ALLOW;
  713. } else if (inst->state == MSM_VIDC_DRC ||
  714. inst->state == MSM_VIDC_DRC_LAST_FLAG ||
  715. inst->state == MSM_VIDC_DRC_DRAIN ||
  716. inst->state == MSM_VIDC_DRC_DRAIN_LAST_FLAG ||
  717. inst->state == MSM_VIDC_DRAIN_START_INPUT) {
  718. s_vpr_h(inst->sid, "%s: defer input psc, inst state %s\n",
  719. __func__, state_name(inst->state));
  720. allow = MSM_VIDC_DEFER;
  721. } else {
  722. s_vpr_e(inst->sid, "%s: input psc in wrong state %s\n",
  723. __func__, state_name(inst->state));
  724. allow = MSM_VIDC_DISALLOW;
  725. }
  726. return allow;
  727. }
  728. bool msm_vidc_allow_last_flag(struct msm_vidc_inst *inst)
  729. {
  730. if (!inst) {
  731. d_vpr_e("%s: invalid params\n", __func__);
  732. return false;
  733. }
  734. if (inst->state == MSM_VIDC_DRC ||
  735. inst->state == MSM_VIDC_DRAIN ||
  736. inst->state == MSM_VIDC_DRC_DRAIN)
  737. return true;
  738. s_vpr_e(inst->sid, "%s: not allowed in state %s\n",
  739. __func__, state_name(inst->state));
  740. return false;
  741. }
  742. int msm_vidc_state_change_streamon(struct msm_vidc_inst *inst, u32 type)
  743. {
  744. int rc = 0;
  745. enum msm_vidc_inst_state new_state = MSM_VIDC_ERROR;
  746. struct response_work *resp_work;
  747. if (!inst || !inst->core) {
  748. d_vpr_e("%s: invalid params\n", __func__);
  749. return -EINVAL;
  750. }
  751. if (type == INPUT_META_PLANE || type == OUTPUT_META_PLANE)
  752. return 0;
  753. if (type == INPUT_MPLANE) {
  754. if (inst->state == MSM_VIDC_OPEN)
  755. new_state = MSM_VIDC_START_INPUT;
  756. else if (inst->state == MSM_VIDC_START_OUTPUT)
  757. new_state = MSM_VIDC_START;
  758. } else if (type == OUTPUT_MPLANE) {
  759. if (inst->state == MSM_VIDC_OPEN) {
  760. new_state = MSM_VIDC_START_OUTPUT;
  761. } else if (inst->state == MSM_VIDC_START_INPUT) {
  762. new_state = MSM_VIDC_START;
  763. } else if (inst->state == MSM_VIDC_DRAIN_START_INPUT) {
  764. s_vpr_h(inst->sid,
  765. "%s: streamon(output) in DRAIN_START_INPUT state\n",
  766. __func__);
  767. new_state = MSM_VIDC_DRAIN;
  768. if (!list_empty(&inst->response_works)) {
  769. resp_work = list_first_entry(&inst->response_works,
  770. struct response_work, list);
  771. if (resp_work->type == RESP_WORK_INPUT_PSC) {
  772. s_vpr_h(inst->sid,
  773. "%s: streamon(output) in DRAIN_START_INPUT state, input psc pending\n",
  774. __func__);
  775. rc = handle_session_response_work(inst, resp_work);
  776. if (rc) {
  777. s_vpr_e(inst->sid,
  778. "%s: handle input psc failed\n", __func__);
  779. new_state = MSM_VIDC_ERROR;
  780. } else {
  781. new_state = MSM_VIDC_DRC_DRAIN;
  782. }
  783. list_del(&resp_work->list);
  784. kfree(resp_work->data);
  785. kfree(resp_work);
  786. }
  787. }
  788. }
  789. }
  790. rc = msm_vidc_change_inst_state(inst, new_state, __func__);
  791. if (rc)
  792. return rc;
  793. return rc;
  794. }
  795. int msm_vidc_state_change_streamoff(struct msm_vidc_inst *inst, u32 type)
  796. {
  797. int rc = 0;
  798. enum msm_vidc_inst_state new_state = MSM_VIDC_ERROR;
  799. struct response_work *resp_work, *dummy;
  800. if (!inst || !inst->core) {
  801. d_vpr_e("%s: invalid params\n", __func__);
  802. return -EINVAL;
  803. }
  804. if (type == INPUT_META_PLANE || type == OUTPUT_META_PLANE)
  805. return 0;
  806. if (type == INPUT_MPLANE) {
  807. if (inst->state == MSM_VIDC_START_INPUT) {
  808. new_state = MSM_VIDC_OPEN;
  809. } else if (inst->state == MSM_VIDC_START) {
  810. new_state = MSM_VIDC_START_OUTPUT;
  811. } else if (inst->state == MSM_VIDC_DRC ||
  812. inst->state == MSM_VIDC_DRC_LAST_FLAG ||
  813. inst->state == MSM_VIDC_DRAIN ||
  814. inst->state == MSM_VIDC_DRAIN_LAST_FLAG ||
  815. inst->state == MSM_VIDC_DRC_DRAIN ||
  816. inst->state == MSM_VIDC_DRC_DRAIN_LAST_FLAG ||
  817. inst->state == MSM_VIDC_DRAIN_START_INPUT) {
  818. new_state = MSM_VIDC_START_OUTPUT;
  819. /* discard pending port settings change if any */
  820. list_for_each_entry_safe(resp_work, dummy,
  821. &inst->response_works, list) {
  822. if (resp_work->type == RESP_WORK_INPUT_PSC) {
  823. s_vpr_h(inst->sid,
  824. "%s: discard pending input psc\n", __func__);
  825. list_del(&resp_work->list);
  826. kfree(resp_work->data);
  827. kfree(resp_work);
  828. }
  829. }
  830. }
  831. } else if (type == OUTPUT_MPLANE) {
  832. if (inst->state == MSM_VIDC_START_OUTPUT) {
  833. new_state = MSM_VIDC_OPEN;
  834. } else if (inst->state == MSM_VIDC_START ||
  835. inst->state == MSM_VIDC_DRAIN ||
  836. inst->state == MSM_VIDC_DRAIN_LAST_FLAG ||
  837. inst->state == MSM_VIDC_DRC ||
  838. inst->state == MSM_VIDC_DRC_LAST_FLAG ||
  839. inst->state == MSM_VIDC_DRC_DRAIN) {
  840. new_state = MSM_VIDC_START_INPUT;
  841. } else if (inst->state == MSM_VIDC_DRC_DRAIN_LAST_FLAG) {
  842. new_state = MSM_VIDC_DRAIN_START_INPUT;
  843. }
  844. }
  845. rc = msm_vidc_change_inst_state(inst, new_state, __func__);
  846. if (rc)
  847. goto exit;
  848. exit:
  849. return rc;
  850. }
  851. int msm_vidc_state_change_stop(struct msm_vidc_inst *inst)
  852. {
  853. int rc = 0;
  854. enum msm_vidc_inst_state new_state = MSM_VIDC_ERROR;
  855. if (!inst || !inst->core) {
  856. d_vpr_e("%s: invalid params\n", __func__);
  857. return -EINVAL;
  858. }
  859. if (inst->state == MSM_VIDC_START) {
  860. new_state = MSM_VIDC_DRAIN;
  861. } else if (inst->state == MSM_VIDC_DRC) {
  862. new_state = MSM_VIDC_DRC_DRAIN;
  863. } else if (inst->state == MSM_VIDC_DRC_DRAIN ||
  864. inst->state == MSM_VIDC_DRC_LAST_FLAG) {
  865. new_state = MSM_VIDC_DRC_DRAIN_LAST_FLAG;
  866. } else {
  867. s_vpr_e(inst->sid, "%s: wrong state %s\n",
  868. __func__, state_name(inst->state));
  869. msm_vidc_change_inst_state(inst, MSM_VIDC_ERROR, __func__);
  870. return -EINVAL;
  871. }
  872. rc = msm_vidc_change_inst_state(inst, new_state, __func__);
  873. if (rc)
  874. return rc;
  875. return rc;
  876. }
  877. int msm_vidc_state_change_start(struct msm_vidc_inst *inst)
  878. {
  879. int rc = 0;
  880. enum msm_vidc_inst_state new_state = MSM_VIDC_ERROR;
  881. struct response_work *resp_work;
  882. if (!inst || !inst->core) {
  883. d_vpr_e("%s: invalid params\n", __func__);
  884. return -EINVAL;
  885. }
  886. if (inst->state == MSM_VIDC_DRAIN_LAST_FLAG ||
  887. inst->state == MSM_VIDC_DRC_LAST_FLAG) {
  888. new_state = MSM_VIDC_START;
  889. if (!list_empty(&inst->response_works)) {
  890. resp_work = list_first_entry(&inst->response_works,
  891. struct response_work, list);
  892. if (resp_work->type == RESP_WORK_INPUT_PSC) {
  893. s_vpr_h(inst->sid,
  894. "%s: start in DRC(DRAIN)_LAST_FLAG state, input psc pending\n",
  895. __func__);
  896. rc = handle_session_response_work(inst, resp_work);
  897. if (rc) {
  898. s_vpr_e(inst->sid,
  899. "%s: handle input psc failed\n", __func__);
  900. new_state = MSM_VIDC_ERROR;
  901. } else {
  902. new_state = MSM_VIDC_DRC;
  903. }
  904. list_del(&resp_work->list);
  905. kfree(resp_work->data);
  906. kfree(resp_work);
  907. }
  908. }
  909. } else if (inst->state == MSM_VIDC_DRC_DRAIN_LAST_FLAG) {
  910. new_state = MSM_VIDC_DRAIN;
  911. if (!list_empty(&inst->response_works)) {
  912. resp_work = list_first_entry(&inst->response_works,
  913. struct response_work, list);
  914. if (resp_work->type == RESP_WORK_INPUT_PSC) {
  915. s_vpr_h(inst->sid,
  916. "%s: start in DRC_DRAIN_LAST_FLAG state, input psc pending\n");
  917. rc = handle_session_response_work(inst, resp_work);
  918. if (rc) {
  919. s_vpr_e(inst->sid,
  920. "%s: handle input psc failed\n", __func__);
  921. new_state = MSM_VIDC_ERROR;
  922. } else {
  923. new_state = MSM_VIDC_DRC_DRAIN;
  924. }
  925. list_del(&resp_work->list);
  926. kfree(resp_work->data);
  927. kfree(resp_work);
  928. }
  929. }
  930. } else {
  931. s_vpr_e(inst->sid, "%s: wrong state %s\n",
  932. __func__, state_name(inst->state));
  933. msm_vidc_change_inst_state(inst, MSM_VIDC_ERROR, __func__);
  934. return -EINVAL;
  935. }
  936. rc = msm_vidc_change_inst_state(inst, new_state, __func__);
  937. if (rc)
  938. return rc;
  939. return rc;
  940. }
  941. int msm_vidc_state_change_input_psc(struct msm_vidc_inst *inst)
  942. {
  943. int rc = 0;
  944. enum msm_vidc_inst_state new_state = MSM_VIDC_ERROR;
  945. if (!inst || !inst->core) {
  946. d_vpr_e("%s: invalid params\n", __func__);
  947. return -EINVAL;
  948. }
  949. /* don't change state as output port is not started yet */
  950. if (inst->state == MSM_VIDC_START_INPUT)
  951. return 0;
  952. if (inst->state == MSM_VIDC_START) {
  953. new_state = MSM_VIDC_DRC;
  954. } else if (inst->state == MSM_VIDC_DRAIN) {
  955. new_state = MSM_VIDC_DRC_DRAIN;
  956. } else {
  957. s_vpr_e(inst->sid, "%s: wrong state %s\n",
  958. __func__, state_name(inst->state));
  959. msm_vidc_change_inst_state(inst, MSM_VIDC_ERROR, __func__);
  960. return -EINVAL;
  961. }
  962. rc = msm_vidc_change_inst_state(inst, new_state, __func__);
  963. if (rc)
  964. return rc;
  965. return rc;
  966. }
  967. int msm_vidc_state_change_last_flag(struct msm_vidc_inst *inst)
  968. {
  969. int rc = 0;
  970. enum msm_vidc_inst_state new_state = MSM_VIDC_ERROR;
  971. if (!inst || !inst->core) {
  972. d_vpr_e("%s: invalid params\n", __func__);
  973. return -EINVAL;
  974. }
  975. if (inst->state == MSM_VIDC_DRC) {
  976. new_state = MSM_VIDC_DRC_LAST_FLAG;
  977. } else if (inst->state == MSM_VIDC_DRAIN) {
  978. new_state = MSM_VIDC_DRAIN_LAST_FLAG;
  979. } else if (inst->state == MSM_VIDC_DRC_DRAIN) {
  980. new_state = MSM_VIDC_DRC_DRAIN_LAST_FLAG;
  981. } else {
  982. s_vpr_e(inst->sid, "%s: wrong state %s\n",
  983. __func__, state_name(inst->state));
  984. msm_vidc_change_inst_state(inst, MSM_VIDC_ERROR, __func__);
  985. return -EINVAL;
  986. }
  987. rc = msm_vidc_change_inst_state(inst, new_state, __func__);
  988. if (rc)
  989. return rc;
  990. return rc;
  991. }
  992. int msm_vidc_get_control(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
  993. {
  994. int rc = 0;
  995. if (!inst || !ctrl) {
  996. d_vpr_e("%s: invalid params\n", __func__);
  997. return -EINVAL;
  998. }
  999. switch (ctrl->id) {
  1000. case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
  1001. ctrl->val = inst->buffers.output.min_count +
  1002. inst->buffers.output.extra_count;
  1003. s_vpr_h(inst->sid, "g_min: output buffers %d\n", ctrl->val);
  1004. break;
  1005. case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
  1006. ctrl->val = inst->buffers.input.min_count +
  1007. inst->buffers.input.extra_count;
  1008. s_vpr_h(inst->sid, "g_min: input buffers %d\n", ctrl->val);
  1009. break;
  1010. default:
  1011. break;
  1012. }
  1013. return rc;
  1014. }
  1015. int msm_vidc_get_mbs_per_frame(struct msm_vidc_inst *inst)
  1016. {
  1017. int height, width;
  1018. struct v4l2_format *out_f;
  1019. struct v4l2_format *inp_f;
  1020. out_f = &inst->fmts[OUTPUT_PORT];
  1021. inp_f = &inst->fmts[INPUT_PORT];
  1022. height = max(out_f->fmt.pix_mp.height,
  1023. inp_f->fmt.pix_mp.height);
  1024. width = max(out_f->fmt.pix_mp.width,
  1025. inp_f->fmt.pix_mp.width);
  1026. return NUM_MBS_PER_FRAME(height, width);
  1027. }
  1028. int msm_vidc_get_fps(struct msm_vidc_inst *inst)
  1029. {
  1030. int fps;
  1031. u32 frame_rate, operating_rate;
  1032. if (!inst || !inst->capabilities) {
  1033. d_vpr_e("%s: invalid params\n", __func__);
  1034. return -EINVAL;
  1035. }
  1036. frame_rate = inst->capabilities->cap[FRAME_RATE].value;
  1037. operating_rate = inst->capabilities->cap[OPERATING_RATE].value;
  1038. if (operating_rate > frame_rate)
  1039. fps = (operating_rate >> 16) ?
  1040. (operating_rate >> 16) : 1;
  1041. else
  1042. fps = frame_rate >> 16;
  1043. return fps;
  1044. }
  1045. int msm_vidc_num_buffers(struct msm_vidc_inst *inst,
  1046. enum msm_vidc_buffer_type type, enum msm_vidc_buffer_attributes attr)
  1047. {
  1048. int count = 0;
  1049. struct msm_vidc_buffer *vbuf;
  1050. struct msm_vidc_buffers *buffers;
  1051. if (!inst) {
  1052. d_vpr_e("%s: invalid params\n", __func__);
  1053. return count;
  1054. }
  1055. if (type == MSM_VIDC_BUF_OUTPUT) {
  1056. buffers = &inst->buffers.output;
  1057. } else if (type == MSM_VIDC_BUF_INPUT) {
  1058. buffers = &inst->buffers.input;
  1059. } else {
  1060. s_vpr_e(inst->sid, "%s: invalid buffer type %#x\n",
  1061. __func__, type);
  1062. return count;
  1063. }
  1064. list_for_each_entry(vbuf, &buffers->list, list) {
  1065. if (vbuf->type != type)
  1066. continue;
  1067. if (!(vbuf->attr & attr))
  1068. continue;
  1069. count++;
  1070. }
  1071. return count;
  1072. }
  1073. static int vb2_buffer_to_driver(struct vb2_buffer *vb2,
  1074. struct msm_vidc_buffer *buf)
  1075. {
  1076. int rc = 0;
  1077. if (!vb2 || !buf) {
  1078. d_vpr_e("%s: invalid params\n", __func__);
  1079. return -EINVAL;
  1080. }
  1081. buf->valid = true;
  1082. buf->type = v4l2_type_to_driver(vb2->type, __func__);
  1083. if (!buf->type)
  1084. return -EINVAL;
  1085. buf->index = vb2->index;
  1086. buf->fd = vb2->planes[0].m.fd;
  1087. buf->data_offset = vb2->planes[0].data_offset;
  1088. buf->data_size = vb2->planes[0].bytesused;
  1089. buf->buffer_size = vb2->planes[0].length;
  1090. buf->timestamp = vb2->timestamp;
  1091. return rc;
  1092. }
/*
 * Drop one mapping reference for @buf and delete its map entry once the
 * refcount reaches zero.
 *
 * Returns 0 on success, -EINVAL if @buf has no entry on the mappings list
 * or the underlying unmap fails.
 */
int msm_vidc_unmap_driver_buf(struct msm_vidc_inst *inst,
	struct msm_vidc_buffer *buf)
{
	int rc = 0;
	struct msm_vidc_mappings *mappings;
	struct msm_vidc_map *map = NULL;
	bool found = false;

	if (!inst || !buf) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	mappings = msm_vidc_get_mappings(inst, buf->type, __func__);
	if (!mappings)
		return -EINVAL;
	/* sanity check to see if it was not removed */
	list_for_each_entry(map, &mappings->list, list) {
		if (map->dmabuf == buf->dmabuf) {
			found = true;
			break;
		}
	}
	if (!found) {
		print_vidc_buffer(VIDC_ERR, "no buf in mappings", inst, buf);
		return -EINVAL;
	}
	/* NOTE(review): presumably decrements map->refcount — confirm in memory_unmap */
	rc = msm_vidc_memory_unmap(inst->core, map);
	if (rc) {
		print_vidc_buffer(VIDC_ERR, "unmap failed", inst, buf);
		return -EINVAL;
	}
	/* finally delete if refcount is zero */
	if (!map->refcount) {
		list_del(&map->list);
		kfree(map);
	}
	return 0;
}
  1130. int msm_vidc_put_driver_buf(struct msm_vidc_inst *inst,
  1131. struct msm_vidc_buffer *buf)
  1132. {
  1133. int rc = 0;
  1134. if (!inst || !buf) {
  1135. d_vpr_e("%s: invalid params\n", __func__);
  1136. return -EINVAL;
  1137. }
  1138. rc = msm_vidc_unmap_driver_buf(inst, buf);
  1139. if (rc)
  1140. return rc;
  1141. msm_vidc_memory_put_dmabuf(buf->dmabuf);
  1142. /* delete the buffer from buffers->list */
  1143. list_del(&buf->list);
  1144. kfree(buf);
  1145. return 0;
  1146. }
/*
 * Ensure @buf has a device mapping, creating a new map entry when its
 * dma-buf is not already on the mappings list.
 *
 * For an existing entry, a read-only buffer skips the extra map call
 * (its mapping is presumably still held — see the RO handling in
 * msm_vidc_get_driver_buf); otherwise the map refcount is bumped.
 *
 * Returns 0 on success, -EINVAL for bad params, -ENOMEM on alloc/map
 * failure.  On success buf->device_addr is valid.
 */
int msm_vidc_map_driver_buf(struct msm_vidc_inst *inst,
	struct msm_vidc_buffer *buf)
{
	int rc = 0;
	struct msm_vidc_mappings *mappings;
	struct msm_vidc_map *map = NULL;
	bool found = false;

	if (!inst || !buf) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	mappings = msm_vidc_get_mappings(inst, buf->type, __func__);
	if (!mappings)
		return -EINVAL;
	/* check if it is an existing one */
	list_for_each_entry(map, &mappings->list, list) {
		if (map->dmabuf == buf->dmabuf) {
			found = true;
			break;
		}
	}
	if (found) {
		/* skip mapping for RO buffer */
		if (!(buf->attr & MSM_VIDC_ATTR_READ_ONLY)) {
			rc = msm_vidc_memory_map(inst->core, map);
			if (rc)
				return -ENOMEM;
			buf->device_addr = map->device_addr;
		}
		return 0;
	}
	/* first time this dma-buf is seen: create and map a fresh entry */
	map = kzalloc(sizeof(struct msm_vidc_map), GFP_KERNEL);
	if (!map) {
		s_vpr_e(inst->sid, "%s: alloc failed\n", __func__);
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&map->list);
	map->type = buf->type;
	map->dmabuf = buf->dmabuf;
	map->region = msm_vidc_get_buffer_region(inst, buf->type, __func__);
	rc = msm_vidc_memory_map(inst->core, map);
	if (rc) {
		kfree(map);
		return -ENOMEM;
	}
	buf->device_addr = map->device_addr;
	list_add_tail(&map->list, &mappings->list);
	return 0;
}
/*
 * Translate a vb2 buffer queued by userspace into a driver buffer,
 * reusing an existing entry when the same dma-buf + data_offset was seen
 * before, otherwise allocating a new one.
 *
 * Repeat submissions are only legal for raw YUV buffers: decoder capture
 * buffers must carry the READ_ONLY attribute to repeat, and an encoder
 * input repeat is deliberately treated as a brand-new buffer (found is
 * cleared so it gets re-added and re-mapped).
 *
 * Returns the driver buffer on success, NULL on failure.  On failure the
 * dma-buf reference taken here is dropped, and a newly allocated (not yet
 * listed) buffer is freed.
 */
struct msm_vidc_buffer *msm_vidc_get_driver_buf(struct msm_vidc_inst *inst,
	struct vb2_buffer *vb2)
{
	int rc = 0;
	struct msm_vidc_buffer *buf = NULL;
	struct msm_vidc_buffers *buffers;
	struct dma_buf *dmabuf;
	enum msm_vidc_buffer_type buf_type;
	bool found = false;

	if (!inst || !vb2) {
		d_vpr_e("%s: invalid params\n", __func__);
		return NULL;
	}
	buf_type = v4l2_type_to_driver(vb2->type, __func__);
	if (!buf_type)
		return NULL;
	buffers = msm_vidc_get_buffers(inst, buf_type, __func__);
	if (!buffers)
		return NULL;
	/* takes a dma-buf reference; released on the error path below */
	dmabuf = msm_vidc_memory_get_dmabuf(vb2->planes[0].m.fd);
	if (!dmabuf)
		return NULL;
	/* check if it is an existing buffer */
	list_for_each_entry(buf, &buffers->list, list) {
		if (buf->dmabuf == dmabuf &&
		    buf->data_offset == vb2->planes[0].data_offset) {
			found = true;
			break;
		}
	}
	if (found) {
		/* only YUV buffers are allowed to repeat */
		if ((is_decode_session(inst) && vb2->type != OUTPUT_MPLANE) ||
		    (is_encode_session(inst) && vb2->type != INPUT_MPLANE)) {
			print_vidc_buffer(VIDC_ERR,
				"existing buffer", inst, buf);
			goto error;
		}
		/* for decoder, YUV with RO flag are allowed to repeat */
		if (is_decode_session(inst) &&
		    !(buf->attr & MSM_VIDC_ATTR_READ_ONLY)) {
			print_vidc_buffer(VIDC_ERR,
				"existing buffer without RO flag", inst, buf);
			goto error;
		}
		/* for encoder, treat the repeated buffer as new buffer */
		if (is_encode_session(inst) && vb2->type == INPUT_MPLANE)
			found = false;
	} else {
		buf = kzalloc(sizeof(struct msm_vidc_buffer), GFP_KERNEL);
		if (!buf) {
			s_vpr_e(inst->sid, "%s: alloc failed\n", __func__);
			goto error;
		}
		INIT_LIST_HEAD(&buf->list);
		buf->dmabuf = dmabuf;
	}
	rc = vb2_buffer_to_driver(vb2, buf);
	if (rc)
		goto error;
	if (!found)
		list_add_tail(&buf->list, &buffers->list);
	rc = msm_vidc_map_driver_buf(inst, buf);
	if (rc)
		goto error;
	return buf;
error:
	msm_vidc_memory_put_dmabuf(dmabuf);
	/* free only buffers not (yet) owned by buffers->list */
	if (!found)
		kfree(buf);
	return NULL;
}
  1268. struct msm_vidc_buffer *get_meta_buffer(struct msm_vidc_inst *inst,
  1269. struct msm_vidc_buffer *buf)
  1270. {
  1271. struct msm_vidc_buffer *mbuf;
  1272. struct msm_vidc_buffers *buffers;
  1273. bool found = false;
  1274. if (!inst || !buf) {
  1275. d_vpr_e("%s: invalid params\n", __func__);
  1276. return NULL;
  1277. }
  1278. if (buf->type == MSM_VIDC_BUF_INPUT) {
  1279. buffers = &inst->buffers.input_meta;
  1280. } else if (buf->type == MSM_VIDC_BUF_OUTPUT) {
  1281. buffers = &inst->buffers.output_meta;
  1282. } else {
  1283. s_vpr_e(inst->sid, "%s: invalid buffer type %d\n",
  1284. __func__, buf->type);
  1285. return NULL;
  1286. }
  1287. list_for_each_entry(mbuf, &buffers->list, list) {
  1288. if (!mbuf->valid)
  1289. continue;
  1290. if (mbuf->index == buf->index) {
  1291. found = true;
  1292. break;
  1293. }
  1294. }
  1295. if (!found)
  1296. return NULL;
  1297. return mbuf;
  1298. }
/*
 * Queue a vb2 buffer to firmware.
 *
 * Meta buffers are never queued on their own — they are marked DEFERRED
 * and ride along with the matching data buffer.  Data buffers queued
 * before streamon completes are likewise deferred.  For decoder input,
 * the one-shot CODEC_CONFIG capability tags the buffer as codec config
 * and is then cleared.  Input buffers also drive power scaling.
 *
 * Returns 0 on success (including the deferred cases), negative errno
 * on failure.
 */
int msm_vidc_queue_buffer(struct msm_vidc_inst *inst, struct vb2_buffer *vb2)
{
	int rc = 0;
	struct msm_vidc_buffer *buf;
	struct msm_vidc_buffer *meta;
	int port;

	if (!inst || !vb2) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	buf = msm_vidc_get_driver_buf(inst, vb2);
	if (!buf)
		return -EINVAL;
	/* meta buffer will be queued along with actual buffer */
	if (buf->type == MSM_VIDC_BUF_INPUT_META ||
	    buf->type == MSM_VIDC_BUF_OUTPUT_META) {
		buf->attr |= MSM_VIDC_ATTR_DEFERRED;
		print_vidc_buffer(VIDC_HIGH, "qbuf deferred", inst, buf);
		return 0;
	}
	/* skip queuing if streamon not completed */
	port = v4l2_type_to_driver_port(inst, vb2->type, __func__);
	if (port < 0)
		return -EINVAL;
	if (!inst->vb2q[port].streaming) {
		buf->attr |= MSM_VIDC_ATTR_DEFERRED;
		print_vidc_buffer(VIDC_HIGH, "qbuf deferred", inst, buf);
		return 0;
	}
	/* one-shot flag: first input after it is set carries codec config */
	if (is_decode_session(inst) &&
	    inst->capabilities->cap[CODEC_CONFIG].value) {
		buf->flags |= MSM_VIDC_BUF_FLAG_CODECCONFIG;
		inst->capabilities->cap[CODEC_CONFIG].value = 0;
	}
	if (buf->type == MSM_VIDC_BUF_INPUT) {
		inst->power.buffer_counter++;
		msm_vidc_scale_power(inst, true);
	}
	print_vidc_buffer(VIDC_HIGH, "qbuf", inst, buf);
	meta = get_meta_buffer(inst, buf);
	if (!meta) {
		/* missing meta is fatal only when meta is enabled for this port */
		if (is_meta_enabled(inst, buf->type)) {
			print_vidc_buffer(VIDC_ERR, "missing meta for",
				inst, buf);
			return -EINVAL;
		}
	}
	rc = venus_hfi_queue_buffer(inst, buf, meta);
	if (rc)
		return rc;
	buf->attr &= ~MSM_VIDC_ATTR_DEFERRED;
	buf->attr |= MSM_VIDC_ATTR_QUEUED;
	if (meta) {
		meta->attr &= ~MSM_VIDC_ATTR_DEFERRED;
		meta->attr |= MSM_VIDC_ATTR_QUEUED;
	}
	return rc;
}
  1357. int msm_vidc_destroy_internal_buffer(struct msm_vidc_inst *inst,
  1358. struct msm_vidc_buffer *buffer)
  1359. {
  1360. struct msm_vidc_buffers *buffers;
  1361. struct msm_vidc_allocations *allocations;
  1362. struct msm_vidc_mappings *mappings;
  1363. struct msm_vidc_alloc *alloc, *alloc_dummy;
  1364. struct msm_vidc_map *map, *map_dummy;
  1365. struct msm_vidc_buffer *buf, *dummy;
  1366. if (!inst || !inst->core) {
  1367. d_vpr_e("%s: invalid params\n", __func__);
  1368. return -EINVAL;
  1369. }
  1370. if (!is_internal_buffer(buffer->type)) {
  1371. s_vpr_e(inst->sid, "%s: buffer type %#x is not internal\n",
  1372. __func__, buffer->type);
  1373. return 0;
  1374. }
  1375. s_vpr_h(inst->sid,
  1376. "%s: destroy buffer_type %#x, size %d device_addr %#x\n",
  1377. __func__, buffer->type, buffer->buffer_size,
  1378. buffer->device_addr);
  1379. buffers = msm_vidc_get_buffers(inst, buffer->type, __func__);
  1380. if (!buffers)
  1381. return -EINVAL;
  1382. allocations = msm_vidc_get_allocations(inst, buffer->type, __func__);
  1383. if (!allocations)
  1384. return -EINVAL;
  1385. mappings = msm_vidc_get_mappings(inst, buffer->type, __func__);
  1386. if (!mappings)
  1387. return -EINVAL;
  1388. list_for_each_entry_safe(map, map_dummy, &mappings->list, list) {
  1389. if (map->dmabuf == buffer->dmabuf) {
  1390. msm_vidc_memory_unmap(inst->core, map);
  1391. list_del(&map->list);
  1392. kfree(map);
  1393. }
  1394. }
  1395. list_for_each_entry_safe(alloc, alloc_dummy, &allocations->list, list) {
  1396. if (alloc->dmabuf == buffer->dmabuf) {
  1397. msm_vidc_memory_free(inst->core, alloc);
  1398. list_del(&alloc->list);
  1399. kfree(alloc);
  1400. }
  1401. }
  1402. list_for_each_entry_safe(buf, dummy, &buffers->list, list) {
  1403. if (buf->dmabuf == buffer->dmabuf) {
  1404. list_del(&buf->list);
  1405. kfree(buf);
  1406. }
  1407. }
  1408. return 0;
  1409. }
  1410. int msm_vidc_get_internal_buffers(struct msm_vidc_inst *inst,
  1411. enum msm_vidc_buffer_type buffer_type)
  1412. {
  1413. u32 buf_size;
  1414. u32 buf_count;
  1415. struct msm_vidc_core *core;
  1416. struct msm_vidc_buffers *buffers;
  1417. if (!inst || !inst->core) {
  1418. d_vpr_e("%s: invalid params\n", __func__);
  1419. return -EINVAL;
  1420. }
  1421. core = inst->core;
  1422. buf_size = call_session_op(core, buffer_size,
  1423. inst, buffer_type);
  1424. buf_count = call_session_op(core, min_count,
  1425. inst, buffer_type);
  1426. buffers = msm_vidc_get_buffers(inst, buffer_type, __func__);
  1427. if (!buffers)
  1428. return -EINVAL;
  1429. if (buf_size <= buffers->size &&
  1430. buf_count <= buffers->min_count) {
  1431. buffers->reuse = true;
  1432. } else {
  1433. buffers->reuse = false;
  1434. buffers->size = buf_size;
  1435. buffers->min_count = buf_count;
  1436. }
  1437. return 0;
  1438. }
  1439. int msm_vidc_create_internal_buffer(struct msm_vidc_inst *inst,
  1440. enum msm_vidc_buffer_type buffer_type, u32 index)
  1441. {
  1442. int rc = 0;
  1443. struct msm_vidc_buffers *buffers;
  1444. struct msm_vidc_allocations *allocations;
  1445. struct msm_vidc_mappings *mappings;
  1446. struct msm_vidc_buffer *buffer;
  1447. struct msm_vidc_alloc *alloc;
  1448. struct msm_vidc_map *map;
  1449. if (!inst || !inst->core) {
  1450. d_vpr_e("%s: invalid params\n", __func__);
  1451. return -EINVAL;
  1452. }
  1453. if (!is_internal_buffer(buffer_type)) {
  1454. s_vpr_e(inst->sid, "%s: buffer type %#x is not internal\n",
  1455. __func__, buffer_type);
  1456. return 0;
  1457. }
  1458. buffers = msm_vidc_get_buffers(inst, buffer_type, __func__);
  1459. if (!buffers)
  1460. return -EINVAL;
  1461. allocations = msm_vidc_get_allocations(inst, buffer_type, __func__);
  1462. if (!allocations)
  1463. return -EINVAL;
  1464. mappings = msm_vidc_get_mappings(inst, buffer_type, __func__);
  1465. if (!mappings)
  1466. return -EINVAL;
  1467. if (!buffers->size) {
  1468. s_vpr_e(inst->sid, "%s: invalid buffer %#x\n",
  1469. __func__, buffer_type);
  1470. return -EINVAL;
  1471. }
  1472. buffer = kzalloc(sizeof(struct msm_vidc_buffer), GFP_KERNEL);
  1473. if (!buffer) {
  1474. s_vpr_e(inst->sid, "%s: buf alloc failed\n", __func__);
  1475. return -ENOMEM;
  1476. }
  1477. INIT_LIST_HEAD(&buffer->list);
  1478. buffer->valid = true;
  1479. buffer->type = buffer_type;
  1480. buffer->index = index;
  1481. buffer->buffer_size = buffers->size;
  1482. list_add_tail(&buffer->list, &buffers->list);
  1483. alloc = kzalloc(sizeof(struct msm_vidc_alloc), GFP_KERNEL);
  1484. if (!alloc) {
  1485. s_vpr_e(inst->sid, "%s: alloc failed\n", __func__);
  1486. return -ENOMEM;
  1487. }
  1488. INIT_LIST_HEAD(&alloc->list);
  1489. alloc->type = buffer_type;
  1490. alloc->region = msm_vidc_get_buffer_region(inst,
  1491. buffer_type, __func__);
  1492. alloc->size = buffer->buffer_size;
  1493. alloc->secure = (alloc->region > MSM_VIDC_NON_SECURE) ? 1 : 0;
  1494. rc = msm_vidc_memory_alloc(inst->core, alloc);
  1495. if (rc)
  1496. return -ENOMEM;
  1497. list_add_tail(&alloc->list, &allocations->list);
  1498. map = kzalloc(sizeof(struct msm_vidc_map), GFP_KERNEL);
  1499. if (!map) {
  1500. s_vpr_e(inst->sid, "%s: map alloc failed\n", __func__);
  1501. return -ENOMEM;
  1502. }
  1503. INIT_LIST_HEAD(&map->list);
  1504. map->type = alloc->type;
  1505. map->region = alloc->region;
  1506. map->dmabuf = alloc->dmabuf;
  1507. rc = msm_vidc_memory_map(inst->core, map);
  1508. if (rc)
  1509. return -ENOMEM;
  1510. list_add_tail(&map->list, &mappings->list);
  1511. buffer->dmabuf = alloc->dmabuf;
  1512. buffer->device_addr = map->device_addr;
  1513. s_vpr_h(inst->sid,
  1514. "%s: created buffer_type %#x, size %d device_addr %#x\n",
  1515. __func__, buffer_type, buffers->size,
  1516. buffer->device_addr);
  1517. return 0;
  1518. }
  1519. int msm_vidc_create_internal_buffers(struct msm_vidc_inst *inst,
  1520. enum msm_vidc_buffer_type buffer_type)
  1521. {
  1522. int rc = 0;
  1523. struct msm_vidc_buffers *buffers;
  1524. int i;
  1525. if (!inst || !inst->core) {
  1526. d_vpr_e("%s: invalid params\n", __func__);
  1527. return -EINVAL;
  1528. }
  1529. buffers = msm_vidc_get_buffers(inst, buffer_type, __func__);
  1530. if (!buffers)
  1531. return -EINVAL;
  1532. if (buffers->reuse) {
  1533. s_vpr_l(inst->sid, "%s: reuse enabled for buffer type %#x\n",
  1534. __func__, buffer_type);
  1535. return 0;
  1536. }
  1537. for (i = 0; i < buffers->min_count; i++) {
  1538. rc = msm_vidc_create_internal_buffer(inst, buffer_type, i);
  1539. if (rc)
  1540. return rc;
  1541. }
  1542. return rc;
  1543. }
  1544. int msm_vidc_queue_internal_buffers(struct msm_vidc_inst *inst,
  1545. enum msm_vidc_buffer_type buffer_type)
  1546. {
  1547. int rc = 0;
  1548. struct msm_vidc_buffers *buffers;
  1549. struct msm_vidc_buffer *buffer, *dummy;
  1550. if (!inst || !inst->core) {
  1551. d_vpr_e("%s: invalid params\n", __func__);
  1552. return -EINVAL;
  1553. }
  1554. if (!is_internal_buffer(buffer_type)) {
  1555. s_vpr_e(inst->sid, "%s: buffer type %#x is not internal\n",
  1556. __func__, buffer_type);
  1557. return 0;
  1558. }
  1559. buffers = msm_vidc_get_buffers(inst, buffer_type, __func__);
  1560. if (!buffers)
  1561. return -EINVAL;
  1562. if (buffers->reuse) {
  1563. s_vpr_l(inst->sid, "%s: reuse enabled for buffer type %#x\n",
  1564. __func__, buffer_type);
  1565. return 0;
  1566. }
  1567. list_for_each_entry_safe(buffer, dummy, &buffers->list, list) {
  1568. /* do not queue pending release buffers */
  1569. if (buffer->flags & MSM_VIDC_ATTR_PENDING_RELEASE)
  1570. continue;
  1571. /* do not queue already queued buffers */
  1572. if (buffer->attr & MSM_VIDC_ATTR_QUEUED)
  1573. continue;
  1574. rc = venus_hfi_queue_buffer(inst, buffer, NULL);
  1575. if (rc)
  1576. return rc;
  1577. /* mark queued */
  1578. buffer->attr |= MSM_VIDC_ATTR_QUEUED;
  1579. s_vpr_h(inst->sid, "%s: queued buffer_type %#x, size %d\n",
  1580. __func__, buffer_type, buffers->size);
  1581. }
  1582. return 0;
  1583. }
  1584. int msm_vidc_alloc_and_queue_session_internal_buffers(struct msm_vidc_inst *inst,
  1585. enum msm_vidc_buffer_type buffer_type)
  1586. {
  1587. int rc = 0;
  1588. if (!inst || !inst->core) {
  1589. d_vpr_e("%s: invalid params\n", __func__);
  1590. return -EINVAL;
  1591. }
  1592. if (buffer_type != MSM_VIDC_BUF_ARP &&
  1593. buffer_type != MSM_VIDC_BUF_PERSIST) {
  1594. s_vpr_e(inst->sid, "%s: invalid buffer type: %d\n",
  1595. __func__, buffer_type);
  1596. rc = -EINVAL;
  1597. goto exit;
  1598. }
  1599. rc = msm_vidc_get_internal_buffers(inst, buffer_type);
  1600. if (rc)
  1601. goto exit;
  1602. rc = msm_vidc_create_internal_buffers(inst, buffer_type);
  1603. if (rc)
  1604. goto exit;
  1605. rc = msm_vidc_queue_internal_buffers(inst, buffer_type);
  1606. if (rc)
  1607. goto exit;
  1608. exit:
  1609. return rc;
  1610. }
  1611. int msm_vidc_release_internal_buffers(struct msm_vidc_inst *inst,
  1612. enum msm_vidc_buffer_type buffer_type)
  1613. {
  1614. int rc = 0;
  1615. struct msm_vidc_buffers *buffers;
  1616. struct msm_vidc_buffer *buffer, *dummy;
  1617. if (!inst || !inst->core) {
  1618. d_vpr_e("%s: invalid params\n", __func__);
  1619. return -EINVAL;
  1620. }
  1621. if (!is_internal_buffer(buffer_type)) {
  1622. s_vpr_e(inst->sid, "%s: buffer type %#x is not internal\n",
  1623. __func__, buffer_type);
  1624. return 0;
  1625. }
  1626. buffers = msm_vidc_get_buffers(inst, buffer_type, __func__);
  1627. if (!buffers)
  1628. return -EINVAL;
  1629. if (buffers->reuse) {
  1630. s_vpr_l(inst->sid, "%s: reuse enabled for buffer type %#x\n",
  1631. __func__, buffer_type);
  1632. return 0;
  1633. }
  1634. list_for_each_entry_safe(buffer, dummy, &buffers->list, list) {
  1635. /* do not release already pending release buffers */
  1636. if (buffer->attr & MSM_VIDC_ATTR_PENDING_RELEASE)
  1637. continue;
  1638. /* release only queued buffers */
  1639. if (!(buffer->attr & MSM_VIDC_ATTR_QUEUED))
  1640. continue;
  1641. rc = venus_hfi_release_buffer(inst, buffer);
  1642. if (rc)
  1643. return rc;
  1644. /* mark pending release */
  1645. buffer->attr |= MSM_VIDC_ATTR_PENDING_RELEASE;
  1646. s_vpr_e(inst->sid, "%s: released buffer_type %#x, size %d\n",
  1647. __func__, buffer_type, buffers->size);
  1648. }
  1649. return 0;
  1650. }
  1651. int msm_vidc_vb2_buffer_done(struct msm_vidc_inst *inst,
  1652. struct msm_vidc_buffer *buf)
  1653. {
  1654. int type, port;
  1655. struct vb2_queue *q;
  1656. struct vb2_buffer *vb2;
  1657. struct vb2_v4l2_buffer *vbuf;
  1658. bool found;
  1659. if (!inst || !buf) {
  1660. d_vpr_e("%s: invalid params\n", __func__);
  1661. return -EINVAL;
  1662. }
  1663. type = v4l2_type_from_driver(buf->type, __func__);
  1664. if (!type)
  1665. return -EINVAL;
  1666. port = v4l2_type_to_driver_port(inst, type, __func__);
  1667. if (port < 0)
  1668. return -EINVAL;
  1669. q = &inst->vb2q[port];
  1670. if (!q->streaming) {
  1671. s_vpr_e(inst->sid, "%s: port %d is not streaming\n",
  1672. __func__, port);
  1673. return -EINVAL;
  1674. }
  1675. found = false;
  1676. list_for_each_entry(vb2, &q->queued_list, queued_entry) {
  1677. if (vb2->state != VB2_BUF_STATE_ACTIVE)
  1678. continue;
  1679. if (vb2->index == buf->index) {
  1680. found = true;
  1681. break;
  1682. }
  1683. }
  1684. if (!found) {
  1685. print_vidc_buffer(VIDC_ERR, "vb2 not found for", inst, buf);
  1686. return -EINVAL;
  1687. }
  1688. vbuf = to_vb2_v4l2_buffer(vb2);
  1689. vbuf->flags = buf->flags;
  1690. vb2->timestamp = buf->timestamp;
  1691. vb2->planes[0].bytesused = buf->data_size;
  1692. vb2_buffer_done(vb2, VB2_BUF_STATE_DONE);
  1693. return 0;
  1694. }
  1695. int msm_vidc_event_queue_init(struct msm_vidc_inst *inst)
  1696. {
  1697. int rc = 0;
  1698. int index;
  1699. struct msm_vidc_core *core;
  1700. if (!inst || !inst->core) {
  1701. d_vpr_e("%s: invalid params\n", __func__);
  1702. return -EINVAL;
  1703. }
  1704. core = inst->core;
  1705. if (is_decode_session(inst))
  1706. index = 0;
  1707. else if (is_encode_session(inst))
  1708. index = 1;
  1709. else
  1710. return -EINVAL;
  1711. v4l2_fh_init(&inst->event_handler, &core->vdev[index].vdev);
  1712. v4l2_fh_add(&inst->event_handler);
  1713. return rc;
  1714. }
  1715. int msm_vidc_event_queue_deinit(struct msm_vidc_inst *inst)
  1716. {
  1717. int rc = 0;
  1718. if (!inst) {
  1719. d_vpr_e("%s: invalid params\n", __func__);
  1720. return -EINVAL;
  1721. }
  1722. v4l2_fh_del(&inst->event_handler);
  1723. v4l2_fh_exit(&inst->event_handler);
  1724. return rc;
  1725. }
/*
 * Initialize one vb2 queue for @type with the core's vb2 ops and
 * dmabuf-only I/O, then register it with videobuf2.
 * Returns 0 on success or the vb2_queue_init() error code.
 */
static int vb2q_init(struct msm_vidc_inst *inst,
	struct vb2_queue *q, enum v4l2_buf_type type)
{
	int rc = 0;
	struct msm_vidc_core *core;

	if (!inst || !q || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;

	q->type = type;
	/* dmabuf is the only supported memory mode */
	q->io_modes = VB2_DMABUF;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	q->ops = core->vb2_ops;
	q->mem_ops = core->vb2_mem_ops;
	/* stash the instance so vb2 callbacks can recover it */
	q->drv_priv = inst;
	q->allow_zero_bytesused = 1;
	q->copy_timestamp = 1;
	rc = vb2_queue_init(q);
	if (rc)
		s_vpr_e(inst->sid, "%s: vb2_queue_init failed for type %d\n",
			__func__, type);
	return rc;
}
  1750. int msm_vidc_vb2_queue_init(struct msm_vidc_inst *inst)
  1751. {
  1752. int rc = 0;
  1753. if (!inst) {
  1754. d_vpr_e("%s: invalid params\n", __func__);
  1755. return -EINVAL;
  1756. }
  1757. rc = vb2q_init(inst, &inst->vb2q[INPUT_PORT], INPUT_MPLANE);
  1758. if (rc)
  1759. return rc;
  1760. rc = vb2q_init(inst, &inst->vb2q[OUTPUT_PORT], OUTPUT_MPLANE);
  1761. if (rc)
  1762. return rc;
  1763. rc = vb2q_init(inst, &inst->vb2q[INPUT_META_PORT], INPUT_META_PLANE);
  1764. if (rc)
  1765. return rc;
  1766. rc = vb2q_init(inst, &inst->vb2q[OUTPUT_META_PORT], OUTPUT_META_PLANE);
  1767. if (rc)
  1768. return rc;
  1769. return rc;
  1770. }
  1771. int msm_vidc_vb2_queue_deinit(struct msm_vidc_inst *inst)
  1772. {
  1773. int rc = 0;
  1774. if (!inst) {
  1775. d_vpr_e("%s: invalid params\n", __func__);
  1776. return -EINVAL;
  1777. }
  1778. vb2_queue_release(&inst->vb2q[OUTPUT_META_PORT]);
  1779. vb2_queue_release(&inst->vb2q[INPUT_META_PORT]);
  1780. vb2_queue_release(&inst->vb2q[OUTPUT_PORT]);
  1781. vb2_queue_release(&inst->vb2q[INPUT_PORT]);
  1782. return rc;
  1783. }
  1784. int msm_vidc_add_session(struct msm_vidc_inst *inst)
  1785. {
  1786. int rc = 0;
  1787. struct msm_vidc_inst *i;
  1788. struct msm_vidc_core *core;
  1789. u32 count = 0;
  1790. if (!inst || !inst->core) {
  1791. d_vpr_e("%s: invalid params\n", __func__);
  1792. return -EINVAL;
  1793. }
  1794. core = inst->core;
  1795. core_lock(core, __func__);
  1796. list_for_each_entry(i, &core->instances, list)
  1797. count++;
  1798. if (count < 0xffffff /*TODO: MAX_SUPPORTED_INSTANCES*/) {
  1799. list_add_tail(&inst->list, &core->instances);
  1800. } else {
  1801. d_vpr_e("%s: total sessions %d exceeded max limit %d\n",
  1802. __func__, count, MAX_SUPPORTED_INSTANCES);
  1803. rc = -EINVAL;
  1804. }
  1805. core_unlock(core, __func__);
  1806. /* assign session_id */
  1807. inst->session_id = hash32_ptr(inst);
  1808. inst->sid = inst->session_id;
  1809. return rc;
  1810. }
  1811. int msm_vidc_remove_session(struct msm_vidc_inst *inst)
  1812. {
  1813. struct msm_vidc_inst *i, *temp;
  1814. struct msm_vidc_core *core;
  1815. u32 count = 0;
  1816. if (!inst || !inst->core) {
  1817. d_vpr_e("%s: invalid params\n", __func__);
  1818. return -EINVAL;
  1819. }
  1820. core = inst->core;
  1821. core_lock(core, __func__);
  1822. list_for_each_entry_safe(i, temp, &core->instances, list) {
  1823. if (i->session_id == inst->session_id) {
  1824. list_del_init(&i->list);
  1825. d_vpr_h("%s: removed session %d\n",
  1826. __func__, i->session_id);
  1827. inst->sid = 0;
  1828. }
  1829. }
  1830. list_for_each_entry(i, &core->instances, list)
  1831. count++;
  1832. d_vpr_h("%s: remaining sessions %d\n", __func__, count);
  1833. core_unlock(core, __func__);
  1834. return 0;
  1835. }
  1836. int msm_vidc_session_open(struct msm_vidc_inst *inst)
  1837. {
  1838. int rc = 0;
  1839. if (!inst) {
  1840. d_vpr_e("%s: invalid params\n", __func__);
  1841. return -EINVAL;
  1842. }
  1843. inst->packet_size = 4096;
  1844. inst->packet = kzalloc(inst->packet_size, GFP_KERNEL);
  1845. if (!inst->packet) {
  1846. s_vpr_e(inst->sid, "%s(): inst packet allocation failed\n", __func__);
  1847. return -ENOMEM;
  1848. }
  1849. rc = venus_hfi_session_open(inst);
  1850. if (rc)
  1851. goto error;
  1852. return 0;
  1853. error:
  1854. s_vpr_e(inst->sid, "%s(): session open failed\n", __func__);
  1855. kfree(inst->packet);
  1856. inst->packet = NULL;
  1857. return rc;
  1858. }
  1859. int msm_vidc_session_set_codec(struct msm_vidc_inst *inst)
  1860. {
  1861. int rc = 0;
  1862. if (!inst) {
  1863. d_vpr_e("%s: invalid params\n", __func__);
  1864. return -EINVAL;
  1865. }
  1866. rc = venus_hfi_session_set_codec(inst);
  1867. if (rc)
  1868. return rc;
  1869. return 0;
  1870. }
  1871. int msm_vidc_session_streamon(struct msm_vidc_inst *inst,
  1872. enum msm_vidc_port_type port)
  1873. {
  1874. int rc = 0;
  1875. if (!inst || !inst->core) {
  1876. d_vpr_e("%s: invalid params\n", __func__);
  1877. return -EINVAL;
  1878. }
  1879. msm_vidc_scale_power(inst, true);
  1880. rc = venus_hfi_start(inst, port);
  1881. if (rc)
  1882. return rc;
  1883. return rc;
  1884. }
/*
 * Stop streaming on one port: issue the HFI stop command, block for
 * the firmware stop-done signal, verify no buffers remain queued with
 * firmware, then flush the port's buffers back to the client.
 * inst->lock is held on entry (it is dropped around the completion
 * wait so the response thread can signal, then re-acquired).
 * Returns 0 on success, -ETIMEDOUT (after forcing core deinit) if
 * firmware does not respond in time.
 */
int msm_vidc_session_streamoff(struct msm_vidc_inst *inst,
	enum msm_vidc_port_type port)
{
	int rc = 0;
	int count = 0;
	struct msm_vidc_core *core;
	enum signal_session_response signal_type;
	enum msm_vidc_buffer_type buffer_type;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	/* map the port to its stop-done signal and buffer type */
	if (port == INPUT_PORT) {
		signal_type = SIGNAL_CMD_STOP_INPUT;
		buffer_type = MSM_VIDC_BUF_INPUT;
	} else if (port == OUTPUT_PORT) {
		signal_type = SIGNAL_CMD_STOP_OUTPUT;
		buffer_type = MSM_VIDC_BUF_OUTPUT;
	} else {
		s_vpr_e(inst->sid, "%s: invalid port: %d\n", __func__, port);
		return -EINVAL;
	}
	rc = venus_hfi_stop(inst, port);
	if (rc)
		return rc;
	core = inst->core;
	s_vpr_h(inst->sid, "%s: wait on port: %d for time: %d ms\n",
		__func__, port, core->capabilities[HW_RESPONSE_TIMEOUT].value);
	/* drop the instance lock so the response thread can complete() */
	mutex_unlock(&inst->lock);
	rc = wait_for_completion_timeout(
		&inst->completions[signal_type],
		msecs_to_jiffies(
		core->capabilities[HW_RESPONSE_TIMEOUT].value));
	if (!rc) {
		s_vpr_e(inst->sid, "%s: session stop timed out for port: %d\n",
			__func__, port);
		rc = -ETIMEDOUT;
		/* unresponsive firmware: tear the whole core down */
		msm_vidc_core_timeout(inst->core);
	} else {
		/* nonzero return is jiffies remaining, i.e. success */
		rc = 0;
	}
	mutex_lock(&inst->lock);
	/* no more queued buffers after streamoff */
	count = msm_vidc_num_buffers(inst, buffer_type, MSM_VIDC_ATTR_QUEUED);
	if (count) {
		s_vpr_e(inst->sid, "%s: %d buffers pending on port: %d\n",
			__func__, count, port);
		/* firmware kept buffers it should have returned: kill session */
		msm_vidc_kill_session(inst);
	}
	rc = msm_vidc_flush_buffers(inst, buffer_type);
	if (rc)
		return rc;
	s_vpr_h(inst->sid, "%s: stop successful on port: %d\n",
		__func__, port);
	return 0;
}
/*
 * Close the firmware session, wait for the close response, then remove
 * the session from the core list and free the HFI packet buffer.
 * inst->lock is held on entry and dropped around the completion wait
 * so the response thread can signal SIGNAL_CMD_CLOSE.
 * Returns 0 on success, -ETIMEDOUT (after forcing core deinit) if the
 * close response never arrives; cleanup runs in either case.
 */
int msm_vidc_session_close(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct msm_vidc_core *core;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	rc = venus_hfi_session_close(inst);
	if (rc)
		return rc;
	core = inst->core;
	s_vpr_h(inst->sid, "%s: wait on close for time: %d ms\n",
		__func__, core->capabilities[HW_RESPONSE_TIMEOUT].value);
	/* drop the instance lock so the response thread can complete() */
	mutex_unlock(&inst->lock);
	rc = wait_for_completion_timeout(
		&inst->completions[SIGNAL_CMD_CLOSE],
		msecs_to_jiffies(
		core->capabilities[HW_RESPONSE_TIMEOUT].value));
	if (!rc) {
		s_vpr_e(inst->sid, "%s: session close timed out\n", __func__);
		rc = -ETIMEDOUT;
		/* unresponsive firmware: tear the whole core down */
		msm_vidc_core_timeout(inst->core);
	} else {
		/* nonzero return is jiffies remaining, i.e. success */
		rc = 0;
		s_vpr_h(inst->sid, "%s: close successful\n", __func__);
	}
	mutex_lock(&inst->lock);
	msm_vidc_remove_session(inst);
	s_vpr_h(inst->sid, "%s: free session packet data\n", __func__);
	kfree(inst->packet);
	inst->packet = NULL;
	return rc;
}
/*
 * Forcefully terminate a session after a fatal error: close it with
 * firmware and move the instance to the ERROR state.
 * A zero session_id means there is nothing to close (never opened or
 * already removed), so this is a no-op then.
 */
int msm_vidc_kill_session(struct msm_vidc_inst *inst)
{
	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	if (!inst->session_id) {
		s_vpr_e(inst->sid, "%s: already killed\n", __func__);
		return 0;
	}
	s_vpr_e(inst->sid, "%s: killing session\n", __func__);
	msm_vidc_session_close(inst);
	msm_vidc_change_inst_state(inst, MSM_VIDC_ERROR, __func__);
	return 0;
}
  1990. int msm_vidc_get_inst_capability(struct msm_vidc_inst *inst)
  1991. {
  1992. int rc = 0;
  1993. int i;
  1994. struct msm_vidc_core *core;
  1995. d_vpr_h("%s()\n", __func__);
  1996. if (!inst || !inst->core || !inst->capabilities) {
  1997. d_vpr_e("%s: invalid params\n", __func__);
  1998. return -EINVAL;
  1999. }
  2000. core = inst->core;
  2001. for (i = 0; i < core->codecs_count; i++) {
  2002. if (core->inst_caps[i].domain == inst->domain &&
  2003. core->inst_caps[i].codec == inst->codec) {
  2004. s_vpr_h(inst->sid,
  2005. "%s: copied capabilities with %#x codec, %#x domain\n",
  2006. __func__, inst->codec, inst->domain);
  2007. memcpy(inst->capabilities, &core->inst_caps[i],
  2008. sizeof(struct msm_vidc_inst_capability));
  2009. }
  2010. }
  2011. if (!inst->capabilities) {
  2012. s_vpr_e(inst->sid, "%s: capabilities not found\n", __func__);
  2013. return -EINVAL;
  2014. }
  2015. return rc;
  2016. }
/*
 * Counterpart of msm_vidc_init_core_caps(). Freeing is deliberately
 * skipped (see the commented-out kfree below); init_core_caps()
 * tolerates an already-allocated table and reuses it.
 */
static int msm_vidc_deinit_core_caps(struct msm_vidc_core *core)
{
	int rc = 0;

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	d_vpr_h("%s: skip freeing core capabilities\n", __func__);
	//kfree(core->capabilities);
	//core->capabilities = NULL;
	return rc;
}
  2029. static int msm_vidc_init_core_caps(struct msm_vidc_core *core)
  2030. {
  2031. int rc = 0;
  2032. int i, num_platform_caps;
  2033. struct msm_platform_core_capability *platform_data;
  2034. if (!core || !core->platform) {
  2035. d_vpr_e("%s: invalid params\n", __func__);
  2036. rc = -EINVAL;
  2037. goto exit;
  2038. }
  2039. platform_data = core->platform->data.core_data;
  2040. if (!platform_data) {
  2041. d_vpr_e("%s: platform core data is NULL\n",
  2042. __func__);
  2043. rc = -EINVAL;
  2044. goto exit;
  2045. }
  2046. if (!core->capabilities) {
  2047. core->capabilities = kcalloc(1,
  2048. (sizeof(struct msm_vidc_core_capability) *
  2049. CORE_CAP_MAX), GFP_KERNEL);
  2050. if (!core->capabilities) {
  2051. d_vpr_e("%s: failed to allocate core capabilities\n",
  2052. __func__);
  2053. rc = -ENOMEM;
  2054. goto exit;
  2055. }
  2056. } else {
  2057. d_vpr_h("%s: capabilities memory is expected to be freed\n",
  2058. __func__);
  2059. }
  2060. num_platform_caps = core->platform->data.core_data_size;
  2061. /* loop over platform caps */
  2062. for (i = 0; i < num_platform_caps; i++) {
  2063. core->capabilities[platform_data[i].type].type = platform_data[i].type;
  2064. core->capabilities[platform_data[i].type].value = platform_data[i].value;
  2065. }
  2066. exit:
  2067. if (rc)
  2068. msm_vidc_deinit_core_caps(core);
  2069. return rc;
  2070. }
  2071. static void update_inst_capability(struct msm_platform_inst_capability *in,
  2072. struct msm_vidc_inst_capability *capability)
  2073. {
  2074. if (!in || !capability) {
  2075. d_vpr_e("%s: invalid params %pK %pK\n",
  2076. __func__, in, capability);
  2077. return;
  2078. }
  2079. if (in->cap < INST_CAP_MAX) {
  2080. capability->cap[in->cap].cap = in->cap;
  2081. capability->cap[in->cap].min = in->min;
  2082. capability->cap[in->cap].max = in->max;
  2083. capability->cap[in->cap].step_or_mask = in->step_or_mask;
  2084. capability->cap[in->cap].value = in->value;
  2085. capability->cap[in->cap].flags = in->flags;
  2086. capability->cap[in->cap].v4l2_id = in->v4l2_id;
  2087. capability->cap[in->cap].hfi_id = in->hfi_id;
  2088. memcpy(capability->cap[in->cap].parents, in->parents,
  2089. sizeof(capability->cap[in->cap].parents));
  2090. memcpy(capability->cap[in->cap].children, in->children,
  2091. sizeof(capability->cap[in->cap].children));
  2092. capability->cap[in->cap].adjust = in->adjust;
  2093. capability->cap[in->cap].set = in->set;
  2094. } else {
  2095. d_vpr_e("%s: invalid cap %d\n",
  2096. __func__, in->cap);
  2097. }
  2098. }
/*
 * Counterpart of msm_vidc_init_instance_caps(). Freeing is
 * deliberately skipped (see the commented-out kfree below);
 * init_instance_caps() tolerates an existing allocation and reuses it.
 */
static int msm_vidc_deinit_instance_caps(struct msm_vidc_core *core)
{
	int rc = 0;

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	d_vpr_h("%s: skip freeing core->instance capabilities\n", __func__);
	//kfree(core->inst_caps);
	//core->inst_caps = NULL;
	return rc;
}
/*
 * Build core->inst_caps: one msm_vidc_inst_capability entry per
 * supported (domain, codec) pair — encoder entries first, decoder
 * entries after — each populated from the platform instance data.
 */
static int msm_vidc_init_instance_caps(struct msm_vidc_core *core)
{
	int rc = 0;
	u8 enc_valid_codecs, dec_valid_codecs;
	u8 count_bits, enc_codec_count;
	u8 codecs_count = 0;
	int i, j, check_bit, num_platform_caps;
	struct msm_platform_inst_capability *platform_data = NULL;

	if (!core || !core->platform || !core->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		rc = -EINVAL;
		goto error;
	}
	platform_data = core->platform->data.instance_data;
	if (!platform_data) {
		d_vpr_e("%s: platform instance data is NULL\n",
			__func__);
		rc = -EINVAL;
		goto error;
	}
	/*
	 * ENC_CODECS/DEC_CODECS are codec bitmasks. COUNT_BITS consumes
	 * count_bits and accumulates the popcount into codecs_count:
	 * encoder codecs first (saved in enc_codec_count), decoder
	 * codecs added on top for the grand total.
	 */
	enc_valid_codecs = core->capabilities[ENC_CODECS].value;
	count_bits = enc_valid_codecs;
	COUNT_BITS(count_bits, codecs_count);
	enc_codec_count = codecs_count;
	dec_valid_codecs = core->capabilities[DEC_CODECS].value;
	count_bits = dec_valid_codecs;
	COUNT_BITS(count_bits, codecs_count);
	core->codecs_count = codecs_count;
	if (!core->inst_caps) {
		core->inst_caps = kcalloc(codecs_count,
			sizeof(struct msm_vidc_inst_capability),
			GFP_KERNEL);
		if (!core->inst_caps) {
			d_vpr_e("%s: failed to allocate core capabilities\n",
				__func__);
			rc = -ENOMEM;
			goto error;
		}
	} else {
		/* table survives deinit by design; reuse it */
		d_vpr_h("%s: capabilities memory is expected to be freed\n",
			__func__);
	}
	check_bit = 0;
	/* determine codecs for enc domain: one entry per set codec bit */
	for (i = 0; i < enc_codec_count; i++) {
		while (check_bit < (sizeof(enc_valid_codecs) * 8)) {
			if (enc_valid_codecs & BIT(check_bit)) {
				core->inst_caps[i].domain = MSM_VIDC_ENCODER;
				/* store the isolated codec bit */
				core->inst_caps[i].codec = enc_valid_codecs &
						BIT(check_bit);
				check_bit++;
				break;
			}
			check_bit++;
		}
	}
	/* reset checkbit to check from 0th bit of decoder codecs set bits*/
	check_bit = 0;
	/* determine codecs for dec domain (i continues after enc entries) */
	for (; i < codecs_count; i++) {
		while (check_bit < (sizeof(dec_valid_codecs) * 8)) {
			if (dec_valid_codecs & BIT(check_bit)) {
				core->inst_caps[i].domain = MSM_VIDC_DECODER;
				core->inst_caps[i].codec = dec_valid_codecs &
						BIT(check_bit);
				check_bit++;
				break;
			}
			check_bit++;
		}
	}
	num_platform_caps = core->platform->data.instance_data_size;
	d_vpr_h("%s: num caps %d\n", __func__, num_platform_caps);
	/* loop over each platform capability */
	for (i = 0; i < num_platform_caps; i++) {
		/*
		 * Select matching core codec entries and update them;
		 * one platform entry may apply to several (domain, codec)
		 * pairs since domain/codec are bitmask-matched.
		 */
		for (j = 0; j < codecs_count; j++) {
			if ((platform_data[i].domain &
				core->inst_caps[j].domain) &&
				(platform_data[i].codec &
				core->inst_caps[j].codec)) {
				/* update core capability */
				update_inst_capability(&platform_data[i],
					&core->inst_caps[j]);
			}
		}
	}
	return 0;
error:
	if (rc)
		msm_vidc_deinit_instance_caps(core);
	return rc;
}
  2204. int msm_vidc_core_deinit(struct msm_vidc_core *core, bool force)
  2205. {
  2206. int rc = 0;
  2207. struct msm_vidc_inst *inst, *dummy;
  2208. if (!core) {
  2209. d_vpr_e("%s: invalid params\n", __func__);
  2210. return -EINVAL;
  2211. }
  2212. core_lock(core, __func__);
  2213. d_vpr_h("%s()\n", __func__);
  2214. if (core->state == MSM_VIDC_CORE_DEINIT)
  2215. goto unlock;
  2216. if (!force)
  2217. if (!list_empty(&core->instances))
  2218. goto unlock;
  2219. venus_hfi_core_deinit(core);
  2220. msm_vidc_deinit_instance_caps(core);
  2221. msm_vidc_deinit_core_caps(core);
  2222. /* unlink all sessions from core, if any */
  2223. list_for_each_entry_safe(inst, dummy, &core->instances, list) {
  2224. msm_vidc_change_inst_state(inst, MSM_VIDC_ERROR, __func__);
  2225. list_del(&inst->list);
  2226. }
  2227. msm_vidc_change_core_state(core, MSM_VIDC_CORE_DEINIT, __func__);
  2228. unlock:
  2229. core_unlock(core, __func__);
  2230. return rc;
  2231. }
/*
 * Bring up the video core: build core and instance capability tables,
 * move to CORE_INIT state, boot firmware via venus_hfi_core_init() and
 * wait for the sys-init-done response. The core lock is dropped around
 * the completion wait so the response path can signal init_done.
 * Any failure (including timeout) force-deinits the core.
 * Returns 0 on success, negative errno otherwise.
 */
int msm_vidc_core_init(struct msm_vidc_core *core)
{
	int rc = 0;

	if (!core || !core->platform) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core_lock(core, __func__);
	/* already initialized: success, nothing to do */
	if (core->state == MSM_VIDC_CORE_INIT) {
		rc = 0;
		goto unlock;
	}
	rc = msm_vidc_init_core_caps(core);
	if (rc)
		goto unlock;
	rc = msm_vidc_init_instance_caps(core);
	if (rc)
		goto unlock;
	msm_vidc_change_core_state(core, MSM_VIDC_CORE_INIT, __func__);
	init_completion(&core->init_done);
	core->smmu_fault_handled = false;
	core->ssr.trigger = false;
	rc = venus_hfi_core_init(core);
	if (rc) {
		d_vpr_e("%s: core init failed\n", __func__);
		goto unlock;
	}
	d_vpr_h("%s(): waiting for sys init done, %d ms\n", __func__,
		core->capabilities[HW_RESPONSE_TIMEOUT].value);
	/* drop the core lock so the response thread can complete() */
	core_unlock(core, __func__);
	rc = wait_for_completion_timeout(&core->init_done, msecs_to_jiffies(
		core->capabilities[HW_RESPONSE_TIMEOUT].value));
	core_lock(core, __func__);
	if (!rc) {
		d_vpr_e("%s: core init timed out\n", __func__);
		rc = -ETIMEDOUT;
	} else {
		/* nonzero return is jiffies remaining, i.e. success */
		d_vpr_h("%s: system init wait completed\n", __func__);
		rc = 0;
	}
unlock:
	core_unlock(core, __func__);
	if (rc)
		msm_vidc_core_deinit(core, true);
	return rc;
}
/*
 * Hard-failure path (firmware response timeout): force core deinit
 * regardless of active sessions.
 */
int msm_vidc_core_timeout(struct msm_vidc_core *core)
{
	return msm_vidc_core_deinit(core, true);
}
/*
 * IOMMU fault callback.
 * NOTE(review): returns -EINVAL unconditionally — fault logging and
 * recovery are not implemented yet in this handler.
 */
int msm_vidc_smmu_fault_handler(struct iommu_domain *domain,
	struct device *dev, unsigned long iova, int flags, void *data)
{
	return -EINVAL;
}
/*
 * SSR (subsystem restart) trigger hook.
 * NOTE(review): stub — reports success without performing any action.
 */
int msm_vidc_trigger_ssr(struct msm_vidc_core *core,
	enum msm_vidc_ssr_trigger_type type)
{
	return 0;
}
/* SSR work handler: empty stub, SSR handling not implemented yet. */
void msm_vidc_ssr_handler(struct work_struct *work)
{
}
/* Power-management work handler: empty stub, not implemented yet. */
void msm_vidc_pm_work_handler(struct work_struct *work)
{
}
  2298. void msm_vidc_fw_unload_handler(struct work_struct *work)
  2299. {
  2300. struct msm_vidc_core *core = NULL;
  2301. int rc = 0;
  2302. core = container_of(work, struct msm_vidc_core, fw_unload_work.work);
  2303. if (!core) {
  2304. d_vpr_e("%s: invalid work or core handle\n", __func__);
  2305. return;
  2306. }
  2307. d_vpr_h("%s: deinitializing video core\n",__func__);
  2308. rc = msm_vidc_core_deinit(core, false);
  2309. if (rc)
  2310. d_vpr_e("%s: Failed to deinit core\n", __func__);
  2311. }
/* Decode-batching work handler: empty stub, not implemented yet. */
void msm_vidc_batch_handler(struct work_struct *work)
{
}
  2315. int msm_vidc_flush_buffers(struct msm_vidc_inst* inst,
  2316. enum msm_vidc_buffer_type type)
  2317. {
  2318. int rc = 0;
  2319. struct msm_vidc_buffers *buffers;
  2320. struct msm_vidc_buffer *buf, *dummy;
  2321. enum msm_vidc_buffer_type buffer_type[2];
  2322. int i;
  2323. if (!inst) {
  2324. d_vpr_e("%s: invalid params\n", __func__);
  2325. return -EINVAL;
  2326. }
  2327. if (type == MSM_VIDC_BUF_INPUT) {
  2328. buffer_type[0] = MSM_VIDC_BUF_INPUT_META;
  2329. buffer_type[1] = MSM_VIDC_BUF_INPUT;
  2330. } else if (type == MSM_VIDC_BUF_OUTPUT) {
  2331. buffer_type[0] = MSM_VIDC_BUF_OUTPUT_META;
  2332. buffer_type[1] = MSM_VIDC_BUF_OUTPUT;
  2333. } else {
  2334. s_vpr_h(inst->sid, "%s: invalid buffer type %d\n",
  2335. __func__, type);
  2336. return -EINVAL;
  2337. }
  2338. for (i = 0; i < ARRAY_SIZE(buffer_type); i++) {
  2339. buffers = msm_vidc_get_buffers(inst, buffer_type[i], __func__);
  2340. if (!buffers)
  2341. return -EINVAL;
  2342. list_for_each_entry_safe(buf, dummy, &buffers->list, list) {
  2343. if (buf->attr & MSM_VIDC_ATTR_QUEUED ||
  2344. buf->attr & MSM_VIDC_ATTR_DEFERRED) {
  2345. print_vidc_buffer(VIDC_ERR, "flushing buffer", inst, buf);
  2346. msm_vidc_vb2_buffer_done(inst, buf);
  2347. msm_vidc_put_driver_buf(inst, buf);
  2348. }
  2349. }
  2350. }
  2351. return rc;
  2352. }
  2353. void msm_vidc_destroy_buffers(struct msm_vidc_inst *inst)
  2354. {
  2355. struct msm_vidc_buffers *buffers;
  2356. struct msm_vidc_buffer *buf, *dummy;
  2357. enum msm_vidc_buffer_type buf_types[] = {
  2358. MSM_VIDC_BUF_INPUT,
  2359. MSM_VIDC_BUF_OUTPUT,
  2360. MSM_VIDC_BUF_INPUT_META,
  2361. MSM_VIDC_BUF_OUTPUT_META,
  2362. MSM_VIDC_BUF_BIN,
  2363. MSM_VIDC_BUF_ARP,
  2364. MSM_VIDC_BUF_COMV,
  2365. MSM_VIDC_BUF_NON_COMV,
  2366. MSM_VIDC_BUF_LINE,
  2367. MSM_VIDC_BUF_DPB,
  2368. MSM_VIDC_BUF_PERSIST,
  2369. MSM_VIDC_BUF_VPSS,
  2370. };
  2371. int i;
  2372. if (!inst) {
  2373. d_vpr_e("%s: invalid params\n", __func__);
  2374. return;
  2375. }
  2376. for (i = 0; i < ARRAY_SIZE(buf_types); i++) {
  2377. buffers = msm_vidc_get_buffers(inst, buf_types[i], __func__);
  2378. if (!buffers)
  2379. continue;
  2380. list_for_each_entry_safe(buf, dummy, &buffers->list, list) {
  2381. s_vpr_h(inst->sid,
  2382. "destroying buffer: type %d idx %d fd %d addr %#x size %d\n",
  2383. buf->type, buf->index, buf->fd, buf->device_addr, buf->buffer_size);
  2384. if (is_internal_buffer(buf->type))
  2385. msm_vidc_destroy_internal_buffer(inst, buf);
  2386. else
  2387. msm_vidc_put_driver_buf(inst, buf);
  2388. }
  2389. }
  2390. }
/*
 * kref release callback: final teardown once the last reference on an
 * instance is dropped (via put_inst()). Order matters: event queue and
 * vb2 queues first, then codec-specific deinit, then capability memory
 * and the response workqueue.
 */
static void msm_vidc_close_helper(struct kref *kref)
{
	struct msm_vidc_inst *inst = container_of(kref,
		struct msm_vidc_inst, kref);

	s_vpr_h(inst->sid, "%s()\n", __func__);
	msm_vidc_event_queue_deinit(inst);
	msm_vidc_vb2_queue_deinit(inst);
	if (is_decode_session(inst))
		msm_vdec_inst_deinit(inst);
	else if (is_encode_session(inst))
		msm_venc_inst_deinit(inst);
	kfree(inst->capabilities);
	if (inst->response_workq)
		destroy_workqueue(inst->response_workq);
}
  2406. struct msm_vidc_inst *get_inst_ref(struct msm_vidc_core *core,
  2407. struct msm_vidc_inst *instance)
  2408. {
  2409. struct msm_vidc_inst *inst = NULL;
  2410. bool matches = false;
  2411. if (!core) {
  2412. d_vpr_e("%s: invalid params\n", __func__);
  2413. return NULL;
  2414. }
  2415. mutex_lock(&core->lock);
  2416. list_for_each_entry(inst, &core->instances, list) {
  2417. if (inst == instance) {
  2418. matches = true;
  2419. break;
  2420. }
  2421. }
  2422. inst = (matches && kref_get_unless_zero(&inst->kref)) ? inst : NULL;
  2423. mutex_unlock(&core->lock);
  2424. return inst;
  2425. }
  2426. struct msm_vidc_inst *get_inst(struct msm_vidc_core *core,
  2427. u32 session_id)
  2428. {
  2429. struct msm_vidc_inst *inst = NULL;
  2430. bool matches = false;
  2431. if (!core) {
  2432. d_vpr_e("%s: invalid params\n", __func__);
  2433. return NULL;
  2434. }
  2435. mutex_lock(&core->lock);
  2436. list_for_each_entry(inst, &core->instances, list) {
  2437. if (inst->session_id == session_id) {
  2438. matches = true;
  2439. break;
  2440. }
  2441. }
  2442. inst = (matches && kref_get_unless_zero(&inst->kref)) ? inst : NULL;
  2443. mutex_unlock(&core->lock);
  2444. return inst;
  2445. }
/*
 * Drop one reference on the instance; the final put invokes
 * msm_vidc_close_helper() to tear the instance down.
 */
void put_inst(struct msm_vidc_inst *inst)
{
	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}
	kref_put(&inst->kref, msm_vidc_close_helper);
}
/*
 * True if the core mutex is currently held (by any context); @func is
 * the caller's name, currently unused.
 */
bool core_lock_check(struct msm_vidc_core *core, const char* func)
{
	return mutex_is_locked(&core->lock);
}
/* Acquire the core mutex; @function (caller name) is currently unused. */
void core_lock(struct msm_vidc_core *core, const char *function)
{
	mutex_lock(&core->lock);
}
/* Release the core mutex; @function (caller name) is currently unused. */
void core_unlock(struct msm_vidc_core *core, const char *function)
{
	mutex_unlock(&core->lock);
}
/*
 * True if the instance mutex is currently held (by any context); @func
 * is the caller's name, currently unused.
 */
bool inst_lock_check(struct msm_vidc_inst *inst, const char* func)
{
	return mutex_is_locked(&inst->lock);
}
/* Acquire the instance mutex; @function (caller name) is currently unused. */
void inst_lock(struct msm_vidc_inst *inst, const char *function)
{
	mutex_lock(&inst->lock);
}
/* Release the instance mutex; @function (caller name) is currently unused. */
void inst_unlock(struct msm_vidc_inst *inst, const char *function)
{
	mutex_unlock(&inst->lock);
}
  2478. int msm_vidc_update_meta_port_settings(struct msm_vidc_inst *inst)
  2479. {
  2480. struct msm_vidc_core *core;
  2481. struct v4l2_format *fmt;
  2482. if (!inst || !inst->core) {
  2483. d_vpr_e("%s: invalid params\n", __func__);
  2484. return -EINVAL;
  2485. }
  2486. core = inst->core;
  2487. fmt = &inst->fmts[INPUT_META_PORT];
  2488. if (is_input_meta_enabled(inst)) {
  2489. fmt->fmt.meta.buffersize = call_session_op(core,
  2490. buffer_size, inst, MSM_VIDC_BUF_INPUT_META);
  2491. inst->buffers.input_meta.min_count =
  2492. inst->buffers.input.min_count;
  2493. inst->buffers.input_meta.extra_count =
  2494. inst->buffers.input.extra_count;
  2495. inst->buffers.input_meta.actual_count =
  2496. inst->buffers.input.actual_count;
  2497. inst->buffers.input_meta.size = fmt->fmt.meta.buffersize;
  2498. } else {
  2499. fmt->fmt.meta.buffersize = 0;
  2500. inst->buffers.input_meta.min_count = 0;
  2501. inst->buffers.input_meta.extra_count = 0;
  2502. inst->buffers.input_meta.actual_count = 0;
  2503. inst->buffers.input_meta.size = 0;
  2504. }
  2505. fmt = &inst->fmts[OUTPUT_META_PORT];
  2506. if (is_output_meta_enabled(inst)) {
  2507. fmt->fmt.meta.buffersize = call_session_op(core,
  2508. buffer_size, inst, MSM_VIDC_BUF_OUTPUT_META);
  2509. inst->buffers.output_meta.min_count =
  2510. inst->buffers.output.min_count;
  2511. inst->buffers.output_meta.extra_count =
  2512. inst->buffers.output.extra_count;
  2513. inst->buffers.output_meta.actual_count =
  2514. inst->buffers.output.actual_count;
  2515. inst->buffers.output_meta.size = fmt->fmt.meta.buffersize;
  2516. } else {
  2517. fmt->fmt.meta.buffersize = 0;
  2518. inst->buffers.output_meta.min_count = 0;
  2519. inst->buffers.output_meta.extra_count = 0;
  2520. inst->buffers.output_meta.actual_count = 0;
  2521. inst->buffers.output_meta.size = 0;
  2522. }
  2523. return 0;
  2524. }
  2525. void msm_vidc_schedule_core_deinit(struct msm_vidc_core *core)
  2526. {
  2527. if (!core)
  2528. return;
  2529. if (!core->capabilities[FW_UNLOAD].value)
  2530. return;
  2531. cancel_delayed_work(&core->fw_unload_work);
  2532. schedule_delayed_work(&core->fw_unload_work,
  2533. msecs_to_jiffies(core->capabilities[FW_UNLOAD_DELAY].value));
  2534. d_vpr_h("firmware unload delayed by %u ms\n",
  2535. core->capabilities[FW_UNLOAD_DELAY].value);
  2536. return;
  2537. }