msm_vidc_control.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020-2022, The Linux Foundation. All rights reserved.
 */

#include "msm_vidc_control.h"
#include "msm_vidc_debug.h"
#include "hfi_packet.h"
#include "hfi_property.h"
#include "venus_hfi.h"
#include "msm_vidc_internal.h"
#include "msm_vidc_driver.h"
#include "msm_venc.h"
#include "msm_vidc_platform.h"

#define CAP_TO_8BIT_QP(a) { \
    if ((a) < 0) \
        (a) = 0; \
}

static bool is_priv_ctrl(u32 id)
{
    bool private = false;

    if (IS_PRIV_CTRL(id))
        return true;

    /*
     * Treat the standard controls below as private because
     * we have added custom values to them.
     */
    switch (id) {
    /*
     * TODO: V4L2_CID_MPEG_VIDEO_HEVC_PROFILE is a std ctrl, but
     * V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10_STILL_PICTURE support is not
     * available yet. Hence, keep it as a private ctrl for the time being.
     */
    case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:
    /*
     * TODO: V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE is a
     * std ctrl, but it needs some fixes in v4l2-ctrls.c. Hence, keep
     * it as a private ctrl for the time being.
     */
    case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE:
    /*
     * TODO: treat the std ctrls below as private ctrls until
     * all of them are available upstream.
     */
    case V4L2_CID_MPEG_VIDEO_AU_DELIMITER:
    case V4L2_CID_MPEG_VIDEO_LTR_COUNT:
    case V4L2_CID_MPEG_VIDEO_FRAME_LTR_INDEX:
    case V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES:
    case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY:
    case V4L2_CID_MPEG_VIDEO_DEC_DISPLAY_DELAY_ENABLE:
        private = true;
        break;
    default:
        private = false;
        break;
    }

    return private;
}

static bool is_meta_ctrl(u32 id)
{
    return (id == V4L2_CID_MPEG_VIDC_METADATA_LTR_MARK_USE_DETAILS ||
        id == V4L2_CID_MPEG_VIDC_METADATA_SEQ_HEADER_NAL ||
        id == V4L2_CID_MPEG_VIDC_METADATA_DPB_LUMA_CHROMA_MISR ||
        id == V4L2_CID_MPEG_VIDC_METADATA_OPB_LUMA_CHROMA_MISR ||
        id == V4L2_CID_MPEG_VIDC_METADATA_INTERLACE ||
        id == V4L2_CID_MPEG_VIDC_METADATA_CONCEALED_MB_COUNT ||
        id == V4L2_CID_MPEG_VIDC_METADATA_HISTOGRAM_INFO ||
        id == V4L2_CID_MPEG_VIDC_METADATA_SEI_MASTERING_DISPLAY_COLOUR ||
        id == V4L2_CID_MPEG_VIDC_METADATA_SEI_CONTENT_LIGHT_LEVEL ||
        id == V4L2_CID_MPEG_VIDC_METADATA_HDR10PLUS ||
        id == V4L2_CID_MPEG_VIDC_METADATA_EVA_STATS ||
        id == V4L2_CID_MPEG_VIDC_METADATA_BUFFER_TAG ||
        id == V4L2_CID_MPEG_VIDC_METADATA_DPB_TAG_LIST ||
        id == V4L2_CID_MPEG_VIDC_METADATA_SUBFRAME_OUTPUT ||
        id == V4L2_CID_MPEG_VIDC_METADATA_ROI_INFO ||
        id == V4L2_CID_MPEG_VIDC_METADATA_TIMESTAMP ||
        id == V4L2_CID_MPEG_VIDC_METADATA_ENC_QP_METADATA ||
        id == V4L2_CID_MPEG_VIDC_METADATA_BITSTREAM_RESOLUTION ||
        id == V4L2_CID_MPEG_VIDC_METADATA_CROP_OFFSETS);
}

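/*
 * String tables backing the menu-type controls registered in
 * msm_vidc_ctrl_init(); msm_vidc_get_qmenu_type() maps a control id to the
 * matching table.
 */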
static const char * const mpeg_video_rate_control[] = {
    "VBR",
    "CBR",
    "CBR VFR",
    "MBR",
    "MBR VFR",
    "CQ",
    NULL,
};

static const char * const mpeg_video_stream_format[] = {
    "NAL Format Start Codes",
    "NAL Format One NAL Per Buffer",
    "NAL Format One Byte Length",
    "NAL Format Two Byte Length",
    "NAL Format Four Byte Length",
    NULL,
};

static const char * const mpeg_video_blur_types[] = {
    "Blur None",
    "Blur External",
    "Blur Adaptive",
    NULL,
};

static const char * const mpeg_video_avc_coding_layer[] = {
    "B",
    "P",
    NULL,
};

static const char * const mpeg_video_hevc_profile[] = {
    "Main",
    "Main Still Picture",
    "Main 10",
    "Main 10 Still Picture",
    NULL,
};

static const char * const av1_profile[] = {
    "Main",
    "High",
    "Professional",
    NULL,
};

static const char * const av1_level[] = {
    "2.0",
    "2.1",
    "2.2",
    "2.3",
    "3.0",
    "3.1",
    "3.2",
    "3.3",
    "4.0",
    "4.1",
    "4.2",
    "4.3",
    "5.0",
    "5.1",
    "5.2",
    "5.3",
    "6.0",
    "6.1",
    "6.2",
    "6.3",
    "7.0",
    "7.1",
    "7.2",
    "7.3",
    NULL,
};

static const char * const av1_tier[] = {
    "Main",
    "High",
    NULL,
};

static u32 msm_vidc_get_port_info(struct msm_vidc_inst *inst,
    enum msm_vidc_inst_capability_type cap_id)
{
    struct msm_vidc_inst_capability *capability = inst->capabilities;

    if (capability->cap[cap_id].flags & CAP_FLAG_INPUT_PORT &&
        capability->cap[cap_id].flags & CAP_FLAG_OUTPUT_PORT) {
        if (inst->bufq[OUTPUT_PORT].vb2q->streaming)
            return get_hfi_port(inst, INPUT_PORT);
        else
            return get_hfi_port(inst, OUTPUT_PORT);
    }

    if (capability->cap[cap_id].flags & CAP_FLAG_INPUT_PORT)
        return get_hfi_port(inst, INPUT_PORT);
    else if (capability->cap[cap_id].flags & CAP_FLAG_OUTPUT_PORT)
        return get_hfi_port(inst, OUTPUT_PORT);
    else
        return HFI_PORT_NONE;
}

static const char * const *msm_vidc_get_qmenu_type(
    struct msm_vidc_inst *inst, u32 control_id)
{
    switch (control_id) {
    case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
        return mpeg_video_rate_control;
    case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD:
        return mpeg_video_stream_format;
    case V4L2_CID_MPEG_VIDC_VIDEO_BLUR_TYPES:
        return mpeg_video_blur_types;
    case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE:
        return mpeg_video_avc_coding_layer;
    case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:
        return mpeg_video_hevc_profile;
    case V4L2_CID_MPEG_VIDEO_AV1_PROFILE:
        return av1_profile;
    case V4L2_CID_MPEG_VIDEO_AV1_LEVEL:
        return av1_level;
    case V4L2_CID_MPEG_VIDEO_AV1_TIER:
        return av1_tier;
    default:
        i_vpr_e(inst, "%s: No available qmenu for ctrl %#x\n",
            __func__, control_id);
        return NULL;
    }
}

static inline bool has_parents(struct msm_vidc_inst_cap *cap)
{
    return !!cap->parents[0];
}

static inline bool has_childrens(struct msm_vidc_inst_cap *cap)
{
    return !!cap->children[0];
}

static inline bool is_root(struct msm_vidc_inst_cap *cap)
{
    return !has_parents(cap);
}

static inline bool is_valid_cap_id(enum msm_vidc_inst_capability_type cap_id)
{
    return cap_id > INST_CAP_NONE && cap_id < INST_CAP_MAX;
}

static inline bool is_valid_cap(struct msm_vidc_inst_cap *cap)
{
    return is_valid_cap_id(cap->cap_id);
}

static inline bool is_all_parents_visited(
    struct msm_vidc_inst_cap *cap, bool lookup[INST_CAP_MAX])
{
    bool found = true;
    int i;

    for (i = 0; i < MAX_CAP_PARENTS; i++) {
        if (cap->parents[i] == INST_CAP_NONE)
            continue;

        if (!lookup[cap->parents[i]]) {
            found = false;
            break;
        }
    }
    return found;
}

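/*
 * Work-list helpers: each capability id that still needs an adjust/set pass
 * is queued as a small heap-allocated node on either inst->children_list or
 * inst->firmware_list and freed once it has been processed.
 */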
static int add_node_list(struct list_head *list, enum msm_vidc_inst_capability_type cap_id)
{
    struct msm_vidc_inst_cap_entry *entry;

    entry = kzalloc(sizeof(struct msm_vidc_inst_cap_entry), GFP_KERNEL);
    if (!entry) {
        d_vpr_e("%s: msm_vidc_inst_cap_entry alloc failed\n", __func__);
        return -ENOMEM;
    }
    INIT_LIST_HEAD(&entry->list);
    entry->cap_id = cap_id;
    list_add_tail(&entry->list, list);

    return 0;
}

static int add_node(
    struct list_head *list, struct msm_vidc_inst_cap *rcap, bool lookup[INST_CAP_MAX])
{
    int rc = 0;

    if (lookup[rcap->cap_id])
        return 0;

    rc = add_node_list(list, rcap->cap_id);
    if (rc)
        return rc;

    lookup[rcap->cap_id] = true;
    return 0;
}

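/*
 * Read the capability value from hfi_val (whose width is given by
 * payload_size), log it, and send it to firmware as an HFI session property
 * on the port derived from the capability's port flags.
 */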
static int msm_vidc_packetize_control(struct msm_vidc_inst *inst,
    enum msm_vidc_inst_capability_type cap_id, u32 payload_type,
    void *hfi_val, u32 payload_size, const char *func)
{
    int rc = 0;
    u64 payload = 0;

    if (payload_size == sizeof(u32))
        payload = *(u32 *)hfi_val;
    else if (payload_size == sizeof(u64))
        payload = *(u64 *)hfi_val;
    else if (payload_size == sizeof(u8))
        payload = *(u8 *)hfi_val;
    else if (payload_size == sizeof(u16))
        payload = *(u16 *)hfi_val;

    if (payload_size <= sizeof(u64))
        i_vpr_h(inst,
            "set cap: name: %24s, cap value: %#10x, hfi: %#10llx\n",
            cap_name(cap_id), inst->capabilities->cap[cap_id].value, payload);
    else
        i_vpr_h(inst,
            "set cap: name: %24s, hfi payload size %d\n",
            cap_name(cap_id), payload_size);

    rc = venus_hfi_session_property(inst,
        inst->capabilities->cap[cap_id].hfi_id,
        HFI_HOST_FLAGS_NONE,
        msm_vidc_get_port_info(inst, cap_id),
        payload_type,
        hfi_val,
        payload_size);
    if (rc) {
        i_vpr_e(inst, "%s: failed to set cap[%d] %s to fw\n",
            func, cap_id, cap_name(cap_id));
        return rc;
    }

    return 0;
}

static enum msm_vidc_inst_capability_type msm_vidc_get_cap_id(
    struct msm_vidc_inst *inst, u32 id)
{
    enum msm_vidc_inst_capability_type i = INST_CAP_NONE + 1;
    struct msm_vidc_inst_capability *capability;
    enum msm_vidc_inst_capability_type cap_id = INST_CAP_NONE;

    capability = inst->capabilities;
    do {
        if (capability->cap[i].v4l2_id == id) {
            cap_id = capability->cap[i].cap_id;
            break;
        }
        i++;
    } while (i < INST_CAP_MAX);

    return cap_id;
}

static int msm_vidc_add_capid_to_fw_list(struct msm_vidc_inst *inst,
    enum msm_vidc_inst_capability_type cap_id)
{
    struct msm_vidc_inst_cap_entry *entry = NULL;
    int rc = 0;

    /* skip adding if cap_id already present in firmware list */
    list_for_each_entry(entry, &inst->firmware_list, list) {
        if (entry->cap_id == cap_id) {
            i_vpr_l(inst,
                "%s: cap[%d] %s already present in fw list\n",
                __func__, cap_id, cap_name(cap_id));
            return 0;
        }
    }

    rc = add_node_list(&inst->firmware_list, cap_id);
    if (rc)
        return rc;

    return 0;
}

static int msm_vidc_add_children(struct msm_vidc_inst *inst,
    enum msm_vidc_inst_capability_type cap_id)
{
    struct msm_vidc_inst_cap *cap;
    int i, rc = 0;

    cap = &inst->capabilities->cap[cap_id];

    for (i = 0; i < MAX_CAP_CHILDREN; i++) {
        if (!cap->children[i])
            break;

        if (!is_valid_cap_id(cap->children[i]))
            continue;

        rc = add_node_list(&inst->children_list, cap->children[i]);
        if (rc)
            return rc;
    }

    return rc;
}

static bool is_parent_available(struct msm_vidc_inst *inst,
    u32 cap_id, u32 check_parent, const char *func)
{
    int i = 0;
    u32 cap_parent;

    while (i < MAX_CAP_PARENTS &&
           inst->capabilities->cap[cap_id].parents[i]) {
        cap_parent = inst->capabilities->cap[cap_id].parents[i];
        if (cap_parent == check_parent)
            return true;
        i++;
    }

    i_vpr_e(inst,
        "%s: missing parent %s for %s\n",
        func, cap_name(check_parent), cap_name(cap_id));
    return false;
}

int msm_vidc_update_cap_value(struct msm_vidc_inst *inst, u32 cap_id,
    s32 adjusted_val, const char *func)
{
    if (!inst || !inst->capabilities) {
        d_vpr_e("%s: invalid params\n", __func__);
        return -EINVAL;
    }

    if (inst->capabilities->cap[cap_id].value != adjusted_val)
        i_vpr_h(inst,
            "%s: updated database: name: %s, value: %#x -> %#x\n",
            func, cap_name(cap_id),
            inst->capabilities->cap[cap_id].value, adjusted_val);

    inst->capabilities->cap[cap_id].value = adjusted_val;

    return 0;
}

int msm_vidc_get_parent_value(struct msm_vidc_inst *inst,
    u32 cap_id, u32 parent, s32 *value, const char *func)
{
    int rc = 0;

    if (is_parent_available(inst, cap_id, parent, func)) {
        switch (parent) {
        case BITRATE_MODE:
            *value = inst->hfi_rc_type;
            break;
        case LAYER_TYPE:
            *value = inst->hfi_layer_type;
            break;
        default:
            *value = inst->capabilities->cap[parent].value;
            break;
        }
    } else {
        rc = -EINVAL;
    }

    return rc;
}

static int msm_vidc_adjust_hevc_qp(struct msm_vidc_inst *inst,
    enum msm_vidc_inst_capability_type cap_id)
{
    struct msm_vidc_inst_capability *capability;
    s32 pix_fmt = -1;

    capability = inst->capabilities;

    if (!(inst->codec == MSM_VIDC_HEVC || inst->codec == MSM_VIDC_HEIC)) {
        i_vpr_e(inst,
            "%s: incorrect cap[%d] %s entry in database, fix database\n",
            __func__, cap_id, cap_name(cap_id));
        return -EINVAL;
    }

    if (msm_vidc_get_parent_value(inst, cap_id,
        PIX_FMTS, &pix_fmt, __func__))
        return -EINVAL;

    if (pix_fmt == MSM_VIDC_FMT_P010 || pix_fmt == MSM_VIDC_FMT_TP10C)
        goto exit;

    CAP_TO_8BIT_QP(capability->cap[cap_id].value);
    if (cap_id == MIN_FRAME_QP) {
        CAP_TO_8BIT_QP(capability->cap[I_FRAME_MIN_QP].value);
        CAP_TO_8BIT_QP(capability->cap[P_FRAME_MIN_QP].value);
        CAP_TO_8BIT_QP(capability->cap[B_FRAME_MIN_QP].value);
    } else if (cap_id == MAX_FRAME_QP) {
        CAP_TO_8BIT_QP(capability->cap[I_FRAME_MAX_QP].value);
        CAP_TO_8BIT_QP(capability->cap[P_FRAME_MAX_QP].value);
        CAP_TO_8BIT_QP(capability->cap[B_FRAME_MAX_QP].value);
    }

exit:
    return 0;
}

static int msm_vidc_adjust_cap(struct msm_vidc_inst *inst,
    enum msm_vidc_inst_capability_type cap_id,
    struct v4l2_ctrl *ctrl, const char *func)
{
    struct msm_vidc_inst_cap *cap;
    int rc = 0;

    /* validate cap_id */
    if (!is_valid_cap_id(cap_id))
        return 0;

    /* validate cap */
    cap = &inst->capabilities->cap[cap_id];
    if (!is_valid_cap(cap))
        return 0;

    /* check if adjust supported */
    if (!cap->adjust) {
        if (ctrl)
            msm_vidc_update_cap_value(inst, cap_id, ctrl->val, func);
        return 0;
    }

    /* call adjust */
    rc = cap->adjust(inst, ctrl);
    if (rc) {
        i_vpr_e(inst, "%s: adjust cap failed for %s\n", func, cap_name(cap_id));
        return rc;
    }

    return rc;
}

static int msm_vidc_set_cap(struct msm_vidc_inst *inst,
    enum msm_vidc_inst_capability_type cap_id,
    const char *func)
{
    struct msm_vidc_inst_cap *cap;
    int rc = 0;

    /* validate cap_id */
    if (!is_valid_cap_id(cap_id))
        return 0;

    /* validate cap */
    cap = &inst->capabilities->cap[cap_id];
    if (!is_valid_cap(cap))
        return 0;

    /* check if set supported */
    if (!cap->set)
        return 0;

    /* call set */
    rc = cap->set(inst, cap_id);
    if (rc) {
        i_vpr_e(inst, "%s: set cap failed for %s\n", func, cap_name(cap_id));
        return rc;
    }

    return rc;
}

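/*
 * Dynamic (post-streamon) control handling: adjust the requested cap, queue
 * it for firmware, then walk children_list so that any child whose adjusted
 * value changed is also queued and its own children are cascaded in turn.
 */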
static int msm_vidc_adjust_dynamic_property(struct msm_vidc_inst *inst,
    enum msm_vidc_inst_capability_type cap_id, struct v4l2_ctrl *ctrl)
{
    struct msm_vidc_inst_cap_entry *entry = NULL, *temp = NULL;
    struct msm_vidc_inst_capability *capability;
    s32 prev_value;
    int rc = 0;

    if (!inst || !inst->capabilities || !ctrl) {
        d_vpr_e("%s: invalid param\n", __func__);
        return -EINVAL;
    }
    capability = inst->capabilities;

    /* sanitize cap_id */
    if (!is_valid_cap_id(cap_id)) {
        i_vpr_e(inst, "%s: invalid cap_id %u\n", __func__, cap_id);
        return -EINVAL;
    }

    if (!(capability->cap[cap_id].flags & CAP_FLAG_DYNAMIC_ALLOWED)) {
        i_vpr_h(inst,
            "%s: dynamic setting of cap[%d] %s is not allowed\n",
            __func__, cap_id, cap_name(cap_id));
        return -EBUSY;
    }
    i_vpr_h(inst, "%s: cap[%d] %s\n", __func__, cap_id, cap_name(cap_id));

    prev_value = capability->cap[cap_id].value;
    rc = msm_vidc_adjust_cap(inst, cap_id, ctrl, __func__);
    if (rc)
        return rc;

    if (capability->cap[cap_id].value == prev_value && cap_id == GOP_SIZE) {
        /*
         * Ignore setting same GOP size value to firmware to avoid
         * unnecessary generation of IDR frame.
         */
        return 0;
    }

    /* add cap_id to firmware list always */
    rc = msm_vidc_add_capid_to_fw_list(inst, cap_id);
    if (rc)
        goto error;

    /* add children only if cap value modified */
    if (capability->cap[cap_id].value == prev_value)
        return 0;

    rc = msm_vidc_add_children(inst, cap_id);
    if (rc)
        goto error;

    list_for_each_entry_safe(entry, temp, &inst->children_list, list) {
        if (!is_valid_cap_id(entry->cap_id)) {
            rc = -EINVAL;
            goto error;
        }

        if (!capability->cap[entry->cap_id].adjust) {
            i_vpr_e(inst, "%s: child cap must have adjust function %s\n",
                __func__, cap_name(entry->cap_id));
            rc = -EINVAL;
            goto error;
        }

        prev_value = capability->cap[entry->cap_id].value;
        rc = msm_vidc_adjust_cap(inst, entry->cap_id, NULL, __func__);
        if (rc)
            goto error;

        /* add children if cap value modified */
        if (capability->cap[entry->cap_id].value != prev_value) {
            /* add cap_id to firmware list always */
            rc = msm_vidc_add_capid_to_fw_list(inst, entry->cap_id);
            if (rc)
                goto error;

            rc = msm_vidc_add_children(inst, entry->cap_id);
            if (rc)
                goto error;
        }

        list_del_init(&entry->list);
        kfree(entry);
    }

    /* expecting children_list to be empty */
    if (!list_empty(&inst->children_list)) {
        i_vpr_e(inst, "%s: child_list is not empty\n", __func__);
        rc = -EINVAL;
        goto error;
    }

    return 0;
error:
    list_for_each_entry_safe(entry, temp, &inst->children_list, list) {
        i_vpr_e(inst, "%s: child list: %s\n", __func__, cap_name(entry->cap_id));
        list_del_init(&entry->list);
        kfree(entry);
    }
    list_for_each_entry_safe(entry, temp, &inst->firmware_list, list) {
        i_vpr_e(inst, "%s: fw list: %s\n", __func__, cap_name(entry->cap_id));
        list_del_init(&entry->list);
        kfree(entry);
    }

    return rc;
}

static int msm_vidc_set_dynamic_property(struct msm_vidc_inst *inst)
{
    struct msm_vidc_inst_cap_entry *entry = NULL, *temp = NULL;
    int rc = 0;

    if (!inst) {
        d_vpr_e("%s: invalid params\n", __func__);
        return -EINVAL;
    }
    i_vpr_h(inst, "%s()\n", __func__);

    list_for_each_entry_safe(entry, temp, &inst->firmware_list, list) {
        rc = msm_vidc_set_cap(inst, entry->cap_id, __func__);
        if (rc)
            goto error;

        list_del_init(&entry->list);
        kfree(entry);
    }

    return 0;
error:
    list_for_each_entry_safe(entry, temp, &inst->firmware_list, list) {
        i_vpr_e(inst, "%s: fw list: %s\n", __func__, cap_name(entry->cap_id));
        list_del_init(&entry->list);
        kfree(entry);
    }

    return rc;
}

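/*
 * Controls whose value is produced by the driver/firmware rather than the
 * client are marked volatile so that every read goes through
 * msm_v4l2_op_g_volatile_ctrl() instead of the cached control value.
 */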
void msm_vidc_add_volatile_flag(struct v4l2_ctrl *ctrl)
{
    if (ctrl->id == V4L2_CID_MIN_BUFFERS_FOR_OUTPUT ||
        ctrl->id == V4L2_CID_MIN_BUFFERS_FOR_CAPTURE ||
        ctrl->id == V4L2_CID_MPEG_VIDC_AV1D_FILM_GRAIN_PRESENT)
        ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
}

int msm_vidc_ctrl_deinit(struct msm_vidc_inst *inst)
{
    if (!inst) {
        d_vpr_e("%s: invalid parameters\n", __func__);
        return -EINVAL;
    }
    i_vpr_h(inst, "%s(): num ctrls %d\n", __func__, inst->num_ctrls);
    v4l2_ctrl_handler_free(&inst->ctrl_handler);
    memset(&inst->ctrl_handler, 0, sizeof(struct v4l2_ctrl_handler));
    kfree(inst->ctrls);
    inst->ctrls = NULL;

    return 0;
}

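/*
 * Control registration: count the capabilities that expose a v4l2_id, then
 * register each one with the control framework, either as a standard /
 * standard-menu control or, for driver-specific ids, as a custom control
 * built from the capability's min/max/step-or-mask/default values.
 */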
int msm_vidc_ctrl_init(struct msm_vidc_inst *inst)
{
    int rc = 0;
    struct msm_vidc_inst_capability *capability;
    struct msm_vidc_core *core;
    int idx = 0;
    struct v4l2_ctrl_config ctrl_cfg = {0};
    int num_ctrls = 0, ctrl_idx = 0;

    if (!inst || !inst->core || !inst->capabilities) {
        d_vpr_e("%s: invalid params\n", __func__);
        return -EINVAL;
    }
    core = inst->core;
    capability = inst->capabilities;

    if (!core->v4l2_ctrl_ops) {
        i_vpr_e(inst, "%s: no control ops\n", __func__);
        return -EINVAL;
    }

    for (idx = 0; idx < INST_CAP_MAX; idx++) {
        if (capability->cap[idx].v4l2_id)
            num_ctrls++;
    }
    if (!num_ctrls) {
        i_vpr_e(inst, "%s: no ctrls available in cap database\n",
            __func__);
        return -EINVAL;
    }

    inst->ctrls = kcalloc(num_ctrls,
        sizeof(struct v4l2_ctrl *), GFP_KERNEL);
    if (!inst->ctrls) {
        i_vpr_e(inst, "%s: failed to allocate ctrl\n", __func__);
        return -ENOMEM;
    }

    rc = v4l2_ctrl_handler_init(&inst->ctrl_handler, num_ctrls);
    if (rc) {
        i_vpr_e(inst, "control handler init failed, %d\n",
            inst->ctrl_handler.error);
        goto error;
    }

    for (idx = 0; idx < INST_CAP_MAX; idx++) {
        struct v4l2_ctrl *ctrl;

        if (!capability->cap[idx].v4l2_id)
            continue;

        if (ctrl_idx >= num_ctrls) {
            i_vpr_e(inst,
                "%s: invalid ctrl %#x, max allowed %d\n",
                __func__, capability->cap[idx].v4l2_id,
                num_ctrls);
            rc = -EINVAL;
            goto error;
        }
        i_vpr_l(inst,
            "%s: cap[%d] %24s, value %d min %d max %d step_or_mask %#x flags %#x v4l2_id %#x hfi_id %#x\n",
            __func__, idx, cap_name(idx),
            capability->cap[idx].value,
            capability->cap[idx].min,
            capability->cap[idx].max,
            capability->cap[idx].step_or_mask,
            capability->cap[idx].flags,
            capability->cap[idx].v4l2_id,
            capability->cap[idx].hfi_id);

        memset(&ctrl_cfg, 0, sizeof(struct v4l2_ctrl_config));

        if (is_priv_ctrl(capability->cap[idx].v4l2_id)) {
            /* add private control */
            ctrl_cfg.def = capability->cap[idx].value;
            ctrl_cfg.flags = 0;
            ctrl_cfg.id = capability->cap[idx].v4l2_id;
            ctrl_cfg.max = capability->cap[idx].max;
            ctrl_cfg.min = capability->cap[idx].min;
            ctrl_cfg.ops = core->v4l2_ctrl_ops;
            ctrl_cfg.type = (capability->cap[idx].flags &
                    CAP_FLAG_MENU) ?
                    V4L2_CTRL_TYPE_MENU :
                    V4L2_CTRL_TYPE_INTEGER;
            if (ctrl_cfg.type == V4L2_CTRL_TYPE_MENU) {
                ctrl_cfg.menu_skip_mask =
                    ~(capability->cap[idx].step_or_mask);
                ctrl_cfg.qmenu = msm_vidc_get_qmenu_type(inst,
                    capability->cap[idx].v4l2_id);
            } else {
                ctrl_cfg.step =
                    capability->cap[idx].step_or_mask;
            }
            ctrl_cfg.name = cap_name(capability->cap[idx].cap_id);
            if (!ctrl_cfg.name) {
                i_vpr_e(inst, "%s: %#x ctrl name is null\n",
                    __func__, ctrl_cfg.id);
                rc = -EINVAL;
                goto error;
            }
            ctrl = v4l2_ctrl_new_custom(&inst->ctrl_handler,
                &ctrl_cfg, NULL);
        } else {
            if (capability->cap[idx].flags & CAP_FLAG_MENU) {
                ctrl = v4l2_ctrl_new_std_menu(
                    &inst->ctrl_handler,
                    core->v4l2_ctrl_ops,
                    capability->cap[idx].v4l2_id,
                    capability->cap[idx].max,
                    ~(capability->cap[idx].step_or_mask),
                    capability->cap[idx].value);
            } else {
                ctrl = v4l2_ctrl_new_std(&inst->ctrl_handler,
                    core->v4l2_ctrl_ops,
                    capability->cap[idx].v4l2_id,
                    capability->cap[idx].min,
                    capability->cap[idx].max,
                    capability->cap[idx].step_or_mask,
                    capability->cap[idx].value);
            }
        }
        if (!ctrl) {
            i_vpr_e(inst, "%s: invalid ctrl %#x\n", __func__,
                capability->cap[idx].v4l2_id);
            rc = -EINVAL;
            goto error;
        }

        rc = inst->ctrl_handler.error;
        if (rc) {
            i_vpr_e(inst,
                "error adding ctrl (%#x) to ctrl handle, %d\n",
                capability->cap[idx].v4l2_id,
                inst->ctrl_handler.error);
            goto error;
        }

        /*
         * TODO(AS)
         * ctrl->flags |= capability->cap[idx].flags;
         */
        msm_vidc_add_volatile_flag(ctrl);
        ctrl->flags |= V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
        inst->ctrls[ctrl_idx] = ctrl;
        ctrl_idx++;
    }
    inst->num_ctrls = num_ctrls;
    i_vpr_h(inst, "%s(): num ctrls %d\n", __func__, inst->num_ctrls);

    return 0;
error:
    msm_vidc_ctrl_deinit(inst);

    return rc;
}

static int msm_vidc_update_buffer_count_if_needed(struct msm_vidc_inst *inst,
    struct v4l2_ctrl *ctrl)
{
    int rc = 0;
    bool update_input_port = false, update_output_port = false;

    if (!inst || !ctrl) {
        d_vpr_e("%s: invalid parameters\n", __func__);
        return -EINVAL;
    }

    switch (ctrl->id) {
    case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE:
    case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_TYPE:
    case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER:
    case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING:
    case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER:
        update_input_port = true;
        break;
    case V4L2_CID_MPEG_VIDC_THUMBNAIL_MODE:
    case V4L2_CID_MPEG_VIDC_PRIORITY:
        update_input_port = true;
        update_output_port = true;
        break;
    default:
        update_input_port = false;
        update_output_port = false;
        break;
    }

    if (update_input_port) {
        rc = msm_vidc_update_buffer_count(inst, INPUT_PORT);
        if (rc)
            return rc;
    }
    if (update_output_port) {
        rc = msm_vidc_update_buffer_count(inst, OUTPUT_PORT);
        if (rc)
            return rc;
    }

    return rc;
}

static int msm_vidc_allow_secure_session(struct msm_vidc_inst *inst)
{
    int rc = 0;
    struct msm_vidc_inst *i;
    struct msm_vidc_core *core;
    u32 count = 0;

    if (!inst || !inst->core) {
        d_vpr_e("%s: invalid params\n", __func__);
        return -EINVAL;
    }
    core = inst->core;

    if (!core->capabilities) {
        i_vpr_e(inst, "%s: invalid params\n", __func__);
        return -EINVAL;
    }

    core_lock(core, __func__);
    list_for_each_entry(i, &core->instances, list) {
        if (i->capabilities) {
            if (i->capabilities->cap[SECURE_MODE].value)
                count++;
        }
    }

    if (count > core->capabilities[MAX_SECURE_SESSION_COUNT].value) {
        i_vpr_e(inst,
            "%s: total secure sessions %d exceeded max limit %d\n",
            __func__, count,
            core->capabilities[MAX_SECURE_SESSION_COUNT].value);
        rc = -EINVAL;
    }
    core_unlock(core, __func__);

    return rc;
}

int msm_v4l2_op_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
    int rc = 0;
    struct msm_vidc_inst *inst;

    if (!ctrl) {
        d_vpr_e("%s: invalid ctrl parameter\n", __func__);
        return -EINVAL;
    }

    inst = container_of(ctrl->handler,
        struct msm_vidc_inst, ctrl_handler);
    if (!inst) {
        d_vpr_e("%s: could not find inst for ctrl %s id %#x\n",
            __func__, ctrl->name, ctrl->id);
        return -EINVAL;
    }

    rc = msm_vidc_get_control(inst, ctrl);
    if (rc)
        i_vpr_e(inst, "%s: failed for ctrl %s id %#x\n",
            __func__, ctrl->name, ctrl->id);
    else
        i_vpr_h(inst, "%s: ctrl %s id %#x, value %d\n",
            __func__, ctrl->name, ctrl->id, ctrl->val);

    return rc;
}

static int msm_vidc_update_static_property(struct msm_vidc_inst *inst,
    enum msm_vidc_inst_capability_type cap_id, struct v4l2_ctrl *ctrl)
{
    int rc = 0;

    if (!inst || !ctrl) {
        d_vpr_e("%s: invalid params\n", __func__);
        return -EINVAL;
    }

    /* update value to db */
    msm_vidc_update_cap_value(inst, cap_id, ctrl->val, __func__);

    if (ctrl->id == V4L2_CID_MPEG_VIDC_SECURE) {
        if (ctrl->val) {
            rc = msm_vidc_allow_secure_session(inst);
            if (rc)
                return rc;
        }
    }

    if (ctrl->id == V4L2_CID_ROTATE) {
        struct v4l2_format *output_fmt;

        output_fmt = &inst->fmts[OUTPUT_PORT];
        rc = msm_venc_s_fmt_output(inst, output_fmt);
        if (rc)
            return rc;
    }

    if (ctrl->id == V4L2_CID_MPEG_VIDC_MIN_BITSTREAM_SIZE_OVERWRITE) {
        rc = msm_vidc_update_bitstream_buffer_size(inst);
        if (rc)
            return rc;
    }

    if (ctrl->id == V4L2_CID_MPEG_VIDC_PRIORITY) {
        rc = msm_vidc_adjust_session_priority(inst, ctrl);
        if (rc)
            return rc;
        /*
         * This is the last static s_ctrl from the client (commit point),
         * so update the input & output counts to reflect the final buffer
         * counts based on dcvs & decoder_batching enable/disable. The
         * client is expected to query the final counts after setting the
         * priority control.
         */
        if (is_decode_session(inst))
            inst->decode_batch.enable = msm_vidc_allow_decode_batch(inst);
        msm_vidc_allow_dcvs(inst);
    }

    if (is_meta_ctrl(ctrl->id)) {
        if (cap_id == META_DPB_TAG_LIST) {
            /*
             * To subscribe to HFI_PROP_DPB_TAG_LIST
             * data in FBD, HFI_PROP_BUFFER_TAG data
             * must be delivered via FTB. Hence, update
             * META_OUTPUT_BUF_TAG when META_DPB_TAG_LIST
             * is updated.
             */
            msm_vidc_update_cap_value(inst, META_OUTPUT_BUF_TAG,
                ctrl->val, __func__);
        }

        rc = msm_vidc_update_meta_port_settings(inst);
        if (rc)
            return rc;
    }

    rc = msm_vidc_update_buffer_count_if_needed(inst, ctrl);
    if (rc)
        return rc;

    return rc;
}

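/*
 * Top-level s_ctrl handler: while the OUTPUT_PORT queue is not yet streaming
 * the value is handled as a static property and recorded in the capability
 * database; once streaming it is handled as a dynamic property, adjusted
 * together with its dependent caps and pushed to firmware.
 */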
int msm_v4l2_op_s_ctrl(struct v4l2_ctrl *ctrl)
{
    int rc = 0;
    struct msm_vidc_inst *inst;
    enum msm_vidc_inst_capability_type cap_id;
    struct msm_vidc_inst_capability *capability;

    if (!ctrl) {
        d_vpr_e("%s: invalid ctrl parameter\n", __func__);
        return -EINVAL;
    }

    inst = container_of(ctrl->handler,
        struct msm_vidc_inst, ctrl_handler);
    if (!inst || !inst->capabilities) {
        d_vpr_e("%s: invalid parameters for inst\n", __func__);
        return -EINVAL;
    }
    capability = inst->capabilities;

    i_vpr_h(inst, "%s: state %s, name %s, id 0x%x value %d\n",
        __func__, state_name(inst->state), ctrl->name, ctrl->id, ctrl->val);

    if (!msm_vidc_allow_s_ctrl(inst, ctrl->id))
        return -EINVAL;

    cap_id = msm_vidc_get_cap_id(inst, ctrl->id);
    if (!is_valid_cap_id(cap_id)) {
        i_vpr_e(inst, "%s: could not find cap_id for ctrl %s\n",
            __func__, ctrl->name);
        return -EINVAL;
    }

    if (ctrl->id == V4L2_CID_MPEG_VIDC_INPUT_METADATA_FD) {
        if (!capability->cap[INPUT_META_VIA_REQUEST].value) {
            i_vpr_e(inst,
                "%s: input metadata not enabled via request\n", __func__);
            return -EINVAL;
        }
        rc = msm_vidc_create_input_metadata_buffer(inst, ctrl->val);
        if (rc)
            return rc;
    }

    /* mark client set flag */
    capability->cap[cap_id].flags |= CAP_FLAG_CLIENT_SET;

    if (!inst->bufq[OUTPUT_PORT].vb2q->streaming) {
        /* static case */
        rc = msm_vidc_update_static_property(inst, cap_id, ctrl);
        if (rc)
            return rc;
    } else {
        /* dynamic case */
        rc = msm_vidc_adjust_dynamic_property(inst, cap_id, ctrl);
        if (rc)
            return rc;

        rc = msm_vidc_set_dynamic_property(inst);
        if (rc)
            return rc;
    }

    return rc;
}

int msm_vidc_adjust_entropy_mode(void *instance, struct v4l2_ctrl *ctrl)
{
    struct msm_vidc_inst_capability *capability;
    s32 adjusted_value;
    struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
    s32 profile = -1;

    if (!inst || !inst->capabilities) {
        d_vpr_e("%s: invalid params\n", __func__);
        return -EINVAL;
    }
    capability = inst->capabilities;

    /* ctrl is always NULL in streamon case */
    adjusted_value = ctrl ? ctrl->val :
        capability->cap[ENTROPY_MODE].value;

    if (inst->codec != MSM_VIDC_H264) {
        i_vpr_e(inst,
            "%s: incorrect entry in database. fix the database\n",
            __func__);
        return 0;
    }

    if (msm_vidc_get_parent_value(inst, ENTROPY_MODE,
        PROFILE, &profile, __func__))
        return -EINVAL;

    if (profile == V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE ||
        profile == V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE)
        adjusted_value = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC;

    msm_vidc_update_cap_value(inst, ENTROPY_MODE,
        adjusted_value, __func__);

    return 0;
}

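/*
 * Map the V4L2 bitrate mode (together with the lossless, frame-rc-enable and
 * frame-skip caps) onto the HFI rate-control type cached in
 * inst->hfi_rc_type, which other adjust functions read back as the
 * BITRATE_MODE parent value.
 */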
int msm_vidc_adjust_bitrate_mode(void *instance, struct v4l2_ctrl *ctrl)
{
    struct msm_vidc_inst_capability *capability;
    struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
    int lossless, frame_rc, bitrate_mode, frame_skip;
    u32 hfi_value = 0;

    if (!inst || !inst->capabilities) {
        d_vpr_e("%s: invalid params\n", __func__);
        return -EINVAL;
    }
    capability = inst->capabilities;

    bitrate_mode = capability->cap[BITRATE_MODE].value;
    lossless = capability->cap[LOSSLESS].value;
    frame_rc = capability->cap[FRAME_RC_ENABLE].value;
    frame_skip = capability->cap[FRAME_SKIP_MODE].value;

    if (lossless || (msm_vidc_lossless_encode &&
        inst->codec == MSM_VIDC_HEVC)) {
        hfi_value = HFI_RC_LOSSLESS;
        goto update;
    }

    if (!frame_rc && !is_image_session(inst)) {
        hfi_value = HFI_RC_OFF;
        goto update;
    }

    if (bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) {
        hfi_value = HFI_RC_VBR_CFR;
    } else if (bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR) {
        if (frame_skip)
            hfi_value = HFI_RC_CBR_VFR;
        else
            hfi_value = HFI_RC_CBR_CFR;
    } else if (bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CQ) {
        hfi_value = HFI_RC_CQ;
    }

update:
    inst->hfi_rc_type = hfi_value;
    i_vpr_h(inst, "%s: hfi rc type: %#x\n",
        __func__, inst->hfi_rc_type);

    return 0;
}

int msm_vidc_adjust_profile(void *instance, struct v4l2_ctrl *ctrl)
{
    struct msm_vidc_inst_capability *capability;
    s32 adjusted_value;
    struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
    s32 pix_fmt = -1;

    if (!inst || !inst->capabilities) {
        d_vpr_e("%s: invalid params\n", __func__);
        return -EINVAL;
    }
    capability = inst->capabilities;

    adjusted_value = ctrl ? ctrl->val : capability->cap[PROFILE].value;

    /*
     * PIX_FMTS dependency is common across all chipsets.
     * Hence, PIX_FMTS must be specified as parent for HEVC profile.
     * Otherwise it would be a database error that should be fixed.
     */
    if (msm_vidc_get_parent_value(inst, PROFILE, PIX_FMTS,
        &pix_fmt, __func__))
        return -EINVAL;

    /* 10 bit profile for 10 bit color format */
    if (pix_fmt == MSM_VIDC_FMT_TP10C || pix_fmt == MSM_VIDC_FMT_P010) {
        if (is_image_session(inst))
            adjusted_value = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10_STILL_PICTURE;
        else
            adjusted_value = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10;
    } else {
        /* 8 bit profile for 8 bit color format */
        if (is_image_session(inst))
            adjusted_value = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE;
        else
            adjusted_value = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN;
    }

    msm_vidc_update_cap_value(inst, PROFILE,
        adjusted_value, __func__);

    return 0;
}

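/*
 * LTR encoding is only supported with rate control off or CBR (CFR/VFR) and
 * is not compatible with all-intra encoding, so the requested LTR count is
 * forced to 0 in the other rate-control modes.
 */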
int msm_vidc_adjust_ltr_count(void *instance, struct v4l2_ctrl *ctrl)
{
    struct msm_vidc_inst_capability *capability;
    s32 adjusted_value;
    struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
    s32 rc_type = -1, all_intra = 0;

    if (!inst || !inst->capabilities) {
        d_vpr_e("%s: invalid params\n", __func__);
        return -EINVAL;
    }
    capability = inst->capabilities;

    adjusted_value = ctrl ? ctrl->val : capability->cap[LTR_COUNT].value;

    if (msm_vidc_get_parent_value(inst, LTR_COUNT, BITRATE_MODE,
        &rc_type, __func__) ||
        msm_vidc_get_parent_value(inst, LTR_COUNT, ALL_INTRA,
        &all_intra, __func__))
        return -EINVAL;

    if ((rc_type != HFI_RC_OFF &&
         rc_type != HFI_RC_CBR_CFR &&
         rc_type != HFI_RC_CBR_VFR) ||
        all_intra) {
        adjusted_value = 0;
        i_vpr_h(inst,
            "%s: ltr count unsupported, rc_type: %#x, all_intra %d\n",
            __func__, rc_type, all_intra);
    }

    msm_vidc_update_cap_value(inst, LTR_COUNT,
        adjusted_value, __func__);

    return 0;
}

int msm_vidc_adjust_use_ltr(void *instance, struct v4l2_ctrl *ctrl)
{
    struct msm_vidc_inst_capability *capability;
    s32 adjusted_value, ltr_count;
    struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;

    if (!inst || !inst->capabilities) {
        d_vpr_e("%s: invalid params\n", __func__);
        return -EINVAL;
    }
    capability = inst->capabilities;

    adjusted_value = ctrl ? ctrl->val : capability->cap[USE_LTR].value;

    /*
     * Since USE_LTR is only set dynamically, and LTR_COUNT is a static
     * control, there is no need to make LTR_COUNT a parent of USE_LTR:
     * the LTR_COUNT value will already be up to date whenever USE_LTR
     * is set dynamically.
     */
    ltr_count = capability->cap[LTR_COUNT].value;
    if (!ltr_count)
        return 0;

    if (adjusted_value <= 0 ||
        adjusted_value > ((1 << ltr_count) - 1)) {
        /*
         * USE_LTR is a bitmask value, hence it should be
         * > 0 and <= (2 ^ LTR_COUNT) - 1
         */
        i_vpr_e(inst, "%s: invalid value %d\n",
            __func__, adjusted_value);
        return 0;
    }

    /* USE_LTR value is a bitmask value */
    msm_vidc_update_cap_value(inst, USE_LTR,
        adjusted_value, __func__);

    return 0;
}

int msm_vidc_adjust_mark_ltr(void *instance, struct v4l2_ctrl *ctrl)
{
    struct msm_vidc_inst_capability *capability;
    s32 adjusted_value, ltr_count;
    struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;

    if (!inst || !inst->capabilities) {
        d_vpr_e("%s: invalid params\n", __func__);
        return -EINVAL;
    }
    capability = inst->capabilities;

    adjusted_value = ctrl ? ctrl->val : capability->cap[MARK_LTR].value;

    /*
     * Since MARK_LTR is only set dynamically, and LTR_COUNT is a static
     * control, there is no need to make LTR_COUNT a parent of MARK_LTR:
     * the LTR_COUNT value will already be up to date whenever MARK_LTR
     * is set dynamically.
     */
    ltr_count = capability->cap[LTR_COUNT].value;
    if (!ltr_count)
        return 0;

    if (adjusted_value < 0 ||
        adjusted_value > (ltr_count - 1)) {
        /* MARK_LTR value should be >= 0 and <= (LTR_COUNT - 1) */
        i_vpr_e(inst, "%s: invalid value %d\n",
            __func__, adjusted_value);
        return 0;
    }

    msm_vidc_update_cap_value(inst, MARK_LTR,
        adjusted_value, __func__);

    return 0;
}

int msm_vidc_adjust_ir_random(void *instance, struct v4l2_ctrl *ctrl)
{
    struct msm_vidc_inst_capability *capability;
    s32 adjusted_value, all_intra = 0;
    struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;

    if (!inst || !inst->capabilities) {
        d_vpr_e("%s: invalid params\n", __func__);
        return -EINVAL;
    }
    capability = inst->capabilities;

    adjusted_value = ctrl ? ctrl->val : capability->cap[IR_RANDOM].value;

    if (msm_vidc_get_parent_value(inst, IR_RANDOM, ALL_INTRA,
        &all_intra, __func__))
        return -EINVAL;

    if (all_intra) {
        adjusted_value = 0;
        i_vpr_h(inst, "%s: IR unsupported, all intra: %d\n",
            __func__, all_intra);
        goto exit;
    }

    /*
     * BITRATE_MODE dependency is NOT common across all chipsets.
     * Hence, do not return an error if it is not specified as one of
     * the parents.
     */
    if (is_parent_available(inst, IR_RANDOM, BITRATE_MODE, __func__) &&
        inst->hfi_rc_type != HFI_RC_CBR_CFR &&
        inst->hfi_rc_type != HFI_RC_CBR_VFR)
        adjusted_value = 0;

exit:
    msm_vidc_update_cap_value(inst, IR_RANDOM,
        adjusted_value, __func__);

    return 0;
}

  1206. int msm_vidc_adjust_delta_based_rc(void *instance, struct v4l2_ctrl *ctrl)
  1207. {
  1208. struct msm_vidc_inst_capability *capability;
  1209. s32 adjusted_value;
  1210. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1211. s32 rc_type = -1;
  1212. if (!inst || !inst->capabilities) {
  1213. d_vpr_e("%s: invalid params\n", __func__);
  1214. return -EINVAL;
  1215. }
  1216. capability = inst->capabilities;
  1217. adjusted_value = ctrl ? ctrl->val :
  1218. capability->cap[TIME_DELTA_BASED_RC].value;
  1219. if (msm_vidc_get_parent_value(inst, TIME_DELTA_BASED_RC,
  1220. BITRATE_MODE, &rc_type, __func__))
  1221. return -EINVAL;
  1222. if (rc_type == HFI_RC_OFF ||
  1223. rc_type == HFI_RC_CQ)
  1224. adjusted_value = 0;
  1225. msm_vidc_update_cap_value(inst, TIME_DELTA_BASED_RC,
  1226. adjusted_value, __func__);
  1227. return 0;
  1228. }
  1229. int msm_vidc_adjust_transform_8x8(void *instance, struct v4l2_ctrl *ctrl)
  1230. {
  1231. struct msm_vidc_inst_capability *capability;
  1232. s32 adjusted_value;
  1233. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1234. s32 profile = -1;
  1235. if (!inst || !inst->capabilities) {
  1236. d_vpr_e("%s: invalid params\n", __func__);
  1237. return -EINVAL;
  1238. }
  1239. capability = inst->capabilities;
  1240. adjusted_value = ctrl ? ctrl->val :
  1241. capability->cap[TRANSFORM_8X8].value;
  1242. if (inst->codec != MSM_VIDC_H264) {
  1243. i_vpr_e(inst,
  1244. "%s: incorrect entry in database. fix the database\n",
  1245. __func__);
  1246. return 0;
  1247. }
  1248. if (msm_vidc_get_parent_value(inst, TRANSFORM_8X8,
  1249. PROFILE, &profile, __func__))
  1250. return -EINVAL;
  1251. if (profile != V4L2_MPEG_VIDEO_H264_PROFILE_HIGH &&
  1252. profile != V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH)
  1253. adjusted_value = V4L2_MPEG_MSM_VIDC_DISABLE;
  1254. msm_vidc_update_cap_value(inst, TRANSFORM_8X8,
  1255. adjusted_value, __func__);
  1256. return 0;
  1257. }
  1258. int msm_vidc_adjust_chroma_qp_index_offset(void *instance,
  1259. struct v4l2_ctrl *ctrl)
  1260. {
  1261. struct msm_vidc_inst_capability *capability;
  1262. s32 adjusted_value;
  1263. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1264. if (!inst || !inst->capabilities) {
  1265. d_vpr_e("%s: invalid params\n", __func__);
  1266. return -EINVAL;
  1267. }
  1268. capability = inst->capabilities;
  1269. adjusted_value = ctrl ? ctrl->val :
  1270. capability->cap[CHROMA_QP_INDEX_OFFSET].value;
  1271. if (adjusted_value != MIN_CHROMA_QP_OFFSET)
  1272. adjusted_value = MAX_CHROMA_QP_OFFSET;
  1273. msm_vidc_update_cap_value(inst, CHROMA_QP_INDEX_OFFSET,
  1274. adjusted_value, __func__);
  1275. return 0;
  1276. }
  1277. int msm_vidc_adjust_slice_count(void *instance, struct v4l2_ctrl *ctrl)
  1278. {
  1279. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1280. struct msm_vidc_inst_capability *capability;
  1281. struct v4l2_format *output_fmt;
  1282. s32 adjusted_value, rc_type = -1, slice_mode, all_intra = 0;
  1283. u32 slice_val, mbpf = 0, mbps = 0, max_mbpf = 0, max_mbps = 0;
  1284. u32 update_cap, max_avg_slicesize, output_width, output_height;
  1285. u32 min_width, min_height, max_width, max_height, fps;
  1286. if (!inst || !inst->capabilities) {
  1287. d_vpr_e("%s: invalid params\n", __func__);
  1288. return -EINVAL;
  1289. }
  1290. capability = inst->capabilities;
  1291. slice_mode = ctrl ? ctrl->val :
  1292. capability->cap[SLICE_MODE].value;
  1293. if (msm_vidc_get_parent_value(inst, SLICE_MODE,
  1294. BITRATE_MODE, &rc_type, __func__) ||
  1295. msm_vidc_get_parent_value(inst, SLICE_MODE,
  1296. ALL_INTRA, &all_intra, __func__))
  1297. return -EINVAL;
  1298. if (slice_mode == V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE)
  1299. return 0;
  1300. fps = capability->cap[FRAME_RATE].value >> 16;
  1301. if (fps > MAX_SLICES_FRAME_RATE ||
  1302. (rc_type != HFI_RC_OFF &&
  1303. rc_type != HFI_RC_CBR_CFR &&
  1304. rc_type != HFI_RC_CBR_VFR) ||
  1305. all_intra) {
  1306. adjusted_value = V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE;
  1307. update_cap = SLICE_MODE;
  1308. i_vpr_h(inst,
  1309. "%s: slice unsupported, fps: %u, rc_type: %#x, all_intra %d\n",
  1310. __func__, fps, rc_type, all_intra);
  1311. goto exit;
  1312. }
  1313. output_fmt = &inst->fmts[OUTPUT_PORT];
  1314. output_width = output_fmt->fmt.pix_mp.width;
  1315. output_height = output_fmt->fmt.pix_mp.height;
  1316. max_width = (slice_mode == V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB) ?
  1317. MAX_MB_SLICE_WIDTH : MAX_BYTES_SLICE_WIDTH;
  1318. max_height = (slice_mode == V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB) ?
  1319. MAX_MB_SLICE_HEIGHT : MAX_BYTES_SLICE_HEIGHT;
  1320. min_width = (inst->codec == MSM_VIDC_HEVC) ?
  1321. MIN_HEVC_SLICE_WIDTH : MIN_AVC_SLICE_WIDTH;
  1322. min_height = MIN_SLICE_HEIGHT;
  1323. /*
  1324. * For V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB:
  1325. * - width >= 384 and height >= 128
  1326. * - width and height <= 4096
  1327. * For V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES:
  1328. * - width >= 192 and height >= 128
  1329. * - width and height <= 1920
  1330. */
  1331. if (output_width < min_width || output_height < min_height ||
1332. output_width > max_width || output_height > max_height) {
  1333. adjusted_value = V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE;
  1334. update_cap = SLICE_MODE;
  1335. i_vpr_h(inst,
  1336. "%s: slice unsupported, codec: %#x wxh: [%dx%d]\n",
  1337. __func__, inst->codec, output_width, output_height);
  1338. goto exit;
  1339. }
  1340. mbpf = NUM_MBS_PER_FRAME(output_height, output_width);
  1341. mbps = NUM_MBS_PER_SEC(output_height, output_width, fps);
  1342. max_mbpf = NUM_MBS_PER_FRAME(max_height, max_width);
  1343. max_mbps = NUM_MBS_PER_SEC(max_height, max_width, MAX_SLICES_FRAME_RATE);
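/*
 * Worked example (editorial addition, assuming NUM_MBS_PER_FRAME counts
 * 16x16 macroblocks and the MAX_MB ceilings are the 4096x4096 noted above):
 * max_mbpf = 256 * 256 = 65536 macroblocks per frame.
 */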
  1344. if (mbpf > max_mbpf || mbps > max_mbps) {
  1345. adjusted_value = V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE;
  1346. update_cap = SLICE_MODE;
  1347. i_vpr_h(inst,
  1348. "%s: Unsupported, mbpf[%u] > max[%u], mbps[%u] > max[%u]\n",
  1349. __func__, mbpf, max_mbpf, mbps, max_mbps);
  1350. goto exit;
  1351. }
  1352. if (slice_mode == V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB) {
  1353. update_cap = SLICE_MAX_MB;
  1354. slice_val = capability->cap[SLICE_MAX_MB].value;
  1355. slice_val = max(slice_val, mbpf / MAX_SLICES_PER_FRAME);
  1356. } else {
  1357. slice_val = capability->cap[SLICE_MAX_BYTES].value;
  1358. update_cap = SLICE_MAX_BYTES;
  1359. if (rc_type != HFI_RC_OFF) {
  1360. max_avg_slicesize = ((capability->cap[BIT_RATE].value /
  1361. fps) / 8) /
  1362. MAX_SLICES_PER_FRAME;
  1363. slice_val = max(slice_val, max_avg_slicesize);
  1364. }
  1365. }
  1366. adjusted_value = slice_val;
  1367. exit:
  1368. msm_vidc_update_cap_value(inst, update_cap,
  1369. adjusted_value, __func__);
  1370. return 0;
  1371. }
  1372. static int msm_vidc_adjust_static_layer_count_and_type(struct msm_vidc_inst *inst,
  1373. s32 layer_count)
  1374. {
  1375. bool hb_requested = false;
  1376. if (!inst || !inst->capabilities) {
  1377. d_vpr_e("%s: invalid params\n", __func__);
  1378. return -EINVAL;
  1379. }
  1380. if (!layer_count) {
1381. i_vpr_h(inst, "client did not enable layer encoding\n");
  1382. goto exit;
  1383. }
  1384. if (inst->hfi_rc_type == HFI_RC_CQ) {
  1385. i_vpr_h(inst, "rc type is CQ, disabling layer encoding\n");
  1386. layer_count = 0;
  1387. goto exit;
  1388. }
  1389. if (inst->codec == MSM_VIDC_H264) {
  1390. if (!inst->capabilities->cap[LAYER_ENABLE].value) {
  1391. layer_count = 0;
  1392. goto exit;
  1393. }
  1394. hb_requested = (inst->capabilities->cap[LAYER_TYPE].value ==
  1395. V4L2_MPEG_VIDEO_H264_HIERARCHICAL_CODING_B) ?
  1396. true : false;
  1397. } else if (inst->codec == MSM_VIDC_HEVC) {
  1398. hb_requested = (inst->capabilities->cap[LAYER_TYPE].value ==
  1399. V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_B) ?
  1400. true : false;
  1401. }
  1402. if (hb_requested && inst->hfi_rc_type != HFI_RC_VBR_CFR) {
  1403. i_vpr_h(inst,
  1404. "%s: HB layer encoding is supported for VBR rc only\n",
  1405. __func__);
  1406. layer_count = 0;
  1407. goto exit;
  1408. }
  1409. if (!inst->capabilities->cap[META_EVA_STATS].value &&
  1410. hb_requested && (layer_count > 1)) {
  1411. layer_count = 1;
  1412. i_vpr_h(inst,
1413. "%s: with cvp disabled, only one enh layer HB is supported\n",
  1414. __func__);
  1415. }
  1416. /* decide hfi layer type */
  1417. if (hb_requested) {
  1418. inst->hfi_layer_type = HFI_HIER_B;
  1419. } else {
  1420. /* HP requested */
  1421. inst->hfi_layer_type = HFI_HIER_P_SLIDING_WINDOW;
  1422. if (inst->codec == MSM_VIDC_H264 &&
  1423. inst->hfi_rc_type == HFI_RC_VBR_CFR)
  1424. inst->hfi_layer_type = HFI_HIER_P_HYBRID_LTR;
  1425. }
  1426. /* sanitize layer count based on layer type and codec */
  1427. if (inst->hfi_layer_type == HFI_HIER_B) {
  1428. if (layer_count > MAX_ENH_LAYER_HB)
  1429. layer_count = MAX_ENH_LAYER_HB;
  1430. } else if (inst->hfi_layer_type == HFI_HIER_P_HYBRID_LTR) {
  1431. if (layer_count > MAX_AVC_ENH_LAYER_HYBRID_HP)
  1432. layer_count = MAX_AVC_ENH_LAYER_HYBRID_HP;
  1433. } else if (inst->hfi_layer_type == HFI_HIER_P_SLIDING_WINDOW) {
  1434. if (inst->codec == MSM_VIDC_H264) {
  1435. if (layer_count > MAX_AVC_ENH_LAYER_SLIDING_WINDOW)
  1436. layer_count = MAX_AVC_ENH_LAYER_SLIDING_WINDOW;
  1437. } else {
  1438. if (layer_count > MAX_HEVC_ENH_LAYER_SLIDING_WINDOW)
  1439. layer_count = MAX_HEVC_ENH_LAYER_SLIDING_WINDOW;
  1440. }
  1441. }
  1442. exit:
  1443. msm_vidc_update_cap_value(inst, ENH_LAYER_COUNT,
  1444. layer_count, __func__);
  1445. inst->capabilities->cap[ENH_LAYER_COUNT].max = layer_count;
  1446. return 0;
  1447. }
  1448. int msm_vidc_adjust_layer_count(void *instance, struct v4l2_ctrl *ctrl)
  1449. {
  1450. int rc = 0;
  1451. struct msm_vidc_inst_capability *capability;
  1452. s32 client_layer_count;
  1453. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1454. if (!inst || !inst->capabilities) {
  1455. d_vpr_e("%s: invalid params\n", __func__);
  1456. return -EINVAL;
  1457. }
  1458. capability = inst->capabilities;
  1459. client_layer_count = ctrl ? ctrl->val :
  1460. capability->cap[ENH_LAYER_COUNT].value;
  1461. if (!is_parent_available(inst, ENH_LAYER_COUNT,
  1462. BITRATE_MODE, __func__) ||
  1463. !is_parent_available(inst, ENH_LAYER_COUNT,
  1464. META_EVA_STATS, __func__))
  1465. return -EINVAL;
  1466. if (!inst->bufq[OUTPUT_PORT].vb2q->streaming) {
  1467. rc = msm_vidc_adjust_static_layer_count_and_type(inst,
  1468. client_layer_count);
  1469. if (rc)
  1470. goto exit;
  1471. } else {
  1472. if (inst->hfi_layer_type == HFI_HIER_P_HYBRID_LTR ||
  1473. inst->hfi_layer_type == HFI_HIER_P_SLIDING_WINDOW) {
  1474. /* dynamic layer count change is only supported for HP */
  1475. if (client_layer_count >
  1476. inst->capabilities->cap[ENH_LAYER_COUNT].max)
  1477. client_layer_count =
  1478. inst->capabilities->cap[ENH_LAYER_COUNT].max;
  1479. msm_vidc_update_cap_value(inst, ENH_LAYER_COUNT,
  1480. client_layer_count, __func__);
  1481. }
  1482. }
  1483. exit:
  1484. return rc;
  1485. }
  1486. int msm_vidc_adjust_gop_size(void *instance, struct v4l2_ctrl *ctrl)
  1487. {
  1488. struct msm_vidc_inst_capability *capability;
  1489. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  1490. s32 adjusted_value, enh_layer_count = -1;
  1491. u32 min_gop_size, num_subgops;
  1492. if (!inst || !inst->capabilities) {
  1493. d_vpr_e("%s: invalid params\n", __func__);
  1494. return -EINVAL;
  1495. }
  1496. capability = inst->capabilities;
  1497. adjusted_value = ctrl ? ctrl->val : capability->cap[GOP_SIZE].value;
  1498. if (msm_vidc_get_parent_value(inst, GOP_SIZE,
  1499. ENH_LAYER_COUNT, &enh_layer_count, __func__))
  1500. return -EINVAL;
  1501. if (!enh_layer_count)
  1502. goto exit;
  1503. /*
1504. * Layer encoding needs the GOP size to be a multiple of the subgop
1505. * size, and the subgop size is 2 ^ (number of enhancement layers).
  1506. */
  1507. /* v4l2 layer count is the number of enhancement layers */
  1508. min_gop_size = 1 << enh_layer_count;
  1509. num_subgops = (adjusted_value + (min_gop_size >> 1)) /
  1510. min_gop_size;
  1511. if (num_subgops)
  1512. adjusted_value = num_subgops * min_gop_size;
  1513. else
  1514. adjusted_value = min_gop_size;
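/*
 * Worked example (editorial addition): with 2 enhancement layers,
 * min_gop_size = 1 << 2 = 4; a requested GOP size of 10 gives
 * num_subgops = (10 + 2) / 4 = 3, so the GOP size is rounded to 12.
 */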
  1515. exit:
  1516. msm_vidc_update_cap_value(inst, GOP_SIZE, adjusted_value, __func__);
  1517. return 0;
  1518. }
  1519. int msm_vidc_adjust_b_frame(void *instance, struct v4l2_ctrl *ctrl)
  1520. {
  1521. struct msm_vidc_inst_capability *capability;
  1522. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  1523. s32 adjusted_value, enh_layer_count = -1;
  1524. const u32 max_bframe_size = 7;
  1525. if (!inst || !inst->capabilities) {
  1526. d_vpr_e("%s: invalid params\n", __func__);
  1527. return -EINVAL;
  1528. }
  1529. capability = inst->capabilities;
  1530. adjusted_value = ctrl ? ctrl->val : capability->cap[B_FRAME].value;
  1531. if (msm_vidc_get_parent_value(inst, B_FRAME,
  1532. ENH_LAYER_COUNT, &enh_layer_count, __func__))
  1533. return -EINVAL;
  1534. if (!enh_layer_count || inst->hfi_layer_type != HFI_HIER_B) {
  1535. adjusted_value = 0;
  1536. goto exit;
  1537. }
  1538. adjusted_value = (1 << enh_layer_count) - 1;
  1539. /* Allowed Bframe values are 0, 1, 3, 7 */
  1540. if (adjusted_value > max_bframe_size)
  1541. adjusted_value = max_bframe_size;
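/*
 * Example (editorial addition): enh_layer_count = 2 gives
 * (1 << 2) - 1 = 3 B frames per sub-GOP; anything above 7 is clamped.
 */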
  1542. exit:
  1543. msm_vidc_update_cap_value(inst, B_FRAME, adjusted_value, __func__);
  1544. return 0;
  1545. }
  1546. static bool msm_vidc_check_all_layer_bitrate_set(struct msm_vidc_inst *inst)
  1547. {
  1548. bool layer_bitrate_set = true;
  1549. u32 cap_id = 0, i, enh_layer_count;
  1550. u32 layer_br_caps[6] = {L0_BR, L1_BR, L2_BR, L3_BR, L4_BR, L5_BR};
  1551. enh_layer_count = inst->capabilities->cap[ENH_LAYER_COUNT].value;
  1552. for (i = 0; i <= enh_layer_count; i++) {
  1553. if (i >= ARRAY_SIZE(layer_br_caps))
  1554. break;
  1555. cap_id = layer_br_caps[i];
  1556. if (!(inst->capabilities->cap[cap_id].flags & CAP_FLAG_CLIENT_SET)) {
  1557. layer_bitrate_set = false;
  1558. break;
  1559. }
  1560. }
  1561. return layer_bitrate_set;
  1562. }
  1563. static u32 msm_vidc_get_cumulative_bitrate(struct msm_vidc_inst *inst)
  1564. {
  1565. int i;
  1566. u32 cap_id = 0;
  1567. u32 cumulative_br = 0;
  1568. s32 enh_layer_count;
  1569. u32 layer_br_caps[6] = {L0_BR, L1_BR, L2_BR, L3_BR, L4_BR, L5_BR};
  1570. enh_layer_count = inst->capabilities->cap[ENH_LAYER_COUNT].value;
  1571. for (i = 0; i <= enh_layer_count; i++) {
  1572. if (i >= ARRAY_SIZE(layer_br_caps))
  1573. break;
  1574. cap_id = layer_br_caps[i];
  1575. cumulative_br += inst->capabilities->cap[cap_id].value;
  1576. }
  1577. return cumulative_br;
  1578. }
  1579. int msm_vidc_adjust_bitrate(void *instance, struct v4l2_ctrl *ctrl)
  1580. {
  1581. int i, rc = 0;
  1582. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1583. struct msm_vidc_inst_capability *capability;
  1584. s32 adjusted_value, max_bitrate, enh_layer_count;
  1585. u32 cumulative_bitrate = 0, cap_id = 0, cap_value = 0;
  1586. u32 layer_br_caps[6] = {L0_BR, L1_BR, L2_BR, L3_BR, L4_BR, L5_BR};
  1587. if (!inst || !inst->capabilities) {
  1588. d_vpr_e("%s: invalid params\n", __func__);
  1589. return -EINVAL;
  1590. }
  1591. capability = inst->capabilities;
  1592. /* ignore layer bitrate when total bitrate is set */
  1593. if (capability->cap[BIT_RATE].flags & CAP_FLAG_CLIENT_SET) {
  1594. /*
  1595. * For static case, ctrl is null.
  1596. * For dynamic case, only BIT_RATE cap uses this adjust function.
1597. * Hence, there is no need to check whether the ctrl id is the BIT_RATE
1598. * control rather than one of the layer bitrate controls.
  1599. */
  1600. adjusted_value = ctrl ? ctrl->val : capability->cap[BIT_RATE].value;
  1601. msm_vidc_update_cap_value(inst, BIT_RATE, adjusted_value, __func__);
  1602. return 0;
  1603. }
  1604. if (inst->bufq[OUTPUT_PORT].vb2q->streaming)
  1605. return 0;
  1606. if (msm_vidc_get_parent_value(inst, BIT_RATE,
  1607. ENH_LAYER_COUNT, &enh_layer_count, __func__))
  1608. return -EINVAL;
  1609. max_bitrate = inst->capabilities->cap[BIT_RATE].max;
  1610. /*
  1611. * ENH_LAYER_COUNT cap max is positive only if
  1612. * layer encoding is enabled during streamon.
  1613. */
  1614. if (capability->cap[ENH_LAYER_COUNT].max) {
  1615. if (!msm_vidc_check_all_layer_bitrate_set(inst)) {
  1616. i_vpr_h(inst,
  1617. "%s: client did not set all layer bitrates\n",
  1618. __func__);
  1619. return 0;
  1620. }
  1621. cumulative_bitrate = msm_vidc_get_cumulative_bitrate(inst);
  1622. /* cap layer bitrates to max supported bitrate */
  1623. if (cumulative_bitrate > max_bitrate) {
  1624. u32 decrement_in_value = 0;
  1625. u32 decrement_in_percent = ((cumulative_bitrate - max_bitrate) * 100) /
  1626. max_bitrate;
  1627. cumulative_bitrate = 0;
  1628. for (i = 0; i <= enh_layer_count; i++) {
  1629. if (i >= ARRAY_SIZE(layer_br_caps))
  1630. break;
  1631. cap_id = layer_br_caps[i];
  1632. cap_value = inst->capabilities->cap[cap_id].value;
  1633. decrement_in_value = (cap_value *
  1634. decrement_in_percent) / 100;
  1635. cumulative_bitrate += (cap_value - decrement_in_value);
  1636. /*
  1637. * cap value for the L*_BR is changed. Hence, update cap,
  1638. * and add to FW_LIST to set new values to firmware.
  1639. */
  1640. msm_vidc_update_cap_value(inst, cap_id,
  1641. (cap_value - decrement_in_value), __func__);
  1642. }
  1643. }
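/*
 * Worked example (editorial addition): if the layer bitrates sum to
 * 120 Mbps and max_bitrate is 100 Mbps, decrement_in_percent = 20,
 * so each L*_BR is scaled down by 20% and the new cumulative bitrate
 * becomes 96 Mbps, within the supported maximum.
 */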
  1644. i_vpr_h(inst,
  1645. "%s: update BIT_RATE with cumulative bitrate\n",
  1646. __func__);
  1647. msm_vidc_update_cap_value(inst, BIT_RATE,
  1648. cumulative_bitrate, __func__);
  1649. }
  1650. return rc;
  1651. }
  1652. int msm_vidc_adjust_dynamic_layer_bitrate(void *instance, struct v4l2_ctrl *ctrl)
  1653. {
  1654. int rc = 0;
  1655. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1656. struct msm_vidc_inst_capability *capability;
  1657. u32 cumulative_bitrate = 0;
  1658. u32 client_set_cap_id = INST_CAP_NONE;
  1659. u32 old_br = 0, new_br = 0, exceeded_br = 0;
  1660. s32 max_bitrate;
  1661. if (!inst || !inst->capabilities) {
  1662. d_vpr_e("%s: invalid params\n", __func__);
  1663. return -EINVAL;
  1664. }
  1665. capability = inst->capabilities;
  1666. if (!ctrl)
  1667. return 0;
  1668. /* ignore layer bitrate when total bitrate is set */
  1669. if (capability->cap[BIT_RATE].flags & CAP_FLAG_CLIENT_SET)
  1670. return 0;
  1671. if (!inst->bufq[OUTPUT_PORT].vb2q->streaming)
  1672. return 0;
  1673. /*
  1674. * ENH_LAYER_COUNT cap max is positive only if
  1675. * layer encoding is enabled during streamon.
  1676. */
  1677. if (!capability->cap[ENH_LAYER_COUNT].max) {
  1678. i_vpr_e(inst, "%s: layers not enabled\n", __func__);
  1679. return -EINVAL;
  1680. }
  1681. if (!msm_vidc_check_all_layer_bitrate_set(inst)) {
  1682. i_vpr_h(inst,
  1683. "%s: client did not set all layer bitrates\n",
  1684. __func__);
  1685. return 0;
  1686. }
  1687. client_set_cap_id = msm_vidc_get_cap_id(inst, ctrl->id);
  1688. if (client_set_cap_id == INST_CAP_NONE) {
  1689. i_vpr_e(inst, "%s: could not find cap_id for ctrl %s\n",
  1690. __func__, ctrl->name);
  1691. return -EINVAL;
  1692. }
  1693. cumulative_bitrate = msm_vidc_get_cumulative_bitrate(inst);
  1694. max_bitrate = inst->capabilities->cap[BIT_RATE].max;
  1695. old_br = capability->cap[client_set_cap_id].value;
  1696. new_br = ctrl->val;
  1697. /*
  1698. * new bitrate is not supposed to cause cumulative bitrate to
  1699. * exceed max supported bitrate
  1700. */
  1701. if ((cumulative_bitrate - old_br + new_br) > max_bitrate) {
  1702. /* adjust new bitrate */
  1703. exceeded_br = (cumulative_bitrate - old_br + new_br) - max_bitrate;
  1704. new_br = ctrl->val - exceeded_br;
  1705. }
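/*
 * Worked example (editorial addition): with cumulative_bitrate = 90,
 * old_br = 20, new_br = 40 and max_bitrate = 100, the request would
 * reach 110, so exceeded_br = 10 and new_br is trimmed to 30.
 */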
  1706. msm_vidc_update_cap_value(inst, client_set_cap_id, new_br, __func__);
1707. /* adjust total bitrate cap */
  1708. i_vpr_h(inst,
  1709. "%s: update BIT_RATE with cumulative bitrate\n",
  1710. __func__);
  1711. msm_vidc_update_cap_value(inst, BIT_RATE,
  1712. msm_vidc_get_cumulative_bitrate(inst), __func__);
  1713. return rc;
  1714. }
  1715. int msm_vidc_adjust_peak_bitrate(void *instance, struct v4l2_ctrl *ctrl)
  1716. {
  1717. struct msm_vidc_inst_capability *capability;
  1718. s32 adjusted_value;
  1719. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1720. s32 rc_type = -1, bitrate = -1;
  1721. if (!inst || !inst->capabilities) {
  1722. d_vpr_e("%s: invalid params\n", __func__);
  1723. return -EINVAL;
  1724. }
  1725. capability = inst->capabilities;
  1726. adjusted_value = ctrl ? ctrl->val :
  1727. capability->cap[PEAK_BITRATE].value;
  1728. if (msm_vidc_get_parent_value(inst, PEAK_BITRATE,
  1729. BITRATE_MODE, &rc_type, __func__))
  1730. return -EINVAL;
  1731. if (rc_type != HFI_RC_CBR_CFR &&
  1732. rc_type != HFI_RC_CBR_VFR)
  1733. return 0;
  1734. if (msm_vidc_get_parent_value(inst, PEAK_BITRATE,
  1735. BIT_RATE, &bitrate, __func__))
  1736. return -EINVAL;
  1737. /* Peak Bitrate should be larger than or equal to avg bitrate */
  1738. if (capability->cap[PEAK_BITRATE].flags & CAP_FLAG_CLIENT_SET) {
  1739. if (adjusted_value < bitrate)
  1740. adjusted_value = bitrate;
  1741. } else {
  1742. adjusted_value = capability->cap[BIT_RATE].value;
  1743. }
  1744. msm_vidc_update_cap_value(inst, PEAK_BITRATE,
  1745. adjusted_value, __func__);
  1746. return 0;
  1747. }
  1748. int msm_vidc_adjust_hevc_min_qp(void *instance, struct v4l2_ctrl *ctrl)
  1749. {
  1750. int rc = 0;
  1751. struct msm_vidc_inst_capability *capability;
  1752. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1753. if (!inst || !inst->capabilities) {
  1754. d_vpr_e("%s: invalid params\n", __func__);
  1755. return -EINVAL;
  1756. }
  1757. capability = inst->capabilities;
  1758. if (ctrl)
  1759. msm_vidc_update_cap_value(inst, MIN_FRAME_QP,
  1760. ctrl->val, __func__);
  1761. rc = msm_vidc_adjust_hevc_qp(inst, MIN_FRAME_QP);
  1762. return rc;
  1763. }
  1764. int msm_vidc_adjust_hevc_max_qp(void *instance, struct v4l2_ctrl *ctrl)
  1765. {
  1766. int rc = 0;
  1767. struct msm_vidc_inst_capability *capability;
  1768. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1769. if (!inst || !inst->capabilities) {
  1770. d_vpr_e("%s: invalid params\n", __func__);
  1771. return -EINVAL;
  1772. }
  1773. capability = inst->capabilities;
  1774. if (ctrl)
  1775. msm_vidc_update_cap_value(inst, MAX_FRAME_QP,
  1776. ctrl->val, __func__);
  1777. rc = msm_vidc_adjust_hevc_qp(inst, MAX_FRAME_QP);
  1778. return rc;
  1779. }
  1780. int msm_vidc_adjust_hevc_i_frame_qp(void *instance, struct v4l2_ctrl *ctrl)
  1781. {
  1782. int rc = 0;
  1783. struct msm_vidc_inst_capability *capability;
  1784. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1785. if (!inst || !inst->capabilities) {
  1786. d_vpr_e("%s: invalid params\n", __func__);
  1787. return -EINVAL;
  1788. }
  1789. capability = inst->capabilities;
  1790. if (ctrl)
  1791. msm_vidc_update_cap_value(inst, I_FRAME_QP,
  1792. ctrl->val, __func__);
  1793. rc = msm_vidc_adjust_hevc_qp(inst, I_FRAME_QP);
  1794. if (rc)
  1795. return rc;
  1796. return rc;
  1797. }
  1798. int msm_vidc_adjust_hevc_p_frame_qp(void *instance, struct v4l2_ctrl *ctrl)
  1799. {
  1800. int rc = 0;
  1801. struct msm_vidc_inst_capability *capability;
  1802. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1803. if (!inst || !inst->capabilities) {
  1804. d_vpr_e("%s: invalid params\n", __func__);
  1805. return -EINVAL;
  1806. }
  1807. capability = inst->capabilities;
  1808. if (ctrl)
  1809. msm_vidc_update_cap_value(inst, P_FRAME_QP,
  1810. ctrl->val, __func__);
  1811. rc = msm_vidc_adjust_hevc_qp(inst, P_FRAME_QP);
  1812. if (rc)
  1813. return rc;
  1814. return rc;
  1815. }
  1816. int msm_vidc_adjust_hevc_b_frame_qp(void *instance, struct v4l2_ctrl *ctrl)
  1817. {
  1818. int rc = 0;
  1819. struct msm_vidc_inst_capability *capability;
  1820. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1821. if (!inst || !inst->capabilities) {
  1822. d_vpr_e("%s: invalid params\n", __func__);
  1823. return -EINVAL;
  1824. }
  1825. capability = inst->capabilities;
  1826. if (ctrl)
  1827. msm_vidc_update_cap_value(inst, B_FRAME_QP,
  1828. ctrl->val, __func__);
  1829. rc = msm_vidc_adjust_hevc_qp(inst, B_FRAME_QP);
  1830. if (rc)
  1831. return rc;
  1832. return rc;
  1833. }
  1834. int msm_vidc_adjust_blur_type(void *instance, struct v4l2_ctrl *ctrl)
  1835. {
  1836. struct msm_vidc_inst_capability *capability;
  1837. s32 adjusted_value;
  1838. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1839. s32 rc_type = -1;
  1840. s32 pix_fmts = -1, min_quality = -1;
  1841. if (!inst || !inst->capabilities) {
  1842. d_vpr_e("%s: invalid params\n", __func__);
  1843. return -EINVAL;
  1844. }
  1845. capability = inst->capabilities;
  1846. adjusted_value = ctrl ? ctrl->val :
  1847. capability->cap[BLUR_TYPES].value;
  1848. if (adjusted_value == VIDC_BLUR_NONE)
  1849. return 0;
  1850. if (msm_vidc_get_parent_value(inst, BLUR_TYPES, BITRATE_MODE,
  1851. &rc_type, __func__) ||
  1852. msm_vidc_get_parent_value(inst, BLUR_TYPES, PIX_FMTS,
  1853. &pix_fmts, __func__) ||
  1854. msm_vidc_get_parent_value(inst, BLUR_TYPES, MIN_QUALITY,
  1855. &min_quality, __func__))
  1856. return -EINVAL;
  1857. if (adjusted_value == VIDC_BLUR_EXTERNAL) {
  1858. if (is_scaling_enabled(inst) || min_quality) {
  1859. adjusted_value = VIDC_BLUR_NONE;
  1860. }
  1861. } else if (adjusted_value == VIDC_BLUR_ADAPTIVE) {
  1862. if (is_scaling_enabled(inst) || min_quality ||
  1863. (rc_type != HFI_RC_VBR_CFR &&
  1864. rc_type != HFI_RC_CBR_CFR &&
  1865. rc_type != HFI_RC_CBR_VFR) ||
  1866. is_10bit_colorformat(pix_fmts)) {
  1867. adjusted_value = VIDC_BLUR_NONE;
  1868. }
  1869. }
  1870. msm_vidc_update_cap_value(inst, BLUR_TYPES,
  1871. adjusted_value, __func__);
  1872. return 0;
  1873. }
  1874. int msm_vidc_adjust_all_intra(void *instance, struct v4l2_ctrl *ctrl)
  1875. {
  1876. struct msm_vidc_inst_capability *capability;
  1877. s32 adjusted_value;
  1878. struct msm_vidc_core *core;
  1879. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1880. s32 gop_size = -1, bframe = -1;
  1881. u32 width, height, fps, mbps, max_mbps;
  1882. if (!inst || !inst->capabilities || !inst->core) {
  1883. d_vpr_e("%s: invalid params\n", __func__);
  1884. return -EINVAL;
  1885. }
  1886. capability = inst->capabilities;
  1887. adjusted_value = capability->cap[ALL_INTRA].value;
  1888. if (msm_vidc_get_parent_value(inst, ALL_INTRA, GOP_SIZE,
  1889. &gop_size, __func__) ||
  1890. msm_vidc_get_parent_value(inst, ALL_INTRA, B_FRAME,
  1891. &bframe, __func__))
  1892. return -EINVAL;
  1893. width = inst->crop.width;
  1894. height = inst->crop.height;
  1895. fps = msm_vidc_get_fps(inst);
  1896. mbps = NUM_MBS_PER_SEC(height, width, fps);
  1897. core = inst->core;
  1898. max_mbps = core->capabilities[MAX_MBPS_ALL_INTRA].value;
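/*
 * Illustrative arithmetic (editorial addition, assuming NUM_MBS_PER_SEC
 * counts 16x16 macroblocks): 3840x2160 at 60 fps gives
 * 240 * 135 * 60 = 1,944,000 macroblocks per second, which is compared
 * against MAX_MBPS_ALL_INTRA.
 */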
  1899. if (mbps > max_mbps) {
  1900. adjusted_value = 0;
  1901. i_vpr_h(inst, "%s: mbps %d exceeds max supported mbps %d\n",
  1902. __func__, mbps, max_mbps);
  1903. goto exit;
  1904. }
  1905. if (!gop_size && !bframe)
  1906. adjusted_value = 1;
  1907. exit:
  1908. msm_vidc_update_cap_value(inst, ALL_INTRA,
  1909. adjusted_value, __func__);
  1910. return 0;
  1911. }
  1912. int msm_vidc_adjust_blur_resolution(void *instance, struct v4l2_ctrl *ctrl)
  1913. {
  1914. struct msm_vidc_inst_capability *capability;
  1915. s32 adjusted_value;
  1916. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1917. s32 blur_type = -1;
  1918. if (!inst || !inst->capabilities) {
  1919. d_vpr_e("%s: invalid params\n", __func__);
  1920. return -EINVAL;
  1921. }
  1922. capability = inst->capabilities;
  1923. adjusted_value = ctrl ? ctrl->val :
  1924. capability->cap[BLUR_RESOLUTION].value;
  1925. if (msm_vidc_get_parent_value(inst, BLUR_RESOLUTION, BLUR_TYPES,
  1926. &blur_type, __func__))
  1927. return -EINVAL;
  1928. if (blur_type != VIDC_BLUR_EXTERNAL)
  1929. return 0;
  1930. msm_vidc_update_cap_value(inst, BLUR_RESOLUTION,
  1931. adjusted_value, __func__);
  1932. return 0;
  1933. }
  1934. int msm_vidc_adjust_cac(void *instance, struct v4l2_ctrl *ctrl)
  1935. {
  1936. struct msm_vidc_inst_capability *capability;
  1937. s32 adjusted_value;
  1938. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1939. s32 min_quality = -1, rc_type = -1;
  1940. if (!inst || !inst->capabilities) {
  1941. d_vpr_e("%s: invalid params\n", __func__);
  1942. return -EINVAL;
  1943. }
  1944. capability = inst->capabilities;
  1945. adjusted_value = ctrl ? ctrl->val :
  1946. capability->cap[CONTENT_ADAPTIVE_CODING].value;
  1947. if (inst->bufq[OUTPUT_PORT].vb2q->streaming)
  1948. return 0;
  1949. if (msm_vidc_get_parent_value(inst, CONTENT_ADAPTIVE_CODING,
  1950. MIN_QUALITY, &min_quality, __func__) ||
  1951. msm_vidc_get_parent_value(inst, CONTENT_ADAPTIVE_CODING,
  1952. BITRATE_MODE, &rc_type, __func__))
  1953. return -EINVAL;
  1954. /*
  1955. * CAC is supported only for VBR rc type.
  1956. * Hence, do not adjust or set to firmware for non VBR rc's
  1957. */
  1958. if (rc_type != HFI_RC_VBR_CFR) {
  1959. adjusted_value = 0;
  1960. goto adjust;
  1961. }
  1962. if (min_quality) {
  1963. adjusted_value = 1;
  1964. goto adjust;
  1965. }
  1966. adjust:
  1967. msm_vidc_update_cap_value(inst, CONTENT_ADAPTIVE_CODING,
  1968. adjusted_value, __func__);
  1969. return 0;
  1970. }
  1971. int msm_vidc_adjust_bitrate_boost(void *instance, struct v4l2_ctrl *ctrl)
  1972. {
  1973. struct msm_vidc_inst_capability *capability;
  1974. s32 adjusted_value;
  1975. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1976. s32 min_quality = -1, rc_type = -1;
  1977. if (!inst || !inst->capabilities) {
  1978. d_vpr_e("%s: invalid params\n", __func__);
  1979. return -EINVAL;
  1980. }
  1981. capability = inst->capabilities;
  1982. adjusted_value = ctrl ? ctrl->val :
  1983. capability->cap[BITRATE_BOOST].value;
  1984. if (inst->bufq[OUTPUT_PORT].vb2q->streaming)
  1985. return 0;
  1986. if (msm_vidc_get_parent_value(inst, BITRATE_BOOST,
  1987. MIN_QUALITY, &min_quality, __func__) ||
  1988. msm_vidc_get_parent_value(inst, BITRATE_BOOST,
  1989. BITRATE_MODE, &rc_type, __func__))
  1990. return -EINVAL;
  1991. /*
1992. * Bitrate boost is supported only for VBR rc type.
  1993. * Hence, do not adjust or set to firmware for non VBR rc's
  1994. */
  1995. if (rc_type != HFI_RC_VBR_CFR) {
  1996. adjusted_value = 0;
  1997. goto adjust;
  1998. }
  1999. if (min_quality) {
  2000. adjusted_value = MAX_BITRATE_BOOST;
  2001. goto adjust;
  2002. }
  2003. adjust:
  2004. msm_vidc_update_cap_value(inst, BITRATE_BOOST,
  2005. adjusted_value, __func__);
  2006. return 0;
  2007. }
  2008. int msm_vidc_adjust_min_quality(void *instance, struct v4l2_ctrl *ctrl)
  2009. {
  2010. struct msm_vidc_inst_capability *capability;
  2011. s32 adjusted_value;
  2012. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  2013. s32 roi_enable = -1, rc_type = -1, enh_layer_count = -1, pix_fmts = -1;
  2014. u32 width, height, frame_rate;
  2015. struct v4l2_format *f;
  2016. if (!inst || !inst->capabilities) {
  2017. d_vpr_e("%s: invalid params\n", __func__);
  2018. return -EINVAL;
  2019. }
  2020. capability = inst->capabilities;
  2021. adjusted_value = ctrl ? ctrl->val : capability->cap[MIN_QUALITY].value;
  2022. /*
  2023. * Although MIN_QUALITY is static, one of its parents,
2024. * ENH_LAYER_COUNT, is a dynamic cap. Hence, a dynamic call
2025. * may be made for MIN_QUALITY via ENH_LAYER_COUNT.
2026. * Therefore, the streaming check below is required to avoid
2027. * runtime modification of MIN_QUALITY.
  2028. */
  2029. if (inst->bufq[OUTPUT_PORT].vb2q->streaming)
  2030. return 0;
  2031. if (msm_vidc_get_parent_value(inst, MIN_QUALITY,
  2032. BITRATE_MODE, &rc_type, __func__) ||
  2033. msm_vidc_get_parent_value(inst, MIN_QUALITY,
  2034. META_ROI_INFO, &roi_enable, __func__) ||
  2035. msm_vidc_get_parent_value(inst, MIN_QUALITY,
  2036. ENH_LAYER_COUNT, &enh_layer_count, __func__))
  2037. return -EINVAL;
  2038. /*
  2039. * Min Quality is supported only for VBR rc type.
  2040. * Hence, do not adjust or set to firmware for non VBR rc's
  2041. */
  2042. if (rc_type != HFI_RC_VBR_CFR) {
  2043. adjusted_value = 0;
  2044. goto update_and_exit;
  2045. }
  2046. frame_rate = inst->capabilities->cap[FRAME_RATE].value >> 16;
  2047. f = &inst->fmts[OUTPUT_PORT];
  2048. width = f->fmt.pix_mp.width;
  2049. height = f->fmt.pix_mp.height;
  2050. /*
  2051. * VBR Min Quality not supported for:
  2052. * - HEVC 10bit
  2053. * - ROI support
  2054. * - HP encoding
  2055. * - External Blur
  2056. * - Resolution beyond 1080P
  2057. * (It will fall back to CQCAC 25% or 0% (CAC) or CQCAC-OFF)
  2058. */
  2059. if (inst->codec == MSM_VIDC_HEVC) {
  2060. if (msm_vidc_get_parent_value(inst, MIN_QUALITY,
  2061. PIX_FMTS, &pix_fmts, __func__))
  2062. return -EINVAL;
  2063. if (is_10bit_colorformat(pix_fmts)) {
  2064. i_vpr_h(inst,
  2065. "%s: min quality is supported only for 8 bit\n",
  2066. __func__);
  2067. adjusted_value = 0;
  2068. goto update_and_exit;
  2069. }
  2070. }
  2071. if (res_is_greater_than(width, height, 1920, 1080)) {
  2072. i_vpr_h(inst, "%s: unsupported res, wxh %ux%u\n",
  2073. __func__, width, height);
  2074. adjusted_value = 0;
  2075. goto update_and_exit;
  2076. }
  2077. if (frame_rate > 60) {
  2078. i_vpr_h(inst, "%s: unsupported fps %u\n",
  2079. __func__, frame_rate);
  2080. adjusted_value = 0;
  2081. goto update_and_exit;
  2082. }
  2083. if (roi_enable) {
  2084. i_vpr_h(inst,
  2085. "%s: min quality not supported with roi metadata\n",
  2086. __func__);
  2087. adjusted_value = 0;
  2088. goto update_and_exit;
  2089. }
  2090. if (enh_layer_count && inst->hfi_layer_type != HFI_HIER_B) {
  2091. i_vpr_h(inst,
  2092. "%s: min quality not supported for HP encoding\n",
  2093. __func__);
  2094. adjusted_value = 0;
  2095. goto update_and_exit;
  2096. }
  2097. /* Above conditions are met. Hence enable min quality */
  2098. adjusted_value = MAX_SUPPORTED_MIN_QUALITY;
  2099. update_and_exit:
  2100. msm_vidc_update_cap_value(inst, MIN_QUALITY,
  2101. adjusted_value, __func__);
  2102. return 0;
  2103. }
  2104. int msm_vidc_adjust_lowlatency_mode(void *instance, struct v4l2_ctrl *ctrl)
  2105. {
  2106. struct msm_vidc_inst_capability *capability;
  2107. s32 adjusted_value;
  2108. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  2109. s32 rc_type = -1;
  2110. if (!inst || !inst->capabilities) {
  2111. d_vpr_e("%s: invalid params\n", __func__);
  2112. return -EINVAL;
  2113. }
  2114. capability = inst->capabilities;
  2115. adjusted_value = ctrl ? ctrl->val :
  2116. capability->cap[LOWLATENCY_MODE].value;
  2117. if (msm_vidc_get_parent_value(inst, LOWLATENCY_MODE, BITRATE_MODE,
  2118. &rc_type, __func__))
  2119. return -EINVAL;
  2120. if (rc_type == HFI_RC_CBR_CFR ||
  2121. rc_type == HFI_RC_CBR_VFR)
  2122. adjusted_value = 1;
  2123. msm_vidc_update_cap_value(inst, LOWLATENCY_MODE,
  2124. adjusted_value, __func__);
  2125. return 0;
  2126. }
  2127. int msm_vidc_adjust_session_priority(void *instance, struct v4l2_ctrl *ctrl)
  2128. {
  2129. int rc = 0;
  2130. int adjusted_value;
  2131. bool rate_by_client;
  2132. struct msm_vidc_inst_capability *capability;
  2133. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  2134. if (!inst || !inst->capabilities) {
  2135. d_vpr_e("%s: invalid params\n", __func__);
  2136. return -EINVAL;
  2137. }
  2138. capability = inst->capabilities;
  2139. adjusted_value = ctrl ? ctrl->val :
  2140. capability->cap[PRIORITY].value;
  2141. if (capability->cap[FRAME_RATE].flags & CAP_FLAG_CLIENT_SET ||
  2142. capability->cap[OPERATING_RATE].flags & CAP_FLAG_CLIENT_SET) {
  2143. rate_by_client = true;
  2144. inst->priority_level = MSM_VIDC_PRIORITY_HIGH;
  2145. } else {
  2146. rate_by_client = false;
  2147. inst->priority_level = MSM_VIDC_PRIORITY_LOW;
  2148. }
  2149. /*
2150. * For RT, check for resource feasibility.
2151. * For NRT, sessions with the rate set by the client take higher order
2152. * among NRT sessions. They are constrained RT or low priority RT.
  2153. */
  2154. if (adjusted_value == 0) {
  2155. rc = msm_vidc_check_core_mbps(inst);
  2156. if (rc) {
  2157. i_vpr_e(inst, "%s: unsupported load\n", __func__);
  2158. goto exit;
  2159. }
  2160. rc = capability->cap[FRAME_RATE].value > capability->cap[FRAME_RATE].max;
  2161. if (rc) {
  2162. i_vpr_e(inst, "%s: unsupported FRAME_RATE %u, max %u\n", __func__,
  2163. capability->cap[FRAME_RATE].value >> 16,
  2164. capability->cap[FRAME_RATE].max >> 16);
  2165. rc = -ENOMEM;
  2166. goto exit;
  2167. }
  2168. rc = capability->cap[OPERATING_RATE].value > capability->cap[OPERATING_RATE].max;
  2169. if (rc) {
  2170. i_vpr_e(inst, "%s: unsupported OPERATING_RATE %u, max %u\n", __func__,
  2171. capability->cap[OPERATING_RATE].value >> 16,
  2172. capability->cap[OPERATING_RATE].max >> 16);
  2173. rc = -ENOMEM;
  2174. goto exit;
  2175. }
  2176. rc = msm_vidc_check_core_mbpf(inst);
  2177. if (rc) {
  2178. i_vpr_e(inst, "%s: unsupported load\n", __func__);
  2179. goto exit;
  2180. }
  2181. }
  2182. msm_vidc_update_cap_value(inst, PRIORITY, adjusted_value, __func__);
  2183. exit:
  2184. return rc;
  2185. }
  2186. int msm_vidc_adjust_roi_info(void *instance, struct v4l2_ctrl *ctrl)
  2187. {
  2188. struct msm_vidc_inst_capability *capability;
  2189. s32 adjusted_value;
  2190. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  2191. s32 rc_type = -1, pix_fmt = -1;
  2192. if (!inst || !inst->capabilities) {
  2193. d_vpr_e("%s: invalid params\n", __func__);
  2194. return -EINVAL;
  2195. }
  2196. capability = inst->capabilities;
  2197. adjusted_value = ctrl ? ctrl->val : capability->cap[META_ROI_INFO].value;
  2198. if (msm_vidc_get_parent_value(inst, META_ROI_INFO, BITRATE_MODE,
  2199. &rc_type, __func__))
  2200. return -EINVAL;
  2201. if (msm_vidc_get_parent_value(inst, META_ROI_INFO, PIX_FMTS,
  2202. &pix_fmt, __func__))
  2203. return -EINVAL;
  2204. if (rc_type != HFI_RC_VBR_CFR || !is_8bit_colorformat(pix_fmt))
  2205. adjusted_value = 0;
  2206. msm_vidc_update_cap_value(inst, META_ROI_INFO,
  2207. adjusted_value, __func__);
  2208. return 0;
  2209. }
  2210. int msm_vidc_prepare_dependency_list(struct msm_vidc_inst *inst)
  2211. {
  2212. struct list_head root_list, opt_list;
  2213. struct msm_vidc_inst_capability *capability;
  2214. struct msm_vidc_inst_cap *cap, *rcap;
  2215. struct msm_vidc_inst_cap_entry *entry = NULL, *temp = NULL;
  2216. bool root_visited[INST_CAP_MAX];
  2217. bool opt_visited[INST_CAP_MAX];
  2218. int tmp_count_total, tmp_count, num_nodes = 0;
  2219. int i, rc = 0;
  2220. if (!inst || !inst->capabilities) {
  2221. d_vpr_e("%s: invalid params\n", __func__);
  2222. return -EINVAL;
  2223. }
  2224. capability = inst->capabilities;
  2225. if (!list_empty(&inst->caps_list)) {
  2226. i_vpr_h(inst, "%s: dependency list already prepared\n", __func__);
  2227. return 0;
  2228. }
  2229. /* init local list and lookup table entries */
  2230. INIT_LIST_HEAD(&root_list);
  2231. INIT_LIST_HEAD(&opt_list);
  2232. memset(&root_visited, 0, sizeof(root_visited));
  2233. memset(&opt_visited, 0, sizeof(opt_visited));
  2234. /* populate root nodes first */
  2235. for (i = 1; i < INST_CAP_MAX; i++) {
  2236. rcap = &capability->cap[i];
  2237. if (!is_valid_cap(rcap))
  2238. continue;
  2239. /* sanitize cap value */
  2240. if (i != rcap->cap_id) {
  2241. i_vpr_e(inst, "%s: cap id mismatch. expected %s, actual %s\n",
  2242. __func__, cap_name(i), cap_name(rcap->cap_id));
  2243. rc = -EINVAL;
  2244. goto error;
  2245. }
  2246. /* add all root nodes */
  2247. if (is_root(rcap)) {
  2248. rc = add_node(&root_list, rcap, root_visited);
  2249. if (rc)
  2250. goto error;
  2251. }
  2252. }
  2253. /* add all dependent parents */
  2254. list_for_each_entry_safe(entry, temp, &root_list, list) {
  2255. rcap = &capability->cap[entry->cap_id];
  2256. /* skip leaf node */
  2257. if (!has_childrens(rcap))
  2258. continue;
  2259. for (i = 0; i < MAX_CAP_CHILDREN; i++) {
  2260. if (!rcap->children[i])
  2261. break;
  2262. if (!is_valid_cap_id(rcap->children[i]))
  2263. continue;
  2264. cap = &capability->cap[rcap->children[i]];
  2265. if (!is_valid_cap(cap))
  2266. continue;
  2267. /**
  2268. * if child node is already part of root or optional list
  2269. * then no need to add it again.
  2270. */
  2271. if (root_visited[cap->cap_id] || opt_visited[cap->cap_id])
  2272. continue;
  2273. /**
  2274. * if child node's all parents are already present in root list
  2275. * then add it to root list else add it to optional list.
  2276. */
  2277. if (is_all_parents_visited(cap, root_visited)) {
  2278. rc = add_node(&root_list, cap, root_visited);
  2279. if (rc)
  2280. goto error;
  2281. } else {
  2282. rc = add_node(&opt_list, cap, opt_visited);
  2283. if (rc)
  2284. goto error;
  2285. }
  2286. }
  2287. }
  2288. /* find total optional list entries */
  2289. list_for_each_entry(entry, &opt_list, list)
  2290. num_nodes++;
  2291. /* used for loop detection */
  2292. tmp_count_total = num_nodes;
  2293. tmp_count = num_nodes;
  2294. /* sort final outstanding nodes */
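/*
 * Editorial note: this pass is effectively a topological sort of the
 * remaining capability nodes. tmp_count/tmp_count_total track whether a
 * full pass over opt_list made progress; a pass with no progress means
 * the dependency graph contains a cycle.
 */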
  2295. list_for_each_entry_safe(entry, temp, &opt_list, list) {
  2296. /* initially remove entry from opt list */
  2297. list_del_init(&entry->list);
  2298. tmp_count--;
  2299. cap = &capability->cap[entry->cap_id];
  2300. /**
  2301. * if all parents are visited then add this entry to
  2302. * root list else add it to the end of optional list.
  2303. */
  2304. if (is_all_parents_visited(cap, root_visited)) {
  2305. list_add_tail(&entry->list, &root_list);
  2306. root_visited[entry->cap_id] = true;
  2307. tmp_count_total--;
  2308. } else {
  2309. list_add_tail(&entry->list, &opt_list);
  2310. }
  2311. /* detect loop */
  2312. if (!tmp_count) {
  2313. if (num_nodes == tmp_count_total) {
  2314. i_vpr_e(inst, "%s: loop detected in subgraph %d\n",
  2315. __func__, num_nodes);
  2316. rc = -EINVAL;
  2317. goto error;
  2318. }
  2319. num_nodes = tmp_count_total;
  2320. tmp_count = tmp_count_total;
  2321. }
  2322. }
  2323. /* expecting opt_list to be empty */
  2324. if (!list_empty(&opt_list)) {
  2325. i_vpr_e(inst, "%s: opt_list is not empty\n", __func__);
  2326. rc = -EINVAL;
  2327. goto error;
  2328. }
  2329. /* move elements to &inst->caps_list from local */
  2330. list_replace_init(&root_list, &inst->caps_list);
  2331. return 0;
  2332. error:
  2333. list_for_each_entry_safe(entry, temp, &opt_list, list) {
  2334. i_vpr_e(inst, "%s: opt_list: %s\n", __func__, cap_name(entry->cap_id));
  2335. list_del_init(&entry->list);
  2336. kfree(entry);
  2337. }
  2338. list_for_each_entry_safe(entry, temp, &root_list, list) {
  2339. i_vpr_e(inst, "%s: root_list: %s\n", __func__, cap_name(entry->cap_id));
  2340. list_del_init(&entry->list);
  2341. kfree(entry);
  2342. }
  2343. return rc;
  2344. }
  2345. /*
  2346. * Loop over instance capabilities from caps_list
  2347. * and call adjust and set function
  2348. */
  2349. int msm_vidc_adjust_set_v4l2_properties(struct msm_vidc_inst *inst)
  2350. {
  2351. struct msm_vidc_inst_cap_entry *entry = NULL, *temp = NULL;
  2352. int rc = 0;
  2353. if (!inst || !inst->capabilities) {
  2354. d_vpr_e("%s: invalid params\n", __func__);
  2355. return -EINVAL;
  2356. }
  2357. i_vpr_h(inst, "%s()\n", __func__);
  2358. /* adjust all possible caps from caps_list */
  2359. list_for_each_entry_safe(entry, temp, &inst->caps_list, list) {
  2360. i_vpr_l(inst, "%s: cap: id %3u, name %s\n", __func__,
  2361. entry->cap_id, cap_name(entry->cap_id));
  2362. rc = msm_vidc_adjust_cap(inst, entry->cap_id, NULL, __func__);
  2363. if (rc)
  2364. return rc;
  2365. }
  2366. /* set all caps from caps_list */
  2367. list_for_each_entry_safe(entry, temp, &inst->caps_list, list) {
  2368. rc = msm_vidc_set_cap(inst, entry->cap_id, __func__);
  2369. if (rc)
  2370. return rc;
  2371. }
  2372. return rc;
  2373. }
  2374. int msm_vidc_set_header_mode(void *instance,
  2375. enum msm_vidc_inst_capability_type cap_id)
  2376. {
  2377. int rc = 0;
  2378. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  2379. int header_mode, prepend_sps_pps, hdr_metadata;
  2380. u32 hfi_value = 0;
  2381. struct msm_vidc_inst_capability *capability;
  2382. if (!inst || !inst->capabilities) {
  2383. d_vpr_e("%s: invalid params\n", __func__);
  2384. return -EINVAL;
  2385. }
  2386. capability = inst->capabilities;
  2387. header_mode = capability->cap[cap_id].value;
  2388. prepend_sps_pps = capability->cap[PREPEND_SPSPPS_TO_IDR].value;
  2389. hdr_metadata = capability->cap[META_SEQ_HDR_NAL].value;
  2390. /* prioritize PREPEND_SPSPPS_TO_IDR mode over other header modes */
  2391. if (prepend_sps_pps)
  2392. hfi_value = HFI_SEQ_HEADER_PREFIX_WITH_SYNC_FRAME;
  2393. else if (header_mode == V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME)
  2394. hfi_value = HFI_SEQ_HEADER_JOINED_WITH_1ST_FRAME;
  2395. else
  2396. hfi_value = HFI_SEQ_HEADER_SEPERATE_FRAME;
  2397. if (hdr_metadata)
  2398. hfi_value |= HFI_SEQ_HEADER_METADATA;
  2399. rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32_ENUM,
  2400. &hfi_value, sizeof(u32), __func__);
  2401. if (rc)
  2402. return rc;
  2403. return rc;
  2404. }
  2405. int msm_vidc_set_deblock_mode(void *instance,
  2406. enum msm_vidc_inst_capability_type cap_id)
  2407. {
  2408. int rc = 0;
  2409. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  2410. s32 alpha = 0, beta = 0;
  2411. u32 lf_mode, hfi_value = 0, lf_offset = 6;
  2412. struct msm_vidc_inst_capability *capability;
  2413. if (!inst || !inst->capabilities) {
  2414. d_vpr_e("%s: invalid params\n", __func__);
  2415. return -EINVAL;
  2416. }
  2417. capability = inst->capabilities;
  2418. rc = msm_vidc_v4l2_to_hfi_enum(inst, LF_MODE, &lf_mode);
  2419. if (rc)
  2420. return -EINVAL;
  2421. beta = inst->capabilities->cap[LF_BETA].value + lf_offset;
  2422. alpha = inst->capabilities->cap[LF_ALPHA].value + lf_offset;
  2423. hfi_value = (alpha << 16) | (beta << 8) | lf_mode;
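/*
 * Packed layout (derived from the expression above): bits [7:0] hold the
 * HFI loop filter mode, bits [15:8] the beta value and bits [23:16] the
 * alpha value, the latter two each biased by lf_offset (6).
 */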
  2424. rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_32_PACKED,
  2425. &hfi_value, sizeof(u32), __func__);
  2426. if (rc)
  2427. return rc;
  2428. return rc;
  2429. }
  2430. int msm_vidc_set_constant_quality(void *instance,
  2431. enum msm_vidc_inst_capability_type cap_id)
  2432. {
  2433. int rc = 0;
  2434. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  2435. u32 hfi_value = 0;
  2436. s32 rc_type = -1;
  2437. if (!inst || !inst->capabilities) {
  2438. d_vpr_e("%s: invalid params\n", __func__);
  2439. return -EINVAL;
  2440. }
  2441. if (msm_vidc_get_parent_value(inst, cap_id,
  2442. BITRATE_MODE, &rc_type, __func__))
  2443. return -EINVAL;
  2444. if (rc_type != HFI_RC_CQ)
  2445. return 0;
  2446. hfi_value = inst->capabilities->cap[cap_id].value;
  2447. rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32,
  2448. &hfi_value, sizeof(u32), __func__);
  2449. if (rc)
  2450. return rc;
  2451. return rc;
  2452. }
  2453. int msm_vidc_set_vbr_related_properties(void *instance,
  2454. enum msm_vidc_inst_capability_type cap_id)
  2455. {
  2456. int rc = 0;
  2457. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  2458. u32 hfi_value = 0;
  2459. s32 rc_type = -1;
  2460. if (!inst || !inst->capabilities) {
  2461. d_vpr_e("%s: invalid params\n", __func__);
  2462. return -EINVAL;
  2463. }
  2464. if (msm_vidc_get_parent_value(inst, cap_id,
  2465. BITRATE_MODE, &rc_type, __func__))
  2466. return -EINVAL;
  2467. if (rc_type != HFI_RC_VBR_CFR)
  2468. return 0;
  2469. hfi_value = inst->capabilities->cap[cap_id].value;
  2470. rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32,
  2471. &hfi_value, sizeof(u32), __func__);
  2472. if (rc)
  2473. return rc;
  2474. return rc;
  2475. }
  2476. int msm_vidc_set_cbr_related_properties(void *instance,
  2477. enum msm_vidc_inst_capability_type cap_id)
  2478. {
  2479. int rc = 0;
  2480. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  2481. u32 hfi_value = 0;
  2482. s32 rc_type = -1;
  2483. if (!inst || !inst->capabilities) {
  2484. d_vpr_e("%s: invalid params\n", __func__);
  2485. return -EINVAL;
  2486. }
  2487. if (msm_vidc_get_parent_value(inst, cap_id,
  2488. BITRATE_MODE, &rc_type, __func__))
  2489. return -EINVAL;
  2490. if (rc_type != HFI_RC_CBR_VFR &&
  2491. rc_type != HFI_RC_CBR_CFR)
  2492. return 0;
  2493. hfi_value = inst->capabilities->cap[cap_id].value;
  2494. rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32,
  2495. &hfi_value, sizeof(u32), __func__);
  2496. if (rc)
  2497. return rc;
  2498. return rc;
  2499. }
  2500. int msm_vidc_set_use_and_mark_ltr(void *instance,
  2501. enum msm_vidc_inst_capability_type cap_id)
  2502. {
  2503. int rc = 0;
  2504. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  2505. u32 hfi_value = 0;
  2506. if (!inst || !inst->capabilities) {
  2507. d_vpr_e("%s: invalid params\n", __func__);
  2508. return -EINVAL;
  2509. }
  2510. if (!inst->capabilities->cap[LTR_COUNT].value ||
  2511. (inst->capabilities->cap[cap_id].value ==
  2512. INVALID_DEFAULT_MARK_OR_USE_LTR)) {
  2513. i_vpr_h(inst,
  2514. "%s: LTR_COUNT: %d %s: %d, cap %s is not set\n",
  2515. __func__, inst->capabilities->cap[LTR_COUNT].value,
  2516. cap_name(cap_id),
  2517. inst->capabilities->cap[cap_id].value,
  2518. cap_name(cap_id));
  2519. return 0;
  2520. }
  2521. hfi_value = inst->capabilities->cap[cap_id].value;
  2522. rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32,
  2523. &hfi_value, sizeof(u32), __func__);
  2524. if (rc)
  2525. return rc;
  2526. return rc;
  2527. }
  2528. int msm_vidc_set_min_qp(void *instance,
  2529. enum msm_vidc_inst_capability_type cap_id)
  2530. {
  2531. int rc = 0;
  2532. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  2533. struct msm_vidc_inst_capability *capability;
  2534. s32 i_frame_qp = 0, p_frame_qp = 0, b_frame_qp = 0, min_qp_enable = 0;
  2535. u32 i_qp_enable = 0, p_qp_enable = 0, b_qp_enable = 0;
  2536. u32 client_qp_enable = 0, hfi_value = 0, offset = 0;
  2537. if (!inst || !inst->capabilities) {
  2538. d_vpr_e("%s: invalid params\n", __func__);
  2539. return -EINVAL;
  2540. }
  2541. capability = inst->capabilities;
  2542. if (capability->cap[MIN_FRAME_QP].flags & CAP_FLAG_CLIENT_SET)
  2543. min_qp_enable = 1;
  2544. if (min_qp_enable ||
  2545. (capability->cap[I_FRAME_MIN_QP].flags & CAP_FLAG_CLIENT_SET))
  2546. i_qp_enable = 1;
  2547. if (min_qp_enable ||
  2548. (capability->cap[P_FRAME_MIN_QP].flags & CAP_FLAG_CLIENT_SET))
  2549. p_qp_enable = 1;
  2550. if (min_qp_enable ||
  2551. (capability->cap[B_FRAME_MIN_QP].flags & CAP_FLAG_CLIENT_SET))
  2552. b_qp_enable = 1;
  2553. client_qp_enable = i_qp_enable | p_qp_enable << 1 | b_qp_enable << 2;
  2554. if (!client_qp_enable) {
  2555. i_vpr_h(inst,
  2556. "%s: client did not set min qp, cap %s is not set\n",
  2557. __func__, cap_name(cap_id));
  2558. return 0;
  2559. }
  2560. if (is_10bit_colorformat(capability->cap[PIX_FMTS].value))
  2561. offset = 12;
  2562. /*
  2563. * I_FRAME_MIN_QP, P_FRAME_MIN_QP, B_FRAME_MIN_QP,
2564. * MIN_FRAME_QP caps have MIN_QP_10BIT values as their defaults.
  2565. * Hence, if client sets either one among MIN_FRAME_QP
  2566. * and (I_FRAME_MIN_QP or P_FRAME_MIN_QP or B_FRAME_MIN_QP),
2567. * the max of the two caps yields the client-set value.
  2568. */
  2569. i_frame_qp = max(capability->cap[I_FRAME_MIN_QP].value,
  2570. capability->cap[MIN_FRAME_QP].value) + offset;
  2571. p_frame_qp = max(capability->cap[P_FRAME_MIN_QP].value,
  2572. capability->cap[MIN_FRAME_QP].value) + offset;
  2573. b_frame_qp = max(capability->cap[B_FRAME_MIN_QP].value,
  2574. capability->cap[MIN_FRAME_QP].value) + offset;
  2575. hfi_value = i_frame_qp | p_frame_qp << 8 | b_frame_qp << 16 |
  2576. client_qp_enable << 24;
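/*
 * Packed layout (derived from the expression above): byte 0 = I-frame min
 * QP, byte 1 = P-frame min QP, byte 2 = B-frame min QP, byte 3 = the
 * I/P/B enable bits. For example, min QP 10 for all frame types on an
 * 8-bit session with all three enables set packs to 0x070a0a0a.
 */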
  2577. rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_32_PACKED,
  2578. &hfi_value, sizeof(u32), __func__);
  2579. if (rc)
  2580. return rc;
  2581. return rc;
  2582. }
  2583. int msm_vidc_set_max_qp(void *instance,
  2584. enum msm_vidc_inst_capability_type cap_id)
  2585. {
  2586. int rc = 0;
  2587. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  2588. struct msm_vidc_inst_capability *capability;
  2589. s32 i_frame_qp = 0, p_frame_qp = 0, b_frame_qp = 0, max_qp_enable = 0;
  2590. u32 i_qp_enable = 0, p_qp_enable = 0, b_qp_enable = 0;
  2591. u32 client_qp_enable = 0, hfi_value = 0, offset = 0;
  2592. if (!inst || !inst->capabilities) {
  2593. d_vpr_e("%s: invalid params\n", __func__);
  2594. return -EINVAL;
  2595. }
  2596. capability = inst->capabilities;
  2597. if (capability->cap[MAX_FRAME_QP].flags & CAP_FLAG_CLIENT_SET)
  2598. max_qp_enable = 1;
  2599. if (max_qp_enable ||
  2600. (capability->cap[I_FRAME_MAX_QP].flags & CAP_FLAG_CLIENT_SET))
  2601. i_qp_enable = 1;
  2602. if (max_qp_enable ||
  2603. (capability->cap[P_FRAME_MAX_QP].flags & CAP_FLAG_CLIENT_SET))
  2604. p_qp_enable = 1;
  2605. if (max_qp_enable ||
  2606. (capability->cap[B_FRAME_MAX_QP].flags & CAP_FLAG_CLIENT_SET))
  2607. b_qp_enable = 1;
  2608. client_qp_enable = i_qp_enable | p_qp_enable << 1 | b_qp_enable << 2;
  2609. if (!client_qp_enable) {
  2610. i_vpr_h(inst,
  2611. "%s: client did not set max qp, cap %s is not set\n",
  2612. __func__, cap_name(cap_id));
  2613. return 0;
  2614. }
  2615. if (is_10bit_colorformat(capability->cap[PIX_FMTS].value))
  2616. offset = 12;
  2617. /*
  2618. * I_FRAME_MAX_QP, P_FRAME_MAX_QP, B_FRAME_MAX_QP,
2619. * MAX_FRAME_QP caps have MAX_QP values as their defaults.
  2620. * Hence, if client sets either one among MAX_FRAME_QP
  2621. * and (I_FRAME_MAX_QP or P_FRAME_MAX_QP or B_FRAME_MAX_QP),
2622. * the min of the two caps yields the client-set value.
  2623. */
  2624. i_frame_qp = min(capability->cap[I_FRAME_MAX_QP].value,
  2625. capability->cap[MAX_FRAME_QP].value) + offset;
  2626. p_frame_qp = min(capability->cap[P_FRAME_MAX_QP].value,
  2627. capability->cap[MAX_FRAME_QP].value) + offset;
  2628. b_frame_qp = min(capability->cap[B_FRAME_MAX_QP].value,
  2629. capability->cap[MAX_FRAME_QP].value) + offset;
  2630. hfi_value = i_frame_qp | p_frame_qp << 8 | b_frame_qp << 16 |
  2631. client_qp_enable << 24;
  2632. rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_32_PACKED,
  2633. &hfi_value, sizeof(u32), __func__);
  2634. if (rc)
  2635. return rc;
  2636. return rc;
  2637. }
int msm_vidc_set_frame_qp(void *instance,
	enum msm_vidc_inst_capability_type cap_id)
{
	int rc = 0;
	struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
	struct msm_vidc_inst_capability *capab;
	s32 i_frame_qp = 0, p_frame_qp = 0, b_frame_qp = 0;
	u32 i_qp_enable = 0, p_qp_enable = 0, b_qp_enable = 0;
	u32 client_qp_enable = 0, hfi_value = 0, offset = 0;
	s32 rc_type = -1;

	if (!inst || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	capab = inst->capabilities;

	if (msm_vidc_get_parent_value(inst, cap_id,
		BITRATE_MODE, &rc_type, __func__))
		return -EINVAL;

	if (inst->bufq[OUTPUT_PORT].vb2q->streaming) {
		if (rc_type != HFI_RC_OFF) {
			i_vpr_h(inst,
				"%s: dynamic qp not allowed for rc type %d\n",
				__func__, rc_type);
			return 0;
		}
	}

	if (rc_type == HFI_RC_OFF) {
		/* Mandatorily set for rc off case */
		i_qp_enable = p_qp_enable = b_qp_enable = 1;
	} else {
		/* Set only if client has set for NON rc off case */
		if (capab->cap[I_FRAME_QP].flags & CAP_FLAG_CLIENT_SET)
			i_qp_enable = 1;
		if (capab->cap[P_FRAME_QP].flags & CAP_FLAG_CLIENT_SET)
			p_qp_enable = 1;
		if (capab->cap[B_FRAME_QP].flags & CAP_FLAG_CLIENT_SET)
			b_qp_enable = 1;
	}

	client_qp_enable = i_qp_enable | p_qp_enable << 1 | b_qp_enable << 2;
	if (!client_qp_enable) {
		i_vpr_h(inst,
			"%s: client did not set frame qp, cap %s is not set\n",
			__func__, cap_name(cap_id));
		return 0;
	}

	if (is_10bit_colorformat(capab->cap[PIX_FMTS].value))
		offset = 12;

	i_frame_qp = capab->cap[I_FRAME_QP].value + offset;
	p_frame_qp = capab->cap[P_FRAME_QP].value + offset;
	b_frame_qp = capab->cap[B_FRAME_QP].value + offset;

	hfi_value = i_frame_qp | p_frame_qp << 8 | b_frame_qp << 16 |
		client_qp_enable << 24;

	rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_32_PACKED,
		&hfi_value, sizeof(u32), __func__);
	if (rc)
		return rc;

	return rc;
}

int msm_vidc_set_req_sync_frame(void *instance,
	enum msm_vidc_inst_capability_type cap_id)
{
	int rc = 0;
	struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
	s32 prepend_spspps;
	u32 hfi_value = 0;

	if (!inst || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	prepend_spspps = inst->capabilities->cap[PREPEND_SPSPPS_TO_IDR].value;
	if (prepend_spspps)
		hfi_value = HFI_SYNC_FRAME_REQUEST_WITH_PREFIX_SEQ_HDR;
	else
		hfi_value = HFI_SYNC_FRAME_REQUEST_WITHOUT_SEQ_HDR;

	rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32_ENUM,
		&hfi_value, sizeof(u32), __func__);
	if (rc)
		return rc;

	return rc;
}

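/*
 * Chroma QP index offset: a fixed offset mode is requested only when the
 * client explicitly set the control, otherwise firmware adapts the offset
 * itself. The value is re-based by 12 and packed twice, into bits [15:8]
 * and [23:16] (presumably the Cb and Cr offsets).
 */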
int msm_vidc_set_chroma_qp_index_offset(void *instance,
	enum msm_vidc_inst_capability_type cap_id)
{
	int rc = 0;
	struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
	u32 hfi_value = 0, chroma_qp_offset_mode = 0, chroma_qp = 0;
	u32 offset = 12;

	if (!inst || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (inst->capabilities->cap[cap_id].flags & CAP_FLAG_CLIENT_SET)
		chroma_qp_offset_mode = HFI_FIXED_CHROMAQP_OFFSET;
	else
		chroma_qp_offset_mode = HFI_ADAPTIVE_CHROMAQP_OFFSET;

	chroma_qp = inst->capabilities->cap[cap_id].value + offset;
	hfi_value = chroma_qp_offset_mode | chroma_qp << 8 | chroma_qp << 16;

	rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_32_PACKED,
		&hfi_value, sizeof(u32), __func__);
	if (rc)
		return rc;

	return rc;
}

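/*
 * For MB-based slicing the client value is programmed as-is for H.264,
 * while for HEVC it is divided by 4 (rounded up), presumably to convert
 * a 16x16 macroblock count into the 32x32 LCU units firmware expects.
 */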
int msm_vidc_set_slice_count(void *instance,
	enum msm_vidc_inst_capability_type cap_id)
{
	int rc = 0;
	struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
	s32 slice_mode = -1;
	u32 hfi_value = 0, set_cap_id = 0;

	if (!inst || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	slice_mode = inst->capabilities->cap[SLICE_MODE].value;
	if (slice_mode == V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE) {
		i_vpr_h(inst, "%s: slice mode is: %u, ignore setting to fw\n",
			__func__, slice_mode);
		return 0;
	}

	if (slice_mode == V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB) {
		hfi_value = (inst->codec == MSM_VIDC_HEVC) ?
			((inst->capabilities->cap[SLICE_MAX_MB].value + 3) / 4) :
			inst->capabilities->cap[SLICE_MAX_MB].value;
		set_cap_id = SLICE_MAX_MB;
	} else if (slice_mode == V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES) {
		hfi_value = inst->capabilities->cap[SLICE_MAX_BYTES].value;
		set_cap_id = SLICE_MAX_BYTES;
	}

	rc = msm_vidc_packetize_control(inst, set_cap_id, HFI_PAYLOAD_U32,
		&hfi_value, sizeof(u32), __func__);
	if (rc)
		return rc;

	return rc;
}

int msm_vidc_set_nal_length(void *instance,
	enum msm_vidc_inst_capability_type cap_id)
{
	int rc = 0;
	struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
	u32 hfi_value = HFI_NAL_LENGTH_STARTCODES;

	if (!inst || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (!inst->capabilities->cap[WITHOUT_STARTCODE].value) {
		hfi_value = HFI_NAL_LENGTH_STARTCODES;
	} else {
		rc = msm_vidc_v4l2_to_hfi_enum(inst, NAL_LENGTH_FIELD, &hfi_value);
		if (rc)
			return -EINVAL;
	}

	rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32_ENUM,
		&hfi_value, sizeof(u32), __func__);
	if (rc)
		return rc;

	return rc;
}

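/*
 * Layer type can only be programmed before the output queue starts
 * streaming; at runtime only the layer count may change, and a dynamic
 * change is rejected for hier-B. Firmware counts the base layer, so
 * ENH_LAYER_COUNT is sent as value + 1.
 */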
int msm_vidc_set_layer_count_and_type(void *instance,
	enum msm_vidc_inst_capability_type cap_id)
{
	int rc = 0;
	struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
	u32 hfi_layer_count, hfi_layer_type = 0;

	if (!inst || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (!inst->bufq[OUTPUT_PORT].vb2q->streaming) {
		/* set layer type */
		hfi_layer_type = inst->hfi_layer_type;
		cap_id = LAYER_TYPE;
		rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32_ENUM,
			&hfi_layer_type, sizeof(u32), __func__);
		if (rc)
			goto exit;
	} else {
		if (inst->hfi_layer_type == HFI_HIER_B) {
			i_vpr_l(inst,
				"%s: HB dyn layers change is not supported\n",
				__func__);
			return 0;
		}
	}

	/* set layer count */
	cap_id = ENH_LAYER_COUNT;
	/* hfi baselayer starts from 1 */
	hfi_layer_count = inst->capabilities->cap[ENH_LAYER_COUNT].value + 1;
	rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32,
		&hfi_layer_count, sizeof(u32), __func__);
	if (rc)
		goto exit;

exit:
	return rc;
}

int msm_vidc_set_gop_size(void *instance,
	enum msm_vidc_inst_capability_type cap_id)
{
	int rc = 0;
	struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
	u32 hfi_value;

	if (!inst || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (inst->bufq[OUTPUT_PORT].vb2q->streaming) {
		if (inst->hfi_layer_type == HFI_HIER_B) {
			i_vpr_l(inst,
				"%s: HB dyn GOP setting is not supported\n",
				__func__);
			return 0;
		}
	}

	hfi_value = inst->capabilities->cap[GOP_SIZE].value;

	rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32,
		&hfi_value, sizeof(u32), __func__);
	if (rc)
		return rc;

	return rc;
}

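/*
 * Bitrate programming: the cumulative BIT_RATE cap is sent when the
 * client set an overall bitrate, when rate control is not CBR, or when
 * not every enabled layer has a client-set bitrate; otherwise the
 * per-layer L0_BR..L5_BR caps are sent individually.
 */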
int msm_vidc_set_bitrate(void *instance,
	enum msm_vidc_inst_capability_type cap_id)
{
	int rc = 0, i;
	struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
	u32 hfi_value = 0;
	s32 rc_type = -1, enh_layer_count = -1;
	u32 layer_br_caps[6] = {L0_BR, L1_BR, L2_BR, L3_BR, L4_BR, L5_BR};

	if (!inst || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	/* set Total Bitrate */
	if (inst->capabilities->cap[BIT_RATE].flags & CAP_FLAG_CLIENT_SET)
		goto set_total_bitrate;

	/*
	 * During runtime, if BIT_RATE cap CLIENT_SET flag is not set,
	 * then this function will be called due to change in ENH_LAYER_COUNT.
	 * In this case, client did not change bitrate, hence, no need to set
	 * to fw.
	 */
	if (inst->bufq[OUTPUT_PORT].vb2q->streaming)
		return 0;

	if (msm_vidc_get_parent_value(inst, BIT_RATE,
		BITRATE_MODE, &rc_type, __func__))
		return -EINVAL;

	if (rc_type != HFI_RC_CBR_CFR && rc_type != HFI_RC_CBR_VFR) {
		i_vpr_h(inst, "%s: set total bitrate for non CBR rc type\n",
			__func__);
		goto set_total_bitrate;
	}

	if (msm_vidc_get_parent_value(inst, BIT_RATE,
		ENH_LAYER_COUNT, &enh_layer_count, __func__))
		return -EINVAL;

	/*
	 * ENH_LAYER_COUNT cap max is positive only if
	 * layer encoding is enabled during streamon.
	 */
	if (inst->capabilities->cap[ENH_LAYER_COUNT].max) {
		if (!msm_vidc_check_all_layer_bitrate_set(inst))
			goto set_total_bitrate;

		/* set Layer Bitrate */
		for (i = 0; i <= enh_layer_count; i++) {
			if (i >= ARRAY_SIZE(layer_br_caps))
				break;
			cap_id = layer_br_caps[i];
			hfi_value = inst->capabilities->cap[cap_id].value;
			rc = msm_vidc_packetize_control(inst, cap_id,
				HFI_PAYLOAD_U32, &hfi_value,
				sizeof(u32), __func__);
			if (rc)
				return rc;
		}
		goto exit;
	}

set_total_bitrate:
	hfi_value = inst->capabilities->cap[BIT_RATE].value;
	rc = msm_vidc_packetize_control(inst, BIT_RATE, HFI_PAYLOAD_U32,
		&hfi_value, sizeof(u32), __func__);
	if (rc)
		return rc;
exit:
	return rc;
}

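/*
 * Runtime layer-bitrate update: applied only while streaming, when the
 * client never set a total bitrate and every enabled layer has a
 * client-set bitrate. For CBR rate control the changed layer cap is sent
 * as-is; for other rc types the cumulative BIT_RATE property is sent
 * instead.
 */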
int msm_vidc_set_dynamic_layer_bitrate(void *instance,
	enum msm_vidc_inst_capability_type cap_id)
{
	int rc = 0;
	struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
	u32 hfi_value = 0;

	if (!inst || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (!inst->bufq[OUTPUT_PORT].vb2q->streaming)
		return 0;

	/* set Total Bitrate */
	if (inst->capabilities->cap[BIT_RATE].flags & CAP_FLAG_CLIENT_SET) {
		i_vpr_h(inst,
			"%s: Total bitrate is set, ignore layer bitrate\n",
			__func__);
		return 0;
	}

	/*
	 * ENH_LAYER_COUNT cap max is positive only if
	 * layer encoding is enabled during streamon.
	 */
	if (!inst->capabilities->cap[ENH_LAYER_COUNT].max ||
		!msm_vidc_check_all_layer_bitrate_set(inst)) {
		i_vpr_h(inst,
			"%s: invalid layer bitrate, ignore setting to fw\n",
			__func__);
		return 0;
	}

	if (inst->hfi_rc_type == HFI_RC_CBR_CFR ||
		inst->hfi_rc_type == HFI_RC_CBR_VFR) {
		/* set layer bitrate for the client set layer */
		hfi_value = inst->capabilities->cap[cap_id].value;
		rc = msm_vidc_packetize_control(inst, cap_id,
			HFI_PAYLOAD_U32, &hfi_value,
			sizeof(u32), __func__);
		if (rc)
			return rc;
	} else {
		/*
		 * All layer bitrates set for unsupported rc type.
		 * Hence accept layer bitrates, but set total bitrate prop
		 * with cumulative bitrate.
		 */
		hfi_value = inst->capabilities->cap[BIT_RATE].value;
		rc = msm_vidc_packetize_control(inst, BIT_RATE, HFI_PAYLOAD_U32,
			&hfi_value, sizeof(u32), __func__);
		if (rc)
			return rc;
	}

	return rc;
}

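/*
 * The priority sent to firmware combines the client control with the
 * instance priority_level (value * 2 + priority_level), presumably so the
 * driver can order sessions that share the same client priority.
 */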
int msm_vidc_set_session_priority(void *instance,
	enum msm_vidc_inst_capability_type cap_id)
{
	int rc = 0;
	u32 hfi_value = 0;
	struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;

	if (!inst || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	hfi_value = (inst->capabilities->cap[cap_id].value * 2) + inst->priority_level;

	rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32,
		&hfi_value, sizeof(u32), __func__);
	if (rc)
		return rc;

	inst->firmware_priority = hfi_value;

	return rc;
}

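/*
 * HFLIP/VFLIP are combined into one HFI flip bitmask. When flip changes
 * while the encoder is streaming, a sync frame is requested first,
 * presumably so the new orientation takes effect on an intra frame.
 */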
int msm_vidc_set_flip(void *instance,
	enum msm_vidc_inst_capability_type cap_id)
{
	int rc = 0;
	u32 hflip, vflip, hfi_value = HFI_DISABLE_FLIP;
	struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;

	if (!inst || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	hflip = inst->capabilities->cap[HFLIP].value;
	vflip = inst->capabilities->cap[VFLIP].value;
	if (hflip)
		hfi_value |= HFI_HORIZONTAL_FLIP;
	if (vflip)
		hfi_value |= HFI_VERTICAL_FLIP;

	if (inst->bufq[OUTPUT_PORT].vb2q->streaming) {
		if (hfi_value != HFI_DISABLE_FLIP) {
			rc = msm_vidc_set_req_sync_frame(inst,
				REQUEST_I_FRAME);
			if (rc)
				return rc;
		}
	}

	rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32_ENUM,
		&hfi_value, sizeof(u32), __func__);
	if (rc)
		return rc;

	return rc;
}

int msm_vidc_set_rotation(void *instance,
	enum msm_vidc_inst_capability_type cap_id)
{
	int rc = 0;
	struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
	u32 hfi_value;

	if (!inst || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	rc = msm_vidc_v4l2_to_hfi_enum(inst, cap_id, &hfi_value);
	if (rc)
		return -EINVAL;

	rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32,
		&hfi_value, sizeof(u32), __func__);
	if (rc)
		return rc;

	return rc;
}

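/*
 * External blur resolution is packed with the width in the upper 16 bits
 * and the height in the lower 16 bits. A zero payload (blur disabled) is
 * sent when the requested size exceeds or exactly matches the crop
 * resolution.
 */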
int msm_vidc_set_blur_resolution(void *instance,
	enum msm_vidc_inst_capability_type cap_id)
{
	int rc = 0;
	struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
	s32 blur_type = -1;
	u32 hfi_value, blur_width, blur_height;

	if (!inst || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (msm_vidc_get_parent_value(inst, cap_id,
		BLUR_TYPES, &blur_type, __func__))
		return -EINVAL;

	if (blur_type != VIDC_BLUR_EXTERNAL)
		return 0;

	hfi_value = inst->capabilities->cap[cap_id].value;
	blur_width = (hfi_value & 0xFFFF0000) >> 16;
	blur_height = hfi_value & 0xFFFF;

	if (blur_width > inst->crop.width ||
		blur_height > inst->crop.height) {
		i_vpr_e(inst,
			"%s: blur wxh: %dx%d exceeds crop wxh: %dx%d\n",
			__func__, blur_width, blur_height,
			inst->crop.width, inst->crop.height);
		hfi_value = 0;
	}

	if (blur_width == inst->crop.width &&
		blur_height == inst->crop.height) {
		i_vpr_e(inst,
			"%s: blur wxh: %dx%d is equal to crop wxh: %dx%d\n",
			__func__, blur_width, blur_height,
			inst->crop.width, inst->crop.height);
		hfi_value = 0;
	}

	rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_32_PACKED,
		&hfi_value, sizeof(u32), __func__);
	if (rc)
		return rc;

	return rc;
}

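/*
 * Helper that pushes one hard-coded CSC coefficient table (matrix, bias
 * or limit) to firmware as an s32 array property on the bitstream port.
 */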
static int msm_venc_set_csc_coeff(struct msm_vidc_inst *inst,
	const char *prop_name, u32 hfi_id, void *payload,
	u32 payload_size, u32 row_count, u32 column_count)
{
	int rc = 0;

	i_vpr_h(inst,
		"set cap: name: %24s, hard coded %dx%d matrix array\n",
		prop_name, row_count, column_count);
	rc = venus_hfi_session_property(inst,
		hfi_id,
		HFI_HOST_FLAGS_NONE,
		HFI_PORT_BITSTREAM,
		HFI_PAYLOAD_S32_ARRAY,
		payload,
		payload_size);
	if (rc) {
		i_vpr_e(inst,
			"%s: failed to set %s to fw\n",
			__func__, prop_name);
	}

	return rc;
}

int msm_vidc_set_csc_custom_matrix(void *instance,
	enum msm_vidc_inst_capability_type cap_id)
{
	int rc = 0;
	int i;
	struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
	struct msm_vidc_core *core;
	struct msm_vidc_csc_coeff *csc_coeff;
	s32 matrix_payload[MAX_MATRIX_COEFFS + 2];
	s32 csc_bias_payload[MAX_BIAS_COEFFS + 2];
	s32 csc_limit_payload[MAX_LIMIT_COEFFS + 2];

	if (!inst || !inst->capabilities || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;
	if (!core->platform) {
		d_vpr_e("%s: invalid core platform\n", __func__);
		return -EINVAL;
	}
	csc_coeff = &core->platform->data.csc_data;

	if (!inst->capabilities->cap[cap_id].value ||
		!inst->capabilities->cap[CSC].value) {
		i_vpr_h(inst,
			"%s: ignored as custom matrix %u, csc %u\n",
			__func__, inst->capabilities->cap[cap_id].value,
			inst->capabilities->cap[CSC].value);
		return 0;
	}

	/*
	 * first 2 u32's of payload in each case are for
	 * row and column count, next remaining u32's are
	 * for the actual payload values.
	 */

	/* set custom matrix */
	matrix_payload[0] = 3;
	matrix_payload[1] = 3;
	for (i = 0; i < MAX_MATRIX_COEFFS; i++) {
		if ((i + 2) >= ARRAY_SIZE(matrix_payload))
			break;
		matrix_payload[i + 2] =
			csc_coeff->vpe_csc_custom_matrix_coeff[i];
	}

	rc = msm_venc_set_csc_coeff(inst, "CSC_CUSTOM_MATRIX",
		HFI_PROP_CSC_MATRIX, &matrix_payload[0],
		ARRAY_SIZE(matrix_payload) * sizeof(s32),
		matrix_payload[0], matrix_payload[1]);
	if (rc)
		return rc;

	/* set csc bias */
	csc_bias_payload[0] = 1;
	csc_bias_payload[1] = 3;
	for (i = 0; i < MAX_BIAS_COEFFS; i++) {
		if ((i + 2) >= ARRAY_SIZE(csc_bias_payload))
			break;
		csc_bias_payload[i + 2] =
			csc_coeff->vpe_csc_custom_bias_coeff[i];
	}

	rc = msm_venc_set_csc_coeff(inst, "CSC_BIAS",
		HFI_PROP_CSC_BIAS, &csc_bias_payload[0],
		ARRAY_SIZE(csc_bias_payload) * sizeof(s32),
		csc_bias_payload[0], csc_bias_payload[1]);
	if (rc)
		return rc;

	/* set csc limit */
	csc_limit_payload[0] = 1;
	csc_limit_payload[1] = 6;
	for (i = 0; i < MAX_LIMIT_COEFFS; i++) {
		if ((i + 2) >= ARRAY_SIZE(csc_limit_payload))
			break;
		csc_limit_payload[i + 2] =
			csc_coeff->vpe_csc_custom_limit_coeff[i];
	}

	rc = msm_venc_set_csc_coeff(inst, "CSC_LIMIT",
		HFI_PROP_CSC_LIMIT, &csc_limit_payload[0],
		ARRAY_SIZE(csc_limit_payload) * sizeof(s32),
		csc_limit_payload[0], csc_limit_payload[1]);
	if (rc)
		return rc;

	return rc;
}

int msm_vidc_set_level(void *instance,
	enum msm_vidc_inst_capability_type cap_id)
{
	int rc = 0;
	struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
	u32 hfi_value = 0;

	if (!inst || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	hfi_value = inst->capabilities->cap[cap_id].value;
	if (!(inst->capabilities->cap[cap_id].flags & CAP_FLAG_CLIENT_SET))
		hfi_value = HFI_LEVEL_NONE;

	rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32_ENUM,
		&hfi_value, sizeof(u32), __func__);
	if (rc)
		return rc;

	return rc;
}

int msm_vidc_set_q16(void *instance,
	enum msm_vidc_inst_capability_type cap_id)
{
	int rc = 0;
	struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
	u32 hfi_value = 0;

	if (!inst || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	hfi_value = inst->capabilities->cap[cap_id].value;

	rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_Q16,
		&hfi_value, sizeof(u32), __func__);
	if (rc)
		return rc;

	return rc;
}

int msm_vidc_set_u32(void *instance,
	enum msm_vidc_inst_capability_type cap_id)
{
	int rc = 0;
	struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
	u32 hfi_value;

	if (!inst || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (inst->capabilities->cap[cap_id].flags & CAP_FLAG_MENU) {
		rc = msm_vidc_v4l2_menu_to_hfi(inst, cap_id, &hfi_value);
		if (rc)
			return -EINVAL;
	} else {
		hfi_value = inst->capabilities->cap[cap_id].value;
	}

	rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32,
		&hfi_value, sizeof(u32), __func__);
	if (rc)
		return rc;

	return rc;
}

int msm_vidc_set_u32_enum(void *instance,
	enum msm_vidc_inst_capability_type cap_id)
{
	int rc = 0;
	struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
	u32 hfi_value;

	if (!inst || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	rc = msm_vidc_v4l2_to_hfi_enum(inst, cap_id, &hfi_value);
	if (rc)
		return -EINVAL;

	rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32_ENUM,
		&hfi_value, sizeof(u32), __func__);
	if (rc)
		return rc;

	return rc;
}

int msm_vidc_set_s32(void *instance,
	enum msm_vidc_inst_capability_type cap_id)
{
	int rc = 0;
	struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
	s32 hfi_value = 0;

	if (!inst || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	hfi_value = inst->capabilities->cap[cap_id].value;

	rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_S32,
		&hfi_value, sizeof(s32), __func__);
	if (rc)
		return rc;

	return rc;
}

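/*
 * Translate a V4L2 menu control value into the u32 firmware expects;
 * unknown values fall back to a logged default instead of failing.
 */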
int msm_vidc_v4l2_menu_to_hfi(struct msm_vidc_inst *inst,
	enum msm_vidc_inst_capability_type cap_id, u32 *value)
{
	struct msm_vidc_inst_capability *capability = inst->capabilities;

	switch (capability->cap[cap_id].v4l2_id) {
	case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
		switch (capability->cap[cap_id].value) {
		case V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC:
			*value = 1;
			break;
		case V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC:
			*value = 0;
			break;
		default:
			*value = 1;
			goto set_default;
		}
		return 0;
	default:
		i_vpr_e(inst,
			"%s: mapping not specified for ctrl_id: %#x\n",
			__func__, capability->cap[cap_id].v4l2_id);
		return -EINVAL;
	}

set_default:
	i_vpr_e(inst,
		"%s: invalid value %d for ctrl id: %#x. Set default: %u\n",
		__func__, capability->cap[cap_id].value,
		capability->cap[cap_id].v4l2_id, *value);
	return 0;
}

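/*
 * Translate V4L2 enum-style control values into HFI enums. Profile,
 * level, tier and blur-type controls are passed through as stored; the
 * rest are mapped explicitly, with a logged default for out-of-range
 * values.
 */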
int msm_vidc_v4l2_to_hfi_enum(struct msm_vidc_inst *inst,
	enum msm_vidc_inst_capability_type cap_id, u32 *value)
{
	struct msm_vidc_inst_capability *capability = inst->capabilities;

	switch (capability->cap[cap_id].v4l2_id) {
	case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
		*value = inst->hfi_rc_type;
		return 0;
	case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:
	case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
	case V4L2_CID_MPEG_VIDEO_VP9_PROFILE:
	case V4L2_CID_MPEG_VIDEO_AV1_PROFILE:
	case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL:
	case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
	case V4L2_CID_MPEG_VIDEO_AV1_LEVEL:
	case V4L2_CID_MPEG_VIDEO_HEVC_TIER:
	case V4L2_CID_MPEG_VIDEO_AV1_TIER:
	case V4L2_CID_MPEG_VIDC_VIDEO_BLUR_TYPES:
		*value = capability->cap[cap_id].value;
		return 0;
	case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_TYPE:
		switch (capability->cap[cap_id].value) {
		case V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_B:
			*value = HFI_HIER_B;
			break;
		case V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_P:
			//TODO (AS): check if this is right mapping
			*value = HFI_HIER_P_SLIDING_WINDOW;
			break;
		default:
			*value = HFI_HIER_P_SLIDING_WINDOW;
			goto set_default;
		}
		return 0;
	case V4L2_CID_ROTATE:
		switch (capability->cap[cap_id].value) {
		case 0:
			*value = HFI_ROTATION_NONE;
			break;
		case 90:
			*value = HFI_ROTATION_90;
			break;
		case 180:
			*value = HFI_ROTATION_180;
			break;
		case 270:
			*value = HFI_ROTATION_270;
			break;
		default:
			*value = HFI_ROTATION_NONE;
			goto set_default;
		}
		return 0;
	case V4L2_CID_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE:
		switch (capability->cap[cap_id].value) {
		case V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_ENABLED:
			*value = HFI_DEBLOCK_ALL_BOUNDARY;
			break;
		case V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_DISABLED:
			*value = HFI_DEBLOCK_DISABLE;
			break;
		case DB_HEVC_DISABLE_SLICE_BOUNDARY:
			*value = HFI_DEBLOCK_DISABLE_AT_SLICE_BOUNDARY;
			break;
		default:
			*value = HFI_DEBLOCK_ALL_BOUNDARY;
			goto set_default;
		}
		return 0;
	case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
		switch (capability->cap[cap_id].value) {
		case V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED:
			*value = HFI_DEBLOCK_ALL_BOUNDARY;
			break;
		case V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED:
			*value = HFI_DEBLOCK_DISABLE;
			break;
		case DB_H264_DISABLE_SLICE_BOUNDARY:
			*value = HFI_DEBLOCK_DISABLE_AT_SLICE_BOUNDARY;
			break;
		default:
			*value = HFI_DEBLOCK_ALL_BOUNDARY;
			goto set_default;
		}
		return 0;
	case V4L2_CID_MPEG_VIDEO_HEVC_SIZE_OF_LENGTH_FIELD:
		switch (capability->cap[cap_id].value) {
		case V4L2_MPEG_VIDEO_HEVC_SIZE_4:
			*value = HFI_NAL_LENGTH_SIZE_4;
			break;
		default:
			*value = HFI_NAL_LENGTH_STARTCODES;
			goto set_default;
		}
		return 0;
	default:
		i_vpr_e(inst,
			"%s: mapping not specified for ctrl_id: %#x\n",
			__func__, capability->cap[cap_id].v4l2_id);
		return -EINVAL;
	}

set_default:
	i_vpr_e(inst,
		"%s: invalid value %d for ctrl id: %#x. Set default: %u\n",
		__func__, capability->cap[cap_id].value,
		capability->cap[cap_id].v4l2_id, *value);
	return 0;
}

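/*
 * Stage and pipe are decided by the platform session ops (decide_work_mode
 * and decide_work_route) and then programmed to firmware from the
 * corresponding STAGE and PIPE caps.
 */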
int msm_vidc_set_stage(void *instance,
	enum msm_vidc_inst_capability_type cap_id)
{
	int rc = 0;
	u32 stage = 0;
	struct msm_vidc_core *core;
	struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;

	if (!inst || !inst->capabilities || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;

	rc = call_session_op(core, decide_work_mode, inst);
	if (rc) {
		i_vpr_e(inst, "%s: decide_work_mode failed\n", __func__);
		return -EINVAL;
	}

	stage = inst->capabilities->cap[STAGE].value;

	rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32,
		&stage, sizeof(u32), __func__);
	if (rc)
		return rc;

	return rc;
}

int msm_vidc_set_pipe(void *instance,
	enum msm_vidc_inst_capability_type cap_id)
{
	int rc = 0;
	u32 pipe;
	struct msm_vidc_core *core;
	struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;

	if (!inst || !inst->capabilities || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;

	rc = call_session_op(core, decide_work_route, inst);
	if (rc) {
		i_vpr_e(inst, "%s: decide_work_route failed\n",
			__func__);
		return -EINVAL;
	}

	pipe = inst->capabilities->cap[PIPE].value;
	rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32,
		&pipe, sizeof(u32), __func__);
	if (rc)
		return rc;

	return rc;
}