msm_vidc_driver.c 137 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
7527852795280528152825283528452855286528752885289529052915292529352945295529652975298529953005301530253035304530553065307530853095310531153125313531453155316531753185319532053215322532353245325532653275328532953305331533253335334
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/iommu.h>
  6. #include <linux/workqueue.h>
  7. #include <media/v4l2_vidc_extensions.h>
  8. #include "msm_media_info.h"
  9. #include "msm_vidc_driver.h"
  10. #include "msm_vidc_platform.h"
  11. #include "msm_vidc_internal.h"
  12. #include "msm_vidc_control.h"
  13. #include "msm_vidc_memory.h"
  14. #include "msm_vidc_power.h"
  15. #include "msm_vidc_debug.h"
  16. #include "msm_vidc_power.h"
  17. #include "msm_vidc.h"
  18. #include "msm_vdec.h"
  19. #include "msm_venc.h"
  20. #include "venus_hfi.h"
  21. #include "venus_hfi_response.h"
  22. #include "hfi_packet.h"
  23. extern struct msm_vidc_core *g_core;
  24. #define is_odd(val) ((val) % 2 == 1)
  25. #define in_range(val, min, max) (((min) <= (val)) && ((val) <= (max)))
  26. #define COUNT_BITS(a, out) { \
  27. while ((a) >= 1) { \
  28. (out) += (a) & (1); \
  29. (a) >>= (1); \
  30. } \
  31. }
  32. #define SSR_TYPE 0x0000000F
  33. #define SSR_TYPE_SHIFT 0
  34. #define SSR_SUB_CLIENT_ID 0x000000F0
  35. #define SSR_SUB_CLIENT_ID_SHIFT 4
  36. #define SSR_ADDR_ID 0xFFFFFFFF00000000
  37. #define SSR_ADDR_SHIFT 32
/* Maps an instance-capability enum value to its printable name. */
struct msm_vidc_cap_name {
	enum msm_vidc_inst_capability_type cap;
	char *name;
};

/*
 * Name table for cap_name(). Entries are expected to be listed in enum
 * order so that cap_name_arr[cap].cap == cap; cap_name() re-checks that
 * invariant at lookup time and falls back to "UNKNOWN CAP" on mismatch.
 */
static const struct msm_vidc_cap_name cap_name_arr[] = {
	{INST_CAP_NONE,                  "INST_CAP_NONE"               },
	{FRAME_WIDTH,                    "FRAME_WIDTH"                 },
	{LOSSLESS_FRAME_WIDTH,           "LOSSLESS_FRAME_WIDTH"        },
	{SECURE_FRAME_WIDTH,             "SECURE_FRAME_WIDTH"          },
	{FRAME_HEIGHT,                   "FRAME_HEIGHT"                },
	{LOSSLESS_FRAME_HEIGHT,          "LOSSLESS_FRAME_HEIGHT"       },
	{SECURE_FRAME_HEIGHT,            "SECURE_FRAME_HEIGHT"         },
	{PIX_FMTS,                       "PIX_FMTS"                    },
	{MIN_BUFFERS_INPUT,              "MIN_BUFFERS_INPUT"           },
	{MIN_BUFFERS_OUTPUT,             "MIN_BUFFERS_OUTPUT"          },
	{MBPF,                           "MBPF"                        },
	{LOSSLESS_MBPF,                  "LOSSLESS_MBPF"               },
	{BATCH_MBPF,                     "BATCH_MBPF"                  },
	{BATCH_FPS,                      "BATCH_FPS"                   },
	{SECURE_MBPF,                    "SECURE_MBPF"                 },
	{MBPS,                           "MBPS"                        },
	{POWER_SAVE_MBPS,                "POWER_SAVE_MBPS"             },
	{FRAME_RATE,                     "FRAME_RATE"                  },
	{OPERATING_RATE,                 "OPERATING_RATE"              },
	{SCALE_FACTOR,                   "SCALE_FACTOR"                },
	{MB_CYCLES_VSP,                  "MB_CYCLES_VSP"               },
	{MB_CYCLES_VPP,                  "MB_CYCLES_VPP"               },
	{MB_CYCLES_LP,                   "MB_CYCLES_LP"                },
	{MB_CYCLES_FW,                   "MB_CYCLES_FW"                },
	{MB_CYCLES_FW_VPP,               "MB_CYCLES_FW_VPP"            },
	{SECURE_MODE,                    "SECURE_MODE"                 },
	{HFLIP,                          "HFLIP"                       },
	{VFLIP,                          "VFLIP"                       },
	{ROTATION,                       "ROTATION"                    },
	{SUPER_FRAME,                    "SUPER_FRAME"                 },
	{SLICE_INTERFACE,                "SLICE_INTERFACE"             },
	{HEADER_MODE,                    "HEADER_MODE"                 },
	{PREPEND_SPSPPS_TO_IDR,          "PREPEND_SPSPPS_TO_IDR"       },
	{META_SEQ_HDR_NAL,               "META_SEQ_HDR_NAL"            },
	{WITHOUT_STARTCODE,              "WITHOUT_STARTCODE"           },
	{NAL_LENGTH_FIELD,               "NAL_LENGTH_FIELD"            },
	{REQUEST_I_FRAME,                "REQUEST_I_FRAME"             },
	{BIT_RATE,                       "BIT_RATE"                    },
	{BITRATE_MODE,                   "BITRATE_MODE"                },
	{LOSSLESS,                       "LOSSLESS"                    },
	{FRAME_SKIP_MODE,                "FRAME_SKIP_MODE"             },
	{FRAME_RC_ENABLE,                "FRAME_RC_ENABLE"             },
	{CONSTANT_QUALITY,               "CONSTANT_QUALITY"            },
	{GOP_SIZE,                       "GOP_SIZE"                    },
	{GOP_CLOSURE,                    "GOP_CLOSURE"                 },
	{B_FRAME,                        "B_FRAME"                     },
	{BLUR_TYPES,                     "BLUR_TYPES"                  },
	{BLUR_RESOLUTION,                "BLUR_RESOLUTION"             },
	{CSC,                            "CSC"                         },
	{CSC_CUSTOM_MATRIX,              "CSC_CUSTOM_MATRIX"           },
	{GRID,                           "GRID"                        },
	{LOWLATENCY_MODE,                "LOWLATENCY_MODE"             },
	{LTR_COUNT,                      "LTR_COUNT"                   },
	{USE_LTR,                        "USE_LTR"                     },
	{MARK_LTR,                       "MARK_LTR"                    },
	{BASELAYER_PRIORITY,             "BASELAYER_PRIORITY"          },
	{IR_RANDOM,                      "IR_RANDOM"                   },
	{AU_DELIMITER,                   "AU_DELIMITER"                },
	{TIME_DELTA_BASED_RC,            "TIME_DELTA_BASED_RC"         },
	{CONTENT_ADAPTIVE_CODING,        "CONTENT_ADAPTIVE_CODING"     },
	{BITRATE_BOOST,                  "BITRATE_BOOST"               },
	{MIN_QUALITY,                    "MIN_QUALITY"                 },
	{VBV_DELAY,                      "VBV_DELAY"                   },
	{PEAK_BITRATE,                   "PEAK_BITRATE"                },
	{MIN_FRAME_QP,                   "MIN_FRAME_QP"                },
	{I_FRAME_MIN_QP,                 "I_FRAME_MIN_QP"              },
	{P_FRAME_MIN_QP,                 "P_FRAME_MIN_QP"              },
	{B_FRAME_MIN_QP,                 "B_FRAME_MIN_QP"              },
	{MAX_FRAME_QP,                   "MAX_FRAME_QP"                },
	{I_FRAME_MAX_QP,                 "I_FRAME_MAX_QP"              },
	{P_FRAME_MAX_QP,                 "P_FRAME_MAX_QP"              },
	{B_FRAME_MAX_QP,                 "B_FRAME_MAX_QP"              },
	{I_FRAME_QP,                     "I_FRAME_QP"                  },
	{P_FRAME_QP,                     "P_FRAME_QP"                  },
	{B_FRAME_QP,                     "B_FRAME_QP"                  },
	{LAYER_TYPE,                     "LAYER_TYPE"                  },
	{LAYER_ENABLE,                   "LAYER_ENABLE"                },
	{ENH_LAYER_COUNT,                "ENH_LAYER_COUNT"             },
	{L0_BR,                          "L0_BR"                       },
	{L1_BR,                          "L1_BR"                       },
	{L2_BR,                          "L2_BR"                       },
	{L3_BR,                          "L3_BR"                       },
	{L4_BR,                          "L4_BR"                       },
	{L5_BR,                          "L5_BR"                       },
	{ENTROPY_MODE,                   "ENTROPY_MODE"                },
	{PROFILE,                        "PROFILE"                     },
	{LEVEL,                          "LEVEL"                       },
	{HEVC_TIER,                      "HEVC_TIER"                   },
	{LF_MODE,                        "LF_MODE"                     },
	{LF_ALPHA,                       "LF_ALPHA"                    },
	{LF_BETA,                        "LF_BETA"                     },
	{SLICE_MODE,                     "SLICE_MODE"                  },
	{SLICE_MAX_BYTES,                "SLICE_MAX_BYTES"             },
	{SLICE_MAX_MB,                   "SLICE_MAX_MB"                },
	{MB_RC,                          "MB_RC"                       },
	{TRANSFORM_8X8,                  "TRANSFORM_8X8"               },
	{CHROMA_QP_INDEX_OFFSET,         "CHROMA_QP_INDEX_OFFSET"      },
	{DISPLAY_DELAY_ENABLE,           "DISPLAY_DELAY_ENABLE"        },
	{DISPLAY_DELAY,                  "DISPLAY_DELAY"               },
	{CONCEAL_COLOR_8BIT,             "CONCEAL_COLOR_8BIT"          },
	{CONCEAL_COLOR_10BIT,            "CONCEAL_COLOR_10BIT"         },
	{STAGE,                          "STAGE"                       },
	{PIPE,                           "PIPE"                        },
	{POC,                            "POC"                         },
	{QUALITY_MODE,                   "QUALITY_MODE"                },
	{CODED_FRAMES,                   "CODED_FRAMES"                },
	{BIT_DEPTH,                      "BIT_DEPTH"                   },
	{CODEC_CONFIG,                   "CODEC_CONFIG"                },
	{BITSTREAM_SIZE_OVERWRITE,       "BITSTREAM_SIZE_OVERWRITE"    },
	{THUMBNAIL_MODE,                 "THUMBNAIL_MODE"              },
	{DEFAULT_HEADER,                 "DEFAULT_HEADER"              },
	{RAP_FRAME,                      "RAP_FRAME"                   },
	{SEQ_CHANGE_AT_SYNC_FRAME,       "SEQ_CHANGE_AT_SYNC_FRAME"    },
	{PRIORITY,                       "PRIORITY"                    },
	{ENC_IP_CR,                      "ENC_IP_CR"                   },
	{DPB_LIST,                       "DPB_LIST"                    },
	{META_LTR_MARK_USE,              "META_LTR_MARK_USE"           },
	{META_DPB_MISR,                  "META_DPB_MISR"               },
	{META_OPB_MISR,                  "META_OPB_MISR"               },
	{META_INTERLACE,                 "META_INTERLACE"              },
	{META_TIMESTAMP,                 "META_TIMESTAMP"              },
	{META_CONCEALED_MB_CNT,          "META_CONCEALED_MB_CNT"       },
	{META_HIST_INFO,                 "META_HIST_INFO"              },
	{META_SEI_MASTERING_DISP,        "META_SEI_MASTERING_DISP"     },
	{META_SEI_CLL,                   "META_SEI_CLL"                },
	{META_HDR10PLUS,                 "META_HDR10PLUS"              },
	{META_EVA_STATS,                 "META_EVA_STATS"              },
	{META_BUF_TAG,                   "META_BUF_TAG"                },
	{META_DPB_TAG_LIST,              "META_DPB_TAG_LIST"           },
	{META_OUTPUT_BUF_TAG,            "META_OUTPUT_BUF_TAG"         },
	{META_SUBFRAME_OUTPUT,           "META_SUBFRAME_OUTPUT"        },
	{META_ENC_QP_METADATA,           "META_ENC_QP_METADATA"        },
	{META_ROI_INFO,                  "META_ROI_INFO"               },
	{META_DEC_QP_METADATA,           "META_DEC_QP_METADATA"        },
	{COMPLEXITY,                     "COMPLEXITY"                  },
	{INST_CAP_MAX,                   "INST_CAP_MAX"                },
};
  180. const char *cap_name(enum msm_vidc_inst_capability_type cap)
  181. {
  182. const char *name = "UNKNOWN CAP";
  183. if (cap > ARRAY_SIZE(cap_name_arr))
  184. goto exit;
  185. if (cap_name_arr[cap].cap != cap)
  186. goto exit;
  187. name = cap_name_arr[cap].name;
  188. exit:
  189. return name;
  190. }
/* Maps a driver buffer-type enum value to its printable name. */
struct msm_vidc_buf_type_name {
	enum msm_vidc_buffer_type type;
	char *name;
};

/*
 * Name table for buf_name(). Buffer types start at 1, so entry i holds
 * type i + 1; buf_name() indexes with [type - 1] and verifies the match.
 */
static const struct msm_vidc_buf_type_name buf_type_name_arr[] = {
	{MSM_VIDC_BUF_INPUT,        "INPUT"       },
	{MSM_VIDC_BUF_OUTPUT,       "OUTPUT"      },
	{MSM_VIDC_BUF_INPUT_META,   "INPUT_META"  },
	{MSM_VIDC_BUF_OUTPUT_META,  "OUTPUT_META" },
	{MSM_VIDC_BUF_READ_ONLY,    "READ_ONLY"   },
	{MSM_VIDC_BUF_QUEUE,        "QUEUE"       },
	{MSM_VIDC_BUF_BIN,          "BIN"         },
	{MSM_VIDC_BUF_ARP,          "ARP"         },
	{MSM_VIDC_BUF_COMV,         "COMV"        },
	{MSM_VIDC_BUF_NON_COMV,     "NON_COMV"    },
	{MSM_VIDC_BUF_LINE,         "LINE"        },
	{MSM_VIDC_BUF_DPB,          "DPB"         },
	{MSM_VIDC_BUF_PERSIST,      "PERSIST"     },
	{MSM_VIDC_BUF_VPSS,         "VPSS"        },
};
  211. const char *buf_name(enum msm_vidc_buffer_type type)
  212. {
  213. const char *name = "UNKNOWN BUF";
  214. if (!type || type > ARRAY_SIZE(buf_type_name_arr))
  215. goto exit;
  216. if (buf_type_name_arr[type - 1].type != type)
  217. goto exit;
  218. name = buf_type_name_arr[type - 1].name;
  219. exit:
  220. return name;
  221. }
/* Maps an instance-state enum value to its printable name. */
struct msm_vidc_inst_state_name {
	enum msm_vidc_inst_state state;
	char *name;
};

/*
 * Name table for state_name(). Instance states start at 1, so entry i
 * holds state i + 1; state_name() indexes with [state - 1] and verifies
 * the match.
 */
static const struct msm_vidc_inst_state_name inst_state_name_arr[] = {
	{MSM_VIDC_OPEN,                 "OPEN"                },
	{MSM_VIDC_START_INPUT,          "START_INPUT"         },
	{MSM_VIDC_START_OUTPUT,         "START_OUTPUT"        },
	{MSM_VIDC_START,                "START"               },
	{MSM_VIDC_DRC,                  "DRC"                 },
	{MSM_VIDC_DRC_LAST_FLAG,        "DRC_LAST_FLAG"       },
	{MSM_VIDC_DRAIN,                "DRAIN"               },
	{MSM_VIDC_DRAIN_LAST_FLAG,      "DRAIN_LAST_FLAG"     },
	{MSM_VIDC_DRC_DRAIN,            "DRC_DRAIN"           },
	{MSM_VIDC_DRC_DRAIN_LAST_FLAG,  "DRC_DRAIN_LAST_FLAG" },
	{MSM_VIDC_DRAIN_START_INPUT,    "DRAIN_START_INPUT"   },
	{MSM_VIDC_ERROR,                "ERROR"               },
};
  240. const char *state_name(enum msm_vidc_inst_state state)
  241. {
  242. const char *name = "UNKNOWN STATE";
  243. if (!state || state > ARRAY_SIZE(inst_state_name_arr))
  244. goto exit;
  245. if (inst_state_name_arr[state - 1].state != state)
  246. goto exit;
  247. name = inst_state_name_arr[state - 1].name;
  248. exit:
  249. return name;
  250. }
/* Maps a core-state enum value to its printable name. */
struct msm_vidc_core_state_name {
	enum msm_vidc_core_state state;
	char *name;
};

/*
 * Name table for core_state_name(). Unlike the instance-state table,
 * core states are 0-based: entry i holds state i.
 */
static const struct msm_vidc_core_state_name core_state_name_arr[] = {
	{MSM_VIDC_CORE_DEINIT,     "CORE_DEINIT"    },
	{MSM_VIDC_CORE_INIT_WAIT,  "CORE_INIT_WAIT" },
	{MSM_VIDC_CORE_INIT,       "CORE_INIT"      },
};
  260. const char *core_state_name(enum msm_vidc_core_state state)
  261. {
  262. const char *name = "UNKNOWN STATE";
  263. if (state >= ARRAY_SIZE(core_state_name_arr))
  264. goto exit;
  265. if (core_state_name_arr[state].state != state)
  266. goto exit;
  267. name = core_state_name_arr[state].name;
  268. exit:
  269. return name;
  270. }
  271. const char *v4l2_type_name(u32 port)
  272. {
  273. switch (port) {
  274. case INPUT_MPLANE: return "INPUT";
  275. case OUTPUT_MPLANE: return "OUTPUT";
  276. case INPUT_META_PLANE: return "INPUT_META";
  277. case OUTPUT_META_PLANE: return "OUTPUT_META";
  278. }
  279. return "UNKNOWN";
  280. }
  281. const char *v4l2_pixelfmt_name(u32 pixfmt)
  282. {
  283. switch (pixfmt) {
  284. /* raw port: color format */
  285. case V4L2_PIX_FMT_NV12: return "NV12";
  286. case V4L2_PIX_FMT_NV21: return "NV21";
  287. case V4L2_PIX_FMT_VIDC_NV12C: return "NV12C";
  288. case V4L2_PIX_FMT_VIDC_P010: return "P010";
  289. case V4L2_PIX_FMT_VIDC_TP10C: return "TP10C";
  290. case V4L2_PIX_FMT_RGBA32: return "RGBA";
  291. case V4L2_PIX_FMT_VIDC_ARGB32C: return "RGBAC";
  292. /* bitstream port: codec type */
  293. case V4L2_PIX_FMT_H264: return "AVC";
  294. case V4L2_PIX_FMT_HEVC: return "HEVC";
  295. case V4L2_PIX_FMT_HEIC: return "HEIC";
  296. case V4L2_PIX_FMT_VP9: return "VP9";
  297. /* meta port */
  298. case V4L2_META_FMT_VIDC: return "META";
  299. }
  300. return "UNKNOWN";
  301. }
/*
 * Log a driver buffer's bookkeeping fields (index, fd, offset, device
 * address, sizes, flags, timestamp, attributes) under the given debug tag.
 * Returns silently when the tag is not enabled or any pointer is NULL.
 */
void print_vidc_buffer(u32 tag, const char *tag_str, const char *str, struct msm_vidc_inst *inst,
	struct msm_vidc_buffer *vbuf)
{
	/* skip all formatting work unless this tag is enabled in msm_vidc_debug */
	if (!(tag & msm_vidc_debug) || !inst || !vbuf || !tag_str || !str)
		return;
	dprintk_inst(tag, tag_str, inst,
		"%s: %s: idx %2d fd %3d off %d daddr %#llx size %d filled %d flags %#x ts %lld attr %#x\n",
		str, buf_name(vbuf->type),
		vbuf->index, vbuf->fd, vbuf->data_offset,
		vbuf->device_addr, vbuf->buffer_size, vbuf->data_size,
		vbuf->flags, vbuf->timestamp, vbuf->attr);
}
  314. void print_vb2_buffer(const char *str, struct msm_vidc_inst *inst,
  315. struct vb2_buffer *vb2)
  316. {
  317. if (!inst || !vb2)
  318. return;
  319. if (vb2->type == INPUT_MPLANE || vb2->type == OUTPUT_MPLANE) {
  320. i_vpr_e(inst,
  321. "%s: %s: idx %2d fd %d off %d size %d filled %d\n",
  322. str, vb2->type == INPUT_MPLANE ? "INPUT" : "OUTPUT",
  323. vb2->index, vb2->planes[0].m.fd,
  324. vb2->planes[0].data_offset, vb2->planes[0].length,
  325. vb2->planes[0].bytesused);
  326. } else if (vb2->type == INPUT_META_PLANE || vb2->type == OUTPUT_META_PLANE) {
  327. i_vpr_e(inst,
  328. "%s: %s: idx %2d fd %d off %d size %d filled %d\n",
  329. str, vb2->type == INPUT_MPLANE ? "INPUT_META" : "OUTPUT_META",
  330. vb2->index, vb2->planes[0].m.fd,
  331. vb2->planes[0].data_offset, vb2->planes[0].length,
  332. vb2->planes[0].bytesused);
  333. }
  334. }
/* Emit a kernel warning (with backtrace) when @fatal is true; no-op otherwise. */
static void __fatal_error(bool fatal)
{
	WARN_ON(fatal);
}
  339. static int __strict_check(struct msm_vidc_core *core, const char *function)
  340. {
  341. bool fatal = !mutex_is_locked(&core->lock);
  342. __fatal_error(fatal);
  343. if (fatal)
  344. d_vpr_e("%s: strict check failed\n", function);
  345. return fatal ? -EINVAL : 0;
  346. }
  347. enum msm_vidc_buffer_type v4l2_type_to_driver(u32 type, const char *func)
  348. {
  349. enum msm_vidc_buffer_type buffer_type = 0;
  350. switch (type) {
  351. case INPUT_MPLANE:
  352. buffer_type = MSM_VIDC_BUF_INPUT;
  353. break;
  354. case OUTPUT_MPLANE:
  355. buffer_type = MSM_VIDC_BUF_OUTPUT;
  356. break;
  357. case INPUT_META_PLANE:
  358. buffer_type = MSM_VIDC_BUF_INPUT_META;
  359. break;
  360. case OUTPUT_META_PLANE:
  361. buffer_type = MSM_VIDC_BUF_OUTPUT_META;
  362. break;
  363. default:
  364. d_vpr_e("%s: invalid v4l2 buffer type %#x\n", func, type);
  365. break;
  366. }
  367. return buffer_type;
  368. }
  369. u32 v4l2_type_from_driver(enum msm_vidc_buffer_type buffer_type,
  370. const char *func)
  371. {
  372. u32 type = 0;
  373. switch (buffer_type) {
  374. case MSM_VIDC_BUF_INPUT:
  375. type = INPUT_MPLANE;
  376. break;
  377. case MSM_VIDC_BUF_OUTPUT:
  378. type = OUTPUT_MPLANE;
  379. break;
  380. case MSM_VIDC_BUF_INPUT_META:
  381. type = INPUT_META_PLANE;
  382. break;
  383. case MSM_VIDC_BUF_OUTPUT_META:
  384. type = OUTPUT_META_PLANE;
  385. break;
  386. default:
  387. d_vpr_e("%s: invalid driver buffer type %d\n",
  388. func, buffer_type);
  389. break;
  390. }
  391. return type;
  392. }
  393. enum msm_vidc_codec_type v4l2_codec_to_driver(u32 v4l2_codec, const char *func)
  394. {
  395. enum msm_vidc_codec_type codec = 0;
  396. switch (v4l2_codec) {
  397. case V4L2_PIX_FMT_H264:
  398. codec = MSM_VIDC_H264;
  399. break;
  400. case V4L2_PIX_FMT_HEVC:
  401. codec = MSM_VIDC_HEVC;
  402. break;
  403. case V4L2_PIX_FMT_VP9:
  404. codec = MSM_VIDC_VP9;
  405. break;
  406. case V4L2_PIX_FMT_HEIC:
  407. codec = MSM_VIDC_HEIC;
  408. break;
  409. default:
  410. d_vpr_e("%s: invalid v4l2 codec %#x\n", func, v4l2_codec);
  411. break;
  412. }
  413. return codec;
  414. }
  415. u32 v4l2_codec_from_driver(enum msm_vidc_codec_type codec, const char *func)
  416. {
  417. u32 v4l2_codec = 0;
  418. switch (codec) {
  419. case MSM_VIDC_H264:
  420. v4l2_codec = V4L2_PIX_FMT_H264;
  421. break;
  422. case MSM_VIDC_HEVC:
  423. v4l2_codec = V4L2_PIX_FMT_HEVC;
  424. break;
  425. case MSM_VIDC_VP9:
  426. v4l2_codec = V4L2_PIX_FMT_VP9;
  427. break;
  428. case MSM_VIDC_HEIC:
  429. v4l2_codec = V4L2_PIX_FMT_HEIC;
  430. break;
  431. default:
  432. d_vpr_e("%s: invalid driver codec %#x\n", func, codec);
  433. break;
  434. }
  435. return v4l2_codec;
  436. }
  437. enum msm_vidc_colorformat_type v4l2_colorformat_to_driver(u32 v4l2_colorformat,
  438. const char *func)
  439. {
  440. enum msm_vidc_colorformat_type colorformat = 0;
  441. switch (v4l2_colorformat) {
  442. case V4L2_PIX_FMT_NV12:
  443. colorformat = MSM_VIDC_FMT_NV12;
  444. break;
  445. case V4L2_PIX_FMT_NV21:
  446. colorformat = MSM_VIDC_FMT_NV21;
  447. break;
  448. case V4L2_PIX_FMT_VIDC_NV12C:
  449. colorformat = MSM_VIDC_FMT_NV12C;
  450. break;
  451. case V4L2_PIX_FMT_VIDC_TP10C:
  452. colorformat = MSM_VIDC_FMT_TP10C;
  453. break;
  454. case V4L2_PIX_FMT_RGBA32:
  455. colorformat = MSM_VIDC_FMT_RGBA8888;
  456. break;
  457. case V4L2_PIX_FMT_VIDC_ARGB32C:
  458. colorformat = MSM_VIDC_FMT_RGBA8888C;
  459. break;
  460. case V4L2_PIX_FMT_VIDC_P010:
  461. colorformat = MSM_VIDC_FMT_P010;
  462. break;
  463. default:
  464. d_vpr_e("%s: invalid v4l2 color format %#x\n",
  465. func, v4l2_colorformat);
  466. break;
  467. }
  468. return colorformat;
  469. }
  470. u32 v4l2_colorformat_from_driver(enum msm_vidc_colorformat_type colorformat,
  471. const char *func)
  472. {
  473. u32 v4l2_colorformat = 0;
  474. switch (colorformat) {
  475. case MSM_VIDC_FMT_NV12:
  476. v4l2_colorformat = V4L2_PIX_FMT_NV12;
  477. break;
  478. case MSM_VIDC_FMT_NV21:
  479. v4l2_colorformat = V4L2_PIX_FMT_NV21;
  480. break;
  481. case MSM_VIDC_FMT_NV12C:
  482. v4l2_colorformat = V4L2_PIX_FMT_VIDC_NV12C;
  483. break;
  484. case MSM_VIDC_FMT_TP10C:
  485. v4l2_colorformat = V4L2_PIX_FMT_VIDC_TP10C;
  486. break;
  487. case MSM_VIDC_FMT_RGBA8888:
  488. v4l2_colorformat = V4L2_PIX_FMT_RGBA32;
  489. break;
  490. case MSM_VIDC_FMT_RGBA8888C:
  491. v4l2_colorformat = V4L2_PIX_FMT_VIDC_ARGB32C;
  492. break;
  493. case MSM_VIDC_FMT_P010:
  494. v4l2_colorformat = V4L2_PIX_FMT_VIDC_P010;
  495. break;
  496. default:
  497. d_vpr_e("%s: invalid driver color format %#x\n",
  498. func, colorformat);
  499. break;
  500. }
  501. return v4l2_colorformat;
  502. }
  503. u32 v4l2_color_primaries_to_driver(struct msm_vidc_inst *inst,
  504. u32 v4l2_primaries, const char *func)
  505. {
  506. u32 vidc_color_primaries = MSM_VIDC_PRIMARIES_RESERVED;
  507. switch(v4l2_primaries) {
  508. case V4L2_COLORSPACE_DEFAULT:
  509. vidc_color_primaries = MSM_VIDC_PRIMARIES_RESERVED;
  510. break;
  511. case V4L2_COLORSPACE_REC709:
  512. vidc_color_primaries = MSM_VIDC_PRIMARIES_BT709;
  513. break;
  514. case V4L2_COLORSPACE_470_SYSTEM_M:
  515. vidc_color_primaries = MSM_VIDC_PRIMARIES_BT470_SYSTEM_M;
  516. break;
  517. case V4L2_COLORSPACE_470_SYSTEM_BG:
  518. vidc_color_primaries = MSM_VIDC_PRIMARIES_BT470_SYSTEM_BG;
  519. break;
  520. case V4L2_COLORSPACE_SMPTE170M:
  521. vidc_color_primaries = MSM_VIDC_PRIMARIES_BT601_525;
  522. break;
  523. case V4L2_COLORSPACE_SMPTE240M:
  524. vidc_color_primaries = MSM_VIDC_PRIMARIES_SMPTE_ST240M;
  525. break;
  526. case V4L2_COLORSPACE_VIDC_GENERIC_FILM:
  527. vidc_color_primaries = MSM_VIDC_PRIMARIES_GENERIC_FILM;
  528. break;
  529. case V4L2_COLORSPACE_BT2020:
  530. vidc_color_primaries = MSM_VIDC_PRIMARIES_BT2020;
  531. break;
  532. case V4L2_COLORSPACE_DCI_P3:
  533. vidc_color_primaries = MSM_VIDC_PRIMARIES_SMPTE_RP431_2;
  534. break;
  535. case V4L2_COLORSPACE_VIDC_EG431:
  536. vidc_color_primaries = MSM_VIDC_PRIMARIES_SMPTE_EG431_1;
  537. break;
  538. case V4L2_COLORSPACE_VIDC_EBU_TECH:
  539. vidc_color_primaries = MSM_VIDC_PRIMARIES_SMPTE_EBU_TECH;
  540. break;
  541. default:
  542. i_vpr_e(inst, "%s: invalid v4l2 color primaries %d\n",
  543. func, v4l2_primaries);
  544. break;
  545. }
  546. return vidc_color_primaries;
  547. }
  548. u32 v4l2_color_primaries_from_driver(struct msm_vidc_inst *inst,
  549. u32 vidc_color_primaries, const char *func)
  550. {
  551. u32 v4l2_primaries = V4L2_COLORSPACE_DEFAULT;
  552. switch(vidc_color_primaries) {
  553. case MSM_VIDC_PRIMARIES_UNSPECIFIED:
  554. v4l2_primaries = V4L2_COLORSPACE_DEFAULT;
  555. break;
  556. case MSM_VIDC_PRIMARIES_BT709:
  557. v4l2_primaries = V4L2_COLORSPACE_REC709;
  558. break;
  559. case MSM_VIDC_PRIMARIES_BT470_SYSTEM_M:
  560. v4l2_primaries = V4L2_COLORSPACE_470_SYSTEM_M;
  561. break;
  562. case MSM_VIDC_PRIMARIES_BT470_SYSTEM_BG:
  563. v4l2_primaries = V4L2_COLORSPACE_470_SYSTEM_BG;
  564. break;
  565. case MSM_VIDC_PRIMARIES_BT601_525:
  566. v4l2_primaries = V4L2_COLORSPACE_SMPTE170M;
  567. break;
  568. case MSM_VIDC_PRIMARIES_SMPTE_ST240M:
  569. v4l2_primaries = V4L2_COLORSPACE_SMPTE240M;
  570. break;
  571. case MSM_VIDC_PRIMARIES_GENERIC_FILM:
  572. v4l2_primaries = V4L2_COLORSPACE_VIDC_GENERIC_FILM;
  573. break;
  574. case MSM_VIDC_PRIMARIES_BT2020:
  575. v4l2_primaries = V4L2_COLORSPACE_BT2020;
  576. break;
  577. case MSM_VIDC_PRIMARIES_SMPTE_RP431_2:
  578. v4l2_primaries = V4L2_COLORSPACE_DCI_P3;
  579. break;
  580. case MSM_VIDC_PRIMARIES_SMPTE_EG431_1:
  581. v4l2_primaries = V4L2_COLORSPACE_VIDC_EG431;
  582. break;
  583. case MSM_VIDC_PRIMARIES_SMPTE_EBU_TECH:
  584. v4l2_primaries = V4L2_COLORSPACE_VIDC_EBU_TECH;
  585. break;
  586. default:
  587. i_vpr_e(inst, "%s: invalid hfi color primaries %d\n",
  588. func, vidc_color_primaries);
  589. break;
  590. }
  591. return v4l2_primaries;
  592. }
  593. u32 v4l2_transfer_char_to_driver(struct msm_vidc_inst *inst,
  594. u32 v4l2_transfer_char, const char *func)
  595. {
  596. u32 vidc_transfer_char = MSM_VIDC_TRANSFER_RESERVED;
  597. switch(v4l2_transfer_char) {
  598. case V4L2_XFER_FUNC_DEFAULT:
  599. vidc_transfer_char = MSM_VIDC_TRANSFER_RESERVED;
  600. break;
  601. case V4L2_XFER_FUNC_709:
  602. vidc_transfer_char = MSM_VIDC_TRANSFER_BT709;
  603. break;
  604. case V4L2_XFER_FUNC_VIDC_BT470_SYSTEM_M:
  605. vidc_transfer_char = MSM_VIDC_TRANSFER_BT470_SYSTEM_M;
  606. break;
  607. case V4L2_XFER_FUNC_VIDC_BT470_SYSTEM_BG:
  608. vidc_transfer_char = MSM_VIDC_TRANSFER_BT470_SYSTEM_BG;
  609. break;
  610. case V4L2_XFER_FUNC_VIDC_BT601_525_OR_625:
  611. vidc_transfer_char = MSM_VIDC_TRANSFER_BT601_525_OR_625;
  612. break;
  613. case V4L2_XFER_FUNC_SMPTE240M:
  614. vidc_transfer_char = MSM_VIDC_TRANSFER_SMPTE_ST240M;
  615. break;
  616. case V4L2_XFER_FUNC_VIDC_LINEAR:
  617. vidc_transfer_char = MSM_VIDC_TRANSFER_LINEAR;
  618. break;
  619. case V4L2_XFER_FUNC_VIDC_XVYCC:
  620. vidc_transfer_char = MSM_VIDC_TRANSFER_XVYCC;
  621. break;
  622. case V4L2_XFER_FUNC_VIDC_BT1361:
  623. vidc_transfer_char = MSM_VIDC_TRANSFER_BT1361_0;
  624. break;
  625. case V4L2_XFER_FUNC_SRGB:
  626. vidc_transfer_char = MSM_VIDC_TRANSFER_SRGB_SYCC;
  627. break;
  628. case V4L2_XFER_FUNC_VIDC_BT2020:
  629. vidc_transfer_char = MSM_VIDC_TRANSFER_BT2020_14;
  630. break;
  631. case V4L2_XFER_FUNC_SMPTE2084:
  632. vidc_transfer_char = MSM_VIDC_TRANSFER_SMPTE_ST2084_PQ;
  633. break;
  634. case V4L2_XFER_FUNC_VIDC_ST428:
  635. vidc_transfer_char = MSM_VIDC_TRANSFER_SMPTE_ST428_1;
  636. break;
  637. case V4L2_XFER_FUNC_VIDC_HLG:
  638. vidc_transfer_char = MSM_VIDC_TRANSFER_BT2100_2_HLG;
  639. break;
  640. default:
  641. i_vpr_e(inst, "%s: invalid v4l2 transfer char %d\n",
  642. func, v4l2_transfer_char);
  643. break;
  644. }
  645. return vidc_transfer_char;
  646. }
  647. u32 v4l2_transfer_char_from_driver(struct msm_vidc_inst *inst,
  648. u32 vidc_transfer_char, const char *func)
  649. {
  650. u32 v4l2_transfer_char = V4L2_XFER_FUNC_DEFAULT;
  651. switch(vidc_transfer_char) {
  652. case MSM_VIDC_TRANSFER_UNSPECIFIED:
  653. v4l2_transfer_char = V4L2_XFER_FUNC_DEFAULT;
  654. break;
  655. case MSM_VIDC_TRANSFER_BT709:
  656. v4l2_transfer_char = V4L2_XFER_FUNC_709;
  657. break;
  658. case MSM_VIDC_TRANSFER_BT470_SYSTEM_M:
  659. v4l2_transfer_char = V4L2_XFER_FUNC_VIDC_BT470_SYSTEM_M;
  660. break;
  661. case MSM_VIDC_TRANSFER_BT470_SYSTEM_BG:
  662. v4l2_transfer_char = V4L2_XFER_FUNC_VIDC_BT470_SYSTEM_BG;
  663. break;
  664. case MSM_VIDC_TRANSFER_BT601_525_OR_625:
  665. v4l2_transfer_char = V4L2_XFER_FUNC_VIDC_BT601_525_OR_625;
  666. break;
  667. case MSM_VIDC_TRANSFER_SMPTE_ST240M:
  668. v4l2_transfer_char = V4L2_XFER_FUNC_SMPTE240M;
  669. break;
  670. case MSM_VIDC_TRANSFER_LINEAR:
  671. v4l2_transfer_char = V4L2_XFER_FUNC_VIDC_LINEAR;
  672. break;
  673. case MSM_VIDC_TRANSFER_XVYCC:
  674. v4l2_transfer_char = V4L2_XFER_FUNC_VIDC_XVYCC;
  675. break;
  676. case MSM_VIDC_TRANSFER_BT1361_0:
  677. v4l2_transfer_char = V4L2_XFER_FUNC_VIDC_BT1361;
  678. break;
  679. case MSM_VIDC_TRANSFER_SRGB_SYCC:
  680. v4l2_transfer_char = V4L2_XFER_FUNC_SRGB;
  681. break;
  682. case MSM_VIDC_TRANSFER_BT2020_14:
  683. case MSM_VIDC_TRANSFER_BT2020_15:
  684. v4l2_transfer_char = V4L2_XFER_FUNC_VIDC_BT2020;
  685. break;
  686. case MSM_VIDC_TRANSFER_SMPTE_ST2084_PQ:
  687. v4l2_transfer_char = V4L2_XFER_FUNC_SMPTE2084;
  688. break;
  689. case MSM_VIDC_TRANSFER_SMPTE_ST428_1:
  690. v4l2_transfer_char = V4L2_XFER_FUNC_VIDC_ST428;
  691. break;
  692. case MSM_VIDC_TRANSFER_BT2100_2_HLG:
  693. v4l2_transfer_char = V4L2_XFER_FUNC_VIDC_HLG;
  694. break;
  695. default:
  696. i_vpr_e(inst, "%s: invalid hfi transfer char %d\n",
  697. func, vidc_transfer_char);
  698. break;
  699. }
  700. return v4l2_transfer_char;
  701. }
  702. u32 v4l2_matrix_coeff_to_driver(struct msm_vidc_inst *inst,
  703. u32 v4l2_matrix_coeff, const char *func)
  704. {
  705. u32 vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_RESERVED;
  706. switch(v4l2_matrix_coeff) {
  707. case V4L2_YCBCR_ENC_DEFAULT:
  708. vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_RESERVED;
  709. break;
  710. case V4L2_YCBCR_VIDC_SRGB_OR_SMPTE_ST428:
  711. vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_SRGB_SMPTE_ST428_1;
  712. break;
  713. case V4L2_YCBCR_ENC_709:
  714. case V4L2_YCBCR_ENC_XV709:
  715. vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_BT709;
  716. break;
  717. case V4L2_YCBCR_VIDC_FCC47_73_682:
  718. vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_FCC_TITLE_47;
  719. break;
  720. case V4L2_YCBCR_ENC_XV601:
  721. vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_BT470_SYS_BG_OR_BT601_625;
  722. break;
  723. case V4L2_YCBCR_ENC_601:
  724. vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_BT601_525_BT1358_525_OR_625;
  725. break;
  726. case V4L2_YCBCR_ENC_SMPTE240M:
  727. vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_SMPTE_ST240;
  728. break;
  729. case V4L2_YCBCR_ENC_BT2020:
  730. vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_BT2020_NON_CONSTANT;
  731. break;
  732. case V4L2_YCBCR_ENC_BT2020_CONST_LUM:
  733. vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_BT2020_CONSTANT;
  734. break;
  735. default:
  736. i_vpr_e(inst, "%s: invalid v4l2 matrix coeff %d\n",
  737. func, v4l2_matrix_coeff);
  738. break;
  739. }
  740. return vidc_matrix_coeff;
  741. }
  742. u32 v4l2_matrix_coeff_from_driver(struct msm_vidc_inst *inst,
  743. u32 vidc_matrix_coeff, const char *func)
  744. {
  745. u32 v4l2_matrix_coeff = V4L2_YCBCR_ENC_DEFAULT;
  746. switch(vidc_matrix_coeff) {
  747. case MSM_VIDC_MATRIX_COEFF_SRGB_SMPTE_ST428_1:
  748. v4l2_matrix_coeff = V4L2_YCBCR_VIDC_SRGB_OR_SMPTE_ST428;
  749. break;
  750. case MSM_VIDC_MATRIX_COEFF_BT709:
  751. v4l2_matrix_coeff = V4L2_YCBCR_ENC_709;
  752. break;
  753. case MSM_VIDC_MATRIX_COEFF_UNSPECIFIED:
  754. v4l2_matrix_coeff = V4L2_YCBCR_ENC_DEFAULT;
  755. break;
  756. case MSM_VIDC_MATRIX_COEFF_FCC_TITLE_47:
  757. v4l2_matrix_coeff = V4L2_YCBCR_VIDC_FCC47_73_682;
  758. break;
  759. case MSM_VIDC_MATRIX_COEFF_BT470_SYS_BG_OR_BT601_625:
  760. v4l2_matrix_coeff = V4L2_YCBCR_ENC_XV601;
  761. break;
  762. case MSM_VIDC_MATRIX_COEFF_BT601_525_BT1358_525_OR_625:
  763. v4l2_matrix_coeff = V4L2_YCBCR_ENC_601;
  764. break;
  765. case MSM_VIDC_MATRIX_COEFF_SMPTE_ST240:
  766. v4l2_matrix_coeff = V4L2_YCBCR_ENC_SMPTE240M;
  767. break;
  768. case MSM_VIDC_MATRIX_COEFF_BT2020_NON_CONSTANT:
  769. v4l2_matrix_coeff = V4L2_YCBCR_ENC_BT2020;
  770. break;
  771. case MSM_VIDC_MATRIX_COEFF_BT2020_CONSTANT:
  772. v4l2_matrix_coeff = V4L2_YCBCR_ENC_BT2020_CONST_LUM;
  773. break;
  774. default:
  775. i_vpr_e(inst, "%s: invalid hfi matrix coeff %d\n",
  776. func, vidc_matrix_coeff);
  777. break;
  778. }
  779. return v4l2_matrix_coeff;
  780. }
  781. int v4l2_type_to_driver_port(struct msm_vidc_inst *inst, u32 type,
  782. const char *func)
  783. {
  784. int port;
  785. if (type == INPUT_MPLANE) {
  786. port = INPUT_PORT;
  787. } else if (type == INPUT_META_PLANE) {
  788. port = INPUT_META_PORT;
  789. } else if (type == OUTPUT_MPLANE) {
  790. port = OUTPUT_PORT;
  791. } else if (type == OUTPUT_META_PLANE) {
  792. port = OUTPUT_META_PORT;
  793. } else {
  794. i_vpr_e(inst, "%s: port not found for v4l2 type %d\n",
  795. func, type);
  796. port = -EINVAL;
  797. }
  798. return port;
  799. }
/*
 * Return the memory protection region a buffer of @buffer_type must be
 * allocated from. The mapping depends on whether the session is secure
 * and on the session direction: for encoders input is raw pixels and
 * output is bitstream, for decoders it is the reverse, so the
 * pixel/bitstream regions swap between the two.
 * Unknown buffer types fall through with region left at the default
 * MSM_VIDC_NON_SECURE (logged at error level).
 */
u32 msm_vidc_get_buffer_region(struct msm_vidc_inst *inst,
	enum msm_vidc_buffer_type buffer_type, const char *func)
{
	u32 region = MSM_VIDC_NON_SECURE;
	if (!is_secure_session(inst)) {
		switch (buffer_type) {
		case MSM_VIDC_BUF_ARP:
			/* NOTE(review): ARP is placed in the secure non-pixel
			 * region even for non-secure sessions — presumably a
			 * firmware requirement; confirm before changing. */
			region = MSM_VIDC_SECURE_NONPIXEL;
			break;
		case MSM_VIDC_BUF_INPUT:
			/* encoder input = raw pixels; decoder input = bitstream */
			if (is_encode_session(inst))
				region = MSM_VIDC_NON_SECURE_PIXEL;
			else
				region = MSM_VIDC_NON_SECURE;
			break;
		case MSM_VIDC_BUF_OUTPUT:
			/* encoder output = bitstream; decoder output = raw pixels */
			if (is_encode_session(inst))
				region = MSM_VIDC_NON_SECURE;
			else
				region = MSM_VIDC_NON_SECURE_PIXEL;
			break;
		case MSM_VIDC_BUF_DPB:
		case MSM_VIDC_BUF_VPSS:
			region = MSM_VIDC_NON_SECURE_PIXEL;
			break;
		case MSM_VIDC_BUF_INPUT_META:
		case MSM_VIDC_BUF_OUTPUT_META:
		case MSM_VIDC_BUF_BIN:
		case MSM_VIDC_BUF_COMV:
		case MSM_VIDC_BUF_NON_COMV:
		case MSM_VIDC_BUF_LINE:
		case MSM_VIDC_BUF_PERSIST:
			region = MSM_VIDC_NON_SECURE;
			break;
		default:
			i_vpr_e(inst, "%s: invalid driver buffer type %d\n",
				func, buffer_type);
		}
	} else {
		switch (buffer_type) {
		case MSM_VIDC_BUF_INPUT:
			if (is_encode_session(inst))
				region = MSM_VIDC_SECURE_PIXEL;
			else
				region = MSM_VIDC_SECURE_BITSTREAM;
			break;
		case MSM_VIDC_BUF_OUTPUT:
			if (is_encode_session(inst))
				region = MSM_VIDC_SECURE_BITSTREAM;
			else
				region = MSM_VIDC_SECURE_PIXEL;
			break;
		case MSM_VIDC_BUF_INPUT_META:
		case MSM_VIDC_BUF_OUTPUT_META:
			/* metadata carries no protected content */
			region = MSM_VIDC_NON_SECURE;
			break;
		case MSM_VIDC_BUF_DPB:
		case MSM_VIDC_BUF_VPSS:
			region = MSM_VIDC_SECURE_PIXEL;
			break;
		case MSM_VIDC_BUF_BIN:
			region = MSM_VIDC_SECURE_BITSTREAM;
			break;
		case MSM_VIDC_BUF_ARP:
		case MSM_VIDC_BUF_COMV:
		case MSM_VIDC_BUF_NON_COMV:
		case MSM_VIDC_BUF_LINE:
		case MSM_VIDC_BUF_PERSIST:
			region = MSM_VIDC_SECURE_NONPIXEL;
			break;
		default:
			i_vpr_e(inst, "%s: invalid driver buffer type %d\n",
				func, buffer_type);
		}
	}
	return region;
}
  877. struct msm_vidc_buffers *msm_vidc_get_buffers(
  878. struct msm_vidc_inst *inst, enum msm_vidc_buffer_type buffer_type,
  879. const char *func)
  880. {
  881. switch (buffer_type) {
  882. case MSM_VIDC_BUF_INPUT:
  883. return &inst->buffers.input;
  884. case MSM_VIDC_BUF_INPUT_META:
  885. return &inst->buffers.input_meta;
  886. case MSM_VIDC_BUF_OUTPUT:
  887. return &inst->buffers.output;
  888. case MSM_VIDC_BUF_OUTPUT_META:
  889. return &inst->buffers.output_meta;
  890. case MSM_VIDC_BUF_READ_ONLY:
  891. return &inst->buffers.read_only;
  892. case MSM_VIDC_BUF_BIN:
  893. return &inst->buffers.bin;
  894. case MSM_VIDC_BUF_ARP:
  895. return &inst->buffers.arp;
  896. case MSM_VIDC_BUF_COMV:
  897. return &inst->buffers.comv;
  898. case MSM_VIDC_BUF_NON_COMV:
  899. return &inst->buffers.non_comv;
  900. case MSM_VIDC_BUF_LINE:
  901. return &inst->buffers.line;
  902. case MSM_VIDC_BUF_DPB:
  903. return &inst->buffers.dpb;
  904. case MSM_VIDC_BUF_PERSIST:
  905. return &inst->buffers.persist;
  906. case MSM_VIDC_BUF_VPSS:
  907. return &inst->buffers.vpss;
  908. case MSM_VIDC_BUF_QUEUE:
  909. return NULL;
  910. default:
  911. i_vpr_e(inst, "%s: invalid driver buffer type %d\n",
  912. func, buffer_type);
  913. return NULL;
  914. }
  915. }
  916. struct msm_vidc_mappings *msm_vidc_get_mappings(
  917. struct msm_vidc_inst *inst, enum msm_vidc_buffer_type buffer_type,
  918. const char *func)
  919. {
  920. switch (buffer_type) {
  921. case MSM_VIDC_BUF_INPUT:
  922. return &inst->mappings.input;
  923. case MSM_VIDC_BUF_INPUT_META:
  924. return &inst->mappings.input_meta;
  925. case MSM_VIDC_BUF_OUTPUT:
  926. return &inst->mappings.output;
  927. case MSM_VIDC_BUF_OUTPUT_META:
  928. return &inst->mappings.output_meta;
  929. case MSM_VIDC_BUF_BIN:
  930. return &inst->mappings.bin;
  931. case MSM_VIDC_BUF_ARP:
  932. return &inst->mappings.arp;
  933. case MSM_VIDC_BUF_COMV:
  934. return &inst->mappings.comv;
  935. case MSM_VIDC_BUF_NON_COMV:
  936. return &inst->mappings.non_comv;
  937. case MSM_VIDC_BUF_LINE:
  938. return &inst->mappings.line;
  939. case MSM_VIDC_BUF_DPB:
  940. return &inst->mappings.dpb;
  941. case MSM_VIDC_BUF_PERSIST:
  942. return &inst->mappings.persist;
  943. case MSM_VIDC_BUF_VPSS:
  944. return &inst->mappings.vpss;
  945. default:
  946. i_vpr_e(inst, "%s: invalid driver buffer type %d\n",
  947. func, buffer_type);
  948. return NULL;
  949. }
  950. }
  951. struct msm_vidc_allocations *msm_vidc_get_allocations(
  952. struct msm_vidc_inst *inst, enum msm_vidc_buffer_type buffer_type,
  953. const char *func)
  954. {
  955. switch (buffer_type) {
  956. case MSM_VIDC_BUF_BIN:
  957. return &inst->allocations.bin;
  958. case MSM_VIDC_BUF_ARP:
  959. return &inst->allocations.arp;
  960. case MSM_VIDC_BUF_COMV:
  961. return &inst->allocations.comv;
  962. case MSM_VIDC_BUF_NON_COMV:
  963. return &inst->allocations.non_comv;
  964. case MSM_VIDC_BUF_LINE:
  965. return &inst->allocations.line;
  966. case MSM_VIDC_BUF_DPB:
  967. return &inst->allocations.dpb;
  968. case MSM_VIDC_BUF_PERSIST:
  969. return &inst->allocations.persist;
  970. case MSM_VIDC_BUF_VPSS:
  971. return &inst->allocations.vpss;
  972. default:
  973. i_vpr_e(inst, "%s: invalid driver buffer type %d\n",
  974. func, buffer_type);
  975. return NULL;
  976. }
  977. }
  978. bool res_is_greater_than(u32 width, u32 height,
  979. u32 ref_width, u32 ref_height)
  980. {
  981. u32 num_mbs = NUM_MBS_PER_FRAME(height, width);
  982. u32 max_side = max(ref_width, ref_height);
  983. if (num_mbs > NUM_MBS_PER_FRAME(ref_height, ref_width) ||
  984. width > max_side ||
  985. height > max_side)
  986. return true;
  987. else
  988. return false;
  989. }
  990. bool res_is_less_than(u32 width, u32 height,
  991. u32 ref_width, u32 ref_height)
  992. {
  993. u32 num_mbs = NUM_MBS_PER_FRAME(height, width);
  994. u32 max_side = max(ref_width, ref_height);
  995. if (num_mbs < NUM_MBS_PER_FRAME(ref_height, ref_width) &&
  996. width < max_side &&
  997. height < max_side)
  998. return true;
  999. else
  1000. return false;
  1001. }
  1002. bool res_is_less_than_or_equal_to(u32 width, u32 height,
  1003. u32 ref_width, u32 ref_height)
  1004. {
  1005. u32 num_mbs = NUM_MBS_PER_FRAME(height, width);
  1006. u32 max_side = max(ref_width, ref_height);
  1007. if (num_mbs <= NUM_MBS_PER_FRAME(ref_height, ref_width) &&
  1008. width <= max_side &&
  1009. height <= max_side)
  1010. return true;
  1011. else
  1012. return false;
  1013. }
/*
 * Move the core state machine to @request_state, logging the transition
 * on behalf of @func. Returns -EINVAL only for a NULL core; the
 * transition itself is unconditional (no validity checking here).
 */
int msm_vidc_change_core_state(struct msm_vidc_core *core,
	enum msm_vidc_core_state request_state, const char *func)
{
	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	/* log before updating so both old and new state are reported */
	d_vpr_h("%s: core state changed to %s from %s\n",
		func, core_state_name(request_state),
		core_state_name(core->state));
	core->state = request_state;
	return 0;
}
/*
 * Move the instance state machine to @request_state, logging the
 * transition on behalf of @func.
 * Returns -EINVAL for a NULL instance or zero state; once the session
 * is in the error state, further transitions are ignored but reported
 * as success (rc 0) so callers can continue teardown.
 */
int msm_vidc_change_inst_state(struct msm_vidc_inst *inst,
	enum msm_vidc_inst_state request_state, const char *func)
{
	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	if (!request_state) {
		i_vpr_e(inst, "%s: invalid request state\n", func);
		return -EINVAL;
	}
	/* error state is sticky: never leave it once entered */
	if (is_session_error(inst)) {
		i_vpr_h(inst,
			"%s: inst is in bad state, can not change state to %s\n",
			func, state_name(request_state));
		return 0;
	}
	/* transitions into ERROR are logged at error level for visibility */
	if (request_state == MSM_VIDC_ERROR)
		i_vpr_e(inst, "%s: state changed to %s from %s\n",
			func, state_name(request_state), state_name(inst->state));
	else
		i_vpr_h(inst, "%s: state changed to %s from %s\n",
			func, state_name(request_state), state_name(inst->state));
	inst->state = request_state;
	return 0;
}
  1053. bool msm_vidc_allow_s_fmt(struct msm_vidc_inst *inst, u32 type)
  1054. {
  1055. bool allow = false;
  1056. if (!inst) {
  1057. d_vpr_e("%s: invalid params\n", __func__);
  1058. return false;
  1059. }
  1060. if (inst->state == MSM_VIDC_OPEN) {
  1061. allow = true;
  1062. goto exit;
  1063. }
  1064. if (type == OUTPUT_MPLANE || type == OUTPUT_META_PLANE) {
  1065. if (inst->state == MSM_VIDC_START_INPUT ||
  1066. inst->state == MSM_VIDC_DRAIN_START_INPUT) {
  1067. allow = true;
  1068. goto exit;
  1069. }
  1070. }
  1071. if (type == INPUT_MPLANE || type == INPUT_META_PLANE) {
  1072. if (inst->state == MSM_VIDC_START_OUTPUT) {
  1073. allow = true;
  1074. goto exit;
  1075. }
  1076. }
  1077. exit:
  1078. if (!allow)
  1079. i_vpr_e(inst, "%s: type %d not allowed in state %s\n",
  1080. __func__, type, state_name(inst->state));
  1081. return allow;
  1082. }
/*
 * Decide whether the v4l2 control @id may be set in the current state.
 * Any control is allowed before streaming (MSM_VIDC_OPEN). After that,
 * decoders allow everything until the input queue streams and then only
 * a small dynamic whitelist; encoders allow a larger dynamic whitelist
 * while in START or START_OUTPUT, and nothing in other states.
 */
bool msm_vidc_allow_s_ctrl(struct msm_vidc_inst *inst, u32 id)
{
	bool allow = false;
	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return false;
	}
	/* all controls are settable before the session starts streaming */
	if (inst->state == MSM_VIDC_OPEN) {
		allow = true;
		goto exit;
	}
	if (is_decode_session(inst)) {
		/* decoder: anything goes until input streaming begins */
		if (!inst->vb2q[INPUT_PORT].streaming) {
			allow = true;
			goto exit;
		}
		/* decoder streaming: only these runtime-changeable controls */
		if (inst->vb2q[INPUT_PORT].streaming) {
			switch (id) {
			case V4L2_CID_MPEG_VIDC_CODEC_CONFIG:
			case V4L2_CID_MPEG_VIDC_PRIORITY:
			case V4L2_CID_MPEG_VIDC_LOWLATENCY_REQUEST:
				allow = true;
				break;
			default:
				allow = false;
				break;
			}
		}
	} else if (is_encode_session(inst)) {
		/* encoder: dynamic controls only while actively encoding */
		if (inst->state == MSM_VIDC_START || inst->state == MSM_VIDC_START_OUTPUT) {
			switch (id) {
			case V4L2_CID_MPEG_VIDEO_BITRATE:
			case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
			case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME:
			case V4L2_CID_HFLIP:
			case V4L2_CID_VFLIP:
			case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP:
			case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP:
			case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP:
			case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:
			case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP:
			case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP:
			case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER:
			case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER:
			case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR:
			case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR:
			case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR:
			case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR:
			case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR:
			case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR:
			case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L0_BR:
			case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L1_BR:
			case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L2_BR:
			case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L3_BR:
			case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L4_BR:
			case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L5_BR:
			case V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES:
			case V4L2_CID_MPEG_VIDEO_FRAME_LTR_INDEX:
			case V4L2_CID_MPEG_VIDC_VIDEO_BLUR_TYPES:
			case V4L2_CID_MPEG_VIDC_VIDEO_BLUR_RESOLUTION:
			case V4L2_CID_MPEG_VIDEO_CONSTANT_QUALITY:
			case V4L2_CID_MPEG_VIDC_ENC_INPUT_COMPRESSION_RATIO:
			case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
			case V4L2_CID_MPEG_VIDC_PRIORITY:
				allow = true;
				break;
			default:
				allow = false;
				break;
			}
		}
	}
exit:
	if (!allow)
		i_vpr_e(inst, "%s: id %#x not allowed in state %s\n",
			__func__, id, state_name(inst->state));
	return allow;
}
  1161. bool msm_vidc_allow_metadata(struct msm_vidc_inst *inst, u32 cap_id)
  1162. {
  1163. bool is_allowed = true;
  1164. if (!inst || !inst->capabilities) {
  1165. d_vpr_e("%s: invalid params\n", __func__);
  1166. return false;
  1167. }
  1168. switch (cap_id) {
  1169. case META_OUTPUT_BUF_TAG:
  1170. case META_DPB_TAG_LIST:
  1171. if (!is_ubwc_colorformat(inst->capabilities->cap[PIX_FMTS].value)) {
  1172. i_vpr_h(inst,
  1173. "%s: cap: %24s not allowed for split mode\n",
  1174. __func__, cap_name(cap_id));
  1175. is_allowed = false;
  1176. }
  1177. break;
  1178. default:
  1179. is_allowed = true;
  1180. break;
  1181. }
  1182. return is_allowed;
  1183. }
  1184. bool msm_vidc_allow_property(struct msm_vidc_inst *inst, u32 hfi_id)
  1185. {
  1186. bool is_allowed = true;
  1187. if (!inst || !inst->capabilities) {
  1188. d_vpr_e("%s: invalid params\n", __func__);
  1189. return false;
  1190. }
  1191. switch (hfi_id) {
  1192. case HFI_PROP_WORST_COMPRESSION_RATIO:
  1193. case HFI_PROP_WORST_COMPLEXITY_FACTOR:
  1194. case HFI_PROP_PICTURE_TYPE:
  1195. is_allowed = true;
  1196. break;
  1197. case HFI_PROP_DPB_LIST:
  1198. if (!is_ubwc_colorformat(inst->capabilities->cap[PIX_FMTS].value)) {
  1199. i_vpr_h(inst,
  1200. "%s: cap: %24s not allowed for split mode\n",
  1201. __func__, cap_name(DPB_LIST));
  1202. is_allowed = false;
  1203. }
  1204. break;
  1205. default:
  1206. is_allowed = true;
  1207. break;
  1208. }
  1209. return is_allowed;
  1210. }
  1211. int msm_vidc_update_property_cap(struct msm_vidc_inst *inst, u32 hfi_id,
  1212. bool allow)
  1213. {
  1214. int rc = 0;
  1215. if (!inst || !inst->capabilities) {
  1216. d_vpr_e("%s: invalid params\n", __func__);
  1217. return -EINVAL;
  1218. }
  1219. switch (hfi_id) {
  1220. case HFI_PROP_WORST_COMPRESSION_RATIO:
  1221. case HFI_PROP_WORST_COMPLEXITY_FACTOR:
  1222. case HFI_PROP_PICTURE_TYPE:
  1223. break;
  1224. case HFI_PROP_DPB_LIST:
  1225. if (!allow)
  1226. memset(inst->dpb_list_payload, 0, MAX_DPB_LIST_ARRAY_SIZE);
  1227. msm_vidc_update_cap_value(inst, DPB_LIST, allow, __func__);
  1228. break;
  1229. default:
  1230. break;
  1231. }
  1232. return rc;
  1233. }
  1234. bool msm_vidc_allow_reqbufs(struct msm_vidc_inst *inst, u32 type)
  1235. {
  1236. bool allow = false;
  1237. if (!inst) {
  1238. d_vpr_e("%s: invalid params\n", __func__);
  1239. return false;
  1240. }
  1241. if (inst->state == MSM_VIDC_OPEN) {
  1242. allow = true;
  1243. goto exit;
  1244. }
  1245. if (type == OUTPUT_MPLANE || type == OUTPUT_META_PLANE) {
  1246. if (inst->state == MSM_VIDC_START_INPUT ||
  1247. inst->state == MSM_VIDC_DRAIN_START_INPUT) {
  1248. allow = true;
  1249. goto exit;
  1250. }
  1251. }
  1252. if (type == INPUT_MPLANE || type == INPUT_META_PLANE) {
  1253. if (inst->state == MSM_VIDC_START_OUTPUT) {
  1254. allow = true;
  1255. goto exit;
  1256. }
  1257. }
  1258. exit:
  1259. if (!allow)
  1260. i_vpr_e(inst, "%s: type %d not allowed in state %s\n",
  1261. __func__, type, state_name(inst->state));
  1262. return allow;
  1263. }
  1264. enum msm_vidc_allow msm_vidc_allow_stop(struct msm_vidc_inst *inst)
  1265. {
  1266. enum msm_vidc_allow allow = MSM_VIDC_DISALLOW;
  1267. if (!inst) {
  1268. d_vpr_e("%s: invalid params\n", __func__);
  1269. return allow;
  1270. }
  1271. if (inst->state == MSM_VIDC_START ||
  1272. inst->state == MSM_VIDC_DRC ||
  1273. inst->state == MSM_VIDC_DRC_LAST_FLAG ||
  1274. inst->state == MSM_VIDC_DRC_DRAIN) {
  1275. allow = MSM_VIDC_ALLOW;
  1276. } else if (inst->state == MSM_VIDC_START_INPUT) {
  1277. allow = MSM_VIDC_IGNORE;
  1278. i_vpr_e(inst, "%s: stop ignored in state %s\n",
  1279. __func__, state_name(inst->state));
  1280. } else {
  1281. i_vpr_e(inst, "%s: stop not allowed in state %s\n",
  1282. __func__, state_name(inst->state));
  1283. }
  1284. return allow;
  1285. }
  1286. bool msm_vidc_allow_start(struct msm_vidc_inst *inst)
  1287. {
  1288. if (!inst) {
  1289. d_vpr_e("%s: invalid params\n", __func__);
  1290. return false;
  1291. }
  1292. if (inst->state == MSM_VIDC_DRAIN_LAST_FLAG ||
  1293. inst->state == MSM_VIDC_DRC_LAST_FLAG ||
  1294. inst->state == MSM_VIDC_DRC_DRAIN_LAST_FLAG)
  1295. return true;
  1296. i_vpr_e(inst, "%s: not allowed in state %s\n",
  1297. __func__, state_name(inst->state));
  1298. return false;
  1299. }
  1300. bool msm_vidc_allow_streamon(struct msm_vidc_inst *inst, u32 type)
  1301. {
  1302. if (!inst) {
  1303. d_vpr_e("%s: invalid params\n", __func__);
  1304. return false;
  1305. }
  1306. if (type == INPUT_MPLANE || type == INPUT_META_PLANE) {
  1307. if (inst->state == MSM_VIDC_OPEN ||
  1308. inst->state == MSM_VIDC_START_OUTPUT)
  1309. return true;
  1310. } else if (type == OUTPUT_MPLANE || type == OUTPUT_META_PLANE) {
  1311. if (inst->state == MSM_VIDC_OPEN ||
  1312. inst->state == MSM_VIDC_START_INPUT ||
  1313. inst->state == MSM_VIDC_DRAIN_START_INPUT)
  1314. return true;
  1315. }
  1316. i_vpr_e(inst, "%s: type %d not allowed in state %s\n",
  1317. __func__, type, state_name(inst->state));
  1318. return false;
  1319. }
  1320. bool msm_vidc_allow_streamoff(struct msm_vidc_inst *inst, u32 type)
  1321. {
  1322. bool allow = true;
  1323. if (!inst) {
  1324. d_vpr_e("%s: invalid params\n", __func__);
  1325. return false;
  1326. }
  1327. if (type == INPUT_MPLANE) {
  1328. if (inst->state == MSM_VIDC_OPEN ||
  1329. inst->state == MSM_VIDC_START_OUTPUT)
  1330. allow = false;
  1331. } else if (type == INPUT_META_PLANE) {
  1332. if (inst->state == MSM_VIDC_START_INPUT)
  1333. allow = false;
  1334. } else if (type == OUTPUT_MPLANE) {
  1335. if (inst->state == MSM_VIDC_OPEN ||
  1336. inst->state == MSM_VIDC_START_INPUT)
  1337. allow = false;
  1338. } else if (type == OUTPUT_META_PLANE) {
  1339. if (inst->state == MSM_VIDC_START_OUTPUT)
  1340. allow = false;
  1341. }
  1342. if (!allow)
  1343. i_vpr_e(inst, "%s: type %d not allowed in state %s\n",
  1344. __func__, type, state_name(inst->state));
  1345. return allow;
  1346. }
  1347. enum msm_vidc_allow msm_vidc_allow_qbuf(struct msm_vidc_inst *inst, u32 type)
  1348. {
  1349. int port = 0;
  1350. if (!inst) {
  1351. d_vpr_e("%s: invalid params\n", __func__);
  1352. return MSM_VIDC_DISALLOW;
  1353. }
  1354. port = v4l2_type_to_driver_port(inst, type, __func__);
  1355. if (port < 0)
  1356. return MSM_VIDC_DISALLOW;
  1357. /* defer queuing if streamon not completed */
  1358. if (!inst->vb2q[port].streaming)
  1359. return MSM_VIDC_DEFER;
  1360. if (type == INPUT_META_PLANE || type == OUTPUT_META_PLANE)
  1361. return MSM_VIDC_DEFER;
  1362. if (type == INPUT_MPLANE) {
  1363. if (inst->state == MSM_VIDC_OPEN ||
  1364. inst->state == MSM_VIDC_START_OUTPUT)
  1365. return MSM_VIDC_DEFER;
  1366. else
  1367. return MSM_VIDC_ALLOW;
  1368. } else if (type == OUTPUT_MPLANE) {
  1369. if (inst->state == MSM_VIDC_OPEN ||
  1370. inst->state == MSM_VIDC_START_INPUT ||
  1371. inst->state == MSM_VIDC_DRAIN_START_INPUT)
  1372. return MSM_VIDC_DEFER;
  1373. else
  1374. return MSM_VIDC_ALLOW;
  1375. } else {
  1376. i_vpr_e(inst, "%s: unknown buffer type %d\n", __func__, type);
  1377. return MSM_VIDC_DISALLOW;
  1378. }
  1379. return MSM_VIDC_DISALLOW;
  1380. }
  1381. enum msm_vidc_allow msm_vidc_allow_input_psc(struct msm_vidc_inst *inst)
  1382. {
  1383. enum msm_vidc_allow allow = MSM_VIDC_DISALLOW;
  1384. if (!inst) {
  1385. d_vpr_e("%s: invalid params\n", __func__);
  1386. return MSM_VIDC_DISALLOW;
  1387. }
  1388. if (inst->state == MSM_VIDC_START ||
  1389. inst->state == MSM_VIDC_START_INPUT ||
  1390. inst->state == MSM_VIDC_DRAIN) {
  1391. allow = MSM_VIDC_ALLOW;
  1392. } else if (inst->state == MSM_VIDC_DRC ||
  1393. inst->state == MSM_VIDC_DRC_LAST_FLAG ||
  1394. inst->state == MSM_VIDC_DRC_DRAIN ||
  1395. inst->state == MSM_VIDC_DRC_DRAIN_LAST_FLAG ||
  1396. inst->state == MSM_VIDC_DRAIN_START_INPUT) {
  1397. i_vpr_h(inst, "%s: defer input psc, inst state %s\n",
  1398. __func__, state_name(inst->state));
  1399. allow = MSM_VIDC_DEFER;
  1400. } else if (inst->state == MSM_VIDC_OPEN ||
  1401. inst->state == MSM_VIDC_START_OUTPUT) {
  1402. i_vpr_h(inst, "%s: discard input psc, inst state %s\n",
  1403. __func__, state_name(inst->state));
  1404. allow = MSM_VIDC_DISCARD;
  1405. } else {
  1406. i_vpr_e(inst, "%s: input psc in wrong state %s\n",
  1407. __func__, state_name(inst->state));
  1408. allow = MSM_VIDC_DISALLOW;
  1409. }
  1410. return allow;
  1411. }
  1412. bool msm_vidc_allow_last_flag(struct msm_vidc_inst *inst)
  1413. {
  1414. if (!inst) {
  1415. d_vpr_e("%s: invalid params\n", __func__);
  1416. return false;
  1417. }
  1418. if (inst->state == MSM_VIDC_DRC ||
  1419. inst->state == MSM_VIDC_DRAIN ||
  1420. inst->state == MSM_VIDC_DRC_DRAIN)
  1421. return true;
  1422. i_vpr_e(inst, "%s: not allowed in state %s\n",
  1423. __func__, state_name(inst->state));
  1424. return false;
  1425. }
/*
 * msm_vidc_process_pending_ipsc() - replay a deferred input
 * port-settings-change response, if one is queued, and derive the next
 * instance state into @new_state.
 *
 * On handling failure @new_state is forced to MSM_VIDC_ERROR; on success
 * it is adjusted only for the states listed below, otherwise it keeps
 * the value the caller preloaded. The response work entry is always
 * consumed (freed) once found.
 *
 * Return: 0 on success or when no ipsc is pending; error from
 * handle_session_response_work() otherwise.
 */
static int msm_vidc_process_pending_ipsc(struct msm_vidc_inst *inst,
	enum msm_vidc_inst_state *new_state)
{
	struct response_work *resp_work, *dummy = NULL;
	int rc = 0;

	if (!inst || !new_state) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (list_empty(&inst->response_works))
		return 0;

	i_vpr_h(inst, "%s: state %s, ipsc pending\n", __func__, state_name(inst->state));
	list_for_each_entry_safe(resp_work, dummy, &inst->response_works, list) {
		if (resp_work->type == RESP_WORK_INPUT_PSC) {
			rc = handle_session_response_work(inst, resp_work);
			if (rc) {
				i_vpr_e(inst, "%s: handle ipsc failed\n", __func__);
				*new_state = MSM_VIDC_ERROR;
			} else {
				/* handling the ipsc moved inst->state; map it to
				 * the state the caller should transition into */
				if (inst->state == MSM_VIDC_DRC_DRAIN_LAST_FLAG ||
					inst->state == MSM_VIDC_DRAIN_START_INPUT) {
					*new_state = MSM_VIDC_DRC_DRAIN;
				} else if (inst->state == MSM_VIDC_DRC_LAST_FLAG) {
					*new_state = MSM_VIDC_DRC;
				}
			}
			list_del(&resp_work->list);
			kfree(resp_work->data);
			kfree(resp_work);
			/* list contains max only one ipsc at anytime */
			break;
		}
	}

	return rc;
}
/*
 * msm_vidc_state_change_streamon() - move the instance state machine
 * forward after a successful STREAMON on @type.
 *
 * Meta planes carry no state of their own. If the current state matches
 * none of the handled transitions, new_state keeps its MSM_VIDC_ERROR
 * initializer and the instance is moved to the error state. The
 * DRAIN_START_INPUT case additionally replays any deferred input psc,
 * which may itself override new_state (including to ERROR on failure).
 *
 * Return: 0 on success, negative error from pending-ipsc processing.
 */
int msm_vidc_state_change_streamon(struct msm_vidc_inst *inst, u32 type)
{
	int rc = 0;
	enum msm_vidc_inst_state new_state = MSM_VIDC_ERROR;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	if (type == INPUT_META_PLANE || type == OUTPUT_META_PLANE)
		return 0;

	if (type == INPUT_MPLANE) {
		if (inst->state == MSM_VIDC_OPEN)
			new_state = MSM_VIDC_START_INPUT;
		else if (inst->state == MSM_VIDC_START_OUTPUT)
			new_state = MSM_VIDC_START;
	} else if (type == OUTPUT_MPLANE) {
		if (inst->state == MSM_VIDC_OPEN) {
			new_state = MSM_VIDC_START_OUTPUT;
		} else if (inst->state == MSM_VIDC_START_INPUT) {
			new_state = MSM_VIDC_START;
		} else if (inst->state == MSM_VIDC_DRAIN_START_INPUT) {
			i_vpr_h(inst, "%s: streamon(output) in %s state\n",
				__func__, state_name(inst->state));
			new_state = MSM_VIDC_DRAIN;
			rc = msm_vidc_process_pending_ipsc(inst, &new_state);
			if (rc) {
				i_vpr_e(inst, "%s: process pending ipsc failed\n", __func__);
				goto state_change;
			}
		}
	}

state_change:
	/* reached by fall-through as well as by the error goto above */
	msm_vidc_change_inst_state(inst, new_state, __func__);

	return rc;
}
  1496. int msm_vidc_state_change_streamoff(struct msm_vidc_inst *inst, u32 type)
  1497. {
  1498. int rc = 0;
  1499. enum msm_vidc_inst_state new_state = MSM_VIDC_ERROR;
  1500. struct response_work *resp_work, *dummy;
  1501. if (!inst || !inst->core) {
  1502. d_vpr_e("%s: invalid params\n", __func__);
  1503. return -EINVAL;
  1504. }
  1505. if (type == INPUT_META_PLANE || type == OUTPUT_META_PLANE)
  1506. return 0;
  1507. if (type == INPUT_MPLANE) {
  1508. if (inst->state == MSM_VIDC_START_INPUT) {
  1509. new_state = MSM_VIDC_OPEN;
  1510. } else if (inst->state == MSM_VIDC_START) {
  1511. new_state = MSM_VIDC_START_OUTPUT;
  1512. } else if (inst->state == MSM_VIDC_DRC ||
  1513. inst->state == MSM_VIDC_DRC_LAST_FLAG ||
  1514. inst->state == MSM_VIDC_DRAIN ||
  1515. inst->state == MSM_VIDC_DRAIN_LAST_FLAG ||
  1516. inst->state == MSM_VIDC_DRC_DRAIN ||
  1517. inst->state == MSM_VIDC_DRC_DRAIN_LAST_FLAG ||
  1518. inst->state == MSM_VIDC_DRAIN_START_INPUT) {
  1519. new_state = MSM_VIDC_START_OUTPUT;
  1520. /* discard pending port settings change if any */
  1521. list_for_each_entry_safe(resp_work, dummy,
  1522. &inst->response_works, list) {
  1523. if (resp_work->type == RESP_WORK_INPUT_PSC) {
  1524. i_vpr_h(inst,
  1525. "%s: discard pending input psc\n", __func__);
  1526. list_del(&resp_work->list);
  1527. kfree(resp_work->data);
  1528. kfree(resp_work);
  1529. }
  1530. }
  1531. }
  1532. } else if (type == OUTPUT_MPLANE) {
  1533. if (inst->state == MSM_VIDC_START_OUTPUT) {
  1534. new_state = MSM_VIDC_OPEN;
  1535. } else if (inst->state == MSM_VIDC_START ||
  1536. inst->state == MSM_VIDC_DRAIN ||
  1537. inst->state == MSM_VIDC_DRAIN_LAST_FLAG ||
  1538. inst->state == MSM_VIDC_DRC ||
  1539. inst->state == MSM_VIDC_DRC_LAST_FLAG ||
  1540. inst->state == MSM_VIDC_DRC_DRAIN) {
  1541. new_state = MSM_VIDC_START_INPUT;
  1542. } else if (inst->state == MSM_VIDC_DRC_DRAIN_LAST_FLAG) {
  1543. new_state = MSM_VIDC_DRAIN_START_INPUT;
  1544. }
  1545. }
  1546. rc = msm_vidc_change_inst_state(inst, new_state, __func__);
  1547. if (rc)
  1548. goto exit;
  1549. exit:
  1550. return rc;
  1551. }
  1552. int msm_vidc_state_change_stop(struct msm_vidc_inst *inst)
  1553. {
  1554. int rc = 0;
  1555. enum msm_vidc_inst_state new_state = MSM_VIDC_ERROR;
  1556. if (!inst || !inst->core) {
  1557. d_vpr_e("%s: invalid params\n", __func__);
  1558. return -EINVAL;
  1559. }
  1560. if (inst->state == MSM_VIDC_START) {
  1561. new_state = MSM_VIDC_DRAIN;
  1562. } else if (inst->state == MSM_VIDC_DRC) {
  1563. new_state = MSM_VIDC_DRC_DRAIN;
  1564. } else if (inst->state == MSM_VIDC_DRC_DRAIN ||
  1565. inst->state == MSM_VIDC_DRC_LAST_FLAG) {
  1566. new_state = MSM_VIDC_DRC_DRAIN_LAST_FLAG;
  1567. } else {
  1568. i_vpr_e(inst, "%s: wrong state %s\n",
  1569. __func__, state_name(inst->state));
  1570. msm_vidc_change_inst_state(inst, MSM_VIDC_ERROR, __func__);
  1571. return -EINVAL;
  1572. }
  1573. rc = msm_vidc_change_inst_state(inst, new_state, __func__);
  1574. if (rc)
  1575. return rc;
  1576. return rc;
  1577. }
  1578. int msm_vidc_state_change_start(struct msm_vidc_inst *inst)
  1579. {
  1580. int rc = 0;
  1581. enum msm_vidc_inst_state new_state = MSM_VIDC_ERROR;
  1582. if (!inst || !inst->core) {
  1583. d_vpr_e("%s: invalid params\n", __func__);
  1584. return -EINVAL;
  1585. }
  1586. if (inst->state == MSM_VIDC_DRAIN_LAST_FLAG ||
  1587. inst->state == MSM_VIDC_DRC_LAST_FLAG) {
  1588. new_state = MSM_VIDC_START;
  1589. rc = msm_vidc_process_pending_ipsc(inst, &new_state);
  1590. if (rc) {
  1591. i_vpr_e(inst, "%s: process pending ipsc failed\n", __func__);
  1592. goto state_change;
  1593. }
  1594. } else if (inst->state == MSM_VIDC_DRC_DRAIN_LAST_FLAG) {
  1595. new_state = MSM_VIDC_DRAIN;
  1596. rc = msm_vidc_process_pending_ipsc(inst, &new_state);
  1597. if (rc) {
  1598. i_vpr_e(inst, "%s: process pending ipsc failed\n", __func__);
  1599. goto state_change;
  1600. }
  1601. } else {
  1602. i_vpr_e(inst, "%s: wrong state %s\n", __func__, state_name(inst->state));
  1603. new_state = MSM_VIDC_ERROR;
  1604. rc = -EINVAL;
  1605. goto state_change;
  1606. }
  1607. state_change:
  1608. msm_vidc_change_inst_state(inst, new_state, __func__);
  1609. return rc;
  1610. }
  1611. int msm_vidc_state_change_input_psc(struct msm_vidc_inst *inst)
  1612. {
  1613. int rc = 0;
  1614. enum msm_vidc_inst_state new_state = MSM_VIDC_ERROR;
  1615. if (!inst || !inst->core) {
  1616. d_vpr_e("%s: invalid params\n", __func__);
  1617. return -EINVAL;
  1618. }
  1619. /* don't change state as output port is not started yet */
  1620. if (inst->state == MSM_VIDC_START_INPUT)
  1621. return 0;
  1622. if (inst->state == MSM_VIDC_START) {
  1623. new_state = MSM_VIDC_DRC;
  1624. } else if (inst->state == MSM_VIDC_DRAIN) {
  1625. new_state = MSM_VIDC_DRC_DRAIN;
  1626. } else {
  1627. i_vpr_e(inst, "%s: wrong state %s\n",
  1628. __func__, state_name(inst->state));
  1629. msm_vidc_change_inst_state(inst, MSM_VIDC_ERROR, __func__);
  1630. return -EINVAL;
  1631. }
  1632. rc = msm_vidc_change_inst_state(inst, new_state, __func__);
  1633. if (rc)
  1634. return rc;
  1635. return rc;
  1636. }
  1637. int msm_vidc_state_change_last_flag(struct msm_vidc_inst *inst)
  1638. {
  1639. int rc = 0;
  1640. enum msm_vidc_inst_state new_state = MSM_VIDC_ERROR;
  1641. if (!inst || !inst->core) {
  1642. d_vpr_e("%s: invalid params\n", __func__);
  1643. return -EINVAL;
  1644. }
  1645. if (inst->state == MSM_VIDC_DRC) {
  1646. new_state = MSM_VIDC_DRC_LAST_FLAG;
  1647. } else if (inst->state == MSM_VIDC_DRAIN) {
  1648. new_state = MSM_VIDC_DRAIN_LAST_FLAG;
  1649. } else if (inst->state == MSM_VIDC_DRC_DRAIN) {
  1650. new_state = MSM_VIDC_DRC_DRAIN_LAST_FLAG;
  1651. } else {
  1652. i_vpr_e(inst, "%s: wrong state %s\n",
  1653. __func__, state_name(inst->state));
  1654. msm_vidc_change_inst_state(inst, MSM_VIDC_ERROR, __func__);
  1655. return -EINVAL;
  1656. }
  1657. rc = msm_vidc_change_inst_state(inst, new_state, __func__);
  1658. if (rc)
  1659. return rc;
  1660. return rc;
  1661. }
  1662. int msm_vidc_get_control(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
  1663. {
  1664. int rc = 0;
  1665. if (!inst || !ctrl) {
  1666. d_vpr_e("%s: invalid params\n", __func__);
  1667. return -EINVAL;
  1668. }
  1669. switch (ctrl->id) {
  1670. case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
  1671. ctrl->val = inst->buffers.output.min_count +
  1672. inst->buffers.output.extra_count;
  1673. i_vpr_h(inst, "g_min: output buffers %d\n", ctrl->val);
  1674. break;
  1675. case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
  1676. ctrl->val = inst->buffers.input.min_count +
  1677. inst->buffers.input.extra_count;
  1678. i_vpr_h(inst, "g_min: input buffers %d\n", ctrl->val);
  1679. break;
  1680. default:
  1681. break;
  1682. }
  1683. return rc;
  1684. }
  1685. int msm_vidc_get_mbs_per_frame(struct msm_vidc_inst *inst)
  1686. {
  1687. int height = 0, width = 0;
  1688. struct v4l2_format *inp_f;
  1689. if (is_decode_session(inst)) {
  1690. inp_f = &inst->fmts[INPUT_PORT];
  1691. width = inp_f->fmt.pix_mp.width;
  1692. height = inp_f->fmt.pix_mp.height;
  1693. } else if (is_encode_session(inst)) {
  1694. width = inst->crop.width;
  1695. height = inst->crop.height;
  1696. }
  1697. return NUM_MBS_PER_FRAME(height, width);
  1698. }
  1699. int msm_vidc_get_fps(struct msm_vidc_inst *inst)
  1700. {
  1701. int fps;
  1702. u32 frame_rate, operating_rate;
  1703. if (!inst || !inst->capabilities) {
  1704. d_vpr_e("%s: invalid params\n", __func__);
  1705. return -EINVAL;
  1706. }
  1707. frame_rate = inst->capabilities->cap[FRAME_RATE].value;
  1708. operating_rate = inst->capabilities->cap[OPERATING_RATE].value;
  1709. if (operating_rate > frame_rate)
  1710. fps = (operating_rate >> 16) ?
  1711. (operating_rate >> 16) : 1;
  1712. else
  1713. fps = frame_rate >> 16;
  1714. return fps;
  1715. }
  1716. int msm_vidc_num_buffers(struct msm_vidc_inst *inst,
  1717. enum msm_vidc_buffer_type type, enum msm_vidc_buffer_attributes attr)
  1718. {
  1719. int count = 0;
  1720. struct msm_vidc_buffer *vbuf;
  1721. struct msm_vidc_buffers *buffers;
  1722. if (!inst) {
  1723. d_vpr_e("%s: invalid params\n", __func__);
  1724. return count;
  1725. }
  1726. if (type == MSM_VIDC_BUF_OUTPUT) {
  1727. buffers = &inst->buffers.output;
  1728. } else if (type == MSM_VIDC_BUF_INPUT) {
  1729. buffers = &inst->buffers.input;
  1730. } else {
  1731. i_vpr_e(inst, "%s: invalid buffer type %#x\n",
  1732. __func__, type);
  1733. return count;
  1734. }
  1735. list_for_each_entry(vbuf, &buffers->list, list) {
  1736. if (vbuf->type != type)
  1737. continue;
  1738. if (!(vbuf->attr & attr))
  1739. continue;
  1740. count++;
  1741. }
  1742. return count;
  1743. }
  1744. static int vb2_buffer_to_driver(struct vb2_buffer *vb2,
  1745. struct msm_vidc_buffer *buf)
  1746. {
  1747. int rc = 0;
  1748. if (!vb2 || !buf) {
  1749. d_vpr_e("%s: invalid params\n", __func__);
  1750. return -EINVAL;
  1751. }
  1752. buf->type = v4l2_type_to_driver(vb2->type, __func__);
  1753. if (!buf->type)
  1754. return -EINVAL;
  1755. buf->index = vb2->index;
  1756. buf->fd = vb2->planes[0].m.fd;
  1757. buf->data_offset = vb2->planes[0].data_offset;
  1758. buf->data_size = vb2->planes[0].bytesused - vb2->planes[0].data_offset;
  1759. buf->buffer_size = vb2->planes[0].length;
  1760. buf->timestamp = vb2->timestamp;
  1761. return rc;
  1762. }
/*
 * msm_vidc_process_readonly_buffers() - transfer read-only status from
 * the read-only tracking list onto @buf before it is queued again.
 *
 * Only applies to decoder output buffers. Matching is by device address;
 * a matched tracking entry is consumed (removed and returned to the
 * buffer pool) after tagging @buf with MSM_VIDC_ATTR_READ_ONLY.
 *
 * Return: 0 on success (including "no match"), -EINVAL on invalid
 * params or missing read-only buffer list.
 */
int msm_vidc_process_readonly_buffers(struct msm_vidc_inst *inst,
	struct msm_vidc_buffer *buf)
{
	int rc = 0;
	struct msm_vidc_buffer *ro_buf, *dummy;
	struct msm_vidc_buffers *ro_buffers;

	if (!inst || !buf) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (!is_decode_session(inst) || !is_output_buffer(buf->type))
		return 0;

	ro_buffers = msm_vidc_get_buffers(inst, MSM_VIDC_BUF_READ_ONLY, __func__);
	if (!ro_buffers)
		return -EINVAL;

	/*
	 * check if buffer present in ro_buffers list
	 * if present: add ro flag to buf and remove from ro_buffers list
	 * if not present: do nothing
	 */
	list_for_each_entry_safe(ro_buf, dummy, &ro_buffers->list, list) {
		if (ro_buf->device_addr == buf->device_addr) {
			buf->attr |= MSM_VIDC_ATTR_READ_ONLY;
			print_vidc_buffer(VIDC_LOW, "low ", "ro buf removed", inst, ro_buf);
			list_del(&ro_buf->list);
			msm_memory_free(inst, MSM_MEM_POOL_BUFFER, ro_buf);
			break;
		}
	}

	return rc;
}
  1794. int msm_vidc_memory_unmap_completely(struct msm_vidc_inst *inst,
  1795. struct msm_vidc_map *map)
  1796. {
  1797. int rc = 0;
  1798. if (!inst || !map) {
  1799. d_vpr_e("%s: invalid params\n", __func__);
  1800. return -EINVAL;
  1801. }
  1802. if (!map->refcount)
  1803. return 0;
  1804. while (map->refcount) {
  1805. rc = msm_vidc_memory_unmap(inst->core, map);
  1806. if (rc)
  1807. break;
  1808. if (!map->refcount) {
  1809. msm_vidc_memory_put_dmabuf(map->dmabuf);
  1810. list_del(&map->list);
  1811. msm_memory_free(inst, MSM_MEM_POOL_MAP, map);
  1812. break;
  1813. }
  1814. }
  1815. return rc;
  1816. }
/*
 * msm_vidc_set_auto_framerate() - derive the instantaneous encode frame
 * rate from queued buffer timestamps and push it to firmware when it has
 * changed and been stable for two consecutive intervals.
 *
 * Rates are handled in Q16. The newest timestamp is first folded into
 * the sorted timestamp window; the last two inter-frame deltas in that
 * window give curr_fr/prev_fr. Skipped entirely for image sessions,
 * superframe sessions, or when the core disables auto framerate.
 *
 * Return: 0 on success or when skipped, negative error otherwise.
 */
int msm_vidc_set_auto_framerate(struct msm_vidc_inst *inst, u64 timestamp)
{
	struct msm_vidc_core *core;
	struct msm_vidc_timestamp *ts;
	struct msm_vidc_timestamp *prev = NULL;
	u32 counter = 0, prev_fr = 0, curr_fr = 0;
	u64 ts_ms = 0;
	int rc = 0;

	if (!inst || !inst->core || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	core = inst->core;
	if (!core->capabilities[ENC_AUTO_FRAMERATE].value ||
	    is_image_session(inst) || msm_vidc_is_super_buffer(inst))
		goto exit;

	rc = msm_vidc_update_timestamp(inst, timestamp);
	if (rc)
		goto exit;

	list_for_each_entry(ts, &inst->timestamps.list, sort.list) {
		if (prev) {
			/* delta in ms between consecutive sorted timestamps */
			ts_ms = div_u64(ts->sort.val - prev->sort.val, 1000);
			prev_fr = curr_fr;
			/* zero delta: keep the currently programmed rate */
			curr_fr = ts_ms ? div_u64(MSEC_PER_SEC, ts_ms) << 16 :
				inst->auto_framerate;
			if (curr_fr > inst->capabilities->cap[FRAME_RATE].max)
				curr_fr = inst->capabilities->cap[FRAME_RATE].max;
		}
		prev = ts;
		counter++;
	}

	/* wait for a full measurement window before programming firmware */
	if (counter < ENC_FPS_WINDOW)
		goto exit;

	/* if framerate changed and stable for 2 frames, set to firmware */
	if (curr_fr == prev_fr && curr_fr != inst->auto_framerate) {
		i_vpr_l(inst, "%s: updated fps to %u\n", __func__, curr_fr >> 16);
		rc = venus_hfi_session_property(inst,
				HFI_PROP_FRAME_RATE,
				HFI_HOST_FLAGS_NONE,
				HFI_PORT_BITSTREAM,
				HFI_PAYLOAD_Q16,
				&curr_fr,
				sizeof(u32));
		if (rc) {
			i_vpr_e(inst, "%s: set auto frame rate failed\n",
				__func__);
			goto exit;
		}
		inst->auto_framerate = curr_fr;
	}
exit:
	return rc;
}
/*
 * msm_vidc_calc_window_avg_framerate() - average frame rate over the
 * current timestamp window.
 *
 * Walks the sorted timestamp list accumulating inter-frame deltas in ms
 * and counting intervals; duplicate timestamps are skipped (the
 * `continue` also skips the prev update, which is harmless since the
 * values are equal).
 *
 * Return: frames per second as (1000 * intervals) / total_ms, or 0 when
 * no time elapsed; -EINVAL on invalid params.
 */
int msm_vidc_calc_window_avg_framerate(struct msm_vidc_inst *inst)
{
	struct msm_vidc_timestamp *ts;
	struct msm_vidc_timestamp *prev = NULL;
	u32 counter = 0;
	u64 ts_ms = 0;

	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	list_for_each_entry(ts, &inst->timestamps.list, sort.list) {
		if (prev) {
			if (ts->sort.val == prev->sort.val)
				continue;
			/* note: deltas divided by 1e6 — ns-to-ms scale */
			ts_ms += div_u64(ts->sort.val - prev->sort.val, 1000000);
			counter++;
		}
		prev = ts;
	}

	return ts_ms ? (1000 * counter) / ts_ms : 0;
}
/*
 * msm_vidc_insert_sort() - insert @entry into the ascending-sorted list
 * @head, keeping the ordering by sort value.
 *
 * Three cases: empty list and smaller-than-first insert at the head;
 * otherwise the walk finds the first prev/node pair bracketing the value
 * and inserts after prev; if the value is larger than everything, it is
 * appended after the last node (prev is non-NULL here since the
 * single-element and head cases were handled above).
 *
 * Return: 0 on success, -EINVAL on invalid params.
 */
static int msm_vidc_insert_sort(struct list_head *head,
	struct msm_vidc_sort *entry)
{
	struct msm_vidc_sort *first, *node;
	struct msm_vidc_sort *prev = NULL;
	bool is_inserted = false;

	if (!head || !entry) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (list_empty(head)) {
		list_add(&entry->list, head);
		return 0;
	}

	first = list_first_entry(head, struct msm_vidc_sort, list);
	if (entry->val < first->val) {
		list_add(&entry->list, head);
		return 0;
	}

	list_for_each_entry(node, head, list) {
		if (prev &&
			entry->val >= prev->val && entry->val <= node->val) {
			/* bracketed: place between prev and node */
			list_add(&entry->list, &prev->list);
			is_inserted = true;
			break;
		}
		prev = node;
	}

	if (!is_inserted)
		/* larger than all existing values: append at the tail */
		list_add(&entry->list, &prev->list);

	return 0;
}
  1923. static struct msm_vidc_timestamp *msm_vidc_get_least_rank_ts(struct msm_vidc_inst *inst)
  1924. {
  1925. struct msm_vidc_timestamp *ts, *final = NULL;
  1926. u64 least_rank = INT_MAX;
  1927. if (!inst) {
  1928. d_vpr_e("%s: Invalid params\n", __func__);
  1929. return NULL;
  1930. }
  1931. list_for_each_entry(ts, &inst->timestamps.list, sort.list) {
  1932. if (ts->rank < least_rank) {
  1933. least_rank = ts->rank;
  1934. final = ts;
  1935. }
  1936. }
  1937. return final;
  1938. }
  1939. int msm_vidc_flush_ts(struct msm_vidc_inst *inst)
  1940. {
  1941. struct msm_vidc_timestamp *temp, *ts = NULL;
  1942. if (!inst) {
  1943. d_vpr_e("%s: Invalid params\n", __func__);
  1944. return -EINVAL;
  1945. }
  1946. list_for_each_entry_safe(ts, temp, &inst->timestamps.list, sort.list) {
  1947. i_vpr_l(inst, "%s: flushing ts: val %lld, rank %%lld\n",
  1948. __func__, ts->sort.val, ts->rank);
  1949. list_del(&ts->sort.list);
  1950. msm_memory_free(inst, MSM_MEM_POOL_TIMESTAMP, ts);
  1951. }
  1952. inst->timestamps.count = 0;
  1953. inst->timestamps.rank = 0;
  1954. return 0;
  1955. }
  1956. int msm_vidc_update_timestamp(struct msm_vidc_inst *inst, u64 timestamp)
  1957. {
  1958. struct msm_vidc_timestamp *ts;
  1959. int rc = 0;
  1960. u32 window_size = 0;
  1961. if (!inst) {
  1962. d_vpr_e("%s: Invalid params\n", __func__);
  1963. return -EINVAL;
  1964. }
  1965. ts = msm_memory_alloc(inst, MSM_MEM_POOL_TIMESTAMP);
  1966. if (!ts) {
  1967. i_vpr_e(inst, "%s: ts alloc failed\n", __func__);
  1968. return -ENOMEM;
  1969. }
  1970. INIT_LIST_HEAD(&ts->sort.list);
  1971. ts->sort.val = timestamp;
  1972. ts->rank = inst->timestamps.rank++;
  1973. rc = msm_vidc_insert_sort(&inst->timestamps.list, &ts->sort);
  1974. if (rc)
  1975. return rc;
  1976. inst->timestamps.count++;
  1977. if (is_encode_session(inst))
  1978. window_size = ENC_FPS_WINDOW;
  1979. else
  1980. window_size = DEC_FPS_WINDOW;
  1981. /* keep sliding window */
  1982. if (inst->timestamps.count > window_size) {
  1983. ts = msm_vidc_get_least_rank_ts(inst);
  1984. if (!ts) {
  1985. i_vpr_e(inst, "%s: least rank ts is NULL\n", __func__);
  1986. return -EINVAL;
  1987. }
  1988. inst->timestamps.count--;
  1989. list_del(&ts->sort.list);
  1990. msm_memory_free(inst, MSM_MEM_POOL_TIMESTAMP, ts);
  1991. }
  1992. return 0;
  1993. }
  1994. int msm_vidc_get_delayed_unmap(struct msm_vidc_inst *inst, struct msm_vidc_map *map)
  1995. {
  1996. int rc = 0;
  1997. if (!inst || !map) {
  1998. d_vpr_e("%s: invalid params\n", __func__);
  1999. return -EINVAL;
  2000. }
  2001. map->skip_delayed_unmap = 1;
  2002. rc = msm_vidc_memory_map(inst->core, map);
  2003. if (rc)
  2004. return rc;
  2005. return 0;
  2006. }
  2007. int msm_vidc_put_delayed_unmap(struct msm_vidc_inst *inst, struct msm_vidc_map *map)
  2008. {
  2009. int rc = 0;
  2010. if (!inst || !map) {
  2011. d_vpr_e("%s: invalid params\n", __func__);
  2012. return -EINVAL;
  2013. }
  2014. if (!map->skip_delayed_unmap) {
  2015. i_vpr_e(inst, "%s: no delayed unmap, addr %#x\n",
  2016. __func__, map->device_addr);
  2017. return -EINVAL;
  2018. }
  2019. map->skip_delayed_unmap = 0;
  2020. rc = msm_vidc_memory_unmap(inst->core, map);
  2021. if (rc)
  2022. i_vpr_e(inst, "%s: unmap failed\n", __func__);
  2023. if (!map->refcount) {
  2024. msm_vidc_memory_put_dmabuf(map->dmabuf);
  2025. list_del(&map->list);
  2026. msm_memory_free(inst, MSM_MEM_POOL_MAP, map);
  2027. }
  2028. return rc;
  2029. }
  2030. int msm_vidc_unmap_buffers(struct msm_vidc_inst *inst,
  2031. enum msm_vidc_buffer_type type)
  2032. {
  2033. int rc = 0;
  2034. struct msm_vidc_mappings *mappings;
  2035. struct msm_vidc_map *map, *dummy;
  2036. if (!inst) {
  2037. d_vpr_e("%s: invalid params\n", __func__);
  2038. return -EINVAL;
  2039. }
  2040. mappings = msm_vidc_get_mappings(inst, type, __func__);
  2041. if (!mappings)
  2042. return -EINVAL;
  2043. list_for_each_entry_safe(map, dummy, &mappings->list, list) {
  2044. msm_vidc_memory_unmap_completely(inst, map);
  2045. }
  2046. return rc;
  2047. }
/*
 * msm_vidc_unmap_driver_buf() - drop one mapping reference for @buf and
 * release the map entry when the refcount reaches zero.
 *
 * The map entry is located by dmabuf identity in the per-type mappings
 * list; a missing entry indicates inconsistent bookkeeping and is
 * treated as an error.
 *
 * Return: 0 on success, -EINVAL on invalid params, missing mapping, or
 * unmap failure.
 */
int msm_vidc_unmap_driver_buf(struct msm_vidc_inst *inst,
	struct msm_vidc_buffer *buf)
{
	int rc = 0;
	struct msm_vidc_mappings *mappings;
	struct msm_vidc_map *map = NULL;
	bool found = false;

	if (!inst || !buf) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	mappings = msm_vidc_get_mappings(inst, buf->type, __func__);
	if (!mappings)
		return -EINVAL;

	/* sanity check to see if it was not removed */
	list_for_each_entry(map, &mappings->list, list) {
		if (map->dmabuf == buf->dmabuf) {
			found = true;
			break;
		}
	}
	if (!found) {
		print_vidc_buffer(VIDC_ERR, "err ", "no buf in mappings", inst, buf);
		return -EINVAL;
	}

	rc = msm_vidc_memory_unmap(inst->core, map);
	if (rc) {
		print_vidc_buffer(VIDC_ERR, "err ", "unmap failed", inst, buf);
		return -EINVAL;
	}

	/* finally delete if refcount is zero */
	if (!map->refcount) {
		msm_vidc_memory_put_dmabuf(map->dmabuf);
		list_del(&map->list);
		msm_memory_free(inst, MSM_MEM_POOL_MAP, map);
	}

	return rc;
}
  2086. int msm_vidc_map_driver_buf(struct msm_vidc_inst *inst,
  2087. struct msm_vidc_buffer *buf)
  2088. {
  2089. int rc = 0;
  2090. struct msm_vidc_mappings *mappings;
  2091. struct msm_vidc_map *map;
  2092. bool found = false;
  2093. if (!inst || !buf) {
  2094. d_vpr_e("%s: invalid params\n", __func__);
  2095. return -EINVAL;
  2096. }
  2097. mappings = msm_vidc_get_mappings(inst, buf->type, __func__);
  2098. if (!mappings)
  2099. return -EINVAL;
  2100. /*
  2101. * new buffer: map twice for delayed unmap feature sake
  2102. * existing buffer: map once
  2103. */
  2104. list_for_each_entry(map, &mappings->list, list) {
  2105. if (map->dmabuf == buf->dmabuf) {
  2106. found = true;
  2107. break;
  2108. }
  2109. }
  2110. if (!found) {
  2111. /* new buffer case */
  2112. map = msm_memory_alloc(inst, MSM_MEM_POOL_MAP);
  2113. if (!map) {
  2114. i_vpr_e(inst, "%s: alloc failed\n", __func__);
  2115. return -ENOMEM;
  2116. }
  2117. INIT_LIST_HEAD(&map->list);
  2118. map->type = buf->type;
  2119. map->dmabuf = msm_vidc_memory_get_dmabuf(buf->fd);
  2120. if (!map->dmabuf)
  2121. return -EINVAL;
  2122. map->region = msm_vidc_get_buffer_region(inst, buf->type, __func__);
  2123. /* delayed unmap feature needed for decoder output buffers */
  2124. if (is_decode_session(inst) && is_output_buffer(buf->type)) {
  2125. rc = msm_vidc_get_delayed_unmap(inst, map);
  2126. if (rc) {
  2127. msm_vidc_memory_put_dmabuf(map->dmabuf);
  2128. msm_memory_free(inst, MSM_MEM_POOL_MAP, map);
  2129. return rc;
  2130. }
  2131. }
  2132. list_add_tail(&map->list, &mappings->list);
  2133. }
  2134. rc = msm_vidc_memory_map(inst->core, map);
  2135. if (rc)
  2136. return rc;
  2137. buf->device_addr = map->device_addr;
  2138. return 0;
  2139. }
  2140. int msm_vidc_put_driver_buf(struct msm_vidc_inst *inst,
  2141. struct msm_vidc_buffer *buf)
  2142. {
  2143. int rc = 0;
  2144. if (!inst || !buf) {
  2145. d_vpr_e("%s: invalid params\n", __func__);
  2146. return -EINVAL;
  2147. }
  2148. msm_vidc_unmap_driver_buf(inst, buf);
  2149. msm_vidc_memory_put_dmabuf(buf->dmabuf);
  2150. /* delete the buffer from buffers->list */
  2151. list_del(&buf->list);
  2152. msm_memory_free(inst, MSM_MEM_POOL_BUFFER, buf);
  2153. return rc;
  2154. }
/*
 * msm_vidc_get_driver_buf() - build a driver buffer for a queued vb2 buffer.
 *
 * Allocates a msm_vidc_buffer from the pool, links it into the per-type
 * buffers list, copies vb2 details into it, takes a dma-buf reference for
 * its fd and maps it to a device address.  Every buffer starts out marked
 * DEFERRED until it is actually queued to firmware.
 *
 * Returns the new buffer, or NULL on any failure; on failure the partially
 * built buffer is unlinked and returned to the pool.
 */
struct msm_vidc_buffer *msm_vidc_get_driver_buf(struct msm_vidc_inst *inst,
        struct vb2_buffer *vb2)
{
        int rc = 0;
        struct msm_vidc_buffer *buf = NULL;
        struct msm_vidc_buffers *buffers;
        enum msm_vidc_buffer_type buf_type;

        if (!inst || !vb2) {
                d_vpr_e("%s: invalid params\n", __func__);
                return NULL;
        }

        buf_type = v4l2_type_to_driver(vb2->type, __func__);
        if (!buf_type)
                return NULL;

        buffers = msm_vidc_get_buffers(inst, buf_type, __func__);
        if (!buffers)
                return NULL;

        buf = msm_memory_alloc(inst, MSM_MEM_POOL_BUFFER);
        if (!buf) {
                i_vpr_e(inst, "%s: alloc failed\n", __func__);
                return NULL;
        }
        INIT_LIST_HEAD(&buf->list);
        /* linked before it is fully initialized; error path unlinks it */
        list_add_tail(&buf->list, &buffers->list);
        rc = vb2_buffer_to_driver(vb2, buf);
        if (rc)
                goto error;

        /* hold a dma-buf reference for the fd filled in by the copy above */
        buf->dmabuf = msm_vidc_memory_get_dmabuf(buf->fd);
        if (!buf->dmabuf)
                goto error;

        /* treat every buffer as deferred buffer initially */
        buf->attr |= MSM_VIDC_ATTR_DEFERRED;

        rc = msm_vidc_map_driver_buf(inst, buf);
        if (rc)
                goto error;

        return buf;

error:
        /*
         * NOTE(review): on early failures buf->dmabuf may still be NULL
         * here - assumes msm_vidc_memory_put_dmabuf(NULL) is a no-op;
         * confirm against its implementation.
         */
        msm_vidc_memory_put_dmabuf(buf->dmabuf);
        list_del(&buf->list);
        msm_memory_free(inst, MSM_MEM_POOL_BUFFER, buf);
        return NULL;
}
  2197. struct msm_vidc_buffer *get_meta_buffer(struct msm_vidc_inst *inst,
  2198. struct msm_vidc_buffer *buf)
  2199. {
  2200. struct msm_vidc_buffer *mbuf;
  2201. struct msm_vidc_buffers *buffers;
  2202. bool found = false;
  2203. if (!inst || !buf) {
  2204. d_vpr_e("%s: invalid params\n", __func__);
  2205. return NULL;
  2206. }
  2207. if (buf->type == MSM_VIDC_BUF_INPUT) {
  2208. buffers = &inst->buffers.input_meta;
  2209. } else if (buf->type == MSM_VIDC_BUF_OUTPUT) {
  2210. buffers = &inst->buffers.output_meta;
  2211. } else {
  2212. i_vpr_e(inst, "%s: invalid buffer type %d\n",
  2213. __func__, buf->type);
  2214. return NULL;
  2215. }
  2216. list_for_each_entry(mbuf, &buffers->list, list) {
  2217. if (mbuf->index == buf->index) {
  2218. found = true;
  2219. break;
  2220. }
  2221. }
  2222. if (!found)
  2223. return NULL;
  2224. return mbuf;
  2225. }
  2226. bool msm_vidc_is_super_buffer(struct msm_vidc_inst *inst)
  2227. {
  2228. struct msm_vidc_inst_capability *capability = NULL;
  2229. if (!inst || !inst->capabilities) {
  2230. d_vpr_e("%s: Invalid params\n", __func__);
  2231. return false;
  2232. }
  2233. capability = inst->capabilities;
  2234. return !!capability->cap[SUPER_FRAME].value;
  2235. }
  2236. static bool is_single_session(struct msm_vidc_inst *inst)
  2237. {
  2238. struct msm_vidc_core *core;
  2239. u32 count = 0;
  2240. if (!inst) {
  2241. d_vpr_e("%s: Invalid params\n", __func__);
  2242. return false;
  2243. }
  2244. core = inst->core;
  2245. core_lock(core, __func__);
  2246. list_for_each_entry(inst, &core->instances, list)
  2247. count++;
  2248. core_unlock(core, __func__);
  2249. return count == 1;
  2250. }
/*
 * msm_vidc_allow_dcvs() - decide whether DCVS may be enabled for this
 * session and record the decision in inst->power.dcvs_mode
 * (inst->power.dcvs_flags is reset to 0 either way).
 *
 * DCVS is disallowed when any of the following holds: a global clock vote
 * override (msm_vidc_clock_voting), missing core DCVS capability, decode
 * batching, encoder super-buffer mode, or a thumbnail / non-realtime /
 * image / lowlatency session.
 */
void msm_vidc_allow_dcvs(struct msm_vidc_inst *inst)
{
        bool allow = false;
        struct msm_vidc_core *core;

        if (!inst || !inst->core) {
                d_vpr_e("%s: Invalid args: %pK\n", __func__, inst);
                return;
        }
        core = inst->core;

        /* each check falls through to exit on the first disqualifier */
        allow = !msm_vidc_clock_voting;
        if (!allow) {
                i_vpr_h(inst, "%s: core_clock_voting is set\n", __func__);
                goto exit;
        }

        allow = core->capabilities[DCVS].value;
        if (!allow) {
                i_vpr_h(inst, "%s: core doesn't support dcvs\n", __func__);
                goto exit;
        }

        allow = !inst->decode_batch.enable;
        if (!allow) {
                i_vpr_h(inst, "%s: decode_batching enabled\n", __func__);
                goto exit;
        }

        allow = !msm_vidc_is_super_buffer(inst);
        if (!allow) {
                i_vpr_h(inst, "%s: encode_batching(super_buffer) enabled\n", __func__);
                goto exit;
        }

        allow = !is_thumbnail_session(inst);
        if (!allow) {
                i_vpr_h(inst, "%s: thumbnail session\n", __func__);
                goto exit;
        }

        allow = is_realtime_session(inst);
        if (!allow) {
                i_vpr_h(inst, "%s: non-realtime session\n", __func__);
                goto exit;
        }

        allow = !is_image_session(inst);
        if (!allow) {
                i_vpr_h(inst, "%s: image session\n", __func__);
                goto exit;
        }

        allow = !is_lowlatency_session(inst);
        if (!allow) {
                i_vpr_h(inst, "%s: lowlatency session\n", __func__);
                goto exit;
        }

exit:
        i_vpr_hp(inst, "%s: dcvs: %s\n", __func__, allow ? "enabled" : "disabled");
        inst->power.dcvs_flags = 0;
        inst->power.dcvs_mode = allow;
}
/*
 * msm_vidc_allow_decode_batch() - decide whether decode batching may stay
 * enabled for this session.
 *
 * Batching is kept only when all of the following hold: batching was
 * enabled on the instance, the core advertises DECODE_BATCH, this is the
 * only session on the core, it is a realtime decoder session that is not
 * thumbnail / image / lowlatency, and both fps and mbs-per-frame are below
 * the BATCH_FPS / BATCH_MBPF capability limits.
 *
 * Returns true when batching is allowed, false otherwise.
 */
bool msm_vidc_allow_decode_batch(struct msm_vidc_inst *inst)
{
        struct msm_vidc_inst_capability *capability;
        struct msm_vidc_core *core;
        bool allow = false;
        u32 value = 0;

        if (!inst || !inst->core || !inst->capabilities) {
                d_vpr_e("%s: invalid params\n", __func__);
                return false;
        }
        core = inst->core;
        capability = inst->capabilities;

        /* each check falls through to exit on the first disqualifier */
        allow = inst->decode_batch.enable;
        if (!allow) {
                i_vpr_h(inst, "%s: batching already disabled\n", __func__);
                goto exit;
        }

        allow = core->capabilities[DECODE_BATCH].value;
        if (!allow) {
                i_vpr_h(inst, "%s: core doesn't support batching\n", __func__);
                goto exit;
        }

        allow = is_single_session(inst);
        if (!allow) {
                i_vpr_h(inst, "%s: multiple sessions running\n", __func__);
                goto exit;
        }

        allow = is_decode_session(inst);
        if (!allow) {
                i_vpr_h(inst, "%s: not a decoder session\n", __func__);
                goto exit;
        }

        allow = !is_thumbnail_session(inst);
        if (!allow) {
                i_vpr_h(inst, "%s: thumbnail session\n", __func__);
                goto exit;
        }

        allow = !is_image_session(inst);
        if (!allow) {
                i_vpr_h(inst, "%s: image session\n", __func__);
                goto exit;
        }

        allow = is_realtime_session(inst);
        if (!allow) {
                i_vpr_h(inst, "%s: non-realtime session\n", __func__);
                goto exit;
        }

        allow = !is_lowlatency_session(inst);
        if (!allow) {
                i_vpr_h(inst, "%s: lowlatency session\n", __func__);
                goto exit;
        }

        /* strictly below the batching fps cap */
        value = msm_vidc_get_fps(inst);
        allow = value < capability->cap[BATCH_FPS].value;
        if (!allow) {
                i_vpr_h(inst, "%s: unsupported fps %u, max %u\n", __func__,
                        value, capability->cap[BATCH_FPS].value);
                goto exit;
        }

        /* strictly below the batching mbs-per-frame cap */
        value = msm_vidc_get_mbs_per_frame(inst);
        allow = value < capability->cap[BATCH_MBPF].value;
        if (!allow) {
                i_vpr_h(inst, "%s: unsupported mbpf %u, max %u\n", __func__,
                        value, capability->cap[BATCH_MBPF].value);
                goto exit;
        }

exit:
        i_vpr_hp(inst, "%s: batching: %s\n", __func__, allow ? "enabled" : "disabled");
        return allow;
}
  2375. static void msm_vidc_update_input_cr(struct msm_vidc_inst *inst, u32 idx, u32 cr)
  2376. {
  2377. struct msm_vidc_input_cr_data *temp, *next;
  2378. bool found = false;
  2379. list_for_each_entry_safe(temp, next, &inst->enc_input_crs, list) {
  2380. if (temp->index == idx) {
  2381. temp->input_cr = cr;
  2382. found = true;
  2383. break;
  2384. }
  2385. }
  2386. if (!found) {
  2387. temp = kzalloc(sizeof(*temp), GFP_KERNEL);
  2388. if (!temp) {
  2389. i_vpr_e(inst, "%s: malloc failure.\n", __func__);
  2390. return;
  2391. }
  2392. temp->index = idx;
  2393. temp->input_cr = cr;
  2394. list_add_tail(&temp->list, &inst->enc_input_crs);
  2395. }
  2396. }
  2397. static void msm_vidc_free_input_cr_list(struct msm_vidc_inst *inst)
  2398. {
  2399. struct msm_vidc_input_cr_data *temp, *next;
  2400. list_for_each_entry_safe(temp, next, &inst->enc_input_crs, list) {
  2401. list_del(&temp->list);
  2402. kfree(temp);
  2403. }
  2404. INIT_LIST_HEAD(&inst->enc_input_crs);
  2405. }
  2406. void msm_vidc_free_capabililty_list(struct msm_vidc_inst *inst,
  2407. enum msm_vidc_ctrl_list_type list_type)
  2408. {
  2409. struct msm_vidc_inst_cap_entry *temp = NULL, *next = NULL;
  2410. if (list_type & CHILD_LIST) {
  2411. list_for_each_entry_safe(temp, next, &inst->children.list, list) {
  2412. list_del(&temp->list);
  2413. kfree(temp);
  2414. }
  2415. INIT_LIST_HEAD(&inst->children.list);
  2416. }
  2417. temp = NULL;
  2418. next = NULL;
  2419. if (list_type & FW_LIST) {
  2420. list_for_each_entry_safe(temp, next, &inst->firmware.list, list) {
  2421. list_del(&temp->list);
  2422. kfree(temp);
  2423. }
  2424. INIT_LIST_HEAD(&inst->firmware.list);
  2425. }
  2426. }
/*
 * msm_vidc_queue_buffer() - push one driver buffer (plus its meta buffer
 * when metadata is enabled for the port) to firmware.
 *
 * Encoder input: latches the per-buffer compression ratio (ENC_IP_CR) into
 * the input CR list, then resets the capability.  Decoder input: tags the
 * buffer codec-config while CODEC_CONFIG is set, then clears it.  Decoder
 * output: runs read-only buffer processing first.
 *
 * On success the DEFERRED attribute is cleared and QUEUED is set on the
 * buffer (and its meta), the power buffer counter and debugfs stats are
 * updated.  Returns 0 on success or a negative error code.
 */
static int msm_vidc_queue_buffer(struct msm_vidc_inst *inst, struct msm_vidc_buffer *buf)
{
        struct msm_vidc_buffer *meta;
        int rc = 0;
        u32 cr = 0;

        if (!inst || !buf || !inst->capabilities) {
                d_vpr_e("%s: invalid params\n", __func__);
                return -EINVAL;
        }

        /* encoder input: record this buffer's compression ratio */
        if (is_encode_session(inst) && is_input_buffer(buf->type)) {
                cr = inst->capabilities->cap[ENC_IP_CR].value;
                msm_vidc_update_input_cr(inst, buf->index, cr);
                msm_vidc_update_cap_value(inst, ENC_IP_CR, 0, __func__);
        }

        /* decoder input: one-shot codec-config marking, then reset */
        if (is_decode_session(inst) && is_input_buffer(buf->type) &&
                inst->capabilities->cap[CODEC_CONFIG].value) {
                buf->flags |= MSM_VIDC_BUF_FLAG_CODECCONFIG;
                msm_vidc_update_cap_value(inst, CODEC_CONFIG, 0, __func__);
        }

        if (is_decode_session(inst) && is_output_buffer(buf->type)) {
                rc = msm_vidc_process_readonly_buffers(inst, buf);
                if (rc)
                        return rc;
        }

        print_vidc_buffer(VIDC_HIGH, "high", "qbuf", inst, buf);
        meta = get_meta_buffer(inst, buf);
        if (meta)
                print_vidc_buffer(VIDC_HIGH, "high", "qbuf", inst, meta);

        /* a missing meta buffer is fatal when metadata is enabled */
        if (!meta && is_meta_enabled(inst, buf->type)) {
                print_vidc_buffer(VIDC_ERR, "err ", "missing meta for", inst, buf);
                return -EINVAL;
        }

        /* super-buffer mode uses a dedicated HFI queue path for input */
        if (msm_vidc_is_super_buffer(inst) && is_input_buffer(buf->type))
                rc = venus_hfi_queue_super_buffer(inst, buf, meta);
        else
                rc = venus_hfi_queue_buffer(inst, buf, meta);
        if (rc)
                return rc;

        /* firmware owns the buffer now */
        buf->attr &= ~MSM_VIDC_ATTR_DEFERRED;
        buf->attr |= MSM_VIDC_ATTR_QUEUED;
        if (meta) {
                meta->attr &= ~MSM_VIDC_ATTR_DEFERRED;
                meta->attr |= MSM_VIDC_ATTR_QUEUED;
        }

        /* power-related counter, incremented for input buffers only */
        if (is_input_buffer(buf->type))
                inst->power.buffer_counter++;

        if (buf->type == MSM_VIDC_BUF_INPUT)
                msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_ETB);
        else if (buf->type == MSM_VIDC_BUF_OUTPUT)
                msm_vidc_debugfs_update(inst, MSM_VIDC_DEBUGFS_EVENT_FTB);

        return 0;
}
/*
 * msm_vidc_queue_deferred_buffers() - queue to firmware every buffer of
 * @buf_type that is still marked MSM_VIDC_ATTR_DEFERRED.
 *
 * Power is scaled up once before the walk.  Stops and returns the error of
 * the first queue failure; returns 0 when all deferred buffers are queued.
 */
int msm_vidc_queue_deferred_buffers(struct msm_vidc_inst *inst, enum msm_vidc_buffer_type buf_type)
{
        struct msm_vidc_buffers *buffers;
        struct msm_vidc_buffer *buf;
        int rc = 0;

        if (!inst || !buf_type) {
                d_vpr_e("%s: invalid params\n", __func__);
                return -EINVAL;
        }
        buffers = msm_vidc_get_buffers(inst, buf_type, __func__);
        if (!buffers)
                return -EINVAL;

        msm_vidc_scale_power(inst, true);

        list_for_each_entry(buf, &buffers->list, list) {
                /* skip buffers already handed to firmware */
                if (!(buf->attr & MSM_VIDC_ATTR_DEFERRED))
                        continue;
                rc = msm_vidc_queue_buffer(inst, buf);
                if (rc)
                        return rc;
        }

        return 0;
}
  2501. int msm_vidc_queue_buffer_single(struct msm_vidc_inst *inst, struct vb2_buffer *vb2)
  2502. {
  2503. int rc = 0;
  2504. struct msm_vidc_buffer *buf;
  2505. enum msm_vidc_allow allow;
  2506. if (!inst || !vb2) {
  2507. d_vpr_e("%s: invalid params\n", __func__);
  2508. return -EINVAL;
  2509. }
  2510. buf = msm_vidc_get_driver_buf(inst, vb2);
  2511. if (!buf)
  2512. return -EINVAL;
  2513. allow = msm_vidc_allow_qbuf(inst, vb2->type);
  2514. if (allow == MSM_VIDC_DISALLOW) {
  2515. i_vpr_e(inst, "%s: qbuf not allowed\n", __func__);
  2516. return -EINVAL;
  2517. } else if (allow == MSM_VIDC_DEFER) {
  2518. print_vidc_buffer(VIDC_LOW, "low ", "qbuf deferred", inst, buf);
  2519. return 0;
  2520. }
  2521. msm_vidc_scale_power(inst, is_input_buffer(buf->type));
  2522. rc = msm_vidc_queue_buffer(inst, buf);
  2523. if (rc)
  2524. return rc;
  2525. return rc;
  2526. }
/*
 * msm_vidc_destroy_internal_buffer() - tear down one internal (firmware)
 * buffer: its device mapping, its allocation and its tracking node, each
 * found by matching @buffer->dmabuf on the per-type lists.
 *
 * Non-internal types are rejected with a log but return 0.
 *
 * NOTE(review): buffers->size and the count fields are reset after
 * destroying a single buffer - this assumes callers destroy all buffers of
 * the type together; confirm against the callers.
 */
int msm_vidc_destroy_internal_buffer(struct msm_vidc_inst *inst,
        struct msm_vidc_buffer *buffer)
{
        struct msm_vidc_buffers *buffers;
        struct msm_vidc_allocations *allocations;
        struct msm_vidc_mappings *mappings;
        struct msm_vidc_alloc *alloc, *alloc_dummy;
        struct msm_vidc_map *map, *map_dummy;
        struct msm_vidc_buffer *buf, *dummy;

        if (!inst || !inst->core) {
                d_vpr_e("%s: invalid params\n", __func__);
                return -EINVAL;
        }
        if (!is_internal_buffer(buffer->type)) {
                i_vpr_e(inst, "%s: type: %s is not internal\n",
                        __func__, buf_name(buffer->type));
                return 0;
        }
        i_vpr_h(inst, "%s: destroy: type: %8s, size: %9u, device_addr %#x\n", __func__,
                buf_name(buffer->type), buffer->buffer_size, buffer->device_addr);

        buffers = msm_vidc_get_buffers(inst, buffer->type, __func__);
        if (!buffers)
                return -EINVAL;
        allocations = msm_vidc_get_allocations(inst, buffer->type, __func__);
        if (!allocations)
                return -EINVAL;
        mappings = msm_vidc_get_mappings(inst, buffer->type, __func__);
        if (!mappings)
                return -EINVAL;

        /* unmap and drop the device-address mapping entry */
        list_for_each_entry_safe(map, map_dummy, &mappings->list, list) {
                if (map->dmabuf == buffer->dmabuf) {
                        msm_vidc_memory_unmap(inst->core, map);
                        list_del(&map->list);
                        msm_memory_free(inst, MSM_MEM_POOL_MAP, map);
                        break;
                }
        }

        /* free the backing allocation and drop its entry */
        list_for_each_entry_safe(alloc, alloc_dummy, &allocations->list, list) {
                if (alloc->dmabuf == buffer->dmabuf) {
                        msm_vidc_memory_free(inst->core, alloc);
                        list_del(&alloc->list);
                        msm_memory_free(inst, MSM_MEM_POOL_ALLOC, alloc);
                        break;
                }
        }

        /* finally drop the buffer tracking node itself */
        list_for_each_entry_safe(buf, dummy, &buffers->list, list) {
                if (buf->dmabuf == buffer->dmabuf) {
                        list_del(&buf->list);
                        msm_memory_free(inst, MSM_MEM_POOL_BUFFER, buf);
                        break;
                }
        }

        buffers->size = 0;
        buffers->min_count = buffers->extra_count = buffers->actual_count = 0;
        return 0;
}
  2583. int msm_vidc_get_internal_buffers(struct msm_vidc_inst *inst,
  2584. enum msm_vidc_buffer_type buffer_type)
  2585. {
  2586. u32 buf_size;
  2587. u32 buf_count;
  2588. struct msm_vidc_core *core;
  2589. struct msm_vidc_buffers *buffers;
  2590. if (!inst || !inst->core) {
  2591. d_vpr_e("%s: invalid params\n", __func__);
  2592. return -EINVAL;
  2593. }
  2594. core = inst->core;
  2595. buf_size = call_session_op(core, buffer_size,
  2596. inst, buffer_type);
  2597. buf_count = call_session_op(core, min_count,
  2598. inst, buffer_type);
  2599. buffers = msm_vidc_get_buffers(inst, buffer_type, __func__);
  2600. if (!buffers)
  2601. return -EINVAL;
  2602. if (buf_size <= buffers->size &&
  2603. buf_count <= buffers->min_count) {
  2604. buffers->reuse = true;
  2605. } else {
  2606. buffers->reuse = false;
  2607. buffers->size = buf_size;
  2608. buffers->min_count = buf_count;
  2609. }
  2610. return 0;
  2611. }
/*
 * msm_vidc_create_internal_buffer() - allocate, map and track one internal
 * buffer of @buffer_type at @index.
 *
 * Creates three linked objects: a tracking msm_vidc_buffer, the backing
 * allocation (msm_vidc_alloc) and its device mapping (msm_vidc_map), each
 * appended to the corresponding per-type list.  Non-internal types and a
 * zero buffers->size are no-ops returning 0.
 *
 * NOTE(review): on mid-sequence failures the objects created so far stay
 * on their lists - assumed to be reclaimed by the destroy/teardown paths
 * that walk those lists; confirm before relying on it.
 */
int msm_vidc_create_internal_buffer(struct msm_vidc_inst *inst,
        enum msm_vidc_buffer_type buffer_type, u32 index)
{
        int rc = 0;
        struct msm_vidc_buffers *buffers;
        struct msm_vidc_allocations *allocations;
        struct msm_vidc_mappings *mappings;
        struct msm_vidc_buffer *buffer;
        struct msm_vidc_alloc *alloc;
        struct msm_vidc_map *map;

        if (!inst || !inst->core) {
                d_vpr_e("%s: invalid params\n", __func__);
                return -EINVAL;
        }
        if (!is_internal_buffer(buffer_type)) {
                i_vpr_e(inst, "%s: type %s is not internal\n",
                        __func__, buf_name(buffer_type));
                return 0;
        }

        buffers = msm_vidc_get_buffers(inst, buffer_type, __func__);
        if (!buffers)
                return -EINVAL;
        allocations = msm_vidc_get_allocations(inst, buffer_type, __func__);
        if (!allocations)
                return -EINVAL;
        mappings = msm_vidc_get_mappings(inst, buffer_type, __func__);
        if (!mappings)
                return -EINVAL;

        /* nothing to create when the session needs no buffer of this type */
        if (!buffers->size)
                return 0;

        /* 1) tracking node */
        buffer = msm_memory_alloc(inst, MSM_MEM_POOL_BUFFER);
        if (!buffer) {
                i_vpr_e(inst, "%s: buf alloc failed\n", __func__);
                return -ENOMEM;
        }
        INIT_LIST_HEAD(&buffer->list);
        buffer->type = buffer_type;
        buffer->index = index;
        buffer->buffer_size = buffers->size;
        list_add_tail(&buffer->list, &buffers->list);

        /* 2) backing allocation, region/secure derived from the type */
        alloc = msm_memory_alloc(inst, MSM_MEM_POOL_ALLOC);
        if (!alloc) {
                i_vpr_e(inst, "%s: alloc failed\n", __func__);
                return -ENOMEM;
        }
        INIT_LIST_HEAD(&alloc->list);
        alloc->type = buffer_type;
        alloc->region = msm_vidc_get_buffer_region(inst,
                buffer_type, __func__);
        alloc->size = buffer->buffer_size;
        alloc->secure = is_secure_region(alloc->region);
        rc = msm_vidc_memory_alloc(inst->core, alloc);
        if (rc)
                return -ENOMEM;
        list_add_tail(&alloc->list, &allocations->list);

        /* 3) device mapping of the allocation's dma-buf */
        map = msm_memory_alloc(inst, MSM_MEM_POOL_MAP);
        if (!map) {
                i_vpr_e(inst, "%s: map alloc failed\n", __func__);
                return -ENOMEM;
        }
        INIT_LIST_HEAD(&map->list);
        map->type = alloc->type;
        map->region = alloc->region;
        map->dmabuf = alloc->dmabuf;
        rc = msm_vidc_memory_map(inst->core, map);
        if (rc)
                return -ENOMEM;
        list_add_tail(&map->list, &mappings->list);

        buffer->dmabuf = alloc->dmabuf;
        buffer->device_addr = map->device_addr;
        i_vpr_h(inst, "%s: create: type: %8s, size: %9u, device_addr %#x\n", __func__,
                buf_name(buffer_type), buffers->size, buffer->device_addr);
        return 0;
}
  2686. int msm_vidc_create_internal_buffers(struct msm_vidc_inst *inst,
  2687. enum msm_vidc_buffer_type buffer_type)
  2688. {
  2689. int rc = 0;
  2690. struct msm_vidc_buffers *buffers;
  2691. int i;
  2692. if (!inst || !inst->core) {
  2693. d_vpr_e("%s: invalid params\n", __func__);
  2694. return -EINVAL;
  2695. }
  2696. buffers = msm_vidc_get_buffers(inst, buffer_type, __func__);
  2697. if (!buffers)
  2698. return -EINVAL;
  2699. if (buffers->reuse) {
  2700. i_vpr_l(inst, "%s: reuse enabled for %s\n", __func__, buf_name(buffer_type));
  2701. return 0;
  2702. }
  2703. for (i = 0; i < buffers->min_count; i++) {
  2704. rc = msm_vidc_create_internal_buffer(inst, buffer_type, i);
  2705. if (rc)
  2706. return rc;
  2707. }
  2708. return rc;
  2709. }
  2710. int msm_vidc_queue_internal_buffers(struct msm_vidc_inst *inst,
  2711. enum msm_vidc_buffer_type buffer_type)
  2712. {
  2713. int rc = 0;
  2714. struct msm_vidc_buffers *buffers;
  2715. struct msm_vidc_buffer *buffer, *dummy;
  2716. if (!inst || !inst->core) {
  2717. d_vpr_e("%s: invalid params\n", __func__);
  2718. return -EINVAL;
  2719. }
  2720. if (!is_internal_buffer(buffer_type)) {
  2721. i_vpr_e(inst, "%s: %s is not internal\n", __func__, buf_name(buffer_type));
  2722. return 0;
  2723. }
  2724. buffers = msm_vidc_get_buffers(inst, buffer_type, __func__);
  2725. if (!buffers)
  2726. return -EINVAL;
  2727. if (buffers->reuse) {
  2728. i_vpr_l(inst, "%s: reuse enabled for %s buf\n",
  2729. __func__, buf_name(buffer_type));
  2730. return 0;
  2731. }
  2732. list_for_each_entry_safe(buffer, dummy, &buffers->list, list) {
  2733. /* do not queue pending release buffers */
  2734. if (buffer->flags & MSM_VIDC_ATTR_PENDING_RELEASE)
  2735. continue;
  2736. /* do not queue already queued buffers */
  2737. if (buffer->attr & MSM_VIDC_ATTR_QUEUED)
  2738. continue;
  2739. rc = venus_hfi_queue_buffer(inst, buffer, NULL);
  2740. if (rc)
  2741. return rc;
  2742. /* mark queued */
  2743. buffer->attr |= MSM_VIDC_ATTR_QUEUED;
  2744. i_vpr_h(inst, "%s: queue: type: %8s, size: %9u, device_addr %#x\n", __func__,
  2745. buf_name(buffer->type), buffer->buffer_size, buffer->device_addr);
  2746. }
  2747. return 0;
  2748. }
  2749. int msm_vidc_alloc_and_queue_session_internal_buffers(struct msm_vidc_inst *inst,
  2750. enum msm_vidc_buffer_type buffer_type)
  2751. {
  2752. int rc = 0;
  2753. if (!inst || !inst->core) {
  2754. d_vpr_e("%s: invalid params\n", __func__);
  2755. return -EINVAL;
  2756. }
  2757. if (buffer_type != MSM_VIDC_BUF_ARP &&
  2758. buffer_type != MSM_VIDC_BUF_PERSIST) {
  2759. i_vpr_e(inst, "%s: invalid buffer type: %s\n",
  2760. __func__, buf_name(buffer_type));
  2761. rc = -EINVAL;
  2762. goto exit;
  2763. }
  2764. rc = msm_vidc_get_internal_buffers(inst, buffer_type);
  2765. if (rc)
  2766. goto exit;
  2767. rc = msm_vidc_create_internal_buffers(inst, buffer_type);
  2768. if (rc)
  2769. goto exit;
  2770. rc = msm_vidc_queue_internal_buffers(inst, buffer_type);
  2771. if (rc)
  2772. goto exit;
  2773. exit:
  2774. return rc;
  2775. }
/*
 * msm_vidc_release_internal_buffers() - ask firmware to release every
 * queued internal buffer of @buffer_type and mark each one
 * MSM_VIDC_ATTR_PENDING_RELEASE (set on buffer->attr).
 *
 * Buffers that were never queued, or are already pending release, are
 * skipped.  Reused buffer sets are left with firmware untouched.
 */
int msm_vidc_release_internal_buffers(struct msm_vidc_inst *inst,
        enum msm_vidc_buffer_type buffer_type)
{
        int rc = 0;
        struct msm_vidc_buffers *buffers;
        struct msm_vidc_buffer *buffer, *dummy;

        if (!inst || !inst->core) {
                d_vpr_e("%s: invalid params\n", __func__);
                return -EINVAL;
        }
        if (!is_internal_buffer(buffer_type)) {
                i_vpr_e(inst, "%s: %s is not internal\n",
                        __func__, buf_name(buffer_type));
                return 0;
        }
        buffers = msm_vidc_get_buffers(inst, buffer_type, __func__);
        if (!buffers)
                return -EINVAL;
        if (buffers->reuse) {
                i_vpr_l(inst, "%s: reuse enabled for %s buf\n",
                        __func__, buf_name(buffer_type));
                return 0;
        }

        list_for_each_entry_safe(buffer, dummy, &buffers->list, list) {
                /* do not release already pending release buffers */
                if (buffer->attr & MSM_VIDC_ATTR_PENDING_RELEASE)
                        continue;
                /* release only queued buffers */
                if (!(buffer->attr & MSM_VIDC_ATTR_QUEUED))
                        continue;
                rc = venus_hfi_release_buffer(inst, buffer);
                if (rc)
                        return rc;
                /* mark pending release */
                buffer->attr |= MSM_VIDC_ATTR_PENDING_RELEASE;
                i_vpr_h(inst, "%s: release: type: %8s, size: %9u, device_addr %#x\n", __func__,
                        buf_name(buffer->type), buffer->buffer_size, buffer->device_addr);
        }
        return 0;
}
/*
 * msm_vidc_vb2_buffer_done() - complete the vb2 buffer corresponding to a
 * firmware-returned driver buffer.
 *
 * Resolves the v4l2 queue/port from buf->type, requires the port to be
 * streaming, and matches the vb2 buffer by index among the ACTIVE entries
 * of the queued list.  Copies flags/timestamp/bytesused back and calls
 * vb2_buffer_done() with DONE, or ERROR when the driver flagged the buffer.
 *
 * Returns 0 on success, -EINVAL when the port/type is invalid, the port is
 * not streaming, or no matching active vb2 buffer exists.
 */
int msm_vidc_vb2_buffer_done(struct msm_vidc_inst *inst,
        struct msm_vidc_buffer *buf)
{
        int type, port, state;
        struct vb2_queue *q;
        struct vb2_buffer *vb2;
        struct vb2_v4l2_buffer *vbuf;
        bool found;

        if (!inst || !buf) {
                d_vpr_e("%s: invalid params\n", __func__);
                return -EINVAL;
        }
        type = v4l2_type_from_driver(buf->type, __func__);
        if (!type)
                return -EINVAL;
        port = v4l2_type_to_driver_port(inst, type, __func__);
        if (port < 0)
                return -EINVAL;

        q = &inst->vb2q[port];
        if (!q->streaming) {
                i_vpr_e(inst, "%s: port %d is not streaming\n",
                        __func__, port);
                return -EINVAL;
        }

        /* find the ACTIVE vb2 buffer with the same index */
        found = false;
        list_for_each_entry(vb2, &q->queued_list, queued_entry) {
                if (vb2->state != VB2_BUF_STATE_ACTIVE)
                        continue;
                if (vb2->index == buf->index) {
                        found = true;
                        break;
                }
        }
        if (!found) {
                print_vidc_buffer(VIDC_ERR, "err ", "vb2 not found for", inst, buf);
                return -EINVAL;
        }

        /**
         * v4l2 clears buffer state related flags. For driver errors
         * send state as error to avoid skipping V4L2_BUF_FLAG_ERROR
         * flag at v4l2 side.
         */
        if (buf->flags & MSM_VIDC_BUF_FLAG_ERROR)
                state = VB2_BUF_STATE_ERROR;
        else
                state = VB2_BUF_STATE_DONE;

        vbuf = to_vb2_v4l2_buffer(vb2);
        vbuf->flags = buf->flags;
        vb2->timestamp = buf->timestamp;
        /* bytesused is measured from the start of the plane, so add offset */
        vb2->planes[0].bytesused = buf->data_size + vb2->planes[0].data_offset;
        vb2_buffer_done(vb2, state);
        return 0;
}
  2869. int msm_vidc_event_queue_init(struct msm_vidc_inst *inst)
  2870. {
  2871. int rc = 0;
  2872. int index;
  2873. struct msm_vidc_core *core;
  2874. if (!inst || !inst->core) {
  2875. d_vpr_e("%s: invalid params\n", __func__);
  2876. return -EINVAL;
  2877. }
  2878. core = inst->core;
  2879. if (is_decode_session(inst))
  2880. index = 0;
  2881. else if (is_encode_session(inst))
  2882. index = 1;
  2883. else
  2884. return -EINVAL;
  2885. v4l2_fh_init(&inst->event_handler, &core->vdev[index].vdev);
  2886. v4l2_fh_add(&inst->event_handler);
  2887. return rc;
  2888. }
  2889. int msm_vidc_event_queue_deinit(struct msm_vidc_inst *inst)
  2890. {
  2891. int rc = 0;
  2892. if (!inst) {
  2893. d_vpr_e("%s: invalid params\n", __func__);
  2894. return -EINVAL;
  2895. }
  2896. /* do not deinit, if not already inited */
  2897. if (!inst->event_handler.vdev) {
  2898. i_vpr_e(inst, "%s: already not inited\n", __func__);
  2899. return 0;
  2900. }
  2901. v4l2_fh_del(&inst->event_handler);
  2902. v4l2_fh_exit(&inst->event_handler);
  2903. return rc;
  2904. }
/*
 * vb2q_init() - configure and initialize one vb2 queue of @type.
 *
 * The queue accepts dma-buf memory only, copies timestamps from userspace,
 * allows zero bytesused, and is wired to the core's vb2 callbacks with the
 * instance as its private data.  Returns vb2_queue_init()'s result.
 */
static int vb2q_init(struct msm_vidc_inst *inst,
        struct vb2_queue *q, enum v4l2_buf_type type)
{
        int rc = 0;
        struct msm_vidc_core *core;

        if (!inst || !q || !inst->core) {
                d_vpr_e("%s: invalid params\n", __func__);
                return -EINVAL;
        }
        core = inst->core;

        q->type = type;
        q->io_modes = VB2_DMABUF;
        q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
        q->ops = core->vb2_ops;
        q->mem_ops = core->vb2_mem_ops;
        q->drv_priv = inst;
        q->allow_zero_bytesused = 1;
        q->copy_timestamp = 1;
        rc = vb2_queue_init(q);
        if (rc)
                i_vpr_e(inst, "%s: vb2_queue_init failed for type %d\n",
                        __func__, type);
        return rc;
}
/*
 * msm_vidc_vb2_queue_init() - initialize all four vb2 queues (input,
 * output, input-meta, output-meta) for the instance, once.
 *
 * Error unwinding: each failure label is named after the queue whose init
 * failed and releases only the queues initialized before it, in reverse
 * order (e.g. fail_out_meta_vb2q_init releases input-meta, then falls
 * through to release output and input).
 */
int msm_vidc_vb2_queue_init(struct msm_vidc_inst *inst)
{
        int rc = 0;

        if (!inst) {
                i_vpr_e(inst, "%s: invalid params\n", __func__);
                return -EINVAL;
        }
        /* idempotent: skip when the queues are already set up */
        if (inst->vb2q_init) {
                i_vpr_h(inst, "%s: vb2q already inited\n", __func__);
                return 0;
        }

        rc = vb2q_init(inst, &inst->vb2q[INPUT_PORT], INPUT_MPLANE);
        if (rc)
                goto exit;
        rc = vb2q_init(inst, &inst->vb2q[OUTPUT_PORT], OUTPUT_MPLANE);
        if (rc)
                goto fail_out_vb2q_init;
        rc = vb2q_init(inst, &inst->vb2q[INPUT_META_PORT], INPUT_META_PLANE);
        if (rc)
                goto fail_in_meta_vb2q_init;
        rc = vb2q_init(inst, &inst->vb2q[OUTPUT_META_PORT], OUTPUT_META_PLANE);
        if (rc)
                goto fail_out_meta_vb2q_init;
        inst->vb2q_init = true;
        return 0;

fail_out_meta_vb2q_init:
        vb2_queue_release(&inst->vb2q[INPUT_META_PORT]);
fail_in_meta_vb2q_init:
        vb2_queue_release(&inst->vb2q[OUTPUT_PORT]);
fail_out_vb2q_init:
        vb2_queue_release(&inst->vb2q[INPUT_PORT]);
exit:
        return rc;
}
  2963. int msm_vidc_vb2_queue_deinit(struct msm_vidc_inst *inst)
  2964. {
  2965. int rc = 0;
  2966. if (!inst) {
  2967. d_vpr_e("%s: invalid params\n", __func__);
  2968. return -EINVAL;
  2969. }
  2970. if (!inst->vb2q_init) {
  2971. i_vpr_h(inst, "%s: vb2q already deinited\n", __func__);
  2972. return 0;
  2973. }
  2974. vb2_queue_release(&inst->vb2q[OUTPUT_META_PORT]);
  2975. vb2_queue_release(&inst->vb2q[INPUT_META_PORT]);
  2976. vb2_queue_release(&inst->vb2q[OUTPUT_PORT]);
  2977. vb2_queue_release(&inst->vb2q[INPUT_PORT]);
  2978. inst->vb2q_init = false;
  2979. return rc;
  2980. }
  2981. int msm_vidc_add_session(struct msm_vidc_inst *inst)
  2982. {
  2983. int rc = 0;
  2984. struct msm_vidc_inst *i;
  2985. struct msm_vidc_core *core;
  2986. u32 count = 0;
  2987. if (!inst || !inst->core) {
  2988. d_vpr_e("%s: invalid params\n", __func__);
  2989. return -EINVAL;
  2990. }
  2991. core = inst->core;
  2992. if (!core->capabilities) {
  2993. i_vpr_e(inst, "%s: invalid params\n", __func__);
  2994. return -EINVAL;
  2995. }
  2996. core_lock(core, __func__);
  2997. list_for_each_entry(i, &core->instances, list)
  2998. count++;
  2999. if (count < core->capabilities[MAX_SESSION_COUNT].value) {
  3000. list_add_tail(&inst->list, &core->instances);
  3001. } else {
  3002. i_vpr_e(inst, "%s: total sessions %d exceeded max limit %d\n",
  3003. __func__, count, core->capabilities[MAX_SESSION_COUNT].value);
  3004. rc = -EINVAL;
  3005. }
  3006. core_unlock(core, __func__);
  3007. return rc;
  3008. }
/*
 * msm_vidc_remove_session() - unlink @inst from the core's instance list,
 * matching by session_id, and log the remaining session count.
 *
 * Always returns 0; a session that is not on the list is silently a no-op.
 */
int msm_vidc_remove_session(struct msm_vidc_inst *inst)
{
        struct msm_vidc_inst *i, *temp;
        struct msm_vidc_core *core;
        u32 count = 0;

        if (!inst || !inst->core) {
                d_vpr_e("%s: invalid params\n", __func__);
                return -EINVAL;
        }
        core = inst->core;

        core_lock(core, __func__);
        /* match by session_id rather than pointer identity */
        list_for_each_entry_safe(i, temp, &core->instances, list) {
                if (i->session_id == inst->session_id) {
                        list_del_init(&i->list);
                        i_vpr_h(inst, "%s: removed session %#x\n",
                                __func__, i->session_id);
                }
        }
        /* re-walk to report how many sessions remain */
        list_for_each_entry(i, &core->instances, list)
                count++;
        i_vpr_h(inst, "%s: remaining sessions %d\n", __func__, count);
        core_unlock(core, __func__);

        return 0;
}
  3033. int msm_vidc_session_open(struct msm_vidc_inst *inst)
  3034. {
  3035. int rc = 0;
  3036. if (!inst) {
  3037. d_vpr_e("%s: invalid params\n", __func__);
  3038. return -EINVAL;
  3039. }
  3040. inst->packet_size = 4096;
  3041. inst->packet = kzalloc(inst->packet_size, GFP_KERNEL);
  3042. if (!inst->packet) {
  3043. i_vpr_e(inst, "%s(): inst packet allocation failed\n", __func__);
  3044. return -ENOMEM;
  3045. }
  3046. rc = venus_hfi_session_open(inst);
  3047. if (rc)
  3048. goto error;
  3049. return 0;
  3050. error:
  3051. i_vpr_e(inst, "%s(): session open failed\n", __func__);
  3052. kfree(inst->packet);
  3053. inst->packet = NULL;
  3054. return rc;
  3055. }
  3056. int msm_vidc_session_set_codec(struct msm_vidc_inst *inst)
  3057. {
  3058. int rc = 0;
  3059. if (!inst) {
  3060. d_vpr_e("%s: invalid params\n", __func__);
  3061. return -EINVAL;
  3062. }
  3063. rc = venus_hfi_session_set_codec(inst);
  3064. if (rc)
  3065. return rc;
  3066. return 0;
  3067. }
  3068. int msm_vidc_session_set_secure_mode(struct msm_vidc_inst *inst)
  3069. {
  3070. int rc = 0;
  3071. if (!inst) {
  3072. d_vpr_e("%s: invalid params\n", __func__);
  3073. return -EINVAL;
  3074. }
  3075. rc = venus_hfi_session_set_secure_mode(inst);
  3076. if (rc)
  3077. return rc;
  3078. return 0;
  3079. }
  3080. int msm_vidc_session_set_default_header(struct msm_vidc_inst *inst)
  3081. {
  3082. int rc = 0;
  3083. u32 default_header = false;
  3084. if (!inst) {
  3085. d_vpr_e("%s: invalid params\n", __func__);
  3086. return -EINVAL;
  3087. }
  3088. default_header = inst->capabilities->cap[DEFAULT_HEADER].value;
  3089. i_vpr_h(inst, "%s: default header: %d", __func__, default_header);
  3090. rc = venus_hfi_session_property(inst,
  3091. HFI_PROP_DEC_DEFAULT_HEADER,
  3092. HFI_HOST_FLAGS_NONE,
  3093. get_hfi_port(inst, INPUT_PORT),
  3094. HFI_PAYLOAD_U32,
  3095. &default_header,
  3096. sizeof(u32));
  3097. if (rc)
  3098. i_vpr_e(inst, "%s: set property failed\n", __func__);
  3099. return rc;
  3100. }
  3101. int msm_vidc_session_streamon(struct msm_vidc_inst *inst,
  3102. enum msm_vidc_port_type port)
  3103. {
  3104. int rc = 0;
  3105. if (!inst || !inst->core) {
  3106. d_vpr_e("%s: invalid params\n", __func__);
  3107. return -EINVAL;
  3108. }
  3109. msm_vidc_scale_power(inst, true);
  3110. rc = venus_hfi_start(inst, port);
  3111. if (rc)
  3112. return rc;
  3113. return rc;
  3114. }
/*
 * msm_vidc_session_streamoff() - stop streaming on one port and reclaim
 * its buffers.
 * @inst: video session
 * @port: INPUT_PORT or OUTPUT_PORT (anything else is rejected)
 *
 * Sends the HFI stop command, drops the instance lock while waiting for
 * the firmware stop-done signal (bounded by HW_RESPONSE_TIMEOUT), then
 * verifies no buffers of that type are still queued with firmware before
 * flushing deferred and delayed-unmap buffers. Any failure kills the
 * session and flushes buffers on the way out.
 *
 * Return: 0 on success, -ETIMEDOUT on firmware timeout, other negative
 * errno on failure.
 */
int msm_vidc_session_streamoff(struct msm_vidc_inst *inst,
	enum msm_vidc_port_type port)
{
	int rc = 0;
	int count = 0;
	struct msm_vidc_core *core;
	enum signal_session_response signal_type;
	enum msm_vidc_buffer_type buffer_type;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	/* map the port onto its stop-done signal and buffer type */
	if (port == INPUT_PORT) {
		signal_type = SIGNAL_CMD_STOP_INPUT;
		buffer_type = MSM_VIDC_BUF_INPUT;
	} else if (port == OUTPUT_PORT) {
		signal_type = SIGNAL_CMD_STOP_OUTPUT;
		buffer_type = MSM_VIDC_BUF_OUTPUT;
	} else {
		i_vpr_e(inst, "%s: invalid port: %d\n", __func__, port);
		return -EINVAL;
	}

	rc = venus_hfi_stop(inst, port);
	if (rc)
		goto error;

	core = inst->core;
	i_vpr_h(inst, "%s: wait on port: %d for time: %d ms\n",
		__func__, port, core->capabilities[HW_RESPONSE_TIMEOUT].value);
	/* drop the inst lock so the response thread can signal completion */
	inst_unlock(inst, __func__);
	rc = wait_for_completion_timeout(
			&inst->completions[signal_type],
			msecs_to_jiffies(
			core->capabilities[HW_RESPONSE_TIMEOUT].value));
	if (!rc) {
		/* wait_for_completion_timeout() returns 0 on timeout */
		i_vpr_e(inst, "%s: session stop timed out for port: %d\n",
			__func__, port);
		rc = -ETIMEDOUT;
		msm_vidc_inst_timeout(inst);
	} else {
		rc = 0;
	}
	inst_lock(inst, __func__);
	if(rc)
		goto error;

	/* no more queued buffers after streamoff */
	count = msm_vidc_num_buffers(inst, buffer_type, MSM_VIDC_ATTR_QUEUED);
	if (!count) {
		i_vpr_h(inst, "%s: stop successful on port: %d\n",
			__func__, port);
	} else {
		i_vpr_e(inst,
			"%s: %d buffers pending with firmware on port: %d\n",
			__func__, count, port);
		rc = -EINVAL;
		goto error;
	}

	/* flush deferred buffers */
	msm_vidc_flush_buffers(inst, buffer_type);
	msm_vidc_flush_delayed_unmap_buffers(inst, buffer_type);
	return 0;

error:
	msm_vidc_kill_session(inst);
	msm_vidc_flush_buffers(inst, buffer_type);
	return rc;
}
/*
 * msm_vidc_session_close() - send HFI session close and wait for the
 * firmware acknowledgement, then remove the session from the core list.
 *
 * The session packet buffer is freed immediately after the close command
 * because no further commands may be sent. The instance lock is dropped
 * while waiting (bounded by HW_RESPONSE_TIMEOUT) so the response thread
 * can signal SIGNAL_CMD_CLOSE.
 *
 * Return: 0 on success, -ETIMEDOUT if firmware did not respond, other
 * negative errno on HFI failure.
 */
int msm_vidc_session_close(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct msm_vidc_core *core;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	rc = venus_hfi_session_close(inst);
	if (rc)
		return rc;

	/* we are not supposed to send any more commands after close */
	i_vpr_h(inst, "%s: free session packet data\n", __func__);
	kfree(inst->packet);
	inst->packet = NULL;

	core = inst->core;
	i_vpr_h(inst, "%s: wait on close for time: %d ms\n",
		__func__, core->capabilities[HW_RESPONSE_TIMEOUT].value);
	/* drop the inst lock so the response thread can signal close-done */
	inst_unlock(inst, __func__);
	rc = wait_for_completion_timeout(
			&inst->completions[SIGNAL_CMD_CLOSE],
			msecs_to_jiffies(
			core->capabilities[HW_RESPONSE_TIMEOUT].value));
	if (!rc) {
		/* wait_for_completion_timeout() returns 0 on timeout */
		i_vpr_e(inst, "%s: session close timed out\n", __func__);
		rc = -ETIMEDOUT;
		msm_vidc_inst_timeout(inst);
	} else {
		rc = 0;
		i_vpr_h(inst, "%s: close successful\n", __func__);
	}
	inst_lock(inst, __func__);

	msm_vidc_remove_session(inst);

	return rc;
}
  3215. int msm_vidc_kill_session(struct msm_vidc_inst *inst)
  3216. {
  3217. if (!inst) {
  3218. d_vpr_e("%s: invalid params\n", __func__);
  3219. return -EINVAL;
  3220. }
  3221. if (!inst->session_id) {
  3222. i_vpr_e(inst, "%s: already killed\n", __func__);
  3223. return 0;
  3224. }
  3225. i_vpr_e(inst, "%s: killing session\n", __func__);
  3226. msm_vidc_session_close(inst);
  3227. msm_vidc_change_inst_state(inst, MSM_VIDC_ERROR, __func__);
  3228. return 0;
  3229. }
  3230. int msm_vidc_get_inst_capability(struct msm_vidc_inst *inst)
  3231. {
  3232. int rc = 0;
  3233. int i;
  3234. struct msm_vidc_core *core;
  3235. if (!inst || !inst->core || !inst->capabilities) {
  3236. d_vpr_e("%s: invalid params\n", __func__);
  3237. return -EINVAL;
  3238. }
  3239. core = inst->core;
  3240. for (i = 0; i < core->codecs_count; i++) {
  3241. if (core->inst_caps[i].domain == inst->domain &&
  3242. core->inst_caps[i].codec == inst->codec) {
  3243. i_vpr_h(inst,
  3244. "%s: copied capabilities with %#x codec, %#x domain\n",
  3245. __func__, inst->codec, inst->domain);
  3246. memcpy(inst->capabilities, &core->inst_caps[i],
  3247. sizeof(struct msm_vidc_inst_capability));
  3248. }
  3249. }
  3250. return rc;
  3251. }
  3252. int msm_vidc_deinit_core_caps(struct msm_vidc_core *core)
  3253. {
  3254. int rc = 0;
  3255. if (!core) {
  3256. d_vpr_e("%s: invalid params\n", __func__);
  3257. return -EINVAL;
  3258. }
  3259. kfree(core->capabilities);
  3260. core->capabilities = NULL;
  3261. d_vpr_h("%s: Core capabilities freed\n", __func__);
  3262. return rc;
  3263. }
  3264. int msm_vidc_init_core_caps(struct msm_vidc_core *core)
  3265. {
  3266. int rc = 0;
  3267. int i, num_platform_caps;
  3268. struct msm_platform_core_capability *platform_data;
  3269. if (!core || !core->platform) {
  3270. d_vpr_e("%s: invalid params\n", __func__);
  3271. rc = -EINVAL;
  3272. goto exit;
  3273. }
  3274. platform_data = core->platform->data.core_data;
  3275. if (!platform_data) {
  3276. d_vpr_e("%s: platform core data is NULL\n",
  3277. __func__);
  3278. rc = -EINVAL;
  3279. goto exit;
  3280. }
  3281. core->capabilities = kcalloc(1,
  3282. (sizeof(struct msm_vidc_core_capability) *
  3283. (CORE_CAP_MAX + 1)), GFP_KERNEL);
  3284. if (!core->capabilities) {
  3285. d_vpr_e("%s: failed to allocate core capabilities\n",
  3286. __func__);
  3287. rc = -ENOMEM;
  3288. goto exit;
  3289. }
  3290. num_platform_caps = core->platform->data.core_data_size;
  3291. /* loop over platform caps */
  3292. for (i = 0; i < num_platform_caps && i < CORE_CAP_MAX; i++) {
  3293. core->capabilities[platform_data[i].type].type = platform_data[i].type;
  3294. core->capabilities[platform_data[i].type].value = platform_data[i].value;
  3295. }
  3296. exit:
  3297. return rc;
  3298. }
/*
 * update_inst_capability() - copy one platform capability entry into the
 * instance capability table, indexed by its capability id.
 * @in:         platform-provided capability descriptor
 * @capability: destination per-instance capability table
 *
 * Entries whose cap id falls outside INST_CAP_MAX are logged and dropped
 * to avoid writing past the cap[] array.
 */
static void update_inst_capability(struct msm_platform_inst_capability *in,
	struct msm_vidc_inst_capability *capability)
{
	if (!in || !capability) {
		d_vpr_e("%s: invalid params %pK %pK\n",
			__func__, in, capability);
		return;
	}
	if (in->cap < INST_CAP_MAX) {
		/* straight field-by-field copy into the slot for this cap */
		capability->cap[in->cap].cap = in->cap;
		capability->cap[in->cap].min = in->min;
		capability->cap[in->cap].max = in->max;
		capability->cap[in->cap].step_or_mask = in->step_or_mask;
		capability->cap[in->cap].value = in->value;
		capability->cap[in->cap].flags = in->flags;
		capability->cap[in->cap].v4l2_id = in->v4l2_id;
		capability->cap[in->cap].hfi_id = in->hfi_id;
		/* parents/children are fixed-size arrays; copy whole */
		memcpy(capability->cap[in->cap].parents, in->parents,
			sizeof(capability->cap[in->cap].parents));
		memcpy(capability->cap[in->cap].children, in->children,
			sizeof(capability->cap[in->cap].children));
		capability->cap[in->cap].adjust = in->adjust;
		capability->cap[in->cap].set = in->set;
	} else {
		d_vpr_e("%s: invalid cap %d\n",
			__func__, in->cap);
	}
}
  3327. int msm_vidc_deinit_instance_caps(struct msm_vidc_core *core)
  3328. {
  3329. int rc = 0;
  3330. if (!core) {
  3331. d_vpr_e("%s: invalid params\n", __func__);
  3332. return -EINVAL;
  3333. }
  3334. kfree(core->inst_caps);
  3335. core->inst_caps = NULL;
  3336. d_vpr_h("%s: core->inst_caps freed\n", __func__);
  3337. return rc;
  3338. }
/*
 * msm_vidc_init_instance_caps() - build one capability template per
 * supported (domain, codec) pair.
 *
 * Counts the set bits in the ENC_CODECS and DEC_CODECS core capability
 * masks to size core->inst_caps, assigns each entry its domain and codec
 * bit, then overlays every matching platform capability entry onto every
 * matching template.
 *
 * Return: 0 on success, -EINVAL on bad params, -ENOMEM on allocation
 * failure.
 */
int msm_vidc_init_instance_caps(struct msm_vidc_core *core)
{
	int rc = 0;
	u8 enc_valid_codecs, dec_valid_codecs;
	u8 count_bits, enc_codec_count;
	u8 codecs_count = 0;
	int i, j, check_bit, num_platform_caps;
	struct msm_platform_inst_capability *platform_data = NULL;

	if (!core || !core->platform || !core->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		rc = -EINVAL;
		goto error;
	}

	platform_data = core->platform->data.instance_data;
	if (!platform_data) {
		d_vpr_e("%s: platform instance data is NULL\n",
			__func__);
		rc = -EINVAL;
		goto error;
	}

	/* COUNT_BITS consumes its first argument; use a scratch copy */
	enc_valid_codecs = core->capabilities[ENC_CODECS].value;
	count_bits = enc_valid_codecs;
	COUNT_BITS(count_bits, codecs_count);
	enc_codec_count = codecs_count;

	/* codecs_count accumulates: encoder codecs + decoder codecs */
	dec_valid_codecs = core->capabilities[DEC_CODECS].value;
	count_bits = dec_valid_codecs;
	COUNT_BITS(count_bits, codecs_count);
	core->codecs_count = codecs_count;

	core->inst_caps = kcalloc(codecs_count,
		sizeof(struct msm_vidc_inst_capability),
		GFP_KERNEL);
	if (!core->inst_caps) {
		d_vpr_e("%s: failed to allocate core capabilities\n",
			__func__);
		rc = -ENOMEM;
		goto error;
	}

	check_bit = 0;
	/* determine codecs for enc domain */
	for (i = 0; i < enc_codec_count; i++) {
		while (check_bit < (sizeof(enc_valid_codecs) * 8)) {
			if (enc_valid_codecs & BIT(check_bit)) {
				core->inst_caps[i].domain = MSM_VIDC_ENCODER;
				core->inst_caps[i].codec = enc_valid_codecs &
						BIT(check_bit);
				check_bit++;
				break;
			}
			check_bit++;
		}
	}

	/* reset checkbit to check from 0th bit of decoder codecs set bits*/
	check_bit = 0;
	/* determine codecs for dec domain (i continues past encoder slots) */
	for (; i < codecs_count; i++) {
		while (check_bit < (sizeof(dec_valid_codecs) * 8)) {
			if (dec_valid_codecs & BIT(check_bit)) {
				core->inst_caps[i].domain = MSM_VIDC_DECODER;
				core->inst_caps[i].codec = dec_valid_codecs &
						BIT(check_bit);
				check_bit++;
				break;
			}
			check_bit++;
		}
	}

	num_platform_caps = core->platform->data.instance_data_size;
	d_vpr_h("%s: num caps %d\n", __func__, num_platform_caps);
	/* loop over each platform capability */
	for (i = 0; i < num_platform_caps; i++) {
		/* select matching core codec and update it */
		for (j = 0; j < codecs_count; j++) {
			if ((platform_data[i].domain &
				core->inst_caps[j].domain) &&
				(platform_data[i].codec &
				core->inst_caps[j].codec)) {
				/* update core capability */
				update_inst_capability(&platform_data[i],
					&core->inst_caps[j]);
			}
		}
	}

error:
	return rc;
}
/*
 * msm_vidc_core_deinit_locked() - deinitialize the video core.
 * @core:  core to deinit; caller MUST hold the core lock
 * @force: when true, deinit even with sessions still present
 *
 * In the normal (non-force) case the core is only brought down when no
 * instances remain. Any instances still on the list are moved to the
 * ERROR state and unlinked after the HFI core deinit.
 *
 * Return: 0 on success (including when already deinited or skipped),
 * negative errno if the lock was not held.
 */
int msm_vidc_core_deinit_locked(struct msm_vidc_core *core, bool force)
{
	int rc = 0;
	struct msm_vidc_inst *inst, *dummy;

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	/* assert the locking contract before touching core state */
	rc = __strict_check(core, __func__);
	if (rc) {
		d_vpr_e("%s(): core was not locked\n", __func__);
		return rc;
	}

	/* idempotent: already down */
	if (core->state == MSM_VIDC_CORE_DEINIT)
		return 0;

	if (force) {
		d_vpr_e("%s(): force deinit core\n", __func__);
	} else {
		/* in normal case, deinit core only if no session present */
		if (!list_empty(&core->instances)) {
			d_vpr_h("%s(): skip deinit\n", __func__);
			return 0;
		} else {
			d_vpr_h("%s(): deinit core\n", __func__);
		}
	}

	venus_hfi_core_deinit(core);

	/* unlink all sessions from core, if any */
	list_for_each_entry_safe(inst, dummy, &core->instances, list) {
		msm_vidc_change_inst_state(inst, MSM_VIDC_ERROR, __func__);
		list_del_init(&inst->list);
	}
	msm_vidc_change_core_state(core, MSM_VIDC_CORE_DEINIT, __func__);

	return rc;
}
  3459. int msm_vidc_core_deinit(struct msm_vidc_core *core, bool force)
  3460. {
  3461. int rc = 0;
  3462. if (!core) {
  3463. d_vpr_e("%s: invalid params\n", __func__);
  3464. return -EINVAL;
  3465. }
  3466. core_lock(core, __func__);
  3467. rc = msm_vidc_core_deinit_locked(core, force);
  3468. core_unlock(core, __func__);
  3469. return rc;
  3470. }
/*
 * msm_vidc_core_init_wait() - poll until the core leaves the
 * CORE_INIT_WAIT state.
 *
 * Called with the core lock held; the lock is dropped around each sleep
 * interval so the response thread can advance the core state. Polls for
 * HW_RESPONSE_TIMEOUT ms in 40 ms steps (plus one extra attempt).
 *
 * Return: 0 if the core left INIT_WAIT (or was never in it), -EINVAL if
 * it is still stuck after the timeout (treated as fatal by the caller).
 */
static int msm_vidc_core_init_wait(struct msm_vidc_core *core)
{
	const int interval = 40;
	int max_tries, count = 0, rc = 0;

	if (!core || !core->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	/* assert the caller holds the core lock */
	rc = __strict_check(core, __func__);
	if (rc)
		return rc;

	if (core->state != MSM_VIDC_CORE_INIT_WAIT)
		return 0;

	d_vpr_h("%s(): waiting for state change\n", __func__);
	max_tries = core->capabilities[HW_RESPONSE_TIMEOUT].value / interval;
	/**
	 * attempt one more time to ensure triggering init_done
	 * timeout sequence for 1st session, incase response not
	 * received in reverse thread.
	 */
	while (count < max_tries + 1) {
		if (core->state != MSM_VIDC_CORE_INIT_WAIT)
			break;
		/* drop the lock while sleeping so others can progress */
		core_unlock(core, __func__);
		msleep_interruptible(interval);
		core_lock(core, __func__);
		count++;
	}
	d_vpr_h("%s: state %s, interval %u, count %u, max_tries %u\n", __func__,
		core_state_name(core->state), interval, count, max_tries);

	/* treat as fatal and fail session_open */
	if (core->state == MSM_VIDC_CORE_INIT_WAIT) {
		d_vpr_e("%s: state change failed\n", __func__);
		rc = -EINVAL;
	}

	return rc;
}
/*
 * msm_vidc_core_init() - bring up the video core if it is not already up.
 *
 * Serialized under the core lock. Waits out any in-flight init
 * (CORE_INIT_WAIT), then issues HFI core init and waits up to
 * HW_RESPONSE_TIMEOUT ms for the sys_init_done response; the lock is
 * dropped for the duration of the wait. A timeout force-deinits the core.
 *
 * Return: 0 on success (or already initialized), -ETIMEDOUT if firmware
 * did not respond, other negative errno on failure.
 */
int msm_vidc_core_init(struct msm_vidc_core *core)
{
	int rc = 0;

	if (!core || !core->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	core_lock(core, __func__);
	/* another caller may already be initializing: wait for it */
	rc = msm_vidc_core_init_wait(core);
	if (rc)
		goto unlock;

	/* already up: nothing to do */
	if (core->state == MSM_VIDC_CORE_INIT)
		goto unlock;

	msm_vidc_change_core_state(core, MSM_VIDC_CORE_INIT_WAIT, __func__);
	init_completion(&core->init_done);
	core->smmu_fault_handled = false;
	core->ssr.trigger = false;

	rc = venus_hfi_core_init(core);
	if (rc) {
		d_vpr_e("%s: core init failed\n", __func__);
		goto unlock;
	}

	d_vpr_h("%s(): waiting for sys_init_done, %d ms\n", __func__,
		core->capabilities[HW_RESPONSE_TIMEOUT].value);
	/* drop the lock so the response thread can complete init_done */
	core_unlock(core, __func__);
	rc = wait_for_completion_timeout(&core->init_done, msecs_to_jiffies(
			core->capabilities[HW_RESPONSE_TIMEOUT].value));
	core_lock(core, __func__);
	if (!rc) {
		/* wait_for_completion_timeout() returns 0 on timeout */
		d_vpr_e("%s: core init timed out\n", __func__);
		msm_vidc_core_deinit_locked(core, true);
		rc = -ETIMEDOUT;
	} else {
		msm_vidc_change_core_state(core, MSM_VIDC_CORE_INIT, __func__);
		d_vpr_h("%s: system init wait completed\n", __func__);
		rc = 0;
	}

unlock:
	core_unlock(core, __func__);
	return rc;
}
  3549. int msm_vidc_inst_timeout(struct msm_vidc_inst *inst)
  3550. {
  3551. int rc = 0;
  3552. struct msm_vidc_core *core;
  3553. struct msm_vidc_inst *instance;
  3554. bool found;
  3555. if (!inst || !inst->core) {
  3556. d_vpr_e("%s: invalid params\n", __func__);
  3557. return -EINVAL;
  3558. }
  3559. core = inst->core;
  3560. core_lock(core, __func__);
  3561. /*
  3562. * All sessions will be removed from core list in core deinit,
  3563. * do not deinit core from a session which is not present in
  3564. * core list.
  3565. */
  3566. found = false;
  3567. list_for_each_entry(instance, &core->instances, list) {
  3568. if (instance == inst) {
  3569. found = true;
  3570. break;
  3571. }
  3572. }
  3573. if (!found) {
  3574. i_vpr_e(inst,
  3575. "%s: session not available in core list\n", __func__);
  3576. rc = -EINVAL;
  3577. goto unlock;
  3578. }
  3579. /* call core deinit for a valid instance timeout case */
  3580. msm_vidc_core_deinit_locked(core, true);
  3581. unlock:
  3582. core_unlock(core, __func__);
  3583. return rc;
  3584. }
  3585. int msm_vidc_print_buffer_info(struct msm_vidc_inst *inst)
  3586. {
  3587. struct msm_vidc_buffers *buffers;
  3588. int i;
  3589. if (!inst) {
  3590. i_vpr_e(inst, "%s: invalid params\n", __func__);
  3591. return -EINVAL;
  3592. }
  3593. /* Print buffer details */
  3594. for (i = 0; i < ARRAY_SIZE(buf_type_name_arr); i++) {
  3595. buffers = msm_vidc_get_buffers(inst, buf_type_name_arr[i].type, __func__);
  3596. if (!buffers)
  3597. continue;
  3598. i_vpr_h(inst, "buf: type: %11s, count %2d, extra %2d, actual %2d, size %9u\n",
  3599. buf_type_name_arr[i].name, buffers->min_count,
  3600. buffers->extra_count, buffers->actual_count,
  3601. buffers->size);
  3602. }
  3603. return 0;
  3604. }
  3605. int msm_vidc_print_inst_info(struct msm_vidc_inst *inst)
  3606. {
  3607. struct msm_vidc_buffers *buffers;
  3608. struct msm_vidc_buffer *buf;
  3609. enum msm_vidc_port_type port;
  3610. bool is_secure, is_decode;
  3611. u32 bit_depth, bit_rate, frame_rate, width, height;
  3612. struct dma_buf *dbuf;
  3613. int i = 0;
  3614. if (!inst || !inst->capabilities) {
  3615. i_vpr_e(inst, "%s: invalid params\n", __func__);
  3616. return -EINVAL;
  3617. }
  3618. is_secure = is_secure_session(inst);
  3619. is_decode = inst->domain == MSM_VIDC_DECODER;
  3620. port = is_decode ? INPUT_PORT : OUTPUT_PORT;
  3621. width = inst->fmts[port].fmt.pix_mp.width;
  3622. height = inst->fmts[port].fmt.pix_mp.height;
  3623. bit_depth = inst->capabilities->cap[BIT_DEPTH].value & 0xFFFF;
  3624. bit_rate = inst->capabilities->cap[BIT_RATE].value;
  3625. frame_rate = inst->capabilities->cap[FRAME_RATE].value >> 16;
  3626. i_vpr_e(inst, "%s %s session, HxW: %d x %d, fps: %d, bitrate: %d, bit-depth: %d\n",
  3627. is_secure ? "Secure" : "Non-Secure",
  3628. is_decode ? "Decode" : "Encode",
  3629. height, width,
  3630. frame_rate, bit_rate, bit_depth);
  3631. /* Print buffer details */
  3632. for (i = 0; i < ARRAY_SIZE(buf_type_name_arr); i++) {
  3633. buffers = msm_vidc_get_buffers(inst, buf_type_name_arr[i].type, __func__);
  3634. if (!buffers)
  3635. continue;
  3636. i_vpr_e(inst, "count: type: %11s, min: %2d, extra: %2d, actual: %2d\n",
  3637. buf_type_name_arr[i].name, buffers->min_count,
  3638. buffers->extra_count, buffers->actual_count);
  3639. list_for_each_entry(buf, &buffers->list, list) {
  3640. if (!buf->dmabuf)
  3641. continue;
  3642. dbuf = (struct dma_buf *)buf->dmabuf;
  3643. i_vpr_e(inst,
  3644. "buf: type: %11s, index: %2d, fd: %4d, size: %9u, off: %8u, filled: %9u, iova: %8x, inode: %9ld, flags: %8x, ts: %16lld, attr: %8x\n",
  3645. buf_type_name_arr[i].name, buf->index, buf->fd, buf->buffer_size,
  3646. buf->data_offset, buf->data_size, buf->device_addr,
  3647. file_inode(dbuf->file)->i_ino,
  3648. buf->flags, buf->timestamp, buf->attr);
  3649. }
  3650. }
  3651. return 0;
  3652. }
  3653. void msm_vidc_print_core_info(struct msm_vidc_core *core)
  3654. {
  3655. struct msm_vidc_inst *inst = NULL;
  3656. struct msm_vidc_inst *instances[MAX_SUPPORTED_INSTANCES];
  3657. s32 num_instances = 0;
  3658. if (!core) {
  3659. d_vpr_e("%s: invalid params\n", __func__);
  3660. return;
  3661. }
  3662. core_lock(core, __func__);
  3663. list_for_each_entry(inst, &core->instances, list)
  3664. instances[num_instances++] = inst;
  3665. core_unlock(core, __func__);
  3666. while (num_instances--) {
  3667. inst = instances[num_instances];
  3668. inst = get_inst_ref(core, inst);
  3669. if (!inst)
  3670. continue;
  3671. inst_lock(inst, __func__);
  3672. msm_vidc_print_inst_info(inst);
  3673. inst_unlock(inst, __func__);
  3674. put_inst(inst);
  3675. }
  3676. }
  3677. int msm_vidc_smmu_fault_handler(struct iommu_domain *domain,
  3678. struct device *dev, unsigned long iova, int flags, void *data)
  3679. {
  3680. struct msm_vidc_core *core = data;
  3681. if (!domain || !core || !core->capabilities) {
  3682. d_vpr_e("%s: invalid params %pK %pK\n",
  3683. __func__, domain, core);
  3684. return -EINVAL;
  3685. }
  3686. if (core->smmu_fault_handled) {
  3687. if (core->capabilities[NON_FATAL_FAULTS].value) {
  3688. dprintk_ratelimit(VIDC_ERR, "err ",
  3689. "%s: non-fatal pagefault address: %lx\n",
  3690. __func__, iova);
  3691. return 0;
  3692. }
  3693. }
  3694. d_vpr_e("%s: faulting address: %lx\n", __func__, iova);
  3695. core->smmu_fault_handled = true;
  3696. /* print noc error log registers */
  3697. venus_hfi_noc_error_info(core);
  3698. msm_vidc_print_core_info(core);
  3699. /*
  3700. * Return -ENOSYS to elicit the default behaviour of smmu driver.
  3701. * If we return -ENOSYS, then smmu driver assumes page fault handler
  3702. * is not installed and prints a list of useful debug information like
  3703. * FAR, SID etc. This information is not printed if we return 0.
  3704. */
  3705. return -ENOSYS;
  3706. }
  3707. int msm_vidc_trigger_ssr(struct msm_vidc_core *core,
  3708. u64 trigger_ssr_val)
  3709. {
  3710. struct msm_vidc_ssr *ssr;
  3711. if (!core) {
  3712. d_vpr_e("%s: Invalid parameters\n", __func__);
  3713. return -EINVAL;
  3714. }
  3715. ssr = &core->ssr;
  3716. /*
  3717. * <test_addr><sub_client_id><ssr_type>
  3718. * ssr_type: 0-3 bits
  3719. * sub_client_id: 4-7 bits
  3720. * reserved: 8-31 bits
  3721. * test_addr: 32-63 bits
  3722. */
  3723. ssr->ssr_type = (trigger_ssr_val &
  3724. (unsigned long)SSR_TYPE) >> SSR_TYPE_SHIFT;
  3725. ssr->sub_client_id = (trigger_ssr_val &
  3726. (unsigned long)SSR_SUB_CLIENT_ID) >> SSR_SUB_CLIENT_ID_SHIFT;
  3727. ssr->test_addr = (trigger_ssr_val &
  3728. (unsigned long)SSR_ADDR_ID) >> SSR_ADDR_SHIFT;
  3729. schedule_work(&core->ssr_work);
  3730. return 0;
  3731. }
/*
 * msm_vidc_ssr_handler() - work handler that forwards a user-requested
 * SSR (subsystem restart) trigger to firmware.
 *
 * Only acts when the core is fully initialized; core->ssr.trigger marks
 * the resulting fatal error as user-initiated (handled as non-fatal).
 */
void msm_vidc_ssr_handler(struct work_struct *work)
{
	int rc;
	struct msm_vidc_core *core;
	struct msm_vidc_ssr *ssr;

	core = container_of(work, struct msm_vidc_core, ssr_work);
	if (!core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, core);
		return;
	}
	ssr = &core->ssr;

	core_lock(core, __func__);
	if (core->state == MSM_VIDC_CORE_INIT) {
		/*
		 * In current implementation, user-initiated SSR triggers
		 * a fatal error from hardware. However, there is no way
		 * to know if fatal error is due to SSR or not. Handle
		 * user SSR as non-fatal.
		 */
		core->ssr.trigger = true;
		rc = venus_hfi_trigger_ssr(core, ssr->ssr_type,
			ssr->sub_client_id, ssr->test_addr);
		if (rc) {
			d_vpr_e("%s: trigger_ssr failed\n", __func__);
			/* firmware never saw the trigger: clear the flag */
			core->ssr.trigger = false;
		}
	} else {
		d_vpr_e("%s: video core not initialized\n", __func__);
	}
	core_unlock(core, __func__);
}
/* Intentionally empty: power-management work is not handled here. */
void msm_vidc_pm_work_handler(struct work_struct *work)
{
}
  3766. void msm_vidc_fw_unload_handler(struct work_struct *work)
  3767. {
  3768. struct msm_vidc_core *core = NULL;
  3769. int rc = 0;
  3770. core = container_of(work, struct msm_vidc_core, fw_unload_work.work);
  3771. if (!core) {
  3772. d_vpr_e("%s: invalid work or core handle\n", __func__);
  3773. return;
  3774. }
  3775. d_vpr_h("%s: deinitializing video core\n",__func__);
  3776. rc = msm_vidc_core_deinit(core, false);
  3777. if (rc)
  3778. d_vpr_e("%s: Failed to deinit core\n", __func__);
  3779. }
  3780. void msm_vidc_batch_handler(struct work_struct *work)
  3781. {
  3782. struct msm_vidc_inst *inst;
  3783. enum msm_vidc_allow allow;
  3784. int rc = 0;
  3785. inst = container_of(work, struct msm_vidc_inst, decode_batch.work.work);
  3786. inst = get_inst_ref(g_core, inst);
  3787. if (!inst) {
  3788. d_vpr_e("%s: invalid params\n", __func__);
  3789. return;
  3790. }
  3791. inst_lock(inst, __func__);
  3792. if (is_session_error(inst)) {
  3793. i_vpr_e(inst, "%s: failled. Session error\n", __func__);
  3794. goto exit;
  3795. }
  3796. allow = msm_vidc_allow_qbuf(inst, OUTPUT_MPLANE);
  3797. if (allow != MSM_VIDC_ALLOW) {
  3798. i_vpr_e(inst, "%s: not allowed in state: %s\n", __func__,
  3799. state_name(inst->state));
  3800. goto exit;
  3801. }
  3802. i_vpr_h(inst, "%s: queue pending batch buffers\n", __func__);
  3803. rc = msm_vidc_queue_deferred_buffers(inst, MSM_VIDC_BUF_OUTPUT);
  3804. if (rc) {
  3805. i_vpr_e(inst, "%s: batch qbufs failed\n", __func__);
  3806. msm_vidc_change_inst_state(inst, MSM_VIDC_ERROR, __func__);
  3807. }
  3808. exit:
  3809. inst_unlock(inst, __func__);
  3810. put_inst(inst);
  3811. }
/*
 * msm_vidc_flush_buffers() - return all queued/deferred buffers of a
 * port (data + metadata) to the client.
 * @inst: video session
 * @type: MSM_VIDC_BUF_INPUT or MSM_VIDC_BUF_OUTPUT; the matching META
 *        buffer type is flushed first
 *
 * Buffers not yet completed to vb2 get a buffer_done before the driver
 * reference is dropped.
 *
 * Return: 0 on success, -EINVAL on bad params or missing buffer list.
 */
int msm_vidc_flush_buffers(struct msm_vidc_inst *inst,
	enum msm_vidc_buffer_type type)
{
	int rc = 0;
	struct msm_vidc_buffers *buffers;
	struct msm_vidc_buffer *buf, *dummy;
	enum msm_vidc_buffer_type buffer_type[2];
	int i;

	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	/* flush metadata buffers before their data buffers */
	if (type == MSM_VIDC_BUF_INPUT) {
		buffer_type[0] = MSM_VIDC_BUF_INPUT_META;
		buffer_type[1] = MSM_VIDC_BUF_INPUT;
	} else if (type == MSM_VIDC_BUF_OUTPUT) {
		buffer_type[0] = MSM_VIDC_BUF_OUTPUT_META;
		buffer_type[1] = MSM_VIDC_BUF_OUTPUT;
	} else {
		i_vpr_h(inst, "%s: invalid buffer type %d\n",
			__func__, type);
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(buffer_type); i++) {
		buffers = msm_vidc_get_buffers(inst, buffer_type[i], __func__);
		if (!buffers)
			return -EINVAL;

		list_for_each_entry_safe(buf, dummy, &buffers->list, list) {
			/* only buffers held by firmware or deferred by driver */
			if (buf->attr & MSM_VIDC_ATTR_QUEUED ||
				buf->attr & MSM_VIDC_ATTR_DEFERRED) {
				print_vidc_buffer(VIDC_HIGH, "high", "flushing buffer", inst, buf);
				if (!(buf->attr & MSM_VIDC_ATTR_BUFFER_DONE))
					msm_vidc_vb2_buffer_done(inst, buf);
				msm_vidc_put_driver_buf(inst, buf);
			}
		}
	}

	return rc;
}
/*
 * msm_vidc_flush_delayed_unmap_buffers() - drop delayed-unmap mappings
 * that are no longer needed.
 * @inst: video session
 * @type: MSM_VIDC_BUF_INPUT or MSM_VIDC_BUF_OUTPUT; the matching META
 *        mappings are processed too
 *
 * Mappings flagged skip_delayed_unmap (decoder output buffers) are kept
 * mapped across re-queues for performance. On flush, unmap each such
 * mapping completely unless its dma-buf is still held on the read-only
 * list (firmware may still reference it).
 *
 * Return: 0 on success, -EINVAL on bad params or missing mapping list.
 */
int msm_vidc_flush_delayed_unmap_buffers(struct msm_vidc_inst *inst,
	enum msm_vidc_buffer_type type)
{
	int rc = 0;
	struct msm_vidc_mappings *maps;
	struct msm_vidc_map *map, *dummy;
	struct msm_vidc_buffer *ro_buf, *ro_dummy;
	enum msm_vidc_buffer_type buffer_type[2];
	int i;
	bool found = false;

	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (type == MSM_VIDC_BUF_INPUT) {
		buffer_type[0] = MSM_VIDC_BUF_INPUT_META;
		buffer_type[1] = MSM_VIDC_BUF_INPUT;
	} else if (type == MSM_VIDC_BUF_OUTPUT) {
		buffer_type[0] = MSM_VIDC_BUF_OUTPUT_META;
		buffer_type[1] = MSM_VIDC_BUF_OUTPUT;
	} else {
		i_vpr_h(inst, "%s: invalid buffer type %d\n",
			__func__, type);
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(buffer_type); i++) {
		maps = msm_vidc_get_mappings(inst, buffer_type[i], __func__);
		if (!maps)
			return -EINVAL;

		list_for_each_entry_safe(map, dummy, &maps->list, list) {
			/*
			 * decoder output bufs will have skip_delayed_unmap = true
			 * unmap all decoder output buffers except those present in
			 * read_only buffers list
			 */
			if (!map->skip_delayed_unmap)
				continue;

			/* is this mapping's dma-buf still read-only held? */
			found = false;
			list_for_each_entry_safe(ro_buf, ro_dummy,
					&inst->buffers.read_only.list, list) {
				if (map->dmabuf == ro_buf->dmabuf) {
					found = true;
					break;
				}
			}

			/* completely unmap */
			if (!found) {
				if (map->refcount > 1) {
					/* refcount > 1 means a leaked reference */
					i_vpr_e(inst,
						"%s: unexpected map refcount: %u device addr %#x\n",
						__func__, map->refcount, map->device_addr);
					msm_vidc_change_inst_state(inst, MSM_VIDC_ERROR, __func__);
				}
				msm_vidc_memory_unmap_completely(inst, map);
			}
		}
	}

	return rc;
}
/*
 * msm_vidc_destroy_buffers() - tear down every buffer, mapping,
 * timestamp and pending response owned by a closing session.
 *
 * Order matters: firmware-internal buffers first, then client (vb2)
 * buffers with buffer_done for any not yet returned, then the read-only
 * and release lists, timestamps, queued response work, and finally the
 * memory pools themselves.
 */
void msm_vidc_destroy_buffers(struct msm_vidc_inst *inst)
{
	struct msm_vidc_buffers *buffers;
	struct msm_vidc_buffer *buf, *dummy;
	struct msm_vidc_timestamp *ts, *dummy_ts;
	struct response_work *work, *dummy_work = NULL;
	/* client-visible (vb2) buffer queues */
	static const enum msm_vidc_buffer_type ext_buf_types[] = {
		MSM_VIDC_BUF_INPUT,
		MSM_VIDC_BUF_OUTPUT,
		MSM_VIDC_BUF_INPUT_META,
		MSM_VIDC_BUF_OUTPUT_META,
	};
	/* firmware-internal scratch/persist buffer queues */
	static const enum msm_vidc_buffer_type internal_buf_types[] = {
		MSM_VIDC_BUF_BIN,
		MSM_VIDC_BUF_ARP,
		MSM_VIDC_BUF_COMV,
		MSM_VIDC_BUF_NON_COMV,
		MSM_VIDC_BUF_LINE,
		MSM_VIDC_BUF_DPB,
		MSM_VIDC_BUF_PERSIST,
		MSM_VIDC_BUF_VPSS,
	};
	int i;

	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(internal_buf_types); i++) {
		buffers = msm_vidc_get_buffers(inst, internal_buf_types[i], __func__);
		if (!buffers)
			continue;
		list_for_each_entry_safe(buf, dummy, &buffers->list, list) {
			i_vpr_h(inst,
				"destroying internal buffer: type %d idx %d fd %d addr %#x size %d\n",
				buf->type, buf->index, buf->fd, buf->device_addr, buf->buffer_size);
			msm_vidc_destroy_internal_buffer(inst, buf);
		}
	}

	for (i = 0; i < ARRAY_SIZE(ext_buf_types); i++) {
		buffers = msm_vidc_get_buffers(inst, ext_buf_types[i], __func__);
		if (!buffers)
			continue;
		list_for_each_entry_safe(buf, dummy, &buffers->list, list) {
			print_vidc_buffer(VIDC_ERR, "err ", "destroying ", inst, buf);
			/* return buffers the client never got back */
			if (!(buf->attr & MSM_VIDC_ATTR_BUFFER_DONE))
				msm_vidc_vb2_buffer_done(inst, buf);
			msm_vidc_put_driver_buf(inst, buf);
		}
		msm_vidc_unmap_buffers(inst, ext_buf_types[i]);
	}

	list_for_each_entry_safe(buf, dummy, &inst->buffers.read_only.list, list) {
		print_vidc_buffer(VIDC_ERR, "err ", "destroying ro buffer", inst, buf);
		list_del(&buf->list);
		msm_memory_free(inst, MSM_MEM_POOL_BUFFER, buf);
	}
	list_for_each_entry_safe(buf, dummy, &inst->buffers.release.list, list) {
		print_vidc_buffer(VIDC_ERR, "err ", "destroying release buffer", inst, buf);
		list_del(&buf->list);
		msm_memory_free(inst, MSM_MEM_POOL_BUFFER, buf);
	}

	list_for_each_entry_safe(ts, dummy_ts, &inst->timestamps.list, sort.list) {
		i_vpr_e(inst, "%s: removing ts: val %lld, rank %lld\n",
			__func__, ts->sort.val, ts->rank);
		list_del(&ts->sort.list);
		msm_memory_free(inst, MSM_MEM_POOL_TIMESTAMP, ts);
	}

	list_for_each_entry_safe(work, dummy_work, &inst->response_works, list) {
		list_del(&work->list);
		kfree(work->data);
		kfree(work);
	}

	/* destroy buffers from pool */
	msm_memory_pools_deinit(inst);
}
/*
 * msm_vidc_close_helper() - kref release callback that frees an
 * instance once the last reference is dropped (see put_inst()).
 *
 * Tears down event/vb2 queues, debugfs, the codec-specific state, the
 * capability lists and the response workqueue before freeing the
 * instance itself.
 */
static void msm_vidc_close_helper(struct kref *kref)
{
	struct msm_vidc_inst *inst = container_of(kref,
		struct msm_vidc_inst, kref);

	i_vpr_h(inst, "%s()\n", __func__);
	msm_vidc_event_queue_deinit(inst);
	msm_vidc_vb2_queue_deinit(inst);
	msm_vidc_debugfs_deinit_inst(inst);
	/* codec-specific teardown depends on the session domain */
	if (is_decode_session(inst))
		msm_vdec_inst_deinit(inst);
	else if (is_encode_session(inst))
		msm_venc_inst_deinit(inst);
	msm_vidc_free_input_cr_list(inst);
	msm_vidc_free_capabililty_list(inst, CHILD_LIST | FW_LIST);
	kfree(inst->capabilities);
	if (inst->response_workq)
		destroy_workqueue(inst->response_workq);
	kfree(inst);
}
  4003. struct msm_vidc_inst *get_inst_ref(struct msm_vidc_core *core,
  4004. struct msm_vidc_inst *instance)
  4005. {
  4006. struct msm_vidc_inst *inst = NULL;
  4007. bool matches = false;
  4008. if (!core) {
  4009. d_vpr_e("%s: invalid params\n", __func__);
  4010. return NULL;
  4011. }
  4012. mutex_lock(&core->lock);
  4013. list_for_each_entry(inst, &core->instances, list) {
  4014. if (inst == instance) {
  4015. matches = true;
  4016. break;
  4017. }
  4018. }
  4019. inst = (matches && kref_get_unless_zero(&inst->kref)) ? inst : NULL;
  4020. mutex_unlock(&core->lock);
  4021. return inst;
  4022. }
  4023. struct msm_vidc_inst *get_inst(struct msm_vidc_core *core,
  4024. u32 session_id)
  4025. {
  4026. struct msm_vidc_inst *inst = NULL;
  4027. bool matches = false;
  4028. if (!core) {
  4029. d_vpr_e("%s: invalid params\n", __func__);
  4030. return NULL;
  4031. }
  4032. mutex_lock(&core->lock);
  4033. list_for_each_entry(inst, &core->instances, list) {
  4034. if (inst->session_id == session_id) {
  4035. matches = true;
  4036. break;
  4037. }
  4038. }
  4039. inst = (matches && kref_get_unless_zero(&inst->kref)) ? inst : NULL;
  4040. mutex_unlock(&core->lock);
  4041. return inst;
  4042. }
  4043. void put_inst(struct msm_vidc_inst *inst)
  4044. {
  4045. if (!inst) {
  4046. d_vpr_e("%s: invalid params\n", __func__);
  4047. return;
  4048. }
  4049. kref_put(&inst->kref, msm_vidc_close_helper);
  4050. }
  4051. bool core_lock_check(struct msm_vidc_core *core, const char *func)
  4052. {
  4053. return mutex_is_locked(&core->lock);
  4054. }
/* Acquire the core mutex. @function names the caller (kept for the common
 * lock-wrapper signature; currently unused). */
void core_lock(struct msm_vidc_core *core, const char *function)
{
	mutex_lock(&core->lock);
}
/* Release the core mutex. @function names the caller (currently unused). */
void core_unlock(struct msm_vidc_core *core, const char *function)
{
	mutex_unlock(&core->lock);
}
  4063. bool inst_lock_check(struct msm_vidc_inst *inst, const char *func)
  4064. {
  4065. return mutex_is_locked(&inst->lock);
  4066. }
/* Acquire the instance mutex. @function names the caller (currently unused). */
void inst_lock(struct msm_vidc_inst *inst, const char *function)
{
	mutex_lock(&inst->lock);
}
/* Release the instance mutex. @function names the caller (currently unused). */
void inst_unlock(struct msm_vidc_inst *inst, const char *function)
{
	mutex_unlock(&inst->lock);
}
  4075. int msm_vidc_update_bitstream_buffer_size(struct msm_vidc_inst *inst)
  4076. {
  4077. struct msm_vidc_core *core;
  4078. struct v4l2_format *fmt;
  4079. if (!inst || !inst->core) {
  4080. d_vpr_e("%s: invalid params\n", __func__);
  4081. return -EINVAL;
  4082. }
  4083. core = inst->core;
  4084. if (is_decode_session(inst)) {
  4085. fmt = &inst->fmts[INPUT_PORT];
  4086. fmt->fmt.pix_mp.plane_fmt[0].sizeimage = call_session_op(core,
  4087. buffer_size, inst, MSM_VIDC_BUF_INPUT);
  4088. }
  4089. return 0;
  4090. }
  4091. int msm_vidc_update_meta_port_settings(struct msm_vidc_inst *inst)
  4092. {
  4093. struct msm_vidc_core *core;
  4094. struct v4l2_format *fmt;
  4095. if (!inst || !inst->core) {
  4096. d_vpr_e("%s: invalid params\n", __func__);
  4097. return -EINVAL;
  4098. }
  4099. core = inst->core;
  4100. fmt = &inst->fmts[INPUT_META_PORT];
  4101. if (is_input_meta_enabled(inst)) {
  4102. fmt->fmt.meta.buffersize = call_session_op(core,
  4103. buffer_size, inst, MSM_VIDC_BUF_INPUT_META);
  4104. inst->buffers.input_meta.min_count =
  4105. inst->buffers.input.min_count;
  4106. inst->buffers.input_meta.extra_count =
  4107. inst->buffers.input.extra_count;
  4108. inst->buffers.input_meta.actual_count =
  4109. inst->buffers.input.actual_count;
  4110. inst->buffers.input_meta.size = fmt->fmt.meta.buffersize;
  4111. } else {
  4112. fmt->fmt.meta.buffersize = 0;
  4113. inst->buffers.input_meta.min_count = 0;
  4114. inst->buffers.input_meta.extra_count = 0;
  4115. inst->buffers.input_meta.actual_count = 0;
  4116. inst->buffers.input_meta.size = 0;
  4117. }
  4118. fmt = &inst->fmts[OUTPUT_META_PORT];
  4119. if (is_output_meta_enabled(inst)) {
  4120. fmt->fmt.meta.buffersize = call_session_op(core,
  4121. buffer_size, inst, MSM_VIDC_BUF_OUTPUT_META);
  4122. inst->buffers.output_meta.min_count =
  4123. inst->buffers.output.min_count;
  4124. inst->buffers.output_meta.extra_count =
  4125. inst->buffers.output.extra_count;
  4126. inst->buffers.output_meta.actual_count =
  4127. inst->buffers.output.actual_count;
  4128. inst->buffers.output_meta.size = fmt->fmt.meta.buffersize;
  4129. } else {
  4130. fmt->fmt.meta.buffersize = 0;
  4131. inst->buffers.output_meta.min_count = 0;
  4132. inst->buffers.output_meta.extra_count = 0;
  4133. inst->buffers.output_meta.actual_count = 0;
  4134. inst->buffers.output_meta.size = 0;
  4135. }
  4136. return 0;
  4137. }
  4138. int msm_vidc_update_buffer_count(struct msm_vidc_inst *inst, u32 port)
  4139. {
  4140. struct msm_vidc_core *core;
  4141. if (!inst || !inst->core) {
  4142. d_vpr_e("%s: invalid params\n", __func__);
  4143. return -EINVAL;
  4144. }
  4145. core = inst->core;
  4146. switch (port) {
  4147. case INPUT_PORT:
  4148. inst->buffers.input.min_count = call_session_op(core,
  4149. min_count, inst, MSM_VIDC_BUF_INPUT);
  4150. inst->buffers.input.extra_count = call_session_op(core,
  4151. extra_count, inst, MSM_VIDC_BUF_INPUT);
  4152. if (inst->buffers.input.actual_count <
  4153. inst->buffers.input.min_count +
  4154. inst->buffers.input.extra_count) {
  4155. inst->buffers.input.actual_count =
  4156. inst->buffers.input.min_count +
  4157. inst->buffers.input.extra_count;
  4158. }
  4159. if (is_input_meta_enabled(inst)) {
  4160. inst->buffers.input_meta.min_count =
  4161. inst->buffers.input.min_count;
  4162. inst->buffers.input_meta.extra_count =
  4163. inst->buffers.input.extra_count;
  4164. inst->buffers.input_meta.actual_count =
  4165. inst->buffers.input.actual_count;
  4166. } else {
  4167. inst->buffers.input_meta.min_count = 0;
  4168. inst->buffers.input_meta.extra_count = 0;
  4169. inst->buffers.input_meta.actual_count = 0;
  4170. }
  4171. i_vpr_h(inst, "%s: type: INPUT, count: min %u, extra %u, actual %u\n", __func__,
  4172. inst->buffers.input.min_count,
  4173. inst->buffers.input.extra_count,
  4174. inst->buffers.input.actual_count);
  4175. break;
  4176. case OUTPUT_PORT:
  4177. if (!inst->vb2q[INPUT_PORT].streaming)
  4178. inst->buffers.output.min_count = call_session_op(core,
  4179. min_count, inst, MSM_VIDC_BUF_OUTPUT);
  4180. inst->buffers.output.extra_count = call_session_op(core,
  4181. extra_count, inst, MSM_VIDC_BUF_OUTPUT);
  4182. if (inst->buffers.output.actual_count <
  4183. inst->buffers.output.min_count +
  4184. inst->buffers.output.extra_count) {
  4185. inst->buffers.output.actual_count =
  4186. inst->buffers.output.min_count +
  4187. inst->buffers.output.extra_count;
  4188. }
  4189. if (is_output_meta_enabled(inst)) {
  4190. inst->buffers.output_meta.min_count =
  4191. inst->buffers.output.min_count;
  4192. inst->buffers.output_meta.extra_count =
  4193. inst->buffers.output.extra_count;
  4194. inst->buffers.output_meta.actual_count =
  4195. inst->buffers.output.actual_count;
  4196. } else {
  4197. inst->buffers.output_meta.min_count = 0;
  4198. inst->buffers.output_meta.extra_count = 0;
  4199. inst->buffers.output_meta.actual_count = 0;
  4200. }
  4201. i_vpr_h(inst, "%s: type: OUTPUT, count: min %u, extra %u, actual %u\n", __func__,
  4202. inst->buffers.output.min_count,
  4203. inst->buffers.output.extra_count,
  4204. inst->buffers.output.actual_count);
  4205. break;
  4206. default:
  4207. d_vpr_e("%s unknown port %d\n", __func__, port);
  4208. return -EINVAL;
  4209. }
  4210. return 0;
  4211. }
  4212. void msm_vidc_schedule_core_deinit(struct msm_vidc_core *core)
  4213. {
  4214. if (!core)
  4215. return;
  4216. if (!core->capabilities[FW_UNLOAD].value)
  4217. return;
  4218. cancel_delayed_work(&core->fw_unload_work);
  4219. schedule_delayed_work(&core->fw_unload_work,
  4220. msecs_to_jiffies(core->capabilities[FW_UNLOAD_DELAY].value));
  4221. d_vpr_h("firmware unload delayed by %u ms\n",
  4222. core->capabilities[FW_UNLOAD_DELAY].value);
  4223. return;
  4224. }
  4225. static const char *get_codec_str(enum msm_vidc_codec_type type)
  4226. {
  4227. switch (type) {
  4228. case MSM_VIDC_H264: return "h264";
  4229. case MSM_VIDC_HEVC: return "h265";
  4230. case MSM_VIDC_VP9: return " vp9";
  4231. case MSM_VIDC_HEIC: return "heic";
  4232. }
  4233. return "....";
  4234. }
  4235. static const char *get_domain_str(enum msm_vidc_domain_type type)
  4236. {
  4237. switch (type) {
  4238. case MSM_VIDC_ENCODER: return "e";
  4239. case MSM_VIDC_DECODER: return "d";
  4240. }
  4241. return ".";
  4242. }
  4243. int msm_vidc_update_debug_str(struct msm_vidc_inst *inst)
  4244. {
  4245. u32 sid;
  4246. const char *codec;
  4247. const char *domain;
  4248. if (!inst) {
  4249. d_vpr_e("%s: Invalid params\n", __func__);
  4250. return -EINVAL;
  4251. }
  4252. sid = inst->session_id;
  4253. codec = get_codec_str(inst->codec);
  4254. domain = get_domain_str(inst->domain);
  4255. snprintf(inst->debug_str, sizeof(inst->debug_str), "%08x: %s%s", sid, codec, domain);
  4256. d_vpr_h("%s: sid: %08x, codec: %s, domain: %s, final: %s\n",
  4257. __func__, sid, codec, domain, inst->debug_str);
  4258. return 0;
  4259. }
  4260. static int msm_vidc_print_insts_info(struct msm_vidc_core *core)
  4261. {
  4262. struct msm_vidc_inst *inst;
  4263. u32 height, width, fps, orate;
  4264. struct msm_vidc_inst_capability *capability;
  4265. struct v4l2_format *out_f;
  4266. struct v4l2_format *inp_f;
  4267. char prop[64];
  4268. d_vpr_e("Print all running instances\n");
  4269. d_vpr_e("%6s | %6s | %5s | %5s | %5s\n", "width", "height", "fps", "orate", "prop");
  4270. core_lock(core, __func__);
  4271. list_for_each_entry(inst, &core->instances, list) {
  4272. out_f = &inst->fmts[OUTPUT_PORT];
  4273. inp_f = &inst->fmts[INPUT_PORT];
  4274. capability = inst->capabilities;
  4275. memset(&prop, 0, sizeof(prop));
  4276. width = max(out_f->fmt.pix_mp.width, inp_f->fmt.pix_mp.width);
  4277. height = max(out_f->fmt.pix_mp.height, inp_f->fmt.pix_mp.height);
  4278. fps = capability->cap[FRAME_RATE].value >> 16;
  4279. orate = capability->cap[OPERATING_RATE].value >> 16;
  4280. if (is_realtime_session(inst))
  4281. strlcat(prop, "RT ", sizeof(prop));
  4282. else
  4283. strlcat(prop, "NRT", sizeof(prop));
  4284. if (is_thumbnail_session(inst))
  4285. strlcat(prop, "+THUMB", sizeof(prop));
  4286. if (is_image_session(inst))
  4287. strlcat(prop, "+IMAGE", sizeof(prop));
  4288. i_vpr_e(inst, "%6u | %6u | %5u | %5u | %5s\n", width, height, fps, orate, prop);
  4289. }
  4290. core_unlock(core, __func__);
  4291. return 0;
  4292. }
  4293. int msm_vidc_check_core_mbps(struct msm_vidc_inst *inst)
  4294. {
  4295. u32 mbps = 0;
  4296. struct msm_vidc_core *core;
  4297. struct msm_vidc_inst *instance;
  4298. if (!inst || !inst->core) {
  4299. d_vpr_e("%s: invalid params\n", __func__);
  4300. return -EINVAL;
  4301. }
  4302. core = inst->core;
  4303. core_lock(core, __func__);
  4304. list_for_each_entry(instance, &core->instances, list) {
  4305. /* ignore invalid/error session */
  4306. if (is_session_error(instance))
  4307. continue;
  4308. /* ignore thumbnail, image, and non realtime sessions */
  4309. if (is_thumbnail_session(instance) ||
  4310. is_image_session(instance) ||
  4311. !is_realtime_session(instance))
  4312. continue;
  4313. mbps += msm_vidc_get_inst_load(instance);
  4314. }
  4315. core_unlock(core, __func__);
  4316. if (mbps > core->capabilities[MAX_MBPS].value) {
  4317. i_vpr_e(inst, "%s: Hardware overloaded. needed %u, max %u", __func__,
  4318. mbps, core->capabilities[MAX_MBPS].value);
  4319. return -ENOMEM;
  4320. }
  4321. return 0;
  4322. }
  4323. static int msm_vidc_check_core_mbpf(struct msm_vidc_inst *inst)
  4324. {
  4325. u32 video_mbpf = 0, image_mbpf = 0;
  4326. struct msm_vidc_core *core;
  4327. struct msm_vidc_inst *instance;
  4328. if (!inst || !inst->core) {
  4329. d_vpr_e("%s: invalid params\n", __func__);
  4330. return -EINVAL;
  4331. }
  4332. core = inst->core;
  4333. core_lock(core, __func__);
  4334. list_for_each_entry(instance, &core->instances, list) {
  4335. /* ignore invalid/error session */
  4336. if (is_session_error(instance))
  4337. continue;
  4338. /* ignore thumbnail session */
  4339. if (is_thumbnail_session(instance))
  4340. continue;
  4341. if (is_image_session(instance))
  4342. image_mbpf += msm_vidc_get_mbs_per_frame(instance);
  4343. else
  4344. video_mbpf += msm_vidc_get_mbs_per_frame(instance);
  4345. }
  4346. core_unlock(core, __func__);
  4347. if (video_mbpf > core->capabilities[MAX_MBPF].value) {
  4348. i_vpr_e(inst, "%s: video overloaded. needed %u, max %u", __func__,
  4349. video_mbpf, core->capabilities[MAX_MBPF].value);
  4350. return -ENOMEM;
  4351. }
  4352. if (image_mbpf > core->capabilities[MAX_IMAGE_MBPF].value) {
  4353. i_vpr_e(inst, "%s: image overloaded. needed %u, max %u", __func__,
  4354. image_mbpf, core->capabilities[MAX_IMAGE_MBPF].value);
  4355. return -ENOMEM;
  4356. }
  4357. return 0;
  4358. }
  4359. static int msm_vidc_check_inst_mbpf(struct msm_vidc_inst *inst)
  4360. {
  4361. u32 mbpf = 0, max_mbpf = 0;
  4362. struct msm_vidc_inst_capability *capability;
  4363. if (!inst || !inst->capabilities) {
  4364. d_vpr_e("%s: invalid params\n", __func__);
  4365. return -EINVAL;
  4366. }
  4367. capability = inst->capabilities;
  4368. if (is_secure_session(inst))
  4369. max_mbpf = capability->cap[SECURE_MBPF].max;
  4370. else if (is_encode_session(inst) && capability->cap[LOSSLESS].value)
  4371. max_mbpf = capability->cap[LOSSLESS_MBPF].max;
  4372. else
  4373. max_mbpf = capability->cap[MBPF].max;
  4374. /* check current session mbpf */
  4375. mbpf = msm_vidc_get_mbs_per_frame(inst);
  4376. if (mbpf > max_mbpf) {
  4377. i_vpr_e(inst, "%s: session overloaded. needed %u, max %u", __func__,
  4378. mbpf, max_mbpf);
  4379. return -ENOMEM;
  4380. }
  4381. return 0;
  4382. }
  4383. static bool msm_vidc_allow_image_encode_session(struct msm_vidc_inst *inst)
  4384. {
  4385. struct msm_vidc_inst_capability *capability;
  4386. struct v4l2_format *fmt;
  4387. u32 min_width, min_height, max_width, max_height, pix_fmt, profile;
  4388. bool allow = false;
  4389. if (!inst || !inst->capabilities) {
  4390. d_vpr_e("%s: invalid params\n", __func__);
  4391. return false;
  4392. }
  4393. capability = inst->capabilities;
  4394. if (!is_image_encode_session(inst)) {
  4395. i_vpr_e(inst, "%s: not an image encode session\n", __func__);
  4396. return false;
  4397. }
  4398. pix_fmt = capability->cap[PIX_FMTS].value;
  4399. profile = capability->cap[PROFILE].value;
  4400. /* is input with & height is in allowed range */
  4401. min_width = capability->cap[FRAME_WIDTH].min;
  4402. max_width = capability->cap[FRAME_WIDTH].max;
  4403. min_height = capability->cap[FRAME_HEIGHT].min;
  4404. max_height = capability->cap[FRAME_HEIGHT].max;
  4405. fmt = &inst->fmts[INPUT_PORT];
  4406. if (!in_range(fmt->fmt.pix_mp.width, min_width, max_width) ||
  4407. !in_range(fmt->fmt.pix_mp.height, min_height, max_height)) {
  4408. i_vpr_e(inst, "unsupported wxh [%u x %u], allowed [%u x %u] to [%u x %u]\n",
  4409. fmt->fmt.pix_mp.width, fmt->fmt.pix_mp.height,
  4410. min_width, min_height, max_width, max_height);
  4411. allow = false;
  4412. goto exit;
  4413. }
  4414. /* is linear yuv color fmt */
  4415. allow = is_linear_yuv_colorformat(pix_fmt);
  4416. if (!allow) {
  4417. i_vpr_e(inst, "%s: compressed fmt: %#x\n", __func__, pix_fmt);
  4418. goto exit;
  4419. }
  4420. /* is input grid aligned */
  4421. fmt = &inst->fmts[INPUT_PORT];
  4422. allow = IS_ALIGNED(fmt->fmt.pix_mp.width, HEIC_GRID_DIMENSION);
  4423. allow &= IS_ALIGNED(fmt->fmt.pix_mp.height, HEIC_GRID_DIMENSION);
  4424. if (!allow) {
  4425. i_vpr_e(inst, "%s: input is not grid aligned: %u x %u\n", __func__,
  4426. fmt->fmt.pix_mp.width, fmt->fmt.pix_mp.height);
  4427. goto exit;
  4428. }
  4429. /* is output grid dimension */
  4430. fmt = &inst->fmts[OUTPUT_PORT];
  4431. allow = fmt->fmt.pix_mp.width == HEIC_GRID_DIMENSION;
  4432. allow &= fmt->fmt.pix_mp.height == HEIC_GRID_DIMENSION;
  4433. if (!allow) {
  4434. i_vpr_e(inst, "%s: output is not a grid dimension: %u x %u\n", __func__,
  4435. fmt->fmt.pix_mp.width, fmt->fmt.pix_mp.height);
  4436. goto exit;
  4437. }
  4438. /* is bitrate mode CQ */
  4439. allow = capability->cap[BITRATE_MODE].value == HFI_RC_CQ;
  4440. if (!allow) {
  4441. i_vpr_e(inst, "%s: bitrate mode is not CQ: %#x\n", __func__,
  4442. capability->cap[BITRATE_MODE].value);
  4443. goto exit;
  4444. }
  4445. /* is all intra */
  4446. allow = !capability->cap[GOP_SIZE].value;
  4447. allow &= !capability->cap[B_FRAME].value;
  4448. if (!allow) {
  4449. i_vpr_e(inst, "%s: not all intra: gop: %u, bframe: %u\n", __func__,
  4450. capability->cap[GOP_SIZE].value, capability->cap[B_FRAME].value);
  4451. goto exit;
  4452. }
  4453. /* is time delta based rc disabled */
  4454. allow = !capability->cap[TIME_DELTA_BASED_RC].value;
  4455. if (!allow) {
  4456. i_vpr_e(inst, "%s: time delta based rc not disabled: %#x\n", __func__,
  4457. capability->cap[TIME_DELTA_BASED_RC].value);
  4458. goto exit;
  4459. }
  4460. /* is frame skip mode disabled */
  4461. allow = !capability->cap[FRAME_SKIP_MODE].value;
  4462. if (!allow) {
  4463. i_vpr_e(inst, "%s: frame skip mode not disabled: %#x\n", __func__,
  4464. capability->cap[FRAME_SKIP_MODE].value);
  4465. goto exit;
  4466. }
  4467. exit:
  4468. if (!allow)
  4469. i_vpr_e(inst, "%s: current session not allowed\n", __func__);
  4470. return allow;
  4471. }
/*
 * Validate that the current session configuration is supported: core
 * MBPS/MBPF headroom, per-instance MBPF, resolution range (secure /
 * lossless / default caps), even dimensions for encoders, image-encode
 * constraints, and interlace limits.
 *
 * Returns 0 when supported; -EINVAL (or -ENOMEM from the load checks)
 * otherwise, after dumping all running instances for diagnosis.
 */
int msm_vidc_check_session_supported(struct msm_vidc_inst *inst)
{
	struct msm_vidc_inst_capability *capability;
	u32 width = 0, height = 0, min_width, min_height,
		max_width, max_height;
	bool allow = false, is_interlaced = false;
	int rc = 0;

	if (!inst || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	capability = inst->capabilities;

	/* secure + image is an unsupported combination */
	if (is_image_session(inst) && is_secure_session(inst)) {
		i_vpr_e(inst, "%s: secure image session not supported\n", __func__);
		rc = -EINVAL;
		goto exit;
	}

	/* core-wide load checks first */
	rc = msm_vidc_check_core_mbps(inst);
	if (rc)
		goto exit;

	rc = msm_vidc_check_core_mbpf(inst);
	if (rc)
		goto exit;

	rc = msm_vidc_check_inst_mbpf(inst);
	if (rc)
		goto exit;

	/* decoders are judged on input resolution, encoders on crop */
	if (is_decode_session(inst)) {
		width = inst->fmts[INPUT_PORT].fmt.pix_mp.width;
		height = inst->fmts[INPUT_PORT].fmt.pix_mp.height;
	} else if (is_encode_session(inst)) {
		width = inst->crop.width;
		height = inst->crop.height;
	}

	/* pick the resolution caps that apply to this session type */
	if (is_secure_session(inst)) {
		min_width = capability->cap[SECURE_FRAME_WIDTH].min;
		max_width = capability->cap[SECURE_FRAME_WIDTH].max;
		min_height = capability->cap[SECURE_FRAME_HEIGHT].min;
		max_height = capability->cap[SECURE_FRAME_HEIGHT].max;
	} else if (is_encode_session(inst) && capability->cap[LOSSLESS].value) {
		min_width = capability->cap[LOSSLESS_FRAME_WIDTH].min;
		max_width = capability->cap[LOSSLESS_FRAME_WIDTH].max;
		min_height = capability->cap[LOSSLESS_FRAME_HEIGHT].min;
		max_height = capability->cap[LOSSLESS_FRAME_HEIGHT].max;
	} else {
		min_width = capability->cap[FRAME_WIDTH].min;
		max_width = capability->cap[FRAME_WIDTH].max;
		min_height = capability->cap[FRAME_HEIGHT].min;
		max_height = capability->cap[FRAME_HEIGHT].max;
	}

	/* reject odd resolution session */
	if (is_encode_session(inst) &&
		(is_odd(width) || is_odd(height) ||
		is_odd(inst->compose.width) ||
		is_odd(inst->compose.height))) {
		i_vpr_e(inst, "%s: resolution is not even. wxh [%u x %u], compose [%u x %u]\n",
			__func__, width, height, inst->compose.width,
			inst->compose.height);
		rc = -EINVAL;
		goto exit;
	}

	/* check decoder input width and height is in supported range */
	if (is_decode_session(inst)) {
		if (!in_range(width, min_width, max_width) ||
			!in_range(height, min_height, max_height)) {
			i_vpr_e(inst,
				"%s: unsupported input wxh [%u x %u], allowed range: [%u x %u] to [%u x %u]\n",
				__func__, width, height, min_width,
				min_height, max_width, max_height);
			rc = -EINVAL;
			goto exit;
		}
	}

	/* check encoder crop width and height is in supported range */
	if (is_encode_session(inst)) {
		if (!in_range(width, min_width, max_width) ||
			!in_range(height, min_height, max_height)) {
			i_vpr_e(inst,
				"%s: unsupported wxh [%u x %u], allowed range: [%u x %u] to [%u x %u]\n",
				__func__, width, height, min_width,
				min_height, max_width, max_height);
			rc = -EINVAL;
			goto exit;
		}
	}

	/* check image capabilities */
	if (is_image_encode_session(inst)) {
		allow = msm_vidc_allow_image_encode_session(inst);
		if (!allow) {
			rc = -EINVAL;
			goto exit;
		}
		/* image sessions skip the interlace check below */
		return 0;
	}

	/* check interlace supported resolution */
	is_interlaced = capability->cap[CODED_FRAMES].value == CODED_FRAMES_INTERLACE;
	if (is_interlaced && (width > INTERLACE_WIDTH_MAX || height > INTERLACE_HEIGHT_MAX ||
		NUM_MBS_PER_FRAME(width, height) > INTERLACE_MB_PER_FRAME_MAX)) {
		i_vpr_e(inst, "%s: unsupported interlace wxh [%u x %u], max [%u x %u]\n",
			__func__, width, height, INTERLACE_WIDTH_MAX, INTERLACE_HEIGHT_MAX);
		rc = -EINVAL;
		goto exit;
	}

exit:
	if (rc) {
		i_vpr_e(inst, "%s: current session not supported\n", __func__);
		msm_vidc_print_insts_info(inst->core);
	}

	return rc;
}
  4581. int msm_vidc_check_scaling_supported(struct msm_vidc_inst *inst)
  4582. {
  4583. u32 iwidth, owidth, iheight, oheight, ds_factor;
  4584. if (!inst || !inst->capabilities) {
  4585. d_vpr_e("%s: invalid params\n", __func__);
  4586. return -EINVAL;
  4587. }
  4588. if (is_image_session(inst) || is_decode_session(inst)) {
  4589. i_vpr_h(inst, "%s: Scaling is supported for encode session only\n", __func__);
  4590. return 0;
  4591. }
  4592. if (!is_scaling_enabled(inst)) {
  4593. i_vpr_h(inst, "%s: Scaling not enabled. skip scaling check\n", __func__);
  4594. return 0;
  4595. }
  4596. iwidth = inst->crop.width;
  4597. iheight = inst->crop.height;
  4598. owidth = inst->compose.width;
  4599. oheight = inst->compose.height;
  4600. ds_factor = inst->capabilities->cap[SCALE_FACTOR].value;
  4601. /* upscaling: encoder doesnot support upscaling */
  4602. if (owidth > iwidth || oheight > iheight) {
  4603. i_vpr_e(inst, "%s: upscale not supported: input [%u x %u], output [%u x %u]\n",
  4604. __func__, iwidth, iheight, owidth, oheight);
  4605. return -EINVAL;
  4606. }
  4607. /* downscaling: only supported upto 1/8 of width & 1/8 of height */
  4608. if (iwidth > owidth * ds_factor || iheight > oheight * ds_factor) {
  4609. i_vpr_e(inst,
  4610. "%s: unsupported ratio: input [%u x %u], output [%u x %u], ratio %u\n",
  4611. __func__, iwidth, iheight, owidth, oheight, ds_factor);
  4612. return -EINVAL;
  4613. }
  4614. return 0;
  4615. }