msm_vidc_driver.c 178 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677
7677867796780678167826783678467856786678767886789679067916792679367946795679667976798679968006801680268036804680568066807680868096810681168126813681468156816681768186819682068216822682368246825682668276828682968306831683268336834683568366837683868396840684168426843684468456846684768486849685068516852685368546855685668576858685968606861
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2020-2022, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/iommu.h>
  6. #include <linux/workqueue.h>
  7. #include "msm_media_info.h"
  8. #include "msm_vidc_driver.h"
  9. #include "msm_vidc_platform.h"
  10. #include "msm_vidc_internal.h"
  11. #include "msm_vidc_control.h"
  12. #include "msm_vidc_memory.h"
  13. #include "msm_vidc_power.h"
  14. #include "msm_vidc_debug.h"
  15. #include "msm_vidc_power.h"
  16. #include "msm_vidc.h"
  17. #include "msm_vdec.h"
  18. #include "msm_venc.h"
  19. #include "msm_vidc_fence.h"
  20. #include "venus_hfi.h"
  21. #include "venus_hfi_response.h"
  22. #include "hfi_packet.h"
  23. #include "msm_vidc_events.h"
  24. extern struct msm_vidc_core *g_core;
/*
 * True when @val is odd. NOTE(review): (val) % 2 == 1 is false for
 * negative odd values (C99 remainder is -1); callers appear to pass
 * unsigned widths/heights/counts — TODO confirm no signed users.
 */
#define is_odd(val) ((val) % 2 == 1)
/*
 * Inclusive range check: min <= val <= max. @val is evaluated twice,
 * so avoid side effects in the argument.
 */
#define in_range(val, min, max) (((min) <= (val)) && ((val) <= (max)))
  27. #define COUNT_BITS(a, out) { \
  28. while ((a) >= 1) { \
  29. (out) += (a) & (1); \
  30. (a) >>= (1); \
  31. } \
  32. }
/*
 * SSR (subsystem restart) trigger value layout — fields decoded from a
 * single 64-bit word via the mask/shift pairs below:
 *   bits [3:0]   - SSR type
 *   bits [7:4]   - sub-client id
 *   bits [63:32] - address payload
 * NOTE(review): 0xFFFFFFFF00000000 relies on the constant promoting to
 * a 64-bit type; fine on LP64 kernels — confirm if built for 32-bit.
 */
#define SSR_TYPE 0x0000000F
#define SSR_TYPE_SHIFT 0
#define SSR_SUB_CLIENT_ID 0x000000F0
#define SSR_SUB_CLIENT_ID_SHIFT 4
#define SSR_ADDR_ID 0xFFFFFFFF00000000
#define SSR_ADDR_SHIFT 32
/*
 * Stability-test trigger value layout (same packing scheme as SSR):
 *   bits [3:0]   - stability test type
 *   bits [7:4]   - sub-client id
 *   bits [63:32] - payload value
 */
#define STABILITY_TYPE 0x0000000F
#define STABILITY_TYPE_SHIFT 0
#define STABILITY_SUB_CLIENT_ID 0x000000F0
#define STABILITY_SUB_CLIENT_ID_SHIFT 4
#define STABILITY_PAYLOAD_ID 0xFFFFFFFF00000000
#define STABILITY_PAYLOAD_SHIFT 32
/*
 * Pairs an instance-capability id with its printable name; entries
 * live in cap_name_arr and are used for debug/log output.
 */
struct msm_vidc_cap_name {
	enum msm_vidc_inst_capability_type cap_id;
	char *name; /* points at a static string literal; never freed */
};
  49. /* do not modify the cap names as it is used in test scripts */
  50. static const struct msm_vidc_cap_name cap_name_arr[] = {
  51. {INST_CAP_NONE, "INST_CAP_NONE" },
  52. {META_SEQ_HDR_NAL, "META_SEQ_HDR_NAL" },
  53. {META_BITSTREAM_RESOLUTION, "META_BITSTREAM_RESOLUTION" },
  54. {META_CROP_OFFSETS, "META_CROP_OFFSETS" },
  55. {META_DPB_MISR, "META_DPB_MISR" },
  56. {META_OPB_MISR, "META_OPB_MISR" },
  57. {META_INTERLACE, "META_INTERLACE" },
  58. {META_OUTBUF_FENCE, "META_OUTBUF_FENCE" },
  59. {META_LTR_MARK_USE, "META_LTR_MARK_USE" },
  60. {META_TIMESTAMP, "META_TIMESTAMP" },
  61. {META_CONCEALED_MB_CNT, "META_CONCEALED_MB_CNT" },
  62. {META_HIST_INFO, "META_HIST_INFO" },
  63. {META_PICTURE_TYPE, "META_PICTURE_TYPE" },
  64. {META_SEI_MASTERING_DISP, "META_SEI_MASTERING_DISP" },
  65. {META_SEI_CLL, "META_SEI_CLL" },
  66. {META_HDR10PLUS, "META_HDR10PLUS" },
  67. {META_BUF_TAG, "META_BUF_TAG" },
  68. {META_DPB_TAG_LIST, "META_DPB_TAG_LIST" },
  69. {META_SUBFRAME_OUTPUT, "META_SUBFRAME_OUTPUT" },
  70. {META_ENC_QP_METADATA, "META_ENC_QP_METADATA" },
  71. {META_DEC_QP_METADATA, "META_DEC_QP_METADATA" },
  72. {META_MAX_NUM_REORDER_FRAMES, "META_MAX_NUM_REORDER_FRAMES"},
  73. {META_EVA_STATS, "META_EVA_STATS" },
  74. {META_ROI_INFO, "META_ROI_INFO" },
  75. {META_SALIENCY_INFO, "META_SALIENCY_INFO" },
  76. {META_TRANSCODING_STAT_INFO, "META_TRANSCODING_STAT_INFO" },
  77. {META_DOLBY_RPU, "META_DOLBY_RPU" },
  78. {META_CAP_MAX, "META_CAP_MAX" },
  79. {FRAME_WIDTH, "FRAME_WIDTH" },
  80. {LOSSLESS_FRAME_WIDTH, "LOSSLESS_FRAME_WIDTH" },
  81. {SECURE_FRAME_WIDTH, "SECURE_FRAME_WIDTH" },
  82. {FRAME_HEIGHT, "FRAME_HEIGHT" },
  83. {LOSSLESS_FRAME_HEIGHT, "LOSSLESS_FRAME_HEIGHT" },
  84. {SECURE_FRAME_HEIGHT, "SECURE_FRAME_HEIGHT" },
  85. {PIX_FMTS, "PIX_FMTS" },
  86. {MIN_BUFFERS_INPUT, "MIN_BUFFERS_INPUT" },
  87. {MIN_BUFFERS_OUTPUT, "MIN_BUFFERS_OUTPUT" },
  88. {MBPF, "MBPF" },
  89. {BATCH_MBPF, "BATCH_MBPF" },
  90. {BATCH_FPS, "BATCH_FPS" },
  91. {LOSSLESS_MBPF, "LOSSLESS_MBPF" },
  92. {SECURE_MBPF, "SECURE_MBPF" },
  93. {MBPS, "MBPS" },
  94. {POWER_SAVE_MBPS, "POWER_SAVE_MBPS" },
  95. {CHECK_MBPS, "CHECK_MPBS" },
  96. {FRAME_RATE, "FRAME_RATE" },
  97. {OPERATING_RATE, "OPERATING_RATE" },
  98. {INPUT_RATE, "INPUT_RATE" },
  99. {TIMESTAMP_RATE, "TIMESTAMP_RATE" },
  100. {SCALE_FACTOR, "SCALE_FACTOR" },
  101. {MB_CYCLES_VSP, "MB_CYCLES_VSP" },
  102. {MB_CYCLES_VPP, "MB_CYCLES_VPP" },
  103. {MB_CYCLES_LP, "MB_CYCLES_LP" },
  104. {MB_CYCLES_FW, "MB_CYCLES_FW" },
  105. {MB_CYCLES_FW_VPP, "MB_CYCLES_FW_VPP" },
  106. {CLIENT_ID, "CLIENT_ID" },
  107. {SECURE_MODE, "SECURE_MODE" },
  108. {FENCE_ID, "FENCE_ID" },
  109. {FENCE_FD, "FENCE_FD" },
  110. {TS_REORDER, "TS_REORDER" },
  111. {SLICE_INTERFACE, "SLICE_INTERFACE" },
  112. {HFLIP, "HFLIP" },
  113. {VFLIP, "VFLIP" },
  114. {ROTATION, "ROTATION" },
  115. {SUPER_FRAME, "SUPER_FRAME" },
  116. {HEADER_MODE, "HEADER_MODE" },
  117. {PREPEND_SPSPPS_TO_IDR, "PREPEND_SPSPPS_TO_IDR" },
  118. {WITHOUT_STARTCODE, "WITHOUT_STARTCODE" },
  119. {NAL_LENGTH_FIELD, "NAL_LENGTH_FIELD" },
  120. {REQUEST_I_FRAME, "REQUEST_I_FRAME" },
  121. {BITRATE_MODE, "BITRATE_MODE" },
  122. {LOSSLESS, "LOSSLESS" },
  123. {FRAME_SKIP_MODE, "FRAME_SKIP_MODE" },
  124. {FRAME_RC_ENABLE, "FRAME_RC_ENABLE" },
  125. {GOP_CLOSURE, "GOP_CLOSURE" },
  126. {CSC, "CSC" },
  127. {CSC_CUSTOM_MATRIX, "CSC_CUSTOM_MATRIX" },
  128. {USE_LTR, "USE_LTR" },
  129. {MARK_LTR, "MARK_LTR" },
  130. {BASELAYER_PRIORITY, "BASELAYER_PRIORITY" },
  131. {IR_TYPE, "IR_TYPE" },
  132. {AU_DELIMITER, "AU_DELIMITER" },
  133. {GRID, "GRID" },
  134. {I_FRAME_MIN_QP, "I_FRAME_MIN_QP" },
  135. {P_FRAME_MIN_QP, "P_FRAME_MIN_QP" },
  136. {B_FRAME_MIN_QP, "B_FRAME_MIN_QP" },
  137. {I_FRAME_MAX_QP, "I_FRAME_MAX_QP" },
  138. {P_FRAME_MAX_QP, "P_FRAME_MAX_QP" },
  139. {B_FRAME_MAX_QP, "B_FRAME_MAX_QP" },
  140. {LAYER_TYPE, "LAYER_TYPE" },
  141. {LAYER_ENABLE, "LAYER_ENABLE" },
  142. {L0_BR, "L0_BR" },
  143. {L1_BR, "L1_BR" },
  144. {L2_BR, "L2_BR" },
  145. {L3_BR, "L3_BR" },
  146. {L4_BR, "L4_BR" },
  147. {L5_BR, "L5_BR" },
  148. {LEVEL, "LEVEL" },
  149. {HEVC_TIER, "HEVC_TIER" },
  150. {AV1_TIER, "AV1_TIER" },
  151. {DISPLAY_DELAY_ENABLE, "DISPLAY_DELAY_ENABLE" },
  152. {DISPLAY_DELAY, "DISPLAY_DELAY" },
  153. {CONCEAL_COLOR_8BIT, "CONCEAL_COLOR_8BIT" },
  154. {CONCEAL_COLOR_10BIT, "CONCEAL_COLOR_10BIT" },
  155. {LF_MODE, "LF_MODE" },
  156. {LF_ALPHA, "LF_ALPHA" },
  157. {LF_BETA, "LF_BETA" },
  158. {SLICE_MAX_BYTES, "SLICE_MAX_BYTES" },
  159. {SLICE_MAX_MB, "SLICE_MAX_MB" },
  160. {MB_RC, "MB_RC" },
  161. {CHROMA_QP_INDEX_OFFSET, "CHROMA_QP_INDEX_OFFSET" },
  162. {PIPE, "PIPE" },
  163. {POC, "POC" },
  164. {CODED_FRAMES, "CODED_FRAMES" },
  165. {BIT_DEPTH, "BIT_DEPTH" },
  166. {CODEC_CONFIG, "CODEC_CONFIG" },
  167. {BITSTREAM_SIZE_OVERWRITE, "BITSTREAM_SIZE_OVERWRITE" },
  168. {THUMBNAIL_MODE, "THUMBNAIL_MODE" },
  169. {DEFAULT_HEADER, "DEFAULT_HEADER" },
  170. {RAP_FRAME, "RAP_FRAME" },
  171. {SEQ_CHANGE_AT_SYNC_FRAME, "SEQ_CHANGE_AT_SYNC_FRAME" },
  172. {QUALITY_MODE, "QUALITY_MODE" },
  173. {PRIORITY, "PRIORITY" },
  174. {FIRMWARE_PRIORITY_OFFSET, "FIRMWARE_PRIORITY_OFFSET" },
  175. {CRITICAL_PRIORITY, "CRITICAL_PRIORITY" },
  176. {RESERVE_DURATION, "RESERVE_DURATION" },
  177. {DPB_LIST, "DPB_LIST" },
  178. {FILM_GRAIN, "FILM_GRAIN" },
  179. {SUPER_BLOCK, "SUPER_BLOCK" },
  180. {DRAP, "DRAP" },
  181. {INPUT_METADATA_FD, "INPUT_METADATA_FD" },
  182. {INPUT_META_VIA_REQUEST, "INPUT_META_VIA_REQUEST" },
  183. {ENC_IP_CR, "ENC_IP_CR" },
  184. {COMPLEXITY, "COMPLEXITY" },
  185. {CABAC_MAX_BITRATE, "CABAC_MAX_BITRATE" },
  186. {CAVLC_MAX_BITRATE, "CAVLC_MAX_BITRATE" },
  187. {ALLINTRA_MAX_BITRATE, "ALLINTRA_MAX_BITRATE" },
  188. {LOWLATENCY_MAX_BITRATE, "LOWLATENCY_MAX_BITRATE" },
  189. {LAST_FLAG_EVENT_ENABLE, "LAST_FLAG_EVENT_ENABLE" },
  190. {NUM_COMV, "NUM_COMV" },
  191. {PROFILE, "PROFILE" },
  192. {ENH_LAYER_COUNT, "ENH_LAYER_COUNT" },
  193. {BIT_RATE, "BIT_RATE" },
  194. {LOWLATENCY_MODE, "LOWLATENCY_MODE" },
  195. {GOP_SIZE, "GOP_SIZE" },
  196. {B_FRAME, "B_FRAME" },
  197. {ALL_INTRA, "ALL_INTRA" },
  198. {MIN_QUALITY, "MIN_QUALITY" },
  199. {CONTENT_ADAPTIVE_CODING, "CONTENT_ADAPTIVE_CODING" },
  200. {BLUR_TYPES, "BLUR_TYPES" },
  201. {REQUEST_PREPROCESS, "REQUEST_PREPROCESS" },
  202. {SLICE_MODE, "SLICE_MODE" },
  203. {MIN_FRAME_QP, "MIN_FRAME_QP" },
  204. {MAX_FRAME_QP, "MAX_FRAME_QP" },
  205. {I_FRAME_QP, "I_FRAME_QP" },
  206. {P_FRAME_QP, "P_FRAME_QP" },
  207. {B_FRAME_QP, "B_FRAME_QP" },
  208. {TIME_DELTA_BASED_RC, "TIME_DELTA_BASED_RC" },
  209. {CONSTANT_QUALITY, "CONSTANT_QUALITY" },
  210. {VBV_DELAY, "VBV_DELAY" },
  211. {PEAK_BITRATE, "PEAK_BITRATE" },
  212. {ENTROPY_MODE, "ENTROPY_MODE" },
  213. {TRANSFORM_8X8, "TRANSFORM_8X8" },
  214. {STAGE, "STAGE" },
  215. {LTR_COUNT, "LTR_COUNT" },
  216. {IR_PERIOD, "IR_PERIOD" },
  217. {BITRATE_BOOST, "BITRATE_BOOST" },
  218. {BLUR_RESOLUTION, "BLUR_RESOLUTION" },
  219. {OUTPUT_ORDER, "OUTPUT_ORDER" },
  220. {INPUT_BUF_HOST_MAX_COUNT, "INPUT_BUF_HOST_MAX_COUNT" },
  221. {OUTPUT_BUF_HOST_MAX_COUNT, "OUTPUT_BUF_HOST_MAX_COUNT" },
  222. {DELIVERY_MODE, "DELIVERY_MODE" },
  223. {VUI_TIMING_INFO, "VUI_TIMING_INFO" },
  224. {INST_CAP_MAX, "INST_CAP_MAX" },
  225. };
  226. const char *cap_name(enum msm_vidc_inst_capability_type cap_id)
  227. {
  228. const char *name = "UNKNOWN CAP";
  229. if (cap_id > ARRAY_SIZE(cap_name_arr))
  230. goto exit;
  231. if (cap_name_arr[cap_id].cap_id != cap_id)
  232. goto exit;
  233. name = cap_name_arr[cap_id].name;
  234. exit:
  235. return name;
  236. }
/* Pairs a driver buffer type id with its printable name for debug logs. */
struct msm_vidc_buf_type_name {
	enum msm_vidc_buffer_type type;
	char *name;
};
/*
 * Buffer-type name table, looked up by buf_name() as [type - 1] (the enum
 * apparently starts at 1 — buf_name() rejects type 0).  Each entry carries
 * its own type id so buf_name() can detect a table/enum mismatch.
 */
static const struct msm_vidc_buf_type_name buf_type_name_arr[] = {
	{MSM_VIDC_BUF_INPUT,        "INPUT"        },
	{MSM_VIDC_BUF_OUTPUT,       "OUTPUT"       },
	{MSM_VIDC_BUF_INPUT_META,   "INPUT_META"   },
	{MSM_VIDC_BUF_OUTPUT_META,  "OUTPUT_META"  },
	{MSM_VIDC_BUF_READ_ONLY,    "READ_ONLY"    },
	{MSM_VIDC_BUF_QUEUE,        "QUEUE"        },
	{MSM_VIDC_BUF_BIN,          "BIN"          },
	{MSM_VIDC_BUF_ARP,          "ARP"          },
	{MSM_VIDC_BUF_COMV,         "COMV"         },
	{MSM_VIDC_BUF_NON_COMV,     "NON_COMV"     },
	{MSM_VIDC_BUF_LINE,         "LINE"         },
	{MSM_VIDC_BUF_DPB,          "DPB"          },
	{MSM_VIDC_BUF_PERSIST,      "PERSIST"      },
	{MSM_VIDC_BUF_VPSS,         "VPSS"         },
	{MSM_VIDC_BUF_PARTIAL_DATA, "PARTIAL_DATA" },
};
  258. const char *buf_name(enum msm_vidc_buffer_type type)
  259. {
  260. const char *name = "UNKNOWN BUF";
  261. if (!type || type > ARRAY_SIZE(buf_type_name_arr))
  262. goto exit;
  263. if (buf_type_name_arr[type - 1].type != type)
  264. goto exit;
  265. name = buf_type_name_arr[type - 1].name;
  266. exit:
  267. return name;
  268. }
/* Pairs an allow/deny decision code with its printable name for logs. */
struct msm_vidc_allow_name {
	enum msm_vidc_allow allow;
	char *name;
};
/*
 * Allow-decision name table, indexed directly by enum value in allow_name()
 * (each entry also stores its own id so a mismatch is detectable).
 */
static const struct msm_vidc_allow_name inst_allow_name_arr[] = {
	{MSM_VIDC_DISALLOW, "MSM_VIDC_DISALLOW" },
	{MSM_VIDC_ALLOW,    "MSM_VIDC_ALLOW"    },
	{MSM_VIDC_DEFER,    "MSM_VIDC_DEFER"    },
	{MSM_VIDC_DISCARD,  "MSM_VIDC_DISCARD"  },
	{MSM_VIDC_IGNORE,   "MSM_VIDC_IGNORE"   },
};
  280. const char *allow_name(enum msm_vidc_allow allow)
  281. {
  282. const char *name = "UNKNOWN";
  283. if (allow > ARRAY_SIZE(inst_allow_name_arr))
  284. goto exit;
  285. if (inst_allow_name_arr[allow].allow != allow)
  286. goto exit;
  287. name = inst_allow_name_arr[allow].name;
  288. exit:
  289. return name;
  290. }
/* Pairs an instance state with its printable name for debug logs. */
struct msm_vidc_state_name {
	enum msm_vidc_state state;
	char *name;
};
/*
 * Do not modify the state names: they are parsed by test scripts.
 * Looked up by state_name() as [state - 1] (state 0 is rejected there).
 */
static const struct msm_vidc_state_name state_name_arr[] = {
	{MSM_VIDC_OPEN,             "OPEN"             },
	{MSM_VIDC_INPUT_STREAMING,  "INPUT_STREAMING"  },
	{MSM_VIDC_OUTPUT_STREAMING, "OUTPUT_STREAMING" },
	{MSM_VIDC_STREAMING,        "STREAMING"        },
	{MSM_VIDC_CLOSE,            "CLOSE"            },
	{MSM_VIDC_ERROR,            "ERROR"            },
};
  304. const char *state_name(enum msm_vidc_state state)
  305. {
  306. const char *name = "UNKNOWN STATE";
  307. if (!state || state > ARRAY_SIZE(state_name_arr))
  308. goto exit;
  309. if (state_name_arr[state - 1].state != state)
  310. goto exit;
  311. name = state_name_arr[state - 1].name;
  312. exit:
  313. return name;
  314. }
  315. const char *sub_state_name(enum msm_vidc_sub_state sub_state)
  316. {
  317. switch (sub_state) {
  318. case MSM_VIDC_DRAIN: return "DRAIN ";
  319. case MSM_VIDC_DRC: return "DRC ";
  320. case MSM_VIDC_DRAIN_LAST_BUFFER: return "DRAIN_LAST_BUFFER ";
  321. case MSM_VIDC_DRC_LAST_BUFFER: return "DRC_LAST_BUFFER ";
  322. case MSM_VIDC_INPUT_PAUSE: return "INPUT_PAUSE ";
  323. case MSM_VIDC_OUTPUT_PAUSE: return "OUTPUT_PAUSE ";
  324. }
  325. return "SUB_STATE_NONE";
  326. }
/* Pairs a core state with its printable name for debug logs. */
struct msm_vidc_core_state_name {
	enum msm_vidc_core_state state;
	char *name;
};
/* Core-state name table, indexed directly by enum value in core_state_name(). */
static const struct msm_vidc_core_state_name core_state_name_arr[] = {
	{MSM_VIDC_CORE_DEINIT,    "CORE_DEINIT"    },
	{MSM_VIDC_CORE_INIT_WAIT, "CORE_INIT_WAIT" },
	{MSM_VIDC_CORE_INIT,      "CORE_INIT"      },
};
  336. const char *core_state_name(enum msm_vidc_core_state state)
  337. {
  338. const char *name = "UNKNOWN STATE";
  339. if (state >= ARRAY_SIZE(core_state_name_arr))
  340. goto exit;
  341. if (core_state_name_arr[state].state != state)
  342. goto exit;
  343. name = core_state_name_arr[state].name;
  344. exit:
  345. return name;
  346. }
  347. const char *v4l2_type_name(u32 port)
  348. {
  349. switch (port) {
  350. case INPUT_MPLANE: return "INPUT";
  351. case OUTPUT_MPLANE: return "OUTPUT";
  352. case INPUT_META_PLANE: return "INPUT_META";
  353. case OUTPUT_META_PLANE: return "OUTPUT_META";
  354. }
  355. return "UNKNOWN";
  356. }
  357. const char *v4l2_pixelfmt_name(u32 pixfmt)
  358. {
  359. switch (pixfmt) {
  360. /* raw port: color format */
  361. case V4L2_PIX_FMT_NV12: return "NV12";
  362. case V4L2_PIX_FMT_NV21: return "NV21";
  363. case V4L2_PIX_FMT_VIDC_NV12C: return "NV12C";
  364. case V4L2_PIX_FMT_VIDC_P010: return "P010";
  365. case V4L2_PIX_FMT_VIDC_TP10C: return "TP10C";
  366. case V4L2_PIX_FMT_RGBA32: return "RGBA";
  367. case V4L2_PIX_FMT_VIDC_ARGB32C: return "RGBAC";
  368. /* bitstream port: codec type */
  369. case V4L2_PIX_FMT_H264: return "AVC";
  370. case V4L2_PIX_FMT_HEVC: return "HEVC";
  371. case V4L2_PIX_FMT_HEIC: return "HEIC";
  372. case V4L2_PIX_FMT_VP9: return "VP9";
  373. case V4L2_PIX_FMT_AV1: return "AV1";
  374. /* meta port */
  375. case V4L2_META_FMT_VIDC: return "META";
  376. }
  377. return "UNKNOWN";
  378. }
  379. void print_vidc_buffer(u32 tag, const char *tag_str, const char *str, struct msm_vidc_inst *inst,
  380. struct msm_vidc_buffer *vbuf)
  381. {
  382. struct dma_buf *dbuf;
  383. struct inode *f_inode;
  384. unsigned long inode_num = 0;
  385. long ref_count = -1;
  386. if (!inst || !vbuf || !tag_str || !str)
  387. return;
  388. dbuf = (struct dma_buf *)vbuf->dmabuf;
  389. if (dbuf && dbuf->file) {
  390. f_inode = file_inode(dbuf->file);
  391. if (f_inode) {
  392. inode_num = f_inode->i_ino;
  393. ref_count = file_count(dbuf->file);
  394. }
  395. }
  396. dprintk_inst(tag, tag_str, inst,
  397. "%s: %s: idx %2d fd %3d off %d daddr %#llx inode %8lu ref %2ld size %8d filled %8d flags %#x ts %8lld attr %#x counts(etb ebd ftb fbd) %4llu %4llu %4llu %4llu\n",
  398. str, buf_name(vbuf->type),
  399. vbuf->index, vbuf->fd, vbuf->data_offset,
  400. vbuf->device_addr, inode_num, ref_count, vbuf->buffer_size, vbuf->data_size,
  401. vbuf->flags, vbuf->timestamp, vbuf->attr, inst->debug_count.etb,
  402. inst->debug_count.ebd, inst->debug_count.ftb, inst->debug_count.fbd);
  403. trace_msm_v4l2_vidc_buffer_event_log(inst, str, buf_name(vbuf->type), vbuf,
  404. inode_num, ref_count);
  405. }
  406. void print_vb2_buffer(const char *str, struct msm_vidc_inst *inst,
  407. struct vb2_buffer *vb2)
  408. {
  409. if (!inst || !vb2)
  410. return;
  411. if (vb2->type == INPUT_MPLANE || vb2->type == OUTPUT_MPLANE) {
  412. i_vpr_e(inst,
  413. "%s: %s: idx %2d fd %d off %d size %d filled %d\n",
  414. str, vb2->type == INPUT_MPLANE ? "INPUT" : "OUTPUT",
  415. vb2->index, vb2->planes[0].m.fd,
  416. vb2->planes[0].data_offset, vb2->planes[0].length,
  417. vb2->planes[0].bytesused);
  418. } else if (vb2->type == INPUT_META_PLANE || vb2->type == OUTPUT_META_PLANE) {
  419. i_vpr_e(inst,
  420. "%s: %s: idx %2d fd %d off %d size %d filled %d\n",
  421. str, vb2->type == INPUT_MPLANE ? "INPUT_META" : "OUTPUT_META",
  422. vb2->index, vb2->planes[0].m.fd,
  423. vb2->planes[0].data_offset, vb2->planes[0].length,
  424. vb2->planes[0].bytesused);
  425. }
  426. }
/* Emit a kernel WARN (with stack dump) when @fatal is true; no-op otherwise. */
static void __fatal_error(bool fatal)
{
	WARN_ON(fatal);
}
  431. static int __strict_check(struct msm_vidc_core *core, const char *function)
  432. {
  433. bool fatal = !mutex_is_locked(&core->lock);
  434. __fatal_error(fatal);
  435. if (fatal)
  436. d_vpr_e("%s: strict check failed\n", function);
  437. return fatal ? -EINVAL : 0;
  438. }
  439. enum msm_vidc_buffer_type v4l2_type_to_driver(u32 type, const char *func)
  440. {
  441. enum msm_vidc_buffer_type buffer_type = 0;
  442. switch (type) {
  443. case INPUT_MPLANE:
  444. buffer_type = MSM_VIDC_BUF_INPUT;
  445. break;
  446. case OUTPUT_MPLANE:
  447. buffer_type = MSM_VIDC_BUF_OUTPUT;
  448. break;
  449. case INPUT_META_PLANE:
  450. buffer_type = MSM_VIDC_BUF_INPUT_META;
  451. break;
  452. case OUTPUT_META_PLANE:
  453. buffer_type = MSM_VIDC_BUF_OUTPUT_META;
  454. break;
  455. default:
  456. d_vpr_e("%s: invalid v4l2 buffer type %#x\n", func, type);
  457. break;
  458. }
  459. return buffer_type;
  460. }
  461. u32 v4l2_type_from_driver(enum msm_vidc_buffer_type buffer_type,
  462. const char *func)
  463. {
  464. u32 type = 0;
  465. switch (buffer_type) {
  466. case MSM_VIDC_BUF_INPUT:
  467. type = INPUT_MPLANE;
  468. break;
  469. case MSM_VIDC_BUF_OUTPUT:
  470. type = OUTPUT_MPLANE;
  471. break;
  472. case MSM_VIDC_BUF_INPUT_META:
  473. type = INPUT_META_PLANE;
  474. break;
  475. case MSM_VIDC_BUF_OUTPUT_META:
  476. type = OUTPUT_META_PLANE;
  477. break;
  478. default:
  479. d_vpr_e("%s: invalid driver buffer type %d\n",
  480. func, buffer_type);
  481. break;
  482. }
  483. return type;
  484. }
  485. enum msm_vidc_codec_type v4l2_codec_to_driver(u32 v4l2_codec, const char *func)
  486. {
  487. enum msm_vidc_codec_type codec = 0;
  488. switch (v4l2_codec) {
  489. case V4L2_PIX_FMT_H264:
  490. codec = MSM_VIDC_H264;
  491. break;
  492. case V4L2_PIX_FMT_HEVC:
  493. codec = MSM_VIDC_HEVC;
  494. break;
  495. case V4L2_PIX_FMT_VP9:
  496. codec = MSM_VIDC_VP9;
  497. break;
  498. case V4L2_PIX_FMT_AV1:
  499. codec = MSM_VIDC_AV1;
  500. break;
  501. case V4L2_PIX_FMT_HEIC:
  502. codec = MSM_VIDC_HEIC;
  503. break;
  504. default:
  505. d_vpr_h("%s: invalid v4l2 codec %#x\n", func, v4l2_codec);
  506. break;
  507. }
  508. return codec;
  509. }
  510. u32 v4l2_codec_from_driver(enum msm_vidc_codec_type codec, const char *func)
  511. {
  512. u32 v4l2_codec = 0;
  513. switch (codec) {
  514. case MSM_VIDC_H264:
  515. v4l2_codec = V4L2_PIX_FMT_H264;
  516. break;
  517. case MSM_VIDC_HEVC:
  518. v4l2_codec = V4L2_PIX_FMT_HEVC;
  519. break;
  520. case MSM_VIDC_VP9:
  521. v4l2_codec = V4L2_PIX_FMT_VP9;
  522. break;
  523. case MSM_VIDC_AV1:
  524. v4l2_codec = V4L2_PIX_FMT_AV1;
  525. break;
  526. case MSM_VIDC_HEIC:
  527. v4l2_codec = V4L2_PIX_FMT_HEIC;
  528. break;
  529. default:
  530. d_vpr_e("%s: invalid driver codec %#x\n", func, codec);
  531. break;
  532. }
  533. return v4l2_codec;
  534. }
  535. enum msm_vidc_colorformat_type v4l2_colorformat_to_driver(u32 v4l2_colorformat,
  536. const char *func)
  537. {
  538. enum msm_vidc_colorformat_type colorformat = 0;
  539. switch (v4l2_colorformat) {
  540. case V4L2_PIX_FMT_NV12:
  541. colorformat = MSM_VIDC_FMT_NV12;
  542. break;
  543. case V4L2_PIX_FMT_NV21:
  544. colorformat = MSM_VIDC_FMT_NV21;
  545. break;
  546. case V4L2_PIX_FMT_VIDC_NV12C:
  547. colorformat = MSM_VIDC_FMT_NV12C;
  548. break;
  549. case V4L2_PIX_FMT_VIDC_TP10C:
  550. colorformat = MSM_VIDC_FMT_TP10C;
  551. break;
  552. case V4L2_PIX_FMT_RGBA32:
  553. colorformat = MSM_VIDC_FMT_RGBA8888;
  554. break;
  555. case V4L2_PIX_FMT_VIDC_ARGB32C:
  556. colorformat = MSM_VIDC_FMT_RGBA8888C;
  557. break;
  558. case V4L2_PIX_FMT_VIDC_P010:
  559. colorformat = MSM_VIDC_FMT_P010;
  560. break;
  561. default:
  562. d_vpr_e("%s: invalid v4l2 color format %#x\n",
  563. func, v4l2_colorformat);
  564. break;
  565. }
  566. return colorformat;
  567. }
  568. u32 v4l2_colorformat_from_driver(enum msm_vidc_colorformat_type colorformat,
  569. const char *func)
  570. {
  571. u32 v4l2_colorformat = 0;
  572. switch (colorformat) {
  573. case MSM_VIDC_FMT_NV12:
  574. v4l2_colorformat = V4L2_PIX_FMT_NV12;
  575. break;
  576. case MSM_VIDC_FMT_NV21:
  577. v4l2_colorformat = V4L2_PIX_FMT_NV21;
  578. break;
  579. case MSM_VIDC_FMT_NV12C:
  580. v4l2_colorformat = V4L2_PIX_FMT_VIDC_NV12C;
  581. break;
  582. case MSM_VIDC_FMT_TP10C:
  583. v4l2_colorformat = V4L2_PIX_FMT_VIDC_TP10C;
  584. break;
  585. case MSM_VIDC_FMT_RGBA8888:
  586. v4l2_colorformat = V4L2_PIX_FMT_RGBA32;
  587. break;
  588. case MSM_VIDC_FMT_RGBA8888C:
  589. v4l2_colorformat = V4L2_PIX_FMT_VIDC_ARGB32C;
  590. break;
  591. case MSM_VIDC_FMT_P010:
  592. v4l2_colorformat = V4L2_PIX_FMT_VIDC_P010;
  593. break;
  594. default:
  595. d_vpr_e("%s: invalid driver color format %#x\n",
  596. func, colorformat);
  597. break;
  598. }
  599. return v4l2_colorformat;
  600. }
  601. u32 v4l2_color_primaries_to_driver(struct msm_vidc_inst *inst,
  602. u32 v4l2_primaries, const char *func)
  603. {
  604. u32 vidc_color_primaries = MSM_VIDC_PRIMARIES_RESERVED;
  605. switch(v4l2_primaries) {
  606. case V4L2_COLORSPACE_DEFAULT:
  607. vidc_color_primaries = MSM_VIDC_PRIMARIES_RESERVED;
  608. break;
  609. case V4L2_COLORSPACE_REC709:
  610. vidc_color_primaries = MSM_VIDC_PRIMARIES_BT709;
  611. break;
  612. case V4L2_COLORSPACE_470_SYSTEM_M:
  613. vidc_color_primaries = MSM_VIDC_PRIMARIES_BT470_SYSTEM_M;
  614. break;
  615. case V4L2_COLORSPACE_470_SYSTEM_BG:
  616. vidc_color_primaries = MSM_VIDC_PRIMARIES_BT470_SYSTEM_BG;
  617. break;
  618. case V4L2_COLORSPACE_SMPTE170M:
  619. vidc_color_primaries = MSM_VIDC_PRIMARIES_BT601_525;
  620. break;
  621. case V4L2_COLORSPACE_SMPTE240M:
  622. vidc_color_primaries = MSM_VIDC_PRIMARIES_SMPTE_ST240M;
  623. break;
  624. case V4L2_COLORSPACE_VIDC_GENERIC_FILM:
  625. vidc_color_primaries = MSM_VIDC_PRIMARIES_GENERIC_FILM;
  626. break;
  627. case V4L2_COLORSPACE_BT2020:
  628. vidc_color_primaries = MSM_VIDC_PRIMARIES_BT2020;
  629. break;
  630. case V4L2_COLORSPACE_DCI_P3:
  631. vidc_color_primaries = MSM_VIDC_PRIMARIES_SMPTE_RP431_2;
  632. break;
  633. case V4L2_COLORSPACE_VIDC_EG431:
  634. vidc_color_primaries = MSM_VIDC_PRIMARIES_SMPTE_EG431_1;
  635. break;
  636. case V4L2_COLORSPACE_VIDC_EBU_TECH:
  637. vidc_color_primaries = MSM_VIDC_PRIMARIES_SMPTE_EBU_TECH;
  638. break;
  639. default:
  640. i_vpr_e(inst, "%s: invalid v4l2 color primaries %d\n",
  641. func, v4l2_primaries);
  642. break;
  643. }
  644. return vidc_color_primaries;
  645. }
  646. u32 v4l2_color_primaries_from_driver(struct msm_vidc_inst *inst,
  647. u32 vidc_color_primaries, const char *func)
  648. {
  649. u32 v4l2_primaries = V4L2_COLORSPACE_DEFAULT;
  650. switch(vidc_color_primaries) {
  651. case MSM_VIDC_PRIMARIES_UNSPECIFIED:
  652. v4l2_primaries = V4L2_COLORSPACE_DEFAULT;
  653. break;
  654. case MSM_VIDC_PRIMARIES_BT709:
  655. v4l2_primaries = V4L2_COLORSPACE_REC709;
  656. break;
  657. case MSM_VIDC_PRIMARIES_BT470_SYSTEM_M:
  658. v4l2_primaries = V4L2_COLORSPACE_470_SYSTEM_M;
  659. break;
  660. case MSM_VIDC_PRIMARIES_BT470_SYSTEM_BG:
  661. v4l2_primaries = V4L2_COLORSPACE_470_SYSTEM_BG;
  662. break;
  663. case MSM_VIDC_PRIMARIES_BT601_525:
  664. v4l2_primaries = V4L2_COLORSPACE_SMPTE170M;
  665. break;
  666. case MSM_VIDC_PRIMARIES_SMPTE_ST240M:
  667. v4l2_primaries = V4L2_COLORSPACE_SMPTE240M;
  668. break;
  669. case MSM_VIDC_PRIMARIES_GENERIC_FILM:
  670. v4l2_primaries = V4L2_COLORSPACE_VIDC_GENERIC_FILM;
  671. break;
  672. case MSM_VIDC_PRIMARIES_BT2020:
  673. v4l2_primaries = V4L2_COLORSPACE_BT2020;
  674. break;
  675. case MSM_VIDC_PRIMARIES_SMPTE_RP431_2:
  676. v4l2_primaries = V4L2_COLORSPACE_DCI_P3;
  677. break;
  678. case MSM_VIDC_PRIMARIES_SMPTE_EG431_1:
  679. v4l2_primaries = V4L2_COLORSPACE_VIDC_EG431;
  680. break;
  681. case MSM_VIDC_PRIMARIES_SMPTE_EBU_TECH:
  682. v4l2_primaries = V4L2_COLORSPACE_VIDC_EBU_TECH;
  683. break;
  684. default:
  685. i_vpr_e(inst, "%s: invalid hfi color primaries %d\n",
  686. func, vidc_color_primaries);
  687. break;
  688. }
  689. return v4l2_primaries;
  690. }
  691. u32 v4l2_transfer_char_to_driver(struct msm_vidc_inst *inst,
  692. u32 v4l2_transfer_char, const char *func)
  693. {
  694. u32 vidc_transfer_char = MSM_VIDC_TRANSFER_RESERVED;
  695. switch(v4l2_transfer_char) {
  696. case V4L2_XFER_FUNC_DEFAULT:
  697. vidc_transfer_char = MSM_VIDC_TRANSFER_RESERVED;
  698. break;
  699. case V4L2_XFER_FUNC_709:
  700. vidc_transfer_char = MSM_VIDC_TRANSFER_BT709;
  701. break;
  702. case V4L2_XFER_FUNC_VIDC_BT470_SYSTEM_M:
  703. vidc_transfer_char = MSM_VIDC_TRANSFER_BT470_SYSTEM_M;
  704. break;
  705. case V4L2_XFER_FUNC_VIDC_BT470_SYSTEM_BG:
  706. vidc_transfer_char = MSM_VIDC_TRANSFER_BT470_SYSTEM_BG;
  707. break;
  708. case V4L2_XFER_FUNC_VIDC_BT601_525_OR_625:
  709. vidc_transfer_char = MSM_VIDC_TRANSFER_BT601_525_OR_625;
  710. break;
  711. case V4L2_XFER_FUNC_SMPTE240M:
  712. vidc_transfer_char = MSM_VIDC_TRANSFER_SMPTE_ST240M;
  713. break;
  714. case V4L2_XFER_FUNC_VIDC_LINEAR:
  715. vidc_transfer_char = MSM_VIDC_TRANSFER_LINEAR;
  716. break;
  717. case V4L2_XFER_FUNC_VIDC_XVYCC:
  718. vidc_transfer_char = MSM_VIDC_TRANSFER_XVYCC;
  719. break;
  720. case V4L2_XFER_FUNC_VIDC_BT1361:
  721. vidc_transfer_char = MSM_VIDC_TRANSFER_BT1361_0;
  722. break;
  723. case V4L2_XFER_FUNC_SRGB:
  724. vidc_transfer_char = MSM_VIDC_TRANSFER_SRGB_SYCC;
  725. break;
  726. case V4L2_XFER_FUNC_VIDC_BT2020:
  727. vidc_transfer_char = MSM_VIDC_TRANSFER_BT2020_14;
  728. break;
  729. case V4L2_XFER_FUNC_SMPTE2084:
  730. vidc_transfer_char = MSM_VIDC_TRANSFER_SMPTE_ST2084_PQ;
  731. break;
  732. case V4L2_XFER_FUNC_VIDC_ST428:
  733. vidc_transfer_char = MSM_VIDC_TRANSFER_SMPTE_ST428_1;
  734. break;
  735. case V4L2_XFER_FUNC_VIDC_HLG:
  736. vidc_transfer_char = MSM_VIDC_TRANSFER_BT2100_2_HLG;
  737. break;
  738. default:
  739. i_vpr_e(inst, "%s: invalid v4l2 transfer char %d\n",
  740. func, v4l2_transfer_char);
  741. break;
  742. }
  743. return vidc_transfer_char;
  744. }
  745. u32 v4l2_transfer_char_from_driver(struct msm_vidc_inst *inst,
  746. u32 vidc_transfer_char, const char *func)
  747. {
  748. u32 v4l2_transfer_char = V4L2_XFER_FUNC_DEFAULT;
  749. switch(vidc_transfer_char) {
  750. case MSM_VIDC_TRANSFER_UNSPECIFIED:
  751. v4l2_transfer_char = V4L2_XFER_FUNC_DEFAULT;
  752. break;
  753. case MSM_VIDC_TRANSFER_BT709:
  754. v4l2_transfer_char = V4L2_XFER_FUNC_709;
  755. break;
  756. case MSM_VIDC_TRANSFER_BT470_SYSTEM_M:
  757. v4l2_transfer_char = V4L2_XFER_FUNC_VIDC_BT470_SYSTEM_M;
  758. break;
  759. case MSM_VIDC_TRANSFER_BT470_SYSTEM_BG:
  760. v4l2_transfer_char = V4L2_XFER_FUNC_VIDC_BT470_SYSTEM_BG;
  761. break;
  762. case MSM_VIDC_TRANSFER_BT601_525_OR_625:
  763. v4l2_transfer_char = V4L2_XFER_FUNC_VIDC_BT601_525_OR_625;
  764. break;
  765. case MSM_VIDC_TRANSFER_SMPTE_ST240M:
  766. v4l2_transfer_char = V4L2_XFER_FUNC_SMPTE240M;
  767. break;
  768. case MSM_VIDC_TRANSFER_LINEAR:
  769. v4l2_transfer_char = V4L2_XFER_FUNC_VIDC_LINEAR;
  770. break;
  771. case MSM_VIDC_TRANSFER_XVYCC:
  772. v4l2_transfer_char = V4L2_XFER_FUNC_VIDC_XVYCC;
  773. break;
  774. case MSM_VIDC_TRANSFER_BT1361_0:
  775. v4l2_transfer_char = V4L2_XFER_FUNC_VIDC_BT1361;
  776. break;
  777. case MSM_VIDC_TRANSFER_SRGB_SYCC:
  778. v4l2_transfer_char = V4L2_XFER_FUNC_SRGB;
  779. break;
  780. case MSM_VIDC_TRANSFER_BT2020_14:
  781. case MSM_VIDC_TRANSFER_BT2020_15:
  782. v4l2_transfer_char = V4L2_XFER_FUNC_VIDC_BT2020;
  783. break;
  784. case MSM_VIDC_TRANSFER_SMPTE_ST2084_PQ:
  785. v4l2_transfer_char = V4L2_XFER_FUNC_SMPTE2084;
  786. break;
  787. case MSM_VIDC_TRANSFER_SMPTE_ST428_1:
  788. v4l2_transfer_char = V4L2_XFER_FUNC_VIDC_ST428;
  789. break;
  790. case MSM_VIDC_TRANSFER_BT2100_2_HLG:
  791. v4l2_transfer_char = V4L2_XFER_FUNC_VIDC_HLG;
  792. break;
  793. default:
  794. i_vpr_e(inst, "%s: invalid hfi transfer char %d\n",
  795. func, vidc_transfer_char);
  796. break;
  797. }
  798. return v4l2_transfer_char;
  799. }
  800. u32 v4l2_matrix_coeff_to_driver(struct msm_vidc_inst *inst,
  801. u32 v4l2_matrix_coeff, const char *func)
  802. {
  803. u32 vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_RESERVED;
  804. switch(v4l2_matrix_coeff) {
  805. case V4L2_YCBCR_ENC_DEFAULT:
  806. vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_RESERVED;
  807. break;
  808. case V4L2_YCBCR_VIDC_SRGB_OR_SMPTE_ST428:
  809. vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_SRGB_SMPTE_ST428_1;
  810. break;
  811. case V4L2_YCBCR_ENC_709:
  812. case V4L2_YCBCR_ENC_XV709:
  813. vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_BT709;
  814. break;
  815. case V4L2_YCBCR_VIDC_FCC47_73_682:
  816. vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_FCC_TITLE_47;
  817. break;
  818. case V4L2_YCBCR_ENC_XV601:
  819. vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_BT470_SYS_BG_OR_BT601_625;
  820. break;
  821. case V4L2_YCBCR_ENC_601:
  822. vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_BT601_525_BT1358_525_OR_625;
  823. break;
  824. case V4L2_YCBCR_ENC_SMPTE240M:
  825. vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_SMPTE_ST240;
  826. break;
  827. case V4L2_YCBCR_ENC_BT2020:
  828. vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_BT2020_NON_CONSTANT;
  829. break;
  830. case V4L2_YCBCR_ENC_BT2020_CONST_LUM:
  831. vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_BT2020_CONSTANT;
  832. break;
  833. default:
  834. i_vpr_e(inst, "%s: invalid v4l2 matrix coeff %d\n",
  835. func, v4l2_matrix_coeff);
  836. break;
  837. }
  838. return vidc_matrix_coeff;
  839. }
  840. u32 v4l2_matrix_coeff_from_driver(struct msm_vidc_inst *inst,
  841. u32 vidc_matrix_coeff, const char *func)
  842. {
  843. u32 v4l2_matrix_coeff = V4L2_YCBCR_ENC_DEFAULT;
  844. switch(vidc_matrix_coeff) {
  845. case MSM_VIDC_MATRIX_COEFF_SRGB_SMPTE_ST428_1:
  846. v4l2_matrix_coeff = V4L2_YCBCR_VIDC_SRGB_OR_SMPTE_ST428;
  847. break;
  848. case MSM_VIDC_MATRIX_COEFF_BT709:
  849. v4l2_matrix_coeff = V4L2_YCBCR_ENC_709;
  850. break;
  851. case MSM_VIDC_MATRIX_COEFF_UNSPECIFIED:
  852. v4l2_matrix_coeff = V4L2_YCBCR_ENC_DEFAULT;
  853. break;
  854. case MSM_VIDC_MATRIX_COEFF_FCC_TITLE_47:
  855. v4l2_matrix_coeff = V4L2_YCBCR_VIDC_FCC47_73_682;
  856. break;
  857. case MSM_VIDC_MATRIX_COEFF_BT470_SYS_BG_OR_BT601_625:
  858. v4l2_matrix_coeff = V4L2_YCBCR_ENC_XV601;
  859. break;
  860. case MSM_VIDC_MATRIX_COEFF_BT601_525_BT1358_525_OR_625:
  861. v4l2_matrix_coeff = V4L2_YCBCR_ENC_601;
  862. break;
  863. case MSM_VIDC_MATRIX_COEFF_SMPTE_ST240:
  864. v4l2_matrix_coeff = V4L2_YCBCR_ENC_SMPTE240M;
  865. break;
  866. case MSM_VIDC_MATRIX_COEFF_BT2020_NON_CONSTANT:
  867. v4l2_matrix_coeff = V4L2_YCBCR_ENC_BT2020;
  868. break;
  869. case MSM_VIDC_MATRIX_COEFF_BT2020_CONSTANT:
  870. v4l2_matrix_coeff = V4L2_YCBCR_ENC_BT2020_CONST_LUM;
  871. break;
  872. default:
  873. i_vpr_e(inst, "%s: invalid hfi matrix coeff %d\n",
  874. func, vidc_matrix_coeff);
  875. break;
  876. }
  877. return v4l2_matrix_coeff;
  878. }
  879. int v4l2_type_to_driver_port(struct msm_vidc_inst *inst, u32 type,
  880. const char *func)
  881. {
  882. int port;
  883. if (type == INPUT_MPLANE) {
  884. port = INPUT_PORT;
  885. } else if (type == INPUT_META_PLANE) {
  886. port = INPUT_META_PORT;
  887. } else if (type == OUTPUT_MPLANE) {
  888. port = OUTPUT_PORT;
  889. } else if (type == OUTPUT_META_PLANE) {
  890. port = OUTPUT_META_PORT;
  891. } else {
  892. i_vpr_e(inst, "%s: port not found for v4l2 type %d\n",
  893. func, type);
  894. port = -EINVAL;
  895. }
  896. return port;
  897. }
/**
 * msm_vidc_get_buffer_region() - select the memory protection region for a
 * buffer type, based on whether the session is secure and whether it is an
 * encode or decode session.
 * @inst:        instance being queried.
 * @buffer_type: driver buffer type.
 * @func:        caller name, used in the error log for unknown types.
 *
 * Return: an MSM_VIDC_*SECURE* region id; unknown types are logged and fall
 * back to the MSM_VIDC_NON_SECURE initializer.
 */
u32 msm_vidc_get_buffer_region(struct msm_vidc_inst *inst,
	enum msm_vidc_buffer_type buffer_type, const char *func)
{
	u32 region = MSM_VIDC_NON_SECURE;

	if (!is_secure_session(inst)) {
		switch (buffer_type) {
		case MSM_VIDC_BUF_ARP:
			/*
			 * NOTE(review): ARP maps to SECURE_NONPIXEL even in
			 * the non-secure path — confirm this is intended.
			 */
			region = MSM_VIDC_SECURE_NONPIXEL;
			break;
		case MSM_VIDC_BUF_INPUT:
			/* encoder input is raw pixels; decoder input is bitstream */
			if (is_encode_session(inst))
				region = MSM_VIDC_NON_SECURE_PIXEL;
			else
				region = MSM_VIDC_NON_SECURE;
			break;
		case MSM_VIDC_BUF_OUTPUT:
			/* mirror of INPUT: encoder output is bitstream */
			if (is_encode_session(inst))
				region = MSM_VIDC_NON_SECURE;
			else
				region = MSM_VIDC_NON_SECURE_PIXEL;
			break;
		case MSM_VIDC_BUF_DPB:
		case MSM_VIDC_BUF_VPSS:
		case MSM_VIDC_BUF_PARTIAL_DATA:
			region = MSM_VIDC_NON_SECURE_PIXEL;
			break;
		case MSM_VIDC_BUF_INPUT_META:
		case MSM_VIDC_BUF_OUTPUT_META:
		case MSM_VIDC_BUF_BIN:
		case MSM_VIDC_BUF_COMV:
		case MSM_VIDC_BUF_NON_COMV:
		case MSM_VIDC_BUF_LINE:
		case MSM_VIDC_BUF_PERSIST:
			region = MSM_VIDC_NON_SECURE;
			break;
		default:
			i_vpr_e(inst, "%s: invalid driver buffer type %d\n",
				func, buffer_type);
		}
	} else {
		switch (buffer_type) {
		case MSM_VIDC_BUF_INPUT:
			if (is_encode_session(inst))
				region = MSM_VIDC_SECURE_PIXEL;
			else
				region = MSM_VIDC_SECURE_BITSTREAM;
			break;
		case MSM_VIDC_BUF_OUTPUT:
			if (is_encode_session(inst))
				region = MSM_VIDC_SECURE_BITSTREAM;
			else
				region = MSM_VIDC_SECURE_PIXEL;
			break;
		case MSM_VIDC_BUF_INPUT_META:
		case MSM_VIDC_BUF_OUTPUT_META:
			/* meta buffers stay non-secure even for secure sessions */
			region = MSM_VIDC_NON_SECURE;
			break;
		case MSM_VIDC_BUF_DPB:
		case MSM_VIDC_BUF_VPSS:
		case MSM_VIDC_BUF_PARTIAL_DATA:
			region = MSM_VIDC_SECURE_PIXEL;
			break;
		case MSM_VIDC_BUF_BIN:
			region = MSM_VIDC_SECURE_BITSTREAM;
			break;
		case MSM_VIDC_BUF_ARP:
		case MSM_VIDC_BUF_COMV:
		case MSM_VIDC_BUF_NON_COMV:
		case MSM_VIDC_BUF_LINE:
		case MSM_VIDC_BUF_PERSIST:
			region = MSM_VIDC_SECURE_NONPIXEL;
			break;
		default:
			i_vpr_e(inst, "%s: invalid driver buffer type %d\n",
				func, buffer_type);
		}
	}
	return region;
}
  977. struct msm_vidc_buffers *msm_vidc_get_buffers(
  978. struct msm_vidc_inst *inst, enum msm_vidc_buffer_type buffer_type,
  979. const char *func)
  980. {
  981. switch (buffer_type) {
  982. case MSM_VIDC_BUF_INPUT:
  983. return &inst->buffers.input;
  984. case MSM_VIDC_BUF_INPUT_META:
  985. return &inst->buffers.input_meta;
  986. case MSM_VIDC_BUF_OUTPUT:
  987. return &inst->buffers.output;
  988. case MSM_VIDC_BUF_OUTPUT_META:
  989. return &inst->buffers.output_meta;
  990. case MSM_VIDC_BUF_READ_ONLY:
  991. return &inst->buffers.read_only;
  992. case MSM_VIDC_BUF_BIN:
  993. return &inst->buffers.bin;
  994. case MSM_VIDC_BUF_ARP:
  995. return &inst->buffers.arp;
  996. case MSM_VIDC_BUF_COMV:
  997. return &inst->buffers.comv;
  998. case MSM_VIDC_BUF_NON_COMV:
  999. return &inst->buffers.non_comv;
  1000. case MSM_VIDC_BUF_LINE:
  1001. return &inst->buffers.line;
  1002. case MSM_VIDC_BUF_DPB:
  1003. return &inst->buffers.dpb;
  1004. case MSM_VIDC_BUF_PERSIST:
  1005. return &inst->buffers.persist;
  1006. case MSM_VIDC_BUF_VPSS:
  1007. return &inst->buffers.vpss;
  1008. case MSM_VIDC_BUF_PARTIAL_DATA:
  1009. return &inst->buffers.partial_data;
  1010. case MSM_VIDC_BUF_QUEUE:
  1011. return NULL;
  1012. default:
  1013. i_vpr_e(inst, "%s: invalid driver buffer type %d\n",
  1014. func, buffer_type);
  1015. return NULL;
  1016. }
  1017. }
  1018. struct msm_vidc_mappings *msm_vidc_get_mappings(
  1019. struct msm_vidc_inst *inst, enum msm_vidc_buffer_type buffer_type,
  1020. const char *func)
  1021. {
  1022. switch (buffer_type) {
  1023. case MSM_VIDC_BUF_INPUT:
  1024. return &inst->mappings.input;
  1025. case MSM_VIDC_BUF_INPUT_META:
  1026. return &inst->mappings.input_meta;
  1027. case MSM_VIDC_BUF_OUTPUT:
  1028. return &inst->mappings.output;
  1029. case MSM_VIDC_BUF_OUTPUT_META:
  1030. return &inst->mappings.output_meta;
  1031. case MSM_VIDC_BUF_BIN:
  1032. return &inst->mappings.bin;
  1033. case MSM_VIDC_BUF_ARP:
  1034. return &inst->mappings.arp;
  1035. case MSM_VIDC_BUF_COMV:
  1036. return &inst->mappings.comv;
  1037. case MSM_VIDC_BUF_NON_COMV:
  1038. return &inst->mappings.non_comv;
  1039. case MSM_VIDC_BUF_LINE:
  1040. return &inst->mappings.line;
  1041. case MSM_VIDC_BUF_DPB:
  1042. return &inst->mappings.dpb;
  1043. case MSM_VIDC_BUF_PERSIST:
  1044. return &inst->mappings.persist;
  1045. case MSM_VIDC_BUF_VPSS:
  1046. return &inst->mappings.vpss;
  1047. case MSM_VIDC_BUF_PARTIAL_DATA:
  1048. return &inst->mappings.partial_data;
  1049. default:
  1050. i_vpr_e(inst, "%s: invalid driver buffer type %d\n",
  1051. func, buffer_type);
  1052. return NULL;
  1053. }
  1054. }
  1055. struct msm_vidc_allocations *msm_vidc_get_allocations(
  1056. struct msm_vidc_inst *inst, enum msm_vidc_buffer_type buffer_type,
  1057. const char *func)
  1058. {
  1059. switch (buffer_type) {
  1060. case MSM_VIDC_BUF_BIN:
  1061. return &inst->allocations.bin;
  1062. case MSM_VIDC_BUF_ARP:
  1063. return &inst->allocations.arp;
  1064. case MSM_VIDC_BUF_COMV:
  1065. return &inst->allocations.comv;
  1066. case MSM_VIDC_BUF_NON_COMV:
  1067. return &inst->allocations.non_comv;
  1068. case MSM_VIDC_BUF_LINE:
  1069. return &inst->allocations.line;
  1070. case MSM_VIDC_BUF_DPB:
  1071. return &inst->allocations.dpb;
  1072. case MSM_VIDC_BUF_PERSIST:
  1073. return &inst->allocations.persist;
  1074. case MSM_VIDC_BUF_VPSS:
  1075. return &inst->allocations.vpss;
  1076. case MSM_VIDC_BUF_PARTIAL_DATA:
  1077. return &inst->allocations.partial_data;
  1078. default:
  1079. i_vpr_e(inst, "%s: invalid driver buffer type %d\n",
  1080. func, buffer_type);
  1081. return NULL;
  1082. }
  1083. }
  1084. bool res_is_greater_than(u32 width, u32 height,
  1085. u32 ref_width, u32 ref_height)
  1086. {
  1087. u32 num_mbs = NUM_MBS_PER_FRAME(height, width);
  1088. u32 max_side = max(ref_width, ref_height);
  1089. if (num_mbs > NUM_MBS_PER_FRAME(ref_height, ref_width) ||
  1090. width > max_side ||
  1091. height > max_side)
  1092. return true;
  1093. else
  1094. return false;
  1095. }
  1096. bool res_is_greater_than_or_equal_to(u32 width, u32 height,
  1097. u32 ref_width, u32 ref_height)
  1098. {
  1099. u32 num_mbs = NUM_MBS_PER_FRAME(height, width);
  1100. u32 max_side = max(ref_width, ref_height);
  1101. if (num_mbs >= NUM_MBS_PER_FRAME(ref_height, ref_width) ||
  1102. width >= max_side ||
  1103. height >= max_side)
  1104. return true;
  1105. else
  1106. return false;
  1107. }
  1108. bool res_is_less_than(u32 width, u32 height,
  1109. u32 ref_width, u32 ref_height)
  1110. {
  1111. u32 num_mbs = NUM_MBS_PER_FRAME(height, width);
  1112. u32 max_side = max(ref_width, ref_height);
  1113. if (num_mbs < NUM_MBS_PER_FRAME(ref_height, ref_width) &&
  1114. width < max_side &&
  1115. height < max_side)
  1116. return true;
  1117. else
  1118. return false;
  1119. }
  1120. bool res_is_less_than_or_equal_to(u32 width, u32 height,
  1121. u32 ref_width, u32 ref_height)
  1122. {
  1123. u32 num_mbs = NUM_MBS_PER_FRAME(height, width);
  1124. u32 max_side = max(ref_width, ref_height);
  1125. if (num_mbs <= NUM_MBS_PER_FRAME(ref_height, ref_width) &&
  1126. width <= max_side &&
  1127. height <= max_side)
  1128. return true;
  1129. else
  1130. return false;
  1131. }
  1132. int signal_session_msg_receipt(struct msm_vidc_inst *inst,
  1133. enum signal_session_response cmd)
  1134. {
  1135. if (cmd < MAX_SIGNAL)
  1136. complete(&inst->completions[cmd]);
  1137. return 0;
  1138. }
  1139. int msm_vidc_change_core_state(struct msm_vidc_core *core,
  1140. enum msm_vidc_core_state request_state, const char *func)
  1141. {
  1142. if (!core) {
  1143. d_vpr_e("%s: invalid params\n", __func__);
  1144. return -EINVAL;
  1145. }
  1146. d_vpr_h("%s: core state changed to %s from %s\n",
  1147. func, core_state_name(request_state),
  1148. core_state_name(core->state));
  1149. core->state = request_state;
  1150. return 0;
  1151. }
/*
 * Transition @inst to @request_state.
 *
 * A zero request state is rejected with -EINVAL. If the session is
 * already in the error state the request is ignored (returns 0), so an
 * errored instance can never be moved out of MSM_VIDC_ERROR by a late
 * state change. Transitions into MSM_VIDC_ERROR are logged at error
 * level, all others at high level; the transition is traced before
 * inst->state is committed. @func names the caller for the logs.
 */
int msm_vidc_change_state(struct msm_vidc_inst *inst,
    enum msm_vidc_state request_state, const char *func)
{
    if (!inst) {
        d_vpr_e("%s: invalid params\n", __func__);
        return -EINVAL;
    }
    if (!request_state) {
        i_vpr_e(inst, "%s: invalid request state\n", func);
        return -EINVAL;
    }
    if (is_session_error(inst)) {
        i_vpr_h(inst,
            "%s: inst is in bad state, can not change state to %s\n",
            func, state_name(request_state));
        return 0;
    }
    /* error transitions are logged louder so they stand out */
    if (request_state == MSM_VIDC_ERROR)
        i_vpr_e(inst, FMT_STRING_STATE_CHANGE,
            func, state_name(request_state), state_name(inst->state));
    else
        i_vpr_h(inst, FMT_STRING_STATE_CHANGE,
            func, state_name(request_state), state_name(inst->state));
    /* trace old -> new before the state is overwritten */
    trace_msm_vidc_common_state_change(inst, func, state_name(inst->state),
        state_name(request_state));
    inst->state = request_state;
    return 0;
}
/*
 * Atomically set the bits in @set_sub_state and clear the bits in
 * @clear_sub_state on inst->sub_state.
 *
 * No-ops (returning 0) when the session is in the error state or when
 * both masks are empty. Rejects overlapping set/clear masks and masks
 * above MSM_VIDC_MAX_SUB_STATE_VALUE with -EINVAL. On an effective
 * change, rebuilds the human-readable inst->sub_state_name from the
 * resulting bitmask and logs it, attributed to @func.
 */
int msm_vidc_change_sub_state(struct msm_vidc_inst *inst,
    enum msm_vidc_sub_state clear_sub_state,
    enum msm_vidc_sub_state set_sub_state, const char *func)
{
    int i = 0;
    enum msm_vidc_sub_state prev_sub_state;

    if (!inst) {
        d_vpr_e("%s: invalid params\n", __func__);
        return -EINVAL;
    }
    if (is_session_error(inst)) {
        i_vpr_h(inst,
            "%s: inst is in bad state, can not change sub state\n", func);
        return 0;
    }
    if (!clear_sub_state && !set_sub_state)
        return 0;
    /* a bit may not be both set and cleared in the same call */
    if ((clear_sub_state & set_sub_state) ||
        (set_sub_state > MSM_VIDC_MAX_SUB_STATE_VALUE) ||
        (clear_sub_state > MSM_VIDC_MAX_SUB_STATE_VALUE)) {
        i_vpr_e(inst, "%s: invalid sub states to clear %#x or set %#x\n",
            func, clear_sub_state, set_sub_state);
        return -EINVAL;
    }
    prev_sub_state = inst->sub_state;
    /* set bits first, then clear; the masks are disjoint by the check above */
    inst->sub_state |= set_sub_state;
    inst->sub_state &= ~clear_sub_state;
    /* print substates only when there is a change */
    if (inst->sub_state != prev_sub_state) {
        /* reset the cached name, then append one token per set bit */
        strlcpy(inst->sub_state_name, "\0", sizeof(inst->sub_state_name));
        for (i = 0; i < MSM_VIDC_MAX_SUB_STATES; i++) {
            if (inst->sub_state == MSM_VIDC_SUB_STATE_NONE) {
                strlcpy(inst->sub_state_name, "SUB_STATE_NONE",
                    sizeof(inst->sub_state_name));
                break;
            }
            if (inst->sub_state & BIT(i))
                strlcat(inst->sub_state_name, sub_state_name(BIT(i)),
                    sizeof(inst->sub_state_name));
        }
        i_vpr_h(inst, "%s: sub state changed to %s\n", func, inst->sub_state_name);
    }
    return 0;
}
  1224. bool msm_vidc_allow_s_fmt(struct msm_vidc_inst *inst, u32 type)
  1225. {
  1226. bool allow = false;
  1227. if (!inst) {
  1228. d_vpr_e("%s: invalid params\n", __func__);
  1229. return false;
  1230. }
  1231. if (is_state(inst, MSM_VIDC_OPEN)) {
  1232. allow = true;
  1233. goto exit;
  1234. }
  1235. if (type == OUTPUT_MPLANE || type == OUTPUT_META_PLANE) {
  1236. if (is_state(inst, MSM_VIDC_INPUT_STREAMING)) {
  1237. allow = true;
  1238. goto exit;
  1239. }
  1240. }
  1241. if (type == INPUT_MPLANE || type == INPUT_META_PLANE) {
  1242. if (is_state(inst, MSM_VIDC_OUTPUT_STREAMING)) {
  1243. allow = true;
  1244. goto exit;
  1245. }
  1246. }
  1247. exit:
  1248. if (!allow)
  1249. i_vpr_e(inst, "%s: type %d not allowed in state %s\n",
  1250. __func__, type, state_name(inst->state));
  1251. return allow;
  1252. }
  1253. bool msm_vidc_allow_s_ctrl(struct msm_vidc_inst *inst,
  1254. enum msm_vidc_inst_capability_type cap_id)
  1255. {
  1256. bool allow = false;
  1257. if (!inst || !inst->capabilities) {
  1258. d_vpr_e("%s: invalid params\n", __func__);
  1259. return false;
  1260. }
  1261. if (is_state(inst, MSM_VIDC_OPEN)) {
  1262. allow = true;
  1263. goto exit;
  1264. }
  1265. if (!inst->capabilities->cap[cap_id].cap_id ||
  1266. !inst->capabilities->cap[cap_id].v4l2_id) {
  1267. allow = false;
  1268. goto exit;
  1269. }
  1270. if (is_decode_session(inst)) {
  1271. if (!inst->bufq[INPUT_PORT].vb2q->streaming) {
  1272. allow = true;
  1273. goto exit;
  1274. }
  1275. if (inst->bufq[INPUT_PORT].vb2q->streaming) {
  1276. if (inst->capabilities->cap[cap_id].flags &
  1277. CAP_FLAG_DYNAMIC_ALLOWED)
  1278. allow = true;
  1279. }
  1280. } else if (is_encode_session(inst)) {
  1281. if (!inst->bufq[OUTPUT_PORT].vb2q->streaming) {
  1282. allow = true;
  1283. goto exit;
  1284. }
  1285. if (inst->bufq[OUTPUT_PORT].vb2q->streaming) {
  1286. if (inst->capabilities->cap[cap_id].flags &
  1287. CAP_FLAG_DYNAMIC_ALLOWED)
  1288. allow = true;
  1289. }
  1290. }
  1291. exit:
  1292. if (!allow)
  1293. i_vpr_e(inst, "%s: cap_id %#x not allowed in state %s\n",
  1294. __func__, cap_id, state_name(inst->state));
  1295. return allow;
  1296. }
/*
 * Metadata delivery is unconditionally permitted; @inst, @cap_id and
 * @port are accepted for interface parity with
 * msm_vidc_allow_metadata_subscription() but are not consulted.
 */
bool msm_vidc_allow_metadata_delivery(struct msm_vidc_inst *inst, u32 cap_id,
    u32 port)
{
    return true;
}
  1302. bool msm_vidc_allow_metadata_subscription(struct msm_vidc_inst *inst, u32 cap_id,
  1303. u32 port)
  1304. {
  1305. bool is_allowed = true;
  1306. if (!inst || !inst->capabilities) {
  1307. d_vpr_e("%s: invalid params\n", __func__);
  1308. return false;
  1309. }
  1310. if (port == INPUT_PORT) {
  1311. switch (cap_id) {
  1312. case META_BUF_TAG:
  1313. case META_BITSTREAM_RESOLUTION:
  1314. case META_CROP_OFFSETS:
  1315. case META_SEI_MASTERING_DISP:
  1316. case META_SEI_CLL:
  1317. case META_HDR10PLUS:
  1318. case META_PICTURE_TYPE:
  1319. if (!is_meta_rx_inp_enabled(inst, META_OUTBUF_FENCE)) {
  1320. i_vpr_h(inst,
  1321. "%s: cap: %24s not allowed as output buffer fence is disabled\n",
  1322. __func__, cap_name(cap_id));
  1323. is_allowed = false;
  1324. }
  1325. break;
  1326. default:
  1327. is_allowed = true;
  1328. break;
  1329. }
  1330. } else if (port == OUTPUT_PORT) {
  1331. switch (cap_id) {
  1332. case META_DPB_TAG_LIST:
  1333. if (!is_ubwc_colorformat(inst->capabilities->cap[PIX_FMTS].value)) {
  1334. i_vpr_h(inst,
  1335. "%s: cap: %24s not allowed for split mode\n",
  1336. __func__, cap_name(cap_id));
  1337. is_allowed = false;
  1338. }
  1339. break;
  1340. default:
  1341. is_allowed = true;
  1342. break;
  1343. }
  1344. } else {
  1345. i_vpr_e(inst, "%s: invalid port %d\n", __func__, port);
  1346. is_allowed = false;
  1347. }
  1348. return is_allowed;
  1349. }
  1350. bool msm_vidc_allow_property(struct msm_vidc_inst *inst, u32 hfi_id)
  1351. {
  1352. bool is_allowed = true;
  1353. if (!inst || !inst->capabilities) {
  1354. d_vpr_e("%s: invalid params\n", __func__);
  1355. return false;
  1356. }
  1357. switch (hfi_id) {
  1358. case HFI_PROP_WORST_COMPRESSION_RATIO:
  1359. case HFI_PROP_WORST_COMPLEXITY_FACTOR:
  1360. case HFI_PROP_PICTURE_TYPE:
  1361. is_allowed = true;
  1362. break;
  1363. case HFI_PROP_DPB_LIST:
  1364. if (!is_ubwc_colorformat(inst->capabilities->cap[PIX_FMTS].value)) {
  1365. i_vpr_h(inst,
  1366. "%s: cap: %24s not allowed for split mode\n",
  1367. __func__, cap_name(DPB_LIST));
  1368. is_allowed = false;
  1369. }
  1370. break;
  1371. case HFI_PROP_FENCE:
  1372. if (!is_meta_rx_inp_enabled(inst, META_OUTBUF_FENCE)) {
  1373. i_vpr_h(inst,
  1374. "%s: cap: %24s not enabled, hence not allowed to subscribe\n",
  1375. __func__, cap_name(META_OUTBUF_FENCE));
  1376. is_allowed = false;
  1377. }
  1378. break;
  1379. default:
  1380. is_allowed = true;
  1381. break;
  1382. }
  1383. return is_allowed;
  1384. }
  1385. int msm_vidc_update_property_cap(struct msm_vidc_inst *inst, u32 hfi_id,
  1386. bool allow)
  1387. {
  1388. int rc = 0;
  1389. if (!inst || !inst->capabilities) {
  1390. d_vpr_e("%s: invalid params\n", __func__);
  1391. return -EINVAL;
  1392. }
  1393. switch (hfi_id) {
  1394. case HFI_PROP_WORST_COMPRESSION_RATIO:
  1395. case HFI_PROP_WORST_COMPLEXITY_FACTOR:
  1396. case HFI_PROP_PICTURE_TYPE:
  1397. break;
  1398. case HFI_PROP_DPB_LIST:
  1399. if (!allow)
  1400. memset(inst->dpb_list_payload, 0, MAX_DPB_LIST_ARRAY_SIZE);
  1401. msm_vidc_update_cap_value(inst, DPB_LIST, allow, __func__);
  1402. break;
  1403. default:
  1404. break;
  1405. }
  1406. return rc;
  1407. }
  1408. bool msm_vidc_allow_reqbufs(struct msm_vidc_inst *inst, u32 type)
  1409. {
  1410. bool allow = false;
  1411. if (!inst) {
  1412. d_vpr_e("%s: invalid params\n", __func__);
  1413. return false;
  1414. }
  1415. if (is_state(inst, MSM_VIDC_OPEN)) {
  1416. allow = true;
  1417. goto exit;
  1418. }
  1419. if (type == OUTPUT_MPLANE || type == OUTPUT_META_PLANE) {
  1420. if (is_state(inst, MSM_VIDC_INPUT_STREAMING)) {
  1421. allow = true;
  1422. goto exit;
  1423. }
  1424. }
  1425. if (type == INPUT_MPLANE || type == INPUT_META_PLANE) {
  1426. if (is_state(inst, MSM_VIDC_OUTPUT_STREAMING)) {
  1427. allow = true;
  1428. goto exit;
  1429. }
  1430. }
  1431. exit:
  1432. if (!allow)
  1433. i_vpr_e(inst, "%s: type %d not allowed in state %s\n",
  1434. __func__, type, state_name(inst->state));
  1435. return allow;
  1436. }
  1437. enum msm_vidc_allow msm_vidc_allow_stop(struct msm_vidc_inst *inst)
  1438. {
  1439. enum msm_vidc_allow allow = MSM_VIDC_DISALLOW;
  1440. if (!inst) {
  1441. d_vpr_e("%s: invalid params\n", __func__);
  1442. return allow;
  1443. }
  1444. /* allow stop (drain) if input port is streaming */
  1445. if (is_state(inst, MSM_VIDC_INPUT_STREAMING) ||
  1446. is_state(inst, MSM_VIDC_STREAMING)) {
  1447. /* do not allow back to back drain */
  1448. if (!(is_sub_state(inst, MSM_VIDC_DRAIN)))
  1449. allow = MSM_VIDC_ALLOW;
  1450. } else if (is_state(inst, MSM_VIDC_OPEN)) {
  1451. allow = MSM_VIDC_IGNORE;
  1452. i_vpr_e(inst, "%s: ignored in state %s, sub state %s\n",
  1453. __func__, state_name(inst->state), inst->sub_state_name);
  1454. } else {
  1455. i_vpr_e(inst, "%s: not allowed in state %s, sub state %s\n",
  1456. __func__, state_name(inst->state), inst->sub_state_name);
  1457. }
  1458. return allow;
  1459. }
  1460. bool msm_vidc_allow_start(struct msm_vidc_inst *inst)
  1461. {
  1462. bool allow = false;
  1463. if (!inst) {
  1464. d_vpr_e("%s: invalid params\n", __func__);
  1465. return allow;
  1466. }
  1467. /* client would call start (resume) to complete DRC/drain sequence */
  1468. if (inst->state == MSM_VIDC_INPUT_STREAMING ||
  1469. inst->state == MSM_VIDC_OUTPUT_STREAMING ||
  1470. inst->state == MSM_VIDC_STREAMING) {
  1471. if ((is_sub_state(inst, MSM_VIDC_DRC) &&
  1472. is_sub_state(inst, MSM_VIDC_DRC_LAST_BUFFER)) ||
  1473. (is_sub_state(inst, MSM_VIDC_DRAIN) &&
  1474. is_sub_state(inst, MSM_VIDC_DRAIN_LAST_BUFFER)))
  1475. allow = true;
  1476. }
  1477. if (!allow)
  1478. i_vpr_e(inst, "%s: not allowed in state %s, sub state %s\n",
  1479. __func__, state_name(inst->state), inst->sub_state_name);
  1480. return allow;
  1481. }
  1482. bool msm_vidc_allow_streamon(struct msm_vidc_inst *inst, u32 type)
  1483. {
  1484. if (!inst) {
  1485. d_vpr_e("%s: invalid params\n", __func__);
  1486. return false;
  1487. }
  1488. if (type == INPUT_MPLANE || type == INPUT_META_PLANE) {
  1489. if (is_state(inst, MSM_VIDC_OPEN) ||
  1490. is_state(inst, MSM_VIDC_OUTPUT_STREAMING))
  1491. return true;
  1492. } else if (type == OUTPUT_MPLANE || type == OUTPUT_META_PLANE) {
  1493. if (is_state(inst, MSM_VIDC_OPEN) ||
  1494. is_state(inst, MSM_VIDC_INPUT_STREAMING))
  1495. return true;
  1496. }
  1497. i_vpr_e(inst, "%s: type %d not allowed in state %s\n",
  1498. __func__, type, state_name(inst->state));
  1499. return false;
  1500. }
  1501. enum msm_vidc_allow msm_vidc_allow_streamoff(struct msm_vidc_inst *inst, u32 type)
  1502. {
  1503. enum msm_vidc_allow allow = MSM_VIDC_ALLOW;
  1504. if (!inst) {
  1505. d_vpr_e("%s: invalid params\n", __func__);
  1506. return MSM_VIDC_DISALLOW;
  1507. }
  1508. if (type == INPUT_MPLANE) {
  1509. if (!inst->bufq[INPUT_PORT].vb2q->streaming)
  1510. allow = MSM_VIDC_IGNORE;
  1511. } else if (type == INPUT_META_PLANE) {
  1512. if (inst->bufq[INPUT_PORT].vb2q->streaming)
  1513. allow = MSM_VIDC_DISALLOW;
  1514. else if (!inst->bufq[INPUT_META_PORT].vb2q->streaming)
  1515. allow = MSM_VIDC_IGNORE;
  1516. } else if (type == OUTPUT_MPLANE) {
  1517. if (!inst->bufq[OUTPUT_PORT].vb2q->streaming)
  1518. allow = MSM_VIDC_IGNORE;
  1519. } else if (type == OUTPUT_META_PLANE) {
  1520. if (inst->bufq[OUTPUT_PORT].vb2q->streaming)
  1521. allow = MSM_VIDC_DISALLOW;
  1522. else if (!inst->bufq[OUTPUT_META_PORT].vb2q->streaming)
  1523. allow = MSM_VIDC_IGNORE;
  1524. }
  1525. if (allow != MSM_VIDC_ALLOW)
  1526. i_vpr_e(inst, "%s: type %d is %s in state %s\n",
  1527. __func__, type, allow_name(allow),
  1528. state_name(inst->state));
  1529. return allow;
  1530. }
  1531. enum msm_vidc_allow msm_vidc_allow_qbuf(struct msm_vidc_inst *inst, u32 type)
  1532. {
  1533. int port = 0;
  1534. if (!inst) {
  1535. d_vpr_e("%s: invalid params\n", __func__);
  1536. return MSM_VIDC_DISALLOW;
  1537. }
  1538. port = v4l2_type_to_driver_port(inst, type, __func__);
  1539. if (port < 0)
  1540. return MSM_VIDC_DISALLOW;
  1541. /* defer queuing if streamon not completed */
  1542. if (!inst->bufq[port].vb2q->streaming)
  1543. return MSM_VIDC_DEFER;
  1544. if (type == INPUT_META_PLANE || type == OUTPUT_META_PLANE)
  1545. return MSM_VIDC_DEFER;
  1546. if (type == INPUT_MPLANE) {
  1547. if (is_state(inst, MSM_VIDC_OPEN) ||
  1548. is_state(inst, MSM_VIDC_OUTPUT_STREAMING))
  1549. return MSM_VIDC_DEFER;
  1550. else
  1551. return MSM_VIDC_ALLOW;
  1552. } else if (type == OUTPUT_MPLANE) {
  1553. if (is_state(inst, MSM_VIDC_OPEN) ||
  1554. is_state(inst, MSM_VIDC_INPUT_STREAMING))
  1555. return MSM_VIDC_DEFER;
  1556. else
  1557. return MSM_VIDC_ALLOW;
  1558. } else {
  1559. i_vpr_e(inst, "%s: unknown buffer type %d\n", __func__, type);
  1560. return MSM_VIDC_DISALLOW;
  1561. }
  1562. return MSM_VIDC_DISALLOW;
  1563. }
  1564. enum msm_vidc_allow msm_vidc_allow_input_psc(struct msm_vidc_inst *inst)
  1565. {
  1566. enum msm_vidc_allow allow = MSM_VIDC_ALLOW;
  1567. if (!inst) {
  1568. d_vpr_e("%s: invalid params\n", __func__);
  1569. return MSM_VIDC_DISALLOW;
  1570. }
  1571. /*
  1572. * if drc sequence is not completed by client, fw is not
  1573. * expected to raise another ipsc
  1574. */
  1575. if (is_sub_state(inst, MSM_VIDC_DRC)) {
  1576. i_vpr_e(inst, "%s: not allowed in sub state %s\n",
  1577. __func__, inst->sub_state_name);
  1578. return MSM_VIDC_DISALLOW;
  1579. }
  1580. return allow;
  1581. }
  1582. bool msm_vidc_allow_drain_last_flag(struct msm_vidc_inst *inst)
  1583. {
  1584. if (!inst) {
  1585. d_vpr_e("%s: invalid params\n", __func__);
  1586. return false;
  1587. }
  1588. /*
  1589. * drain last flag is expected only when DRAIN, INPUT_PAUSE
  1590. * is set and DRAIN_LAST_BUFFER is not set
  1591. */
  1592. if (is_sub_state(inst, MSM_VIDC_DRAIN) &&
  1593. is_sub_state(inst, MSM_VIDC_INPUT_PAUSE) &&
  1594. !is_sub_state(inst, MSM_VIDC_DRAIN_LAST_BUFFER))
  1595. return true;
  1596. i_vpr_e(inst, "%s: not allowed in sub state %s\n",
  1597. __func__, inst->sub_state_name);
  1598. return false;
  1599. }
  1600. bool msm_vidc_allow_psc_last_flag(struct msm_vidc_inst *inst)
  1601. {
  1602. if (!inst) {
  1603. d_vpr_e("%s: invalid params\n", __func__);
  1604. return false;
  1605. }
  1606. /*
  1607. * drc last flag is expected only when DRC, INPUT_PAUSE
  1608. * is set and DRC_LAST_BUFFER is not set
  1609. */
  1610. if (is_sub_state(inst, MSM_VIDC_DRC) &&
  1611. is_sub_state(inst, MSM_VIDC_INPUT_PAUSE) &&
  1612. !is_sub_state(inst, MSM_VIDC_DRC_LAST_BUFFER))
  1613. return true;
  1614. i_vpr_e(inst, "%s: not allowed in sub state %s\n",
  1615. __func__, inst->sub_state_name);
  1616. return false;
  1617. }
  1618. int msm_vidc_state_change_streamon(struct msm_vidc_inst *inst,
  1619. enum msm_vidc_port_type port)
  1620. {
  1621. int rc = 0;
  1622. enum msm_vidc_state new_state = MSM_VIDC_ERROR;
  1623. if (!inst || !inst->core) {
  1624. d_vpr_e("%s: invalid params\n", __func__);
  1625. return -EINVAL;
  1626. }
  1627. if (port == INPUT_META_PORT || port == OUTPUT_META_PORT)
  1628. return 0;
  1629. if (port == INPUT_PORT) {
  1630. if (is_state(inst, MSM_VIDC_OPEN))
  1631. new_state = MSM_VIDC_INPUT_STREAMING;
  1632. else if (is_state(inst, MSM_VIDC_OUTPUT_STREAMING))
  1633. new_state = MSM_VIDC_STREAMING;
  1634. } else if (port == OUTPUT_PORT) {
  1635. if (is_state(inst, MSM_VIDC_OPEN))
  1636. new_state = MSM_VIDC_OUTPUT_STREAMING;
  1637. else if (is_state(inst, MSM_VIDC_INPUT_STREAMING))
  1638. new_state = MSM_VIDC_STREAMING;
  1639. }
  1640. rc = msm_vidc_change_state(inst, new_state, __func__);
  1641. if (rc)
  1642. return rc;
  1643. return rc;
  1644. }
  1645. int msm_vidc_state_change_streamoff(struct msm_vidc_inst *inst,
  1646. enum msm_vidc_port_type port)
  1647. {
  1648. int rc = 0;
  1649. enum msm_vidc_state new_state = MSM_VIDC_ERROR;
  1650. if (!inst || !inst->core) {
  1651. d_vpr_e("%s: invalid params\n", __func__);
  1652. return -EINVAL;
  1653. }
  1654. if (port == INPUT_META_PORT || port == OUTPUT_META_PORT)
  1655. return 0;
  1656. if (port == INPUT_PORT) {
  1657. if (is_state(inst, MSM_VIDC_INPUT_STREAMING)) {
  1658. new_state = MSM_VIDC_OPEN;
  1659. } else if (is_state(inst, MSM_VIDC_STREAMING)) {
  1660. new_state = MSM_VIDC_OUTPUT_STREAMING;
  1661. }
  1662. } else if (port == OUTPUT_PORT) {
  1663. if (is_state(inst, MSM_VIDC_OUTPUT_STREAMING)) {
  1664. new_state = MSM_VIDC_OPEN;
  1665. } else if (is_state(inst, MSM_VIDC_STREAMING)) {
  1666. new_state = MSM_VIDC_INPUT_STREAMING;
  1667. }
  1668. }
  1669. rc = msm_vidc_change_state(inst, new_state, __func__);
  1670. if (rc)
  1671. goto exit;
  1672. exit:
  1673. return rc;
  1674. }
  1675. int msm_vidc_process_drain(struct msm_vidc_inst *inst)
  1676. {
  1677. int rc = 0;
  1678. if (!inst) {
  1679. d_vpr_e("%s: invalid params\n", __func__);
  1680. return -EINVAL;
  1681. }
  1682. rc = venus_hfi_session_drain(inst, INPUT_PORT);
  1683. if (rc)
  1684. return rc;
  1685. rc = msm_vidc_change_sub_state(inst, 0, MSM_VIDC_DRAIN, __func__);
  1686. if (rc)
  1687. return rc;
  1688. msm_vidc_scale_power(inst, true);
  1689. return rc;
  1690. }
/*
 * Complete a client resume: clear whichever of the DRC or drain
 * sub-state pairs has finished and, where appropriate, resume the paused
 * firmware ports with the matching HFI command (SETTINGS_CHANGE for DRC,
 * DRAIN for drain). DRC completion is checked before drain completion.
 * All accumulated sub-state bits are cleared in one call at the end so
 * the sub-state is only updated after every firmware resume succeeded.
 */
int msm_vidc_process_resume(struct msm_vidc_inst *inst)
{
    int rc = 0;
    enum msm_vidc_sub_state clear_sub_state = MSM_VIDC_SUB_STATE_NONE;
    bool drain_pending = false;

    if (!inst) {
        d_vpr_e("%s: invalid params\n", __func__);
        return -EINVAL;
    }
    msm_vidc_scale_power(inst, true);
    /* first check DRC pending else check drain pending */
    if (is_sub_state(inst, MSM_VIDC_DRC) &&
        is_sub_state(inst, MSM_VIDC_DRC_LAST_BUFFER)) {
        clear_sub_state = MSM_VIDC_DRC | MSM_VIDC_DRC_LAST_BUFFER;
        /*
         * if drain sequence is not completed then do not resume here.
         * client will eventually complete drain sequence in which ports
         * will be resumed.
         */
        drain_pending = is_sub_state(inst, MSM_VIDC_DRAIN) &&
            is_sub_state(inst, MSM_VIDC_DRAIN_LAST_BUFFER);
        if (!drain_pending) {
            if (is_sub_state(inst, MSM_VIDC_INPUT_PAUSE)) {
                rc = venus_hfi_session_resume(inst, INPUT_PORT,
                    HFI_CMD_SETTINGS_CHANGE);
                if (rc)
                    return rc;
                clear_sub_state |= MSM_VIDC_INPUT_PAUSE;
            }
            if (is_sub_state(inst, MSM_VIDC_OUTPUT_PAUSE)) {
                rc = venus_hfi_session_resume(inst, OUTPUT_PORT,
                    HFI_CMD_SETTINGS_CHANGE);
                if (rc)
                    return rc;
                clear_sub_state |= MSM_VIDC_OUTPUT_PAUSE;
            }
        }
    } else if (is_sub_state(inst, MSM_VIDC_DRAIN) &&
        is_sub_state(inst, MSM_VIDC_DRAIN_LAST_BUFFER)) {
        /* drain finished: resume both ports with the DRAIN command */
        clear_sub_state = MSM_VIDC_DRAIN | MSM_VIDC_DRAIN_LAST_BUFFER;
        if (is_sub_state(inst, MSM_VIDC_INPUT_PAUSE)) {
            rc = venus_hfi_session_resume(inst, INPUT_PORT, HFI_CMD_DRAIN);
            if (rc)
                return rc;
            clear_sub_state |= MSM_VIDC_INPUT_PAUSE;
        }
        if (is_sub_state(inst, MSM_VIDC_OUTPUT_PAUSE)) {
            rc = venus_hfi_session_resume(inst, OUTPUT_PORT, HFI_CMD_DRAIN);
            if (rc)
                return rc;
            clear_sub_state |= MSM_VIDC_OUTPUT_PAUSE;
        }
    }
    rc = msm_vidc_change_sub_state(inst, clear_sub_state, 0, __func__);
    if (rc)
        return rc;
    return rc;
}
/*
 * Handle STREAMON on the input port: start the firmware input port,
 * drop any stale INPUT_PAUSE sub-state, re-pause firmware input if a
 * DRC/drain sequence is still outstanding (so firmware cannot raise
 * another IPSC), then advance the session state machine.
 * clear_sub_state stays SUB_STATE_NONE throughout; only set_sub_state
 * may pick up INPUT_PAUSE below.
 */
int msm_vidc_process_streamon_input(struct msm_vidc_inst *inst)
{
    int rc = 0;
    enum msm_vidc_sub_state clear_sub_state = MSM_VIDC_SUB_STATE_NONE;
    enum msm_vidc_sub_state set_sub_state = MSM_VIDC_SUB_STATE_NONE;

    if (!inst) {
        d_vpr_e("%s: invalid params\n", __func__);
        return -EINVAL;
    }
    msm_vidc_scale_power(inst, true);
    rc = venus_hfi_start(inst, INPUT_PORT);
    if (rc)
        return rc;
    /* clear input pause substate immediately */
    if (is_sub_state(inst, MSM_VIDC_INPUT_PAUSE)) {
        rc = msm_vidc_change_sub_state(inst, MSM_VIDC_INPUT_PAUSE, 0, __func__);
        if (rc)
            return rc;
    }
    /*
     * if DRC sequence is not completed by the client then PAUSE
     * firmware input port to avoid firmware raising IPSC again.
     * When client completes DRC or DRAIN sequences, firmware
     * input port will be resumed.
     */
    if (is_sub_state(inst, MSM_VIDC_DRC) ||
        is_sub_state(inst, MSM_VIDC_DRAIN)) {
        if (!is_sub_state(inst, MSM_VIDC_INPUT_PAUSE)) {
            rc = venus_hfi_session_pause(inst, INPUT_PORT);
            if (rc)
                return rc;
            set_sub_state = MSM_VIDC_INPUT_PAUSE;
        }
    }
    rc = msm_vidc_state_change_streamon(inst, INPUT_PORT);
    if (rc)
        return rc;
    rc = msm_vidc_change_sub_state(inst, clear_sub_state, set_sub_state, __func__);
    if (rc)
        return rc;
    return rc;
}
/*
 * Handle STREAMON on the output port. In order: fold a completed DRC
 * sub-state pair into the pending clear mask; for a paused decoder,
 * reallocate/queue input internal buffers and reprogram stage/pipe
 * before the input port is resumed; resume the paused firmware input
 * port (unless a drain is still pending); start the firmware output
 * port; drop any stale OUTPUT_PAUSE; advance the state machine and
 * finally commit the accumulated sub-state changes in one call.
 */
int msm_vidc_process_streamon_output(struct msm_vidc_inst *inst)
{
    int rc = 0;
    enum msm_vidc_sub_state clear_sub_state = MSM_VIDC_SUB_STATE_NONE;
    enum msm_vidc_sub_state set_sub_state = MSM_VIDC_SUB_STATE_NONE;
    bool drain_pending = false;

    if (!inst) {
        d_vpr_e("%s: invalid params\n", __func__);
        return -EINVAL;
    }
    msm_vidc_scale_power(inst, true);
    /*
     * client completed drc sequence, reset DRC and
     * MSM_VIDC_DRC_LAST_BUFFER substates
     */
    if (is_sub_state(inst, MSM_VIDC_DRC) &&
        is_sub_state(inst, MSM_VIDC_DRC_LAST_BUFFER)) {
        clear_sub_state = MSM_VIDC_DRC | MSM_VIDC_DRC_LAST_BUFFER;
    }
    /*
     * Client is completing port reconfiguration, hence reallocate
     * input internal buffers before input port is resumed.
     * Drc sub-state cannot be checked because DRC sub-state will
     * not be set during initial port reconfiguration.
     */
    if (is_decode_session(inst) &&
        is_sub_state(inst, MSM_VIDC_INPUT_PAUSE)) {
        rc = msm_vidc_alloc_and_queue_input_internal_buffers(inst);
        if (rc)
            return rc;
        rc = msm_vidc_set_stage(inst, STAGE);
        if (rc)
            return rc;
        rc = msm_vidc_set_pipe(inst, PIPE);
        if (rc)
            return rc;
    }
    /*
     * fw input port is paused due to ipsc. now that client
     * completed drc sequence, resume fw input port provided
     * drain is not pending and input port is streaming.
     */
    drain_pending = is_sub_state(inst, MSM_VIDC_DRAIN) &&
        is_sub_state(inst, MSM_VIDC_DRAIN_LAST_BUFFER);
    if (!drain_pending && is_state(inst, MSM_VIDC_INPUT_STREAMING)) {
        if (is_sub_state(inst, MSM_VIDC_INPUT_PAUSE)) {
            rc = venus_hfi_session_resume(inst, INPUT_PORT,
                HFI_CMD_SETTINGS_CHANGE);
            if (rc)
                return rc;
            clear_sub_state |= MSM_VIDC_INPUT_PAUSE;
        }
    }
    rc = venus_hfi_start(inst, OUTPUT_PORT);
    if (rc)
        return rc;
    /* clear output pause substate immediately */
    if (is_sub_state(inst, MSM_VIDC_OUTPUT_PAUSE)) {
        rc = msm_vidc_change_sub_state(inst, MSM_VIDC_OUTPUT_PAUSE, 0, __func__);
        if (rc)
            return rc;
    }
    rc = msm_vidc_state_change_streamon(inst, OUTPUT_PORT);
    if (rc)
        return rc;
    /* set_sub_state is never populated here; only clears are committed */
    rc = msm_vidc_change_sub_state(inst, clear_sub_state, set_sub_state, __func__);
    if (rc)
        return rc;
    return rc;
}
/*
 * Handle a firmware stop-done for @signal_type. An input stop marks the
 * input port paused; if firmware failed to deliver the DRC or (for
 * decoders) drain last-flag packet that must precede input stop-done,
 * the session is moved to the error state. An output stop marks the
 * output port paused. Finally the waiter blocked on this signal is
 * completed.
 */
int msm_vidc_process_stop_done(struct msm_vidc_inst *inst,
    enum signal_session_response signal_type)
{
    int rc = 0;
    enum msm_vidc_sub_state set_sub_state = MSM_VIDC_SUB_STATE_NONE;

    if (!inst) {
        d_vpr_e("%s: invalid params\n", __func__);
        return -EINVAL;
    }
    if (signal_type == SIGNAL_CMD_STOP_INPUT) {
        set_sub_state = MSM_VIDC_INPUT_PAUSE;
        /*
         * FW is expected to return DRC LAST flag before input
         * stop done if DRC sequence is pending
         */
        if (is_sub_state(inst, MSM_VIDC_DRC) &&
            !is_sub_state(inst, MSM_VIDC_DRC_LAST_BUFFER)) {
            i_vpr_e(inst, "%s: drc last flag pkt not received\n", __func__);
            msm_vidc_change_state(inst, MSM_VIDC_ERROR, __func__);
        }
        /*
         * for a decode session, FW is expected to return
         * DRAIN LAST flag before input stop done if
         * DRAIN sequence is pending
         */
        if (is_decode_session(inst) &&
            is_sub_state(inst, MSM_VIDC_DRAIN) &&
            !is_sub_state(inst, MSM_VIDC_DRAIN_LAST_BUFFER)) {
            i_vpr_e(inst, "%s: drain last flag pkt not received\n", __func__);
            msm_vidc_change_state(inst, MSM_VIDC_ERROR, __func__);
        }
    } else if (signal_type == SIGNAL_CMD_STOP_OUTPUT) {
        set_sub_state = MSM_VIDC_OUTPUT_PAUSE;
    }
    rc = msm_vidc_change_sub_state(inst, 0, set_sub_state, __func__);
    if (rc)
        return rc;
    /* wake up the thread waiting on this stop command */
    signal_session_msg_receipt(inst, signal_type);
    return rc;
}
  1901. int msm_vidc_process_drain_done(struct msm_vidc_inst *inst)
  1902. {
  1903. int rc = 0;
  1904. if (!inst) {
  1905. d_vpr_e("%s: invalid params\n", __func__);
  1906. return -EINVAL;
  1907. }
  1908. if (is_sub_state(inst, MSM_VIDC_DRAIN)) {
  1909. rc = msm_vidc_change_sub_state(inst, 0, MSM_VIDC_INPUT_PAUSE, __func__);
  1910. if (rc)
  1911. return rc;
  1912. } else {
  1913. i_vpr_e(inst, "%s: unexpected drain done\n", __func__);
  1914. }
  1915. return rc;
  1916. }
  1917. int msm_vidc_process_drain_last_flag(struct msm_vidc_inst *inst)
  1918. {
  1919. int rc = 0;
  1920. struct v4l2_event event = {0};
  1921. struct v4l2_event_vidc_last_flag *event_data = NULL;
  1922. if (!inst || !inst->capabilities) {
  1923. d_vpr_e("%s: invalid params\n", __func__);
  1924. return -EINVAL;
  1925. }
  1926. rc = msm_vidc_state_change_drain_last_flag(inst);
  1927. if (rc)
  1928. return rc;
  1929. if (is_decode_session(inst) &&
  1930. !inst->capabilities->cap[LAST_FLAG_EVENT_ENABLE].value) {
  1931. i_vpr_h(inst, "%s: last flag event not enabled\n", __func__);
  1932. return 0;
  1933. }
  1934. event.type = V4L2_EVENT_VIDC_LAST_FLAG;
  1935. event_data = (struct v4l2_event_vidc_last_flag *)event.u.data;
  1936. event_data->flag_type = LAST_FLAG_DRAIN;
  1937. v4l2_event_queue_fh(&inst->event_handler, &event);
  1938. return rc;
  1939. }
  1940. int msm_vidc_process_psc_last_flag(struct msm_vidc_inst *inst)
  1941. {
  1942. int rc = 0;
  1943. struct v4l2_event event = {0};
  1944. struct v4l2_event_vidc_last_flag *event_data = NULL;
  1945. if (!inst || !inst->capabilities) {
  1946. d_vpr_e("%s: invalid params\n", __func__);
  1947. return -EINVAL;
  1948. }
  1949. rc = msm_vidc_state_change_psc_last_flag(inst);
  1950. if (rc)
  1951. return rc;
  1952. if (is_decode_session(inst) &&
  1953. !inst->capabilities->cap[LAST_FLAG_EVENT_ENABLE].value) {
  1954. i_vpr_h(inst, "%s: last flag event not enabled\n", __func__);
  1955. return 0;
  1956. }
  1957. event.type = V4L2_EVENT_VIDC_LAST_FLAG;
  1958. event_data = (struct v4l2_event_vidc_last_flag *)event.u.data;
  1959. event_data->flag_type = LAST_FLAG_DRC;
  1960. v4l2_event_queue_fh(&inst->event_handler, &event);
  1961. return rc;
  1962. }
  1963. int msm_vidc_state_change_input_psc(struct msm_vidc_inst *inst)
  1964. {
  1965. int rc = 0;
  1966. enum msm_vidc_sub_state set_sub_state = MSM_VIDC_SUB_STATE_NONE;
  1967. if (!inst || !inst->core) {
  1968. d_vpr_e("%s: invalid params\n", __func__);
  1969. return -EINVAL;
  1970. }
  1971. /*
  1972. * if output port is not streaming, then do not set DRC substate
  1973. * because DRC_LAST_FLAG is not going to be received. Update
  1974. * INPUT_PAUSE substate only
  1975. */
  1976. if (is_state(inst, MSM_VIDC_INPUT_STREAMING) ||
  1977. is_state(inst, MSM_VIDC_OPEN))
  1978. set_sub_state = MSM_VIDC_INPUT_PAUSE;
  1979. else
  1980. set_sub_state = MSM_VIDC_DRC | MSM_VIDC_INPUT_PAUSE;
  1981. rc = msm_vidc_change_sub_state(inst, 0, set_sub_state, __func__);
  1982. if (rc)
  1983. return rc;
  1984. return rc;
  1985. }
  1986. int msm_vidc_state_change_drain_last_flag(struct msm_vidc_inst *inst)
  1987. {
  1988. int rc = 0;
  1989. enum msm_vidc_sub_state set_sub_state = MSM_VIDC_SUB_STATE_NONE;
  1990. if (!inst || !inst->core) {
  1991. d_vpr_e("%s: invalid params\n", __func__);
  1992. return -EINVAL;
  1993. }
  1994. set_sub_state = MSM_VIDC_DRAIN_LAST_BUFFER | MSM_VIDC_OUTPUT_PAUSE;
  1995. rc = msm_vidc_change_sub_state(inst, 0, set_sub_state, __func__);
  1996. if (rc)
  1997. return rc;
  1998. return rc;
  1999. }
  2000. int msm_vidc_state_change_psc_last_flag(struct msm_vidc_inst *inst)
  2001. {
  2002. int rc = 0;
  2003. enum msm_vidc_sub_state set_sub_state = MSM_VIDC_SUB_STATE_NONE;
  2004. if (!inst || !inst->core) {
  2005. d_vpr_e("%s: invalid params\n", __func__);
  2006. return -EINVAL;
  2007. }
  2008. set_sub_state = MSM_VIDC_DRC_LAST_BUFFER | MSM_VIDC_OUTPUT_PAUSE;
  2009. rc = msm_vidc_change_sub_state(inst, 0, set_sub_state, __func__);
  2010. if (rc)
  2011. return rc;
  2012. return rc;
  2013. }
  2014. int msm_vidc_get_fence_fd(struct msm_vidc_inst *inst, int *fence_fd)
  2015. {
  2016. int rc = 0;
  2017. struct msm_vidc_fence *fence, *dummy_fence;
  2018. bool found = false;
  2019. *fence_fd = INVALID_FD;
  2020. if (!inst || !inst->capabilities) {
  2021. d_vpr_e("%s: invalid params\n", __func__);
  2022. return -EINVAL;
  2023. }
  2024. list_for_each_entry_safe(fence, dummy_fence, &inst->fence_list, list) {
  2025. if (fence->dma_fence.seqno ==
  2026. (u64)inst->capabilities->cap[FENCE_ID].value) {
  2027. found = true;
  2028. break;
  2029. }
  2030. }
  2031. if (!found) {
  2032. i_vpr_h(inst, "%s: could not find matching fence for fence id: %d\n",
  2033. __func__, inst->capabilities->cap[FENCE_ID].value);
  2034. goto exit;
  2035. }
  2036. if (fence->fd == INVALID_FD) {
  2037. rc = msm_vidc_create_fence_fd(inst, fence);
  2038. if (rc)
  2039. goto exit;
  2040. }
  2041. *fence_fd = fence->fd;
  2042. exit:
  2043. return rc;
  2044. }
  2045. int msm_vidc_get_control(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
  2046. {
  2047. int rc = 0;
  2048. enum msm_vidc_inst_capability_type cap_id;
  2049. if (!inst || !ctrl) {
  2050. d_vpr_e("%s: invalid params\n", __func__);
  2051. return -EINVAL;
  2052. }
  2053. cap_id = msm_vidc_get_cap_id(inst, ctrl->id);
  2054. if (!is_valid_cap_id(cap_id)) {
  2055. i_vpr_e(inst, "%s: could not find cap_id for ctrl %s\n",
  2056. __func__, ctrl->name);
  2057. return -EINVAL;
  2058. }
  2059. switch (cap_id) {
  2060. case MIN_BUFFERS_OUTPUT:
  2061. ctrl->val = inst->buffers.output.min_count +
  2062. inst->buffers.output.extra_count;
  2063. i_vpr_h(inst, "g_min: output buffers %d\n", ctrl->val);
  2064. break;
  2065. case MIN_BUFFERS_INPUT:
  2066. ctrl->val = inst->buffers.input.min_count +
  2067. inst->buffers.input.extra_count;
  2068. i_vpr_h(inst, "g_min: input buffers %d\n", ctrl->val);
  2069. break;
  2070. case FILM_GRAIN:
  2071. ctrl->val = inst->capabilities->cap[FILM_GRAIN].value;
  2072. i_vpr_h(inst, "%s: film grain present: %d\n",
  2073. __func__, ctrl->val);
  2074. break;
  2075. case FENCE_FD:
  2076. rc = msm_vidc_get_fence_fd(inst, &ctrl->val);
  2077. if (!rc)
  2078. i_vpr_l(inst, "%s: fence fd: %d\n",
  2079. __func__, ctrl->val);
  2080. break;
  2081. default:
  2082. i_vpr_e(inst, "invalid ctrl %s id %d\n",
  2083. ctrl->name, ctrl->id);
  2084. return -EINVAL;
  2085. }
  2086. return rc;
  2087. }
  2088. int msm_vidc_get_mbs_per_frame(struct msm_vidc_inst *inst)
  2089. {
  2090. int height = 0, width = 0;
  2091. struct v4l2_format *inp_f;
  2092. if (is_decode_session(inst)) {
  2093. inp_f = &inst->fmts[INPUT_PORT];
  2094. width = max(inp_f->fmt.pix_mp.width, inst->crop.width);
  2095. height = max(inp_f->fmt.pix_mp.height, inst->crop.height);
  2096. } else if (is_encode_session(inst)) {
  2097. width = inst->crop.width;
  2098. height = inst->crop.height;
  2099. }
  2100. return NUM_MBS_PER_FRAME(height, width);
  2101. }
  2102. int msm_vidc_get_fps(struct msm_vidc_inst *inst)
  2103. {
  2104. int fps;
  2105. u32 frame_rate, operating_rate;
  2106. if (!inst || !inst->capabilities) {
  2107. d_vpr_e("%s: invalid params\n", __func__);
  2108. return -EINVAL;
  2109. }
  2110. frame_rate = msm_vidc_get_frame_rate(inst);
  2111. operating_rate = msm_vidc_get_operating_rate(inst);
  2112. if (operating_rate > frame_rate)
  2113. fps = operating_rate ? operating_rate : 1;
  2114. else
  2115. fps = frame_rate;
  2116. return fps;
  2117. }
  2118. int msm_vidc_num_buffers(struct msm_vidc_inst *inst,
  2119. enum msm_vidc_buffer_type type, enum msm_vidc_buffer_attributes attr)
  2120. {
  2121. int count = 0;
  2122. struct msm_vidc_buffer *vbuf;
  2123. struct msm_vidc_buffers *buffers;
  2124. if (!inst) {
  2125. d_vpr_e("%s: invalid params\n", __func__);
  2126. return count;
  2127. }
  2128. if (type == MSM_VIDC_BUF_OUTPUT) {
  2129. buffers = &inst->buffers.output;
  2130. } else if (type == MSM_VIDC_BUF_INPUT) {
  2131. buffers = &inst->buffers.input;
  2132. } else {
  2133. i_vpr_e(inst, "%s: invalid buffer type %#x\n",
  2134. __func__, type);
  2135. return count;
  2136. }
  2137. list_for_each_entry(vbuf, &buffers->list, list) {
  2138. if (vbuf->type != type)
  2139. continue;
  2140. if (!(vbuf->attr & attr))
  2141. continue;
  2142. count++;
  2143. }
  2144. return count;
  2145. }
  2146. static int vb2_buffer_to_driver(struct vb2_buffer *vb2,
  2147. struct msm_vidc_buffer *buf)
  2148. {
  2149. int rc = 0;
  2150. if (!vb2 || !buf) {
  2151. d_vpr_e("%s: invalid params\n", __func__);
  2152. return -EINVAL;
  2153. }
  2154. buf->type = v4l2_type_to_driver(vb2->type, __func__);
  2155. if (!buf->type)
  2156. return -EINVAL;
  2157. buf->index = vb2->index;
  2158. buf->fd = vb2->planes[0].m.fd;
  2159. buf->data_offset = vb2->planes[0].data_offset;
  2160. buf->data_size = vb2->planes[0].bytesused - vb2->planes[0].data_offset;
  2161. buf->buffer_size = vb2->planes[0].length;
  2162. buf->timestamp = vb2->timestamp;
  2163. return rc;
  2164. }
  2165. int msm_vidc_process_readonly_buffers(struct msm_vidc_inst *inst,
  2166. struct msm_vidc_buffer *buf)
  2167. {
  2168. int rc = 0;
  2169. struct msm_vidc_buffer *ro_buf, *dummy;
  2170. struct msm_vidc_buffers *ro_buffers;
  2171. if (!inst || !buf) {
  2172. d_vpr_e("%s: invalid params\n", __func__);
  2173. return -EINVAL;
  2174. }
  2175. if (!is_decode_session(inst) || !is_output_buffer(buf->type))
  2176. return 0;
  2177. ro_buffers = msm_vidc_get_buffers(inst, MSM_VIDC_BUF_READ_ONLY, __func__);
  2178. if (!ro_buffers)
  2179. return -EINVAL;
  2180. /*
  2181. * check if buffer present in ro_buffers list
  2182. * if present: add ro flag to buf and remove from ro_buffers list
  2183. * if not present: do nothing
  2184. */
  2185. list_for_each_entry_safe(ro_buf, dummy, &ro_buffers->list, list) {
  2186. if (ro_buf->device_addr == buf->device_addr) {
  2187. buf->attr |= MSM_VIDC_ATTR_READ_ONLY;
  2188. print_vidc_buffer(VIDC_LOW, "low ", "ro buf removed", inst, ro_buf);
  2189. list_del(&ro_buf->list);
  2190. msm_memory_pool_free(inst, ro_buf);
  2191. break;
  2192. }
  2193. }
  2194. return rc;
  2195. }
  2196. int msm_vidc_memory_unmap_completely(struct msm_vidc_inst *inst,
  2197. struct msm_vidc_map *map)
  2198. {
  2199. int rc = 0;
  2200. if (!inst || !map) {
  2201. d_vpr_e("%s: invalid params\n", __func__);
  2202. return -EINVAL;
  2203. }
  2204. if (!map->refcount)
  2205. return 0;
  2206. while (map->refcount) {
  2207. rc = msm_vidc_memory_unmap(inst->core, map);
  2208. if (rc)
  2209. break;
  2210. if (!map->refcount) {
  2211. msm_vidc_memory_put_dmabuf(inst, map->dmabuf);
  2212. list_del(&map->list);
  2213. msm_memory_pool_free(inst, map);
  2214. break;
  2215. }
  2216. }
  2217. return rc;
  2218. }
/*
 * msm_vidc_set_auto_framerate() - derive the encoder frame rate from
 * queued-buffer timestamps and program it to firmware when it changes.
 *
 * @inst: video session instance
 * @timestamp: timestamp of the latest input buffer (same units as the
 * entries in inst->timestamps; presumably microseconds given the
 * USEC_PER_SEC division — TODO confirm at the call site)
 *
 * Return: 0 on success or when auto framerate does not apply, negative
 * error code on failure.
 */
int msm_vidc_set_auto_framerate(struct msm_vidc_inst *inst, u64 timestamp)
{
	struct msm_vidc_core *core;
	struct msm_vidc_timestamp *ts;
	struct msm_vidc_timestamp *prev = NULL;
	u32 counter = 0, prev_fr = 0, curr_fr = 0;
	u64 time_us = 0;
	int rc = 0;

	if (!inst || !inst->core || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;
	/*
	 * bail out when the platform lacks auto framerate, for image and
	 * super-buffer sessions, or when time-delta based RC is disabled
	 */
	if (!core->capabilities[ENC_AUTO_FRAMERATE].value ||
	is_image_session(inst) || msm_vidc_is_super_buffer(inst) ||
	!inst->capabilities->cap[TIME_DELTA_BASED_RC].value)
		goto exit;
	/* insert @timestamp into the sorted sliding window first */
	rc = msm_vidc_update_timestamp_rate(inst, timestamp);
	if (rc)
		goto exit;
	/* walk the sorted window; compute Q16 fps from successive deltas */
	list_for_each_entry(ts, &inst->timestamps.list, sort.list) {
		if (prev) {
			time_us = ts->sort.val - prev->sort.val;
			prev_fr = curr_fr;
			/* zero delta: fall back to the last programmed rate */
			curr_fr = time_us ? DIV64_U64_ROUND_CLOSEST(USEC_PER_SEC, time_us) << 16 :
				inst->auto_framerate;
			/* clamp to the platform FRAME_RATE maximum */
			if (curr_fr > inst->capabilities->cap[FRAME_RATE].max)
				curr_fr = inst->capabilities->cap[FRAME_RATE].max;
		}
		prev = ts;
		counter++;
	}
	/* need a full window of samples before acting on the estimate */
	if (counter < ENC_FPS_WINDOW)
		goto exit;
	/* if framerate changed and stable for 2 frames, set to firmware */
	if (curr_fr == prev_fr && curr_fr != inst->auto_framerate) {
		i_vpr_l(inst, "%s: updated fps: %u -> %u\n", __func__,
			inst->auto_framerate >> 16, curr_fr >> 16);
		rc = venus_hfi_session_property(inst,
			HFI_PROP_FRAME_RATE,
			HFI_HOST_FLAGS_NONE,
			HFI_PORT_BITSTREAM,
			HFI_PAYLOAD_Q16,
			&curr_fr,
			sizeof(u32));
		if (rc) {
			i_vpr_e(inst, "%s: set auto frame rate failed\n",
				__func__);
			goto exit;
		}
		inst->auto_framerate = curr_fr;
	}
exit:
	return rc;
}
/*
 * msm_vidc_update_input_rate() - record an input-buffer arrival time and
 * refresh the INPUT_RATE capability (Q16 fps) over a sliding window.
 *
 * @inst: video session instance
 * @time_us: arrival time of the current input buffer, in microseconds
 *
 * Return: 0 on success, -EINVAL/-ENOMEM on failure.
 */
int msm_vidc_update_input_rate(struct msm_vidc_inst *inst, u64 time_us)
{
	struct msm_vidc_input_timer *input_timer;
	struct msm_vidc_input_timer *prev_timer = NULL;
	u64 counter = 0;
	u64 input_timer_sum_us = 0;

	if (!inst || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	input_timer = msm_memory_pool_alloc(inst, MSM_MEM_POOL_BUF_TIMER);
	if (!input_timer)
		return -ENOMEM;
	input_timer->time_us = time_us;
	INIT_LIST_HEAD(&input_timer->list);
	list_add_tail(&input_timer->list, &inst->input_timer_list);
	/*
	 * sum deltas between consecutive arrivals; counter ends up as the
	 * number of deltas. Note: input_timer is deliberately reused as
	 * the loop cursor here.
	 */
	list_for_each_entry(input_timer, &inst->input_timer_list, list) {
		if (prev_timer) {
			input_timer_sum_us += input_timer->time_us - prev_timer->time_us;
			counter++;
		}
		prev_timer = input_timer;
	}
	/* average fps over the window, stored in Q16 fixed point */
	if (input_timer_sum_us && counter >= INPUT_TIMER_LIST_SIZE)
		inst->capabilities->cap[INPUT_RATE].value =
			(s32)(DIV64_U64_ROUND_CLOSEST(counter * 1000000,
				input_timer_sum_us) << 16);
	/* delete the first entry once counter >= INPUT_TIMER_LIST_SIZE */
	if (counter >= INPUT_TIMER_LIST_SIZE) {
		input_timer = list_first_entry(&inst->input_timer_list,
			struct msm_vidc_input_timer, list);
		list_del_init(&input_timer->list);
		msm_memory_pool_free(inst, input_timer);
	}
	return 0;
}
  2310. int msm_vidc_flush_input_timer(struct msm_vidc_inst *inst)
  2311. {
  2312. struct msm_vidc_input_timer *input_timer, *dummy_timer;
  2313. if (!inst || !inst->capabilities) {
  2314. d_vpr_e("%s: invalid params\n", __func__);
  2315. return -EINVAL;
  2316. }
  2317. i_vpr_l(inst, "%s: flush input_timer list\n", __func__);
  2318. list_for_each_entry_safe(input_timer, dummy_timer, &inst->input_timer_list, list) {
  2319. list_del_init(&input_timer->list);
  2320. msm_memory_pool_free(inst, input_timer);
  2321. }
  2322. return 0;
  2323. }
  2324. int msm_vidc_get_input_rate(struct msm_vidc_inst *inst)
  2325. {
  2326. if (!inst || !inst->capabilities) {
  2327. d_vpr_e("%s: Invalid params\n", __func__);
  2328. return 0;
  2329. }
  2330. return inst->capabilities->cap[INPUT_RATE].value >> 16;
  2331. }
  2332. int msm_vidc_get_timestamp_rate(struct msm_vidc_inst *inst)
  2333. {
  2334. if (!inst || !inst->capabilities) {
  2335. d_vpr_e("%s: Invalid params\n", __func__);
  2336. return 0;
  2337. }
  2338. return inst->capabilities->cap[TIMESTAMP_RATE].value >> 16;
  2339. }
  2340. int msm_vidc_get_frame_rate(struct msm_vidc_inst *inst)
  2341. {
  2342. if (!inst || !inst->capabilities) {
  2343. d_vpr_e("%s: Invalid params\n", __func__);
  2344. return 0;
  2345. }
  2346. return inst->capabilities->cap[FRAME_RATE].value >> 16;
  2347. }
  2348. int msm_vidc_get_operating_rate(struct msm_vidc_inst *inst)
  2349. {
  2350. if (!inst || !inst->capabilities) {
  2351. d_vpr_e("%s: Invalid params\n", __func__);
  2352. return 0;
  2353. }
  2354. return inst->capabilities->cap[OPERATING_RATE].value >> 16;
  2355. }
/*
 * msm_vidc_insert_sort() - insert @entry into @head keeping the list
 * sorted by ascending ->val.
 *
 * @head: sorted list head
 * @entry: node to insert (its ->list is linked by this call)
 *
 * Return: 0 on success, -EINVAL on bad params.
 */
static int msm_vidc_insert_sort(struct list_head *head,
	struct msm_vidc_sort *entry)
{
	struct msm_vidc_sort *first, *node;
	struct msm_vidc_sort *prev = NULL;
	bool is_inserted = false;

	if (!head || !entry) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	/* empty list: entry becomes the sole element */
	if (list_empty(head)) {
		list_add(&entry->list, head);
		return 0;
	}
	/* smaller than the current head: insert at the front */
	first = list_first_entry(head, struct msm_vidc_sort, list);
	if (entry->val < first->val) {
		list_add(&entry->list, head);
		return 0;
	}
	/* find consecutive nodes (prev, node) with prev->val <= val <= node->val */
	list_for_each_entry(node, head, list) {
		if (prev &&
		entry->val >= prev->val && entry->val <= node->val) {
			list_add(&entry->list, &prev->list);
			is_inserted = true;
			break;
		}
		prev = node;
	}
	/* larger than every existing value: append after the tail */
	if (!is_inserted && prev)
		list_add(&entry->list, &prev->list);
	return 0;
}
  2388. static struct msm_vidc_timestamp *msm_vidc_get_least_rank_ts(struct msm_vidc_inst *inst)
  2389. {
  2390. struct msm_vidc_timestamp *ts, *final = NULL;
  2391. u64 least_rank = INT_MAX;
  2392. if (!inst) {
  2393. d_vpr_e("%s: Invalid params\n", __func__);
  2394. return NULL;
  2395. }
  2396. list_for_each_entry(ts, &inst->timestamps.list, sort.list) {
  2397. if (ts->rank < least_rank) {
  2398. least_rank = ts->rank;
  2399. final = ts;
  2400. }
  2401. }
  2402. return final;
  2403. }
  2404. int msm_vidc_flush_ts(struct msm_vidc_inst *inst)
  2405. {
  2406. struct msm_vidc_timestamp *temp, *ts = NULL;
  2407. if (!inst) {
  2408. d_vpr_e("%s: Invalid params\n", __func__);
  2409. return -EINVAL;
  2410. }
  2411. list_for_each_entry_safe(ts, temp, &inst->timestamps.list, sort.list) {
  2412. i_vpr_l(inst, "%s: flushing ts: val %llu, rank %llu\n",
  2413. __func__, ts->sort.val, ts->rank);
  2414. list_del(&ts->sort.list);
  2415. msm_memory_pool_free(inst, ts);
  2416. }
  2417. inst->timestamps.count = 0;
  2418. inst->timestamps.rank = 0;
  2419. return 0;
  2420. }
  2421. int msm_vidc_update_timestamp_rate(struct msm_vidc_inst *inst, u64 timestamp)
  2422. {
  2423. struct msm_vidc_timestamp *ts, *prev = NULL;
  2424. int rc = 0;
  2425. u32 window_size = 0;
  2426. u32 timestamp_rate = 0;
  2427. u64 ts_ms = 0;
  2428. u32 counter = 0;
  2429. if (!inst) {
  2430. d_vpr_e("%s: Invalid params\n", __func__);
  2431. return -EINVAL;
  2432. }
  2433. ts = msm_memory_pool_alloc(inst, MSM_MEM_POOL_TIMESTAMP);
  2434. if (!ts) {
  2435. i_vpr_e(inst, "%s: ts alloc failed\n", __func__);
  2436. return -ENOMEM;
  2437. }
  2438. INIT_LIST_HEAD(&ts->sort.list);
  2439. ts->sort.val = timestamp;
  2440. ts->rank = inst->timestamps.rank++;
  2441. rc = msm_vidc_insert_sort(&inst->timestamps.list, &ts->sort);
  2442. if (rc)
  2443. return rc;
  2444. inst->timestamps.count++;
  2445. if (is_encode_session(inst))
  2446. window_size = ENC_FPS_WINDOW;
  2447. else
  2448. window_size = DEC_FPS_WINDOW;
  2449. /* keep sliding window */
  2450. if (inst->timestamps.count > window_size) {
  2451. ts = msm_vidc_get_least_rank_ts(inst);
  2452. if (!ts) {
  2453. i_vpr_e(inst, "%s: least rank ts is NULL\n", __func__);
  2454. return -EINVAL;
  2455. }
  2456. inst->timestamps.count--;
  2457. list_del(&ts->sort.list);
  2458. msm_memory_pool_free(inst, ts);
  2459. }
  2460. /* Calculate timestamp rate */
  2461. list_for_each_entry(ts, &inst->timestamps.list, sort.list) {
  2462. if (prev) {
  2463. if (ts->sort.val == prev->sort.val)
  2464. continue;
  2465. ts_ms += div_u64(ts->sort.val - prev->sort.val, 1000000);
  2466. counter++;
  2467. }
  2468. prev = ts;
  2469. }
  2470. if (ts_ms)
  2471. timestamp_rate = (u32)div_u64((u64)counter * 1000, ts_ms);
  2472. msm_vidc_update_cap_value(inst, TIMESTAMP_RATE, timestamp_rate << 16, __func__);
  2473. return 0;
  2474. }
  2475. int msm_vidc_ts_reorder_insert_timestamp(struct msm_vidc_inst *inst, u64 timestamp)
  2476. {
  2477. struct msm_vidc_timestamp *ts;
  2478. int rc = 0;
  2479. if (!inst) {
  2480. d_vpr_e("%s: Invalid params\n", __func__);
  2481. return -EINVAL;
  2482. }
  2483. /* allocate ts from pool */
  2484. ts = msm_memory_pool_alloc(inst, MSM_MEM_POOL_TIMESTAMP);
  2485. if (!ts) {
  2486. i_vpr_e(inst, "%s: ts alloc failed\n", __func__);
  2487. return -ENOMEM;
  2488. }
  2489. /* initialize ts node */
  2490. INIT_LIST_HEAD(&ts->sort.list);
  2491. ts->sort.val = timestamp;
  2492. rc = msm_vidc_insert_sort(&inst->ts_reorder.list, &ts->sort);
  2493. if (rc)
  2494. return rc;
  2495. inst->ts_reorder.count++;
  2496. return 0;
  2497. }
  2498. int msm_vidc_ts_reorder_remove_timestamp(struct msm_vidc_inst *inst, u64 timestamp)
  2499. {
  2500. struct msm_vidc_timestamp *ts, *temp;
  2501. if (!inst) {
  2502. d_vpr_e("%s: Invalid params\n", __func__);
  2503. return -EINVAL;
  2504. }
  2505. /* remove matching node */
  2506. list_for_each_entry_safe(ts, temp, &inst->ts_reorder.list, sort.list) {
  2507. if (ts->sort.val == timestamp) {
  2508. list_del_init(&ts->sort.list);
  2509. inst->ts_reorder.count--;
  2510. msm_memory_pool_free(inst, ts);
  2511. break;
  2512. }
  2513. }
  2514. return 0;
  2515. }
  2516. int msm_vidc_ts_reorder_get_first_timestamp(struct msm_vidc_inst *inst, u64 *timestamp)
  2517. {
  2518. struct msm_vidc_timestamp *ts;
  2519. if (!inst || !timestamp) {
  2520. d_vpr_e("%s: Invalid params\n", __func__);
  2521. return -EINVAL;
  2522. }
  2523. /* check if list empty */
  2524. if (list_empty(&inst->ts_reorder.list)) {
  2525. i_vpr_e(inst, "%s: list empty. ts %lld\n", __func__, timestamp);
  2526. return -EINVAL;
  2527. }
  2528. /* get 1st node from reorder list */
  2529. ts = list_first_entry(&inst->ts_reorder.list,
  2530. struct msm_vidc_timestamp, sort.list);
  2531. list_del_init(&ts->sort.list);
  2532. /* copy timestamp */
  2533. *timestamp = ts->sort.val;
  2534. inst->ts_reorder.count--;
  2535. msm_memory_pool_free(inst, ts);
  2536. return 0;
  2537. }
  2538. int msm_vidc_ts_reorder_flush(struct msm_vidc_inst *inst)
  2539. {
  2540. struct msm_vidc_timestamp *temp, *ts = NULL;
  2541. if (!inst) {
  2542. d_vpr_e("%s: Invalid params\n", __func__);
  2543. return -EINVAL;
  2544. }
  2545. /* flush all entries */
  2546. list_for_each_entry_safe(ts, temp, &inst->ts_reorder.list, sort.list) {
  2547. i_vpr_l(inst, "%s: flushing ts: val %lld\n", __func__, ts->sort.val);
  2548. list_del(&ts->sort.list);
  2549. msm_memory_pool_free(inst, ts);
  2550. }
  2551. inst->ts_reorder.count = 0;
  2552. return 0;
  2553. }
  2554. int msm_vidc_get_delayed_unmap(struct msm_vidc_inst *inst, struct msm_vidc_map *map)
  2555. {
  2556. int rc = 0;
  2557. if (!inst || !map) {
  2558. d_vpr_e("%s: invalid params\n", __func__);
  2559. return -EINVAL;
  2560. }
  2561. rc = msm_vidc_memory_map(inst->core, map);
  2562. if (rc)
  2563. return rc;
  2564. map->skip_delayed_unmap = 1;
  2565. return 0;
  2566. }
  2567. int msm_vidc_put_delayed_unmap(struct msm_vidc_inst *inst, struct msm_vidc_map *map)
  2568. {
  2569. int rc = 0;
  2570. if (!inst || !map) {
  2571. d_vpr_e("%s: invalid params\n", __func__);
  2572. return -EINVAL;
  2573. }
  2574. if (!map->skip_delayed_unmap) {
  2575. i_vpr_e(inst, "%s: no delayed unmap, addr %#x\n",
  2576. __func__, map->device_addr);
  2577. return -EINVAL;
  2578. }
  2579. map->skip_delayed_unmap = 0;
  2580. rc = msm_vidc_memory_unmap(inst->core, map);
  2581. if (rc)
  2582. i_vpr_e(inst, "%s: unmap failed\n", __func__);
  2583. return rc;
  2584. }
  2585. int msm_vidc_unmap_buffers(struct msm_vidc_inst *inst,
  2586. enum msm_vidc_buffer_type type)
  2587. {
  2588. int rc = 0;
  2589. struct msm_vidc_mappings *mappings;
  2590. struct msm_vidc_map *map, *dummy;
  2591. if (!inst) {
  2592. d_vpr_e("%s: invalid params\n", __func__);
  2593. return -EINVAL;
  2594. }
  2595. mappings = msm_vidc_get_mappings(inst, type, __func__);
  2596. if (!mappings)
  2597. return -EINVAL;
  2598. list_for_each_entry_safe(map, dummy, &mappings->list, list) {
  2599. msm_vidc_memory_unmap_completely(inst, map);
  2600. }
  2601. return rc;
  2602. }
/*
 * msm_vidc_unmap_driver_buf() - drop one mapping reference for @buf and
 * release the map node once its refcount reaches zero.
 *
 * @inst: video session instance
 * @buf: driver buffer whose dmabuf identifies the mapping
 *
 * Return: 0 on success, -EINVAL on bad params, missing mapping, or
 * unmap failure.
 */
int msm_vidc_unmap_driver_buf(struct msm_vidc_inst *inst,
	struct msm_vidc_buffer *buf)
{
	int rc = 0;
	struct msm_vidc_mappings *mappings;
	struct msm_vidc_map *map = NULL;
	bool found = false;

	if (!inst || !buf) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	mappings = msm_vidc_get_mappings(inst, buf->type, __func__);
	if (!mappings)
		return -EINVAL;
	/* sanity check to see if it was not removed */
	list_for_each_entry(map, &mappings->list, list) {
		if (map->dmabuf == buf->dmabuf) {
			found = true;
			break;
		}
	}
	if (!found) {
		print_vidc_buffer(VIDC_ERR, "err ", "no buf in mappings", inst, buf);
		return -EINVAL;
	}
	/* drop exactly one mapping reference */
	rc = msm_vidc_memory_unmap(inst->core, map);
	if (rc) {
		print_vidc_buffer(VIDC_ERR, "err ", "unmap failed", inst, buf);
		return -EINVAL;
	}
	/* finally delete if refcount is zero */
	if (!map->refcount) {
		msm_vidc_memory_put_dmabuf(inst, map->dmabuf);
		list_del(&map->list);
		msm_memory_pool_free(inst, map);
	}
	return rc;
}
/*
 * msm_vidc_map_driver_buf() - map @buf into device address space,
 * creating a tracking map node for a first-seen dmabuf.
 *
 * @inst: video session instance
 * @buf: driver buffer; buf->device_addr is set on success
 *
 * New decoder output buffers get an extra delayed-unmap mapping so the
 * device mapping outlives individual queue/dequeue cycles.
 *
 * Return: 0 on success, negative error code on failure. On failure,
 * state created for a brand-new map node is fully unwound.
 */
int msm_vidc_map_driver_buf(struct msm_vidc_inst *inst,
	struct msm_vidc_buffer *buf)
{
	int rc = 0;
	struct msm_vidc_mappings *mappings;
	struct msm_vidc_map *map;
	bool found = false;

	if (!inst || !buf) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	mappings = msm_vidc_get_mappings(inst, buf->type, __func__);
	if (!mappings)
		return -EINVAL;
	/*
	 * new buffer: map twice for delayed unmap feature sake
	 * existing buffer: map once
	 */
	list_for_each_entry(map, &mappings->list, list) {
		if (map->dmabuf == buf->dmabuf) {
			found = true;
			break;
		}
	}
	if (!found) {
		/* new buffer case */
		map = msm_memory_pool_alloc(inst, MSM_MEM_POOL_MAP);
		if (!map) {
			i_vpr_e(inst, "%s: alloc failed\n", __func__);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&map->list);
		list_add_tail(&map->list, &mappings->list);
		map->type = buf->type;
		map->dmabuf = msm_vidc_memory_get_dmabuf(inst, buf->fd);
		if (!map->dmabuf) {
			rc = -EINVAL;
			goto error;
		}
		map->region = msm_vidc_get_buffer_region(inst, buf->type, __func__);
		/* delayed unmap feature needed for decoder output buffers */
		if (is_decode_session(inst) && is_output_buffer(buf->type)) {
			rc = msm_vidc_get_delayed_unmap(inst, map);
			if (rc)
				goto error;
		}
	}
	/* take the per-use mapping reference */
	rc = msm_vidc_memory_map(inst->core, map);
	if (rc)
		goto error;
	buf->device_addr = map->device_addr;
	return 0;
error:
	/* unwind only what this call created for a brand-new node */
	if (!found) {
		if (is_decode_session(inst) && is_output_buffer(buf->type))
			msm_vidc_put_delayed_unmap(inst, map);
		msm_vidc_memory_put_dmabuf(inst, map->dmabuf);
		list_del_init(&map->list);
		msm_memory_pool_free(inst, map);
	}
	return rc;
}
  2703. int msm_vidc_put_driver_buf(struct msm_vidc_inst *inst,
  2704. struct msm_vidc_buffer *buf)
  2705. {
  2706. int rc = 0;
  2707. if (!inst || !buf) {
  2708. d_vpr_e("%s: invalid params\n", __func__);
  2709. return -EINVAL;
  2710. }
  2711. msm_vidc_unmap_driver_buf(inst, buf);
  2712. msm_vidc_memory_put_dmabuf(inst, buf->dmabuf);
  2713. /* delete the buffer from buffers->list */
  2714. list_del(&buf->list);
  2715. msm_memory_pool_free(inst, buf);
  2716. return rc;
  2717. }
/*
 * msm_vidc_get_driver_buf() - create and register a driver buffer for a
 * queued vb2 buffer.
 *
 * @inst: video session instance
 * @vb2: source vb2 buffer
 *
 * Allocates a node from the pool, links it on the per-type buffers
 * list, fills it from @vb2, takes a dmabuf reference, and maps it. The
 * buffer starts with the DEFERRED attribute set.
 *
 * Return: the driver buffer, or NULL on failure (partially constructed
 * state is released on the error path).
 */
struct msm_vidc_buffer *msm_vidc_get_driver_buf(struct msm_vidc_inst *inst,
	struct vb2_buffer *vb2)
{
	int rc = 0;
	struct msm_vidc_buffer *buf = NULL;
	struct msm_vidc_buffers *buffers;
	enum msm_vidc_buffer_type buf_type;

	if (!inst || !vb2) {
		d_vpr_e("%s: invalid params\n", __func__);
		return NULL;
	}
	buf_type = v4l2_type_to_driver(vb2->type, __func__);
	if (!buf_type)
		return NULL;
	buffers = msm_vidc_get_buffers(inst, buf_type, __func__);
	if (!buffers)
		return NULL;
	buf = msm_memory_pool_alloc(inst, MSM_MEM_POOL_BUFFER);
	if (!buf) {
		i_vpr_e(inst, "%s: alloc failed\n", __func__);
		return NULL;
	}
	INIT_LIST_HEAD(&buf->list);
	list_add_tail(&buf->list, &buffers->list);
	rc = vb2_buffer_to_driver(vb2, buf);
	if (rc)
		goto error;
	/*
	 * NOTE(review): error path below passes buf->dmabuf to put even
	 * when this get fails/was not reached — presumably pool nodes are
	 * zeroed and put handles NULL; confirm msm_memory_pool_alloc /
	 * msm_vidc_memory_put_dmabuf semantics.
	 */
	buf->dmabuf = msm_vidc_memory_get_dmabuf(inst, buf->fd);
	if (!buf->dmabuf)
		goto error;
	/* treat every buffer as deferred buffer initially */
	buf->attr |= MSM_VIDC_ATTR_DEFERRED;
	rc = msm_vidc_map_driver_buf(inst, buf);
	if (rc)
		goto error;
	return buf;
error:
	msm_vidc_memory_put_dmabuf(inst, buf->dmabuf);
	list_del(&buf->list);
	msm_memory_pool_free(inst, buf);
	return NULL;
}
  2760. struct msm_vidc_buffer *get_meta_buffer(struct msm_vidc_inst *inst,
  2761. struct msm_vidc_buffer *buf)
  2762. {
  2763. struct msm_vidc_buffer *mbuf;
  2764. struct msm_vidc_buffers *buffers;
  2765. bool found = false;
  2766. if (!inst || !buf) {
  2767. d_vpr_e("%s: invalid params\n", __func__);
  2768. return NULL;
  2769. }
  2770. if (buf->type == MSM_VIDC_BUF_INPUT) {
  2771. buffers = &inst->buffers.input_meta;
  2772. } else if (buf->type == MSM_VIDC_BUF_OUTPUT) {
  2773. buffers = &inst->buffers.output_meta;
  2774. } else {
  2775. i_vpr_e(inst, "%s: invalid buffer type %d\n",
  2776. __func__, buf->type);
  2777. return NULL;
  2778. }
  2779. list_for_each_entry(mbuf, &buffers->list, list) {
  2780. if (mbuf->index == buf->index) {
  2781. found = true;
  2782. break;
  2783. }
  2784. }
  2785. if (!found)
  2786. return NULL;
  2787. return mbuf;
  2788. }
  2789. bool msm_vidc_is_super_buffer(struct msm_vidc_inst *inst)
  2790. {
  2791. struct msm_vidc_inst_capability *capability = NULL;
  2792. if (!inst || !inst->capabilities) {
  2793. d_vpr_e("%s: Invalid params\n", __func__);
  2794. return false;
  2795. }
  2796. capability = inst->capabilities;
  2797. return !!capability->cap[SUPER_FRAME].value;
  2798. }
  2799. static bool is_single_session(struct msm_vidc_inst *inst)
  2800. {
  2801. struct msm_vidc_core *core;
  2802. u32 count = 0;
  2803. if (!inst) {
  2804. d_vpr_e("%s: Invalid params\n", __func__);
  2805. return false;
  2806. }
  2807. core = inst->core;
  2808. core_lock(core, __func__);
  2809. list_for_each_entry(inst, &core->instances, list)
  2810. count++;
  2811. core_unlock(core, __func__);
  2812. return count == 1;
  2813. }
/*
 * msm_vidc_allow_dcvs() - decide whether DCVS (dynamic clock/voltage
 * scaling) may be enabled for this session and record the decision in
 * inst->power.dcvs_mode (dcvs_flags is reset to 0 either way).
 *
 * The checks are evaluated in order and the first failing one logs the
 * reason and ends the evaluation, so the log order is meaningful.
 */
void msm_vidc_allow_dcvs(struct msm_vidc_inst *inst)
{
	bool allow = false;
	struct msm_vidc_core *core;
	u32 fps;

	if (!inst || !inst->core || !inst->capabilities) {
		d_vpr_e("%s: Invalid args: %pK\n", __func__, inst);
		return;
	}
	core = inst->core;
	/* module-level clock voting override disables dcvs */
	allow = !msm_vidc_clock_voting;
	if (!allow) {
		i_vpr_h(inst, "%s: core_clock_voting is set\n", __func__);
		goto exit;
	}
	/* platform capability gate */
	allow = core->capabilities[DCVS].value;
	if (!allow) {
		i_vpr_h(inst, "%s: core doesn't support dcvs\n", __func__);
		goto exit;
	}
	allow = !inst->decode_batch.enable;
	if (!allow) {
		i_vpr_h(inst, "%s: decode_batching enabled\n", __func__);
		goto exit;
	}
	allow = !msm_vidc_is_super_buffer(inst);
	if (!allow) {
		i_vpr_h(inst, "%s: encode_batching(super_buffer) enabled\n", __func__);
		goto exit;
	}
	allow = !is_thumbnail_session(inst);
	if (!allow) {
		i_vpr_h(inst, "%s: thumbnail session\n", __func__);
		goto exit;
	}
	allow = is_realtime_session(inst);
	if (!allow) {
		i_vpr_h(inst, "%s: non-realtime session\n", __func__);
		goto exit;
	}
	allow = !is_critical_priority_session(inst);
	if (!allow) {
		i_vpr_h(inst, "%s: critical priority session\n", __func__);
		goto exit;
	}
	allow = !is_image_session(inst);
	if (!allow) {
		i_vpr_h(inst, "%s: image session\n", __func__);
		goto exit;
	}
	allow = !is_lowlatency_session(inst);
	if (!allow) {
		i_vpr_h(inst, "%s: lowlatency session\n", __func__);
		goto exit;
	}
	/* decoders at or above the platform max fps do not get dcvs */
	fps = msm_vidc_get_fps(inst);
	if (is_decode_session(inst) &&
	fps >= inst->capabilities->cap[FRAME_RATE].max) {
		allow = false;
		i_vpr_h(inst, "%s: unsupported fps %d\n", __func__, fps);
		goto exit;
	}
exit:
	i_vpr_hp(inst, "%s: dcvs: %s\n", __func__, allow ? "enabled" : "disabled");
	inst->power.dcvs_flags = 0;
	inst->power.dcvs_mode = allow;
}
/*
 * msm_vidc_allow_decode_batch() - decide whether decode batching may
 * stay enabled for this session.
 *
 * The checks run in order; the first failing one logs the reason and
 * ends the evaluation. Batching requires: batching currently enabled,
 * platform support, a single realtime non-thumbnail non-image decode
 * session that is not low latency, with fps and mbs/frame below the
 * BATCH_FPS / BATCH_MBPF limits.
 *
 * Return: true when batching is allowed, false otherwise.
 */
bool msm_vidc_allow_decode_batch(struct msm_vidc_inst *inst)
{
	struct msm_vidc_inst_capability *capability;
	struct msm_vidc_core *core;
	bool allow = false;
	u32 value = 0;

	if (!inst || !inst->core || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return false;
	}
	core = inst->core;
	capability = inst->capabilities;
	allow = inst->decode_batch.enable;
	if (!allow) {
		i_vpr_h(inst, "%s: batching already disabled\n", __func__);
		goto exit;
	}
	allow = core->capabilities[DECODE_BATCH].value;
	if (!allow) {
		i_vpr_h(inst, "%s: core doesn't support batching\n", __func__);
		goto exit;
	}
	allow = is_single_session(inst);
	if (!allow) {
		i_vpr_h(inst, "%s: multiple sessions running\n", __func__);
		goto exit;
	}
	allow = is_decode_session(inst);
	if (!allow) {
		i_vpr_h(inst, "%s: not a decoder session\n", __func__);
		goto exit;
	}
	allow = !is_thumbnail_session(inst);
	if (!allow) {
		i_vpr_h(inst, "%s: thumbnail session\n", __func__);
		goto exit;
	}
	allow = !is_image_session(inst);
	if (!allow) {
		i_vpr_h(inst, "%s: image session\n", __func__);
		goto exit;
	}
	allow = is_realtime_session(inst);
	if (!allow) {
		i_vpr_h(inst, "%s: non-realtime session\n", __func__);
		goto exit;
	}
	allow = !is_lowlatency_session(inst);
	if (!allow) {
		i_vpr_h(inst, "%s: lowlatency session\n", __func__);
		goto exit;
	}
	/* fps must be strictly below the batching threshold */
	value = msm_vidc_get_fps(inst);
	allow = value < capability->cap[BATCH_FPS].value;
	if (!allow) {
		i_vpr_h(inst, "%s: unsupported fps %u, max %u\n", __func__,
			value, capability->cap[BATCH_FPS].value);
		goto exit;
	}
	/* per-frame macroblock count must also be below its threshold */
	value = msm_vidc_get_mbs_per_frame(inst);
	allow = value < capability->cap[BATCH_MBPF].value;
	if (!allow) {
		i_vpr_h(inst, "%s: unsupported mbpf %u, max %u\n", __func__,
			value, capability->cap[BATCH_MBPF].value);
		goto exit;
	}
exit:
	i_vpr_hp(inst, "%s: batching: %s\n", __func__, allow ? "enabled" : "disabled");
	return allow;
}
  2951. static void msm_vidc_update_input_cr(struct msm_vidc_inst *inst, u32 idx, u32 cr)
  2952. {
  2953. struct msm_vidc_input_cr_data *temp = NULL, *next = NULL;
  2954. bool found = false;
  2955. list_for_each_entry_safe(temp, next, &inst->enc_input_crs, list) {
  2956. if (temp->index == idx) {
  2957. temp->input_cr = cr;
  2958. found = true;
  2959. break;
  2960. }
  2961. }
  2962. if (!found) {
  2963. temp = NULL;
  2964. if (msm_vidc_vmem_alloc(sizeof(*temp), (void **)&temp, __func__))
  2965. return;
  2966. temp->index = idx;
  2967. temp->input_cr = cr;
  2968. list_add_tail(&temp->list, &inst->enc_input_crs);
  2969. }
  2970. }
  2971. static void msm_vidc_free_input_cr_list(struct msm_vidc_inst *inst)
  2972. {
  2973. struct msm_vidc_input_cr_data *temp, *next;
  2974. list_for_each_entry_safe(temp, next, &inst->enc_input_crs, list) {
  2975. list_del(&temp->list);
  2976. msm_vidc_vmem_free((void **)&temp);
  2977. }
  2978. INIT_LIST_HEAD(&inst->enc_input_crs);
  2979. }
  2980. void msm_vidc_update_stats(struct msm_vidc_inst *inst,
  2981. struct msm_vidc_buffer *buf, enum msm_vidc_debugfs_event etype)
  2982. {
  2983. if (!inst || !buf || !inst->capabilities) {
  2984. d_vpr_e("%s: invalid params\n", __func__);
  2985. return;
  2986. }
  2987. if ((is_decode_session(inst) && etype == MSM_VIDC_DEBUGFS_EVENT_ETB) ||
  2988. (is_encode_session(inst) && etype == MSM_VIDC_DEBUGFS_EVENT_FBD))
  2989. inst->stats.data_size += buf->data_size;
  2990. msm_vidc_debugfs_update(inst, etype);
  2991. }
  2992. void msm_vidc_print_stats(struct msm_vidc_inst *inst)
  2993. {
  2994. u32 frame_rate, operating_rate, achieved_fps, priority, etb, ebd, ftb, fbd, dt_ms;
  2995. u64 bitrate_kbps = 0, time_ms = ktime_get_ns() / 1000 / 1000;
  2996. if (!inst || !inst->capabilities) {
  2997. d_vpr_e("%s: invalid params\n", __func__);
  2998. return;
  2999. }
  3000. etb = inst->debug_count.etb - inst->stats.count.etb;
  3001. ebd = inst->debug_count.ebd - inst->stats.count.ebd;
  3002. ftb = inst->debug_count.ftb - inst->stats.count.ftb;
  3003. fbd = inst->debug_count.fbd - inst->stats.count.fbd;
  3004. frame_rate = inst->capabilities->cap[FRAME_RATE].value >> 16;
  3005. operating_rate = inst->capabilities->cap[OPERATING_RATE].value >> 16;
  3006. priority = inst->capabilities->cap[PRIORITY].value;
  3007. dt_ms = time_ms - inst->stats.time_ms;
  3008. achieved_fps = (fbd * 1000) / dt_ms;
  3009. bitrate_kbps = (inst->stats.data_size * 8 * 1000) / (dt_ms * 1024);
  3010. i_vpr_hp(inst,
  3011. "stats: counts (etb,ebd,ftb,fbd): %u %u %u %u (total %llu %llu %llu %llu), achieved bitrate %lldKbps fps %u/s, frame rate %u, operating rate %u, priority %u, dt %ums\n",
  3012. etb, ebd, ftb, fbd, inst->debug_count.etb, inst->debug_count.ebd,
  3013. inst->debug_count.ftb, inst->debug_count.fbd,
  3014. bitrate_kbps, achieved_fps, frame_rate, operating_rate, priority, dt_ms);
  3015. inst->stats.count = inst->debug_count;
  3016. inst->stats.data_size = 0;
  3017. inst->stats.time_ms = time_ms;
  3018. }
  3019. int schedule_stats_work(struct msm_vidc_inst *inst)
  3020. {
  3021. struct msm_vidc_core *core;
  3022. if (!inst || !inst->core) {
  3023. d_vpr_e("%s: invalid params\n", __func__);
  3024. return -EINVAL;
  3025. }
  3026. /**
  3027. * Hfi session is already closed and inst also going to be
  3028. * closed soon. So skip scheduling new stats_work to avoid
  3029. * use-after-free issues with close sequence.
  3030. */
  3031. if (!inst->packet) {
  3032. i_vpr_e(inst, "skip scheduling stats_work\n");
  3033. return 0;
  3034. }
  3035. core = inst->core;
  3036. mod_delayed_work(inst->workq, &inst->stats_work,
  3037. msecs_to_jiffies(core->capabilities[STATS_TIMEOUT_MS].value));
  3038. return 0;
  3039. }
/*
 * Synchronously cancel the delayed stats work for this instance.
 * Blocks until any in-flight stats handler has finished; safe to call
 * even if the work was never scheduled. Returns 0, or -EINVAL if
 * inst is NULL.
 */
int cancel_stats_work_sync(struct msm_vidc_inst *inst)
{
	if (!inst) {
		d_vpr_e("%s: Invalid arguments\n", __func__);
		return -EINVAL;
	}
	cancel_delayed_work_sync(&inst->stats_work);
	return 0;
}
  3049. void msm_vidc_stats_handler(struct work_struct *work)
  3050. {
  3051. struct msm_vidc_inst *inst;
  3052. inst = container_of(work, struct msm_vidc_inst, stats_work.work);
  3053. inst = get_inst_ref(g_core, inst);
  3054. if (!inst || !inst->packet) {
  3055. d_vpr_e("%s: invalid params\n", __func__);
  3056. return;
  3057. }
  3058. inst_lock(inst, __func__);
  3059. msm_vidc_print_stats(inst);
  3060. schedule_stats_work(inst);
  3061. inst_unlock(inst, __func__);
  3062. put_inst(inst);
  3063. }
/*
 * Queue one driver buffer (plus its meta buffer, if any) to firmware.
 * Performs per-direction preprocessing (encoder CR bookkeeping, decoder
 * codec-config tagging, read-only handling), submits via the HFI queue
 * ops, then flips the buffer attrs DEFERRED -> QUEUED and updates
 * timestamp-reorder and debugfs statistics.
 * Returns 0 on success or a negative errno.
 */
static int msm_vidc_queue_buffer(struct msm_vidc_inst *inst, struct msm_vidc_buffer *buf)
{
	struct msm_vidc_buffer *meta;
	enum msm_vidc_debugfs_event etype;
	int rc = 0;
	u32 cr = 0;

	if (!inst || !buf || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	/* encoder input: snapshot the per-frame compression ratio, then reset the cap */
	if (is_encode_session(inst) && is_input_buffer(buf->type)) {
		cr = inst->capabilities->cap[ENC_IP_CR].value;
		msm_vidc_update_input_cr(inst, buf->index, cr);
		msm_vidc_update_cap_value(inst, ENC_IP_CR, 0, __func__);
	}
	/* decoder input carrying codec-config (header) data: tag it for firmware,
	 * then clear the one-shot CODEC_CONFIG cap */
	if (is_decode_session(inst) && is_input_buffer(buf->type) &&
		inst->capabilities->cap[CODEC_CONFIG].value) {
		buf->flags |= MSM_VIDC_BUF_FLAG_CODECCONFIG;
		msm_vidc_update_cap_value(inst, CODEC_CONFIG, 0, __func__);
	}
	/* decoder output may still be held read-only by firmware (reference frame) */
	if (is_decode_session(inst) && is_output_buffer(buf->type)) {
		rc = msm_vidc_process_readonly_buffers(inst, buf);
		if (rc)
			return rc;
	}
	print_vidc_buffer(VIDC_HIGH, "high", "qbuf", inst, buf);
	meta = get_meta_buffer(inst, buf);
	if (meta)
		print_vidc_buffer(VIDC_LOW, "low ", "qbuf", inst, meta);
	/* a companion meta buffer is mandatory when metadata is enabled for this port */
	if (!meta && is_meta_enabled(inst, buf->type)) {
		print_vidc_buffer(VIDC_ERR, "err ", "missing meta for", inst, buf);
		return -EINVAL;
	}
	if (msm_vidc_is_super_buffer(inst) && is_input_buffer(buf->type))
		rc = venus_hfi_queue_super_buffer(inst, buf, meta);
	else
		rc = venus_hfi_queue_buffer(inst, buf, meta);
	if (rc)
		return rc;
	/* firmware owns the buffer(s) now: DEFERRED -> QUEUED */
	buf->attr &= ~MSM_VIDC_ATTR_DEFERRED;
	buf->attr |= MSM_VIDC_ATTR_QUEUED;
	if (meta) {
		meta->attr &= ~MSM_VIDC_ATTR_DEFERRED;
		meta->attr |= MSM_VIDC_ATTR_QUEUED;
	}
	/* insert timestamp for ts_reorder enable case */
	if (is_ts_reorder_allowed(inst) && is_input_buffer(buf->type)) {
		rc = msm_vidc_ts_reorder_insert_timestamp(inst, buf->timestamp);
		if (rc)
			i_vpr_e(inst, "%s: insert timestamp failed\n", __func__);
	}
	/* queued input frames feed the power-scaling heuristics */
	if (is_input_buffer(buf->type))
		inst->power.buffer_counter++;
	if (is_input_buffer(buf->type))
		etype = MSM_VIDC_DEBUGFS_EVENT_ETB;
	else
		etype = MSM_VIDC_DEBUGFS_EVENT_FTB;
	msm_vidc_update_stats(inst, buf, etype);
	return 0;
}
  3124. int msm_vidc_alloc_and_queue_input_internal_buffers(struct msm_vidc_inst *inst)
  3125. {
  3126. int rc = 0;
  3127. if (!inst) {
  3128. d_vpr_e("%s: invalid params\n", __func__);
  3129. return -EINVAL;
  3130. }
  3131. rc = msm_vdec_get_input_internal_buffers(inst);
  3132. if (rc)
  3133. return rc;
  3134. rc = msm_vdec_release_input_internal_buffers(inst);
  3135. if (rc)
  3136. return rc;
  3137. rc = msm_vdec_create_input_internal_buffers(inst);
  3138. if (rc)
  3139. return rc;
  3140. rc = msm_vdec_queue_input_internal_buffers(inst);
  3141. if (rc)
  3142. return rc;
  3143. return rc;
  3144. }
  3145. int msm_vidc_queue_deferred_buffers(struct msm_vidc_inst *inst, enum msm_vidc_buffer_type buf_type)
  3146. {
  3147. struct msm_vidc_buffers *buffers;
  3148. struct msm_vidc_buffer *buf;
  3149. int rc = 0;
  3150. if (!inst || !buf_type) {
  3151. d_vpr_e("%s: invalid params\n", __func__);
  3152. return -EINVAL;
  3153. }
  3154. buffers = msm_vidc_get_buffers(inst, buf_type, __func__);
  3155. if (!buffers)
  3156. return -EINVAL;
  3157. msm_vidc_scale_power(inst, true);
  3158. list_for_each_entry(buf, &buffers->list, list) {
  3159. if (!(buf->attr & MSM_VIDC_ATTR_DEFERRED))
  3160. continue;
  3161. rc = msm_vidc_queue_buffer(inst, buf);
  3162. if (rc)
  3163. return rc;
  3164. }
  3165. return 0;
  3166. }
  3167. int msm_vidc_queue_buffer_single(struct msm_vidc_inst *inst, struct vb2_buffer *vb2)
  3168. {
  3169. int rc = 0;
  3170. struct msm_vidc_buffer *buf;
  3171. struct msm_vidc_fence *fence = NULL;
  3172. enum msm_vidc_allow allow;
  3173. if (!inst || !vb2 || !inst->capabilities) {
  3174. d_vpr_e("%s: invalid params\n", __func__);
  3175. return -EINVAL;
  3176. }
  3177. buf = msm_vidc_get_driver_buf(inst, vb2);
  3178. if (!buf)
  3179. return -EINVAL;
  3180. if (is_meta_rx_inp_enabled(inst, META_OUTBUF_FENCE) &&
  3181. is_output_buffer(buf->type)) {
  3182. fence = msm_vidc_fence_create(inst);
  3183. if (!fence)
  3184. return rc;
  3185. buf->fence_id = fence->dma_fence.seqno;
  3186. }
  3187. allow = msm_vidc_allow_qbuf(inst, vb2->type);
  3188. if (allow == MSM_VIDC_DISALLOW) {
  3189. i_vpr_e(inst, "%s: qbuf not allowed\n", __func__);
  3190. rc = -EINVAL;
  3191. goto exit;
  3192. } else if (allow == MSM_VIDC_DEFER) {
  3193. print_vidc_buffer(VIDC_LOW, "low ", "qbuf deferred", inst, buf);
  3194. rc = 0;
  3195. goto exit;
  3196. }
  3197. msm_vidc_scale_power(inst, is_input_buffer(buf->type));
  3198. rc = msm_vidc_queue_buffer(inst, buf);
  3199. if (rc)
  3200. goto exit;
  3201. exit:
  3202. if (rc) {
  3203. i_vpr_e(inst, "%s: qbuf failed\n", __func__);
  3204. if (fence)
  3205. msm_vidc_fence_destroy(inst, (u32)fence->dma_fence.seqno);
  3206. }
  3207. return rc;
  3208. }
  3209. int msm_vidc_destroy_internal_buffer(struct msm_vidc_inst *inst,
  3210. struct msm_vidc_buffer *buffer)
  3211. {
  3212. struct msm_vidc_buffers *buffers;
  3213. struct msm_vidc_allocations *allocations;
  3214. struct msm_vidc_mappings *mappings;
  3215. struct msm_vidc_alloc *alloc, *alloc_dummy;
  3216. struct msm_vidc_map *map, *map_dummy;
  3217. struct msm_vidc_buffer *buf, *dummy;
  3218. if (!inst || !inst->core) {
  3219. d_vpr_e("%s: invalid params\n", __func__);
  3220. return -EINVAL;
  3221. }
  3222. if (!is_internal_buffer(buffer->type)) {
  3223. i_vpr_e(inst, "%s: type: %s is not internal\n",
  3224. __func__, buf_name(buffer->type));
  3225. return 0;
  3226. }
  3227. i_vpr_h(inst, "%s: destroy: type: %8s, size: %9u, device_addr %#x\n", __func__,
  3228. buf_name(buffer->type), buffer->buffer_size, buffer->device_addr);
  3229. buffers = msm_vidc_get_buffers(inst, buffer->type, __func__);
  3230. if (!buffers)
  3231. return -EINVAL;
  3232. allocations = msm_vidc_get_allocations(inst, buffer->type, __func__);
  3233. if (!allocations)
  3234. return -EINVAL;
  3235. mappings = msm_vidc_get_mappings(inst, buffer->type, __func__);
  3236. if (!mappings)
  3237. return -EINVAL;
  3238. list_for_each_entry_safe(map, map_dummy, &mappings->list, list) {
  3239. if (map->dmabuf == buffer->dmabuf) {
  3240. msm_vidc_memory_unmap(inst->core, map);
  3241. list_del(&map->list);
  3242. msm_memory_pool_free(inst, map);
  3243. break;
  3244. }
  3245. }
  3246. list_for_each_entry_safe(alloc, alloc_dummy, &allocations->list, list) {
  3247. if (alloc->dmabuf == buffer->dmabuf) {
  3248. msm_vidc_memory_free(inst->core, alloc);
  3249. list_del(&alloc->list);
  3250. msm_memory_pool_free(inst, alloc);
  3251. break;
  3252. }
  3253. }
  3254. list_for_each_entry_safe(buf, dummy, &buffers->list, list) {
  3255. if (buf->dmabuf == buffer->dmabuf) {
  3256. list_del(&buf->list);
  3257. msm_memory_pool_free(inst, buf);
  3258. break;
  3259. }
  3260. }
  3261. buffers->size = 0;
  3262. buffers->min_count = buffers->extra_count = buffers->actual_count = 0;
  3263. return 0;
  3264. }
  3265. int msm_vidc_get_internal_buffers(struct msm_vidc_inst *inst,
  3266. enum msm_vidc_buffer_type buffer_type)
  3267. {
  3268. u32 buf_size;
  3269. u32 buf_count;
  3270. struct msm_vidc_core *core;
  3271. struct msm_vidc_buffers *buffers;
  3272. if (!inst || !inst->core) {
  3273. d_vpr_e("%s: invalid params\n", __func__);
  3274. return -EINVAL;
  3275. }
  3276. core = inst->core;
  3277. buf_size = call_session_op(core, buffer_size,
  3278. inst, buffer_type);
  3279. buf_count = call_session_op(core, min_count,
  3280. inst, buffer_type);
  3281. buffers = msm_vidc_get_buffers(inst, buffer_type, __func__);
  3282. if (!buffers)
  3283. return -EINVAL;
  3284. if (buf_size <= buffers->size &&
  3285. buf_count <= buffers->min_count) {
  3286. buffers->reuse = true;
  3287. } else {
  3288. buffers->reuse = false;
  3289. buffers->size = buf_size;
  3290. buffers->min_count = buf_count;
  3291. }
  3292. return 0;
  3293. }
  3294. int msm_vidc_create_internal_buffer(struct msm_vidc_inst *inst,
  3295. enum msm_vidc_buffer_type buffer_type, u32 index)
  3296. {
  3297. int rc = 0;
  3298. struct msm_vidc_buffers *buffers;
  3299. struct msm_vidc_allocations *allocations;
  3300. struct msm_vidc_mappings *mappings;
  3301. struct msm_vidc_buffer *buffer;
  3302. struct msm_vidc_alloc *alloc;
  3303. struct msm_vidc_map *map;
  3304. if (!inst || !inst->core) {
  3305. d_vpr_e("%s: invalid params\n", __func__);
  3306. return -EINVAL;
  3307. }
  3308. if (!is_internal_buffer(buffer_type)) {
  3309. i_vpr_e(inst, "%s: type %s is not internal\n",
  3310. __func__, buf_name(buffer_type));
  3311. return 0;
  3312. }
  3313. buffers = msm_vidc_get_buffers(inst, buffer_type, __func__);
  3314. if (!buffers)
  3315. return -EINVAL;
  3316. allocations = msm_vidc_get_allocations(inst, buffer_type, __func__);
  3317. if (!allocations)
  3318. return -EINVAL;
  3319. mappings = msm_vidc_get_mappings(inst, buffer_type, __func__);
  3320. if (!mappings)
  3321. return -EINVAL;
  3322. if (!buffers->size)
  3323. return 0;
  3324. buffer = msm_memory_pool_alloc(inst, MSM_MEM_POOL_BUFFER);
  3325. if (!buffer) {
  3326. i_vpr_e(inst, "%s: buf alloc failed\n", __func__);
  3327. return -ENOMEM;
  3328. }
  3329. INIT_LIST_HEAD(&buffer->list);
  3330. buffer->type = buffer_type;
  3331. buffer->index = index;
  3332. buffer->buffer_size = buffers->size;
  3333. list_add_tail(&buffer->list, &buffers->list);
  3334. alloc = msm_memory_pool_alloc(inst, MSM_MEM_POOL_ALLOC);
  3335. if (!alloc) {
  3336. i_vpr_e(inst, "%s: alloc failed\n", __func__);
  3337. return -ENOMEM;
  3338. }
  3339. INIT_LIST_HEAD(&alloc->list);
  3340. alloc->type = buffer_type;
  3341. alloc->region = msm_vidc_get_buffer_region(inst,
  3342. buffer_type, __func__);
  3343. alloc->size = buffer->buffer_size;
  3344. alloc->secure = is_secure_region(alloc->region);
  3345. rc = msm_vidc_memory_alloc(inst->core, alloc);
  3346. if (rc)
  3347. return -ENOMEM;
  3348. list_add_tail(&alloc->list, &allocations->list);
  3349. map = msm_memory_pool_alloc(inst, MSM_MEM_POOL_MAP);
  3350. if (!map) {
  3351. i_vpr_e(inst, "%s: map alloc failed\n", __func__);
  3352. return -ENOMEM;
  3353. }
  3354. INIT_LIST_HEAD(&map->list);
  3355. map->type = alloc->type;
  3356. map->region = alloc->region;
  3357. map->dmabuf = alloc->dmabuf;
  3358. rc = msm_vidc_memory_map(inst->core, map);
  3359. if (rc)
  3360. return -ENOMEM;
  3361. list_add_tail(&map->list, &mappings->list);
  3362. buffer->dmabuf = alloc->dmabuf;
  3363. buffer->device_addr = map->device_addr;
  3364. i_vpr_h(inst, "%s: create: type: %8s, size: %9u, device_addr %#x\n", __func__,
  3365. buf_name(buffer_type), buffers->size, buffer->device_addr);
  3366. return 0;
  3367. }
  3368. int msm_vidc_create_internal_buffers(struct msm_vidc_inst *inst,
  3369. enum msm_vidc_buffer_type buffer_type)
  3370. {
  3371. int rc = 0;
  3372. struct msm_vidc_buffers *buffers;
  3373. int i;
  3374. if (!inst || !inst->core) {
  3375. d_vpr_e("%s: invalid params\n", __func__);
  3376. return -EINVAL;
  3377. }
  3378. buffers = msm_vidc_get_buffers(inst, buffer_type, __func__);
  3379. if (!buffers)
  3380. return -EINVAL;
  3381. if (buffers->reuse) {
  3382. i_vpr_l(inst, "%s: reuse enabled for %s\n", __func__, buf_name(buffer_type));
  3383. return 0;
  3384. }
  3385. for (i = 0; i < buffers->min_count; i++) {
  3386. rc = msm_vidc_create_internal_buffer(inst, buffer_type, i);
  3387. if (rc)
  3388. return rc;
  3389. }
  3390. return rc;
  3391. }
  3392. int msm_vidc_queue_internal_buffers(struct msm_vidc_inst *inst,
  3393. enum msm_vidc_buffer_type buffer_type)
  3394. {
  3395. int rc = 0;
  3396. struct msm_vidc_buffers *buffers;
  3397. struct msm_vidc_buffer *buffer, *dummy;
  3398. if (!inst || !inst->core) {
  3399. d_vpr_e("%s: invalid params\n", __func__);
  3400. return -EINVAL;
  3401. }
  3402. if (!is_internal_buffer(buffer_type)) {
  3403. i_vpr_e(inst, "%s: %s is not internal\n", __func__, buf_name(buffer_type));
  3404. return 0;
  3405. }
  3406. buffers = msm_vidc_get_buffers(inst, buffer_type, __func__);
  3407. if (!buffers)
  3408. return -EINVAL;
  3409. if (buffers->reuse) {
  3410. i_vpr_l(inst, "%s: reuse enabled for %s buf\n",
  3411. __func__, buf_name(buffer_type));
  3412. return 0;
  3413. }
  3414. if (is_decode_session(inst) && buffer_type == MSM_VIDC_BUF_COMV) {
  3415. rc = msm_vdec_set_num_comv(inst);
  3416. if (rc)
  3417. return rc;
  3418. }
  3419. list_for_each_entry_safe(buffer, dummy, &buffers->list, list) {
  3420. /* do not queue pending release buffers */
  3421. if (buffer->flags & MSM_VIDC_ATTR_PENDING_RELEASE)
  3422. continue;
  3423. /* do not queue already queued buffers */
  3424. if (buffer->attr & MSM_VIDC_ATTR_QUEUED)
  3425. continue;
  3426. rc = venus_hfi_queue_buffer(inst, buffer, NULL);
  3427. if (rc)
  3428. return rc;
  3429. /* mark queued */
  3430. buffer->attr |= MSM_VIDC_ATTR_QUEUED;
  3431. i_vpr_h(inst, "%s: queue: type: %8s, size: %9u, device_addr %#x\n", __func__,
  3432. buf_name(buffer->type), buffer->buffer_size, buffer->device_addr);
  3433. }
  3434. return 0;
  3435. }
  3436. int msm_vidc_alloc_and_queue_session_internal_buffers(struct msm_vidc_inst *inst,
  3437. enum msm_vidc_buffer_type buffer_type)
  3438. {
  3439. int rc = 0;
  3440. if (!inst || !inst->core) {
  3441. d_vpr_e("%s: invalid params\n", __func__);
  3442. return -EINVAL;
  3443. }
  3444. if (buffer_type != MSM_VIDC_BUF_ARP &&
  3445. buffer_type != MSM_VIDC_BUF_PERSIST) {
  3446. i_vpr_e(inst, "%s: invalid buffer type: %s\n",
  3447. __func__, buf_name(buffer_type));
  3448. rc = -EINVAL;
  3449. goto exit;
  3450. }
  3451. rc = msm_vidc_get_internal_buffers(inst, buffer_type);
  3452. if (rc)
  3453. goto exit;
  3454. rc = msm_vidc_create_internal_buffers(inst, buffer_type);
  3455. if (rc)
  3456. goto exit;
  3457. rc = msm_vidc_queue_internal_buffers(inst, buffer_type);
  3458. if (rc)
  3459. goto exit;
  3460. exit:
  3461. return rc;
  3462. }
  3463. int msm_vidc_release_internal_buffers(struct msm_vidc_inst *inst,
  3464. enum msm_vidc_buffer_type buffer_type)
  3465. {
  3466. int rc = 0;
  3467. struct msm_vidc_buffers *buffers;
  3468. struct msm_vidc_buffer *buffer, *dummy;
  3469. if (!inst || !inst->core) {
  3470. d_vpr_e("%s: invalid params\n", __func__);
  3471. return -EINVAL;
  3472. }
  3473. if (!is_internal_buffer(buffer_type)) {
  3474. i_vpr_e(inst, "%s: %s is not internal\n",
  3475. __func__, buf_name(buffer_type));
  3476. return 0;
  3477. }
  3478. buffers = msm_vidc_get_buffers(inst, buffer_type, __func__);
  3479. if (!buffers)
  3480. return -EINVAL;
  3481. if (buffers->reuse) {
  3482. i_vpr_l(inst, "%s: reuse enabled for %s buf\n",
  3483. __func__, buf_name(buffer_type));
  3484. return 0;
  3485. }
  3486. list_for_each_entry_safe(buffer, dummy, &buffers->list, list) {
  3487. /* do not release already pending release buffers */
  3488. if (buffer->attr & MSM_VIDC_ATTR_PENDING_RELEASE)
  3489. continue;
  3490. /* release only queued buffers */
  3491. if (!(buffer->attr & MSM_VIDC_ATTR_QUEUED))
  3492. continue;
  3493. rc = venus_hfi_release_buffer(inst, buffer);
  3494. if (rc)
  3495. return rc;
  3496. /* mark pending release */
  3497. buffer->attr |= MSM_VIDC_ATTR_PENDING_RELEASE;
  3498. i_vpr_h(inst, "%s: release: type: %8s, size: %9u, device_addr %#x\n", __func__,
  3499. buf_name(buffer->type), buffer->buffer_size, buffer->device_addr);
  3500. }
  3501. return 0;
  3502. }
/*
 * Hand a finished driver buffer back to the vb2 framework: locate the
 * matching active vb2 buffer on the port's queued list, copy flags,
 * timestamp and payload size across, and complete it with
 * vb2_buffer_done(). Returns 0 on success (or when completion is not
 * required), negative errno otherwise.
 */
static int msm_vidc_vb2_buffer_done(struct msm_vidc_inst *inst,
	struct msm_vidc_buffer *buf)
{
	int type, port, state;
	struct vb2_queue *q;
	struct vb2_buffer *vb2;
	struct vb2_v4l2_buffer *vbuf;
	bool found;

	if (!inst || !inst->capabilities || !buf) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	type = v4l2_type_from_driver(buf->type, __func__);
	if (!type)
		return -EINVAL;
	port = v4l2_type_to_driver_port(inst, type, __func__);
	if (port < 0)
		return -EINVAL;
	/*
	 * vb2_buffer_done not required if input metadata
	 * buffer sent via request api
	 */
	if (buf->type == MSM_VIDC_BUF_INPUT_META &&
		inst->capabilities->cap[INPUT_META_VIA_REQUEST].value)
		return 0;
	q = inst->bufq[port].vb2q;
	if (!q->streaming) {
		i_vpr_e(inst, "%s: port %d is not streaming\n",
			__func__, port);
		return -EINVAL;
	}
	/* match by index among buffers currently owned by the driver (ACTIVE) */
	found = false;
	list_for_each_entry(vb2, &q->queued_list, queued_entry) {
		if (vb2->state != VB2_BUF_STATE_ACTIVE)
			continue;
		if (vb2->index == buf->index) {
			found = true;
			break;
		}
	}
	if (!found) {
		print_vidc_buffer(VIDC_ERR, "err ", "vb2 not found for", inst, buf);
		return -EINVAL;
	}
	/**
	 * v4l2 clears buffer state related flags. For driver errors
	 * send state as error to avoid skipping V4L2_BUF_FLAG_ERROR
	 * flag at v4l2 side.
	 */
	if (buf->flags & MSM_VIDC_BUF_FLAG_ERROR)
		state = VB2_BUF_STATE_ERROR;
	else
		state = VB2_BUF_STATE_DONE;
	vbuf = to_vb2_v4l2_buffer(vb2);
	vbuf->flags = buf->flags;
	vb2->timestamp = buf->timestamp;
	/* bytesused counts from the start of the plane, so include data_offset */
	vb2->planes[0].bytesused = buf->data_size + vb2->planes[0].data_offset;
	vb2_buffer_done(vb2, state);
	return 0;
}
  3563. static int msm_vidc_v4l2_buffer_event(struct msm_vidc_inst *inst,
  3564. struct msm_vidc_buffer *buf)
  3565. {
  3566. int rc = 0;
  3567. struct v4l2_event event = {0};
  3568. struct v4l2_event_vidc_metadata *event_data = NULL;
  3569. if (!inst || !buf) {
  3570. d_vpr_e("%s: invalid params\n", __func__);
  3571. return -EINVAL;
  3572. }
  3573. if (buf->type != MSM_VIDC_BUF_INPUT_META) {
  3574. i_vpr_e(inst, "%s: unsupported buffer type %s\n",
  3575. __func__, buf_name(buf->type));
  3576. return -EINVAL;
  3577. }
  3578. event.type = V4L2_EVENT_VIDC_METADATA;
  3579. event_data = (struct v4l2_event_vidc_metadata *)event.u.data;
  3580. event_data->type = INPUT_META_PLANE;
  3581. event_data->fd = buf->fd;
  3582. event_data->index = buf->index;
  3583. event_data->bytesused = buf->data_size;
  3584. event_data->offset = buf->data_offset;
  3585. v4l2_event_queue_fh(&inst->event_handler, &event);
  3586. return rc;
  3587. }
  3588. int msm_vidc_buffer_done(struct msm_vidc_inst *inst,
  3589. struct msm_vidc_buffer *buf)
  3590. {
  3591. if (!inst || !inst->capabilities || !buf) {
  3592. d_vpr_e("%s: invalid params\n", __func__);
  3593. return -EINVAL;
  3594. }
  3595. if (buf->type == MSM_VIDC_BUF_INPUT_META &&
  3596. inst->capabilities->cap[INPUT_META_VIA_REQUEST].value) {
  3597. if (is_meta_rx_inp_enabled(inst, META_OUTBUF_FENCE))
  3598. return msm_vidc_v4l2_buffer_event(inst, buf);
  3599. } else {
  3600. return msm_vidc_vb2_buffer_done(inst, buf);
  3601. }
  3602. return 0;
  3603. }
  3604. int msm_vidc_event_queue_init(struct msm_vidc_inst *inst)
  3605. {
  3606. int rc = 0;
  3607. int index;
  3608. struct msm_vidc_core *core;
  3609. if (!inst || !inst->core) {
  3610. d_vpr_e("%s: invalid params\n", __func__);
  3611. return -EINVAL;
  3612. }
  3613. core = inst->core;
  3614. if (is_decode_session(inst))
  3615. index = 0;
  3616. else if (is_encode_session(inst))
  3617. index = 1;
  3618. else
  3619. return -EINVAL;
  3620. v4l2_fh_init(&inst->event_handler, &core->vdev[index].vdev);
  3621. inst->event_handler.ctrl_handler = &inst->ctrl_handler;
  3622. v4l2_fh_add(&inst->event_handler);
  3623. return rc;
  3624. }
  3625. int msm_vidc_event_queue_deinit(struct msm_vidc_inst *inst)
  3626. {
  3627. int rc = 0;
  3628. if (!inst) {
  3629. d_vpr_e("%s: invalid params\n", __func__);
  3630. return -EINVAL;
  3631. }
  3632. /* do not deinit, if not already inited */
  3633. if (!inst->event_handler.vdev) {
  3634. i_vpr_e(inst, "%s: already not inited\n", __func__);
  3635. return 0;
  3636. }
  3637. v4l2_fh_del(&inst->event_handler);
  3638. v4l2_fh_exit(&inst->event_handler);
  3639. return rc;
  3640. }
  3641. static int vb2q_init(struct msm_vidc_inst *inst,
  3642. struct vb2_queue *q, enum v4l2_buf_type type)
  3643. {
  3644. int rc = 0;
  3645. struct msm_vidc_core *core;
  3646. if (!inst || !q || !inst->core) {
  3647. d_vpr_e("%s: invalid params\n", __func__);
  3648. return -EINVAL;
  3649. }
  3650. core = inst->core;
  3651. q->type = type;
  3652. q->io_modes = VB2_MMAP | VB2_DMABUF;
  3653. q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
  3654. q->ops = core->vb2_ops;
  3655. q->mem_ops = core->vb2_mem_ops;
  3656. q->drv_priv = inst;
  3657. q->allow_zero_bytesused = 1;
  3658. q->copy_timestamp = 1;
  3659. rc = vb2_queue_init(q);
  3660. if (rc)
  3661. i_vpr_e(inst, "%s: vb2_queue_init failed for type %d\n",
  3662. __func__, type);
  3663. return rc;
  3664. }
/*
 * v4l2-mem2mem queue-init callback (invoked from v4l2_m2m_ctx_init):
 * sets up the input (src) and output (dst) vb2 queues for this
 * instance. Only the source queue supports the request api; both
 * queues share the same lock. On output-queue failure the already
 * initialized input queue is released.
 */
static int m2m_queue_init(void *priv, struct vb2_queue *src_vq,
	struct vb2_queue *dst_vq)
{
	int rc = 0;
	struct msm_vidc_inst *inst = priv;
	struct msm_vidc_core *core;

	if (!inst || !inst->core || !src_vq || !dst_vq) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;

	/* input (bitstream) queue: request api enabled on this port only */
	src_vq->supports_requests = 1;
	src_vq->lock = &inst->request_lock;
	src_vq->dev = &core->pdev->dev;
	rc = vb2q_init(inst, src_vq, INPUT_MPLANE);
	if (rc)
		goto fail_input_vb2q_init;
	inst->bufq[INPUT_PORT].vb2q = src_vq;

	/* output (capture) queue shares the input queue's lock */
	dst_vq->lock = src_vq->lock;
	dst_vq->dev = &core->pdev->dev;
	rc = vb2q_init(inst, dst_vq, OUTPUT_MPLANE);
	if (rc)
		goto fail_out_vb2q_init;
	inst->bufq[OUTPUT_PORT].vb2q = dst_vq;
	return rc;

fail_out_vb2q_init:
	vb2_queue_release(inst->bufq[INPUT_PORT].vb2q);
fail_input_vb2q_init:
	return rc;
}
  3695. int msm_vidc_vb2_queue_init(struct msm_vidc_inst *inst)
  3696. {
  3697. int rc = 0;
  3698. struct msm_vidc_core *core;
  3699. if (!inst || !inst->core) {
  3700. d_vpr_e("%s: invalid params\n", __func__);
  3701. return -EINVAL;
  3702. }
  3703. core = inst->core;
  3704. if (inst->vb2q_init) {
  3705. i_vpr_h(inst, "%s: vb2q already inited\n", __func__);
  3706. return 0;
  3707. }
  3708. inst->m2m_dev = v4l2_m2m_init(core->v4l2_m2m_ops);
  3709. if (IS_ERR(inst->m2m_dev)) {
  3710. i_vpr_e(inst, "%s: failed to initialize v4l2 m2m device\n", __func__);
  3711. rc = PTR_ERR(inst->m2m_dev);
  3712. goto fail_m2m_init;
  3713. }
  3714. /* v4l2_m2m_ctx_init will do input & output queues initialization */
  3715. inst->m2m_ctx = v4l2_m2m_ctx_init(inst->m2m_dev, inst, m2m_queue_init);
  3716. if (!inst->m2m_ctx) {
  3717. i_vpr_e(inst, "%s: v4l2_m2m_ctx_init failed\n", __func__);
  3718. goto fail_m2m_ctx_init;
  3719. }
  3720. inst->event_handler.m2m_ctx = inst->m2m_ctx;
  3721. rc = msm_vidc_vmem_alloc(sizeof(struct vb2_queue),
  3722. (void **)&inst->bufq[INPUT_META_PORT].vb2q, "input meta port");
  3723. if (rc)
  3724. goto fail_in_meta_alloc;
  3725. /* do input meta port queues initialization */
  3726. rc = vb2q_init(inst, inst->bufq[INPUT_META_PORT].vb2q, INPUT_META_PLANE);
  3727. if (rc)
  3728. goto fail_in_meta_vb2q_init;
  3729. rc = msm_vidc_vmem_alloc(sizeof(struct vb2_queue),
  3730. (void **)&inst->bufq[OUTPUT_META_PORT].vb2q, "output meta port");
  3731. if (rc)
  3732. goto fail_out_meta_alloc;
  3733. /* do output meta port queues initialization */
  3734. rc = vb2q_init(inst, inst->bufq[OUTPUT_META_PORT].vb2q, OUTPUT_META_PLANE);
  3735. if (rc)
  3736. goto fail_out_meta_vb2q_init;
  3737. inst->vb2q_init = true;
  3738. return 0;
  3739. fail_out_meta_vb2q_init:
  3740. msm_vidc_vmem_free((void **)&inst->bufq[OUTPUT_META_PORT].vb2q);
  3741. inst->bufq[OUTPUT_META_PORT].vb2q = NULL;
  3742. fail_out_meta_alloc:
  3743. vb2_queue_release(inst->bufq[INPUT_META_PORT].vb2q);
  3744. fail_in_meta_vb2q_init:
  3745. msm_vidc_vmem_free((void **)&inst->bufq[INPUT_META_PORT].vb2q);
  3746. inst->bufq[INPUT_META_PORT].vb2q = NULL;
  3747. fail_in_meta_alloc:
  3748. v4l2_m2m_ctx_release(inst->m2m_ctx);
  3749. inst->bufq[OUTPUT_PORT].vb2q = NULL;
  3750. inst->bufq[INPUT_PORT].vb2q = NULL;
  3751. fail_m2m_ctx_init:
  3752. v4l2_m2m_release(inst->m2m_dev);
  3753. fail_m2m_init:
  3754. return rc;
  3755. }
/*
 * Tear down all vb2 queues in reverse order of msm_vidc_vb2_queue_init:
 * output-meta queue, input-meta queue, then the m2m context (which
 * releases the input & output queues) and the m2m device. No-op if
 * the queues were never initialized.
 */
int msm_vidc_vb2_queue_deinit(struct msm_vidc_inst *inst)
{
	int rc = 0;

	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	if (!inst->vb2q_init) {
		i_vpr_h(inst, "%s: vb2q already deinited\n", __func__);
		return 0;
	}
	vb2_queue_release(inst->bufq[OUTPUT_META_PORT].vb2q);
	msm_vidc_vmem_free((void **)&inst->bufq[OUTPUT_META_PORT].vb2q);
	inst->bufq[OUTPUT_META_PORT].vb2q = NULL;
	vb2_queue_release(inst->bufq[INPUT_META_PORT].vb2q);
	msm_vidc_vmem_free((void **)&inst->bufq[INPUT_META_PORT].vb2q);
	inst->bufq[INPUT_META_PORT].vb2q = NULL;
	/*
	 * vb2_queue_release() for input and output queues
	 * is called from v4l2_m2m_ctx_release()
	 */
	v4l2_m2m_ctx_release(inst->m2m_ctx);
	inst->bufq[OUTPUT_PORT].vb2q = NULL;
	inst->bufq[INPUT_PORT].vb2q = NULL;
	v4l2_m2m_release(inst->m2m_dev);
	inst->vb2q_init = false;
	return rc;
}
  3784. int msm_vidc_add_session(struct msm_vidc_inst *inst)
  3785. {
  3786. int rc = 0;
  3787. struct msm_vidc_inst *i;
  3788. struct msm_vidc_core *core;
  3789. u32 count = 0;
  3790. if (!inst || !inst->core) {
  3791. d_vpr_e("%s: invalid params\n", __func__);
  3792. return -EINVAL;
  3793. }
  3794. core = inst->core;
  3795. if (!core->capabilities) {
  3796. i_vpr_e(inst, "%s: invalid params\n", __func__);
  3797. return -EINVAL;
  3798. }
  3799. core_lock(core, __func__);
  3800. if (core->state != MSM_VIDC_CORE_INIT) {
  3801. i_vpr_e(inst, "%s: invalid state %s\n",
  3802. __func__, core_state_name(core->state));
  3803. rc = -EINVAL;
  3804. goto unlock;
  3805. }
  3806. list_for_each_entry(i, &core->instances, list)
  3807. count++;
  3808. if (count < core->capabilities[MAX_SESSION_COUNT].value) {
  3809. list_add_tail(&inst->list, &core->instances);
  3810. } else {
  3811. i_vpr_e(inst, "%s: max limit %d already running %d sessions\n",
  3812. __func__, core->capabilities[MAX_SESSION_COUNT].value, count);
  3813. rc = -EINVAL;
  3814. }
  3815. unlock:
  3816. core_unlock(core, __func__);
  3817. return rc;
  3818. }
  3819. int msm_vidc_remove_session(struct msm_vidc_inst *inst)
  3820. {
  3821. struct msm_vidc_inst *i, *temp;
  3822. struct msm_vidc_core *core;
  3823. u32 count = 0;
  3824. if (!inst || !inst->core) {
  3825. d_vpr_e("%s: invalid params\n", __func__);
  3826. return -EINVAL;
  3827. }
  3828. core = inst->core;
  3829. core_lock(core, __func__);
  3830. list_for_each_entry_safe(i, temp, &core->instances, list) {
  3831. if (i->session_id == inst->session_id) {
  3832. list_del_init(&i->list);
  3833. list_add_tail(&i->list, &core->dangling_instances);
  3834. i_vpr_h(inst, "%s: removed session %#x\n",
  3835. __func__, i->session_id);
  3836. }
  3837. }
  3838. list_for_each_entry(i, &core->instances, list)
  3839. count++;
  3840. i_vpr_h(inst, "%s: remaining sessions %d\n", __func__, count);
  3841. core_unlock(core, __func__);
  3842. return 0;
  3843. }
  3844. static int msm_vidc_remove_dangling_session(struct msm_vidc_inst *inst)
  3845. {
  3846. struct msm_vidc_inst *i, *temp;
  3847. struct msm_vidc_core *core;
  3848. u32 count = 0;
  3849. if (!inst || !inst->core) {
  3850. d_vpr_e("%s: invalid params\n", __func__);
  3851. return -EINVAL;
  3852. }
  3853. core = inst->core;
  3854. core_lock(core, __func__);
  3855. list_for_each_entry_safe(i, temp, &core->dangling_instances, list) {
  3856. if (i->session_id == inst->session_id) {
  3857. list_del_init(&i->list);
  3858. i_vpr_h(inst, "%s: removed dangling session %#x\n",
  3859. __func__, i->session_id);
  3860. break;
  3861. }
  3862. }
  3863. list_for_each_entry(i, &core->dangling_instances, list)
  3864. count++;
  3865. i_vpr_h(inst, "%s: remaining dangling sessions %d\n", __func__, count);
  3866. core_unlock(core, __func__);
  3867. return 0;
  3868. }
  3869. int msm_vidc_session_open(struct msm_vidc_inst *inst)
  3870. {
  3871. int rc = 0;
  3872. if (!inst) {
  3873. d_vpr_e("%s: invalid params\n", __func__);
  3874. return -EINVAL;
  3875. }
  3876. inst->packet_size = 4096;
  3877. rc = msm_vidc_vmem_alloc(inst->packet_size, (void **)&inst->packet, __func__);
  3878. if (rc)
  3879. return rc;
  3880. rc = venus_hfi_session_open(inst);
  3881. if (rc)
  3882. goto error;
  3883. return 0;
  3884. error:
  3885. i_vpr_e(inst, "%s(): session open failed\n", __func__);
  3886. msm_vidc_vmem_free((void **)&inst->packet);
  3887. inst->packet = NULL;
  3888. return rc;
  3889. }
  3890. int msm_vidc_session_set_codec(struct msm_vidc_inst *inst)
  3891. {
  3892. int rc = 0;
  3893. if (!inst) {
  3894. d_vpr_e("%s: invalid params\n", __func__);
  3895. return -EINVAL;
  3896. }
  3897. rc = venus_hfi_session_set_codec(inst);
  3898. if (rc)
  3899. return rc;
  3900. return 0;
  3901. }
  3902. int msm_vidc_session_set_secure_mode(struct msm_vidc_inst *inst)
  3903. {
  3904. int rc = 0;
  3905. if (!inst) {
  3906. d_vpr_e("%s: invalid params\n", __func__);
  3907. return -EINVAL;
  3908. }
  3909. rc = venus_hfi_session_set_secure_mode(inst);
  3910. if (rc)
  3911. return rc;
  3912. return 0;
  3913. }
  3914. int msm_vidc_session_set_default_header(struct msm_vidc_inst *inst)
  3915. {
  3916. int rc = 0;
  3917. u32 default_header = false;
  3918. if (!inst) {
  3919. d_vpr_e("%s: invalid params\n", __func__);
  3920. return -EINVAL;
  3921. }
  3922. default_header = inst->capabilities->cap[DEFAULT_HEADER].value;
  3923. i_vpr_h(inst, "%s: default header: %d", __func__, default_header);
  3924. rc = venus_hfi_session_property(inst,
  3925. HFI_PROP_DEC_DEFAULT_HEADER,
  3926. HFI_HOST_FLAGS_NONE,
  3927. get_hfi_port(inst, INPUT_PORT),
  3928. HFI_PAYLOAD_U32,
  3929. &default_header,
  3930. sizeof(u32));
  3931. if (rc)
  3932. i_vpr_e(inst, "%s: set property failed\n", __func__);
  3933. return rc;
  3934. }
/*
 * msm_vidc_session_streamoff() - stop @port and reclaim its buffers.
 *
 * Sends HFI stop, transitions the state machine, then waits (with the inst
 * lock dropped) for the firmware stop-done signal. After a successful stop
 * it verifies firmware returned every queued buffer and flushes deferred
 * buffers. On any error the session is killed and @port's buffers flushed.
 * Called with the inst lock held.
 *
 * Return: 0 on success, negative errno on failure/timeout.
 */
int msm_vidc_session_streamoff(struct msm_vidc_inst *inst,
	enum msm_vidc_port_type port)
{
	int rc = 0;
	int count = 0;
	struct msm_vidc_core *core;
	enum signal_session_response signal_type;
	enum msm_vidc_buffer_type buffer_type;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	/* map port to the completion we wait on and the buffers we drain */
	if (port == INPUT_PORT) {
		signal_type = SIGNAL_CMD_STOP_INPUT;
		buffer_type = MSM_VIDC_BUF_INPUT;
	} else if (port == OUTPUT_PORT) {
		signal_type = SIGNAL_CMD_STOP_OUTPUT;
		buffer_type = MSM_VIDC_BUF_OUTPUT;
	} else {
		i_vpr_e(inst, "%s: invalid port: %d\n", __func__, port);
		return -EINVAL;
	}

	rc = venus_hfi_stop(inst, port);
	if (rc)
		goto error;
	rc = msm_vidc_state_change_streamoff(inst, port);
	if (rc)
		goto error;

	core = inst->core;
	i_vpr_h(inst, "%s: wait on port: %d for time: %d ms\n",
		__func__, port, core->capabilities[HW_RESPONSE_TIMEOUT].value);
	/* drop the lock so the response thread can signal the completion */
	inst_unlock(inst, __func__);
	rc = wait_for_completion_timeout(
			&inst->completions[signal_type],
			msecs_to_jiffies(
			core->capabilities[HW_RESPONSE_TIMEOUT].value));
	if (!rc) {
		/* wait_for_completion_timeout() returns 0 only on timeout */
		i_vpr_e(inst, "%s: session stop timed out for port: %d\n",
			__func__, port);
		rc = -ETIMEDOUT;
		msm_vidc_inst_timeout(inst);
	} else {
		rc = 0;
	}
	inst_lock(inst, __func__);

	if(rc)
		goto error;

	if (port == INPUT_PORT) {
		/* flush input timer list */
		msm_vidc_flush_input_timer(inst);
	}

	/* no more queued buffers after streamoff */
	count = msm_vidc_num_buffers(inst, buffer_type, MSM_VIDC_ATTR_QUEUED);
	if (!count) {
		i_vpr_h(inst, "%s: stop successful on port: %d\n",
			__func__, port);
	} else {
		i_vpr_e(inst,
			"%s: %d buffers pending with firmware on port: %d\n",
			__func__, count, port);
		rc = -EINVAL;
		goto error;
	}

	/* flush deferred buffers */
	msm_vidc_flush_buffers(inst, buffer_type);
	msm_vidc_flush_delayed_unmap_buffers(inst, buffer_type);
	return 0;

error:
	msm_vidc_kill_session(inst);
	msm_vidc_flush_buffers(inst, buffer_type);
	return rc;
}
/*
 * msm_vidc_session_close() - close the firmware session for @inst.
 *
 * Sends HFI close, frees the session packet buffer (no further commands may
 * be sent after close), then waits for the close-done signal with the inst
 * lock dropped. A timeout escalates via msm_vidc_inst_timeout(). In all
 * cases the instance is moved to MSM_VIDC_CLOSE and removed from the core
 * session list. Called with the inst lock held.
 *
 * Return: 0 on success, negative errno on failure/timeout.
 */
int msm_vidc_session_close(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct msm_vidc_core *core;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	rc = venus_hfi_session_close(inst);
	if (rc)
		return rc;

	/* we are not supposed to send any more commands after close */
	i_vpr_h(inst, "%s: free session packet data\n", __func__);
	msm_vidc_vmem_free((void **)&inst->packet);
	inst->packet = NULL;

	core = inst->core;
	i_vpr_h(inst, "%s: wait on close for time: %d ms\n",
		__func__, core->capabilities[HW_RESPONSE_TIMEOUT].value);
	/* drop the lock so the response thread can signal the completion */
	inst_unlock(inst, __func__);
	rc = wait_for_completion_timeout(
			&inst->completions[SIGNAL_CMD_CLOSE],
			msecs_to_jiffies(
			core->capabilities[HW_RESPONSE_TIMEOUT].value));
	if (!rc) {
		/* wait_for_completion_timeout() returns 0 only on timeout */
		i_vpr_e(inst, "%s: session close timed out\n", __func__);
		rc = -ETIMEDOUT;
		msm_vidc_inst_timeout(inst);
	} else {
		rc = 0;
		i_vpr_h(inst, "%s: close successful\n", __func__);
	}
	inst_lock(inst, __func__);

	/* close is terminal: update state even if the wait timed out */
	inst->state = MSM_VIDC_CLOSE;
	inst->sub_state = MSM_VIDC_SUB_STATE_NONE;
	strlcpy(inst->sub_state_name, "SUB_STATE_NONE", sizeof(inst->sub_state_name));
	msm_vidc_remove_session(inst);
	return rc;
}
  4045. int msm_vidc_kill_session(struct msm_vidc_inst *inst)
  4046. {
  4047. if (!inst) {
  4048. d_vpr_e("%s: invalid params\n", __func__);
  4049. return -EINVAL;
  4050. }
  4051. if (!inst->session_id) {
  4052. i_vpr_e(inst, "%s: already killed\n", __func__);
  4053. return 0;
  4054. }
  4055. i_vpr_e(inst, "%s: killing session\n", __func__);
  4056. msm_vidc_session_close(inst);
  4057. msm_vidc_change_state(inst, MSM_VIDC_ERROR, __func__);
  4058. return 0;
  4059. }
  4060. int msm_vidc_get_inst_capability(struct msm_vidc_inst *inst)
  4061. {
  4062. int rc = 0;
  4063. int i;
  4064. struct msm_vidc_core *core;
  4065. if (!inst || !inst->core || !inst->capabilities) {
  4066. d_vpr_e("%s: invalid params\n", __func__);
  4067. return -EINVAL;
  4068. }
  4069. core = inst->core;
  4070. for (i = 0; i < core->codecs_count; i++) {
  4071. if (core->inst_caps[i].domain == inst->domain &&
  4072. core->inst_caps[i].codec == inst->codec) {
  4073. i_vpr_h(inst,
  4074. "%s: copied capabilities with %#x codec, %#x domain\n",
  4075. __func__, inst->codec, inst->domain);
  4076. memcpy(inst->capabilities, &core->inst_caps[i],
  4077. sizeof(struct msm_vidc_inst_capability));
  4078. }
  4079. }
  4080. return rc;
  4081. }
  4082. int msm_vidc_deinit_core_caps(struct msm_vidc_core *core)
  4083. {
  4084. int rc = 0;
  4085. if (!core) {
  4086. d_vpr_e("%s: invalid params\n", __func__);
  4087. return -EINVAL;
  4088. }
  4089. msm_vidc_vmem_free((void **)&core->capabilities);
  4090. core->capabilities = NULL;
  4091. d_vpr_h("%s: Core capabilities freed\n", __func__);
  4092. return rc;
  4093. }
  4094. int msm_vidc_init_core_caps(struct msm_vidc_core *core)
  4095. {
  4096. int rc = 0;
  4097. int i, num_platform_caps;
  4098. struct msm_platform_core_capability *platform_data;
  4099. if (!core || !core->platform) {
  4100. d_vpr_e("%s: invalid params\n", __func__);
  4101. rc = -EINVAL;
  4102. goto exit;
  4103. }
  4104. platform_data = core->platform->data.core_data;
  4105. if (!platform_data) {
  4106. d_vpr_e("%s: platform core data is NULL\n",
  4107. __func__);
  4108. rc = -EINVAL;
  4109. goto exit;
  4110. }
  4111. rc = msm_vidc_vmem_alloc((sizeof(struct msm_vidc_core_capability) *
  4112. (CORE_CAP_MAX + 1)), (void **)&core->capabilities, __func__);
  4113. if (rc)
  4114. goto exit;
  4115. num_platform_caps = core->platform->data.core_data_size;
  4116. /* loop over platform caps */
  4117. for (i = 0; i < num_platform_caps && i < CORE_CAP_MAX; i++) {
  4118. core->capabilities[platform_data[i].type].type = platform_data[i].type;
  4119. core->capabilities[platform_data[i].type].value = platform_data[i].value;
  4120. }
  4121. exit:
  4122. return rc;
  4123. }
/*
 * update_inst_capability() - overlay one static platform capability entry
 * onto the per-codec instance capability table.
 *
 * Validates cap_id before using it as the table index, then copies the
 * value/range/flag fields. Dependency fields (parents/children/ops) are
 * handled separately by update_inst_cap_dependency().
 */
static void update_inst_capability(struct msm_platform_inst_capability *in,
	struct msm_vidc_inst_capability *capability)
{
	if (!in || !capability) {
		d_vpr_e("%s: invalid params %pK %pK\n",
			__func__, in, capability);
		return;
	}
	if (in->cap_id >= INST_CAP_MAX) {
		d_vpr_e("%s: invalid cap id %d\n", __func__, in->cap_id);
		return;
	}

	/* straight field-by-field copy of the static platform data */
	capability->cap[in->cap_id].cap_id = in->cap_id;
	capability->cap[in->cap_id].min = in->min;
	capability->cap[in->cap_id].max = in->max;
	capability->cap[in->cap_id].step_or_mask = in->step_or_mask;
	capability->cap[in->cap_id].value = in->value;
	capability->cap[in->cap_id].flags = in->flags;
	capability->cap[in->cap_id].v4l2_id = in->v4l2_id;
	capability->cap[in->cap_id].hfi_id = in->hfi_id;
}
/*
 * update_inst_cap_dependency() - overlay one platform capability dependency
 * entry (parent/child links plus adjust/set callbacks) onto the per-codec
 * instance capability table. Validates cap_id before using it as the index.
 */
static void update_inst_cap_dependency(
	struct msm_platform_inst_cap_dependency *in,
	struct msm_vidc_inst_capability *capability)
{
	if (!in || !capability) {
		d_vpr_e("%s: invalid params %pK %pK\n",
			__func__, in, capability);
		return;
	}
	if (in->cap_id >= INST_CAP_MAX) {
		d_vpr_e("%s: invalid cap id %d\n", __func__, in->cap_id);
		return;
	}

	capability->cap[in->cap_id].cap_id = in->cap_id;
	/* copy the whole fixed-size parent/child link arrays */
	memcpy(capability->cap[in->cap_id].parents, in->parents,
		sizeof(capability->cap[in->cap_id].parents));
	memcpy(capability->cap[in->cap_id].children, in->children,
		sizeof(capability->cap[in->cap_id].children));
	capability->cap[in->cap_id].adjust = in->adjust;
	capability->cap[in->cap_id].set = in->set;
}
  4166. int msm_vidc_deinit_instance_caps(struct msm_vidc_core *core)
  4167. {
  4168. int rc = 0;
  4169. if (!core) {
  4170. d_vpr_e("%s: invalid params\n", __func__);
  4171. return -EINVAL;
  4172. }
  4173. msm_vidc_vmem_free((void **)&core->inst_caps);
  4174. core->inst_caps = NULL;
  4175. d_vpr_h("%s: core->inst_caps freed\n", __func__);
  4176. return rc;
  4177. }
/*
 * msm_vidc_init_instance_caps() - build the per-codec instance capability
 * tables.
 *
 * Counts the set bits in the ENC_CODECS/DEC_CODECS masks to size
 * core->inst_caps (one entry per (domain, codec) pair), assigns a domain
 * and a single-bit codec mask to each entry, then overlays the platform
 * capability table and the capability-dependency table onto every entry
 * whose domain/codec masks match.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int msm_vidc_init_instance_caps(struct msm_vidc_core *core)
{
	int rc = 0;
	u8 enc_valid_codecs, dec_valid_codecs;
	u8 count_bits, enc_codec_count;
	u8 codecs_count = 0;
	int i, j, check_bit;
	int num_platform_cap_data, num_platform_cap_dependency_data;
	struct msm_platform_inst_capability *platform_cap_data = NULL;
	struct msm_platform_inst_cap_dependency *platform_cap_dependency_data = NULL;

	if (!core || !core->platform || !core->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		rc = -EINVAL;
		goto error;
	}
	platform_cap_data = core->platform->data.inst_cap_data;
	if (!platform_cap_data) {
		d_vpr_e("%s: platform instance cap data is NULL\n",
			__func__);
		rc = -EINVAL;
		goto error;
	}
	platform_cap_dependency_data = core->platform->data.inst_cap_dependency_data;
	if (!platform_cap_dependency_data) {
		d_vpr_e("%s: platform instance cap dependency data is NULL\n",
			__func__);
		rc = -EINVAL;
		goto error;
	}

	/* total entries = popcount(enc mask) + popcount(dec mask) */
	enc_valid_codecs = core->capabilities[ENC_CODECS].value;
	count_bits = enc_valid_codecs;
	COUNT_BITS(count_bits, codecs_count);
	enc_codec_count = codecs_count;

	dec_valid_codecs = core->capabilities[DEC_CODECS].value;
	count_bits = dec_valid_codecs;
	COUNT_BITS(count_bits, codecs_count);
	core->codecs_count = codecs_count;

	rc = msm_vidc_vmem_alloc(codecs_count * sizeof(struct msm_vidc_inst_capability),
		(void **)&core->inst_caps, __func__);
	if (rc)
		goto error;

	check_bit = 0;
	/* determine codecs for enc domain */
	for (i = 0; i < enc_codec_count; i++) {
		/* advance to the next set bit; each entry gets exactly one codec bit */
		while (check_bit < (sizeof(enc_valid_codecs) * 8)) {
			if (enc_valid_codecs & BIT(check_bit)) {
				core->inst_caps[i].domain = MSM_VIDC_ENCODER;
				core->inst_caps[i].codec = enc_valid_codecs &
						BIT(check_bit);
				check_bit++;
				break;
			}
			check_bit++;
		}
	}

	/* reset checkbit to check from 0th bit of decoder codecs set bits*/
	check_bit = 0;
	/* determine codecs for dec domain */
	for (; i < codecs_count; i++) {
		while (check_bit < (sizeof(dec_valid_codecs) * 8)) {
			if (dec_valid_codecs & BIT(check_bit)) {
				core->inst_caps[i].domain = MSM_VIDC_DECODER;
				core->inst_caps[i].codec = dec_valid_codecs &
						BIT(check_bit);
				check_bit++;
				break;
			}
			check_bit++;
		}
	}

	num_platform_cap_data = core->platform->data.inst_cap_data_size;
	num_platform_cap_dependency_data = core->platform->data.inst_cap_dependency_data_size;
	d_vpr_h("%s: num caps %d, dependency %d\n", __func__,
		num_platform_cap_data, num_platform_cap_dependency_data);

	/* loop over each platform capability */
	for (i = 0; i < num_platform_cap_data; i++) {
		/* select matching core codec and update it */
		for (j = 0; j < codecs_count; j++) {
			if ((platform_cap_data[i].domain &
				core->inst_caps[j].domain) &&
				(platform_cap_data[i].codec &
				core->inst_caps[j].codec)) {
				/* update core capability */
				update_inst_capability(&platform_cap_data[i],
					&core->inst_caps[j]);
			}
		}
	}

	/* loop over each platform dependency capability */
	for (i = 0; i < num_platform_cap_dependency_data; i++) {
		/* select matching core codec and update it */
		for (j = 0; j < codecs_count; j++) {
			if ((platform_cap_dependency_data[i].domain &
				core->inst_caps[j].domain) &&
				(platform_cap_dependency_data[i].codec &
				core->inst_caps[j].codec)) {
				/* update core dependency capability */
				update_inst_cap_dependency(
					&platform_cap_dependency_data[i],
					&core->inst_caps[j]);
			}
		}
	}

error:
	return rc;
}
/*
 * msm_vidc_core_deinit_locked() - deinit the video core.
 * @force: deinit even while sessions are still active; when false, deinit
 *         only happens once the instance list is empty.
 *
 * Caller must hold the core lock (enforced by __strict_check()).
 * Idempotent: returns 0 immediately if the core is already deinited.
 * Any surviving sessions are marked errored and parked on the dangling
 * list before the core state changes to DEINIT.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int msm_vidc_core_deinit_locked(struct msm_vidc_core *core, bool force)
{
	int rc = 0;
	struct msm_vidc_inst *inst, *dummy;

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	rc = __strict_check(core, __func__);
	if (rc) {
		d_vpr_e("%s(): core was not locked\n", __func__);
		return rc;
	}

	if (core->state == MSM_VIDC_CORE_DEINIT)
		return 0;

	if (force) {
		d_vpr_e("%s(): force deinit core\n", __func__);
	} else {
		/* in normal case, deinit core only if no session present */
		if (!list_empty(&core->instances)) {
			d_vpr_h("%s(): skip deinit\n", __func__);
			return 0;
		} else {
			d_vpr_h("%s(): deinit core\n", __func__);
		}
	}

	venus_hfi_core_deinit(core, force);

	/* unlink all sessions from core, if any */
	list_for_each_entry_safe(inst, dummy, &core->instances, list) {
		msm_vidc_change_state(inst, MSM_VIDC_ERROR, __func__);
		list_del_init(&inst->list);
		list_add_tail(&inst->list, &core->dangling_instances);
	}
	msm_vidc_change_core_state(core, MSM_VIDC_CORE_DEINIT, __func__);

	return rc;
}
  4320. int msm_vidc_core_deinit(struct msm_vidc_core *core, bool force)
  4321. {
  4322. int rc = 0;
  4323. if (!core) {
  4324. d_vpr_e("%s: invalid params\n", __func__);
  4325. return -EINVAL;
  4326. }
  4327. core_lock(core, __func__);
  4328. rc = msm_vidc_core_deinit_locked(core, force);
  4329. core_unlock(core, __func__);
  4330. return rc;
  4331. }
/*
 * msm_vidc_core_init_wait() - block until core init completes or times out.
 *
 * Polls core->state every @interval ms for up to HW_RESPONSE_TIMEOUT ms
 * while the core sits in INIT_WAIT; the core lock is dropped around each
 * sleep so the init/response path can make progress. On any failure the
 * core is force-deinited before returning.
 *
 * Return: 0 once the core reaches MSM_VIDC_CORE_INIT, -EINVAL if the core
 * is (or ends up) deinited or the wait times out.
 */
int msm_vidc_core_init_wait(struct msm_vidc_core *core)
{
	const int interval = 10;
	int max_tries, count = 0, rc = 0;

	if (!core || !core->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	core_lock(core, __func__);
	if (core->state == MSM_VIDC_CORE_INIT) {
		rc = 0;
		goto unlock;
	} else if (core->state == MSM_VIDC_CORE_DEINIT) {
		rc = -EINVAL;
		goto unlock;
	}

	d_vpr_h("%s(): waiting for state change\n", __func__);
	max_tries = core->capabilities[HW_RESPONSE_TIMEOUT].value / interval;
	while (count < max_tries) {
		if (core->state != MSM_VIDC_CORE_INIT_WAIT)
			break;
		/* sleep with the lock released so init can proceed */
		core_unlock(core, __func__);
		msleep_interruptible(interval);
		core_lock(core, __func__);
		count++;
	}
	d_vpr_h("%s: state %s, interval %u, count %u, max_tries %u\n", __func__,
		core_state_name(core->state), interval, count, max_tries);

	if (core->state == MSM_VIDC_CORE_INIT) {
		d_vpr_h("%s: sys init successful\n", __func__);
		rc = 0;
		goto unlock;
	} else {
		d_vpr_h("%s: sys init wait timedout. state %s\n",
			__func__, core_state_name(core->state));
		rc = -EINVAL;
		goto unlock;
	}

unlock:
	/* tear the core back down on any failure */
	if (rc)
		msm_vidc_core_deinit_locked(core, true);
	core_unlock(core, __func__);
	return rc;
}
  4376. int msm_vidc_core_init(struct msm_vidc_core *core)
  4377. {
  4378. int rc = 0;
  4379. if (!core || !core->capabilities) {
  4380. d_vpr_e("%s: invalid params\n", __func__);
  4381. return -EINVAL;
  4382. }
  4383. core_lock(core, __func__);
  4384. if (core->state == MSM_VIDC_CORE_INIT ||
  4385. core->state == MSM_VIDC_CORE_INIT_WAIT)
  4386. goto unlock;
  4387. msm_vidc_change_core_state(core, MSM_VIDC_CORE_INIT_WAIT, __func__);
  4388. core->smmu_fault_handled = false;
  4389. core->ssr.trigger = false;
  4390. core->pm_suspended = false;
  4391. rc = venus_hfi_core_init(core);
  4392. if (rc) {
  4393. d_vpr_e("%s: core init failed\n", __func__);
  4394. goto unlock;
  4395. }
  4396. unlock:
  4397. if (rc)
  4398. msm_vidc_core_deinit_locked(core, true);
  4399. core_unlock(core, __func__);
  4400. return rc;
  4401. }
  4402. int msm_vidc_inst_timeout(struct msm_vidc_inst *inst)
  4403. {
  4404. int rc = 0;
  4405. struct msm_vidc_core *core;
  4406. struct msm_vidc_inst *instance;
  4407. bool found;
  4408. if (!inst || !inst->core) {
  4409. d_vpr_e("%s: invalid params\n", __func__);
  4410. return -EINVAL;
  4411. }
  4412. core = inst->core;
  4413. core_lock(core, __func__);
  4414. /*
  4415. * All sessions will be removed from core list in core deinit,
  4416. * do not deinit core from a session which is not present in
  4417. * core list.
  4418. */
  4419. found = false;
  4420. list_for_each_entry(instance, &core->instances, list) {
  4421. if (instance == inst) {
  4422. found = true;
  4423. break;
  4424. }
  4425. }
  4426. if (!found) {
  4427. i_vpr_e(inst,
  4428. "%s: session not available in core list\n", __func__);
  4429. rc = -EINVAL;
  4430. goto unlock;
  4431. }
  4432. /* call core deinit for a valid instance timeout case */
  4433. msm_vidc_core_deinit_locked(core, true);
  4434. unlock:
  4435. core_unlock(core, __func__);
  4436. return rc;
  4437. }
  4438. int msm_vidc_print_buffer_info(struct msm_vidc_inst *inst)
  4439. {
  4440. struct msm_vidc_buffers *buffers;
  4441. int i;
  4442. if (!inst) {
  4443. i_vpr_e(inst, "%s: invalid params\n", __func__);
  4444. return -EINVAL;
  4445. }
  4446. /* Print buffer details */
  4447. for (i = 0; i < ARRAY_SIZE(buf_type_name_arr); i++) {
  4448. buffers = msm_vidc_get_buffers(inst, buf_type_name_arr[i].type, __func__);
  4449. if (!buffers)
  4450. continue;
  4451. i_vpr_h(inst, "buf: type: %11s, count %2d, extra %2d, actual %2d, size %9u\n",
  4452. buf_type_name_arr[i].name, buffers->min_count,
  4453. buffers->extra_count, buffers->actual_count,
  4454. buffers->size);
  4455. }
  4456. return 0;
  4457. }
  4458. int msm_vidc_print_inst_info(struct msm_vidc_inst *inst)
  4459. {
  4460. struct msm_vidc_buffers *buffers;
  4461. struct msm_vidc_buffer *buf;
  4462. enum msm_vidc_port_type port;
  4463. bool is_secure, is_decode;
  4464. u32 bit_depth, bit_rate, frame_rate, width, height;
  4465. struct dma_buf *dbuf;
  4466. struct inode *f_inode;
  4467. unsigned long inode_num = 0;
  4468. long ref_count = -1;
  4469. int i = 0;
  4470. if (!inst || !inst->capabilities) {
  4471. i_vpr_e(inst, "%s: invalid params\n", __func__);
  4472. return -EINVAL;
  4473. }
  4474. is_secure = is_secure_session(inst);
  4475. is_decode = inst->domain == MSM_VIDC_DECODER;
  4476. port = is_decode ? INPUT_PORT : OUTPUT_PORT;
  4477. width = inst->fmts[port].fmt.pix_mp.width;
  4478. height = inst->fmts[port].fmt.pix_mp.height;
  4479. bit_depth = inst->capabilities->cap[BIT_DEPTH].value & 0xFFFF;
  4480. bit_rate = inst->capabilities->cap[BIT_RATE].value;
  4481. frame_rate = inst->capabilities->cap[FRAME_RATE].value >> 16;
  4482. i_vpr_e(inst, "%s %s session, HxW: %d x %d, fps: %d, bitrate: %d, bit-depth: %d\n",
  4483. is_secure ? "Secure" : "Non-Secure",
  4484. is_decode ? "Decode" : "Encode",
  4485. height, width,
  4486. frame_rate, bit_rate, bit_depth);
  4487. /* Print buffer details */
  4488. for (i = 0; i < ARRAY_SIZE(buf_type_name_arr); i++) {
  4489. buffers = msm_vidc_get_buffers(inst, buf_type_name_arr[i].type, __func__);
  4490. if (!buffers)
  4491. continue;
  4492. i_vpr_e(inst, "count: type: %11s, min: %2d, extra: %2d, actual: %2d\n",
  4493. buf_type_name_arr[i].name, buffers->min_count,
  4494. buffers->extra_count, buffers->actual_count);
  4495. list_for_each_entry(buf, &buffers->list, list) {
  4496. if (!buf->dmabuf)
  4497. continue;
  4498. dbuf = (struct dma_buf *)buf->dmabuf;
  4499. if (dbuf && dbuf->file) {
  4500. f_inode = file_inode(dbuf->file);
  4501. if (f_inode) {
  4502. inode_num = f_inode->i_ino;
  4503. ref_count = file_count(dbuf->file);
  4504. }
  4505. }
  4506. i_vpr_e(inst,
  4507. "buf: type: %11s, index: %2d, fd: %4d, size: %9u, off: %8u, filled: %9u, daddr: %#llx, inode: %8lu, ref: %2ld, flags: %8x, ts: %16lld, attr: %8x\n",
  4508. buf_type_name_arr[i].name, buf->index, buf->fd, buf->buffer_size,
  4509. buf->data_offset, buf->data_size, buf->device_addr,
  4510. inode_num, ref_count, buf->flags, buf->timestamp, buf->attr);
  4511. }
  4512. }
  4513. return 0;
  4514. }
  4515. void msm_vidc_print_core_info(struct msm_vidc_core *core)
  4516. {
  4517. struct msm_vidc_inst *inst = NULL;
  4518. struct msm_vidc_inst *instances[MAX_SUPPORTED_INSTANCES];
  4519. s32 num_instances = 0;
  4520. if (!core) {
  4521. d_vpr_e("%s: invalid params\n", __func__);
  4522. return;
  4523. }
  4524. core_lock(core, __func__);
  4525. list_for_each_entry(inst, &core->instances, list)
  4526. instances[num_instances++] = inst;
  4527. core_unlock(core, __func__);
  4528. while (num_instances--) {
  4529. inst = instances[num_instances];
  4530. inst = get_inst_ref(core, inst);
  4531. if (!inst)
  4532. continue;
  4533. inst_lock(inst, __func__);
  4534. msm_vidc_print_inst_info(inst);
  4535. inst_unlock(inst, __func__);
  4536. put_inst(inst);
  4537. }
  4538. }
  4539. int msm_vidc_smmu_fault_handler(struct iommu_domain *domain,
  4540. struct device *dev, unsigned long iova, int flags, void *data)
  4541. {
  4542. struct msm_vidc_core *core = data;
  4543. if (!domain || !core || !core->capabilities) {
  4544. d_vpr_e("%s: invalid params %pK %pK\n",
  4545. __func__, domain, core);
  4546. return -EINVAL;
  4547. }
  4548. if (core->smmu_fault_handled) {
  4549. if (core->capabilities[NON_FATAL_FAULTS].value) {
  4550. dprintk_ratelimit(VIDC_ERR, "err ",
  4551. "%s: non-fatal pagefault address: %lx\n",
  4552. __func__, iova);
  4553. return 0;
  4554. }
  4555. }
  4556. d_vpr_e(FMT_STRING_FAULT_HANDLER, __func__, iova);
  4557. core->smmu_fault_handled = true;
  4558. /* print noc error log registers */
  4559. venus_hfi_noc_error_info(core);
  4560. msm_vidc_print_core_info(core);
  4561. /*
  4562. * Return -ENOSYS to elicit the default behaviour of smmu driver.
  4563. * If we return -ENOSYS, then smmu driver assumes page fault handler
  4564. * is not installed and prints a list of useful debug information like
  4565. * FAR, SID etc. This information is not printed if we return 0.
  4566. */
  4567. return -ENOSYS;
  4568. }
  4569. int msm_vidc_trigger_ssr(struct msm_vidc_core *core,
  4570. u64 trigger_ssr_val)
  4571. {
  4572. struct msm_vidc_ssr *ssr;
  4573. if (!core) {
  4574. d_vpr_e("%s: Invalid parameters\n", __func__);
  4575. return -EINVAL;
  4576. }
  4577. ssr = &core->ssr;
  4578. /*
  4579. * <test_addr><sub_client_id><ssr_type>
  4580. * ssr_type: 0-3 bits
  4581. * sub_client_id: 4-7 bits
  4582. * reserved: 8-31 bits
  4583. * test_addr: 32-63 bits
  4584. */
  4585. ssr->ssr_type = (trigger_ssr_val &
  4586. (unsigned long)SSR_TYPE) >> SSR_TYPE_SHIFT;
  4587. ssr->sub_client_id = (trigger_ssr_val &
  4588. (unsigned long)SSR_SUB_CLIENT_ID) >> SSR_SUB_CLIENT_ID_SHIFT;
  4589. ssr->test_addr = (trigger_ssr_val &
  4590. (unsigned long)SSR_ADDR_ID) >> SSR_ADDR_SHIFT;
  4591. schedule_work(&core->ssr_work);
  4592. return 0;
  4593. }
  4594. void msm_vidc_ssr_handler(struct work_struct *work)
  4595. {
  4596. int rc;
  4597. struct msm_vidc_core *core;
  4598. struct msm_vidc_ssr *ssr;
  4599. core = container_of(work, struct msm_vidc_core, ssr_work);
  4600. if (!core) {
  4601. d_vpr_e("%s: invalid params %pK\n", __func__, core);
  4602. return;
  4603. }
  4604. ssr = &core->ssr;
  4605. core_lock(core, __func__);
  4606. if (core->state == MSM_VIDC_CORE_INIT) {
  4607. /*
  4608. * In current implementation, user-initiated SSR triggers
  4609. * a fatal error from hardware. However, there is no way
  4610. * to know if fatal error is due to SSR or not. Handle
  4611. * user SSR as non-fatal.
  4612. */
  4613. core->ssr.trigger = true;
  4614. rc = venus_hfi_trigger_ssr(core, ssr->ssr_type,
  4615. ssr->sub_client_id, ssr->test_addr);
  4616. if (rc) {
  4617. d_vpr_e("%s: trigger_ssr failed\n", __func__);
  4618. core->ssr.trigger = false;
  4619. }
  4620. } else {
  4621. d_vpr_e("%s: video core not initialized\n", __func__);
  4622. }
  4623. core_unlock(core, __func__);
  4624. }
  4625. int msm_vidc_trigger_stability(struct msm_vidc_core *core,
  4626. u64 trigger_stability_val)
  4627. {
  4628. struct msm_vidc_inst *inst = NULL;
  4629. struct msm_vidc_stability stability;
  4630. if (!core) {
  4631. d_vpr_e("%s: invalid params\n", __func__);
  4632. return -EINVAL;
  4633. }
  4634. /*
  4635. * <payload><sub_client_id><stability_type>
  4636. * stability_type: 0-3 bits
  4637. * sub_client_id: 4-7 bits
  4638. * reserved: 8-31 bits
  4639. * payload: 32-63 bits
  4640. */
  4641. memset(&stability, 0, sizeof(struct msm_vidc_stability));
  4642. stability.stability_type = (trigger_stability_val &
  4643. (unsigned long)STABILITY_TYPE) >> STABILITY_TYPE_SHIFT;
  4644. stability.sub_client_id = (trigger_stability_val &
  4645. (unsigned long)STABILITY_SUB_CLIENT_ID) >> STABILITY_SUB_CLIENT_ID_SHIFT;
  4646. stability.value = (trigger_stability_val &
  4647. (unsigned long)STABILITY_PAYLOAD_ID) >> STABILITY_PAYLOAD_SHIFT;
  4648. core_lock(core, __func__);
  4649. list_for_each_entry(inst, &core->instances, list) {
  4650. memcpy(&inst->stability, &stability, sizeof(struct msm_vidc_stability));
  4651. schedule_work(&inst->stability_work);
  4652. }
  4653. core_unlock(core, __func__);
  4654. return 0;
  4655. }
  4656. void msm_vidc_stability_handler(struct work_struct *work)
  4657. {
  4658. int rc;
  4659. struct msm_vidc_inst *inst;
  4660. struct msm_vidc_stability *stability;
  4661. inst = container_of(work, struct msm_vidc_inst, stability_work);
  4662. inst = get_inst_ref(g_core, inst);
  4663. if (!inst) {
  4664. d_vpr_e("%s: invalid params\n", __func__);
  4665. return;
  4666. }
  4667. inst_lock(inst, __func__);
  4668. stability = &inst->stability;
  4669. rc = venus_hfi_trigger_stability(inst, stability->stability_type,
  4670. stability->sub_client_id, stability->value);
  4671. if (rc)
  4672. i_vpr_e(inst, "%s: trigger_stability failed\n", __func__);
  4673. inst_unlock(inst, __func__);
  4674. put_inst(inst);
  4675. }
/*
 * Synchronously cancel any pending/running stability work for @inst.
 * Safe to call from teardown paths; returns 0, or -EINVAL on NULL inst.
 */
int cancel_stability_work_sync(struct msm_vidc_inst *inst)
{
	if (!inst) {
		d_vpr_e("%s: Invalid arguments\n", __func__);
		return -EINVAL;
	}
	cancel_work_sync(&inst->stability_work);

	return 0;
}
  4685. void msm_vidc_fw_unload_handler(struct work_struct *work)
  4686. {
  4687. struct msm_vidc_core *core = NULL;
  4688. int rc = 0;
  4689. core = container_of(work, struct msm_vidc_core, fw_unload_work.work);
  4690. if (!core) {
  4691. d_vpr_e("%s: invalid work or core handle\n", __func__);
  4692. return;
  4693. }
  4694. d_vpr_h("%s: deinitializing video core\n",__func__);
  4695. rc = msm_vidc_core_deinit(core, false);
  4696. if (rc)
  4697. d_vpr_e("%s: Failed to deinit core\n", __func__);
  4698. }
  4699. int msm_vidc_suspend(struct msm_vidc_core *core)
  4700. {
  4701. int rc = 0;
  4702. if (!core) {
  4703. d_vpr_e("%s: invalid params\n", __func__);
  4704. return -EINVAL;
  4705. }
  4706. rc = venus_hfi_suspend(core);
  4707. if (rc)
  4708. return rc;
  4709. return rc;
  4710. }
  4711. void msm_vidc_batch_handler(struct work_struct *work)
  4712. {
  4713. struct msm_vidc_inst *inst;
  4714. enum msm_vidc_allow allow;
  4715. struct msm_vidc_core *core;
  4716. int rc = 0;
  4717. inst = container_of(work, struct msm_vidc_inst, decode_batch.work.work);
  4718. inst = get_inst_ref(g_core, inst);
  4719. if (!inst || !inst->core) {
  4720. d_vpr_e("%s: invalid params\n", __func__);
  4721. return;
  4722. }
  4723. core = inst->core;
  4724. inst_lock(inst, __func__);
  4725. if (is_session_error(inst)) {
  4726. i_vpr_e(inst, "%s: failled. Session error\n", __func__);
  4727. goto exit;
  4728. }
  4729. if (core->pm_suspended) {
  4730. i_vpr_h(inst, "%s: device in pm suspend state\n", __func__);
  4731. goto exit;
  4732. }
  4733. allow = msm_vidc_allow_qbuf(inst, OUTPUT_MPLANE);
  4734. if (allow != MSM_VIDC_ALLOW) {
  4735. i_vpr_e(inst, "%s: not allowed in state: %s\n", __func__,
  4736. state_name(inst->state));
  4737. goto exit;
  4738. }
  4739. i_vpr_h(inst, "%s: queue pending batch buffers\n", __func__);
  4740. rc = msm_vidc_queue_deferred_buffers(inst, MSM_VIDC_BUF_OUTPUT);
  4741. if (rc) {
  4742. i_vpr_e(inst, "%s: batch qbufs failed\n", __func__);
  4743. msm_vidc_change_state(inst, MSM_VIDC_ERROR, __func__);
  4744. }
  4745. exit:
  4746. inst_unlock(inst, __func__);
  4747. put_inst(inst);
  4748. }
  4749. int msm_vidc_flush_buffers(struct msm_vidc_inst *inst,
  4750. enum msm_vidc_buffer_type type)
  4751. {
  4752. int rc = 0;
  4753. struct msm_vidc_buffers *buffers;
  4754. struct msm_vidc_buffer *buf, *dummy;
  4755. enum msm_vidc_buffer_type buffer_type[2];
  4756. int i;
  4757. if (!inst) {
  4758. d_vpr_e("%s: invalid params\n", __func__);
  4759. return -EINVAL;
  4760. }
  4761. if (type == MSM_VIDC_BUF_INPUT) {
  4762. buffer_type[0] = MSM_VIDC_BUF_INPUT_META;
  4763. buffer_type[1] = MSM_VIDC_BUF_INPUT;
  4764. } else if (type == MSM_VIDC_BUF_OUTPUT) {
  4765. buffer_type[0] = MSM_VIDC_BUF_OUTPUT_META;
  4766. buffer_type[1] = MSM_VIDC_BUF_OUTPUT;
  4767. } else {
  4768. i_vpr_h(inst, "%s: invalid buffer type %d\n",
  4769. __func__, type);
  4770. return -EINVAL;
  4771. }
  4772. for (i = 0; i < ARRAY_SIZE(buffer_type); i++) {
  4773. buffers = msm_vidc_get_buffers(inst, buffer_type[i], __func__);
  4774. if (!buffers)
  4775. return -EINVAL;
  4776. list_for_each_entry_safe(buf, dummy, &buffers->list, list) {
  4777. if (buf->attr & MSM_VIDC_ATTR_QUEUED ||
  4778. buf->attr & MSM_VIDC_ATTR_DEFERRED) {
  4779. print_vidc_buffer(VIDC_HIGH, "high", "flushing buffer", inst, buf);
  4780. if (!(buf->attr & MSM_VIDC_ATTR_BUFFER_DONE))
  4781. msm_vidc_buffer_done(inst, buf);
  4782. msm_vidc_put_driver_buf(inst, buf);
  4783. }
  4784. }
  4785. }
  4786. return rc;
  4787. }
/*
 * Unmap all delayed-unmap (skip_delayed_unmap) mappings on the given port,
 * except mappings whose dma-buf is still held on the read_only buffer list
 * (those must stay mapped until firmware releases them).
 *
 * Returns 0 on success, -EINVAL on invalid input or missing mappings list.
 */
int msm_vidc_flush_delayed_unmap_buffers(struct msm_vidc_inst *inst,
	enum msm_vidc_buffer_type type)
{
	int rc = 0;
	struct msm_vidc_mappings *maps;
	struct msm_vidc_map *map, *dummy;
	struct msm_vidc_buffer *ro_buf, *ro_dummy;
	enum msm_vidc_buffer_type buffer_type[2];
	int i;
	bool found = false;

	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	/* a port's meta mappings are processed together with its data port */
	if (type == MSM_VIDC_BUF_INPUT) {
		buffer_type[0] = MSM_VIDC_BUF_INPUT_META;
		buffer_type[1] = MSM_VIDC_BUF_INPUT;
	} else if (type == MSM_VIDC_BUF_OUTPUT) {
		buffer_type[0] = MSM_VIDC_BUF_OUTPUT_META;
		buffer_type[1] = MSM_VIDC_BUF_OUTPUT;
	} else {
		i_vpr_h(inst, "%s: invalid buffer type %d\n",
			__func__, type);
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(buffer_type); i++) {
		maps = msm_vidc_get_mappings(inst, buffer_type[i], __func__);
		if (!maps)
			return -EINVAL;

		list_for_each_entry_safe(map, dummy, &maps->list, list) {
			/*
			 * decoder output bufs will have skip_delayed_unmap = true
			 * unmap all decoder output buffers except those present in
			 * read_only buffers list
			 */
			if (!map->skip_delayed_unmap)
				continue;

			/* is this mapping's dma-buf still on the read-only list? */
			found = false;
			list_for_each_entry_safe(ro_buf, ro_dummy,
				&inst->buffers.read_only.list, list) {
				if (map->dmabuf == ro_buf->dmabuf) {
					found = true;
					break;
				}
			}

			/* completely unmap */
			if (!found) {
				if (map->refcount > 1) {
					/*
					 * more than one outstanding reference at flush
					 * time indicates a refcount leak -> error state
					 */
					i_vpr_e(inst,
						"%s: unexpected map refcount: %u device addr %#x\n",
						__func__, map->refcount, map->device_addr);
					msm_vidc_change_state(inst, MSM_VIDC_ERROR, __func__);
				}
				msm_vidc_memory_unmap_completely(inst, map);
			}
		}
	}

	return rc;
}
  4847. void msm_vidc_destroy_buffers(struct msm_vidc_inst *inst)
  4848. {
  4849. struct msm_vidc_buffers *buffers;
  4850. struct msm_vidc_buffer *buf, *dummy;
  4851. struct msm_vidc_timestamp *ts, *dummy_ts;
  4852. struct msm_memory_dmabuf *dbuf, *dummy_dbuf;
  4853. struct msm_vidc_input_timer *timer, *dummy_timer;
  4854. struct msm_vidc_inst_cap_entry *entry, *dummy_entry;
  4855. struct msm_vidc_fence *fence, *dummy_fence;
  4856. static const enum msm_vidc_buffer_type ext_buf_types[] = {
  4857. MSM_VIDC_BUF_INPUT,
  4858. MSM_VIDC_BUF_OUTPUT,
  4859. MSM_VIDC_BUF_INPUT_META,
  4860. MSM_VIDC_BUF_OUTPUT_META,
  4861. };
  4862. static const enum msm_vidc_buffer_type internal_buf_types[] = {
  4863. MSM_VIDC_BUF_BIN,
  4864. MSM_VIDC_BUF_ARP,
  4865. MSM_VIDC_BUF_COMV,
  4866. MSM_VIDC_BUF_NON_COMV,
  4867. MSM_VIDC_BUF_LINE,
  4868. MSM_VIDC_BUF_DPB,
  4869. MSM_VIDC_BUF_PERSIST,
  4870. MSM_VIDC_BUF_VPSS,
  4871. MSM_VIDC_BUF_PARTIAL_DATA,
  4872. };
  4873. int i;
  4874. if (!inst) {
  4875. d_vpr_e("%s: invalid params\n", __func__);
  4876. return;
  4877. }
  4878. for (i = 0; i < ARRAY_SIZE(internal_buf_types); i++) {
  4879. buffers = msm_vidc_get_buffers(inst, internal_buf_types[i], __func__);
  4880. if (!buffers)
  4881. continue;
  4882. list_for_each_entry_safe(buf, dummy, &buffers->list, list) {
  4883. i_vpr_h(inst,
  4884. "destroying internal buffer: type %d idx %d fd %d addr %#x size %d\n",
  4885. buf->type, buf->index, buf->fd, buf->device_addr, buf->buffer_size);
  4886. msm_vidc_destroy_internal_buffer(inst, buf);
  4887. }
  4888. }
  4889. /* read_only and release list does not take dma ref_count using dma_buf_get().
  4890. dma_buf ptr will be obselete when its ref_count reaches zero. Hence print
  4891. the dma_buf info before releasing the ref count.
  4892. */
  4893. list_for_each_entry_safe(buf, dummy, &inst->buffers.read_only.list, list) {
  4894. print_vidc_buffer(VIDC_ERR, "err ", "destroying ro buffer", inst, buf);
  4895. list_del(&buf->list);
  4896. msm_memory_pool_free(inst, buf);
  4897. }
  4898. list_for_each_entry_safe(buf, dummy, &inst->buffers.release.list, list) {
  4899. print_vidc_buffer(VIDC_ERR, "err ", "destroying release buffer", inst, buf);
  4900. list_del(&buf->list);
  4901. msm_memory_pool_free(inst, buf);
  4902. }
  4903. for (i = 0; i < ARRAY_SIZE(ext_buf_types); i++) {
  4904. buffers = msm_vidc_get_buffers(inst, ext_buf_types[i], __func__);
  4905. if (!buffers)
  4906. continue;
  4907. list_for_each_entry_safe(buf, dummy, &buffers->list, list) {
  4908. print_vidc_buffer(VIDC_ERR, "err ", "destroying ", inst, buf);
  4909. if (!(buf->attr & MSM_VIDC_ATTR_BUFFER_DONE))
  4910. msm_vidc_buffer_done(inst, buf);
  4911. msm_vidc_put_driver_buf(inst, buf);
  4912. }
  4913. msm_vidc_unmap_buffers(inst, ext_buf_types[i]);
  4914. }
  4915. list_for_each_entry_safe(ts, dummy_ts, &inst->timestamps.list, sort.list) {
  4916. i_vpr_e(inst, "%s: removing ts: val %lld, rank %lld\n",
  4917. __func__, ts->sort.val, ts->rank);
  4918. list_del(&ts->sort.list);
  4919. msm_memory_pool_free(inst, ts);
  4920. }
  4921. list_for_each_entry_safe(ts, dummy_ts, &inst->ts_reorder.list, sort.list) {
  4922. i_vpr_e(inst, "%s: removing reorder ts: val %lld\n",
  4923. __func__, ts->sort.val);
  4924. list_del(&ts->sort.list);
  4925. msm_memory_pool_free(inst, ts);
  4926. }
  4927. list_for_each_entry_safe(timer, dummy_timer, &inst->input_timer_list, list) {
  4928. i_vpr_e(inst, "%s: removing input_timer %lld\n",
  4929. __func__, timer->time_us);
  4930. msm_memory_pool_free(inst, timer);
  4931. }
  4932. list_for_each_entry_safe(dbuf, dummy_dbuf, &inst->dmabuf_tracker, list) {
  4933. i_vpr_e(inst, "%s: removing dma_buf %#x, refcount %u\n",
  4934. __func__, dbuf->dmabuf, dbuf->refcount);
  4935. msm_vidc_memory_put_dmabuf_completely(inst, dbuf);
  4936. }
  4937. list_for_each_entry_safe(entry, dummy_entry, &inst->firmware_list, list) {
  4938. i_vpr_e(inst, "%s: fw list: %s\n", __func__, cap_name(entry->cap_id));
  4939. list_del(&entry->list);
  4940. msm_vidc_vmem_free((void **)&entry);
  4941. }
  4942. list_for_each_entry_safe(entry, dummy_entry, &inst->children_list, list) {
  4943. i_vpr_e(inst, "%s: child list: %s\n", __func__, cap_name(entry->cap_id));
  4944. list_del(&entry->list);
  4945. msm_vidc_vmem_free((void **)&entry);
  4946. }
  4947. list_for_each_entry_safe(entry, dummy_entry, &inst->caps_list, list) {
  4948. list_del(&entry->list);
  4949. msm_vidc_vmem_free((void **)&entry);
  4950. }
  4951. list_for_each_entry_safe(fence, dummy_fence, &inst->fence_list, list) {
  4952. i_vpr_e(inst, "%s: destroying fence %s\n", __func__, fence->name);
  4953. msm_vidc_fence_destroy(inst, (u32)fence->dma_fence.seqno);
  4954. }
  4955. /* destroy buffers from pool */
  4956. msm_memory_pools_deinit(inst);
  4957. }
/*
 * Final kref release callback for an instance, invoked via put_inst() when
 * the last reference drops. Tears down everything the instance owns and
 * frees the instance memory itself — 'inst' is invalid after this returns.
 */
static void msm_vidc_close_helper(struct kref *kref)
{
	struct msm_vidc_inst *inst = container_of(kref,
		struct msm_vidc_inst, kref);

	i_vpr_h(inst, "%s()\n", __func__);
	msm_vidc_fence_deinit(inst);
	msm_vidc_event_queue_deinit(inst);
	msm_vidc_vb2_queue_deinit(inst);
	msm_vidc_debugfs_deinit_inst(inst);
	/* codec-specific deinit depends on session domain */
	if (is_decode_session(inst))
		msm_vdec_inst_deinit(inst);
	else if (is_encode_session(inst))
		msm_venc_inst_deinit(inst);
	msm_vidc_free_input_cr_list(inst);
	if (inst->workq)
		destroy_workqueue(inst->workq);
	msm_vidc_remove_dangling_session(inst);
	/* locks destroyed last, after all users are gone */
	mutex_destroy(&inst->client_lock);
	mutex_destroy(&inst->request_lock);
	mutex_destroy(&inst->lock);
	msm_vidc_vmem_free((void **)&inst->capabilities);
	msm_vidc_vmem_free((void **)&inst);
}
  4981. struct msm_vidc_inst *get_inst_ref(struct msm_vidc_core *core,
  4982. struct msm_vidc_inst *instance)
  4983. {
  4984. struct msm_vidc_inst *inst = NULL;
  4985. bool matches = false;
  4986. if (!core) {
  4987. d_vpr_e("%s: invalid params\n", __func__);
  4988. return NULL;
  4989. }
  4990. mutex_lock(&core->lock);
  4991. list_for_each_entry(inst, &core->instances, list) {
  4992. if (inst == instance) {
  4993. matches = true;
  4994. break;
  4995. }
  4996. }
  4997. inst = (matches && kref_get_unless_zero(&inst->kref)) ? inst : NULL;
  4998. mutex_unlock(&core->lock);
  4999. return inst;
  5000. }
  5001. struct msm_vidc_inst *get_inst(struct msm_vidc_core *core,
  5002. u32 session_id)
  5003. {
  5004. struct msm_vidc_inst *inst = NULL;
  5005. bool matches = false;
  5006. if (!core) {
  5007. d_vpr_e("%s: invalid params\n", __func__);
  5008. return NULL;
  5009. }
  5010. mutex_lock(&core->lock);
  5011. list_for_each_entry(inst, &core->instances, list) {
  5012. if (inst->session_id == session_id) {
  5013. matches = true;
  5014. break;
  5015. }
  5016. }
  5017. inst = (matches && kref_get_unless_zero(&inst->kref)) ? inst : NULL;
  5018. mutex_unlock(&core->lock);
  5019. return inst;
  5020. }
/*
 * Drop one reference on @inst; when the last reference drops,
 * msm_vidc_close_helper() destroys the instance.
 */
void put_inst(struct msm_vidc_inst *inst)
{
	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}
	kref_put(&inst->kref, msm_vidc_close_helper);
}
/* True if the core mutex is held (by any context — not an ownership check). */
bool core_lock_check(struct msm_vidc_core *core, const char *func)
{
	return mutex_is_locked(&core->lock);
}
/* Acquire the core mutex; @function is for call-site tracing only. */
void core_lock(struct msm_vidc_core *core, const char *function)
{
	mutex_lock(&core->lock);
}
/* Release the core mutex; @function is for call-site tracing only. */
void core_unlock(struct msm_vidc_core *core, const char *function)
{
	mutex_unlock(&core->lock);
}
/* True if the instance mutex is held (by any context — not an ownership check). */
bool inst_lock_check(struct msm_vidc_inst *inst, const char *func)
{
	return mutex_is_locked(&inst->lock);
}
/* Acquire the instance mutex; @function is for call-site tracing only. */
void inst_lock(struct msm_vidc_inst *inst, const char *function)
{
	mutex_lock(&inst->lock);
}
/* Release the instance mutex; @function is for call-site tracing only. */
void inst_unlock(struct msm_vidc_inst *inst, const char *function)
{
	mutex_unlock(&inst->lock);
}
/* True if the client mutex is held (by any context — not an ownership check). */
bool client_lock_check(struct msm_vidc_inst *inst, const char *func)
{
	return mutex_is_locked(&inst->client_lock);
}
/* Acquire the client mutex; @function is for call-site tracing only. */
void client_lock(struct msm_vidc_inst *inst, const char *function)
{
	mutex_lock(&inst->client_lock);
}
/* Release the client mutex; @function is for call-site tracing only. */
void client_unlock(struct msm_vidc_inst *inst, const char *function)
{
	mutex_unlock(&inst->client_lock);
}
  5065. int msm_vidc_update_bitstream_buffer_size(struct msm_vidc_inst *inst)
  5066. {
  5067. struct msm_vidc_core *core;
  5068. struct v4l2_format *fmt;
  5069. if (!inst || !inst->core) {
  5070. d_vpr_e("%s: invalid params\n", __func__);
  5071. return -EINVAL;
  5072. }
  5073. core = inst->core;
  5074. if (is_decode_session(inst)) {
  5075. fmt = &inst->fmts[INPUT_PORT];
  5076. fmt->fmt.pix_mp.plane_fmt[0].sizeimage = call_session_op(core,
  5077. buffer_size, inst, MSM_VIDC_BUF_INPUT);
  5078. }
  5079. return 0;
  5080. }
  5081. int msm_vidc_update_meta_port_settings(struct msm_vidc_inst *inst)
  5082. {
  5083. struct msm_vidc_core *core;
  5084. struct v4l2_format *fmt;
  5085. if (!inst || !inst->core) {
  5086. d_vpr_e("%s: invalid params\n", __func__);
  5087. return -EINVAL;
  5088. }
  5089. core = inst->core;
  5090. fmt = &inst->fmts[INPUT_META_PORT];
  5091. fmt->fmt.meta.buffersize = call_session_op(core,
  5092. buffer_size, inst, MSM_VIDC_BUF_INPUT_META);
  5093. inst->buffers.input_meta.min_count =
  5094. inst->buffers.input.min_count;
  5095. inst->buffers.input_meta.extra_count =
  5096. inst->buffers.input.extra_count;
  5097. inst->buffers.input_meta.actual_count =
  5098. inst->buffers.input.actual_count;
  5099. inst->buffers.input_meta.size = fmt->fmt.meta.buffersize;
  5100. fmt = &inst->fmts[OUTPUT_META_PORT];
  5101. fmt->fmt.meta.buffersize = call_session_op(core,
  5102. buffer_size, inst, MSM_VIDC_BUF_OUTPUT_META);
  5103. inst->buffers.output_meta.min_count =
  5104. inst->buffers.output.min_count;
  5105. inst->buffers.output_meta.extra_count =
  5106. inst->buffers.output.extra_count;
  5107. inst->buffers.output_meta.actual_count =
  5108. inst->buffers.output.actual_count;
  5109. inst->buffers.output_meta.size = fmt->fmt.meta.buffersize;
  5110. return 0;
  5111. }
/*
 * Recompute min/extra/actual buffer counts for the given port via the
 * session ops, bump actual_count up to min+extra if the client asked for
 * fewer, and mirror the counts onto the port's meta buffers when meta is
 * enabled (zero them otherwise).
 *
 * Returns 0 on success, -EINVAL on invalid params or unknown port.
 */
int msm_vidc_update_buffer_count(struct msm_vidc_inst *inst, u32 port)
{
	struct msm_vidc_core *core;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;

	switch (port) {
	case INPUT_PORT:
		inst->buffers.input.min_count = call_session_op(core,
			min_count, inst, MSM_VIDC_BUF_INPUT);
		inst->buffers.input.extra_count = call_session_op(core,
			extra_count, inst, MSM_VIDC_BUF_INPUT);
		/* never let the client allocate fewer than min + extra */
		if (inst->buffers.input.actual_count <
			inst->buffers.input.min_count +
			inst->buffers.input.extra_count) {
			inst->buffers.input.actual_count =
				inst->buffers.input.min_count +
				inst->buffers.input.extra_count;
		}
		if (is_input_meta_enabled(inst)) {
			/* meta port mirrors the data port counts */
			inst->buffers.input_meta.min_count =
					inst->buffers.input.min_count;
			inst->buffers.input_meta.extra_count =
					inst->buffers.input.extra_count;
			inst->buffers.input_meta.actual_count =
					inst->buffers.input.actual_count;
		} else {
			inst->buffers.input_meta.min_count = 0;
			inst->buffers.input_meta.extra_count = 0;
			inst->buffers.input_meta.actual_count = 0;
		}
		i_vpr_h(inst, "%s: type: INPUT, count: min %u, extra %u, actual %u\n", __func__,
			inst->buffers.input.min_count,
			inst->buffers.input.extra_count,
			inst->buffers.input.actual_count);
		break;
	case OUTPUT_PORT:
		/*
		 * output min_count is frozen once the input side is streaming
		 * (firmware has already been told the DPB requirements)
		 */
		if (!inst->bufq[INPUT_PORT].vb2q->streaming)
			inst->buffers.output.min_count = call_session_op(core,
				min_count, inst, MSM_VIDC_BUF_OUTPUT);
		inst->buffers.output.extra_count = call_session_op(core,
			extra_count, inst, MSM_VIDC_BUF_OUTPUT);
		/* never let the client allocate fewer than min + extra */
		if (inst->buffers.output.actual_count <
			inst->buffers.output.min_count +
			inst->buffers.output.extra_count) {
			inst->buffers.output.actual_count =
				inst->buffers.output.min_count +
				inst->buffers.output.extra_count;
		}
		if (is_output_meta_enabled(inst)) {
			/* meta port mirrors the data port counts */
			inst->buffers.output_meta.min_count =
					inst->buffers.output.min_count;
			inst->buffers.output_meta.extra_count =
					inst->buffers.output.extra_count;
			inst->buffers.output_meta.actual_count =
					inst->buffers.output.actual_count;
		} else {
			inst->buffers.output_meta.min_count = 0;
			inst->buffers.output_meta.extra_count = 0;
			inst->buffers.output_meta.actual_count = 0;
		}
		i_vpr_h(inst, "%s: type: OUTPUT, count: min %u, extra %u, actual %u\n", __func__,
			inst->buffers.output.min_count,
			inst->buffers.output.extra_count,
			inst->buffers.output.actual_count);
		break;
	default:
		d_vpr_e("%s unknown port %d\n", __func__, port);
		return -EINVAL;
	}

	return 0;
}
  5186. void msm_vidc_schedule_core_deinit(struct msm_vidc_core *core)
  5187. {
  5188. if (!core)
  5189. return;
  5190. if (!core->capabilities[FW_UNLOAD].value)
  5191. return;
  5192. cancel_delayed_work(&core->fw_unload_work);
  5193. schedule_delayed_work(&core->fw_unload_work,
  5194. msecs_to_jiffies(core->capabilities[FW_UNLOAD_DELAY].value));
  5195. d_vpr_h("firmware unload delayed by %u ms\n",
  5196. core->capabilities[FW_UNLOAD_DELAY].value);
  5197. return;
  5198. }
  5199. static const char *get_codec_str(enum msm_vidc_codec_type type)
  5200. {
  5201. switch (type) {
  5202. case MSM_VIDC_H264: return " avc";
  5203. case MSM_VIDC_HEVC: return "hevc";
  5204. case MSM_VIDC_VP9: return " vp9";
  5205. case MSM_VIDC_AV1: return " av1";
  5206. case MSM_VIDC_HEIC: return "heic";
  5207. }
  5208. return "....";
  5209. }
  5210. static const char *get_domain_str(enum msm_vidc_domain_type type)
  5211. {
  5212. switch (type) {
  5213. case MSM_VIDC_ENCODER: return "E";
  5214. case MSM_VIDC_DECODER: return "D";
  5215. }
  5216. return ".";
  5217. }
  5218. int msm_vidc_update_debug_str(struct msm_vidc_inst *inst)
  5219. {
  5220. u32 sid;
  5221. int client_id = INVALID_CLIENT_ID;
  5222. const char *codec;
  5223. const char *domain;
  5224. if (!inst) {
  5225. d_vpr_e("%s: Invalid params\n", __func__);
  5226. return -EINVAL;
  5227. }
  5228. if (inst->capabilities)
  5229. client_id = inst->capabilities->cap[CLIENT_ID].value;
  5230. sid = inst->session_id;
  5231. codec = get_codec_str(inst->codec);
  5232. domain = get_domain_str(inst->domain);
  5233. if (client_id != INVALID_CLIENT_ID) {
  5234. snprintf(inst->debug_str, sizeof(inst->debug_str), "%08x: %s%s_%d",
  5235. sid, codec, domain, client_id);
  5236. } else {
  5237. snprintf(inst->debug_str, sizeof(inst->debug_str), "%08x: %s%s",
  5238. sid, codec, domain);
  5239. }
  5240. d_vpr_h("%s: sid: %08x, codec: %s, domain: %s, final: %s\n",
  5241. __func__, sid, codec, domain, inst->debug_str);
  5242. return 0;
  5243. }
  5244. static int msm_vidc_print_insts_info(struct msm_vidc_core *core)
  5245. {
  5246. struct msm_vidc_inst *inst;
  5247. u32 height, width, fps, orate;
  5248. struct msm_vidc_inst_capability *capability;
  5249. struct v4l2_format *out_f;
  5250. struct v4l2_format *inp_f;
  5251. char prop[64];
  5252. d_vpr_e("Print all running instances\n");
  5253. d_vpr_e("%6s | %6s | %5s | %5s | %5s\n", "width", "height", "fps", "orate", "prop");
  5254. core_lock(core, __func__);
  5255. list_for_each_entry(inst, &core->instances, list) {
  5256. out_f = &inst->fmts[OUTPUT_PORT];
  5257. inp_f = &inst->fmts[INPUT_PORT];
  5258. capability = inst->capabilities;
  5259. memset(&prop, 0, sizeof(prop));
  5260. width = max(out_f->fmt.pix_mp.width, inp_f->fmt.pix_mp.width);
  5261. height = max(out_f->fmt.pix_mp.height, inp_f->fmt.pix_mp.height);
  5262. fps = capability->cap[FRAME_RATE].value >> 16;
  5263. orate = capability->cap[OPERATING_RATE].value >> 16;
  5264. if (is_realtime_session(inst))
  5265. strlcat(prop, "RT ", sizeof(prop));
  5266. else
  5267. strlcat(prop, "NRT", sizeof(prop));
  5268. if (is_thumbnail_session(inst))
  5269. strlcat(prop, "+THUMB", sizeof(prop));
  5270. if (is_image_session(inst))
  5271. strlcat(prop, "+IMAGE", sizeof(prop));
  5272. i_vpr_e(inst, "%6u | %6u | %5u | %5u | %5s\n", width, height, fps, orate, prop);
  5273. }
  5274. core_unlock(core, __func__);
  5275. return 0;
  5276. }
  5277. bool msm_vidc_ignore_session_load(struct msm_vidc_inst *inst) {
  5278. if (!inst) {
  5279. d_vpr_e("%s: invalid params\n", __func__);
  5280. return -EINVAL;
  5281. }
  5282. if (!is_realtime_session(inst) || is_thumbnail_session(inst) ||
  5283. is_image_session(inst))
  5284. return true;
  5285. return false;
  5286. }
  5287. int msm_vidc_check_core_mbps(struct msm_vidc_inst *inst)
  5288. {
  5289. u32 mbps = 0, total_mbps = 0, enc_mbps = 0;
  5290. u32 critical_mbps = 0;
  5291. struct msm_vidc_core *core;
  5292. struct msm_vidc_inst *instance;
  5293. if (!inst || !inst->core || !inst->capabilities) {
  5294. d_vpr_e("%s: invalid params\n", __func__);
  5295. return -EINVAL;
  5296. }
  5297. core = inst->core;
  5298. /* skip mbps check for non-realtime, thumnail, image sessions */
  5299. if (msm_vidc_ignore_session_load(inst)) {
  5300. i_vpr_h(inst,
  5301. "%s: skip mbps check due to NRT %d, TH %d, IMG %d\n", __func__,
  5302. !is_realtime_session(inst), is_thumbnail_session(inst),
  5303. is_image_session(inst));
  5304. return 0;
  5305. }
  5306. core_lock(core, __func__);
  5307. list_for_each_entry(instance, &core->instances, list) {
  5308. if (is_critical_priority_session(instance))
  5309. critical_mbps += msm_vidc_get_inst_load(instance);
  5310. }
  5311. core_unlock(core, __func__);
  5312. if (critical_mbps > core->capabilities[MAX_MBPS].value) {
  5313. i_vpr_e(inst, "%s: Hardware overloaded with critical sessions. needed %u, max %u",
  5314. __func__, critical_mbps, core->capabilities[MAX_MBPS].value);
  5315. return -ENOMEM;
  5316. }
  5317. core_lock(core, __func__);
  5318. list_for_each_entry(instance, &core->instances, list) {
  5319. /* ignore invalid/error session */
  5320. if (is_session_error(instance))
  5321. continue;
  5322. /* ignore thumbnail, image, and non realtime sessions */
  5323. if (msm_vidc_ignore_session_load(instance))
  5324. continue;
  5325. mbps = msm_vidc_get_inst_load(instance);
  5326. total_mbps += mbps;
  5327. if (is_encode_session(instance))
  5328. enc_mbps += mbps;
  5329. }
  5330. core_unlock(core, __func__);
  5331. if (is_encode_session(inst)) {
  5332. /* reject encoder if all encoders mbps is greater than MAX_MBPS */
  5333. if (enc_mbps > core->capabilities[MAX_MBPS].value) {
  5334. i_vpr_e(inst, "%s: Hardware overloaded. needed %u, max %u", __func__,
  5335. mbps, core->capabilities[MAX_MBPS].value);
  5336. return -ENOMEM;
  5337. }
  5338. /*
  5339. * if total_mbps is greater than max_mbps then reduce all decoders
  5340. * priority by 1 to allow this encoder
  5341. */
  5342. if (total_mbps > core->capabilities[MAX_MBPS].value) {
  5343. core_lock(core, __func__);
  5344. list_for_each_entry(instance, &core->instances, list) {
  5345. /* reduce realtime decode sessions priority */
  5346. if (is_decode_session(instance) && is_realtime_session(instance)) {
  5347. instance->adjust_priority = RT_DEC_DOWN_PRORITY_OFFSET;
  5348. i_vpr_h(inst, "%s: pending adjust priority by %d\n",
  5349. __func__, instance->adjust_priority);
  5350. }
  5351. }
  5352. core_unlock(core, __func__);
  5353. }
  5354. } else if (is_decode_session(inst)){
  5355. if (total_mbps > core->capabilities[MAX_MBPS].value) {
  5356. inst->adjust_priority = RT_DEC_DOWN_PRORITY_OFFSET;
  5357. i_vpr_h(inst, "%s: pending adjust priority by %d\n",
  5358. __func__, inst->adjust_priority);
  5359. }
  5360. }
  5361. i_vpr_h(inst, "%s: HW load needed %u is within max %u", __func__,
  5362. total_mbps, core->capabilities[MAX_MBPS].value);
  5363. return 0;
  5364. }
/*
 * Admission control on aggregate macroblocks-per-frame, in three locked
 * passes over the core's instance list:
 *  1) critical-priority sessions alone must fit MAX_MBPF;
 *  2) video sessions must fit MAX_MBPF and image sessions MAX_IMAGE_MBPF
 *     (thumbnails exempt);
 *  3) realtime video sessions must fit MAX_RT_MBPF.
 *
 * Returns 0 when admissible, -EINVAL on bad params, -ENOMEM on overload.
 */
int msm_vidc_check_core_mbpf(struct msm_vidc_inst *inst)
{
	u32 video_mbpf = 0, image_mbpf = 0, video_rt_mbpf = 0;
	u32 critical_mbpf = 0;
	struct msm_vidc_core *core;
	struct msm_vidc_inst *instance;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;

	/* pass 1: critical-priority sessions */
	core_lock(core, __func__);
	list_for_each_entry(instance, &core->instances, list) {
		if (is_critical_priority_session(instance))
			critical_mbpf += msm_vidc_get_mbs_per_frame(instance);
	}
	core_unlock(core, __func__);

	if (critical_mbpf > core->capabilities[MAX_MBPF].value) {
		i_vpr_e(inst, "%s: Hardware overloaded with critical sessions. needed %u, max %u",
			__func__, critical_mbpf, core->capabilities[MAX_MBPF].value);
		return -ENOMEM;
	}

	/* pass 2: split remaining load into image vs video pools */
	core_lock(core, __func__);
	list_for_each_entry(instance, &core->instances, list) {
		/* ignore thumbnail session */
		if (is_thumbnail_session(instance))
			continue;

		if (is_image_session(instance))
			image_mbpf += msm_vidc_get_mbs_per_frame(instance);
		else
			video_mbpf += msm_vidc_get_mbs_per_frame(instance);
	}
	core_unlock(core, __func__);

	if (video_mbpf > core->capabilities[MAX_MBPF].value) {
		i_vpr_e(inst, "%s: video overloaded. needed %u, max %u", __func__,
			video_mbpf, core->capabilities[MAX_MBPF].value);
		return -ENOMEM;
	}

	if (image_mbpf > core->capabilities[MAX_IMAGE_MBPF].value) {
		i_vpr_e(inst, "%s: image overloaded. needed %u, max %u", __func__,
			image_mbpf, core->capabilities[MAX_IMAGE_MBPF].value);
		return -ENOMEM;
	}

	core_lock(core, __func__);
	/* check real-time video sessions max limit */
	list_for_each_entry(instance, &core->instances, list) {
		if (msm_vidc_ignore_session_load(instance))
			continue;

		video_rt_mbpf += msm_vidc_get_mbs_per_frame(instance);
	}
	core_unlock(core, __func__);

	if (video_rt_mbpf > core->capabilities[MAX_RT_MBPF].value) {
		i_vpr_e(inst, "%s: real-time video overloaded. needed %u, max %u",
			__func__, video_rt_mbpf, core->capabilities[MAX_RT_MBPF].value);
		return -ENOMEM;
	}

	return 0;
}
  5423. static int msm_vidc_check_inst_mbpf(struct msm_vidc_inst *inst)
  5424. {
  5425. u32 mbpf = 0, max_mbpf = 0;
  5426. struct msm_vidc_inst_capability *capability;
  5427. if (!inst || !inst->capabilities) {
  5428. d_vpr_e("%s: invalid params\n", __func__);
  5429. return -EINVAL;
  5430. }
  5431. capability = inst->capabilities;
  5432. if (is_secure_session(inst))
  5433. max_mbpf = capability->cap[SECURE_MBPF].max;
  5434. else if (is_encode_session(inst) && capability->cap[LOSSLESS].value)
  5435. max_mbpf = capability->cap[LOSSLESS_MBPF].max;
  5436. else
  5437. max_mbpf = capability->cap[MBPF].max;
  5438. /* check current session mbpf */
  5439. mbpf = msm_vidc_get_mbs_per_frame(inst);
  5440. if (mbpf > max_mbpf) {
  5441. i_vpr_e(inst, "%s: session overloaded. needed %u, max %u", __func__,
  5442. mbpf, max_mbpf);
  5443. return -ENOMEM;
  5444. }
  5445. return 0;
  5446. }
  5447. u32 msm_vidc_get_max_bitrate(struct msm_vidc_inst* inst)
  5448. {
  5449. struct msm_vidc_inst_capability *capability;
  5450. u32 max_bitrate = 0x7fffffff;
  5451. if (!inst || !inst->capabilities) {
  5452. d_vpr_e("%s: invalid params\n", __func__);
  5453. return -EINVAL;
  5454. }
  5455. capability = inst->capabilities;
  5456. if (inst->capabilities->cap[LOWLATENCY_MODE].value)
  5457. max_bitrate = min(max_bitrate,
  5458. (u32)inst->capabilities->cap[LOWLATENCY_MAX_BITRATE].max);
  5459. if (inst->capabilities->cap[ALL_INTRA].value)
  5460. max_bitrate = min(max_bitrate,
  5461. (u32)inst->capabilities->cap[ALLINTRA_MAX_BITRATE].max);
  5462. if (inst->codec == MSM_VIDC_HEVC) {
  5463. max_bitrate = min(max_bitrate,
  5464. (u32)inst->capabilities->cap[CABAC_MAX_BITRATE].max);
  5465. } else if (inst->codec == MSM_VIDC_H264) {
  5466. if (inst->capabilities->cap[ENTROPY_MODE].value ==
  5467. V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC)
  5468. max_bitrate = min(max_bitrate,
  5469. (u32)inst->capabilities->cap[CAVLC_MAX_BITRATE].max);
  5470. else
  5471. max_bitrate = min(max_bitrate,
  5472. (u32)inst->capabilities->cap[CABAC_MAX_BITRATE].max);
  5473. }
  5474. if (max_bitrate == 0x7fffffff || !max_bitrate)
  5475. max_bitrate = min(max_bitrate, (u32)inst->capabilities->cap[BIT_RATE].max);
  5476. return max_bitrate;
  5477. }
  5478. static bool msm_vidc_allow_image_encode_session(struct msm_vidc_inst *inst)
  5479. {
  5480. struct msm_vidc_inst_capability *capability;
  5481. struct v4l2_format *fmt;
  5482. u32 min_width, min_height, max_width, max_height, pix_fmt, profile;
  5483. bool allow = false;
  5484. if (!inst || !inst->capabilities) {
  5485. d_vpr_e("%s: invalid params\n", __func__);
  5486. return false;
  5487. }
  5488. capability = inst->capabilities;
  5489. if (!is_image_encode_session(inst)) {
  5490. i_vpr_e(inst, "%s: not an image encode session\n", __func__);
  5491. return false;
  5492. }
  5493. pix_fmt = capability->cap[PIX_FMTS].value;
  5494. profile = capability->cap[PROFILE].value;
  5495. /* is input with & height is in allowed range */
  5496. min_width = capability->cap[FRAME_WIDTH].min;
  5497. max_width = capability->cap[FRAME_WIDTH].max;
  5498. min_height = capability->cap[FRAME_HEIGHT].min;
  5499. max_height = capability->cap[FRAME_HEIGHT].max;
  5500. fmt = &inst->fmts[INPUT_PORT];
  5501. if (!in_range(fmt->fmt.pix_mp.width, min_width, max_width) ||
  5502. !in_range(fmt->fmt.pix_mp.height, min_height, max_height)) {
  5503. i_vpr_e(inst, "unsupported wxh [%u x %u], allowed [%u x %u] to [%u x %u]\n",
  5504. fmt->fmt.pix_mp.width, fmt->fmt.pix_mp.height,
  5505. min_width, min_height, max_width, max_height);
  5506. allow = false;
  5507. goto exit;
  5508. }
  5509. /* is linear yuv color fmt */
  5510. allow = is_linear_yuv_colorformat(pix_fmt);
  5511. if (!allow) {
  5512. i_vpr_e(inst, "%s: compressed fmt: %#x\n", __func__, pix_fmt);
  5513. goto exit;
  5514. }
  5515. /* is output grid dimension */
  5516. fmt = &inst->fmts[OUTPUT_PORT];
  5517. allow = fmt->fmt.pix_mp.width == HEIC_GRID_DIMENSION;
  5518. allow &= fmt->fmt.pix_mp.height == HEIC_GRID_DIMENSION;
  5519. if (!allow) {
  5520. i_vpr_e(inst, "%s: output is not a grid dimension: %u x %u\n", __func__,
  5521. fmt->fmt.pix_mp.width, fmt->fmt.pix_mp.height);
  5522. goto exit;
  5523. }
  5524. /* is bitrate mode CQ */
  5525. allow = capability->cap[BITRATE_MODE].value == V4L2_MPEG_VIDEO_BITRATE_MODE_CQ;
  5526. if (!allow) {
  5527. i_vpr_e(inst, "%s: bitrate mode is not CQ: %#x\n", __func__,
  5528. capability->cap[BITRATE_MODE].value);
  5529. goto exit;
  5530. }
  5531. /* is all intra */
  5532. allow = !capability->cap[GOP_SIZE].value;
  5533. allow &= !capability->cap[B_FRAME].value;
  5534. if (!allow) {
  5535. i_vpr_e(inst, "%s: not all intra: gop: %u, bframe: %u\n", __func__,
  5536. capability->cap[GOP_SIZE].value, capability->cap[B_FRAME].value);
  5537. goto exit;
  5538. }
  5539. /* is time delta based rc disabled */
  5540. allow = !capability->cap[TIME_DELTA_BASED_RC].value;
  5541. if (!allow) {
  5542. i_vpr_e(inst, "%s: time delta based rc not disabled: %#x\n", __func__,
  5543. capability->cap[TIME_DELTA_BASED_RC].value);
  5544. goto exit;
  5545. }
  5546. /* is frame skip mode disabled */
  5547. allow = !capability->cap[FRAME_SKIP_MODE].value;
  5548. if (!allow) {
  5549. i_vpr_e(inst, "%s: frame skip mode not disabled: %#x\n", __func__,
  5550. capability->cap[FRAME_SKIP_MODE].value);
  5551. goto exit;
  5552. }
  5553. exit:
  5554. if (!allow)
  5555. i_vpr_e(inst, "%s: current session not allowed\n", __func__);
  5556. return allow;
  5557. }
  5558. static int msm_vidc_check_resolution_supported(struct msm_vidc_inst *inst)
  5559. {
  5560. struct msm_vidc_inst_capability *capability;
  5561. u32 width = 0, height = 0, min_width, min_height,
  5562. max_width, max_height;
  5563. bool is_interlaced = false;
  5564. if (!inst || !inst->capabilities) {
  5565. d_vpr_e("%s: invalid params\n", __func__);
  5566. return -EINVAL;
  5567. }
  5568. capability = inst->capabilities;
  5569. if (is_decode_session(inst)) {
  5570. width = inst->fmts[INPUT_PORT].fmt.pix_mp.width;
  5571. height = inst->fmts[INPUT_PORT].fmt.pix_mp.height;
  5572. } else if (is_encode_session(inst)) {
  5573. width = inst->crop.width;
  5574. height = inst->crop.height;
  5575. }
  5576. if (is_secure_session(inst)) {
  5577. min_width = capability->cap[SECURE_FRAME_WIDTH].min;
  5578. max_width = capability->cap[SECURE_FRAME_WIDTH].max;
  5579. min_height = capability->cap[SECURE_FRAME_HEIGHT].min;
  5580. max_height = capability->cap[SECURE_FRAME_HEIGHT].max;
  5581. } else if (is_encode_session(inst) && capability->cap[LOSSLESS].value) {
  5582. min_width = capability->cap[LOSSLESS_FRAME_WIDTH].min;
  5583. max_width = capability->cap[LOSSLESS_FRAME_WIDTH].max;
  5584. min_height = capability->cap[LOSSLESS_FRAME_HEIGHT].min;
  5585. max_height = capability->cap[LOSSLESS_FRAME_HEIGHT].max;
  5586. } else {
  5587. min_width = capability->cap[FRAME_WIDTH].min;
  5588. max_width = capability->cap[FRAME_WIDTH].max;
  5589. min_height = capability->cap[FRAME_HEIGHT].min;
  5590. max_height = capability->cap[FRAME_HEIGHT].max;
  5591. }
  5592. /* reject odd resolution session */
  5593. if (is_encode_session(inst) &&
  5594. (is_odd(width) || is_odd(height) ||
  5595. is_odd(inst->compose.width) ||
  5596. is_odd(inst->compose.height))) {
  5597. i_vpr_e(inst, "%s: resolution is not even. wxh [%u x %u], compose [%u x %u]\n",
  5598. __func__, width, height, inst->compose.width,
  5599. inst->compose.height);
  5600. return -EINVAL;
  5601. }
  5602. /* check if input width and height is in supported range */
  5603. if (is_decode_session(inst) || is_encode_session(inst)) {
  5604. if (!in_range(width, min_width, max_width) ||
  5605. !in_range(height, min_height, max_height)) {
  5606. i_vpr_e(inst,
  5607. "%s: unsupported input wxh [%u x %u], allowed range: [%u x %u] to [%u x %u]\n",
  5608. __func__, width, height, min_width,
  5609. min_height, max_width, max_height);
  5610. return -EINVAL;
  5611. }
  5612. }
  5613. /* check interlace supported resolution */
  5614. is_interlaced = capability->cap[CODED_FRAMES].value == CODED_FRAMES_INTERLACE;
  5615. if (is_interlaced && (width > INTERLACE_WIDTH_MAX || height > INTERLACE_HEIGHT_MAX ||
  5616. NUM_MBS_PER_FRAME(width, height) > INTERLACE_MB_PER_FRAME_MAX)) {
  5617. i_vpr_e(inst, "%s: unsupported interlace wxh [%u x %u], max [%u x %u]\n",
  5618. __func__, width, height, INTERLACE_WIDTH_MAX, INTERLACE_HEIGHT_MAX);
  5619. return -EINVAL;
  5620. }
  5621. return 0;
  5622. }
  5623. static int msm_vidc_check_max_sessions(struct msm_vidc_inst *inst)
  5624. {
  5625. u32 width = 0, height = 0;
  5626. u32 num_1080p_sessions = 0, num_4k_sessions = 0, num_8k_sessions = 0;
  5627. struct msm_vidc_inst *i;
  5628. struct msm_vidc_core *core;
  5629. if (!inst || !inst->core) {
  5630. d_vpr_e("%s: invalid params\n", __func__);
  5631. return -EINVAL;
  5632. }
  5633. core = inst->core;
  5634. if (!core->capabilities) {
  5635. i_vpr_e(inst, "%s: invalid params\n", __func__);
  5636. return -EINVAL;
  5637. }
  5638. core_lock(core, __func__);
  5639. list_for_each_entry(i, &core->instances, list) {
  5640. /* skip image sessions count */
  5641. if (is_image_session(i))
  5642. continue;
  5643. if (is_decode_session(i)) {
  5644. width = i->fmts[INPUT_PORT].fmt.pix_mp.width;
  5645. height = i->fmts[INPUT_PORT].fmt.pix_mp.height;
  5646. } else if (is_encode_session(i)) {
  5647. width = i->crop.width;
  5648. height = i->crop.height;
  5649. }
  5650. /*
  5651. * one 8k session equals to 64 720p sessions in reality.
  5652. * So for one 8k session the number of 720p sessions will
  5653. * exceed max supported session count(16), hence one 8k session
  5654. * will be rejected as well.
  5655. * Therefore, treat one 8k session equal to two 4k sessions and
  5656. * one 4k session equal to two 1080p sessions and
  5657. * one 1080p session equal to two 720p sessions. This equation
  5658. * will make one 8k session equal to eight 720p sessions
  5659. * which looks good.
  5660. *
  5661. * Do not treat resolutions above 4k as 8k session instead
  5662. * treat (4K + half 4k) above as 8k session
  5663. */
  5664. if (res_is_greater_than(width, height, 4096 + (4096 >> 1), 2176 + (2176 >> 1))) {
  5665. num_8k_sessions += 1;
  5666. num_4k_sessions += 2;
  5667. num_1080p_sessions += 4;
  5668. } else if (res_is_greater_than(width, height, 1920 + (1920 >> 1), 1088 + (1088 >> 1))) {
  5669. num_4k_sessions += 1;
  5670. num_1080p_sessions += 2;
  5671. } else if (res_is_greater_than(width, height, 1280 + (1280 >> 1), 736 + (736 >> 1))) {
  5672. num_1080p_sessions += 1;
  5673. }
  5674. }
  5675. core_unlock(core, __func__);
  5676. if (num_8k_sessions > core->capabilities[MAX_NUM_8K_SESSIONS].value) {
  5677. i_vpr_e(inst, "%s: total 8k sessions %d, exceeded max limit %d\n",
  5678. __func__, num_8k_sessions,
  5679. core->capabilities[MAX_NUM_8K_SESSIONS].value);
  5680. return -ENOMEM;
  5681. }
  5682. if (num_4k_sessions > core->capabilities[MAX_NUM_4K_SESSIONS].value) {
  5683. i_vpr_e(inst, "%s: total 4K sessions %d, exceeded max limit %d\n",
  5684. __func__, num_4k_sessions,
  5685. core->capabilities[MAX_NUM_4K_SESSIONS].value);
  5686. return -ENOMEM;
  5687. }
  5688. if (num_1080p_sessions > core->capabilities[MAX_NUM_1080P_SESSIONS].value) {
  5689. i_vpr_e(inst, "%s: total 1080p sessions %d, exceeded max limit %d\n",
  5690. __func__, num_1080p_sessions,
  5691. core->capabilities[MAX_NUM_1080P_SESSIONS].value);
  5692. return -ENOMEM;
  5693. }
  5694. return 0;
  5695. }
  5696. int msm_vidc_check_session_supported(struct msm_vidc_inst *inst)
  5697. {
  5698. bool allow = false;
  5699. int rc = 0;
  5700. if (!inst) {
  5701. d_vpr_e("%s: invalid params\n", __func__);
  5702. return -EINVAL;
  5703. }
  5704. if (is_image_session(inst) && is_secure_session(inst)) {
  5705. i_vpr_e(inst, "%s: secure image session not supported\n", __func__);
  5706. rc = -EINVAL;
  5707. goto exit;
  5708. }
  5709. rc = msm_vidc_check_core_mbps(inst);
  5710. if (rc)
  5711. goto exit;
  5712. rc = msm_vidc_check_core_mbpf(inst);
  5713. if (rc)
  5714. goto exit;
  5715. rc = msm_vidc_check_inst_mbpf(inst);
  5716. if (rc)
  5717. goto exit;
  5718. rc = msm_vidc_check_resolution_supported(inst);
  5719. if (rc)
  5720. goto exit;
  5721. /* check image capabilities */
  5722. if (is_image_encode_session(inst)) {
  5723. allow = msm_vidc_allow_image_encode_session(inst);
  5724. if (!allow) {
  5725. rc = -EINVAL;
  5726. goto exit;
  5727. }
  5728. }
  5729. rc = msm_vidc_check_max_sessions(inst);
  5730. if (rc)
  5731. goto exit;
  5732. exit:
  5733. if (rc) {
  5734. i_vpr_e(inst, "%s: current session not supported\n", __func__);
  5735. msm_vidc_print_insts_info(inst->core);
  5736. }
  5737. return rc;
  5738. }
  5739. int msm_vidc_check_scaling_supported(struct msm_vidc_inst *inst)
  5740. {
  5741. u32 iwidth, owidth, iheight, oheight, ds_factor;
  5742. if (!inst || !inst->capabilities) {
  5743. d_vpr_e("%s: invalid params\n", __func__);
  5744. return -EINVAL;
  5745. }
  5746. if (is_image_session(inst) || is_decode_session(inst)) {
  5747. i_vpr_h(inst, "%s: Scaling is supported for encode session only\n", __func__);
  5748. return 0;
  5749. }
  5750. if (!is_scaling_enabled(inst)) {
  5751. i_vpr_h(inst, "%s: Scaling not enabled. skip scaling check\n", __func__);
  5752. return 0;
  5753. }
  5754. iwidth = inst->crop.width;
  5755. iheight = inst->crop.height;
  5756. owidth = inst->compose.width;
  5757. oheight = inst->compose.height;
  5758. ds_factor = inst->capabilities->cap[SCALE_FACTOR].value;
  5759. /* upscaling: encoder doesnot support upscaling */
  5760. if (owidth > iwidth || oheight > iheight) {
  5761. i_vpr_e(inst, "%s: upscale not supported: input [%u x %u], output [%u x %u]\n",
  5762. __func__, iwidth, iheight, owidth, oheight);
  5763. return -EINVAL;
  5764. }
  5765. /* downscaling: only supported upto 1/8 of width & 1/8 of height */
  5766. if (iwidth > owidth * ds_factor || iheight > oheight * ds_factor) {
  5767. i_vpr_e(inst,
  5768. "%s: unsupported ratio: input [%u x %u], output [%u x %u], ratio %u\n",
  5769. __func__, iwidth, iheight, owidth, oheight, ds_factor);
  5770. return -EINVAL;
  5771. }
  5772. return 0;
  5773. }
/*
 * struct msm_vidc_fw_query_params - one property to query from firmware.
 * @hfi_prop_name: HFI_PROP_* identifier placed in the GET_PROPERTY request.
 * @port: HFI_PORT_* the query is addressed to (HFI_PORT_NONE for
 *        session-wide properties).
 */
struct msm_vidc_fw_query_params {
	u32 hfi_prop_name;
	u32 port;
};
  5778. int msm_vidc_get_properties(struct msm_vidc_inst *inst)
  5779. {
  5780. int rc = 0;
  5781. int i;
  5782. static const struct msm_vidc_fw_query_params fw_query_params[] = {
  5783. {HFI_PROP_STAGE, HFI_PORT_NONE},
  5784. {HFI_PROP_PIPE, HFI_PORT_NONE},
  5785. {HFI_PROP_QUALITY_MODE, HFI_PORT_BITSTREAM}
  5786. };
  5787. if (!inst || !inst->capabilities) {
  5788. d_vpr_e("%s: invalid params\n", __func__);
  5789. return -EINVAL;
  5790. }
  5791. for (i = 0; i < ARRAY_SIZE(fw_query_params); i++) {
  5792. if (is_decode_session(inst)) {
  5793. if (fw_query_params[i].hfi_prop_name == HFI_PROP_QUALITY_MODE)
  5794. continue;
  5795. }
  5796. i_vpr_l(inst, "%s: querying fw for property %#x\n", __func__,
  5797. fw_query_params[i].hfi_prop_name);
  5798. rc = venus_hfi_session_property(inst,
  5799. fw_query_params[i].hfi_prop_name,
  5800. (HFI_HOST_FLAGS_RESPONSE_REQUIRED |
  5801. HFI_HOST_FLAGS_INTR_REQUIRED |
  5802. HFI_HOST_FLAGS_GET_PROPERTY),
  5803. fw_query_params[i].port,
  5804. HFI_PAYLOAD_NONE,
  5805. NULL,
  5806. 0);
  5807. if (rc)
  5808. return rc;
  5809. }
  5810. return 0;
  5811. }
  5812. int msm_vidc_create_input_metadata_buffer(struct msm_vidc_inst *inst, int fd)
  5813. {
  5814. int rc = 0;
  5815. struct msm_vidc_buffer *buf = NULL;
  5816. struct msm_vidc_buffers *buffers;
  5817. struct dma_buf *dma_buf;
  5818. if (!inst) {
  5819. d_vpr_e("%s: invalid params\n", __func__);
  5820. return -EINVAL;
  5821. }
  5822. if (fd < 0) {
  5823. i_vpr_e(inst, "%s: invalid input metadata buffer fd %d\n",
  5824. __func__, fd);
  5825. return -EINVAL;
  5826. }
  5827. buffers = msm_vidc_get_buffers(inst, MSM_VIDC_BUF_INPUT_META, __func__);
  5828. if (!buffers)
  5829. return -EINVAL;
  5830. buf = msm_memory_pool_alloc(inst, MSM_MEM_POOL_BUFFER);
  5831. if (!buf) {
  5832. i_vpr_e(inst, "%s: buffer pool alloc failed\n", __func__);
  5833. return -EINVAL;
  5834. }
  5835. INIT_LIST_HEAD(&buf->list);
  5836. buf->type = MSM_VIDC_BUF_INPUT_META;
  5837. buf->index = INT_MAX;
  5838. buf->fd = fd;
  5839. dma_buf = msm_vidc_memory_get_dmabuf(inst, fd);
  5840. if (!dma_buf) {
  5841. rc = -ENOMEM;
  5842. goto error_dma_buf;
  5843. }
  5844. buf->dmabuf = dma_buf;
  5845. buf->data_size = dma_buf->size;
  5846. buf->buffer_size = dma_buf->size;
  5847. buf->attr |= MSM_VIDC_ATTR_DEFERRED;
  5848. rc = msm_vidc_map_driver_buf(inst, buf);
  5849. if (rc)
  5850. goto error_map;
  5851. list_add_tail(&buf->list, &buffers->list);
  5852. return rc;
  5853. error_map:
  5854. msm_vidc_memory_put_dmabuf(inst, buf->dmabuf);
  5855. error_dma_buf:
  5856. msm_memory_pool_free(inst, buf);
  5857. return rc;
  5858. }
  5859. int msm_vidc_update_input_meta_buffer_index(struct msm_vidc_inst *inst,
  5860. struct vb2_buffer *vb2)
  5861. {
  5862. int rc = 0;
  5863. bool found = false;
  5864. struct msm_vidc_buffer *buf = NULL;
  5865. struct msm_vidc_buffers *buffers;
  5866. if (!inst || !vb2) {
  5867. d_vpr_e("%s: invalid params\n", __func__);
  5868. return -EINVAL;
  5869. }
  5870. if (vb2->type != INPUT_MPLANE)
  5871. return 0;
  5872. buffers = msm_vidc_get_buffers(inst, MSM_VIDC_BUF_INPUT_META, __func__);
  5873. if (!buffers)
  5874. return -EINVAL;
  5875. list_for_each_entry(buf, &buffers->list, list) {
  5876. if (buf->index == INT_MAX) {
  5877. buf->index = vb2->index;
  5878. found = true;
  5879. break;
  5880. }
  5881. }
  5882. if (!found) {
  5883. i_vpr_e(inst, "%s: missing input metabuffer for index %d\n",
  5884. __func__, vb2->index);
  5885. rc = -EINVAL;
  5886. }
  5887. return rc;
  5888. }