msm_vidc_driver.c 173 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
762786279628062816282628362846285628662876288628962906291629262936294629562966297629862996300630163026303630463056306630763086309631063116312631363146315631663176318631963206321632263236324632563266327632863296330633163326333633463356336633763386339634063416342634363446345634663476348634963506351635263536354635563566357635863596360636163626363636463656366636763686369637063716372637363746375637663776378637963806381638263836384638563866387638863896390639163926393639463956396639763986399640064016402640364046405640664076408640964106411641264136414641564166417641864196420642164226423642464256426642764286429643064316432643364346435643664376438643964406441644264436444644564466447644864496450645164526453645464556456645764586459646064616462646364646465646664676468646964706471647264736474647564766477647864796480648164826483648464856486648764886489649064916492649364946495649664976498649965006501650265036504650565066507650865096510651165126513651465156516651765186519652065216522652365246525652665276528652965306531653265336534653565366537653865396540654165426543654465456546654765486549655065516552655365546555655665576558655965606561656265636564656565666567656865696570657165726573657465756576657765786579658065816582658365846585658665876588658965906591659265936594659565966597659865996600660166026603660466056606660766086609661066116612661366146615661666176618661966206621662266236624662566266627662866296630
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2020-2022, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/iommu.h>
  6. #include <linux/workqueue.h>
  7. #include <media/v4l2_vidc_extensions.h>
  8. #include "msm_media_info.h"
  9. #include "msm_vidc_driver.h"
  10. #include "msm_vidc_platform.h"
  11. #include "msm_vidc_internal.h"
  12. #include "msm_vidc_control.h"
  13. #include "msm_vidc_memory.h"
  14. #include "msm_vidc_power.h"
  15. #include "msm_vidc_debug.h"
  16. #include "msm_vidc_power.h"
  17. #include "msm_vidc.h"
  18. #include "msm_vdec.h"
  19. #include "msm_venc.h"
  20. #include "msm_vidc_fence.h"
  21. #include "venus_hfi.h"
  22. #include "venus_hfi_response.h"
  23. #include "hfi_packet.h"
  24. #include "msm_vidc_events.h"
  25. extern struct msm_vidc_core *g_core;
/*
 * True when @val is an odd positive number. C99 '%' truncates toward
 * zero, so a negative odd value yields -1 and reports false here.
 */
#define is_odd(val) ((val) % 2 == 1)
/* True when @min <= @val <= @max (both bounds inclusive). */
#define in_range(val, min, max) (((min) <= (val)) && ((val) <= (max)))
  28. #define COUNT_BITS(a, out) { \
  29. while ((a) >= 1) { \
  30. (out) += (a) & (1); \
  31. (a) >>= (1); \
  32. } \
  33. }
/*
 * Field layout of the 64-bit SSR (SubSystem Restart) trigger value:
 *   bits [3:0]   - SSR type          (SSR_TYPE >> SSR_TYPE_SHIFT)
 *   bits [7:4]   - sub-client id     (SSR_SUB_CLIENT_ID >> 4)
 *   bits [63:32] - address payload   (SSR_ADDR_ID >> SSR_ADDR_SHIFT)
 */
#define SSR_TYPE 0x0000000F
#define SSR_TYPE_SHIFT 0
#define SSR_SUB_CLIENT_ID 0x000000F0
#define SSR_SUB_CLIENT_ID_SHIFT 4
#define SSR_ADDR_ID 0xFFFFFFFF00000000
#define SSR_ADDR_SHIFT 32
/*
 * Field layout of the 64-bit stability-test trigger value
 * (same packing scheme as the SSR value above):
 *   bits [3:0]   - stability test type
 *   bits [7:4]   - sub-client id
 *   bits [63:32] - payload
 */
#define STABILITY_TYPE 0x0000000F
#define STABILITY_TYPE_SHIFT 0
#define STABILITY_SUB_CLIENT_ID 0x000000F0
#define STABILITY_SUB_CLIENT_ID_SHIFT 4
#define STABILITY_PAYLOAD_ID 0xFFFFFFFF00000000
#define STABILITY_PAYLOAD_SHIFT 32
/*
 * Pairs an instance-capability id with its printable name; entries of
 * this type populate the cap_name_arr lookup table below.
 */
struct msm_vidc_cap_name {
	enum msm_vidc_inst_capability_type cap_id; /* capability identifier */
	char *name;                                /* human-readable name */
};
/* do not modify the cap names as they are used in test scripts */
/*
 * Lookup table translating enum msm_vidc_inst_capability_type values
 * to printable names, one entry per capability id, bracketed by
 * INST_CAP_NONE and INST_CAP_MAX (META_CAP_MAX separates the metadata
 * caps from the rest).
 */
static const struct msm_vidc_cap_name cap_name_arr[] = {
	{INST_CAP_NONE,             "INST_CAP_NONE"             },
	{META_SEQ_HDR_NAL,          "META_SEQ_HDR_NAL"          },
	{META_BITSTREAM_RESOLUTION, "META_BITSTREAM_RESOLUTION" },
	{META_CROP_OFFSETS,         "META_CROP_OFFSETS"         },
	{META_DPB_MISR,             "META_DPB_MISR"             },
	{META_OPB_MISR,             "META_OPB_MISR"             },
	{META_INTERLACE,            "META_INTERLACE"            },
	{META_OUTBUF_FENCE,         "META_OUTBUF_FENCE"         },
	{META_LTR_MARK_USE,         "META_LTR_MARK_USE"         },
	{META_TIMESTAMP,            "META_TIMESTAMP"            },
	{META_CONCEALED_MB_CNT,     "META_CONCEALED_MB_CNT"     },
	{META_HIST_INFO,            "META_HIST_INFO"            },
	{META_PICTURE_TYPE,         "META_PICTURE_TYPE"         },
	{META_SEI_MASTERING_DISP,   "META_SEI_MASTERING_DISP"   },
	{META_SEI_CLL,              "META_SEI_CLL"              },
	{META_HDR10PLUS,            "META_HDR10PLUS"            },
	{META_BUF_TAG,              "META_BUF_TAG"              },
	{META_DPB_TAG_LIST,         "META_DPB_TAG_LIST"         },
	{META_SUBFRAME_OUTPUT,      "META_SUBFRAME_OUTPUT"      },
	{META_ENC_QP_METADATA,      "META_ENC_QP_METADATA"      },
	{META_DEC_QP_METADATA,      "META_DEC_QP_METADATA"      },
	{META_MAX_NUM_REORDER_FRAMES, "META_MAX_NUM_REORDER_FRAMES"},
	{META_EVA_STATS,            "META_EVA_STATS"            },
	{META_ROI_INFO,             "META_ROI_INFO"             },
	{META_SALIENCY_INFO,        "META_SALIENCY_INFO"        },
	{META_CAP_MAX,              "META_CAP_MAX"              },
	{FRAME_WIDTH,               "FRAME_WIDTH"               },
	{LOSSLESS_FRAME_WIDTH,      "LOSSLESS_FRAME_WIDTH"      },
	{SECURE_FRAME_WIDTH,        "SECURE_FRAME_WIDTH"        },
	{FRAME_HEIGHT,              "FRAME_HEIGHT"              },
	{LOSSLESS_FRAME_HEIGHT,     "LOSSLESS_FRAME_HEIGHT"     },
	{SECURE_FRAME_HEIGHT,       "SECURE_FRAME_HEIGHT"       },
	{PIX_FMTS,                  "PIX_FMTS"                  },
	{MIN_BUFFERS_INPUT,         "MIN_BUFFERS_INPUT"         },
	{MIN_BUFFERS_OUTPUT,        "MIN_BUFFERS_OUTPUT"        },
	{MBPF,                      "MBPF"                      },
	{BATCH_MBPF,                "BATCH_MBPF"                },
	{BATCH_FPS,                 "BATCH_FPS"                 },
	{LOSSLESS_MBPF,             "LOSSLESS_MBPF"             },
	{SECURE_MBPF,               "SECURE_MBPF"               },
	{MBPS,                      "MBPS"                      },
	{POWER_SAVE_MBPS,           "POWER_SAVE_MBPS"           },
	/* NOTE(review): "CHECK_MPBS" looks like a transposition of
	 * "CHECK_MBPS", but per the warning above these strings are
	 * consumed by test scripts, so it is deliberately left as-is. */
	{CHECK_MBPS,                "CHECK_MPBS"                },
	{FRAME_RATE,                "FRAME_RATE"                },
	{OPERATING_RATE,            "OPERATING_RATE"            },
	{INPUT_RATE,                "INPUT_RATE"                },
	{TIMESTAMP_RATE,            "TIMESTAMP_RATE"            },
	{SCALE_FACTOR,              "SCALE_FACTOR"              },
	{MB_CYCLES_VSP,             "MB_CYCLES_VSP"             },
	{MB_CYCLES_VPP,             "MB_CYCLES_VPP"             },
	{MB_CYCLES_LP,              "MB_CYCLES_LP"              },
	{MB_CYCLES_FW,              "MB_CYCLES_FW"              },
	{MB_CYCLES_FW_VPP,          "MB_CYCLES_FW_VPP"          },
	{SECURE_MODE,               "SECURE_MODE"               },
	{FENCE_ID,                  "FENCE_ID"                  },
	{FENCE_FD,                  "FENCE_FD"                  },
	{TS_REORDER,                "TS_REORDER"                },
	{SLICE_INTERFACE,           "SLICE_INTERFACE"           },
	{HFLIP,                     "HFLIP"                     },
	{VFLIP,                     "VFLIP"                     },
	{ROTATION,                  "ROTATION"                  },
	{SUPER_FRAME,               "SUPER_FRAME"               },
	{HEADER_MODE,               "HEADER_MODE"               },
	{PREPEND_SPSPPS_TO_IDR,     "PREPEND_SPSPPS_TO_IDR"     },
	{WITHOUT_STARTCODE,         "WITHOUT_STARTCODE"         },
	{NAL_LENGTH_FIELD,          "NAL_LENGTH_FIELD"          },
	{REQUEST_I_FRAME,           "REQUEST_I_FRAME"           },
	{BITRATE_MODE,              "BITRATE_MODE"              },
	{LOSSLESS,                  "LOSSLESS"                  },
	{FRAME_SKIP_MODE,           "FRAME_SKIP_MODE"           },
	{FRAME_RC_ENABLE,           "FRAME_RC_ENABLE"           },
	{GOP_CLOSURE,               "GOP_CLOSURE"               },
	{CSC,                       "CSC"                       },
	{CSC_CUSTOM_MATRIX,         "CSC_CUSTOM_MATRIX"         },
	{USE_LTR,                   "USE_LTR"                   },
	{MARK_LTR,                  "MARK_LTR"                  },
	{BASELAYER_PRIORITY,        "BASELAYER_PRIORITY"        },
	{IR_TYPE,                   "IR_TYPE"                   },
	{AU_DELIMITER,              "AU_DELIMITER"              },
	{GRID,                      "GRID"                      },
	{I_FRAME_MIN_QP,            "I_FRAME_MIN_QP"            },
	{P_FRAME_MIN_QP,            "P_FRAME_MIN_QP"            },
	{B_FRAME_MIN_QP,            "B_FRAME_MIN_QP"            },
	{I_FRAME_MAX_QP,            "I_FRAME_MAX_QP"            },
	{P_FRAME_MAX_QP,            "P_FRAME_MAX_QP"            },
	{B_FRAME_MAX_QP,            "B_FRAME_MAX_QP"            },
	{LAYER_TYPE,                "LAYER_TYPE"                },
	{LAYER_ENABLE,              "LAYER_ENABLE"              },
	{L0_BR,                     "L0_BR"                     },
	{L1_BR,                     "L1_BR"                     },
	{L2_BR,                     "L2_BR"                     },
	{L3_BR,                     "L3_BR"                     },
	{L4_BR,                     "L4_BR"                     },
	{L5_BR,                     "L5_BR"                     },
	{LEVEL,                     "LEVEL"                     },
	{HEVC_TIER,                 "HEVC_TIER"                 },
	{AV1_TIER,                  "AV1_TIER"                  },
	{DISPLAY_DELAY_ENABLE,      "DISPLAY_DELAY_ENABLE"      },
	{DISPLAY_DELAY,             "DISPLAY_DELAY"             },
	{CONCEAL_COLOR_8BIT,        "CONCEAL_COLOR_8BIT"        },
	{CONCEAL_COLOR_10BIT,       "CONCEAL_COLOR_10BIT"       },
	{LF_MODE,                   "LF_MODE"                   },
	{LF_ALPHA,                  "LF_ALPHA"                  },
	{LF_BETA,                   "LF_BETA"                   },
	{SLICE_MAX_BYTES,           "SLICE_MAX_BYTES"           },
	{SLICE_MAX_MB,              "SLICE_MAX_MB"              },
	{MB_RC,                     "MB_RC"                     },
	{CHROMA_QP_INDEX_OFFSET,    "CHROMA_QP_INDEX_OFFSET"    },
	{PIPE,                      "PIPE"                      },
	{POC,                       "POC"                       },
	{CODED_FRAMES,              "CODED_FRAMES"              },
	{BIT_DEPTH,                 "BIT_DEPTH"                 },
	{CODEC_CONFIG,              "CODEC_CONFIG"              },
	{BITSTREAM_SIZE_OVERWRITE,  "BITSTREAM_SIZE_OVERWRITE"  },
	{THUMBNAIL_MODE,            "THUMBNAIL_MODE"            },
	{DEFAULT_HEADER,            "DEFAULT_HEADER"            },
	{RAP_FRAME,                 "RAP_FRAME"                 },
	{SEQ_CHANGE_AT_SYNC_FRAME,  "SEQ_CHANGE_AT_SYNC_FRAME"  },
	{QUALITY_MODE,              "QUALITY_MODE"              },
	{PRIORITY,                  "PRIORITY"                  },
	{FIRMWARE_PRIORITY_OFFSET,  "FIRMWARE_PRIORITY_OFFSET"  },
	{CRITICAL_PRIORITY,         "CRITICAL_PRIORITY"         },
	{RESERVE_DURATION,          "RESERVE_DURATION"          },
	{DPB_LIST,                  "DPB_LIST"                  },
	{FILM_GRAIN,                "FILM_GRAIN"                },
	{SUPER_BLOCK,               "SUPER_BLOCK"               },
	{DRAP,                      "DRAP"                      },
	{INPUT_METADATA_FD,         "INPUT_METADATA_FD"         },
	{INPUT_META_VIA_REQUEST,    "INPUT_META_VIA_REQUEST"    },
	{ENC_IP_CR,                 "ENC_IP_CR"                 },
	{COMPLEXITY,                "COMPLEXITY"                },
	{PROFILE,                   "PROFILE"                   },
	{ENH_LAYER_COUNT,           "ENH_LAYER_COUNT"           },
	{BIT_RATE,                  "BIT_RATE"                  },
	{LOWLATENCY_MODE,           "LOWLATENCY_MODE"           },
	{GOP_SIZE,                  "GOP_SIZE"                  },
	{B_FRAME,                   "B_FRAME"                   },
	{ALL_INTRA,                 "ALL_INTRA"                 },
	{MIN_QUALITY,               "MIN_QUALITY"               },
	{CONTENT_ADAPTIVE_CODING,   "CONTENT_ADAPTIVE_CODING"   },
	{BLUR_TYPES,                "BLUR_TYPES"                },
	{REQUEST_PREPROCESS,        "REQUEST_PREPROCESS"        },
	{SLICE_MODE,                "SLICE_MODE"                },
	{MIN_FRAME_QP,              "MIN_FRAME_QP"              },
	{MAX_FRAME_QP,              "MAX_FRAME_QP"              },
	{I_FRAME_QP,                "I_FRAME_QP"                },
	{P_FRAME_QP,                "P_FRAME_QP"                },
	{B_FRAME_QP,                "B_FRAME_QP"                },
	{TIME_DELTA_BASED_RC,       "TIME_DELTA_BASED_RC"       },
	{CONSTANT_QUALITY,          "CONSTANT_QUALITY"          },
	{VBV_DELAY,                 "VBV_DELAY"                 },
	{PEAK_BITRATE,              "PEAK_BITRATE"              },
	{ENTROPY_MODE,              "ENTROPY_MODE"              },
	{TRANSFORM_8X8,             "TRANSFORM_8X8"             },
	{STAGE,                     "STAGE"                     },
	{LTR_COUNT,                 "LTR_COUNT"                 },
	{IR_PERIOD,                 "IR_PERIOD"                 },
	{BITRATE_BOOST,             "BITRATE_BOOST"             },
	{BLUR_RESOLUTION,           "BLUR_RESOLUTION"           },
	{OUTPUT_ORDER,              "OUTPUT_ORDER"              },
	{INPUT_BUF_HOST_MAX_COUNT,  "INPUT_BUF_HOST_MAX_COUNT"  },
	{OUTPUT_BUF_HOST_MAX_COUNT, "OUTPUT_BUF_HOST_MAX_COUNT" },
	{DELIVERY_MODE,             "DELIVERY_MODE"             },
	{INST_CAP_MAX,              "INST_CAP_MAX"              },
};
  217. const char *cap_name(enum msm_vidc_inst_capability_type cap_id)
  218. {
  219. const char *name = "UNKNOWN CAP";
  220. if (cap_id > ARRAY_SIZE(cap_name_arr))
  221. goto exit;
  222. if (cap_name_arr[cap_id].cap_id != cap_id)
  223. goto exit;
  224. name = cap_name_arr[cap_id].name;
  225. exit:
  226. return name;
  227. }
/* pairs a driver buffer type with its printable name; used by buf_name() */
struct msm_vidc_buf_type_name {
	enum msm_vidc_buffer_type type;
	char *name;
};
/*
 * Lookup table for buf_name(). buf_name() indexes it with (type - 1)
 * and rejects type == 0, so entry order must mirror the enum values
 * starting at the first buffer type.
 */
static const struct msm_vidc_buf_type_name buf_type_name_arr[] = {
	{MSM_VIDC_BUF_INPUT, "INPUT" },
	{MSM_VIDC_BUF_OUTPUT, "OUTPUT" },
	{MSM_VIDC_BUF_INPUT_META, "INPUT_META" },
	{MSM_VIDC_BUF_OUTPUT_META, "OUTPUT_META" },
	{MSM_VIDC_BUF_READ_ONLY, "READ_ONLY" },
	{MSM_VIDC_BUF_QUEUE, "QUEUE" },
	{MSM_VIDC_BUF_BIN, "BIN" },
	{MSM_VIDC_BUF_ARP, "ARP" },
	{MSM_VIDC_BUF_COMV, "COMV" },
	{MSM_VIDC_BUF_NON_COMV, "NON_COMV" },
	{MSM_VIDC_BUF_LINE, "LINE" },
	{MSM_VIDC_BUF_DPB, "DPB" },
	{MSM_VIDC_BUF_PERSIST, "PERSIST" },
	{MSM_VIDC_BUF_VPSS, "VPSS" },
	{MSM_VIDC_BUF_PARTIAL_DATA, "PARTIAL_DATA" },
};
  249. const char *buf_name(enum msm_vidc_buffer_type type)
  250. {
  251. const char *name = "UNKNOWN BUF";
  252. if (!type || type > ARRAY_SIZE(buf_type_name_arr))
  253. goto exit;
  254. if (buf_type_name_arr[type - 1].type != type)
  255. goto exit;
  256. name = buf_type_name_arr[type - 1].name;
  257. exit:
  258. return name;
  259. }
/* pairs an allow/deny decision with its printable name; used by allow_name() */
struct msm_vidc_allow_name {
	enum msm_vidc_allow allow;
	char *name;
};
/*
 * Lookup table for allow_name(); indexed directly by the enum value,
 * so entry order must mirror enum msm_vidc_allow.
 */
static const struct msm_vidc_allow_name inst_allow_name_arr[] = {
	{MSM_VIDC_DISALLOW, "MSM_VIDC_DISALLOW" },
	{MSM_VIDC_ALLOW, "MSM_VIDC_ALLOW" },
	{MSM_VIDC_DEFER, "MSM_VIDC_DEFER" },
	{MSM_VIDC_DISCARD, "MSM_VIDC_DISCARD" },
	{MSM_VIDC_IGNORE, "MSM_VIDC_IGNORE" },
};
  271. const char *allow_name(enum msm_vidc_allow allow)
  272. {
  273. const char *name = "UNKNOWN";
  274. if (allow > ARRAY_SIZE(inst_allow_name_arr))
  275. goto exit;
  276. if (inst_allow_name_arr[allow].allow != allow)
  277. goto exit;
  278. name = inst_allow_name_arr[allow].name;
  279. exit:
  280. return name;
  281. }
/* pairs an instance state with its printable name; used by state_name() */
struct msm_vidc_inst_state_name {
	enum msm_vidc_inst_state state;
	char *name;
};
/* do not modify the state names as it is used in test scripts */
/*
 * Lookup table for state_name(). state_name() indexes it with
 * (state - 1) and rejects state == 0, so entry order must mirror the
 * enum values starting at MSM_VIDC_OPEN.
 */
static const struct msm_vidc_inst_state_name inst_state_name_arr[] = {
	{MSM_VIDC_OPEN, "OPEN" },
	{MSM_VIDC_START_INPUT, "START_INPUT" },
	{MSM_VIDC_START_OUTPUT, "START_OUTPUT" },
	{MSM_VIDC_START, "START" },
	{MSM_VIDC_DRC, "DRC" },
	{MSM_VIDC_DRC_LAST_FLAG, "DRC_LAST_FLAG" },
	{MSM_VIDC_DRAIN, "DRAIN" },
	{MSM_VIDC_DRAIN_LAST_FLAG, "DRAIN_LAST_FLAG" },
	{MSM_VIDC_DRC_DRAIN, "DRC_DRAIN" },
	{MSM_VIDC_DRC_DRAIN_LAST_FLAG, "DRC_DRAIN_LAST_FLAG" },
	{MSM_VIDC_DRAIN_START_INPUT, "DRAIN_START_INPUT" },
	{MSM_VIDC_ERROR, "ERROR" },
};
  301. const char *state_name(enum msm_vidc_inst_state state)
  302. {
  303. const char *name = "UNKNOWN STATE";
  304. if (!state || state > ARRAY_SIZE(inst_state_name_arr))
  305. goto exit;
  306. if (inst_state_name_arr[state - 1].state != state)
  307. goto exit;
  308. name = inst_state_name_arr[state - 1].name;
  309. exit:
  310. return name;
  311. }
/* pairs a core state with its printable name; used by core_state_name() */
struct msm_vidc_core_state_name {
	enum msm_vidc_core_state state;
	char *name;
};
/*
 * Lookup table for core_state_name(); indexed directly by the enum
 * value (core states are 0-based), so entry order must mirror the enum.
 */
static const struct msm_vidc_core_state_name core_state_name_arr[] = {
	{MSM_VIDC_CORE_DEINIT, "CORE_DEINIT" },
	{MSM_VIDC_CORE_INIT_WAIT, "CORE_INIT_WAIT" },
	{MSM_VIDC_CORE_INIT, "CORE_INIT" },
};
  321. const char *core_state_name(enum msm_vidc_core_state state)
  322. {
  323. const char *name = "UNKNOWN STATE";
  324. if (state >= ARRAY_SIZE(core_state_name_arr))
  325. goto exit;
  326. if (core_state_name_arr[state].state != state)
  327. goto exit;
  328. name = core_state_name_arr[state].name;
  329. exit:
  330. return name;
  331. }
  332. const char *v4l2_type_name(u32 port)
  333. {
  334. switch (port) {
  335. case INPUT_MPLANE: return "INPUT";
  336. case OUTPUT_MPLANE: return "OUTPUT";
  337. case INPUT_META_PLANE: return "INPUT_META";
  338. case OUTPUT_META_PLANE: return "OUTPUT_META";
  339. }
  340. return "UNKNOWN";
  341. }
  342. const char *v4l2_pixelfmt_name(u32 pixfmt)
  343. {
  344. switch (pixfmt) {
  345. /* raw port: color format */
  346. case V4L2_PIX_FMT_NV12: return "NV12";
  347. case V4L2_PIX_FMT_NV21: return "NV21";
  348. case V4L2_PIX_FMT_VIDC_NV12C: return "NV12C";
  349. case V4L2_PIX_FMT_VIDC_P010: return "P010";
  350. case V4L2_PIX_FMT_VIDC_TP10C: return "TP10C";
  351. case V4L2_PIX_FMT_RGBA32: return "RGBA";
  352. case V4L2_PIX_FMT_VIDC_ARGB32C: return "RGBAC";
  353. /* bitstream port: codec type */
  354. case V4L2_PIX_FMT_H264: return "AVC";
  355. case V4L2_PIX_FMT_HEVC: return "HEVC";
  356. case V4L2_PIX_FMT_HEIC: return "HEIC";
  357. case V4L2_PIX_FMT_VP9: return "VP9";
  358. case V4L2_PIX_FMT_AV1: return "AV1";
  359. /* meta port */
  360. case V4L2_META_FMT_VIDC: return "META";
  361. }
  362. return "UNKNOWN";
  363. }
  364. void print_vidc_buffer(u32 tag, const char *tag_str, const char *str, struct msm_vidc_inst *inst,
  365. struct msm_vidc_buffer *vbuf)
  366. {
  367. struct dma_buf *dbuf;
  368. struct inode *f_inode;
  369. unsigned long inode_num = 0;
  370. long ref_count = -1;
  371. if (!inst || !vbuf || !tag_str || !str)
  372. return;
  373. dbuf = (struct dma_buf *)vbuf->dmabuf;
  374. if (dbuf && dbuf->file) {
  375. f_inode = file_inode(dbuf->file);
  376. if (f_inode) {
  377. inode_num = f_inode->i_ino;
  378. ref_count = file_count(dbuf->file);
  379. }
  380. }
  381. dprintk_inst(tag, tag_str, inst,
  382. "%s: %s: idx %2d fd %3d off %d daddr %#llx inode %8lu ref %2ld size %8d filled %8d flags %#x ts %8lld attr %#x counts(etb ebd ftb fbd) %4llu %4llu %4llu %4llu\n",
  383. str, buf_name(vbuf->type),
  384. vbuf->index, vbuf->fd, vbuf->data_offset,
  385. vbuf->device_addr, inode_num, ref_count, vbuf->buffer_size, vbuf->data_size,
  386. vbuf->flags, vbuf->timestamp, vbuf->attr, inst->debug_count.etb,
  387. inst->debug_count.ebd, inst->debug_count.ftb, inst->debug_count.fbd);
  388. trace_msm_v4l2_vidc_buffer_event_log(inst, str, buf_name(vbuf->type), vbuf,
  389. inode_num, ref_count);
  390. }
  391. void print_vb2_buffer(const char *str, struct msm_vidc_inst *inst,
  392. struct vb2_buffer *vb2)
  393. {
  394. if (!inst || !vb2)
  395. return;
  396. if (vb2->type == INPUT_MPLANE || vb2->type == OUTPUT_MPLANE) {
  397. i_vpr_e(inst,
  398. "%s: %s: idx %2d fd %d off %d size %d filled %d\n",
  399. str, vb2->type == INPUT_MPLANE ? "INPUT" : "OUTPUT",
  400. vb2->index, vb2->planes[0].m.fd,
  401. vb2->planes[0].data_offset, vb2->planes[0].length,
  402. vb2->planes[0].bytesused);
  403. } else if (vb2->type == INPUT_META_PLANE || vb2->type == OUTPUT_META_PLANE) {
  404. i_vpr_e(inst,
  405. "%s: %s: idx %2d fd %d off %d size %d filled %d\n",
  406. str, vb2->type == INPUT_MPLANE ? "INPUT_META" : "OUTPUT_META",
  407. vb2->index, vb2->planes[0].m.fd,
  408. vb2->planes[0].data_offset, vb2->planes[0].length,
  409. vb2->planes[0].bytesused);
  410. }
  411. }
  412. static void __fatal_error(bool fatal)
  413. {
  414. WARN_ON(fatal);
  415. }
  416. static int __strict_check(struct msm_vidc_core *core, const char *function)
  417. {
  418. bool fatal = !mutex_is_locked(&core->lock);
  419. __fatal_error(fatal);
  420. if (fatal)
  421. d_vpr_e("%s: strict check failed\n", function);
  422. return fatal ? -EINVAL : 0;
  423. }
  424. enum msm_vidc_buffer_type v4l2_type_to_driver(u32 type, const char *func)
  425. {
  426. enum msm_vidc_buffer_type buffer_type = 0;
  427. switch (type) {
  428. case INPUT_MPLANE:
  429. buffer_type = MSM_VIDC_BUF_INPUT;
  430. break;
  431. case OUTPUT_MPLANE:
  432. buffer_type = MSM_VIDC_BUF_OUTPUT;
  433. break;
  434. case INPUT_META_PLANE:
  435. buffer_type = MSM_VIDC_BUF_INPUT_META;
  436. break;
  437. case OUTPUT_META_PLANE:
  438. buffer_type = MSM_VIDC_BUF_OUTPUT_META;
  439. break;
  440. default:
  441. d_vpr_e("%s: invalid v4l2 buffer type %#x\n", func, type);
  442. break;
  443. }
  444. return buffer_type;
  445. }
  446. u32 v4l2_type_from_driver(enum msm_vidc_buffer_type buffer_type,
  447. const char *func)
  448. {
  449. u32 type = 0;
  450. switch (buffer_type) {
  451. case MSM_VIDC_BUF_INPUT:
  452. type = INPUT_MPLANE;
  453. break;
  454. case MSM_VIDC_BUF_OUTPUT:
  455. type = OUTPUT_MPLANE;
  456. break;
  457. case MSM_VIDC_BUF_INPUT_META:
  458. type = INPUT_META_PLANE;
  459. break;
  460. case MSM_VIDC_BUF_OUTPUT_META:
  461. type = OUTPUT_META_PLANE;
  462. break;
  463. default:
  464. d_vpr_e("%s: invalid driver buffer type %d\n",
  465. func, buffer_type);
  466. break;
  467. }
  468. return type;
  469. }
  470. enum msm_vidc_codec_type v4l2_codec_to_driver(u32 v4l2_codec, const char *func)
  471. {
  472. enum msm_vidc_codec_type codec = 0;
  473. switch (v4l2_codec) {
  474. case V4L2_PIX_FMT_H264:
  475. codec = MSM_VIDC_H264;
  476. break;
  477. case V4L2_PIX_FMT_HEVC:
  478. codec = MSM_VIDC_HEVC;
  479. break;
  480. case V4L2_PIX_FMT_VP9:
  481. codec = MSM_VIDC_VP9;
  482. break;
  483. case V4L2_PIX_FMT_AV1:
  484. codec = MSM_VIDC_AV1;
  485. break;
  486. case V4L2_PIX_FMT_HEIC:
  487. codec = MSM_VIDC_HEIC;
  488. break;
  489. default:
  490. d_vpr_h("%s: invalid v4l2 codec %#x\n", func, v4l2_codec);
  491. break;
  492. }
  493. return codec;
  494. }
  495. u32 v4l2_codec_from_driver(enum msm_vidc_codec_type codec, const char *func)
  496. {
  497. u32 v4l2_codec = 0;
  498. switch (codec) {
  499. case MSM_VIDC_H264:
  500. v4l2_codec = V4L2_PIX_FMT_H264;
  501. break;
  502. case MSM_VIDC_HEVC:
  503. v4l2_codec = V4L2_PIX_FMT_HEVC;
  504. break;
  505. case MSM_VIDC_VP9:
  506. v4l2_codec = V4L2_PIX_FMT_VP9;
  507. break;
  508. case MSM_VIDC_AV1:
  509. v4l2_codec = V4L2_PIX_FMT_AV1;
  510. break;
  511. case MSM_VIDC_HEIC:
  512. v4l2_codec = V4L2_PIX_FMT_HEIC;
  513. break;
  514. default:
  515. d_vpr_e("%s: invalid driver codec %#x\n", func, codec);
  516. break;
  517. }
  518. return v4l2_codec;
  519. }
  520. enum msm_vidc_colorformat_type v4l2_colorformat_to_driver(u32 v4l2_colorformat,
  521. const char *func)
  522. {
  523. enum msm_vidc_colorformat_type colorformat = 0;
  524. switch (v4l2_colorformat) {
  525. case V4L2_PIX_FMT_NV12:
  526. colorformat = MSM_VIDC_FMT_NV12;
  527. break;
  528. case V4L2_PIX_FMT_NV21:
  529. colorformat = MSM_VIDC_FMT_NV21;
  530. break;
  531. case V4L2_PIX_FMT_VIDC_NV12C:
  532. colorformat = MSM_VIDC_FMT_NV12C;
  533. break;
  534. case V4L2_PIX_FMT_VIDC_TP10C:
  535. colorformat = MSM_VIDC_FMT_TP10C;
  536. break;
  537. case V4L2_PIX_FMT_RGBA32:
  538. colorformat = MSM_VIDC_FMT_RGBA8888;
  539. break;
  540. case V4L2_PIX_FMT_VIDC_ARGB32C:
  541. colorformat = MSM_VIDC_FMT_RGBA8888C;
  542. break;
  543. case V4L2_PIX_FMT_VIDC_P010:
  544. colorformat = MSM_VIDC_FMT_P010;
  545. break;
  546. default:
  547. d_vpr_e("%s: invalid v4l2 color format %#x\n",
  548. func, v4l2_colorformat);
  549. break;
  550. }
  551. return colorformat;
  552. }
  553. u32 v4l2_colorformat_from_driver(enum msm_vidc_colorformat_type colorformat,
  554. const char *func)
  555. {
  556. u32 v4l2_colorformat = 0;
  557. switch (colorformat) {
  558. case MSM_VIDC_FMT_NV12:
  559. v4l2_colorformat = V4L2_PIX_FMT_NV12;
  560. break;
  561. case MSM_VIDC_FMT_NV21:
  562. v4l2_colorformat = V4L2_PIX_FMT_NV21;
  563. break;
  564. case MSM_VIDC_FMT_NV12C:
  565. v4l2_colorformat = V4L2_PIX_FMT_VIDC_NV12C;
  566. break;
  567. case MSM_VIDC_FMT_TP10C:
  568. v4l2_colorformat = V4L2_PIX_FMT_VIDC_TP10C;
  569. break;
  570. case MSM_VIDC_FMT_RGBA8888:
  571. v4l2_colorformat = V4L2_PIX_FMT_RGBA32;
  572. break;
  573. case MSM_VIDC_FMT_RGBA8888C:
  574. v4l2_colorformat = V4L2_PIX_FMT_VIDC_ARGB32C;
  575. break;
  576. case MSM_VIDC_FMT_P010:
  577. v4l2_colorformat = V4L2_PIX_FMT_VIDC_P010;
  578. break;
  579. default:
  580. d_vpr_e("%s: invalid driver color format %#x\n",
  581. func, colorformat);
  582. break;
  583. }
  584. return v4l2_colorformat;
  585. }
  586. u32 v4l2_color_primaries_to_driver(struct msm_vidc_inst *inst,
  587. u32 v4l2_primaries, const char *func)
  588. {
  589. u32 vidc_color_primaries = MSM_VIDC_PRIMARIES_RESERVED;
  590. switch(v4l2_primaries) {
  591. case V4L2_COLORSPACE_DEFAULT:
  592. vidc_color_primaries = MSM_VIDC_PRIMARIES_RESERVED;
  593. break;
  594. case V4L2_COLORSPACE_REC709:
  595. vidc_color_primaries = MSM_VIDC_PRIMARIES_BT709;
  596. break;
  597. case V4L2_COLORSPACE_470_SYSTEM_M:
  598. vidc_color_primaries = MSM_VIDC_PRIMARIES_BT470_SYSTEM_M;
  599. break;
  600. case V4L2_COLORSPACE_470_SYSTEM_BG:
  601. vidc_color_primaries = MSM_VIDC_PRIMARIES_BT470_SYSTEM_BG;
  602. break;
  603. case V4L2_COLORSPACE_SMPTE170M:
  604. vidc_color_primaries = MSM_VIDC_PRIMARIES_BT601_525;
  605. break;
  606. case V4L2_COLORSPACE_SMPTE240M:
  607. vidc_color_primaries = MSM_VIDC_PRIMARIES_SMPTE_ST240M;
  608. break;
  609. case V4L2_COLORSPACE_VIDC_GENERIC_FILM:
  610. vidc_color_primaries = MSM_VIDC_PRIMARIES_GENERIC_FILM;
  611. break;
  612. case V4L2_COLORSPACE_BT2020:
  613. vidc_color_primaries = MSM_VIDC_PRIMARIES_BT2020;
  614. break;
  615. case V4L2_COLORSPACE_DCI_P3:
  616. vidc_color_primaries = MSM_VIDC_PRIMARIES_SMPTE_RP431_2;
  617. break;
  618. case V4L2_COLORSPACE_VIDC_EG431:
  619. vidc_color_primaries = MSM_VIDC_PRIMARIES_SMPTE_EG431_1;
  620. break;
  621. case V4L2_COLORSPACE_VIDC_EBU_TECH:
  622. vidc_color_primaries = MSM_VIDC_PRIMARIES_SMPTE_EBU_TECH;
  623. break;
  624. default:
  625. i_vpr_e(inst, "%s: invalid v4l2 color primaries %d\n",
  626. func, v4l2_primaries);
  627. break;
  628. }
  629. return vidc_color_primaries;
  630. }
  631. u32 v4l2_color_primaries_from_driver(struct msm_vidc_inst *inst,
  632. u32 vidc_color_primaries, const char *func)
  633. {
  634. u32 v4l2_primaries = V4L2_COLORSPACE_DEFAULT;
  635. switch(vidc_color_primaries) {
  636. case MSM_VIDC_PRIMARIES_UNSPECIFIED:
  637. v4l2_primaries = V4L2_COLORSPACE_DEFAULT;
  638. break;
  639. case MSM_VIDC_PRIMARIES_BT709:
  640. v4l2_primaries = V4L2_COLORSPACE_REC709;
  641. break;
  642. case MSM_VIDC_PRIMARIES_BT470_SYSTEM_M:
  643. v4l2_primaries = V4L2_COLORSPACE_470_SYSTEM_M;
  644. break;
  645. case MSM_VIDC_PRIMARIES_BT470_SYSTEM_BG:
  646. v4l2_primaries = V4L2_COLORSPACE_470_SYSTEM_BG;
  647. break;
  648. case MSM_VIDC_PRIMARIES_BT601_525:
  649. v4l2_primaries = V4L2_COLORSPACE_SMPTE170M;
  650. break;
  651. case MSM_VIDC_PRIMARIES_SMPTE_ST240M:
  652. v4l2_primaries = V4L2_COLORSPACE_SMPTE240M;
  653. break;
  654. case MSM_VIDC_PRIMARIES_GENERIC_FILM:
  655. v4l2_primaries = V4L2_COLORSPACE_VIDC_GENERIC_FILM;
  656. break;
  657. case MSM_VIDC_PRIMARIES_BT2020:
  658. v4l2_primaries = V4L2_COLORSPACE_BT2020;
  659. break;
  660. case MSM_VIDC_PRIMARIES_SMPTE_RP431_2:
  661. v4l2_primaries = V4L2_COLORSPACE_DCI_P3;
  662. break;
  663. case MSM_VIDC_PRIMARIES_SMPTE_EG431_1:
  664. v4l2_primaries = V4L2_COLORSPACE_VIDC_EG431;
  665. break;
  666. case MSM_VIDC_PRIMARIES_SMPTE_EBU_TECH:
  667. v4l2_primaries = V4L2_COLORSPACE_VIDC_EBU_TECH;
  668. break;
  669. default:
  670. i_vpr_e(inst, "%s: invalid hfi color primaries %d\n",
  671. func, vidc_color_primaries);
  672. break;
  673. }
  674. return v4l2_primaries;
  675. }
  676. u32 v4l2_transfer_char_to_driver(struct msm_vidc_inst *inst,
  677. u32 v4l2_transfer_char, const char *func)
  678. {
  679. u32 vidc_transfer_char = MSM_VIDC_TRANSFER_RESERVED;
  680. switch(v4l2_transfer_char) {
  681. case V4L2_XFER_FUNC_DEFAULT:
  682. vidc_transfer_char = MSM_VIDC_TRANSFER_RESERVED;
  683. break;
  684. case V4L2_XFER_FUNC_709:
  685. vidc_transfer_char = MSM_VIDC_TRANSFER_BT709;
  686. break;
  687. case V4L2_XFER_FUNC_VIDC_BT470_SYSTEM_M:
  688. vidc_transfer_char = MSM_VIDC_TRANSFER_BT470_SYSTEM_M;
  689. break;
  690. case V4L2_XFER_FUNC_VIDC_BT470_SYSTEM_BG:
  691. vidc_transfer_char = MSM_VIDC_TRANSFER_BT470_SYSTEM_BG;
  692. break;
  693. case V4L2_XFER_FUNC_VIDC_BT601_525_OR_625:
  694. vidc_transfer_char = MSM_VIDC_TRANSFER_BT601_525_OR_625;
  695. break;
  696. case V4L2_XFER_FUNC_SMPTE240M:
  697. vidc_transfer_char = MSM_VIDC_TRANSFER_SMPTE_ST240M;
  698. break;
  699. case V4L2_XFER_FUNC_VIDC_LINEAR:
  700. vidc_transfer_char = MSM_VIDC_TRANSFER_LINEAR;
  701. break;
  702. case V4L2_XFER_FUNC_VIDC_XVYCC:
  703. vidc_transfer_char = MSM_VIDC_TRANSFER_XVYCC;
  704. break;
  705. case V4L2_XFER_FUNC_VIDC_BT1361:
  706. vidc_transfer_char = MSM_VIDC_TRANSFER_BT1361_0;
  707. break;
  708. case V4L2_XFER_FUNC_SRGB:
  709. vidc_transfer_char = MSM_VIDC_TRANSFER_SRGB_SYCC;
  710. break;
  711. case V4L2_XFER_FUNC_VIDC_BT2020:
  712. vidc_transfer_char = MSM_VIDC_TRANSFER_BT2020_14;
  713. break;
  714. case V4L2_XFER_FUNC_SMPTE2084:
  715. vidc_transfer_char = MSM_VIDC_TRANSFER_SMPTE_ST2084_PQ;
  716. break;
  717. case V4L2_XFER_FUNC_VIDC_ST428:
  718. vidc_transfer_char = MSM_VIDC_TRANSFER_SMPTE_ST428_1;
  719. break;
  720. case V4L2_XFER_FUNC_VIDC_HLG:
  721. vidc_transfer_char = MSM_VIDC_TRANSFER_BT2100_2_HLG;
  722. break;
  723. default:
  724. i_vpr_e(inst, "%s: invalid v4l2 transfer char %d\n",
  725. func, v4l2_transfer_char);
  726. break;
  727. }
  728. return vidc_transfer_char;
  729. }
  730. u32 v4l2_transfer_char_from_driver(struct msm_vidc_inst *inst,
  731. u32 vidc_transfer_char, const char *func)
  732. {
  733. u32 v4l2_transfer_char = V4L2_XFER_FUNC_DEFAULT;
  734. switch(vidc_transfer_char) {
  735. case MSM_VIDC_TRANSFER_UNSPECIFIED:
  736. v4l2_transfer_char = V4L2_XFER_FUNC_DEFAULT;
  737. break;
  738. case MSM_VIDC_TRANSFER_BT709:
  739. v4l2_transfer_char = V4L2_XFER_FUNC_709;
  740. break;
  741. case MSM_VIDC_TRANSFER_BT470_SYSTEM_M:
  742. v4l2_transfer_char = V4L2_XFER_FUNC_VIDC_BT470_SYSTEM_M;
  743. break;
  744. case MSM_VIDC_TRANSFER_BT470_SYSTEM_BG:
  745. v4l2_transfer_char = V4L2_XFER_FUNC_VIDC_BT470_SYSTEM_BG;
  746. break;
  747. case MSM_VIDC_TRANSFER_BT601_525_OR_625:
  748. v4l2_transfer_char = V4L2_XFER_FUNC_VIDC_BT601_525_OR_625;
  749. break;
  750. case MSM_VIDC_TRANSFER_SMPTE_ST240M:
  751. v4l2_transfer_char = V4L2_XFER_FUNC_SMPTE240M;
  752. break;
  753. case MSM_VIDC_TRANSFER_LINEAR:
  754. v4l2_transfer_char = V4L2_XFER_FUNC_VIDC_LINEAR;
  755. break;
  756. case MSM_VIDC_TRANSFER_XVYCC:
  757. v4l2_transfer_char = V4L2_XFER_FUNC_VIDC_XVYCC;
  758. break;
  759. case MSM_VIDC_TRANSFER_BT1361_0:
  760. v4l2_transfer_char = V4L2_XFER_FUNC_VIDC_BT1361;
  761. break;
  762. case MSM_VIDC_TRANSFER_SRGB_SYCC:
  763. v4l2_transfer_char = V4L2_XFER_FUNC_SRGB;
  764. break;
  765. case MSM_VIDC_TRANSFER_BT2020_14:
  766. case MSM_VIDC_TRANSFER_BT2020_15:
  767. v4l2_transfer_char = V4L2_XFER_FUNC_VIDC_BT2020;
  768. break;
  769. case MSM_VIDC_TRANSFER_SMPTE_ST2084_PQ:
  770. v4l2_transfer_char = V4L2_XFER_FUNC_SMPTE2084;
  771. break;
  772. case MSM_VIDC_TRANSFER_SMPTE_ST428_1:
  773. v4l2_transfer_char = V4L2_XFER_FUNC_VIDC_ST428;
  774. break;
  775. case MSM_VIDC_TRANSFER_BT2100_2_HLG:
  776. v4l2_transfer_char = V4L2_XFER_FUNC_VIDC_HLG;
  777. break;
  778. default:
  779. i_vpr_e(inst, "%s: invalid hfi transfer char %d\n",
  780. func, vidc_transfer_char);
  781. break;
  782. }
  783. return v4l2_transfer_char;
  784. }
  785. u32 v4l2_matrix_coeff_to_driver(struct msm_vidc_inst *inst,
  786. u32 v4l2_matrix_coeff, const char *func)
  787. {
  788. u32 vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_RESERVED;
  789. switch(v4l2_matrix_coeff) {
  790. case V4L2_YCBCR_ENC_DEFAULT:
  791. vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_RESERVED;
  792. break;
  793. case V4L2_YCBCR_VIDC_SRGB_OR_SMPTE_ST428:
  794. vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_SRGB_SMPTE_ST428_1;
  795. break;
  796. case V4L2_YCBCR_ENC_709:
  797. case V4L2_YCBCR_ENC_XV709:
  798. vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_BT709;
  799. break;
  800. case V4L2_YCBCR_VIDC_FCC47_73_682:
  801. vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_FCC_TITLE_47;
  802. break;
  803. case V4L2_YCBCR_ENC_XV601:
  804. vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_BT470_SYS_BG_OR_BT601_625;
  805. break;
  806. case V4L2_YCBCR_ENC_601:
  807. vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_BT601_525_BT1358_525_OR_625;
  808. break;
  809. case V4L2_YCBCR_ENC_SMPTE240M:
  810. vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_SMPTE_ST240;
  811. break;
  812. case V4L2_YCBCR_ENC_BT2020:
  813. vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_BT2020_NON_CONSTANT;
  814. break;
  815. case V4L2_YCBCR_ENC_BT2020_CONST_LUM:
  816. vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_BT2020_CONSTANT;
  817. break;
  818. default:
  819. i_vpr_e(inst, "%s: invalid v4l2 matrix coeff %d\n",
  820. func, v4l2_matrix_coeff);
  821. break;
  822. }
  823. return vidc_matrix_coeff;
  824. }
  825. u32 v4l2_matrix_coeff_from_driver(struct msm_vidc_inst *inst,
  826. u32 vidc_matrix_coeff, const char *func)
  827. {
  828. u32 v4l2_matrix_coeff = V4L2_YCBCR_ENC_DEFAULT;
  829. switch(vidc_matrix_coeff) {
  830. case MSM_VIDC_MATRIX_COEFF_SRGB_SMPTE_ST428_1:
  831. v4l2_matrix_coeff = V4L2_YCBCR_VIDC_SRGB_OR_SMPTE_ST428;
  832. break;
  833. case MSM_VIDC_MATRIX_COEFF_BT709:
  834. v4l2_matrix_coeff = V4L2_YCBCR_ENC_709;
  835. break;
  836. case MSM_VIDC_MATRIX_COEFF_UNSPECIFIED:
  837. v4l2_matrix_coeff = V4L2_YCBCR_ENC_DEFAULT;
  838. break;
  839. case MSM_VIDC_MATRIX_COEFF_FCC_TITLE_47:
  840. v4l2_matrix_coeff = V4L2_YCBCR_VIDC_FCC47_73_682;
  841. break;
  842. case MSM_VIDC_MATRIX_COEFF_BT470_SYS_BG_OR_BT601_625:
  843. v4l2_matrix_coeff = V4L2_YCBCR_ENC_XV601;
  844. break;
  845. case MSM_VIDC_MATRIX_COEFF_BT601_525_BT1358_525_OR_625:
  846. v4l2_matrix_coeff = V4L2_YCBCR_ENC_601;
  847. break;
  848. case MSM_VIDC_MATRIX_COEFF_SMPTE_ST240:
  849. v4l2_matrix_coeff = V4L2_YCBCR_ENC_SMPTE240M;
  850. break;
  851. case MSM_VIDC_MATRIX_COEFF_BT2020_NON_CONSTANT:
  852. v4l2_matrix_coeff = V4L2_YCBCR_ENC_BT2020;
  853. break;
  854. case MSM_VIDC_MATRIX_COEFF_BT2020_CONSTANT:
  855. v4l2_matrix_coeff = V4L2_YCBCR_ENC_BT2020_CONST_LUM;
  856. break;
  857. default:
  858. i_vpr_e(inst, "%s: invalid hfi matrix coeff %d\n",
  859. func, vidc_matrix_coeff);
  860. break;
  861. }
  862. return v4l2_matrix_coeff;
  863. }
  864. int v4l2_type_to_driver_port(struct msm_vidc_inst *inst, u32 type,
  865. const char *func)
  866. {
  867. int port;
  868. if (type == INPUT_MPLANE) {
  869. port = INPUT_PORT;
  870. } else if (type == INPUT_META_PLANE) {
  871. port = INPUT_META_PORT;
  872. } else if (type == OUTPUT_MPLANE) {
  873. port = OUTPUT_PORT;
  874. } else if (type == OUTPUT_META_PLANE) {
  875. port = OUTPUT_META_PORT;
  876. } else {
  877. i_vpr_e(inst, "%s: port not found for v4l2 type %d\n",
  878. func, type);
  879. port = -EINVAL;
  880. }
  881. return port;
  882. }
  883. u32 msm_vidc_get_buffer_region(struct msm_vidc_inst *inst,
  884. enum msm_vidc_buffer_type buffer_type, const char *func)
  885. {
  886. u32 region = MSM_VIDC_NON_SECURE;
  887. if (!is_secure_session(inst)) {
  888. switch (buffer_type) {
  889. case MSM_VIDC_BUF_ARP:
  890. region = MSM_VIDC_SECURE_NONPIXEL;
  891. break;
  892. case MSM_VIDC_BUF_INPUT:
  893. if (is_encode_session(inst))
  894. region = MSM_VIDC_NON_SECURE_PIXEL;
  895. else
  896. region = MSM_VIDC_NON_SECURE;
  897. break;
  898. case MSM_VIDC_BUF_OUTPUT:
  899. if (is_encode_session(inst))
  900. region = MSM_VIDC_NON_SECURE;
  901. else
  902. region = MSM_VIDC_NON_SECURE_PIXEL;
  903. break;
  904. case MSM_VIDC_BUF_DPB:
  905. case MSM_VIDC_BUF_VPSS:
  906. case MSM_VIDC_BUF_PARTIAL_DATA:
  907. region = MSM_VIDC_NON_SECURE_PIXEL;
  908. break;
  909. case MSM_VIDC_BUF_INPUT_META:
  910. case MSM_VIDC_BUF_OUTPUT_META:
  911. case MSM_VIDC_BUF_BIN:
  912. case MSM_VIDC_BUF_COMV:
  913. case MSM_VIDC_BUF_NON_COMV:
  914. case MSM_VIDC_BUF_LINE:
  915. case MSM_VIDC_BUF_PERSIST:
  916. region = MSM_VIDC_NON_SECURE;
  917. break;
  918. default:
  919. i_vpr_e(inst, "%s: invalid driver buffer type %d\n",
  920. func, buffer_type);
  921. }
  922. } else {
  923. switch (buffer_type) {
  924. case MSM_VIDC_BUF_INPUT:
  925. if (is_encode_session(inst))
  926. region = MSM_VIDC_SECURE_PIXEL;
  927. else
  928. region = MSM_VIDC_SECURE_BITSTREAM;
  929. break;
  930. case MSM_VIDC_BUF_OUTPUT:
  931. if (is_encode_session(inst))
  932. region = MSM_VIDC_SECURE_BITSTREAM;
  933. else
  934. region = MSM_VIDC_SECURE_PIXEL;
  935. break;
  936. case MSM_VIDC_BUF_INPUT_META:
  937. case MSM_VIDC_BUF_OUTPUT_META:
  938. region = MSM_VIDC_NON_SECURE;
  939. break;
  940. case MSM_VIDC_BUF_DPB:
  941. case MSM_VIDC_BUF_VPSS:
  942. case MSM_VIDC_BUF_PARTIAL_DATA:
  943. region = MSM_VIDC_SECURE_PIXEL;
  944. break;
  945. case MSM_VIDC_BUF_BIN:
  946. region = MSM_VIDC_SECURE_BITSTREAM;
  947. break;
  948. case MSM_VIDC_BUF_ARP:
  949. case MSM_VIDC_BUF_COMV:
  950. case MSM_VIDC_BUF_NON_COMV:
  951. case MSM_VIDC_BUF_LINE:
  952. case MSM_VIDC_BUF_PERSIST:
  953. region = MSM_VIDC_SECURE_NONPIXEL;
  954. break;
  955. default:
  956. i_vpr_e(inst, "%s: invalid driver buffer type %d\n",
  957. func, buffer_type);
  958. }
  959. }
  960. return region;
  961. }
  962. struct msm_vidc_buffers *msm_vidc_get_buffers(
  963. struct msm_vidc_inst *inst, enum msm_vidc_buffer_type buffer_type,
  964. const char *func)
  965. {
  966. switch (buffer_type) {
  967. case MSM_VIDC_BUF_INPUT:
  968. return &inst->buffers.input;
  969. case MSM_VIDC_BUF_INPUT_META:
  970. return &inst->buffers.input_meta;
  971. case MSM_VIDC_BUF_OUTPUT:
  972. return &inst->buffers.output;
  973. case MSM_VIDC_BUF_OUTPUT_META:
  974. return &inst->buffers.output_meta;
  975. case MSM_VIDC_BUF_READ_ONLY:
  976. return &inst->buffers.read_only;
  977. case MSM_VIDC_BUF_BIN:
  978. return &inst->buffers.bin;
  979. case MSM_VIDC_BUF_ARP:
  980. return &inst->buffers.arp;
  981. case MSM_VIDC_BUF_COMV:
  982. return &inst->buffers.comv;
  983. case MSM_VIDC_BUF_NON_COMV:
  984. return &inst->buffers.non_comv;
  985. case MSM_VIDC_BUF_LINE:
  986. return &inst->buffers.line;
  987. case MSM_VIDC_BUF_DPB:
  988. return &inst->buffers.dpb;
  989. case MSM_VIDC_BUF_PERSIST:
  990. return &inst->buffers.persist;
  991. case MSM_VIDC_BUF_VPSS:
  992. return &inst->buffers.vpss;
  993. case MSM_VIDC_BUF_PARTIAL_DATA:
  994. return &inst->buffers.partial_data;
  995. case MSM_VIDC_BUF_QUEUE:
  996. return NULL;
  997. default:
  998. i_vpr_e(inst, "%s: invalid driver buffer type %d\n",
  999. func, buffer_type);
  1000. return NULL;
  1001. }
  1002. }
  1003. struct msm_vidc_mappings *msm_vidc_get_mappings(
  1004. struct msm_vidc_inst *inst, enum msm_vidc_buffer_type buffer_type,
  1005. const char *func)
  1006. {
  1007. switch (buffer_type) {
  1008. case MSM_VIDC_BUF_INPUT:
  1009. return &inst->mappings.input;
  1010. case MSM_VIDC_BUF_INPUT_META:
  1011. return &inst->mappings.input_meta;
  1012. case MSM_VIDC_BUF_OUTPUT:
  1013. return &inst->mappings.output;
  1014. case MSM_VIDC_BUF_OUTPUT_META:
  1015. return &inst->mappings.output_meta;
  1016. case MSM_VIDC_BUF_BIN:
  1017. return &inst->mappings.bin;
  1018. case MSM_VIDC_BUF_ARP:
  1019. return &inst->mappings.arp;
  1020. case MSM_VIDC_BUF_COMV:
  1021. return &inst->mappings.comv;
  1022. case MSM_VIDC_BUF_NON_COMV:
  1023. return &inst->mappings.non_comv;
  1024. case MSM_VIDC_BUF_LINE:
  1025. return &inst->mappings.line;
  1026. case MSM_VIDC_BUF_DPB:
  1027. return &inst->mappings.dpb;
  1028. case MSM_VIDC_BUF_PERSIST:
  1029. return &inst->mappings.persist;
  1030. case MSM_VIDC_BUF_VPSS:
  1031. return &inst->mappings.vpss;
  1032. case MSM_VIDC_BUF_PARTIAL_DATA:
  1033. return &inst->mappings.partial_data;
  1034. default:
  1035. i_vpr_e(inst, "%s: invalid driver buffer type %d\n",
  1036. func, buffer_type);
  1037. return NULL;
  1038. }
  1039. }
  1040. struct msm_vidc_allocations *msm_vidc_get_allocations(
  1041. struct msm_vidc_inst *inst, enum msm_vidc_buffer_type buffer_type,
  1042. const char *func)
  1043. {
  1044. switch (buffer_type) {
  1045. case MSM_VIDC_BUF_BIN:
  1046. return &inst->allocations.bin;
  1047. case MSM_VIDC_BUF_ARP:
  1048. return &inst->allocations.arp;
  1049. case MSM_VIDC_BUF_COMV:
  1050. return &inst->allocations.comv;
  1051. case MSM_VIDC_BUF_NON_COMV:
  1052. return &inst->allocations.non_comv;
  1053. case MSM_VIDC_BUF_LINE:
  1054. return &inst->allocations.line;
  1055. case MSM_VIDC_BUF_DPB:
  1056. return &inst->allocations.dpb;
  1057. case MSM_VIDC_BUF_PERSIST:
  1058. return &inst->allocations.persist;
  1059. case MSM_VIDC_BUF_VPSS:
  1060. return &inst->allocations.vpss;
  1061. case MSM_VIDC_BUF_PARTIAL_DATA:
  1062. return &inst->allocations.partial_data;
  1063. default:
  1064. i_vpr_e(inst, "%s: invalid driver buffer type %d\n",
  1065. func, buffer_type);
  1066. return NULL;
  1067. }
  1068. }
  1069. bool res_is_greater_than(u32 width, u32 height,
  1070. u32 ref_width, u32 ref_height)
  1071. {
  1072. u32 num_mbs = NUM_MBS_PER_FRAME(height, width);
  1073. u32 max_side = max(ref_width, ref_height);
  1074. if (num_mbs > NUM_MBS_PER_FRAME(ref_height, ref_width) ||
  1075. width > max_side ||
  1076. height > max_side)
  1077. return true;
  1078. else
  1079. return false;
  1080. }
  1081. bool res_is_greater_than_or_equal_to(u32 width, u32 height,
  1082. u32 ref_width, u32 ref_height)
  1083. {
  1084. u32 num_mbs = NUM_MBS_PER_FRAME(height, width);
  1085. u32 max_side = max(ref_width, ref_height);
  1086. if (num_mbs >= NUM_MBS_PER_FRAME(ref_height, ref_width) ||
  1087. width >= max_side ||
  1088. height >= max_side)
  1089. return true;
  1090. else
  1091. return false;
  1092. }
  1093. bool res_is_less_than(u32 width, u32 height,
  1094. u32 ref_width, u32 ref_height)
  1095. {
  1096. u32 num_mbs = NUM_MBS_PER_FRAME(height, width);
  1097. u32 max_side = max(ref_width, ref_height);
  1098. if (num_mbs < NUM_MBS_PER_FRAME(ref_height, ref_width) &&
  1099. width < max_side &&
  1100. height < max_side)
  1101. return true;
  1102. else
  1103. return false;
  1104. }
  1105. bool res_is_less_than_or_equal_to(u32 width, u32 height,
  1106. u32 ref_width, u32 ref_height)
  1107. {
  1108. u32 num_mbs = NUM_MBS_PER_FRAME(height, width);
  1109. u32 max_side = max(ref_width, ref_height);
  1110. if (num_mbs <= NUM_MBS_PER_FRAME(ref_height, ref_width) &&
  1111. width <= max_side &&
  1112. height <= max_side)
  1113. return true;
  1114. else
  1115. return false;
  1116. }
  1117. int msm_vidc_change_core_state(struct msm_vidc_core *core,
  1118. enum msm_vidc_core_state request_state, const char *func)
  1119. {
  1120. if (!core) {
  1121. d_vpr_e("%s: invalid params\n", __func__);
  1122. return -EINVAL;
  1123. }
  1124. d_vpr_h("%s: core state changed to %s from %s\n",
  1125. func, core_state_name(request_state),
  1126. core_state_name(core->state));
  1127. core->state = request_state;
  1128. return 0;
  1129. }
/*
 * msm_vidc_change_inst_state() - move the session state machine to
 * @request_state on behalf of caller @func.
 *
 * Return: 0 on success (including the no-op case when the session is already
 * in error state), -EINVAL on invalid arguments.
 */
int msm_vidc_change_inst_state(struct msm_vidc_inst *inst,
	enum msm_vidc_inst_state request_state, const char *func)
{
	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	if (!request_state) {
		i_vpr_e(inst, "%s: invalid request state\n", func);
		return -EINVAL;
	}
	/* error state is sticky: silently succeed without transitioning */
	if (is_session_error(inst)) {
		i_vpr_h(inst,
			"%s: inst is in bad state, can not change state to %s\n",
			func, state_name(request_state));
		return 0;
	}
	/* transitions INTO error are logged at error level, others at high */
	if (request_state == MSM_VIDC_ERROR)
		i_vpr_e(inst, FMT_STRING_STATE_CHANGE,
			func, state_name(request_state), state_name(inst->state));
	else
		i_vpr_h(inst, FMT_STRING_STATE_CHANGE,
			func, state_name(request_state), state_name(inst->state));
	/* trace must capture the old state, so it fires before the update */
	trace_msm_vidc_common_state_change(inst, func, state_name(inst->state),
		state_name(request_state));
	inst->state = request_state;
	return 0;
}
  1158. bool msm_vidc_allow_s_fmt(struct msm_vidc_inst *inst, u32 type)
  1159. {
  1160. bool allow = false;
  1161. if (!inst) {
  1162. d_vpr_e("%s: invalid params\n", __func__);
  1163. return false;
  1164. }
  1165. if (inst->state == MSM_VIDC_OPEN) {
  1166. allow = true;
  1167. goto exit;
  1168. }
  1169. if (type == OUTPUT_MPLANE || type == OUTPUT_META_PLANE) {
  1170. if (inst->state == MSM_VIDC_START_INPUT ||
  1171. inst->state == MSM_VIDC_DRAIN_START_INPUT) {
  1172. allow = true;
  1173. goto exit;
  1174. }
  1175. }
  1176. if (type == INPUT_MPLANE || type == INPUT_META_PLANE) {
  1177. if (inst->state == MSM_VIDC_START_OUTPUT) {
  1178. allow = true;
  1179. goto exit;
  1180. }
  1181. }
  1182. exit:
  1183. if (!allow)
  1184. i_vpr_e(inst, "%s: type %d not allowed in state %s\n",
  1185. __func__, type, state_name(inst->state));
  1186. return allow;
  1187. }
  1188. bool msm_vidc_allow_s_ctrl(struct msm_vidc_inst *inst, u32 id)
  1189. {
  1190. bool allow = false;
  1191. if (!inst) {
  1192. d_vpr_e("%s: invalid params\n", __func__);
  1193. return false;
  1194. }
  1195. if (inst->state == MSM_VIDC_OPEN) {
  1196. allow = true;
  1197. goto exit;
  1198. }
  1199. if (is_decode_session(inst)) {
  1200. if (!inst->bufq[INPUT_PORT].vb2q->streaming) {
  1201. allow = true;
  1202. goto exit;
  1203. }
  1204. if (inst->bufq[INPUT_PORT].vb2q->streaming) {
  1205. switch (id) {
  1206. case V4L2_CID_MPEG_VIDC_CODEC_CONFIG:
  1207. case V4L2_CID_MPEG_VIDC_PRIORITY:
  1208. case V4L2_CID_MPEG_VIDC_LOWLATENCY_REQUEST:
  1209. case V4L2_CID_MPEG_VIDC_INPUT_METADATA_FD:
  1210. case V4L2_CID_MPEG_VIDC_FRAME_RATE:
  1211. case V4L2_CID_MPEG_VIDC_OPERATING_RATE:
  1212. case V4L2_CID_MPEG_VIDC_SW_FENCE_ID:
  1213. allow = true;
  1214. break;
  1215. default:
  1216. allow = false;
  1217. break;
  1218. }
  1219. }
  1220. } else if (is_encode_session(inst)) {
  1221. if (!inst->bufq[OUTPUT_PORT].vb2q->streaming) {
  1222. allow = true;
  1223. goto exit;
  1224. }
  1225. if (inst->bufq[OUTPUT_PORT].vb2q->streaming) {
  1226. switch (id) {
  1227. case V4L2_CID_MPEG_VIDEO_BITRATE:
  1228. case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
  1229. case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME:
  1230. case V4L2_CID_HFLIP:
  1231. case V4L2_CID_VFLIP:
  1232. case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP:
  1233. case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP:
  1234. case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP:
  1235. case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:
  1236. case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP:
  1237. case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP:
  1238. case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER:
  1239. case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER:
  1240. case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR:
  1241. case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR:
  1242. case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR:
  1243. case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR:
  1244. case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR:
  1245. case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR:
  1246. case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L0_BR:
  1247. case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L1_BR:
  1248. case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L2_BR:
  1249. case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L3_BR:
  1250. case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L4_BR:
  1251. case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L5_BR:
  1252. case V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES:
  1253. case V4L2_CID_MPEG_VIDEO_FRAME_LTR_INDEX:
  1254. case V4L2_CID_MPEG_VIDC_VIDEO_BLUR_TYPES:
  1255. case V4L2_CID_MPEG_VIDC_VIDEO_BLUR_RESOLUTION:
  1256. case V4L2_CID_MPEG_VIDEO_CONSTANT_QUALITY:
  1257. case V4L2_CID_MPEG_VIDC_ENC_INPUT_COMPRESSION_RATIO:
  1258. case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
  1259. case V4L2_CID_MPEG_VIDC_PRIORITY:
  1260. case V4L2_CID_MPEG_VIDC_INPUT_METADATA_FD:
  1261. case V4L2_CID_MPEG_VIDC_INTRA_REFRESH_PERIOD:
  1262. case V4L2_CID_MPEG_VIDC_RESERVE_DURATION:
  1263. allow = true;
  1264. break;
  1265. default:
  1266. allow = false;
  1267. break;
  1268. }
  1269. }
  1270. }
  1271. exit:
  1272. if (!allow)
  1273. i_vpr_e(inst, "%s: id %#x not allowed in state %s\n",
  1274. __func__, id, state_name(inst->state));
  1275. return allow;
  1276. }
/*
 * msm_vidc_allow_metadata_delivery() - gate metadata delivery for @cap_id on
 * @port. Currently unconditional: all parameters are ignored and delivery is
 * always permitted; the hook exists so restrictions can be added later.
 */
bool msm_vidc_allow_metadata_delivery(struct msm_vidc_inst *inst, u32 cap_id,
	u32 port)
{
	return true;
}
  1282. bool msm_vidc_allow_metadata_subscription(struct msm_vidc_inst *inst, u32 cap_id,
  1283. u32 port)
  1284. {
  1285. bool is_allowed = true;
  1286. if (!inst || !inst->capabilities) {
  1287. d_vpr_e("%s: invalid params\n", __func__);
  1288. return false;
  1289. }
  1290. if (port == INPUT_PORT) {
  1291. switch (cap_id) {
  1292. case META_BUF_TAG:
  1293. case META_BITSTREAM_RESOLUTION:
  1294. case META_CROP_OFFSETS:
  1295. case META_SEI_MASTERING_DISP:
  1296. case META_SEI_CLL:
  1297. case META_HDR10PLUS:
  1298. case META_PICTURE_TYPE:
  1299. if (!is_meta_rx_inp_enabled(inst, META_OUTBUF_FENCE)) {
  1300. i_vpr_h(inst,
  1301. "%s: cap: %24s not allowed as output buffer fence is disabled\n",
  1302. __func__, cap_name(cap_id));
  1303. is_allowed = false;
  1304. }
  1305. break;
  1306. default:
  1307. is_allowed = true;
  1308. break;
  1309. }
  1310. } else if (port == OUTPUT_PORT) {
  1311. switch (cap_id) {
  1312. case META_DPB_TAG_LIST:
  1313. if (!is_ubwc_colorformat(inst->capabilities->cap[PIX_FMTS].value)) {
  1314. i_vpr_h(inst,
  1315. "%s: cap: %24s not allowed for split mode\n",
  1316. __func__, cap_name(cap_id));
  1317. is_allowed = false;
  1318. }
  1319. break;
  1320. default:
  1321. is_allowed = true;
  1322. break;
  1323. }
  1324. } else {
  1325. i_vpr_e(inst, "%s: invalid port %d\n", __func__, port);
  1326. is_allowed = false;
  1327. }
  1328. return is_allowed;
  1329. }
  1330. bool msm_vidc_allow_property(struct msm_vidc_inst *inst, u32 hfi_id)
  1331. {
  1332. bool is_allowed = true;
  1333. if (!inst || !inst->capabilities) {
  1334. d_vpr_e("%s: invalid params\n", __func__);
  1335. return false;
  1336. }
  1337. switch (hfi_id) {
  1338. case HFI_PROP_WORST_COMPRESSION_RATIO:
  1339. case HFI_PROP_WORST_COMPLEXITY_FACTOR:
  1340. case HFI_PROP_PICTURE_TYPE:
  1341. is_allowed = true;
  1342. break;
  1343. case HFI_PROP_DPB_LIST:
  1344. if (!is_ubwc_colorformat(inst->capabilities->cap[PIX_FMTS].value)) {
  1345. i_vpr_h(inst,
  1346. "%s: cap: %24s not allowed for split mode\n",
  1347. __func__, cap_name(DPB_LIST));
  1348. is_allowed = false;
  1349. }
  1350. break;
  1351. case HFI_PROP_FENCE:
  1352. if (!is_meta_rx_inp_enabled(inst, META_OUTBUF_FENCE)) {
  1353. i_vpr_h(inst,
  1354. "%s: cap: %24s not enabled, hence not allowed to subscribe\n",
  1355. __func__, cap_name(META_OUTBUF_FENCE));
  1356. is_allowed = false;
  1357. }
  1358. break;
  1359. default:
  1360. is_allowed = true;
  1361. break;
  1362. }
  1363. return is_allowed;
  1364. }
  1365. int msm_vidc_update_property_cap(struct msm_vidc_inst *inst, u32 hfi_id,
  1366. bool allow)
  1367. {
  1368. int rc = 0;
  1369. if (!inst || !inst->capabilities) {
  1370. d_vpr_e("%s: invalid params\n", __func__);
  1371. return -EINVAL;
  1372. }
  1373. switch (hfi_id) {
  1374. case HFI_PROP_WORST_COMPRESSION_RATIO:
  1375. case HFI_PROP_WORST_COMPLEXITY_FACTOR:
  1376. case HFI_PROP_PICTURE_TYPE:
  1377. break;
  1378. case HFI_PROP_DPB_LIST:
  1379. if (!allow)
  1380. memset(inst->dpb_list_payload, 0, MAX_DPB_LIST_ARRAY_SIZE);
  1381. msm_vidc_update_cap_value(inst, DPB_LIST, allow, __func__);
  1382. break;
  1383. default:
  1384. break;
  1385. }
  1386. return rc;
  1387. }
  1388. bool msm_vidc_allow_reqbufs(struct msm_vidc_inst *inst, u32 type)
  1389. {
  1390. bool allow = false;
  1391. if (!inst) {
  1392. d_vpr_e("%s: invalid params\n", __func__);
  1393. return false;
  1394. }
  1395. if (inst->state == MSM_VIDC_OPEN) {
  1396. allow = true;
  1397. goto exit;
  1398. }
  1399. if (type == OUTPUT_MPLANE || type == OUTPUT_META_PLANE) {
  1400. if (inst->state == MSM_VIDC_START_INPUT ||
  1401. inst->state == MSM_VIDC_DRAIN_START_INPUT) {
  1402. allow = true;
  1403. goto exit;
  1404. }
  1405. }
  1406. if (type == INPUT_MPLANE || type == INPUT_META_PLANE) {
  1407. if (inst->state == MSM_VIDC_START_OUTPUT) {
  1408. allow = true;
  1409. goto exit;
  1410. }
  1411. }
  1412. exit:
  1413. if (!allow)
  1414. i_vpr_e(inst, "%s: type %d not allowed in state %s\n",
  1415. __func__, type, state_name(inst->state));
  1416. return allow;
  1417. }
  1418. enum msm_vidc_allow msm_vidc_allow_stop(struct msm_vidc_inst *inst)
  1419. {
  1420. enum msm_vidc_allow allow = MSM_VIDC_DISALLOW;
  1421. if (!inst) {
  1422. d_vpr_e("%s: invalid params\n", __func__);
  1423. return allow;
  1424. }
  1425. if (inst->state == MSM_VIDC_START ||
  1426. inst->state == MSM_VIDC_DRC ||
  1427. inst->state == MSM_VIDC_DRC_LAST_FLAG ||
  1428. inst->state == MSM_VIDC_DRC_DRAIN) {
  1429. allow = MSM_VIDC_ALLOW;
  1430. } else if (inst->state == MSM_VIDC_START_INPUT ||
  1431. inst->state == MSM_VIDC_OPEN) {
  1432. allow = MSM_VIDC_IGNORE;
  1433. i_vpr_e(inst, "%s: stop ignored in state %s\n",
  1434. __func__, state_name(inst->state));
  1435. } else {
  1436. i_vpr_e(inst, "%s: stop not allowed in state %s\n",
  1437. __func__, state_name(inst->state));
  1438. }
  1439. return allow;
  1440. }
  1441. bool msm_vidc_allow_start(struct msm_vidc_inst *inst)
  1442. {
  1443. if (!inst) {
  1444. d_vpr_e("%s: invalid params\n", __func__);
  1445. return false;
  1446. }
  1447. if (inst->state == MSM_VIDC_DRAIN_LAST_FLAG ||
  1448. inst->state == MSM_VIDC_DRC_LAST_FLAG ||
  1449. inst->state == MSM_VIDC_DRC_DRAIN_LAST_FLAG)
  1450. return true;
  1451. i_vpr_e(inst, "%s: not allowed in state %s\n",
  1452. __func__, state_name(inst->state));
  1453. return false;
  1454. }
  1455. bool msm_vidc_allow_streamon(struct msm_vidc_inst *inst, u32 type)
  1456. {
  1457. if (!inst) {
  1458. d_vpr_e("%s: invalid params\n", __func__);
  1459. return false;
  1460. }
  1461. if (type == INPUT_MPLANE || type == INPUT_META_PLANE) {
  1462. if (inst->state == MSM_VIDC_OPEN ||
  1463. inst->state == MSM_VIDC_START_OUTPUT)
  1464. return true;
  1465. } else if (type == OUTPUT_MPLANE || type == OUTPUT_META_PLANE) {
  1466. if (inst->state == MSM_VIDC_OPEN ||
  1467. inst->state == MSM_VIDC_START_INPUT ||
  1468. inst->state == MSM_VIDC_DRAIN_START_INPUT)
  1469. return true;
  1470. }
  1471. i_vpr_e(inst, "%s: type %d not allowed in state %s\n",
  1472. __func__, type, state_name(inst->state));
  1473. return false;
  1474. }
  1475. enum msm_vidc_allow msm_vidc_allow_streamoff(struct msm_vidc_inst *inst, u32 type)
  1476. {
  1477. enum msm_vidc_allow allow = MSM_VIDC_ALLOW;
  1478. if (!inst) {
  1479. d_vpr_e("%s: invalid params\n", __func__);
  1480. return MSM_VIDC_DISALLOW;
  1481. }
  1482. if (type == INPUT_MPLANE) {
  1483. if (!inst->bufq[INPUT_PORT].vb2q->streaming)
  1484. allow = MSM_VIDC_IGNORE;
  1485. } else if (type == INPUT_META_PLANE) {
  1486. if (inst->bufq[INPUT_PORT].vb2q->streaming)
  1487. allow = MSM_VIDC_DISALLOW;
  1488. else if (!inst->bufq[INPUT_META_PORT].vb2q->streaming)
  1489. allow = MSM_VIDC_IGNORE;
  1490. } else if (type == OUTPUT_MPLANE) {
  1491. if (!inst->bufq[OUTPUT_PORT].vb2q->streaming)
  1492. allow = MSM_VIDC_IGNORE;
  1493. } else if (type == OUTPUT_META_PLANE) {
  1494. if (inst->bufq[OUTPUT_PORT].vb2q->streaming)
  1495. allow = MSM_VIDC_DISALLOW;
  1496. else if (!inst->bufq[OUTPUT_META_PORT].vb2q->streaming)
  1497. allow = MSM_VIDC_IGNORE;
  1498. }
  1499. if (allow != MSM_VIDC_ALLOW)
  1500. i_vpr_e(inst, "%s: type %d is %s in state %s\n",
  1501. __func__, type, allow_name(allow),
  1502. state_name(inst->state));
  1503. return allow;
  1504. }
  1505. enum msm_vidc_allow msm_vidc_allow_qbuf(struct msm_vidc_inst *inst, u32 type)
  1506. {
  1507. int port = 0;
  1508. if (!inst) {
  1509. d_vpr_e("%s: invalid params\n", __func__);
  1510. return MSM_VIDC_DISALLOW;
  1511. }
  1512. port = v4l2_type_to_driver_port(inst, type, __func__);
  1513. if (port < 0)
  1514. return MSM_VIDC_DISALLOW;
  1515. /* defer queuing if streamon not completed */
  1516. if (!inst->bufq[port].vb2q->streaming)
  1517. return MSM_VIDC_DEFER;
  1518. if (type == INPUT_META_PLANE || type == OUTPUT_META_PLANE)
  1519. return MSM_VIDC_DEFER;
  1520. if (type == INPUT_MPLANE) {
  1521. if (inst->state == MSM_VIDC_OPEN ||
  1522. inst->state == MSM_VIDC_START_OUTPUT)
  1523. return MSM_VIDC_DEFER;
  1524. else
  1525. return MSM_VIDC_ALLOW;
  1526. } else if (type == OUTPUT_MPLANE) {
  1527. if (inst->state == MSM_VIDC_OPEN ||
  1528. inst->state == MSM_VIDC_START_INPUT ||
  1529. inst->state == MSM_VIDC_DRAIN_START_INPUT)
  1530. return MSM_VIDC_DEFER;
  1531. else
  1532. return MSM_VIDC_ALLOW;
  1533. } else {
  1534. i_vpr_e(inst, "%s: unknown buffer type %d\n", __func__, type);
  1535. return MSM_VIDC_DISALLOW;
  1536. }
  1537. return MSM_VIDC_DISALLOW;
  1538. }
  1539. enum msm_vidc_allow msm_vidc_allow_input_psc(struct msm_vidc_inst *inst)
  1540. {
  1541. enum msm_vidc_allow allow = MSM_VIDC_DISALLOW;
  1542. if (!inst) {
  1543. d_vpr_e("%s: invalid params\n", __func__);
  1544. return MSM_VIDC_DISALLOW;
  1545. }
  1546. if (inst->state == MSM_VIDC_START ||
  1547. inst->state == MSM_VIDC_START_INPUT ||
  1548. inst->state == MSM_VIDC_DRAIN) {
  1549. allow = MSM_VIDC_ALLOW;
  1550. } else if (inst->state == MSM_VIDC_DRC ||
  1551. inst->state == MSM_VIDC_DRC_LAST_FLAG ||
  1552. inst->state == MSM_VIDC_DRC_DRAIN ||
  1553. inst->state == MSM_VIDC_DRC_DRAIN_LAST_FLAG ||
  1554. inst->state == MSM_VIDC_DRAIN_START_INPUT) {
  1555. i_vpr_h(inst, "%s: defer input psc, inst state %s\n",
  1556. __func__, state_name(inst->state));
  1557. allow = MSM_VIDC_DEFER;
  1558. } else if (inst->state == MSM_VIDC_OPEN ||
  1559. inst->state == MSM_VIDC_START_OUTPUT) {
  1560. i_vpr_h(inst, "%s: discard input psc, inst state %s\n",
  1561. __func__, state_name(inst->state));
  1562. allow = MSM_VIDC_DISCARD;
  1563. } else {
  1564. i_vpr_e(inst, "%s: input psc in wrong state %s\n",
  1565. __func__, state_name(inst->state));
  1566. allow = MSM_VIDC_DISALLOW;
  1567. }
  1568. return allow;
  1569. }
  1570. bool msm_vidc_allow_last_flag(struct msm_vidc_inst *inst)
  1571. {
  1572. if (!inst) {
  1573. d_vpr_e("%s: invalid params\n", __func__);
  1574. return false;
  1575. }
  1576. if (inst->state == MSM_VIDC_DRC ||
  1577. inst->state == MSM_VIDC_DRAIN ||
  1578. inst->state == MSM_VIDC_DRC_DRAIN)
  1579. return true;
  1580. i_vpr_e(inst, "%s: not allowed in state %s\n",
  1581. __func__, state_name(inst->state));
  1582. return false;
  1583. }
/*
 * msm_vidc_flush_pending_last_flag() - process any deferred last-flag
 * response work queued on the session.
 *
 * On handler failure the session is moved to the error state and the failing
 * work item is deliberately left on the list (session teardown is expected
 * to reclaim it — NOTE(review): confirm against the teardown path).
 *
 * Return: 0 on success or empty list, handler error code otherwise.
 */
static int msm_vidc_flush_pending_last_flag(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct response_work *resp_work, *dummy = NULL;

	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	if (list_empty(&inst->response_works))
		return 0;
	/* flush pending last flag buffers if any */
	list_for_each_entry_safe(resp_work, dummy,
			&inst->response_works, list) {
		if (resp_work->type == RESP_WORK_LAST_FLAG) {
			i_vpr_h(inst, "%s: flush pending last flag buffer\n",
				__func__);
			rc = handle_session_response_work(inst, resp_work);
			if (rc) {
				msm_vidc_change_inst_state(inst,
					MSM_VIDC_ERROR, __func__);
				return rc;
			}
			/* processed: unlink and free the work item */
			list_del(&resp_work->list);
			kfree(resp_work->data);
			kfree(resp_work);
		}
	}
	return 0;
}
  1613. static int msm_vidc_discard_pending_opsc(struct msm_vidc_inst *inst)
  1614. {
  1615. struct response_work *resp_work, *dummy = NULL;
  1616. if (!inst) {
  1617. d_vpr_e("%s: invalid params\n", __func__);
  1618. return -EINVAL;
  1619. }
  1620. if (list_empty(&inst->response_works))
  1621. return 0;
  1622. /* discard pending port settings change if any */
  1623. list_for_each_entry_safe(resp_work, dummy,
  1624. &inst->response_works, list) {
  1625. if (resp_work->type == RESP_WORK_OUTPUT_PSC) {
  1626. i_vpr_h(inst,
  1627. "%s: discard pending output psc\n", __func__);
  1628. list_del(&resp_work->list);
  1629. kfree(resp_work->data);
  1630. kfree(resp_work);
  1631. }
  1632. }
  1633. return 0;
  1634. }
  1635. static int msm_vidc_discard_pending_ipsc(struct msm_vidc_inst *inst)
  1636. {
  1637. struct response_work *resp_work, *dummy = NULL;
  1638. if (!inst) {
  1639. d_vpr_e("%s: invalid params\n", __func__);
  1640. return -EINVAL;
  1641. }
  1642. if (list_empty(&inst->response_works))
  1643. return 0;
  1644. /* discard pending port settings change if any */
  1645. list_for_each_entry_safe(resp_work, dummy,
  1646. &inst->response_works, list) {
  1647. if (resp_work->type == RESP_WORK_INPUT_PSC) {
  1648. i_vpr_h(inst,
  1649. "%s: discard pending input psc\n", __func__);
  1650. /* override the psc properties again if ipsc discarded */
  1651. inst->ipsc_properties_set = false;
  1652. list_del(&resp_work->list);
  1653. kfree(resp_work->data);
  1654. kfree(resp_work);
  1655. }
  1656. }
  1657. return 0;
  1658. }
  1659. static int msm_vidc_process_pending_ipsc(struct msm_vidc_inst *inst,
  1660. enum msm_vidc_inst_state *new_state)
  1661. {
  1662. struct response_work *resp_work, *dummy = NULL;
  1663. int rc = 0;
  1664. if (!inst || !new_state) {
  1665. d_vpr_e("%s: invalid params\n", __func__);
  1666. return -EINVAL;
  1667. }
  1668. if (list_empty(&inst->response_works))
  1669. return 0;
  1670. i_vpr_h(inst, "%s: state %s, ipsc pending\n", __func__, state_name(inst->state));
  1671. list_for_each_entry_safe(resp_work, dummy, &inst->response_works, list) {
  1672. if (resp_work->type == RESP_WORK_INPUT_PSC) {
  1673. rc = handle_session_response_work(inst, resp_work);
  1674. if (rc) {
  1675. i_vpr_e(inst, "%s: handle ipsc failed\n", __func__);
  1676. *new_state = MSM_VIDC_ERROR;
  1677. } else {
  1678. if (inst->state == MSM_VIDC_DRC_DRAIN_LAST_FLAG ||
  1679. inst->state == MSM_VIDC_DRAIN_START_INPUT) {
  1680. *new_state = MSM_VIDC_DRC_DRAIN;
  1681. } else if (inst->state == MSM_VIDC_DRC_LAST_FLAG) {
  1682. *new_state = MSM_VIDC_DRC;
  1683. }
  1684. }
  1685. list_del(&resp_work->list);
  1686. kfree(resp_work->data);
  1687. kfree(resp_work);
  1688. /* list contains max only one ipsc at anytime */
  1689. break;
  1690. }
  1691. }
  1692. return rc;
  1693. }
  1694. int msm_vidc_state_change_streamon(struct msm_vidc_inst *inst, u32 type)
  1695. {
  1696. int rc = 0;
  1697. enum msm_vidc_inst_state new_state = MSM_VIDC_ERROR;
  1698. if (!inst || !inst->core) {
  1699. d_vpr_e("%s: invalid params\n", __func__);
  1700. return -EINVAL;
  1701. }
  1702. if (type == INPUT_META_PLANE || type == OUTPUT_META_PLANE)
  1703. return 0;
  1704. if (type == INPUT_MPLANE) {
  1705. if (inst->state == MSM_VIDC_OPEN)
  1706. new_state = MSM_VIDC_START_INPUT;
  1707. else if (inst->state == MSM_VIDC_START_OUTPUT)
  1708. new_state = MSM_VIDC_START;
  1709. } else if (type == OUTPUT_MPLANE) {
  1710. if (inst->state == MSM_VIDC_OPEN) {
  1711. new_state = MSM_VIDC_START_OUTPUT;
  1712. } else if (inst->state == MSM_VIDC_START_INPUT) {
  1713. new_state = MSM_VIDC_START;
  1714. } else if (inst->state == MSM_VIDC_DRAIN_START_INPUT) {
  1715. i_vpr_h(inst, "%s: streamon(output) in %s state\n",
  1716. __func__, state_name(inst->state));
  1717. new_state = MSM_VIDC_DRAIN;
  1718. rc = msm_vidc_process_pending_ipsc(inst, &new_state);
  1719. if (rc) {
  1720. i_vpr_e(inst, "%s: process pending ipsc failed\n", __func__);
  1721. goto state_change;
  1722. }
  1723. }
  1724. }
  1725. state_change:
  1726. msm_vidc_change_inst_state(inst, new_state, __func__);
  1727. return rc;
  1728. }
  1729. int msm_vidc_state_change_streamoff(struct msm_vidc_inst *inst, u32 type)
  1730. {
  1731. int rc = 0;
  1732. enum msm_vidc_inst_state new_state = MSM_VIDC_ERROR;
  1733. if (!inst || !inst->core) {
  1734. d_vpr_e("%s: invalid params\n", __func__);
  1735. return -EINVAL;
  1736. }
  1737. if (type == INPUT_META_PLANE || type == OUTPUT_META_PLANE)
  1738. return 0;
  1739. if (type == INPUT_MPLANE) {
  1740. if (inst->state == MSM_VIDC_START_INPUT) {
  1741. new_state = MSM_VIDC_OPEN;
  1742. } else if (inst->state == MSM_VIDC_START) {
  1743. new_state = MSM_VIDC_START_OUTPUT;
  1744. } else if (inst->state == MSM_VIDC_DRC ||
  1745. inst->state == MSM_VIDC_DRC_LAST_FLAG ||
  1746. inst->state == MSM_VIDC_DRAIN ||
  1747. inst->state == MSM_VIDC_DRAIN_LAST_FLAG ||
  1748. inst->state == MSM_VIDC_DRC_DRAIN ||
  1749. inst->state == MSM_VIDC_DRC_DRAIN_LAST_FLAG ||
  1750. inst->state == MSM_VIDC_DRAIN_START_INPUT) {
  1751. new_state = MSM_VIDC_START_OUTPUT;
  1752. }
  1753. } else if (type == OUTPUT_MPLANE) {
  1754. if (inst->state == MSM_VIDC_START_OUTPUT) {
  1755. new_state = MSM_VIDC_OPEN;
  1756. } else if (inst->state == MSM_VIDC_START ||
  1757. inst->state == MSM_VIDC_DRAIN ||
  1758. inst->state == MSM_VIDC_DRAIN_LAST_FLAG ||
  1759. inst->state == MSM_VIDC_DRC ||
  1760. inst->state == MSM_VIDC_DRC_LAST_FLAG ||
  1761. inst->state == MSM_VIDC_DRC_DRAIN) {
  1762. new_state = MSM_VIDC_START_INPUT;
  1763. } else if (inst->state == MSM_VIDC_DRC_DRAIN_LAST_FLAG) {
  1764. new_state = MSM_VIDC_DRAIN_START_INPUT;
  1765. }
  1766. }
  1767. rc = msm_vidc_change_inst_state(inst, new_state, __func__);
  1768. if (rc)
  1769. goto exit;
  1770. exit:
  1771. return rc;
  1772. }
  1773. int msm_vidc_state_change_stop(struct msm_vidc_inst *inst)
  1774. {
  1775. int rc = 0;
  1776. enum msm_vidc_inst_state new_state = MSM_VIDC_ERROR;
  1777. if (!inst || !inst->core) {
  1778. d_vpr_e("%s: invalid params\n", __func__);
  1779. return -EINVAL;
  1780. }
  1781. if (inst->state == MSM_VIDC_START) {
  1782. new_state = MSM_VIDC_DRAIN;
  1783. } else if (inst->state == MSM_VIDC_DRC) {
  1784. new_state = MSM_VIDC_DRC_DRAIN;
  1785. } else if (inst->state == MSM_VIDC_DRC_DRAIN ||
  1786. inst->state == MSM_VIDC_DRC_LAST_FLAG) {
  1787. new_state = MSM_VIDC_DRC_DRAIN_LAST_FLAG;
  1788. } else {
  1789. i_vpr_e(inst, "%s: wrong state %s\n",
  1790. __func__, state_name(inst->state));
  1791. msm_vidc_change_inst_state(inst, MSM_VIDC_ERROR, __func__);
  1792. return -EINVAL;
  1793. }
  1794. rc = msm_vidc_change_inst_state(inst, new_state, __func__);
  1795. if (rc)
  1796. return rc;
  1797. return rc;
  1798. }
  1799. int msm_vidc_state_change_start(struct msm_vidc_inst *inst)
  1800. {
  1801. int rc = 0;
  1802. enum msm_vidc_inst_state new_state = MSM_VIDC_ERROR;
  1803. if (!inst || !inst->core) {
  1804. d_vpr_e("%s: invalid params\n", __func__);
  1805. return -EINVAL;
  1806. }
  1807. if (inst->state == MSM_VIDC_DRAIN_LAST_FLAG ||
  1808. inst->state == MSM_VIDC_DRC_LAST_FLAG) {
  1809. new_state = MSM_VIDC_START;
  1810. rc = msm_vidc_process_pending_ipsc(inst, &new_state);
  1811. if (rc) {
  1812. i_vpr_e(inst, "%s: process pending ipsc failed\n", __func__);
  1813. goto state_change;
  1814. }
  1815. } else if (inst->state == MSM_VIDC_DRC_DRAIN_LAST_FLAG) {
  1816. new_state = MSM_VIDC_DRAIN;
  1817. rc = msm_vidc_process_pending_ipsc(inst, &new_state);
  1818. if (rc) {
  1819. i_vpr_e(inst, "%s: process pending ipsc failed\n", __func__);
  1820. goto state_change;
  1821. }
  1822. } else {
  1823. i_vpr_e(inst, "%s: wrong state %s\n", __func__, state_name(inst->state));
  1824. new_state = MSM_VIDC_ERROR;
  1825. rc = -EINVAL;
  1826. goto state_change;
  1827. }
  1828. state_change:
  1829. msm_vidc_change_inst_state(inst, new_state, __func__);
  1830. return rc;
  1831. }
  1832. int msm_vidc_state_change_input_psc(struct msm_vidc_inst *inst)
  1833. {
  1834. int rc = 0;
  1835. enum msm_vidc_inst_state new_state = MSM_VIDC_ERROR;
  1836. if (!inst || !inst->core) {
  1837. d_vpr_e("%s: invalid params\n", __func__);
  1838. return -EINVAL;
  1839. }
  1840. /* don't change state as output port is not started yet */
  1841. if (inst->state == MSM_VIDC_START_INPUT)
  1842. return 0;
  1843. if (inst->state == MSM_VIDC_START) {
  1844. new_state = MSM_VIDC_DRC;
  1845. } else if (inst->state == MSM_VIDC_DRAIN) {
  1846. new_state = MSM_VIDC_DRC_DRAIN;
  1847. } else {
  1848. i_vpr_e(inst, "%s: wrong state %s\n",
  1849. __func__, state_name(inst->state));
  1850. msm_vidc_change_inst_state(inst, MSM_VIDC_ERROR, __func__);
  1851. return -EINVAL;
  1852. }
  1853. rc = msm_vidc_change_inst_state(inst, new_state, __func__);
  1854. if (rc)
  1855. return rc;
  1856. return rc;
  1857. }
  1858. int msm_vidc_state_change_last_flag(struct msm_vidc_inst *inst)
  1859. {
  1860. int rc = 0;
  1861. enum msm_vidc_inst_state new_state = MSM_VIDC_ERROR;
  1862. if (!inst || !inst->core) {
  1863. d_vpr_e("%s: invalid params\n", __func__);
  1864. return -EINVAL;
  1865. }
  1866. if (inst->state == MSM_VIDC_DRC) {
  1867. new_state = MSM_VIDC_DRC_LAST_FLAG;
  1868. } else if (inst->state == MSM_VIDC_DRAIN) {
  1869. new_state = MSM_VIDC_DRAIN_LAST_FLAG;
  1870. } else if (inst->state == MSM_VIDC_DRC_DRAIN) {
  1871. new_state = MSM_VIDC_DRC_DRAIN_LAST_FLAG;
  1872. } else {
  1873. i_vpr_e(inst, "%s: wrong state %s\n",
  1874. __func__, state_name(inst->state));
  1875. msm_vidc_change_inst_state(inst, MSM_VIDC_ERROR, __func__);
  1876. return -EINVAL;
  1877. }
  1878. rc = msm_vidc_change_inst_state(inst, new_state, __func__);
  1879. if (rc)
  1880. return rc;
  1881. return rc;
  1882. }
  1883. int msm_vidc_get_fence_fd(struct msm_vidc_inst *inst, int *fence_fd)
  1884. {
  1885. int rc = 0;
  1886. struct msm_vidc_fence *fence, *dummy_fence;
  1887. bool found = false;
  1888. *fence_fd = INVALID_FD;
  1889. if (!inst || !inst->capabilities) {
  1890. d_vpr_e("%s: invalid params\n", __func__);
  1891. return -EINVAL;
  1892. }
  1893. list_for_each_entry_safe(fence, dummy_fence, &inst->fence_list, list) {
  1894. if (fence->dma_fence.seqno ==
  1895. (u64)inst->capabilities->cap[FENCE_ID].value) {
  1896. found = true;
  1897. break;
  1898. }
  1899. }
  1900. if (!found) {
  1901. i_vpr_h(inst, "%s: could not find matching fence for fence id: %d\n",
  1902. __func__, inst->capabilities->cap[FENCE_ID].value);
  1903. goto exit;
  1904. }
  1905. if (fence->fd == INVALID_FD) {
  1906. rc = msm_vidc_create_fence_fd(inst, fence);
  1907. if (rc)
  1908. goto exit;
  1909. }
  1910. *fence_fd = fence->fd;
  1911. exit:
  1912. return rc;
  1913. }
  1914. int msm_vidc_get_control(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
  1915. {
  1916. int rc = 0;
  1917. if (!inst || !ctrl) {
  1918. d_vpr_e("%s: invalid params\n", __func__);
  1919. return -EINVAL;
  1920. }
  1921. switch (ctrl->id) {
  1922. case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
  1923. ctrl->val = inst->buffers.output.min_count +
  1924. inst->buffers.output.extra_count;
  1925. i_vpr_h(inst, "g_min: output buffers %d\n", ctrl->val);
  1926. break;
  1927. case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
  1928. ctrl->val = inst->buffers.input.min_count +
  1929. inst->buffers.input.extra_count;
  1930. i_vpr_h(inst, "g_min: input buffers %d\n", ctrl->val);
  1931. break;
  1932. case V4L2_CID_MPEG_VIDC_AV1D_FILM_GRAIN_PRESENT:
  1933. ctrl->val = inst->capabilities->cap[FILM_GRAIN].value;
  1934. i_vpr_h(inst, "%s: film grain present: %d\n",
  1935. __func__, ctrl->val);
  1936. break;
  1937. case V4L2_CID_MPEG_VIDC_SW_FENCE_FD:
  1938. rc = msm_vidc_get_fence_fd(inst, &ctrl->val);
  1939. if (!rc)
  1940. i_vpr_l(inst, "%s: fence fd: %d\n",
  1941. __func__, ctrl->val);
  1942. break;
  1943. default:
  1944. i_vpr_e(inst, "invalid ctrl %s id %d\n",
  1945. ctrl->name, ctrl->id);
  1946. return -EINVAL;
  1947. }
  1948. return rc;
  1949. }
  1950. int msm_vidc_get_mbs_per_frame(struct msm_vidc_inst *inst)
  1951. {
  1952. int height = 0, width = 0;
  1953. struct v4l2_format *inp_f;
  1954. if (is_decode_session(inst)) {
  1955. inp_f = &inst->fmts[INPUT_PORT];
  1956. width = max(inp_f->fmt.pix_mp.width, inst->crop.width);
  1957. height = max(inp_f->fmt.pix_mp.height, inst->crop.height);
  1958. } else if (is_encode_session(inst)) {
  1959. width = inst->crop.width;
  1960. height = inst->crop.height;
  1961. }
  1962. return NUM_MBS_PER_FRAME(height, width);
  1963. }
  1964. int msm_vidc_get_fps(struct msm_vidc_inst *inst)
  1965. {
  1966. int fps;
  1967. u32 frame_rate, operating_rate;
  1968. if (!inst || !inst->capabilities) {
  1969. d_vpr_e("%s: invalid params\n", __func__);
  1970. return -EINVAL;
  1971. }
  1972. frame_rate = msm_vidc_get_frame_rate(inst);
  1973. operating_rate = msm_vidc_get_operating_rate(inst);
  1974. if (operating_rate > frame_rate)
  1975. fps = operating_rate ? operating_rate : 1;
  1976. else
  1977. fps = frame_rate;
  1978. return fps;
  1979. }
  1980. int msm_vidc_num_buffers(struct msm_vidc_inst *inst,
  1981. enum msm_vidc_buffer_type type, enum msm_vidc_buffer_attributes attr)
  1982. {
  1983. int count = 0;
  1984. struct msm_vidc_buffer *vbuf;
  1985. struct msm_vidc_buffers *buffers;
  1986. if (!inst) {
  1987. d_vpr_e("%s: invalid params\n", __func__);
  1988. return count;
  1989. }
  1990. if (type == MSM_VIDC_BUF_OUTPUT) {
  1991. buffers = &inst->buffers.output;
  1992. } else if (type == MSM_VIDC_BUF_INPUT) {
  1993. buffers = &inst->buffers.input;
  1994. } else {
  1995. i_vpr_e(inst, "%s: invalid buffer type %#x\n",
  1996. __func__, type);
  1997. return count;
  1998. }
  1999. list_for_each_entry(vbuf, &buffers->list, list) {
  2000. if (vbuf->type != type)
  2001. continue;
  2002. if (!(vbuf->attr & attr))
  2003. continue;
  2004. count++;
  2005. }
  2006. return count;
  2007. }
  2008. static int vb2_buffer_to_driver(struct vb2_buffer *vb2,
  2009. struct msm_vidc_buffer *buf)
  2010. {
  2011. int rc = 0;
  2012. if (!vb2 || !buf) {
  2013. d_vpr_e("%s: invalid params\n", __func__);
  2014. return -EINVAL;
  2015. }
  2016. buf->type = v4l2_type_to_driver(vb2->type, __func__);
  2017. if (!buf->type)
  2018. return -EINVAL;
  2019. buf->index = vb2->index;
  2020. buf->fd = vb2->planes[0].m.fd;
  2021. buf->data_offset = vb2->planes[0].data_offset;
  2022. buf->data_size = vb2->planes[0].bytesused - vb2->planes[0].data_offset;
  2023. buf->buffer_size = vb2->planes[0].length;
  2024. buf->timestamp = vb2->timestamp;
  2025. return rc;
  2026. }
/*
 * msm_vidc_process_readonly_buffers() - carry the read-only attribute over
 * to a re-queued decoder output buffer.
 * @inst: video session instance
 * @buf:  buffer being queued
 *
 * Applies only to decode-session output buffers; other cases return 0.
 * If @buf's device address matches a node in the READ_ONLY tracking list,
 * the MSM_VIDC_ATTR_READ_ONLY attribute is moved onto @buf and the
 * tracking node is unlinked and returned to the memory pool.
 */
int msm_vidc_process_readonly_buffers(struct msm_vidc_inst *inst,
	struct msm_vidc_buffer *buf)
{
	int rc = 0;
	struct msm_vidc_buffer *ro_buf, *dummy;
	struct msm_vidc_buffers *ro_buffers;

	if (!inst || !buf) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	/* only decoder output (capture) buffers can be held read-only by fw */
	if (!is_decode_session(inst) || !is_output_buffer(buf->type))
		return 0;

	ro_buffers = msm_vidc_get_buffers(inst, MSM_VIDC_BUF_READ_ONLY, __func__);
	if (!ro_buffers)
		return -EINVAL;

	/*
	 * check if buffer present in ro_buffers list
	 * if present: add ro flag to buf and remove from ro_buffers list
	 * if not present: do nothing
	 */
	list_for_each_entry_safe(ro_buf, dummy, &ro_buffers->list, list) {
		if (ro_buf->device_addr == buf->device_addr) {
			buf->attr |= MSM_VIDC_ATTR_READ_ONLY;
			print_vidc_buffer(VIDC_LOW, "low ", "ro buf removed", inst, ro_buf);
			list_del(&ro_buf->list);
			msm_memory_pool_free(inst, ro_buf);
			break;
		}
	}

	return rc;
}
/*
 * msm_vidc_memory_unmap_completely() - drop every outstanding refcount on a
 * mapping, then release its dmabuf and pool node.
 * @inst: video session instance
 * @map:  mapping to tear down (expected to be linked in a mappings list)
 *
 * Repeatedly unmaps until the refcount reaches zero; each unmap call is
 * assumed to decrement map->refcount. On reaching zero the dmabuf reference
 * is dropped and the node is unlinked and freed. Stops early and returns
 * the error if an unmap call fails.
 */
int msm_vidc_memory_unmap_completely(struct msm_vidc_inst *inst,
	struct msm_vidc_map *map)
{
	int rc = 0;

	if (!inst || !map) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	/* nothing mapped: nothing to do */
	if (!map->refcount)
		return 0;

	while (map->refcount) {
		rc = msm_vidc_memory_unmap(inst->core, map);
		if (rc)
			break;
		if (!map->refcount) {
			/* last reference gone: release dmabuf and the node itself */
			msm_vidc_memory_put_dmabuf(inst, map->dmabuf);
			list_del(&map->list);
			msm_memory_pool_free(inst, map);
			break;
		}
	}

	return rc;
}
/*
 * msm_vidc_set_auto_framerate() - derive encoder frame rate from input
 * timestamps and push it to firmware when it stabilizes.
 * @inst:      video session instance
 * @timestamp: timestamp of the newly queued input buffer (microseconds
 *             presumed from the USEC_PER_SEC arithmetic - TODO confirm)
 *
 * Skipped unless the core advertises ENC_AUTO_FRAMERATE, the session is a
 * regular (non-image, non-superframe) session, and time-delta based rate
 * control is enabled. Rates are handled in Q16 fixed point (<< 16).
 * The new rate is sent via HFI_PROP_FRAME_RATE only after at least
 * ENC_FPS_WINDOW samples and two consecutive identical computed rates
 * that differ from the currently programmed one.
 */
int msm_vidc_set_auto_framerate(struct msm_vidc_inst *inst, u64 timestamp)
{
	struct msm_vidc_core *core;
	struct msm_vidc_timestamp *ts;
	struct msm_vidc_timestamp *prev = NULL;
	u32 counter = 0, prev_fr = 0, curr_fr = 0;
	u64 time_us = 0;
	int rc = 0;

	if (!inst || !inst->core || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;

	/* feature disabled or not applicable to this session type */
	if (!core->capabilities[ENC_AUTO_FRAMERATE].value ||
		is_image_session(inst) || msm_vidc_is_super_buffer(inst) ||
		!inst->capabilities->cap[TIME_DELTA_BASED_RC].value)
		goto exit;

	/* insert @timestamp into the sorted sliding window */
	rc = msm_vidc_update_timestamp_rate(inst, timestamp);
	if (rc)
		goto exit;

	/* walk consecutive sorted timestamps; derive Q16 fps from each delta */
	list_for_each_entry(ts, &inst->timestamps.list, sort.list) {
		if (prev) {
			time_us = ts->sort.val - prev->sort.val;
			prev_fr = curr_fr;
			/* zero delta: keep the currently programmed rate */
			curr_fr = time_us ? DIV64_U64_ROUND_CLOSEST(USEC_PER_SEC, time_us) << 16 :
				inst->auto_framerate;
			if (curr_fr > inst->capabilities->cap[FRAME_RATE].max)
				curr_fr = inst->capabilities->cap[FRAME_RATE].max;
		}
		prev = ts;
		counter++;
	}

	/* not enough samples yet to trust the estimate */
	if (counter < ENC_FPS_WINDOW)
		goto exit;

	/* if framerate changed and stable for 2 frames, set to firmware */
	if (curr_fr == prev_fr && curr_fr != inst->auto_framerate) {
		i_vpr_l(inst, "%s: updated fps: %u -> %u\n", __func__,
			inst->auto_framerate >> 16, curr_fr >> 16);
		rc = venus_hfi_session_property(inst,
			HFI_PROP_FRAME_RATE,
			HFI_HOST_FLAGS_NONE,
			HFI_PORT_BITSTREAM,
			HFI_PAYLOAD_Q16,
			&curr_fr,
			sizeof(u32));
		if (rc) {
			i_vpr_e(inst, "%s: set auto frame rate failed\n",
				__func__);
			goto exit;
		}
		inst->auto_framerate = curr_fr;
	}
exit:
	return rc;
}
/*
 * msm_vidc_update_input_rate() - record a new input-buffer arrival time and
 * recompute the INPUT_RATE capability (Q16 fps) over the recent window.
 * @inst:    video session instance
 * @time_us: arrival time of the input buffer in microseconds
 *
 * A timer node is pool-allocated, appended to input_timer_list, then the
 * average inter-arrival interval over the whole list is folded into
 * INPUT_RATE as Q16 fps. Once the list holds INPUT_TIMER_LIST_SIZE or more
 * nodes, the oldest entry is dropped to keep a sliding window.
 */
int msm_vidc_update_input_rate(struct msm_vidc_inst *inst, u64 time_us)
{
	struct msm_vidc_input_timer *input_timer;
	struct msm_vidc_input_timer *prev_timer = NULL;
	u64 counter = 0;
	u64 input_timer_sum_us = 0;

	if (!inst || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	input_timer = msm_memory_pool_alloc(inst, MSM_MEM_POOL_BUF_TIMER);
	if (!input_timer)
		return -ENOMEM;

	input_timer->time_us = time_us;
	INIT_LIST_HEAD(&input_timer->list);
	list_add_tail(&input_timer->list, &inst->input_timer_list);

	/*
	 * NOTE: input_timer is intentionally reused as the iteration cursor
	 * below; the newly added node is the list tail, so it is still
	 * visited last. Sum deltas between consecutive arrivals.
	 */
	list_for_each_entry(input_timer, &inst->input_timer_list, list) {
		if (prev_timer) {
			input_timer_sum_us += input_timer->time_us - prev_timer->time_us;
			counter++;
		}
		prev_timer = input_timer;
	}

	/* average fps = intervals / total seconds, stored as Q16 */
	if (input_timer_sum_us)
		inst->capabilities->cap[INPUT_RATE].value =
			(s32)(DIV64_U64_ROUND_CLOSEST(counter * 1000000,
				input_timer_sum_us) << 16);

	/* delete the first entry once counter >= INPUT_TIMER_LIST_SIZE */
	if (counter >= INPUT_TIMER_LIST_SIZE) {
		input_timer = list_first_entry(&inst->input_timer_list,
			struct msm_vidc_input_timer, list);
		list_del_init(&input_timer->list);
		msm_memory_pool_free(inst, input_timer);
	}

	return 0;
}
  2172. int msm_vidc_flush_input_timer(struct msm_vidc_inst *inst)
  2173. {
  2174. struct msm_vidc_input_timer *input_timer, *dummy_timer;
  2175. if (!inst || !inst->capabilities) {
  2176. d_vpr_e("%s: invalid params\n", __func__);
  2177. return -EINVAL;
  2178. }
  2179. i_vpr_l(inst, "%s: flush input_timer list\n", __func__);
  2180. list_for_each_entry_safe(input_timer, dummy_timer, &inst->input_timer_list, list) {
  2181. list_del_init(&input_timer->list);
  2182. msm_memory_pool_free(inst, input_timer);
  2183. }
  2184. return 0;
  2185. }
  2186. int msm_vidc_get_input_rate(struct msm_vidc_inst *inst)
  2187. {
  2188. if (!inst || !inst->capabilities) {
  2189. d_vpr_e("%s: Invalid params\n", __func__);
  2190. return 0;
  2191. }
  2192. return inst->capabilities->cap[INPUT_RATE].value >> 16;
  2193. }
  2194. int msm_vidc_get_timestamp_rate(struct msm_vidc_inst *inst)
  2195. {
  2196. if (!inst || !inst->capabilities) {
  2197. d_vpr_e("%s: Invalid params\n", __func__);
  2198. return 0;
  2199. }
  2200. return inst->capabilities->cap[TIMESTAMP_RATE].value >> 16;
  2201. }
  2202. int msm_vidc_get_frame_rate(struct msm_vidc_inst *inst)
  2203. {
  2204. if (!inst || !inst->capabilities) {
  2205. d_vpr_e("%s: Invalid params\n", __func__);
  2206. return 0;
  2207. }
  2208. return inst->capabilities->cap[FRAME_RATE].value >> 16;
  2209. }
  2210. int msm_vidc_get_operating_rate(struct msm_vidc_inst *inst)
  2211. {
  2212. if (!inst || !inst->capabilities) {
  2213. d_vpr_e("%s: Invalid params\n", __func__);
  2214. return 0;
  2215. }
  2216. return inst->capabilities->cap[OPERATING_RATE].value >> 16;
  2217. }
/*
 * msm_vidc_insert_sort() - insert @entry into an ascending-ordered list.
 * @head:  list head of msm_vidc_sort nodes ordered by ->val
 * @entry: node to insert
 *
 * Keeps the list sorted by ->val. Equal values are placed after the
 * existing node with the same value. Returns -EINVAL on NULL params,
 * 0 otherwise.
 */
static int msm_vidc_insert_sort(struct list_head *head,
	struct msm_vidc_sort *entry)
{
	struct msm_vidc_sort *first, *node;
	struct msm_vidc_sort *prev = NULL;
	bool is_inserted = false;

	if (!head || !entry) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	/* empty list: entry becomes the only element */
	if (list_empty(head)) {
		list_add(&entry->list, head);
		return 0;
	}

	/* smaller than the current minimum: insert at the front */
	first = list_first_entry(head, struct msm_vidc_sort, list);
	if (entry->val < first->val) {
		list_add(&entry->list, head);
		return 0;
	}

	/* find the pair (prev, node) bracketing entry->val and splice between */
	list_for_each_entry(node, head, list) {
		if (prev &&
			entry->val >= prev->val && entry->val <= node->val) {
			list_add(&entry->list, &prev->list);
			is_inserted = true;
			break;
		}
		prev = node;
	}

	/* larger than everything: append after the last node */
	if (!is_inserted && prev)
		list_add(&entry->list, &prev->list);

	return 0;
}
  2250. static struct msm_vidc_timestamp *msm_vidc_get_least_rank_ts(struct msm_vidc_inst *inst)
  2251. {
  2252. struct msm_vidc_timestamp *ts, *final = NULL;
  2253. u64 least_rank = INT_MAX;
  2254. if (!inst) {
  2255. d_vpr_e("%s: Invalid params\n", __func__);
  2256. return NULL;
  2257. }
  2258. list_for_each_entry(ts, &inst->timestamps.list, sort.list) {
  2259. if (ts->rank < least_rank) {
  2260. least_rank = ts->rank;
  2261. final = ts;
  2262. }
  2263. }
  2264. return final;
  2265. }
  2266. int msm_vidc_flush_ts(struct msm_vidc_inst *inst)
  2267. {
  2268. struct msm_vidc_timestamp *temp, *ts = NULL;
  2269. if (!inst) {
  2270. d_vpr_e("%s: Invalid params\n", __func__);
  2271. return -EINVAL;
  2272. }
  2273. list_for_each_entry_safe(ts, temp, &inst->timestamps.list, sort.list) {
  2274. i_vpr_l(inst, "%s: flushing ts: val %llu, rank %llu\n",
  2275. __func__, ts->sort.val, ts->rank);
  2276. list_del(&ts->sort.list);
  2277. msm_memory_pool_free(inst, ts);
  2278. }
  2279. inst->timestamps.count = 0;
  2280. inst->timestamps.rank = 0;
  2281. return 0;
  2282. }
  2283. int msm_vidc_update_timestamp_rate(struct msm_vidc_inst *inst, u64 timestamp)
  2284. {
  2285. struct msm_vidc_timestamp *ts, *prev;
  2286. int rc = 0;
  2287. u32 window_size = 0;
  2288. u32 timestamp_rate = 0;
  2289. u64 ts_ms = 0;
  2290. u32 counter = 0;
  2291. if (!inst) {
  2292. d_vpr_e("%s: Invalid params\n", __func__);
  2293. return -EINVAL;
  2294. }
  2295. ts = msm_memory_pool_alloc(inst, MSM_MEM_POOL_TIMESTAMP);
  2296. if (!ts) {
  2297. i_vpr_e(inst, "%s: ts alloc failed\n", __func__);
  2298. return -ENOMEM;
  2299. }
  2300. INIT_LIST_HEAD(&ts->sort.list);
  2301. ts->sort.val = timestamp;
  2302. ts->rank = inst->timestamps.rank++;
  2303. rc = msm_vidc_insert_sort(&inst->timestamps.list, &ts->sort);
  2304. if (rc)
  2305. return rc;
  2306. inst->timestamps.count++;
  2307. if (is_encode_session(inst))
  2308. window_size = ENC_FPS_WINDOW;
  2309. else
  2310. window_size = DEC_FPS_WINDOW;
  2311. /* keep sliding window */
  2312. if (inst->timestamps.count > window_size) {
  2313. ts = msm_vidc_get_least_rank_ts(inst);
  2314. if (!ts) {
  2315. i_vpr_e(inst, "%s: least rank ts is NULL\n", __func__);
  2316. return -EINVAL;
  2317. }
  2318. inst->timestamps.count--;
  2319. list_del(&ts->sort.list);
  2320. msm_memory_pool_free(inst, ts);
  2321. }
  2322. /* Calculate timestamp rate */
  2323. list_for_each_entry(ts, &inst->timestamps.list, sort.list) {
  2324. if (prev) {
  2325. if (ts->sort.val == prev->sort.val)
  2326. continue;
  2327. ts_ms += div_u64(ts->sort.val - prev->sort.val, 1000000);
  2328. counter++;
  2329. }
  2330. prev = ts;
  2331. }
  2332. if (ts_ms)
  2333. timestamp_rate = (u32)div_u64((u64)counter * 1000, ts_ms);
  2334. msm_vidc_update_cap_value(inst, TIMESTAMP_RATE, timestamp_rate << 16, __func__);
  2335. return 0;
  2336. }
  2337. int msm_vidc_ts_reorder_insert_timestamp(struct msm_vidc_inst *inst, u64 timestamp)
  2338. {
  2339. struct msm_vidc_timestamp *ts;
  2340. int rc = 0;
  2341. if (!inst) {
  2342. d_vpr_e("%s: Invalid params\n", __func__);
  2343. return -EINVAL;
  2344. }
  2345. /* allocate ts from pool */
  2346. ts = msm_memory_pool_alloc(inst, MSM_MEM_POOL_TIMESTAMP);
  2347. if (!ts) {
  2348. i_vpr_e(inst, "%s: ts alloc failed\n", __func__);
  2349. return -ENOMEM;
  2350. }
  2351. /* initialize ts node */
  2352. INIT_LIST_HEAD(&ts->sort.list);
  2353. ts->sort.val = timestamp;
  2354. rc = msm_vidc_insert_sort(&inst->ts_reorder.list, &ts->sort);
  2355. if (rc)
  2356. return rc;
  2357. inst->ts_reorder.count++;
  2358. return 0;
  2359. }
  2360. int msm_vidc_ts_reorder_remove_timestamp(struct msm_vidc_inst *inst, u64 timestamp)
  2361. {
  2362. struct msm_vidc_timestamp *ts, *temp;
  2363. if (!inst) {
  2364. d_vpr_e("%s: Invalid params\n", __func__);
  2365. return -EINVAL;
  2366. }
  2367. /* remove matching node */
  2368. list_for_each_entry_safe(ts, temp, &inst->ts_reorder.list, sort.list) {
  2369. if (ts->sort.val == timestamp) {
  2370. list_del_init(&ts->sort.list);
  2371. inst->ts_reorder.count--;
  2372. msm_memory_pool_free(inst, ts);
  2373. break;
  2374. }
  2375. }
  2376. return 0;
  2377. }
  2378. int msm_vidc_ts_reorder_get_first_timestamp(struct msm_vidc_inst *inst, u64 *timestamp)
  2379. {
  2380. struct msm_vidc_timestamp *ts;
  2381. if (!inst || !timestamp) {
  2382. d_vpr_e("%s: Invalid params\n", __func__);
  2383. return -EINVAL;
  2384. }
  2385. /* check if list empty */
  2386. if (list_empty(&inst->ts_reorder.list)) {
  2387. i_vpr_e(inst, "%s: list empty. ts %lld\n", __func__, timestamp);
  2388. return -EINVAL;
  2389. }
  2390. /* get 1st node from reorder list */
  2391. ts = list_first_entry(&inst->ts_reorder.list,
  2392. struct msm_vidc_timestamp, sort.list);
  2393. list_del_init(&ts->sort.list);
  2394. /* copy timestamp */
  2395. *timestamp = ts->sort.val;
  2396. inst->ts_reorder.count--;
  2397. msm_memory_pool_free(inst, ts);
  2398. return 0;
  2399. }
  2400. int msm_vidc_ts_reorder_flush(struct msm_vidc_inst *inst)
  2401. {
  2402. struct msm_vidc_timestamp *temp, *ts = NULL;
  2403. if (!inst) {
  2404. d_vpr_e("%s: Invalid params\n", __func__);
  2405. return -EINVAL;
  2406. }
  2407. /* flush all entries */
  2408. list_for_each_entry_safe(ts, temp, &inst->ts_reorder.list, sort.list) {
  2409. i_vpr_l(inst, "%s: flushing ts: val %lld\n", __func__, ts->sort.val);
  2410. list_del(&ts->sort.list);
  2411. msm_memory_pool_free(inst, ts);
  2412. }
  2413. inst->ts_reorder.count = 0;
  2414. return 0;
  2415. }
  2416. int msm_vidc_get_delayed_unmap(struct msm_vidc_inst *inst, struct msm_vidc_map *map)
  2417. {
  2418. int rc = 0;
  2419. if (!inst || !map) {
  2420. d_vpr_e("%s: invalid params\n", __func__);
  2421. return -EINVAL;
  2422. }
  2423. map->skip_delayed_unmap = 1;
  2424. rc = msm_vidc_memory_map(inst->core, map);
  2425. if (rc)
  2426. return rc;
  2427. return 0;
  2428. }
/*
 * msm_vidc_put_delayed_unmap() - release the extra reference taken by
 * msm_vidc_get_delayed_unmap().
 * @inst: video session instance
 * @map:  mapping previously flagged skip_delayed_unmap
 *
 * Clears the flag and unmaps once; if that drops the refcount to zero the
 * dmabuf reference is released and the node is unlinked and freed.
 * Returns -EINVAL if the map was not flagged, otherwise the unmap result.
 */
int msm_vidc_put_delayed_unmap(struct msm_vidc_inst *inst, struct msm_vidc_map *map)
{
	int rc = 0;

	if (!inst || !map) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	/* unbalanced put: the matching get was never done */
	if (!map->skip_delayed_unmap) {
		i_vpr_e(inst, "%s: no delayed unmap, addr %#x\n",
			__func__, map->device_addr);
		return -EINVAL;
	}

	map->skip_delayed_unmap = 0;
	rc = msm_vidc_memory_unmap(inst->core, map);
	if (rc)
		i_vpr_e(inst, "%s: unmap failed\n", __func__);

	/* last reference gone: release dmabuf and the node itself */
	if (!map->refcount) {
		msm_vidc_memory_put_dmabuf(inst, map->dmabuf);
		list_del(&map->list);
		msm_memory_pool_free(inst, map);
	}

	return rc;
}
  2452. int msm_vidc_unmap_buffers(struct msm_vidc_inst *inst,
  2453. enum msm_vidc_buffer_type type)
  2454. {
  2455. int rc = 0;
  2456. struct msm_vidc_mappings *mappings;
  2457. struct msm_vidc_map *map, *dummy;
  2458. if (!inst) {
  2459. d_vpr_e("%s: invalid params\n", __func__);
  2460. return -EINVAL;
  2461. }
  2462. mappings = msm_vidc_get_mappings(inst, type, __func__);
  2463. if (!mappings)
  2464. return -EINVAL;
  2465. list_for_each_entry_safe(map, dummy, &mappings->list, list) {
  2466. msm_vidc_memory_unmap_completely(inst, map);
  2467. }
  2468. return rc;
  2469. }
/*
 * msm_vidc_unmap_driver_buf() - drop one map reference for @buf's dmabuf.
 * @inst: video session instance
 * @buf:  driver buffer whose mapping should be released
 *
 * Looks up the mapping node by dmabuf identity, unmaps once, and frees the
 * node (plus its dmabuf reference) if the refcount reaches zero.
 * Returns -EINVAL if the mapping is missing or the unmap fails.
 */
int msm_vidc_unmap_driver_buf(struct msm_vidc_inst *inst,
	struct msm_vidc_buffer *buf)
{
	int rc = 0;
	struct msm_vidc_mappings *mappings;
	struct msm_vidc_map *map = NULL;
	bool found = false;

	if (!inst || !buf) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	mappings = msm_vidc_get_mappings(inst, buf->type, __func__);
	if (!mappings)
		return -EINVAL;

	/* sanity check to see if it was not removed */
	list_for_each_entry(map, &mappings->list, list) {
		if (map->dmabuf == buf->dmabuf) {
			found = true;
			break;
		}
	}
	if (!found) {
		print_vidc_buffer(VIDC_ERR, "err ", "no buf in mappings", inst, buf);
		return -EINVAL;
	}

	rc = msm_vidc_memory_unmap(inst->core, map);
	if (rc) {
		print_vidc_buffer(VIDC_ERR, "err ", "unmap failed", inst, buf);
		return -EINVAL;
	}

	/* finally delete if refcount is zero */
	if (!map->refcount) {
		msm_vidc_memory_put_dmabuf(inst, map->dmabuf);
		list_del(&map->list);
		msm_memory_pool_free(inst, map);
	}

	return rc;
}
  2508. int msm_vidc_map_driver_buf(struct msm_vidc_inst *inst,
  2509. struct msm_vidc_buffer *buf)
  2510. {
  2511. int rc = 0;
  2512. struct msm_vidc_mappings *mappings;
  2513. struct msm_vidc_map *map;
  2514. bool found = false;
  2515. if (!inst || !buf) {
  2516. d_vpr_e("%s: invalid params\n", __func__);
  2517. return -EINVAL;
  2518. }
  2519. mappings = msm_vidc_get_mappings(inst, buf->type, __func__);
  2520. if (!mappings)
  2521. return -EINVAL;
  2522. /*
  2523. * new buffer: map twice for delayed unmap feature sake
  2524. * existing buffer: map once
  2525. */
  2526. list_for_each_entry(map, &mappings->list, list) {
  2527. if (map->dmabuf == buf->dmabuf) {
  2528. found = true;
  2529. break;
  2530. }
  2531. }
  2532. if (!found) {
  2533. /* new buffer case */
  2534. map = msm_memory_pool_alloc(inst, MSM_MEM_POOL_MAP);
  2535. if (!map) {
  2536. i_vpr_e(inst, "%s: alloc failed\n", __func__);
  2537. return -ENOMEM;
  2538. }
  2539. INIT_LIST_HEAD(&map->list);
  2540. map->type = buf->type;
  2541. map->dmabuf = msm_vidc_memory_get_dmabuf(inst, buf->fd);
  2542. if (!map->dmabuf)
  2543. return -EINVAL;
  2544. map->region = msm_vidc_get_buffer_region(inst, buf->type, __func__);
  2545. /* delayed unmap feature needed for decoder output buffers */
  2546. if (is_decode_session(inst) && is_output_buffer(buf->type)) {
  2547. rc = msm_vidc_get_delayed_unmap(inst, map);
  2548. if (rc) {
  2549. msm_vidc_memory_put_dmabuf(inst, map->dmabuf);
  2550. msm_memory_pool_free(inst, map);
  2551. return rc;
  2552. }
  2553. }
  2554. list_add_tail(&map->list, &mappings->list);
  2555. }
  2556. rc = msm_vidc_memory_map(inst->core, map);
  2557. if (rc)
  2558. return rc;
  2559. buf->device_addr = map->device_addr;
  2560. return 0;
  2561. }
  2562. int msm_vidc_put_driver_buf(struct msm_vidc_inst *inst,
  2563. struct msm_vidc_buffer *buf)
  2564. {
  2565. int rc = 0;
  2566. if (!inst || !buf) {
  2567. d_vpr_e("%s: invalid params\n", __func__);
  2568. return -EINVAL;
  2569. }
  2570. msm_vidc_unmap_driver_buf(inst, buf);
  2571. msm_vidc_memory_put_dmabuf(inst, buf->dmabuf);
  2572. /* delete the buffer from buffers->list */
  2573. list_del(&buf->list);
  2574. msm_memory_pool_free(inst, buf);
  2575. return rc;
  2576. }
/*
 * msm_vidc_get_driver_buf() - build and register a driver buffer for a
 * vb2 buffer being queued.
 * @inst: video session instance
 * @vb2:  vb2 buffer from the v4l2 core
 *
 * Pool-allocates a msm_vidc_buffer, links it onto the matching buffers
 * list, copies vb2 plane-0 info into it, takes a dmabuf reference, marks
 * it DEFERRED, and maps it for device access. Returns NULL on any failure
 * after rolling back the dmabuf reference, list linkage, and pool node.
 * NOTE(review): the error path assumes buf->dmabuf is NULL-safe to put
 * when vb2_buffer_to_driver() fails before the dmabuf is taken - presumes
 * pool allocations are zero-initialized; verify msm_memory_pool_alloc().
 */
struct msm_vidc_buffer *msm_vidc_get_driver_buf(struct msm_vidc_inst *inst,
	struct vb2_buffer *vb2)
{
	int rc = 0;
	struct msm_vidc_buffer *buf = NULL;
	struct msm_vidc_buffers *buffers;
	enum msm_vidc_buffer_type buf_type;

	if (!inst || !vb2) {
		d_vpr_e("%s: invalid params\n", __func__);
		return NULL;
	}

	buf_type = v4l2_type_to_driver(vb2->type, __func__);
	if (!buf_type)
		return NULL;

	buffers = msm_vidc_get_buffers(inst, buf_type, __func__);
	if (!buffers)
		return NULL;

	buf = msm_memory_pool_alloc(inst, MSM_MEM_POOL_BUFFER);
	if (!buf) {
		i_vpr_e(inst, "%s: alloc failed\n", __func__);
		return NULL;
	}
	INIT_LIST_HEAD(&buf->list);
	list_add_tail(&buf->list, &buffers->list);

	rc = vb2_buffer_to_driver(vb2, buf);
	if (rc)
		goto error;

	buf->dmabuf = msm_vidc_memory_get_dmabuf(inst, buf->fd);
	if (!buf->dmabuf)
		goto error;

	/* treat every buffer as deferred buffer initially */
	buf->attr |= MSM_VIDC_ATTR_DEFERRED;

	rc = msm_vidc_map_driver_buf(inst, buf);
	if (rc)
		goto error;

	return buf;

error:
	/* roll back in reverse order of acquisition */
	msm_vidc_memory_put_dmabuf(inst, buf->dmabuf);
	list_del(&buf->list);
	msm_memory_pool_free(inst, buf);
	return NULL;
}
  2619. struct msm_vidc_buffer *get_meta_buffer(struct msm_vidc_inst *inst,
  2620. struct msm_vidc_buffer *buf)
  2621. {
  2622. struct msm_vidc_buffer *mbuf;
  2623. struct msm_vidc_buffers *buffers;
  2624. bool found = false;
  2625. if (!inst || !buf) {
  2626. d_vpr_e("%s: invalid params\n", __func__);
  2627. return NULL;
  2628. }
  2629. if (buf->type == MSM_VIDC_BUF_INPUT) {
  2630. buffers = &inst->buffers.input_meta;
  2631. } else if (buf->type == MSM_VIDC_BUF_OUTPUT) {
  2632. buffers = &inst->buffers.output_meta;
  2633. } else {
  2634. i_vpr_e(inst, "%s: invalid buffer type %d\n",
  2635. __func__, buf->type);
  2636. return NULL;
  2637. }
  2638. list_for_each_entry(mbuf, &buffers->list, list) {
  2639. if (mbuf->index == buf->index) {
  2640. found = true;
  2641. break;
  2642. }
  2643. }
  2644. if (!found)
  2645. return NULL;
  2646. return mbuf;
  2647. }
  2648. bool msm_vidc_is_super_buffer(struct msm_vidc_inst *inst)
  2649. {
  2650. struct msm_vidc_inst_capability *capability = NULL;
  2651. if (!inst || !inst->capabilities) {
  2652. d_vpr_e("%s: Invalid params\n", __func__);
  2653. return false;
  2654. }
  2655. capability = inst->capabilities;
  2656. return !!capability->cap[SUPER_FRAME].value;
  2657. }
  2658. static bool is_single_session(struct msm_vidc_inst *inst)
  2659. {
  2660. struct msm_vidc_core *core;
  2661. u32 count = 0;
  2662. if (!inst) {
  2663. d_vpr_e("%s: Invalid params\n", __func__);
  2664. return false;
  2665. }
  2666. core = inst->core;
  2667. core_lock(core, __func__);
  2668. list_for_each_entry(inst, &core->instances, list)
  2669. count++;
  2670. core_unlock(core, __func__);
  2671. return count == 1;
  2672. }
/*
 * msm_vidc_allow_dcvs() - decide whether dynamic clock-voltage scaling may
 * be enabled for this session and record the decision in inst->power.
 * @inst: video session instance
 *
 * DCVS is disallowed when: a debugfs clock vote overrides scaling, the core
 * lacks the DCVS capability, decode batching or encoder super-buffer mode
 * is active, or the session is thumbnail / non-realtime / critical-priority
 * / image / low-latency, or a decoder is running at or above the maximum
 * supported frame rate. The result is written to inst->power.dcvs_mode and
 * dcvs_flags is cleared.
 */
void msm_vidc_allow_dcvs(struct msm_vidc_inst *inst)
{
	bool allow = false;
	struct msm_vidc_core *core;
	u32 fps;

	if (!inst || !inst->core || !inst->capabilities) {
		d_vpr_e("%s: Invalid args: %pK\n", __func__, inst);
		return;
	}
	core = inst->core;

	/* debugfs/module-param clock vote overrides dynamic scaling */
	allow = !msm_vidc_clock_voting;
	if (!allow) {
		i_vpr_h(inst, "%s: core_clock_voting is set\n", __func__);
		goto exit;
	}

	allow = core->capabilities[DCVS].value;
	if (!allow) {
		i_vpr_h(inst, "%s: core doesn't support dcvs\n", __func__);
		goto exit;
	}

	allow = !inst->decode_batch.enable;
	if (!allow) {
		i_vpr_h(inst, "%s: decode_batching enabled\n", __func__);
		goto exit;
	}

	allow = !msm_vidc_is_super_buffer(inst);
	if (!allow) {
		i_vpr_h(inst, "%s: encode_batching(super_buffer) enabled\n", __func__);
		goto exit;
	}

	allow = !is_thumbnail_session(inst);
	if (!allow) {
		i_vpr_h(inst, "%s: thumbnail session\n", __func__);
		goto exit;
	}

	allow = is_realtime_session(inst);
	if (!allow) {
		i_vpr_h(inst, "%s: non-realtime session\n", __func__);
		goto exit;
	}

	allow = !is_critical_priority_session(inst);
	if (!allow) {
		i_vpr_h(inst, "%s: critical priority session\n", __func__);
		goto exit;
	}

	allow = !is_image_session(inst);
	if (!allow) {
		i_vpr_h(inst, "%s: image session\n", __func__);
		goto exit;
	}

	allow = !is_lowlatency_session(inst);
	if (!allow) {
		i_vpr_h(inst, "%s: lowlatency session\n", __func__);
		goto exit;
	}

	/* decoder already at max fps leaves no headroom for scaling down */
	fps = msm_vidc_get_fps(inst);
	if (is_decode_session(inst) &&
		fps >= inst->capabilities->cap[FRAME_RATE].max) {
		allow = false;
		i_vpr_h(inst, "%s: unsupported fps %d\n", __func__, fps);
		goto exit;
	}
exit:
	i_vpr_hp(inst, "%s: dcvs: %s\n", __func__, allow ? "enabled" : "disabled");
	inst->power.dcvs_flags = 0;
	inst->power.dcvs_mode = allow;
}
/*
 * msm_vidc_allow_decode_batch() - decide whether decode batching may stay
 * enabled for this session.
 * @inst: video session instance
 *
 * Batching is allowed only when it was requested, the core supports
 * DECODE_BATCH, this is the sole session on the core, it is a realtime
 * decoder (not thumbnail, image, or low-latency), and both the session
 * fps and macroblocks-per-frame are below the BATCH_FPS / BATCH_MBPF
 * capability limits. Each rejection is logged at high verbosity.
 */
bool msm_vidc_allow_decode_batch(struct msm_vidc_inst *inst)
{
	struct msm_vidc_inst_capability *capability;
	struct msm_vidc_core *core;
	bool allow = false;
	u32 value = 0;

	if (!inst || !inst->core || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return false;
	}
	core = inst->core;
	capability = inst->capabilities;

	allow = inst->decode_batch.enable;
	if (!allow) {
		i_vpr_h(inst, "%s: batching already disabled\n", __func__);
		goto exit;
	}

	allow = core->capabilities[DECODE_BATCH].value;
	if (!allow) {
		i_vpr_h(inst, "%s: core doesn't support batching\n", __func__);
		goto exit;
	}

	allow = is_single_session(inst);
	if (!allow) {
		i_vpr_h(inst, "%s: multiple sessions running\n", __func__);
		goto exit;
	}

	allow = is_decode_session(inst);
	if (!allow) {
		i_vpr_h(inst, "%s: not a decoder session\n", __func__);
		goto exit;
	}

	allow = !is_thumbnail_session(inst);
	if (!allow) {
		i_vpr_h(inst, "%s: thumbnail session\n", __func__);
		goto exit;
	}

	allow = !is_image_session(inst);
	if (!allow) {
		i_vpr_h(inst, "%s: image session\n", __func__);
		goto exit;
	}

	allow = is_realtime_session(inst);
	if (!allow) {
		i_vpr_h(inst, "%s: non-realtime session\n", __func__);
		goto exit;
	}

	allow = !is_lowlatency_session(inst);
	if (!allow) {
		i_vpr_h(inst, "%s: lowlatency session\n", __func__);
		goto exit;
	}

	/* batching adds latency; restrict it to low fps / small frames */
	value = msm_vidc_get_fps(inst);
	allow = value < capability->cap[BATCH_FPS].value;
	if (!allow) {
		i_vpr_h(inst, "%s: unsupported fps %u, max %u\n", __func__,
			value, capability->cap[BATCH_FPS].value);
		goto exit;
	}

	value = msm_vidc_get_mbs_per_frame(inst);
	allow = value < capability->cap[BATCH_MBPF].value;
	if (!allow) {
		i_vpr_h(inst, "%s: unsupported mbpf %u, max %u\n", __func__,
			value, capability->cap[BATCH_MBPF].value);
		goto exit;
	}
exit:
	i_vpr_hp(inst, "%s: batching: %s\n", __func__, allow ? "enabled" : "disabled");
	return allow;
}
  2810. static void msm_vidc_update_input_cr(struct msm_vidc_inst *inst, u32 idx, u32 cr)
  2811. {
  2812. struct msm_vidc_input_cr_data *temp, *next;
  2813. bool found = false;
  2814. list_for_each_entry_safe(temp, next, &inst->enc_input_crs, list) {
  2815. if (temp->index == idx) {
  2816. temp->input_cr = cr;
  2817. found = true;
  2818. break;
  2819. }
  2820. }
  2821. if (!found) {
  2822. temp = kzalloc(sizeof(*temp), GFP_KERNEL);
  2823. if (!temp) {
  2824. i_vpr_e(inst, "%s: malloc failure.\n", __func__);
  2825. return;
  2826. }
  2827. temp->index = idx;
  2828. temp->input_cr = cr;
  2829. list_add_tail(&temp->list, &inst->enc_input_crs);
  2830. }
  2831. }
  2832. static void msm_vidc_free_input_cr_list(struct msm_vidc_inst *inst)
  2833. {
  2834. struct msm_vidc_input_cr_data *temp, *next;
  2835. list_for_each_entry_safe(temp, next, &inst->enc_input_crs, list) {
  2836. list_del(&temp->list);
  2837. kfree(temp);
  2838. }
  2839. INIT_LIST_HEAD(&inst->enc_input_crs);
  2840. }
  2841. void msm_vidc_update_stats(struct msm_vidc_inst *inst,
  2842. struct msm_vidc_buffer *buf, enum msm_vidc_debugfs_event etype)
  2843. {
  2844. if (!inst || !buf || !inst->capabilities) {
  2845. d_vpr_e("%s: invalid params\n", __func__);
  2846. return;
  2847. }
  2848. if ((is_decode_session(inst) && etype == MSM_VIDC_DEBUGFS_EVENT_ETB) ||
  2849. (is_encode_session(inst) && etype == MSM_VIDC_DEBUGFS_EVENT_FBD))
  2850. inst->stats.data_size += buf->data_size;
  2851. msm_vidc_debugfs_update(inst, etype);
  2852. }
  2853. void msm_vidc_print_stats(struct msm_vidc_inst *inst)
  2854. {
  2855. u32 frame_rate, operating_rate, achieved_fps, priority, etb, ebd, ftb, fbd, dt_ms;
  2856. u64 bitrate_kbps = 0, time_ms = ktime_get_ns() / 1000 / 1000;
  2857. if (!inst || !inst->capabilities) {
  2858. d_vpr_e("%s: invalid params\n", __func__);
  2859. return;
  2860. }
  2861. etb = inst->debug_count.etb - inst->stats.count.etb;
  2862. ebd = inst->debug_count.ebd - inst->stats.count.ebd;
  2863. ftb = inst->debug_count.ftb - inst->stats.count.ftb;
  2864. fbd = inst->debug_count.fbd - inst->stats.count.fbd;
  2865. frame_rate = inst->capabilities->cap[FRAME_RATE].value >> 16;
  2866. operating_rate = inst->capabilities->cap[OPERATING_RATE].value >> 16;
  2867. priority = inst->capabilities->cap[PRIORITY].value;
  2868. dt_ms = time_ms - inst->stats.time_ms;
  2869. achieved_fps = (fbd * 1000) / dt_ms;
  2870. bitrate_kbps = (inst->stats.data_size * 8 * 1000) / (dt_ms * 1024);
  2871. i_vpr_hp(inst,
  2872. "stats: counts (etb,ebd,ftb,fbd): %u %u %u %u (total %llu %llu %llu %llu), achieved bitrate %lldKbps fps %u/s, frame rate %u, operating rate %u, priority %u, dt %ums\n",
  2873. etb, ebd, ftb, fbd, inst->debug_count.etb, inst->debug_count.ebd,
  2874. inst->debug_count.ftb, inst->debug_count.fbd,
  2875. bitrate_kbps, achieved_fps, frame_rate, operating_rate, priority, dt_ms);
  2876. inst->stats.count = inst->debug_count;
  2877. inst->stats.data_size = 0;
  2878. inst->stats.time_ms = time_ms;
  2879. }
/*
 * schedule_stats_work() - (re)arm the periodic stats print on the session's
 * response workqueue, using the core's STATS_TIMEOUT_MS interval.
 * @inst: video session instance
 *
 * Returns 0 on success or when scheduling is intentionally skipped,
 * -EINVAL on bad params.
 */
int schedule_stats_work(struct msm_vidc_inst *inst)
{
	struct msm_vidc_core *core;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	/**
	 * Hfi session is already closed and inst also going to be
	 * closed soon. So skip scheduling new stats_work to avoid
	 * use-after-free issues with close sequence.
	 */
	if (!inst->packet) {
		i_vpr_e(inst, "skip scheduling stats_work\n");
		return 0;
	}
	core = inst->core;
	/* mod_delayed_work re-arms an already pending work item */
	mod_delayed_work(inst->response_workq, &inst->stats_work,
		msecs_to_jiffies(core->capabilities[STATS_TIMEOUT_MS].value));

	return 0;
}
  2901. int cancel_stats_work_sync(struct msm_vidc_inst *inst)
  2902. {
  2903. if (!inst) {
  2904. d_vpr_e("%s: Invalid arguments\n", __func__);
  2905. return -EINVAL;
  2906. }
  2907. cancel_delayed_work_sync(&inst->stats_work);
  2908. return 0;
  2909. }
  2910. void msm_vidc_stats_handler(struct work_struct *work)
  2911. {
  2912. struct msm_vidc_inst *inst;
  2913. inst = container_of(work, struct msm_vidc_inst, stats_work.work);
  2914. inst = get_inst_ref(g_core, inst);
  2915. if (!inst || !inst->packet) {
  2916. d_vpr_e("%s: invalid params\n", __func__);
  2917. return;
  2918. }
  2919. inst_lock(inst, __func__);
  2920. msm_vidc_print_stats(inst);
  2921. schedule_stats_work(inst);
  2922. inst_unlock(inst, __func__);
  2923. put_inst(inst);
  2924. }
/*
 * Queue one driver buffer (and its meta buffer, if present) to firmware.
 *
 * On success the buffer (and meta buffer) transition DEFERRED -> QUEUED,
 * power/debugfs accounting is updated and 0 is returned; otherwise a
 * negative errno. Assumes the instance lock is held by the caller —
 * TODO confirm against call sites.
 */
static int msm_vidc_queue_buffer(struct msm_vidc_inst *inst, struct msm_vidc_buffer *buf)
{
	struct msm_vidc_buffer *meta;
	enum msm_vidc_debugfs_event etype;
	int rc = 0;
	u32 cr = 0;

	if (!inst || !buf || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	/* encoder input: latch the per-frame compression ratio, then reset it */
	if (is_encode_session(inst) && is_input_buffer(buf->type)) {
		cr = inst->capabilities->cap[ENC_IP_CR].value;
		msm_vidc_update_input_cr(inst, buf->index, cr);
		msm_vidc_update_cap_value(inst, ENC_IP_CR, 0, __func__);
	}

	/* decoder input: tag codec-config buffers once, then clear the cap */
	if (is_decode_session(inst) && is_input_buffer(buf->type) &&
	    inst->capabilities->cap[CODEC_CONFIG].value) {
		buf->flags |= MSM_VIDC_BUF_FLAG_CODECCONFIG;
		msm_vidc_update_cap_value(inst, CODEC_CONFIG, 0, __func__);
	}

	/* decoder output may still be held read-only by firmware */
	if (is_decode_session(inst) && is_output_buffer(buf->type)) {
		rc = msm_vidc_process_readonly_buffers(inst, buf);
		if (rc)
			return rc;
	}

	print_vidc_buffer(VIDC_HIGH, "high", "qbuf", inst, buf);
	meta = get_meta_buffer(inst, buf);
	if (meta)
		print_vidc_buffer(VIDC_LOW, "low ", "qbuf", inst, meta);
	/* when metadata is enabled on this port, the meta buffer is mandatory */
	if (!meta && is_meta_enabled(inst, buf->type)) {
		print_vidc_buffer(VIDC_ERR, "err ", "missing meta for", inst, buf);
		return -EINVAL;
	}

	if (msm_vidc_is_super_buffer(inst) && is_input_buffer(buf->type))
		rc = venus_hfi_queue_super_buffer(inst, buf, meta);
	else
		rc = venus_hfi_queue_buffer(inst, buf, meta);
	if (rc)
		return rc;

	/* firmware owns the buffer(s) now: DEFERRED -> QUEUED */
	buf->attr &= ~MSM_VIDC_ATTR_DEFERRED;
	buf->attr |= MSM_VIDC_ATTR_QUEUED;
	if (meta) {
		meta->attr &= ~MSM_VIDC_ATTR_DEFERRED;
		meta->attr |= MSM_VIDC_ATTR_QUEUED;
	}

	/* insert timestamp for ts_reorder enable case */
	if (is_ts_reorder_allowed(inst) && is_input_buffer(buf->type)) {
		rc = msm_vidc_ts_reorder_insert_timestamp(inst, buf->timestamp);
		/* best-effort: a failed insert is logged but does not fail qbuf */
		if (rc)
			i_vpr_e(inst, "%s: insert timestamp failed\n", __func__);
	}

	/* input buffers feed the power-scaling heuristics */
	if (is_input_buffer(buf->type))
		inst->power.buffer_counter++;

	if (is_input_buffer(buf->type))
		etype = MSM_VIDC_DEBUGFS_EVENT_ETB;
	else
		etype = MSM_VIDC_DEBUGFS_EVENT_FTB;
	msm_vidc_update_stats(inst, buf, etype);

	return 0;
}
  2985. int msm_vidc_queue_deferred_buffers(struct msm_vidc_inst *inst, enum msm_vidc_buffer_type buf_type)
  2986. {
  2987. struct msm_vidc_buffers *buffers;
  2988. struct msm_vidc_buffer *buf;
  2989. int rc = 0;
  2990. if (!inst || !buf_type) {
  2991. d_vpr_e("%s: invalid params\n", __func__);
  2992. return -EINVAL;
  2993. }
  2994. buffers = msm_vidc_get_buffers(inst, buf_type, __func__);
  2995. if (!buffers)
  2996. return -EINVAL;
  2997. msm_vidc_scale_power(inst, true);
  2998. list_for_each_entry(buf, &buffers->list, list) {
  2999. if (!(buf->attr & MSM_VIDC_ATTR_DEFERRED))
  3000. continue;
  3001. rc = msm_vidc_queue_buffer(inst, buf);
  3002. if (rc)
  3003. return rc;
  3004. }
  3005. return 0;
  3006. }
  3007. int msm_vidc_queue_buffer_single(struct msm_vidc_inst *inst, struct vb2_buffer *vb2)
  3008. {
  3009. int rc = 0;
  3010. struct msm_vidc_buffer *buf;
  3011. struct msm_vidc_fence *fence = NULL;
  3012. enum msm_vidc_allow allow;
  3013. if (!inst || !vb2 || !inst->capabilities) {
  3014. d_vpr_e("%s: invalid params\n", __func__);
  3015. return -EINVAL;
  3016. }
  3017. buf = msm_vidc_get_driver_buf(inst, vb2);
  3018. if (!buf)
  3019. return -EINVAL;
  3020. if (is_meta_rx_inp_enabled(inst, META_OUTBUF_FENCE) &&
  3021. is_output_buffer(buf->type)) {
  3022. fence = msm_vidc_fence_create(inst);
  3023. if (!fence)
  3024. return rc;
  3025. buf->fence_id = fence->dma_fence.seqno;
  3026. }
  3027. allow = msm_vidc_allow_qbuf(inst, vb2->type);
  3028. if (allow == MSM_VIDC_DISALLOW) {
  3029. i_vpr_e(inst, "%s: qbuf not allowed\n", __func__);
  3030. rc = -EINVAL;
  3031. goto exit;
  3032. } else if (allow == MSM_VIDC_DEFER) {
  3033. print_vidc_buffer(VIDC_LOW, "low ", "qbuf deferred", inst, buf);
  3034. rc = 0;
  3035. goto exit;
  3036. }
  3037. msm_vidc_scale_power(inst, is_input_buffer(buf->type));
  3038. rc = msm_vidc_queue_buffer(inst, buf);
  3039. if (rc)
  3040. goto exit;
  3041. exit:
  3042. if (rc) {
  3043. i_vpr_e(inst, "%s: qbuf failed\n", __func__);
  3044. if (fence)
  3045. msm_vidc_fence_destroy(inst, (u32)fence->dma_fence.seqno);
  3046. }
  3047. return rc;
  3048. }
/*
 * Tear down one internal (driver-owned) buffer: unmap it, free its
 * allocation, and remove its entries from the mapping, allocation and
 * buffer tracking lists (matched by dmabuf handle). Non-internal types
 * are rejected with a log and return 0.
 *
 * NOTE(review): the size/count reset at the end zeroes the bookkeeping
 * for the whole buffer set even though only one buffer is destroyed —
 * presumably callers destroy all buffers of a type in a loop; confirm
 * against call sites.
 */
int msm_vidc_destroy_internal_buffer(struct msm_vidc_inst *inst,
	struct msm_vidc_buffer *buffer)
{
	struct msm_vidc_buffers *buffers;
	struct msm_vidc_allocations *allocations;
	struct msm_vidc_mappings *mappings;
	struct msm_vidc_alloc *alloc, *alloc_dummy;
	struct msm_vidc_map *map, *map_dummy;
	struct msm_vidc_buffer *buf, *dummy;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	if (!is_internal_buffer(buffer->type)) {
		i_vpr_e(inst, "%s: type: %s is not internal\n",
			__func__, buf_name(buffer->type));
		return 0;
	}
	i_vpr_h(inst, "%s: destroy: type: %8s, size: %9u, device_addr %#x\n", __func__,
		buf_name(buffer->type), buffer->buffer_size, buffer->device_addr);
	buffers = msm_vidc_get_buffers(inst, buffer->type, __func__);
	if (!buffers)
		return -EINVAL;
	allocations = msm_vidc_get_allocations(inst, buffer->type, __func__);
	if (!allocations)
		return -EINVAL;
	mappings = msm_vidc_get_mappings(inst, buffer->type, __func__);
	if (!mappings)
		return -EINVAL;
	/* each list holds at most one entry per dmabuf: unmap then drop it */
	list_for_each_entry_safe(map, map_dummy, &mappings->list, list) {
		if (map->dmabuf == buffer->dmabuf) {
			msm_vidc_memory_unmap(inst->core, map);
			list_del(&map->list);
			msm_memory_pool_free(inst, map);
			break;
		}
	}
	/* free the backing allocation and its tracking entry */
	list_for_each_entry_safe(alloc, alloc_dummy, &allocations->list, list) {
		if (alloc->dmabuf == buffer->dmabuf) {
			msm_vidc_memory_free(inst->core, alloc);
			list_del(&alloc->list);
			msm_memory_pool_free(inst, alloc);
			break;
		}
	}
	/* this loop frees 'buffer' itself — do not touch it afterwards */
	list_for_each_entry_safe(buf, dummy, &buffers->list, list) {
		if (buf->dmabuf == buffer->dmabuf) {
			list_del(&buf->list);
			msm_memory_pool_free(inst, buf);
			break;
		}
	}
	buffers->size = 0;
	buffers->min_count = buffers->extra_count = buffers->actual_count = 0;
	return 0;
}
  3105. int msm_vidc_get_internal_buffers(struct msm_vidc_inst *inst,
  3106. enum msm_vidc_buffer_type buffer_type)
  3107. {
  3108. u32 buf_size;
  3109. u32 buf_count;
  3110. struct msm_vidc_core *core;
  3111. struct msm_vidc_buffers *buffers;
  3112. if (!inst || !inst->core) {
  3113. d_vpr_e("%s: invalid params\n", __func__);
  3114. return -EINVAL;
  3115. }
  3116. core = inst->core;
  3117. buf_size = call_session_op(core, buffer_size,
  3118. inst, buffer_type);
  3119. buf_count = call_session_op(core, min_count,
  3120. inst, buffer_type);
  3121. buffers = msm_vidc_get_buffers(inst, buffer_type, __func__);
  3122. if (!buffers)
  3123. return -EINVAL;
  3124. if (buf_size <= buffers->size &&
  3125. buf_count <= buffers->min_count) {
  3126. buffers->reuse = true;
  3127. } else {
  3128. buffers->reuse = false;
  3129. buffers->size = buf_size;
  3130. buffers->min_count = buf_count;
  3131. }
  3132. return 0;
  3133. }
  3134. int msm_vidc_create_internal_buffer(struct msm_vidc_inst *inst,
  3135. enum msm_vidc_buffer_type buffer_type, u32 index)
  3136. {
  3137. int rc = 0;
  3138. struct msm_vidc_buffers *buffers;
  3139. struct msm_vidc_allocations *allocations;
  3140. struct msm_vidc_mappings *mappings;
  3141. struct msm_vidc_buffer *buffer;
  3142. struct msm_vidc_alloc *alloc;
  3143. struct msm_vidc_map *map;
  3144. if (!inst || !inst->core) {
  3145. d_vpr_e("%s: invalid params\n", __func__);
  3146. return -EINVAL;
  3147. }
  3148. if (!is_internal_buffer(buffer_type)) {
  3149. i_vpr_e(inst, "%s: type %s is not internal\n",
  3150. __func__, buf_name(buffer_type));
  3151. return 0;
  3152. }
  3153. buffers = msm_vidc_get_buffers(inst, buffer_type, __func__);
  3154. if (!buffers)
  3155. return -EINVAL;
  3156. allocations = msm_vidc_get_allocations(inst, buffer_type, __func__);
  3157. if (!allocations)
  3158. return -EINVAL;
  3159. mappings = msm_vidc_get_mappings(inst, buffer_type, __func__);
  3160. if (!mappings)
  3161. return -EINVAL;
  3162. if (!buffers->size)
  3163. return 0;
  3164. buffer = msm_memory_pool_alloc(inst, MSM_MEM_POOL_BUFFER);
  3165. if (!buffer) {
  3166. i_vpr_e(inst, "%s: buf alloc failed\n", __func__);
  3167. return -ENOMEM;
  3168. }
  3169. INIT_LIST_HEAD(&buffer->list);
  3170. buffer->type = buffer_type;
  3171. buffer->index = index;
  3172. buffer->buffer_size = buffers->size;
  3173. list_add_tail(&buffer->list, &buffers->list);
  3174. alloc = msm_memory_pool_alloc(inst, MSM_MEM_POOL_ALLOC);
  3175. if (!alloc) {
  3176. i_vpr_e(inst, "%s: alloc failed\n", __func__);
  3177. return -ENOMEM;
  3178. }
  3179. INIT_LIST_HEAD(&alloc->list);
  3180. alloc->type = buffer_type;
  3181. alloc->region = msm_vidc_get_buffer_region(inst,
  3182. buffer_type, __func__);
  3183. alloc->size = buffer->buffer_size;
  3184. alloc->secure = is_secure_region(alloc->region);
  3185. rc = msm_vidc_memory_alloc(inst->core, alloc);
  3186. if (rc)
  3187. return -ENOMEM;
  3188. list_add_tail(&alloc->list, &allocations->list);
  3189. map = msm_memory_pool_alloc(inst, MSM_MEM_POOL_MAP);
  3190. if (!map) {
  3191. i_vpr_e(inst, "%s: map alloc failed\n", __func__);
  3192. return -ENOMEM;
  3193. }
  3194. INIT_LIST_HEAD(&map->list);
  3195. map->type = alloc->type;
  3196. map->region = alloc->region;
  3197. map->dmabuf = alloc->dmabuf;
  3198. rc = msm_vidc_memory_map(inst->core, map);
  3199. if (rc)
  3200. return -ENOMEM;
  3201. list_add_tail(&map->list, &mappings->list);
  3202. buffer->dmabuf = alloc->dmabuf;
  3203. buffer->device_addr = map->device_addr;
  3204. i_vpr_h(inst, "%s: create: type: %8s, size: %9u, device_addr %#x\n", __func__,
  3205. buf_name(buffer_type), buffers->size, buffer->device_addr);
  3206. return 0;
  3207. }
  3208. int msm_vidc_create_internal_buffers(struct msm_vidc_inst *inst,
  3209. enum msm_vidc_buffer_type buffer_type)
  3210. {
  3211. int rc = 0;
  3212. struct msm_vidc_buffers *buffers;
  3213. int i;
  3214. if (!inst || !inst->core) {
  3215. d_vpr_e("%s: invalid params\n", __func__);
  3216. return -EINVAL;
  3217. }
  3218. buffers = msm_vidc_get_buffers(inst, buffer_type, __func__);
  3219. if (!buffers)
  3220. return -EINVAL;
  3221. if (buffers->reuse) {
  3222. i_vpr_l(inst, "%s: reuse enabled for %s\n", __func__, buf_name(buffer_type));
  3223. return 0;
  3224. }
  3225. for (i = 0; i < buffers->min_count; i++) {
  3226. rc = msm_vidc_create_internal_buffer(inst, buffer_type, i);
  3227. if (rc)
  3228. return rc;
  3229. }
  3230. return rc;
  3231. }
  3232. int msm_vidc_queue_internal_buffers(struct msm_vidc_inst *inst,
  3233. enum msm_vidc_buffer_type buffer_type)
  3234. {
  3235. int rc = 0;
  3236. struct msm_vidc_buffers *buffers;
  3237. struct msm_vidc_buffer *buffer, *dummy;
  3238. if (!inst || !inst->core) {
  3239. d_vpr_e("%s: invalid params\n", __func__);
  3240. return -EINVAL;
  3241. }
  3242. if (!is_internal_buffer(buffer_type)) {
  3243. i_vpr_e(inst, "%s: %s is not internal\n", __func__, buf_name(buffer_type));
  3244. return 0;
  3245. }
  3246. buffers = msm_vidc_get_buffers(inst, buffer_type, __func__);
  3247. if (!buffers)
  3248. return -EINVAL;
  3249. if (buffers->reuse) {
  3250. i_vpr_l(inst, "%s: reuse enabled for %s buf\n",
  3251. __func__, buf_name(buffer_type));
  3252. return 0;
  3253. }
  3254. list_for_each_entry_safe(buffer, dummy, &buffers->list, list) {
  3255. /* do not queue pending release buffers */
  3256. if (buffer->flags & MSM_VIDC_ATTR_PENDING_RELEASE)
  3257. continue;
  3258. /* do not queue already queued buffers */
  3259. if (buffer->attr & MSM_VIDC_ATTR_QUEUED)
  3260. continue;
  3261. rc = venus_hfi_queue_buffer(inst, buffer, NULL);
  3262. if (rc)
  3263. return rc;
  3264. /* mark queued */
  3265. buffer->attr |= MSM_VIDC_ATTR_QUEUED;
  3266. i_vpr_h(inst, "%s: queue: type: %8s, size: %9u, device_addr %#x\n", __func__,
  3267. buf_name(buffer->type), buffer->buffer_size, buffer->device_addr);
  3268. }
  3269. return 0;
  3270. }
  3271. int msm_vidc_alloc_and_queue_session_internal_buffers(struct msm_vidc_inst *inst,
  3272. enum msm_vidc_buffer_type buffer_type)
  3273. {
  3274. int rc = 0;
  3275. if (!inst || !inst->core) {
  3276. d_vpr_e("%s: invalid params\n", __func__);
  3277. return -EINVAL;
  3278. }
  3279. if (buffer_type != MSM_VIDC_BUF_ARP &&
  3280. buffer_type != MSM_VIDC_BUF_PERSIST) {
  3281. i_vpr_e(inst, "%s: invalid buffer type: %s\n",
  3282. __func__, buf_name(buffer_type));
  3283. rc = -EINVAL;
  3284. goto exit;
  3285. }
  3286. rc = msm_vidc_get_internal_buffers(inst, buffer_type);
  3287. if (rc)
  3288. goto exit;
  3289. rc = msm_vidc_create_internal_buffers(inst, buffer_type);
  3290. if (rc)
  3291. goto exit;
  3292. rc = msm_vidc_queue_internal_buffers(inst, buffer_type);
  3293. if (rc)
  3294. goto exit;
  3295. exit:
  3296. return rc;
  3297. }
  3298. int msm_vidc_release_internal_buffers(struct msm_vidc_inst *inst,
  3299. enum msm_vidc_buffer_type buffer_type)
  3300. {
  3301. int rc = 0;
  3302. struct msm_vidc_buffers *buffers;
  3303. struct msm_vidc_buffer *buffer, *dummy;
  3304. if (!inst || !inst->core) {
  3305. d_vpr_e("%s: invalid params\n", __func__);
  3306. return -EINVAL;
  3307. }
  3308. if (!is_internal_buffer(buffer_type)) {
  3309. i_vpr_e(inst, "%s: %s is not internal\n",
  3310. __func__, buf_name(buffer_type));
  3311. return 0;
  3312. }
  3313. buffers = msm_vidc_get_buffers(inst, buffer_type, __func__);
  3314. if (!buffers)
  3315. return -EINVAL;
  3316. if (buffers->reuse) {
  3317. i_vpr_l(inst, "%s: reuse enabled for %s buf\n",
  3318. __func__, buf_name(buffer_type));
  3319. return 0;
  3320. }
  3321. list_for_each_entry_safe(buffer, dummy, &buffers->list, list) {
  3322. /* do not release already pending release buffers */
  3323. if (buffer->attr & MSM_VIDC_ATTR_PENDING_RELEASE)
  3324. continue;
  3325. /* release only queued buffers */
  3326. if (!(buffer->attr & MSM_VIDC_ATTR_QUEUED))
  3327. continue;
  3328. rc = venus_hfi_release_buffer(inst, buffer);
  3329. if (rc)
  3330. return rc;
  3331. /* mark pending release */
  3332. buffer->attr |= MSM_VIDC_ATTR_PENDING_RELEASE;
  3333. i_vpr_h(inst, "%s: release: type: %8s, size: %9u, device_addr %#x\n", __func__,
  3334. buf_name(buffer->type), buffer->buffer_size, buffer->device_addr);
  3335. }
  3336. return 0;
  3337. }
/*
 * Complete a driver buffer back to its vb2 queue: locate the ACTIVE vb2
 * buffer with the same index on the matching port, copy over flags,
 * timestamp and bytesused, complete any bound media request and call
 * vb2_buffer_done(). Input-meta buffers delivered via the request API
 * are completed elsewhere and skipped here.
 */
static int msm_vidc_vb2_buffer_done(struct msm_vidc_inst *inst,
	struct msm_vidc_buffer *buf)
{
	int type, port, state;
	struct vb2_queue *q;
	struct vb2_buffer *vb2;
	struct vb2_v4l2_buffer *vbuf;
	bool found;

	if (!inst || !inst->capabilities || !buf) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	/* map driver buffer type -> v4l2 buffer type -> driver port index */
	type = v4l2_type_from_driver(buf->type, __func__);
	if (!type)
		return -EINVAL;
	port = v4l2_type_to_driver_port(inst, type, __func__);
	if (port < 0)
		return -EINVAL;
	/*
	 * vb2_buffer_done not required if input metadata
	 * buffer sent via request api
	 */
	if (buf->type == MSM_VIDC_BUF_INPUT_META &&
		inst->capabilities->cap[INPUT_META_VIA_REQUEST].value)
		return 0;
	q = inst->bufq[port].vb2q;
	if (!q->streaming) {
		i_vpr_e(inst, "%s: port %d is not streaming\n",
			__func__, port);
		return -EINVAL;
	}
	/* find the ACTIVE (driver-owned) vb2 buffer matching this index */
	found = false;
	list_for_each_entry(vb2, &q->queued_list, queued_entry) {
		if (vb2->state != VB2_BUF_STATE_ACTIVE)
			continue;
		if (vb2->index == buf->index) {
			found = true;
			break;
		}
	}
	if (!found) {
		print_vidc_buffer(VIDC_ERR, "err ", "vb2 not found for", inst, buf);
		return -EINVAL;
	}
	/**
	 * v4l2 clears buffer state related flags. For driver errors
	 * send state as error to avoid skipping V4L2_BUF_FLAG_ERROR
	 * flag at v4l2 side.
	 */
	if (buf->flags & MSM_VIDC_BUF_FLAG_ERROR)
		state = VB2_BUF_STATE_ERROR;
	else
		state = VB2_BUF_STATE_DONE;
	vbuf = to_vb2_v4l2_buffer(vb2);
	vbuf->flags = buf->flags;
	vb2->timestamp = buf->timestamp;
	/* bytesused counts from the start of the plane, hence the offset */
	vb2->planes[0].bytesused = buf->data_size + vb2->planes[0].data_offset;
	/* complete any media request bound to this buffer */
	v4l2_ctrl_request_complete(vb2->req_obj.req, &inst->ctrl_handler);
	vb2_buffer_done(vb2, state);
	return 0;
}
  3399. static int msm_vidc_v4l2_buffer_event(struct msm_vidc_inst *inst,
  3400. struct msm_vidc_buffer *buf)
  3401. {
  3402. int rc = 0;
  3403. struct v4l2_event event = {0};
  3404. struct v4l2_event_vidc_metadata *event_data = NULL;
  3405. if (!inst || !buf) {
  3406. d_vpr_e("%s: invalid params\n", __func__);
  3407. return -EINVAL;
  3408. }
  3409. if (buf->type != MSM_VIDC_BUF_INPUT_META) {
  3410. i_vpr_e(inst, "%s: unsupported buffer type %s\n",
  3411. __func__, buf_name(buf->type));
  3412. return -EINVAL;
  3413. }
  3414. event.type = V4L2_EVENT_VIDC_METADATA;
  3415. event_data = (struct v4l2_event_vidc_metadata *)event.u.data;
  3416. event_data->type = INPUT_META_PLANE;
  3417. event_data->fd = buf->fd;
  3418. event_data->index = buf->index;
  3419. event_data->bytesused = buf->data_size;
  3420. event_data->offset = buf->data_offset;
  3421. v4l2_event_queue_fh(&inst->event_handler, &event);
  3422. return rc;
  3423. }
  3424. int msm_vidc_buffer_done(struct msm_vidc_inst *inst,
  3425. struct msm_vidc_buffer *buf)
  3426. {
  3427. if (!inst || !inst->capabilities || !buf) {
  3428. d_vpr_e("%s: invalid params\n", __func__);
  3429. return -EINVAL;
  3430. }
  3431. if (buf->type == MSM_VIDC_BUF_INPUT_META &&
  3432. inst->capabilities->cap[INPUT_META_VIA_REQUEST].value) {
  3433. if (is_meta_rx_inp_enabled(inst, META_OUTBUF_FENCE))
  3434. return msm_vidc_v4l2_buffer_event(inst, buf);
  3435. } else {
  3436. return msm_vidc_vb2_buffer_done(inst, buf);
  3437. }
  3438. return 0;
  3439. }
  3440. int msm_vidc_event_queue_init(struct msm_vidc_inst *inst)
  3441. {
  3442. int rc = 0;
  3443. int index;
  3444. struct msm_vidc_core *core;
  3445. if (!inst || !inst->core) {
  3446. d_vpr_e("%s: invalid params\n", __func__);
  3447. return -EINVAL;
  3448. }
  3449. core = inst->core;
  3450. if (is_decode_session(inst))
  3451. index = 0;
  3452. else if (is_encode_session(inst))
  3453. index = 1;
  3454. else
  3455. return -EINVAL;
  3456. v4l2_fh_init(&inst->event_handler, &core->vdev[index].vdev);
  3457. inst->event_handler.ctrl_handler = &inst->ctrl_handler;
  3458. v4l2_fh_add(&inst->event_handler);
  3459. return rc;
  3460. }
  3461. int msm_vidc_event_queue_deinit(struct msm_vidc_inst *inst)
  3462. {
  3463. int rc = 0;
  3464. if (!inst) {
  3465. d_vpr_e("%s: invalid params\n", __func__);
  3466. return -EINVAL;
  3467. }
  3468. /* do not deinit, if not already inited */
  3469. if (!inst->event_handler.vdev) {
  3470. i_vpr_e(inst, "%s: already not inited\n", __func__);
  3471. return 0;
  3472. }
  3473. v4l2_fh_del(&inst->event_handler);
  3474. v4l2_fh_exit(&inst->event_handler);
  3475. return rc;
  3476. }
  3477. static int vb2q_init(struct msm_vidc_inst *inst,
  3478. struct vb2_queue *q, enum v4l2_buf_type type)
  3479. {
  3480. int rc = 0;
  3481. struct msm_vidc_core *core;
  3482. if (!inst || !q || !inst->core) {
  3483. d_vpr_e("%s: invalid params\n", __func__);
  3484. return -EINVAL;
  3485. }
  3486. core = inst->core;
  3487. q->type = type;
  3488. q->io_modes = VB2_MMAP | VB2_DMABUF;
  3489. q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
  3490. q->ops = core->vb2_ops;
  3491. q->mem_ops = core->vb2_mem_ops;
  3492. q->drv_priv = inst;
  3493. q->allow_zero_bytesused = 1;
  3494. q->copy_timestamp = 1;
  3495. rc = vb2_queue_init(q);
  3496. if (rc)
  3497. i_vpr_e(inst, "%s: vb2_queue_init failed for type %d\n",
  3498. __func__, type);
  3499. return rc;
  3500. }
/*
 * m2m callback: initialize the source (INPUT) and destination (OUTPUT)
 * vb2 queues for an instance. Invoked from v4l2_m2m_ctx_init(). Media
 * requests are supported on the source queue only; both queues share a
 * single lock so v4l2 serializes them together.
 */
static int m2m_queue_init(void *priv, struct vb2_queue *src_vq,
	struct vb2_queue *dst_vq)
{
	int rc = 0;
	struct msm_vidc_inst *inst = priv;
	struct msm_vidc_core *core;

	if (!inst || !inst->core || !src_vq || !dst_vq) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;
	src_vq->supports_requests = 1;
	src_vq->lock = &inst->request_lock;
	src_vq->dev = &core->pdev->dev;
	rc = vb2q_init(inst, src_vq, INPUT_MPLANE);
	if (rc)
		goto fail_input_vb2q_init;
	inst->bufq[INPUT_PORT].vb2q = src_vq;
	/* destination queue reuses the source queue's lock */
	dst_vq->lock = src_vq->lock;
	dst_vq->dev = &core->pdev->dev;
	rc = vb2q_init(inst, dst_vq, OUTPUT_MPLANE);
	if (rc)
		goto fail_out_vb2q_init;
	inst->bufq[OUTPUT_PORT].vb2q = dst_vq;
	return rc;
fail_out_vb2q_init:
	/* undo the successful input queue init */
	vb2_queue_release(inst->bufq[INPUT_PORT].vb2q);
fail_input_vb2q_init:
	return rc;
}
  3531. int msm_vidc_vb2_queue_init(struct msm_vidc_inst *inst)
  3532. {
  3533. int rc = 0;
  3534. struct msm_vidc_core *core;
  3535. if (!inst || !inst->core) {
  3536. d_vpr_e("%s: invalid params\n", __func__);
  3537. return -EINVAL;
  3538. }
  3539. core = inst->core;
  3540. if (inst->vb2q_init) {
  3541. i_vpr_h(inst, "%s: vb2q already inited\n", __func__);
  3542. return 0;
  3543. }
  3544. inst->m2m_dev = v4l2_m2m_init(core->v4l2_m2m_ops);
  3545. if (IS_ERR(inst->m2m_dev)) {
  3546. i_vpr_e(inst, "%s: failed to initialize v4l2 m2m device\n", __func__);
  3547. rc = PTR_ERR(inst->m2m_dev);
  3548. goto fail_m2m_init;
  3549. }
  3550. /* v4l2_m2m_ctx_init will do input & output queues initialization */
  3551. inst->m2m_ctx = v4l2_m2m_ctx_init(inst->m2m_dev, inst, m2m_queue_init);
  3552. if (!inst->m2m_ctx) {
  3553. i_vpr_e(inst, "%s: v4l2_m2m_ctx_init failed\n", __func__);
  3554. goto fail_m2m_ctx_init;
  3555. }
  3556. inst->event_handler.m2m_ctx = inst->m2m_ctx;
  3557. inst->bufq[INPUT_META_PORT].vb2q = kzalloc(sizeof(struct vb2_queue), GFP_KERNEL);
  3558. if (!inst->bufq[INPUT_META_PORT].vb2q) {
  3559. i_vpr_e(inst, "%s: queue allocation failed for input meta port\n", __func__);
  3560. goto fail_in_meta_alloc;
  3561. }
  3562. /* do input meta port queues initialization */
  3563. rc = vb2q_init(inst, inst->bufq[INPUT_META_PORT].vb2q, INPUT_META_PLANE);
  3564. if (rc)
  3565. goto fail_in_meta_vb2q_init;
  3566. inst->bufq[OUTPUT_META_PORT].vb2q = kzalloc(sizeof(struct vb2_queue), GFP_KERNEL);
  3567. if (!inst->bufq[OUTPUT_META_PORT].vb2q) {
  3568. i_vpr_e(inst, "%s: queue allocation failed for output meta port\n", __func__);
  3569. goto fail_out_meta_alloc;
  3570. }
  3571. /* do output meta port queues initialization */
  3572. rc = vb2q_init(inst, inst->bufq[OUTPUT_META_PORT].vb2q, OUTPUT_META_PLANE);
  3573. if (rc)
  3574. goto fail_out_meta_vb2q_init;
  3575. inst->vb2q_init = true;
  3576. return 0;
  3577. fail_out_meta_vb2q_init:
  3578. kfree(inst->bufq[OUTPUT_META_PORT].vb2q);
  3579. inst->bufq[OUTPUT_META_PORT].vb2q = NULL;
  3580. fail_out_meta_alloc:
  3581. vb2_queue_release(inst->bufq[INPUT_META_PORT].vb2q);
  3582. fail_in_meta_vb2q_init:
  3583. kfree(inst->bufq[INPUT_META_PORT].vb2q);
  3584. inst->bufq[INPUT_META_PORT].vb2q = NULL;
  3585. fail_in_meta_alloc:
  3586. v4l2_m2m_ctx_release(inst->m2m_ctx);
  3587. inst->bufq[OUTPUT_PORT].vb2q = NULL;
  3588. inst->bufq[INPUT_PORT].vb2q = NULL;
  3589. fail_m2m_ctx_init:
  3590. v4l2_m2m_release(inst->m2m_dev);
  3591. fail_m2m_init:
  3592. return rc;
  3593. }
/*
 * Tear down everything created by msm_vidc_vb2_queue_init().
 * Order matters: the meta queues go first, then the m2m context
 * (which internally releases the input/output vb2 queues), and the
 * m2m device last. Safe to call when already deinited.
 */
int msm_vidc_vb2_queue_deinit(struct msm_vidc_inst *inst)
{
	int rc = 0;

	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	if (!inst->vb2q_init) {
		i_vpr_h(inst, "%s: vb2q already deinited\n", __func__);
		return 0;
	}
	vb2_queue_release(inst->bufq[OUTPUT_META_PORT].vb2q);
	kfree(inst->bufq[OUTPUT_META_PORT].vb2q);
	inst->bufq[OUTPUT_META_PORT].vb2q = NULL;
	vb2_queue_release(inst->bufq[INPUT_META_PORT].vb2q);
	kfree(inst->bufq[INPUT_META_PORT].vb2q);
	inst->bufq[INPUT_META_PORT].vb2q = NULL;
	/*
	 * vb2_queue_release() for input and output queues
	 * is called from v4l2_m2m_ctx_release()
	 */
	v4l2_m2m_ctx_release(inst->m2m_ctx);
	inst->bufq[OUTPUT_PORT].vb2q = NULL;
	inst->bufq[INPUT_PORT].vb2q = NULL;
	v4l2_m2m_release(inst->m2m_dev);
	inst->vb2q_init = false;
	return rc;
}
  3622. int msm_vidc_add_session(struct msm_vidc_inst *inst)
  3623. {
  3624. int rc = 0;
  3625. struct msm_vidc_inst *i;
  3626. struct msm_vidc_core *core;
  3627. u32 count = 0;
  3628. if (!inst || !inst->core) {
  3629. d_vpr_e("%s: invalid params\n", __func__);
  3630. return -EINVAL;
  3631. }
  3632. core = inst->core;
  3633. if (!core->capabilities) {
  3634. i_vpr_e(inst, "%s: invalid params\n", __func__);
  3635. return -EINVAL;
  3636. }
  3637. core_lock(core, __func__);
  3638. if (core->state != MSM_VIDC_CORE_INIT) {
  3639. i_vpr_e(inst, "%s: invalid state %s\n",
  3640. __func__, core_state_name(core->state));
  3641. rc = -EINVAL;
  3642. goto unlock;
  3643. }
  3644. list_for_each_entry(i, &core->instances, list)
  3645. count++;
  3646. if (count < core->capabilities[MAX_SESSION_COUNT].value) {
  3647. list_add_tail(&inst->list, &core->instances);
  3648. } else {
  3649. i_vpr_e(inst, "%s: max limit %d already running %d sessions\n",
  3650. __func__, core->capabilities[MAX_SESSION_COUNT].value, count);
  3651. rc = -EINVAL;
  3652. }
  3653. unlock:
  3654. core_unlock(core, __func__);
  3655. return rc;
  3656. }
  3657. int msm_vidc_remove_session(struct msm_vidc_inst *inst)
  3658. {
  3659. struct msm_vidc_inst *i, *temp;
  3660. struct msm_vidc_core *core;
  3661. u32 count = 0;
  3662. if (!inst || !inst->core) {
  3663. d_vpr_e("%s: invalid params\n", __func__);
  3664. return -EINVAL;
  3665. }
  3666. core = inst->core;
  3667. core_lock(core, __func__);
  3668. list_for_each_entry_safe(i, temp, &core->instances, list) {
  3669. if (i->session_id == inst->session_id) {
  3670. list_del_init(&i->list);
  3671. list_add_tail(&i->list, &core->dangling_instances);
  3672. i_vpr_h(inst, "%s: removed session %#x\n",
  3673. __func__, i->session_id);
  3674. }
  3675. }
  3676. list_for_each_entry(i, &core->instances, list)
  3677. count++;
  3678. i_vpr_h(inst, "%s: remaining sessions %d\n", __func__, count);
  3679. core_unlock(core, __func__);
  3680. return 0;
  3681. }
  3682. static int msm_vidc_remove_dangling_session(struct msm_vidc_inst *inst)
  3683. {
  3684. struct msm_vidc_inst *i, *temp;
  3685. struct msm_vidc_core *core;
  3686. u32 count = 0;
  3687. if (!inst || !inst->core) {
  3688. d_vpr_e("%s: invalid params\n", __func__);
  3689. return -EINVAL;
  3690. }
  3691. core = inst->core;
  3692. core_lock(core, __func__);
  3693. list_for_each_entry_safe(i, temp, &core->dangling_instances, list) {
  3694. if (i->session_id == inst->session_id) {
  3695. list_del_init(&i->list);
  3696. i_vpr_h(inst, "%s: removed dangling session %#x\n",
  3697. __func__, i->session_id);
  3698. break;
  3699. }
  3700. }
  3701. list_for_each_entry(i, &core->dangling_instances, list)
  3702. count++;
  3703. i_vpr_h(inst, "%s: remaining dangling sessions %d\n", __func__, count);
  3704. core_unlock(core, __func__);
  3705. return 0;
  3706. }
  3707. int msm_vidc_session_open(struct msm_vidc_inst *inst)
  3708. {
  3709. int rc = 0;
  3710. if (!inst) {
  3711. d_vpr_e("%s: invalid params\n", __func__);
  3712. return -EINVAL;
  3713. }
  3714. inst->packet_size = 4096;
  3715. inst->packet = kzalloc(inst->packet_size, GFP_KERNEL);
  3716. if (!inst->packet) {
  3717. i_vpr_e(inst, "%s(): inst packet allocation failed\n", __func__);
  3718. return -ENOMEM;
  3719. }
  3720. rc = venus_hfi_session_open(inst);
  3721. if (rc)
  3722. goto error;
  3723. return 0;
  3724. error:
  3725. i_vpr_e(inst, "%s(): session open failed\n", __func__);
  3726. kfree(inst->packet);
  3727. inst->packet = NULL;
  3728. return rc;
  3729. }
  3730. int msm_vidc_session_set_codec(struct msm_vidc_inst *inst)
  3731. {
  3732. int rc = 0;
  3733. if (!inst) {
  3734. d_vpr_e("%s: invalid params\n", __func__);
  3735. return -EINVAL;
  3736. }
  3737. rc = venus_hfi_session_set_codec(inst);
  3738. if (rc)
  3739. return rc;
  3740. return 0;
  3741. }
  3742. int msm_vidc_session_set_secure_mode(struct msm_vidc_inst *inst)
  3743. {
  3744. int rc = 0;
  3745. if (!inst) {
  3746. d_vpr_e("%s: invalid params\n", __func__);
  3747. return -EINVAL;
  3748. }
  3749. rc = venus_hfi_session_set_secure_mode(inst);
  3750. if (rc)
  3751. return rc;
  3752. return 0;
  3753. }
  3754. int msm_vidc_session_set_default_header(struct msm_vidc_inst *inst)
  3755. {
  3756. int rc = 0;
  3757. u32 default_header = false;
  3758. if (!inst) {
  3759. d_vpr_e("%s: invalid params\n", __func__);
  3760. return -EINVAL;
  3761. }
  3762. default_header = inst->capabilities->cap[DEFAULT_HEADER].value;
  3763. i_vpr_h(inst, "%s: default header: %d", __func__, default_header);
  3764. rc = venus_hfi_session_property(inst,
  3765. HFI_PROP_DEC_DEFAULT_HEADER,
  3766. HFI_HOST_FLAGS_NONE,
  3767. get_hfi_port(inst, INPUT_PORT),
  3768. HFI_PAYLOAD_U32,
  3769. &default_header,
  3770. sizeof(u32));
  3771. if (rc)
  3772. i_vpr_e(inst, "%s: set property failed\n", __func__);
  3773. return rc;
  3774. }
  3775. int msm_vidc_session_streamon(struct msm_vidc_inst *inst,
  3776. enum msm_vidc_port_type port)
  3777. {
  3778. int rc = 0;
  3779. if (!inst || !inst->core) {
  3780. d_vpr_e("%s: invalid params\n", __func__);
  3781. return -EINVAL;
  3782. }
  3783. msm_vidc_scale_power(inst, true);
  3784. rc = venus_hfi_start(inst, port);
  3785. if (rc)
  3786. return rc;
  3787. return rc;
  3788. }
/*
 * Stop streaming on one port: send the HFI stop command, wait (with the
 * instance lock dropped) for the firmware acknowledgement, then discard
 * pending port-settings changes and flush driver-side buffers for that
 * port. Called with the instance lock held — TODO confirm at call sites.
 *
 * Returns 0 on success. On stop failure or timeout the session is
 * killed and the port's buffers are flushed back to userspace.
 */
int msm_vidc_session_streamoff(struct msm_vidc_inst *inst,
	enum msm_vidc_port_type port)
{
	int rc = 0;
	int count = 0;
	struct msm_vidc_core *core;
	enum signal_session_response signal_type;
	enum msm_vidc_buffer_type buffer_type;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	/* pick the completion to wait on and the buffer set to flush */
	if (port == INPUT_PORT) {
		signal_type = SIGNAL_CMD_STOP_INPUT;
		buffer_type = MSM_VIDC_BUF_INPUT;
	} else if (port == OUTPUT_PORT) {
		signal_type = SIGNAL_CMD_STOP_OUTPUT;
		buffer_type = MSM_VIDC_BUF_OUTPUT;
	} else {
		i_vpr_e(inst, "%s: invalid port: %d\n", __func__, port);
		return -EINVAL;
	}
	rc = venus_hfi_stop(inst, port);
	if (rc)
		goto error;
	core = inst->core;
	i_vpr_h(inst, "%s: wait on port: %d for time: %d ms\n",
		__func__, port, core->capabilities[HW_RESPONSE_TIMEOUT].value);
	/* drop the lock while waiting so the response handler can take it */
	inst_unlock(inst, __func__);
	rc = wait_for_completion_timeout(
		&inst->completions[signal_type],
		msecs_to_jiffies(
			core->capabilities[HW_RESPONSE_TIMEOUT].value));
	if (!rc) {
		i_vpr_e(inst, "%s: session stop timed out for port: %d\n",
			__func__, port);
		rc = -ETIMEDOUT;
		msm_vidc_inst_timeout(inst);
	} else {
		/* wait_for_completion_timeout() returns time left on success */
		rc = 0;
	}
	inst_lock(inst, __func__);
	if(rc)
		goto error;
	if (port == INPUT_PORT) {
		/* discard pending input port settings change if any */
		msm_vidc_discard_pending_ipsc(inst);
		/* flush input timer list */
		msm_vidc_flush_input_timer(inst);
	}
	if (port == OUTPUT_PORT) {
		/* discard pending opsc if any*/
		msm_vidc_discard_pending_opsc(inst);
		/* flush out pending last flag buffers if any */
		msm_vidc_flush_pending_last_flag(inst);
	}
	/* no more queued buffers after streamoff */
	count = msm_vidc_num_buffers(inst, buffer_type, MSM_VIDC_ATTR_QUEUED);
	if (!count) {
		i_vpr_h(inst, "%s: stop successful on port: %d\n",
			__func__, port);
	} else {
		i_vpr_e(inst,
			"%s: %d buffers pending with firmware on port: %d\n",
			__func__, count, port);
		rc = -EINVAL;
		goto error;
	}
	/* flush deferred buffers */
	msm_vidc_flush_buffers(inst, buffer_type);
	msm_vidc_flush_delayed_unmap_buffers(inst, buffer_type);
	return 0;

error:
	/* unrecoverable for this session: kill it and return its buffers */
	msm_vidc_kill_session(inst);
	msm_vidc_flush_buffers(inst, buffer_type);
	return rc;
}
/*
 * msm_vidc_session_close() - send session close to firmware and wait for ack
 * @inst: video session instance
 *
 * Frees the session packet immediately after issuing close (no further
 * commands may be sent), then waits for the close-done signal with the
 * instance lock dropped. The session is removed from the core list whether
 * or not the wait succeeded.
 *
 * Return: 0 on success, -EINVAL/-ETIMEDOUT or venus_hfi_session_close() error.
 */
int msm_vidc_session_close(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct msm_vidc_core *core;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	rc = venus_hfi_session_close(inst);
	if (rc)
		return rc;

	/* we are not supposed to send any more commands after close */
	i_vpr_h(inst, "%s: free session packet data\n", __func__);
	kfree(inst->packet);
	inst->packet = NULL;

	core = inst->core;
	i_vpr_h(inst, "%s: wait on close for time: %d ms\n",
		__func__, core->capabilities[HW_RESPONSE_TIMEOUT].value);
	/* drop the instance lock so the response thread can signal completion */
	inst_unlock(inst, __func__);
	rc = wait_for_completion_timeout(
		&inst->completions[SIGNAL_CMD_CLOSE],
		msecs_to_jiffies(
			core->capabilities[HW_RESPONSE_TIMEOUT].value));
	if (!rc) {
		i_vpr_e(inst, "%s: session close timed out\n", __func__);
		rc = -ETIMEDOUT;
		msm_vidc_inst_timeout(inst);
	} else {
		rc = 0;
		i_vpr_h(inst, "%s: close successful\n", __func__);
	}
	inst_lock(inst, __func__);

	/* unlink session from core list regardless of close outcome */
	msm_vidc_remove_session(inst);
	return rc;
}
/*
 * msm_vidc_kill_session() - force-close an errored session
 * @inst: video session instance
 *
 * Closes the session with firmware (best effort; close status is ignored)
 * and moves the instance to the ERROR state. A zero session_id is treated
 * as "already killed" and skipped.
 *
 * Return: 0 always (unless inst is NULL).
 */
int msm_vidc_kill_session(struct msm_vidc_inst *inst)
{
	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	if (!inst->session_id) {
		i_vpr_e(inst, "%s: already killed\n", __func__);
		return 0;
	}

	i_vpr_e(inst, "%s: killing session\n", __func__);
	msm_vidc_session_close(inst);
	msm_vidc_change_inst_state(inst, MSM_VIDC_ERROR, __func__);

	return 0;
}
  3916. int msm_vidc_get_inst_capability(struct msm_vidc_inst *inst)
  3917. {
  3918. int rc = 0;
  3919. int i;
  3920. struct msm_vidc_core *core;
  3921. if (!inst || !inst->core || !inst->capabilities) {
  3922. d_vpr_e("%s: invalid params\n", __func__);
  3923. return -EINVAL;
  3924. }
  3925. core = inst->core;
  3926. for (i = 0; i < core->codecs_count; i++) {
  3927. if (core->inst_caps[i].domain == inst->domain &&
  3928. core->inst_caps[i].codec == inst->codec) {
  3929. i_vpr_h(inst,
  3930. "%s: copied capabilities with %#x codec, %#x domain\n",
  3931. __func__, inst->codec, inst->domain);
  3932. memcpy(inst->capabilities, &core->inst_caps[i],
  3933. sizeof(struct msm_vidc_inst_capability));
  3934. }
  3935. }
  3936. return rc;
  3937. }
/*
 * msm_vidc_deinit_core_caps() - free the core capability table
 * @core: video core
 *
 * Return: 0 on success, -EINVAL if core is NULL.
 */
int msm_vidc_deinit_core_caps(struct msm_vidc_core *core)
{
	int rc = 0;

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	kfree(core->capabilities);
	core->capabilities = NULL;
	d_vpr_h("%s: Core capabilities freed\n", __func__);

	return rc;
}
  3950. int msm_vidc_init_core_caps(struct msm_vidc_core *core)
  3951. {
  3952. int rc = 0;
  3953. int i, num_platform_caps;
  3954. struct msm_platform_core_capability *platform_data;
  3955. if (!core || !core->platform) {
  3956. d_vpr_e("%s: invalid params\n", __func__);
  3957. rc = -EINVAL;
  3958. goto exit;
  3959. }
  3960. platform_data = core->platform->data.core_data;
  3961. if (!platform_data) {
  3962. d_vpr_e("%s: platform core data is NULL\n",
  3963. __func__);
  3964. rc = -EINVAL;
  3965. goto exit;
  3966. }
  3967. core->capabilities = kcalloc(1,
  3968. (sizeof(struct msm_vidc_core_capability) *
  3969. (CORE_CAP_MAX + 1)), GFP_KERNEL);
  3970. if (!core->capabilities) {
  3971. d_vpr_e("%s: failed to allocate core capabilities\n",
  3972. __func__);
  3973. rc = -ENOMEM;
  3974. goto exit;
  3975. }
  3976. num_platform_caps = core->platform->data.core_data_size;
  3977. /* loop over platform caps */
  3978. for (i = 0; i < num_platform_caps && i < CORE_CAP_MAX; i++) {
  3979. core->capabilities[platform_data[i].type].type = platform_data[i].type;
  3980. core->capabilities[platform_data[i].type].value = platform_data[i].value;
  3981. }
  3982. exit:
  3983. return rc;
  3984. }
  3985. static void update_inst_capability(struct msm_platform_inst_capability *in,
  3986. struct msm_vidc_inst_capability *capability)
  3987. {
  3988. if (!in || !capability) {
  3989. d_vpr_e("%s: invalid params %pK %pK\n",
  3990. __func__, in, capability);
  3991. return;
  3992. }
  3993. if (in->cap_id >= INST_CAP_MAX) {
  3994. d_vpr_e("%s: invalid cap id %d\n", __func__, in->cap_id);
  3995. return;
  3996. }
  3997. capability->cap[in->cap_id].cap_id = in->cap_id;
  3998. capability->cap[in->cap_id].min = in->min;
  3999. capability->cap[in->cap_id].max = in->max;
  4000. capability->cap[in->cap_id].step_or_mask = in->step_or_mask;
  4001. capability->cap[in->cap_id].value = in->value;
  4002. capability->cap[in->cap_id].flags = in->flags;
  4003. capability->cap[in->cap_id].v4l2_id = in->v4l2_id;
  4004. capability->cap[in->cap_id].hfi_id = in->hfi_id;
  4005. }
  4006. static void update_inst_cap_dependency(
  4007. struct msm_platform_inst_cap_dependency *in,
  4008. struct msm_vidc_inst_capability *capability)
  4009. {
  4010. if (!in || !capability) {
  4011. d_vpr_e("%s: invalid params %pK %pK\n",
  4012. __func__, in, capability);
  4013. return;
  4014. }
  4015. if (in->cap_id >= INST_CAP_MAX) {
  4016. d_vpr_e("%s: invalid cap id %d\n", __func__, in->cap_id);
  4017. return;
  4018. }
  4019. capability->cap[in->cap_id].cap_id = in->cap_id;
  4020. memcpy(capability->cap[in->cap_id].parents, in->parents,
  4021. sizeof(capability->cap[in->cap_id].parents));
  4022. memcpy(capability->cap[in->cap_id].children, in->children,
  4023. sizeof(capability->cap[in->cap_id].children));
  4024. capability->cap[in->cap_id].adjust = in->adjust;
  4025. capability->cap[in->cap_id].set = in->set;
  4026. }
/*
 * msm_vidc_deinit_instance_caps() - free the per-codec instance cap table
 * @core: video core
 *
 * Return: 0 on success, -EINVAL if core is NULL.
 */
int msm_vidc_deinit_instance_caps(struct msm_vidc_core *core)
{
	int rc = 0;

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	kfree(core->inst_caps);
	core->inst_caps = NULL;
	d_vpr_h("%s: core->inst_caps freed\n", __func__);

	return rc;
}
  4039. int msm_vidc_init_instance_caps(struct msm_vidc_core *core)
  4040. {
  4041. int rc = 0;
  4042. u8 enc_valid_codecs, dec_valid_codecs;
  4043. u8 count_bits, enc_codec_count;
  4044. u8 codecs_count = 0;
  4045. int i, j, check_bit;
  4046. int num_platform_cap_data, num_platform_cap_dependency_data;
  4047. struct msm_platform_inst_capability *platform_cap_data = NULL;
  4048. struct msm_platform_inst_cap_dependency *platform_cap_dependency_data = NULL;
  4049. if (!core || !core->platform || !core->capabilities) {
  4050. d_vpr_e("%s: invalid params\n", __func__);
  4051. rc = -EINVAL;
  4052. goto error;
  4053. }
  4054. platform_cap_data = core->platform->data.inst_cap_data;
  4055. if (!platform_cap_data) {
  4056. d_vpr_e("%s: platform instance cap data is NULL\n",
  4057. __func__);
  4058. rc = -EINVAL;
  4059. goto error;
  4060. }
  4061. platform_cap_dependency_data = core->platform->data.inst_cap_dependency_data;
  4062. if (!platform_cap_dependency_data) {
  4063. d_vpr_e("%s: platform instance cap dependency data is NULL\n",
  4064. __func__);
  4065. rc = -EINVAL;
  4066. goto error;
  4067. }
  4068. enc_valid_codecs = core->capabilities[ENC_CODECS].value;
  4069. count_bits = enc_valid_codecs;
  4070. COUNT_BITS(count_bits, codecs_count);
  4071. enc_codec_count = codecs_count;
  4072. dec_valid_codecs = core->capabilities[DEC_CODECS].value;
  4073. count_bits = dec_valid_codecs;
  4074. COUNT_BITS(count_bits, codecs_count);
  4075. core->codecs_count = codecs_count;
  4076. core->inst_caps = kcalloc(codecs_count,
  4077. sizeof(struct msm_vidc_inst_capability),
  4078. GFP_KERNEL);
  4079. if (!core->inst_caps) {
  4080. d_vpr_e("%s: failed to allocate core capabilities\n",
  4081. __func__);
  4082. rc = -ENOMEM;
  4083. goto error;
  4084. }
  4085. check_bit = 0;
  4086. /* determine codecs for enc domain */
  4087. for (i = 0; i < enc_codec_count; i++) {
  4088. while (check_bit < (sizeof(enc_valid_codecs) * 8)) {
  4089. if (enc_valid_codecs & BIT(check_bit)) {
  4090. core->inst_caps[i].domain = MSM_VIDC_ENCODER;
  4091. core->inst_caps[i].codec = enc_valid_codecs &
  4092. BIT(check_bit);
  4093. check_bit++;
  4094. break;
  4095. }
  4096. check_bit++;
  4097. }
  4098. }
  4099. /* reset checkbit to check from 0th bit of decoder codecs set bits*/
  4100. check_bit = 0;
  4101. /* determine codecs for dec domain */
  4102. for (; i < codecs_count; i++) {
  4103. while (check_bit < (sizeof(dec_valid_codecs) * 8)) {
  4104. if (dec_valid_codecs & BIT(check_bit)) {
  4105. core->inst_caps[i].domain = MSM_VIDC_DECODER;
  4106. core->inst_caps[i].codec = dec_valid_codecs &
  4107. BIT(check_bit);
  4108. check_bit++;
  4109. break;
  4110. }
  4111. check_bit++;
  4112. }
  4113. }
  4114. num_platform_cap_data = core->platform->data.inst_cap_data_size;
  4115. num_platform_cap_dependency_data = core->platform->data.inst_cap_dependency_data_size;
  4116. d_vpr_h("%s: num caps %d, dependency %d\n", __func__,
  4117. num_platform_cap_data, num_platform_cap_dependency_data);
  4118. /* loop over each platform capability */
  4119. for (i = 0; i < num_platform_cap_data; i++) {
  4120. /* select matching core codec and update it */
  4121. for (j = 0; j < codecs_count; j++) {
  4122. if ((platform_cap_data[i].domain &
  4123. core->inst_caps[j].domain) &&
  4124. (platform_cap_data[i].codec &
  4125. core->inst_caps[j].codec)) {
  4126. /* update core capability */
  4127. update_inst_capability(&platform_cap_data[i],
  4128. &core->inst_caps[j]);
  4129. }
  4130. }
  4131. }
  4132. /* loop over each platform dependency capability */
  4133. for (i = 0; i < num_platform_cap_dependency_data; i++) {
  4134. /* select matching core codec and update it */
  4135. for (j = 0; j < codecs_count; j++) {
  4136. if ((platform_cap_dependency_data[i].domain &
  4137. core->inst_caps[j].domain) &&
  4138. (platform_cap_dependency_data[i].codec &
  4139. core->inst_caps[j].codec)) {
  4140. /* update core dependency capability */
  4141. update_inst_cap_dependency(
  4142. &platform_cap_dependency_data[i],
  4143. &core->inst_caps[j]);
  4144. }
  4145. }
  4146. }
  4147. error:
  4148. return rc;
  4149. }
  4150. int msm_vidc_core_deinit_locked(struct msm_vidc_core *core, bool force)
  4151. {
  4152. int rc = 0;
  4153. struct msm_vidc_inst *inst, *dummy;
  4154. if (!core) {
  4155. d_vpr_e("%s: invalid params\n", __func__);
  4156. return -EINVAL;
  4157. }
  4158. rc = __strict_check(core, __func__);
  4159. if (rc) {
  4160. d_vpr_e("%s(): core was not locked\n", __func__);
  4161. return rc;
  4162. }
  4163. if (core->state == MSM_VIDC_CORE_DEINIT)
  4164. return 0;
  4165. if (force) {
  4166. d_vpr_e("%s(): force deinit core\n", __func__);
  4167. } else {
  4168. /* in normal case, deinit core only if no session present */
  4169. if (!list_empty(&core->instances)) {
  4170. d_vpr_h("%s(): skip deinit\n", __func__);
  4171. return 0;
  4172. } else {
  4173. d_vpr_h("%s(): deinit core\n", __func__);
  4174. }
  4175. }
  4176. venus_hfi_core_deinit(core, force);
  4177. /* unlink all sessions from core, if any */
  4178. list_for_each_entry_safe(inst, dummy, &core->instances, list) {
  4179. msm_vidc_change_inst_state(inst, MSM_VIDC_ERROR, __func__);
  4180. list_del_init(&inst->list);
  4181. list_add_tail(&inst->list, &core->dangling_instances);
  4182. }
  4183. msm_vidc_change_core_state(core, MSM_VIDC_CORE_DEINIT, __func__);
  4184. return rc;
  4185. }
/*
 * msm_vidc_core_deinit() - lock-taking wrapper around core_deinit_locked
 * @core: video core
 * @force: deinit even if sessions are still present
 *
 * Return: result of msm_vidc_core_deinit_locked(), -EINVAL if core is NULL.
 */
int msm_vidc_core_deinit(struct msm_vidc_core *core, bool force)
{
	int rc = 0;

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	core_lock(core, __func__);
	rc = msm_vidc_core_deinit_locked(core, force);
	core_unlock(core, __func__);

	return rc;
}
  4198. int msm_vidc_core_init_wait(struct msm_vidc_core *core)
  4199. {
  4200. const int interval = 10;
  4201. int max_tries, count = 0, rc = 0;
  4202. if (!core || !core->capabilities) {
  4203. d_vpr_e("%s: invalid params\n", __func__);
  4204. return -EINVAL;
  4205. }
  4206. core_lock(core, __func__);
  4207. if (core->state == MSM_VIDC_CORE_INIT) {
  4208. rc = 0;
  4209. goto unlock;
  4210. } else if (core->state == MSM_VIDC_CORE_DEINIT) {
  4211. rc = -EINVAL;
  4212. goto unlock;
  4213. }
  4214. d_vpr_h("%s(): waiting for state change\n", __func__);
  4215. max_tries = core->capabilities[HW_RESPONSE_TIMEOUT].value / interval;
  4216. while (count < max_tries) {
  4217. if (core->state != MSM_VIDC_CORE_INIT_WAIT)
  4218. break;
  4219. core_unlock(core, __func__);
  4220. msleep_interruptible(interval);
  4221. core_lock(core, __func__);
  4222. count++;
  4223. }
  4224. d_vpr_h("%s: state %s, interval %u, count %u, max_tries %u\n", __func__,
  4225. core_state_name(core->state), interval, count, max_tries);
  4226. if (core->state == MSM_VIDC_CORE_INIT) {
  4227. d_vpr_h("%s: sys init successful\n", __func__);
  4228. rc = 0;
  4229. goto unlock;
  4230. } else {
  4231. d_vpr_h("%s: sys init wait timedout. state %s\n",
  4232. __func__, core_state_name(core->state));
  4233. rc = -EINVAL;
  4234. goto unlock;
  4235. }
  4236. unlock:
  4237. if (rc)
  4238. msm_vidc_core_deinit_locked(core, true);
  4239. core_unlock(core, __func__);
  4240. return rc;
  4241. }
/*
 * msm_vidc_core_init() - move core to INIT_WAIT and kick off firmware init
 * @core: video core
 *
 * No-op if the core is already in INIT or INIT_WAIT. Resets the SMMU-fault,
 * SSR-trigger and PM-suspend flags before calling into the HFI layer. On
 * failure the core is force-deinitialized.
 *
 * Return: 0 on success or if already initializing, venus_hfi_core_init()
 * error otherwise.
 */
int msm_vidc_core_init(struct msm_vidc_core *core)
{
	int rc = 0;

	if (!core || !core->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	core_lock(core, __func__);
	if (core->state == MSM_VIDC_CORE_INIT ||
		core->state == MSM_VIDC_CORE_INIT_WAIT)
		goto unlock;

	msm_vidc_change_core_state(core, MSM_VIDC_CORE_INIT_WAIT, __func__);
	/* fresh init: clear sticky fault/ssr/suspend state */
	core->smmu_fault_handled = false;
	core->ssr.trigger = false;
	core->pm_suspended = false;

	rc = venus_hfi_core_init(core);
	if (rc) {
		d_vpr_e("%s: core init failed\n", __func__);
		goto unlock;
	}

unlock:
	if (rc)
		msm_vidc_core_deinit_locked(core, true);
	core_unlock(core, __func__);
	return rc;
}
  4268. int msm_vidc_inst_timeout(struct msm_vidc_inst *inst)
  4269. {
  4270. int rc = 0;
  4271. struct msm_vidc_core *core;
  4272. struct msm_vidc_inst *instance;
  4273. bool found;
  4274. if (!inst || !inst->core) {
  4275. d_vpr_e("%s: invalid params\n", __func__);
  4276. return -EINVAL;
  4277. }
  4278. core = inst->core;
  4279. core_lock(core, __func__);
  4280. /*
  4281. * All sessions will be removed from core list in core deinit,
  4282. * do not deinit core from a session which is not present in
  4283. * core list.
  4284. */
  4285. found = false;
  4286. list_for_each_entry(instance, &core->instances, list) {
  4287. if (instance == inst) {
  4288. found = true;
  4289. break;
  4290. }
  4291. }
  4292. if (!found) {
  4293. i_vpr_e(inst,
  4294. "%s: session not available in core list\n", __func__);
  4295. rc = -EINVAL;
  4296. goto unlock;
  4297. }
  4298. /* call core deinit for a valid instance timeout case */
  4299. msm_vidc_core_deinit_locked(core, true);
  4300. unlock:
  4301. core_unlock(core, __func__);
  4302. return rc;
  4303. }
  4304. int msm_vidc_print_buffer_info(struct msm_vidc_inst *inst)
  4305. {
  4306. struct msm_vidc_buffers *buffers;
  4307. int i;
  4308. if (!inst) {
  4309. i_vpr_e(inst, "%s: invalid params\n", __func__);
  4310. return -EINVAL;
  4311. }
  4312. /* Print buffer details */
  4313. for (i = 0; i < ARRAY_SIZE(buf_type_name_arr); i++) {
  4314. buffers = msm_vidc_get_buffers(inst, buf_type_name_arr[i].type, __func__);
  4315. if (!buffers)
  4316. continue;
  4317. i_vpr_h(inst, "buf: type: %11s, count %2d, extra %2d, actual %2d, size %9u\n",
  4318. buf_type_name_arr[i].name, buffers->min_count,
  4319. buffers->extra_count, buffers->actual_count,
  4320. buffers->size);
  4321. }
  4322. return 0;
  4323. }
  4324. int msm_vidc_print_inst_info(struct msm_vidc_inst *inst)
  4325. {
  4326. struct msm_vidc_buffers *buffers;
  4327. struct msm_vidc_buffer *buf;
  4328. enum msm_vidc_port_type port;
  4329. bool is_secure, is_decode;
  4330. u32 bit_depth, bit_rate, frame_rate, width, height;
  4331. struct dma_buf *dbuf;
  4332. struct inode *f_inode;
  4333. unsigned long inode_num = 0;
  4334. long ref_count = -1;
  4335. int i = 0;
  4336. if (!inst || !inst->capabilities) {
  4337. i_vpr_e(inst, "%s: invalid params\n", __func__);
  4338. return -EINVAL;
  4339. }
  4340. is_secure = is_secure_session(inst);
  4341. is_decode = inst->domain == MSM_VIDC_DECODER;
  4342. port = is_decode ? INPUT_PORT : OUTPUT_PORT;
  4343. width = inst->fmts[port].fmt.pix_mp.width;
  4344. height = inst->fmts[port].fmt.pix_mp.height;
  4345. bit_depth = inst->capabilities->cap[BIT_DEPTH].value & 0xFFFF;
  4346. bit_rate = inst->capabilities->cap[BIT_RATE].value;
  4347. frame_rate = inst->capabilities->cap[FRAME_RATE].value >> 16;
  4348. i_vpr_e(inst, "%s %s session, HxW: %d x %d, fps: %d, bitrate: %d, bit-depth: %d\n",
  4349. is_secure ? "Secure" : "Non-Secure",
  4350. is_decode ? "Decode" : "Encode",
  4351. height, width,
  4352. frame_rate, bit_rate, bit_depth);
  4353. /* Print buffer details */
  4354. for (i = 0; i < ARRAY_SIZE(buf_type_name_arr); i++) {
  4355. buffers = msm_vidc_get_buffers(inst, buf_type_name_arr[i].type, __func__);
  4356. if (!buffers)
  4357. continue;
  4358. i_vpr_e(inst, "count: type: %11s, min: %2d, extra: %2d, actual: %2d\n",
  4359. buf_type_name_arr[i].name, buffers->min_count,
  4360. buffers->extra_count, buffers->actual_count);
  4361. list_for_each_entry(buf, &buffers->list, list) {
  4362. if (!buf->dmabuf)
  4363. continue;
  4364. dbuf = (struct dma_buf *)buf->dmabuf;
  4365. if (dbuf && dbuf->file) {
  4366. f_inode = file_inode(dbuf->file);
  4367. if (f_inode) {
  4368. inode_num = f_inode->i_ino;
  4369. ref_count = file_count(dbuf->file);
  4370. }
  4371. }
  4372. i_vpr_e(inst,
  4373. "buf: type: %11s, index: %2d, fd: %4d, size: %9u, off: %8u, filled: %9u, daddr: %#llx, inode: %8lu, ref: %2ld, flags: %8x, ts: %16lld, attr: %8x\n",
  4374. buf_type_name_arr[i].name, buf->index, buf->fd, buf->buffer_size,
  4375. buf->data_offset, buf->data_size, buf->device_addr,
  4376. inode_num, ref_count, buf->flags, buf->timestamp, buf->attr);
  4377. }
  4378. }
  4379. return 0;
  4380. }
  4381. void msm_vidc_print_core_info(struct msm_vidc_core *core)
  4382. {
  4383. struct msm_vidc_inst *inst = NULL;
  4384. struct msm_vidc_inst *instances[MAX_SUPPORTED_INSTANCES];
  4385. s32 num_instances = 0;
  4386. if (!core) {
  4387. d_vpr_e("%s: invalid params\n", __func__);
  4388. return;
  4389. }
  4390. core_lock(core, __func__);
  4391. list_for_each_entry(inst, &core->instances, list)
  4392. instances[num_instances++] = inst;
  4393. core_unlock(core, __func__);
  4394. while (num_instances--) {
  4395. inst = instances[num_instances];
  4396. inst = get_inst_ref(core, inst);
  4397. if (!inst)
  4398. continue;
  4399. inst_lock(inst, __func__);
  4400. msm_vidc_print_inst_info(inst);
  4401. inst_unlock(inst, __func__);
  4402. put_inst(inst);
  4403. }
  4404. }
  4405. int msm_vidc_smmu_fault_handler(struct iommu_domain *domain,
  4406. struct device *dev, unsigned long iova, int flags, void *data)
  4407. {
  4408. struct msm_vidc_core *core = data;
  4409. if (!domain || !core || !core->capabilities) {
  4410. d_vpr_e("%s: invalid params %pK %pK\n",
  4411. __func__, domain, core);
  4412. return -EINVAL;
  4413. }
  4414. if (core->smmu_fault_handled) {
  4415. if (core->capabilities[NON_FATAL_FAULTS].value) {
  4416. dprintk_ratelimit(VIDC_ERR, "err ",
  4417. "%s: non-fatal pagefault address: %lx\n",
  4418. __func__, iova);
  4419. return 0;
  4420. }
  4421. }
  4422. d_vpr_e(FMT_STRING_FAULT_HANDLER, __func__, iova);
  4423. core->smmu_fault_handled = true;
  4424. /* print noc error log registers */
  4425. venus_hfi_noc_error_info(core);
  4426. msm_vidc_print_core_info(core);
  4427. /*
  4428. * Return -ENOSYS to elicit the default behaviour of smmu driver.
  4429. * If we return -ENOSYS, then smmu driver assumes page fault handler
  4430. * is not installed and prints a list of useful debug information like
  4431. * FAR, SID etc. This information is not printed if we return 0.
  4432. */
  4433. return -ENOSYS;
  4434. }
  4435. int msm_vidc_trigger_ssr(struct msm_vidc_core *core,
  4436. u64 trigger_ssr_val)
  4437. {
  4438. struct msm_vidc_ssr *ssr;
  4439. if (!core) {
  4440. d_vpr_e("%s: Invalid parameters\n", __func__);
  4441. return -EINVAL;
  4442. }
  4443. ssr = &core->ssr;
  4444. /*
  4445. * <test_addr><sub_client_id><ssr_type>
  4446. * ssr_type: 0-3 bits
  4447. * sub_client_id: 4-7 bits
  4448. * reserved: 8-31 bits
  4449. * test_addr: 32-63 bits
  4450. */
  4451. ssr->ssr_type = (trigger_ssr_val &
  4452. (unsigned long)SSR_TYPE) >> SSR_TYPE_SHIFT;
  4453. ssr->sub_client_id = (trigger_ssr_val &
  4454. (unsigned long)SSR_SUB_CLIENT_ID) >> SSR_SUB_CLIENT_ID_SHIFT;
  4455. ssr->test_addr = (trigger_ssr_val &
  4456. (unsigned long)SSR_ADDR_ID) >> SSR_ADDR_SHIFT;
  4457. schedule_work(&core->ssr_work);
  4458. return 0;
  4459. }
/*
 * msm_vidc_ssr_handler() - workqueue handler that fires the queued SSR
 * @work: embedded ssr_work inside msm_vidc_core
 *
 * Triggers the previously decoded SSR request via HFI, but only while the
 * core is fully initialized. Sets core->ssr.trigger so the resulting fatal
 * error from firmware can be recognized as user-initiated.
 */
void msm_vidc_ssr_handler(struct work_struct *work)
{
	int rc;
	struct msm_vidc_core *core;
	struct msm_vidc_ssr *ssr;

	core = container_of(work, struct msm_vidc_core, ssr_work);
	if (!core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, core);
		return;
	}
	ssr = &core->ssr;

	core_lock(core, __func__);
	if (core->state == MSM_VIDC_CORE_INIT) {
		/*
		 * In current implementation, user-initiated SSR triggers
		 * a fatal error from hardware. However, there is no way
		 * to know if fatal error is due to SSR or not. Handle
		 * user SSR as non-fatal.
		 */
		core->ssr.trigger = true;
		rc = venus_hfi_trigger_ssr(core, ssr->ssr_type,
			ssr->sub_client_id, ssr->test_addr);
		if (rc) {
			d_vpr_e("%s: trigger_ssr failed\n", __func__);
			/* trigger failed: do not mis-tag the next fatal error */
			core->ssr.trigger = false;
		}
	} else {
		d_vpr_e("%s: video core not initialized\n", __func__);
	}
	core_unlock(core, __func__);
}
  4491. int msm_vidc_trigger_stability(struct msm_vidc_core *core,
  4492. u64 trigger_stability_val)
  4493. {
  4494. struct msm_vidc_inst *inst = NULL;
  4495. struct msm_vidc_stability stability;
  4496. if (!core) {
  4497. d_vpr_e("%s: invalid params\n", __func__);
  4498. return -EINVAL;
  4499. }
  4500. /*
  4501. * <payload><sub_client_id><stability_type>
  4502. * stability_type: 0-3 bits
  4503. * sub_client_id: 4-7 bits
  4504. * reserved: 8-31 bits
  4505. * payload: 32-63 bits
  4506. */
  4507. memset(&stability, 0, sizeof(struct msm_vidc_stability));
  4508. stability.stability_type = (trigger_stability_val &
  4509. (unsigned long)STABILITY_TYPE) >> STABILITY_TYPE_SHIFT;
  4510. stability.sub_client_id = (trigger_stability_val &
  4511. (unsigned long)STABILITY_SUB_CLIENT_ID) >> STABILITY_SUB_CLIENT_ID_SHIFT;
  4512. stability.value = (trigger_stability_val &
  4513. (unsigned long)STABILITY_PAYLOAD_ID) >> STABILITY_PAYLOAD_SHIFT;
  4514. core_lock(core, __func__);
  4515. list_for_each_entry(inst, &core->instances, list) {
  4516. memcpy(&inst->stability, &stability, sizeof(struct msm_vidc_stability));
  4517. schedule_work(&inst->stability_work);
  4518. }
  4519. core_unlock(core, __func__);
  4520. return 0;
  4521. }
/*
 * msm_vidc_stability_handler() - workqueue handler for a stability trigger
 * @work: embedded stability_work inside msm_vidc_inst
 *
 * Takes a reference on the instance via get_inst_ref() (the instance may
 * have been freed since the work was scheduled), then forwards the stored
 * stability request to firmware under the instance lock.
 */
void msm_vidc_stability_handler(struct work_struct *work)
{
	int rc;
	struct msm_vidc_inst *inst;
	struct msm_vidc_stability *stability;

	inst = container_of(work, struct msm_vidc_inst, stability_work);
	inst = get_inst_ref(g_core, inst);
	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}

	inst_lock(inst, __func__);
	stability = &inst->stability;
	rc = venus_hfi_trigger_stability(inst, stability->stability_type,
		stability->sub_client_id, stability->value);
	if (rc)
		i_vpr_e(inst, "%s: trigger_stability failed\n", __func__);
	inst_unlock(inst, __func__);

	/* drop the reference taken by get_inst_ref() */
	put_inst(inst);
}
/*
 * cancel_stability_work_sync() - synchronously cancel pending stability work
 * @inst: video session instance
 *
 * Return: 0 on success, -EINVAL if inst is NULL.
 */
int cancel_stability_work_sync(struct msm_vidc_inst *inst)
{
	if (!inst) {
		d_vpr_e("%s: Invalid arguments\n", __func__);
		return -EINVAL;
	}
	cancel_work_sync(&inst->stability_work);

	return 0;
}
  4551. void msm_vidc_fw_unload_handler(struct work_struct *work)
  4552. {
  4553. struct msm_vidc_core *core = NULL;
  4554. int rc = 0;
  4555. core = container_of(work, struct msm_vidc_core, fw_unload_work.work);
  4556. if (!core) {
  4557. d_vpr_e("%s: invalid work or core handle\n", __func__);
  4558. return;
  4559. }
  4560. d_vpr_h("%s: deinitializing video core\n",__func__);
  4561. rc = msm_vidc_core_deinit(core, false);
  4562. if (rc)
  4563. d_vpr_e("%s: Failed to deinit core\n", __func__);
  4564. }
  4565. int msm_vidc_suspend(struct msm_vidc_core *core)
  4566. {
  4567. int rc = 0;
  4568. if (!core) {
  4569. d_vpr_e("%s: invalid params\n", __func__);
  4570. return -EINVAL;
  4571. }
  4572. rc = venus_hfi_suspend(core);
  4573. if (rc)
  4574. return rc;
  4575. return rc;
  4576. }
  4577. void msm_vidc_batch_handler(struct work_struct *work)
  4578. {
  4579. struct msm_vidc_inst *inst;
  4580. enum msm_vidc_allow allow;
  4581. struct msm_vidc_core *core;
  4582. int rc = 0;
  4583. inst = container_of(work, struct msm_vidc_inst, decode_batch.work.work);
  4584. inst = get_inst_ref(g_core, inst);
  4585. if (!inst || !inst->core) {
  4586. d_vpr_e("%s: invalid params\n", __func__);
  4587. return;
  4588. }
  4589. core = inst->core;
  4590. inst_lock(inst, __func__);
  4591. if (is_session_error(inst)) {
  4592. i_vpr_e(inst, "%s: failled. Session error\n", __func__);
  4593. goto exit;
  4594. }
  4595. if (core->pm_suspended) {
  4596. i_vpr_h(inst, "%s: device in pm suspend state\n", __func__);
  4597. goto exit;
  4598. }
  4599. allow = msm_vidc_allow_qbuf(inst, OUTPUT_MPLANE);
  4600. if (allow != MSM_VIDC_ALLOW) {
  4601. i_vpr_e(inst, "%s: not allowed in state: %s\n", __func__,
  4602. state_name(inst->state));
  4603. goto exit;
  4604. }
  4605. i_vpr_h(inst, "%s: queue pending batch buffers\n", __func__);
  4606. rc = msm_vidc_queue_deferred_buffers(inst, MSM_VIDC_BUF_OUTPUT);
  4607. if (rc) {
  4608. i_vpr_e(inst, "%s: batch qbufs failed\n", __func__);
  4609. msm_vidc_change_inst_state(inst, MSM_VIDC_ERROR, __func__);
  4610. }
  4611. exit:
  4612. inst_unlock(inst, __func__);
  4613. put_inst(inst);
  4614. }
  4615. int msm_vidc_flush_buffers(struct msm_vidc_inst *inst,
  4616. enum msm_vidc_buffer_type type)
  4617. {
  4618. int rc = 0;
  4619. struct msm_vidc_buffers *buffers;
  4620. struct msm_vidc_buffer *buf, *dummy;
  4621. enum msm_vidc_buffer_type buffer_type[2];
  4622. int i;
  4623. if (!inst) {
  4624. d_vpr_e("%s: invalid params\n", __func__);
  4625. return -EINVAL;
  4626. }
  4627. if (type == MSM_VIDC_BUF_INPUT) {
  4628. buffer_type[0] = MSM_VIDC_BUF_INPUT_META;
  4629. buffer_type[1] = MSM_VIDC_BUF_INPUT;
  4630. } else if (type == MSM_VIDC_BUF_OUTPUT) {
  4631. buffer_type[0] = MSM_VIDC_BUF_OUTPUT_META;
  4632. buffer_type[1] = MSM_VIDC_BUF_OUTPUT;
  4633. } else {
  4634. i_vpr_h(inst, "%s: invalid buffer type %d\n",
  4635. __func__, type);
  4636. return -EINVAL;
  4637. }
  4638. for (i = 0; i < ARRAY_SIZE(buffer_type); i++) {
  4639. buffers = msm_vidc_get_buffers(inst, buffer_type[i], __func__);
  4640. if (!buffers)
  4641. return -EINVAL;
  4642. list_for_each_entry_safe(buf, dummy, &buffers->list, list) {
  4643. if (buf->attr & MSM_VIDC_ATTR_QUEUED ||
  4644. buf->attr & MSM_VIDC_ATTR_DEFERRED) {
  4645. print_vidc_buffer(VIDC_HIGH, "high", "flushing buffer", inst, buf);
  4646. if (!(buf->attr & MSM_VIDC_ATTR_BUFFER_DONE))
  4647. msm_vidc_buffer_done(inst, buf);
  4648. msm_vidc_put_driver_buf(inst, buf);
  4649. }
  4650. }
  4651. }
  4652. return rc;
  4653. }
/*
 * msm_vidc_flush_delayed_unmap_buffers() - drop delayed-unmap mappings
 * @inst: video session instance
 * @type: MSM_VIDC_BUF_INPUT or MSM_VIDC_BUF_OUTPUT
 *
 * Walks the mapping lists of the meta+data buffer pair for the given
 * direction and completely unmaps every mapping that was created with
 * skip_delayed_unmap, unless its dma-buf is still held on the read-only
 * list (decoder reference frames the client may still read).
 *
 * Return: 0 on success, -EINVAL on bad params or missing mapping list.
 */
int msm_vidc_flush_delayed_unmap_buffers(struct msm_vidc_inst *inst,
	enum msm_vidc_buffer_type type)
{
	int rc = 0;
	struct msm_vidc_mappings *maps;
	struct msm_vidc_map *map, *dummy;
	struct msm_vidc_buffer *ro_buf, *ro_dummy;
	enum msm_vidc_buffer_type buffer_type[2];
	int i;
	bool found = false;

	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (type == MSM_VIDC_BUF_INPUT) {
		buffer_type[0] = MSM_VIDC_BUF_INPUT_META;
		buffer_type[1] = MSM_VIDC_BUF_INPUT;
	} else if (type == MSM_VIDC_BUF_OUTPUT) {
		buffer_type[0] = MSM_VIDC_BUF_OUTPUT_META;
		buffer_type[1] = MSM_VIDC_BUF_OUTPUT;
	} else {
		i_vpr_h(inst, "%s: invalid buffer type %d\n",
			__func__, type);
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(buffer_type); i++) {
		maps = msm_vidc_get_mappings(inst, buffer_type[i], __func__);
		if (!maps)
			return -EINVAL;

		list_for_each_entry_safe(map, dummy, &maps->list, list) {
			/*
			 * decoder output bufs will have skip_delayed_unmap = true
			 * unmap all decoder output buffers except those present in
			 * read_only buffers list
			 */
			if (!map->skip_delayed_unmap)
				continue;

			/* is this mapping's dma-buf still on the read-only list? */
			found = false;
			list_for_each_entry_safe(ro_buf, ro_dummy,
				&inst->buffers.read_only.list, list) {
				if (map->dmabuf == ro_buf->dmabuf) {
					found = true;
					break;
				}
			}

			/* completely unmap */
			if (!found) {
				if (map->refcount > 1) {
					/* >1 here means a leaked reference somewhere */
					i_vpr_e(inst,
						"%s: unexpected map refcount: %u device addr %#x\n",
						__func__, map->refcount, map->device_addr);
					msm_vidc_change_inst_state(inst, MSM_VIDC_ERROR, __func__);
				}
				msm_vidc_memory_unmap_completely(inst, map);
			}
		}
	}

	return rc;
}
/*
 * msm_vidc_destroy_buffers() - release every buffer and tracking list owned
 * by @inst ahead of instance close.
 *
 * Teardown order as implemented below:
 *   1. internal (firmware-side) buffers: BIN/ARP/COMV/.../PARTIAL_DATA
 *   2. read_only and release lists (pool-allocated entries)
 *   3. external (v4l2) buffers: INPUT/OUTPUT and their META queues,
 *      signalling buffer-done for any buffer not yet returned, then
 *      unmapping the queue
 *   4. timestamp, reorder-timestamp, input-timer, dmabuf-tracker,
 *      response-work, firmware/children/caps cap lists and fences
 *   5. finally the memory pools themselves
 */
void msm_vidc_destroy_buffers(struct msm_vidc_inst *inst)
{
	struct msm_vidc_buffers *buffers;
	struct msm_vidc_buffer *buf, *dummy;
	struct msm_vidc_timestamp *ts, *dummy_ts;
	struct msm_memory_dmabuf *dbuf, *dummy_dbuf;
	struct msm_vidc_input_timer *timer, *dummy_timer;
	struct response_work *work, *dummy_work = NULL;
	struct msm_vidc_inst_cap_entry *entry, *dummy_entry;
	struct msm_vidc_fence *fence, *dummy_fence;
	static const enum msm_vidc_buffer_type ext_buf_types[] = {
		MSM_VIDC_BUF_INPUT,
		MSM_VIDC_BUF_OUTPUT,
		MSM_VIDC_BUF_INPUT_META,
		MSM_VIDC_BUF_OUTPUT_META,
	};
	static const enum msm_vidc_buffer_type internal_buf_types[] = {
		MSM_VIDC_BUF_BIN,
		MSM_VIDC_BUF_ARP,
		MSM_VIDC_BUF_COMV,
		MSM_VIDC_BUF_NON_COMV,
		MSM_VIDC_BUF_LINE,
		MSM_VIDC_BUF_DPB,
		MSM_VIDC_BUF_PERSIST,
		MSM_VIDC_BUF_VPSS,
		MSM_VIDC_BUF_PARTIAL_DATA,
	};
	int i;

	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}

	/* internal buffers first; a missing list is skipped, not fatal */
	for (i = 0; i < ARRAY_SIZE(internal_buf_types); i++) {
		buffers = msm_vidc_get_buffers(inst, internal_buf_types[i], __func__);
		if (!buffers)
			continue;
		list_for_each_entry_safe(buf, dummy, &buffers->list, list) {
			i_vpr_h(inst,
				"destroying internal buffer: type %d idx %d fd %d addr %#x size %d\n",
				buf->type, buf->index, buf->fd, buf->device_addr, buf->buffer_size);
			msm_vidc_destroy_internal_buffer(inst, buf);
		}
	}

	/* read_only and release list does not take dma ref_count using dma_buf_get().
	 * dma_buf ptr will be obsolete when its ref_count reaches zero. Hence print
	 * the dma_buf info before releasing the ref count.
	 */
	list_for_each_entry_safe(buf, dummy, &inst->buffers.read_only.list, list) {
		print_vidc_buffer(VIDC_ERR, "err ", "destroying ro buffer", inst, buf);
		list_del(&buf->list);
		msm_memory_pool_free(inst, buf);
	}

	list_for_each_entry_safe(buf, dummy, &inst->buffers.release.list, list) {
		print_vidc_buffer(VIDC_ERR, "err ", "destroying release buffer", inst, buf);
		list_del(&buf->list);
		msm_memory_pool_free(inst, buf);
	}

	for (i = 0; i < ARRAY_SIZE(ext_buf_types); i++) {
		buffers = msm_vidc_get_buffers(inst, ext_buf_types[i], __func__);
		if (!buffers)
			continue;
		list_for_each_entry_safe(buf, dummy, &buffers->list, list) {
			print_vidc_buffer(VIDC_ERR, "err ", "destroying ", inst, buf);
			/* return any buffer the client has not yet received */
			if (!(buf->attr & MSM_VIDC_ATTR_BUFFER_DONE))
				msm_vidc_buffer_done(inst, buf);
			msm_vidc_put_driver_buf(inst, buf);
		}
		msm_vidc_unmap_buffers(inst, ext_buf_types[i]);
	}

	list_for_each_entry_safe(ts, dummy_ts, &inst->timestamps.list, sort.list) {
		i_vpr_e(inst, "%s: removing ts: val %lld, rank %lld\n",
			__func__, ts->sort.val, ts->rank);
		list_del(&ts->sort.list);
		msm_memory_pool_free(inst, ts);
	}

	list_for_each_entry_safe(ts, dummy_ts, &inst->ts_reorder.list, sort.list) {
		i_vpr_e(inst, "%s: removing reorder ts: val %lld\n",
			__func__, ts->sort.val);
		list_del(&ts->sort.list);
		msm_memory_pool_free(inst, ts);
	}

	list_for_each_entry_safe(timer, dummy_timer, &inst->input_timer_list, list) {
		i_vpr_e(inst, "%s: removing input_timer %lld\n",
			__func__, timer->time_us);
		msm_memory_pool_free(inst, timer);
	}

	list_for_each_entry_safe(dbuf, dummy_dbuf, &inst->dmabuf_tracker, list) {
		i_vpr_e(inst, "%s: removing dma_buf %#x, refcount %u\n",
			__func__, dbuf->dmabuf, dbuf->refcount);
		msm_vidc_memory_put_dmabuf_completely(inst, dbuf);
	}

	list_for_each_entry_safe(work, dummy_work, &inst->response_works, list) {
		list_del(&work->list);
		kfree(work->data);
		kfree(work);
	}

	list_for_each_entry_safe(entry, dummy_entry, &inst->firmware_list, list) {
		i_vpr_e(inst, "%s: fw list: %s\n", __func__, cap_name(entry->cap_id));
		list_del(&entry->list);
		kfree(entry);
	}

	list_for_each_entry_safe(entry, dummy_entry, &inst->children_list, list) {
		i_vpr_e(inst, "%s: child list: %s\n", __func__, cap_name(entry->cap_id));
		list_del(&entry->list);
		kfree(entry);
	}

	list_for_each_entry_safe(entry, dummy_entry, &inst->caps_list, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	list_for_each_entry_safe(fence, dummy_fence, &inst->fence_list, list) {
		i_vpr_e(inst, "%s: destroying fence %s\n", __func__, fence->name);
		msm_vidc_fence_destroy(inst, (u32)fence->dma_fence.seqno);
	}

	/* destroy buffers from pool */
	msm_memory_pools_deinit(inst);
}
/*
 * msm_vidc_close_helper() - kref release callback, invoked when the last
 * reference on the instance is dropped (see put_inst()).
 *
 * Tears down per-instance state in order: fence, event queue, vb2 queues,
 * debugfs, codec-specific deinit, input CR list, response workqueue,
 * dangling-session bookkeeping, then the locks and the allocation itself.
 */
static void msm_vidc_close_helper(struct kref *kref)
{
	struct msm_vidc_inst *inst = container_of(kref,
		struct msm_vidc_inst, kref);

	i_vpr_h(inst, "%s()\n", __func__);
	msm_vidc_fence_deinit(inst);
	msm_vidc_event_queue_deinit(inst);
	msm_vidc_vb2_queue_deinit(inst);
	msm_vidc_debugfs_deinit_inst(inst);
	if (is_decode_session(inst))
		msm_vdec_inst_deinit(inst);
	else if (is_encode_session(inst))
		msm_venc_inst_deinit(inst);
	msm_vidc_free_input_cr_list(inst);
	if (inst->response_workq)
		destroy_workqueue(inst->response_workq);
	msm_vidc_remove_dangling_session(inst);
	mutex_destroy(&inst->request_lock);
	mutex_destroy(&inst->lock);
	kfree(inst->capabilities);
	kfree(inst);
}
  4852. struct msm_vidc_inst *get_inst_ref(struct msm_vidc_core *core,
  4853. struct msm_vidc_inst *instance)
  4854. {
  4855. struct msm_vidc_inst *inst = NULL;
  4856. bool matches = false;
  4857. if (!core) {
  4858. d_vpr_e("%s: invalid params\n", __func__);
  4859. return NULL;
  4860. }
  4861. mutex_lock(&core->lock);
  4862. list_for_each_entry(inst, &core->instances, list) {
  4863. if (inst == instance) {
  4864. matches = true;
  4865. break;
  4866. }
  4867. }
  4868. inst = (matches && kref_get_unless_zero(&inst->kref)) ? inst : NULL;
  4869. mutex_unlock(&core->lock);
  4870. return inst;
  4871. }
  4872. struct msm_vidc_inst *get_inst(struct msm_vidc_core *core,
  4873. u32 session_id)
  4874. {
  4875. struct msm_vidc_inst *inst = NULL;
  4876. bool matches = false;
  4877. if (!core) {
  4878. d_vpr_e("%s: invalid params\n", __func__);
  4879. return NULL;
  4880. }
  4881. mutex_lock(&core->lock);
  4882. list_for_each_entry(inst, &core->instances, list) {
  4883. if (inst->session_id == session_id) {
  4884. matches = true;
  4885. break;
  4886. }
  4887. }
  4888. inst = (matches && kref_get_unless_zero(&inst->kref)) ? inst : NULL;
  4889. mutex_unlock(&core->lock);
  4890. return inst;
  4891. }
/*
 * put_inst() - drop a reference obtained via get_inst()/get_inst_ref().
 * The instance is destroyed through msm_vidc_close_helper() once the
 * refcount reaches zero.
 */
void put_inst(struct msm_vidc_inst *inst)
{
	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}

	kref_put(&inst->kref, msm_vidc_close_helper);
}
/*
 * Core/instance mutex wrappers. The func/function parameter is unused in
 * these bodies; presumably kept so call sites can pass __func__ for a
 * uniform tracing signature — TODO confirm against other build variants.
 *
 * Note: mutex_is_locked() reports whether the mutex is held by *anyone*,
 * not necessarily by the current thread.
 */
bool core_lock_check(struct msm_vidc_core *core, const char *func)
{
	return mutex_is_locked(&core->lock);
}

void core_lock(struct msm_vidc_core *core, const char *function)
{
	mutex_lock(&core->lock);
}

void core_unlock(struct msm_vidc_core *core, const char *function)
{
	mutex_unlock(&core->lock);
}

bool inst_lock_check(struct msm_vidc_inst *inst, const char *func)
{
	return mutex_is_locked(&inst->lock);
}

void inst_lock(struct msm_vidc_inst *inst, const char *function)
{
	mutex_lock(&inst->lock);
}

void inst_unlock(struct msm_vidc_inst *inst, const char *function)
{
	mutex_unlock(&inst->lock);
}
  4924. int msm_vidc_update_bitstream_buffer_size(struct msm_vidc_inst *inst)
  4925. {
  4926. struct msm_vidc_core *core;
  4927. struct v4l2_format *fmt;
  4928. if (!inst || !inst->core) {
  4929. d_vpr_e("%s: invalid params\n", __func__);
  4930. return -EINVAL;
  4931. }
  4932. core = inst->core;
  4933. if (is_decode_session(inst)) {
  4934. fmt = &inst->fmts[INPUT_PORT];
  4935. fmt->fmt.pix_mp.plane_fmt[0].sizeimage = call_session_op(core,
  4936. buffer_size, inst, MSM_VIDC_BUF_INPUT);
  4937. }
  4938. return 0;
  4939. }
  4940. int msm_vidc_update_meta_port_settings(struct msm_vidc_inst *inst)
  4941. {
  4942. struct msm_vidc_core *core;
  4943. struct v4l2_format *fmt;
  4944. if (!inst || !inst->core) {
  4945. d_vpr_e("%s: invalid params\n", __func__);
  4946. return -EINVAL;
  4947. }
  4948. core = inst->core;
  4949. fmt = &inst->fmts[INPUT_META_PORT];
  4950. fmt->fmt.meta.buffersize = call_session_op(core,
  4951. buffer_size, inst, MSM_VIDC_BUF_INPUT_META);
  4952. inst->buffers.input_meta.min_count =
  4953. inst->buffers.input.min_count;
  4954. inst->buffers.input_meta.extra_count =
  4955. inst->buffers.input.extra_count;
  4956. inst->buffers.input_meta.actual_count =
  4957. inst->buffers.input.actual_count;
  4958. inst->buffers.input_meta.size = fmt->fmt.meta.buffersize;
  4959. fmt = &inst->fmts[OUTPUT_META_PORT];
  4960. fmt->fmt.meta.buffersize = call_session_op(core,
  4961. buffer_size, inst, MSM_VIDC_BUF_OUTPUT_META);
  4962. inst->buffers.output_meta.min_count =
  4963. inst->buffers.output.min_count;
  4964. inst->buffers.output_meta.extra_count =
  4965. inst->buffers.output.extra_count;
  4966. inst->buffers.output_meta.actual_count =
  4967. inst->buffers.output.actual_count;
  4968. inst->buffers.output_meta.size = fmt->fmt.meta.buffersize;
  4969. return 0;
  4970. }
/*
 * msm_vidc_update_buffer_count() - recompute min/extra/actual buffer counts
 * for @port (INPUT_PORT or OUTPUT_PORT) from the session ops, and mirror
 * them onto the matching meta port when metadata is enabled (zeroing the
 * meta counts otherwise).
 *
 * actual_count is only raised, never lowered: it is bumped to
 * min_count + extra_count when currently below that sum.
 *
 * Return: 0 on success, -EINVAL on bad params or unknown port.
 */
int msm_vidc_update_buffer_count(struct msm_vidc_inst *inst, u32 port)
{
	struct msm_vidc_core *core;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;

	switch (port) {
	case INPUT_PORT:
		inst->buffers.input.min_count = call_session_op(core,
			min_count, inst, MSM_VIDC_BUF_INPUT);
		inst->buffers.input.extra_count = call_session_op(core,
			extra_count, inst, MSM_VIDC_BUF_INPUT);
		/* never shrink actual_count below min + extra */
		if (inst->buffers.input.actual_count <
			inst->buffers.input.min_count +
			inst->buffers.input.extra_count) {
			inst->buffers.input.actual_count =
				inst->buffers.input.min_count +
				inst->buffers.input.extra_count;
		}
		if (is_input_meta_enabled(inst)) {
			/* meta port tracks the data port counts */
			inst->buffers.input_meta.min_count =
					inst->buffers.input.min_count;
			inst->buffers.input_meta.extra_count =
					inst->buffers.input.extra_count;
			inst->buffers.input_meta.actual_count =
					inst->buffers.input.actual_count;
		} else {
			inst->buffers.input_meta.min_count = 0;
			inst->buffers.input_meta.extra_count = 0;
			inst->buffers.input_meta.actual_count = 0;
		}
		i_vpr_h(inst, "%s: type: INPUT, count: min %u, extra %u, actual %u\n", __func__,
			inst->buffers.input.min_count,
			inst->buffers.input.extra_count,
			inst->buffers.input.actual_count);
		break;
	case OUTPUT_PORT:
		/*
		 * min_count is only recomputed while the input port is not
		 * streaming — presumably to keep counts stable mid-stream;
		 * TODO confirm against firmware requirements.
		 */
		if (!inst->bufq[INPUT_PORT].vb2q->streaming)
			inst->buffers.output.min_count = call_session_op(core,
				min_count, inst, MSM_VIDC_BUF_OUTPUT);
		inst->buffers.output.extra_count = call_session_op(core,
			extra_count, inst, MSM_VIDC_BUF_OUTPUT);
		/* never shrink actual_count below min + extra */
		if (inst->buffers.output.actual_count <
			inst->buffers.output.min_count +
			inst->buffers.output.extra_count) {
			inst->buffers.output.actual_count =
				inst->buffers.output.min_count +
				inst->buffers.output.extra_count;
		}
		if (is_output_meta_enabled(inst)) {
			/* meta port tracks the data port counts */
			inst->buffers.output_meta.min_count =
					inst->buffers.output.min_count;
			inst->buffers.output_meta.extra_count =
					inst->buffers.output.extra_count;
			inst->buffers.output_meta.actual_count =
					inst->buffers.output.actual_count;
		} else {
			inst->buffers.output_meta.min_count = 0;
			inst->buffers.output_meta.extra_count = 0;
			inst->buffers.output_meta.actual_count = 0;
		}
		i_vpr_h(inst, "%s: type: OUTPUT, count: min %u, extra %u, actual %u\n", __func__,
			inst->buffers.output.min_count,
			inst->buffers.output.extra_count,
			inst->buffers.output.actual_count);
		break;
	default:
		d_vpr_e("%s unknown port %d\n", __func__, port);
		return -EINVAL;
	}

	return 0;
}
  5045. void msm_vidc_schedule_core_deinit(struct msm_vidc_core *core)
  5046. {
  5047. if (!core)
  5048. return;
  5049. if (!core->capabilities[FW_UNLOAD].value)
  5050. return;
  5051. cancel_delayed_work(&core->fw_unload_work);
  5052. schedule_delayed_work(&core->fw_unload_work,
  5053. msecs_to_jiffies(core->capabilities[FW_UNLOAD_DELAY].value));
  5054. d_vpr_h("firmware unload delayed by %u ms\n",
  5055. core->capabilities[FW_UNLOAD_DELAY].value);
  5056. return;
  5057. }
  5058. static const char *get_codec_str(enum msm_vidc_codec_type type)
  5059. {
  5060. switch (type) {
  5061. case MSM_VIDC_H264: return "h264";
  5062. case MSM_VIDC_HEVC: return "h265";
  5063. case MSM_VIDC_VP9: return " vp9";
  5064. case MSM_VIDC_AV1: return " av1";
  5065. case MSM_VIDC_HEIC: return "heic";
  5066. }
  5067. return "....";
  5068. }
  5069. static const char *get_domain_str(enum msm_vidc_domain_type type)
  5070. {
  5071. switch (type) {
  5072. case MSM_VIDC_ENCODER: return "e";
  5073. case MSM_VIDC_DECODER: return "d";
  5074. }
  5075. return ".";
  5076. }
  5077. int msm_vidc_update_debug_str(struct msm_vidc_inst *inst)
  5078. {
  5079. u32 sid;
  5080. const char *codec;
  5081. const char *domain;
  5082. if (!inst) {
  5083. d_vpr_e("%s: Invalid params\n", __func__);
  5084. return -EINVAL;
  5085. }
  5086. sid = inst->session_id;
  5087. codec = get_codec_str(inst->codec);
  5088. domain = get_domain_str(inst->domain);
  5089. snprintf(inst->debug_str, sizeof(inst->debug_str), "%08x: %s%s", sid, codec, domain);
  5090. d_vpr_h("%s: sid: %08x, codec: %s, domain: %s, final: %s\n",
  5091. __func__, sid, codec, domain, inst->debug_str);
  5092. return 0;
  5093. }
  5094. static int msm_vidc_print_insts_info(struct msm_vidc_core *core)
  5095. {
  5096. struct msm_vidc_inst *inst;
  5097. u32 height, width, fps, orate;
  5098. struct msm_vidc_inst_capability *capability;
  5099. struct v4l2_format *out_f;
  5100. struct v4l2_format *inp_f;
  5101. char prop[64];
  5102. d_vpr_e("Print all running instances\n");
  5103. d_vpr_e("%6s | %6s | %5s | %5s | %5s\n", "width", "height", "fps", "orate", "prop");
  5104. core_lock(core, __func__);
  5105. list_for_each_entry(inst, &core->instances, list) {
  5106. out_f = &inst->fmts[OUTPUT_PORT];
  5107. inp_f = &inst->fmts[INPUT_PORT];
  5108. capability = inst->capabilities;
  5109. memset(&prop, 0, sizeof(prop));
  5110. width = max(out_f->fmt.pix_mp.width, inp_f->fmt.pix_mp.width);
  5111. height = max(out_f->fmt.pix_mp.height, inp_f->fmt.pix_mp.height);
  5112. fps = capability->cap[FRAME_RATE].value >> 16;
  5113. orate = capability->cap[OPERATING_RATE].value >> 16;
  5114. if (is_realtime_session(inst))
  5115. strlcat(prop, "RT ", sizeof(prop));
  5116. else
  5117. strlcat(prop, "NRT", sizeof(prop));
  5118. if (is_thumbnail_session(inst))
  5119. strlcat(prop, "+THUMB", sizeof(prop));
  5120. if (is_image_session(inst))
  5121. strlcat(prop, "+IMAGE", sizeof(prop));
  5122. i_vpr_e(inst, "%6u | %6u | %5u | %5u | %5s\n", width, height, fps, orate, prop);
  5123. }
  5124. core_unlock(core, __func__);
  5125. return 0;
  5126. }
  5127. bool msm_vidc_ignore_session_load(struct msm_vidc_inst *inst) {
  5128. if (!inst) {
  5129. d_vpr_e("%s: invalid params\n", __func__);
  5130. return -EINVAL;
  5131. }
  5132. if (!is_realtime_session(inst) || is_thumbnail_session(inst) ||
  5133. is_image_session(inst))
  5134. return true;
  5135. return false;
  5136. }
  5137. int msm_vidc_check_core_mbps(struct msm_vidc_inst *inst)
  5138. {
  5139. u32 mbps = 0, total_mbps = 0, enc_mbps = 0;
  5140. u32 critical_mbps = 0;
  5141. struct msm_vidc_core *core;
  5142. struct msm_vidc_inst *instance;
  5143. if (!inst || !inst->core || !inst->capabilities) {
  5144. d_vpr_e("%s: invalid params\n", __func__);
  5145. return -EINVAL;
  5146. }
  5147. core = inst->core;
  5148. /* skip mbps check for non-realtime, thumnail, image sessions */
  5149. if (msm_vidc_ignore_session_load(inst)) {
  5150. i_vpr_h(inst,
  5151. "%s: skip mbps check due to NRT %d, TH %d, IMG %d\n", __func__,
  5152. !is_realtime_session(inst), is_thumbnail_session(inst),
  5153. is_image_session(inst));
  5154. return 0;
  5155. }
  5156. core_lock(core, __func__);
  5157. list_for_each_entry(instance, &core->instances, list) {
  5158. if (is_critical_priority_session(instance))
  5159. critical_mbps += msm_vidc_get_inst_load(instance);
  5160. }
  5161. core_unlock(core, __func__);
  5162. if (critical_mbps > core->capabilities[MAX_MBPS].value) {
  5163. i_vpr_e(inst, "%s: Hardware overloaded with critical sessions. needed %u, max %u",
  5164. __func__, critical_mbps, core->capabilities[MAX_MBPS].value);
  5165. return -ENOMEM;
  5166. }
  5167. core_lock(core, __func__);
  5168. list_for_each_entry(instance, &core->instances, list) {
  5169. /* ignore invalid/error session */
  5170. if (is_session_error(instance))
  5171. continue;
  5172. /* ignore thumbnail, image, and non realtime sessions */
  5173. if (msm_vidc_ignore_session_load(inst))
  5174. continue;
  5175. mbps = msm_vidc_get_inst_load(instance);
  5176. total_mbps += mbps;
  5177. if (is_encode_session(instance))
  5178. enc_mbps += mbps;
  5179. }
  5180. core_unlock(core, __func__);
  5181. if (is_encode_session(inst)) {
  5182. /* reject encoder if all encoders mbps is greater than MAX_MBPS */
  5183. if (enc_mbps > core->capabilities[MAX_MBPS].value) {
  5184. i_vpr_e(inst, "%s: Hardware overloaded. needed %u, max %u", __func__,
  5185. mbps, core->capabilities[MAX_MBPS].value);
  5186. return -ENOMEM;
  5187. }
  5188. /*
  5189. * if total_mbps is greater than max_mbps then reduce all decoders
  5190. * priority by 1 to allow this encoder
  5191. */
  5192. if (total_mbps > core->capabilities[MAX_MBPS].value) {
  5193. core_lock(core, __func__);
  5194. list_for_each_entry(instance, &core->instances, list) {
  5195. /* reduce realtime decode sessions priority */
  5196. if (is_decode_session(inst) && is_realtime_session(inst)) {
  5197. instance->adjust_priority = RT_DEC_DOWN_PRORITY_OFFSET;
  5198. i_vpr_h(inst, "%s: pending adjust priority by %d\n",
  5199. __func__, inst->adjust_priority);
  5200. }
  5201. }
  5202. core_unlock(core, __func__);
  5203. }
  5204. } else if (is_decode_session(inst)){
  5205. if (total_mbps > core->capabilities[MAX_MBPS].value) {
  5206. inst->adjust_priority = RT_DEC_DOWN_PRORITY_OFFSET;
  5207. i_vpr_h(inst, "%s: pending adjust priority by %d\n",
  5208. __func__, inst->adjust_priority);
  5209. }
  5210. }
  5211. i_vpr_h(inst, "%s: HW load needed %u is within max %u", __func__,
  5212. total_mbps, core->capabilities[MAX_MBPS].value);
  5213. return 0;
  5214. }
  5215. int msm_vidc_check_core_mbpf(struct msm_vidc_inst *inst)
  5216. {
  5217. u32 video_mbpf = 0, image_mbpf = 0, video_rt_mbpf = 0;
  5218. u32 critical_mbpf = 0;
  5219. struct msm_vidc_core *core;
  5220. struct msm_vidc_inst *instance;
  5221. if (!inst || !inst->core) {
  5222. d_vpr_e("%s: invalid params\n", __func__);
  5223. return -EINVAL;
  5224. }
  5225. core = inst->core;
  5226. core_lock(core, __func__);
  5227. list_for_each_entry(instance, &core->instances, list) {
  5228. if (is_critical_priority_session(instance))
  5229. critical_mbpf += msm_vidc_get_mbs_per_frame(instance);
  5230. }
  5231. core_unlock(core, __func__);
  5232. if (critical_mbpf > core->capabilities[MAX_MBPF].value) {
  5233. i_vpr_e(inst, "%s: Hardware overloaded with critical sessions. needed %u, max %u",
  5234. __func__, critical_mbpf, core->capabilities[MAX_MBPF].value);
  5235. return -ENOMEM;
  5236. }
  5237. core_lock(core, __func__);
  5238. list_for_each_entry(instance, &core->instances, list) {
  5239. /* ignore thumbnail session */
  5240. if (is_thumbnail_session(instance))
  5241. continue;
  5242. if (is_image_session(instance))
  5243. image_mbpf += msm_vidc_get_mbs_per_frame(instance);
  5244. else
  5245. video_mbpf += msm_vidc_get_mbs_per_frame(instance);
  5246. }
  5247. core_unlock(core, __func__);
  5248. if (video_mbpf > core->capabilities[MAX_MBPF].value) {
  5249. i_vpr_e(inst, "%s: video overloaded. needed %u, max %u", __func__,
  5250. video_mbpf, core->capabilities[MAX_MBPF].value);
  5251. return -ENOMEM;
  5252. }
  5253. if (image_mbpf > core->capabilities[MAX_IMAGE_MBPF].value) {
  5254. i_vpr_e(inst, "%s: image overloaded. needed %u, max %u", __func__,
  5255. image_mbpf, core->capabilities[MAX_IMAGE_MBPF].value);
  5256. return -ENOMEM;
  5257. }
  5258. core_lock(core, __func__);
  5259. /* check real-time video sessions max limit */
  5260. list_for_each_entry(instance, &core->instances, list) {
  5261. if (msm_vidc_ignore_session_load(inst))
  5262. continue;
  5263. video_rt_mbpf += msm_vidc_get_mbs_per_frame(instance);
  5264. }
  5265. core_unlock(core, __func__);
  5266. if (video_rt_mbpf > core->capabilities[MAX_RT_MBPF].value) {
  5267. i_vpr_e(inst, "%s: real-time video overloaded. needed %u, max %u",
  5268. __func__, video_rt_mbpf, core->capabilities[MAX_RT_MBPF].value);
  5269. return -ENOMEM;
  5270. }
  5271. return 0;
  5272. }
  5273. static int msm_vidc_check_inst_mbpf(struct msm_vidc_inst *inst)
  5274. {
  5275. u32 mbpf = 0, max_mbpf = 0;
  5276. struct msm_vidc_inst_capability *capability;
  5277. if (!inst || !inst->capabilities) {
  5278. d_vpr_e("%s: invalid params\n", __func__);
  5279. return -EINVAL;
  5280. }
  5281. capability = inst->capabilities;
  5282. if (is_secure_session(inst))
  5283. max_mbpf = capability->cap[SECURE_MBPF].max;
  5284. else if (is_encode_session(inst) && capability->cap[LOSSLESS].value)
  5285. max_mbpf = capability->cap[LOSSLESS_MBPF].max;
  5286. else
  5287. max_mbpf = capability->cap[MBPF].max;
  5288. /* check current session mbpf */
  5289. mbpf = msm_vidc_get_mbs_per_frame(inst);
  5290. if (mbpf > max_mbpf) {
  5291. i_vpr_e(inst, "%s: session overloaded. needed %u, max %u", __func__,
  5292. mbpf, max_mbpf);
  5293. return -ENOMEM;
  5294. }
  5295. return 0;
  5296. }
/*
 * msm_vidc_allow_image_encode_session() - validate HEIC (image encode)
 * session constraints.
 *
 * All of the following must hold: input width/height within FRAME_WIDTH/
 * FRAME_HEIGHT range, linear YUV input format, input aligned to
 * HEIC_GRID_DIMENSION, output exactly HEIC_GRID_DIMENSION x
 * HEIC_GRID_DIMENSION, bitrate mode CQ, all-intra (no GOP, no B-frames),
 * time-delta-based rate control disabled, and frame-skip mode disabled.
 *
 * Return: true when the session configuration is acceptable.
 */
static bool msm_vidc_allow_image_encode_session(struct msm_vidc_inst *inst)
{
	struct msm_vidc_inst_capability *capability;
	struct v4l2_format *fmt;
	u32 min_width, min_height, max_width, max_height, pix_fmt, profile;
	bool allow = false;

	if (!inst || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return false;
	}
	capability = inst->capabilities;

	if (!is_image_encode_session(inst)) {
		i_vpr_e(inst, "%s: not an image encode session\n", __func__);
		return false;
	}

	pix_fmt = capability->cap[PIX_FMTS].value;
	profile = capability->cap[PROFILE].value;

	/* is input width & height in allowed range */
	min_width = capability->cap[FRAME_WIDTH].min;
	max_width = capability->cap[FRAME_WIDTH].max;
	min_height = capability->cap[FRAME_HEIGHT].min;
	max_height = capability->cap[FRAME_HEIGHT].max;
	fmt = &inst->fmts[INPUT_PORT];
	if (!in_range(fmt->fmt.pix_mp.width, min_width, max_width) ||
		!in_range(fmt->fmt.pix_mp.height, min_height, max_height)) {
		i_vpr_e(inst, "unsupported wxh [%u x %u], allowed [%u x %u] to [%u x %u]\n",
			fmt->fmt.pix_mp.width, fmt->fmt.pix_mp.height,
			min_width, min_height, max_width, max_height);
		allow = false;
		goto exit;
	}

	/* is linear yuv color fmt */
	allow = is_linear_yuv_colorformat(pix_fmt);
	if (!allow) {
		i_vpr_e(inst, "%s: compressed fmt: %#x\n", __func__, pix_fmt);
		goto exit;
	}

	/* is input grid aligned */
	fmt = &inst->fmts[INPUT_PORT];
	allow = IS_ALIGNED(fmt->fmt.pix_mp.width, HEIC_GRID_DIMENSION);
	allow &= IS_ALIGNED(fmt->fmt.pix_mp.height, HEIC_GRID_DIMENSION);
	if (!allow) {
		i_vpr_e(inst, "%s: input is not grid aligned: %u x %u\n", __func__,
			fmt->fmt.pix_mp.width, fmt->fmt.pix_mp.height);
		goto exit;
	}

	/* is output exactly one grid tile */
	fmt = &inst->fmts[OUTPUT_PORT];
	allow = fmt->fmt.pix_mp.width == HEIC_GRID_DIMENSION;
	allow &= fmt->fmt.pix_mp.height == HEIC_GRID_DIMENSION;
	if (!allow) {
		i_vpr_e(inst, "%s: output is not a grid dimension: %u x %u\n", __func__,
			fmt->fmt.pix_mp.width, fmt->fmt.pix_mp.height);
		goto exit;
	}

	/* is bitrate mode CQ (constant quality) */
	allow = capability->cap[BITRATE_MODE].value == HFI_RC_CQ;
	if (!allow) {
		i_vpr_e(inst, "%s: bitrate mode is not CQ: %#x\n", __func__,
			capability->cap[BITRATE_MODE].value);
		goto exit;
	}

	/* is all intra: no GOP structure, no B-frames */
	allow = !capability->cap[GOP_SIZE].value;
	allow &= !capability->cap[B_FRAME].value;
	if (!allow) {
		i_vpr_e(inst, "%s: not all intra: gop: %u, bframe: %u\n", __func__,
			capability->cap[GOP_SIZE].value, capability->cap[B_FRAME].value);
		goto exit;
	}

	/* is time delta based rc disabled */
	allow = !capability->cap[TIME_DELTA_BASED_RC].value;
	if (!allow) {
		i_vpr_e(inst, "%s: time delta based rc not disabled: %#x\n", __func__,
			capability->cap[TIME_DELTA_BASED_RC].value);
		goto exit;
	}

	/* is frame skip mode disabled */
	allow = !capability->cap[FRAME_SKIP_MODE].value;
	if (!allow) {
		i_vpr_e(inst, "%s: frame skip mode not disabled: %#x\n", __func__,
			capability->cap[FRAME_SKIP_MODE].value);
		goto exit;
	}

exit:
	if (!allow)
		i_vpr_e(inst, "%s: current session not allowed\n", __func__);

	return allow;
}
/*
 * msm_vidc_check_resolution_supported() - validate the session resolution.
 *
 * Resolution source: decoder uses the INPUT_PORT format, encoder uses the
 * crop rectangle. The min/max limits come from SECURE_FRAME_* for secure
 * sessions, LOSSLESS_FRAME_* for lossless encoders, FRAME_* otherwise.
 * Also rejects odd encoder resolutions/compose and over-limit interlaced
 * content.
 *
 * Return: 0 when supported, -EINVAL otherwise.
 */
static int msm_vidc_check_resolution_supported(struct msm_vidc_inst *inst)
{
	struct msm_vidc_inst_capability *capability;
	u32 width = 0, height = 0, min_width, min_height,
		max_width, max_height;
	bool is_interlaced = false;

	if (!inst || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	capability = inst->capabilities;

	if (is_decode_session(inst)) {
		width = inst->fmts[INPUT_PORT].fmt.pix_mp.width;
		height = inst->fmts[INPUT_PORT].fmt.pix_mp.height;
	} else if (is_encode_session(inst)) {
		width = inst->crop.width;
		height = inst->crop.height;
	}

	/* pick the limit set that applies to this session type */
	if (is_secure_session(inst)) {
		min_width = capability->cap[SECURE_FRAME_WIDTH].min;
		max_width = capability->cap[SECURE_FRAME_WIDTH].max;
		min_height = capability->cap[SECURE_FRAME_HEIGHT].min;
		max_height = capability->cap[SECURE_FRAME_HEIGHT].max;
	} else if (is_encode_session(inst) && capability->cap[LOSSLESS].value) {
		min_width = capability->cap[LOSSLESS_FRAME_WIDTH].min;
		max_width = capability->cap[LOSSLESS_FRAME_WIDTH].max;
		min_height = capability->cap[LOSSLESS_FRAME_HEIGHT].min;
		max_height = capability->cap[LOSSLESS_FRAME_HEIGHT].max;
	} else {
		min_width = capability->cap[FRAME_WIDTH].min;
		max_width = capability->cap[FRAME_WIDTH].max;
		min_height = capability->cap[FRAME_HEIGHT].min;
		max_height = capability->cap[FRAME_HEIGHT].max;
	}

	/* reject odd resolution session */
	if (is_encode_session(inst) &&
		(is_odd(width) || is_odd(height) ||
		 is_odd(inst->compose.width) ||
		 is_odd(inst->compose.height))) {
		i_vpr_e(inst, "%s: resolution is not even. wxh [%u x %u], compose [%u x %u]\n",
			__func__, width, height, inst->compose.width,
			inst->compose.height);
		return -EINVAL;
	}

	/* check if input width and height is in supported range */
	if (is_decode_session(inst) || is_encode_session(inst)) {
		if (!in_range(width, min_width, max_width) ||
			!in_range(height, min_height, max_height)) {
			i_vpr_e(inst,
				"%s: unsupported input wxh [%u x %u], allowed range: [%u x %u] to [%u x %u]\n",
				__func__, width, height, min_width,
				min_height, max_width, max_height);
			return -EINVAL;
		}
	}

	/* check interlace supported resolution */
	is_interlaced = capability->cap[CODED_FRAMES].value == CODED_FRAMES_INTERLACE;
	if (is_interlaced && (width > INTERLACE_WIDTH_MAX || height > INTERLACE_HEIGHT_MAX ||
		NUM_MBS_PER_FRAME(width, height) > INTERLACE_MB_PER_FRAME_MAX)) {
		i_vpr_e(inst, "%s: unsupported interlace wxh [%u x %u], max [%u x %u]\n",
			__func__, width, height, INTERLACE_WIDTH_MAX, INTERLACE_HEIGHT_MAX);
		return -EINVAL;
	}

	return 0;
}
  5451. static int msm_vidc_check_max_sessions(struct msm_vidc_inst *inst)
  5452. {
  5453. u32 width = 0, height = 0;
  5454. u32 num_1080p_sessions = 0, num_4k_sessions = 0, num_8k_sessions = 0;
  5455. struct msm_vidc_inst *i;
  5456. struct msm_vidc_core *core;
  5457. if (!inst || !inst->core) {
  5458. d_vpr_e("%s: invalid params\n", __func__);
  5459. return -EINVAL;
  5460. }
  5461. core = inst->core;
  5462. if (!core->capabilities) {
  5463. i_vpr_e(inst, "%s: invalid params\n", __func__);
  5464. return -EINVAL;
  5465. }
  5466. core_lock(core, __func__);
  5467. list_for_each_entry(i, &core->instances, list) {
  5468. /* skip image sessions count */
  5469. if (is_image_session(inst))
  5470. continue;
  5471. if (is_decode_session(i)) {
  5472. width = i->fmts[INPUT_PORT].fmt.pix_mp.width;
  5473. height = i->fmts[INPUT_PORT].fmt.pix_mp.height;
  5474. } else if (is_encode_session(i)) {
  5475. width = i->crop.width;
  5476. height = i->crop.height;
  5477. }
  5478. /*
  5479. * one 8k session equals to 64 720p sessions in reality.
  5480. * So for one 8k session the number of 720p sessions will
  5481. * exceed max supported session count(16), hence one 8k session
  5482. * will be rejected as well.
  5483. * Therefore, treat one 8k session equal to two 4k sessions and
  5484. * one 4k session equal to two 1080p sessions and
  5485. * one 1080p session equal to two 720p sessions. This equation
  5486. * will make one 8k session equal to eight 720p sessions
  5487. * which looks good.
  5488. *
  5489. * Do not treat resolutions above 4k as 8k session instead
  5490. * treat (4K + half 4k) above as 8k session
  5491. */
  5492. if (res_is_greater_than(width, height, 4096 + (4096 >> 1), 2176 + (2176 >> 1))) {
  5493. num_8k_sessions += 1;
  5494. num_4k_sessions += 2;
  5495. num_1080p_sessions += 4;
  5496. } else if (res_is_greater_than(width, height, 1920 + (1920 >> 1), 1088 + (1088 >> 1))) {
  5497. num_4k_sessions += 1;
  5498. num_1080p_sessions += 2;
  5499. } else if (res_is_greater_than(width, height, 1280 + (1280 >> 1), 736 + (736 >> 1))) {
  5500. num_1080p_sessions += 1;
  5501. }
  5502. }
  5503. core_unlock(core, __func__);
  5504. if (num_8k_sessions > core->capabilities[MAX_NUM_8K_SESSIONS].value) {
  5505. i_vpr_e(inst, "%s: total 8k sessions %d, exceeded max limit %d\n",
  5506. __func__, num_8k_sessions,
  5507. core->capabilities[MAX_NUM_8K_SESSIONS].value);
  5508. return -ENOMEM;
  5509. }
  5510. if (num_4k_sessions > core->capabilities[MAX_NUM_4K_SESSIONS].value) {
  5511. i_vpr_e(inst, "%s: total 4K sessions %d, exceeded max limit %d\n",
  5512. __func__, num_4k_sessions,
  5513. core->capabilities[MAX_NUM_4K_SESSIONS].value);
  5514. return -ENOMEM;
  5515. }
  5516. if (num_1080p_sessions > core->capabilities[MAX_NUM_1080P_SESSIONS].value) {
  5517. i_vpr_e(inst, "%s: total 1080p sessions %d, exceeded max limit %d\n",
  5518. __func__, num_1080p_sessions,
  5519. core->capabilities[MAX_NUM_1080P_SESSIONS].value);
  5520. return -ENOMEM;
  5521. }
  5522. return 0;
  5523. }
  5524. int msm_vidc_check_session_supported(struct msm_vidc_inst *inst)
  5525. {
  5526. bool allow = false;
  5527. int rc = 0;
  5528. if (!inst) {
  5529. d_vpr_e("%s: invalid params\n", __func__);
  5530. return -EINVAL;
  5531. }
  5532. if (is_image_session(inst) && is_secure_session(inst)) {
  5533. i_vpr_e(inst, "%s: secure image session not supported\n", __func__);
  5534. rc = -EINVAL;
  5535. goto exit;
  5536. }
  5537. rc = msm_vidc_check_core_mbps(inst);
  5538. if (rc)
  5539. goto exit;
  5540. rc = msm_vidc_check_core_mbpf(inst);
  5541. if (rc)
  5542. goto exit;
  5543. rc = msm_vidc_check_inst_mbpf(inst);
  5544. if (rc)
  5545. goto exit;
  5546. rc = msm_vidc_check_resolution_supported(inst);
  5547. if (rc)
  5548. goto exit;
  5549. /* check image capabilities */
  5550. if (is_image_encode_session(inst)) {
  5551. allow = msm_vidc_allow_image_encode_session(inst);
  5552. if (!allow) {
  5553. rc = -EINVAL;
  5554. goto exit;
  5555. }
  5556. }
  5557. rc = msm_vidc_check_max_sessions(inst);
  5558. if (rc)
  5559. goto exit;
  5560. exit:
  5561. if (rc) {
  5562. i_vpr_e(inst, "%s: current session not supported\n", __func__);
  5563. msm_vidc_print_insts_info(inst->core);
  5564. }
  5565. return rc;
  5566. }
  5567. int msm_vidc_check_scaling_supported(struct msm_vidc_inst *inst)
  5568. {
  5569. u32 iwidth, owidth, iheight, oheight, ds_factor;
  5570. if (!inst || !inst->capabilities) {
  5571. d_vpr_e("%s: invalid params\n", __func__);
  5572. return -EINVAL;
  5573. }
  5574. if (is_image_session(inst) || is_decode_session(inst)) {
  5575. i_vpr_h(inst, "%s: Scaling is supported for encode session only\n", __func__);
  5576. return 0;
  5577. }
  5578. if (!is_scaling_enabled(inst)) {
  5579. i_vpr_h(inst, "%s: Scaling not enabled. skip scaling check\n", __func__);
  5580. return 0;
  5581. }
  5582. iwidth = inst->crop.width;
  5583. iheight = inst->crop.height;
  5584. owidth = inst->compose.width;
  5585. oheight = inst->compose.height;
  5586. ds_factor = inst->capabilities->cap[SCALE_FACTOR].value;
  5587. /* upscaling: encoder doesnot support upscaling */
  5588. if (owidth > iwidth || oheight > iheight) {
  5589. i_vpr_e(inst, "%s: upscale not supported: input [%u x %u], output [%u x %u]\n",
  5590. __func__, iwidth, iheight, owidth, oheight);
  5591. return -EINVAL;
  5592. }
  5593. /* downscaling: only supported upto 1/8 of width & 1/8 of height */
  5594. if (iwidth > owidth * ds_factor || iheight > oheight * ds_factor) {
  5595. i_vpr_e(inst,
  5596. "%s: unsupported ratio: input [%u x %u], output [%u x %u], ratio %u\n",
  5597. __func__, iwidth, iheight, owidth, oheight, ds_factor);
  5598. return -EINVAL;
  5599. }
  5600. return 0;
  5601. }
/* one (property, port) pair queried from firmware by msm_vidc_get_properties() */
struct msm_vidc_fw_query_params {
	u32 hfi_prop_name;	/* HFI_PROP_* identifier to query */
	u32 port;		/* HFI_PORT_* the property is queried on */
};
  5606. int msm_vidc_get_properties(struct msm_vidc_inst *inst)
  5607. {
  5608. int rc = 0;
  5609. int i;
  5610. static const struct msm_vidc_fw_query_params fw_query_params[] = {
  5611. {HFI_PROP_STAGE, HFI_PORT_NONE},
  5612. {HFI_PROP_PIPE, HFI_PORT_NONE},
  5613. {HFI_PROP_QUALITY_MODE, HFI_PORT_BITSTREAM}
  5614. };
  5615. if (!inst || !inst->capabilities) {
  5616. d_vpr_e("%s: invalid params\n", __func__);
  5617. return -EINVAL;
  5618. }
  5619. for (i = 0; i < ARRAY_SIZE(fw_query_params); i++) {
  5620. if (is_decode_session(inst)) {
  5621. if (fw_query_params[i].hfi_prop_name == HFI_PROP_QUALITY_MODE)
  5622. continue;
  5623. }
  5624. i_vpr_l(inst, "%s: querying fw for property %#x\n", __func__,
  5625. fw_query_params[i].hfi_prop_name);
  5626. rc = venus_hfi_session_property(inst,
  5627. fw_query_params[i].hfi_prop_name,
  5628. (HFI_HOST_FLAGS_RESPONSE_REQUIRED |
  5629. HFI_HOST_FLAGS_INTR_REQUIRED |
  5630. HFI_HOST_FLAGS_GET_PROPERTY),
  5631. fw_query_params[i].port,
  5632. HFI_PAYLOAD_NONE,
  5633. NULL,
  5634. 0);
  5635. if (rc)
  5636. return rc;
  5637. }
  5638. return 0;
  5639. }
  5640. int msm_vidc_create_input_metadata_buffer(struct msm_vidc_inst *inst, int fd)
  5641. {
  5642. int rc = 0;
  5643. struct msm_vidc_buffer *buf = NULL;
  5644. struct msm_vidc_buffers *buffers;
  5645. struct dma_buf *dma_buf;
  5646. if (!inst) {
  5647. d_vpr_e("%s: invalid params\n", __func__);
  5648. return -EINVAL;
  5649. }
  5650. if (fd < 0) {
  5651. i_vpr_e(inst, "%s: invalid input metadata buffer fd %d\n",
  5652. __func__, fd);
  5653. return -EINVAL;
  5654. }
  5655. buffers = msm_vidc_get_buffers(inst, MSM_VIDC_BUF_INPUT_META, __func__);
  5656. if (!buffers)
  5657. return -EINVAL;
  5658. buf = msm_memory_pool_alloc(inst, MSM_MEM_POOL_BUFFER);
  5659. if (!buf) {
  5660. i_vpr_e(inst, "%s: buffer pool alloc failed\n", __func__);
  5661. return -EINVAL;
  5662. }
  5663. INIT_LIST_HEAD(&buf->list);
  5664. buf->type = MSM_VIDC_BUF_INPUT_META;
  5665. buf->index = INT_MAX;
  5666. buf->fd = fd;
  5667. dma_buf = msm_vidc_memory_get_dmabuf(inst, fd);
  5668. if (!dma_buf) {
  5669. rc = -ENOMEM;
  5670. goto error_dma_buf;
  5671. }
  5672. buf->dmabuf = dma_buf;
  5673. buf->data_size = dma_buf->size;
  5674. buf->buffer_size = dma_buf->size;
  5675. buf->attr |= MSM_VIDC_ATTR_DEFERRED;
  5676. rc = msm_vidc_map_driver_buf(inst, buf);
  5677. if (rc)
  5678. goto error_map;
  5679. list_add_tail(&buf->list, &buffers->list);
  5680. return rc;
  5681. error_map:
  5682. msm_vidc_memory_put_dmabuf(inst, buf->dmabuf);
  5683. error_dma_buf:
  5684. msm_memory_pool_free(inst, buf);
  5685. return rc;
  5686. }
  5687. int msm_vidc_update_input_meta_buffer_index(struct msm_vidc_inst *inst,
  5688. struct vb2_buffer *vb2)
  5689. {
  5690. int rc = 0;
  5691. bool found = false;
  5692. struct msm_vidc_buffer *buf = NULL;
  5693. struct msm_vidc_buffers *buffers;
  5694. if (!inst || !vb2) {
  5695. d_vpr_e("%s: invalid params\n", __func__);
  5696. return -EINVAL;
  5697. }
  5698. if (vb2->type != INPUT_MPLANE)
  5699. return 0;
  5700. buffers = msm_vidc_get_buffers(inst, MSM_VIDC_BUF_INPUT_META, __func__);
  5701. if (!buffers)
  5702. return -EINVAL;
  5703. list_for_each_entry(buf, &buffers->list, list) {
  5704. if (buf->index == INT_MAX) {
  5705. buf->index = vb2->index;
  5706. found = true;
  5707. break;
  5708. }
  5709. }
  5710. if (!found) {
  5711. i_vpr_e(inst, "%s: missing input metabuffer for index %d\n",
  5712. __func__, vb2->index);
  5713. rc = -EINVAL;
  5714. }
  5715. return rc;
  5716. }