msm_vidc_driver.c 149 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/iommu.h>
  6. #include <linux/workqueue.h>
  7. #include <media/v4l2_vidc_extensions.h>
  8. #include "msm_media_info.h"
  9. #include "msm_vidc_driver.h"
  10. #include "msm_vidc_platform.h"
  11. #include "msm_vidc_internal.h"
  12. #include "msm_vidc_control.h"
  13. #include "msm_vidc_memory.h"
  14. #include "msm_vidc_power.h"
  15. #include "msm_vidc_debug.h"
  16. #include "msm_vidc_power.h"
  17. #include "msm_vidc.h"
  18. #include "msm_vdec.h"
  19. #include "msm_venc.h"
  20. #include "venus_hfi.h"
  21. #include "venus_hfi_response.h"
  22. #include "hfi_packet.h"
  23. #include "msm_vidc_events.h"
  24. extern struct msm_vidc_core *g_core;
  25. #define is_odd(val) ((val) % 2 == 1)
  26. #define in_range(val, min, max) (((min) <= (val)) && ((val) <= (max)))
  27. #define COUNT_BITS(a, out) { \
  28. while ((a) >= 1) { \
  29. (out) += (a) & (1); \
  30. (a) >>= (1); \
  31. } \
  32. }
  33. #define SSR_TYPE 0x0000000F
  34. #define SSR_TYPE_SHIFT 0
  35. #define SSR_SUB_CLIENT_ID 0x000000F0
  36. #define SSR_SUB_CLIENT_ID_SHIFT 4
  37. #define SSR_ADDR_ID 0xFFFFFFFF00000000
  38. #define SSR_ADDR_SHIFT 32
/* Pairs a capability id with its printable name, for debug logging. */
struct msm_vidc_cap_name {
	enum msm_vidc_inst_capability_type cap;
	char *name;
};
/*
 * Name table indexed DIRECTLY by capability id (cap_name_arr[cap]), so
 * entries must stay in the same order as the
 * msm_vidc_inst_capability_type enum; cap_name() cross-checks the .cap
 * field against the index before trusting an entry.
 */
static const struct msm_vidc_cap_name cap_name_arr[] = {
	{INST_CAP_NONE, "INST_CAP_NONE" },
	{FRAME_WIDTH, "FRAME_WIDTH" },
	{LOSSLESS_FRAME_WIDTH, "LOSSLESS_FRAME_WIDTH" },
	{SECURE_FRAME_WIDTH, "SECURE_FRAME_WIDTH" },
	{FRAME_HEIGHT, "FRAME_HEIGHT" },
	{LOSSLESS_FRAME_HEIGHT, "LOSSLESS_FRAME_HEIGHT" },
	{SECURE_FRAME_HEIGHT, "SECURE_FRAME_HEIGHT" },
	{PIX_FMTS, "PIX_FMTS" },
	{MIN_BUFFERS_INPUT, "MIN_BUFFERS_INPUT" },
	{MIN_BUFFERS_OUTPUT, "MIN_BUFFERS_OUTPUT" },
	{MBPF, "MBPF" },
	{LOSSLESS_MBPF, "LOSSLESS_MBPF" },
	{BATCH_MBPF, "BATCH_MBPF" },
	{BATCH_FPS, "BATCH_FPS" },
	{SECURE_MBPF, "SECURE_MBPF" },
	{MBPS, "MBPS" },
	{POWER_SAVE_MBPS, "POWER_SAVE_MBPS" },
	{FRAME_RATE, "FRAME_RATE" },
	{OPERATING_RATE, "OPERATING_RATE" },
	{SCALE_FACTOR, "SCALE_FACTOR" },
	{MB_CYCLES_VSP, "MB_CYCLES_VSP" },
	{MB_CYCLES_VPP, "MB_CYCLES_VPP" },
	{MB_CYCLES_LP, "MB_CYCLES_LP" },
	{MB_CYCLES_FW, "MB_CYCLES_FW" },
	{MB_CYCLES_FW_VPP, "MB_CYCLES_FW_VPP" },
	{SECURE_MODE, "SECURE_MODE" },
	{HFLIP, "HFLIP" },
	{VFLIP, "VFLIP" },
	{ROTATION, "ROTATION" },
	{SUPER_FRAME, "SUPER_FRAME" },
	{SLICE_INTERFACE, "SLICE_INTERFACE" },
	{HEADER_MODE, "HEADER_MODE" },
	{PREPEND_SPSPPS_TO_IDR, "PREPEND_SPSPPS_TO_IDR" },
	{META_SEQ_HDR_NAL, "META_SEQ_HDR_NAL" },
	{WITHOUT_STARTCODE, "WITHOUT_STARTCODE" },
	{NAL_LENGTH_FIELD, "NAL_LENGTH_FIELD" },
	{REQUEST_I_FRAME, "REQUEST_I_FRAME" },
	{BIT_RATE, "BIT_RATE" },
	{BITRATE_MODE, "BITRATE_MODE" },
	{LOSSLESS, "LOSSLESS" },
	{FRAME_SKIP_MODE, "FRAME_SKIP_MODE" },
	{FRAME_RC_ENABLE, "FRAME_RC_ENABLE" },
	{CONSTANT_QUALITY, "CONSTANT_QUALITY" },
	{GOP_SIZE, "GOP_SIZE" },
	{GOP_CLOSURE, "GOP_CLOSURE" },
	{B_FRAME, "B_FRAME" },
	{BLUR_TYPES, "BLUR_TYPES" },
	{BLUR_RESOLUTION, "BLUR_RESOLUTION" },
	{CSC, "CSC" },
	{CSC_CUSTOM_MATRIX, "CSC_CUSTOM_MATRIX" },
	{GRID, "GRID" },
	{LOWLATENCY_MODE, "LOWLATENCY_MODE" },
	{LTR_COUNT, "LTR_COUNT" },
	{USE_LTR, "USE_LTR" },
	{MARK_LTR, "MARK_LTR" },
	{BASELAYER_PRIORITY, "BASELAYER_PRIORITY" },
	{IR_RANDOM, "IR_RANDOM" },
	{AU_DELIMITER, "AU_DELIMITER" },
	{TIME_DELTA_BASED_RC, "TIME_DELTA_BASED_RC" },
	{CONTENT_ADAPTIVE_CODING, "CONTENT_ADAPTIVE_CODING" },
	{BITRATE_BOOST, "BITRATE_BOOST" },
	{MIN_QUALITY, "MIN_QUALITY" },
	{VBV_DELAY, "VBV_DELAY" },
	{PEAK_BITRATE, "PEAK_BITRATE" },
	{MIN_FRAME_QP, "MIN_FRAME_QP" },
	{I_FRAME_MIN_QP, "I_FRAME_MIN_QP" },
	{P_FRAME_MIN_QP, "P_FRAME_MIN_QP" },
	{B_FRAME_MIN_QP, "B_FRAME_MIN_QP" },
	{MAX_FRAME_QP, "MAX_FRAME_QP" },
	{I_FRAME_MAX_QP, "I_FRAME_MAX_QP" },
	{P_FRAME_MAX_QP, "P_FRAME_MAX_QP" },
	{B_FRAME_MAX_QP, "B_FRAME_MAX_QP" },
	{I_FRAME_QP, "I_FRAME_QP" },
	{P_FRAME_QP, "P_FRAME_QP" },
	{B_FRAME_QP, "B_FRAME_QP" },
	{LAYER_TYPE, "LAYER_TYPE" },
	{LAYER_ENABLE, "LAYER_ENABLE" },
	{ENH_LAYER_COUNT, "ENH_LAYER_COUNT" },
	{L0_BR, "L0_BR" },
	{L1_BR, "L1_BR" },
	{L2_BR, "L2_BR" },
	{L3_BR, "L3_BR" },
	{L4_BR, "L4_BR" },
	{L5_BR, "L5_BR" },
	{ENTROPY_MODE, "ENTROPY_MODE" },
	{PROFILE, "PROFILE" },
	{LEVEL, "LEVEL" },
	{HEVC_TIER, "HEVC_TIER" },
	{AV1_TIER, "AV1_TIER" },
	{LF_MODE, "LF_MODE" },
	{LF_ALPHA, "LF_ALPHA" },
	{LF_BETA, "LF_BETA" },
	{SLICE_MODE, "SLICE_MODE" },
	{SLICE_MAX_BYTES, "SLICE_MAX_BYTES" },
	{SLICE_MAX_MB, "SLICE_MAX_MB" },
	{MB_RC, "MB_RC" },
	{TRANSFORM_8X8, "TRANSFORM_8X8" },
	{CHROMA_QP_INDEX_OFFSET, "CHROMA_QP_INDEX_OFFSET" },
	{DISPLAY_DELAY_ENABLE, "DISPLAY_DELAY_ENABLE" },
	{DISPLAY_DELAY, "DISPLAY_DELAY" },
	{CONCEAL_COLOR_8BIT, "CONCEAL_COLOR_8BIT" },
	{CONCEAL_COLOR_10BIT, "CONCEAL_COLOR_10BIT" },
	{STAGE, "STAGE" },
	{PIPE, "PIPE" },
	{POC, "POC" },
	{QUALITY_MODE, "QUALITY_MODE" },
	{CODED_FRAMES, "CODED_FRAMES" },
	{BIT_DEPTH, "BIT_DEPTH" },
	{FILM_GRAIN, "FILM_GRAIN" },
	{SUPER_BLOCK, "SUPER_BLOCK" },
	{CODEC_CONFIG, "CODEC_CONFIG" },
	{BITSTREAM_SIZE_OVERWRITE, "BITSTREAM_SIZE_OVERWRITE" },
	{THUMBNAIL_MODE, "THUMBNAIL_MODE" },
	{DEFAULT_HEADER, "DEFAULT_HEADER" },
	{RAP_FRAME, "RAP_FRAME" },
	{SEQ_CHANGE_AT_SYNC_FRAME, "SEQ_CHANGE_AT_SYNC_FRAME" },
	{PRIORITY, "PRIORITY" },
	{ENC_IP_CR, "ENC_IP_CR" },
	{DPB_LIST, "DPB_LIST" },
	{ALL_INTRA, "ALL_INTRA" },
	{META_LTR_MARK_USE, "META_LTR_MARK_USE" },
	{META_DPB_MISR, "META_DPB_MISR" },
	{META_OPB_MISR, "META_OPB_MISR" },
	{META_INTERLACE, "META_INTERLACE" },
	{META_TIMESTAMP, "META_TIMESTAMP" },
	{META_CONCEALED_MB_CNT, "META_CONCEALED_MB_CNT" },
	{META_HIST_INFO, "META_HIST_INFO" },
	{META_SEI_MASTERING_DISP, "META_SEI_MASTERING_DISP" },
	{META_SEI_CLL, "META_SEI_CLL" },
	{META_HDR10PLUS, "META_HDR10PLUS" },
	{META_EVA_STATS, "META_EVA_STATS" },
	{META_BUF_TAG, "META_BUF_TAG" },
	{META_DPB_TAG_LIST, "META_DPB_TAG_LIST" },
	{META_OUTPUT_BUF_TAG, "META_OUTPUT_BUF_TAG" },
	{META_SUBFRAME_OUTPUT, "META_SUBFRAME_OUTPUT" },
	{META_ENC_QP_METADATA, "META_ENC_QP_METADATA" },
	{META_ROI_INFO, "META_ROI_INFO" },
	{META_DEC_QP_METADATA, "META_DEC_QP_METADATA" },
	{COMPLEXITY, "COMPLEXITY" },
	{META_MAX_NUM_REORDER_FRAMES, "META_MAX_NUM_REORDER_FRAMES"},
	{INST_CAP_MAX, "INST_CAP_MAX" },
};
  186. const char *cap_name(enum msm_vidc_inst_capability_type cap)
  187. {
  188. const char *name = "UNKNOWN CAP";
  189. if (cap > ARRAY_SIZE(cap_name_arr))
  190. goto exit;
  191. if (cap_name_arr[cap].cap != cap)
  192. goto exit;
  193. name = cap_name_arr[cap].name;
  194. exit:
  195. return name;
  196. }
/* Pairs a buffer type with its printable name, for debug logging. */
struct msm_vidc_buf_type_name {
	enum msm_vidc_buffer_type type;
	char *name;
};
/*
 * Name table in enum order. The buffer-type enum is 1-based, so the entry
 * for value 'type' lives at index type - 1; buf_name() cross-checks the
 * .type field before trusting an entry.
 */
static const struct msm_vidc_buf_type_name buf_type_name_arr[] = {
	{MSM_VIDC_BUF_INPUT, "INPUT" },
	{MSM_VIDC_BUF_OUTPUT, "OUTPUT" },
	{MSM_VIDC_BUF_INPUT_META, "INPUT_META" },
	{MSM_VIDC_BUF_OUTPUT_META, "OUTPUT_META" },
	{MSM_VIDC_BUF_READ_ONLY, "READ_ONLY" },
	{MSM_VIDC_BUF_QUEUE, "QUEUE" },
	{MSM_VIDC_BUF_BIN, "BIN" },
	{MSM_VIDC_BUF_ARP, "ARP" },
	{MSM_VIDC_BUF_COMV, "COMV" },
	{MSM_VIDC_BUF_NON_COMV, "NON_COMV" },
	{MSM_VIDC_BUF_LINE, "LINE" },
	{MSM_VIDC_BUF_DPB, "DPB" },
	{MSM_VIDC_BUF_PERSIST, "PERSIST" },
	{MSM_VIDC_BUF_VPSS, "VPSS" },
};
  217. const char *buf_name(enum msm_vidc_buffer_type type)
  218. {
  219. const char *name = "UNKNOWN BUF";
  220. if (!type || type > ARRAY_SIZE(buf_type_name_arr))
  221. goto exit;
  222. if (buf_type_name_arr[type - 1].type != type)
  223. goto exit;
  224. name = buf_type_name_arr[type - 1].name;
  225. exit:
  226. return name;
  227. }
/* Pairs an allow-decision value with its printable name, for logging. */
struct msm_vidc_allow_name {
	enum msm_vidc_allow allow;
	char *name;
};
/*
 * Name table indexed DIRECTLY by the allow value (0-based); allow_name()
 * cross-checks the .allow field against the index before trusting an entry.
 */
static const struct msm_vidc_allow_name inst_allow_name_arr[] = {
	{MSM_VIDC_DISALLOW, "MSM_VIDC_DISALLOW" },
	{MSM_VIDC_ALLOW, "MSM_VIDC_ALLOW" },
	{MSM_VIDC_DEFER, "MSM_VIDC_DEFER" },
	{MSM_VIDC_DISCARD, "MSM_VIDC_DISCARD" },
	{MSM_VIDC_IGNORE, "MSM_VIDC_IGNORE" },
};
  239. const char *allow_name(enum msm_vidc_allow allow)
  240. {
  241. const char *name = "UNKNOWN";
  242. if (allow > ARRAY_SIZE(inst_allow_name_arr))
  243. goto exit;
  244. if (inst_allow_name_arr[allow].allow != allow)
  245. goto exit;
  246. name = inst_allow_name_arr[allow].name;
  247. exit:
  248. return name;
  249. }
/* Pairs an instance state with its printable name, for logging. */
struct msm_vidc_inst_state_name {
	enum msm_vidc_inst_state state;
	char *name;
};
/*
 * Name table in enum order. The instance-state enum is 1-based, so the
 * entry for value 'state' lives at index state - 1; state_name()
 * cross-checks the .state field before trusting an entry.
 */
static const struct msm_vidc_inst_state_name inst_state_name_arr[] = {
	{MSM_VIDC_OPEN, "OPEN" },
	{MSM_VIDC_START_INPUT, "START_INPUT" },
	{MSM_VIDC_START_OUTPUT, "START_OUTPUT" },
	{MSM_VIDC_START, "START" },
	{MSM_VIDC_DRC, "DRC" },
	{MSM_VIDC_DRC_LAST_FLAG, "DRC_LAST_FLAG" },
	{MSM_VIDC_DRAIN, "DRAIN" },
	{MSM_VIDC_DRAIN_LAST_FLAG, "DRAIN_LAST_FLAG" },
	{MSM_VIDC_DRC_DRAIN, "DRC_DRAIN" },
	{MSM_VIDC_DRC_DRAIN_LAST_FLAG, "DRC_DRAIN_LAST_FLAG" },
	{MSM_VIDC_DRAIN_START_INPUT, "DRAIN_START_INPUT" },
	{MSM_VIDC_ERROR, "ERROR" },
};
  268. const char *state_name(enum msm_vidc_inst_state state)
  269. {
  270. const char *name = "UNKNOWN STATE";
  271. if (!state || state > ARRAY_SIZE(inst_state_name_arr))
  272. goto exit;
  273. if (inst_state_name_arr[state - 1].state != state)
  274. goto exit;
  275. name = inst_state_name_arr[state - 1].name;
  276. exit:
  277. return name;
  278. }
/* Pairs a core state with its printable name. */
struct msm_vidc_core_state_name {
	enum msm_vidc_core_state state;
	char *name;
};
/*
 * Name table for enum msm_vidc_core_state; indexed directly by the enum
 * value (0-based), so entry order must match the enum declaration.
 */
static const struct msm_vidc_core_state_name core_state_name_arr[] = {
	{MSM_VIDC_CORE_DEINIT, "CORE_DEINIT" },
	{MSM_VIDC_CORE_INIT_WAIT, "CORE_INIT_WAIT" },
	{MSM_VIDC_CORE_INIT, "CORE_INIT" },
};
  288. const char *core_state_name(enum msm_vidc_core_state state)
  289. {
  290. const char *name = "UNKNOWN STATE";
  291. if (state >= ARRAY_SIZE(core_state_name_arr))
  292. goto exit;
  293. if (core_state_name_arr[state].state != state)
  294. goto exit;
  295. name = core_state_name_arr[state].name;
  296. exit:
  297. return name;
  298. }
  299. const char *v4l2_type_name(u32 port)
  300. {
  301. switch (port) {
  302. case INPUT_MPLANE: return "INPUT";
  303. case OUTPUT_MPLANE: return "OUTPUT";
  304. case INPUT_META_PLANE: return "INPUT_META";
  305. case OUTPUT_META_PLANE: return "OUTPUT_META";
  306. }
  307. return "UNKNOWN";
  308. }
  309. const char *v4l2_pixelfmt_name(u32 pixfmt)
  310. {
  311. switch (pixfmt) {
  312. /* raw port: color format */
  313. case V4L2_PIX_FMT_NV12: return "NV12";
  314. case V4L2_PIX_FMT_NV21: return "NV21";
  315. case V4L2_PIX_FMT_VIDC_NV12C: return "NV12C";
  316. case V4L2_PIX_FMT_VIDC_P010: return "P010";
  317. case V4L2_PIX_FMT_VIDC_TP10C: return "TP10C";
  318. case V4L2_PIX_FMT_RGBA32: return "RGBA";
  319. case V4L2_PIX_FMT_VIDC_ARGB32C: return "RGBAC";
  320. /* bitstream port: codec type */
  321. case V4L2_PIX_FMT_H264: return "AVC";
  322. case V4L2_PIX_FMT_HEVC: return "HEVC";
  323. case V4L2_PIX_FMT_HEIC: return "HEIC";
  324. case V4L2_PIX_FMT_VP9: return "VP9";
  325. case V4L2_PIX_FMT_AV1: return "AV1";
  326. /* meta port */
  327. case V4L2_META_FMT_VIDC: return "META";
  328. }
  329. return "UNKNOWN";
  330. }
/*
 * print_vidc_buffer() - log one driver buffer plus running queue counters.
 *
 * Emits a single dprintk_inst() line (gated by @tag/@tag_str) describing
 * @vbuf — index, fd, offsets, device address, dma-buf inode/refcount,
 * sizes, flags, timestamp, attributes — followed by the instance's
 * etb/ebd/ftb/fbd debug counters, and mirrors the event into the
 * trace buffer.  Silently returns on any NULL argument.
 */
void print_vidc_buffer(u32 tag, const char *tag_str, const char *str, struct msm_vidc_inst *inst,
	struct msm_vidc_buffer *vbuf)
{
	struct dma_buf *dbuf;
	struct inode *f_inode;
	unsigned long inode_num = 0;	/* 0 when no backing dma-buf/file */
	long ref_count = -1;		/* -1 when refcount is unavailable */

	if (!inst || !vbuf || !tag_str || !str)
		return;

	/* Identify the buffer by its dma-buf file inode and file refcount. */
	dbuf = (struct dma_buf *)vbuf->dmabuf;
	if (dbuf && dbuf->file) {
		f_inode = file_inode(dbuf->file);
		if (f_inode) {
			inode_num = f_inode->i_ino;
			ref_count = file_count(dbuf->file);
		}
	}

	dprintk_inst(tag, tag_str, inst,
		"%s: %s: idx %2d fd %3d off %d daddr %#llx inode %8lu ref %2ld size %8d filled %8d flags %#x ts %8lld attr %#x counts(etb ebd ftb fbd) %4llu %4llu %4llu %4llu\n",
		str, buf_name(vbuf->type),
		vbuf->index, vbuf->fd, vbuf->data_offset,
		vbuf->device_addr, inode_num, ref_count, vbuf->buffer_size, vbuf->data_size,
		vbuf->flags, vbuf->timestamp, vbuf->attr, inst->debug_count.etb,
		inst->debug_count.ebd, inst->debug_count.ftb, inst->debug_count.fbd);

	trace_msm_v4l2_vidc_buffer_event_log(inst, str, buf_name(vbuf->type), vbuf,
		inode_num, ref_count);
}
  358. void print_vb2_buffer(const char *str, struct msm_vidc_inst *inst,
  359. struct vb2_buffer *vb2)
  360. {
  361. if (!inst || !vb2)
  362. return;
  363. if (vb2->type == INPUT_MPLANE || vb2->type == OUTPUT_MPLANE) {
  364. i_vpr_e(inst,
  365. "%s: %s: idx %2d fd %d off %d size %d filled %d\n",
  366. str, vb2->type == INPUT_MPLANE ? "INPUT" : "OUTPUT",
  367. vb2->index, vb2->planes[0].m.fd,
  368. vb2->planes[0].data_offset, vb2->planes[0].length,
  369. vb2->planes[0].bytesused);
  370. } else if (vb2->type == INPUT_META_PLANE || vb2->type == OUTPUT_META_PLANE) {
  371. i_vpr_e(inst,
  372. "%s: %s: idx %2d fd %d off %d size %d filled %d\n",
  373. str, vb2->type == INPUT_MPLANE ? "INPUT_META" : "OUTPUT_META",
  374. vb2->index, vb2->planes[0].m.fd,
  375. vb2->planes[0].data_offset, vb2->planes[0].length,
  376. vb2->planes[0].bytesused);
  377. }
  378. }
  379. static void __fatal_error(bool fatal)
  380. {
  381. WARN_ON(fatal);
  382. }
  383. static int __strict_check(struct msm_vidc_core *core, const char *function)
  384. {
  385. bool fatal = !mutex_is_locked(&core->lock);
  386. __fatal_error(fatal);
  387. if (fatal)
  388. d_vpr_e("%s: strict check failed\n", function);
  389. return fatal ? -EINVAL : 0;
  390. }
  391. enum msm_vidc_buffer_type v4l2_type_to_driver(u32 type, const char *func)
  392. {
  393. enum msm_vidc_buffer_type buffer_type = 0;
  394. switch (type) {
  395. case INPUT_MPLANE:
  396. buffer_type = MSM_VIDC_BUF_INPUT;
  397. break;
  398. case OUTPUT_MPLANE:
  399. buffer_type = MSM_VIDC_BUF_OUTPUT;
  400. break;
  401. case INPUT_META_PLANE:
  402. buffer_type = MSM_VIDC_BUF_INPUT_META;
  403. break;
  404. case OUTPUT_META_PLANE:
  405. buffer_type = MSM_VIDC_BUF_OUTPUT_META;
  406. break;
  407. default:
  408. d_vpr_e("%s: invalid v4l2 buffer type %#x\n", func, type);
  409. break;
  410. }
  411. return buffer_type;
  412. }
  413. u32 v4l2_type_from_driver(enum msm_vidc_buffer_type buffer_type,
  414. const char *func)
  415. {
  416. u32 type = 0;
  417. switch (buffer_type) {
  418. case MSM_VIDC_BUF_INPUT:
  419. type = INPUT_MPLANE;
  420. break;
  421. case MSM_VIDC_BUF_OUTPUT:
  422. type = OUTPUT_MPLANE;
  423. break;
  424. case MSM_VIDC_BUF_INPUT_META:
  425. type = INPUT_META_PLANE;
  426. break;
  427. case MSM_VIDC_BUF_OUTPUT_META:
  428. type = OUTPUT_META_PLANE;
  429. break;
  430. default:
  431. d_vpr_e("%s: invalid driver buffer type %d\n",
  432. func, buffer_type);
  433. break;
  434. }
  435. return type;
  436. }
  437. enum msm_vidc_codec_type v4l2_codec_to_driver(u32 v4l2_codec, const char *func)
  438. {
  439. enum msm_vidc_codec_type codec = 0;
  440. switch (v4l2_codec) {
  441. case V4L2_PIX_FMT_H264:
  442. codec = MSM_VIDC_H264;
  443. break;
  444. case V4L2_PIX_FMT_HEVC:
  445. codec = MSM_VIDC_HEVC;
  446. break;
  447. case V4L2_PIX_FMT_VP9:
  448. codec = MSM_VIDC_VP9;
  449. break;
  450. case V4L2_PIX_FMT_AV1:
  451. codec = MSM_VIDC_AV1;
  452. break;
  453. case V4L2_PIX_FMT_HEIC:
  454. codec = MSM_VIDC_HEIC;
  455. break;
  456. default:
  457. d_vpr_e("%s: invalid v4l2 codec %#x\n", func, v4l2_codec);
  458. break;
  459. }
  460. return codec;
  461. }
  462. u32 v4l2_codec_from_driver(enum msm_vidc_codec_type codec, const char *func)
  463. {
  464. u32 v4l2_codec = 0;
  465. switch (codec) {
  466. case MSM_VIDC_H264:
  467. v4l2_codec = V4L2_PIX_FMT_H264;
  468. break;
  469. case MSM_VIDC_HEVC:
  470. v4l2_codec = V4L2_PIX_FMT_HEVC;
  471. break;
  472. case MSM_VIDC_VP9:
  473. v4l2_codec = V4L2_PIX_FMT_VP9;
  474. break;
  475. case MSM_VIDC_AV1:
  476. v4l2_codec = V4L2_PIX_FMT_AV1;
  477. break;
  478. case MSM_VIDC_HEIC:
  479. v4l2_codec = V4L2_PIX_FMT_HEIC;
  480. break;
  481. default:
  482. d_vpr_e("%s: invalid driver codec %#x\n", func, codec);
  483. break;
  484. }
  485. return v4l2_codec;
  486. }
  487. enum msm_vidc_colorformat_type v4l2_colorformat_to_driver(u32 v4l2_colorformat,
  488. const char *func)
  489. {
  490. enum msm_vidc_colorformat_type colorformat = 0;
  491. switch (v4l2_colorformat) {
  492. case V4L2_PIX_FMT_NV12:
  493. colorformat = MSM_VIDC_FMT_NV12;
  494. break;
  495. case V4L2_PIX_FMT_NV21:
  496. colorformat = MSM_VIDC_FMT_NV21;
  497. break;
  498. case V4L2_PIX_FMT_VIDC_NV12C:
  499. colorformat = MSM_VIDC_FMT_NV12C;
  500. break;
  501. case V4L2_PIX_FMT_VIDC_TP10C:
  502. colorformat = MSM_VIDC_FMT_TP10C;
  503. break;
  504. case V4L2_PIX_FMT_RGBA32:
  505. colorformat = MSM_VIDC_FMT_RGBA8888;
  506. break;
  507. case V4L2_PIX_FMT_VIDC_ARGB32C:
  508. colorformat = MSM_VIDC_FMT_RGBA8888C;
  509. break;
  510. case V4L2_PIX_FMT_VIDC_P010:
  511. colorformat = MSM_VIDC_FMT_P010;
  512. break;
  513. default:
  514. d_vpr_e("%s: invalid v4l2 color format %#x\n",
  515. func, v4l2_colorformat);
  516. break;
  517. }
  518. return colorformat;
  519. }
  520. u32 v4l2_colorformat_from_driver(enum msm_vidc_colorformat_type colorformat,
  521. const char *func)
  522. {
  523. u32 v4l2_colorformat = 0;
  524. switch (colorformat) {
  525. case MSM_VIDC_FMT_NV12:
  526. v4l2_colorformat = V4L2_PIX_FMT_NV12;
  527. break;
  528. case MSM_VIDC_FMT_NV21:
  529. v4l2_colorformat = V4L2_PIX_FMT_NV21;
  530. break;
  531. case MSM_VIDC_FMT_NV12C:
  532. v4l2_colorformat = V4L2_PIX_FMT_VIDC_NV12C;
  533. break;
  534. case MSM_VIDC_FMT_TP10C:
  535. v4l2_colorformat = V4L2_PIX_FMT_VIDC_TP10C;
  536. break;
  537. case MSM_VIDC_FMT_RGBA8888:
  538. v4l2_colorformat = V4L2_PIX_FMT_RGBA32;
  539. break;
  540. case MSM_VIDC_FMT_RGBA8888C:
  541. v4l2_colorformat = V4L2_PIX_FMT_VIDC_ARGB32C;
  542. break;
  543. case MSM_VIDC_FMT_P010:
  544. v4l2_colorformat = V4L2_PIX_FMT_VIDC_P010;
  545. break;
  546. default:
  547. d_vpr_e("%s: invalid driver color format %#x\n",
  548. func, colorformat);
  549. break;
  550. }
  551. return v4l2_colorformat;
  552. }
  553. u32 v4l2_color_primaries_to_driver(struct msm_vidc_inst *inst,
  554. u32 v4l2_primaries, const char *func)
  555. {
  556. u32 vidc_color_primaries = MSM_VIDC_PRIMARIES_RESERVED;
  557. switch(v4l2_primaries) {
  558. case V4L2_COLORSPACE_DEFAULT:
  559. vidc_color_primaries = MSM_VIDC_PRIMARIES_RESERVED;
  560. break;
  561. case V4L2_COLORSPACE_REC709:
  562. vidc_color_primaries = MSM_VIDC_PRIMARIES_BT709;
  563. break;
  564. case V4L2_COLORSPACE_470_SYSTEM_M:
  565. vidc_color_primaries = MSM_VIDC_PRIMARIES_BT470_SYSTEM_M;
  566. break;
  567. case V4L2_COLORSPACE_470_SYSTEM_BG:
  568. vidc_color_primaries = MSM_VIDC_PRIMARIES_BT470_SYSTEM_BG;
  569. break;
  570. case V4L2_COLORSPACE_SMPTE170M:
  571. vidc_color_primaries = MSM_VIDC_PRIMARIES_BT601_525;
  572. break;
  573. case V4L2_COLORSPACE_SMPTE240M:
  574. vidc_color_primaries = MSM_VIDC_PRIMARIES_SMPTE_ST240M;
  575. break;
  576. case V4L2_COLORSPACE_VIDC_GENERIC_FILM:
  577. vidc_color_primaries = MSM_VIDC_PRIMARIES_GENERIC_FILM;
  578. break;
  579. case V4L2_COLORSPACE_BT2020:
  580. vidc_color_primaries = MSM_VIDC_PRIMARIES_BT2020;
  581. break;
  582. case V4L2_COLORSPACE_DCI_P3:
  583. vidc_color_primaries = MSM_VIDC_PRIMARIES_SMPTE_RP431_2;
  584. break;
  585. case V4L2_COLORSPACE_VIDC_EG431:
  586. vidc_color_primaries = MSM_VIDC_PRIMARIES_SMPTE_EG431_1;
  587. break;
  588. case V4L2_COLORSPACE_VIDC_EBU_TECH:
  589. vidc_color_primaries = MSM_VIDC_PRIMARIES_SMPTE_EBU_TECH;
  590. break;
  591. default:
  592. i_vpr_e(inst, "%s: invalid v4l2 color primaries %d\n",
  593. func, v4l2_primaries);
  594. break;
  595. }
  596. return vidc_color_primaries;
  597. }
  598. u32 v4l2_color_primaries_from_driver(struct msm_vidc_inst *inst,
  599. u32 vidc_color_primaries, const char *func)
  600. {
  601. u32 v4l2_primaries = V4L2_COLORSPACE_DEFAULT;
  602. switch(vidc_color_primaries) {
  603. case MSM_VIDC_PRIMARIES_UNSPECIFIED:
  604. v4l2_primaries = V4L2_COLORSPACE_DEFAULT;
  605. break;
  606. case MSM_VIDC_PRIMARIES_BT709:
  607. v4l2_primaries = V4L2_COLORSPACE_REC709;
  608. break;
  609. case MSM_VIDC_PRIMARIES_BT470_SYSTEM_M:
  610. v4l2_primaries = V4L2_COLORSPACE_470_SYSTEM_M;
  611. break;
  612. case MSM_VIDC_PRIMARIES_BT470_SYSTEM_BG:
  613. v4l2_primaries = V4L2_COLORSPACE_470_SYSTEM_BG;
  614. break;
  615. case MSM_VIDC_PRIMARIES_BT601_525:
  616. v4l2_primaries = V4L2_COLORSPACE_SMPTE170M;
  617. break;
  618. case MSM_VIDC_PRIMARIES_SMPTE_ST240M:
  619. v4l2_primaries = V4L2_COLORSPACE_SMPTE240M;
  620. break;
  621. case MSM_VIDC_PRIMARIES_GENERIC_FILM:
  622. v4l2_primaries = V4L2_COLORSPACE_VIDC_GENERIC_FILM;
  623. break;
  624. case MSM_VIDC_PRIMARIES_BT2020:
  625. v4l2_primaries = V4L2_COLORSPACE_BT2020;
  626. break;
  627. case MSM_VIDC_PRIMARIES_SMPTE_RP431_2:
  628. v4l2_primaries = V4L2_COLORSPACE_DCI_P3;
  629. break;
  630. case MSM_VIDC_PRIMARIES_SMPTE_EG431_1:
  631. v4l2_primaries = V4L2_COLORSPACE_VIDC_EG431;
  632. break;
  633. case MSM_VIDC_PRIMARIES_SMPTE_EBU_TECH:
  634. v4l2_primaries = V4L2_COLORSPACE_VIDC_EBU_TECH;
  635. break;
  636. default:
  637. i_vpr_e(inst, "%s: invalid hfi color primaries %d\n",
  638. func, vidc_color_primaries);
  639. break;
  640. }
  641. return v4l2_primaries;
  642. }
  643. u32 v4l2_transfer_char_to_driver(struct msm_vidc_inst *inst,
  644. u32 v4l2_transfer_char, const char *func)
  645. {
  646. u32 vidc_transfer_char = MSM_VIDC_TRANSFER_RESERVED;
  647. switch(v4l2_transfer_char) {
  648. case V4L2_XFER_FUNC_DEFAULT:
  649. vidc_transfer_char = MSM_VIDC_TRANSFER_RESERVED;
  650. break;
  651. case V4L2_XFER_FUNC_709:
  652. vidc_transfer_char = MSM_VIDC_TRANSFER_BT709;
  653. break;
  654. case V4L2_XFER_FUNC_VIDC_BT470_SYSTEM_M:
  655. vidc_transfer_char = MSM_VIDC_TRANSFER_BT470_SYSTEM_M;
  656. break;
  657. case V4L2_XFER_FUNC_VIDC_BT470_SYSTEM_BG:
  658. vidc_transfer_char = MSM_VIDC_TRANSFER_BT470_SYSTEM_BG;
  659. break;
  660. case V4L2_XFER_FUNC_VIDC_BT601_525_OR_625:
  661. vidc_transfer_char = MSM_VIDC_TRANSFER_BT601_525_OR_625;
  662. break;
  663. case V4L2_XFER_FUNC_SMPTE240M:
  664. vidc_transfer_char = MSM_VIDC_TRANSFER_SMPTE_ST240M;
  665. break;
  666. case V4L2_XFER_FUNC_VIDC_LINEAR:
  667. vidc_transfer_char = MSM_VIDC_TRANSFER_LINEAR;
  668. break;
  669. case V4L2_XFER_FUNC_VIDC_XVYCC:
  670. vidc_transfer_char = MSM_VIDC_TRANSFER_XVYCC;
  671. break;
  672. case V4L2_XFER_FUNC_VIDC_BT1361:
  673. vidc_transfer_char = MSM_VIDC_TRANSFER_BT1361_0;
  674. break;
  675. case V4L2_XFER_FUNC_SRGB:
  676. vidc_transfer_char = MSM_VIDC_TRANSFER_SRGB_SYCC;
  677. break;
  678. case V4L2_XFER_FUNC_VIDC_BT2020:
  679. vidc_transfer_char = MSM_VIDC_TRANSFER_BT2020_14;
  680. break;
  681. case V4L2_XFER_FUNC_SMPTE2084:
  682. vidc_transfer_char = MSM_VIDC_TRANSFER_SMPTE_ST2084_PQ;
  683. break;
  684. case V4L2_XFER_FUNC_VIDC_ST428:
  685. vidc_transfer_char = MSM_VIDC_TRANSFER_SMPTE_ST428_1;
  686. break;
  687. case V4L2_XFER_FUNC_VIDC_HLG:
  688. vidc_transfer_char = MSM_VIDC_TRANSFER_BT2100_2_HLG;
  689. break;
  690. default:
  691. i_vpr_e(inst, "%s: invalid v4l2 transfer char %d\n",
  692. func, v4l2_transfer_char);
  693. break;
  694. }
  695. return vidc_transfer_char;
  696. }
  697. u32 v4l2_transfer_char_from_driver(struct msm_vidc_inst *inst,
  698. u32 vidc_transfer_char, const char *func)
  699. {
  700. u32 v4l2_transfer_char = V4L2_XFER_FUNC_DEFAULT;
  701. switch(vidc_transfer_char) {
  702. case MSM_VIDC_TRANSFER_UNSPECIFIED:
  703. v4l2_transfer_char = V4L2_XFER_FUNC_DEFAULT;
  704. break;
  705. case MSM_VIDC_TRANSFER_BT709:
  706. v4l2_transfer_char = V4L2_XFER_FUNC_709;
  707. break;
  708. case MSM_VIDC_TRANSFER_BT470_SYSTEM_M:
  709. v4l2_transfer_char = V4L2_XFER_FUNC_VIDC_BT470_SYSTEM_M;
  710. break;
  711. case MSM_VIDC_TRANSFER_BT470_SYSTEM_BG:
  712. v4l2_transfer_char = V4L2_XFER_FUNC_VIDC_BT470_SYSTEM_BG;
  713. break;
  714. case MSM_VIDC_TRANSFER_BT601_525_OR_625:
  715. v4l2_transfer_char = V4L2_XFER_FUNC_VIDC_BT601_525_OR_625;
  716. break;
  717. case MSM_VIDC_TRANSFER_SMPTE_ST240M:
  718. v4l2_transfer_char = V4L2_XFER_FUNC_SMPTE240M;
  719. break;
  720. case MSM_VIDC_TRANSFER_LINEAR:
  721. v4l2_transfer_char = V4L2_XFER_FUNC_VIDC_LINEAR;
  722. break;
  723. case MSM_VIDC_TRANSFER_XVYCC:
  724. v4l2_transfer_char = V4L2_XFER_FUNC_VIDC_XVYCC;
  725. break;
  726. case MSM_VIDC_TRANSFER_BT1361_0:
  727. v4l2_transfer_char = V4L2_XFER_FUNC_VIDC_BT1361;
  728. break;
  729. case MSM_VIDC_TRANSFER_SRGB_SYCC:
  730. v4l2_transfer_char = V4L2_XFER_FUNC_SRGB;
  731. break;
  732. case MSM_VIDC_TRANSFER_BT2020_14:
  733. case MSM_VIDC_TRANSFER_BT2020_15:
  734. v4l2_transfer_char = V4L2_XFER_FUNC_VIDC_BT2020;
  735. break;
  736. case MSM_VIDC_TRANSFER_SMPTE_ST2084_PQ:
  737. v4l2_transfer_char = V4L2_XFER_FUNC_SMPTE2084;
  738. break;
  739. case MSM_VIDC_TRANSFER_SMPTE_ST428_1:
  740. v4l2_transfer_char = V4L2_XFER_FUNC_VIDC_ST428;
  741. break;
  742. case MSM_VIDC_TRANSFER_BT2100_2_HLG:
  743. v4l2_transfer_char = V4L2_XFER_FUNC_VIDC_HLG;
  744. break;
  745. default:
  746. i_vpr_e(inst, "%s: invalid hfi transfer char %d\n",
  747. func, vidc_transfer_char);
  748. break;
  749. }
  750. return v4l2_transfer_char;
  751. }
  752. u32 v4l2_matrix_coeff_to_driver(struct msm_vidc_inst *inst,
  753. u32 v4l2_matrix_coeff, const char *func)
  754. {
  755. u32 vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_RESERVED;
  756. switch(v4l2_matrix_coeff) {
  757. case V4L2_YCBCR_ENC_DEFAULT:
  758. vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_RESERVED;
  759. break;
  760. case V4L2_YCBCR_VIDC_SRGB_OR_SMPTE_ST428:
  761. vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_SRGB_SMPTE_ST428_1;
  762. break;
  763. case V4L2_YCBCR_ENC_709:
  764. case V4L2_YCBCR_ENC_XV709:
  765. vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_BT709;
  766. break;
  767. case V4L2_YCBCR_VIDC_FCC47_73_682:
  768. vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_FCC_TITLE_47;
  769. break;
  770. case V4L2_YCBCR_ENC_XV601:
  771. vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_BT470_SYS_BG_OR_BT601_625;
  772. break;
  773. case V4L2_YCBCR_ENC_601:
  774. vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_BT601_525_BT1358_525_OR_625;
  775. break;
  776. case V4L2_YCBCR_ENC_SMPTE240M:
  777. vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_SMPTE_ST240;
  778. break;
  779. case V4L2_YCBCR_ENC_BT2020:
  780. vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_BT2020_NON_CONSTANT;
  781. break;
  782. case V4L2_YCBCR_ENC_BT2020_CONST_LUM:
  783. vidc_matrix_coeff = MSM_VIDC_MATRIX_COEFF_BT2020_CONSTANT;
  784. break;
  785. default:
  786. i_vpr_e(inst, "%s: invalid v4l2 matrix coeff %d\n",
  787. func, v4l2_matrix_coeff);
  788. break;
  789. }
  790. return vidc_matrix_coeff;
  791. }
  792. u32 v4l2_matrix_coeff_from_driver(struct msm_vidc_inst *inst,
  793. u32 vidc_matrix_coeff, const char *func)
  794. {
  795. u32 v4l2_matrix_coeff = V4L2_YCBCR_ENC_DEFAULT;
  796. switch(vidc_matrix_coeff) {
  797. case MSM_VIDC_MATRIX_COEFF_SRGB_SMPTE_ST428_1:
  798. v4l2_matrix_coeff = V4L2_YCBCR_VIDC_SRGB_OR_SMPTE_ST428;
  799. break;
  800. case MSM_VIDC_MATRIX_COEFF_BT709:
  801. v4l2_matrix_coeff = V4L2_YCBCR_ENC_709;
  802. break;
  803. case MSM_VIDC_MATRIX_COEFF_UNSPECIFIED:
  804. v4l2_matrix_coeff = V4L2_YCBCR_ENC_DEFAULT;
  805. break;
  806. case MSM_VIDC_MATRIX_COEFF_FCC_TITLE_47:
  807. v4l2_matrix_coeff = V4L2_YCBCR_VIDC_FCC47_73_682;
  808. break;
  809. case MSM_VIDC_MATRIX_COEFF_BT470_SYS_BG_OR_BT601_625:
  810. v4l2_matrix_coeff = V4L2_YCBCR_ENC_XV601;
  811. break;
  812. case MSM_VIDC_MATRIX_COEFF_BT601_525_BT1358_525_OR_625:
  813. v4l2_matrix_coeff = V4L2_YCBCR_ENC_601;
  814. break;
  815. case MSM_VIDC_MATRIX_COEFF_SMPTE_ST240:
  816. v4l2_matrix_coeff = V4L2_YCBCR_ENC_SMPTE240M;
  817. break;
  818. case MSM_VIDC_MATRIX_COEFF_BT2020_NON_CONSTANT:
  819. v4l2_matrix_coeff = V4L2_YCBCR_ENC_BT2020;
  820. break;
  821. case MSM_VIDC_MATRIX_COEFF_BT2020_CONSTANT:
  822. v4l2_matrix_coeff = V4L2_YCBCR_ENC_BT2020_CONST_LUM;
  823. break;
  824. default:
  825. i_vpr_e(inst, "%s: invalid hfi matrix coeff %d\n",
  826. func, vidc_matrix_coeff);
  827. break;
  828. }
  829. return v4l2_matrix_coeff;
  830. }
  831. int v4l2_type_to_driver_port(struct msm_vidc_inst *inst, u32 type,
  832. const char *func)
  833. {
  834. int port;
  835. if (type == INPUT_MPLANE) {
  836. port = INPUT_PORT;
  837. } else if (type == INPUT_META_PLANE) {
  838. port = INPUT_META_PORT;
  839. } else if (type == OUTPUT_MPLANE) {
  840. port = OUTPUT_PORT;
  841. } else if (type == OUTPUT_META_PLANE) {
  842. port = OUTPUT_META_PORT;
  843. } else {
  844. i_vpr_e(inst, "%s: port not found for v4l2 type %d\n",
  845. func, type);
  846. port = -EINVAL;
  847. }
  848. return port;
  849. }
/*
 * msm_vidc_get_buffer_region() - pick the memory protection region for a
 * buffer type.
 *
 * The region depends on whether the session is secure and whether it is
 * an encode or decode session: the bitstream side of the pipeline is the
 * encoder's output / decoder's input, the pixel side is the opposite
 * port, and firmware-internal buffers split into pixel vs non-pixel
 * pools.  Defaults to MSM_VIDC_NON_SECURE; an unknown buffer type is
 * logged and keeps the default.
 */
u32 msm_vidc_get_buffer_region(struct msm_vidc_inst *inst,
	enum msm_vidc_buffer_type buffer_type, const char *func)
{
	u32 region = MSM_VIDC_NON_SECURE;

	if (!is_secure_session(inst)) {
		switch (buffer_type) {
		case MSM_VIDC_BUF_ARP:
			/* ARP stays in the secure non-pixel pool even for
			 * non-secure sessions. */
			region = MSM_VIDC_SECURE_NONPIXEL;
			break;
		case MSM_VIDC_BUF_INPUT:
			/* encoder input is raw pixels; decoder input is
			 * bitstream */
			if (is_encode_session(inst))
				region = MSM_VIDC_NON_SECURE_PIXEL;
			else
				region = MSM_VIDC_NON_SECURE;
			break;
		case MSM_VIDC_BUF_OUTPUT:
			/* mirror of the input mapping */
			if (is_encode_session(inst))
				region = MSM_VIDC_NON_SECURE;
			else
				region = MSM_VIDC_NON_SECURE_PIXEL;
			break;
		case MSM_VIDC_BUF_DPB:
		case MSM_VIDC_BUF_VPSS:
			region = MSM_VIDC_NON_SECURE_PIXEL;
			break;
		case MSM_VIDC_BUF_INPUT_META:
		case MSM_VIDC_BUF_OUTPUT_META:
		case MSM_VIDC_BUF_BIN:
		case MSM_VIDC_BUF_COMV:
		case MSM_VIDC_BUF_NON_COMV:
		case MSM_VIDC_BUF_LINE:
		case MSM_VIDC_BUF_PERSIST:
			region = MSM_VIDC_NON_SECURE;
			break;
		default:
			i_vpr_e(inst, "%s: invalid driver buffer type %d\n",
				func, buffer_type);
		}
	} else {
		switch (buffer_type) {
		case MSM_VIDC_BUF_INPUT:
			if (is_encode_session(inst))
				region = MSM_VIDC_SECURE_PIXEL;
			else
				region = MSM_VIDC_SECURE_BITSTREAM;
			break;
		case MSM_VIDC_BUF_OUTPUT:
			if (is_encode_session(inst))
				region = MSM_VIDC_SECURE_BITSTREAM;
			else
				region = MSM_VIDC_SECURE_PIXEL;
			break;
		case MSM_VIDC_BUF_INPUT_META:
		case MSM_VIDC_BUF_OUTPUT_META:
			/* metadata is host-visible even in secure sessions */
			region = MSM_VIDC_NON_SECURE;
			break;
		case MSM_VIDC_BUF_DPB:
		case MSM_VIDC_BUF_VPSS:
			region = MSM_VIDC_SECURE_PIXEL;
			break;
		case MSM_VIDC_BUF_BIN:
			region = MSM_VIDC_SECURE_BITSTREAM;
			break;
		case MSM_VIDC_BUF_ARP:
		case MSM_VIDC_BUF_COMV:
		case MSM_VIDC_BUF_NON_COMV:
		case MSM_VIDC_BUF_LINE:
		case MSM_VIDC_BUF_PERSIST:
			region = MSM_VIDC_SECURE_NONPIXEL;
			break;
		default:
			i_vpr_e(inst, "%s: invalid driver buffer type %d\n",
				func, buffer_type);
		}
	}

	return region;
}
  927. struct msm_vidc_buffers *msm_vidc_get_buffers(
  928. struct msm_vidc_inst *inst, enum msm_vidc_buffer_type buffer_type,
  929. const char *func)
  930. {
  931. switch (buffer_type) {
  932. case MSM_VIDC_BUF_INPUT:
  933. return &inst->buffers.input;
  934. case MSM_VIDC_BUF_INPUT_META:
  935. return &inst->buffers.input_meta;
  936. case MSM_VIDC_BUF_OUTPUT:
  937. return &inst->buffers.output;
  938. case MSM_VIDC_BUF_OUTPUT_META:
  939. return &inst->buffers.output_meta;
  940. case MSM_VIDC_BUF_READ_ONLY:
  941. return &inst->buffers.read_only;
  942. case MSM_VIDC_BUF_BIN:
  943. return &inst->buffers.bin;
  944. case MSM_VIDC_BUF_ARP:
  945. return &inst->buffers.arp;
  946. case MSM_VIDC_BUF_COMV:
  947. return &inst->buffers.comv;
  948. case MSM_VIDC_BUF_NON_COMV:
  949. return &inst->buffers.non_comv;
  950. case MSM_VIDC_BUF_LINE:
  951. return &inst->buffers.line;
  952. case MSM_VIDC_BUF_DPB:
  953. return &inst->buffers.dpb;
  954. case MSM_VIDC_BUF_PERSIST:
  955. return &inst->buffers.persist;
  956. case MSM_VIDC_BUF_VPSS:
  957. return &inst->buffers.vpss;
  958. case MSM_VIDC_BUF_QUEUE:
  959. return NULL;
  960. default:
  961. i_vpr_e(inst, "%s: invalid driver buffer type %d\n",
  962. func, buffer_type);
  963. return NULL;
  964. }
  965. }
  966. struct msm_vidc_mappings *msm_vidc_get_mappings(
  967. struct msm_vidc_inst *inst, enum msm_vidc_buffer_type buffer_type,
  968. const char *func)
  969. {
  970. switch (buffer_type) {
  971. case MSM_VIDC_BUF_INPUT:
  972. return &inst->mappings.input;
  973. case MSM_VIDC_BUF_INPUT_META:
  974. return &inst->mappings.input_meta;
  975. case MSM_VIDC_BUF_OUTPUT:
  976. return &inst->mappings.output;
  977. case MSM_VIDC_BUF_OUTPUT_META:
  978. return &inst->mappings.output_meta;
  979. case MSM_VIDC_BUF_BIN:
  980. return &inst->mappings.bin;
  981. case MSM_VIDC_BUF_ARP:
  982. return &inst->mappings.arp;
  983. case MSM_VIDC_BUF_COMV:
  984. return &inst->mappings.comv;
  985. case MSM_VIDC_BUF_NON_COMV:
  986. return &inst->mappings.non_comv;
  987. case MSM_VIDC_BUF_LINE:
  988. return &inst->mappings.line;
  989. case MSM_VIDC_BUF_DPB:
  990. return &inst->mappings.dpb;
  991. case MSM_VIDC_BUF_PERSIST:
  992. return &inst->mappings.persist;
  993. case MSM_VIDC_BUF_VPSS:
  994. return &inst->mappings.vpss;
  995. default:
  996. i_vpr_e(inst, "%s: invalid driver buffer type %d\n",
  997. func, buffer_type);
  998. return NULL;
  999. }
  1000. }
  1001. struct msm_vidc_allocations *msm_vidc_get_allocations(
  1002. struct msm_vidc_inst *inst, enum msm_vidc_buffer_type buffer_type,
  1003. const char *func)
  1004. {
  1005. switch (buffer_type) {
  1006. case MSM_VIDC_BUF_BIN:
  1007. return &inst->allocations.bin;
  1008. case MSM_VIDC_BUF_ARP:
  1009. return &inst->allocations.arp;
  1010. case MSM_VIDC_BUF_COMV:
  1011. return &inst->allocations.comv;
  1012. case MSM_VIDC_BUF_NON_COMV:
  1013. return &inst->allocations.non_comv;
  1014. case MSM_VIDC_BUF_LINE:
  1015. return &inst->allocations.line;
  1016. case MSM_VIDC_BUF_DPB:
  1017. return &inst->allocations.dpb;
  1018. case MSM_VIDC_BUF_PERSIST:
  1019. return &inst->allocations.persist;
  1020. case MSM_VIDC_BUF_VPSS:
  1021. return &inst->allocations.vpss;
  1022. default:
  1023. i_vpr_e(inst, "%s: invalid driver buffer type %d\n",
  1024. func, buffer_type);
  1025. return NULL;
  1026. }
  1027. }
  1028. bool res_is_greater_than(u32 width, u32 height,
  1029. u32 ref_width, u32 ref_height)
  1030. {
  1031. u32 num_mbs = NUM_MBS_PER_FRAME(height, width);
  1032. u32 max_side = max(ref_width, ref_height);
  1033. if (num_mbs > NUM_MBS_PER_FRAME(ref_height, ref_width) ||
  1034. width > max_side ||
  1035. height > max_side)
  1036. return true;
  1037. else
  1038. return false;
  1039. }
  1040. bool res_is_greater_than_or_equal_to(u32 width, u32 height,
  1041. u32 ref_width, u32 ref_height)
  1042. {
  1043. u32 num_mbs = NUM_MBS_PER_FRAME(height, width);
  1044. u32 max_side = max(ref_width, ref_height);
  1045. if (num_mbs >= NUM_MBS_PER_FRAME(ref_height, ref_width) ||
  1046. width >= max_side ||
  1047. height >= max_side)
  1048. return true;
  1049. else
  1050. return false;
  1051. }
  1052. bool res_is_less_than(u32 width, u32 height,
  1053. u32 ref_width, u32 ref_height)
  1054. {
  1055. u32 num_mbs = NUM_MBS_PER_FRAME(height, width);
  1056. u32 max_side = max(ref_width, ref_height);
  1057. if (num_mbs < NUM_MBS_PER_FRAME(ref_height, ref_width) &&
  1058. width < max_side &&
  1059. height < max_side)
  1060. return true;
  1061. else
  1062. return false;
  1063. }
  1064. bool res_is_less_than_or_equal_to(u32 width, u32 height,
  1065. u32 ref_width, u32 ref_height)
  1066. {
  1067. u32 num_mbs = NUM_MBS_PER_FRAME(height, width);
  1068. u32 max_side = max(ref_width, ref_height);
  1069. if (num_mbs <= NUM_MBS_PER_FRAME(ref_height, ref_width) &&
  1070. width <= max_side &&
  1071. height <= max_side)
  1072. return true;
  1073. else
  1074. return false;
  1075. }
  1076. int msm_vidc_change_core_state(struct msm_vidc_core *core,
  1077. enum msm_vidc_core_state request_state, const char *func)
  1078. {
  1079. if (!core) {
  1080. d_vpr_e("%s: invalid params\n", __func__);
  1081. return -EINVAL;
  1082. }
  1083. d_vpr_h("%s: core state changed to %s from %s\n",
  1084. func, core_state_name(request_state),
  1085. core_state_name(core->state));
  1086. core->state = request_state;
  1087. return 0;
  1088. }
/*
 * msm_vidc_change_inst_state() - move the instance to @request_state.
 *
 * Rejects NULL @inst and a zero @request_state with -EINVAL.  Once an
 * instance is in the error state, further transitions are silently
 * ignored (returns 0) so the error state is sticky.  Transitions INTO
 * the error state are logged at error level, all others at high level;
 * the transition is also traced before inst->state is updated.
 */
int msm_vidc_change_inst_state(struct msm_vidc_inst *inst,
	enum msm_vidc_inst_state request_state, const char *func)
{
	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	if (!request_state) {
		i_vpr_e(inst, "%s: invalid request state\n", func);
		return -EINVAL;
	}

	/* error state is sticky: keep it and report success */
	if (is_session_error(inst)) {
		i_vpr_h(inst,
			"%s: inst is in bad state, can not change state to %s\n",
			func, state_name(request_state));
		return 0;
	}

	if (request_state == MSM_VIDC_ERROR)
		i_vpr_e(inst, "%s: state changed to %s from %s\n",
			func, state_name(request_state), state_name(inst->state));
	else
		i_vpr_h(inst, "%s: state changed to %s from %s\n",
			func, state_name(request_state), state_name(inst->state));

	/* trace before the write so both old and new states are available */
	trace_msm_vidc_common_state_change(inst, func, state_name(inst->state),
		state_name(request_state));

	inst->state = request_state;

	return 0;
}
  1117. bool msm_vidc_allow_s_fmt(struct msm_vidc_inst *inst, u32 type)
  1118. {
  1119. bool allow = false;
  1120. if (!inst) {
  1121. d_vpr_e("%s: invalid params\n", __func__);
  1122. return false;
  1123. }
  1124. if (inst->state == MSM_VIDC_OPEN) {
  1125. allow = true;
  1126. goto exit;
  1127. }
  1128. if (type == OUTPUT_MPLANE || type == OUTPUT_META_PLANE) {
  1129. if (inst->state == MSM_VIDC_START_INPUT ||
  1130. inst->state == MSM_VIDC_DRAIN_START_INPUT) {
  1131. allow = true;
  1132. goto exit;
  1133. }
  1134. }
  1135. if (type == INPUT_MPLANE || type == INPUT_META_PLANE) {
  1136. if (inst->state == MSM_VIDC_START_OUTPUT) {
  1137. allow = true;
  1138. goto exit;
  1139. }
  1140. }
  1141. exit:
  1142. if (!allow)
  1143. i_vpr_e(inst, "%s: type %d not allowed in state %s\n",
  1144. __func__, type, state_name(inst->state));
  1145. return allow;
  1146. }
/*
 * msm_vidc_allow_s_ctrl() - decide whether control @id may be set in the
 * instance's current state.
 *
 * All controls are allowed in OPEN.  For decode sessions, everything is
 * allowed until the input queue starts streaming; after that only a
 * small whitelist of runtime controls is accepted.  For encode sessions,
 * a (larger) whitelist of dynamic controls is accepted while the session
 * is in START or START_OUTPUT.  Any other combination is rejected and
 * logged.
 */
bool msm_vidc_allow_s_ctrl(struct msm_vidc_inst *inst, u32 id)
{
	bool allow = false;

	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return false;
	}
	if (inst->state == MSM_VIDC_OPEN) {
		allow = true;
		goto exit;
	}
	if (is_decode_session(inst)) {
		if (!inst->vb2q[INPUT_PORT].streaming) {
			allow = true;
			goto exit;
		}
		if (inst->vb2q[INPUT_PORT].streaming) {
			/* runtime-settable decode controls */
			switch (id) {
			case V4L2_CID_MPEG_VIDC_CODEC_CONFIG:
			case V4L2_CID_MPEG_VIDC_PRIORITY:
			case V4L2_CID_MPEG_VIDC_LOWLATENCY_REQUEST:
				allow = true;
				break;
			default:
				allow = false;
				break;
			}
		}
	} else if (is_encode_session(inst)) {
		if (inst->state == MSM_VIDC_START || inst->state == MSM_VIDC_START_OUTPUT) {
			/* runtime-settable encode controls */
			switch (id) {
			case V4L2_CID_MPEG_VIDEO_BITRATE:
			case V4L2_CID_MPEG_VIDEO_GOP_SIZE:
			case V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME:
			case V4L2_CID_HFLIP:
			case V4L2_CID_VFLIP:
			case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP:
			case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP:
			case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP:
			case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:
			case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP:
			case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP:
			case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER:
			case V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER:
			case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L0_BR:
			case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L1_BR:
			case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L2_BR:
			case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L3_BR:
			case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L4_BR:
			case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_L5_BR:
			case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L0_BR:
			case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L1_BR:
			case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L2_BR:
			case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L3_BR:
			case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L4_BR:
			case V4L2_CID_MPEG_VIDEO_H264_HIER_CODING_L5_BR:
			case V4L2_CID_MPEG_VIDEO_USE_LTR_FRAMES:
			case V4L2_CID_MPEG_VIDEO_FRAME_LTR_INDEX:
			case V4L2_CID_MPEG_VIDC_VIDEO_BLUR_TYPES:
			case V4L2_CID_MPEG_VIDC_VIDEO_BLUR_RESOLUTION:
			case V4L2_CID_MPEG_VIDEO_CONSTANT_QUALITY:
			case V4L2_CID_MPEG_VIDC_ENC_INPUT_COMPRESSION_RATIO:
			case V4L2_CID_MPEG_VIDEO_BITRATE_PEAK:
			case V4L2_CID_MPEG_VIDC_PRIORITY:
				allow = true;
				break;
			default:
				allow = false;
				break;
			}
		}
	}
exit:
	if (!allow)
		i_vpr_e(inst, "%s: id %#x not allowed in state %s\n",
			__func__, id, state_name(inst->state));
	return allow;
}
  1225. bool msm_vidc_allow_metadata(struct msm_vidc_inst *inst, u32 cap_id)
  1226. {
  1227. bool is_allowed = true;
  1228. if (!inst || !inst->capabilities) {
  1229. d_vpr_e("%s: invalid params\n", __func__);
  1230. return false;
  1231. }
  1232. switch (cap_id) {
  1233. case META_OUTPUT_BUF_TAG:
  1234. case META_DPB_TAG_LIST:
  1235. if (!is_ubwc_colorformat(inst->capabilities->cap[PIX_FMTS].value)) {
  1236. i_vpr_h(inst,
  1237. "%s: cap: %24s not allowed for split mode\n",
  1238. __func__, cap_name(cap_id));
  1239. is_allowed = false;
  1240. }
  1241. break;
  1242. default:
  1243. is_allowed = true;
  1244. break;
  1245. }
  1246. return is_allowed;
  1247. }
  1248. bool msm_vidc_allow_property(struct msm_vidc_inst *inst, u32 hfi_id)
  1249. {
  1250. bool is_allowed = true;
  1251. if (!inst || !inst->capabilities) {
  1252. d_vpr_e("%s: invalid params\n", __func__);
  1253. return false;
  1254. }
  1255. switch (hfi_id) {
  1256. case HFI_PROP_WORST_COMPRESSION_RATIO:
  1257. case HFI_PROP_WORST_COMPLEXITY_FACTOR:
  1258. case HFI_PROP_PICTURE_TYPE:
  1259. is_allowed = true;
  1260. break;
  1261. case HFI_PROP_DPB_LIST:
  1262. if (!is_ubwc_colorformat(inst->capabilities->cap[PIX_FMTS].value)) {
  1263. i_vpr_h(inst,
  1264. "%s: cap: %24s not allowed for split mode\n",
  1265. __func__, cap_name(DPB_LIST));
  1266. is_allowed = false;
  1267. }
  1268. break;
  1269. default:
  1270. is_allowed = true;
  1271. break;
  1272. }
  1273. return is_allowed;
  1274. }
  1275. int msm_vidc_update_property_cap(struct msm_vidc_inst *inst, u32 hfi_id,
  1276. bool allow)
  1277. {
  1278. int rc = 0;
  1279. if (!inst || !inst->capabilities) {
  1280. d_vpr_e("%s: invalid params\n", __func__);
  1281. return -EINVAL;
  1282. }
  1283. switch (hfi_id) {
  1284. case HFI_PROP_WORST_COMPRESSION_RATIO:
  1285. case HFI_PROP_WORST_COMPLEXITY_FACTOR:
  1286. case HFI_PROP_PICTURE_TYPE:
  1287. break;
  1288. case HFI_PROP_DPB_LIST:
  1289. if (!allow)
  1290. memset(inst->dpb_list_payload, 0, MAX_DPB_LIST_ARRAY_SIZE);
  1291. msm_vidc_update_cap_value(inst, DPB_LIST, allow, __func__);
  1292. break;
  1293. default:
  1294. break;
  1295. }
  1296. return rc;
  1297. }
  1298. bool msm_vidc_allow_reqbufs(struct msm_vidc_inst *inst, u32 type)
  1299. {
  1300. bool allow = false;
  1301. if (!inst) {
  1302. d_vpr_e("%s: invalid params\n", __func__);
  1303. return false;
  1304. }
  1305. if (inst->state == MSM_VIDC_OPEN) {
  1306. allow = true;
  1307. goto exit;
  1308. }
  1309. if (type == OUTPUT_MPLANE || type == OUTPUT_META_PLANE) {
  1310. if (inst->state == MSM_VIDC_START_INPUT ||
  1311. inst->state == MSM_VIDC_DRAIN_START_INPUT) {
  1312. allow = true;
  1313. goto exit;
  1314. }
  1315. }
  1316. if (type == INPUT_MPLANE || type == INPUT_META_PLANE) {
  1317. if (inst->state == MSM_VIDC_START_OUTPUT) {
  1318. allow = true;
  1319. goto exit;
  1320. }
  1321. }
  1322. exit:
  1323. if (!allow)
  1324. i_vpr_e(inst, "%s: type %d not allowed in state %s\n",
  1325. __func__, type, state_name(inst->state));
  1326. return allow;
  1327. }
  1328. enum msm_vidc_allow msm_vidc_allow_stop(struct msm_vidc_inst *inst)
  1329. {
  1330. enum msm_vidc_allow allow = MSM_VIDC_DISALLOW;
  1331. if (!inst) {
  1332. d_vpr_e("%s: invalid params\n", __func__);
  1333. return allow;
  1334. }
  1335. if (inst->state == MSM_VIDC_START ||
  1336. inst->state == MSM_VIDC_DRC ||
  1337. inst->state == MSM_VIDC_DRC_LAST_FLAG ||
  1338. inst->state == MSM_VIDC_DRC_DRAIN) {
  1339. allow = MSM_VIDC_ALLOW;
  1340. } else if (inst->state == MSM_VIDC_START_INPUT) {
  1341. allow = MSM_VIDC_IGNORE;
  1342. i_vpr_e(inst, "%s: stop ignored in state %s\n",
  1343. __func__, state_name(inst->state));
  1344. } else {
  1345. i_vpr_e(inst, "%s: stop not allowed in state %s\n",
  1346. __func__, state_name(inst->state));
  1347. }
  1348. return allow;
  1349. }
  1350. bool msm_vidc_allow_start(struct msm_vidc_inst *inst)
  1351. {
  1352. if (!inst) {
  1353. d_vpr_e("%s: invalid params\n", __func__);
  1354. return false;
  1355. }
  1356. if (inst->state == MSM_VIDC_DRAIN_LAST_FLAG ||
  1357. inst->state == MSM_VIDC_DRC_LAST_FLAG ||
  1358. inst->state == MSM_VIDC_DRC_DRAIN_LAST_FLAG)
  1359. return true;
  1360. i_vpr_e(inst, "%s: not allowed in state %s\n",
  1361. __func__, state_name(inst->state));
  1362. return false;
  1363. }
  1364. bool msm_vidc_allow_streamon(struct msm_vidc_inst *inst, u32 type)
  1365. {
  1366. if (!inst) {
  1367. d_vpr_e("%s: invalid params\n", __func__);
  1368. return false;
  1369. }
  1370. if (type == INPUT_MPLANE || type == INPUT_META_PLANE) {
  1371. if (inst->state == MSM_VIDC_OPEN ||
  1372. inst->state == MSM_VIDC_START_OUTPUT)
  1373. return true;
  1374. } else if (type == OUTPUT_MPLANE || type == OUTPUT_META_PLANE) {
  1375. if (inst->state == MSM_VIDC_OPEN ||
  1376. inst->state == MSM_VIDC_START_INPUT ||
  1377. inst->state == MSM_VIDC_DRAIN_START_INPUT)
  1378. return true;
  1379. }
  1380. i_vpr_e(inst, "%s: type %d not allowed in state %s\n",
  1381. __func__, type, state_name(inst->state));
  1382. return false;
  1383. }
  1384. enum msm_vidc_allow msm_vidc_allow_streamoff(struct msm_vidc_inst *inst, u32 type)
  1385. {
  1386. enum msm_vidc_allow allow = MSM_VIDC_ALLOW;
  1387. if (!inst) {
  1388. d_vpr_e("%s: invalid params\n", __func__);
  1389. return MSM_VIDC_DISALLOW;
  1390. }
  1391. if (type == INPUT_MPLANE) {
  1392. if (!inst->vb2q[INPUT_PORT].streaming)
  1393. allow = MSM_VIDC_IGNORE;
  1394. } else if (type == INPUT_META_PLANE) {
  1395. if (inst->vb2q[INPUT_PORT].streaming)
  1396. allow = MSM_VIDC_DISALLOW;
  1397. else if (!inst->vb2q[INPUT_META_PORT].streaming)
  1398. allow = MSM_VIDC_IGNORE;
  1399. } else if (type == OUTPUT_MPLANE) {
  1400. if (!inst->vb2q[OUTPUT_PORT].streaming)
  1401. allow = MSM_VIDC_IGNORE;
  1402. } else if (type == OUTPUT_META_PLANE) {
  1403. if (inst->vb2q[OUTPUT_PORT].streaming)
  1404. allow = MSM_VIDC_DISALLOW;
  1405. else if (!inst->vb2q[OUTPUT_META_PORT].streaming)
  1406. allow = MSM_VIDC_IGNORE;
  1407. }
  1408. if (allow != MSM_VIDC_ALLOW)
  1409. i_vpr_e(inst, "%s: type %d is %s in state %s\n",
  1410. __func__, type, allow_name(allow),
  1411. state_name(inst->state));
  1412. return allow;
  1413. }
  1414. enum msm_vidc_allow msm_vidc_allow_qbuf(struct msm_vidc_inst *inst, u32 type)
  1415. {
  1416. int port = 0;
  1417. if (!inst) {
  1418. d_vpr_e("%s: invalid params\n", __func__);
  1419. return MSM_VIDC_DISALLOW;
  1420. }
  1421. port = v4l2_type_to_driver_port(inst, type, __func__);
  1422. if (port < 0)
  1423. return MSM_VIDC_DISALLOW;
  1424. /* defer queuing if streamon not completed */
  1425. if (!inst->vb2q[port].streaming)
  1426. return MSM_VIDC_DEFER;
  1427. if (type == INPUT_META_PLANE || type == OUTPUT_META_PLANE)
  1428. return MSM_VIDC_DEFER;
  1429. if (type == INPUT_MPLANE) {
  1430. if (inst->state == MSM_VIDC_OPEN ||
  1431. inst->state == MSM_VIDC_START_OUTPUT)
  1432. return MSM_VIDC_DEFER;
  1433. else
  1434. return MSM_VIDC_ALLOW;
  1435. } else if (type == OUTPUT_MPLANE) {
  1436. if (inst->state == MSM_VIDC_OPEN ||
  1437. inst->state == MSM_VIDC_START_INPUT ||
  1438. inst->state == MSM_VIDC_DRAIN_START_INPUT)
  1439. return MSM_VIDC_DEFER;
  1440. else
  1441. return MSM_VIDC_ALLOW;
  1442. } else {
  1443. i_vpr_e(inst, "%s: unknown buffer type %d\n", __func__, type);
  1444. return MSM_VIDC_DISALLOW;
  1445. }
  1446. return MSM_VIDC_DISALLOW;
  1447. }
  1448. enum msm_vidc_allow msm_vidc_allow_input_psc(struct msm_vidc_inst *inst)
  1449. {
  1450. enum msm_vidc_allow allow = MSM_VIDC_DISALLOW;
  1451. if (!inst) {
  1452. d_vpr_e("%s: invalid params\n", __func__);
  1453. return MSM_VIDC_DISALLOW;
  1454. }
  1455. if (inst->state == MSM_VIDC_START ||
  1456. inst->state == MSM_VIDC_START_INPUT ||
  1457. inst->state == MSM_VIDC_DRAIN) {
  1458. allow = MSM_VIDC_ALLOW;
  1459. } else if (inst->state == MSM_VIDC_DRC ||
  1460. inst->state == MSM_VIDC_DRC_LAST_FLAG ||
  1461. inst->state == MSM_VIDC_DRC_DRAIN ||
  1462. inst->state == MSM_VIDC_DRC_DRAIN_LAST_FLAG ||
  1463. inst->state == MSM_VIDC_DRAIN_START_INPUT) {
  1464. i_vpr_h(inst, "%s: defer input psc, inst state %s\n",
  1465. __func__, state_name(inst->state));
  1466. allow = MSM_VIDC_DEFER;
  1467. } else if (inst->state == MSM_VIDC_OPEN ||
  1468. inst->state == MSM_VIDC_START_OUTPUT) {
  1469. i_vpr_h(inst, "%s: discard input psc, inst state %s\n",
  1470. __func__, state_name(inst->state));
  1471. allow = MSM_VIDC_DISCARD;
  1472. } else {
  1473. i_vpr_e(inst, "%s: input psc in wrong state %s\n",
  1474. __func__, state_name(inst->state));
  1475. allow = MSM_VIDC_DISALLOW;
  1476. }
  1477. return allow;
  1478. }
  1479. bool msm_vidc_allow_last_flag(struct msm_vidc_inst *inst)
  1480. {
  1481. if (!inst) {
  1482. d_vpr_e("%s: invalid params\n", __func__);
  1483. return false;
  1484. }
  1485. if (inst->state == MSM_VIDC_DRC ||
  1486. inst->state == MSM_VIDC_DRAIN ||
  1487. inst->state == MSM_VIDC_DRC_DRAIN)
  1488. return true;
  1489. i_vpr_e(inst, "%s: not allowed in state %s\n",
  1490. __func__, state_name(inst->state));
  1491. return false;
  1492. }
/*
 * msm_vidc_discard_pending_ipsc() - drop any queued input port settings
 * change (ipsc) work items from the instance response list.
 *
 * Used when a pending ipsc has become stale.  Also clears
 * ipsc_properties_set so that the psc properties get re-applied when the
 * next ipsc arrives.
 *
 * Return: 0 on success, -EINVAL on invalid parameters.
 */
static int msm_vidc_discard_pending_ipsc(struct msm_vidc_inst *inst)
{
	struct response_work *resp_work, *dummy = NULL;

	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	if (list_empty(&inst->response_works))
		return 0;

	/* discard pending port settings change if any */
	/* _safe iteration: entries are deleted and freed inside the loop */
	list_for_each_entry_safe(resp_work, dummy,
			&inst->response_works, list) {
		if (resp_work->type == RESP_WORK_INPUT_PSC) {
			i_vpr_h(inst,
				"%s: discard pending input psc\n", __func__);

			/* override the psc properties again if ipsc discarded */
			inst->ipsc_properties_set = false;

			list_del(&resp_work->list);
			kfree(resp_work->data);
			kfree(resp_work);
		}
	}
	return 0;
}
/*
 * msm_vidc_process_pending_ipsc() - handle a deferred input port settings
 * change now that the state machine can accept it.
 *
 * Runs the queued RESP_WORK_INPUT_PSC item (at most one exists at a
 * time) and, based on the state the instance was in, tells the caller
 * which state to move to next via @new_state: a failure forces
 * MSM_VIDC_ERROR, while success maps the DRC wind-down states back to
 * their active counterparts.  The work item is always removed and freed.
 *
 * Return: 0 on success or when nothing is pending; the handler's error
 * code on failure; -EINVAL on invalid parameters.
 */
static int msm_vidc_process_pending_ipsc(struct msm_vidc_inst *inst,
	enum msm_vidc_inst_state *new_state)
{
	struct response_work *resp_work, *dummy = NULL;
	int rc = 0;

	if (!inst || !new_state) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	if (list_empty(&inst->response_works))
		return 0;

	i_vpr_h(inst, "%s: state %s, ipsc pending\n", __func__, state_name(inst->state));
	/* _safe iteration: the matched entry is deleted and freed below */
	list_for_each_entry_safe(resp_work, dummy, &inst->response_works, list) {
		if (resp_work->type == RESP_WORK_INPUT_PSC) {
			rc = handle_session_response_work(inst, resp_work);
			if (rc) {
				i_vpr_e(inst, "%s: handle ipsc failed\n", __func__);
				*new_state = MSM_VIDC_ERROR;
			} else {
				/* map wind-down states back to their active equivalents */
				if (inst->state == MSM_VIDC_DRC_DRAIN_LAST_FLAG ||
					inst->state == MSM_VIDC_DRAIN_START_INPUT) {
					*new_state = MSM_VIDC_DRC_DRAIN;
				} else if (inst->state == MSM_VIDC_DRC_LAST_FLAG) {
					*new_state = MSM_VIDC_DRC;
				}
			}
			list_del(&resp_work->list);
			kfree(resp_work->data);
			kfree(resp_work);
			/* list contains max only one ipsc at anytime */
			break;
		}
	}
	return rc;
}
/*
 * msm_vidc_state_change_streamon() - advance the instance state machine
 * after a successful streamon on queue @type.
 *
 * Meta planes carry no state.  Starting one data port from OPEN moves to
 * the corresponding single-port state; starting the second port moves to
 * MSM_VIDC_START.  Restarting output during a drain
 * (MSM_VIDC_DRAIN_START_INPUT) resumes the drain and replays any ipsc
 * that was deferred while output was down.  If no transition matches,
 * new_state stays MSM_VIDC_ERROR and the instance is moved to error.
 *
 * Return: 0 on success, -EINVAL on invalid parameters, or the pending
 * ipsc handler's error code.
 */
int msm_vidc_state_change_streamon(struct msm_vidc_inst *inst, u32 type)
{
	int rc = 0;
	enum msm_vidc_inst_state new_state = MSM_VIDC_ERROR;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	/* meta queues do not participate in the state machine */
	if (type == INPUT_META_PLANE || type == OUTPUT_META_PLANE)
		return 0;

	if (type == INPUT_MPLANE) {
		if (inst->state == MSM_VIDC_OPEN)
			new_state = MSM_VIDC_START_INPUT;
		else if (inst->state == MSM_VIDC_START_OUTPUT)
			new_state = MSM_VIDC_START;
	} else if (type == OUTPUT_MPLANE) {
		if (inst->state == MSM_VIDC_OPEN) {
			new_state = MSM_VIDC_START_OUTPUT;
		} else if (inst->state == MSM_VIDC_START_INPUT) {
			new_state = MSM_VIDC_START;
		} else if (inst->state == MSM_VIDC_DRAIN_START_INPUT) {
			i_vpr_h(inst, "%s: streamon(output) in %s state\n",
				__func__, state_name(inst->state));
			new_state = MSM_VIDC_DRAIN;
			/* replay the ipsc that was deferred while output was stopped */
			rc = msm_vidc_process_pending_ipsc(inst, &new_state);
			if (rc) {
				i_vpr_e(inst, "%s: process pending ipsc failed\n", __func__);
				goto state_change;
			}
		}
	}

state_change:
	msm_vidc_change_inst_state(inst, new_state, __func__);
	return rc;
}
  1587. int msm_vidc_state_change_streamoff(struct msm_vidc_inst *inst, u32 type)
  1588. {
  1589. int rc = 0;
  1590. enum msm_vidc_inst_state new_state = MSM_VIDC_ERROR;
  1591. if (!inst || !inst->core) {
  1592. d_vpr_e("%s: invalid params\n", __func__);
  1593. return -EINVAL;
  1594. }
  1595. if (type == INPUT_META_PLANE || type == OUTPUT_META_PLANE)
  1596. return 0;
  1597. if (type == INPUT_MPLANE) {
  1598. if (inst->state == MSM_VIDC_START_INPUT) {
  1599. new_state = MSM_VIDC_OPEN;
  1600. } else if (inst->state == MSM_VIDC_START) {
  1601. new_state = MSM_VIDC_START_OUTPUT;
  1602. } else if (inst->state == MSM_VIDC_DRC ||
  1603. inst->state == MSM_VIDC_DRC_LAST_FLAG ||
  1604. inst->state == MSM_VIDC_DRAIN ||
  1605. inst->state == MSM_VIDC_DRAIN_LAST_FLAG ||
  1606. inst->state == MSM_VIDC_DRC_DRAIN ||
  1607. inst->state == MSM_VIDC_DRC_DRAIN_LAST_FLAG ||
  1608. inst->state == MSM_VIDC_DRAIN_START_INPUT) {
  1609. new_state = MSM_VIDC_START_OUTPUT;
  1610. }
  1611. } else if (type == OUTPUT_MPLANE) {
  1612. if (inst->state == MSM_VIDC_START_OUTPUT) {
  1613. new_state = MSM_VIDC_OPEN;
  1614. } else if (inst->state == MSM_VIDC_START ||
  1615. inst->state == MSM_VIDC_DRAIN ||
  1616. inst->state == MSM_VIDC_DRAIN_LAST_FLAG ||
  1617. inst->state == MSM_VIDC_DRC ||
  1618. inst->state == MSM_VIDC_DRC_LAST_FLAG ||
  1619. inst->state == MSM_VIDC_DRC_DRAIN) {
  1620. new_state = MSM_VIDC_START_INPUT;
  1621. } else if (inst->state == MSM_VIDC_DRC_DRAIN_LAST_FLAG) {
  1622. new_state = MSM_VIDC_DRAIN_START_INPUT;
  1623. }
  1624. }
  1625. rc = msm_vidc_change_inst_state(inst, new_state, __func__);
  1626. if (rc)
  1627. goto exit;
  1628. exit:
  1629. return rc;
  1630. }
  1631. int msm_vidc_state_change_stop(struct msm_vidc_inst *inst)
  1632. {
  1633. int rc = 0;
  1634. enum msm_vidc_inst_state new_state = MSM_VIDC_ERROR;
  1635. if (!inst || !inst->core) {
  1636. d_vpr_e("%s: invalid params\n", __func__);
  1637. return -EINVAL;
  1638. }
  1639. if (inst->state == MSM_VIDC_START) {
  1640. new_state = MSM_VIDC_DRAIN;
  1641. } else if (inst->state == MSM_VIDC_DRC) {
  1642. new_state = MSM_VIDC_DRC_DRAIN;
  1643. } else if (inst->state == MSM_VIDC_DRC_DRAIN ||
  1644. inst->state == MSM_VIDC_DRC_LAST_FLAG) {
  1645. new_state = MSM_VIDC_DRC_DRAIN_LAST_FLAG;
  1646. } else {
  1647. i_vpr_e(inst, "%s: wrong state %s\n",
  1648. __func__, state_name(inst->state));
  1649. msm_vidc_change_inst_state(inst, MSM_VIDC_ERROR, __func__);
  1650. return -EINVAL;
  1651. }
  1652. rc = msm_vidc_change_inst_state(inst, new_state, __func__);
  1653. if (rc)
  1654. return rc;
  1655. return rc;
  1656. }
/*
 * msm_vidc_state_change_start() - advance the instance state machine for
 * a (re)start command after a drain/DRC completed.
 *
 * From DRAIN_LAST_FLAG / DRC_LAST_FLAG the instance resumes to START;
 * from DRC_DRAIN_LAST_FLAG it resumes the outstanding drain.  In both
 * cases any deferred ipsc is replayed first and may override the target
 * state.  Any other state is an error and forces MSM_VIDC_ERROR.
 *
 * Return: 0 on success, -EINVAL on invalid parameters or wrong state,
 * or the pending ipsc handler's error code.
 */
int msm_vidc_state_change_start(struct msm_vidc_inst *inst)
{
	int rc = 0;
	enum msm_vidc_inst_state new_state = MSM_VIDC_ERROR;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	if (inst->state == MSM_VIDC_DRAIN_LAST_FLAG ||
		inst->state == MSM_VIDC_DRC_LAST_FLAG) {
		new_state = MSM_VIDC_START;
		/* deferred ipsc (if any) may redirect the target state */
		rc = msm_vidc_process_pending_ipsc(inst, &new_state);
		if (rc) {
			i_vpr_e(inst, "%s: process pending ipsc failed\n", __func__);
			goto state_change;
		}
	} else if (inst->state == MSM_VIDC_DRC_DRAIN_LAST_FLAG) {
		/* a drain was requested during DRC; resume it now */
		new_state = MSM_VIDC_DRAIN;
		rc = msm_vidc_process_pending_ipsc(inst, &new_state);
		if (rc) {
			i_vpr_e(inst, "%s: process pending ipsc failed\n", __func__);
			goto state_change;
		}
	} else {
		i_vpr_e(inst, "%s: wrong state %s\n", __func__, state_name(inst->state));
		new_state = MSM_VIDC_ERROR;
		rc = -EINVAL;
		goto state_change;
	}

state_change:
	msm_vidc_change_inst_state(inst, new_state, __func__);
	return rc;
}
  1690. int msm_vidc_state_change_input_psc(struct msm_vidc_inst *inst)
  1691. {
  1692. int rc = 0;
  1693. enum msm_vidc_inst_state new_state = MSM_VIDC_ERROR;
  1694. if (!inst || !inst->core) {
  1695. d_vpr_e("%s: invalid params\n", __func__);
  1696. return -EINVAL;
  1697. }
  1698. /* don't change state as output port is not started yet */
  1699. if (inst->state == MSM_VIDC_START_INPUT)
  1700. return 0;
  1701. if (inst->state == MSM_VIDC_START) {
  1702. new_state = MSM_VIDC_DRC;
  1703. } else if (inst->state == MSM_VIDC_DRAIN) {
  1704. new_state = MSM_VIDC_DRC_DRAIN;
  1705. } else {
  1706. i_vpr_e(inst, "%s: wrong state %s\n",
  1707. __func__, state_name(inst->state));
  1708. msm_vidc_change_inst_state(inst, MSM_VIDC_ERROR, __func__);
  1709. return -EINVAL;
  1710. }
  1711. rc = msm_vidc_change_inst_state(inst, new_state, __func__);
  1712. if (rc)
  1713. return rc;
  1714. return rc;
  1715. }
  1716. int msm_vidc_state_change_last_flag(struct msm_vidc_inst *inst)
  1717. {
  1718. int rc = 0;
  1719. enum msm_vidc_inst_state new_state = MSM_VIDC_ERROR;
  1720. if (!inst || !inst->core) {
  1721. d_vpr_e("%s: invalid params\n", __func__);
  1722. return -EINVAL;
  1723. }
  1724. if (inst->state == MSM_VIDC_DRC) {
  1725. new_state = MSM_VIDC_DRC_LAST_FLAG;
  1726. } else if (inst->state == MSM_VIDC_DRAIN) {
  1727. new_state = MSM_VIDC_DRAIN_LAST_FLAG;
  1728. } else if (inst->state == MSM_VIDC_DRC_DRAIN) {
  1729. new_state = MSM_VIDC_DRC_DRAIN_LAST_FLAG;
  1730. } else {
  1731. i_vpr_e(inst, "%s: wrong state %s\n",
  1732. __func__, state_name(inst->state));
  1733. msm_vidc_change_inst_state(inst, MSM_VIDC_ERROR, __func__);
  1734. return -EINVAL;
  1735. }
  1736. rc = msm_vidc_change_inst_state(inst, new_state, __func__);
  1737. if (rc)
  1738. return rc;
  1739. return rc;
  1740. }
  1741. int msm_vidc_get_control(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
  1742. {
  1743. int rc = 0;
  1744. if (!inst || !ctrl) {
  1745. d_vpr_e("%s: invalid params\n", __func__);
  1746. return -EINVAL;
  1747. }
  1748. switch (ctrl->id) {
  1749. case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
  1750. ctrl->val = inst->buffers.output.min_count +
  1751. inst->buffers.output.extra_count;
  1752. i_vpr_h(inst, "g_min: output buffers %d\n", ctrl->val);
  1753. break;
  1754. case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
  1755. ctrl->val = inst->buffers.input.min_count +
  1756. inst->buffers.input.extra_count;
  1757. i_vpr_h(inst, "g_min: input buffers %d\n", ctrl->val);
  1758. break;
  1759. default:
  1760. break;
  1761. }
  1762. return rc;
  1763. }
  1764. int msm_vidc_get_mbs_per_frame(struct msm_vidc_inst *inst)
  1765. {
  1766. int height = 0, width = 0;
  1767. struct v4l2_format *inp_f;
  1768. if (is_decode_session(inst)) {
  1769. inp_f = &inst->fmts[INPUT_PORT];
  1770. width = max(inp_f->fmt.pix_mp.width, inst->crop.width);
  1771. height = max(inp_f->fmt.pix_mp.height, inst->crop.height);
  1772. } else if (is_encode_session(inst)) {
  1773. width = inst->crop.width;
  1774. height = inst->crop.height;
  1775. }
  1776. return NUM_MBS_PER_FRAME(height, width);
  1777. }
  1778. int msm_vidc_get_fps(struct msm_vidc_inst *inst)
  1779. {
  1780. int fps;
  1781. u32 frame_rate, operating_rate;
  1782. if (!inst || !inst->capabilities) {
  1783. d_vpr_e("%s: invalid params\n", __func__);
  1784. return -EINVAL;
  1785. }
  1786. frame_rate = inst->capabilities->cap[FRAME_RATE].value;
  1787. operating_rate = inst->capabilities->cap[OPERATING_RATE].value;
  1788. if (operating_rate > frame_rate)
  1789. fps = (operating_rate >> 16) ?
  1790. (operating_rate >> 16) : 1;
  1791. else
  1792. fps = frame_rate >> 16;
  1793. return fps;
  1794. }
  1795. int msm_vidc_num_buffers(struct msm_vidc_inst *inst,
  1796. enum msm_vidc_buffer_type type, enum msm_vidc_buffer_attributes attr)
  1797. {
  1798. int count = 0;
  1799. struct msm_vidc_buffer *vbuf;
  1800. struct msm_vidc_buffers *buffers;
  1801. if (!inst) {
  1802. d_vpr_e("%s: invalid params\n", __func__);
  1803. return count;
  1804. }
  1805. if (type == MSM_VIDC_BUF_OUTPUT) {
  1806. buffers = &inst->buffers.output;
  1807. } else if (type == MSM_VIDC_BUF_INPUT) {
  1808. buffers = &inst->buffers.input;
  1809. } else {
  1810. i_vpr_e(inst, "%s: invalid buffer type %#x\n",
  1811. __func__, type);
  1812. return count;
  1813. }
  1814. list_for_each_entry(vbuf, &buffers->list, list) {
  1815. if (vbuf->type != type)
  1816. continue;
  1817. if (!(vbuf->attr & attr))
  1818. continue;
  1819. count++;
  1820. }
  1821. return count;
  1822. }
  1823. static int vb2_buffer_to_driver(struct vb2_buffer *vb2,
  1824. struct msm_vidc_buffer *buf)
  1825. {
  1826. int rc = 0;
  1827. if (!vb2 || !buf) {
  1828. d_vpr_e("%s: invalid params\n", __func__);
  1829. return -EINVAL;
  1830. }
  1831. buf->type = v4l2_type_to_driver(vb2->type, __func__);
  1832. if (!buf->type)
  1833. return -EINVAL;
  1834. buf->index = vb2->index;
  1835. buf->fd = vb2->planes[0].m.fd;
  1836. buf->data_offset = vb2->planes[0].data_offset;
  1837. buf->data_size = vb2->planes[0].bytesused - vb2->planes[0].data_offset;
  1838. buf->buffer_size = vb2->planes[0].length;
  1839. buf->timestamp = vb2->timestamp;
  1840. return rc;
  1841. }
/*
 * msm_vidc_process_readonly_buffers() - carry the read-only attribute
 * over to a re-queued decoder output buffer.
 *
 * If @buf's device address is found in the read-only list, the buffer is
 * marked MSM_VIDC_ATTR_READ_ONLY and the bookkeeping entry is removed
 * and freed.  No-op for encoders and non-output buffers.
 *
 * Return: 0 on success, -EINVAL on invalid parameters or a missing
 * read-only buffer list.
 */
int msm_vidc_process_readonly_buffers(struct msm_vidc_inst *inst,
	struct msm_vidc_buffer *buf)
{
	int rc = 0;
	struct msm_vidc_buffer *ro_buf, *dummy;
	struct msm_vidc_buffers *ro_buffers;

	if (!inst || !buf) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	/* only decoder output buffers can be read-only */
	if (!is_decode_session(inst) || !is_output_buffer(buf->type))
		return 0;

	ro_buffers = msm_vidc_get_buffers(inst, MSM_VIDC_BUF_READ_ONLY, __func__);
	if (!ro_buffers)
		return -EINVAL;

	/*
	 * check if buffer present in ro_buffers list
	 * if present: add ro flag to buf and remove from ro_buffers list
	 * if not present: do nothing
	 */
	/* _safe iteration: the matched entry is deleted and freed below */
	list_for_each_entry_safe(ro_buf, dummy, &ro_buffers->list, list) {
		if (ro_buf->device_addr == buf->device_addr) {
			buf->attr |= MSM_VIDC_ATTR_READ_ONLY;
			print_vidc_buffer(VIDC_LOW, "low ", "ro buf removed", inst, ro_buf);
			list_del(&ro_buf->list);
			msm_memory_free(inst, ro_buf);
			break;
		}
	}
	return rc;
}
/*
 * msm_vidc_memory_unmap_completely() - drop every outstanding reference
 * on @map and release it.
 *
 * Unmaps repeatedly until the refcount hits zero, then puts the dmabuf,
 * unlinks the map entry, and frees it.  Stops early (leaving the map in
 * place) if an unmap call fails.
 *
 * Return: 0 on success or when nothing was mapped; the unmap error code
 * on failure; -EINVAL on invalid parameters.
 */
int msm_vidc_memory_unmap_completely(struct msm_vidc_inst *inst,
	struct msm_vidc_map *map)
{
	int rc = 0;

	if (!inst || !map) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	if (!map->refcount)
		return 0;
	while (map->refcount) {
		rc = msm_vidc_memory_unmap(inst->core, map);
		if (rc)
			break;
		if (!map->refcount) {
			/* last reference gone: tear down and stop touching map */
			msm_vidc_memory_put_dmabuf(inst, map->dmabuf);
			list_del(&map->list);
			msm_memory_free(inst, map);
			break;
		}
	}
	return rc;
}
/*
 * msm_vidc_set_auto_framerate() - derive the encode frame rate from
 * input buffer timestamps and push it to firmware when it changes.
 *
 * Records @timestamp into the sliding timestamp window, then walks the
 * sorted window computing a Q16 instantaneous rate from each adjacent
 * pair (clamped to the FRAME_RATE cap's max).  Only once the window is
 * full (ENC_FPS_WINDOW entries) and the last two computed rates agree
 * but differ from the currently-programmed rate is
 * HFI_PROP_FRAME_RATE sent.  Skipped entirely when the core disables
 * auto framerate, or for image / super-buffer sessions.
 *
 * Return: 0 on success or when skipped; a negative error code on
 * timestamp-update or firmware-property failure; -EINVAL on invalid
 * parameters.
 */
int msm_vidc_set_auto_framerate(struct msm_vidc_inst *inst, u64 timestamp)
{
	struct msm_vidc_core *core;
	struct msm_vidc_timestamp *ts;
	struct msm_vidc_timestamp *prev = NULL;
	u32 counter = 0, prev_fr = 0, curr_fr = 0;
	u64 time_us = 0;
	int rc = 0;

	if (!inst || !inst->core || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;
	/* feature disabled, or session type that never adjusts framerate */
	if (!core->capabilities[ENC_AUTO_FRAMERATE].value ||
		is_image_session(inst) || msm_vidc_is_super_buffer(inst))
		goto exit;

	rc = msm_vidc_update_timestamp(inst, timestamp);
	if (rc)
		goto exit;

	/* walk the sorted window; list is ordered by timestamp value */
	list_for_each_entry(ts, &inst->timestamps.list, sort.list) {
		if (prev) {
			time_us = ts->sort.val - prev->sort.val;
			prev_fr = curr_fr;
			/* Q16 rate from the inter-frame gap; duplicate ts keeps last rate */
			curr_fr = time_us ? DIV64_U64_ROUND_CLOSEST(USEC_PER_SEC, time_us) << 16 :
					inst->auto_framerate;
			if (curr_fr > inst->capabilities->cap[FRAME_RATE].max)
				curr_fr = inst->capabilities->cap[FRAME_RATE].max;
		}
		prev = ts;
		counter++;
	}

	/* wait until the sliding window is fully populated */
	if (counter < ENC_FPS_WINDOW)
		goto exit;

	/* if framerate changed and stable for 2 frames, set to firmware */
	if (curr_fr == prev_fr && curr_fr != inst->auto_framerate) {
		i_vpr_l(inst, "%s: updated fps: %u -> %u\n", __func__,
			inst->auto_framerate >> 16, curr_fr >> 16);
		rc = venus_hfi_session_property(inst,
			HFI_PROP_FRAME_RATE,
			HFI_HOST_FLAGS_NONE,
			HFI_PORT_BITSTREAM,
			HFI_PAYLOAD_Q16,
			&curr_fr,
			sizeof(u32));
		if (rc) {
			i_vpr_e(inst, "%s: set auto frame rate failed\n",
				__func__);
			goto exit;
		}
		inst->auto_framerate = curr_fr;
	}
exit:
	return rc;
}
/*
 * msm_vidc_calc_window_avg_framerate() - average frame rate over the
 * current timestamp window.
 *
 * Sums the inter-timestamp gaps (in ms) across the sorted window and
 * returns counter / total_time scaled to fps.  Note the `continue` on a
 * duplicate timestamp deliberately skips the `prev = ts` update, so the
 * next gap is still measured from the earlier distinct timestamp and
 * duplicates are collapsed rather than counted as zero-length intervals.
 *
 * Return: average fps, 0 when no time elapsed, -EINVAL on invalid
 * parameters.
 */
int msm_vidc_calc_window_avg_framerate(struct msm_vidc_inst *inst)
{
	struct msm_vidc_timestamp *ts;
	struct msm_vidc_timestamp *prev = NULL;
	u32 counter = 0;
	u64 ts_ms = 0;

	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	list_for_each_entry(ts, &inst->timestamps.list, sort.list) {
		if (prev) {
			/* collapse duplicate timestamps (keeps prev unchanged) */
			if (ts->sort.val == prev->sort.val)
				continue;
			ts_ms += div_u64(ts->sort.val - prev->sort.val, 1000000);
			counter++;
		}
		prev = ts;
	}
	return ts_ms ? (1000 * counter) / ts_ms : 0;
}
/*
 * msm_vidc_insert_sort() - insert @entry into @head keeping the list
 * sorted ascending by ->val.
 *
 * Handles the empty-list and new-minimum cases up front, then scans for
 * the first adjacent pair (prev, node) bracketing entry->val and inserts
 * after prev.  If no bracket is found the value is a new maximum and is
 * appended after the last node.  Equal values insert adjacent to their
 * peers (stable enough for this driver's use).
 *
 * Return: 0 on success, -EINVAL on invalid parameters.
 */
static int msm_vidc_insert_sort(struct list_head *head,
	struct msm_vidc_sort *entry)
{
	struct msm_vidc_sort *first, *node;
	struct msm_vidc_sort *prev = NULL;
	bool is_inserted = false;

	if (!head || !entry) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	/* empty list: entry becomes the sole element */
	if (list_empty(head)) {
		list_add(&entry->list, head);
		return 0;
	}
	/* new minimum: prepend */
	first = list_first_entry(head, struct msm_vidc_sort, list);
	if (entry->val < first->val) {
		list_add(&entry->list, head);
		return 0;
	}
	/* find the adjacent pair that brackets entry->val */
	list_for_each_entry(node, head, list) {
		if (prev &&
			entry->val >= prev->val && entry->val <= node->val) {
			list_add(&entry->list, &prev->list);
			is_inserted = true;
			break;
		}
		prev = node;
	}
	/* no bracket found: entry is the new maximum, append after last */
	if (!is_inserted && prev)
		list_add(&entry->list, &prev->list);
	return 0;
}
  2003. static struct msm_vidc_timestamp *msm_vidc_get_least_rank_ts(struct msm_vidc_inst *inst)
  2004. {
  2005. struct msm_vidc_timestamp *ts, *final = NULL;
  2006. u64 least_rank = INT_MAX;
  2007. if (!inst) {
  2008. d_vpr_e("%s: Invalid params\n", __func__);
  2009. return NULL;
  2010. }
  2011. list_for_each_entry(ts, &inst->timestamps.list, sort.list) {
  2012. if (ts->rank < least_rank) {
  2013. least_rank = ts->rank;
  2014. final = ts;
  2015. }
  2016. }
  2017. return final;
  2018. }
  2019. int msm_vidc_flush_ts(struct msm_vidc_inst *inst)
  2020. {
  2021. struct msm_vidc_timestamp *temp, *ts = NULL;
  2022. if (!inst) {
  2023. d_vpr_e("%s: Invalid params\n", __func__);
  2024. return -EINVAL;
  2025. }
  2026. list_for_each_entry_safe(ts, temp, &inst->timestamps.list, sort.list) {
  2027. i_vpr_l(inst, "%s: flushing ts: val %lld, rank %%lld\n",
  2028. __func__, ts->sort.val, ts->rank);
  2029. list_del(&ts->sort.list);
  2030. msm_memory_free(inst, ts);
  2031. }
  2032. inst->timestamps.count = 0;
  2033. inst->timestamps.rank = 0;
  2034. return 0;
  2035. }
  2036. int msm_vidc_update_timestamp(struct msm_vidc_inst *inst, u64 timestamp)
  2037. {
  2038. struct msm_vidc_timestamp *ts;
  2039. int rc = 0;
  2040. u32 window_size = 0;
  2041. if (!inst) {
  2042. d_vpr_e("%s: Invalid params\n", __func__);
  2043. return -EINVAL;
  2044. }
  2045. ts = msm_memory_alloc(inst, MSM_MEM_POOL_TIMESTAMP);
  2046. if (!ts) {
  2047. i_vpr_e(inst, "%s: ts alloc failed\n", __func__);
  2048. return -ENOMEM;
  2049. }
  2050. INIT_LIST_HEAD(&ts->sort.list);
  2051. ts->sort.val = timestamp;
  2052. ts->rank = inst->timestamps.rank++;
  2053. rc = msm_vidc_insert_sort(&inst->timestamps.list, &ts->sort);
  2054. if (rc)
  2055. return rc;
  2056. inst->timestamps.count++;
  2057. if (is_encode_session(inst))
  2058. window_size = ENC_FPS_WINDOW;
  2059. else
  2060. window_size = DEC_FPS_WINDOW;
  2061. /* keep sliding window */
  2062. if (inst->timestamps.count > window_size) {
  2063. ts = msm_vidc_get_least_rank_ts(inst);
  2064. if (!ts) {
  2065. i_vpr_e(inst, "%s: least rank ts is NULL\n", __func__);
  2066. return -EINVAL;
  2067. }
  2068. inst->timestamps.count--;
  2069. list_del(&ts->sort.list);
  2070. msm_memory_free(inst, ts);
  2071. }
  2072. return 0;
  2073. }
  2074. int msm_vidc_get_delayed_unmap(struct msm_vidc_inst *inst, struct msm_vidc_map *map)
  2075. {
  2076. int rc = 0;
  2077. if (!inst || !map) {
  2078. d_vpr_e("%s: invalid params\n", __func__);
  2079. return -EINVAL;
  2080. }
  2081. map->skip_delayed_unmap = 1;
  2082. rc = msm_vidc_memory_map(inst->core, map);
  2083. if (rc)
  2084. return rc;
  2085. return 0;
  2086. }
/*
 * msm_vidc_put_delayed_unmap() - release the reference taken by
 * msm_vidc_get_delayed_unmap().
 *
 * Verifies the map actually holds the skip_delayed_unmap marker, clears
 * it, and unmaps once.  If that drops the refcount to zero the dmabuf is
 * put and the map entry is unlinked and freed.
 *
 * Return: 0 on success, -EINVAL on invalid parameters or when the map
 * was not marked, or the unmap error code.
 */
int msm_vidc_put_delayed_unmap(struct msm_vidc_inst *inst, struct msm_vidc_map *map)
{
	int rc = 0;

	if (!inst || !map) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	/* refuse an unbalanced put */
	if (!map->skip_delayed_unmap) {
		i_vpr_e(inst, "%s: no delayed unmap, addr %#x\n",
			__func__, map->device_addr);
		return -EINVAL;
	}
	map->skip_delayed_unmap = 0;
	rc = msm_vidc_memory_unmap(inst->core, map);
	if (rc)
		i_vpr_e(inst, "%s: unmap failed\n", __func__);

	/* last reference gone: tear the mapping down completely */
	if (!map->refcount) {
		msm_vidc_memory_put_dmabuf(inst, map->dmabuf);
		list_del(&map->list);
		msm_memory_free(inst, map);
	}
	return rc;
}
  2110. int msm_vidc_unmap_buffers(struct msm_vidc_inst *inst,
  2111. enum msm_vidc_buffer_type type)
  2112. {
  2113. int rc = 0;
  2114. struct msm_vidc_mappings *mappings;
  2115. struct msm_vidc_map *map, *dummy;
  2116. if (!inst) {
  2117. d_vpr_e("%s: invalid params\n", __func__);
  2118. return -EINVAL;
  2119. }
  2120. mappings = msm_vidc_get_mappings(inst, type, __func__);
  2121. if (!mappings)
  2122. return -EINVAL;
  2123. list_for_each_entry_safe(map, dummy, &mappings->list, list) {
  2124. msm_vidc_memory_unmap_completely(inst, map);
  2125. }
  2126. return rc;
  2127. }
  2128. int msm_vidc_unmap_driver_buf(struct msm_vidc_inst *inst,
  2129. struct msm_vidc_buffer *buf)
  2130. {
  2131. int rc = 0;
  2132. struct msm_vidc_mappings *mappings;
  2133. struct msm_vidc_map *map = NULL;
  2134. bool found = false;
  2135. if (!inst || !buf) {
  2136. d_vpr_e("%s: invalid params\n", __func__);
  2137. return -EINVAL;
  2138. }
  2139. mappings = msm_vidc_get_mappings(inst, buf->type, __func__);
  2140. if (!mappings)
  2141. return -EINVAL;
  2142. /* sanity check to see if it was not removed */
  2143. list_for_each_entry(map, &mappings->list, list) {
  2144. if (map->dmabuf == buf->dmabuf) {
  2145. found = true;
  2146. break;
  2147. }
  2148. }
  2149. if (!found) {
  2150. print_vidc_buffer(VIDC_ERR, "err ", "no buf in mappings", inst, buf);
  2151. return -EINVAL;
  2152. }
  2153. rc = msm_vidc_memory_unmap(inst->core, map);
  2154. if (rc) {
  2155. print_vidc_buffer(VIDC_ERR, "err ", "unmap failed", inst, buf);
  2156. return -EINVAL;
  2157. }
  2158. /* finally delete if refcount is zero */
  2159. if (!map->refcount) {
  2160. msm_vidc_memory_put_dmabuf(inst, map->dmabuf);
  2161. list_del(&map->list);
  2162. msm_memory_free(inst, map);
  2163. }
  2164. return rc;
  2165. }
  2166. int msm_vidc_map_driver_buf(struct msm_vidc_inst *inst,
  2167. struct msm_vidc_buffer *buf)
  2168. {
  2169. int rc = 0;
  2170. struct msm_vidc_mappings *mappings;
  2171. struct msm_vidc_map *map;
  2172. bool found = false;
  2173. if (!inst || !buf) {
  2174. d_vpr_e("%s: invalid params\n", __func__);
  2175. return -EINVAL;
  2176. }
  2177. mappings = msm_vidc_get_mappings(inst, buf->type, __func__);
  2178. if (!mappings)
  2179. return -EINVAL;
  2180. /*
  2181. * new buffer: map twice for delayed unmap feature sake
  2182. * existing buffer: map once
  2183. */
  2184. list_for_each_entry(map, &mappings->list, list) {
  2185. if (map->dmabuf == buf->dmabuf) {
  2186. found = true;
  2187. break;
  2188. }
  2189. }
  2190. if (!found) {
  2191. /* new buffer case */
  2192. map = msm_memory_alloc(inst, MSM_MEM_POOL_MAP);
  2193. if (!map) {
  2194. i_vpr_e(inst, "%s: alloc failed\n", __func__);
  2195. return -ENOMEM;
  2196. }
  2197. INIT_LIST_HEAD(&map->list);
  2198. map->type = buf->type;
  2199. map->dmabuf = msm_vidc_memory_get_dmabuf(inst, buf->fd);
  2200. if (!map->dmabuf)
  2201. return -EINVAL;
  2202. map->region = msm_vidc_get_buffer_region(inst, buf->type, __func__);
  2203. /* delayed unmap feature needed for decoder output buffers */
  2204. if (is_decode_session(inst) && is_output_buffer(buf->type)) {
  2205. rc = msm_vidc_get_delayed_unmap(inst, map);
  2206. if (rc) {
  2207. msm_vidc_memory_put_dmabuf(inst, map->dmabuf);
  2208. msm_memory_free(inst, map);
  2209. return rc;
  2210. }
  2211. }
  2212. list_add_tail(&map->list, &mappings->list);
  2213. }
  2214. rc = msm_vidc_memory_map(inst->core, map);
  2215. if (rc)
  2216. return rc;
  2217. buf->device_addr = map->device_addr;
  2218. return 0;
  2219. }
  2220. int msm_vidc_put_driver_buf(struct msm_vidc_inst *inst,
  2221. struct msm_vidc_buffer *buf)
  2222. {
  2223. int rc = 0;
  2224. if (!inst || !buf) {
  2225. d_vpr_e("%s: invalid params\n", __func__);
  2226. return -EINVAL;
  2227. }
  2228. msm_vidc_unmap_driver_buf(inst, buf);
  2229. msm_vidc_memory_put_dmabuf(inst, buf->dmabuf);
  2230. /* delete the buffer from buffers->list */
  2231. list_del(&buf->list);
  2232. msm_memory_free(inst, buf);
  2233. return rc;
  2234. }
  2235. struct msm_vidc_buffer *msm_vidc_get_driver_buf(struct msm_vidc_inst *inst,
  2236. struct vb2_buffer *vb2)
  2237. {
  2238. int rc = 0;
  2239. struct msm_vidc_buffer *buf = NULL;
  2240. struct msm_vidc_buffers *buffers;
  2241. enum msm_vidc_buffer_type buf_type;
  2242. if (!inst || !vb2) {
  2243. d_vpr_e("%s: invalid params\n", __func__);
  2244. return NULL;
  2245. }
  2246. buf_type = v4l2_type_to_driver(vb2->type, __func__);
  2247. if (!buf_type)
  2248. return NULL;
  2249. buffers = msm_vidc_get_buffers(inst, buf_type, __func__);
  2250. if (!buffers)
  2251. return NULL;
  2252. buf = msm_memory_alloc(inst, MSM_MEM_POOL_BUFFER);
  2253. if (!buf) {
  2254. i_vpr_e(inst, "%s: alloc failed\n", __func__);
  2255. return NULL;
  2256. }
  2257. INIT_LIST_HEAD(&buf->list);
  2258. list_add_tail(&buf->list, &buffers->list);
  2259. rc = vb2_buffer_to_driver(vb2, buf);
  2260. if (rc)
  2261. goto error;
  2262. buf->dmabuf = msm_vidc_memory_get_dmabuf(inst, buf->fd);
  2263. if (!buf->dmabuf)
  2264. goto error;
  2265. /* treat every buffer as deferred buffer initially */
  2266. buf->attr |= MSM_VIDC_ATTR_DEFERRED;
  2267. rc = msm_vidc_map_driver_buf(inst, buf);
  2268. if (rc)
  2269. goto error;
  2270. return buf;
  2271. error:
  2272. msm_vidc_memory_put_dmabuf(inst, buf->dmabuf);
  2273. list_del(&buf->list);
  2274. msm_memory_free(inst, buf);
  2275. return NULL;
  2276. }
  2277. struct msm_vidc_buffer *get_meta_buffer(struct msm_vidc_inst *inst,
  2278. struct msm_vidc_buffer *buf)
  2279. {
  2280. struct msm_vidc_buffer *mbuf;
  2281. struct msm_vidc_buffers *buffers;
  2282. bool found = false;
  2283. if (!inst || !buf) {
  2284. d_vpr_e("%s: invalid params\n", __func__);
  2285. return NULL;
  2286. }
  2287. if (buf->type == MSM_VIDC_BUF_INPUT) {
  2288. buffers = &inst->buffers.input_meta;
  2289. } else if (buf->type == MSM_VIDC_BUF_OUTPUT) {
  2290. buffers = &inst->buffers.output_meta;
  2291. } else {
  2292. i_vpr_e(inst, "%s: invalid buffer type %d\n",
  2293. __func__, buf->type);
  2294. return NULL;
  2295. }
  2296. list_for_each_entry(mbuf, &buffers->list, list) {
  2297. if (mbuf->index == buf->index) {
  2298. found = true;
  2299. break;
  2300. }
  2301. }
  2302. if (!found)
  2303. return NULL;
  2304. return mbuf;
  2305. }
  2306. bool msm_vidc_is_super_buffer(struct msm_vidc_inst *inst)
  2307. {
  2308. struct msm_vidc_inst_capability *capability = NULL;
  2309. if (!inst || !inst->capabilities) {
  2310. d_vpr_e("%s: Invalid params\n", __func__);
  2311. return false;
  2312. }
  2313. capability = inst->capabilities;
  2314. return !!capability->cap[SUPER_FRAME].value;
  2315. }
  2316. static bool is_single_session(struct msm_vidc_inst *inst)
  2317. {
  2318. struct msm_vidc_core *core;
  2319. u32 count = 0;
  2320. if (!inst) {
  2321. d_vpr_e("%s: Invalid params\n", __func__);
  2322. return false;
  2323. }
  2324. core = inst->core;
  2325. core_lock(core, __func__);
  2326. list_for_each_entry(inst, &core->instances, list)
  2327. count++;
  2328. core_unlock(core, __func__);
  2329. return count == 1;
  2330. }
  2331. void msm_vidc_allow_dcvs(struct msm_vidc_inst *inst)
  2332. {
  2333. bool allow = false;
  2334. struct msm_vidc_core *core;
  2335. u32 fps;
  2336. if (!inst || !inst->core || !inst->capabilities) {
  2337. d_vpr_e("%s: Invalid args: %pK\n", __func__, inst);
  2338. return;
  2339. }
  2340. core = inst->core;
  2341. allow = !msm_vidc_clock_voting;
  2342. if (!allow) {
  2343. i_vpr_h(inst, "%s: core_clock_voting is set\n", __func__);
  2344. goto exit;
  2345. }
  2346. allow = core->capabilities[DCVS].value;
  2347. if (!allow) {
  2348. i_vpr_h(inst, "%s: core doesn't support dcvs\n", __func__);
  2349. goto exit;
  2350. }
  2351. allow = !inst->decode_batch.enable;
  2352. if (!allow) {
  2353. i_vpr_h(inst, "%s: decode_batching enabled\n", __func__);
  2354. goto exit;
  2355. }
  2356. allow = !msm_vidc_is_super_buffer(inst);
  2357. if (!allow) {
  2358. i_vpr_h(inst, "%s: encode_batching(super_buffer) enabled\n", __func__);
  2359. goto exit;
  2360. }
  2361. allow = !is_thumbnail_session(inst);
  2362. if (!allow) {
  2363. i_vpr_h(inst, "%s: thumbnail session\n", __func__);
  2364. goto exit;
  2365. }
  2366. allow = is_realtime_session(inst);
  2367. if (!allow) {
  2368. i_vpr_h(inst, "%s: non-realtime session\n", __func__);
  2369. goto exit;
  2370. }
  2371. allow = !is_image_session(inst);
  2372. if (!allow) {
  2373. i_vpr_h(inst, "%s: image session\n", __func__);
  2374. goto exit;
  2375. }
  2376. allow = !is_lowlatency_session(inst);
  2377. if (!allow) {
  2378. i_vpr_h(inst, "%s: lowlatency session\n", __func__);
  2379. goto exit;
  2380. }
  2381. fps = msm_vidc_get_fps(inst);
  2382. if (is_decode_session(inst) &&
  2383. fps >= inst->capabilities->cap[FRAME_RATE].max) {
  2384. allow = false;
  2385. i_vpr_h(inst, "%s: unsupported fps %d\n", __func__, fps);
  2386. goto exit;
  2387. }
  2388. exit:
  2389. i_vpr_hp(inst, "%s: dcvs: %s\n", __func__, allow ? "enabled" : "disabled");
  2390. inst->power.dcvs_flags = 0;
  2391. inst->power.dcvs_mode = allow;
  2392. }
  2393. bool msm_vidc_allow_decode_batch(struct msm_vidc_inst *inst)
  2394. {
  2395. struct msm_vidc_inst_capability *capability;
  2396. struct msm_vidc_core *core;
  2397. bool allow = false;
  2398. u32 value = 0;
  2399. if (!inst || !inst->core || !inst->capabilities) {
  2400. d_vpr_e("%s: invalid params\n", __func__);
  2401. return false;
  2402. }
  2403. core = inst->core;
  2404. capability = inst->capabilities;
  2405. allow = inst->decode_batch.enable;
  2406. if (!allow) {
  2407. i_vpr_h(inst, "%s: batching already disabled\n", __func__);
  2408. goto exit;
  2409. }
  2410. allow = core->capabilities[DECODE_BATCH].value;
  2411. if (!allow) {
  2412. i_vpr_h(inst, "%s: core doesn't support batching\n", __func__);
  2413. goto exit;
  2414. }
  2415. allow = is_single_session(inst);
  2416. if (!allow) {
  2417. i_vpr_h(inst, "%s: multiple sessions running\n", __func__);
  2418. goto exit;
  2419. }
  2420. allow = is_decode_session(inst);
  2421. if (!allow) {
  2422. i_vpr_h(inst, "%s: not a decoder session\n", __func__);
  2423. goto exit;
  2424. }
  2425. allow = !is_thumbnail_session(inst);
  2426. if (!allow) {
  2427. i_vpr_h(inst, "%s: thumbnail session\n", __func__);
  2428. goto exit;
  2429. }
  2430. allow = !is_image_session(inst);
  2431. if (!allow) {
  2432. i_vpr_h(inst, "%s: image session\n", __func__);
  2433. goto exit;
  2434. }
  2435. allow = is_realtime_session(inst);
  2436. if (!allow) {
  2437. i_vpr_h(inst, "%s: non-realtime session\n", __func__);
  2438. goto exit;
  2439. }
  2440. allow = !is_lowlatency_session(inst);
  2441. if (!allow) {
  2442. i_vpr_h(inst, "%s: lowlatency session\n", __func__);
  2443. goto exit;
  2444. }
  2445. value = msm_vidc_get_fps(inst);
  2446. allow = value < capability->cap[BATCH_FPS].value;
  2447. if (!allow) {
  2448. i_vpr_h(inst, "%s: unsupported fps %u, max %u\n", __func__,
  2449. value, capability->cap[BATCH_FPS].value);
  2450. goto exit;
  2451. }
  2452. value = msm_vidc_get_mbs_per_frame(inst);
  2453. allow = value < capability->cap[BATCH_MBPF].value;
  2454. if (!allow) {
  2455. i_vpr_h(inst, "%s: unsupported mbpf %u, max %u\n", __func__,
  2456. value, capability->cap[BATCH_MBPF].value);
  2457. goto exit;
  2458. }
  2459. exit:
  2460. i_vpr_hp(inst, "%s: batching: %s\n", __func__, allow ? "enabled" : "disabled");
  2461. return allow;
  2462. }
  2463. static void msm_vidc_update_input_cr(struct msm_vidc_inst *inst, u32 idx, u32 cr)
  2464. {
  2465. struct msm_vidc_input_cr_data *temp, *next;
  2466. bool found = false;
  2467. list_for_each_entry_safe(temp, next, &inst->enc_input_crs, list) {
  2468. if (temp->index == idx) {
  2469. temp->input_cr = cr;
  2470. found = true;
  2471. break;
  2472. }
  2473. }
  2474. if (!found) {
  2475. temp = kzalloc(sizeof(*temp), GFP_KERNEL);
  2476. if (!temp) {
  2477. i_vpr_e(inst, "%s: malloc failure.\n", __func__);
  2478. return;
  2479. }
  2480. temp->index = idx;
  2481. temp->input_cr = cr;
  2482. list_add_tail(&temp->list, &inst->enc_input_crs);
  2483. }
  2484. }
  2485. static void msm_vidc_free_input_cr_list(struct msm_vidc_inst *inst)
  2486. {
  2487. struct msm_vidc_input_cr_data *temp, *next;
  2488. list_for_each_entry_safe(temp, next, &inst->enc_input_crs, list) {
  2489. list_del(&temp->list);
  2490. kfree(temp);
  2491. }
  2492. INIT_LIST_HEAD(&inst->enc_input_crs);
  2493. }
  2494. void msm_vidc_free_capabililty_list(struct msm_vidc_inst *inst,
  2495. enum msm_vidc_ctrl_list_type list_type)
  2496. {
  2497. struct msm_vidc_inst_cap_entry *temp = NULL, *next = NULL;
  2498. if (list_type & CHILD_LIST) {
  2499. list_for_each_entry_safe(temp, next, &inst->children.list, list) {
  2500. list_del(&temp->list);
  2501. kfree(temp);
  2502. }
  2503. INIT_LIST_HEAD(&inst->children.list);
  2504. }
  2505. temp = NULL;
  2506. next = NULL;
  2507. if (list_type & FW_LIST) {
  2508. list_for_each_entry_safe(temp, next, &inst->firmware.list, list) {
  2509. list_del(&temp->list);
  2510. kfree(temp);
  2511. }
  2512. INIT_LIST_HEAD(&inst->firmware.list);
  2513. }
  2514. }
  2515. void msm_vidc_update_stats(struct msm_vidc_inst *inst,
  2516. struct msm_vidc_buffer *buf, enum msm_vidc_debugfs_event etype)
  2517. {
  2518. if (!inst || !buf || !inst->capabilities) {
  2519. d_vpr_e("%s: invalid params\n", __func__);
  2520. return;
  2521. }
  2522. if ((is_decode_session(inst) && etype == MSM_VIDC_DEBUGFS_EVENT_ETB) ||
  2523. (is_encode_session(inst) && etype == MSM_VIDC_DEBUGFS_EVENT_FBD))
  2524. inst->stats.data_size += buf->data_size;
  2525. msm_vidc_debugfs_update(inst, etype);
  2526. }
  2527. void msm_vidc_print_stats(struct msm_vidc_inst *inst)
  2528. {
  2529. u32 frame_rate, operating_rate, achieved_fps, priority, etb, ebd, ftb, fbd, dt_ms;
  2530. u64 bitrate_kbps = 0, time_ms = ktime_get_ns() / 1000 / 1000;
  2531. if (!inst || !inst->capabilities) {
  2532. d_vpr_e("%s: invalid params\n", __func__);
  2533. return;
  2534. }
  2535. etb = inst->debug_count.etb - inst->stats.count.etb;
  2536. ebd = inst->debug_count.ebd - inst->stats.count.ebd;
  2537. ftb = inst->debug_count.ftb - inst->stats.count.ftb;
  2538. fbd = inst->debug_count.fbd - inst->stats.count.fbd;
  2539. frame_rate = inst->capabilities->cap[FRAME_RATE].value >> 16;
  2540. operating_rate = inst->capabilities->cap[OPERATING_RATE].value >> 16;
  2541. priority = inst->capabilities->cap[PRIORITY].value;
  2542. dt_ms = time_ms - inst->stats.time_ms;
  2543. achieved_fps = (fbd * 1000) / dt_ms;
  2544. bitrate_kbps = (inst->stats.data_size * 8 * 1000) / (dt_ms * 1024);
  2545. i_vpr_hp(inst,
  2546. "stats: counts (etb,ebd,ftb,fbd): %u %u %u %u (total %llu %llu %llu %llu), achieved bitrate %lldKbps fps %u/s, frame rate %u, operating rate %u, priority %u, dt %ums\n",
  2547. etb, ebd, ftb, fbd, inst->debug_count.etb, inst->debug_count.ebd,
  2548. inst->debug_count.ftb, inst->debug_count.fbd,
  2549. bitrate_kbps, achieved_fps, frame_rate, operating_rate, priority, dt_ms);
  2550. inst->stats.count = inst->debug_count;
  2551. inst->stats.data_size = 0;
  2552. inst->stats.time_ms = time_ms;
  2553. }
  2554. int schedule_stats_work(struct msm_vidc_inst *inst)
  2555. {
  2556. struct msm_vidc_core *core;
  2557. if (!inst || !inst->core) {
  2558. d_vpr_e("%s: invalid params\n", __func__);
  2559. return -EINVAL;
  2560. }
  2561. /**
  2562. * Hfi session is already closed and inst also going to be
  2563. * closed soon. So skip scheduling new stats_work to avoid
  2564. * use-after-free issues with close sequence.
  2565. */
  2566. if (!inst->packet) {
  2567. i_vpr_e(inst, "skip scheduling stats_work\n");
  2568. return 0;
  2569. }
  2570. core = inst->core;
  2571. mod_delayed_work(inst->response_workq, &inst->stats_work,
  2572. msecs_to_jiffies(core->capabilities[STATS_TIMEOUT_MS].value));
  2573. return 0;
  2574. }
  2575. int cancel_stats_work_sync(struct msm_vidc_inst *inst)
  2576. {
  2577. if (!inst) {
  2578. d_vpr_e("%s: Invalid arguments\n", __func__);
  2579. return -EINVAL;
  2580. }
  2581. cancel_delayed_work_sync(&inst->stats_work);
  2582. return 0;
  2583. }
  2584. void msm_vidc_stats_handler(struct work_struct *work)
  2585. {
  2586. struct msm_vidc_inst *inst;
  2587. inst = container_of(work, struct msm_vidc_inst, stats_work.work);
  2588. inst = get_inst_ref(g_core, inst);
  2589. if (!inst || !inst->packet) {
  2590. d_vpr_e("%s: invalid params\n", __func__);
  2591. return;
  2592. }
  2593. inst_lock(inst, __func__);
  2594. msm_vidc_print_stats(inst);
  2595. schedule_stats_work(inst);
  2596. inst_unlock(inst, __func__);
  2597. put_inst(inst);
  2598. }
/*
 * msm_vidc_queue_buffer() - submit one driver buffer (and its meta buffer,
 * if metadata is enabled) to firmware via venus_hfi.
 *
 * Side effects on success: clears ATTR_DEFERRED and sets ATTR_QUEUED on the
 * buffer (and meta buffer), bumps the power buffer counter for input
 * buffers, and updates debugfs/interval statistics.
 * Returns 0 on success or a negative error code.
 */
static int msm_vidc_queue_buffer(struct msm_vidc_inst *inst, struct msm_vidc_buffer *buf)
{
	struct msm_vidc_buffer *meta;
	enum msm_vidc_debugfs_event etype;
	int rc = 0;
	u32 cr = 0;

	if (!inst || !buf || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	/*
	 * encoder input: latch the per-buffer compression ratio set via the
	 * ENC_IP_CR control, then reset the control for the next buffer
	 */
	if (is_encode_session(inst) && is_input_buffer(buf->type)) {
		cr = inst->capabilities->cap[ENC_IP_CR].value;
		msm_vidc_update_input_cr(inst, buf->index, cr);
		msm_vidc_update_cap_value(inst, ENC_IP_CR, 0, __func__);
	}

	/*
	 * decoder input: mark codec-config (header) buffers when the
	 * CODEC_CONFIG control was set, consuming the one-shot flag
	 */
	if (is_decode_session(inst) && is_input_buffer(buf->type) &&
		inst->capabilities->cap[CODEC_CONFIG].value) {
		buf->flags |= MSM_VIDC_BUF_FLAG_CODECCONFIG;
		msm_vidc_update_cap_value(inst, CODEC_CONFIG, 0, __func__);
	}

	/* decoder output may still be referenced by firmware (read-only) */
	if (is_decode_session(inst) && is_output_buffer(buf->type)) {
		rc = msm_vidc_process_readonly_buffers(inst, buf);
		if (rc)
			return rc;
	}

	print_vidc_buffer(VIDC_HIGH, "high", "qbuf", inst, buf);
	meta = get_meta_buffer(inst, buf);
	if (meta)
		print_vidc_buffer(VIDC_LOW, "low ", "qbuf", inst, meta);

	/* metadata enabled but the paired meta buffer was not queued yet */
	if (!meta && is_meta_enabled(inst, buf->type)) {
		print_vidc_buffer(VIDC_ERR, "err ", "missing meta for", inst, buf);
		return -EINVAL;
	}

	if (msm_vidc_is_super_buffer(inst) && is_input_buffer(buf->type))
		rc = venus_hfi_queue_super_buffer(inst, buf, meta);
	else
		rc = venus_hfi_queue_buffer(inst, buf, meta);
	if (rc)
		return rc;

	/* firmware owns the buffer now: deferred -> queued */
	buf->attr &= ~MSM_VIDC_ATTR_DEFERRED;
	buf->attr |= MSM_VIDC_ATTR_QUEUED;
	if (meta) {
		meta->attr &= ~MSM_VIDC_ATTR_DEFERRED;
		meta->attr |= MSM_VIDC_ATTR_QUEUED;
	}

	if (is_input_buffer(buf->type))
		inst->power.buffer_counter++;

	if (is_input_buffer(buf->type))
		etype = MSM_VIDC_DEBUGFS_EVENT_ETB;
	else
		etype = MSM_VIDC_DEBUGFS_EVENT_FTB;
	msm_vidc_update_stats(inst, buf, etype);

	return 0;
}
  2653. int msm_vidc_queue_deferred_buffers(struct msm_vidc_inst *inst, enum msm_vidc_buffer_type buf_type)
  2654. {
  2655. struct msm_vidc_buffers *buffers;
  2656. struct msm_vidc_buffer *buf;
  2657. int rc = 0;
  2658. if (!inst || !buf_type) {
  2659. d_vpr_e("%s: invalid params\n", __func__);
  2660. return -EINVAL;
  2661. }
  2662. buffers = msm_vidc_get_buffers(inst, buf_type, __func__);
  2663. if (!buffers)
  2664. return -EINVAL;
  2665. msm_vidc_scale_power(inst, true);
  2666. list_for_each_entry(buf, &buffers->list, list) {
  2667. if (!(buf->attr & MSM_VIDC_ATTR_DEFERRED))
  2668. continue;
  2669. rc = msm_vidc_queue_buffer(inst, buf);
  2670. if (rc)
  2671. return rc;
  2672. }
  2673. return 0;
  2674. }
  2675. int msm_vidc_queue_buffer_single(struct msm_vidc_inst *inst, struct vb2_buffer *vb2)
  2676. {
  2677. int rc = 0;
  2678. struct msm_vidc_buffer *buf;
  2679. enum msm_vidc_allow allow;
  2680. if (!inst || !vb2) {
  2681. d_vpr_e("%s: invalid params\n", __func__);
  2682. return -EINVAL;
  2683. }
  2684. buf = msm_vidc_get_driver_buf(inst, vb2);
  2685. if (!buf)
  2686. return -EINVAL;
  2687. allow = msm_vidc_allow_qbuf(inst, vb2->type);
  2688. if (allow == MSM_VIDC_DISALLOW) {
  2689. i_vpr_e(inst, "%s: qbuf not allowed\n", __func__);
  2690. return -EINVAL;
  2691. } else if (allow == MSM_VIDC_DEFER) {
  2692. print_vidc_buffer(VIDC_LOW, "low ", "qbuf deferred", inst, buf);
  2693. return 0;
  2694. }
  2695. msm_vidc_scale_power(inst, is_input_buffer(buf->type));
  2696. rc = msm_vidc_queue_buffer(inst, buf);
  2697. if (rc)
  2698. return rc;
  2699. return rc;
  2700. }
/*
 * msm_vidc_destroy_internal_buffer() - tear down one internal (firmware)
 * buffer: unmap and free its map node, free its allocation node, then
 * unlink and free the buffer node itself. Each node is located by dmabuf
 * identity in the per-type mappings/allocations/buffers lists.
 * Also resets the per-type size/count bookkeeping to zero.
 * No-op (returns 0) for non-internal buffer types.
 */
int msm_vidc_destroy_internal_buffer(struct msm_vidc_inst *inst,
	struct msm_vidc_buffer *buffer)
{
	struct msm_vidc_buffers *buffers;
	struct msm_vidc_allocations *allocations;
	struct msm_vidc_mappings *mappings;
	struct msm_vidc_alloc *alloc, *alloc_dummy;
	struct msm_vidc_map *map, *map_dummy;
	struct msm_vidc_buffer *buf, *dummy;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	if (!is_internal_buffer(buffer->type)) {
		i_vpr_e(inst, "%s: type: %s is not internal\n",
			__func__, buf_name(buffer->type));
		return 0;
	}

	i_vpr_h(inst, "%s: destroy: type: %8s, size: %9u, device_addr %#x\n", __func__,
		buf_name(buffer->type), buffer->buffer_size, buffer->device_addr);

	buffers = msm_vidc_get_buffers(inst, buffer->type, __func__);
	if (!buffers)
		return -EINVAL;
	allocations = msm_vidc_get_allocations(inst, buffer->type, __func__);
	if (!allocations)
		return -EINVAL;
	mappings = msm_vidc_get_mappings(inst, buffer->type, __func__);
	if (!mappings)
		return -EINVAL;

	/* 1) device unmap and drop the map tracking node */
	list_for_each_entry_safe(map, map_dummy, &mappings->list, list) {
		if (map->dmabuf == buffer->dmabuf) {
			msm_vidc_memory_unmap(inst->core, map);
			list_del(&map->list);
			msm_memory_free(inst, map);
			break;
		}
	}

	/* 2) release the backing memory and its tracking node */
	list_for_each_entry_safe(alloc, alloc_dummy, &allocations->list, list) {
		if (alloc->dmabuf == buffer->dmabuf) {
			msm_vidc_memory_free(inst->core, alloc);
			list_del(&alloc->list);
			msm_memory_free(inst, alloc);
			break;
		}
	}

	/* 3) finally drop the buffer node itself */
	list_for_each_entry_safe(buf, dummy, &buffers->list, list) {
		if (buf->dmabuf == buffer->dmabuf) {
			list_del(&buf->list);
			msm_memory_free(inst, buf);
			break;
		}
	}

	/* reset per-type bookkeeping so the next get/create starts clean */
	buffers->size = 0;
	buffers->min_count = buffers->extra_count = buffers->actual_count = 0;

	return 0;
}
  2757. int msm_vidc_get_internal_buffers(struct msm_vidc_inst *inst,
  2758. enum msm_vidc_buffer_type buffer_type)
  2759. {
  2760. u32 buf_size;
  2761. u32 buf_count;
  2762. struct msm_vidc_core *core;
  2763. struct msm_vidc_buffers *buffers;
  2764. if (!inst || !inst->core) {
  2765. d_vpr_e("%s: invalid params\n", __func__);
  2766. return -EINVAL;
  2767. }
  2768. core = inst->core;
  2769. buf_size = call_session_op(core, buffer_size,
  2770. inst, buffer_type);
  2771. buf_count = call_session_op(core, min_count,
  2772. inst, buffer_type);
  2773. buffers = msm_vidc_get_buffers(inst, buffer_type, __func__);
  2774. if (!buffers)
  2775. return -EINVAL;
  2776. if (buf_size <= buffers->size &&
  2777. buf_count <= buffers->min_count) {
  2778. buffers->reuse = true;
  2779. } else {
  2780. buffers->reuse = false;
  2781. buffers->size = buf_size;
  2782. buffers->min_count = buf_count;
  2783. }
  2784. return 0;
  2785. }
  2786. int msm_vidc_create_internal_buffer(struct msm_vidc_inst *inst,
  2787. enum msm_vidc_buffer_type buffer_type, u32 index)
  2788. {
  2789. int rc = 0;
  2790. struct msm_vidc_buffers *buffers;
  2791. struct msm_vidc_allocations *allocations;
  2792. struct msm_vidc_mappings *mappings;
  2793. struct msm_vidc_buffer *buffer;
  2794. struct msm_vidc_alloc *alloc;
  2795. struct msm_vidc_map *map;
  2796. if (!inst || !inst->core) {
  2797. d_vpr_e("%s: invalid params\n", __func__);
  2798. return -EINVAL;
  2799. }
  2800. if (!is_internal_buffer(buffer_type)) {
  2801. i_vpr_e(inst, "%s: type %s is not internal\n",
  2802. __func__, buf_name(buffer_type));
  2803. return 0;
  2804. }
  2805. buffers = msm_vidc_get_buffers(inst, buffer_type, __func__);
  2806. if (!buffers)
  2807. return -EINVAL;
  2808. allocations = msm_vidc_get_allocations(inst, buffer_type, __func__);
  2809. if (!allocations)
  2810. return -EINVAL;
  2811. mappings = msm_vidc_get_mappings(inst, buffer_type, __func__);
  2812. if (!mappings)
  2813. return -EINVAL;
  2814. if (!buffers->size)
  2815. return 0;
  2816. buffer = msm_memory_alloc(inst, MSM_MEM_POOL_BUFFER);
  2817. if (!buffer) {
  2818. i_vpr_e(inst, "%s: buf alloc failed\n", __func__);
  2819. return -ENOMEM;
  2820. }
  2821. INIT_LIST_HEAD(&buffer->list);
  2822. buffer->type = buffer_type;
  2823. buffer->index = index;
  2824. buffer->buffer_size = buffers->size;
  2825. list_add_tail(&buffer->list, &buffers->list);
  2826. alloc = msm_memory_alloc(inst, MSM_MEM_POOL_ALLOC);
  2827. if (!alloc) {
  2828. i_vpr_e(inst, "%s: alloc failed\n", __func__);
  2829. return -ENOMEM;
  2830. }
  2831. INIT_LIST_HEAD(&alloc->list);
  2832. alloc->type = buffer_type;
  2833. alloc->region = msm_vidc_get_buffer_region(inst,
  2834. buffer_type, __func__);
  2835. alloc->size = buffer->buffer_size;
  2836. alloc->secure = is_secure_region(alloc->region);
  2837. rc = msm_vidc_memory_alloc(inst->core, alloc);
  2838. if (rc)
  2839. return -ENOMEM;
  2840. list_add_tail(&alloc->list, &allocations->list);
  2841. map = msm_memory_alloc(inst, MSM_MEM_POOL_MAP);
  2842. if (!map) {
  2843. i_vpr_e(inst, "%s: map alloc failed\n", __func__);
  2844. return -ENOMEM;
  2845. }
  2846. INIT_LIST_HEAD(&map->list);
  2847. map->type = alloc->type;
  2848. map->region = alloc->region;
  2849. map->dmabuf = alloc->dmabuf;
  2850. rc = msm_vidc_memory_map(inst->core, map);
  2851. if (rc)
  2852. return -ENOMEM;
  2853. list_add_tail(&map->list, &mappings->list);
  2854. buffer->dmabuf = alloc->dmabuf;
  2855. buffer->device_addr = map->device_addr;
  2856. i_vpr_h(inst, "%s: create: type: %8s, size: %9u, device_addr %#x\n", __func__,
  2857. buf_name(buffer_type), buffers->size, buffer->device_addr);
  2858. return 0;
  2859. }
  2860. int msm_vidc_create_internal_buffers(struct msm_vidc_inst *inst,
  2861. enum msm_vidc_buffer_type buffer_type)
  2862. {
  2863. int rc = 0;
  2864. struct msm_vidc_buffers *buffers;
  2865. int i;
  2866. if (!inst || !inst->core) {
  2867. d_vpr_e("%s: invalid params\n", __func__);
  2868. return -EINVAL;
  2869. }
  2870. buffers = msm_vidc_get_buffers(inst, buffer_type, __func__);
  2871. if (!buffers)
  2872. return -EINVAL;
  2873. if (buffers->reuse) {
  2874. i_vpr_l(inst, "%s: reuse enabled for %s\n", __func__, buf_name(buffer_type));
  2875. return 0;
  2876. }
  2877. for (i = 0; i < buffers->min_count; i++) {
  2878. rc = msm_vidc_create_internal_buffer(inst, buffer_type, i);
  2879. if (rc)
  2880. return rc;
  2881. }
  2882. return rc;
  2883. }
  2884. int msm_vidc_queue_internal_buffers(struct msm_vidc_inst *inst,
  2885. enum msm_vidc_buffer_type buffer_type)
  2886. {
  2887. int rc = 0;
  2888. struct msm_vidc_buffers *buffers;
  2889. struct msm_vidc_buffer *buffer, *dummy;
  2890. if (!inst || !inst->core) {
  2891. d_vpr_e("%s: invalid params\n", __func__);
  2892. return -EINVAL;
  2893. }
  2894. if (!is_internal_buffer(buffer_type)) {
  2895. i_vpr_e(inst, "%s: %s is not internal\n", __func__, buf_name(buffer_type));
  2896. return 0;
  2897. }
  2898. buffers = msm_vidc_get_buffers(inst, buffer_type, __func__);
  2899. if (!buffers)
  2900. return -EINVAL;
  2901. if (buffers->reuse) {
  2902. i_vpr_l(inst, "%s: reuse enabled for %s buf\n",
  2903. __func__, buf_name(buffer_type));
  2904. return 0;
  2905. }
  2906. list_for_each_entry_safe(buffer, dummy, &buffers->list, list) {
  2907. /* do not queue pending release buffers */
  2908. if (buffer->flags & MSM_VIDC_ATTR_PENDING_RELEASE)
  2909. continue;
  2910. /* do not queue already queued buffers */
  2911. if (buffer->attr & MSM_VIDC_ATTR_QUEUED)
  2912. continue;
  2913. rc = venus_hfi_queue_buffer(inst, buffer, NULL);
  2914. if (rc)
  2915. return rc;
  2916. /* mark queued */
  2917. buffer->attr |= MSM_VIDC_ATTR_QUEUED;
  2918. i_vpr_h(inst, "%s: queue: type: %8s, size: %9u, device_addr %#x\n", __func__,
  2919. buf_name(buffer->type), buffer->buffer_size, buffer->device_addr);
  2920. }
  2921. return 0;
  2922. }
  2923. int msm_vidc_alloc_and_queue_session_internal_buffers(struct msm_vidc_inst *inst,
  2924. enum msm_vidc_buffer_type buffer_type)
  2925. {
  2926. int rc = 0;
  2927. if (!inst || !inst->core) {
  2928. d_vpr_e("%s: invalid params\n", __func__);
  2929. return -EINVAL;
  2930. }
  2931. if (buffer_type != MSM_VIDC_BUF_ARP &&
  2932. buffer_type != MSM_VIDC_BUF_PERSIST) {
  2933. i_vpr_e(inst, "%s: invalid buffer type: %s\n",
  2934. __func__, buf_name(buffer_type));
  2935. rc = -EINVAL;
  2936. goto exit;
  2937. }
  2938. rc = msm_vidc_get_internal_buffers(inst, buffer_type);
  2939. if (rc)
  2940. goto exit;
  2941. rc = msm_vidc_create_internal_buffers(inst, buffer_type);
  2942. if (rc)
  2943. goto exit;
  2944. rc = msm_vidc_queue_internal_buffers(inst, buffer_type);
  2945. if (rc)
  2946. goto exit;
  2947. exit:
  2948. return rc;
  2949. }
  2950. int msm_vidc_release_internal_buffers(struct msm_vidc_inst *inst,
  2951. enum msm_vidc_buffer_type buffer_type)
  2952. {
  2953. int rc = 0;
  2954. struct msm_vidc_buffers *buffers;
  2955. struct msm_vidc_buffer *buffer, *dummy;
  2956. if (!inst || !inst->core) {
  2957. d_vpr_e("%s: invalid params\n", __func__);
  2958. return -EINVAL;
  2959. }
  2960. if (!is_internal_buffer(buffer_type)) {
  2961. i_vpr_e(inst, "%s: %s is not internal\n",
  2962. __func__, buf_name(buffer_type));
  2963. return 0;
  2964. }
  2965. buffers = msm_vidc_get_buffers(inst, buffer_type, __func__);
  2966. if (!buffers)
  2967. return -EINVAL;
  2968. if (buffers->reuse) {
  2969. i_vpr_l(inst, "%s: reuse enabled for %s buf\n",
  2970. __func__, buf_name(buffer_type));
  2971. return 0;
  2972. }
  2973. list_for_each_entry_safe(buffer, dummy, &buffers->list, list) {
  2974. /* do not release already pending release buffers */
  2975. if (buffer->attr & MSM_VIDC_ATTR_PENDING_RELEASE)
  2976. continue;
  2977. /* release only queued buffers */
  2978. if (!(buffer->attr & MSM_VIDC_ATTR_QUEUED))
  2979. continue;
  2980. rc = venus_hfi_release_buffer(inst, buffer);
  2981. if (rc)
  2982. return rc;
  2983. /* mark pending release */
  2984. buffer->attr |= MSM_VIDC_ATTR_PENDING_RELEASE;
  2985. i_vpr_h(inst, "%s: release: type: %8s, size: %9u, device_addr %#x\n", __func__,
  2986. buf_name(buffer->type), buffer->buffer_size, buffer->device_addr);
  2987. }
  2988. return 0;
  2989. }
/*
 * msm_vidc_vb2_buffer_done() - complete the vb2 buffer that corresponds to
 * driver buffer @buf: locate the ACTIVE vb2 buffer with the same index on
 * the (still-streaming) queue for this port, copy flags/timestamp/
 * bytesused across and call vb2_buffer_done().
 * Returns 0 on success, -EINVAL when the port is not streaming or no
 * matching vb2 buffer is found.
 */
int msm_vidc_vb2_buffer_done(struct msm_vidc_inst *inst,
	struct msm_vidc_buffer *buf)
{
	int type, port, state;
	struct vb2_queue *q;
	struct vb2_buffer *vb2;
	struct vb2_v4l2_buffer *vbuf;
	bool found;

	if (!inst || !buf) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	/* driver buffer type -> v4l2 buffer type -> driver port index */
	type = v4l2_type_from_driver(buf->type, __func__);
	if (!type)
		return -EINVAL;
	port = v4l2_type_to_driver_port(inst, type, __func__);
	if (port < 0)
		return -EINVAL;

	q = &inst->vb2q[port];
	if (!q->streaming) {
		i_vpr_e(inst, "%s: port %d is not streaming\n",
			__func__, port);
		return -EINVAL;
	}

	/* find the ACTIVE (driver-owned) vb2 buffer with matching index */
	found = false;
	list_for_each_entry(vb2, &q->queued_list, queued_entry) {
		if (vb2->state != VB2_BUF_STATE_ACTIVE)
			continue;
		if (vb2->index == buf->index) {
			found = true;
			break;
		}
	}
	if (!found) {
		print_vidc_buffer(VIDC_ERR, "err ", "vb2 not found for", inst, buf);
		return -EINVAL;
	}

	/**
	 * v4l2 clears buffer state related flags. For driver errors
	 * send state as error to avoid skipping V4L2_BUF_FLAG_ERROR
	 * flag at v4l2 side.
	 */
	if (buf->flags & MSM_VIDC_BUF_FLAG_ERROR)
		state = VB2_BUF_STATE_ERROR;
	else
		state = VB2_BUF_STATE_DONE;

	/* propagate driver results to the v4l2/vb2 buffer */
	vbuf = to_vb2_v4l2_buffer(vb2);
	vbuf->flags = buf->flags;
	vb2->timestamp = buf->timestamp;
	vb2->planes[0].bytesused = buf->data_size + vb2->planes[0].data_offset;
	vb2_buffer_done(vb2, state);

	return 0;
}
  3043. int msm_vidc_event_queue_init(struct msm_vidc_inst *inst)
  3044. {
  3045. int rc = 0;
  3046. int index;
  3047. struct msm_vidc_core *core;
  3048. if (!inst || !inst->core) {
  3049. d_vpr_e("%s: invalid params\n", __func__);
  3050. return -EINVAL;
  3051. }
  3052. core = inst->core;
  3053. if (is_decode_session(inst))
  3054. index = 0;
  3055. else if (is_encode_session(inst))
  3056. index = 1;
  3057. else
  3058. return -EINVAL;
  3059. v4l2_fh_init(&inst->event_handler, &core->vdev[index].vdev);
  3060. v4l2_fh_add(&inst->event_handler);
  3061. return rc;
  3062. }
  3063. int msm_vidc_event_queue_deinit(struct msm_vidc_inst *inst)
  3064. {
  3065. int rc = 0;
  3066. if (!inst) {
  3067. d_vpr_e("%s: invalid params\n", __func__);
  3068. return -EINVAL;
  3069. }
  3070. /* do not deinit, if not already inited */
  3071. if (!inst->event_handler.vdev) {
  3072. i_vpr_e(inst, "%s: already not inited\n", __func__);
  3073. return 0;
  3074. }
  3075. v4l2_fh_del(&inst->event_handler);
  3076. v4l2_fh_exit(&inst->event_handler);
  3077. return rc;
  3078. }
  3079. static int vb2q_init(struct msm_vidc_inst *inst,
  3080. struct vb2_queue *q, enum v4l2_buf_type type)
  3081. {
  3082. int rc = 0;
  3083. struct msm_vidc_core *core;
  3084. if (!inst || !q || !inst->core) {
  3085. d_vpr_e("%s: invalid params\n", __func__);
  3086. return -EINVAL;
  3087. }
  3088. core = inst->core;
  3089. q->type = type;
  3090. q->io_modes = VB2_DMABUF;
  3091. q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
  3092. q->ops = core->vb2_ops;
  3093. q->mem_ops = core->vb2_mem_ops;
  3094. q->drv_priv = inst;
  3095. q->allow_zero_bytesused = 1;
  3096. q->copy_timestamp = 1;
  3097. rc = vb2_queue_init(q);
  3098. if (rc)
  3099. i_vpr_e(inst, "%s: vb2_queue_init failed for type %d\n",
  3100. __func__, type);
  3101. return rc;
  3102. }
  3103. int msm_vidc_vb2_queue_init(struct msm_vidc_inst *inst)
  3104. {
  3105. int rc = 0;
  3106. if (!inst) {
  3107. i_vpr_e(inst, "%s: invalid params\n", __func__);
  3108. return -EINVAL;
  3109. }
  3110. if (inst->vb2q_init) {
  3111. i_vpr_h(inst, "%s: vb2q already inited\n", __func__);
  3112. return 0;
  3113. }
  3114. rc = vb2q_init(inst, &inst->vb2q[INPUT_PORT], INPUT_MPLANE);
  3115. if (rc)
  3116. goto exit;
  3117. rc = vb2q_init(inst, &inst->vb2q[OUTPUT_PORT], OUTPUT_MPLANE);
  3118. if (rc)
  3119. goto fail_out_vb2q_init;
  3120. rc = vb2q_init(inst, &inst->vb2q[INPUT_META_PORT], INPUT_META_PLANE);
  3121. if (rc)
  3122. goto fail_in_meta_vb2q_init;
  3123. rc = vb2q_init(inst, &inst->vb2q[OUTPUT_META_PORT], OUTPUT_META_PLANE);
  3124. if (rc)
  3125. goto fail_out_meta_vb2q_init;
  3126. inst->vb2q_init = true;
  3127. return 0;
  3128. fail_out_meta_vb2q_init:
  3129. vb2_queue_release(&inst->vb2q[INPUT_META_PORT]);
  3130. fail_in_meta_vb2q_init:
  3131. vb2_queue_release(&inst->vb2q[OUTPUT_PORT]);
  3132. fail_out_vb2q_init:
  3133. vb2_queue_release(&inst->vb2q[INPUT_PORT]);
  3134. exit:
  3135. return rc;
  3136. }
  3137. int msm_vidc_vb2_queue_deinit(struct msm_vidc_inst *inst)
  3138. {
  3139. int rc = 0;
  3140. if (!inst) {
  3141. d_vpr_e("%s: invalid params\n", __func__);
  3142. return -EINVAL;
  3143. }
  3144. if (!inst->vb2q_init) {
  3145. i_vpr_h(inst, "%s: vb2q already deinited\n", __func__);
  3146. return 0;
  3147. }
  3148. vb2_queue_release(&inst->vb2q[OUTPUT_META_PORT]);
  3149. vb2_queue_release(&inst->vb2q[INPUT_META_PORT]);
  3150. vb2_queue_release(&inst->vb2q[OUTPUT_PORT]);
  3151. vb2_queue_release(&inst->vb2q[INPUT_PORT]);
  3152. inst->vb2q_init = false;
  3153. return rc;
  3154. }
  3155. int msm_vidc_add_session(struct msm_vidc_inst *inst)
  3156. {
  3157. int rc = 0;
  3158. struct msm_vidc_inst *i;
  3159. struct msm_vidc_core *core;
  3160. u32 count = 0;
  3161. if (!inst || !inst->core) {
  3162. d_vpr_e("%s: invalid params\n", __func__);
  3163. return -EINVAL;
  3164. }
  3165. core = inst->core;
  3166. if (!core->capabilities) {
  3167. i_vpr_e(inst, "%s: invalid params\n", __func__);
  3168. return -EINVAL;
  3169. }
  3170. core_lock(core, __func__);
  3171. list_for_each_entry(i, &core->instances, list)
  3172. count++;
  3173. if (count < core->capabilities[MAX_SESSION_COUNT].value) {
  3174. list_add_tail(&inst->list, &core->instances);
  3175. } else {
  3176. i_vpr_e(inst, "%s: max limit %d already running %d sessions\n",
  3177. __func__, core->capabilities[MAX_SESSION_COUNT].value, count);
  3178. rc = -EINVAL;
  3179. }
  3180. core_unlock(core, __func__);
  3181. return rc;
  3182. }
  3183. int msm_vidc_remove_session(struct msm_vidc_inst *inst)
  3184. {
  3185. struct msm_vidc_inst *i, *temp;
  3186. struct msm_vidc_core *core;
  3187. u32 count = 0;
  3188. if (!inst || !inst->core) {
  3189. d_vpr_e("%s: invalid params\n", __func__);
  3190. return -EINVAL;
  3191. }
  3192. core = inst->core;
  3193. core_lock(core, __func__);
  3194. list_for_each_entry_safe(i, temp, &core->instances, list) {
  3195. if (i->session_id == inst->session_id) {
  3196. list_del_init(&i->list);
  3197. list_add_tail(&i->list, &core->dangling_instances);
  3198. i_vpr_h(inst, "%s: removed session %#x\n",
  3199. __func__, i->session_id);
  3200. }
  3201. }
  3202. list_for_each_entry(i, &core->instances, list)
  3203. count++;
  3204. i_vpr_h(inst, "%s: remaining sessions %d\n", __func__, count);
  3205. core_unlock(core, __func__);
  3206. return 0;
  3207. }
  3208. static int msm_vidc_remove_dangling_session(struct msm_vidc_inst *inst)
  3209. {
  3210. struct msm_vidc_inst *i, *temp;
  3211. struct msm_vidc_core *core;
  3212. u32 count = 0;
  3213. if (!inst || !inst->core) {
  3214. d_vpr_e("%s: invalid params\n", __func__);
  3215. return -EINVAL;
  3216. }
  3217. core = inst->core;
  3218. core_lock(core, __func__);
  3219. list_for_each_entry_safe(i, temp, &core->dangling_instances, list) {
  3220. if (i->session_id == inst->session_id) {
  3221. list_del_init(&i->list);
  3222. i_vpr_h(inst, "%s: removed dangling session %#x\n",
  3223. __func__, i->session_id);
  3224. break;
  3225. }
  3226. }
  3227. list_for_each_entry(i, &core->dangling_instances, list)
  3228. count++;
  3229. i_vpr_h(inst, "%s: remaining dangling sessions %d\n", __func__, count);
  3230. core_unlock(core, __func__);
  3231. return 0;
  3232. }
  3233. int msm_vidc_session_open(struct msm_vidc_inst *inst)
  3234. {
  3235. int rc = 0;
  3236. if (!inst) {
  3237. d_vpr_e("%s: invalid params\n", __func__);
  3238. return -EINVAL;
  3239. }
  3240. inst->packet_size = 4096;
  3241. inst->packet = kzalloc(inst->packet_size, GFP_KERNEL);
  3242. if (!inst->packet) {
  3243. i_vpr_e(inst, "%s(): inst packet allocation failed\n", __func__);
  3244. return -ENOMEM;
  3245. }
  3246. rc = venus_hfi_session_open(inst);
  3247. if (rc)
  3248. goto error;
  3249. return 0;
  3250. error:
  3251. i_vpr_e(inst, "%s(): session open failed\n", __func__);
  3252. kfree(inst->packet);
  3253. inst->packet = NULL;
  3254. return rc;
  3255. }
  3256. int msm_vidc_session_set_codec(struct msm_vidc_inst *inst)
  3257. {
  3258. int rc = 0;
  3259. if (!inst) {
  3260. d_vpr_e("%s: invalid params\n", __func__);
  3261. return -EINVAL;
  3262. }
  3263. rc = venus_hfi_session_set_codec(inst);
  3264. if (rc)
  3265. return rc;
  3266. return 0;
  3267. }
  3268. int msm_vidc_session_set_secure_mode(struct msm_vidc_inst *inst)
  3269. {
  3270. int rc = 0;
  3271. if (!inst) {
  3272. d_vpr_e("%s: invalid params\n", __func__);
  3273. return -EINVAL;
  3274. }
  3275. rc = venus_hfi_session_set_secure_mode(inst);
  3276. if (rc)
  3277. return rc;
  3278. return 0;
  3279. }
  3280. int msm_vidc_session_set_default_header(struct msm_vidc_inst *inst)
  3281. {
  3282. int rc = 0;
  3283. u32 default_header = false;
  3284. if (!inst) {
  3285. d_vpr_e("%s: invalid params\n", __func__);
  3286. return -EINVAL;
  3287. }
  3288. default_header = inst->capabilities->cap[DEFAULT_HEADER].value;
  3289. i_vpr_h(inst, "%s: default header: %d", __func__, default_header);
  3290. rc = venus_hfi_session_property(inst,
  3291. HFI_PROP_DEC_DEFAULT_HEADER,
  3292. HFI_HOST_FLAGS_NONE,
  3293. get_hfi_port(inst, INPUT_PORT),
  3294. HFI_PAYLOAD_U32,
  3295. &default_header,
  3296. sizeof(u32));
  3297. if (rc)
  3298. i_vpr_e(inst, "%s: set property failed\n", __func__);
  3299. return rc;
  3300. }
  3301. int msm_vidc_session_streamon(struct msm_vidc_inst *inst,
  3302. enum msm_vidc_port_type port)
  3303. {
  3304. int rc = 0;
  3305. if (!inst || !inst->core) {
  3306. d_vpr_e("%s: invalid params\n", __func__);
  3307. return -EINVAL;
  3308. }
  3309. msm_vidc_scale_power(inst, true);
  3310. rc = venus_hfi_start(inst, port);
  3311. if (rc)
  3312. return rc;
  3313. return rc;
  3314. }
/*
 * Stop streaming on the given port: send the HFI stop command, wait for
 * the firmware's stop-done signal, verify no buffers remain queued with
 * firmware, then discard pending port-settings changes and flush deferred
 * buffers. On any failure the session is killed and buffers are flushed.
 */
int msm_vidc_session_streamoff(struct msm_vidc_inst *inst,
	enum msm_vidc_port_type port)
{
	int rc = 0;
	int count = 0;
	struct msm_vidc_core *core;
	enum signal_session_response signal_type;
	enum msm_vidc_buffer_type buffer_type;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	/* pick the completion signal and buffer type matching the port */
	if (port == INPUT_PORT) {
		signal_type = SIGNAL_CMD_STOP_INPUT;
		buffer_type = MSM_VIDC_BUF_INPUT;
	} else if (port == OUTPUT_PORT) {
		signal_type = SIGNAL_CMD_STOP_OUTPUT;
		buffer_type = MSM_VIDC_BUF_OUTPUT;
	} else {
		i_vpr_e(inst, "%s: invalid port: %d\n", __func__, port);
		return -EINVAL;
	}
	rc = venus_hfi_stop(inst, port);
	if (rc)
		goto error;
	core = inst->core;
	i_vpr_h(inst, "%s: wait on port: %d for time: %d ms\n",
		__func__, port, core->capabilities[HW_RESPONSE_TIMEOUT].value);
	/* drop the inst lock so the response thread can deliver the signal */
	inst_unlock(inst, __func__);
	rc = wait_for_completion_timeout(
		&inst->completions[signal_type],
		msecs_to_jiffies(
		core->capabilities[HW_RESPONSE_TIMEOUT].value));
	if (!rc) {
		/* wait_for_completion_timeout() returns 0 on timeout */
		i_vpr_e(inst, "%s: session stop timed out for port: %d\n",
			__func__, port);
		rc = -ETIMEDOUT;
		msm_vidc_inst_timeout(inst);
	} else {
		rc = 0;
	}
	inst_lock(inst, __func__);
	if (rc)
		goto error;
	/* no more queued buffers after streamoff */
	count = msm_vidc_num_buffers(inst, buffer_type, MSM_VIDC_ATTR_QUEUED);
	if (!count) {
		i_vpr_h(inst, "%s: stop successful on port: %d\n",
			__func__, port);
	} else {
		i_vpr_e(inst,
			"%s: %d buffers pending with firmware on port: %d\n",
			__func__, count, port);
		rc = -EINVAL;
		goto error;
	}
	/* discard pending port settings change if any */
	msm_vidc_discard_pending_ipsc(inst);
	/* flush deferred buffers */
	msm_vidc_flush_buffers(inst, buffer_type);
	msm_vidc_flush_delayed_unmap_buffers(inst, buffer_type);
	return 0;
error:
	msm_vidc_kill_session(inst);
	msm_vidc_flush_buffers(inst, buffer_type);
	return rc;
}
/*
 * Close the firmware session: send the close command, free the packet
 * scratch buffer (no more commands may be sent after close), then wait
 * for the firmware close-done signal and unlink the session from the core.
 */
int msm_vidc_session_close(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct msm_vidc_core *core;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	rc = venus_hfi_session_close(inst);
	if (rc)
		return rc;

	/* we are not supposed to send any more commands after close */
	i_vpr_h(inst, "%s: free session packet data\n", __func__);
	kfree(inst->packet);
	inst->packet = NULL;

	core = inst->core;
	i_vpr_h(inst, "%s: wait on close for time: %d ms\n",
		__func__, core->capabilities[HW_RESPONSE_TIMEOUT].value);
	/* drop the inst lock so the response thread can deliver the signal */
	inst_unlock(inst, __func__);
	rc = wait_for_completion_timeout(
		&inst->completions[SIGNAL_CMD_CLOSE],
		msecs_to_jiffies(
		core->capabilities[HW_RESPONSE_TIMEOUT].value));
	if (!rc) {
		/* wait_for_completion_timeout() returns 0 on timeout */
		i_vpr_e(inst, "%s: session close timed out\n", __func__);
		rc = -ETIMEDOUT;
		msm_vidc_inst_timeout(inst);
	} else {
		rc = 0;
		i_vpr_h(inst, "%s: close successful\n", __func__);
	}
	inst_lock(inst, __func__);

	msm_vidc_remove_session(inst);
	return rc;
}
  3417. int msm_vidc_kill_session(struct msm_vidc_inst *inst)
  3418. {
  3419. if (!inst) {
  3420. d_vpr_e("%s: invalid params\n", __func__);
  3421. return -EINVAL;
  3422. }
  3423. if (!inst->session_id) {
  3424. i_vpr_e(inst, "%s: already killed\n", __func__);
  3425. return 0;
  3426. }
  3427. i_vpr_e(inst, "%s: killing session\n", __func__);
  3428. msm_vidc_session_close(inst);
  3429. msm_vidc_change_inst_state(inst, MSM_VIDC_ERROR, __func__);
  3430. return 0;
  3431. }
  3432. int msm_vidc_get_inst_capability(struct msm_vidc_inst *inst)
  3433. {
  3434. int rc = 0;
  3435. int i;
  3436. struct msm_vidc_core *core;
  3437. if (!inst || !inst->core || !inst->capabilities) {
  3438. d_vpr_e("%s: invalid params\n", __func__);
  3439. return -EINVAL;
  3440. }
  3441. core = inst->core;
  3442. for (i = 0; i < core->codecs_count; i++) {
  3443. if (core->inst_caps[i].domain == inst->domain &&
  3444. core->inst_caps[i].codec == inst->codec) {
  3445. i_vpr_h(inst,
  3446. "%s: copied capabilities with %#x codec, %#x domain\n",
  3447. __func__, inst->codec, inst->domain);
  3448. memcpy(inst->capabilities, &core->inst_caps[i],
  3449. sizeof(struct msm_vidc_inst_capability));
  3450. }
  3451. }
  3452. return rc;
  3453. }
  3454. int msm_vidc_deinit_core_caps(struct msm_vidc_core *core)
  3455. {
  3456. int rc = 0;
  3457. if (!core) {
  3458. d_vpr_e("%s: invalid params\n", __func__);
  3459. return -EINVAL;
  3460. }
  3461. kfree(core->capabilities);
  3462. core->capabilities = NULL;
  3463. d_vpr_h("%s: Core capabilities freed\n", __func__);
  3464. return rc;
  3465. }
  3466. int msm_vidc_init_core_caps(struct msm_vidc_core *core)
  3467. {
  3468. int rc = 0;
  3469. int i, num_platform_caps;
  3470. struct msm_platform_core_capability *platform_data;
  3471. if (!core || !core->platform) {
  3472. d_vpr_e("%s: invalid params\n", __func__);
  3473. rc = -EINVAL;
  3474. goto exit;
  3475. }
  3476. platform_data = core->platform->data.core_data;
  3477. if (!platform_data) {
  3478. d_vpr_e("%s: platform core data is NULL\n",
  3479. __func__);
  3480. rc = -EINVAL;
  3481. goto exit;
  3482. }
  3483. core->capabilities = kcalloc(1,
  3484. (sizeof(struct msm_vidc_core_capability) *
  3485. (CORE_CAP_MAX + 1)), GFP_KERNEL);
  3486. if (!core->capabilities) {
  3487. d_vpr_e("%s: failed to allocate core capabilities\n",
  3488. __func__);
  3489. rc = -ENOMEM;
  3490. goto exit;
  3491. }
  3492. num_platform_caps = core->platform->data.core_data_size;
  3493. /* loop over platform caps */
  3494. for (i = 0; i < num_platform_caps && i < CORE_CAP_MAX; i++) {
  3495. core->capabilities[platform_data[i].type].type = platform_data[i].type;
  3496. core->capabilities[platform_data[i].type].value = platform_data[i].value;
  3497. }
  3498. exit:
  3499. return rc;
  3500. }
  3501. static void update_inst_capability(struct msm_platform_inst_capability *in,
  3502. struct msm_vidc_inst_capability *capability)
  3503. {
  3504. if (!in || !capability) {
  3505. d_vpr_e("%s: invalid params %pK %pK\n",
  3506. __func__, in, capability);
  3507. return;
  3508. }
  3509. if (in->cap < INST_CAP_MAX) {
  3510. capability->cap[in->cap].cap = in->cap;
  3511. capability->cap[in->cap].min = in->min;
  3512. capability->cap[in->cap].max = in->max;
  3513. capability->cap[in->cap].step_or_mask = in->step_or_mask;
  3514. capability->cap[in->cap].value = in->value;
  3515. capability->cap[in->cap].flags = in->flags;
  3516. capability->cap[in->cap].v4l2_id = in->v4l2_id;
  3517. capability->cap[in->cap].hfi_id = in->hfi_id;
  3518. memcpy(capability->cap[in->cap].parents, in->parents,
  3519. sizeof(capability->cap[in->cap].parents));
  3520. memcpy(capability->cap[in->cap].children, in->children,
  3521. sizeof(capability->cap[in->cap].children));
  3522. capability->cap[in->cap].adjust = in->adjust;
  3523. capability->cap[in->cap].set = in->set;
  3524. } else {
  3525. d_vpr_e("%s: invalid cap %d\n",
  3526. __func__, in->cap);
  3527. }
  3528. }
  3529. int msm_vidc_deinit_instance_caps(struct msm_vidc_core *core)
  3530. {
  3531. int rc = 0;
  3532. if (!core) {
  3533. d_vpr_e("%s: invalid params\n", __func__);
  3534. return -EINVAL;
  3535. }
  3536. kfree(core->inst_caps);
  3537. core->inst_caps = NULL;
  3538. d_vpr_h("%s: core->inst_caps freed\n", __func__);
  3539. return rc;
  3540. }
/*
 * Build the per-codec instance capability table: one entry per supported
 * (domain, codec) pair. Encoder codecs occupy the first entries, decoder
 * codecs the rest; each entry is then filled from the platform capability
 * list by matching domain and codec masks.
 */
int msm_vidc_init_instance_caps(struct msm_vidc_core *core)
{
	int rc = 0;
	u8 enc_valid_codecs, dec_valid_codecs;
	u8 count_bits, enc_codec_count;
	u8 codecs_count = 0;
	int i, j, check_bit, num_platform_caps;
	struct msm_platform_inst_capability *platform_data = NULL;

	if (!core || !core->platform || !core->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		rc = -EINVAL;
		goto error;
	}
	platform_data = core->platform->data.instance_data;
	if (!platform_data) {
		d_vpr_e("%s: platform instance data is NULL\n",
			__func__);
		rc = -EINVAL;
		goto error;
	}

	/* each set bit in ENC_CODECS/DEC_CODECS is one supported codec */
	enc_valid_codecs = core->capabilities[ENC_CODECS].value;
	count_bits = enc_valid_codecs;
	COUNT_BITS(count_bits, codecs_count);
	enc_codec_count = codecs_count;

	dec_valid_codecs = core->capabilities[DEC_CODECS].value;
	count_bits = dec_valid_codecs;
	COUNT_BITS(count_bits, codecs_count);
	core->codecs_count = codecs_count;

	core->inst_caps = kcalloc(codecs_count,
		sizeof(struct msm_vidc_inst_capability),
		GFP_KERNEL);
	if (!core->inst_caps) {
		d_vpr_e("%s: failed to allocate core capabilities\n",
			__func__);
		rc = -ENOMEM;
		goto error;
	}

	check_bit = 0;
	/* determine codecs for enc domain */
	for (i = 0; i < enc_codec_count; i++) {
		/* scan for the next set bit; each one yields one table entry */
		while (check_bit < (sizeof(enc_valid_codecs) * 8)) {
			if (enc_valid_codecs & BIT(check_bit)) {
				core->inst_caps[i].domain = MSM_VIDC_ENCODER;
				core->inst_caps[i].codec = enc_valid_codecs &
						BIT(check_bit);
				check_bit++;
				break;
			}
			check_bit++;
		}
	}

	/* reset checkbit to check from 0th bit of decoder codecs set bits*/
	check_bit = 0;
	/* determine codecs for dec domain (i continues after encoder entries) */
	for (; i < codecs_count; i++) {
		while (check_bit < (sizeof(dec_valid_codecs) * 8)) {
			if (dec_valid_codecs & BIT(check_bit)) {
				core->inst_caps[i].domain = MSM_VIDC_DECODER;
				core->inst_caps[i].codec = dec_valid_codecs &
						BIT(check_bit);
				check_bit++;
				break;
			}
			check_bit++;
		}
	}

	num_platform_caps = core->platform->data.instance_data_size;
	d_vpr_h("%s: num caps %d\n", __func__, num_platform_caps);
	/* loop over each platform capability */
	for (i = 0; i < num_platform_caps; i++) {
		/* select matching core codec and update it */
		for (j = 0; j < codecs_count; j++) {
			if ((platform_data[i].domain &
				core->inst_caps[j].domain) &&
				(platform_data[i].codec &
				core->inst_caps[j].codec)) {
				/* update core capability */
				update_inst_capability(&platform_data[i],
					&core->inst_caps[j]);
			}
		}
	}
error:
	return rc;
}
/*
 * Deinitialize the video core. Caller must hold the core lock.
 * In the non-forced case the core is only brought down when no sessions
 * are registered; when forced, any remaining sessions are marked ERROR
 * and moved to the dangling list after the firmware is shut down.
 */
int msm_vidc_core_deinit_locked(struct msm_vidc_core *core, bool force)
{
	int rc = 0;
	struct msm_vidc_inst *inst, *dummy;

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	/* verify the caller actually holds the core lock */
	rc = __strict_check(core, __func__);
	if (rc) {
		d_vpr_e("%s(): core was not locked\n", __func__);
		return rc;
	}

	if (core->state == MSM_VIDC_CORE_DEINIT)
		return 0;

	if (force) {
		d_vpr_e("%s(): force deinit core\n", __func__);
	} else {
		/* in normal case, deinit core only if no session present */
		if (!list_empty(&core->instances)) {
			d_vpr_h("%s(): skip deinit\n", __func__);
			return 0;
		} else {
			d_vpr_h("%s(): deinit core\n", __func__);
		}
	}

	venus_hfi_core_deinit(core, force);

	/* unlink all sessions from core, if any */
	list_for_each_entry_safe(inst, dummy, &core->instances, list) {
		msm_vidc_change_inst_state(inst, MSM_VIDC_ERROR, __func__);
		list_del_init(&inst->list);
		list_add_tail(&inst->list, &core->dangling_instances);
	}
	msm_vidc_change_core_state(core, MSM_VIDC_CORE_DEINIT, __func__);

	return rc;
}
  3662. int msm_vidc_core_deinit(struct msm_vidc_core *core, bool force)
  3663. {
  3664. int rc = 0;
  3665. if (!core) {
  3666. d_vpr_e("%s: invalid params\n", __func__);
  3667. return -EINVAL;
  3668. }
  3669. core_lock(core, __func__);
  3670. rc = msm_vidc_core_deinit_locked(core, force);
  3671. core_unlock(core, __func__);
  3672. return rc;
  3673. }
/*
 * If another thread already kicked off core init (state INIT_WAIT), poll
 * until the state changes, dropping the core lock between polls so the
 * response thread can make progress. Caller must hold the core lock.
 * Returns -EINVAL if the core never leaves INIT_WAIT.
 */
static int msm_vidc_core_init_wait(struct msm_vidc_core *core)
{
	const int interval = 40;
	int max_tries, count = 0, rc = 0;

	if (!core || !core->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	/* verify the caller actually holds the core lock */
	rc = __strict_check(core, __func__);
	if (rc)
		return rc;

	if (core->state != MSM_VIDC_CORE_INIT_WAIT)
		return 0;

	d_vpr_h("%s(): waiting for state change\n", __func__);
	max_tries = core->capabilities[HW_RESPONSE_TIMEOUT].value / interval;
	/**
	 * attempt one more time to ensure triggering init_done
	 * timeout sequence for 1st session, incase response not
	 * received in reverse thread.
	 */
	while (count < max_tries + 1) {
		if (core->state != MSM_VIDC_CORE_INIT_WAIT)
			break;

		/* release the lock while sleeping so others can run */
		core_unlock(core, __func__);
		msleep_interruptible(interval);
		core_lock(core, __func__);

		count++;
	}
	d_vpr_h("%s: state %s, interval %u, count %u, max_tries %u\n", __func__,
		core_state_name(core->state), interval, count, max_tries);

	/* treat as fatal and fail session_open */
	if (core->state == MSM_VIDC_CORE_INIT_WAIT) {
		d_vpr_e("%s: state change failed\n", __func__);
		rc = -EINVAL;
	}

	return rc;
}
/*
 * Bring up the video core if it is not already initialized: move to
 * INIT_WAIT, issue the HFI core init, and wait (lock dropped) for the
 * sys_init_done response. On failure or timeout the core is force
 * deinitialized before returning.
 */
int msm_vidc_core_init(struct msm_vidc_core *core)
{
	int rc = 0;

	if (!core || !core->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	core_lock(core, __func__);
	/* if another thread is mid-init, wait for it to finish */
	rc = msm_vidc_core_init_wait(core);
	if (rc)
		goto unlock;

	if (core->state == MSM_VIDC_CORE_INIT)
		goto unlock;

	msm_vidc_change_core_state(core, MSM_VIDC_CORE_INIT_WAIT, __func__);
	/* reset per-init bookkeeping before talking to firmware */
	init_completion(&core->init_done);
	core->smmu_fault_handled = false;
	core->ssr.trigger = false;
	core->pm_suspended = false;

	rc = venus_hfi_core_init(core);
	if (rc) {
		d_vpr_e("%s: core init failed\n", __func__);
		goto unlock;
	}

	d_vpr_h("%s(): waiting for sys_init_done, %d ms\n", __func__,
		core->capabilities[HW_RESPONSE_TIMEOUT].value);
	/* drop the lock so the response thread can signal init_done */
	core_unlock(core, __func__);
	rc = wait_for_completion_timeout(&core->init_done, msecs_to_jiffies(
		core->capabilities[HW_RESPONSE_TIMEOUT].value));
	core_lock(core, __func__);
	if (!rc) {
		/* wait_for_completion_timeout() returns 0 on timeout */
		d_vpr_e("%s: core init timed out\n", __func__);
		rc = -ETIMEDOUT;
	} else {
		msm_vidc_change_core_state(core, MSM_VIDC_CORE_INIT, __func__);
		d_vpr_h("%s: system init wait completed\n", __func__);
		rc = 0;
	}

unlock:
	/* any failure path force-deinits the core while still locked */
	if (rc)
		msm_vidc_core_deinit_locked(core, true);
	core_unlock(core, __func__);
	return rc;
}
  3754. int msm_vidc_inst_timeout(struct msm_vidc_inst *inst)
  3755. {
  3756. int rc = 0;
  3757. struct msm_vidc_core *core;
  3758. struct msm_vidc_inst *instance;
  3759. bool found;
  3760. if (!inst || !inst->core) {
  3761. d_vpr_e("%s: invalid params\n", __func__);
  3762. return -EINVAL;
  3763. }
  3764. core = inst->core;
  3765. core_lock(core, __func__);
  3766. /*
  3767. * All sessions will be removed from core list in core deinit,
  3768. * do not deinit core from a session which is not present in
  3769. * core list.
  3770. */
  3771. found = false;
  3772. list_for_each_entry(instance, &core->instances, list) {
  3773. if (instance == inst) {
  3774. found = true;
  3775. break;
  3776. }
  3777. }
  3778. if (!found) {
  3779. i_vpr_e(inst,
  3780. "%s: session not available in core list\n", __func__);
  3781. rc = -EINVAL;
  3782. goto unlock;
  3783. }
  3784. /* call core deinit for a valid instance timeout case */
  3785. msm_vidc_core_deinit_locked(core, true);
  3786. unlock:
  3787. core_unlock(core, __func__);
  3788. return rc;
  3789. }
  3790. int msm_vidc_print_buffer_info(struct msm_vidc_inst *inst)
  3791. {
  3792. struct msm_vidc_buffers *buffers;
  3793. int i;
  3794. if (!inst) {
  3795. i_vpr_e(inst, "%s: invalid params\n", __func__);
  3796. return -EINVAL;
  3797. }
  3798. /* Print buffer details */
  3799. for (i = 0; i < ARRAY_SIZE(buf_type_name_arr); i++) {
  3800. buffers = msm_vidc_get_buffers(inst, buf_type_name_arr[i].type, __func__);
  3801. if (!buffers)
  3802. continue;
  3803. i_vpr_h(inst, "buf: type: %11s, count %2d, extra %2d, actual %2d, size %9u\n",
  3804. buf_type_name_arr[i].name, buffers->min_count,
  3805. buffers->extra_count, buffers->actual_count,
  3806. buffers->size);
  3807. }
  3808. return 0;
  3809. }
  3810. int msm_vidc_print_inst_info(struct msm_vidc_inst *inst)
  3811. {
  3812. struct msm_vidc_buffers *buffers;
  3813. struct msm_vidc_buffer *buf;
  3814. enum msm_vidc_port_type port;
  3815. bool is_secure, is_decode;
  3816. u32 bit_depth, bit_rate, frame_rate, width, height;
  3817. struct dma_buf *dbuf;
  3818. struct inode *f_inode;
  3819. unsigned long inode_num = 0;
  3820. long ref_count = -1;
  3821. int i = 0;
  3822. if (!inst || !inst->capabilities) {
  3823. i_vpr_e(inst, "%s: invalid params\n", __func__);
  3824. return -EINVAL;
  3825. }
  3826. is_secure = is_secure_session(inst);
  3827. is_decode = inst->domain == MSM_VIDC_DECODER;
  3828. port = is_decode ? INPUT_PORT : OUTPUT_PORT;
  3829. width = inst->fmts[port].fmt.pix_mp.width;
  3830. height = inst->fmts[port].fmt.pix_mp.height;
  3831. bit_depth = inst->capabilities->cap[BIT_DEPTH].value & 0xFFFF;
  3832. bit_rate = inst->capabilities->cap[BIT_RATE].value;
  3833. frame_rate = inst->capabilities->cap[FRAME_RATE].value >> 16;
  3834. i_vpr_e(inst, "%s %s session, HxW: %d x %d, fps: %d, bitrate: %d, bit-depth: %d\n",
  3835. is_secure ? "Secure" : "Non-Secure",
  3836. is_decode ? "Decode" : "Encode",
  3837. height, width,
  3838. frame_rate, bit_rate, bit_depth);
  3839. /* Print buffer details */
  3840. for (i = 0; i < ARRAY_SIZE(buf_type_name_arr); i++) {
  3841. buffers = msm_vidc_get_buffers(inst, buf_type_name_arr[i].type, __func__);
  3842. if (!buffers)
  3843. continue;
  3844. i_vpr_e(inst, "count: type: %11s, min: %2d, extra: %2d, actual: %2d\n",
  3845. buf_type_name_arr[i].name, buffers->min_count,
  3846. buffers->extra_count, buffers->actual_count);
  3847. list_for_each_entry(buf, &buffers->list, list) {
  3848. if (!buf->dmabuf)
  3849. continue;
  3850. dbuf = (struct dma_buf *)buf->dmabuf;
  3851. if (dbuf && dbuf->file) {
  3852. f_inode = file_inode(dbuf->file);
  3853. if (f_inode) {
  3854. inode_num = f_inode->i_ino;
  3855. ref_count = file_count(dbuf->file);
  3856. }
  3857. }
  3858. i_vpr_e(inst,
  3859. "buf: type: %11s, index: %2d, fd: %4d, size: %9u, off: %8u, filled: %9u, daddr: %#llx, inode: %8lu, ref: %2ld, flags: %8x, ts: %16lld, attr: %8x\n",
  3860. buf_type_name_arr[i].name, buf->index, buf->fd, buf->buffer_size,
  3861. buf->data_offset, buf->data_size, buf->device_addr,
  3862. inode_num, ref_count, buf->flags, buf->timestamp, buf->attr);
  3863. }
  3864. }
  3865. return 0;
  3866. }
  3867. void msm_vidc_print_core_info(struct msm_vidc_core *core)
  3868. {
  3869. struct msm_vidc_inst *inst = NULL;
  3870. struct msm_vidc_inst *instances[MAX_SUPPORTED_INSTANCES];
  3871. s32 num_instances = 0;
  3872. if (!core) {
  3873. d_vpr_e("%s: invalid params\n", __func__);
  3874. return;
  3875. }
  3876. core_lock(core, __func__);
  3877. list_for_each_entry(inst, &core->instances, list)
  3878. instances[num_instances++] = inst;
  3879. core_unlock(core, __func__);
  3880. while (num_instances--) {
  3881. inst = instances[num_instances];
  3882. inst = get_inst_ref(core, inst);
  3883. if (!inst)
  3884. continue;
  3885. inst_lock(inst, __func__);
  3886. msm_vidc_print_inst_info(inst);
  3887. inst_unlock(inst, __func__);
  3888. put_inst(inst);
  3889. }
  3890. }
  3891. int msm_vidc_smmu_fault_handler(struct iommu_domain *domain,
  3892. struct device *dev, unsigned long iova, int flags, void *data)
  3893. {
  3894. struct msm_vidc_core *core = data;
  3895. if (!domain || !core || !core->capabilities) {
  3896. d_vpr_e("%s: invalid params %pK %pK\n",
  3897. __func__, domain, core);
  3898. return -EINVAL;
  3899. }
  3900. if (core->smmu_fault_handled) {
  3901. if (core->capabilities[NON_FATAL_FAULTS].value) {
  3902. dprintk_ratelimit(VIDC_ERR, "err ",
  3903. "%s: non-fatal pagefault address: %lx\n",
  3904. __func__, iova);
  3905. return 0;
  3906. }
  3907. }
  3908. d_vpr_e("%s: faulting address: %lx\n", __func__, iova);
  3909. core->smmu_fault_handled = true;
  3910. /* print noc error log registers */
  3911. venus_hfi_noc_error_info(core);
  3912. msm_vidc_print_core_info(core);
  3913. /*
  3914. * Return -ENOSYS to elicit the default behaviour of smmu driver.
  3915. * If we return -ENOSYS, then smmu driver assumes page fault handler
  3916. * is not installed and prints a list of useful debug information like
  3917. * FAR, SID etc. This information is not printed if we return 0.
  3918. */
  3919. return -ENOSYS;
  3920. }
/*
 * Decode a user-supplied SSR trigger word into the core's SSR descriptor
 * and schedule the SSR work item to act on it.
 */
int msm_vidc_trigger_ssr(struct msm_vidc_core *core,
	u64 trigger_ssr_val)
{
	struct msm_vidc_ssr *ssr;

	if (!core) {
		d_vpr_e("%s: Invalid parameters\n", __func__);
		return -EINVAL;
	}
	ssr = &core->ssr;
	/*
	 * <test_addr><sub_client_id><ssr_type>
	 * ssr_type: 0-3 bits
	 * sub_client_id: 4-7 bits
	 * reserved: 8-31 bits
	 * test_addr: 32-63 bits
	 */
	/* mask out each field, then shift it down to its own origin */
	ssr->ssr_type = (trigger_ssr_val &
			(unsigned long)SSR_TYPE) >> SSR_TYPE_SHIFT;
	ssr->sub_client_id = (trigger_ssr_val &
			(unsigned long)SSR_SUB_CLIENT_ID) >> SSR_SUB_CLIENT_ID_SHIFT;
	ssr->test_addr = (trigger_ssr_val &
			(unsigned long)SSR_ADDR_ID) >> SSR_ADDR_SHIFT;
	/* actual SSR is performed asynchronously by msm_vidc_ssr_handler() */
	schedule_work(&core->ssr_work);
	return 0;
}
/*
 * Work handler for a user-triggered SSR (scheduled by
 * msm_vidc_trigger_ssr()). Issues the SSR command to firmware only when
 * the core is in MSM_VIDC_CORE_INIT state, under the core lock.
 */
void msm_vidc_ssr_handler(struct work_struct *work)
{
	int rc;
	struct msm_vidc_core *core;
	struct msm_vidc_ssr *ssr;

	core = container_of(work, struct msm_vidc_core, ssr_work);
	/* NOTE(review): container_of() cannot yield NULL here; check is defensive */
	if (!core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, core);
		return;
	}
	ssr = &core->ssr;

	core_lock(core, __func__);
	if (core->state == MSM_VIDC_CORE_INIT) {
		/*
		 * In current implementation, user-initiated SSR triggers
		 * a fatal error from hardware. However, there is no way
		 * to know if fatal error is due to SSR or not. Handle
		 * user SSR as non-fatal.
		 */
		core->ssr.trigger = true;
		rc = venus_hfi_trigger_ssr(core, ssr->ssr_type,
			ssr->sub_client_id, ssr->test_addr);
		if (rc) {
			d_vpr_e("%s: trigger_ssr failed\n", __func__);
			/* command was not issued; clear the trigger flag */
			core->ssr.trigger = false;
		}
	} else {
		d_vpr_e("%s: video core not initialized\n", __func__);
	}
	core_unlock(core, __func__);
}
/* Power-management work handler: intentionally a no-op (stub). */
void msm_vidc_pm_work_handler(struct work_struct *work)
{
}
  3980. void msm_vidc_fw_unload_handler(struct work_struct *work)
  3981. {
  3982. struct msm_vidc_core *core = NULL;
  3983. int rc = 0;
  3984. core = container_of(work, struct msm_vidc_core, fw_unload_work.work);
  3985. if (!core) {
  3986. d_vpr_e("%s: invalid work or core handle\n", __func__);
  3987. return;
  3988. }
  3989. d_vpr_h("%s: deinitializing video core\n",__func__);
  3990. rc = msm_vidc_core_deinit(core, false);
  3991. if (rc)
  3992. d_vpr_e("%s: Failed to deinit core\n", __func__);
  3993. }
  3994. int msm_vidc_suspend(struct msm_vidc_core *core)
  3995. {
  3996. int rc = 0;
  3997. if (!core) {
  3998. d_vpr_e("%s: invalid params\n", __func__);
  3999. return -EINVAL;
  4000. }
  4001. rc = venus_hfi_suspend(core);
  4002. if (rc)
  4003. return rc;
  4004. return rc;
  4005. }
  4006. void msm_vidc_batch_handler(struct work_struct *work)
  4007. {
  4008. struct msm_vidc_inst *inst;
  4009. enum msm_vidc_allow allow;
  4010. struct msm_vidc_core *core;
  4011. int rc = 0;
  4012. inst = container_of(work, struct msm_vidc_inst, decode_batch.work.work);
  4013. inst = get_inst_ref(g_core, inst);
  4014. if (!inst || !inst->core) {
  4015. d_vpr_e("%s: invalid params\n", __func__);
  4016. return;
  4017. }
  4018. core = inst->core;
  4019. inst_lock(inst, __func__);
  4020. if (is_session_error(inst)) {
  4021. i_vpr_e(inst, "%s: failled. Session error\n", __func__);
  4022. goto exit;
  4023. }
  4024. if (core->pm_suspended) {
  4025. i_vpr_h(inst, "%s: device in pm suspend state\n", __func__);
  4026. goto exit;
  4027. }
  4028. allow = msm_vidc_allow_qbuf(inst, OUTPUT_MPLANE);
  4029. if (allow != MSM_VIDC_ALLOW) {
  4030. i_vpr_e(inst, "%s: not allowed in state: %s\n", __func__,
  4031. state_name(inst->state));
  4032. goto exit;
  4033. }
  4034. i_vpr_h(inst, "%s: queue pending batch buffers\n", __func__);
  4035. rc = msm_vidc_queue_deferred_buffers(inst, MSM_VIDC_BUF_OUTPUT);
  4036. if (rc) {
  4037. i_vpr_e(inst, "%s: batch qbufs failed\n", __func__);
  4038. msm_vidc_change_inst_state(inst, MSM_VIDC_ERROR, __func__);
  4039. }
  4040. exit:
  4041. inst_unlock(inst, __func__);
  4042. put_inst(inst);
  4043. }
/*
 * Flush all queued or deferred buffers of the given port: signal vb2
 * completion (once) and drop the driver reference. Both the data buffer
 * type and its companion meta buffer type are flushed.
 */
int msm_vidc_flush_buffers(struct msm_vidc_inst *inst,
		enum msm_vidc_buffer_type type)
{
	int rc = 0;
	struct msm_vidc_buffers *buffers;
	struct msm_vidc_buffer *buf, *dummy;
	enum msm_vidc_buffer_type buffer_type[2];
	int i;

	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	/* only the two external (port) buffer types may be flushed */
	if (type == MSM_VIDC_BUF_INPUT) {
		buffer_type[0] = MSM_VIDC_BUF_INPUT_META;
		buffer_type[1] = MSM_VIDC_BUF_INPUT;
	} else if (type == MSM_VIDC_BUF_OUTPUT) {
		buffer_type[0] = MSM_VIDC_BUF_OUTPUT_META;
		buffer_type[1] = MSM_VIDC_BUF_OUTPUT;
	} else {
		i_vpr_h(inst, "%s: invalid buffer type %d\n",
			__func__, type);
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(buffer_type); i++) {
		buffers = msm_vidc_get_buffers(inst, buffer_type[i], __func__);
		if (!buffers)
			return -EINVAL;
		list_for_each_entry_safe(buf, dummy, &buffers->list, list) {
			if (buf->attr & MSM_VIDC_ATTR_QUEUED ||
				buf->attr & MSM_VIDC_ATTR_DEFERRED) {
				print_vidc_buffer(VIDC_HIGH, "high", "flushing buffer", inst, buf);
				/* complete to vb2 only if not already done */
				if (!(buf->attr & MSM_VIDC_ATTR_BUFFER_DONE))
					msm_vidc_vb2_buffer_done(inst, buf);
				msm_vidc_put_driver_buf(inst, buf);
			}
		}
	}
	return rc;
}
/*
 * Unmap mappings that were held back with skip_delayed_unmap, for the
 * given port (data + meta buffer types). Mappings whose dmabuf is still
 * on the read-only list are kept mapped; everything else is completely
 * unmapped. A refcount > 1 at this point indicates a reference leak and
 * moves the instance to the error state.
 */
int msm_vidc_flush_delayed_unmap_buffers(struct msm_vidc_inst *inst,
		enum msm_vidc_buffer_type type)
{
	int rc = 0;
	struct msm_vidc_mappings *maps;
	struct msm_vidc_map *map, *dummy;
	struct msm_vidc_buffer *ro_buf, *ro_dummy;
	enum msm_vidc_buffer_type buffer_type[2];
	int i;
	bool found = false;

	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (type == MSM_VIDC_BUF_INPUT) {
		buffer_type[0] = MSM_VIDC_BUF_INPUT_META;
		buffer_type[1] = MSM_VIDC_BUF_INPUT;
	} else if (type == MSM_VIDC_BUF_OUTPUT) {
		buffer_type[0] = MSM_VIDC_BUF_OUTPUT_META;
		buffer_type[1] = MSM_VIDC_BUF_OUTPUT;
	} else {
		i_vpr_h(inst, "%s: invalid buffer type %d\n",
			__func__, type);
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(buffer_type); i++) {
		maps = msm_vidc_get_mappings(inst, buffer_type[i], __func__);
		if (!maps)
			return -EINVAL;
		list_for_each_entry_safe(map, dummy, &maps->list, list) {
			/*
			 * decoder output bufs will have skip_delayed_unmap = true
			 * unmap all decoder output buffers except those present in
			 * read_only buffers list
			 */
			if (!map->skip_delayed_unmap)
				continue;
			found = false;
			list_for_each_entry_safe(ro_buf, ro_dummy,
					&inst->buffers.read_only.list, list) {
				if (map->dmabuf == ro_buf->dmabuf) {
					found = true;
					break;
				}
			}
			/* completely unmap */
			if (!found) {
				if (map->refcount > 1) {
					i_vpr_e(inst,
						"%s: unexpected map refcount: %u device addr %#x\n",
						__func__, map->refcount, map->device_addr);
					msm_vidc_change_inst_state(inst, MSM_VIDC_ERROR, __func__);
				}
				msm_vidc_memory_unmap_completely(inst, map);
			}
		}
	}
	return rc;
}
/*
 * Tear down every buffer the instance still owns, in order: internal
 * (firmware) buffers, external (port) buffers, read-only and release
 * lists, cached timestamps, tracked dmabufs, pending response works,
 * and finally the memory pools themselves. External buffers reaching
 * this point are unexpected, hence the error-level prints.
 */
void msm_vidc_destroy_buffers(struct msm_vidc_inst *inst)
{
	struct msm_vidc_buffers *buffers;
	struct msm_vidc_buffer *buf, *dummy;
	struct msm_vidc_timestamp *ts, *dummy_ts;
	struct msm_memory_dmabuf *dbuf, *dummy_dbuf;
	struct response_work *work, *dummy_work = NULL;
	static const enum msm_vidc_buffer_type ext_buf_types[] = {
		MSM_VIDC_BUF_INPUT,
		MSM_VIDC_BUF_OUTPUT,
		MSM_VIDC_BUF_INPUT_META,
		MSM_VIDC_BUF_OUTPUT_META,
	};
	static const enum msm_vidc_buffer_type internal_buf_types[] = {
		MSM_VIDC_BUF_BIN,
		MSM_VIDC_BUF_ARP,
		MSM_VIDC_BUF_COMV,
		MSM_VIDC_BUF_NON_COMV,
		MSM_VIDC_BUF_LINE,
		MSM_VIDC_BUF_DPB,
		MSM_VIDC_BUF_PERSIST,
		MSM_VIDC_BUF_VPSS,
	};
	int i;

	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}

	/* internal (firmware-facing) buffers first */
	for (i = 0; i < ARRAY_SIZE(internal_buf_types); i++) {
		buffers = msm_vidc_get_buffers(inst, internal_buf_types[i], __func__);
		if (!buffers)
			continue;
		list_for_each_entry_safe(buf, dummy, &buffers->list, list) {
			i_vpr_h(inst,
				"destroying internal buffer: type %d idx %d fd %d addr %#x size %d\n",
				buf->type, buf->index, buf->fd, buf->device_addr, buf->buffer_size);
			msm_vidc_destroy_internal_buffer(inst, buf);
		}
	}

	/* external (port) buffers: complete to vb2, drop ref, then unmap */
	for (i = 0; i < ARRAY_SIZE(ext_buf_types); i++) {
		buffers = msm_vidc_get_buffers(inst, ext_buf_types[i], __func__);
		if (!buffers)
			continue;
		list_for_each_entry_safe(buf, dummy, &buffers->list, list) {
			print_vidc_buffer(VIDC_ERR, "err ", "destroying ", inst, buf);
			if (!(buf->attr & MSM_VIDC_ATTR_BUFFER_DONE))
				msm_vidc_vb2_buffer_done(inst, buf);
			msm_vidc_put_driver_buf(inst, buf);
		}
		msm_vidc_unmap_buffers(inst, ext_buf_types[i]);
	}

	list_for_each_entry_safe(buf, dummy, &inst->buffers.read_only.list, list) {
		print_vidc_buffer(VIDC_ERR, "err ", "destroying ro buffer", inst, buf);
		list_del(&buf->list);
		msm_memory_free(inst, buf);
	}
	list_for_each_entry_safe(buf, dummy, &inst->buffers.release.list, list) {
		print_vidc_buffer(VIDC_ERR, "err ", "destroying release buffer", inst, buf);
		list_del(&buf->list);
		msm_memory_free(inst, buf);
	}
	list_for_each_entry_safe(ts, dummy_ts, &inst->timestamps.list, sort.list) {
		i_vpr_e(inst, "%s: removing ts: val %lld, rank %lld\n",
			__func__, ts->sort.val, ts->rank);
		list_del(&ts->sort.list);
		msm_memory_free(inst, ts);
	}
	list_for_each_entry_safe(dbuf, dummy_dbuf, &inst->dmabuf_tracker, list) {
		i_vpr_e(inst, "%s: removing dma_buf %#x, refcount %u\n",
			__func__, dbuf->dmabuf, dbuf->refcount);
		msm_vidc_memory_put_dmabuf_completely(inst, dbuf);
	}
	list_for_each_entry_safe(work, dummy_work, &inst->response_works, list) {
		list_del(&work->list);
		kfree(work->data);
		kfree(work);
	}

	/* destroy buffers from pool */
	msm_memory_pools_deinit(inst);
}
/*
 * Final kref release callback for an instance (invoked from put_inst()
 * when the last reference drops). Deinitializes queues, debugfs, the
 * codec-specific state, capability lists and the response workqueue,
 * then frees the instance itself.
 */
static void msm_vidc_close_helper(struct kref *kref)
{
	struct msm_vidc_inst *inst = container_of(kref,
		struct msm_vidc_inst, kref);

	i_vpr_h(inst, "%s()\n", __func__);
	msm_vidc_event_queue_deinit(inst);
	msm_vidc_vb2_queue_deinit(inst);
	msm_vidc_debugfs_deinit_inst(inst);
	if (is_decode_session(inst))
		msm_vdec_inst_deinit(inst);
	else if (is_encode_session(inst))
		msm_venc_inst_deinit(inst);
	msm_vidc_free_input_cr_list(inst);
	msm_vidc_free_capabililty_list(inst, CHILD_LIST | FW_LIST);
	if (inst->response_workq)
		destroy_workqueue(inst->response_workq);
	msm_vidc_remove_dangling_session(inst);
	kfree(inst->capabilities);
	kfree(inst);
}
  4242. struct msm_vidc_inst *get_inst_ref(struct msm_vidc_core *core,
  4243. struct msm_vidc_inst *instance)
  4244. {
  4245. struct msm_vidc_inst *inst = NULL;
  4246. bool matches = false;
  4247. if (!core) {
  4248. d_vpr_e("%s: invalid params\n", __func__);
  4249. return NULL;
  4250. }
  4251. mutex_lock(&core->lock);
  4252. list_for_each_entry(inst, &core->instances, list) {
  4253. if (inst == instance) {
  4254. matches = true;
  4255. break;
  4256. }
  4257. }
  4258. inst = (matches && kref_get_unless_zero(&inst->kref)) ? inst : NULL;
  4259. mutex_unlock(&core->lock);
  4260. return inst;
  4261. }
  4262. struct msm_vidc_inst *get_inst(struct msm_vidc_core *core,
  4263. u32 session_id)
  4264. {
  4265. struct msm_vidc_inst *inst = NULL;
  4266. bool matches = false;
  4267. if (!core) {
  4268. d_vpr_e("%s: invalid params\n", __func__);
  4269. return NULL;
  4270. }
  4271. mutex_lock(&core->lock);
  4272. list_for_each_entry(inst, &core->instances, list) {
  4273. if (inst->session_id == session_id) {
  4274. matches = true;
  4275. break;
  4276. }
  4277. }
  4278. inst = (matches && kref_get_unless_zero(&inst->kref)) ? inst : NULL;
  4279. mutex_unlock(&core->lock);
  4280. return inst;
  4281. }
/*
 * Drop one reference on the instance; the last put invokes
 * msm_vidc_close_helper() which frees the instance.
 */
void put_inst(struct msm_vidc_inst *inst)
{
	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}
	kref_put(&inst->kref, msm_vidc_close_helper);
}
/* Return true if the core mutex is currently held (by any context). */
bool core_lock_check(struct msm_vidc_core *core, const char *func)
{
	return mutex_is_locked(&core->lock);
}
/* Acquire the core mutex; @function is for caller identification only. */
void core_lock(struct msm_vidc_core *core, const char *function)
{
	mutex_lock(&core->lock);
}
/* Release the core mutex; @function is for caller identification only. */
void core_unlock(struct msm_vidc_core *core, const char *function)
{
	mutex_unlock(&core->lock);
}
/* Return true if the instance mutex is currently held (by any context). */
bool inst_lock_check(struct msm_vidc_inst *inst, const char *func)
{
	return mutex_is_locked(&inst->lock);
}
/* Acquire the instance mutex; @function is for caller identification only. */
void inst_lock(struct msm_vidc_inst *inst, const char *function)
{
	mutex_lock(&inst->lock);
}
/* Release the instance mutex; @function is for caller identification only. */
void inst_unlock(struct msm_vidc_inst *inst, const char *function)
{
	mutex_unlock(&inst->lock);
}
  4314. int msm_vidc_update_bitstream_buffer_size(struct msm_vidc_inst *inst)
  4315. {
  4316. struct msm_vidc_core *core;
  4317. struct v4l2_format *fmt;
  4318. if (!inst || !inst->core) {
  4319. d_vpr_e("%s: invalid params\n", __func__);
  4320. return -EINVAL;
  4321. }
  4322. core = inst->core;
  4323. if (is_decode_session(inst)) {
  4324. fmt = &inst->fmts[INPUT_PORT];
  4325. fmt->fmt.pix_mp.plane_fmt[0].sizeimage = call_session_op(core,
  4326. buffer_size, inst, MSM_VIDC_BUF_INPUT);
  4327. }
  4328. return 0;
  4329. }
/*
 * Synchronize both metadata ports with their data ports: when metadata
 * is enabled for a port, the meta buffer counts mirror the data buffer
 * counts and the size comes from the session ops; when disabled, all
 * meta counts and sizes are zeroed.
 */
int msm_vidc_update_meta_port_settings(struct msm_vidc_inst *inst)
{
	struct msm_vidc_core *core;
	struct v4l2_format *fmt;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;

	/* input meta port mirrors input port counts */
	fmt = &inst->fmts[INPUT_META_PORT];
	if (is_input_meta_enabled(inst)) {
		fmt->fmt.meta.buffersize = call_session_op(core,
			buffer_size, inst, MSM_VIDC_BUF_INPUT_META);
		inst->buffers.input_meta.min_count =
				inst->buffers.input.min_count;
		inst->buffers.input_meta.extra_count =
				inst->buffers.input.extra_count;
		inst->buffers.input_meta.actual_count =
				inst->buffers.input.actual_count;
		inst->buffers.input_meta.size = fmt->fmt.meta.buffersize;
	} else {
		fmt->fmt.meta.buffersize = 0;
		inst->buffers.input_meta.min_count = 0;
		inst->buffers.input_meta.extra_count = 0;
		inst->buffers.input_meta.actual_count = 0;
		inst->buffers.input_meta.size = 0;
	}

	/* output meta port mirrors output port counts */
	fmt = &inst->fmts[OUTPUT_META_PORT];
	if (is_output_meta_enabled(inst)) {
		fmt->fmt.meta.buffersize = call_session_op(core,
			buffer_size, inst, MSM_VIDC_BUF_OUTPUT_META);
		inst->buffers.output_meta.min_count =
				inst->buffers.output.min_count;
		inst->buffers.output_meta.extra_count =
				inst->buffers.output.extra_count;
		inst->buffers.output_meta.actual_count =
				inst->buffers.output.actual_count;
		inst->buffers.output_meta.size = fmt->fmt.meta.buffersize;
	} else {
		fmt->fmt.meta.buffersize = 0;
		inst->buffers.output_meta.min_count = 0;
		inst->buffers.output_meta.extra_count = 0;
		inst->buffers.output_meta.actual_count = 0;
		inst->buffers.output_meta.size = 0;
	}
	return 0;
}
/*
 * Recompute min/extra/actual buffer counts for the given port via the
 * session ops, growing actual_count to at least min + extra, and mirror
 * the counts onto the corresponding meta port (or zero it when metadata
 * is disabled).
 */
int msm_vidc_update_buffer_count(struct msm_vidc_inst *inst, u32 port)
{
	struct msm_vidc_core *core;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;

	switch (port) {
	case INPUT_PORT:
		inst->buffers.input.min_count = call_session_op(core,
			min_count, inst, MSM_VIDC_BUF_INPUT);
		inst->buffers.input.extra_count = call_session_op(core,
			extra_count, inst, MSM_VIDC_BUF_INPUT);
		/* actual_count never shrinks below min + extra */
		if (inst->buffers.input.actual_count <
			inst->buffers.input.min_count +
			inst->buffers.input.extra_count) {
			inst->buffers.input.actual_count =
				inst->buffers.input.min_count +
				inst->buffers.input.extra_count;
		}
		if (is_input_meta_enabled(inst)) {
			inst->buffers.input_meta.min_count =
					inst->buffers.input.min_count;
			inst->buffers.input_meta.extra_count =
					inst->buffers.input.extra_count;
			inst->buffers.input_meta.actual_count =
					inst->buffers.input.actual_count;
		} else {
			inst->buffers.input_meta.min_count = 0;
			inst->buffers.input_meta.extra_count = 0;
			inst->buffers.input_meta.actual_count = 0;
		}
		i_vpr_h(inst, "%s: type: INPUT, count: min %u, extra %u, actual %u\n", __func__,
			inst->buffers.input.min_count,
			inst->buffers.input.extra_count,
			inst->buffers.input.actual_count);
		break;
	case OUTPUT_PORT:
		/*
		 * output min_count is only re-queried while the input port
		 * is not yet streaming; after that it stays frozen.
		 */
		if (!inst->vb2q[INPUT_PORT].streaming)
			inst->buffers.output.min_count = call_session_op(core,
				min_count, inst, MSM_VIDC_BUF_OUTPUT);
		inst->buffers.output.extra_count = call_session_op(core,
			extra_count, inst, MSM_VIDC_BUF_OUTPUT);
		if (inst->buffers.output.actual_count <
			inst->buffers.output.min_count +
			inst->buffers.output.extra_count) {
			inst->buffers.output.actual_count =
				inst->buffers.output.min_count +
				inst->buffers.output.extra_count;
		}
		if (is_output_meta_enabled(inst)) {
			inst->buffers.output_meta.min_count =
					inst->buffers.output.min_count;
			inst->buffers.output_meta.extra_count =
					inst->buffers.output.extra_count;
			inst->buffers.output_meta.actual_count =
					inst->buffers.output.actual_count;
		} else {
			inst->buffers.output_meta.min_count = 0;
			inst->buffers.output_meta.extra_count = 0;
			inst->buffers.output_meta.actual_count = 0;
		}
		i_vpr_h(inst, "%s: type: OUTPUT, count: min %u, extra %u, actual %u\n", __func__,
			inst->buffers.output.min_count,
			inst->buffers.output.extra_count,
			inst->buffers.output.actual_count);
		break;
	default:
		d_vpr_e("%s unknown port %d\n", __func__, port);
		return -EINVAL;
	}
	return 0;
}
  4451. void msm_vidc_schedule_core_deinit(struct msm_vidc_core *core)
  4452. {
  4453. if (!core)
  4454. return;
  4455. if (!core->capabilities[FW_UNLOAD].value)
  4456. return;
  4457. cancel_delayed_work(&core->fw_unload_work);
  4458. schedule_delayed_work(&core->fw_unload_work,
  4459. msecs_to_jiffies(core->capabilities[FW_UNLOAD_DELAY].value));
  4460. d_vpr_h("firmware unload delayed by %u ms\n",
  4461. core->capabilities[FW_UNLOAD_DELAY].value);
  4462. return;
  4463. }
  4464. static const char *get_codec_str(enum msm_vidc_codec_type type)
  4465. {
  4466. switch (type) {
  4467. case MSM_VIDC_H264: return "h264";
  4468. case MSM_VIDC_HEVC: return "h265";
  4469. case MSM_VIDC_VP9: return " vp9";
  4470. case MSM_VIDC_AV1: return " av1";
  4471. case MSM_VIDC_HEIC: return "heic";
  4472. }
  4473. return "....";
  4474. }
  4475. static const char *get_domain_str(enum msm_vidc_domain_type type)
  4476. {
  4477. switch (type) {
  4478. case MSM_VIDC_ENCODER: return "e";
  4479. case MSM_VIDC_DECODER: return "d";
  4480. }
  4481. return ".";
  4482. }
  4483. int msm_vidc_update_debug_str(struct msm_vidc_inst *inst)
  4484. {
  4485. u32 sid;
  4486. const char *codec;
  4487. const char *domain;
  4488. if (!inst) {
  4489. d_vpr_e("%s: Invalid params\n", __func__);
  4490. return -EINVAL;
  4491. }
  4492. sid = inst->session_id;
  4493. codec = get_codec_str(inst->codec);
  4494. domain = get_domain_str(inst->domain);
  4495. snprintf(inst->debug_str, sizeof(inst->debug_str), "%08x: %s%s", sid, codec, domain);
  4496. d_vpr_h("%s: sid: %08x, codec: %s, domain: %s, final: %s\n",
  4497. __func__, sid, codec, domain, inst->debug_str);
  4498. return 0;
  4499. }
/*
 * Dump a one-line summary (resolution, rates, properties) of every
 * running instance at error log level, under the core lock.
 * NOTE(review): assumes @core is non-NULL and its instances have valid
 * capabilities -- callers must guarantee this.
 */
static int msm_vidc_print_insts_info(struct msm_vidc_core *core)
{
	struct msm_vidc_inst *inst;
	u32 height, width, fps, orate;
	struct msm_vidc_inst_capability *capability;
	struct v4l2_format *out_f;
	struct v4l2_format *inp_f;
	char prop[64];

	d_vpr_e("Print all running instances\n");
	d_vpr_e("%6s | %6s | %5s | %5s | %5s\n", "width", "height", "fps", "orate", "prop");

	core_lock(core, __func__);
	list_for_each_entry(inst, &core->instances, list) {
		out_f = &inst->fmts[OUTPUT_PORT];
		inp_f = &inst->fmts[INPUT_PORT];
		capability = inst->capabilities;
		memset(&prop, 0, sizeof(prop));

		width = max(out_f->fmt.pix_mp.width, inp_f->fmt.pix_mp.width);
		height = max(out_f->fmt.pix_mp.height, inp_f->fmt.pix_mp.height);
		/* rates are stored in Q16; shift down to integer fps */
		fps = capability->cap[FRAME_RATE].value >> 16;
		orate = capability->cap[OPERATING_RATE].value >> 16;

		if (is_realtime_session(inst))
			strlcat(prop, "RT ", sizeof(prop));
		else
			strlcat(prop, "NRT", sizeof(prop));

		if (is_thumbnail_session(inst))
			strlcat(prop, "+THUMB", sizeof(prop));

		if (is_image_session(inst))
			strlcat(prop, "+IMAGE", sizeof(prop));

		i_vpr_e(inst, "%6u | %6u | %5u | %5u | %5s\n", width, height, fps, orate, prop);
	}
	core_unlock(core, __func__);

	return 0;
}
/*
 * Admission check: sum the load (mbps) of all realtime, non-error
 * sessions and reject when it exceeds MAX_MBPS. On overload, returns
 * -ENOMEM when some inactive session might free capacity soon, -EAGAIN
 * otherwise.
 * NOTE(review): num_inactive_sessions is counted before the
 * thumbnail/image/non-realtime skip, so it includes sessions whose load
 * is never summed -- confirm this is intended.
 */
int msm_vidc_check_core_mbps(struct msm_vidc_inst *inst)
{
	u32 mbps = 0, num_inactive_sessions = 0;
	struct msm_vidc_core *core;
	struct msm_vidc_inst *instance;
	u64 curr_time_ns;
	int rc = 0;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;

	curr_time_ns = ktime_get_ns();
	core_lock(core, __func__);
	list_for_each_entry(instance, &core->instances, list) {
		/* ignore invalid/error session */
		if (is_session_error(instance))
			continue;

		if (!is_active_session(instance->last_qbuf_time_ns, curr_time_ns)) {
			num_inactive_sessions++;
		}

		/* ignore thumbnail, image, and non realtime sessions */
		if (is_thumbnail_session(instance) ||
			is_image_session(instance) ||
			!is_realtime_session(instance))
			continue;

		mbps += msm_vidc_get_inst_load(instance);
	}
	core_unlock(core, __func__);

	if (mbps > core->capabilities[MAX_MBPS].value) {
		rc = num_inactive_sessions ? -ENOMEM : -EAGAIN;
		/* NOTE(review): message lacks trailing '\n' unlike siblings */
		i_vpr_e(inst, "%s: Hardware overloaded. needed %u, max %u", __func__,
			mbps, core->capabilities[MAX_MBPS].value);
		return rc;
	} else {
		i_vpr_h(inst, "%s: HW load needed %u is within max %u", __func__,
			mbps, core->capabilities[MAX_MBPS].value);
	}
	return 0;
}
/*
 * Admission check on macroblocks-per-frame: video and image sessions
 * are summed separately and compared against MAX_MBPF / MAX_IMAGE_MBPF;
 * a second pass bounds realtime video against MAX_RT_MBPF.
 * NOTE(review): the core lock is dropped and re-taken between the two
 * passes, so the instance list may change in between.
 */
int msm_vidc_check_core_mbpf(struct msm_vidc_inst *inst)
{
	u32 video_mbpf = 0, image_mbpf = 0, video_rt_mbpf = 0;
	struct msm_vidc_core *core;
	struct msm_vidc_inst *instance;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;

	core_lock(core, __func__);
	list_for_each_entry(instance, &core->instances, list) {
		/* ignore thumbnail session */
		if (is_thumbnail_session(instance))
			continue;

		if (is_image_session(instance))
			image_mbpf += msm_vidc_get_mbs_per_frame(instance);
		else
			video_mbpf += msm_vidc_get_mbs_per_frame(instance);
	}
	core_unlock(core, __func__);

	if (video_mbpf > core->capabilities[MAX_MBPF].value) {
		i_vpr_e(inst, "%s: video overloaded. needed %u, max %u", __func__,
			video_mbpf, core->capabilities[MAX_MBPF].value);
		return -ENOMEM;
	}

	if (image_mbpf > core->capabilities[MAX_IMAGE_MBPF].value) {
		i_vpr_e(inst, "%s: image overloaded. needed %u, max %u", __func__,
			image_mbpf, core->capabilities[MAX_IMAGE_MBPF].value);
		return -ENOMEM;
	}

	core_lock(core, __func__);
	/* check real-time video sessions max limit */
	list_for_each_entry(instance, &core->instances, list) {
		if (is_thumbnail_session(instance) ||
			is_image_session(instance) ||
			!is_realtime_session(instance))
			continue;

		video_rt_mbpf += msm_vidc_get_mbs_per_frame(instance);
	}
	core_unlock(core, __func__);

	if (video_rt_mbpf > core->capabilities[MAX_RT_MBPF].value) {
		i_vpr_e(inst, "%s: real-time video overloaded. needed %u, max %u",
			__func__, video_rt_mbpf, core->capabilities[MAX_RT_MBPF].value);
		return -ENOMEM;
	}

	return 0;
}
  4621. static int msm_vidc_check_inst_mbpf(struct msm_vidc_inst *inst)
  4622. {
  4623. u32 mbpf = 0, max_mbpf = 0;
  4624. struct msm_vidc_inst_capability *capability;
  4625. if (!inst || !inst->capabilities) {
  4626. d_vpr_e("%s: invalid params\n", __func__);
  4627. return -EINVAL;
  4628. }
  4629. capability = inst->capabilities;
  4630. if (is_secure_session(inst))
  4631. max_mbpf = capability->cap[SECURE_MBPF].max;
  4632. else if (is_encode_session(inst) && capability->cap[LOSSLESS].value)
  4633. max_mbpf = capability->cap[LOSSLESS_MBPF].max;
  4634. else
  4635. max_mbpf = capability->cap[MBPF].max;
  4636. /* check current session mbpf */
  4637. mbpf = msm_vidc_get_mbs_per_frame(inst);
  4638. if (mbpf > max_mbpf) {
  4639. i_vpr_e(inst, "%s: session overloaded. needed %u, max %u", __func__,
  4640. mbpf, max_mbpf);
  4641. return -ENOMEM;
  4642. }
  4643. return 0;
  4644. }
/*
 * Validate that an image (HEIC) encode session satisfies every hardware
 * constraint: input resolution in range and grid-aligned, linear YUV
 * input, output sized exactly one grid tile, CQ rate control, all-intra
 * (no GOP/B-frames), and time-delta RC plus frame-skip disabled.
 * Returns true only when all checks pass.
 */
static bool msm_vidc_allow_image_encode_session(struct msm_vidc_inst *inst)
{
	struct msm_vidc_inst_capability *capability;
	struct v4l2_format *fmt;
	u32 min_width, min_height, max_width, max_height, pix_fmt, profile;
	bool allow = false;

	if (!inst || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return false;
	}
	capability = inst->capabilities;

	if (!is_image_encode_session(inst)) {
		i_vpr_e(inst, "%s: not an image encode session\n", __func__);
		return false;
	}

	pix_fmt = capability->cap[PIX_FMTS].value;
	profile = capability->cap[PROFILE].value;

	/* is input width & height in allowed range */
	min_width = capability->cap[FRAME_WIDTH].min;
	max_width = capability->cap[FRAME_WIDTH].max;
	min_height = capability->cap[FRAME_HEIGHT].min;
	max_height = capability->cap[FRAME_HEIGHT].max;
	fmt = &inst->fmts[INPUT_PORT];
	if (!in_range(fmt->fmt.pix_mp.width, min_width, max_width) ||
		!in_range(fmt->fmt.pix_mp.height, min_height, max_height)) {
		i_vpr_e(inst, "unsupported wxh [%u x %u], allowed [%u x %u] to [%u x %u]\n",
			fmt->fmt.pix_mp.width, fmt->fmt.pix_mp.height,
			min_width, min_height, max_width, max_height);
		allow = false;
		goto exit;
	}

	/* is linear yuv color fmt */
	allow = is_linear_yuv_colorformat(pix_fmt);
	if (!allow) {
		i_vpr_e(inst, "%s: compressed fmt: %#x\n", __func__, pix_fmt);
		goto exit;
	}

	/* is input grid aligned */
	fmt = &inst->fmts[INPUT_PORT];
	allow = IS_ALIGNED(fmt->fmt.pix_mp.width, HEIC_GRID_DIMENSION);
	allow &= IS_ALIGNED(fmt->fmt.pix_mp.height, HEIC_GRID_DIMENSION);
	if (!allow) {
		i_vpr_e(inst, "%s: input is not grid aligned: %u x %u\n", __func__,
			fmt->fmt.pix_mp.width, fmt->fmt.pix_mp.height);
		goto exit;
	}

	/* is output grid dimension */
	fmt = &inst->fmts[OUTPUT_PORT];
	allow = fmt->fmt.pix_mp.width == HEIC_GRID_DIMENSION;
	allow &= fmt->fmt.pix_mp.height == HEIC_GRID_DIMENSION;
	if (!allow) {
		i_vpr_e(inst, "%s: output is not a grid dimension: %u x %u\n", __func__,
			fmt->fmt.pix_mp.width, fmt->fmt.pix_mp.height);
		goto exit;
	}

	/* is bitrate mode CQ */
	allow = capability->cap[BITRATE_MODE].value == HFI_RC_CQ;
	if (!allow) {
		i_vpr_e(inst, "%s: bitrate mode is not CQ: %#x\n", __func__,
			capability->cap[BITRATE_MODE].value);
		goto exit;
	}

	/* is all intra */
	allow = !capability->cap[GOP_SIZE].value;
	allow &= !capability->cap[B_FRAME].value;
	if (!allow) {
		i_vpr_e(inst, "%s: not all intra: gop: %u, bframe: %u\n", __func__,
			capability->cap[GOP_SIZE].value, capability->cap[B_FRAME].value);
		goto exit;
	}

	/* is time delta based rc disabled */
	allow = !capability->cap[TIME_DELTA_BASED_RC].value;
	if (!allow) {
		i_vpr_e(inst, "%s: time delta based rc not disabled: %#x\n", __func__,
			capability->cap[TIME_DELTA_BASED_RC].value);
		goto exit;
	}

	/* is frame skip mode disabled */
	allow = !capability->cap[FRAME_SKIP_MODE].value;
	if (!allow) {
		i_vpr_e(inst, "%s: frame skip mode not disabled: %#x\n", __func__,
			capability->cap[FRAME_SKIP_MODE].value);
		goto exit;
	}

exit:
	if (!allow)
		i_vpr_e(inst, "%s: current session not allowed\n", __func__);

	return allow;
}
/*
 * Validate the session resolution: picks decode input vs encode crop
 * dimensions, selects the applicable min/max range (secure, lossless
 * encode, or generic), rejects odd dimensions for encoders, and bounds
 * interlaced content against the interlace-specific limits.
 */
static int msm_vidc_check_resolution_supported(struct msm_vidc_inst *inst)
{
	struct msm_vidc_inst_capability *capability;
	u32 width = 0, height = 0, min_width, min_height,
		max_width, max_height;
	bool is_interlaced = false;

	if (!inst || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	capability = inst->capabilities;

	/* decode checks the bitstream resolution, encode the crop rect */
	if (is_decode_session(inst)) {
		width = inst->fmts[INPUT_PORT].fmt.pix_mp.width;
		height = inst->fmts[INPUT_PORT].fmt.pix_mp.height;
	} else if (is_encode_session(inst)) {
		width = inst->crop.width;
		height = inst->crop.height;
	}

	/* pick the applicable limits for this session type */
	if (is_secure_session(inst)) {
		min_width = capability->cap[SECURE_FRAME_WIDTH].min;
		max_width = capability->cap[SECURE_FRAME_WIDTH].max;
		min_height = capability->cap[SECURE_FRAME_HEIGHT].min;
		max_height = capability->cap[SECURE_FRAME_HEIGHT].max;
	} else if (is_encode_session(inst) && capability->cap[LOSSLESS].value) {
		min_width = capability->cap[LOSSLESS_FRAME_WIDTH].min;
		max_width = capability->cap[LOSSLESS_FRAME_WIDTH].max;
		min_height = capability->cap[LOSSLESS_FRAME_HEIGHT].min;
		max_height = capability->cap[LOSSLESS_FRAME_HEIGHT].max;
	} else {
		min_width = capability->cap[FRAME_WIDTH].min;
		max_width = capability->cap[FRAME_WIDTH].max;
		min_height = capability->cap[FRAME_HEIGHT].min;
		max_height = capability->cap[FRAME_HEIGHT].max;
	}

	/* reject odd resolution session */
	if (is_encode_session(inst) &&
		(is_odd(width) || is_odd(height) ||
		is_odd(inst->compose.width) ||
		is_odd(inst->compose.height))) {
		i_vpr_e(inst, "%s: resolution is not even. wxh [%u x %u], compose [%u x %u]\n",
			__func__, width, height, inst->compose.width,
			inst->compose.height);
		return -EINVAL;
	}

	/* check if input width and height is in supported range */
	if (is_decode_session(inst) || is_encode_session(inst)) {
		if (!in_range(width, min_width, max_width) ||
			!in_range(height, min_height, max_height)) {
			i_vpr_e(inst,
				"%s: unsupported input wxh [%u x %u], allowed range: [%u x %u] to [%u x %u]\n",
				__func__, width, height, min_width,
				min_height, max_width, max_height);
			return -EINVAL;
		}
	}

	/* check interlace supported resolution */
	is_interlaced = capability->cap[CODED_FRAMES].value == CODED_FRAMES_INTERLACE;
	if (is_interlaced && (width > INTERLACE_WIDTH_MAX || height > INTERLACE_HEIGHT_MAX ||
		NUM_MBS_PER_FRAME(width, height) > INTERLACE_MB_PER_FRAME_MAX)) {
		i_vpr_e(inst, "%s: unsupported interlace wxh [%u x %u], max [%u x %u]\n",
			__func__, width, height, INTERLACE_WIDTH_MAX, INTERLACE_HEIGHT_MAX);
		return -EINVAL;
	}

	return 0;
}
  4799. static int msm_vidc_check_max_sessions(struct msm_vidc_inst *inst)
  4800. {
  4801. u32 width = 0, height = 0;
  4802. u32 num_720p_sessions = 0, num_1080p_sessions = 0;
  4803. u32 num_4k_sessions = 0, num_8k_sessions = 0;
  4804. struct msm_vidc_inst *i;
  4805. struct msm_vidc_core *core;
  4806. if (!inst || !inst->core) {
  4807. d_vpr_e("%s: invalid params\n", __func__);
  4808. return -EINVAL;
  4809. }
  4810. core = inst->core;
  4811. if (!core->capabilities) {
  4812. i_vpr_e(inst, "%s: invalid params\n", __func__);
  4813. return -EINVAL;
  4814. }
  4815. core_lock(core, __func__);
  4816. list_for_each_entry(i, &core->instances, list) {
  4817. /* skip image sessions count */
  4818. if (is_image_session(inst))
  4819. continue;
  4820. if (is_decode_session(i)) {
  4821. width = i->fmts[INPUT_PORT].fmt.pix_mp.width;
  4822. height = i->fmts[INPUT_PORT].fmt.pix_mp.height;
  4823. } else if (is_encode_session(i)) {
  4824. width = i->crop.width;
  4825. height = i->crop.height;
  4826. }
  4827. /*
  4828. * one 8k session equals to 64 720p sessions in reality.
  4829. * So for one 8k session the number of 720p sessions will
  4830. * exceed max supported session count(16), hence one 8k session
  4831. * will be rejected as well.
  4832. * Therefore, treat one 8k session equal to two 4k sessions and
  4833. * one 4k session equal to two 1080p sessions and
  4834. * one 1080p session equal to two 720p sessions. This equation
  4835. * will make one 8k session equal to eight 720p sessions
  4836. * which looks good.
  4837. *
  4838. * Do not treat resolutions above 4k as 8k session instead
  4839. * treat (4K + half 4k) above as 8k session
  4840. */
  4841. if (res_is_greater_than(width, height, 4096 + (4096 >> 1), 2176 + (2176 >> 1))) {
  4842. num_8k_sessions += 1;
  4843. num_4k_sessions += 2;
  4844. num_1080p_sessions += 4;
  4845. num_720p_sessions += 8;
  4846. } else if (res_is_greater_than(width, height, 1920 + (1920 >> 1), 1088 + (1088 >> 1))) {
  4847. num_4k_sessions += 1;
  4848. num_1080p_sessions += 2;
  4849. num_720p_sessions += 4;
  4850. } else if (res_is_greater_than(width, height, 1280 + (1280 >> 1), 736 + (736 >> 1))) {
  4851. num_1080p_sessions += 1;
  4852. num_720p_sessions += 2;
  4853. } else {
  4854. num_720p_sessions += 1;
  4855. }
  4856. }
  4857. core_unlock(core, __func__);
  4858. if (num_8k_sessions > core->capabilities[MAX_NUM_8K_SESSIONS].value) {
  4859. i_vpr_e(inst, "%s: total 8k sessions %d, exceeded max limit %d\n",
  4860. __func__, num_8k_sessions,
  4861. core->capabilities[MAX_NUM_8K_SESSIONS].value);
  4862. return -ENOMEM;
  4863. }
  4864. if (num_4k_sessions > core->capabilities[MAX_NUM_4K_SESSIONS].value) {
  4865. i_vpr_e(inst, "%s: total 4K sessions %d, exceeded max limit %d\n",
  4866. __func__, num_4k_sessions,
  4867. core->capabilities[MAX_NUM_4K_SESSIONS].value);
  4868. return -ENOMEM;
  4869. }
  4870. if (num_1080p_sessions > core->capabilities[MAX_NUM_1080P_SESSIONS].value) {
  4871. i_vpr_e(inst, "%s: total 1080p sessions %d, exceeded max limit %d\n",
  4872. __func__, num_1080p_sessions,
  4873. core->capabilities[MAX_NUM_1080P_SESSIONS].value);
  4874. return -ENOMEM;
  4875. }
  4876. if (num_720p_sessions > core->capabilities[MAX_NUM_720P_SESSIONS].value) {
  4877. i_vpr_e(inst, "%s: total sessions(<=720p) %d, exceeded max limit %d\n",
  4878. __func__, num_720p_sessions,
  4879. core->capabilities[MAX_NUM_720P_SESSIONS].value);
  4880. return -ENOMEM;
  4881. }
  4882. return 0;
  4883. }
  4884. int msm_vidc_check_session_supported(struct msm_vidc_inst *inst)
  4885. {
  4886. bool allow = false;
  4887. int rc = 0;
  4888. if (!inst) {
  4889. d_vpr_e("%s: invalid params\n", __func__);
  4890. return -EINVAL;
  4891. }
  4892. if (is_image_session(inst) && is_secure_session(inst)) {
  4893. i_vpr_e(inst, "%s: secure image session not supported\n", __func__);
  4894. rc = -EINVAL;
  4895. goto exit;
  4896. }
  4897. rc = msm_vidc_check_core_mbps(inst);
  4898. if (rc)
  4899. goto exit;
  4900. rc = msm_vidc_check_core_mbpf(inst);
  4901. if (rc)
  4902. goto exit;
  4903. rc = msm_vidc_check_inst_mbpf(inst);
  4904. if (rc)
  4905. goto exit;
  4906. rc = msm_vidc_check_resolution_supported(inst);
  4907. if (rc)
  4908. goto exit;
  4909. /* check image capabilities */
  4910. if (is_image_encode_session(inst)) {
  4911. allow = msm_vidc_allow_image_encode_session(inst);
  4912. if (!allow) {
  4913. rc = -EINVAL;
  4914. goto exit;
  4915. }
  4916. }
  4917. rc = msm_vidc_check_max_sessions(inst);
  4918. if (rc)
  4919. goto exit;
  4920. exit:
  4921. if (rc) {
  4922. i_vpr_e(inst, "%s: current session not supported\n", __func__);
  4923. msm_vidc_print_insts_info(inst->core);
  4924. }
  4925. return rc;
  4926. }
  4927. int msm_vidc_check_scaling_supported(struct msm_vidc_inst *inst)
  4928. {
  4929. u32 iwidth, owidth, iheight, oheight, ds_factor;
  4930. if (!inst || !inst->capabilities) {
  4931. d_vpr_e("%s: invalid params\n", __func__);
  4932. return -EINVAL;
  4933. }
  4934. if (is_image_session(inst) || is_decode_session(inst)) {
  4935. i_vpr_h(inst, "%s: Scaling is supported for encode session only\n", __func__);
  4936. return 0;
  4937. }
  4938. if (!is_scaling_enabled(inst)) {
  4939. i_vpr_h(inst, "%s: Scaling not enabled. skip scaling check\n", __func__);
  4940. return 0;
  4941. }
  4942. iwidth = inst->crop.width;
  4943. iheight = inst->crop.height;
  4944. owidth = inst->compose.width;
  4945. oheight = inst->compose.height;
  4946. ds_factor = inst->capabilities->cap[SCALE_FACTOR].value;
  4947. /* upscaling: encoder doesnot support upscaling */
  4948. if (owidth > iwidth || oheight > iheight) {
  4949. i_vpr_e(inst, "%s: upscale not supported: input [%u x %u], output [%u x %u]\n",
  4950. __func__, iwidth, iheight, owidth, oheight);
  4951. return -EINVAL;
  4952. }
  4953. /* downscaling: only supported upto 1/8 of width & 1/8 of height */
  4954. if (iwidth > owidth * ds_factor || iheight > oheight * ds_factor) {
  4955. i_vpr_e(inst,
  4956. "%s: unsupported ratio: input [%u x %u], output [%u x %u], ratio %u\n",
  4957. __func__, iwidth, iheight, owidth, oheight, ds_factor);
  4958. return -EINVAL;
  4959. }
  4960. return 0;
  4961. }
/* pairs an HFI property id with the port it must be queried on */
struct msm_vidc_fw_query_params {
	u32 hfi_prop_name;	/* HFI_PROP_* identifier to fetch from firmware */
	u32 port;		/* HFI_PORT_* target; HFI_PORT_NONE when port-agnostic */
};
  4966. int msm_vidc_get_properties(struct msm_vidc_inst *inst)
  4967. {
  4968. int rc = 0;
  4969. int i;
  4970. static const struct msm_vidc_fw_query_params fw_query_params[] = {
  4971. {HFI_PROP_STAGE, HFI_PORT_NONE},
  4972. {HFI_PROP_PIPE, HFI_PORT_NONE},
  4973. {HFI_PROP_QUALITY_MODE, HFI_PORT_BITSTREAM}
  4974. };
  4975. if (!inst || !inst->capabilities) {
  4976. d_vpr_e("%s: invalid params\n", __func__);
  4977. return -EINVAL;
  4978. }
  4979. for (i = 0; i < ARRAY_SIZE(fw_query_params); i++) {
  4980. if (is_decode_session(inst)) {
  4981. if (fw_query_params[i].hfi_prop_name == HFI_PROP_QUALITY_MODE)
  4982. continue;
  4983. }
  4984. i_vpr_l(inst, "%s: querying fw for property %#x\n", __func__,
  4985. fw_query_params[i].hfi_prop_name);
  4986. rc = venus_hfi_session_property(inst,
  4987. fw_query_params[i].hfi_prop_name,
  4988. (HFI_HOST_FLAGS_RESPONSE_REQUIRED |
  4989. HFI_HOST_FLAGS_INTR_REQUIRED |
  4990. HFI_HOST_FLAGS_GET_PROPERTY),
  4991. fw_query_params[i].port,
  4992. HFI_PAYLOAD_NONE,
  4993. NULL,
  4994. 0);
  4995. if (rc)
  4996. return rc;
  4997. }
  4998. return 0;
  4999. }