/* msm_vidc_control.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
  4. */
  5. /* Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved. */
  6. #include "msm_vidc_control.h"
  7. #include "msm_vidc_debug.h"
  8. #include "hfi_packet.h"
  9. #include "hfi_property.h"
  10. #include "venus_hfi.h"
  11. #include "msm_vidc_internal.h"
  12. #include "msm_vidc_driver.h"
  13. #include "msm_venc.h"
  14. #include "msm_vidc_platform.h"
  15. #define CAP_TO_8BIT_QP(a) { \
  16. if ((a) < MIN_QP_8BIT) \
  17. (a) = MIN_QP_8BIT; \
  18. }
  19. extern struct msm_vidc_core *g_core;
  20. static bool is_priv_ctrl(u32 id)
  21. {
  22. bool private = false;
  23. if (IS_PRIV_CTRL(id))
  24. return true;
  25. /*
  26. * Treat below standard controls as private because
  27. * we have added custom values to the controls
  28. */
  29. switch (id) {
  30. /*
  31. * TODO: V4L2_CID_MPEG_VIDEO_HEVC_PROFILE is std ctrl. But
  32. * V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10_STILL_PICTURE support is not
  33. * available yet. Hence, make this as private ctrl for time being
  34. */
  35. case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:
  36. private = true;
  37. break;
  38. default:
  39. private = false;
  40. break;
  41. }
  42. return private;
  43. }
/* V4L2 qmenu strings for the BLUR_TYPES control; NULL-terminated. */
static const char *const mpeg_video_blur_types[] = {
	"Blur None",
	"Blur External",
	"Blur Adaptive",
	NULL,
};

/* HEVC/HEIC profile menu (includes Main 10 Still Picture custom entry). */
static const char *const mpeg_video_hevc_profile[] = {
	"Main",
	"Main Still Picture",
	"Main 10",
	"Main 10 Still Picture",
	NULL,
};

/* AV1 profile menu. */
static const char * const av1_profile[] = {
	"Main",
	"High",
	"Professional",
	NULL,
};

/* AV1 level menu, 2.0 through 7.3. */
static const char * const av1_level[] = {
	"2.0",
	"2.1",
	"2.2",
	"2.3",
	"3.0",
	"3.1",
	"3.2",
	"3.3",
	"4.0",
	"4.1",
	"4.2",
	"4.3",
	"5.0",
	"5.1",
	"5.2",
	"5.3",
	"6.0",
	"6.1",
	"6.2",
	"6.3",
	"7.0",
	"7.1",
	"7.2",
	"7.3",
	NULL,
};

/* AV1 tier menu. */
static const char * const av1_tier[] = {
	"Main",
	"High",
	NULL,
};

/* Intra-refresh type menu for the IR_TYPE control. */
static const char *const mpeg_video_vidc_ir_type[] = {
	"Random",
	"Cyclic",
	NULL,
};
  100. u32 msm_vidc_get_port_info(struct msm_vidc_inst *inst,
  101. enum msm_vidc_inst_capability_type cap_id)
  102. {
  103. struct msm_vidc_inst_capability *capability = inst->capabilities;
  104. if (capability->cap[cap_id].flags & CAP_FLAG_INPUT_PORT &&
  105. capability->cap[cap_id].flags & CAP_FLAG_OUTPUT_PORT) {
  106. if (inst->bufq[OUTPUT_PORT].vb2q->streaming)
  107. return get_hfi_port(inst, INPUT_PORT);
  108. else
  109. return get_hfi_port(inst, OUTPUT_PORT);
  110. }
  111. if (capability->cap[cap_id].flags & CAP_FLAG_INPUT_PORT)
  112. return get_hfi_port(inst, INPUT_PORT);
  113. else if (capability->cap[cap_id].flags & CAP_FLAG_OUTPUT_PORT)
  114. return get_hfi_port(inst, OUTPUT_PORT);
  115. else
  116. return HFI_PORT_NONE;
  117. }
  118. static const char * const * msm_vidc_get_qmenu_type(
  119. struct msm_vidc_inst *inst, u32 cap_id)
  120. {
  121. switch (cap_id) {
  122. case BLUR_TYPES:
  123. return mpeg_video_blur_types;
  124. case PROFILE:
  125. if (inst->codec == MSM_VIDC_HEVC || inst->codec == MSM_VIDC_HEIC) {
  126. return mpeg_video_hevc_profile;
  127. } else if (inst->codec == MSM_VIDC_AV1) {
  128. return av1_profile;
  129. } else {
  130. i_vpr_e(inst, "%s: invalid codec type %d for cap id %d\n",
  131. __func__, inst->codec, cap_id);
  132. return NULL;
  133. }
  134. case LEVEL:
  135. if (inst->codec == MSM_VIDC_AV1) {
  136. return av1_level;
  137. } else {
  138. i_vpr_e(inst, "%s: invalid codec type %d for cap id %d\n",
  139. __func__, inst->codec, cap_id);
  140. return NULL;
  141. }
  142. case AV1_TIER:
  143. return av1_tier;
  144. case IR_TYPE:
  145. return mpeg_video_vidc_ir_type;
  146. default:
  147. i_vpr_e(inst, "%s: No available qmenu for cap id %d\n",
  148. __func__, cap_id);
  149. return NULL;
  150. }
  151. }
  152. static inline bool has_parents(struct msm_vidc_inst_cap *cap)
  153. {
  154. return !!cap->parents[0];
  155. }
  156. static inline bool has_childrens(struct msm_vidc_inst_cap *cap)
  157. {
  158. return !!cap->children[0];
  159. }
  160. static inline bool is_root(struct msm_vidc_inst_cap *cap)
  161. {
  162. return !has_parents(cap);
  163. }
  164. bool is_valid_cap_id(enum msm_vidc_inst_capability_type cap_id)
  165. {
  166. return cap_id > INST_CAP_NONE && cap_id < INST_CAP_MAX;
  167. }
  168. static inline bool is_valid_cap(struct msm_vidc_inst_cap *cap)
  169. {
  170. return is_valid_cap_id(cap->cap_id);
  171. }
  172. static inline bool is_all_parents_visited(
  173. struct msm_vidc_inst_cap *cap, bool lookup[INST_CAP_MAX]) {
  174. bool found = true;
  175. int i;
  176. for (i = 0; i < MAX_CAP_PARENTS; i++) {
  177. if (cap->parents[i] == INST_CAP_NONE)
  178. continue;
  179. if (!lookup[cap->parents[i]]) {
  180. found = false;
  181. break;
  182. }
  183. }
  184. return found;
  185. }
  186. static int add_node_list(struct list_head *list, enum msm_vidc_inst_capability_type cap_id)
  187. {
  188. int rc = 0;
  189. struct msm_vidc_inst_cap_entry *entry = NULL;
  190. rc = msm_vidc_vmem_alloc(sizeof(struct msm_vidc_inst_cap_entry),
  191. (void **)&entry, __func__);
  192. if (rc)
  193. return rc;
  194. INIT_LIST_HEAD(&entry->list);
  195. entry->cap_id = cap_id;
  196. list_add_tail(&entry->list, list);
  197. return rc;
  198. }
  199. static int add_node(
  200. struct list_head *list, struct msm_vidc_inst_cap *rcap, bool lookup[INST_CAP_MAX])
  201. {
  202. int rc = 0;
  203. if (lookup[rcap->cap_id])
  204. return 0;
  205. rc = add_node_list(list, rcap->cap_id);
  206. if (rc)
  207. return rc;
  208. lookup[rcap->cap_id] = true;
  209. return 0;
  210. }
  211. static int swap_node(struct msm_vidc_inst_cap *rcap,
  212. struct list_head *src_list, bool src_lookup[INST_CAP_MAX],
  213. struct list_head *dest_list, bool dest_lookup[INST_CAP_MAX])
  214. {
  215. struct msm_vidc_inst_cap_entry *entry, *temp;
  216. bool found = false;
  217. /* cap must be available in src and not present in dest */
  218. if (!src_lookup[rcap->cap_id] || dest_lookup[rcap->cap_id]) {
  219. d_vpr_e("%s: not found in src or already found in dest for cap %s\n",
  220. __func__, cap_name(rcap->cap_id));
  221. return -EINVAL;
  222. }
  223. /* check if entry present in src_list */
  224. list_for_each_entry_safe(entry, temp, src_list, list) {
  225. if (entry->cap_id == rcap->cap_id) {
  226. found = true;
  227. break;
  228. }
  229. }
  230. if (!found) {
  231. d_vpr_e("%s: cap %s not found in src list\n",
  232. __func__, cap_name(rcap->cap_id));
  233. return -EINVAL;
  234. }
  235. /* remove from src_list */
  236. list_del_init(&entry->list);
  237. src_lookup[rcap->cap_id] = false;
  238. /* add it to dest_list */
  239. list_add_tail(&entry->list, dest_list);
  240. dest_lookup[rcap->cap_id] = true;
  241. return 0;
  242. }
  243. int msm_vidc_packetize_control(struct msm_vidc_inst *inst,
  244. enum msm_vidc_inst_capability_type cap_id, u32 payload_type,
  245. void *hfi_val, u32 payload_size, const char *func)
  246. {
  247. int rc = 0;
  248. u64 payload = 0;
  249. if (payload_size > sizeof(u32)) {
  250. i_vpr_e(inst, "%s: payload size is more than u32 for cap[%d] %s\n",
  251. func, cap_id, cap_name(cap_id));
  252. return -EINVAL;
  253. }
  254. if (payload_size == sizeof(u32))
  255. payload = *(u32 *)hfi_val;
  256. else if (payload_size == sizeof(u8))
  257. payload = *(u8 *)hfi_val;
  258. else if (payload_size == sizeof(u16))
  259. payload = *(u16 *)hfi_val;
  260. i_vpr_h(inst, FMT_STRING_SET_CAP,
  261. cap_name(cap_id), inst->capabilities->cap[cap_id].value, payload);
  262. rc = venus_hfi_session_property(inst,
  263. inst->capabilities->cap[cap_id].hfi_id,
  264. HFI_HOST_FLAGS_NONE,
  265. msm_vidc_get_port_info(inst, cap_id),
  266. payload_type,
  267. hfi_val,
  268. payload_size);
  269. if (rc) {
  270. i_vpr_e(inst, "%s: failed to set cap[%d] %s to fw\n",
  271. func, cap_id, cap_name(cap_id));
  272. return rc;
  273. }
  274. return 0;
  275. }
  276. enum msm_vidc_inst_capability_type msm_vidc_get_cap_id(
  277. struct msm_vidc_inst *inst, u32 id)
  278. {
  279. enum msm_vidc_inst_capability_type i = INST_CAP_NONE + 1;
  280. struct msm_vidc_inst_capability *capability;
  281. enum msm_vidc_inst_capability_type cap_id = INST_CAP_NONE;
  282. capability = inst->capabilities;
  283. do {
  284. if (capability->cap[i].v4l2_id == id) {
  285. cap_id = capability->cap[i].cap_id;
  286. break;
  287. }
  288. i++;
  289. } while (i < INST_CAP_MAX);
  290. return cap_id;
  291. }
  292. static int msm_vidc_add_capid_to_fw_list(struct msm_vidc_inst *inst,
  293. enum msm_vidc_inst_capability_type cap_id)
  294. {
  295. struct msm_vidc_inst_cap_entry *entry = NULL;
  296. int rc = 0;
  297. /* skip adding if cap_id already present in firmware list */
  298. list_for_each_entry(entry, &inst->firmware_list, list) {
  299. if (entry->cap_id == cap_id) {
  300. i_vpr_l(inst,
  301. "%s: cap[%d] %s already present in fw list\n",
  302. __func__, cap_id, cap_name(cap_id));
  303. return 0;
  304. }
  305. }
  306. rc = add_node_list(&inst->firmware_list, cap_id);
  307. if (rc)
  308. return rc;
  309. return 0;
  310. }
  311. static int msm_vidc_add_children(struct msm_vidc_inst *inst,
  312. enum msm_vidc_inst_capability_type cap_id)
  313. {
  314. struct msm_vidc_inst_cap *cap;
  315. int i, rc = 0;
  316. cap = &inst->capabilities->cap[cap_id];
  317. for (i = 0; i < MAX_CAP_CHILDREN; i++) {
  318. if (!cap->children[i])
  319. break;
  320. if (!is_valid_cap_id(cap->children[i]))
  321. continue;
  322. rc = add_node_list(&inst->children_list, cap->children[i]);
  323. if (rc)
  324. return rc;
  325. }
  326. return rc;
  327. }
  328. bool is_parent_available(struct msm_vidc_inst *inst,
  329. u32 cap_id, u32 check_parent, const char *func)
  330. {
  331. int i = 0;
  332. u32 cap_parent;
  333. while (i < MAX_CAP_PARENTS &&
  334. inst->capabilities->cap[cap_id].parents[i]) {
  335. cap_parent = inst->capabilities->cap[cap_id].parents[i];
  336. if (cap_parent == check_parent) {
  337. return true;
  338. }
  339. i++;
  340. }
  341. i_vpr_e(inst,
  342. "%s: missing parent %s for %s\n",
  343. func, cap_name(check_parent), cap_name(cap_id));
  344. return false;
  345. }
  346. int msm_vidc_update_cap_value(struct msm_vidc_inst *inst, u32 cap_id,
  347. s32 adjusted_val, const char *func)
  348. {
  349. int prev_value = 0;
  350. if (!inst || !inst->capabilities) {
  351. d_vpr_e("%s: invalid params\n", __func__);
  352. return -EINVAL;
  353. }
  354. prev_value = inst->capabilities->cap[cap_id].value;
  355. if (is_meta_cap(inst, cap_id)) {
  356. /*
  357. * cumulative control value if client set same metadata
  358. * control multiple times.
  359. */
  360. if (adjusted_val & MSM_VIDC_META_ENABLE) {
  361. /* enable metadata */
  362. inst->capabilities->cap[cap_id].value |= adjusted_val;
  363. } else {
  364. /* disable metadata */
  365. inst->capabilities->cap[cap_id].value &= ~adjusted_val;
  366. }
  367. } else {
  368. inst->capabilities->cap[cap_id].value = adjusted_val;
  369. }
  370. if (prev_value != inst->capabilities->cap[cap_id].value) {
  371. i_vpr_h(inst,
  372. "%s: updated database: name: %s, value: %#x -> %#x\n",
  373. func, cap_name(cap_id),
  374. prev_value, inst->capabilities->cap[cap_id].value);
  375. }
  376. return 0;
  377. }
  378. int msm_vidc_get_parent_value(struct msm_vidc_inst* inst,
  379. u32 cap_id, u32 parent, s32 *value, const char *func)
  380. {
  381. int rc = 0;
  382. if (is_parent_available(inst, cap_id, parent, func)) {
  383. switch (parent) {
  384. case BITRATE_MODE:
  385. *value = inst->hfi_rc_type;
  386. break;
  387. case LAYER_TYPE:
  388. *value = inst->hfi_layer_type;
  389. break;
  390. default:
  391. *value = inst->capabilities->cap[parent].value;
  392. break;
  393. }
  394. } else {
  395. rc = -EINVAL;
  396. }
  397. return rc;
  398. }
  399. static int msm_vidc_adjust_hevc_qp(struct msm_vidc_inst *inst,
  400. enum msm_vidc_inst_capability_type cap_id)
  401. {
  402. struct msm_vidc_inst_capability *capability;
  403. s32 pix_fmt = -1;
  404. capability = inst->capabilities;
  405. if (!(inst->codec == MSM_VIDC_HEVC || inst->codec == MSM_VIDC_HEIC)) {
  406. i_vpr_e(inst,
  407. "%s: incorrect cap[%d] %s entry in database, fix database\n",
  408. __func__, cap_id, cap_name(cap_id));
  409. return -EINVAL;
  410. }
  411. if (msm_vidc_get_parent_value(inst, cap_id,
  412. PIX_FMTS, &pix_fmt, __func__))
  413. return -EINVAL;
  414. if (pix_fmt == MSM_VIDC_FMT_P010 || pix_fmt == MSM_VIDC_FMT_TP10C)
  415. goto exit;
  416. CAP_TO_8BIT_QP(capability->cap[cap_id].value);
  417. if (cap_id == MIN_FRAME_QP) {
  418. CAP_TO_8BIT_QP(capability->cap[I_FRAME_MIN_QP].value);
  419. CAP_TO_8BIT_QP(capability->cap[P_FRAME_MIN_QP].value);
  420. CAP_TO_8BIT_QP(capability->cap[B_FRAME_MIN_QP].value);
  421. } else if (cap_id == MAX_FRAME_QP) {
  422. CAP_TO_8BIT_QP(capability->cap[I_FRAME_MAX_QP].value);
  423. CAP_TO_8BIT_QP(capability->cap[P_FRAME_MAX_QP].value);
  424. CAP_TO_8BIT_QP(capability->cap[B_FRAME_MAX_QP].value);
  425. }
  426. exit:
  427. return 0;
  428. }
  429. static int msm_vidc_adjust_cap(struct msm_vidc_inst *inst,
  430. enum msm_vidc_inst_capability_type cap_id,
  431. struct v4l2_ctrl *ctrl, const char *func)
  432. {
  433. struct msm_vidc_inst_cap *cap;
  434. int rc = 0;
  435. /* validate cap_id */
  436. if (!is_valid_cap_id(cap_id))
  437. return 0;
  438. /* validate cap */
  439. cap = &inst->capabilities->cap[cap_id];
  440. if (!is_valid_cap(cap))
  441. return 0;
  442. /* check if adjust supported */
  443. if (!cap->adjust) {
  444. if (ctrl)
  445. msm_vidc_update_cap_value(inst, cap_id, ctrl->val, func);
  446. return 0;
  447. }
  448. /* call adjust */
  449. rc = cap->adjust(inst, ctrl);
  450. if (rc) {
  451. i_vpr_e(inst, "%s: adjust cap failed for %s\n", func, cap_name(cap_id));
  452. return rc;
  453. }
  454. return rc;
  455. }
  456. static int msm_vidc_set_cap(struct msm_vidc_inst *inst,
  457. enum msm_vidc_inst_capability_type cap_id,
  458. const char *func)
  459. {
  460. struct msm_vidc_inst_cap *cap;
  461. int rc = 0;
  462. /* validate cap_id */
  463. if (!is_valid_cap_id(cap_id))
  464. return 0;
  465. /* validate cap */
  466. cap = &inst->capabilities->cap[cap_id];
  467. if (!is_valid_cap(cap))
  468. return 0;
  469. /* check if set supported */
  470. if (!cap->set)
  471. return 0;
  472. /* call set */
  473. rc = cap->set(inst, cap_id);
  474. if (rc) {
  475. i_vpr_e(inst, "%s: set cap failed for %s\n", func, cap_name(cap_id));
  476. return rc;
  477. }
  478. return rc;
  479. }
  480. static int msm_vidc_adjust_dynamic_property(struct msm_vidc_inst *inst,
  481. enum msm_vidc_inst_capability_type cap_id, struct v4l2_ctrl *ctrl)
  482. {
  483. struct msm_vidc_inst_cap_entry *entry = NULL, *temp = NULL;
  484. struct msm_vidc_inst_capability *capability;
  485. s32 prev_value;
  486. int rc = 0;
  487. if (!inst || !inst->capabilities || !ctrl) {
  488. d_vpr_e("%s: invalid param\n", __func__);
  489. return -EINVAL;
  490. }
  491. capability = inst->capabilities;
  492. /* sanitize cap_id */
  493. if (!is_valid_cap_id(cap_id)) {
  494. i_vpr_e(inst, "%s: invalid cap_id %u\n", __func__, cap_id);
  495. return -EINVAL;
  496. }
  497. if (!(capability->cap[cap_id].flags & CAP_FLAG_DYNAMIC_ALLOWED)) {
  498. i_vpr_h(inst,
  499. "%s: dynamic setting of cap[%d] %s is not allowed\n",
  500. __func__, cap_id, cap_name(cap_id));
  501. return -EBUSY;
  502. }
  503. i_vpr_h(inst, "%s: cap[%d] %s\n", __func__, cap_id, cap_name(cap_id));
  504. prev_value = capability->cap[cap_id].value;
  505. rc = msm_vidc_adjust_cap(inst, cap_id, ctrl, __func__);
  506. if (rc)
  507. return rc;
  508. if (capability->cap[cap_id].value == prev_value && cap_id == GOP_SIZE) {
  509. /*
  510. * Ignore setting same GOP size value to firmware to avoid
  511. * unnecessary generation of IDR frame.
  512. */
  513. return 0;
  514. }
  515. /* add cap_id to firmware list always */
  516. rc = msm_vidc_add_capid_to_fw_list(inst, cap_id);
  517. if (rc)
  518. goto error;
  519. /* add children only if cap value modified */
  520. if (capability->cap[cap_id].value == prev_value)
  521. return 0;
  522. rc = msm_vidc_add_children(inst, cap_id);
  523. if (rc)
  524. goto error;
  525. list_for_each_entry_safe(entry, temp, &inst->children_list, list) {
  526. if (!is_valid_cap_id(entry->cap_id)) {
  527. rc = -EINVAL;
  528. goto error;
  529. }
  530. if (!capability->cap[entry->cap_id].adjust) {
  531. i_vpr_e(inst, "%s: child cap must have ajdust function %s\n",
  532. __func__, cap_name(entry->cap_id));
  533. rc = -EINVAL;
  534. goto error;
  535. }
  536. prev_value = capability->cap[entry->cap_id].value;
  537. rc = msm_vidc_adjust_cap(inst, entry->cap_id, NULL, __func__);
  538. if (rc)
  539. goto error;
  540. /* add children if cap value modified */
  541. if (capability->cap[entry->cap_id].value != prev_value) {
  542. /* add cap_id to firmware list always */
  543. rc = msm_vidc_add_capid_to_fw_list(inst, entry->cap_id);
  544. if (rc)
  545. goto error;
  546. rc = msm_vidc_add_children(inst, entry->cap_id);
  547. if (rc)
  548. goto error;
  549. }
  550. list_del_init(&entry->list);
  551. msm_vidc_vmem_free((void **)&entry);
  552. }
  553. /* expecting children_list to be empty */
  554. if (!list_empty(&inst->children_list)) {
  555. i_vpr_e(inst, "%s: child_list is not empty\n", __func__);
  556. rc = -EINVAL;
  557. goto error;
  558. }
  559. return 0;
  560. error:
  561. list_for_each_entry_safe(entry, temp, &inst->children_list, list) {
  562. i_vpr_e(inst, "%s: child list: %s\n", __func__, cap_name(entry->cap_id));
  563. list_del_init(&entry->list);
  564. msm_vidc_vmem_free((void **)&entry);
  565. }
  566. list_for_each_entry_safe(entry, temp, &inst->firmware_list, list) {
  567. i_vpr_e(inst, "%s: fw list: %s\n", __func__, cap_name(entry->cap_id));
  568. list_del_init(&entry->list);
  569. msm_vidc_vmem_free((void **)&entry);
  570. }
  571. return rc;
  572. }
  573. static int msm_vidc_set_dynamic_property(struct msm_vidc_inst *inst)
  574. {
  575. struct msm_vidc_inst_cap_entry *entry = NULL, *temp = NULL;
  576. int rc = 0;
  577. if (!inst) {
  578. d_vpr_e("%s: invalid params\n", __func__);
  579. return -EINVAL;
  580. }
  581. i_vpr_h(inst, "%s()\n", __func__);
  582. list_for_each_entry_safe(entry, temp, &inst->firmware_list, list) {
  583. rc = msm_vidc_set_cap(inst, entry->cap_id, __func__);
  584. if (rc)
  585. goto error;
  586. list_del_init(&entry->list);
  587. msm_vidc_vmem_free((void **)&entry);
  588. }
  589. return 0;
  590. error:
  591. list_for_each_entry_safe(entry, temp, &inst->firmware_list, list) {
  592. i_vpr_e(inst, "%s: fw list: %s\n", __func__, cap_name(entry->cap_id));
  593. list_del_init(&entry->list);
  594. msm_vidc_vmem_free((void **)&entry);
  595. }
  596. return rc;
  597. }
  598. int msm_vidc_ctrl_deinit(struct msm_vidc_inst *inst)
  599. {
  600. if (!inst) {
  601. d_vpr_e("%s: invalid parameters\n", __func__);
  602. return -EINVAL;
  603. }
  604. i_vpr_h(inst, "%s(): num ctrls %d\n", __func__, inst->num_ctrls);
  605. v4l2_ctrl_handler_free(&inst->ctrl_handler);
  606. memset(&inst->ctrl_handler, 0, sizeof(struct v4l2_ctrl_handler));
  607. msm_vidc_vmem_free((void **)&inst->ctrls);
  608. inst->ctrls = NULL;
  609. return 0;
  610. }
/*
 * Register one v4l2 control for every cap in the capability database
 * that carries a v4l2_id. Private (vendor) controls are registered via
 * v4l2_ctrl_new_custom(); standard controls via v4l2_ctrl_new_std() or
 * v4l2_ctrl_new_std_menu(). On any failure, everything registered so
 * far is torn down through msm_vidc_ctrl_deinit().
 */
int msm_vidc_ctrl_init(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct msm_vidc_inst_capability *capability;
	struct msm_vidc_core *core;
	int idx = 0;
	struct v4l2_ctrl_config ctrl_cfg = {0};
	int num_ctrls = 0, ctrl_idx = 0;

	if (!inst || !inst->core || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;
	capability = inst->capabilities;

	if (!core->v4l2_ctrl_ops) {
		i_vpr_e(inst, "%s: no control ops\n", __func__);
		return -EINVAL;
	}

	/* first pass: count caps exposed as v4l2 controls */
	for (idx = 0; idx < INST_CAP_MAX; idx++) {
		if (capability->cap[idx].v4l2_id)
			num_ctrls++;
	}
	if (!num_ctrls) {
		i_vpr_e(inst, "%s: no ctrls available in cap database\n",
			__func__);
		return -EINVAL;
	}

	rc = msm_vidc_vmem_alloc(num_ctrls * sizeof(struct v4l2_ctrl *),
		(void **)&inst->ctrls, __func__);
	if (rc)
		return rc;

	rc = v4l2_ctrl_handler_init(&inst->ctrl_handler, num_ctrls);
	if (rc) {
		i_vpr_e(inst, "control handler init failed, %d\n",
			inst->ctrl_handler.error);
		goto error;
	}

	/* second pass: register each control */
	for (idx = 0; idx < INST_CAP_MAX; idx++) {
		struct v4l2_ctrl *ctrl;

		if (!capability->cap[idx].v4l2_id)
			continue;

		/* defensive: should never exceed the first-pass count */
		if (ctrl_idx >= num_ctrls) {
			i_vpr_e(inst,
				"%s: invalid ctrl %#x, max allowed %d\n",
				__func__, capability->cap[idx].v4l2_id,
				num_ctrls);
			rc = -EINVAL;
			goto error;
		}
		i_vpr_l(inst,
			"%s: cap[%d] %24s, value %d min %d max %d step_or_mask %#x flags %#x v4l2_id %#x hfi_id %#x\n",
			__func__, idx, cap_name(idx),
			capability->cap[idx].value,
			capability->cap[idx].min,
			capability->cap[idx].max,
			capability->cap[idx].step_or_mask,
			capability->cap[idx].flags,
			capability->cap[idx].v4l2_id,
			capability->cap[idx].hfi_id);

		memset(&ctrl_cfg, 0, sizeof(struct v4l2_ctrl_config));

		if (is_priv_ctrl(capability->cap[idx].v4l2_id)) {
			/* add private control */
			ctrl_cfg.def = capability->cap[idx].value;
			ctrl_cfg.flags = 0;
			ctrl_cfg.id = capability->cap[idx].v4l2_id;
			ctrl_cfg.max = capability->cap[idx].max;
			ctrl_cfg.min = capability->cap[idx].min;
			ctrl_cfg.ops = core->v4l2_ctrl_ops;
			/* control type derived from cap flags */
			if (capability->cap[idx].flags & CAP_FLAG_MENU)
				ctrl_cfg.type = V4L2_CTRL_TYPE_MENU;
			else if (capability->cap[idx].flags & CAP_FLAG_BITMASK)
				ctrl_cfg.type = V4L2_CTRL_TYPE_BITMASK;
			else
				ctrl_cfg.type = V4L2_CTRL_TYPE_INTEGER;
			if (is_meta_cap(inst, idx)) {
				/* bitmask is expected to be enabled for meta controls */
				if (ctrl_cfg.type != V4L2_CTRL_TYPE_BITMASK) {
					i_vpr_e(inst,
						"%s: missing bitmask for cap %s\n",
						__func__, cap_name(idx));
					rc = -EINVAL;
					goto error;
				}
			}
			if (ctrl_cfg.type == V4L2_CTRL_TYPE_MENU) {
				/*
				 * step_or_mask holds the supported-entries
				 * mask; v4l2 wants the inverse (skip mask)
				 */
				ctrl_cfg.menu_skip_mask =
					~(capability->cap[idx].step_or_mask);
				ctrl_cfg.qmenu = msm_vidc_get_qmenu_type(inst,
					capability->cap[idx].cap_id);
			} else {
				ctrl_cfg.step =
					capability->cap[idx].step_or_mask;
			}
			ctrl_cfg.name = cap_name(capability->cap[idx].cap_id);
			if (!ctrl_cfg.name) {
				i_vpr_e(inst, "%s: %#x ctrl name is null\n",
					__func__, ctrl_cfg.id);
				rc = -EINVAL;
				goto error;
			}
			ctrl = v4l2_ctrl_new_custom(&inst->ctrl_handler,
				&ctrl_cfg, NULL);
		} else {
			if (capability->cap[idx].flags & CAP_FLAG_MENU) {
				ctrl = v4l2_ctrl_new_std_menu(
					&inst->ctrl_handler,
					core->v4l2_ctrl_ops,
					capability->cap[idx].v4l2_id,
					capability->cap[idx].max,
					~(capability->cap[idx].step_or_mask),
					capability->cap[idx].value);
			} else {
				ctrl = v4l2_ctrl_new_std(&inst->ctrl_handler,
					core->v4l2_ctrl_ops,
					capability->cap[idx].v4l2_id,
					capability->cap[idx].min,
					capability->cap[idx].max,
					capability->cap[idx].step_or_mask,
					capability->cap[idx].value);
			}
		}
		if (!ctrl) {
			i_vpr_e(inst, "%s: invalid ctrl %#x cap %24s\n", __func__,
				capability->cap[idx].v4l2_id, cap_name(idx));
			rc = -EINVAL;
			goto error;
		}

		rc = inst->ctrl_handler.error;
		if (rc) {
			i_vpr_e(inst,
				"error adding ctrl (%#x) to ctrl handle, %d\n",
				capability->cap[idx].v4l2_id,
				inst->ctrl_handler.error);
			goto error;
		}

		if (capability->cap[idx].flags & CAP_FLAG_VOLATILE)
			ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;

		/*
		 * EXECUTE_ON_WRITE makes the framework call s_ctrl even
		 * when the written value equals the current one
		 */
		ctrl->flags |= V4L2_CTRL_FLAG_EXECUTE_ON_WRITE;
		inst->ctrls[ctrl_idx] = ctrl;
		ctrl_idx++;
	}
	inst->num_ctrls = num_ctrls;
	i_vpr_h(inst, "%s(): num ctrls %d\n", __func__, inst->num_ctrls);

	return 0;
error:
	msm_vidc_ctrl_deinit(inst);

	return rc;
}
  759. static int msm_vidc_update_buffer_count_if_needed(struct msm_vidc_inst* inst,
  760. enum msm_vidc_inst_capability_type cap_id)
  761. {
  762. int rc = 0;
  763. bool update_input_port = false, update_output_port = false;
  764. if (!inst) {
  765. d_vpr_e("%s: invalid parameters\n", __func__);
  766. return -EINVAL;
  767. }
  768. switch (cap_id) {
  769. case LAYER_TYPE:
  770. case ENH_LAYER_COUNT:
  771. case LAYER_ENABLE:
  772. update_input_port = true;
  773. break;
  774. case THUMBNAIL_MODE:
  775. case PRIORITY:
  776. update_input_port = true;
  777. update_output_port = true;
  778. break;
  779. default:
  780. update_input_port = false;
  781. update_output_port = false;
  782. break;
  783. }
  784. if (update_input_port) {
  785. rc = msm_vidc_update_buffer_count(inst, INPUT_PORT);
  786. if (rc)
  787. return rc;
  788. }
  789. if (update_output_port) {
  790. rc = msm_vidc_update_buffer_count(inst, OUTPUT_PORT);
  791. if (rc)
  792. return rc;
  793. }
  794. return rc;
  795. }
  796. static int msm_vidc_allow_secure_session(struct msm_vidc_inst *inst)
  797. {
  798. int rc = 0;
  799. struct msm_vidc_inst *i;
  800. struct msm_vidc_core *core;
  801. u32 count = 0;
  802. if (!inst || !inst->core) {
  803. d_vpr_e("%s: invalid params\n", __func__);
  804. return -EINVAL;
  805. }
  806. core = inst->core;
  807. if (!core->capabilities) {
  808. i_vpr_e(inst, "%s: invalid params\n", __func__);
  809. return -EINVAL;
  810. }
  811. core_lock(core, __func__);
  812. list_for_each_entry(i, &core->instances, list) {
  813. if (i->capabilities) {
  814. if (i->capabilities->cap[SECURE_MODE].value)
  815. count++;
  816. }
  817. }
  818. if (count > core->capabilities[MAX_SECURE_SESSION_COUNT].value) {
  819. i_vpr_e(inst,
  820. "%s: total secure sessions %d exceeded max limit %d\n",
  821. __func__, count,
  822. core->capabilities[MAX_SECURE_SESSION_COUNT].value);
  823. rc = -EINVAL;
  824. }
  825. core_unlock(core, __func__);
  826. return rc;
  827. }
  828. int msm_v4l2_op_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
  829. {
  830. int rc = 0;
  831. struct msm_vidc_inst *inst;
  832. if (!ctrl) {
  833. d_vpr_e("%s: invalid ctrl parameter\n", __func__);
  834. return -EINVAL;
  835. }
  836. inst = container_of(ctrl->handler,
  837. struct msm_vidc_inst, ctrl_handler);
  838. inst = get_inst_ref(g_core, inst);
  839. if (!inst) {
  840. d_vpr_e("%s: could not find inst for ctrl %s id %#x\n",
  841. __func__, ctrl->name, ctrl->id);
  842. return -EINVAL;
  843. }
  844. client_lock(inst, __func__);
  845. inst_lock(inst, __func__);
  846. rc = msm_vidc_get_control(inst, ctrl);
  847. if (rc) {
  848. i_vpr_e(inst, "%s: failed for ctrl %s id %#x\n",
  849. __func__, ctrl->name, ctrl->id);
  850. goto unlock;
  851. } else {
  852. i_vpr_h(inst, "%s: ctrl %s id %#x, value %d\n",
  853. __func__, ctrl->name, ctrl->id, ctrl->val);
  854. }
  855. unlock:
  856. inst_unlock(inst, __func__);
  857. client_unlock(inst, __func__);
  858. put_inst(inst);
  859. return rc;
  860. }
  861. static int msm_vidc_update_static_property(struct msm_vidc_inst *inst,
  862. enum msm_vidc_inst_capability_type cap_id, struct v4l2_ctrl *ctrl)
  863. {
  864. int rc = 0;
  865. if (!inst || !ctrl) {
  866. d_vpr_e("%s: invalid params\n", __func__);
  867. return -EINVAL;
  868. }
  869. /* update value to db */
  870. msm_vidc_update_cap_value(inst, cap_id, ctrl->val, __func__);
  871. if (cap_id == CLIENT_ID) {
  872. rc = msm_vidc_update_debug_str(inst);
  873. if (rc)
  874. return rc;
  875. }
  876. if (cap_id == SECURE_MODE) {
  877. if (ctrl->val) {
  878. rc = msm_vidc_allow_secure_session(inst);
  879. if (rc)
  880. return rc;
  881. }
  882. }
  883. if (cap_id == ROTATION) {
  884. struct v4l2_format *output_fmt;
  885. output_fmt = &inst->fmts[OUTPUT_PORT];
  886. rc = msm_venc_s_fmt_output(inst, output_fmt);
  887. if (rc)
  888. return rc;
  889. }
  890. if (cap_id == DELIVERY_MODE) {
  891. struct v4l2_format *output_fmt;
  892. output_fmt = &inst->fmts[OUTPUT_PORT];
  893. rc = msm_venc_s_fmt_output(inst, output_fmt);
  894. if (rc)
  895. return rc;
  896. }
  897. if (cap_id == BITSTREAM_SIZE_OVERWRITE) {
  898. rc = msm_vidc_update_bitstream_buffer_size(inst);
  899. if (rc)
  900. return rc;
  901. }
  902. /* call this explicitly to adjust client priority */
  903. if (cap_id == PRIORITY) {
  904. rc = msm_vidc_adjust_session_priority(inst, ctrl);
  905. if (rc)
  906. return rc;
  907. }
  908. if (cap_id == CRITICAL_PRIORITY)
  909. msm_vidc_update_cap_value(inst, PRIORITY, 0, __func__);
  910. if (cap_id == ENH_LAYER_COUNT && inst->codec == MSM_VIDC_HEVC) {
  911. u32 enable;
  912. /* enable LAYER_ENABLE cap if HEVC_HIER enh layers > 0 */
  913. if (ctrl->val > 0)
  914. enable = 1;
  915. else
  916. enable = 0;
  917. msm_vidc_update_cap_value(inst, LAYER_ENABLE, enable, __func__);
  918. }
  919. if (is_meta_cap(inst, cap_id)) {
  920. rc = msm_vidc_update_meta_port_settings(inst);
  921. if (rc)
  922. return rc;
  923. }
  924. rc = msm_vidc_update_buffer_count_if_needed(inst, cap_id);
  925. if (rc)
  926. return rc;
  927. return rc;
  928. }
/*
 * v4l2 s_ctrl handler. Resolves the control to a cap_id, checks the
 * set is allowed in the current state, then routes to the static path
 * (before streaming) or the dynamic path (adjust + push pending caps
 * to firmware) once streaming has started.
 */
int msm_v4l2_op_s_ctrl(struct v4l2_ctrl *ctrl)
{
	int rc = 0;
	struct msm_vidc_inst *inst;
	enum msm_vidc_inst_capability_type cap_id;
	struct msm_vidc_inst_capability *capability;
	u32 port;

	if (!ctrl) {
		d_vpr_e("%s: invalid ctrl parameter\n", __func__);
		return -EINVAL;
	}

	inst = container_of(ctrl->handler,
		struct msm_vidc_inst, ctrl_handler);
	/* take a reference; fails if the instance is gone */
	inst = get_inst_ref(g_core, inst);
	if (!inst || !inst->capabilities) {
		d_vpr_e("%s: invalid parameters for inst\n", __func__);
		return -EINVAL;
	}

	client_lock(inst, __func__);
	inst_lock(inst, __func__);
	capability = inst->capabilities;

	i_vpr_h(inst, FMT_STRING_SET_CTRL,
		__func__, state_name(inst->state), ctrl->name, ctrl->id, ctrl->val);

	cap_id = msm_vidc_get_cap_id(inst, ctrl->id);
	if (!is_valid_cap_id(cap_id)) {
		i_vpr_e(inst, "%s: could not find cap_id for ctrl %s\n",
			__func__, ctrl->name);
		rc = -EINVAL;
		goto unlock;
	}

	if (!msm_vidc_allow_s_ctrl(inst, cap_id)) {
		rc = -EINVAL;
		goto unlock;
	}

	/* mark client set flag */
	capability->cap[cap_id].flags |= CAP_FLAG_CLIENT_SET;

	/* streaming is checked on OUTPUT for encode, INPUT for decode sessions */
	port = is_encode_session(inst) ? OUTPUT_PORT : INPUT_PORT;
	if (!inst->bufq[port].vb2q->streaming) {
		/* static case */
		rc = msm_vidc_update_static_property(inst, cap_id, ctrl);
		if (rc)
			goto unlock;
	} else {
		/* dynamic case */
		rc = msm_vidc_adjust_dynamic_property(inst, cap_id, ctrl);
		if (rc)
			goto unlock;
		rc = msm_vidc_set_dynamic_property(inst);
		if (rc)
			goto unlock;
	}

unlock:
	inst_unlock(inst, __func__);
	client_unlock(inst, __func__);
	put_inst(inst);
	return rc;
}
  986. int msm_vidc_adjust_entropy_mode(void *instance, struct v4l2_ctrl *ctrl)
  987. {
  988. struct msm_vidc_inst_capability *capability;
  989. s32 adjusted_value;
  990. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  991. s32 profile = -1;
  992. if (!inst || !inst->capabilities) {
  993. d_vpr_e("%s: invalid params\n", __func__);
  994. return -EINVAL;
  995. }
  996. capability = inst->capabilities;
  997. /* ctrl is always NULL in streamon case */
  998. adjusted_value = ctrl ? ctrl->val :
  999. capability->cap[ENTROPY_MODE].value;
  1000. if (inst->codec != MSM_VIDC_H264) {
  1001. i_vpr_e(inst,
  1002. "%s: incorrect entry in database. fix the database\n",
  1003. __func__);
  1004. return 0;
  1005. }
  1006. if (msm_vidc_get_parent_value(inst, ENTROPY_MODE,
  1007. PROFILE, &profile, __func__))
  1008. return -EINVAL;
  1009. if (profile == V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE ||
  1010. profile == V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE)
  1011. adjusted_value = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC;
  1012. msm_vidc_update_cap_value(inst, ENTROPY_MODE,
  1013. adjusted_value, __func__);
  1014. return 0;
  1015. }
  1016. int msm_vidc_adjust_bitrate_mode(void *instance, struct v4l2_ctrl *ctrl)
  1017. {
  1018. struct msm_vidc_inst_capability *capability;
  1019. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1020. int lossless, frame_rc, bitrate_mode, frame_skip;
  1021. u32 hfi_value = 0;
  1022. if (!inst || !inst->capabilities) {
  1023. d_vpr_e("%s: invalid params\n", __func__);
  1024. return -EINVAL;
  1025. }
  1026. capability = inst->capabilities;
  1027. bitrate_mode = capability->cap[BITRATE_MODE].value;
  1028. lossless = capability->cap[LOSSLESS].value;
  1029. frame_rc = capability->cap[FRAME_RC_ENABLE].value;
  1030. frame_skip = capability->cap[FRAME_SKIP_MODE].value;
  1031. if (lossless || (msm_vidc_lossless_encode &&
  1032. inst->codec == MSM_VIDC_HEVC)) {
  1033. hfi_value = HFI_RC_LOSSLESS;
  1034. goto update;
  1035. }
  1036. if (!frame_rc && !is_image_session(inst)) {
  1037. hfi_value = HFI_RC_OFF;
  1038. goto update;
  1039. }
  1040. if (bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) {
  1041. hfi_value = HFI_RC_VBR_CFR;
  1042. } else if (bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR) {
  1043. if (frame_skip)
  1044. hfi_value = HFI_RC_CBR_VFR;
  1045. else
  1046. hfi_value = HFI_RC_CBR_CFR;
  1047. } else if (bitrate_mode == V4L2_MPEG_VIDEO_BITRATE_MODE_CQ) {
  1048. hfi_value = HFI_RC_CQ;
  1049. }
  1050. update:
  1051. inst->hfi_rc_type = hfi_value;
  1052. i_vpr_h(inst, "%s: hfi rc type: %#x\n",
  1053. __func__, inst->hfi_rc_type);
  1054. return 0;
  1055. }
  1056. int msm_vidc_adjust_profile(void *instance, struct v4l2_ctrl *ctrl)
  1057. {
  1058. struct msm_vidc_inst_capability *capability;
  1059. s32 adjusted_value;
  1060. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1061. s32 pix_fmt = -1;
  1062. if (!inst || !inst->capabilities) {
  1063. d_vpr_e("%s: invalid params\n", __func__);
  1064. return -EINVAL;
  1065. }
  1066. capability = inst->capabilities;
  1067. adjusted_value = ctrl ? ctrl->val : capability->cap[PROFILE].value;
  1068. /* PIX_FMTS dependency is common across all chipsets.
  1069. * Hence, PIX_FMTS must be specified as Parent for HEVC profile.
  1070. * Otherwise it would be a database error that should be fixed.
  1071. */
  1072. if (msm_vidc_get_parent_value(inst, PROFILE, PIX_FMTS,
  1073. &pix_fmt, __func__))
  1074. return -EINVAL;
  1075. /* 10 bit profile for 10 bit color format */
  1076. if (pix_fmt == MSM_VIDC_FMT_TP10C || pix_fmt == MSM_VIDC_FMT_P010) {
  1077. if (is_image_session(inst))
  1078. adjusted_value = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10_STILL_PICTURE;
  1079. else
  1080. adjusted_value = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10;
  1081. } else {
  1082. /* 8 bit profile for 8 bit color format */
  1083. if (is_image_session(inst))
  1084. adjusted_value = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE;
  1085. else
  1086. adjusted_value = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN;
  1087. }
  1088. msm_vidc_update_cap_value(inst, PROFILE,
  1089. adjusted_value, __func__);
  1090. return 0;
  1091. }
  1092. int msm_vidc_adjust_ltr_count(void *instance, struct v4l2_ctrl *ctrl)
  1093. {
  1094. struct msm_vidc_inst_capability *capability;
  1095. s32 adjusted_value;
  1096. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1097. s32 rc_type = -1, all_intra = 0;
  1098. if (!inst || !inst->capabilities) {
  1099. d_vpr_e("%s: invalid params\n", __func__);
  1100. return -EINVAL;
  1101. }
  1102. capability = inst->capabilities;
  1103. adjusted_value = ctrl ? ctrl->val : capability->cap[LTR_COUNT].value;
  1104. if (msm_vidc_get_parent_value(inst, LTR_COUNT, BITRATE_MODE,
  1105. &rc_type, __func__) ||
  1106. msm_vidc_get_parent_value(inst, LTR_COUNT, ALL_INTRA,
  1107. &all_intra, __func__))
  1108. return -EINVAL;
  1109. if ((rc_type != HFI_RC_OFF &&
  1110. rc_type != HFI_RC_CBR_CFR &&
  1111. rc_type != HFI_RC_CBR_VFR) ||
  1112. all_intra) {
  1113. adjusted_value = 0;
  1114. i_vpr_h(inst,
  1115. "%s: ltr count unsupported, rc_type: %#x, all_intra %d\n",
  1116. __func__,rc_type, all_intra);
  1117. }
  1118. msm_vidc_update_cap_value(inst, LTR_COUNT,
  1119. adjusted_value, __func__);
  1120. return 0;
  1121. }
  1122. int msm_vidc_adjust_use_ltr(void *instance, struct v4l2_ctrl *ctrl)
  1123. {
  1124. struct msm_vidc_inst_capability *capability;
  1125. s32 adjusted_value, ltr_count;
  1126. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1127. if (!inst || !inst->capabilities) {
  1128. d_vpr_e("%s: invalid params\n", __func__);
  1129. return -EINVAL;
  1130. }
  1131. capability = inst->capabilities;
  1132. adjusted_value = ctrl ? ctrl->val : capability->cap[USE_LTR].value;
  1133. /*
  1134. * Since USE_LTR is only set dynamically, and LTR_COUNT is static
  1135. * control, no need to make LTR_COUNT as parent for USE_LTR as
  1136. * LTR_COUNT value will always be updated when dynamically USE_LTR
  1137. * is set
  1138. */
  1139. ltr_count = capability->cap[LTR_COUNT].value;
  1140. if (!ltr_count)
  1141. return 0;
  1142. if (adjusted_value <= 0 ||
  1143. adjusted_value > ((1 << ltr_count) - 1)) {
  1144. /*
  1145. * USE_LTR is bitmask value, hence should be
  1146. * > 0 and <= (2 ^ LTR_COUNT) - 1
  1147. */
  1148. i_vpr_e(inst, "%s: invalid value %d\n",
  1149. __func__, adjusted_value);
  1150. return 0;
  1151. }
  1152. /* USE_LTR value is a bitmask value */
  1153. msm_vidc_update_cap_value(inst, USE_LTR,
  1154. adjusted_value, __func__);
  1155. return 0;
  1156. }
  1157. int msm_vidc_adjust_mark_ltr(void *instance, struct v4l2_ctrl *ctrl)
  1158. {
  1159. struct msm_vidc_inst_capability *capability;
  1160. s32 adjusted_value, ltr_count;
  1161. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1162. if (!inst || !inst->capabilities) {
  1163. d_vpr_e("%s: invalid params\n", __func__);
  1164. return -EINVAL;
  1165. }
  1166. capability = inst->capabilities;
  1167. adjusted_value = ctrl ? ctrl->val : capability->cap[MARK_LTR].value;
  1168. /*
  1169. * Since MARK_LTR is only set dynamically, and LTR_COUNT is static
  1170. * control, no need to make LTR_COUNT as parent for MARK_LTR as
  1171. * LTR_COUNT value will always be updated when dynamically MARK_LTR
  1172. * is set
  1173. */
  1174. ltr_count = capability->cap[LTR_COUNT].value;
  1175. if (!ltr_count)
  1176. return 0;
  1177. if (adjusted_value < 0 ||
  1178. adjusted_value > (ltr_count - 1)) {
  1179. /* MARK_LTR value should be >= 0 and <= (LTR_COUNT - 1) */
  1180. i_vpr_e(inst, "%s: invalid value %d\n",
  1181. __func__, adjusted_value);
  1182. return 0;
  1183. }
  1184. msm_vidc_update_cap_value(inst, MARK_LTR,
  1185. adjusted_value, __func__);
  1186. return 0;
  1187. }
  1188. int msm_vidc_adjust_delta_based_rc(void *instance, struct v4l2_ctrl *ctrl)
  1189. {
  1190. struct msm_vidc_inst_capability *capability;
  1191. s32 adjusted_value;
  1192. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1193. s32 rc_type = -1;
  1194. if (!inst || !inst->capabilities) {
  1195. d_vpr_e("%s: invalid params\n", __func__);
  1196. return -EINVAL;
  1197. }
  1198. capability = inst->capabilities;
  1199. adjusted_value = ctrl ? ctrl->val :
  1200. capability->cap[TIME_DELTA_BASED_RC].value;
  1201. if (msm_vidc_get_parent_value(inst, TIME_DELTA_BASED_RC,
  1202. BITRATE_MODE, &rc_type, __func__))
  1203. return -EINVAL;
  1204. if (rc_type == HFI_RC_OFF ||
  1205. rc_type == HFI_RC_CQ)
  1206. adjusted_value = 0;
  1207. msm_vidc_update_cap_value(inst, TIME_DELTA_BASED_RC,
  1208. adjusted_value, __func__);
  1209. return 0;
  1210. }
  1211. int msm_vidc_adjust_output_order(void *instance, struct v4l2_ctrl *ctrl)
  1212. {
  1213. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1214. struct msm_vidc_inst_capability *capability;
  1215. s32 tn_mode = -1, display_delay = -1, display_delay_enable = -1;
  1216. u32 adjusted_value;
  1217. if (!inst || !inst->capabilities) {
  1218. d_vpr_e("%s: invalid params\n", __func__);
  1219. return -EINVAL;
  1220. }
  1221. capability = inst->capabilities;
  1222. adjusted_value = ctrl ? ctrl->val :
  1223. capability->cap[OUTPUT_ORDER].value;
  1224. if (msm_vidc_get_parent_value(inst, OUTPUT_ORDER, THUMBNAIL_MODE,
  1225. &tn_mode, __func__) ||
  1226. msm_vidc_get_parent_value(inst, OUTPUT_ORDER, DISPLAY_DELAY,
  1227. &display_delay, __func__) ||
  1228. msm_vidc_get_parent_value(inst, OUTPUT_ORDER, DISPLAY_DELAY_ENABLE,
  1229. &display_delay_enable, __func__))
  1230. return -EINVAL;
  1231. if (tn_mode || (display_delay_enable && !display_delay))
  1232. adjusted_value = 1;
  1233. msm_vidc_update_cap_value(inst, OUTPUT_ORDER,
  1234. adjusted_value, __func__);
  1235. return 0;
  1236. }
  1237. int msm_vidc_adjust_input_buf_host_max_count(void *instance, struct v4l2_ctrl *ctrl)
  1238. {
  1239. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1240. struct msm_vidc_inst_capability *capability;
  1241. u32 adjusted_value;
  1242. if (!inst || !inst->capabilities) {
  1243. d_vpr_e("%s: invalid params\n", __func__);
  1244. return -EINVAL;
  1245. }
  1246. capability = inst->capabilities;
  1247. adjusted_value = ctrl ? ctrl->val :
  1248. capability->cap[INPUT_BUF_HOST_MAX_COUNT].value;
  1249. if (msm_vidc_is_super_buffer(inst) || is_image_session(inst))
  1250. adjusted_value = DEFAULT_MAX_HOST_BURST_BUF_COUNT;
  1251. msm_vidc_update_cap_value(inst, INPUT_BUF_HOST_MAX_COUNT,
  1252. adjusted_value, __func__);
  1253. return 0;
  1254. }
  1255. int msm_vidc_adjust_output_buf_host_max_count(void *instance, struct v4l2_ctrl *ctrl)
  1256. {
  1257. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1258. struct msm_vidc_inst_capability *capability;
  1259. u32 adjusted_value;
  1260. if (!inst || !inst->capabilities) {
  1261. d_vpr_e("%s: invalid params\n", __func__);
  1262. return -EINVAL;
  1263. }
  1264. capability = inst->capabilities;
  1265. adjusted_value = ctrl ? ctrl->val :
  1266. capability->cap[OUTPUT_BUF_HOST_MAX_COUNT].value;
  1267. if (msm_vidc_is_super_buffer(inst) || is_image_session(inst) ||
  1268. is_enc_slice_delivery_mode(inst))
  1269. adjusted_value = DEFAULT_MAX_HOST_BURST_BUF_COUNT;
  1270. msm_vidc_update_cap_value(inst, OUTPUT_BUF_HOST_MAX_COUNT,
  1271. adjusted_value, __func__);
  1272. return 0;
  1273. }
  1274. int msm_vidc_adjust_transform_8x8(void *instance, struct v4l2_ctrl *ctrl)
  1275. {
  1276. struct msm_vidc_inst_capability *capability;
  1277. s32 adjusted_value;
  1278. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1279. s32 profile = -1;
  1280. if (!inst || !inst->capabilities) {
  1281. d_vpr_e("%s: invalid params\n", __func__);
  1282. return -EINVAL;
  1283. }
  1284. capability = inst->capabilities;
  1285. adjusted_value = ctrl ? ctrl->val :
  1286. capability->cap[TRANSFORM_8X8].value;
  1287. if (inst->codec != MSM_VIDC_H264) {
  1288. i_vpr_e(inst,
  1289. "%s: incorrect entry in database. fix the database\n",
  1290. __func__);
  1291. return 0;
  1292. }
  1293. if (msm_vidc_get_parent_value(inst, TRANSFORM_8X8,
  1294. PROFILE, &profile, __func__))
  1295. return -EINVAL;
  1296. if (profile != V4L2_MPEG_VIDEO_H264_PROFILE_HIGH &&
  1297. profile != V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH)
  1298. adjusted_value = 0;
  1299. msm_vidc_update_cap_value(inst, TRANSFORM_8X8,
  1300. adjusted_value, __func__);
  1301. return 0;
  1302. }
  1303. int msm_vidc_adjust_chroma_qp_index_offset(void *instance,
  1304. struct v4l2_ctrl *ctrl)
  1305. {
  1306. struct msm_vidc_inst_capability *capability;
  1307. s32 adjusted_value;
  1308. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1309. if (!inst || !inst->capabilities) {
  1310. d_vpr_e("%s: invalid params\n", __func__);
  1311. return -EINVAL;
  1312. }
  1313. capability = inst->capabilities;
  1314. adjusted_value = ctrl ? ctrl->val :
  1315. capability->cap[CHROMA_QP_INDEX_OFFSET].value;
  1316. if (adjusted_value != MIN_CHROMA_QP_OFFSET)
  1317. adjusted_value = MAX_CHROMA_QP_OFFSET;
  1318. msm_vidc_update_cap_value(inst, CHROMA_QP_INDEX_OFFSET,
  1319. adjusted_value, __func__);
  1320. return 0;
  1321. }
  1322. static bool msm_vidc_check_all_layer_bitrate_set(struct msm_vidc_inst *inst)
  1323. {
  1324. bool layer_bitrate_set = true;
  1325. u32 cap_id = 0, i, enh_layer_count;
  1326. u32 layer_br_caps[6] = {L0_BR, L1_BR, L2_BR, L3_BR, L4_BR, L5_BR};
  1327. enh_layer_count = inst->capabilities->cap[ENH_LAYER_COUNT].value;
  1328. for (i = 0; i <= enh_layer_count; i++) {
  1329. if (i >= ARRAY_SIZE(layer_br_caps))
  1330. break;
  1331. cap_id = layer_br_caps[i];
  1332. if (!(inst->capabilities->cap[cap_id].flags & CAP_FLAG_CLIENT_SET)) {
  1333. layer_bitrate_set = false;
  1334. break;
  1335. }
  1336. }
  1337. return layer_bitrate_set;
  1338. }
  1339. static u32 msm_vidc_get_cumulative_bitrate(struct msm_vidc_inst *inst)
  1340. {
  1341. int i;
  1342. u32 cap_id = 0;
  1343. u32 cumulative_br = 0;
  1344. s32 enh_layer_count;
  1345. u32 layer_br_caps[6] = {L0_BR, L1_BR, L2_BR, L3_BR, L4_BR, L5_BR};
  1346. enh_layer_count = inst->capabilities->cap[ENH_LAYER_COUNT].value;
  1347. for (i = 0; i <= enh_layer_count; i++) {
  1348. if (i >= ARRAY_SIZE(layer_br_caps))
  1349. break;
  1350. cap_id = layer_br_caps[i];
  1351. cumulative_br += inst->capabilities->cap[cap_id].value;
  1352. }
  1353. return cumulative_br;
  1354. }
  1355. int msm_vidc_adjust_slice_count(void *instance, struct v4l2_ctrl *ctrl)
  1356. {
  1357. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1358. struct msm_vidc_inst_capability *capability;
  1359. struct v4l2_format *output_fmt;
  1360. s32 adjusted_value, rc_type = -1, slice_mode, all_intra, enh_layer_count = 0;
  1361. u32 slice_val, mbpf = 0, mbps = 0, max_mbpf = 0, max_mbps = 0, bitrate = 0;
  1362. u32 update_cap, max_avg_slicesize, output_width, output_height;
  1363. u32 min_width, min_height, max_width, max_height, fps;
  1364. if (!inst || !inst->capabilities) {
  1365. d_vpr_e("%s: invalid params\n", __func__);
  1366. return -EINVAL;
  1367. }
  1368. capability = inst->capabilities;
  1369. slice_mode = ctrl ? ctrl->val :
  1370. capability->cap[SLICE_MODE].value;
  1371. if (slice_mode == V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE)
  1372. return 0;
  1373. if (msm_vidc_get_parent_value(inst, SLICE_MODE,
  1374. BITRATE_MODE, &rc_type, __func__) ||
  1375. msm_vidc_get_parent_value(inst, SLICE_MODE,
  1376. ALL_INTRA, &all_intra, __func__) ||
  1377. msm_vidc_get_parent_value(inst, SLICE_MODE,
  1378. ENH_LAYER_COUNT, &enh_layer_count, __func__))
  1379. return -EINVAL;
  1380. if (capability->cap[BIT_RATE].flags & CAP_FLAG_CLIENT_SET) {
  1381. bitrate = capability->cap[BIT_RATE].value;
  1382. } else if (msm_vidc_check_all_layer_bitrate_set(inst)) {
  1383. bitrate = msm_vidc_get_cumulative_bitrate(inst);
  1384. } else {
  1385. adjusted_value = V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE;
  1386. update_cap = SLICE_MODE;
  1387. i_vpr_h(inst,
  1388. "%s: client did not set bitrate & layerwise bitrates\n",
  1389. __func__);
  1390. goto exit;
  1391. }
  1392. fps = capability->cap[FRAME_RATE].value >> 16;
  1393. if (fps > MAX_SLICES_FRAME_RATE ||
  1394. (rc_type != HFI_RC_OFF &&
  1395. rc_type != HFI_RC_CBR_CFR &&
  1396. rc_type != HFI_RC_CBR_VFR &&
  1397. rc_type != HFI_RC_VBR_CFR) ||
  1398. all_intra) {
  1399. adjusted_value = V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE;
  1400. update_cap = SLICE_MODE;
  1401. i_vpr_h(inst,
  1402. "%s: slice unsupported, fps: %u, rc_type: %#x, all_intra %d\n",
  1403. __func__, fps, rc_type, all_intra);
  1404. goto exit;
  1405. }
  1406. output_fmt = &inst->fmts[OUTPUT_PORT];
  1407. output_width = output_fmt->fmt.pix_mp.width;
  1408. output_height = output_fmt->fmt.pix_mp.height;
  1409. max_width = (slice_mode == V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB) ?
  1410. MAX_MB_SLICE_WIDTH : MAX_BYTES_SLICE_WIDTH;
  1411. max_height = (slice_mode == V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB) ?
  1412. MAX_MB_SLICE_HEIGHT : MAX_BYTES_SLICE_HEIGHT;
  1413. min_width = (inst->codec == MSM_VIDC_HEVC) ?
  1414. MIN_HEVC_SLICE_WIDTH : MIN_AVC_SLICE_WIDTH;
  1415. min_height = MIN_SLICE_HEIGHT;
  1416. /*
  1417. * For V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB:
  1418. * - width >= 384 and height >= 128
  1419. * - width and height <= 4096
  1420. * For V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES:
  1421. * - width >= 192 and height >= 128
  1422. * - width and height <= 1920
  1423. */
  1424. if (output_width < min_width || output_height < min_height ||
  1425. output_width > max_width || output_height > max_width) {
  1426. adjusted_value = V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE;
  1427. update_cap = SLICE_MODE;
  1428. i_vpr_h(inst,
  1429. "%s: slice unsupported, codec: %#x wxh: [%dx%d]\n",
  1430. __func__, inst->codec, output_width, output_height);
  1431. goto exit;
  1432. }
  1433. mbpf = NUM_MBS_PER_FRAME(output_height, output_width);
  1434. mbps = NUM_MBS_PER_SEC(output_height, output_width, fps);
  1435. max_mbpf = NUM_MBS_PER_FRAME(max_height, max_width);
  1436. max_mbps = NUM_MBS_PER_SEC(max_height, max_width, MAX_SLICES_FRAME_RATE);
  1437. if (mbpf > max_mbpf || mbps > max_mbps) {
  1438. adjusted_value = V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE;
  1439. update_cap = SLICE_MODE;
  1440. i_vpr_h(inst,
  1441. "%s: Unsupported, mbpf[%u] > max[%u], mbps[%u] > max[%u]\n",
  1442. __func__, mbpf, max_mbpf, mbps, max_mbps);
  1443. goto exit;
  1444. }
  1445. if (slice_mode == V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB) {
  1446. update_cap = SLICE_MAX_MB;
  1447. slice_val = capability->cap[SLICE_MAX_MB].value;
  1448. slice_val = max(slice_val, mbpf / MAX_SLICES_PER_FRAME);
  1449. } else {
  1450. slice_val = capability->cap[SLICE_MAX_BYTES].value;
  1451. update_cap = SLICE_MAX_BYTES;
  1452. if (rc_type != HFI_RC_OFF) {
  1453. max_avg_slicesize = ((bitrate / fps) / 8) /
  1454. MAX_SLICES_PER_FRAME;
  1455. slice_val = max(slice_val, max_avg_slicesize);
  1456. }
  1457. }
  1458. adjusted_value = slice_val;
  1459. exit:
  1460. msm_vidc_update_cap_value(inst, update_cap,
  1461. adjusted_value, __func__);
  1462. return 0;
  1463. }
/*
 * Sanitize the requested enhancement layer count and choose the HFI layer
 * type (HIER_B, HIER_P sliding window or hybrid LTR) for the static
 * (pre-streamon) path. The final count is also written back as the cap's
 * max so later dynamic updates cannot exceed it.
 */
static int msm_vidc_adjust_static_layer_count_and_type(struct msm_vidc_inst *inst,
	s32 layer_count)
{
	bool hb_requested = false;

	if (!inst || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (!layer_count) {
		i_vpr_h(inst, "client not enabled layer encoding\n");
		goto exit;
	}

	/* CQ rate control does not support layer encoding */
	if (inst->hfi_rc_type == HFI_RC_CQ) {
		i_vpr_h(inst, "rc type is CQ, disabling layer encoding\n");
		layer_count = 0;
		goto exit;
	}

	if (inst->codec == MSM_VIDC_H264) {
		/* H264 additionally requires LAYER_ENABLE to be set */
		if (!inst->capabilities->cap[LAYER_ENABLE].value) {
			layer_count = 0;
			goto exit;
		}
		hb_requested = (inst->capabilities->cap[LAYER_TYPE].value ==
				V4L2_MPEG_VIDEO_H264_HIERARCHICAL_CODING_B) ?
				true : false;
	} else if (inst->codec == MSM_VIDC_HEVC) {
		hb_requested = (inst->capabilities->cap[LAYER_TYPE].value ==
				V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_B) ?
				true : false;
	}

	/* hier-B layer encoding is supported only with VBR rate control */
	if (hb_requested && inst->hfi_rc_type != HFI_RC_VBR_CFR) {
		i_vpr_h(inst,
			"%s: HB layer encoding is supported for VBR rc only\n",
			__func__);
		layer_count = 0;
		goto exit;
	}

	/* without EVA stats metadata, hier-B is limited to one enh layer */
	if (!is_meta_tx_inp_enabled(inst, META_EVA_STATS) &&
		hb_requested && (layer_count > 1)) {
		layer_count = 1;
		i_vpr_h(inst,
			"%s: cvp disable supports only one enh layer HB\n",
			__func__);
	}

	/* decide hfi layer type */
	if (hb_requested) {
		inst->hfi_layer_type = HFI_HIER_B;
	} else {
		/* HP requested */
		inst->hfi_layer_type = HFI_HIER_P_SLIDING_WINDOW;
		/* H264 + VBR uses the hybrid LTR flavour of hier-P */
		if (inst->codec == MSM_VIDC_H264 &&
			inst->hfi_rc_type == HFI_RC_VBR_CFR)
			inst->hfi_layer_type = HFI_HIER_P_HYBRID_LTR;
	}

	/* sanitize layer count based on layer type and codec, and rc type */
	if (inst->hfi_layer_type == HFI_HIER_B) {
		if (layer_count > MAX_ENH_LAYER_HB)
			layer_count = MAX_ENH_LAYER_HB;
	} else if (inst->hfi_layer_type == HFI_HIER_P_HYBRID_LTR) {
		if (layer_count > MAX_AVC_ENH_LAYER_HYBRID_HP)
			layer_count = MAX_AVC_ENH_LAYER_HYBRID_HP;
	} else if (inst->hfi_layer_type == HFI_HIER_P_SLIDING_WINDOW) {
		if (inst->codec == MSM_VIDC_H264) {
			if (layer_count > MAX_AVC_ENH_LAYER_SLIDING_WINDOW)
				layer_count = MAX_AVC_ENH_LAYER_SLIDING_WINDOW;
		} else if (inst->codec == MSM_VIDC_HEVC) {
			if (inst->hfi_rc_type == HFI_RC_VBR_CFR) {
				if (layer_count > MAX_HEVC_VBR_ENH_LAYER_SLIDING_WINDOW)
					layer_count = MAX_HEVC_VBR_ENH_LAYER_SLIDING_WINDOW;
			} else {
				if (layer_count > MAX_HEVC_NON_VBR_ENH_LAYER_SLIDING_WINDOW)
					layer_count = MAX_HEVC_NON_VBR_ENH_LAYER_SLIDING_WINDOW;
			}
		}
	}

exit:
	msm_vidc_update_cap_value(inst, ENH_LAYER_COUNT,
		layer_count, __func__);
	/* dynamic layer count changes are capped at the value decided here */
	inst->capabilities->cap[ENH_LAYER_COUNT].max = layer_count;

	return 0;
}
  1545. int msm_vidc_adjust_layer_count(void *instance, struct v4l2_ctrl *ctrl)
  1546. {
  1547. int rc = 0;
  1548. struct msm_vidc_inst_capability *capability;
  1549. s32 client_layer_count;
  1550. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1551. if (!inst || !inst->capabilities) {
  1552. d_vpr_e("%s: invalid params\n", __func__);
  1553. return -EINVAL;
  1554. }
  1555. capability = inst->capabilities;
  1556. client_layer_count = ctrl ? ctrl->val :
  1557. capability->cap[ENH_LAYER_COUNT].value;
  1558. if (!is_parent_available(inst, ENH_LAYER_COUNT,
  1559. BITRATE_MODE, __func__) ||
  1560. !is_parent_available(inst, ENH_LAYER_COUNT,
  1561. META_EVA_STATS, __func__))
  1562. return -EINVAL;
  1563. if (!inst->bufq[OUTPUT_PORT].vb2q->streaming) {
  1564. rc = msm_vidc_adjust_static_layer_count_and_type(inst,
  1565. client_layer_count);
  1566. if (rc)
  1567. goto exit;
  1568. } else {
  1569. if (inst->hfi_layer_type == HFI_HIER_P_HYBRID_LTR ||
  1570. inst->hfi_layer_type == HFI_HIER_P_SLIDING_WINDOW) {
  1571. /* dynamic layer count change is only supported for HP */
  1572. if (client_layer_count >
  1573. inst->capabilities->cap[ENH_LAYER_COUNT].max)
  1574. client_layer_count =
  1575. inst->capabilities->cap[ENH_LAYER_COUNT].max;
  1576. msm_vidc_update_cap_value(inst, ENH_LAYER_COUNT,
  1577. client_layer_count, __func__);
  1578. }
  1579. }
  1580. exit:
  1581. return rc;
  1582. }
  1583. int msm_vidc_adjust_gop_size(void *instance, struct v4l2_ctrl *ctrl)
  1584. {
  1585. struct msm_vidc_inst_capability *capability;
  1586. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  1587. s32 adjusted_value, enh_layer_count = -1;
  1588. u32 min_gop_size, num_subgops;
  1589. if (!inst || !inst->capabilities) {
  1590. d_vpr_e("%s: invalid params\n", __func__);
  1591. return -EINVAL;
  1592. }
  1593. capability = inst->capabilities;
  1594. adjusted_value = ctrl ? ctrl->val : capability->cap[GOP_SIZE].value;
  1595. if (msm_vidc_get_parent_value(inst, GOP_SIZE,
  1596. ENH_LAYER_COUNT, &enh_layer_count, __func__))
  1597. return -EINVAL;
  1598. if (!enh_layer_count)
  1599. goto exit;
  1600. /*
  1601. * Layer encoding needs GOP size to be multiple of subgop size
  1602. * And subgop size is 2 ^ number of enhancement layers.
  1603. */
  1604. /* v4l2 layer count is the number of enhancement layers */
  1605. min_gop_size = 1 << enh_layer_count;
  1606. num_subgops = (adjusted_value + (min_gop_size >> 1)) /
  1607. min_gop_size;
  1608. if (num_subgops)
  1609. adjusted_value = num_subgops * min_gop_size;
  1610. else
  1611. adjusted_value = min_gop_size;
  1612. exit:
  1613. msm_vidc_update_cap_value(inst, GOP_SIZE, adjusted_value, __func__);
  1614. return 0;
  1615. }
  1616. int msm_vidc_adjust_b_frame(void *instance, struct v4l2_ctrl *ctrl)
  1617. {
  1618. struct msm_vidc_inst_capability *capability;
  1619. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  1620. s32 adjusted_value, enh_layer_count = -1;
  1621. const u32 max_bframe_size = 7;
  1622. if (!inst || !inst->capabilities) {
  1623. d_vpr_e("%s: invalid params\n", __func__);
  1624. return -EINVAL;
  1625. }
  1626. capability = inst->capabilities;
  1627. adjusted_value = ctrl ? ctrl->val : capability->cap[B_FRAME].value;
  1628. if (msm_vidc_get_parent_value(inst, B_FRAME,
  1629. ENH_LAYER_COUNT, &enh_layer_count, __func__))
  1630. return -EINVAL;
  1631. if (!enh_layer_count || inst->hfi_layer_type != HFI_HIER_B) {
  1632. adjusted_value = 0;
  1633. goto exit;
  1634. }
  1635. adjusted_value = (1 << enh_layer_count) - 1;
  1636. /* Allowed Bframe values are 0, 1, 3, 7 */
  1637. if (adjusted_value > max_bframe_size)
  1638. adjusted_value = max_bframe_size;
  1639. exit:
  1640. msm_vidc_update_cap_value(inst, B_FRAME, adjusted_value, __func__);
  1641. return 0;
  1642. }
/*
 * Adjust the total bitrate. If the client set BIT_RATE explicitly it wins
 * outright; otherwise (static path only) the total is derived from the
 * layerwise bitrates, scaling each layer down proportionally when their
 * sum exceeds the session's max supported bitrate.
 */
int msm_vidc_adjust_bitrate(void *instance, struct v4l2_ctrl *ctrl)
{
	int i, rc = 0;
	struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
	struct msm_vidc_inst_capability *capability;
	s32 adjusted_value, enh_layer_count;
	u32 cumulative_bitrate = 0, cap_id = 0, cap_value = 0;
	u32 layer_br_caps[6] = {L0_BR, L1_BR, L2_BR, L3_BR, L4_BR, L5_BR};
	u32 max_bitrate = 0;

	if (!inst || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	capability = inst->capabilities;

	/* ignore layer bitrate when total bitrate is set */
	if (capability->cap[BIT_RATE].flags & CAP_FLAG_CLIENT_SET) {
		/*
		 * For static case, ctrl is null.
		 * For dynamic case, only BIT_RATE cap uses this adjust function.
		 * Hence, no need to check for ctrl id to be BIT_RATE control, and not
		 * any of layer bitrate controls.
		 */
		adjusted_value = ctrl ? ctrl->val : capability->cap[BIT_RATE].value;
		msm_vidc_update_cap_value(inst, BIT_RATE, adjusted_value, __func__);
		return 0;
	}

	/* runtime layerwise rebalancing is handled by the dynamic variant */
	if (inst->bufq[OUTPUT_PORT].vb2q->streaming)
		return 0;

	if (msm_vidc_get_parent_value(inst, BIT_RATE,
		ENH_LAYER_COUNT, &enh_layer_count, __func__))
		return -EINVAL;

	/* get max bit rate for current session config*/
	max_bitrate = msm_vidc_get_max_bitrate(inst);
	if (inst->capabilities->cap[BIT_RATE].value > max_bitrate)
		msm_vidc_update_cap_value(inst, BIT_RATE, max_bitrate, __func__);

	/*
	 * ENH_LAYER_COUNT cap max is positive only if
	 * layer encoding is enabled during streamon.
	 */
	if (capability->cap[ENH_LAYER_COUNT].max) {
		if (!msm_vidc_check_all_layer_bitrate_set(inst)) {
			i_vpr_h(inst,
				"%s: client did not set all layer bitrates\n",
				__func__);
			return 0;
		}

		cumulative_bitrate = msm_vidc_get_cumulative_bitrate(inst);

		/* cap layer bitrates to max supported bitrate */
		if (cumulative_bitrate > max_bitrate) {
			u32 decrement_in_value = 0;
			/*
			 * NOTE(review): divides by max_bitrate — assumes
			 * msm_vidc_get_max_bitrate() never returns 0; confirm.
			 */
			u32 decrement_in_percent = ((cumulative_bitrate - max_bitrate) * 100) /
				max_bitrate;
			cumulative_bitrate = 0;

			/* shave the same percentage off every layer bitrate */
			for (i = 0; i <= enh_layer_count; i++) {
				if (i >= ARRAY_SIZE(layer_br_caps))
					break;
				cap_id = layer_br_caps[i];
				cap_value = inst->capabilities->cap[cap_id].value;

				decrement_in_value = (cap_value *
					decrement_in_percent) / 100;
				cumulative_bitrate += (cap_value - decrement_in_value);

				/*
				 * cap value for the L*_BR is changed. Hence, update cap,
				 * and add to FW_LIST to set new values to firmware.
				 */
				msm_vidc_update_cap_value(inst, cap_id,
					(cap_value - decrement_in_value), __func__);
			}
		}

		i_vpr_h(inst,
			"%s: update BIT_RATE with cumulative bitrate\n",
			__func__);
		msm_vidc_update_cap_value(inst, BIT_RATE,
			cumulative_bitrate, __func__);
	}

	return rc;
}
/*
 * Runtime (post-streamon) adjustment of a single layer bitrate control.
 * The requested layer bitrate is trimmed so that the cumulative bitrate
 * across all layers never exceeds the BIT_RATE cap max, then the total
 * BIT_RATE cap is refreshed from the new cumulative value.
 */
int msm_vidc_adjust_dynamic_layer_bitrate(void *instance, struct v4l2_ctrl *ctrl)
{
	int rc = 0;
	struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
	struct msm_vidc_inst_capability *capability;
	u32 cumulative_bitrate = 0;
	u32 client_set_cap_id = INST_CAP_NONE;
	u32 old_br = 0, new_br = 0, exceeded_br = 0;
	s32 max_bitrate;

	if (!inst || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	capability = inst->capabilities;

	/* this path only services dynamic (ctrl-driven) requests */
	if (!ctrl)
		return 0;

	/* ignore layer bitrate when total bitrate is set */
	if (capability->cap[BIT_RATE].flags & CAP_FLAG_CLIENT_SET)
		return 0;

	/* the static path handles pre-streamon adjustments */
	if (!inst->bufq[OUTPUT_PORT].vb2q->streaming)
		return 0;

	/*
	 * ENH_LAYER_COUNT cap max is positive only if
	 * layer encoding is enabled during streamon.
	 */
	if (!capability->cap[ENH_LAYER_COUNT].max) {
		i_vpr_e(inst, "%s: layers not enabled\n", __func__);
		return -EINVAL;
	}

	if (!msm_vidc_check_all_layer_bitrate_set(inst)) {
		i_vpr_h(inst,
			"%s: client did not set all layer bitrates\n",
			__func__);
		return 0;
	}

	/* map the v4l2 control back to the L*_BR cap being changed */
	client_set_cap_id = msm_vidc_get_cap_id(inst, ctrl->id);
	if (client_set_cap_id == INST_CAP_NONE) {
		i_vpr_e(inst, "%s: could not find cap_id for ctrl %s\n",
			__func__, ctrl->name);
		return -EINVAL;
	}

	cumulative_bitrate = msm_vidc_get_cumulative_bitrate(inst);
	max_bitrate = inst->capabilities->cap[BIT_RATE].max;
	old_br = capability->cap[client_set_cap_id].value;
	new_br = ctrl->val;

	/*
	 * new bitrate is not supposed to cause cumulative bitrate to
	 * exceed max supported bitrate
	 */
	if ((cumulative_bitrate - old_br + new_br) > max_bitrate) {
		/* adjust new bitrate */
		exceeded_br = (cumulative_bitrate - old_br + new_br) - max_bitrate;
		new_br = ctrl->val - exceeded_br;
	}
	msm_vidc_update_cap_value(inst, client_set_cap_id, new_br, __func__);

	/* adjust total bitrate cap */
	i_vpr_h(inst,
		"%s: update BIT_RATE with cumulative bitrate\n",
		__func__);
	msm_vidc_update_cap_value(inst, BIT_RATE,
		msm_vidc_get_cumulative_bitrate(inst), __func__);

	return rc;
}
  1783. int msm_vidc_adjust_peak_bitrate(void *instance, struct v4l2_ctrl *ctrl)
  1784. {
  1785. struct msm_vidc_inst_capability *capability;
  1786. s32 adjusted_value;
  1787. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1788. s32 rc_type = -1, bitrate = -1;
  1789. if (!inst || !inst->capabilities) {
  1790. d_vpr_e("%s: invalid params\n", __func__);
  1791. return -EINVAL;
  1792. }
  1793. capability = inst->capabilities;
  1794. adjusted_value = ctrl ? ctrl->val :
  1795. capability->cap[PEAK_BITRATE].value;
  1796. if (msm_vidc_get_parent_value(inst, PEAK_BITRATE,
  1797. BITRATE_MODE, &rc_type, __func__))
  1798. return -EINVAL;
  1799. if (rc_type != HFI_RC_CBR_CFR &&
  1800. rc_type != HFI_RC_CBR_VFR)
  1801. return 0;
  1802. if (msm_vidc_get_parent_value(inst, PEAK_BITRATE,
  1803. BIT_RATE, &bitrate, __func__))
  1804. return -EINVAL;
  1805. /* Peak Bitrate should be larger than or equal to avg bitrate */
  1806. if (capability->cap[PEAK_BITRATE].flags & CAP_FLAG_CLIENT_SET) {
  1807. if (adjusted_value < bitrate)
  1808. adjusted_value = bitrate;
  1809. } else {
  1810. adjusted_value = capability->cap[BIT_RATE].value;
  1811. }
  1812. msm_vidc_update_cap_value(inst, PEAK_BITRATE,
  1813. adjusted_value, __func__);
  1814. return 0;
  1815. }
  1816. int msm_vidc_adjust_hevc_min_qp(void *instance, struct v4l2_ctrl *ctrl)
  1817. {
  1818. int rc = 0;
  1819. struct msm_vidc_inst_capability *capability;
  1820. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1821. if (!inst || !inst->capabilities) {
  1822. d_vpr_e("%s: invalid params\n", __func__);
  1823. return -EINVAL;
  1824. }
  1825. capability = inst->capabilities;
  1826. if (ctrl)
  1827. msm_vidc_update_cap_value(inst, MIN_FRAME_QP,
  1828. ctrl->val, __func__);
  1829. rc = msm_vidc_adjust_hevc_qp(inst, MIN_FRAME_QP);
  1830. return rc;
  1831. }
  1832. int msm_vidc_adjust_hevc_max_qp(void *instance, struct v4l2_ctrl *ctrl)
  1833. {
  1834. int rc = 0;
  1835. struct msm_vidc_inst_capability *capability;
  1836. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1837. if (!inst || !inst->capabilities) {
  1838. d_vpr_e("%s: invalid params\n", __func__);
  1839. return -EINVAL;
  1840. }
  1841. capability = inst->capabilities;
  1842. if (ctrl)
  1843. msm_vidc_update_cap_value(inst, MAX_FRAME_QP,
  1844. ctrl->val, __func__);
  1845. rc = msm_vidc_adjust_hevc_qp(inst, MAX_FRAME_QP);
  1846. return rc;
  1847. }
  1848. int msm_vidc_adjust_hevc_i_frame_qp(void *instance, struct v4l2_ctrl *ctrl)
  1849. {
  1850. int rc = 0;
  1851. struct msm_vidc_inst_capability *capability;
  1852. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1853. if (!inst || !inst->capabilities) {
  1854. d_vpr_e("%s: invalid params\n", __func__);
  1855. return -EINVAL;
  1856. }
  1857. capability = inst->capabilities;
  1858. if (ctrl)
  1859. msm_vidc_update_cap_value(inst, I_FRAME_QP,
  1860. ctrl->val, __func__);
  1861. rc = msm_vidc_adjust_hevc_qp(inst, I_FRAME_QP);
  1862. if (rc)
  1863. return rc;
  1864. return rc;
  1865. }
  1866. int msm_vidc_adjust_hevc_p_frame_qp(void *instance, struct v4l2_ctrl *ctrl)
  1867. {
  1868. int rc = 0;
  1869. struct msm_vidc_inst_capability *capability;
  1870. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1871. if (!inst || !inst->capabilities) {
  1872. d_vpr_e("%s: invalid params\n", __func__);
  1873. return -EINVAL;
  1874. }
  1875. capability = inst->capabilities;
  1876. if (ctrl)
  1877. msm_vidc_update_cap_value(inst, P_FRAME_QP,
  1878. ctrl->val, __func__);
  1879. rc = msm_vidc_adjust_hevc_qp(inst, P_FRAME_QP);
  1880. if (rc)
  1881. return rc;
  1882. return rc;
  1883. }
  1884. int msm_vidc_adjust_hevc_b_frame_qp(void *instance, struct v4l2_ctrl *ctrl)
  1885. {
  1886. int rc = 0;
  1887. struct msm_vidc_inst_capability *capability;
  1888. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1889. if (!inst || !inst->capabilities) {
  1890. d_vpr_e("%s: invalid params\n", __func__);
  1891. return -EINVAL;
  1892. }
  1893. capability = inst->capabilities;
  1894. if (ctrl)
  1895. msm_vidc_update_cap_value(inst, B_FRAME_QP,
  1896. ctrl->val, __func__);
  1897. rc = msm_vidc_adjust_hevc_qp(inst, B_FRAME_QP);
  1898. if (rc)
  1899. return rc;
  1900. return rc;
  1901. }
  1902. int msm_vidc_adjust_blur_type(void *instance, struct v4l2_ctrl *ctrl)
  1903. {
  1904. struct msm_vidc_inst_capability *capability;
  1905. s32 adjusted_value;
  1906. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1907. s32 rc_type = -1, roi_enable = -1;
  1908. s32 pix_fmts = -1, min_quality = -1;
  1909. if (!inst || !inst->capabilities) {
  1910. d_vpr_e("%s: invalid params\n", __func__);
  1911. return -EINVAL;
  1912. }
  1913. capability = inst->capabilities;
  1914. adjusted_value = ctrl ? ctrl->val :
  1915. capability->cap[BLUR_TYPES].value;
  1916. if (adjusted_value == MSM_VIDC_BLUR_NONE)
  1917. return 0;
  1918. if (msm_vidc_get_parent_value(inst, BLUR_TYPES, BITRATE_MODE,
  1919. &rc_type, __func__) ||
  1920. msm_vidc_get_parent_value(inst, BLUR_TYPES, PIX_FMTS,
  1921. &pix_fmts, __func__) ||
  1922. msm_vidc_get_parent_value(inst, BLUR_TYPES, MIN_QUALITY,
  1923. &min_quality, __func__) ||
  1924. msm_vidc_get_parent_value(inst, BLUR_TYPES, META_ROI_INFO,
  1925. &roi_enable, __func__))
  1926. return -EINVAL;
  1927. if (adjusted_value == MSM_VIDC_BLUR_EXTERNAL) {
  1928. if (is_scaling_enabled(inst) || min_quality) {
  1929. adjusted_value = MSM_VIDC_BLUR_NONE;
  1930. }
  1931. } else if (adjusted_value == MSM_VIDC_BLUR_ADAPTIVE) {
  1932. if (is_scaling_enabled(inst) || min_quality ||
  1933. (rc_type != HFI_RC_VBR_CFR &&
  1934. rc_type != HFI_RC_CBR_CFR &&
  1935. rc_type != HFI_RC_CBR_VFR) ||
  1936. is_10bit_colorformat(pix_fmts) || roi_enable) {
  1937. adjusted_value = MSM_VIDC_BLUR_NONE;
  1938. }
  1939. }
  1940. msm_vidc_update_cap_value(inst, BLUR_TYPES,
  1941. adjusted_value, __func__);
  1942. return 0;
  1943. }
  1944. int msm_vidc_adjust_all_intra(void *instance, struct v4l2_ctrl *ctrl)
  1945. {
  1946. struct msm_vidc_inst_capability *capability;
  1947. s32 adjusted_value;
  1948. struct msm_vidc_core *core;
  1949. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1950. s32 gop_size = -1, bframe = -1;
  1951. u32 width, height, fps, mbps, max_mbps;
  1952. if (!inst || !inst->capabilities || !inst->core) {
  1953. d_vpr_e("%s: invalid params\n", __func__);
  1954. return -EINVAL;
  1955. }
  1956. capability = inst->capabilities;
  1957. adjusted_value = capability->cap[ALL_INTRA].value;
  1958. if (msm_vidc_get_parent_value(inst, ALL_INTRA, GOP_SIZE,
  1959. &gop_size, __func__) ||
  1960. msm_vidc_get_parent_value(inst, ALL_INTRA, B_FRAME,
  1961. &bframe, __func__))
  1962. return -EINVAL;
  1963. width = inst->crop.width;
  1964. height = inst->crop.height;
  1965. fps = msm_vidc_get_fps(inst);
  1966. mbps = NUM_MBS_PER_SEC(height, width, fps);
  1967. core = inst->core;
  1968. max_mbps = core->capabilities[MAX_MBPS_ALL_INTRA].value;
  1969. if (mbps > max_mbps) {
  1970. adjusted_value = 0;
  1971. i_vpr_h(inst, "%s: mbps %d exceeds max supported mbps %d\n",
  1972. __func__, mbps, max_mbps);
  1973. goto exit;
  1974. }
  1975. if (!gop_size && !bframe)
  1976. adjusted_value = 1;
  1977. exit:
  1978. msm_vidc_update_cap_value(inst, ALL_INTRA,
  1979. adjusted_value, __func__);
  1980. return 0;
  1981. }
  1982. int msm_vidc_adjust_blur_resolution(void *instance, struct v4l2_ctrl *ctrl)
  1983. {
  1984. struct msm_vidc_inst_capability *capability;
  1985. s32 adjusted_value;
  1986. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  1987. s32 blur_type = -1;
  1988. if (!inst || !inst->capabilities) {
  1989. d_vpr_e("%s: invalid params\n", __func__);
  1990. return -EINVAL;
  1991. }
  1992. capability = inst->capabilities;
  1993. adjusted_value = ctrl ? ctrl->val :
  1994. capability->cap[BLUR_RESOLUTION].value;
  1995. if (msm_vidc_get_parent_value(inst, BLUR_RESOLUTION, BLUR_TYPES,
  1996. &blur_type, __func__))
  1997. return -EINVAL;
  1998. if (blur_type != MSM_VIDC_BLUR_EXTERNAL)
  1999. return 0;
  2000. msm_vidc_update_cap_value(inst, BLUR_RESOLUTION,
  2001. adjusted_value, __func__);
  2002. return 0;
  2003. }
/*
 * Adjust content-adaptive coding (BRS): allowed only for VBR rate control
 * and only when hier-P layer encoding is not enabled. Static path only.
 */
int msm_vidc_adjust_brs(void *instance, struct v4l2_ctrl *ctrl)
{
	struct msm_vidc_inst_capability *capability;
	s32 adjusted_value;
	struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
	s32 rc_type = -1, layer_enabled = -1, layer_type = -1;
	bool hp_requested = false;

	if (!inst || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	capability = inst->capabilities;

	adjusted_value = ctrl ? ctrl->val :
		capability->cap[CONTENT_ADAPTIVE_CODING].value;

	/* BRS is not adjusted at runtime */
	if (inst->bufq[OUTPUT_PORT].vb2q->streaming)
		return 0;

	if (msm_vidc_get_parent_value(inst, CONTENT_ADAPTIVE_CODING,
		BITRATE_MODE, &rc_type, __func__) ||
	    msm_vidc_get_parent_value(inst, CONTENT_ADAPTIVE_CODING,
		LAYER_ENABLE, &layer_enabled, __func__) ||
	    msm_vidc_get_parent_value(inst, CONTENT_ADAPTIVE_CODING,
		LAYER_TYPE, &layer_type, __func__))
		return -EINVAL;

	/*
	 * -BRS is supported only for VBR rc type.
	 *  Hence, do not adjust or set to firmware for non VBR rc's
	 * -If HP is enabled then BRS is not allowed.
	 */
	if (rc_type != HFI_RC_VBR_CFR) {
		adjusted_value = 0;
		goto adjust;
	}

	/*
	 * layer_type is reused here to hold the codec's hier-P constant so
	 * the current LAYER_TYPE cap value can be compared against it.
	 */
	if (inst->codec == MSM_VIDC_H264) {
		layer_type = V4L2_MPEG_VIDEO_H264_HIERARCHICAL_CODING_P;
	} else if (inst->codec == MSM_VIDC_HEVC) {
		layer_type = V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_P;
	}
	hp_requested = (inst->capabilities->cap[LAYER_TYPE].value == layer_type);

	/*
	 * Disable BRS in case of HP encoding
	 * Hence set adjust value to 0.
	 */
	if (layer_enabled == 1 && hp_requested) {
		adjusted_value = 0;
		goto adjust;
	}

adjust:
	msm_vidc_update_cap_value(inst, CONTENT_ADAPTIVE_CODING,
		adjusted_value, __func__);

	return 0;
}
  2055. int msm_vidc_adjust_bitrate_boost(void *instance, struct v4l2_ctrl *ctrl)
  2056. {
  2057. struct msm_vidc_inst_capability *capability;
  2058. s32 adjusted_value;
  2059. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  2060. s32 min_quality = -1, rc_type = -1;
  2061. u32 max_bitrate = 0, bitrate = 0;
  2062. if (!inst || !inst->capabilities) {
  2063. d_vpr_e("%s: invalid params\n", __func__);
  2064. return -EINVAL;
  2065. }
  2066. capability = inst->capabilities;
  2067. adjusted_value = ctrl ? ctrl->val :
  2068. capability->cap[BITRATE_BOOST].value;
  2069. if (inst->bufq[OUTPUT_PORT].vb2q->streaming)
  2070. return 0;
  2071. if (msm_vidc_get_parent_value(inst, BITRATE_BOOST,
  2072. MIN_QUALITY, &min_quality, __func__) ||
  2073. msm_vidc_get_parent_value(inst, BITRATE_BOOST,
  2074. BITRATE_MODE, &rc_type, __func__))
  2075. return -EINVAL;
  2076. /*
  2077. * Bitrate Boost are supported only for VBR rc type.
  2078. * Hence, do not adjust or set to firmware for non VBR rc's
  2079. */
  2080. if (rc_type != HFI_RC_VBR_CFR) {
  2081. adjusted_value = 0;
  2082. goto adjust;
  2083. }
  2084. if (min_quality) {
  2085. adjusted_value = MAX_BITRATE_BOOST;
  2086. goto adjust;
  2087. }
  2088. max_bitrate = msm_vidc_get_max_bitrate(inst);
  2089. bitrate = inst->capabilities->cap[BIT_RATE].value;
  2090. if (adjusted_value) {
  2091. if ((bitrate + bitrate / (100 / adjusted_value)) > max_bitrate) {
  2092. i_vpr_h(inst,
  2093. "%s: bitrate %d is beyond max bitrate %d, remove bitrate boost\n",
  2094. __func__, max_bitrate, bitrate);
  2095. adjusted_value = 0;
  2096. }
  2097. }
  2098. adjust:
  2099. msm_vidc_update_cap_value(inst, BITRATE_BOOST,
  2100. adjusted_value, __func__);
  2101. return 0;
  2102. }
/*
 * Adjust the VBR min-quality level. Enabled (MAX_SUPPORTED_MIN_QUALITY)
 * only when every restriction below passes; otherwise forced to 0.
 */
int msm_vidc_adjust_min_quality(void *instance, struct v4l2_ctrl *ctrl)
{
	struct msm_vidc_inst_capability *capability;
	s32 adjusted_value;
	struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
	s32 roi_enable = -1, rc_type = -1, enh_layer_count = -1, pix_fmts = -1;
	u32 width, height, frame_rate;
	struct v4l2_format *f;

	if (!inst || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	capability = inst->capabilities;

	adjusted_value = ctrl ? ctrl->val : capability->cap[MIN_QUALITY].value;

	/*
	 * Although MIN_QUALITY is static, one of its parents,
	 * ENH_LAYER_COUNT is dynamic cap. Hence, dynamic call
	 * may be made for MIN_QUALITY via ENH_LAYER_COUNT.
	 * Therefore, below streaming check is required to avoid
	 * runtime modification of MIN_QUALITY.
	 */
	if (inst->bufq[OUTPUT_PORT].vb2q->streaming)
		return 0;

	if (msm_vidc_get_parent_value(inst, MIN_QUALITY,
		BITRATE_MODE, &rc_type, __func__) ||
	    msm_vidc_get_parent_value(inst, MIN_QUALITY,
		META_ROI_INFO, &roi_enable, __func__) ||
	    msm_vidc_get_parent_value(inst, MIN_QUALITY,
		ENH_LAYER_COUNT, &enh_layer_count, __func__))
		return -EINVAL;

	/*
	 * Min Quality is supported only for VBR rc type.
	 * Hence, do not adjust or set to firmware for non VBR rc's
	 */
	if (rc_type != HFI_RC_VBR_CFR) {
		adjusted_value = 0;
		goto update_and_exit;
	}

	/* FRAME_RATE cap is Q16 fixed point */
	frame_rate = inst->capabilities->cap[FRAME_RATE].value >> 16;
	f = &inst->fmts[OUTPUT_PORT];
	width = f->fmt.pix_mp.width;
	height = f->fmt.pix_mp.height;

	/*
	 * VBR Min Quality not supported for:
	 * - HEVC 10bit
	 * - ROI support
	 * - HP encoding
	 * - External Blur
	 * - Resolution beyond 1080P
	 * (It will fall back to CQCAC 25% or 0% (CAC) or CQCAC-OFF)
	 */
	if (inst->codec == MSM_VIDC_HEVC) {
		if (msm_vidc_get_parent_value(inst, MIN_QUALITY,
			PIX_FMTS, &pix_fmts, __func__))
			return -EINVAL;
		if (is_10bit_colorformat(pix_fmts)) {
			i_vpr_h(inst,
				"%s: min quality is supported only for 8 bit\n",
				__func__);
			adjusted_value = 0;
			goto update_and_exit;
		}
	}

	if (res_is_greater_than(width, height, 1920, 1080)) {
		i_vpr_h(inst, "%s: unsupported res, wxh %ux%u\n",
			__func__, width, height);
		adjusted_value = 0;
		goto update_and_exit;
	}

	if (frame_rate > 60) {
		i_vpr_h(inst, "%s: unsupported fps %u\n",
			__func__, frame_rate);
		adjusted_value = 0;
		goto update_and_exit;
	}

	if (is_meta_tx_inp_enabled(inst, META_ROI_INFO)) {
		i_vpr_h(inst,
			"%s: min quality not supported with roi metadata\n",
			__func__);
		adjusted_value = 0;
		goto update_and_exit;
	}

	/* any hier-P flavour (non hier-B layering) disables min quality */
	if (enh_layer_count > 0 && inst->hfi_layer_type != HFI_HIER_B) {
		i_vpr_h(inst,
			"%s: min quality not supported for HP encoding\n",
			__func__);
		adjusted_value = 0;
		goto update_and_exit;
	}

	/* Above conditions are met. Hence enable min quality */
	adjusted_value = MAX_SUPPORTED_MIN_QUALITY;

update_and_exit:
	msm_vidc_update_cap_value(inst, MIN_QUALITY,
		adjusted_value, __func__);

	return 0;
}
  2199. int msm_vidc_adjust_preprocess(void *instance, struct v4l2_ctrl *ctrl)
  2200. {
  2201. s32 adjusted_value;
  2202. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  2203. s32 brs = -1, eva_status = -1;
  2204. u32 width, height, frame_rate, operating_rate, max_fps;
  2205. struct v4l2_format *f;
  2206. if (!inst || !inst->capabilities || !inst->core) {
  2207. d_vpr_e("%s: invalid params\n", __func__);
  2208. return -EINVAL;
  2209. }
  2210. adjusted_value = inst->capabilities->cap[REQUEST_PREPROCESS].value;
  2211. if (msm_vidc_get_parent_value(inst, REQUEST_PREPROCESS, CONTENT_ADAPTIVE_CODING,
  2212. &brs, __func__) ||
  2213. msm_vidc_get_parent_value(inst, REQUEST_PREPROCESS, META_EVA_STATS,
  2214. &eva_status, __func__))
  2215. return -EINVAL;
  2216. width = inst->crop.width;
  2217. height = inst->crop.height;
  2218. frame_rate = msm_vidc_get_frame_rate(inst);;
  2219. operating_rate = msm_vidc_get_operating_rate(inst);;
  2220. max_fps = max(frame_rate, operating_rate);
  2221. f= &inst->fmts[OUTPUT_PORT];
  2222. /*
  2223. * enable preprocess if
  2224. * client did not enable EVA metadata statistics and
  2225. * BRS enabled and upto 4k @ 60 fps
  2226. */
  2227. if (!is_meta_tx_inp_enabled(inst, META_EVA_STATS) &&
  2228. brs == 1 &&
  2229. res_is_less_than_or_equal_to(width, height, 3840, 2160) &&
  2230. max_fps <= 60)
  2231. adjusted_value = 1;
  2232. else
  2233. adjusted_value = 0;
  2234. msm_vidc_update_cap_value(inst, REQUEST_PREPROCESS,
  2235. adjusted_value, __func__);
  2236. return 0;
  2237. }
  2238. int msm_vidc_adjust_enc_lowlatency_mode(void *instance, struct v4l2_ctrl *ctrl)
  2239. {
  2240. struct msm_vidc_inst_capability *capability;
  2241. s32 adjusted_value;
  2242. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  2243. s32 rc_type = -1;
  2244. if (!inst || !inst->capabilities) {
  2245. d_vpr_e("%s: invalid params\n", __func__);
  2246. return -EINVAL;
  2247. }
  2248. capability = inst->capabilities;
  2249. adjusted_value = ctrl ? ctrl->val :
  2250. capability->cap[LOWLATENCY_MODE].value;
  2251. if (msm_vidc_get_parent_value(inst, LOWLATENCY_MODE, BITRATE_MODE,
  2252. &rc_type, __func__))
  2253. return -EINVAL;
  2254. if (rc_type == HFI_RC_CBR_CFR ||
  2255. rc_type == HFI_RC_CBR_VFR ||
  2256. is_enc_slice_delivery_mode(inst))
  2257. adjusted_value = 1;
  2258. msm_vidc_update_cap_value(inst, LOWLATENCY_MODE,
  2259. adjusted_value, __func__);
  2260. return 0;
  2261. }
  2262. int msm_vidc_adjust_dec_lowlatency_mode(void *instance, struct v4l2_ctrl *ctrl)
  2263. {
  2264. struct msm_vidc_inst_capability *capability;
  2265. s32 adjusted_value;
  2266. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  2267. s32 outbuf_fence = MSM_VIDC_META_DISABLE;
  2268. if (!inst || !inst->capabilities) {
  2269. d_vpr_e("%s: invalid params\n", __func__);
  2270. return -EINVAL;
  2271. }
  2272. capability = inst->capabilities;
  2273. adjusted_value = ctrl ? ctrl->val :
  2274. capability->cap[LOWLATENCY_MODE].value;
  2275. if (msm_vidc_get_parent_value(inst, LOWLATENCY_MODE, META_OUTBUF_FENCE,
  2276. &outbuf_fence, __func__))
  2277. return -EINVAL;
  2278. /* enable lowlatency if outbuf fence is enabled */
  2279. if (outbuf_fence & MSM_VIDC_META_ENABLE &&
  2280. outbuf_fence & MSM_VIDC_META_RX_INPUT)
  2281. adjusted_value = 1;
  2282. msm_vidc_update_cap_value(inst, LOWLATENCY_MODE,
  2283. adjusted_value, __func__);
  2284. return 0;
  2285. }
  2286. int msm_vidc_adjust_session_priority(void *instance, struct v4l2_ctrl *ctrl)
  2287. {
  2288. int adjusted_value;
  2289. struct msm_vidc_inst_capability *capability;
  2290. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  2291. if (!inst || !inst->capabilities) {
  2292. d_vpr_e("%s: invalid params\n", __func__);
  2293. return -EINVAL;
  2294. }
  2295. capability = inst->capabilities;
  2296. /*
  2297. * Priority handling
  2298. * Client will set 0 (realtime), 1+ (non-realtime)
  2299. * Driver adds NRT_PRIORITY_OFFSET (2) to clients non-realtime priority
  2300. * and hence PRIORITY values in the driver become 0, 3+.
  2301. * Driver may move decode realtime sessions to non-realtime by
  2302. * increasing priority by 1 to RT sessions in HW overloaded cases.
  2303. * So driver PRIORITY values can be 0, 1, 3+.
  2304. * When driver setting priority to firmware, driver adds
  2305. * FIRMWARE_PRIORITY_OFFSET (1) for all sessions except
  2306. * non-critical sessions. So finally firmware priority values ranges
  2307. * from 0 (Critical session), 1 (realtime session),
  2308. * 2+ (non-realtime session)
  2309. */
  2310. if (ctrl) {
  2311. /* add offset when client sets non-realtime */
  2312. if (ctrl->val)
  2313. adjusted_value = ctrl->val + NRT_PRIORITY_OFFSET;
  2314. else
  2315. adjusted_value = ctrl->val;
  2316. } else {
  2317. adjusted_value = capability->cap[PRIORITY].value;
  2318. }
  2319. msm_vidc_update_cap_value(inst, PRIORITY, adjusted_value, __func__);
  2320. return 0;
  2321. }
  2322. int msm_vidc_adjust_roi_info(void *instance, struct v4l2_ctrl *ctrl)
  2323. {
  2324. struct msm_vidc_inst_capability *capability;
  2325. s32 adjusted_value;
  2326. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  2327. s32 rc_type = -1, pix_fmt = -1;
  2328. if (!inst || !inst->capabilities) {
  2329. d_vpr_e("%s: invalid params\n", __func__);
  2330. return -EINVAL;
  2331. }
  2332. capability = inst->capabilities;
  2333. adjusted_value = ctrl ? ctrl->val : capability->cap[META_ROI_INFO].value;
  2334. if (msm_vidc_get_parent_value(inst, META_ROI_INFO, BITRATE_MODE,
  2335. &rc_type, __func__))
  2336. return -EINVAL;
  2337. if (msm_vidc_get_parent_value(inst, META_ROI_INFO, PIX_FMTS,
  2338. &pix_fmt, __func__))
  2339. return -EINVAL;
  2340. if ((rc_type != HFI_RC_VBR_CFR && rc_type != HFI_RC_CBR_CFR
  2341. && rc_type != HFI_RC_CBR_VFR) || !is_8bit_colorformat(pix_fmt)
  2342. || is_scaling_enabled(inst) || is_rotation_90_or_270(inst))
  2343. adjusted_value = 0;
  2344. msm_vidc_update_cap_value(inst, META_ROI_INFO,
  2345. adjusted_value, __func__);
  2346. return 0;
  2347. }
  2348. int msm_vidc_adjust_dec_outbuf_fence(void *instance, struct v4l2_ctrl *ctrl)
  2349. {
  2350. struct msm_vidc_inst_capability *capability;
  2351. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  2352. u32 adjusted_value = 0;
  2353. s32 picture_order = -1;
  2354. if (!inst || !inst->capabilities) {
  2355. d_vpr_e("%s: invalid params\n", __func__);
  2356. return -EINVAL;
  2357. }
  2358. capability = inst->capabilities;
  2359. adjusted_value = ctrl ? ctrl->val : capability->cap[META_OUTBUF_FENCE].value;
  2360. if (msm_vidc_get_parent_value(inst, META_OUTBUF_FENCE, OUTPUT_ORDER,
  2361. &picture_order, __func__))
  2362. return -EINVAL;
  2363. if (picture_order == 0) {
  2364. /* disable outbuf fence */
  2365. adjusted_value = MSM_VIDC_META_DISABLE |
  2366. MSM_VIDC_META_RX_INPUT;
  2367. }
  2368. msm_vidc_update_cap_value(inst, META_OUTBUF_FENCE,
  2369. adjusted_value, __func__);
  2370. return 0;
  2371. }
  2372. int msm_vidc_adjust_dec_slice_mode(void *instance, struct v4l2_ctrl *ctrl)
  2373. {
  2374. struct msm_vidc_inst_capability *capability;
  2375. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  2376. u32 adjusted_value = 0;
  2377. s32 low_latency = -1;
  2378. s32 picture_order = -1;
  2379. s32 outbuf_fence = 0;
  2380. if (!inst || !inst->capabilities) {
  2381. d_vpr_e("%s: invalid params\n", __func__);
  2382. return -EINVAL;
  2383. }
  2384. capability = inst->capabilities;
  2385. adjusted_value = ctrl ? ctrl->val : capability->cap[SLICE_DECODE].value;
  2386. if (msm_vidc_get_parent_value(inst, SLICE_DECODE, LOWLATENCY_MODE,
  2387. &low_latency, __func__) ||
  2388. msm_vidc_get_parent_value(inst, SLICE_DECODE, OUTPUT_ORDER,
  2389. &picture_order, __func__) ||
  2390. msm_vidc_get_parent_value(inst, SLICE_DECODE, META_OUTBUF_FENCE,
  2391. &outbuf_fence, __func__))
  2392. return -EINVAL;
  2393. if (!low_latency || !picture_order ||
  2394. !is_meta_rx_inp_enabled(inst, META_OUTBUF_FENCE))
  2395. adjusted_value = 0;
  2396. msm_vidc_update_cap_value(inst, SLICE_DECODE,
  2397. adjusted_value, __func__);
  2398. return 0;
  2399. }
/*
 * msm_vidc_prepare_dependency_list() - build a topologically ordered cap list.
 *
 * Builds inst->caps_list so that every capability appears after all of the
 * capabilities it depends on (its parents). Root caps (no parents) are added
 * first; remaining caps are moved from a local "optional" list into the root
 * list once all their parents have been placed. A counter-based sweep detects
 * dependency cycles. On any failure both local lists are drained and freed.
 *
 * Returns 0 on success (list prepared or already prepared), -EINVAL on
 * invalid params, cap-id mismatch, or a detected dependency loop.
 */
int msm_vidc_prepare_dependency_list(struct msm_vidc_inst *inst)
{
	struct list_head root_list, opt_list;
	struct msm_vidc_inst_capability *capability;
	struct msm_vidc_inst_cap *cap, *rcap;
	struct msm_vidc_inst_cap_entry *entry = NULL, *temp = NULL;
	bool root_visited[INST_CAP_MAX];
	bool opt_visited[INST_CAP_MAX];
	int tmp_count_total, tmp_count, num_nodes = 0;
	int i, rc = 0;

	if (!inst || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	capability = inst->capabilities;

	/* list is built once per instance; reuse if already populated */
	if (!list_empty(&inst->caps_list)) {
		i_vpr_h(inst, "%s: dependency list already prepared\n", __func__);
		return 0;
	}

	/* init local list and lookup table entries */
	INIT_LIST_HEAD(&root_list);
	INIT_LIST_HEAD(&opt_list);
	memset(&root_visited, 0, sizeof(root_visited));
	memset(&opt_visited, 0, sizeof(opt_visited));

	/* populate root nodes first */
	for (i = 1; i < INST_CAP_MAX; i++) {
		rcap = &capability->cap[i];
		if (!is_valid_cap(rcap))
			continue;

		/* sanitize cap value: array index must match stored cap_id */
		if (i != rcap->cap_id) {
			i_vpr_e(inst, "%s: cap id mismatch. expected %s, actual %s\n",
				__func__, cap_name(i), cap_name(rcap->cap_id));
			rc = -EINVAL;
			goto error;
		}

		/* add all root nodes; everything else starts in opt_list */
		if (is_root(rcap)) {
			rc = add_node(&root_list, rcap, root_visited);
			if (rc)
				goto error;
		} else {
			rc = add_node(&opt_list, rcap, opt_visited);
			if (rc)
				goto error;
		}
	}

	/* add all dependent parents: pull children whose parents are all placed */
	list_for_each_entry_safe(entry, temp, &root_list, list) {
		rcap = &capability->cap[entry->cap_id];
		/* skip leaf node */
		if (!has_childrens(rcap))
			continue;

		for (i = 0; i < MAX_CAP_CHILDREN; i++) {
			/* children array is packed; first zero ends the list */
			if (!rcap->children[i])
				break;

			if (!is_valid_cap_id(rcap->children[i]))
				continue;

			cap = &capability->cap[rcap->children[i]];
			if (!is_valid_cap(cap))
				continue;

			/**
			 * if child node is already part of root list
			 * then no need to add it again.
			 */
			if (root_visited[cap->cap_id])
				continue;

			/**
			 * if child node's all parents are already present in root list
			 * then add it to root list else remains in optional list.
			 */
			if (is_all_parents_visited(cap, root_visited)) {
				rc = swap_node(cap,
					&opt_list, opt_visited, &root_list, root_visited);
				if (rc)
					goto error;
			}
		}
	}

	/* find total optional list entries */
	list_for_each_entry(entry, &opt_list, list)
		num_nodes++;

	/* used for loop detection */
	tmp_count_total = num_nodes;
	tmp_count = num_nodes;

	/* sort final outstanding nodes */
	list_for_each_entry_safe(entry, temp, &opt_list, list) {
		/* initially remove entry from opt list */
		list_del_init(&entry->list);
		opt_visited[entry->cap_id] = false;
		tmp_count--;
		cap = &capability->cap[entry->cap_id];

		/**
		 * if all parents are visited then add this entry to
		 * root list else add it to the end of optional list
		 * (it will be revisited on a later pass).
		 */
		if (is_all_parents_visited(cap, root_visited)) {
			list_add_tail(&entry->list, &root_list);
			root_visited[entry->cap_id] = true;
			tmp_count_total--;
		} else {
			list_add_tail(&entry->list, &opt_list);
			opt_visited[entry->cap_id] = true;
		}

		/*
		 * detect loop: if a full pass over the remaining nodes moved
		 * nothing into root_list, the leftover subgraph is cyclic.
		 */
		if (!tmp_count) {
			if (num_nodes == tmp_count_total) {
				i_vpr_e(inst, "%s: loop detected in subgraph %d\n",
					__func__, num_nodes);
				rc = -EINVAL;
				goto error;
			}
			num_nodes = tmp_count_total;
			tmp_count = tmp_count_total;
		}
	}

	/* expecting opt_list to be empty */
	if (!list_empty(&opt_list)) {
		i_vpr_e(inst, "%s: opt_list is not empty\n", __func__);
		rc = -EINVAL;
		goto error;
	}

	/* move elements to &inst->caps_list from local */
	list_replace_init(&root_list, &inst->caps_list);

	return 0;
error:
	/* drain and free both local lists on failure */
	list_for_each_entry_safe(entry, temp, &opt_list, list) {
		i_vpr_e(inst, "%s: opt_list: %s\n", __func__, cap_name(entry->cap_id));
		list_del_init(&entry->list);
		msm_vidc_vmem_free((void **)&entry);
	}
	list_for_each_entry_safe(entry, temp, &root_list, list) {
		i_vpr_e(inst, "%s: root_list: %s\n", __func__, cap_name(entry->cap_id));
		list_del_init(&entry->list);
		msm_vidc_vmem_free((void **)&entry);
	}
	return rc;
}
  2538. /*
  2539. * Loop over instance capabilities from caps_list
  2540. * and call adjust and set function
  2541. */
  2542. int msm_vidc_adjust_set_v4l2_properties(struct msm_vidc_inst *inst)
  2543. {
  2544. struct msm_vidc_inst_cap_entry *entry = NULL, *temp = NULL;
  2545. int rc = 0;
  2546. if (!inst || !inst->capabilities) {
  2547. d_vpr_e("%s: invalid params\n", __func__);
  2548. return -EINVAL;
  2549. }
  2550. i_vpr_h(inst, "%s()\n", __func__);
  2551. /* adjust all possible caps from caps_list */
  2552. list_for_each_entry_safe(entry, temp, &inst->caps_list, list) {
  2553. i_vpr_l(inst, "%s: cap: id %3u, name %s\n", __func__,
  2554. entry->cap_id, cap_name(entry->cap_id));
  2555. rc = msm_vidc_adjust_cap(inst, entry->cap_id, NULL, __func__);
  2556. if (rc)
  2557. return rc;
  2558. }
  2559. /* set all caps from caps_list */
  2560. list_for_each_entry_safe(entry, temp, &inst->caps_list, list) {
  2561. rc = msm_vidc_set_cap(inst, entry->cap_id, __func__);
  2562. if (rc)
  2563. return rc;
  2564. }
  2565. return rc;
  2566. }
  2567. int msm_vidc_set_header_mode(void *instance,
  2568. enum msm_vidc_inst_capability_type cap_id)
  2569. {
  2570. int rc = 0;
  2571. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  2572. int header_mode, prepend_sps_pps;
  2573. u32 hfi_value = 0;
  2574. struct msm_vidc_inst_capability *capability;
  2575. if (!inst || !inst->capabilities) {
  2576. d_vpr_e("%s: invalid params\n", __func__);
  2577. return -EINVAL;
  2578. }
  2579. capability = inst->capabilities;
  2580. header_mode = capability->cap[cap_id].value;
  2581. prepend_sps_pps = capability->cap[PREPEND_SPSPPS_TO_IDR].value;
  2582. /* prioritize PREPEND_SPSPPS_TO_IDR mode over other header modes */
  2583. if (prepend_sps_pps)
  2584. hfi_value = HFI_SEQ_HEADER_PREFIX_WITH_SYNC_FRAME;
  2585. else if (header_mode == V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME)
  2586. hfi_value = HFI_SEQ_HEADER_JOINED_WITH_1ST_FRAME;
  2587. else
  2588. hfi_value = HFI_SEQ_HEADER_SEPERATE_FRAME;
  2589. if (is_meta_rx_inp_enabled(inst, META_SEQ_HDR_NAL))
  2590. hfi_value |= HFI_SEQ_HEADER_METADATA;
  2591. rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32_ENUM,
  2592. &hfi_value, sizeof(u32), __func__);
  2593. if (rc)
  2594. return rc;
  2595. return rc;
  2596. }
  2597. int msm_vidc_set_deblock_mode(void *instance,
  2598. enum msm_vidc_inst_capability_type cap_id)
  2599. {
  2600. int rc = 0;
  2601. struct msm_vidc_inst *inst = (struct msm_vidc_inst *) instance;
  2602. s32 alpha = 0, beta = 0;
  2603. u32 lf_mode, hfi_value = 0, lf_offset = 6;
  2604. struct msm_vidc_inst_capability *capability;
  2605. if (!inst || !inst->capabilities) {
  2606. d_vpr_e("%s: invalid params\n", __func__);
  2607. return -EINVAL;
  2608. }
  2609. capability = inst->capabilities;
  2610. rc = msm_vidc_v4l2_to_hfi_enum(inst, LF_MODE, &lf_mode);
  2611. if (rc)
  2612. return -EINVAL;
  2613. beta = inst->capabilities->cap[LF_BETA].value + lf_offset;
  2614. alpha = inst->capabilities->cap[LF_ALPHA].value + lf_offset;
  2615. hfi_value = (alpha << 16) | (beta << 8) | lf_mode;
  2616. rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_32_PACKED,
  2617. &hfi_value, sizeof(u32), __func__);
  2618. if (rc)
  2619. return rc;
  2620. return rc;
  2621. }
  2622. int msm_vidc_set_constant_quality(void *instance,
  2623. enum msm_vidc_inst_capability_type cap_id)
  2624. {
  2625. int rc = 0;
  2626. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  2627. u32 hfi_value = 0;
  2628. s32 rc_type = -1;
  2629. if (!inst || !inst->capabilities) {
  2630. d_vpr_e("%s: invalid params\n", __func__);
  2631. return -EINVAL;
  2632. }
  2633. if (msm_vidc_get_parent_value(inst, cap_id,
  2634. BITRATE_MODE, &rc_type, __func__))
  2635. return -EINVAL;
  2636. if (rc_type != HFI_RC_CQ)
  2637. return 0;
  2638. hfi_value = inst->capabilities->cap[cap_id].value;
  2639. rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32,
  2640. &hfi_value, sizeof(u32), __func__);
  2641. if (rc)
  2642. return rc;
  2643. return rc;
  2644. }
  2645. int msm_vidc_set_vbr_related_properties(void *instance,
  2646. enum msm_vidc_inst_capability_type cap_id)
  2647. {
  2648. int rc = 0;
  2649. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  2650. u32 hfi_value = 0;
  2651. s32 rc_type = -1;
  2652. if (!inst || !inst->capabilities) {
  2653. d_vpr_e("%s: invalid params\n", __func__);
  2654. return -EINVAL;
  2655. }
  2656. if (msm_vidc_get_parent_value(inst, cap_id,
  2657. BITRATE_MODE, &rc_type, __func__))
  2658. return -EINVAL;
  2659. if (rc_type != HFI_RC_VBR_CFR)
  2660. return 0;
  2661. hfi_value = inst->capabilities->cap[cap_id].value;
  2662. rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32,
  2663. &hfi_value, sizeof(u32), __func__);
  2664. if (rc)
  2665. return rc;
  2666. return rc;
  2667. }
  2668. int msm_vidc_set_cbr_related_properties(void *instance,
  2669. enum msm_vidc_inst_capability_type cap_id)
  2670. {
  2671. int rc = 0;
  2672. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  2673. u32 hfi_value = 0;
  2674. s32 rc_type = -1;
  2675. if (!inst || !inst->capabilities) {
  2676. d_vpr_e("%s: invalid params\n", __func__);
  2677. return -EINVAL;
  2678. }
  2679. if (msm_vidc_get_parent_value(inst, cap_id,
  2680. BITRATE_MODE, &rc_type, __func__))
  2681. return -EINVAL;
  2682. if (rc_type != HFI_RC_CBR_VFR &&
  2683. rc_type != HFI_RC_CBR_CFR)
  2684. return 0;
  2685. hfi_value = inst->capabilities->cap[cap_id].value;
  2686. rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32,
  2687. &hfi_value, sizeof(u32), __func__);
  2688. if (rc)
  2689. return rc;
  2690. return rc;
  2691. }
  2692. int msm_vidc_set_use_and_mark_ltr(void *instance,
  2693. enum msm_vidc_inst_capability_type cap_id)
  2694. {
  2695. int rc = 0;
  2696. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  2697. u32 hfi_value = 0;
  2698. if (!inst || !inst->capabilities) {
  2699. d_vpr_e("%s: invalid params\n", __func__);
  2700. return -EINVAL;
  2701. }
  2702. if (!inst->capabilities->cap[LTR_COUNT].value ||
  2703. (inst->capabilities->cap[cap_id].value ==
  2704. INVALID_DEFAULT_MARK_OR_USE_LTR)) {
  2705. i_vpr_h(inst,
  2706. "%s: LTR_COUNT: %d %s: %d, cap %s is not set\n",
  2707. __func__, inst->capabilities->cap[LTR_COUNT].value,
  2708. cap_name(cap_id),
  2709. inst->capabilities->cap[cap_id].value,
  2710. cap_name(cap_id));
  2711. return 0;
  2712. }
  2713. hfi_value = inst->capabilities->cap[cap_id].value;
  2714. rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32,
  2715. &hfi_value, sizeof(u32), __func__);
  2716. if (rc)
  2717. return rc;
  2718. return rc;
  2719. }
  2720. int msm_vidc_set_min_qp(void *instance,
  2721. enum msm_vidc_inst_capability_type cap_id)
  2722. {
  2723. int rc = 0;
  2724. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  2725. struct msm_vidc_inst_capability *capability;
  2726. s32 i_frame_qp = 0, p_frame_qp = 0, b_frame_qp = 0, min_qp_enable = 0;
  2727. u32 i_qp_enable = 0, p_qp_enable = 0, b_qp_enable = 0;
  2728. u32 client_qp_enable = 0, hfi_value = 0, offset = 0;
  2729. if (!inst || !inst->capabilities) {
  2730. d_vpr_e("%s: invalid params\n", __func__);
  2731. return -EINVAL;
  2732. }
  2733. capability = inst->capabilities;
  2734. if (capability->cap[MIN_FRAME_QP].flags & CAP_FLAG_CLIENT_SET)
  2735. min_qp_enable = 1;
  2736. if (min_qp_enable ||
  2737. (capability->cap[I_FRAME_MIN_QP].flags & CAP_FLAG_CLIENT_SET))
  2738. i_qp_enable = 1;
  2739. if (min_qp_enable ||
  2740. (capability->cap[P_FRAME_MIN_QP].flags & CAP_FLAG_CLIENT_SET))
  2741. p_qp_enable = 1;
  2742. if (min_qp_enable ||
  2743. (capability->cap[B_FRAME_MIN_QP].flags & CAP_FLAG_CLIENT_SET))
  2744. b_qp_enable = 1;
  2745. client_qp_enable = i_qp_enable | p_qp_enable << 1 | b_qp_enable << 2;
  2746. if (!client_qp_enable) {
  2747. i_vpr_h(inst,
  2748. "%s: client did not set min qp, cap %s is not set\n",
  2749. __func__, cap_name(cap_id));
  2750. return 0;
  2751. }
  2752. if (is_10bit_colorformat(capability->cap[PIX_FMTS].value))
  2753. offset = 12;
  2754. /*
  2755. * I_FRAME_MIN_QP, P_FRAME_MIN_QP, B_FRAME_MIN_QP,
  2756. * MIN_FRAME_QP caps have default value as MIN_QP_10BIT values.
  2757. * Hence, if client sets either one among MIN_FRAME_QP
  2758. * and (I_FRAME_MIN_QP or P_FRAME_MIN_QP or B_FRAME_MIN_QP),
  2759. * max of both caps will result into client set value.
  2760. */
  2761. i_frame_qp = max(capability->cap[I_FRAME_MIN_QP].value,
  2762. capability->cap[MIN_FRAME_QP].value) + offset;
  2763. p_frame_qp = max(capability->cap[P_FRAME_MIN_QP].value,
  2764. capability->cap[MIN_FRAME_QP].value) + offset;
  2765. b_frame_qp = max(capability->cap[B_FRAME_MIN_QP].value,
  2766. capability->cap[MIN_FRAME_QP].value) + offset;
  2767. hfi_value = i_frame_qp | p_frame_qp << 8 | b_frame_qp << 16 |
  2768. client_qp_enable << 24;
  2769. rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_32_PACKED,
  2770. &hfi_value, sizeof(u32), __func__);
  2771. if (rc)
  2772. return rc;
  2773. return rc;
  2774. }
  2775. int msm_vidc_set_max_qp(void *instance,
  2776. enum msm_vidc_inst_capability_type cap_id)
  2777. {
  2778. int rc = 0;
  2779. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  2780. struct msm_vidc_inst_capability *capability;
  2781. s32 i_frame_qp = 0, p_frame_qp = 0, b_frame_qp = 0, max_qp_enable = 0;
  2782. u32 i_qp_enable = 0, p_qp_enable = 0, b_qp_enable = 0;
  2783. u32 client_qp_enable = 0, hfi_value = 0, offset = 0;
  2784. if (!inst || !inst->capabilities) {
  2785. d_vpr_e("%s: invalid params\n", __func__);
  2786. return -EINVAL;
  2787. }
  2788. capability = inst->capabilities;
  2789. if (capability->cap[MAX_FRAME_QP].flags & CAP_FLAG_CLIENT_SET)
  2790. max_qp_enable = 1;
  2791. if (max_qp_enable ||
  2792. (capability->cap[I_FRAME_MAX_QP].flags & CAP_FLAG_CLIENT_SET))
  2793. i_qp_enable = 1;
  2794. if (max_qp_enable ||
  2795. (capability->cap[P_FRAME_MAX_QP].flags & CAP_FLAG_CLIENT_SET))
  2796. p_qp_enable = 1;
  2797. if (max_qp_enable ||
  2798. (capability->cap[B_FRAME_MAX_QP].flags & CAP_FLAG_CLIENT_SET))
  2799. b_qp_enable = 1;
  2800. client_qp_enable = i_qp_enable | p_qp_enable << 1 | b_qp_enable << 2;
  2801. if (!client_qp_enable) {
  2802. i_vpr_h(inst,
  2803. "%s: client did not set max qp, cap %s is not set\n",
  2804. __func__, cap_name(cap_id));
  2805. return 0;
  2806. }
  2807. if (is_10bit_colorformat(capability->cap[PIX_FMTS].value))
  2808. offset = 12;
  2809. /*
  2810. * I_FRAME_MAX_QP, P_FRAME_MAX_QP, B_FRAME_MAX_QP,
  2811. * MAX_FRAME_QP caps have default value as MAX_QP values.
  2812. * Hence, if client sets either one among MAX_FRAME_QP
  2813. * and (I_FRAME_MAX_QP or P_FRAME_MAX_QP or B_FRAME_MAX_QP),
  2814. * min of both caps will result into client set value.
  2815. */
  2816. i_frame_qp = min(capability->cap[I_FRAME_MAX_QP].value,
  2817. capability->cap[MAX_FRAME_QP].value) + offset;
  2818. p_frame_qp = min(capability->cap[P_FRAME_MAX_QP].value,
  2819. capability->cap[MAX_FRAME_QP].value) + offset;
  2820. b_frame_qp = min(capability->cap[B_FRAME_MAX_QP].value,
  2821. capability->cap[MAX_FRAME_QP].value) + offset;
  2822. hfi_value = i_frame_qp | p_frame_qp << 8 | b_frame_qp << 16 |
  2823. client_qp_enable << 24;
  2824. rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_32_PACKED,
  2825. &hfi_value, sizeof(u32), __func__);
  2826. if (rc)
  2827. return rc;
  2828. return rc;
  2829. }
  2830. int msm_vidc_set_frame_qp(void *instance,
  2831. enum msm_vidc_inst_capability_type cap_id)
  2832. {
  2833. int rc = 0;
  2834. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  2835. struct msm_vidc_inst_capability *capab;
  2836. s32 i_frame_qp = 0, p_frame_qp = 0, b_frame_qp = 0;
  2837. u32 i_qp_enable = 0, p_qp_enable = 0, b_qp_enable = 0;
  2838. u32 client_qp_enable = 0, hfi_value = 0, offset = 0;
  2839. s32 rc_type = -1;
  2840. if (!inst || !inst->capabilities) {
  2841. d_vpr_e("%s: invalid params\n", __func__);
  2842. return -EINVAL;
  2843. }
  2844. capab = inst->capabilities;
  2845. if (msm_vidc_get_parent_value(inst, cap_id,
  2846. BITRATE_MODE, &rc_type, __func__))
  2847. return -EINVAL;
  2848. if (inst->bufq[OUTPUT_PORT].vb2q->streaming) {
  2849. if (rc_type != HFI_RC_OFF) {
  2850. i_vpr_h(inst,
  2851. "%s: dynamic qp not allowed for rc type %d\n",
  2852. __func__, rc_type);
  2853. return 0;
  2854. }
  2855. }
  2856. if (rc_type == HFI_RC_OFF) {
  2857. /* Mandatorily set for rc off case */
  2858. i_qp_enable = p_qp_enable = b_qp_enable = 1;
  2859. } else {
  2860. /* Set only if client has set for NON rc off case */
  2861. if (capab->cap[I_FRAME_QP].flags & CAP_FLAG_CLIENT_SET)
  2862. i_qp_enable = 1;
  2863. if (capab->cap[P_FRAME_QP].flags & CAP_FLAG_CLIENT_SET)
  2864. p_qp_enable = 1;
  2865. if (capab->cap[B_FRAME_QP].flags & CAP_FLAG_CLIENT_SET)
  2866. b_qp_enable = 1;
  2867. }
  2868. client_qp_enable = i_qp_enable | p_qp_enable << 1 | b_qp_enable << 2;
  2869. if (!client_qp_enable) {
  2870. i_vpr_h(inst,
  2871. "%s: client did not set frame qp, cap %s is not set\n",
  2872. __func__, cap_name(cap_id));
  2873. return 0;
  2874. }
  2875. if (is_10bit_colorformat(capab->cap[PIX_FMTS].value))
  2876. offset = 12;
  2877. i_frame_qp = capab->cap[I_FRAME_QP].value + offset;
  2878. p_frame_qp = capab->cap[P_FRAME_QP].value + offset;
  2879. b_frame_qp = capab->cap[B_FRAME_QP].value + offset;
  2880. hfi_value = i_frame_qp | p_frame_qp << 8 | b_frame_qp << 16 |
  2881. client_qp_enable << 24;
  2882. rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_32_PACKED,
  2883. &hfi_value, sizeof(u32), __func__);
  2884. if (rc)
  2885. return rc;
  2886. return rc;
  2887. }
  2888. int msm_vidc_set_req_sync_frame(void *instance,
  2889. enum msm_vidc_inst_capability_type cap_id)
  2890. {
  2891. int rc = 0;
  2892. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  2893. s32 prepend_spspps;
  2894. u32 hfi_value = 0;
  2895. if (!inst || !inst->capabilities) {
  2896. d_vpr_e("%s: invalid params\n", __func__);
  2897. return -EINVAL;
  2898. }
  2899. prepend_spspps = inst->capabilities->cap[PREPEND_SPSPPS_TO_IDR].value;
  2900. if (prepend_spspps)
  2901. hfi_value = HFI_SYNC_FRAME_REQUEST_WITH_PREFIX_SEQ_HDR;
  2902. else
  2903. hfi_value = HFI_SYNC_FRAME_REQUEST_WITHOUT_SEQ_HDR;
  2904. rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32_ENUM,
  2905. &hfi_value, sizeof(u32), __func__);
  2906. if (rc)
  2907. return rc;
  2908. return rc;
  2909. }
  2910. int msm_vidc_set_chroma_qp_index_offset(void *instance,
  2911. enum msm_vidc_inst_capability_type cap_id)
  2912. {
  2913. int rc = 0;
  2914. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  2915. u32 hfi_value = 0, chroma_qp_offset_mode = 0, chroma_qp = 0;
  2916. u32 offset = 12;
  2917. if (!inst || !inst->capabilities) {
  2918. d_vpr_e("%s: invalid params\n", __func__);
  2919. return -EINVAL;
  2920. }
  2921. if (inst->capabilities->cap[cap_id].flags & CAP_FLAG_CLIENT_SET)
  2922. chroma_qp_offset_mode = HFI_FIXED_CHROMAQP_OFFSET;
  2923. else
  2924. chroma_qp_offset_mode = HFI_ADAPTIVE_CHROMAQP_OFFSET;
  2925. chroma_qp = inst->capabilities->cap[cap_id].value + offset;
  2926. hfi_value = chroma_qp_offset_mode | chroma_qp << 8 | chroma_qp << 16 ;
  2927. rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_32_PACKED,
  2928. &hfi_value, sizeof(u32), __func__);
  2929. if (rc)
  2930. return rc;
  2931. return rc;
  2932. }
  2933. int msm_vidc_set_slice_count(void* instance,
  2934. enum msm_vidc_inst_capability_type cap_id)
  2935. {
  2936. int rc = 0;
  2937. struct msm_vidc_inst* inst = (struct msm_vidc_inst*)instance;
  2938. s32 slice_mode = -1;
  2939. u32 hfi_value = 0, set_cap_id = 0;
  2940. if (!inst || !inst->capabilities) {
  2941. d_vpr_e("%s: invalid params\n", __func__);
  2942. return -EINVAL;
  2943. }
  2944. slice_mode = inst->capabilities->cap[SLICE_MODE].value;
  2945. if (slice_mode == V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE) {
  2946. i_vpr_h(inst, "%s: slice mode is: %u, ignore setting to fw\n",
  2947. __func__, slice_mode);
  2948. return 0;
  2949. }
  2950. if (slice_mode == V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_MB) {
  2951. hfi_value = (inst->codec == MSM_VIDC_HEVC) ?
  2952. ((inst->capabilities->cap[SLICE_MAX_MB].value + 3) / 4) :
  2953. inst->capabilities->cap[SLICE_MAX_MB].value;
  2954. set_cap_id = SLICE_MAX_MB;
  2955. } else if (slice_mode == V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_MAX_BYTES) {
  2956. hfi_value = inst->capabilities->cap[SLICE_MAX_BYTES].value;
  2957. set_cap_id = SLICE_MAX_BYTES;
  2958. }
  2959. rc = msm_vidc_packetize_control(inst, set_cap_id, HFI_PAYLOAD_U32,
  2960. &hfi_value, sizeof(u32), __func__);
  2961. if (rc)
  2962. return rc;
  2963. return rc;
  2964. }
  2965. int msm_vidc_set_nal_length(void* instance,
  2966. enum msm_vidc_inst_capability_type cap_id)
  2967. {
  2968. int rc = 0;
  2969. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  2970. u32 hfi_value = HFI_NAL_LENGTH_STARTCODES;
  2971. if (!inst || !inst->capabilities) {
  2972. d_vpr_e("%s: invalid params\n", __func__);
  2973. return -EINVAL;
  2974. }
  2975. if (!inst->capabilities->cap[WITHOUT_STARTCODE].value) {
  2976. hfi_value = HFI_NAL_LENGTH_STARTCODES;
  2977. } else {
  2978. rc = msm_vidc_v4l2_to_hfi_enum(inst, NAL_LENGTH_FIELD, &hfi_value);
  2979. if (rc)
  2980. return -EINVAL;
  2981. }
  2982. rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32_ENUM,
  2983. &hfi_value, sizeof(u32), __func__);
  2984. if (rc)
  2985. return rc;
  2986. return rc;
  2987. }
  2988. int msm_vidc_set_layer_count_and_type(void *instance,
  2989. enum msm_vidc_inst_capability_type cap_id)
  2990. {
  2991. int rc = 0;
  2992. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  2993. u32 hfi_layer_count, hfi_layer_type = 0;
  2994. if (!inst || !inst->capabilities) {
  2995. d_vpr_e("%s: invalid params\n", __func__);
  2996. return -EINVAL;
  2997. }
  2998. if (!inst->bufq[OUTPUT_PORT].vb2q->streaming) {
  2999. /* set layer type */
  3000. hfi_layer_type = inst->hfi_layer_type;
  3001. cap_id = LAYER_TYPE;
  3002. rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32_ENUM,
  3003. &hfi_layer_type, sizeof(u32), __func__);
  3004. if (rc)
  3005. goto exit;
  3006. } else {
  3007. if (inst->hfi_layer_type == HFI_HIER_B) {
  3008. i_vpr_l(inst,
  3009. "%s: HB dyn layers change is not supported\n",
  3010. __func__);
  3011. return 0;
  3012. }
  3013. }
  3014. /* set layer count */
  3015. cap_id = ENH_LAYER_COUNT;
  3016. /* hfi baselayer starts from 1 */
  3017. hfi_layer_count = inst->capabilities->cap[ENH_LAYER_COUNT].value + 1;
  3018. rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32,
  3019. &hfi_layer_count, sizeof(u32), __func__);
  3020. if (rc)
  3021. goto exit;
  3022. exit:
  3023. return rc;
  3024. }
  3025. int msm_vidc_set_gop_size(void *instance,
  3026. enum msm_vidc_inst_capability_type cap_id)
  3027. {
  3028. int rc = 0;
  3029. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  3030. u32 hfi_value;
  3031. if (!inst || !inst->capabilities) {
  3032. d_vpr_e("%s: invalid params\n", __func__);
  3033. return -EINVAL;
  3034. }
  3035. if (inst->bufq[OUTPUT_PORT].vb2q->streaming) {
  3036. if (inst->hfi_layer_type == HFI_HIER_B) {
  3037. i_vpr_l(inst,
  3038. "%s: HB dyn GOP setting is not supported\n",
  3039. __func__);
  3040. return 0;
  3041. }
  3042. }
  3043. hfi_value = inst->capabilities->cap[GOP_SIZE].value;
  3044. rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32,
  3045. &hfi_value, sizeof(u32), __func__);
  3046. if (rc)
  3047. return rc;
  3048. return rc;
  3049. }
  3050. int msm_vidc_set_bitrate(void *instance,
  3051. enum msm_vidc_inst_capability_type cap_id)
  3052. {
  3053. int rc = 0, i;
  3054. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  3055. u32 hfi_value = 0;
  3056. s32 rc_type = -1, enh_layer_count = -1;
  3057. u32 layer_br_caps[6] = {L0_BR, L1_BR, L2_BR, L3_BR, L4_BR, L5_BR};
  3058. if (!inst || !inst->capabilities) {
  3059. d_vpr_e("%s: invalid params\n", __func__);
  3060. return -EINVAL;
  3061. }
  3062. /* set Total Bitrate */
  3063. if (inst->capabilities->cap[BIT_RATE].flags & CAP_FLAG_CLIENT_SET)
  3064. goto set_total_bitrate;
  3065. /*
  3066. * During runtime, if BIT_RATE cap CLIENT_SET flag is not set,
  3067. * then this function will be called due to change in ENH_LAYER_COUNT.
  3068. * In this case, client did not change bitrate, hence, no need to set
  3069. * to fw.
  3070. */
  3071. if (inst->bufq[OUTPUT_PORT].vb2q->streaming)
  3072. return 0;
  3073. if (msm_vidc_get_parent_value(inst, BIT_RATE,
  3074. BITRATE_MODE, &rc_type, __func__))
  3075. return -EINVAL;
  3076. if (rc_type != HFI_RC_CBR_CFR && rc_type != HFI_RC_CBR_VFR) {
  3077. i_vpr_h(inst, "%s: set total bitrate for non CBR rc type\n",
  3078. __func__);
  3079. goto set_total_bitrate;
  3080. }
  3081. if (msm_vidc_get_parent_value(inst, BIT_RATE,
  3082. ENH_LAYER_COUNT, &enh_layer_count, __func__))
  3083. return -EINVAL;
  3084. /*
  3085. * ENH_LAYER_COUNT cap max is positive only if
  3086. * layer encoding is enabled during streamon.
  3087. */
  3088. if (inst->capabilities->cap[ENH_LAYER_COUNT].max) {
  3089. if (!msm_vidc_check_all_layer_bitrate_set(inst))
  3090. goto set_total_bitrate;
  3091. /* set Layer Bitrate */
  3092. for (i = 0; i <= enh_layer_count; i++) {
  3093. if (i >= ARRAY_SIZE(layer_br_caps))
  3094. break;
  3095. cap_id = layer_br_caps[i];
  3096. hfi_value = inst->capabilities->cap[cap_id].value;
  3097. rc = msm_vidc_packetize_control(inst, cap_id,
  3098. HFI_PAYLOAD_U32, &hfi_value,
  3099. sizeof(u32), __func__);
  3100. if (rc)
  3101. return rc;
  3102. }
  3103. goto exit;
  3104. }
  3105. set_total_bitrate:
  3106. hfi_value = inst->capabilities->cap[BIT_RATE].value;
  3107. rc = msm_vidc_packetize_control(inst, BIT_RATE, HFI_PAYLOAD_U32,
  3108. &hfi_value, sizeof(u32), __func__);
  3109. if (rc)
  3110. return rc;
  3111. exit:
  3112. return rc;
  3113. }
  3114. int msm_vidc_set_dynamic_layer_bitrate(void *instance,
  3115. enum msm_vidc_inst_capability_type cap_id)
  3116. {
  3117. int rc = 0;
  3118. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  3119. u32 hfi_value = 0;
  3120. s32 rc_type = -1;
  3121. if (!inst || !inst->capabilities) {
  3122. d_vpr_e("%s: invalid params\n", __func__);
  3123. return -EINVAL;
  3124. }
  3125. if (!inst->bufq[OUTPUT_PORT].vb2q->streaming)
  3126. return 0;
  3127. /* set Total Bitrate */
  3128. if (inst->capabilities->cap[BIT_RATE].flags & CAP_FLAG_CLIENT_SET) {
  3129. i_vpr_h(inst,
  3130. "%s: Total bitrate is set, ignore layer bitrate\n",
  3131. __func__);
  3132. return 0;
  3133. }
  3134. /*
  3135. * ENH_LAYER_COUNT cap max is positive only if
  3136. * layer encoding is enabled during streamon.
  3137. */
  3138. if (!inst->capabilities->cap[ENH_LAYER_COUNT].max ||
  3139. !msm_vidc_check_all_layer_bitrate_set(inst)) {
  3140. i_vpr_h(inst,
  3141. "%s: invalid layer bitrate, ignore setting to fw\n",
  3142. __func__);
  3143. return 0;
  3144. }
  3145. if (inst->hfi_rc_type == HFI_RC_CBR_CFR ||
  3146. rc_type == HFI_RC_CBR_VFR) {
  3147. /* set layer bitrate for the client set layer */
  3148. hfi_value = inst->capabilities->cap[cap_id].value;
  3149. rc = msm_vidc_packetize_control(inst, cap_id,
  3150. HFI_PAYLOAD_U32, &hfi_value,
  3151. sizeof(u32), __func__);
  3152. if (rc)
  3153. return rc;
  3154. } else {
  3155. /*
  3156. * All layer bitartes set for unsupported rc type.
  3157. * Hence accept layer bitrates, but set total bitrate prop
  3158. * with cumulative bitrate.
  3159. */
  3160. hfi_value = inst->capabilities->cap[BIT_RATE].value;
  3161. rc = msm_vidc_packetize_control(inst, BIT_RATE, HFI_PAYLOAD_U32,
  3162. &hfi_value, sizeof(u32), __func__);
  3163. if (rc)
  3164. return rc;
  3165. }
  3166. return rc;
  3167. }
  3168. int msm_vidc_set_session_priority(void *instance,
  3169. enum msm_vidc_inst_capability_type cap_id)
  3170. {
  3171. int rc = 0;
  3172. u32 hfi_value = 0;
  3173. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  3174. if (!inst || !inst->capabilities) {
  3175. d_vpr_e("%s: invalid params\n", __func__);
  3176. return -EINVAL;
  3177. }
  3178. hfi_value = inst->capabilities->cap[cap_id].value;
  3179. if (!is_critical_priority_session(inst))
  3180. hfi_value = inst->capabilities->cap[cap_id].value +
  3181. inst->capabilities->cap[FIRMWARE_PRIORITY_OFFSET].value;
  3182. rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32,
  3183. &hfi_value, sizeof(u32), __func__);
  3184. if (rc)
  3185. return rc;
  3186. return rc;
  3187. }
  3188. int msm_vidc_set_flip(void *instance,
  3189. enum msm_vidc_inst_capability_type cap_id)
  3190. {
  3191. int rc = 0;
  3192. u32 hflip, vflip, hfi_value = HFI_DISABLE_FLIP;
  3193. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  3194. if (!inst || !inst->capabilities) {
  3195. d_vpr_e("%s: invalid params\n", __func__);
  3196. return -EINVAL;
  3197. }
  3198. hflip = inst->capabilities->cap[HFLIP].value;
  3199. vflip = inst->capabilities->cap[VFLIP].value;
  3200. if (hflip)
  3201. hfi_value |= HFI_HORIZONTAL_FLIP;
  3202. if (vflip)
  3203. hfi_value |= HFI_VERTICAL_FLIP;
  3204. if (inst->bufq[OUTPUT_PORT].vb2q->streaming) {
  3205. if (hfi_value != HFI_DISABLE_FLIP) {
  3206. rc = msm_vidc_set_req_sync_frame(inst,
  3207. REQUEST_I_FRAME);
  3208. if (rc)
  3209. return rc;
  3210. }
  3211. }
  3212. rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32_ENUM,
  3213. &hfi_value, sizeof(u32), __func__);
  3214. if (rc)
  3215. return rc;
  3216. return rc;
  3217. }
  3218. int msm_vidc_set_preprocess(void *instance,
  3219. enum msm_vidc_inst_capability_type cap_id)
  3220. {
  3221. int rc = 0;
  3222. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  3223. u32 hfi_value;
  3224. if (!inst || !inst->capabilities) {
  3225. d_vpr_e("%s: invalid params\n", __func__);
  3226. return -EINVAL;
  3227. }
  3228. rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32_ENUM,
  3229. &hfi_value, sizeof(u32), __func__);
  3230. if (rc)
  3231. return rc;
  3232. return rc;
  3233. }
  3234. int msm_vidc_set_rotation(void *instance,
  3235. enum msm_vidc_inst_capability_type cap_id)
  3236. {
  3237. int rc = 0;
  3238. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  3239. u32 hfi_value;
  3240. if (!inst || !inst->capabilities) {
  3241. d_vpr_e("%s: invalid params\n", __func__);
  3242. return -EINVAL;
  3243. }
  3244. rc = msm_vidc_v4l2_to_hfi_enum(inst, cap_id, &hfi_value);
  3245. if (rc)
  3246. return -EINVAL;
  3247. rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32,
  3248. &hfi_value, sizeof(u32), __func__);
  3249. if (rc)
  3250. return rc;
  3251. return rc;
  3252. }
  3253. int msm_vidc_set_blur_resolution(void *instance,
  3254. enum msm_vidc_inst_capability_type cap_id)
  3255. {
  3256. int rc = 0;
  3257. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  3258. s32 blur_type = -1;
  3259. u32 hfi_value, blur_width, blur_height;
  3260. if (!inst || !inst->capabilities) {
  3261. d_vpr_e("%s: invalid params\n", __func__);
  3262. return -EINVAL;
  3263. }
  3264. if (msm_vidc_get_parent_value(inst, cap_id,
  3265. BLUR_TYPES, &blur_type, __func__))
  3266. return -EINVAL;
  3267. if (blur_type != MSM_VIDC_BLUR_EXTERNAL)
  3268. return 0;
  3269. hfi_value = inst->capabilities->cap[cap_id].value;
  3270. blur_width = (hfi_value & 0xFFFF0000) >> 16;
  3271. blur_height = hfi_value & 0xFFFF;
  3272. if (blur_width > inst->crop.width ||
  3273. blur_height > inst->crop.height) {
  3274. i_vpr_e(inst,
  3275. "%s: blur wxh: %dx%d exceeds crop wxh: %dx%d\n",
  3276. __func__, blur_width, blur_height,
  3277. inst->crop.width, inst->crop.height);
  3278. hfi_value = 0;
  3279. }
  3280. if (blur_width == inst->crop.width &&
  3281. blur_height == inst->crop.height) {
  3282. i_vpr_e(inst,
  3283. "%s: blur wxh: %dx%d is equal to crop wxh: %dx%d\n",
  3284. __func__, blur_width, blur_height,
  3285. inst->crop.width, inst->crop.height);
  3286. hfi_value = 0;
  3287. }
  3288. rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_32_PACKED,
  3289. &hfi_value, sizeof(u32), __func__);
  3290. if (rc)
  3291. return rc;
  3292. return rc;
  3293. }
  3294. static int msm_venc_set_csc_coeff(struct msm_vidc_inst *inst,
  3295. const char *prop_name, u32 hfi_id, void *payload,
  3296. u32 payload_size, u32 row_count, u32 column_count)
  3297. {
  3298. int rc = 0;
  3299. i_vpr_h(inst,
  3300. "set cap: name: %24s, hard coded %dx%d matrix array\n",
  3301. prop_name, row_count, column_count);
  3302. rc = venus_hfi_session_property(inst,
  3303. hfi_id,
  3304. HFI_HOST_FLAGS_NONE,
  3305. HFI_PORT_BITSTREAM,
  3306. HFI_PAYLOAD_S32_ARRAY,
  3307. payload,
  3308. payload_size);
  3309. if (rc) {
  3310. i_vpr_e(inst,
  3311. "%s: failed to set %s to fw\n",
  3312. __func__, prop_name);
  3313. }
  3314. return rc;
  3315. }
  3316. int msm_vidc_set_csc_custom_matrix(void *instance,
  3317. enum msm_vidc_inst_capability_type cap_id)
  3318. {
  3319. int rc = 0;
  3320. int i;
  3321. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  3322. struct msm_vidc_core *core;
  3323. struct msm_vidc_csc_coeff *csc_coeff;
  3324. s32 matrix_payload[MAX_MATRIX_COEFFS + 2];
  3325. s32 csc_bias_payload[MAX_BIAS_COEFFS + 2];
  3326. s32 csc_limit_payload[MAX_LIMIT_COEFFS + 2];
  3327. if (!inst || !inst->capabilities || !inst->core) {
  3328. d_vpr_e("%s: invalid params\n", __func__);
  3329. return -EINVAL;
  3330. }
  3331. core = inst->core;
  3332. if (!core->platform) {
  3333. d_vpr_e("%s: invalid core platform\n", __func__);
  3334. return -EINVAL;
  3335. }
  3336. csc_coeff = &core->platform->data.csc_data;
  3337. if (!inst->capabilities->cap[cap_id].value ||
  3338. !inst->capabilities->cap[CSC].value) {
  3339. i_vpr_h(inst,
  3340. "%s: ignored as custom martix %u, csc %u\n",
  3341. __func__, inst->capabilities->cap[cap_id].value,
  3342. inst->capabilities->cap[CSC].value);
  3343. return 0;
  3344. }
  3345. /*
  3346. * first 2 u32's of payload in each case are for
  3347. * row and column count, next remaining u32's are
  3348. * for the actual payload values.
  3349. */
  3350. /* set custom matrix */
  3351. matrix_payload[0] = 3;
  3352. matrix_payload[1] = 3;
  3353. for(i = 0; i < MAX_MATRIX_COEFFS; i++) {
  3354. if ((i + 2) >= ARRAY_SIZE(matrix_payload))
  3355. break;
  3356. matrix_payload[i + 2] =
  3357. csc_coeff->vpe_csc_custom_matrix_coeff[i];
  3358. }
  3359. rc = msm_venc_set_csc_coeff(inst, "CSC_CUSTOM_MATRIX",
  3360. HFI_PROP_CSC_MATRIX, &matrix_payload[0],
  3361. ARRAY_SIZE(matrix_payload) * sizeof(s32),
  3362. matrix_payload[0], matrix_payload[1]);
  3363. if (rc)
  3364. return rc;
  3365. /* set csc bias */
  3366. csc_bias_payload[0] = 1;
  3367. csc_bias_payload[1] = 3;
  3368. for(i = 0; i < MAX_BIAS_COEFFS; i++) {
  3369. if ((i + 2) >= ARRAY_SIZE(csc_bias_payload))
  3370. break;
  3371. csc_bias_payload[i + 2] =
  3372. csc_coeff->vpe_csc_custom_bias_coeff[i];
  3373. }
  3374. rc = msm_venc_set_csc_coeff(inst, "CSC_BIAS",
  3375. HFI_PROP_CSC_BIAS, &csc_bias_payload[0],
  3376. ARRAY_SIZE(csc_bias_payload) * sizeof(s32),
  3377. csc_bias_payload[0], csc_bias_payload[1]);
  3378. if (rc)
  3379. return rc;
  3380. /* set csc limit */
  3381. csc_limit_payload[0] = 1;
  3382. csc_limit_payload[1] = 6;
  3383. for(i = 0; i < MAX_LIMIT_COEFFS; i++) {
  3384. if ((i + 2) >= ARRAY_SIZE(csc_limit_payload))
  3385. break;
  3386. csc_limit_payload[i + 2] =
  3387. csc_coeff->vpe_csc_custom_limit_coeff[i];
  3388. }
  3389. rc = msm_venc_set_csc_coeff(inst, "CSC_LIMIT",
  3390. HFI_PROP_CSC_LIMIT, &csc_limit_payload[0],
  3391. ARRAY_SIZE(csc_limit_payload) * sizeof(s32),
  3392. csc_limit_payload[0], csc_limit_payload[1]);
  3393. if (rc)
  3394. return rc;
  3395. return rc;
  3396. }
  3397. int msm_vidc_set_reserve_duration(void *instance,
  3398. enum msm_vidc_inst_capability_type cap_id)
  3399. {
  3400. int rc = 0;
  3401. u32 hfi_value = 0;
  3402. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  3403. if (!inst || !inst->capabilities) {
  3404. d_vpr_e("%s: invalid params\n", __func__);
  3405. return -EINVAL;
  3406. }
  3407. /* reserve hardware only when input port is streaming*/
  3408. if (!inst->bufq[INPUT_PORT].vb2q->streaming)
  3409. return 0;
  3410. if (!(inst->capabilities->cap[cap_id].flags & CAP_FLAG_CLIENT_SET))
  3411. return 0;
  3412. inst->capabilities->cap[cap_id].flags &= (~CAP_FLAG_CLIENT_SET);
  3413. if (!is_critical_priority_session(inst)) {
  3414. i_vpr_h(inst, "%s: reserve duration allowed only for critical session\n", __func__);
  3415. return 0;
  3416. }
  3417. hfi_value = inst->capabilities->cap[cap_id].value;
  3418. rc = venus_hfi_reserve_hardware(inst, hfi_value);
  3419. if (rc)
  3420. return rc;
  3421. return rc;
  3422. }
  3423. int msm_vidc_set_level(void *instance,
  3424. enum msm_vidc_inst_capability_type cap_id)
  3425. {
  3426. int rc = 0;
  3427. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  3428. u32 hfi_value = 0;
  3429. if (!inst || !inst->capabilities) {
  3430. d_vpr_e("%s: invalid params\n", __func__);
  3431. return -EINVAL;
  3432. }
  3433. hfi_value = inst->capabilities->cap[cap_id].value;
  3434. if (!(inst->capabilities->cap[cap_id].flags & CAP_FLAG_CLIENT_SET))
  3435. hfi_value = HFI_LEVEL_NONE;
  3436. rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32_ENUM,
  3437. &hfi_value, sizeof(u32), __func__);
  3438. if (rc)
  3439. return rc;
  3440. return rc;
  3441. }
  3442. int msm_vidc_set_q16(void *instance,
  3443. enum msm_vidc_inst_capability_type cap_id)
  3444. {
  3445. int rc = 0;
  3446. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  3447. u32 hfi_value = 0;
  3448. if (!inst || !inst->capabilities) {
  3449. d_vpr_e("%s: invalid params\n", __func__);
  3450. return -EINVAL;
  3451. }
  3452. hfi_value = inst->capabilities->cap[cap_id].value;
  3453. rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_Q16,
  3454. &hfi_value, sizeof(u32), __func__);
  3455. if (rc)
  3456. return rc;
  3457. return rc;
  3458. }
  3459. int msm_vidc_set_u32(void *instance,
  3460. enum msm_vidc_inst_capability_type cap_id)
  3461. {
  3462. int rc = 0;
  3463. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  3464. u32 hfi_value;
  3465. if (!inst || !inst->capabilities) {
  3466. d_vpr_e("%s: invalid params\n", __func__);
  3467. return -EINVAL;
  3468. }
  3469. if (inst->capabilities->cap[cap_id].flags & CAP_FLAG_MENU) {
  3470. rc = msm_vidc_v4l2_menu_to_hfi(inst, cap_id, &hfi_value);
  3471. if (rc)
  3472. return -EINVAL;
  3473. } else {
  3474. hfi_value = inst->capabilities->cap[cap_id].value;
  3475. }
  3476. rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32,
  3477. &hfi_value, sizeof(u32), __func__);
  3478. if (rc)
  3479. return rc;
  3480. return rc;
  3481. }
  3482. int msm_vidc_set_u32_packed(void *instance,
  3483. enum msm_vidc_inst_capability_type cap_id)
  3484. {
  3485. int rc = 0;
  3486. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  3487. u32 hfi_value;
  3488. if (!inst || !inst->capabilities) {
  3489. d_vpr_e("%s: invalid params\n", __func__);
  3490. return -EINVAL;
  3491. }
  3492. if (inst->capabilities->cap[cap_id].flags & CAP_FLAG_MENU) {
  3493. rc = msm_vidc_v4l2_menu_to_hfi(inst, cap_id, &hfi_value);
  3494. if (rc)
  3495. return -EINVAL;
  3496. } else {
  3497. hfi_value = inst->capabilities->cap[cap_id].value;
  3498. }
  3499. rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_32_PACKED,
  3500. &hfi_value, sizeof(u32), __func__);
  3501. if (rc)
  3502. return rc;
  3503. return rc;
  3504. }
  3505. int msm_vidc_set_u32_enum(void *instance,
  3506. enum msm_vidc_inst_capability_type cap_id)
  3507. {
  3508. int rc = 0;
  3509. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  3510. u32 hfi_value;
  3511. if (!inst || !inst->capabilities) {
  3512. d_vpr_e("%s: invalid params\n", __func__);
  3513. return -EINVAL;
  3514. }
  3515. rc = msm_vidc_v4l2_to_hfi_enum(inst, cap_id, &hfi_value);
  3516. if (rc)
  3517. return -EINVAL;
  3518. rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32_ENUM,
  3519. &hfi_value, sizeof(u32), __func__);
  3520. if (rc)
  3521. return rc;
  3522. return rc;
  3523. }
  3524. int msm_vidc_set_s32(void *instance,
  3525. enum msm_vidc_inst_capability_type cap_id)
  3526. {
  3527. int rc = 0;
  3528. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  3529. s32 hfi_value = 0;
  3530. if (!inst || !inst->capabilities) {
  3531. d_vpr_e("%s: invalid params\n", __func__);
  3532. return -EINVAL;
  3533. }
  3534. hfi_value = inst->capabilities->cap[cap_id].value;
  3535. rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_S32,
  3536. &hfi_value, sizeof(s32), __func__);
  3537. if (rc)
  3538. return rc;
  3539. return rc;
  3540. }
  3541. int msm_vidc_v4l2_menu_to_hfi(struct msm_vidc_inst *inst,
  3542. enum msm_vidc_inst_capability_type cap_id, u32 *value)
  3543. {
  3544. struct msm_vidc_inst_capability *capability = inst->capabilities;
  3545. switch (cap_id) {
  3546. case ENTROPY_MODE:
  3547. switch (capability->cap[cap_id].value) {
  3548. case V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC:
  3549. *value = 1;
  3550. break;
  3551. case V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC:
  3552. *value = 0;
  3553. break;
  3554. default:
  3555. *value = 1;
  3556. goto set_default;
  3557. }
  3558. return 0;
  3559. default:
  3560. i_vpr_e(inst,
  3561. "%s: mapping not specified for ctrl_id: %#x\n",
  3562. __func__, capability->cap[cap_id].v4l2_id);
  3563. return -EINVAL;
  3564. }
  3565. set_default:
  3566. i_vpr_e(inst,
  3567. "%s: invalid value %d for ctrl id: %#x. Set default: %u\n",
  3568. __func__, capability->cap[cap_id].value,
  3569. capability->cap[cap_id].v4l2_id, *value);
  3570. return 0;
  3571. }
  3572. int msm_vidc_v4l2_to_hfi_enum(struct msm_vidc_inst *inst,
  3573. enum msm_vidc_inst_capability_type cap_id, u32 *value)
  3574. {
  3575. struct msm_vidc_inst_capability *capability = inst->capabilities;
  3576. switch (cap_id) {
  3577. case BITRATE_MODE:
  3578. *value = inst->hfi_rc_type;
  3579. return 0;
  3580. case PROFILE:
  3581. case LEVEL:
  3582. case HEVC_TIER:
  3583. case AV1_TIER:
  3584. case BLUR_TYPES:
  3585. *value = capability->cap[cap_id].value;
  3586. return 0;
  3587. case LAYER_TYPE:
  3588. if (inst->codec == MSM_VIDC_HEVC) {
  3589. switch (capability->cap[cap_id].value) {
  3590. case V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_B:
  3591. *value = HFI_HIER_B;
  3592. break;
  3593. case V4L2_MPEG_VIDEO_HEVC_HIERARCHICAL_CODING_P:
  3594. //TODO (AS): check if this is right mapping
  3595. *value = HFI_HIER_P_SLIDING_WINDOW;
  3596. break;
  3597. default:
  3598. *value = HFI_HIER_P_SLIDING_WINDOW;
  3599. goto set_default;
  3600. }
  3601. }
  3602. return 0;
  3603. case ROTATION:
  3604. switch (capability->cap[cap_id].value) {
  3605. case 0:
  3606. *value = HFI_ROTATION_NONE;
  3607. break;
  3608. case 90:
  3609. *value = HFI_ROTATION_90;
  3610. break;
  3611. case 180:
  3612. *value = HFI_ROTATION_180;
  3613. break;
  3614. case 270:
  3615. *value = HFI_ROTATION_270;
  3616. break;
  3617. default:
  3618. *value = HFI_ROTATION_NONE;
  3619. goto set_default;
  3620. }
  3621. return 0;
  3622. case LF_MODE:
  3623. if (inst->codec == MSM_VIDC_HEVC) {
  3624. switch (capability->cap[cap_id].value) {
  3625. case V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_ENABLED:
  3626. *value = HFI_DEBLOCK_ALL_BOUNDARY;
  3627. break;
  3628. case V4L2_MPEG_VIDEO_HEVC_LOOP_FILTER_MODE_DISABLED:
  3629. *value = HFI_DEBLOCK_DISABLE;
  3630. break;
  3631. case DB_HEVC_DISABLE_SLICE_BOUNDARY:
  3632. *value = HFI_DEBLOCK_DISABLE_AT_SLICE_BOUNDARY;
  3633. break;
  3634. default:
  3635. *value = HFI_DEBLOCK_ALL_BOUNDARY;
  3636. goto set_default;
  3637. }
  3638. } else if (inst->codec == MSM_VIDC_H264) {
  3639. switch (capability->cap[cap_id].value) {
  3640. case V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED:
  3641. *value = HFI_DEBLOCK_ALL_BOUNDARY;
  3642. break;
  3643. case V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED:
  3644. *value = HFI_DEBLOCK_DISABLE;
  3645. break;
  3646. case DB_H264_DISABLE_SLICE_BOUNDARY:
  3647. *value = HFI_DEBLOCK_DISABLE_AT_SLICE_BOUNDARY;
  3648. break;
  3649. default:
  3650. *value = HFI_DEBLOCK_ALL_BOUNDARY;
  3651. goto set_default;
  3652. }
  3653. }
  3654. return 0;
  3655. case NAL_LENGTH_FIELD:
  3656. switch (capability->cap[cap_id].value) {
  3657. case V4L2_MPEG_VIDEO_HEVC_SIZE_4:
  3658. *value = HFI_NAL_LENGTH_SIZE_4;
  3659. break;
  3660. default:
  3661. *value = HFI_NAL_LENGTH_STARTCODES;
  3662. goto set_default;
  3663. }
  3664. return 0;
  3665. default:
  3666. i_vpr_e(inst,
  3667. "%s: mapping not specified for ctrl_id: %#x\n",
  3668. __func__, capability->cap[cap_id].v4l2_id);
  3669. return -EINVAL;
  3670. }
  3671. set_default:
  3672. i_vpr_e(inst,
  3673. "%s: invalid value %d for ctrl id: %#x. Set default: %u\n",
  3674. __func__, capability->cap[cap_id].value,
  3675. capability->cap[cap_id].v4l2_id, *value);
  3676. return 0;
  3677. }
  3678. int msm_vidc_set_stage(void *instance,
  3679. enum msm_vidc_inst_capability_type cap_id)
  3680. {
  3681. int rc = 0;
  3682. u32 stage = 0;
  3683. struct msm_vidc_core *core;
  3684. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  3685. if (!inst || !inst->capabilities || !inst->core) {
  3686. d_vpr_e("%s: invalid params\n", __func__);
  3687. return -EINVAL;
  3688. }
  3689. core = inst->core;
  3690. rc = call_session_op(core, decide_work_mode, inst);
  3691. if (rc) {
  3692. i_vpr_e(inst, "%s: decide_work_mode failed\n", __func__);
  3693. return -EINVAL;
  3694. }
  3695. stage = inst->capabilities->cap[STAGE].value;
  3696. rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32,
  3697. &stage, sizeof(u32), __func__);
  3698. if (rc)
  3699. return rc;
  3700. return rc;
  3701. }
  3702. int msm_vidc_set_pipe(void *instance,
  3703. enum msm_vidc_inst_capability_type cap_id)
  3704. {
  3705. int rc = 0;
  3706. u32 pipe;
  3707. struct msm_vidc_core *core;
  3708. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  3709. if (!inst || !inst->capabilities || !inst->core) {
  3710. d_vpr_e("%s: invalid params\n", __func__);
  3711. return -EINVAL;
  3712. }
  3713. core = inst->core;
  3714. rc = call_session_op(core, decide_work_route, inst);
  3715. if (rc) {
  3716. i_vpr_e(inst, "%s: decide_work_route failed\n",
  3717. __func__);
  3718. return -EINVAL;
  3719. }
  3720. pipe = inst->capabilities->cap[PIPE].value;
  3721. rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32,
  3722. &pipe, sizeof(u32), __func__);
  3723. if (rc)
  3724. return rc;
  3725. return rc;
  3726. }
  3727. int msm_vidc_set_vui_timing_info(void *instance,
  3728. enum msm_vidc_inst_capability_type cap_id)
  3729. {
  3730. int rc = 0;
  3731. struct msm_vidc_inst *inst = (struct msm_vidc_inst *)instance;
  3732. u32 hfi_value;
  3733. if (!inst || !inst->capabilities) {
  3734. d_vpr_e("%s: invalid params\n", __func__);
  3735. return -EINVAL;
  3736. }
  3737. /*
  3738. * hfi is HFI_PROP_DISABLE_VUI_TIMING_INFO and v4l2 cap is
  3739. * V4L2_CID_MPEG_VIDC_VUI_TIMING_INFO and hence reverse
  3740. * the hfi_value from cap_id value.
  3741. */
  3742. if (inst->capabilities->cap[cap_id].value == 1)
  3743. hfi_value = 0;
  3744. else
  3745. hfi_value = 1;
  3746. rc = msm_vidc_packetize_control(inst, cap_id, HFI_PAYLOAD_U32,
  3747. &hfi_value, sizeof(u32), __func__);
  3748. if (rc)
  3749. return rc;
  3750. return rc;
  3751. }