// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 */

#include <linux/debugfs.h>
#include <linux/videodev2.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ratelimit.h>

#include "cam_mem_mgr.h"
#include "cam_sync_api.h"
#include "cam_req_mgr_dev.h"
#include "cam_trace.h"
#include "cam_debug_util.h"
#include "cam_packet_util.h"
#include "cam_context_utils.h"
#include "cam_cdm_util.h"
#include "cam_isp_context.h"
#include "cam_common_util.h"
#include "cam_req_mgr_debug.h"
#include "cam_cpas_api.h"

static const char isp_dev_name[] = "cam-isp";

static struct cam_isp_ctx_debug isp_ctx_debug;
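
/*
 * Atomically advance a circular-buffer head counter and return the
 * wrapped write index through @ret. The head is a monotonically
 * increasing atomic64; div_u64_rem() maps it onto [0, max_entries).
 */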
#define INC_HEAD(head, max_entries, ret) \
	div_u64_rem(atomic64_add_return(1, head),\
	max_entries, (ret))

static int cam_isp_context_dump_requests(void *data,
	struct cam_smmu_pf_info *pf_info);

static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx,
	struct cam_start_stop_dev_cmd *cmd);
static const char *__cam_isp_evt_val_to_type(
	uint32_t evt_id)
{
	switch (evt_id) {
	case CAM_ISP_CTX_EVENT_SUBMIT:
		return "SUBMIT";
	case CAM_ISP_CTX_EVENT_APPLY:
		return "APPLY";
	case CAM_ISP_CTX_EVENT_EPOCH:
		return "EPOCH";
	case CAM_ISP_CTX_EVENT_RUP:
		return "RUP";
	case CAM_ISP_CTX_EVENT_BUFDONE:
		return "BUFDONE";
	default:
		return "CAM_ISP_EVENT_INVALID";
	}
}
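
/*
 * Record an event (submit/apply/epoch/rup/bufdone) in the per-event
 * circular history buffer, stamping it with the current ktime. SUBMIT
 * and APPLY must carry a request; the hardware events may be recorded
 * with req == NULL, in which case req_id is logged as 0.
 */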
static void __cam_isp_ctx_update_event_record(
	struct cam_isp_context *ctx_isp,
	enum cam_isp_ctx_event event,
	struct cam_ctx_request *req)
{
	int iterator = 0;
	ktime_t cur_time;
	struct cam_isp_ctx_req *req_isp;

	if (!ctx_isp) {
		CAM_ERR(CAM_ISP, "Invalid Args");
		return;
	}
	switch (event) {
	case CAM_ISP_CTX_EVENT_EPOCH:
	case CAM_ISP_CTX_EVENT_RUP:
	case CAM_ISP_CTX_EVENT_BUFDONE:
		break;
	case CAM_ISP_CTX_EVENT_SUBMIT:
	case CAM_ISP_CTX_EVENT_APPLY:
		if (!req) {
			CAM_ERR(CAM_ISP, "Invalid arg for event %d", event);
			return;
		}
		break;
	default:
		break;
	}

	INC_HEAD(&ctx_isp->event_record_head[event],
		CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES, &iterator);
	cur_time = ktime_get();
	if (req) {
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		ctx_isp->event_record[event][iterator].req_id =
			req->request_id;
		req_isp->event_timestamp[event] = cur_time;
	} else {
		ctx_isp->event_record[event][iterator].req_id = 0;
	}
	ctx_isp->event_record[event][iterator].timestamp = cur_time;
}
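
/*
 * Serialize the event history into a caller-provided dump buffer at
 * *offset: one header per event type, followed by
 * (req_id, tv_sec, tv_usec) triples for each recorded entry, oldest
 * first. Returns -ENOSPC if the remaining buffer cannot hold an
 * event's header plus all of its entries.
 */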
static int __cam_isp_ctx_dump_event_record(
	struct cam_isp_context *ctx_isp,
	uintptr_t cpu_addr,
	size_t buf_len,
	size_t *offset)
{
	int i, j;
	int index;
	size_t remain_len;
	uint8_t *dst;
	uint32_t oldest_entry, num_entries;
	uint32_t min_len;
	uint64_t *addr, *start;
	uint64_t state_head;
	struct timespec64 ts;
	struct cam_isp_context_dump_header *hdr;
	struct cam_isp_context_event_record *record;

	if (!cpu_addr || !buf_len || !offset || !ctx_isp) {
		CAM_ERR(CAM_ISP, "Invalid args %pK %zu %pK %pK",
			cpu_addr, buf_len, offset, ctx_isp);
		return -EINVAL;
	}
	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++) {
		state_head = atomic64_read(&ctx_isp->event_record_head[i]);

		if (state_head == -1) {
			return 0;
		} else if (state_head < CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES) {
			num_entries = state_head + 1;
			oldest_entry = 0;
		} else {
			num_entries = CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES;
			div_u64_rem(state_head + 1,
				CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES,
				&oldest_entry);
		}
		index = oldest_entry;

		if (buf_len <= *offset) {
			CAM_WARN(CAM_ISP,
				"Dump buffer overshoot len %zu offset %zu",
				buf_len, *offset);
			return -ENOSPC;
		}

		min_len = sizeof(struct cam_isp_context_dump_header) +
			((num_entries * CAM_ISP_CTX_DUMP_EVENT_NUM_WORDS) *
			sizeof(uint64_t));
		remain_len = buf_len - *offset;

		if (remain_len < min_len) {
			CAM_WARN(CAM_ISP,
				"Dump buffer exhaust remain %zu min %u",
				remain_len, min_len);
			return -ENOSPC;
		}
		dst = (uint8_t *)cpu_addr + *offset;
		hdr = (struct cam_isp_context_dump_header *)dst;
		scnprintf(hdr->tag,
			CAM_ISP_CONTEXT_DUMP_TAG_MAX_LEN, "ISP_EVT_%s:",
			__cam_isp_evt_val_to_type(i));
		hdr->word_size = sizeof(uint64_t);
		addr = (uint64_t *)(dst +
			sizeof(struct cam_isp_context_dump_header));
		start = addr;
		for (j = 0; j < num_entries; j++) {
			record = &ctx_isp->event_record[i][index];
			ts = ktime_to_timespec64(record->timestamp);
			*addr++ = record->req_id;
			*addr++ = ts.tv_sec;
			*addr++ = ts.tv_nsec / NSEC_PER_USEC;
			index = (index + 1) %
				CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES;
		}
		hdr->size = hdr->word_size * (addr - start);
		*offset += hdr->size +
			sizeof(struct cam_isp_context_dump_header);
	}
	return 0;
}
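
/*
 * Log a substate transition in the state monitor ring: the substate
 * currently active, the frame id, the trigger that caused the change,
 * the request id, and a millisecond timestamp relative to context
 * init. Consumed by __cam_isp_ctx_dump_state_monitor_array() on
 * error paths.
 */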
static void __cam_isp_ctx_update_state_monitor_array(
	struct cam_isp_context *ctx_isp,
	enum cam_isp_state_change_trigger trigger_type,
	uint64_t req_id)
{
	int iterator;

	INC_HEAD(&ctx_isp->state_monitor_head,
		CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES, &iterator);

	ctx_isp->cam_isp_ctx_state_monitor[iterator].curr_state =
		ctx_isp->substate_activated;
	ctx_isp->cam_isp_ctx_state_monitor[iterator].frame_id =
		ctx_isp->frame_id;
	ctx_isp->cam_isp_ctx_state_monitor[iterator].trigger =
		trigger_type;
	ctx_isp->cam_isp_ctx_state_monitor[iterator].req_id =
		req_id;
	ctx_isp->cam_isp_ctx_state_monitor[iterator].evt_time_stamp =
		jiffies_to_msecs(jiffies) - ctx_isp->init_timestamp;
}
static const char *__cam_isp_ctx_substate_val_to_type(
	enum cam_isp_ctx_activated_substate type)
{
	switch (type) {
	case CAM_ISP_CTX_ACTIVATED_SOF:
		return "SOF";
	case CAM_ISP_CTX_ACTIVATED_APPLIED:
		return "APPLIED";
	case CAM_ISP_CTX_ACTIVATED_EPOCH:
		return "EPOCH";
	case CAM_ISP_CTX_ACTIVATED_BUBBLE:
		return "BUBBLE";
	case CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED:
		return "BUBBLE_APPLIED";
	case CAM_ISP_CTX_ACTIVATED_HW_ERROR:
		return "HW_ERROR";
	case CAM_ISP_CTX_ACTIVATED_HALT:
		return "HALT";
	default:
		return "INVALID";
	}
}

static const char *__cam_isp_hw_evt_val_to_type(
	uint32_t evt_id)
{
	switch (evt_id) {
	case CAM_ISP_STATE_CHANGE_TRIGGER_ERROR:
		return "ERROR";
	case CAM_ISP_STATE_CHANGE_TRIGGER_APPLIED:
		return "APPLIED";
	case CAM_ISP_STATE_CHANGE_TRIGGER_SOF:
		return "SOF";
	case CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE:
		return "REG_UPDATE";
	case CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH:
		return "EPOCH";
	case CAM_ISP_STATE_CHANGE_TRIGGER_EOF:
		return "EOF";
	case CAM_ISP_STATE_CHANGE_TRIGGER_DONE:
		return "DONE";
	case CAM_ISP_STATE_CHANGE_TRIGGER_FLUSH:
		return "FLUSH";
	default:
		return "CAM_ISP_EVENT_INVALID";
	}
}
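
/*
 * Dump the state monitor ring to the kernel log, oldest entry first.
 * Called on error paths to reconstruct the substate/trigger history
 * of the requests that preceded the failure.
 */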
static void __cam_isp_ctx_dump_state_monitor_array(
	struct cam_isp_context *ctx_isp)
{
	int i = 0;
	int64_t state_head = 0;
	uint32_t index, num_entries, oldest_entry;

	state_head = atomic64_read(&ctx_isp->state_monitor_head);

	if (state_head == -1) {
		return;
	} else if (state_head < CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES) {
		num_entries = state_head;
		oldest_entry = 0;
	} else {
		num_entries = CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES;
		div_u64_rem(state_head + 1,
			CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES, &oldest_entry);
	}

	CAM_ERR(CAM_ISP,
		"Dumping state information for preceding requests");

	index = oldest_entry;
	for (i = 0; i < num_entries; i++) {
		CAM_ERR(CAM_ISP,
			"Index[%d] time[%d] : Substate[%s] Frame[%lld] ReqId[%llu] evt_type[%s]",
			index,
			ctx_isp->cam_isp_ctx_state_monitor[index].evt_time_stamp,
			__cam_isp_ctx_substate_val_to_type(
				ctx_isp->cam_isp_ctx_state_monitor[index].curr_state),
			ctx_isp->cam_isp_ctx_state_monitor[index].frame_id,
			ctx_isp->cam_isp_ctx_state_monitor[index].req_id,
			__cam_isp_hw_evt_val_to_type(
				ctx_isp->cam_isp_ctx_state_monitor[index].trigger));

		index = (index + 1) % CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES;
	}
}
static int cam_isp_context_info_dump(void *context,
	enum cam_context_dump_id id)
{
	struct cam_context *ctx = (struct cam_context *)context;

	switch (id) {
	case CAM_CTX_DUMP_ACQ_INFO: {
		cam_context_dump_hw_acq_info(ctx);
		break;
	}
	default:
		CAM_DBG(CAM_ISP, "DUMP id not valid %u", id);
		break;
	}

	return 0;
}
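
/*
 * Walk every command buffer of a request and dump its contents,
 * either to the kernel log via cam_cdm_util_dump_cmd_buf() or, when
 * dump_to_buff is set, into the caller's buffer through
 * cam_cdm_util_dump_cmd_bufs_v2(). Each descriptor's offset/length
 * is validated against the mapped buffer size before dumping.
 */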
static int cam_isp_ctx_dump_req(
	struct cam_isp_ctx_req *req_isp,
	uintptr_t cpu_addr,
	size_t buf_len,
	size_t *offset,
	bool dump_to_buff)
{
	int i, rc = 0;
	size_t len = 0;
	uint32_t *buf_addr;
	uint32_t *buf_start, *buf_end;
	size_t remain_len = 0;
	struct cam_cdm_cmd_buf_dump_info dump_info;

	for (i = 0; i < req_isp->num_cfg; i++) {
		rc = cam_packet_util_get_cmd_mem_addr(
			req_isp->cfg[i].handle, &buf_addr, &len);
		if (rc) {
			CAM_ERR_RATE_LIMIT(CAM_ISP,
				"Failed to get_cmd_mem_addr, rc=%d",
				rc);
		} else {
			if (req_isp->cfg[i].offset >= ((uint32_t)len)) {
				CAM_ERR(CAM_ISP,
					"Invalid offset exp %u actual %u",
					req_isp->cfg[i].offset, (uint32_t)len);
				return -EINVAL;
			}
			remain_len = len - req_isp->cfg[i].offset;

			if (req_isp->cfg[i].len >
				((uint32_t)remain_len)) {
				CAM_ERR(CAM_ISP,
					"Invalid len exp %u remain_len %u",
					req_isp->cfg[i].len,
					(uint32_t)remain_len);
				return -EINVAL;
			}

			buf_start = (uint32_t *)((uint8_t *)buf_addr +
				req_isp->cfg[i].offset);
			buf_end = (uint32_t *)((uint8_t *)buf_start +
				req_isp->cfg[i].len - 1);

			if (dump_to_buff) {
				if (!cpu_addr || !offset || !buf_len) {
					CAM_ERR(CAM_ISP, "Invalid args");
					break;
				}
				dump_info.src_start = buf_start;
				dump_info.src_end = buf_end;
				dump_info.dst_start = cpu_addr;
				dump_info.dst_offset = *offset;
				dump_info.dst_max_size = buf_len;
				rc = cam_cdm_util_dump_cmd_bufs_v2(
					&dump_info);
				*offset = dump_info.dst_offset;
				if (rc)
					return rc;
			} else
				cam_cdm_util_dump_cmd_buf(buf_start, buf_end);
		}
	}
	return rc;
}
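
/*
 * Insert a request into the pending list sorted by request_id. The
 * list is walked from the tail; entries with a larger id are moved
 * aside to a temp list, the new request is appended, and the moved
 * entries are re-appended behind it. A duplicate id only logs a
 * warning; the request is still queued.
 */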
static int __cam_isp_ctx_enqueue_request_in_order(
	struct cam_context *ctx, struct cam_ctx_request *req)
{
	struct cam_ctx_request *req_current;
	struct cam_ctx_request *req_prev;
	struct list_head temp_list;
	struct cam_isp_context *ctx_isp;

	INIT_LIST_HEAD(&temp_list);
	spin_lock_bh(&ctx->lock);
	if (list_empty(&ctx->pending_req_list)) {
		list_add_tail(&req->list, &ctx->pending_req_list);
	} else {
		list_for_each_entry_safe_reverse(
			req_current, req_prev, &ctx->pending_req_list, list) {
			if (req->request_id < req_current->request_id) {
				list_del_init(&req_current->list);
				list_add(&req_current->list, &temp_list);
				continue;
			} else if (req->request_id == req_current->request_id) {
				CAM_WARN(CAM_ISP,
					"Received duplicated request %lld",
					req->request_id);
			}
			break;
		}
		list_add_tail(&req->list, &ctx->pending_req_list);

		if (!list_empty(&temp_list)) {
			list_for_each_entry_safe(
				req_current, req_prev, &temp_list, list) {
				list_del_init(&req_current->list);
				list_add_tail(&req_current->list,
					&ctx->pending_req_list);
			}
		}
	}
	ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
	__cam_isp_ctx_update_event_record(ctx_isp,
		CAM_ISP_CTX_EVENT_SUBMIT, req);
	spin_unlock_bh(&ctx->lock);
	return 0;
}
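
/*
 * Queue an INIT request. If an INIT packet is already pending, merge
 * the new request into it: fence maps are copied over, the new config
 * entries are appended (bounded by max_hw_update_entries), reg-dump
 * descriptors and frame header params are carried across, and the old
 * request adopts the new request_id while the new one is freed.
 */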
static int __cam_isp_ctx_enqueue_init_request(
	struct cam_context *ctx, struct cam_ctx_request *req)
{
	int rc = 0;
	struct cam_ctx_request *req_old;
	struct cam_isp_ctx_req *req_isp_old;
	struct cam_isp_ctx_req *req_isp_new;
	struct cam_isp_prepare_hw_update_data *req_update_old;
	struct cam_isp_prepare_hw_update_data *req_update_new;
	struct cam_isp_prepare_hw_update_data *hw_update_data;

	spin_lock_bh(&ctx->lock);
	if (list_empty(&ctx->pending_req_list)) {
		list_add_tail(&req->list, &ctx->pending_req_list);
		CAM_DBG(CAM_ISP, "INIT packet added req id= %lld",
			req->request_id);
		goto end;
	}

	req_old = list_first_entry(&ctx->pending_req_list,
		struct cam_ctx_request, list);
	req_isp_old = (struct cam_isp_ctx_req *) req_old->req_priv;
	req_isp_new = (struct cam_isp_ctx_req *) req->req_priv;
	if (req_isp_old->hw_update_data.packet_opcode_type ==
		CAM_ISP_PACKET_INIT_DEV) {
		if ((req_isp_old->num_cfg + req_isp_new->num_cfg) >=
			ctx->max_hw_update_entries) {
			CAM_WARN(CAM_ISP,
				"Can not merge INIT pkt num_cfgs = %d",
				(req_isp_old->num_cfg +
					req_isp_new->num_cfg));
			rc = -ENOMEM;
		}

		if (req_isp_old->num_fence_map_out != 0 ||
			req_isp_old->num_fence_map_in != 0) {
			CAM_WARN(CAM_ISP, "Invalid INIT pkt sequence");
			rc = -EINVAL;
		}

		if (!rc) {
			memcpy(req_isp_old->fence_map_out,
				req_isp_new->fence_map_out,
				sizeof(req_isp_new->fence_map_out[0]) *
				req_isp_new->num_fence_map_out);
			req_isp_old->num_fence_map_out =
				req_isp_new->num_fence_map_out;

			memcpy(req_isp_old->fence_map_in,
				req_isp_new->fence_map_in,
				sizeof(req_isp_new->fence_map_in[0]) *
				req_isp_new->num_fence_map_in);
			req_isp_old->num_fence_map_in =
				req_isp_new->num_fence_map_in;

			memcpy(&req_isp_old->cfg[req_isp_old->num_cfg],
				req_isp_new->cfg,
				sizeof(req_isp_new->cfg[0]) *
				req_isp_new->num_cfg);
			req_isp_old->num_cfg += req_isp_new->num_cfg;

			memcpy(&req_old->pf_data, &req->pf_data,
				sizeof(struct cam_hw_mgr_dump_pf_data));

			if (req_isp_new->hw_update_data.num_reg_dump_buf) {
				req_update_new = &req_isp_new->hw_update_data;
				req_update_old = &req_isp_old->hw_update_data;
				memcpy(&req_update_old->reg_dump_buf_desc,
					&req_update_new->reg_dump_buf_desc,
					sizeof(struct cam_cmd_buf_desc) *
					req_update_new->num_reg_dump_buf);
				req_update_old->num_reg_dump_buf =
					req_update_new->num_reg_dump_buf;
			}

			/* Update frame header params for EPCR */
			hw_update_data = &req_isp_new->hw_update_data;
			req_isp_old->hw_update_data.frame_header_res_id =
				req_isp_new->hw_update_data.frame_header_res_id;
			req_isp_old->hw_update_data.frame_header_cpu_addr =
				hw_update_data->frame_header_cpu_addr;
			req_old->request_id = req->request_id;

			list_add_tail(&req->list, &ctx->free_req_list);
		}
	} else {
		CAM_WARN(CAM_ISP,
			"Received Update pkt before INIT pkt. req_id= %lld",
			req->request_id);
		rc = -EINVAL;
	}
end:
	spin_unlock_bh(&ctx->lock);
	return rc;
}
static const char *__cam_isp_resource_handle_id_to_type(
	uint32_t resource_handle)
{
	switch (resource_handle) {
	case CAM_ISP_IFE_OUT_RES_FULL:
		return "FULL";
	case CAM_ISP_IFE_OUT_RES_DS4:
		return "DS4";
	case CAM_ISP_IFE_OUT_RES_DS16:
		return "DS16";
	case CAM_ISP_IFE_OUT_RES_RAW_DUMP:
		return "RAW_DUMP";
	case CAM_ISP_IFE_OUT_RES_FD:
		return "FD";
	case CAM_ISP_IFE_OUT_RES_PDAF:
		return "PDAF";
	case CAM_ISP_IFE_OUT_RES_RDI_0:
		return "RDI_0";
	case CAM_ISP_IFE_OUT_RES_RDI_1:
		return "RDI_1";
	case CAM_ISP_IFE_OUT_RES_RDI_2:
		return "RDI_2";
	case CAM_ISP_IFE_OUT_RES_RDI_3:
		return "RDI_3";
	case CAM_ISP_IFE_OUT_RES_STATS_HDR_BE:
		return "STATS_HDR_BE";
	case CAM_ISP_IFE_OUT_RES_STATS_HDR_BHIST:
		return "STATS_HDR_BHIST";
	case CAM_ISP_IFE_OUT_RES_STATS_TL_BG:
		return "STATS_TL_BG";
	case CAM_ISP_IFE_OUT_RES_STATS_BF:
		return "STATS_BF";
	case CAM_ISP_IFE_OUT_RES_STATS_AWB_BG:
		return "STATS_AWB_BG";
	case CAM_ISP_IFE_OUT_RES_STATS_BHIST:
		return "STATS_BHIST";
	case CAM_ISP_IFE_OUT_RES_STATS_RS:
		return "STATS_RS";
	case CAM_ISP_IFE_OUT_RES_STATS_CS:
		return "STATS_CS";
	case CAM_ISP_IFE_OUT_RES_STATS_IHIST:
		return "STATS_IHIST";
	case CAM_ISP_IFE_OUT_RES_FULL_DISP:
		return "FULL_DISP";
	case CAM_ISP_IFE_OUT_RES_DS4_DISP:
		return "DS4_DISP";
	case CAM_ISP_IFE_OUT_RES_DS16_DISP:
		return "DS16_DISP";
	case CAM_ISP_IFE_OUT_RES_2PD:
		return "2PD";
	case CAM_ISP_IFE_OUT_RES_RDI_RD:
		return "RDI_RD";
	case CAM_ISP_IFE_OUT_RES_LCR:
		return "LCR";
	case CAM_ISP_IFE_OUT_RES_AWB_BFW:
		return "AWB_BFW";
	case CAM_ISP_IFE_OUT_RES_PREPROCESS_2PD:
		return "PREPROCESS_2PD";
	case CAM_ISP_IFE_OUT_RES_STATS_AEC_BE:
		return "STATS_AEC_BE";
	case CAM_ISP_IFE_OUT_RES_LTM_STATS:
		return "LTM_STATS";
	case CAM_ISP_IFE_OUT_RES_STATS_GTM_BHIST:
		return "STATS_GTM_BHIST";
	case CAM_ISP_IFE_LITE_OUT_RES_STATS_BG:
		return "STATS_BG";
	case CAM_ISP_IFE_LITE_OUT_RES_PREPROCESS_RAW:
		return "PREPROCESS_RAW";
	case CAM_ISP_IFE_OUT_RES_SPARSE_PD:
		return "SPARSE_PD";
	default:
		return "CAM_ISP_Invalid_Resource_Type";
	}
}
static const char *__cam_isp_tfe_resource_handle_id_to_type(
	uint32_t resource_handle)
{
	switch (resource_handle) {
	case CAM_ISP_TFE_OUT_RES_FULL:
		return "FULL";
	case CAM_ISP_TFE_OUT_RES_RAW_DUMP:
		return "RAW_DUMP";
	case CAM_ISP_TFE_OUT_RES_PDAF:
		return "PDAF";
	case CAM_ISP_TFE_OUT_RES_RDI_0:
		return "RDI_0";
	case CAM_ISP_TFE_OUT_RES_RDI_1:
		return "RDI_1";
	case CAM_ISP_TFE_OUT_RES_RDI_2:
		return "RDI_2";
	case CAM_ISP_TFE_OUT_RES_STATS_HDR_BE:
		return "STATS_HDR_BE";
	case CAM_ISP_TFE_OUT_RES_STATS_HDR_BHIST:
		return "STATS_HDR_BHIST";
	case CAM_ISP_TFE_OUT_RES_STATS_TL_BG:
		return "STATS_TL_BG";
	case CAM_ISP_TFE_OUT_RES_STATS_BF:
		return "STATS_BF";
	case CAM_ISP_TFE_OUT_RES_STATS_AWB_BG:
		return "STATS_AWB_BG";
	case CAM_ISP_TFE_OUT_RES_STATS_RS:
		return "STATS_RS";
	case CAM_ISP_TFE_OUT_RES_DS4:
		return "DS_4";
	case CAM_ISP_TFE_OUT_RES_DS16:
		return "DS_16";
	case CAM_ISP_TFE_OUT_RES_AI:
		return "AI";
	default:
		return "CAM_ISP_Invalid_Resource_Type";
	}
}
static uint64_t __cam_isp_ctx_get_event_ts(uint32_t evt_id, void *evt_data)
{
	uint64_t ts = 0;

	if (!evt_data)
		return 0;

	switch (evt_id) {
	case CAM_ISP_HW_EVENT_ERROR:
		ts = ((struct cam_isp_hw_error_event_data *)evt_data)->
			timestamp;
		break;
	case CAM_ISP_HW_EVENT_SOF:
		ts = ((struct cam_isp_hw_sof_event_data *)evt_data)->
			timestamp;
		break;
	case CAM_ISP_HW_EVENT_REG_UPDATE:
		ts = ((struct cam_isp_hw_reg_update_event_data *)evt_data)->
			timestamp;
		break;
	case CAM_ISP_HW_EVENT_EPOCH:
		ts = ((struct cam_isp_hw_epoch_event_data *)evt_data)->
			timestamp;
		break;
	case CAM_ISP_HW_EVENT_EOF:
		ts = ((struct cam_isp_hw_eof_event_data *)evt_data)->
			timestamp;
		break;
	case CAM_ISP_HW_EVENT_DONE:
		break;
	default:
		CAM_DBG(CAM_ISP, "Invalid Event Type %d", evt_id);
	}

	return ts;
}
static void __cam_isp_ctx_send_sof_boot_timestamp(
	struct cam_isp_context *ctx_isp, uint64_t request_id,
	uint32_t sof_event_status)
{
	struct cam_req_mgr_message req_msg;

	req_msg.session_hdl = ctx_isp->base->session_hdl;
	req_msg.u.frame_msg.frame_id = ctx_isp->frame_id;
	req_msg.u.frame_msg.request_id = request_id;
	req_msg.u.frame_msg.timestamp = ctx_isp->boot_timestamp;
	req_msg.u.frame_msg.link_hdl = ctx_isp->base->link_hdl;
	req_msg.u.frame_msg.sof_status = sof_event_status;
	req_msg.u.frame_msg.frame_id_meta = ctx_isp->frame_id_meta;

	CAM_DBG(CAM_ISP,
		"request id:%lld frame number:%lld boot time stamp:0x%llx status:%u",
		request_id, ctx_isp->frame_id,
		ctx_isp->boot_timestamp, sof_event_status);

	if (cam_req_mgr_notify_message(&req_msg,
		V4L_EVENT_CAM_REQ_MGR_SOF_BOOT_TS,
		V4L_EVENT_CAM_REQ_MGR_EVENT))
		CAM_ERR(CAM_ISP,
			"Error in notifying the boot time for req id:%lld",
			request_id);
}
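
/*
 * Reconstruct the SOF qtimer value the hardware wrote into the frame
 * header: time32[1] carries the upper bits and the top 24 bits of
 * time32[0] the lower bits, so the tick count is
 * (time32[1] << 24) | (time32[0] >> 8). The ticks are then scaled by
 * the CAM_IFE_QTIMER mul/div factors (presumably qtimer ticks to ns)
 * before being reported to the request manager as the SOF timestamp.
 */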
static void __cam_isp_ctx_send_sof_timestamp_frame_header(
	struct cam_isp_context *ctx_isp, uint32_t *frame_header_cpu_addr,
	uint64_t request_id, uint32_t sof_event_status)
{
	uint32_t *time32 = NULL;
	uint64_t timestamp = 0;
	struct cam_req_mgr_message req_msg;

	time32 = frame_header_cpu_addr;
	timestamp = (uint64_t) time32[1];
	timestamp = timestamp << 24;
	timestamp |= (uint64_t)(time32[0] >> 8);
	timestamp = mul_u64_u32_div(timestamp,
		CAM_IFE_QTIMER_MUL_FACTOR,
		CAM_IFE_QTIMER_DIV_FACTOR);
	ctx_isp->sof_timestamp_val = timestamp;

	req_msg.session_hdl = ctx_isp->base->session_hdl;
	req_msg.u.frame_msg.frame_id = ctx_isp->frame_id;
	req_msg.u.frame_msg.request_id = request_id;
	req_msg.u.frame_msg.timestamp = ctx_isp->sof_timestamp_val;
	req_msg.u.frame_msg.link_hdl = ctx_isp->base->link_hdl;
	req_msg.u.frame_msg.sof_status = sof_event_status;

	CAM_DBG(CAM_ISP,
		"request id:%lld frame number:%lld SOF time stamp:0x%llx status:%u",
		request_id, ctx_isp->frame_id,
		ctx_isp->sof_timestamp_val, sof_event_status);

	if (cam_req_mgr_notify_message(&req_msg,
		V4L_EVENT_CAM_REQ_MGR_SOF, V4L_EVENT_CAM_REQ_MGR_EVENT))
		CAM_ERR(CAM_ISP,
			"Error in notifying the sof time for req id:%lld",
			request_id);
}
static void __cam_isp_ctx_send_sof_timestamp(
	struct cam_isp_context *ctx_isp, uint64_t request_id,
	uint32_t sof_event_status)
{
	struct cam_req_mgr_message req_msg;

	if ((ctx_isp->use_frame_header_ts) || (request_id == 0))
		goto end;

	req_msg.session_hdl = ctx_isp->base->session_hdl;
	req_msg.u.frame_msg.frame_id = ctx_isp->frame_id;
	req_msg.u.frame_msg.request_id = request_id;
	req_msg.u.frame_msg.timestamp = ctx_isp->sof_timestamp_val;
	req_msg.u.frame_msg.link_hdl = ctx_isp->base->link_hdl;
	req_msg.u.frame_msg.sof_status = sof_event_status;
	req_msg.u.frame_msg.frame_id_meta = ctx_isp->frame_id_meta;

	CAM_DBG(CAM_ISP,
		"request id:%lld frame number:%lld SOF time stamp:0x%llx status:%u",
		request_id, ctx_isp->frame_id,
		ctx_isp->sof_timestamp_val, sof_event_status);

	if (cam_req_mgr_notify_message(&req_msg,
		V4L_EVENT_CAM_REQ_MGR_SOF, V4L_EVENT_CAM_REQ_MGR_EVENT))
		CAM_ERR(CAM_ISP,
			"Error in notifying the sof time for req id:%lld",
			request_id);

end:
	__cam_isp_ctx_send_sof_boot_timestamp(ctx_isp,
		request_id, sof_event_status);
}
static void __cam_isp_ctx_handle_buf_done_fail_log(
	uint64_t request_id, struct cam_isp_ctx_req *req_isp,
	uint32_t isp_device_type)
{
	int i;
	const char *handle_type;

	if (req_isp->num_fence_map_out >= CAM_ISP_CTX_RES_MAX) {
		CAM_ERR(CAM_ISP,
			"Num Resources exceed max %d >= %d",
			req_isp->num_fence_map_out, CAM_ISP_CTX_RES_MAX);
		return;
	}

	CAM_WARN_RATE_LIMIT(CAM_ISP,
		"Prev Req[%lld] : num_out=%d, num_acked=%d, bubble : report=%d, detected=%d",
		request_id, req_isp->num_fence_map_out, req_isp->num_acked,
		req_isp->bubble_report, req_isp->bubble_detected);
	CAM_WARN_RATE_LIMIT(CAM_ISP,
		"Resource Handles that fail to generate buf_done in prev frame");
	for (i = 0; i < req_isp->num_fence_map_out; i++) {
		if (req_isp->fence_map_out[i].sync_id != -1) {
			if (isp_device_type == CAM_IFE_DEVICE_TYPE) {
				handle_type =
				__cam_isp_resource_handle_id_to_type(
					req_isp->fence_map_out[i].resource_handle);
				trace_cam_log_event("Buf_done Congestion",
					__cam_isp_resource_handle_id_to_type(
						req_isp->fence_map_out[i].resource_handle),
					request_id, req_isp->fence_map_out[i].sync_id);
			} else {
				handle_type =
				__cam_isp_tfe_resource_handle_id_to_type(
					req_isp->fence_map_out[i].resource_handle);
				trace_cam_log_event("Buf_done Congestion",
					__cam_isp_tfe_resource_handle_id_to_type(
						req_isp->fence_map_out[i].resource_handle),
					request_id, req_isp->fence_map_out[i].sync_id);
			}
			CAM_WARN_RATE_LIMIT(CAM_ISP,
				"Resource_Handle: [%s][0x%x] Sync_ID: [0x%x]",
				handle_type,
				req_isp->fence_map_out[i].resource_handle,
				req_isp->fence_map_out[i].sync_id);
		}
	}
}
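
/*
 * Finish bookkeeping for a request whose fences have all been acked.
 * In the bubble-report path the request is re-queued to the pending
 * list for recovery (or its fences are signalled with error if it was
 * already flushed); otherwise it is moved to the free list and, when
 * frame-header timestamps are not in use, the SOF timestamp for this
 * request id is reported to the request manager.
 */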
static int __cam_isp_ctx_handle_buf_done_for_req_list(
	struct cam_isp_context *ctx_isp,
	struct cam_ctx_request *req)
{
	int rc = 0, i;
	uint64_t buf_done_req_id;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
	ctx_isp->active_req_cnt--;
	buf_done_req_id = req->request_id;

	if (req_isp->bubble_detected && req_isp->bubble_report) {
		req_isp->num_acked = 0;
		req_isp->num_deferred_acks = 0;
		req_isp->bubble_detected = false;
		list_del_init(&req->list);
		atomic_set(&ctx_isp->process_bubble, 0);
		req_isp->cdm_reset_before_apply = false;
		ctx_isp->bubble_frame_cnt = 0;

		if (buf_done_req_id <= ctx->last_flush_req) {
			for (i = 0; i < req_isp->num_fence_map_out; i++)
				rc = cam_sync_signal(
					req_isp->fence_map_out[i].sync_id,
					CAM_SYNC_STATE_SIGNALED_ERROR,
					CAM_SYNC_ISP_EVENT_BUBBLE);

			list_add_tail(&req->list, &ctx->free_req_list);
			CAM_DBG(CAM_REQ,
				"Move active request %lld to free list(cnt = %d) [flushed], ctx %u",
				buf_done_req_id, ctx_isp->active_req_cnt,
				ctx->ctx_id);
			ctx_isp->last_bufdone_err_apply_req_id = 0;
		} else {
			list_add(&req->list, &ctx->pending_req_list);
			CAM_DBG(CAM_REQ,
				"Move active request %lld to pending list(cnt = %d) [bubble recovery], ctx %u",
				req->request_id, ctx_isp->active_req_cnt,
				ctx->ctx_id);
		}
	} else {
		if (!ctx_isp->use_frame_header_ts) {
			if (ctx_isp->reported_req_id < buf_done_req_id) {
				ctx_isp->reported_req_id = buf_done_req_id;
				__cam_isp_ctx_send_sof_timestamp(ctx_isp,
					buf_done_req_id,
					CAM_REQ_MGR_SOF_EVENT_SUCCESS);
			}
		}
		list_del_init(&req->list);
		list_add_tail(&req->list, &ctx->free_req_list);
		req_isp->reapply = false;
		req_isp->cdm_reset_before_apply = false;

		CAM_DBG(CAM_REQ,
			"Move active request %lld to free list(cnt = %d) [all fences done], ctx %u",
			buf_done_req_id, ctx_isp->active_req_cnt, ctx->ctx_id);
		ctx_isp->req_info.last_bufdone_req_id = req->request_id;
		ctx_isp->last_bufdone_err_apply_req_id = 0;
	}

	cam_cpas_notify_event("IFE BufDone", buf_done_req_id);

	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_DONE, buf_done_req_id);
	__cam_isp_ctx_update_event_record(ctx_isp,
		CAM_ISP_CTX_EVENT_BUFDONE, req);
	return rc;
}
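
/*
 * Match each buf_done resource handle against the request's out-fence
 * map and signal the corresponding fence. Handles that do not match
 * (possible IRQ delay) or that were already signalled are collected
 * into done_next_req so the caller can retry them against the next
 * active request. When all fences are acked the request is retired
 * via __cam_isp_ctx_handle_buf_done_for_req_list().
 */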
  801. static int __cam_isp_ctx_handle_buf_done_for_request(
  802. struct cam_isp_context *ctx_isp,
  803. struct cam_ctx_request *req,
  804. struct cam_isp_hw_done_event_data *done,
  805. uint32_t bubble_state,
  806. struct cam_isp_hw_done_event_data *done_next_req)
  807. {
	int rc = 0;
	int i, j;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;
	const char *handle_type;

	trace_cam_buf_done("ISP", ctx, req);

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;

	CAM_DBG(CAM_ISP, "Enter with bubble_state %d, req_bubble_detected %d",
		bubble_state, req_isp->bubble_detected);

	done_next_req->num_handles = 0;
	done_next_req->timestamp = done->timestamp;

	for (i = 0; i < done->num_handles; i++) {
		for (j = 0; j < req_isp->num_fence_map_out; j++) {
			if (done->resource_handle[i] ==
				req_isp->fence_map_out[j].resource_handle)
				break;
		}

		if (j == req_isp->num_fence_map_out) {
			/*
			 * If not found in the current request, it could
			 * belong to the next request; this can happen if
			 * IRQ delay happens. It is only valid when the
			 * platform doesn't have last consumed address.
			 */
			CAM_WARN(CAM_ISP,
				"BUF_DONE for res %s not found in Req %lld ",
				__cam_isp_resource_handle_id_to_type(
				done->resource_handle[i]),
				req->request_id);

			done_next_req->resource_handle
				[done_next_req->num_handles++] =
				done->resource_handle[i];
			continue;
		}

		if (req_isp->fence_map_out[j].sync_id == -1) {
			/* Use j, the matched fence map entry (i indexes done handles) */
			if (ctx_isp->isp_device_type == CAM_IFE_DEVICE_TYPE)
				handle_type =
				__cam_isp_resource_handle_id_to_type(
				req_isp->fence_map_out[j].resource_handle);
			else
				handle_type =
				__cam_isp_tfe_resource_handle_id_to_type(
				req_isp->fence_map_out[j].resource_handle);

			CAM_WARN(CAM_ISP,
				"Duplicate BUF_DONE for req %lld : i=%d, j=%d, res=%s",
				req->request_id, i, j, handle_type);

			trace_cam_log_event("Duplicate BufDone",
				handle_type, req->request_id, ctx->ctx_id);

			done_next_req->resource_handle
				[done_next_req->num_handles++] =
				done->resource_handle[i];
			continue;
		}

		if (!req_isp->bubble_detected) {
			CAM_DBG(CAM_ISP,
				"Sync with success: req %lld res 0x%x fd 0x%x, ctx %u",
				req->request_id,
				req_isp->fence_map_out[j].resource_handle,
				req_isp->fence_map_out[j].sync_id,
				ctx->ctx_id);

			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
				CAM_SYNC_STATE_SIGNALED_SUCCESS,
				CAM_SYNC_COMMON_EVENT_SUCCESS);
			if (rc)
				CAM_DBG(CAM_ISP, "Sync failed with rc = %d",
					rc);
		} else if (!req_isp->bubble_report) {
			CAM_DBG(CAM_ISP,
				"Sync with failure: req %lld res 0x%x fd 0x%x, ctx %u",
				req->request_id,
				req_isp->fence_map_out[j].resource_handle,
				req_isp->fence_map_out[j].sync_id,
				ctx->ctx_id);

			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
				CAM_SYNC_STATE_SIGNALED_ERROR,
				CAM_SYNC_ISP_EVENT_BUBBLE);
			if (rc)
				CAM_ERR(CAM_ISP, "Sync failed with rc = %d",
					rc);
		} else {
			/*
			 * Ignore the buffer done if bubble detect is on.
			 * Increment the ack number here, and queue the
			 * request back to the pending list whenever all the
			 * buffers are done.
			 */
			req_isp->num_acked++;
			CAM_DBG(CAM_ISP,
				"buf done with bubble state %d recovery %d",
				bubble_state, req_isp->bubble_report);
			continue;
		}

		CAM_DBG(CAM_ISP, "req %lld, reset sync id 0x%x ctx %u",
			req->request_id,
			req_isp->fence_map_out[j].sync_id, ctx->ctx_id);
		if (!rc) {
			req_isp->num_acked++;
			req_isp->fence_map_out[j].sync_id = -1;
		}

		if ((ctx_isp->use_frame_header_ts) &&
			(req_isp->hw_update_data.frame_header_res_id ==
			req_isp->fence_map_out[j].resource_handle))
			__cam_isp_ctx_send_sof_timestamp_frame_header(
				ctx_isp,
				req_isp->hw_update_data.frame_header_cpu_addr,
				req->request_id, CAM_REQ_MGR_SOF_EVENT_SUCCESS);
	}

	if (req_isp->num_acked > req_isp->num_fence_map_out) {
		/* Should not happen */
		CAM_ERR(CAM_ISP,
			"WARNING: req_id %lld num_acked %d > map_out %d, ctx %u",
			req->request_id, req_isp->num_acked,
			req_isp->num_fence_map_out, ctx->ctx_id);
		WARN_ON(req_isp->num_acked > req_isp->num_fence_map_out);
	}

	if (req_isp->num_acked != req_isp->num_fence_map_out)
		return rc;

	rc = __cam_isp_ctx_handle_buf_done_for_req_list(ctx_isp, req);
	return rc;
}
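
/*
 * Signal (or count) the buf_done acks that were deferred while this
 * request was still on the wait list. When bubble_handling is true the
 * fences are left pending and only num_acked is incremented; otherwise
 * each deferred fence is signalled with the given status/event_cause.
 */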
static int __cam_isp_handle_deferred_buf_done(
	struct cam_isp_context *ctx_isp,
	struct cam_ctx_request *req,
	bool bubble_handling,
	uint32_t status, uint32_t event_cause)
{
	int i, j;
	int rc = 0;
	struct cam_isp_ctx_req *req_isp =
		(struct cam_isp_ctx_req *) req->req_priv;
	struct cam_context *ctx = ctx_isp->base;

	CAM_DBG(CAM_ISP,
		"ctx[%d] : Req %llu : Handling %d deferred buf_dones num_acked=%d, bubble_handling=%d",
		ctx->ctx_id, req->request_id, req_isp->num_deferred_acks,
		req_isp->num_acked, bubble_handling);

	for (i = 0; i < req_isp->num_deferred_acks; i++) {
		j = req_isp->deferred_fence_map_index[i];

		CAM_DBG(CAM_ISP,
			"ctx[%d] : Sync with status=%d, event_cause=%d: req %lld res 0x%x sync_id 0x%x",
			ctx->ctx_id, status, event_cause,
			req->request_id,
			req_isp->fence_map_out[j].resource_handle,
			req_isp->fence_map_out[j].sync_id);

		if (req_isp->fence_map_out[j].sync_id == -1) {
			CAM_WARN(CAM_ISP,
				"ctx[%d] : Deferred buf_done already signalled, req_id=%llu, j=%d, res=0x%x",
				ctx->ctx_id, req->request_id, j,
				req_isp->fence_map_out[j].resource_handle);
			continue;
		}

		if (!bubble_handling) {
			CAM_WARN(CAM_ISP,
				"ctx[%d] : Req %llu, status=%d res=0x%x should never happen",
				ctx->ctx_id, req->request_id, status,
				req_isp->fence_map_out[j].resource_handle);

			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
				status, event_cause);
			if (rc) {
				CAM_ERR(CAM_ISP,
					"ctx[%d] : Sync signal for Req %llu, sync_id %d status=%d failed with rc = %d",
					ctx->ctx_id, req->request_id,
					req_isp->fence_map_out[j].sync_id,
					status, rc);
			} else {
				req_isp->num_acked++;
				req_isp->fence_map_out[j].sync_id = -1;
			}
		} else {
			req_isp->num_acked++;
		}
	}

	req_isp->num_deferred_acks = 0;
	return rc;
}
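
/*
 * Per-request buf_done handler used on targets that report the last
 * consumed address. When verify_consumed_addr is set, a fence map entry
 * only matches if its buffer address equals the reported consumed
 * address; when defer_buf_done is set, matched fences are recorded for
 * later signalling instead of being signalled immediately.
 */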
static int __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
	struct cam_isp_context *ctx_isp,
	struct cam_ctx_request *req,
	struct cam_isp_hw_done_event_data *done,
	uint32_t bubble_state,
	bool verify_consumed_addr,
	bool defer_buf_done)
{
	int rc = 0;
	int i, j;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;
	const char *handle_type;

	trace_cam_buf_done("ISP", ctx, req);

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;

	CAM_DBG(CAM_ISP, "Enter with bubble_state %d, req_bubble_detected %d",
		bubble_state, req_isp->bubble_detected);

	for (i = 0; i < done->num_handles; i++) {
		for (j = 0; j < req_isp->num_fence_map_out; j++) {
			if (verify_consumed_addr &&
				(done->last_consumed_addr[i] !=
				req_isp->fence_map_out[j].image_buf_addr[0]))
				continue;

			if (done->resource_handle[i] ==
				req_isp->fence_map_out[j].resource_handle)
				break;
		}

		if (j == req_isp->num_fence_map_out) {
			/*
			 * If not found in the current request, it could
			 * belong to the next request; this can happen if
			 * IRQ delay happens. It is only valid when the
			 * platform doesn't have last consumed address.
			 */
			CAM_DBG(CAM_ISP,
				"BUF_DONE for res %s not found in Req %lld ",
				__cam_isp_resource_handle_id_to_type(
				done->resource_handle[i]),
				req->request_id);
			continue;
		}

		if (req_isp->fence_map_out[j].sync_id == -1) {
			/* Use j, the matched fence map entry (i indexes done handles) */
			if (ctx_isp->isp_device_type == CAM_IFE_DEVICE_TYPE)
				handle_type =
				__cam_isp_resource_handle_id_to_type(
				req_isp->fence_map_out[j].resource_handle);
			else
				handle_type =
				__cam_isp_tfe_resource_handle_id_to_type(
				req_isp->fence_map_out[j].resource_handle);

			CAM_WARN(CAM_ISP,
				"Duplicate BUF_DONE for req %lld : i=%d, j=%d, res=%s",
				req->request_id, i, j, handle_type);

			trace_cam_log_event("Duplicate BufDone",
				handle_type, req->request_id, ctx->ctx_id);
			continue;
		}

		if (defer_buf_done) {
			uint32_t deferred_indx = req_isp->num_deferred_acks;

			/*
			 * If we are handling this BUF_DONE event for a request
			 * that is still in wait_list, do not signal now,
			 * instead mark it as done and handle it later -
			 * if this request is going into BUBBLE state later
			 * it will automatically be re-applied. If this is not
			 * going into BUBBLE, signal fences later.
			 * Note - we will come here only if the last consumed
			 * address matches with this port's buffer.
			 */
			req_isp->deferred_fence_map_index[deferred_indx] = j;
			req_isp->num_deferred_acks++;
			CAM_DBG(CAM_ISP,
				"ctx[%d] : Deferred buf done for %llu with bubble state %d recovery %d",
				ctx->ctx_id, req->request_id, bubble_state,
				req_isp->bubble_report);
			CAM_DBG(CAM_ISP,
				"ctx[%d] : Deferred info : num_acks=%d, fence_map_index=%d, resource_handle=0x%x, sync_id=%d",
				ctx->ctx_id, req_isp->num_deferred_acks, j,
				req_isp->fence_map_out[j].resource_handle,
				req_isp->fence_map_out[j].sync_id);
			continue;
		} else if (!req_isp->bubble_detected) {
			CAM_DBG(CAM_ISP,
				"Sync with success: req %lld res 0x%x fd 0x%x, ctx %u",
				req->request_id,
				req_isp->fence_map_out[j].resource_handle,
				req_isp->fence_map_out[j].sync_id,
				ctx->ctx_id);

			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
				CAM_SYNC_STATE_SIGNALED_SUCCESS,
				CAM_SYNC_COMMON_EVENT_SUCCESS);
			if (rc) {
				CAM_DBG(CAM_ISP, "Sync failed with rc = %d",
					rc);
			} else if (req_isp->num_deferred_acks) {
				/* Process deferred buf_done acks */
				__cam_isp_handle_deferred_buf_done(ctx_isp,
					req, false,
					CAM_SYNC_STATE_SIGNALED_SUCCESS,
					CAM_SYNC_COMMON_EVENT_SUCCESS);
			}
		} else if (!req_isp->bubble_report) {
			CAM_DBG(CAM_ISP,
				"Sync with failure: req %lld res 0x%x fd 0x%x, ctx %u",
				req->request_id,
				req_isp->fence_map_out[j].resource_handle,
				req_isp->fence_map_out[j].sync_id,
				ctx->ctx_id);

			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
				CAM_SYNC_STATE_SIGNALED_ERROR,
				CAM_SYNC_ISP_EVENT_BUBBLE);
			if (rc) {
				CAM_ERR(CAM_ISP, "Sync failed with rc = %d",
					rc);
			} else if (req_isp->num_deferred_acks) {
				/* Process deferred buf_done acks */
				__cam_isp_handle_deferred_buf_done(ctx_isp, req,
					false,
					CAM_SYNC_STATE_SIGNALED_ERROR,
					CAM_SYNC_ISP_EVENT_BUBBLE);
			}
		} else {
			/*
			 * Ignore the buffer done if bubble detect is on.
			 * Increment the ack number here, and queue the
			 * request back to the pending list whenever all the
			 * buffers are done.
			 */
			req_isp->num_acked++;
			CAM_DBG(CAM_ISP,
				"buf done with bubble state %d recovery %d",
				bubble_state, req_isp->bubble_report);
			/* Process deferred buf_done acks */
			if (req_isp->num_deferred_acks)
				__cam_isp_handle_deferred_buf_done(ctx_isp, req,
					true,
					CAM_SYNC_STATE_SIGNALED_ERROR,
					CAM_SYNC_ISP_EVENT_BUBBLE);
			continue;
		}

		CAM_DBG(CAM_ISP, "req %lld, reset sync id 0x%x ctx %u",
			req->request_id,
			req_isp->fence_map_out[j].sync_id, ctx->ctx_id);
		if (!rc) {
			req_isp->num_acked++;
			req_isp->fence_map_out[j].sync_id = -1;
		}

		if ((ctx_isp->use_frame_header_ts) &&
			(req_isp->hw_update_data.frame_header_res_id ==
			req_isp->fence_map_out[j].resource_handle))
			__cam_isp_ctx_send_sof_timestamp_frame_header(
				ctx_isp,
				req_isp->hw_update_data.frame_header_cpu_addr,
				req->request_id, CAM_REQ_MGR_SOF_EVENT_SUCCESS);
	}

	if (req_isp->num_acked > req_isp->num_fence_map_out) {
		/* Should not happen */
		CAM_ERR(CAM_ISP,
			"WARNING: req_id %lld num_acked %d > map_out %d, ctx %u",
			req->request_id, req_isp->num_acked,
			req_isp->num_fence_map_out, ctx->ctx_id);
	}

	if (req_isp->num_acked != req_isp->num_fence_map_out)
		return rc;

	rc = __cam_isp_ctx_handle_buf_done_for_req_list(ctx_isp, req);
	return rc;
}
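
/*
 * Buf_done handler for targets without last-consumed-address support.
 * Handles the first request on the active list, and retries any
 * unmatched resource handles against the next active request to cover
 * IRQ-delay reordering.
 */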
static int __cam_isp_ctx_handle_buf_done(
	struct cam_isp_context *ctx_isp,
	struct cam_isp_hw_done_event_data *done,
	uint32_t bubble_state)
{
	int rc = 0;
	struct cam_ctx_request *req;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_done_event_data done_next_req;

	if (list_empty(&ctx->active_req_list)) {
		CAM_WARN(CAM_ISP, "Buf done with no active request");
		return 0;
	}

	req = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);

	rc = __cam_isp_ctx_handle_buf_done_for_request(ctx_isp, req, done,
		bubble_state, &done_next_req);

	if (done_next_req.num_handles) {
		struct cam_isp_hw_done_event_data unhandled_res;
		struct cam_ctx_request *next_req = list_last_entry(
			&ctx->active_req_list, struct cam_ctx_request, list);

		if (next_req->request_id != req->request_id) {
			/*
			 * A few resource handles are already signalled in the
			 * current request; check whether another request is
			 * waiting for these resources. This can happen when
			 * some of the next request's buf done events are
			 * handled before the current request's remaining buf
			 * dones, due to IRQ scheduling.
			 * Check only one more request, as we will have at
			 * most 2 requests in active_list at any time.
			 */
			CAM_WARN(CAM_ISP,
				"Unhandled buf done resources for req %lld, trying next request %lld in active_list",
				req->request_id, next_req->request_id);

			__cam_isp_ctx_handle_buf_done_for_request(ctx_isp,
				next_req, &done_next_req,
				bubble_state, &unhandled_res);

			if (unhandled_res.num_handles == 0)
				CAM_INFO(CAM_ISP,
					"BUF Done event handled for next request %lld",
					next_req->request_id);
			else
				CAM_ERR(CAM_ISP,
					"BUF Done not handled for next request %lld",
					next_req->request_id);
		} else {
			CAM_WARN(CAM_ISP,
				"Req %lld only active request, spurious buf_done rxd",
				req->request_id);
		}
	}

	return rc;
}
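
/*
 * Check whether any reported resource handle and consumed address pair
 * matches the given (next) request; a match means the buf_done IRQ for
 * the previous request was delayed.
 */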
static void __cam_isp_ctx_buf_done_match_req(
	struct cam_ctx_request *req,
	struct cam_isp_hw_done_event_data *done,
	bool *irq_delay_detected)
{
	int i, j;
	uint32_t match_count = 0;
	struct cam_isp_ctx_req *req_isp;

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;

	for (i = 0; i < done->num_handles; i++) {
		for (j = 0; j < req_isp->num_fence_map_out; j++) {
			if ((done->resource_handle[i] ==
				req_isp->fence_map_out[j].resource_handle) &&
				(done->last_consumed_addr[i] ==
				req_isp->fence_map_out[j].image_buf_addr[0])) {
				match_count++;
				break;
			}
		}
	}

	*irq_delay_detected = (match_count > 0);

	CAM_DBG(CAM_ISP,
		"buf done num handles %d match count %d for next req:%lld",
		done->num_handles, match_count, req->request_id);
	CAM_DBG(CAM_ISP,
		"irq_delay_detected %d", *irq_delay_detected);
}
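
/*
 * Buf_done handler for targets that report the last consumed address.
 * With no active request, a waiting request is handled in deferred mode;
 * otherwise the first active request is handled, and the second active
 * request as well if an IRQ delay was detected.
 */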
static int __cam_isp_ctx_handle_buf_done_verify_addr(
	struct cam_isp_context *ctx_isp,
	struct cam_isp_hw_done_event_data *done,
	uint32_t bubble_state)
{
	int rc = 0;
	bool irq_delay_detected = false;
	struct cam_ctx_request *req;
	struct cam_ctx_request *next_req = NULL;
	struct cam_context *ctx = ctx_isp->base;
	bool req_in_wait_list = false;

	if (list_empty(&ctx->active_req_list)) {
		if (!list_empty(&ctx->wait_req_list)) {
			struct cam_isp_ctx_req *req_isp;

			req = list_first_entry(&ctx->wait_req_list,
				struct cam_ctx_request, list);
			req_in_wait_list = true;

			if (ctx_isp->last_applied_req_id !=
				ctx_isp->last_bufdone_err_apply_req_id) {
				CAM_WARN(CAM_ISP,
					"Buf done with no active request but with req in wait list, req %llu last apply id:%lld last err id:%lld",
					req->request_id,
					ctx_isp->last_applied_req_id,
					ctx_isp->last_bufdone_err_apply_req_id);
				ctx_isp->last_bufdone_err_apply_req_id =
					ctx_isp->last_applied_req_id;
			}

			req_isp = (struct cam_isp_ctx_req *) req->req_priv;

			/*
			 * Verify consumed address for this request to make
			 * sure we are handling the buf_done for the correct
			 * buffer. Also defer actual buf_done handling, i.e.
			 * do not signal the fence as this request may go into
			 * Bubble state eventually.
			 */
			rc =
			__cam_isp_ctx_handle_buf_done_for_request_verify_addr(
				ctx_isp, req, done, bubble_state, true, true);
		}

		if (!req_in_wait_list && (ctx_isp->last_applied_req_id !=
			ctx_isp->last_bufdone_err_apply_req_id)) {
			CAM_WARN(CAM_ISP,
				"Buf done with no active request bubble_state=%d last_applied_req_id:%lld ",
				bubble_state, ctx_isp->last_applied_req_id);
			ctx_isp->last_bufdone_err_apply_req_id =
				ctx_isp->last_applied_req_id;
		}
		return 0;
	}

	req = list_first_entry(&ctx->active_req_list,
		struct cam_ctx_request, list);

	if (ctx_isp->active_req_cnt > 1) {
		next_req = list_last_entry(
			&ctx->active_req_list,
			struct cam_ctx_request, list);

		if (next_req->request_id != req->request_id)
			__cam_isp_ctx_buf_done_match_req(next_req, done,
				&irq_delay_detected);
		else
			CAM_WARN(CAM_ISP,
				"Req %lld only active request, spurious buf_done rxd",
				req->request_id);
	}

	/*
	 * If irq delay isn't detected, then we need to verify
	 * the consumed address for current req, otherwise, we
	 * can't verify the consumed address.
	 */
	rc = __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
		ctx_isp, req, done, bubble_state,
		!irq_delay_detected, false);

	/*
	 * Verify the consumed address for next req all the time,
	 * since the reported buf done event may belong to current
	 * req, then we can't signal this event for next req.
	 */
	if (!rc && irq_delay_detected)
		rc = __cam_isp_ctx_handle_buf_done_for_request_verify_addr(
			ctx_isp, next_req, done,
			bubble_state, true, false);

	return rc;
}
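
/* Dispatch buf_done handling based on consumed-address support. */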
static int __cam_isp_ctx_handle_buf_done_in_activated_state(
	struct cam_isp_context *ctx_isp,
	struct cam_isp_hw_done_event_data *done,
	uint32_t bubble_state)
{
	int rc = 0;

	if (ctx_isp->support_consumed_addr)
		rc = __cam_isp_ctx_handle_buf_done_verify_addr(
			ctx_isp, done, bubble_state);
	else
		rc = __cam_isp_ctx_handle_buf_done(
			ctx_isp, done, bubble_state);

	return rc;
}
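
/*
 * Workqueue callback for offline contexts: pull the first pending
 * request, program it to the HW, and move it to the wait list. On
 * hw_config failure the request and the applied-request bookkeeping
 * are rolled back.
 */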
static int __cam_isp_ctx_apply_req_offline(
	void *priv, void *data)
{
	int rc = 0;
	int64_t prev_applied_req;
	struct cam_context *ctx = NULL;
	struct cam_isp_context *ctx_isp = priv;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_hw_config_args cfg;

	if (!ctx_isp) {
		CAM_ERR(CAM_ISP, "Invalid ctx_isp:%pK", ctx_isp);
		rc = -EINVAL;
		goto end;
	}
	ctx = ctx_isp->base;

	if (list_empty(&ctx->pending_req_list)) {
		CAM_DBG(CAM_ISP, "No pending requests to apply");
		rc = -EFAULT;
		goto end;
	}

	if ((ctx->state != CAM_CTX_ACTIVATED) ||
		(!atomic_read(&ctx_isp->rxd_epoch)) ||
		(ctx_isp->substate_activated == CAM_ISP_CTX_ACTIVATED_APPLIED))
		goto end;

	if (ctx_isp->active_req_cnt >= 2)
		goto end;

	spin_lock_bh(&ctx->lock);
	req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
		list);
	spin_unlock_bh(&ctx->lock);

	CAM_DBG(CAM_REQ, "Apply request %lld in substate %d ctx %u",
		req->request_id, ctx_isp->substate_activated, ctx->ctx_id);

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;

	memset(&cfg, 0, sizeof(cfg));
	cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
	cfg.request_id = req->request_id;
	cfg.hw_update_entries = req_isp->cfg;
	cfg.num_hw_update_entries = req_isp->num_cfg;
	cfg.priv = &req_isp->hw_update_data;
	cfg.init_packet = 0;

	/*
	 * Offline mode may receive the SOF and REG_UPD earlier than the
	 * CDM processing returns, so set the substate before applying
	 * the settings.
	 */
	spin_lock_bh(&ctx->lock);
	atomic_set(&ctx_isp->rxd_epoch, 0);
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_APPLIED;
	prev_applied_req = ctx_isp->last_applied_req_id;
	ctx_isp->last_applied_req_id = req->request_id;
	atomic_set(&ctx_isp->apply_in_progress, 1);
	list_del_init(&req->list);
	list_add_tail(&req->list, &ctx->wait_req_list);
	spin_unlock_bh(&ctx->lock);

	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
	if (rc) {
		CAM_ERR_RATE_LIMIT(CAM_ISP, "Can not apply the configuration");
		spin_lock_bh(&ctx->lock);
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
		ctx_isp->last_applied_req_id = prev_applied_req;
		atomic_set(&ctx_isp->apply_in_progress, 0);
		list_del_init(&req->list);
		list_add(&req->list, &ctx->pending_req_list);
		spin_unlock_bh(&ctx->lock);
	} else {
		atomic_set(&ctx_isp->apply_in_progress, 0);
		CAM_DBG(CAM_ISP, "New substate state %d, applied req %lld",
			CAM_ISP_CTX_ACTIVATED_APPLIED,
			ctx_isp->last_applied_req_id);
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_APPLIED,
			req->request_id);
	}
end:
	return rc;
}
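
/* Queue __cam_isp_ctx_apply_req_offline on the context workq. */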
static int __cam_isp_ctx_schedule_apply_req_offline(
	struct cam_isp_context *ctx_isp)
{
	int rc = 0;
	struct crm_workq_task *task;

	task = cam_req_mgr_workq_get_task(ctx_isp->workq);
	if (!task) {
		CAM_ERR(CAM_ISP, "No task for worker");
		return -ENOMEM;
	}

	task->process_cb = __cam_isp_ctx_apply_req_offline;
	rc = cam_req_mgr_workq_enqueue_task(task, ctx_isp, CRM_TASK_PRIORITY_0);
	if (rc)
		CAM_ERR(CAM_ISP, "Failed to schedule task rc:%d", rc);

	return rc;
}

static int __cam_isp_ctx_offline_epoch_in_activated_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	struct cam_context *ctx = ctx_isp->base;
	struct cam_ctx_request *req, *req_temp;
	uint64_t request_id = 0;

	atomic_set(&ctx_isp->rxd_epoch, 1);

	CAM_DBG(CAM_ISP, "SOF frame %lld ctx %u", ctx_isp->frame_id,
		ctx->ctx_id);

	list_for_each_entry_safe(req, req_temp, &ctx->active_req_list, list) {
		if (req->request_id > ctx_isp->reported_req_id) {
			request_id = req->request_id;
			ctx_isp->reported_req_id = request_id;
			break;
		}
	}

	__cam_isp_ctx_schedule_apply_req_offline(ctx_isp);

	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
		CAM_REQ_MGR_SOF_EVENT_SUCCESS);

	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH,
		request_id);

	return 0;
}
static int __cam_isp_ctx_reg_upd_in_epoch_bubble_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	if (ctx_isp->frame_id == 1)
		CAM_DBG(CAM_ISP, "Reg update in Substate[%s] for early PCR",
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated));
	else
		CAM_WARN_RATE_LIMIT(CAM_ISP,
			"ctx_id:%d Unexpected reg update in activated Substate[%s] for frame_id:%lld",
			ctx_isp->base->ctx_id,
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated),
			ctx_isp->frame_id);
	return 0;
}
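
/*
 * Reg update ack in applied state: move the waiting request to the
 * active list (or straight to the free list if it has no output
 * fences) and advance the substate to EPOCH.
 */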
static int __cam_isp_ctx_reg_upd_in_applied_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_ctx_request *req;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_ctx_req *req_isp;
	uint64_t request_id = 0;

	if (list_empty(&ctx->wait_req_list)) {
		CAM_ERR(CAM_ISP, "Reg upd ack with no waiting request");
		goto end;
	}
	req = list_first_entry(&ctx->wait_req_list,
		struct cam_ctx_request, list);
	list_del_init(&req->list);

	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
	if (req_isp->num_fence_map_out != 0) {
		list_add_tail(&req->list, &ctx->active_req_list);
		ctx_isp->active_req_cnt++;
		request_id = req->request_id;
		CAM_DBG(CAM_REQ,
			"move request %lld to active list(cnt = %d), ctx %u",
			req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);
		__cam_isp_ctx_update_event_record(ctx_isp,
			CAM_ISP_CTX_EVENT_RUP, req);
	} else {
		/* No io config, so the request is completed. */
		list_add_tail(&req->list, &ctx->free_req_list);
		CAM_DBG(CAM_ISP,
			"move active request %lld to free list(cnt = %d), ctx %u",
			req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);
	}

	/*
	 * This function is only called directly from the applied and bubble
	 * applied states, so change the substate here.
	 */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;
	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE, request_id);
end:
	return rc;
}
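
/*
 * SOF/epoch notification path. If a bubble is being processed, first
 * check (via the last CDM-done request id) whether the CDM callback is
 * merely delayed or stuck, and re-queue the bubbled request to the
 * pending list in the latter case; then notify CRM with the SOF trigger.
 */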
static int __cam_isp_ctx_notify_sof_in_activated_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	uint64_t request_id = 0;
	struct cam_req_mgr_trigger_notify notify;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_hw_cmd_args hw_cmd_args;
	struct cam_isp_hw_cmd_args isp_hw_cmd_args;
	uint64_t last_cdm_done_req = 0;
	struct cam_isp_hw_epoch_event_data *epoch_done_event_data =
		(struct cam_isp_hw_epoch_event_data *)evt_data;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid event data");
		return -EINVAL;
	}

	ctx_isp->frame_id_meta = epoch_done_event_data->frame_id_meta;

	if (atomic_read(&ctx_isp->process_bubble)) {
		if (list_empty(&ctx->active_req_list)) {
			CAM_ERR(CAM_ISP,
				"No available active req in bubble");
			atomic_set(&ctx_isp->process_bubble, 0);
			ctx_isp->bubble_frame_cnt = 0;
			rc = -EINVAL;
			return rc;
		}

		if (ctx_isp->last_sof_timestamp ==
			ctx_isp->sof_timestamp_val) {
			CAM_DBG(CAM_ISP,
				"Tasklet delay detected! Bubble frame check skipped, sof_timestamp: %lld",
				ctx_isp->sof_timestamp_val);
			goto notify_only;
		}

		req = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;

		if (ctx_isp->bubble_frame_cnt >= 1 &&
			req_isp->bubble_detected) {
			hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
			hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
			isp_hw_cmd_args.cmd_type =
				CAM_ISP_HW_MGR_GET_LAST_CDM_DONE;
			hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
			rc = ctx->hw_mgr_intf->hw_cmd(
				ctx->hw_mgr_intf->hw_mgr_priv,
				&hw_cmd_args);
			if (rc) {
				CAM_ERR(CAM_ISP, "HW command failed");
				return rc;
			}

			last_cdm_done_req = isp_hw_cmd_args.u.last_cdm_done;
			CAM_DBG(CAM_ISP, "last_cdm_done req: %lld",
				last_cdm_done_req);

			if (last_cdm_done_req >= req->request_id) {
				CAM_DBG(CAM_ISP,
					"CDM callback detected for req: %lld, possible buf_done delay, waiting for buf_done",
					req->request_id);
				ctx_isp->bubble_frame_cnt = 0;
			} else {
				CAM_DBG(CAM_ISP,
					"CDM callback not happened for req: %lld, possible CDM stuck or workqueue delay",
					req->request_id);
				req_isp->num_acked = 0;
				req_isp->num_deferred_acks = 0;
				ctx_isp->bubble_frame_cnt = 0;
				req_isp->bubble_detected = false;
				req_isp->cdm_reset_before_apply = true;
				list_del_init(&req->list);
				list_add(&req->list, &ctx->pending_req_list);
				atomic_set(&ctx_isp->process_bubble, 0);
				ctx_isp->active_req_cnt--;
				CAM_DBG(CAM_REQ,
					"Move active req: %lld to pending list(cnt = %d) [bubble re-apply],ctx %u",
					req->request_id,
					ctx_isp->active_req_cnt, ctx->ctx_id);
			}
		} else if (req_isp->bubble_detected) {
			ctx_isp->bubble_frame_cnt++;
			CAM_DBG(CAM_ISP,
				"Waiting on bufdone for bubble req: %lld, since frame_cnt = %lld",
				req->request_id,
				ctx_isp->bubble_frame_cnt);
		} else {
			CAM_DBG(CAM_ISP, "Delayed bufdone for req: %lld",
				req->request_id);
		}
	}

notify_only:
	/*
	 * Notify reqmgr with the SOF signal. Note, due to scheduling delay
	 * we can run into a situation where two active requests are already
	 * in the active queue while we try to do the notification.
	 * In this case, we need to skip the current notification. This
	 * helps the state machine catch up on the delay.
	 */
	if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_trigger &&
		ctx_isp->active_req_cnt <= 2) {
		if (ctx_isp->subscribe_event & CAM_TRIGGER_POINT_SOF) {
			notify.link_hdl = ctx->link_hdl;
			notify.dev_hdl = ctx->dev_hdl;
			notify.frame_id = ctx_isp->frame_id;
			notify.trigger = CAM_TRIGGER_POINT_SOF;
			notify.req_id = ctx_isp->req_info.last_bufdone_req_id;
			notify.sof_timestamp_val = ctx_isp->sof_timestamp_val;
			notify.trigger_id = ctx_isp->trigger_id;

			ctx->ctx_crm_intf->notify_trigger(&notify);
			CAM_DBG(CAM_ISP, "Notify CRM SOF frame %lld ctx %u",
				ctx_isp->frame_id, ctx->ctx_id);
		}

		list_for_each_entry(req, &ctx->active_req_list, list) {
			req_isp = (struct cam_isp_ctx_req *) req->req_priv;
			if ((!req_isp->bubble_detected) &&
				(req->request_id > ctx_isp->reported_req_id)) {
				request_id = req->request_id;
				ctx_isp->reported_req_id = request_id;
				__cam_isp_ctx_update_event_record(ctx_isp,
					CAM_ISP_CTX_EVENT_EPOCH, req);
				break;
			}
		}

		if (ctx_isp->substate_activated == CAM_ISP_CTX_ACTIVATED_BUBBLE)
			request_id = 0;

		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);

		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH,
			request_id);
	} else {
		CAM_ERR_RATE_LIMIT(CAM_ISP,
			"Can not notify SOF to CRM for ctx %u",
			ctx->ctx_id);
		rc = -EFAULT;
	}

	ctx_isp->last_sof_timestamp = ctx_isp->sof_timestamp_val;
	return 0;
}

static int __cam_isp_ctx_notify_eof_in_activated_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_req_mgr_trigger_notify notify;
	struct cam_context *ctx = ctx_isp->base;

	if (!(ctx_isp->subscribe_event & CAM_TRIGGER_POINT_EOF))
		return rc;

	/* Notify reqmgr with the EOF signal */
	if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_trigger) {
		notify.link_hdl = ctx->link_hdl;
		notify.dev_hdl = ctx->dev_hdl;
		notify.frame_id = ctx_isp->frame_id;
		notify.trigger = CAM_TRIGGER_POINT_EOF;
		notify.trigger_id = ctx_isp->trigger_id;
		ctx->ctx_crm_intf->notify_trigger(&notify);
		CAM_DBG(CAM_ISP, "Notify CRM EOF frame %lld ctx %u",
			ctx_isp->frame_id, ctx->ctx_id);

		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_EOF, 0);
	} else {
		CAM_ERR(CAM_ISP, "Can not notify EOF to CRM for ctx %u",
			ctx->ctx_id);
		rc = -EFAULT;
	}

	return rc;
}

static int __cam_isp_ctx_reg_upd_in_hw_error(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
	return 0;
}
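
/*
 * Common SOF handler: pick the request id to report (active list first,
 * then wait list), bump the frame id and record the SOF timestamps.
 */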
static int __cam_isp_ctx_sof_in_activated_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
	struct cam_ctx_request *req = NULL;
	struct cam_context *ctx = ctx_isp->base;
	uint64_t request_id = 0;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid SOF event data");
		return -EINVAL;
	}

	/* First check if there is a valid request in active list */
	list_for_each_entry(req, &ctx->active_req_list, list) {
		if (req->request_id > ctx_isp->reported_req_id) {
			request_id = req->request_id;
			break;
		}
	}

	/*
	 * If nothing in active list, current request might have not moved
	 * from wait to active list. This could happen if REG_UPDATE to sw
	 * is coming immediately after SOF.
	 */
	if (request_id == 0 && !list_empty(&ctx->wait_req_list)) {
		req = list_first_entry(&ctx->wait_req_list,
			struct cam_ctx_request, list);
		request_id = req->request_id;
	}

	ctx_isp->frame_id++;
	ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
	ctx_isp->boot_timestamp = sof_event_data->boot_time;

	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
		CAM_ISP_STATE_CHANGE_TRIGGER_SOF, request_id);

	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx, ctx %u",
		ctx_isp->frame_id, ctx_isp->sof_timestamp_val, ctx->ctx_id);
	return rc;
}

static int __cam_isp_ctx_reg_upd_in_sof(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_ctx_request *req = NULL;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;

	if (ctx->state != CAM_CTX_ACTIVATED && ctx_isp->frame_id > 1) {
		CAM_DBG(CAM_ISP, "invalid RUP");
		goto end;
	}

	/*
	 * This is for the first update. The initial setting will
	 * cause the reg_upd in the first frame.
	 */
	if (!list_empty(&ctx->wait_req_list)) {
		req = list_first_entry(&ctx->wait_req_list,
			struct cam_ctx_request, list);
		list_del_init(&req->list);
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		if (req_isp->num_fence_map_out == req_isp->num_acked)
			list_add_tail(&req->list, &ctx->free_req_list);
		else
			CAM_ERR(CAM_ISP,
				"receive rup in unexpected state");
	}
	if (req != NULL) {
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE,
			req->request_id);
	}
end:
	return rc;
}
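
/*
 * Epoch in applied substate means the reg update ack never arrived, so
 * the applied request is marked as a bubble. If bubble recovery is
 * enabled, CRM is notified with CRM_KMD_ERR_BUBBLE; either way the
 * request moves to the active list and buf_done completes it.
 */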
static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	uint64_t request_id = 0;
	uint32_t sof_event_status = CAM_REQ_MGR_SOF_EVENT_SUCCESS;
	struct cam_req_mgr_trigger_notify notify;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_epoch_event_data *epoch_done_event_data =
		(struct cam_isp_hw_epoch_event_data *)evt_data;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid event data");
		return -EINVAL;
	}

	ctx_isp->frame_id_meta = epoch_done_event_data->frame_id_meta;

	if (list_empty(&ctx->wait_req_list)) {
		/*
		 * If no wait req in epoch, this is an error case.
		 * The recovery is to go back to sof state.
		 */
		CAM_ERR(CAM_ISP, "Ctx:%d No wait request", ctx->ctx_id);
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;

		/* Send SOF event as empty frame */
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
		__cam_isp_ctx_update_event_record(ctx_isp,
			CAM_ISP_CTX_EVENT_EPOCH, NULL);
		goto end;
	}

	/* Update state prior to notifying CRM */
	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;

	req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request,
		list);
	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
	req_isp->bubble_detected = true;
	req_isp->reapply = true;
	req_isp->cdm_reset_before_apply = false;

	CAM_INFO_RATE_LIMIT(CAM_ISP, "ctx:%d Report Bubble flag %d req id:%lld",
		ctx->ctx_id, req_isp->bubble_report, req->request_id);
	if (req_isp->bubble_report && ctx->ctx_crm_intf &&
		ctx->ctx_crm_intf->notify_err) {
		struct cam_req_mgr_error_notify notify;

		notify.link_hdl = ctx->link_hdl;
		notify.dev_hdl = ctx->dev_hdl;
		notify.req_id = req->request_id;
		notify.error = CRM_KMD_ERR_BUBBLE;
		notify.trigger = 0;
		if (ctx_isp->subscribe_event & CAM_TRIGGER_POINT_SOF)
			notify.trigger = CAM_TRIGGER_POINT_SOF;
		notify.frame_id = ctx_isp->frame_id;
		notify.sof_timestamp_val = ctx_isp->sof_timestamp_val;
		CAM_WARN_RATE_LIMIT(CAM_ISP,
			"Notify CRM about Bubble req %lld frame %lld, ctx %u",
			req->request_id, ctx_isp->frame_id, ctx->ctx_id);
		trace_cam_log_event("Bubble", "Rcvd epoch in applied state",
			req->request_id, ctx->ctx_id);
		ctx->ctx_crm_intf->notify_err(&notify);
		atomic_set(&ctx_isp->process_bubble, 1);
	} else {
		req_isp->bubble_report = 0;
		CAM_DBG(CAM_ISP, "Skip bubble recovery for req %lld ctx %u",
			req->request_id, ctx->ctx_id);
		if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_trigger &&
			ctx_isp->active_req_cnt <= 1) {
			if (ctx_isp->subscribe_event & CAM_TRIGGER_POINT_SOF) {
				notify.link_hdl = ctx->link_hdl;
				notify.dev_hdl = ctx->dev_hdl;
				notify.frame_id = ctx_isp->frame_id;
				notify.trigger = CAM_TRIGGER_POINT_SOF;
				notify.req_id =
					ctx_isp->req_info.last_bufdone_req_id;
				notify.sof_timestamp_val =
					ctx_isp->sof_timestamp_val;
				notify.trigger_id = ctx_isp->trigger_id;
				ctx->ctx_crm_intf->notify_trigger(&notify);
				CAM_DBG(CAM_ISP,
					"Notify CRM SOF frame %lld ctx %u",
					ctx_isp->frame_id, ctx->ctx_id);
			}
		}
	}

	/*
	 * Always move the request to the active list. Let the buf done
	 * function handle the rest.
	 */
	list_del_init(&req->list);
	list_add_tail(&req->list, &ctx->active_req_list);
	ctx_isp->active_req_cnt++;
	CAM_DBG(CAM_REQ, "move request %lld to active list(cnt = %d), ctx %u",
		req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);

	/*
	 * Update the event record before the req pointer is advanced
	 * to another request.
	 */
	__cam_isp_ctx_update_event_record(ctx_isp,
		CAM_ISP_CTX_EVENT_EPOCH, req);

	/*
	 * Get the req again from active_req_list in case
	 * the active req cnt is 2.
	 */
	list_for_each_entry(req, &ctx->active_req_list, list) {
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		if ((!req_isp->bubble_report) &&
			(req->request_id > ctx_isp->reported_req_id)) {
			request_id = req->request_id;
			ctx_isp->reported_req_id = request_id;
			CAM_DBG(CAM_ISP,
				"ctx %d reported_req_id update to %lld",
				ctx->ctx_id, ctx_isp->reported_req_id);
			break;
		}
	}

	if ((request_id != 0) && req_isp->bubble_detected)
		sof_event_status = CAM_REQ_MGR_SOF_EVENT_ERROR;

	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
		sof_event_status);

	cam_req_mgr_debug_delay_detect();
	trace_cam_delay_detect("ISP",
		"bubble epoch_in_applied", req->request_id,
		ctx->ctx_id, ctx->link_hdl, ctx->session_hdl,
		CAM_DEFAULT_VALUE);
end:
	/* Active list may be empty on the no-wait-request error path */
	if (request_id == 0 && !list_empty(&ctx->active_req_list)) {
		req = list_last_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH, req->request_id);
	} else {
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH, request_id);
	}

	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));
	return 0;
}
static int __cam_isp_ctx_buf_done_in_applied(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_done_event_data *done =
		(struct cam_isp_hw_done_event_data *) evt_data;

	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0);
	return rc;
}

static int __cam_isp_ctx_sof_in_epoch(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
	struct cam_ctx_request *req;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid SOF event data");
		return -EINVAL;
	}

	if (atomic_read(&ctx_isp->apply_in_progress))
		CAM_INFO(CAM_ISP, "Apply is in progress at the time of SOF");

	ctx_isp->frame_id++;
	ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
	ctx_isp->boot_timestamp = sof_event_data->boot_time;

	if (list_empty(&ctx->active_req_list))
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
	else
		CAM_DBG(CAM_ISP, "Still need to wait for the buf done");

	/* Only look up the last request when the active list is non-empty */
	if (!list_empty(&ctx->active_req_list)) {
		req = list_last_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_SOF,
			req->request_id);
	}

	if (ctx_isp->frame_id == 1)
		CAM_INFO(CAM_ISP,
			"First SOF in EPCR ctx:%d frame_id:%lld next substate %s",
			ctx->ctx_id, ctx_isp->frame_id,
			__cam_isp_ctx_substate_val_to_type(
			ctx_isp->substate_activated));

	CAM_DBG(CAM_ISP, "SOF in epoch ctx:%d frame_id:%lld next substate:%s",
		ctx->ctx_id, ctx_isp->frame_id,
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	return rc;
}

static int __cam_isp_ctx_buf_done_in_epoch(struct cam_isp_context *ctx_isp,
	void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_done_event_data *done =
		(struct cam_isp_hw_done_event_data *) evt_data;

	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0);
	return rc;
}

static int __cam_isp_ctx_buf_done_in_bubble(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_done_event_data *done =
		(struct cam_isp_hw_done_event_data *) evt_data;

	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 1);
	return rc;
}
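
/*
 * Same bubble handling as __cam_isp_ctx_epoch_in_applied, but entered
 * from the bubble-applied substate when the reg upd ack was missed
 * again; the context returns to the BUBBLE substate afterwards.
 */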
static int __cam_isp_ctx_epoch_in_bubble_applied(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	uint64_t request_id = 0;
	struct cam_req_mgr_trigger_notify notify;
	struct cam_ctx_request *req;
	struct cam_isp_ctx_req *req_isp;
	struct cam_context *ctx = ctx_isp->base;
	struct cam_isp_hw_epoch_event_data *epoch_done_event_data =
		(struct cam_isp_hw_epoch_event_data *)evt_data;

	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid event data");
		return -EINVAL;
	}

	ctx_isp->frame_id_meta = epoch_done_event_data->frame_id_meta;

	/*
	 * This means we missed the reg upd ack. So we need to
	 * transition to BUBBLE state again.
	 */
	if (list_empty(&ctx->wait_req_list)) {
		/*
		 * If no pending req in epoch, this is an error case.
		 * Just go back to the bubble state.
		 */
		CAM_ERR(CAM_ISP, "ctx:%d No pending request.", ctx->ctx_id);
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
		__cam_isp_ctx_update_event_record(ctx_isp,
			CAM_ISP_CTX_EVENT_EPOCH, NULL);
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
		goto end;
	}

	req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request,
		list);
	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
	req_isp->bubble_detected = true;
	CAM_INFO_RATE_LIMIT(CAM_ISP, "Ctx:%d Report Bubble flag %d req id:%lld",
		ctx->ctx_id, req_isp->bubble_report, req->request_id);
	req_isp->reapply = true;
	req_isp->cdm_reset_before_apply = false;

	if (req_isp->bubble_report && ctx->ctx_crm_intf &&
		ctx->ctx_crm_intf->notify_err) {
		struct cam_req_mgr_error_notify notify;

		notify.link_hdl = ctx->link_hdl;
		notify.dev_hdl = ctx->dev_hdl;
		notify.req_id = req->request_id;
		notify.error = CRM_KMD_ERR_BUBBLE;
		notify.trigger = 0;
		if (ctx_isp->subscribe_event & CAM_TRIGGER_POINT_SOF)
			notify.trigger = CAM_TRIGGER_POINT_SOF;
		notify.frame_id = ctx_isp->frame_id;
		notify.sof_timestamp_val = ctx_isp->sof_timestamp_val;
		CAM_WARN_RATE_LIMIT(CAM_REQ,
			"Notify CRM about Bubble req_id %llu frame %lld, ctx %u",
			req->request_id, ctx_isp->frame_id, ctx->ctx_id);
		ctx->ctx_crm_intf->notify_err(&notify);
		atomic_set(&ctx_isp->process_bubble, 1);
	} else {
		req_isp->bubble_report = 0;
		CAM_DBG(CAM_ISP, "Skip bubble recovery for req %lld ctx %u",
			req->request_id, ctx->ctx_id);
		if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_trigger &&
			ctx_isp->active_req_cnt <= 1) {
			if (ctx_isp->subscribe_event & CAM_TRIGGER_POINT_SOF) {
				notify.link_hdl = ctx->link_hdl;
				notify.dev_hdl = ctx->dev_hdl;
				notify.frame_id = ctx_isp->frame_id;
				notify.trigger = CAM_TRIGGER_POINT_SOF;
				notify.req_id =
					ctx_isp->req_info.last_bufdone_req_id;
				notify.sof_timestamp_val =
					ctx_isp->sof_timestamp_val;
				notify.trigger_id = ctx_isp->trigger_id;
				ctx->ctx_crm_intf->notify_trigger(&notify);
				CAM_DBG(CAM_ISP,
					"Notify CRM SOF frame %lld ctx %u",
					ctx_isp->frame_id, ctx->ctx_id);
			}
		}
	}

	/*
	 * Always move the request to the active list. Let the buf done
	 * function handle the rest.
	 */
	list_del_init(&req->list);
	list_add_tail(&req->list, &ctx->active_req_list);
	ctx_isp->active_req_cnt++;
	CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d) ctx %u",
		req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);

	if (!req_isp->bubble_report) {
		if (req->request_id > ctx_isp->reported_req_id) {
			request_id = req->request_id;
			ctx_isp->reported_req_id = request_id;
			__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
				CAM_REQ_MGR_SOF_EVENT_ERROR);
			__cam_isp_ctx_update_event_record(ctx_isp,
				CAM_ISP_CTX_EVENT_EPOCH, req);
		} else {
			__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
				CAM_REQ_MGR_SOF_EVENT_SUCCESS);
			__cam_isp_ctx_update_event_record(ctx_isp,
				CAM_ISP_CTX_EVENT_EPOCH, NULL);
		}
	} else {
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
		__cam_isp_ctx_update_event_record(ctx_isp,
			CAM_ISP_CTX_EVENT_EPOCH, NULL);
	}

	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
	CAM_DBG(CAM_ISP, "next Substate[%s]",
		__cam_isp_ctx_substate_val_to_type(
		ctx_isp->substate_activated));

	cam_req_mgr_debug_delay_detect();
	trace_cam_delay_detect("ISP",
		"bubble epoch_in_bubble_applied",
		req->request_id, ctx->ctx_id,
		ctx->link_hdl, ctx->session_hdl,
		CAM_DEFAULT_VALUE);
end:
	/* Active list may be empty on the error path above */
	if (!list_empty(&ctx->active_req_list)) {
		req = list_last_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);
		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
			CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH, req->request_id);
	}
	return 0;
}

static int __cam_isp_ctx_buf_done_in_bubble_applied(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	int rc = 0;
	struct cam_isp_hw_done_event_data *done =
		(struct cam_isp_hw_done_event_data *) evt_data;

	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 1);
	return rc;
}
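
/* Map an ISP HW error type to the sync event cause reported to fences. */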
static uint32_t get_evt_param(uint32_t error_type)
{
	switch (error_type) {
	case CAM_ISP_HW_ERROR_OVERFLOW:
		return CAM_SYNC_ISP_EVENT_OVERFLOW;
	case CAM_ISP_HW_ERROR_P2I_ERROR:
		return CAM_SYNC_ISP_EVENT_P2I_ERROR;
	case CAM_ISP_HW_ERROR_VIOLATION:
		return CAM_SYNC_ISP_EVENT_VIOLATION;
	case CAM_ISP_HW_ERROR_BUSIF_OVERFLOW:
		return CAM_SYNC_ISP_EVENT_BUSIF_OVERFLOW;
	default:
		return CAM_SYNC_ISP_EVENT_UNKNOWN;
	}
}
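
/*
 * Fatal/overflow error handler: optionally trigger a register dump,
 * signal error on the fences of all requests that have bubble recovery
 * disabled, move recoverable requests back to the pending list, and
 * notify CRM (and UMD via a v4l2 message for fatal errors).
 */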
  2104. static int __cam_isp_ctx_handle_error(struct cam_isp_context *ctx_isp,
  2105. void *evt_data)
  2106. {
  2107. int rc = 0;
  2108. uint32_t i = 0;
  2109. bool found = 0;
  2110. struct cam_ctx_request *req = NULL;
  2111. struct cam_ctx_request *req_to_report = NULL;
  2112. struct cam_ctx_request *req_to_dump = NULL;
  2113. struct cam_ctx_request *req_temp;
  2114. struct cam_isp_ctx_req *req_isp = NULL;
  2115. struct cam_isp_ctx_req *req_isp_to_report = NULL;
  2116. struct cam_req_mgr_error_notify notify = {};
  2117. uint64_t error_request_id;
  2118. struct cam_hw_fence_map_entry *fence_map_out = NULL;
  2119. struct cam_req_mgr_message req_msg;
  2120. uint32_t evt_param;
  2121. struct cam_req_mgr_timer_notify timer;
  2122. struct cam_context *ctx = ctx_isp->base;
  2123. struct cam_isp_hw_error_event_data *error_event_data =
  2124. (struct cam_isp_hw_error_event_data *)evt_data;
  2125. uint32_t error_type = error_event_data->error_type;
  2126. CAM_DBG(CAM_ISP, "Enter error_type = %d", error_type);
  2127. if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_timer) {
  2128. timer.link_hdl = ctx->link_hdl;
  2129. timer.dev_hdl = ctx->dev_hdl;
  2130. timer.state = false;
  2131. ctx->ctx_crm_intf->notify_timer(&timer);
  2132. CAM_DBG(CAM_ISP, "Notify CRM to pause timer for ctx %u",
  2133. ctx->ctx_id);
  2134. }
  2135. if ((error_type == CAM_ISP_HW_ERROR_OVERFLOW) ||
  2136. (error_type == CAM_ISP_HW_ERROR_BUSIF_OVERFLOW) ||
  2137. (error_type == CAM_ISP_HW_ERROR_VIOLATION)) {
  2138. struct cam_hw_cmd_args hw_cmd_args;
  2139. memset(&hw_cmd_args, 0, sizeof(hw_cmd_args));
  2140. hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
  2141. hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_REG_DUMP_ON_ERROR;
  2142. rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
  2143. &hw_cmd_args);
  2144. if (rc) {
  2145. CAM_ERR(CAM_ISP, "Reg dump on error failed rc: %d", rc);
  2146. rc = 0;
  2147. }
  2148. }
  2149. evt_param = get_evt_param(error_type);
  2150. /*
  2151. * The error is likely caused by first request on the active list.
  2152. * If active list is empty check wait list (maybe error hit as soon
  2153. * as RUP and we handle error before RUP.
  2154. */
  2155. if (list_empty(&ctx->active_req_list)) {
  2156. CAM_DBG(CAM_ISP,
  2157. "handling error with no active request");
  2158. if (list_empty(&ctx->wait_req_list)) {
  2159. CAM_ERR_RATE_LIMIT(CAM_ISP,
  2160. "Error with no active/wait request");
  2161. goto end;
  2162. } else {
  2163. req_to_dump = list_first_entry(&ctx->wait_req_list,
  2164. struct cam_ctx_request, list);
  2165. }
  2166. } else {
  2167. req_to_dump = list_first_entry(&ctx->active_req_list,
  2168. struct cam_ctx_request, list);
  2169. }
  2170. req_isp = (struct cam_isp_ctx_req *) req_to_dump->req_priv;
  2171. if (error_event_data->enable_req_dump)
  2172. rc = cam_isp_ctx_dump_req(req_isp, 0, 0, NULL, false);
  2173. __cam_isp_ctx_update_state_monitor_array(ctx_isp,
  2174. CAM_ISP_STATE_CHANGE_TRIGGER_ERROR, req_to_dump->request_id);
  2175. list_for_each_entry_safe(req, req_temp,
  2176. &ctx->active_req_list, list) {
  2177. req_isp = (struct cam_isp_ctx_req *) req->req_priv;
  2178. if (!req_isp->bubble_report) {
  2179. for (i = 0; i < req_isp->num_fence_map_out; i++) {
  2180. fence_map_out =
  2181. &req_isp->fence_map_out[i];
  2182. CAM_ERR(CAM_ISP,
  2183. "req %llu, Sync fd 0x%x ctx %u",
  2184. req->request_id,
  2185. req_isp->fence_map_out[i].sync_id,
  2186. ctx->ctx_id);
  2187. if (req_isp->fence_map_out[i].sync_id != -1) {
  2188. rc = cam_sync_signal(
  2189. fence_map_out->sync_id,
  2190. CAM_SYNC_STATE_SIGNALED_ERROR,
  2191. evt_param);
  2192. fence_map_out->sync_id = -1;
  2193. }
  2194. }
  2195. list_del_init(&req->list);
  2196. list_add_tail(&req->list, &ctx->free_req_list);
  2197. ctx_isp->active_req_cnt--;
  2198. } else {
  2199. found = 1;
  2200. break;
  2201. }
  2202. }
  2203. if (found)
  2204. goto move_to_pending;
  2205. list_for_each_entry_safe(req, req_temp,
  2206. &ctx->wait_req_list, list) {
  2207. req_isp = (struct cam_isp_ctx_req *) req->req_priv;
  2208. if (!req_isp->bubble_report) {
  2209. for (i = 0; i < req_isp->num_fence_map_out; i++) {
  2210. fence_map_out =
  2211. &req_isp->fence_map_out[i];
  2212. CAM_ERR(CAM_ISP,
  2213. "req %llu, Sync fd 0x%x ctx %u",
  2214. req->request_id,
  2215. req_isp->fence_map_out[i].sync_id,
  2216. ctx->ctx_id);
  2217. if (req_isp->fence_map_out[i].sync_id != -1) {
  2218. rc = cam_sync_signal(
  2219. fence_map_out->sync_id,
  2220. CAM_SYNC_STATE_SIGNALED_ERROR,
  2221. evt_param);
  2222. fence_map_out->sync_id = -1;
  2223. }
  2224. }
  2225. list_del_init(&req->list);
  2226. list_add_tail(&req->list, &ctx->free_req_list);
  2227. ctx_isp->active_req_cnt--;
  2228. } else {
  2229. found = 1;
  2230. break;
  2231. }
  2232. }
  2233. move_to_pending:
  2234. /*
  2235. * If bubble recovery is enabled on any request we need to move that
  2236. * request and all the subsequent requests to the pending list.
  2237. * Note:
  2238. * We need to traverse the active list in reverse order and add
  2239. * to head of pending list.
  2240. * e.g. pending current state: 10, 11 | active current state: 8, 9
  2241. * intermittent for loop iteration- pending: 9, 10, 11 | active: 8
  2242. * final state - pending: 8, 9, 10, 11 | active: NULL
  2243. */
  2244. if (found) {
  2245. list_for_each_entry_safe_reverse(req, req_temp,
  2246. &ctx->active_req_list, list) {
  2247. req_isp = (struct cam_isp_ctx_req *) req->req_priv;
  2248. list_del_init(&req->list);
  2249. list_add(&req->list, &ctx->pending_req_list);
  2250. ctx_isp->active_req_cnt--;
  2251. }
  2252. list_for_each_entry_safe_reverse(req, req_temp,
  2253. &ctx->wait_req_list, list) {
  2254. req_isp = (struct cam_isp_ctx_req *) req->req_priv;
  2255. list_del_init(&req->list);
  2256. list_add(&req->list, &ctx->pending_req_list);
  2257. ctx_isp->active_req_cnt--;
  2258. }
  2259. }
  2260. end:
  2261. do {
  2262. if (list_empty(&ctx->pending_req_list)) {
  2263. error_request_id = ctx_isp->last_applied_req_id;
  2264. req_isp = NULL;
  2265. break;
  2266. }
  2267. req = list_first_entry(&ctx->pending_req_list,
  2268. struct cam_ctx_request, list);
  2269. req_isp = (struct cam_isp_ctx_req *) req->req_priv;
  2270. error_request_id = ctx_isp->last_applied_req_id;
  2271. if (req_isp->bubble_report) {
  2272. req_to_report = req;
  2273. req_isp_to_report = req_to_report->req_priv;
  2274. break;
  2275. }
  2276. for (i = 0; i < req_isp->num_fence_map_out; i++) {
  2277. if (req_isp->fence_map_out[i].sync_id != -1)
  2278. rc = cam_sync_signal(
  2279. req_isp->fence_map_out[i].sync_id,
  2280. CAM_SYNC_STATE_SIGNALED_ERROR,
  2281. evt_param);
  2282. req_isp->fence_map_out[i].sync_id = -1;
  2283. }
  2284. list_del_init(&req->list);
  2285. list_add_tail(&req->list, &ctx->free_req_list);
  2286. } while (req->request_id < ctx_isp->last_applied_req_id);
  2287. if (ctx_isp->offline_context)
  2288. goto exit;
  2289. if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_err) {
  2290. notify.link_hdl = ctx->link_hdl;
  2291. notify.dev_hdl = ctx->dev_hdl;
  2292. notify.req_id = error_request_id;
  2293. notify.error = CRM_KMD_ERR_FATAL;
  2294. if (req_isp_to_report && req_isp_to_report->bubble_report)
  2295. if (error_event_data->recovery_enabled)
  2296. notify.error = CRM_KMD_ERR_BUBBLE;
  2297. CAM_WARN(CAM_ISP,
  2298. "Notify CRM: req %lld, frame %lld ctx %u, error %d",
  2299. error_request_id, ctx_isp->frame_id, ctx->ctx_id,
  2300. notify.error);
  2301. ctx->ctx_crm_intf->notify_err(&notify);
  2302. /*
  2303. * Need to send error occurred in KMD
  2304. * This will help UMD to take necessary action
  2305. * and to dump relevant info
  2306. */
  2307. if (notify.error == CRM_KMD_ERR_FATAL) {
  2308. req_msg.session_hdl = ctx_isp->base->session_hdl;
  2309. req_msg.u.err_msg.device_hdl = ctx_isp->base->dev_hdl;
  2310. if (error_type == CAM_ISP_HW_ERROR_CSID_FATAL)
  2311. req_msg.u.err_msg.error_type =
  2312. CAM_REQ_MGR_ERROR_TYPE_FULL_RECOVERY;
  2313. else
  2314. req_msg.u.err_msg.error_type =
  2315. CAM_REQ_MGR_ERROR_TYPE_RECOVERY;
  2316. req_msg.u.err_msg.link_hdl = ctx_isp->base->link_hdl;
  2317. req_msg.u.err_msg.request_id = error_request_id;
  2318. req_msg.u.err_msg.resource_size = 0x0;
  2319. if (cam_req_mgr_notify_message(&req_msg,
  2320. V4L_EVENT_CAM_REQ_MGR_ERROR,
  2321. V4L_EVENT_CAM_REQ_MGR_EVENT))
  2322. CAM_ERR(CAM_ISP,
  2323. "Error in notifying the error time for req id:%lld ctx %u",
  2324. ctx_isp->last_applied_req_id,
  2325. ctx->ctx_id);
  2326. }
  2327. ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HW_ERROR;
  2328. } else {
		CAM_ERR_RATE_LIMIT(CAM_ISP,
			"Cannot notify ERROR to CRM for ctx %u",
			ctx->ctx_id);
  2332. rc = -EFAULT;
  2333. }
  2334. CAM_DBG(CAM_ISP, "Exit");
  2335. exit:
  2336. return rc;
  2337. }
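/*
 * FS2 handlers below serve the fast-shutter style sensor mode (what
 * the "fs2" prefix stands for exactly is an assumption here); they
 * mirror the regular activated handlers, but with CRM notification
 * and SOF timestamp reporting ordered slightly differently.
 */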
  2338. static int __cam_isp_ctx_fs2_sof_in_sof_state(
  2339. struct cam_isp_context *ctx_isp, void *evt_data)
  2340. {
  2341. int rc = 0;
  2342. struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
  2343. struct cam_ctx_request *req;
  2344. struct cam_context *ctx = ctx_isp->base;
  2345. struct cam_req_mgr_trigger_notify notify;
  2346. uint64_t request_id = 0;
	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid sof event data");
		return -EINVAL;
	}
  2351. ctx_isp->frame_id++;
  2352. ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
  2353. ctx_isp->boot_timestamp = sof_event_data->boot_time;
  2354. CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
  2355. ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
  2356. if (!(list_empty(&ctx->wait_req_list)))
  2357. goto end;
  2358. if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_trigger &&
  2359. ctx_isp->active_req_cnt <= 2) {
  2360. if (ctx_isp->subscribe_event & CAM_TRIGGER_POINT_SOF) {
  2361. notify.link_hdl = ctx->link_hdl;
  2362. notify.dev_hdl = ctx->dev_hdl;
  2363. notify.frame_id = ctx_isp->frame_id;
  2364. notify.trigger = CAM_TRIGGER_POINT_SOF;
  2365. notify.req_id = ctx_isp->req_info.last_bufdone_req_id;
  2366. notify.sof_timestamp_val = ctx_isp->sof_timestamp_val;
  2367. ctx->ctx_crm_intf->notify_trigger(&notify);
  2368. CAM_DBG(CAM_ISP, "Notify CRM SOF frame %lld",
  2369. ctx_isp->frame_id);
  2370. }
  2371. list_for_each_entry(req, &ctx->active_req_list, list) {
  2372. if (req->request_id > ctx_isp->reported_req_id) {
  2373. request_id = req->request_id;
  2374. ctx_isp->reported_req_id = request_id;
  2375. break;
  2376. }
  2377. }
  2378. __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
  2379. CAM_REQ_MGR_SOF_EVENT_SUCCESS);
  2380. __cam_isp_ctx_update_state_monitor_array(ctx_isp,
  2381. CAM_ISP_STATE_CHANGE_TRIGGER_SOF, request_id);
  2382. } else {
  2383. CAM_ERR_RATE_LIMIT(CAM_ISP, "Can not notify SOF to CRM");
  2384. rc = -EFAULT;
  2385. }
  2386. end:
  2387. return rc;
  2388. }
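/*
 * Note on the count check below: a buf done that completes a request
 * decrements active_req_cnt by exactly one, so comparing the saved
 * count against (active_req_cnt + 1) detects that this event finished
 * a request. If both the wait and active lists have drained, fall
 * back to the SOF substate and report the completed request's
 * timestamp.
 */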
  2389. static int __cam_isp_ctx_fs2_buf_done(struct cam_isp_context *ctx_isp,
  2390. void *evt_data)
  2391. {
  2392. int rc = 0;
  2393. struct cam_isp_hw_done_event_data *done =
  2394. (struct cam_isp_hw_done_event_data *) evt_data;
  2395. struct cam_context *ctx = ctx_isp->base;
  2396. int prev_active_req_cnt = 0;
  2397. int curr_req_id = 0;
  2398. struct cam_ctx_request *req;
  2399. prev_active_req_cnt = ctx_isp->active_req_cnt;
  2400. req = list_first_entry(&ctx->active_req_list,
  2401. struct cam_ctx_request, list);
  2402. if (req)
  2403. curr_req_id = req->request_id;
  2404. rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0);
  2405. if (prev_active_req_cnt == ctx_isp->active_req_cnt + 1) {
  2406. if (list_empty(&ctx->wait_req_list) &&
  2407. list_empty(&ctx->active_req_list)) {
  2408. CAM_DBG(CAM_ISP, "No request, move to SOF");
  2409. ctx_isp->substate_activated =
  2410. CAM_ISP_CTX_ACTIVATED_SOF;
  2411. if (ctx_isp->reported_req_id < curr_req_id) {
  2412. ctx_isp->reported_req_id = curr_req_id;
  2413. __cam_isp_ctx_send_sof_timestamp(ctx_isp,
  2414. curr_req_id,
  2415. CAM_REQ_MGR_SOF_EVENT_SUCCESS);
  2416. }
  2417. }
  2418. }
  2419. return rc;
  2420. }
  2421. static int __cam_isp_ctx_fs2_buf_done_in_epoch(struct cam_isp_context *ctx_isp,
  2422. void *evt_data)
  2423. {
  2424. int rc = 0;
  2425. rc = __cam_isp_ctx_fs2_buf_done(ctx_isp, evt_data);
  2426. return rc;
  2427. }
  2428. static int __cam_isp_ctx_fs2_buf_done_in_applied(
  2429. struct cam_isp_context *ctx_isp,
  2430. void *evt_data)
  2431. {
  2432. int rc = 0;
  2433. rc = __cam_isp_ctx_fs2_buf_done(ctx_isp, evt_data);
  2434. return rc;
  2435. }
  2436. static int __cam_isp_ctx_fs2_reg_upd_in_sof(struct cam_isp_context *ctx_isp,
  2437. void *evt_data)
  2438. {
  2439. int rc = 0;
  2440. struct cam_ctx_request *req = NULL;
  2441. struct cam_isp_ctx_req *req_isp;
  2442. struct cam_context *ctx = ctx_isp->base;
  2443. if (ctx->state != CAM_CTX_ACTIVATED && ctx_isp->frame_id > 1) {
  2444. CAM_DBG(CAM_ISP, "invalid RUP");
  2445. goto end;
  2446. }
	/*
	 * This handles the first register update: the initial
	 * programming triggers a reg_upd on the first frame.
	 */
  2451. if (!list_empty(&ctx->wait_req_list)) {
  2452. req = list_first_entry(&ctx->wait_req_list,
  2453. struct cam_ctx_request, list);
  2454. list_del_init(&req->list);
  2455. req_isp = (struct cam_isp_ctx_req *) req->req_priv;
  2456. if (req_isp->num_fence_map_out == req_isp->num_acked)
  2457. list_add_tail(&req->list, &ctx->free_req_list);
		else
			CAM_ERR(CAM_ISP,
				"received reg_upd in unexpected state");
  2461. }
  2462. if (req != NULL) {
  2463. __cam_isp_ctx_update_state_monitor_array(ctx_isp,
  2464. CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE,
  2465. req->request_id);
  2466. }
  2467. end:
  2468. return rc;
  2469. }
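/*
 * Reg update ack in applied state: move the waiting request to the
 * active list (or recycle it straight to the free list when it
 * carries no IO config) and advance the substate to EPOCH. The SOF
 * notification below runs only when the request has exactly one
 * output fence; that FS2 limits it to a single fence is inferred
 * from the num_fence_map_out check, not documented.
 */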
  2470. static int __cam_isp_ctx_fs2_reg_upd_in_applied_state(
  2471. struct cam_isp_context *ctx_isp, void *evt_data)
  2472. {
  2473. int rc = 0;
  2474. struct cam_ctx_request *req = NULL;
  2475. struct cam_context *ctx = ctx_isp->base;
  2476. struct cam_isp_ctx_req *req_isp;
  2477. struct cam_req_mgr_trigger_notify notify;
  2478. uint64_t request_id = 0;
  2479. if (list_empty(&ctx->wait_req_list)) {
  2480. CAM_ERR(CAM_ISP, "Reg upd ack with no waiting request");
  2481. goto end;
  2482. }
  2483. req = list_first_entry(&ctx->wait_req_list,
  2484. struct cam_ctx_request, list);
  2485. list_del_init(&req->list);
  2486. req_isp = (struct cam_isp_ctx_req *) req->req_priv;
  2487. if (req_isp->num_fence_map_out != 0) {
  2488. list_add_tail(&req->list, &ctx->active_req_list);
  2489. ctx_isp->active_req_cnt++;
  2490. CAM_DBG(CAM_REQ, "move request %lld to active list(cnt = %d)",
  2491. req->request_id, ctx_isp->active_req_cnt);
  2492. } else {
  2493. /* no io config, so the request is completed. */
  2494. list_add_tail(&req->list, &ctx->free_req_list);
  2495. }
	/*
	 * This function is called only from the applied and bubble-applied
	 * states, so it is safe to change the substate here.
	 */
  2500. ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;
  2501. if (req_isp->num_fence_map_out != 1)
  2502. goto end;
  2503. if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_trigger &&
  2504. ctx_isp->active_req_cnt <= 2) {
  2505. list_for_each_entry(req, &ctx->active_req_list, list) {
  2506. if (req->request_id > ctx_isp->reported_req_id) {
  2507. request_id = req->request_id;
  2508. ctx_isp->reported_req_id = request_id;
  2509. break;
  2510. }
  2511. }
  2512. __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
  2513. CAM_REQ_MGR_SOF_EVENT_SUCCESS);
  2514. if (ctx_isp->subscribe_event & CAM_TRIGGER_POINT_SOF) {
  2515. notify.link_hdl = ctx->link_hdl;
  2516. notify.dev_hdl = ctx->dev_hdl;
  2517. notify.frame_id = ctx_isp->frame_id;
  2518. notify.trigger = CAM_TRIGGER_POINT_SOF;
  2519. notify.req_id = ctx_isp->req_info.last_bufdone_req_id;
  2520. notify.sof_timestamp_val = ctx_isp->sof_timestamp_val;
  2521. ctx->ctx_crm_intf->notify_trigger(&notify);
  2522. CAM_DBG(CAM_ISP, "Notify CRM SOF frame %lld",
  2523. ctx_isp->frame_id);
  2524. }
  2525. } else {
  2526. CAM_ERR_RATE_LIMIT(CAM_ISP, "Can not notify SOF to CRM");
  2527. rc = -EFAULT;
  2528. }
  2529. CAM_DBG(CAM_ISP, "next Substate[%s]",
  2530. __cam_isp_ctx_substate_val_to_type(
  2531. ctx_isp->substate_activated));
  2532. end:
  2533. if (req != NULL && !rc) {
  2534. __cam_isp_ctx_update_state_monitor_array(ctx_isp,
  2535. CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE,
  2536. req->request_id);
  2537. }
  2538. return rc;
  2539. }
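/*
 * The IRQ dispatch tables below are indexed by the activated substate
 * (SOF, APPLIED, EPOCH, BUBBLE, BUBBLE APPLIED, HW ERROR, HALT).
 * Judging by the handler names, the irq_ops slots map, in order, to
 * the error, SOF, reg update, epoch, EOF and buf done events; a NULL
 * slot means the event is ignored in that substate. Dispatch then
 * looks roughly like this (sketch only, assuming an evt_id enumerated
 * in the slot order above):
 *
 *	irq_ops = &cam_isp_ctx_activated_state_machine_irq[substate];
 *	if (irq_ops->irq_ops[evt_id])
 *		rc = irq_ops->irq_ops[evt_id](ctx_isp, evt_data);
 */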
  2540. static struct cam_isp_ctx_irq_ops
  2541. cam_isp_ctx_activated_state_machine_irq[CAM_ISP_CTX_ACTIVATED_MAX] = {
  2542. /* SOF */
  2543. {
  2544. .irq_ops = {
  2545. __cam_isp_ctx_handle_error,
  2546. __cam_isp_ctx_sof_in_activated_state,
  2547. __cam_isp_ctx_reg_upd_in_sof,
  2548. __cam_isp_ctx_notify_sof_in_activated_state,
  2549. __cam_isp_ctx_notify_eof_in_activated_state,
  2550. NULL,
  2551. },
  2552. },
  2553. /* APPLIED */
  2554. {
  2555. .irq_ops = {
  2556. __cam_isp_ctx_handle_error,
  2557. __cam_isp_ctx_sof_in_activated_state,
  2558. __cam_isp_ctx_reg_upd_in_applied_state,
  2559. __cam_isp_ctx_epoch_in_applied,
  2560. __cam_isp_ctx_notify_eof_in_activated_state,
  2561. __cam_isp_ctx_buf_done_in_applied,
  2562. },
  2563. },
  2564. /* EPOCH */
  2565. {
  2566. .irq_ops = {
  2567. __cam_isp_ctx_handle_error,
  2568. __cam_isp_ctx_sof_in_epoch,
  2569. __cam_isp_ctx_reg_upd_in_epoch_bubble_state,
  2570. __cam_isp_ctx_notify_sof_in_activated_state,
  2571. __cam_isp_ctx_notify_eof_in_activated_state,
  2572. __cam_isp_ctx_buf_done_in_epoch,
  2573. },
  2574. },
  2575. /* BUBBLE */
  2576. {
  2577. .irq_ops = {
  2578. __cam_isp_ctx_handle_error,
  2579. __cam_isp_ctx_sof_in_activated_state,
  2580. __cam_isp_ctx_reg_upd_in_epoch_bubble_state,
  2581. __cam_isp_ctx_notify_sof_in_activated_state,
  2582. __cam_isp_ctx_notify_eof_in_activated_state,
  2583. __cam_isp_ctx_buf_done_in_bubble,
  2584. },
  2585. },
  2586. /* Bubble Applied */
  2587. {
  2588. .irq_ops = {
  2589. __cam_isp_ctx_handle_error,
  2590. __cam_isp_ctx_sof_in_activated_state,
  2591. __cam_isp_ctx_reg_upd_in_applied_state,
  2592. __cam_isp_ctx_epoch_in_bubble_applied,
  2593. NULL,
  2594. __cam_isp_ctx_buf_done_in_bubble_applied,
  2595. },
  2596. },
  2597. /* HW ERROR */
  2598. {
  2599. .irq_ops = {
  2600. NULL,
  2601. __cam_isp_ctx_sof_in_activated_state,
  2602. __cam_isp_ctx_reg_upd_in_hw_error,
  2603. NULL,
  2604. NULL,
  2605. NULL,
  2606. },
  2607. },
  2608. /* HALT */
  2609. {
  2610. },
  2611. };
  2612. static struct cam_isp_ctx_irq_ops
  2613. cam_isp_ctx_fs2_state_machine_irq[CAM_ISP_CTX_ACTIVATED_MAX] = {
  2614. /* SOF */
  2615. {
  2616. .irq_ops = {
  2617. __cam_isp_ctx_handle_error,
  2618. __cam_isp_ctx_fs2_sof_in_sof_state,
  2619. __cam_isp_ctx_fs2_reg_upd_in_sof,
  2620. __cam_isp_ctx_fs2_sof_in_sof_state,
  2621. __cam_isp_ctx_notify_eof_in_activated_state,
  2622. NULL,
  2623. },
  2624. },
  2625. /* APPLIED */
  2626. {
  2627. .irq_ops = {
  2628. __cam_isp_ctx_handle_error,
  2629. __cam_isp_ctx_sof_in_activated_state,
  2630. __cam_isp_ctx_fs2_reg_upd_in_applied_state,
  2631. __cam_isp_ctx_epoch_in_applied,
  2632. __cam_isp_ctx_notify_eof_in_activated_state,
  2633. __cam_isp_ctx_fs2_buf_done_in_applied,
  2634. },
  2635. },
  2636. /* EPOCH */
  2637. {
  2638. .irq_ops = {
  2639. __cam_isp_ctx_handle_error,
  2640. __cam_isp_ctx_sof_in_epoch,
  2641. __cam_isp_ctx_reg_upd_in_epoch_bubble_state,
  2642. __cam_isp_ctx_notify_sof_in_activated_state,
  2643. __cam_isp_ctx_notify_eof_in_activated_state,
  2644. __cam_isp_ctx_fs2_buf_done_in_epoch,
  2645. },
  2646. },
  2647. /* BUBBLE */
  2648. {
  2649. .irq_ops = {
  2650. __cam_isp_ctx_handle_error,
  2651. __cam_isp_ctx_sof_in_activated_state,
  2652. __cam_isp_ctx_reg_upd_in_epoch_bubble_state,
  2653. __cam_isp_ctx_notify_sof_in_activated_state,
  2654. __cam_isp_ctx_notify_eof_in_activated_state,
  2655. __cam_isp_ctx_buf_done_in_bubble,
  2656. },
  2657. },
  2658. /* Bubble Applied */
  2659. {
  2660. .irq_ops = {
  2661. __cam_isp_ctx_handle_error,
  2662. __cam_isp_ctx_sof_in_activated_state,
  2663. __cam_isp_ctx_reg_upd_in_applied_state,
  2664. __cam_isp_ctx_epoch_in_bubble_applied,
  2665. NULL,
  2666. __cam_isp_ctx_buf_done_in_bubble_applied,
  2667. },
  2668. },
  2669. /* HW ERROR */
  2670. {
  2671. .irq_ops = {
  2672. NULL,
  2673. __cam_isp_ctx_sof_in_activated_state,
  2674. __cam_isp_ctx_reg_upd_in_hw_error,
  2675. NULL,
  2676. NULL,
  2677. NULL,
  2678. },
  2679. },
  2680. /* HALT */
  2681. {
  2682. },
  2683. };
  2684. static struct cam_isp_ctx_irq_ops
  2685. cam_isp_ctx_offline_state_machine_irq[CAM_ISP_CTX_ACTIVATED_MAX] = {
  2686. /* SOF */
  2687. {
  2688. .irq_ops = {
  2689. __cam_isp_ctx_handle_error,
  2690. NULL,
  2691. NULL,
  2692. NULL,
  2693. NULL,
  2694. NULL,
  2695. },
  2696. },
  2697. /* APPLIED */
  2698. {
  2699. .irq_ops = {
  2700. __cam_isp_ctx_handle_error,
  2701. __cam_isp_ctx_sof_in_activated_state,
  2702. __cam_isp_ctx_reg_upd_in_applied_state,
  2703. NULL,
  2704. NULL,
  2705. __cam_isp_ctx_buf_done_in_applied,
  2706. },
  2707. },
  2708. /* EPOCH */
  2709. {
  2710. .irq_ops = {
  2711. __cam_isp_ctx_handle_error,
  2712. NULL,
  2713. NULL,
  2714. __cam_isp_ctx_offline_epoch_in_activated_state,
  2715. NULL,
  2716. __cam_isp_ctx_buf_done_in_epoch,
  2717. },
  2718. },
  2719. /* BUBBLE */
  2720. {
  2721. },
  2722. /* Bubble Applied */
  2723. {
  2724. },
  2725. /* HW ERROR */
  2726. {
  2727. .irq_ops = {
  2728. NULL,
  2729. __cam_isp_ctx_sof_in_activated_state,
  2730. __cam_isp_ctx_reg_upd_in_hw_error,
  2731. NULL,
  2732. NULL,
  2733. NULL,
  2734. },
  2735. },
  2736. /* HALT */
  2737. {
  2738. },
  2739. };
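/*
 * Core apply path. The request at the head of the pending list is
 * programmed to the HW and, on success, moved to the wait list:
 *
 *	pending -> (hw_config ok) -> wait -> (reg update) -> active
 *
 * The apply is rejected while a bubble is being processed, when two
 * requests are already outstanding, or when the id asked for does not
 * match the head of the pending list. An -EALREADY from hw_config is
 * treated as a bubble and the request goes straight to the active
 * list.
 */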
  2740. static int __cam_isp_ctx_apply_req_in_activated_state(
  2741. struct cam_context *ctx, struct cam_req_mgr_apply_request *apply,
  2742. enum cam_isp_ctx_activated_substate next_state)
  2743. {
  2744. int rc = 0;
  2745. struct cam_ctx_request *req;
  2746. struct cam_ctx_request *active_req = NULL;
  2747. struct cam_isp_ctx_req *req_isp;
  2748. struct cam_isp_ctx_req *active_req_isp;
  2749. struct cam_isp_context *ctx_isp = NULL;
  2750. struct cam_hw_config_args cfg = {0};
  2751. ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
  2752. if (apply->re_apply)
  2753. if (apply->request_id <= ctx_isp->last_applied_req_id) {
  2754. CAM_INFO_RATE_LIMIT(CAM_ISP,
  2755. "ctx_id:%d Trying to reapply the same request %llu again",
  2756. ctx->ctx_id,
  2757. apply->request_id);
  2758. return 0;
  2759. }
  2760. if (list_empty(&ctx->pending_req_list)) {
  2761. CAM_ERR_RATE_LIMIT(CAM_ISP,
  2762. "ctx_id:%d No available request for Apply id %lld",
  2763. ctx->ctx_id,
  2764. apply->request_id);
  2765. rc = -EFAULT;
  2766. goto end;
  2767. }
	/*
	 * When the pipeline has an issue, requests can queue up in it.
	 * In that case, reject any additional request. The maximum number
	 * of requests allowed to be outstanding is 2.
	 */
  2774. if (atomic_read(&ctx_isp->process_bubble)) {
  2775. CAM_INFO_RATE_LIMIT(CAM_ISP,
  2776. "ctx_id:%d Processing bubble cannot apply Request Id %llu",
  2777. ctx->ctx_id,
  2778. apply->request_id);
  2779. rc = -EAGAIN;
  2780. goto end;
  2781. }
  2782. spin_lock_bh(&ctx->lock);
  2783. req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
  2784. list);
  2785. spin_unlock_bh(&ctx->lock);
	/*
	 * Check whether the request id matches the tip of the pending
	 * list; if not, we are in the middle of error handling and must
	 * reject this apply.
	 */
  2790. if (req->request_id != apply->request_id) {
  2791. CAM_ERR_RATE_LIMIT(CAM_ISP,
  2792. "ctx_id:%d Invalid Request Id asking %llu existing %llu",
  2793. ctx->ctx_id,
  2794. apply->request_id, req->request_id);
  2795. rc = -EFAULT;
  2796. goto end;
  2797. }
  2798. CAM_DBG(CAM_REQ, "Apply request %lld in Substate[%s] ctx %u",
  2799. req->request_id,
  2800. __cam_isp_ctx_substate_val_to_type(ctx_isp->substate_activated),
  2801. ctx->ctx_id);
  2802. req_isp = (struct cam_isp_ctx_req *) req->req_priv;
  2803. if (ctx_isp->active_req_cnt >= 2) {
  2804. CAM_WARN_RATE_LIMIT(CAM_ISP,
  2805. "Reject apply request (id %lld) due to congestion(cnt = %d) ctx %u",
  2806. req->request_id,
  2807. ctx_isp->active_req_cnt,
  2808. ctx->ctx_id);
  2809. spin_lock_bh(&ctx->lock);
  2810. if (!list_empty(&ctx->active_req_list))
  2811. active_req = list_first_entry(&ctx->active_req_list,
  2812. struct cam_ctx_request, list);
		else
			CAM_ERR_RATE_LIMIT(CAM_ISP,
				"WARNING: active_req_cnt = %d but active list is empty, should not happen",
				ctx_isp->active_req_cnt);
  2817. spin_unlock_bh(&ctx->lock);
  2818. if (active_req) {
  2819. active_req_isp =
  2820. (struct cam_isp_ctx_req *) active_req->req_priv;
  2821. __cam_isp_ctx_handle_buf_done_fail_log(
  2822. active_req->request_id, active_req_isp,
  2823. ctx_isp->isp_device_type);
  2824. }
  2825. rc = -EFAULT;
  2826. goto end;
  2827. }
  2828. req_isp->bubble_report = apply->report_if_bubble;
  2829. cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
  2830. cfg.request_id = req->request_id;
  2831. cfg.hw_update_entries = req_isp->cfg;
  2832. cfg.num_hw_update_entries = req_isp->num_cfg;
  2833. cfg.priv = &req_isp->hw_update_data;
  2834. cfg.init_packet = 0;
  2835. cfg.reapply = req_isp->reapply;
  2836. cfg.cdm_reset_before_apply = req_isp->cdm_reset_before_apply;
  2837. atomic_set(&ctx_isp->apply_in_progress, 1);
  2838. rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
  2839. if (!rc) {
  2840. spin_lock_bh(&ctx->lock);
  2841. ctx_isp->substate_activated = next_state;
  2842. ctx_isp->last_applied_req_id = apply->request_id;
  2843. list_del_init(&req->list);
  2844. list_add_tail(&req->list, &ctx->wait_req_list);
  2845. CAM_DBG(CAM_ISP, "new substate Substate[%s], applied req %lld",
  2846. __cam_isp_ctx_substate_val_to_type(next_state),
  2847. ctx_isp->last_applied_req_id);
  2848. spin_unlock_bh(&ctx->lock);
  2849. __cam_isp_ctx_update_state_monitor_array(ctx_isp,
  2850. CAM_ISP_STATE_CHANGE_TRIGGER_APPLIED,
  2851. req->request_id);
  2852. __cam_isp_ctx_update_event_record(ctx_isp,
  2853. CAM_ISP_CTX_EVENT_APPLY, req);
  2854. } else if (rc == -EALREADY) {
  2855. spin_lock_bh(&ctx->lock);
  2856. req_isp->bubble_detected = true;
  2857. req_isp->cdm_reset_before_apply = false;
  2858. atomic_set(&ctx_isp->process_bubble, 1);
  2859. list_del_init(&req->list);
  2860. list_add(&req->list, &ctx->active_req_list);
  2861. ctx_isp->active_req_cnt++;
  2862. spin_unlock_bh(&ctx->lock);
  2863. CAM_DBG(CAM_REQ,
  2864. "move request %lld to active list(cnt = %d), ctx %u",
  2865. req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);
	} else {
		CAM_ERR_RATE_LIMIT(CAM_ISP,
			"ctx_id:%d, cannot apply the configuration for req %lld, rc %d",
			ctx->ctx_id, apply->request_id, rc);
	}
  2871. atomic_set(&ctx_isp->apply_in_progress, 0);
  2872. end:
  2873. return rc;
  2874. }
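/*
 * The three thin wrappers below differ only in the substate they pass
 * to __cam_isp_ctx_apply_req_in_activated_state: applies from SOF and
 * EPOCH land in APPLIED, while an apply from BUBBLE lands in
 * BUBBLE_APPLIED.
 */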
  2875. static int __cam_isp_ctx_apply_req_in_sof(
  2876. struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
  2877. {
  2878. int rc = 0;
  2879. struct cam_isp_context *ctx_isp =
  2880. (struct cam_isp_context *) ctx->ctx_priv;
  2881. CAM_DBG(CAM_ISP, "current Substate[%s]",
  2882. __cam_isp_ctx_substate_val_to_type(
  2883. ctx_isp->substate_activated));
  2884. rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
  2885. CAM_ISP_CTX_ACTIVATED_APPLIED);
  2886. CAM_DBG(CAM_ISP, "new Substate[%s]",
  2887. __cam_isp_ctx_substate_val_to_type(
  2888. ctx_isp->substate_activated));
  2889. if (rc)
  2890. CAM_DBG(CAM_ISP, "Apply failed in Substate[%s], rc %d",
  2891. __cam_isp_ctx_substate_val_to_type(
  2892. ctx_isp->substate_activated), rc);
  2893. return rc;
  2894. }
  2895. static int __cam_isp_ctx_apply_req_in_epoch(
  2896. struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
  2897. {
  2898. int rc = 0;
  2899. struct cam_isp_context *ctx_isp =
  2900. (struct cam_isp_context *) ctx->ctx_priv;
  2901. CAM_DBG(CAM_ISP, "current Substate[%s]",
  2902. __cam_isp_ctx_substate_val_to_type(
  2903. ctx_isp->substate_activated));
  2904. rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
  2905. CAM_ISP_CTX_ACTIVATED_APPLIED);
  2906. CAM_DBG(CAM_ISP, "new Substate[%s]",
  2907. __cam_isp_ctx_substate_val_to_type(
  2908. ctx_isp->substate_activated));
  2909. if (rc)
  2910. CAM_DBG(CAM_ISP, "Apply failed in Substate[%s], rc %d",
  2911. __cam_isp_ctx_substate_val_to_type(
  2912. ctx_isp->substate_activated), rc);
  2913. return rc;
  2914. }
  2915. static int __cam_isp_ctx_apply_req_in_bubble(
  2916. struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
  2917. {
  2918. int rc = 0;
  2919. struct cam_isp_context *ctx_isp =
  2920. (struct cam_isp_context *) ctx->ctx_priv;
  2921. CAM_DBG(CAM_ISP, "current Substate[%s]",
  2922. __cam_isp_ctx_substate_val_to_type(
  2923. ctx_isp->substate_activated));
  2924. rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
  2925. CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED);
  2926. CAM_DBG(CAM_ISP, "new Substate[%s]",
  2927. __cam_isp_ctx_substate_val_to_type(
  2928. ctx_isp->substate_activated));
  2929. if (rc)
  2930. CAM_DBG(CAM_ISP, "Apply failed in Substate[%s], rc %d",
  2931. __cam_isp_ctx_substate_val_to_type(
  2932. ctx_isp->substate_activated), rc);
  2933. return rc;
  2934. }
  2935. static int __cam_isp_ctx_apply_default_req_settings(
  2936. struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
  2937. {
  2938. int rc = 0;
  2939. struct cam_isp_context *isp_ctx =
  2940. (struct cam_isp_context *) ctx->ctx_priv;
  2941. struct cam_hw_cmd_args hw_cmd_args;
  2942. struct cam_isp_hw_cmd_args isp_hw_cmd_args;
  2943. hw_cmd_args.ctxt_to_hw_map = isp_ctx->hw_ctx;
  2944. hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
  2945. isp_hw_cmd_args.cmd_type =
  2946. CAM_ISP_HW_MGR_CMD_PROG_DEFAULT_CFG;
  2947. hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
  2948. rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
  2949. &hw_cmd_args);
  2950. if (rc)
  2951. CAM_ERR(CAM_ISP,
  2952. "Failed to apply default settings rc %d", rc);
  2953. else
  2954. CAM_DBG(CAM_ISP, "Applied default settings rc %d", rc);
  2955. return rc;
  2956. }
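/*
 * Request-info dump layout, as written by the code below: one dump
 * header tagged "ISP_OUT_FENCE:" followed by a (resource_handle,
 * sync_id) int32 pair per output fence, e.g. for two fences:
 *
 *	[hdr][rsrc0][sync0][rsrc1][sync1]
 *
 * after which cam_isp_ctx_dump_req() appends the request payload at
 * the updated offset.
 */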
  2957. static int __cam_isp_ctx_dump_req_info(
  2958. struct cam_context *ctx,
  2959. struct cam_ctx_request *req,
  2960. uintptr_t cpu_addr,
  2961. size_t buf_len,
  2962. size_t *offset)
  2963. {
  2964. int i, rc;
  2965. uint8_t *dst;
  2966. int32_t *addr, *start;
  2967. uint32_t min_len;
  2968. size_t remain_len;
  2969. struct cam_isp_ctx_req *req_isp;
  2970. struct cam_isp_context *ctx_isp;
  2971. struct cam_isp_context_dump_header *hdr;
  2972. if (!req || !ctx || !offset || !cpu_addr || !buf_len) {
  2973. CAM_ERR(CAM_ISP, "Invalid parameters %pK %pK %pK %zu",
  2974. req, ctx, offset, buf_len);
  2975. return -EINVAL;
  2976. }
  2977. req_isp = (struct cam_isp_ctx_req *)req->req_priv;
  2978. ctx_isp = (struct cam_isp_context *)ctx->ctx_priv;
  2979. if (buf_len <= *offset) {
  2980. CAM_WARN(CAM_ISP, "Dump buffer overshoot len %zu offset %zu",
  2981. buf_len, *offset);
  2982. return -ENOSPC;
  2983. }
  2984. remain_len = buf_len - *offset;
  2985. min_len = sizeof(struct cam_isp_context_dump_header) +
  2986. (CAM_ISP_CTX_DUMP_REQUEST_NUM_WORDS *
  2987. req_isp->num_fence_map_out *
  2988. sizeof(int32_t));
  2989. if (remain_len < min_len) {
  2990. CAM_WARN(CAM_ISP, "Dump buffer exhaust remain %zu min %u",
  2991. remain_len, min_len);
  2992. return -ENOSPC;
  2993. }
  2994. dst = (uint8_t *)cpu_addr + *offset;
  2995. hdr = (struct cam_isp_context_dump_header *)dst;
  2996. hdr->word_size = sizeof(int32_t);
  2997. scnprintf(hdr->tag, CAM_ISP_CONTEXT_DUMP_TAG_MAX_LEN,
  2998. "ISP_OUT_FENCE:");
  2999. addr = (int32_t *)(dst + sizeof(struct cam_isp_context_dump_header));
  3000. start = addr;
  3001. for (i = 0; i < req_isp->num_fence_map_out; i++) {
  3002. *addr++ = req_isp->fence_map_out[i].resource_handle;
  3003. *addr++ = req_isp->fence_map_out[i].sync_id;
  3004. }
  3005. hdr->size = hdr->word_size * (addr - start);
  3006. *offset += hdr->size + sizeof(struct cam_isp_context_dump_header);
  3007. rc = cam_isp_ctx_dump_req(req_isp, cpu_addr, buf_len,
  3008. offset, true);
  3009. return rc;
  3010. }
  3011. static int __cam_isp_ctx_dump_in_top_state(
  3012. struct cam_context *ctx,
  3013. struct cam_req_mgr_dump_info *dump_info)
  3014. {
  3015. int rc = 0;
  3016. bool dump_only_event_record = false;
  3017. size_t buf_len;
  3018. size_t remain_len;
  3019. uint8_t *dst;
  3020. ktime_t cur_time;
  3021. uint32_t min_len;
  3022. uint64_t diff;
  3023. uint64_t *addr, *start;
  3024. uintptr_t cpu_addr;
  3025. struct timespec64 ts;
  3026. struct cam_isp_context *ctx_isp;
  3027. struct cam_ctx_request *req = NULL;
  3028. struct cam_isp_ctx_req *req_isp;
  3029. struct cam_ctx_request *req_temp;
  3030. struct cam_hw_dump_args dump_args;
  3031. struct cam_isp_context_dump_header *hdr;
  3032. spin_lock_bh(&ctx->lock);
  3033. list_for_each_entry_safe(req, req_temp,
  3034. &ctx->active_req_list, list) {
  3035. if (req->request_id == dump_info->req_id) {
  3036. CAM_INFO(CAM_ISP, "isp dump active list req: %lld",
  3037. dump_info->req_id);
  3038. goto hw_dump;
  3039. }
  3040. }
  3041. list_for_each_entry_safe(req, req_temp,
  3042. &ctx->wait_req_list, list) {
  3043. if (req->request_id == dump_info->req_id) {
  3044. CAM_INFO(CAM_ISP, "isp dump wait list req: %lld",
  3045. dump_info->req_id);
  3046. goto hw_dump;
  3047. }
  3048. }
  3049. spin_unlock_bh(&ctx->lock);
  3050. return rc;
  3051. hw_dump:
  3052. rc = cam_mem_get_cpu_buf(dump_info->buf_handle,
  3053. &cpu_addr, &buf_len);
  3054. if (rc) {
  3055. CAM_ERR(CAM_ISP, "Invalid handle %u rc %d",
  3056. dump_info->buf_handle, rc);
  3057. spin_unlock_bh(&ctx->lock);
  3058. return rc;
  3059. }
  3060. if (buf_len <= dump_info->offset) {
  3061. spin_unlock_bh(&ctx->lock);
  3062. CAM_WARN(CAM_ISP, "Dump buffer overshoot len %zu offset %zu",
  3063. buf_len, dump_info->offset);
  3064. return -ENOSPC;
  3065. }
  3066. remain_len = buf_len - dump_info->offset;
  3067. min_len = sizeof(struct cam_isp_context_dump_header) +
  3068. (CAM_ISP_CTX_DUMP_NUM_WORDS * sizeof(uint64_t));
  3069. if (remain_len < min_len) {
  3070. spin_unlock_bh(&ctx->lock);
  3071. CAM_WARN(CAM_ISP, "Dump buffer exhaust remain %zu min %u",
  3072. remain_len, min_len);
  3073. return -ENOSPC;
  3074. }
  3075. ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
  3076. req_isp = (struct cam_isp_ctx_req *) req->req_priv;
	cur_time = ktime_get();
	/* Elapsed time since this request was applied */
	diff = ktime_us_delta(cur_time,
		req_isp->event_timestamp[CAM_ISP_CTX_EVENT_APPLY]);
	if (diff < CAM_ISP_CTX_RESPONSE_TIME_THRESHOLD) {
  3082. CAM_INFO(CAM_ISP, "req %lld found no error",
  3083. req->request_id);
  3084. dump_only_event_record = true;
  3085. }
  3086. dst = (uint8_t *)cpu_addr + dump_info->offset;
  3087. hdr = (struct cam_isp_context_dump_header *)dst;
  3088. scnprintf(hdr->tag, CAM_ISP_CONTEXT_DUMP_TAG_MAX_LEN,
  3089. "ISP_CTX_DUMP:");
  3090. hdr->word_size = sizeof(uint64_t);
  3091. addr = (uint64_t *)(dst +
  3092. sizeof(struct cam_isp_context_dump_header));
  3093. start = addr;
  3094. *addr++ = req->request_id;
  3095. ts = ktime_to_timespec64(
  3096. req_isp->event_timestamp[CAM_ISP_CTX_EVENT_APPLY]);
  3097. *addr++ = ts.tv_sec;
  3098. *addr++ = ts.tv_nsec/NSEC_PER_USEC;
  3099. ts = ktime_to_timespec64(cur_time);
  3100. *addr++ = ts.tv_sec;
  3101. *addr++ = ts.tv_nsec/NSEC_PER_USEC;
  3102. hdr->size = hdr->word_size * (addr - start);
  3103. dump_info->offset += hdr->size +
  3104. sizeof(struct cam_isp_context_dump_header);
  3105. rc = __cam_isp_ctx_dump_event_record(ctx_isp, cpu_addr,
  3106. buf_len, &dump_info->offset);
  3107. if (rc) {
  3108. CAM_ERR(CAM_ISP, "Dump event fail %lld",
  3109. req->request_id);
  3110. spin_unlock_bh(&ctx->lock);
  3111. return rc;
  3112. }
  3113. if (dump_only_event_record) {
  3114. spin_unlock_bh(&ctx->lock);
  3115. return rc;
  3116. }
  3117. rc = __cam_isp_ctx_dump_req_info(ctx, req, cpu_addr,
  3118. buf_len, &dump_info->offset);
  3119. if (rc) {
  3120. CAM_ERR(CAM_ISP, "Dump Req info fail %lld",
  3121. req->request_id);
  3122. spin_unlock_bh(&ctx->lock);
  3123. return rc;
  3124. }
  3125. spin_unlock_bh(&ctx->lock);
  3126. if (ctx->hw_mgr_intf->hw_dump) {
  3127. dump_args.offset = dump_info->offset;
  3128. dump_args.request_id = dump_info->req_id;
  3129. dump_args.buf_handle = dump_info->buf_handle;
  3130. dump_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
  3131. rc = ctx->hw_mgr_intf->hw_dump(
  3132. ctx->hw_mgr_intf->hw_mgr_priv,
  3133. &dump_args);
  3134. dump_info->offset = dump_args.offset;
  3135. }
  3136. return rc;
  3137. }
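/*
 * Generic flush helper. Matching requests are first collected on a
 * local flush_list (all of them, unless the flush type is CANCEL_REQ,
 * which stops at the first id match); each output fence is then
 * signalled with CAM_SYNC_STATE_SIGNALED_CANCEL and the request is
 * recycled to the free list.
 */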
  3138. static int __cam_isp_ctx_flush_req(struct cam_context *ctx,
  3139. struct list_head *req_list, struct cam_req_mgr_flush_request *flush_req)
  3140. {
  3141. int i, rc, tmp = 0;
  3142. uint32_t cancel_req_id_found = 0;
  3143. struct cam_ctx_request *req;
  3144. struct cam_ctx_request *req_temp;
  3145. struct cam_isp_ctx_req *req_isp;
  3146. struct list_head flush_list;
  3147. struct cam_isp_context *ctx_isp = NULL;
  3148. ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
  3149. INIT_LIST_HEAD(&flush_list);
  3150. if (list_empty(req_list)) {
  3151. CAM_DBG(CAM_ISP, "request list is empty");
  3152. if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
  3153. CAM_ERR(CAM_ISP, "no request to cancel");
  3154. return -EINVAL;
  3155. } else
  3156. return 0;
  3157. }
  3158. CAM_DBG(CAM_REQ, "Flush [%u] in progress for req_id %llu",
  3159. flush_req->type, flush_req->req_id);
  3160. list_for_each_entry_safe(req, req_temp, req_list, list) {
  3161. if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
  3162. if (req->request_id != flush_req->req_id) {
  3163. continue;
  3164. } else {
  3165. list_del_init(&req->list);
  3166. list_add_tail(&req->list, &flush_list);
  3167. cancel_req_id_found = 1;
  3168. __cam_isp_ctx_update_state_monitor_array(
  3169. ctx_isp,
  3170. CAM_ISP_STATE_CHANGE_TRIGGER_FLUSH,
  3171. req->request_id);
  3172. break;
  3173. }
  3174. }
  3175. list_del_init(&req->list);
  3176. list_add_tail(&req->list, &flush_list);
  3177. __cam_isp_ctx_update_state_monitor_array(ctx_isp,
  3178. CAM_ISP_STATE_CHANGE_TRIGGER_FLUSH, req->request_id);
  3179. }
  3180. if (list_empty(&flush_list)) {
		/*
		 * The request may never have reached the KMD, since the
		 * UMD can already skip requests at the CSL layer.
		 */
  3185. CAM_INFO(CAM_ISP,
  3186. "flush list is empty, flush type %d for req %llu",
  3187. flush_req->type, flush_req->req_id);
  3188. return 0;
  3189. }
  3190. list_for_each_entry_safe(req, req_temp, &flush_list, list) {
  3191. req_isp = (struct cam_isp_ctx_req *) req->req_priv;
  3192. for (i = 0; i < req_isp->num_fence_map_out; i++) {
  3193. if (req_isp->fence_map_out[i].sync_id != -1) {
  3194. CAM_DBG(CAM_ISP, "Flush req 0x%llx, fence %d",
  3195. req->request_id,
  3196. req_isp->fence_map_out[i].sync_id);
  3197. rc = cam_sync_signal(
  3198. req_isp->fence_map_out[i].sync_id,
  3199. CAM_SYNC_STATE_SIGNALED_CANCEL,
  3200. CAM_SYNC_ISP_EVENT_FLUSH);
  3201. if (rc) {
  3202. tmp = req_isp->fence_map_out[i].sync_id;
  3203. CAM_ERR_RATE_LIMIT(CAM_ISP,
  3204. "signal fence %d failed", tmp);
  3205. }
  3206. req_isp->fence_map_out[i].sync_id = -1;
  3207. }
  3208. }
  3209. req_isp->reapply = false;
  3210. req_isp->cdm_reset_before_apply = false;
  3211. list_del_init(&req->list);
  3212. list_add_tail(&req->list, &ctx->free_req_list);
  3213. }
  3214. return 0;
  3215. }
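/*
 * Flush in the top state. For FLUSH_TYPE_ALL the sequence below is:
 * flush pending -> mark the ctx FLUSHED/HALT -> reg dump -> stop HW
 * immediately -> pause the CRM timer -> flush the wait and active
 * lists -> reset HW. Stopping the HW before flushing wait/active
 * presumably keeps buf done IRQs from racing the fence signalling.
 */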
  3216. static int __cam_isp_ctx_flush_req_in_top_state(
  3217. struct cam_context *ctx,
  3218. struct cam_req_mgr_flush_request *flush_req)
  3219. {
  3220. int rc = 0;
  3221. struct cam_isp_context *ctx_isp;
  3222. struct cam_isp_stop_args stop_isp;
  3223. struct cam_hw_stop_args stop_args;
  3224. struct cam_hw_reset_args reset_args;
  3225. struct cam_hw_cmd_args hw_cmd_args;
  3226. struct cam_req_mgr_timer_notify timer;
  3227. ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
  3228. CAM_DBG(CAM_ISP, "Flush pending list");
  3229. spin_lock_bh(&ctx->lock);
  3230. rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
  3231. spin_unlock_bh(&ctx->lock);
  3232. if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
  3233. if (ctx->state <= CAM_CTX_READY) {
  3234. ctx->state = CAM_CTX_ACQUIRED;
  3235. goto end;
  3236. }
  3237. spin_lock_bh(&ctx->lock);
  3238. ctx->state = CAM_CTX_FLUSHED;
  3239. ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;
  3240. spin_unlock_bh(&ctx->lock);
  3241. CAM_INFO(CAM_ISP, "Last request id to flush is %lld, ctx_id:%d",
  3242. flush_req->req_id, ctx->ctx_id);
  3243. ctx->last_flush_req = flush_req->req_id;
  3244. memset(&hw_cmd_args, 0, sizeof(hw_cmd_args));
  3245. hw_cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
  3246. hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_REG_DUMP_ON_FLUSH;
  3247. rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
  3248. &hw_cmd_args);
  3249. if (rc) {
  3250. CAM_ERR(CAM_ISP, "Reg dump on flush failed rc: %d", rc);
  3251. rc = 0;
  3252. }
  3253. stop_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
  3254. stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_IMMEDIATELY;
  3255. stop_isp.stop_only = true;
  3256. stop_args.args = (void *)&stop_isp;
  3257. rc = ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
  3258. &stop_args);
  3259. if (rc)
  3260. CAM_ERR(CAM_ISP, "Failed to stop HW in Flush rc: %d",
  3261. rc);
  3262. CAM_INFO(CAM_ISP, "Stop HW complete. Reset HW next.");
  3263. CAM_DBG(CAM_ISP, "Flush wait and active lists");
  3264. if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_timer) {
  3265. timer.link_hdl = ctx->link_hdl;
  3266. timer.dev_hdl = ctx->dev_hdl;
  3267. timer.state = false;
  3268. ctx->ctx_crm_intf->notify_timer(&timer);
  3269. }
  3270. spin_lock_bh(&ctx->lock);
  3271. if (!list_empty(&ctx->wait_req_list))
  3272. rc = __cam_isp_ctx_flush_req(ctx, &ctx->wait_req_list,
  3273. flush_req);
  3274. if (!list_empty(&ctx->active_req_list))
  3275. rc = __cam_isp_ctx_flush_req(ctx, &ctx->active_req_list,
  3276. flush_req);
  3277. ctx_isp->active_req_cnt = 0;
  3278. spin_unlock_bh(&ctx->lock);
  3279. reset_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
  3280. rc = ctx->hw_mgr_intf->hw_reset(ctx->hw_mgr_intf->hw_mgr_priv,
  3281. &reset_args);
  3282. if (rc)
  3283. CAM_ERR(CAM_ISP, "Failed to reset HW rc: %d", rc);
  3284. ctx_isp->init_received = false;
  3285. }
  3286. end:
  3287. ctx_isp->bubble_frame_cnt = 0;
  3288. atomic_set(&ctx_isp->process_bubble, 0);
  3289. atomic_set(&ctx_isp->rxd_epoch, 0);
  3290. return rc;
  3291. }
  3292. static int __cam_isp_ctx_flush_req_in_ready(
  3293. struct cam_context *ctx,
  3294. struct cam_req_mgr_flush_request *flush_req)
  3295. {
  3296. int rc = 0;
  3297. CAM_DBG(CAM_ISP, "try to flush pending list");
  3298. spin_lock_bh(&ctx->lock);
  3299. rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
  3300. /* if nothing is in pending req list, change state to acquire */
  3301. if (list_empty(&ctx->pending_req_list))
  3302. ctx->state = CAM_CTX_ACQUIRED;
  3303. spin_unlock_bh(&ctx->lock);
  3304. trace_cam_context_state("ISP", ctx);
  3305. CAM_DBG(CAM_ISP, "Flush request in ready state. next state %d",
  3306. ctx->state);
  3307. return rc;
  3308. }
  3309. static struct cam_ctx_ops
  3310. cam_isp_ctx_activated_state_machine[CAM_ISP_CTX_ACTIVATED_MAX] = {
  3311. /* SOF */
  3312. {
  3313. .ioctl_ops = {},
  3314. .crm_ops = {
  3315. .apply_req = __cam_isp_ctx_apply_req_in_sof,
  3316. .notify_frame_skip =
  3317. __cam_isp_ctx_apply_default_req_settings,
  3318. },
  3319. .irq_ops = NULL,
  3320. },
  3321. /* APPLIED */
  3322. {
  3323. .ioctl_ops = {},
  3324. .crm_ops = {},
  3325. .irq_ops = NULL,
  3326. },
  3327. /* EPOCH */
  3328. {
  3329. .ioctl_ops = {},
  3330. .crm_ops = {
  3331. .apply_req = __cam_isp_ctx_apply_req_in_epoch,
  3332. .notify_frame_skip =
  3333. __cam_isp_ctx_apply_default_req_settings,
  3334. },
  3335. .irq_ops = NULL,
  3336. },
  3337. /* BUBBLE */
  3338. {
  3339. .ioctl_ops = {},
  3340. .crm_ops = {
  3341. .apply_req = __cam_isp_ctx_apply_req_in_bubble,
  3342. .notify_frame_skip =
  3343. __cam_isp_ctx_apply_default_req_settings,
  3344. },
  3345. .irq_ops = NULL,
  3346. },
  3347. /* Bubble Applied */
  3348. {
  3349. .ioctl_ops = {},
  3350. .crm_ops = {},
  3351. .irq_ops = NULL,
  3352. },
  3353. /* HW ERROR */
  3354. {
  3355. .ioctl_ops = {},
  3356. .crm_ops = {},
  3357. .irq_ops = NULL,
  3358. },
  3359. /* HALT */
  3360. {
  3361. .ioctl_ops = {},
  3362. .crm_ops = {},
  3363. .irq_ops = NULL,
  3364. },
  3365. };
  3366. static struct cam_ctx_ops
  3367. cam_isp_ctx_fs2_state_machine[CAM_ISP_CTX_ACTIVATED_MAX] = {
  3368. /* SOF */
  3369. {
  3370. .ioctl_ops = {},
  3371. .crm_ops = {
  3372. .apply_req = __cam_isp_ctx_apply_req_in_sof,
  3373. },
  3374. .irq_ops = NULL,
  3375. },
  3376. /* APPLIED */
  3377. {
  3378. .ioctl_ops = {},
  3379. .crm_ops = {},
  3380. .irq_ops = NULL,
  3381. },
  3382. /* EPOCH */
  3383. {
  3384. .ioctl_ops = {},
  3385. .crm_ops = {
  3386. .apply_req = __cam_isp_ctx_apply_req_in_epoch,
  3387. },
  3388. .irq_ops = NULL,
  3389. },
  3390. /* BUBBLE */
  3391. {
  3392. .ioctl_ops = {},
  3393. .crm_ops = {
  3394. .apply_req = __cam_isp_ctx_apply_req_in_bubble,
  3395. },
  3396. .irq_ops = NULL,
  3397. },
  3398. /* Bubble Applied */
  3399. {
  3400. .ioctl_ops = {},
  3401. .crm_ops = {},
  3402. .irq_ops = NULL,
  3403. },
  3404. /* HW ERROR */
  3405. {
  3406. .ioctl_ops = {},
  3407. .crm_ops = {},
  3408. .irq_ops = NULL,
  3409. },
  3410. /* HALT */
  3411. {
  3412. .ioctl_ops = {},
  3413. .crm_ops = {},
  3414. .irq_ops = NULL,
  3415. },
  3416. };
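/*
 * RDI-only handlers. These contexts stream only the RDI paths without
 * the full pixel-processing pipeline (an inference from the naming),
 * so the SOF event rather than EPOCH drives most of the trigger
 * notifications and bubble handling below.
 */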
  3417. static int __cam_isp_ctx_rdi_only_sof_in_top_state(
  3418. struct cam_isp_context *ctx_isp, void *evt_data)
  3419. {
  3420. int rc = 0;
  3421. struct cam_context *ctx = ctx_isp->base;
  3422. struct cam_req_mgr_trigger_notify notify;
  3423. struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
  3424. uint64_t request_id = 0;
	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid sof event data");
		return -EINVAL;
	}
  3429. ctx_isp->frame_id++;
  3430. ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
  3431. ctx_isp->boot_timestamp = sof_event_data->boot_time;
  3432. CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
  3433. ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
	/*
	 * Notify the request manager with the SOF signal. Note that, due
	 * to scheduling delays, two active requests may already be in the
	 * active queue by the time we try to notify. In that case, skip
	 * the current notification; this helps the state machine catch up.
	 */
  3441. if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_trigger &&
  3442. ctx_isp->active_req_cnt <= 2) {
  3443. notify.link_hdl = ctx->link_hdl;
  3444. notify.dev_hdl = ctx->dev_hdl;
  3445. notify.frame_id = ctx_isp->frame_id;
  3446. notify.trigger = CAM_TRIGGER_POINT_SOF;
  3447. notify.req_id = ctx_isp->req_info.last_bufdone_req_id;
  3448. notify.sof_timestamp_val = ctx_isp->sof_timestamp_val;
  3449. ctx->ctx_crm_intf->notify_trigger(&notify);
  3450. CAM_DBG(CAM_ISP, "Notify CRM SOF frame %lld",
  3451. ctx_isp->frame_id);
		/*
		 * This is an idle frame without any applied request id, so
		 * send the request id as zero.
		 */
  3456. __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
  3457. CAM_REQ_MGR_SOF_EVENT_SUCCESS);
  3458. } else {
  3459. CAM_ERR_RATE_LIMIT(CAM_ISP, "Can not notify SOF to CRM");
  3460. }
  3461. if (list_empty(&ctx->active_req_list))
  3462. ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
  3463. else
  3464. CAM_DBG(CAM_ISP, "Still need to wait for the buf done");
  3465. CAM_DBG(CAM_ISP, "next Substate[%s]",
  3466. __cam_isp_ctx_substate_val_to_type(
  3467. ctx_isp->substate_activated));
  3468. return rc;
  3469. }
  3470. static int __cam_isp_ctx_rdi_only_sof_in_applied_state(
  3471. struct cam_isp_context *ctx_isp, void *evt_data)
  3472. {
  3473. struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid sof event data");
		return -EINVAL;
	}
  3478. ctx_isp->frame_id++;
  3479. ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
  3480. ctx_isp->boot_timestamp = sof_event_data->boot_time;
  3481. CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
  3482. ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
  3483. ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED;
  3484. CAM_DBG(CAM_ISP, "next Substate[%s]",
  3485. __cam_isp_ctx_substate_val_to_type(
  3486. ctx_isp->substate_activated));
  3487. return 0;
  3488. }
  3489. static int __cam_isp_ctx_rdi_only_sof_in_bubble_applied(
  3490. struct cam_isp_context *ctx_isp, void *evt_data)
  3491. {
  3492. struct cam_ctx_request *req;
  3493. struct cam_isp_ctx_req *req_isp;
  3494. struct cam_context *ctx = ctx_isp->base;
  3495. struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
  3496. uint64_t request_id = 0;
	/*
	 * SOF in the bubble-applied state means the reg update was not
	 * received. Before incrementing the frame id and overriding the
	 * timestamp value, send the previous SOF timestamp that was
	 * captured in the applied state.
	 */
  3503. CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
  3504. ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
  3505. __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
  3506. CAM_REQ_MGR_SOF_EVENT_SUCCESS);
  3507. ctx_isp->frame_id++;
  3508. ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
  3509. ctx_isp->boot_timestamp = sof_event_data->boot_time;
  3510. CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
  3511. ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
	if (list_empty(&ctx->wait_req_list)) {
		/*
		 * If there is no waiting request at this point, it is an
		 * error case; recover by going back to the SOF state.
		 */
		CAM_ERR(CAM_ISP, "No wait request");
		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
		/* Send SOF event as an empty frame */
		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
		goto end;
	}
  3524. req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request,
  3525. list);
  3526. req_isp = (struct cam_isp_ctx_req *)req->req_priv;
  3527. req_isp->bubble_detected = true;
  3528. CAM_INFO_RATE_LIMIT(CAM_ISP, "Ctx:%d Report Bubble flag %d req id:%lld",
  3529. ctx->ctx_id, req_isp->bubble_report, req->request_id);
  3530. req_isp->reapply = true;
  3531. req_isp->cdm_reset_before_apply = false;
  3532. if (req_isp->bubble_report && ctx->ctx_crm_intf &&
  3533. ctx->ctx_crm_intf->notify_err) {
  3534. struct cam_req_mgr_error_notify notify;
  3535. notify.link_hdl = ctx->link_hdl;
  3536. notify.dev_hdl = ctx->dev_hdl;
  3537. notify.req_id = req->request_id;
  3538. notify.error = CRM_KMD_ERR_BUBBLE;
  3539. notify.trigger = 0;
  3540. if (ctx_isp->subscribe_event & CAM_TRIGGER_POINT_SOF)
  3541. notify.trigger = CAM_TRIGGER_POINT_SOF;
  3542. notify.frame_id = ctx_isp->frame_id;
  3543. notify.sof_timestamp_val = ctx_isp->sof_timestamp_val;
  3544. CAM_WARN_RATE_LIMIT(CAM_ISP,
  3545. "Notify CRM about Bubble req %lld frame %lld ctx %u",
  3546. req->request_id,
  3547. ctx_isp->frame_id,
  3548. ctx->ctx_id);
  3549. ctx->ctx_crm_intf->notify_err(&notify);
  3550. } else {
  3551. req_isp->bubble_report = 0;
  3552. }
	/*
	 * Always move the request to the active list and let the buf done
	 * handler take care of the rest.
	 */
  3557. list_del_init(&req->list);
  3558. list_add_tail(&req->list, &ctx->active_req_list);
  3559. ctx_isp->active_req_cnt++;
  3560. CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d)",
  3561. req->request_id, ctx_isp->active_req_cnt);
  3562. if (!req_isp->bubble_report) {
  3563. if (req->request_id > ctx_isp->reported_req_id) {
  3564. request_id = req->request_id;
  3565. ctx_isp->reported_req_id = request_id;
  3566. __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
  3567. CAM_REQ_MGR_SOF_EVENT_ERROR);
  3568. } else
  3569. __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
  3570. CAM_REQ_MGR_SOF_EVENT_SUCCESS);
  3571. } else
  3572. __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
  3573. CAM_REQ_MGR_SOF_EVENT_SUCCESS);
  3574. /* change the state to bubble, as reg update has not come */
  3575. ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
  3576. CAM_DBG(CAM_ISP, "next Substate[%s]",
  3577. __cam_isp_ctx_substate_val_to_type(
  3578. ctx_isp->substate_activated));
  3579. end:
  3580. return 0;
  3581. }
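/*
 * SOF while in the bubble state: every request that reached the
 * active list is assumed lost, so each one is failed with
 * CAM_SYNC_STATE_SIGNALED_ERROR and recycled before CRM is notified
 * and the substate drops back to SOF.
 */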
  3582. static int __cam_isp_ctx_rdi_only_sof_in_bubble_state(
  3583. struct cam_isp_context *ctx_isp, void *evt_data)
  3584. {
  3585. uint32_t i;
  3586. struct cam_ctx_request *req;
  3587. struct cam_context *ctx = ctx_isp->base;
  3588. struct cam_req_mgr_trigger_notify notify;
  3589. struct cam_isp_hw_sof_event_data *sof_event_data = evt_data;
  3590. struct cam_isp_ctx_req *req_isp;
  3591. uint64_t request_id = 0;
	if (!evt_data) {
		CAM_ERR(CAM_ISP, "invalid sof event data");
		return -EINVAL;
	}
  3596. ctx_isp->frame_id++;
  3597. ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
  3598. ctx_isp->boot_timestamp = sof_event_data->boot_time;
  3599. CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
  3600. ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
	/*
	 * Signal all active requests with an error and move them all to
	 * the free list.
	 */
  3605. while (!list_empty(&ctx->active_req_list)) {
  3606. req = list_first_entry(&ctx->active_req_list,
  3607. struct cam_ctx_request, list);
  3608. list_del_init(&req->list);
  3609. req_isp = (struct cam_isp_ctx_req *) req->req_priv;
  3610. CAM_DBG(CAM_ISP, "signal fence in active list. fence num %d",
  3611. req_isp->num_fence_map_out);
  3612. for (i = 0; i < req_isp->num_fence_map_out; i++)
  3613. if (req_isp->fence_map_out[i].sync_id != -1) {
  3614. cam_sync_signal(
  3615. req_isp->fence_map_out[i].sync_id,
  3616. CAM_SYNC_STATE_SIGNALED_ERROR,
  3617. CAM_SYNC_ISP_EVENT_BUBBLE);
  3618. }
  3619. list_add_tail(&req->list, &ctx->free_req_list);
  3620. ctx_isp->active_req_cnt--;
  3621. }
  3622. /* notify reqmgr with sof signal */
  3623. if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_trigger) {
  3624. notify.link_hdl = ctx->link_hdl;
  3625. notify.dev_hdl = ctx->dev_hdl;
  3626. notify.frame_id = ctx_isp->frame_id;
  3627. notify.trigger = CAM_TRIGGER_POINT_SOF;
  3628. notify.req_id = ctx_isp->req_info.last_bufdone_req_id;
  3629. notify.sof_timestamp_val = ctx_isp->sof_timestamp_val;
  3630. ctx->ctx_crm_intf->notify_trigger(&notify);
  3631. CAM_DBG(CAM_ISP, "Notify CRM SOF frame %lld",
  3632. ctx_isp->frame_id);
  3633. } else {
  3634. CAM_ERR(CAM_ISP, "Can not notify SOF to CRM");
  3635. }
	/*
	 * This is an idle frame without any applied request id, so send
	 * the request id as zero.
	 */
  3640. __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
  3641. CAM_REQ_MGR_SOF_EVENT_SUCCESS);
  3642. ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
  3643. CAM_DBG(CAM_ISP, "next Substate[%s]",
  3644. __cam_isp_ctx_substate_val_to_type(
  3645. ctx_isp->substate_activated));
  3646. return 0;
  3647. }
static int __cam_isp_ctx_rdi_only_reg_upd_in_bubble_state(
	struct cam_isp_context *ctx_isp, void *evt_data)
{
	struct cam_ctx_request *req = NULL;
	struct cam_context *ctx = ctx_isp->base;

	/* Guard against an empty active list before dereferencing */
	if (!list_empty(&ctx->active_req_list))
		req = list_first_entry(&ctx->active_req_list,
			struct cam_ctx_request, list);

	CAM_INFO(CAM_ISP, "Received RUP for Bubble Request %lld",
		(req != NULL) ? req->request_id : 0);
	return 0;
}
  3658. static int __cam_isp_ctx_rdi_only_reg_upd_in_bubble_applied_state(
  3659. struct cam_isp_context *ctx_isp, void *evt_data)
  3660. {
  3661. struct cam_ctx_request *req = NULL;
  3662. struct cam_context *ctx = ctx_isp->base;
  3663. struct cam_isp_ctx_req *req_isp;
  3664. struct cam_req_mgr_trigger_notify notify;
  3665. uint64_t request_id = 0;
  3666. ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;
	/* notify reqmgr with sof signal */
  3668. if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_trigger) {
  3669. if (list_empty(&ctx->wait_req_list)) {
  3670. CAM_ERR(CAM_ISP, "Reg upd ack with no waiting request");
  3671. goto error;
  3672. }
  3673. req = list_first_entry(&ctx->wait_req_list,
  3674. struct cam_ctx_request, list);
  3675. list_del_init(&req->list);
  3676. req_isp = (struct cam_isp_ctx_req *) req->req_priv;
  3677. request_id =
  3678. (req_isp->hw_update_data.packet_opcode_type ==
  3679. CAM_ISP_PACKET_INIT_DEV) ?
  3680. 0 : req->request_id;
  3681. if (req_isp->num_fence_map_out != 0) {
  3682. list_add_tail(&req->list, &ctx->active_req_list);
  3683. ctx_isp->active_req_cnt++;
  3684. CAM_DBG(CAM_ISP,
  3685. "move request %lld to active list(cnt = %d)",
  3686. req->request_id, ctx_isp->active_req_cnt);
  3687. /* if packet has buffers, set correct request id */
  3688. request_id = req->request_id;
  3689. } else {
  3690. /* no io config, so the request is completed. */
  3691. list_add_tail(&req->list, &ctx->free_req_list);
  3692. CAM_DBG(CAM_ISP,
  3693. "move active req %lld to free list(cnt=%d)",
  3694. req->request_id, ctx_isp->active_req_cnt);
  3695. }
  3696. notify.link_hdl = ctx->link_hdl;
  3697. notify.dev_hdl = ctx->dev_hdl;
  3698. notify.frame_id = ctx_isp->frame_id;
  3699. notify.trigger = CAM_TRIGGER_POINT_SOF;
  3700. notify.req_id = ctx_isp->req_info.last_bufdone_req_id;
  3701. notify.sof_timestamp_val = ctx_isp->sof_timestamp_val;
  3702. ctx->ctx_crm_intf->notify_trigger(&notify);
  3703. CAM_DBG(CAM_ISP, "Notify CRM SOF frame %lld",
  3704. ctx_isp->frame_id);
  3705. } else {
  3706. CAM_ERR(CAM_ISP, "Can not notify SOF to CRM");
  3707. }
  3708. if (request_id)
  3709. ctx_isp->reported_req_id = request_id;
  3710. __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
  3711. CAM_REQ_MGR_SOF_EVENT_SUCCESS);
  3712. CAM_DBG(CAM_ISP, "next Substate[%s]",
  3713. __cam_isp_ctx_substate_val_to_type(
  3714. ctx_isp->substate_activated));
  3715. __cam_isp_ctx_update_event_record(ctx_isp,
  3716. CAM_ISP_CTX_EVENT_RUP, req);
  3717. return 0;
  3718. error:
	/* Send SOF event as an idle frame */
  3720. __cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
  3721. CAM_REQ_MGR_SOF_EVENT_SUCCESS);
  3722. __cam_isp_ctx_update_event_record(ctx_isp,
  3723. CAM_ISP_CTX_EVENT_RUP, NULL);
	/*
	 * There is no request in the pending list, so move the sub-state
	 * machine to the SOF sub-state.
	 */
  3728. ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
  3729. return 0;
  3730. }
  3731. static struct cam_isp_ctx_irq_ops
  3732. cam_isp_ctx_rdi_only_activated_state_machine_irq
  3733. [CAM_ISP_CTX_ACTIVATED_MAX] = {
  3734. /* SOF */
  3735. {
  3736. .irq_ops = {
  3737. NULL,
  3738. __cam_isp_ctx_rdi_only_sof_in_top_state,
  3739. __cam_isp_ctx_reg_upd_in_sof,
  3740. NULL,
  3741. NULL,
  3742. NULL,
  3743. },
  3744. },
  3745. /* APPLIED */
  3746. {
  3747. .irq_ops = {
  3748. __cam_isp_ctx_handle_error,
  3749. __cam_isp_ctx_rdi_only_sof_in_applied_state,
  3750. __cam_isp_ctx_reg_upd_in_applied_state,
  3751. NULL,
  3752. NULL,
  3753. __cam_isp_ctx_buf_done_in_applied,
  3754. },
  3755. },
  3756. /* EPOCH */
  3757. {
  3758. .irq_ops = {
  3759. __cam_isp_ctx_handle_error,
  3760. __cam_isp_ctx_rdi_only_sof_in_top_state,
  3761. NULL,
  3762. NULL,
  3763. NULL,
  3764. __cam_isp_ctx_buf_done_in_epoch,
  3765. },
  3766. },
  3767. /* BUBBLE*/
  3768. {
  3769. .irq_ops = {
  3770. __cam_isp_ctx_handle_error,
  3771. __cam_isp_ctx_rdi_only_sof_in_bubble_state,
  3772. __cam_isp_ctx_rdi_only_reg_upd_in_bubble_state,
  3773. NULL,
  3774. NULL,
  3775. __cam_isp_ctx_buf_done_in_bubble,
  3776. },
  3777. },
	/* BUBBLE APPLIED, i.e. PRE_BUBBLE */
  3779. {
  3780. .irq_ops = {
  3781. __cam_isp_ctx_handle_error,
  3782. __cam_isp_ctx_rdi_only_sof_in_bubble_applied,
  3783. __cam_isp_ctx_rdi_only_reg_upd_in_bubble_applied_state,
  3784. NULL,
  3785. NULL,
  3786. __cam_isp_ctx_buf_done_in_bubble_applied,
  3787. },
  3788. },
  3789. /* HW ERROR */
  3790. {
  3791. },
  3792. /* HALT */
  3793. {
  3794. },
  3795. };
  3796. static int __cam_isp_ctx_rdi_only_apply_req_top_state(
  3797. struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
  3798. {
  3799. int rc = 0;
  3800. struct cam_isp_context *ctx_isp =
  3801. (struct cam_isp_context *) ctx->ctx_priv;
  3802. CAM_DBG(CAM_ISP, "current Substate[%s]",
  3803. __cam_isp_ctx_substate_val_to_type(
  3804. ctx_isp->substate_activated));
  3805. rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
  3806. CAM_ISP_CTX_ACTIVATED_APPLIED);
  3807. CAM_DBG(CAM_ISP, "new Substate[%s]",
  3808. __cam_isp_ctx_substate_val_to_type(
  3809. ctx_isp->substate_activated));
  3810. if (rc)
  3811. CAM_ERR_RATE_LIMIT(CAM_ISP,
  3812. "ctx_id:%d Apply failed in Substate[%s], rc %d",
  3813. ctx->ctx_id,
  3814. __cam_isp_ctx_substate_val_to_type(
  3815. ctx_isp->substate_activated), rc);
  3816. return rc;
  3817. }
  3818. static struct cam_ctx_ops
  3819. cam_isp_ctx_rdi_only_activated_state_machine
  3820. [CAM_ISP_CTX_ACTIVATED_MAX] = {
  3821. /* SOF */
  3822. {
  3823. .ioctl_ops = {},
  3824. .crm_ops = {
  3825. .apply_req = __cam_isp_ctx_rdi_only_apply_req_top_state,
  3826. },
  3827. .irq_ops = NULL,
  3828. },
  3829. /* APPLIED */
  3830. {
  3831. .ioctl_ops = {},
  3832. .crm_ops = {},
  3833. .irq_ops = NULL,
  3834. },
  3835. /* EPOCH */
  3836. {
  3837. .ioctl_ops = {},
  3838. .crm_ops = {
  3839. .apply_req = __cam_isp_ctx_rdi_only_apply_req_top_state,
  3840. },
  3841. .irq_ops = NULL,
  3842. },
  3843. /* PRE BUBBLE */
  3844. {
  3845. .ioctl_ops = {},
  3846. .crm_ops = {},
  3847. .irq_ops = NULL,
  3848. },
  3849. /* BUBBLE */
  3850. {
  3851. .ioctl_ops = {},
  3852. .crm_ops = {},
  3853. .irq_ops = NULL,
  3854. },
  3855. /* HW ERROR */
  3856. {
  3857. .ioctl_ops = {},
  3858. .crm_ops = {},
  3859. .irq_ops = NULL,
  3860. },
  3861. /* HALT */
  3862. {
  3863. .ioctl_ops = {},
  3864. .crm_ops = {},
  3865. .irq_ops = NULL,
  3866. },
  3867. };
  3868. static void __cam_isp_ctx_free_mem_hw_entries(struct cam_context *ctx)
  3869. {
  3870. kfree(ctx->out_map_entries);
  3871. kfree(ctx->in_map_entries);
  3872. kfree(ctx->hw_update_entry);
  3873. ctx->out_map_entries = NULL;
  3874. ctx->in_map_entries = NULL;
  3875. ctx->hw_update_entry = NULL;
  3876. ctx->max_out_map_entries = 0;
  3877. ctx->max_in_map_entries = 0;
  3878. ctx->max_hw_update_entries = 0;
  3879. }
  3880. static int __cam_isp_ctx_release_hw_in_top_state(struct cam_context *ctx,
  3881. void *cmd)
  3882. {
  3883. int rc = 0;
  3884. struct cam_hw_release_args rel_arg;
  3885. struct cam_isp_context *ctx_isp =
  3886. (struct cam_isp_context *) ctx->ctx_priv;
  3887. struct cam_req_mgr_flush_request flush_req;
  3888. int i;
  3889. if (ctx_isp->hw_ctx) {
  3890. rel_arg.ctxt_to_hw_map = ctx_isp->hw_ctx;
  3891. ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
  3892. &rel_arg);
  3893. ctx_isp->hw_ctx = NULL;
  3894. } else {
  3895. CAM_ERR(CAM_ISP, "No hw resources acquired for ctx[%u]", ctx->ctx_id);
  3896. }
  3897. ctx->last_flush_req = 0;
  3898. ctx_isp->custom_enabled = false;
  3899. ctx_isp->use_frame_header_ts = false;
  3900. ctx_isp->use_default_apply = false;
  3901. ctx_isp->frame_id = 0;
  3902. ctx_isp->active_req_cnt = 0;
  3903. ctx_isp->reported_req_id = 0;
  3904. ctx_isp->hw_acquired = false;
  3905. ctx_isp->init_received = false;
  3906. ctx_isp->support_consumed_addr = false;
  3907. ctx_isp->req_info.last_bufdone_req_id = 0;
  3908. atomic64_set(&ctx_isp->state_monitor_head, -1);
  3909. for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
  3910. atomic64_set(&ctx_isp->event_record_head[i], -1);
	/*
	 * Ideally, we should never have any active request here, but keep
	 * a sanity check to aid debugging.
	 */
  3915. if (!list_empty(&ctx->active_req_list))
  3916. CAM_WARN(CAM_ISP, "Active list is not empty");
  3917. /* Flush all the pending request list */
  3918. flush_req.type = CAM_REQ_MGR_FLUSH_TYPE_ALL;
  3919. flush_req.link_hdl = ctx->link_hdl;
  3920. flush_req.dev_hdl = ctx->dev_hdl;
  3921. flush_req.req_id = 0;
  3922. CAM_DBG(CAM_ISP, "try to flush pending list");
  3923. spin_lock_bh(&ctx->lock);
  3924. rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, &flush_req);
  3925. spin_unlock_bh(&ctx->lock);
  3926. __cam_isp_ctx_free_mem_hw_entries(ctx);
  3927. ctx->state = CAM_CTX_ACQUIRED;
  3928. trace_cam_context_state("ISP", ctx);
  3929. CAM_DBG(CAM_ISP, "Release device success[%u] next state %d",
  3930. ctx->ctx_id, ctx->state);
  3931. return rc;
  3932. }
/* top level state machine */
static int __cam_isp_ctx_release_dev_in_top_state(struct cam_context *ctx,
    struct cam_release_dev_cmd *cmd)
{
    int rc = 0;
    int i;
    struct cam_hw_release_args rel_arg;
    struct cam_isp_context *ctx_isp =
        (struct cam_isp_context *) ctx->ctx_priv;
    struct cam_req_mgr_flush_request flush_req;

    if (cmd && ctx_isp->hw_ctx) {
        CAM_ERR(CAM_ISP, "releasing hw");
        __cam_isp_ctx_release_hw_in_top_state(ctx, NULL);
    }

    if (ctx_isp->hw_ctx) {
        rel_arg.ctxt_to_hw_map = ctx_isp->hw_ctx;
        ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
            &rel_arg);
        ctx_isp->hw_ctx = NULL;
    }

    ctx->session_hdl = -1;
    ctx->dev_hdl = -1;
    ctx->link_hdl = -1;
    ctx->ctx_crm_intf = NULL;
    ctx->last_flush_req = 0;
    ctx_isp->frame_id = 0;
    ctx_isp->active_req_cnt = 0;
    ctx_isp->reported_req_id = 0;
    ctx_isp->hw_acquired = false;
    ctx_isp->init_received = false;
    ctx_isp->offline_context = false;
    ctx_isp->rdi_only_context = false;
    ctx_isp->req_info.last_bufdone_req_id = 0;

    atomic64_set(&ctx_isp->state_monitor_head, -1);
    for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
        atomic64_set(&ctx_isp->event_record_head[i], -1);
    /*
     * Ideally, we should never have any active request here.
     * But we still add some sanity check code here to aid debugging.
     */
    if (!list_empty(&ctx->active_req_list))
        CAM_ERR(CAM_ISP, "Active list is not empty");

    /* Flush all the pending request list */
    flush_req.type = CAM_REQ_MGR_FLUSH_TYPE_ALL;
    flush_req.link_hdl = ctx->link_hdl;
    flush_req.dev_hdl = ctx->dev_hdl;
    flush_req.req_id = 0;

    CAM_DBG(CAM_ISP, "try to flush pending list");
    spin_lock_bh(&ctx->lock);
    rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, &flush_req);
    spin_unlock_bh(&ctx->lock);

    __cam_isp_ctx_free_mem_hw_entries(ctx);

    ctx->state = CAM_CTX_AVAILABLE;
    trace_cam_context_state("ISP", ctx);

    CAM_DBG(CAM_ISP, "Release device success[%u] next state %d",
        ctx->ctx_id, ctx->state);
    return rc;
}
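/*
 * CONFIG_DEV in the top-level state machine: take a free request object,
 * map and validate the user packet, have the hw manager translate it
 * into hw-update and fence-map entries, grab a ref on every output
 * fence, then route the request by opcode - INIT packets are queued for
 * start-up, update packets go to CRM (or straight to the in-order queue
 * for offline contexts).
 */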
static int __cam_isp_ctx_config_dev_in_top_state(
    struct cam_context *ctx, struct cam_config_dev_cmd *cmd)
{
    int rc = 0, i;
    struct cam_ctx_request *req = NULL;
    struct cam_isp_ctx_req *req_isp;
    uintptr_t packet_addr;
    struct cam_packet *packet;
    size_t len = 0;
    size_t remain_len = 0;
    struct cam_hw_prepare_update_args cfg = {0};
    struct cam_req_mgr_add_request add_req;
    struct cam_isp_context *ctx_isp =
        (struct cam_isp_context *) ctx->ctx_priv;
    struct cam_hw_cmd_args hw_cmd_args;
    struct cam_isp_hw_cmd_args isp_hw_cmd_args;
    uint32_t packet_opcode = 0;

    CAM_DBG(CAM_ISP, "get free request object......");

    /* get free request */
    spin_lock_bh(&ctx->lock);
    if (!list_empty(&ctx->free_req_list)) {
        req = list_first_entry(&ctx->free_req_list,
            struct cam_ctx_request, list);
        list_del_init(&req->list);
    }
    spin_unlock_bh(&ctx->lock);

    if (!req) {
        CAM_ERR(CAM_ISP, "No more free request objects");
        return -ENOMEM;
    }

    req_isp = (struct cam_isp_ctx_req *) req->req_priv;

    /* for config dev, only memory handle is supported */
    /* map packet from the memhandle */
    rc = cam_mem_get_cpu_buf((int32_t) cmd->packet_handle,
        &packet_addr, &len);
    if (rc != 0) {
        CAM_ERR(CAM_ISP, "Can not get packet address");
        rc = -EINVAL;
        goto free_req;
    }

    remain_len = len;
    if ((len < sizeof(struct cam_packet)) ||
        ((size_t)cmd->offset >= len - sizeof(struct cam_packet))) {
        CAM_ERR(CAM_ISP, "invalid buff length: %zu or offset", len);
        rc = -EINVAL;
        goto free_req;
    }

    remain_len -= (size_t)cmd->offset;
    packet = (struct cam_packet *)(packet_addr + (uint32_t)cmd->offset);
    CAM_DBG(CAM_ISP, "pack_handle %llx", cmd->packet_handle);
    CAM_DBG(CAM_ISP, "packet address is 0x%zx", packet_addr);
    CAM_DBG(CAM_ISP, "packet with length %zu, offset 0x%llx",
        len, cmd->offset);
    CAM_DBG(CAM_ISP, "Packet request id %lld",
        packet->header.request_id);
    CAM_DBG(CAM_ISP, "Packet size 0x%x", packet->header.size);
    CAM_DBG(CAM_ISP, "packet op %d", packet->header.op_code);

    /* Query the packet opcode */
    hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
    hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
    isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_GET_PACKET_OPCODE;
    isp_hw_cmd_args.cmd_data = (void *)packet;
    hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
    rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
        &hw_cmd_args);
    if (rc) {
        CAM_ERR(CAM_ISP, "HW command failed");
        goto free_req;
    }

    packet_opcode = isp_hw_cmd_args.u.packet_op_code;
    CAM_DBG(CAM_ISP, "packet op %d", packet_opcode);

    if ((packet_opcode == CAM_ISP_PACKET_UPDATE_DEV)
        && (packet->header.request_id <= ctx->last_flush_req)) {
        CAM_INFO(CAM_ISP,
            "request %lld has been flushed, reject packet",
            packet->header.request_id);
        rc = -EBADR;
        goto free_req;
    }

    cfg.packet = packet;
    cfg.remain_len = remain_len;
    cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
    cfg.max_hw_update_entries = ctx->max_hw_update_entries;
    cfg.hw_update_entries = req_isp->cfg;
    cfg.max_out_map_entries = ctx->max_out_map_entries;
    cfg.max_in_map_entries = ctx->max_in_map_entries;
    cfg.out_map_entries = req_isp->fence_map_out;
    cfg.in_map_entries = req_isp->fence_map_in;
    cfg.priv = &req_isp->hw_update_data;
    cfg.pf_data = &(req->pf_data);
    cfg.num_out_map_entries = 0;
    cfg.num_in_map_entries = 0;

    CAM_DBG(CAM_ISP, "try to prepare config packet......");

    rc = ctx->hw_mgr_intf->hw_prepare_update(
        ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
    if (rc != 0) {
        CAM_ERR(CAM_ISP, "Prepare config packet failed in HW layer");
        rc = -EFAULT;
        goto free_req;
    }

    req_isp->num_cfg = cfg.num_hw_update_entries;
    req_isp->num_fence_map_out = cfg.num_out_map_entries;
    req_isp->num_fence_map_in = cfg.num_in_map_entries;
    req_isp->num_acked = 0;
    req_isp->num_deferred_acks = 0;
    req_isp->bubble_detected = false;
    req_isp->cdm_reset_before_apply = false;
    req_isp->hw_update_data.packet = packet;

    for (i = 0; i < req_isp->num_fence_map_out; i++) {
        rc = cam_sync_get_obj_ref(req_isp->fence_map_out[i].sync_id);
        if (rc) {
            CAM_ERR(CAM_ISP, "Can't get ref for fence %d",
                req_isp->fence_map_out[i].sync_id);
            goto put_ref;
        }
    }

    CAM_DBG(CAM_ISP, "num_entry: %d, num fence out: %d, num fence in: %d",
        req_isp->num_cfg, req_isp->num_fence_map_out,
        req_isp->num_fence_map_in);

    req->request_id = packet->header.request_id;
    req->status = 1;

    CAM_DBG(CAM_ISP, "Packet request id %lld packet opcode:%d",
        packet->header.request_id,
        req_isp->hw_update_data.packet_opcode_type);

    if (req_isp->hw_update_data.packet_opcode_type ==
        CAM_ISP_PACKET_INIT_DEV) {
        if (ctx->state < CAM_CTX_ACTIVATED) {
            rc = __cam_isp_ctx_enqueue_init_request(ctx, req);
            if (rc)
                CAM_ERR(CAM_ISP, "Enqueue INIT pkt failed");
            ctx_isp->init_received = true;
        } else {
            rc = -EINVAL;
            CAM_ERR(CAM_ISP, "Received INIT pkt in wrong state:%d",
                ctx->state);
        }
    } else {
        if (ctx_isp->offline_context) {
            __cam_isp_ctx_enqueue_request_in_order(ctx, req);
        } else if ((ctx->state != CAM_CTX_FLUSHED) &&
            (ctx->state >= CAM_CTX_READY) &&
            ctx->ctx_crm_intf->add_req) {
            memset(&add_req, 0, sizeof(add_req));
            add_req.link_hdl = ctx->link_hdl;
            add_req.dev_hdl = ctx->dev_hdl;
            add_req.req_id = req->request_id;
            rc = ctx->ctx_crm_intf->add_req(&add_req);
            if (rc) {
                CAM_ERR(CAM_ISP, "Add req failed: req id=%llu",
                    req->request_id);
            } else {
                __cam_isp_ctx_enqueue_request_in_order(
                    ctx, req);
            }
        } else {
            rc = -EINVAL;
            CAM_ERR(CAM_ISP,
                "Received update req %lld in wrong state:%d",
                req->request_id, ctx->state);
        }
    }
    if (rc)
        goto put_ref;

    CAM_DBG(CAM_REQ,
        "Preprocessing Config req_id %lld successful on ctx %u",
        req->request_id, ctx->ctx_id);

    if (ctx_isp->offline_context && atomic_read(&ctx_isp->rxd_epoch))
        __cam_isp_ctx_schedule_apply_req_offline(ctx_isp);

    return rc;

put_ref:
    for (--i; i >= 0; i--) {
        if (cam_sync_put_obj_ref(req_isp->fence_map_out[i].sync_id))
            CAM_ERR(CAM_CTXT, "Failed to put ref of fence %d",
                req_isp->fence_map_out[i].sync_id);
    }
free_req:
    spin_lock_bh(&ctx->lock);
    list_add_tail(&req->list, &ctx->free_req_list);
    spin_unlock_bh(&ctx->lock);

    return rc;
}
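/*
 * Size and allocate the context-wide hw-update and fence-map arrays
 * from the acquire parameters (param_list[0] presumably carries the
 * base resource count, with param_list[1] added when SFE is enabled),
 * then point each request on the free list at its slice of the arrays.
 */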
static int __cam_isp_ctx_allocate_mem_hw_entries(
    struct cam_context *ctx,
    struct cam_hw_acquire_args *param)
{
    int rc = 0;
    uint32_t max_res = 0;
    uint32_t max_hw_upd_entries = CAM_ISP_CTX_CFG_MAX;
    struct cam_ctx_request *req;
    struct cam_ctx_request *temp_req;
    struct cam_isp_ctx_req *req_isp;
    size_t num_entries = 0;

    if (!param->op_params.param_list[0])
        max_res = CAM_ISP_CTX_RES_MAX;
    else {
        max_res = param->op_params.param_list[0];
        if (param->op_flags & CAM_IFE_CTX_SFE_EN) {
            max_res += param->op_params.param_list[1];
            max_hw_upd_entries = CAM_ISP_SFE_CTX_CFG_MAX;
        }
    }

    ctx->max_in_map_entries = max_res;
    ctx->max_out_map_entries = max_res;
    ctx->max_hw_update_entries = max_hw_upd_entries;

    CAM_DBG(CAM_ISP,
        "Allocate max_entries: 0x%x max_res: 0x%x is_sfe_en: %d",
        max_hw_upd_entries, max_res,
        (param->op_flags & CAM_IFE_CTX_SFE_EN));

    num_entries = ctx->max_hw_update_entries * CAM_ISP_CTX_REQ_MAX;
    ctx->hw_update_entry = kcalloc(num_entries,
        sizeof(struct cam_hw_update_entry),
        GFP_KERNEL);
    if (!ctx->hw_update_entry) {
        CAM_ERR(CAM_CTXT, "%s[%d] no memory",
            ctx->dev_name, ctx->ctx_id);
        return -ENOMEM;
    }

    num_entries = ctx->max_in_map_entries * CAM_ISP_CTX_REQ_MAX;
    ctx->in_map_entries = kcalloc(num_entries,
        sizeof(struct cam_hw_fence_map_entry),
        GFP_KERNEL);
    if (!ctx->in_map_entries) {
        CAM_ERR(CAM_CTXT, "%s[%d] no memory",
            ctx->dev_name, ctx->ctx_id);
        rc = -ENOMEM;
        goto end;
    }

    num_entries = ctx->max_out_map_entries * CAM_ISP_CTX_REQ_MAX;
    ctx->out_map_entries = kcalloc(num_entries,
        sizeof(struct cam_hw_fence_map_entry),
        GFP_KERNEL);
    if (!ctx->out_map_entries) {
        CAM_ERR(CAM_CTXT, "%s[%d] no memory",
            ctx->dev_name, ctx->ctx_id);
        rc = -ENOMEM;
        goto end;
    }

    list_for_each_entry_safe(req, temp_req,
        &ctx->free_req_list, list) {
        req_isp = (struct cam_isp_ctx_req *) req->req_priv;
        req_isp->cfg =
            &ctx->hw_update_entry[req->index *
                ctx->max_hw_update_entries];
        req_isp->fence_map_in =
            &ctx->in_map_entries[req->index * ctx->max_in_map_entries];
        req_isp->fence_map_out =
            &ctx->out_map_entries[req->index * ctx->max_out_map_entries];
    }

    return rc;

end:
    __cam_isp_ctx_free_mem_hw_entries(ctx);

    return rc;
}
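/*
 * Legacy ACQUIRE_DEV path: copy the isp resource descriptors from user
 * space, reserve hardware through the hw manager, pick the substate
 * machine matching the context type (RDI-only, FS2, offline or the
 * default PIX machine) and create the device handle for CRM. A
 * num_resources of CAM_API_COMPAT_CONSTANT means a split acquire where
 * only the handle is created here and the hw is acquired later.
 */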
static int __cam_isp_ctx_acquire_dev_in_available(struct cam_context *ctx,
    struct cam_acquire_dev_cmd *cmd)
{
    int rc = 0;
    int i;
    struct cam_hw_acquire_args param;
    struct cam_isp_resource *isp_res = NULL;
    struct cam_create_dev_hdl req_hdl_param;
    struct cam_hw_release_args release;
    struct cam_isp_context *ctx_isp =
        (struct cam_isp_context *) ctx->ctx_priv;
    struct cam_hw_cmd_args hw_cmd_args;
    struct cam_isp_hw_cmd_args isp_hw_cmd_args;

    if (!ctx->hw_mgr_intf) {
        CAM_ERR(CAM_ISP, "HW interface is not ready");
        rc = -EFAULT;
        goto end;
    }

    CAM_DBG(CAM_ISP,
        "session_hdl 0x%x, num_resources %d, hdl type %d, res %lld",
        cmd->session_handle, cmd->num_resources,
        cmd->handle_type, cmd->resource_hdl);

    if (cmd->num_resources == CAM_API_COMPAT_CONSTANT) {
        ctx_isp->split_acquire = true;
        CAM_DBG(CAM_ISP, "Acquire dev handle");
        goto get_dev_handle;
    }

    if (cmd->num_resources > CAM_ISP_CTX_RES_MAX) {
        CAM_ERR(CAM_ISP, "Too many resources in the acquire");
        rc = -ENOMEM;
        goto end;
    }

    /* for now we only support user pointer */
    if (cmd->handle_type != 1) {
        CAM_ERR(CAM_ISP, "Only user pointer is supported");
        rc = -EINVAL;
        goto end;
    }

    isp_res = kzalloc(
        sizeof(*isp_res)*cmd->num_resources, GFP_KERNEL);
    if (!isp_res) {
        rc = -ENOMEM;
        goto end;
    }

    CAM_DBG(CAM_ISP, "start copy %d resources from user",
        cmd->num_resources);

    if (copy_from_user(isp_res, u64_to_user_ptr(cmd->resource_hdl),
        sizeof(*isp_res)*cmd->num_resources)) {
        rc = -EFAULT;
        goto free_res;
    }

    memset(&param, 0, sizeof(param));
    param.context_data = ctx;
    param.event_cb = ctx->irq_cb_intf;
    param.num_acq = cmd->num_resources;
    param.acquire_info = (uintptr_t) isp_res;

    rc = __cam_isp_ctx_allocate_mem_hw_entries(ctx, &param);
    if (rc) {
        CAM_ERR(CAM_ISP, "Ctx[%d] allocate hw entry fail",
            ctx->ctx_id);
        goto free_res;
    }

    /* call HW manager to reserve the resource */
    rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
        &param);
    if (rc != 0) {
        CAM_ERR(CAM_ISP, "Acquire device failed");
        goto free_res;
    }

    /* Query whether the context holds RDI-only resources */
    hw_cmd_args.ctxt_to_hw_map = param.ctxt_to_hw_map;
    hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
    isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_CTX_TYPE;
    hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
    rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
        &hw_cmd_args);
    if (rc) {
        CAM_ERR(CAM_ISP, "HW command failed");
        goto free_hw;
    }

    if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_RDI) {
        /*
         * this context has rdi only resources; assign the rdi only
         * state machine
         */
        CAM_DBG(CAM_ISP, "RDI only session Context");
        ctx_isp->substate_machine_irq =
            cam_isp_ctx_rdi_only_activated_state_machine_irq;
        ctx_isp->substate_machine =
            cam_isp_ctx_rdi_only_activated_state_machine;
        ctx_isp->rdi_only_context = true;
    } else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_FS2) {
        CAM_DBG(CAM_ISP, "FS2 Session has PIX, RD and RDI");
        ctx_isp->substate_machine_irq =
            cam_isp_ctx_fs2_state_machine_irq;
        ctx_isp->substate_machine =
            cam_isp_ctx_fs2_state_machine;
    } else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_OFFLINE) {
        CAM_DBG(CAM_ISP, "offline Session has PIX and RD resources");
        ctx_isp->substate_machine_irq =
            cam_isp_ctx_offline_state_machine_irq;
    } else {
        CAM_DBG(CAM_ISP, "Session has PIX or PIX and RDI resources");
        ctx_isp->substate_machine_irq =
            cam_isp_ctx_activated_state_machine_irq;
        ctx_isp->substate_machine =
            cam_isp_ctx_activated_state_machine;
    }

    ctx_isp->hw_ctx = param.ctxt_to_hw_map;
    ctx_isp->hw_acquired = true;
    ctx_isp->split_acquire = false;
    ctx->ctxt_to_hw_map = param.ctxt_to_hw_map;
    atomic64_set(&ctx_isp->state_monitor_head, -1);

    for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
        atomic64_set(&ctx_isp->event_record_head[i], -1);

    kfree(isp_res);
    isp_res = NULL;

get_dev_handle:
    req_hdl_param.session_hdl = cmd->session_handle;
    /* bridge is not ready for these flags. so false for now */
    req_hdl_param.v4l2_sub_dev_flag = 0;
    req_hdl_param.media_entity_flag = 0;
    req_hdl_param.ops = ctx->crm_ctx_intf;
    req_hdl_param.priv = ctx;
    req_hdl_param.dev_id = CAM_ISP;

    CAM_DBG(CAM_ISP, "get device handle from bridge");
    ctx->dev_hdl = cam_create_device_hdl(&req_hdl_param);
    if (ctx->dev_hdl <= 0) {
        rc = -EFAULT;
        CAM_ERR(CAM_ISP, "Can not create device handle");
        goto free_hw;
    }
    cmd->dev_handle = ctx->dev_hdl;

    /* store session information */
    ctx->session_hdl = cmd->session_handle;
    ctx->state = CAM_CTX_ACQUIRED;

    trace_cam_context_state("ISP", ctx);
    CAM_DBG(CAM_ISP,
        "Acquire success on session_hdl 0x%x num_rsrces %d ctx %u",
        cmd->session_handle, cmd->num_resources, ctx->ctx_id);

    return rc;

free_hw:
    release.ctxt_to_hw_map = ctx_isp->hw_ctx;
    if (ctx_isp->hw_acquired)
        ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
            &release);
    ctx_isp->hw_ctx = NULL;
    ctx_isp->hw_acquired = false;
free_res:
    kfree(isp_res);
end:
    return rc;
}
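/*
 * ACQUIRE_HW v1: copy the variable-size acquire blob from user space,
 * reserve hardware and select the substate machine for the resulting
 * context type. Unlike the v2 handler below, only support_consumed_addr
 * is latched from the op_flags and no acquired-hw info is reported back
 * to user space.
 */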
static int __cam_isp_ctx_acquire_hw_v1(struct cam_context *ctx,
    void *args)
{
    int rc = 0;
    int i;
    struct cam_acquire_hw_cmd_v1 *cmd =
        (struct cam_acquire_hw_cmd_v1 *)args;
    struct cam_hw_acquire_args param;
    struct cam_hw_release_args release;
    struct cam_isp_context *ctx_isp =
        (struct cam_isp_context *) ctx->ctx_priv;
    struct cam_hw_cmd_args hw_cmd_args;
    struct cam_isp_hw_cmd_args isp_hw_cmd_args;
    struct cam_isp_acquire_hw_info *acquire_hw_info = NULL;

    if (!ctx->hw_mgr_intf) {
        CAM_ERR(CAM_ISP, "HW interface is not ready");
        rc = -EFAULT;
        goto end;
    }

    CAM_DBG(CAM_ISP,
        "session_hdl 0x%x, hdl type %d, res %lld",
        cmd->session_handle, cmd->handle_type, cmd->resource_hdl);

    /* for now we only support user pointer */
    if (cmd->handle_type != 1) {
        CAM_ERR(CAM_ISP, "Only user pointer is supported");
        rc = -EINVAL;
        goto end;
    }

    if (cmd->data_size < sizeof(*acquire_hw_info)) {
        CAM_ERR(CAM_ISP, "data_size is not a valid value");
        rc = -EINVAL;
        goto end;
    }

    acquire_hw_info = kzalloc(cmd->data_size, GFP_KERNEL);
    if (!acquire_hw_info) {
        rc = -ENOMEM;
        goto end;
    }

    CAM_DBG(CAM_ISP, "start copy resources from user");

    if (copy_from_user(acquire_hw_info, (void __user *)cmd->resource_hdl,
        cmd->data_size)) {
        rc = -EFAULT;
        goto free_res;
    }

    memset(&param, 0, sizeof(param));
    param.context_data = ctx;
    param.event_cb = ctx->irq_cb_intf;
    param.num_acq = CAM_API_COMPAT_CONSTANT;
    param.acquire_info_size = cmd->data_size;
    param.acquire_info = (uint64_t) acquire_hw_info;

    rc = __cam_isp_ctx_allocate_mem_hw_entries(ctx,
        &param);
    if (rc) {
        CAM_ERR(CAM_ISP, "Ctx[%d] allocate hw entry fail",
            ctx->ctx_id);
        goto free_res;
    }

    /* call HW manager to reserve the resource */
    rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
        &param);
    if (rc != 0) {
        CAM_ERR(CAM_ISP, "Acquire device failed");
        goto free_res;
    }

    ctx_isp->support_consumed_addr =
        (param.op_flags & CAM_IFE_CTX_FRAME_HEADER_EN);

    /* Query whether the context holds RDI-only resources */
    hw_cmd_args.ctxt_to_hw_map = param.ctxt_to_hw_map;
    hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
    isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_CTX_TYPE;
    hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
    rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
        &hw_cmd_args);
    if (rc) {
        CAM_ERR(CAM_ISP, "HW command failed");
        goto free_hw;
    }

    if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_RDI) {
        /*
         * this context has rdi only resources; assign the rdi only
         * state machine
         */
        CAM_DBG(CAM_ISP, "RDI only session Context");
        ctx_isp->substate_machine_irq =
            cam_isp_ctx_rdi_only_activated_state_machine_irq;
        ctx_isp->substate_machine =
            cam_isp_ctx_rdi_only_activated_state_machine;
        ctx_isp->rdi_only_context = true;
    } else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_FS2) {
        CAM_DBG(CAM_ISP, "FS2 Session has PIX, RD and RDI");
        ctx_isp->substate_machine_irq =
            cam_isp_ctx_fs2_state_machine_irq;
        ctx_isp->substate_machine =
            cam_isp_ctx_fs2_state_machine;
    } else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_OFFLINE) {
        CAM_DBG(CAM_ISP, "Offline session has PIX and RD resources");
        ctx_isp->substate_machine_irq =
            cam_isp_ctx_offline_state_machine_irq;
        ctx_isp->substate_machine = NULL;
    } else {
        CAM_DBG(CAM_ISP, "Session has PIX or PIX and RDI resources");
        ctx_isp->substate_machine_irq =
            cam_isp_ctx_activated_state_machine_irq;
        ctx_isp->substate_machine =
            cam_isp_ctx_activated_state_machine;
    }

    ctx_isp->hw_ctx = param.ctxt_to_hw_map;
    ctx_isp->hw_acquired = true;
    ctx->ctxt_to_hw_map = param.ctxt_to_hw_map;
    atomic64_set(&ctx_isp->state_monitor_head, -1);

    for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
        atomic64_set(&ctx_isp->event_record_head[i], -1);

    trace_cam_context_state("ISP", ctx);
    CAM_DBG(CAM_ISP,
        "Acquire success on session_hdl 0x%x ctx_type %d ctx_id %u",
        ctx->session_hdl, isp_hw_cmd_args.u.ctx_type, ctx->ctx_id);
    kfree(acquire_hw_info);
    return rc;

free_hw:
    release.ctxt_to_hw_map = ctx_isp->hw_ctx;
    ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &release);
    ctx_isp->hw_ctx = NULL;
    ctx_isp->hw_acquired = false;
free_res:
    kfree(acquire_hw_info);
end:
    return rc;
}
static void cam_req_mgr_process_workq_offline_ife_worker(struct work_struct *w)
{
    cam_req_mgr_process_workq(w);
}
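/*
 * ACQUIRE_HW v2: on top of the v1 flow this latches the op_flags
 * feature bits (custom hw, frame-header timestamps, default apply,
 * consumed address), reports the acquired hw ids and paths back to
 * user space, and creates the offline-IFE workqueue when the context
 * type is offline.
 */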
static int __cam_isp_ctx_acquire_hw_v2(struct cam_context *ctx,
    void *args)
{
    int rc = 0, i, j;
    struct cam_acquire_hw_cmd_v2 *cmd =
        (struct cam_acquire_hw_cmd_v2 *)args;
    struct cam_hw_acquire_args param;
    struct cam_hw_release_args release;
    struct cam_isp_context *ctx_isp =
        (struct cam_isp_context *) ctx->ctx_priv;
    struct cam_hw_cmd_args hw_cmd_args;
    struct cam_isp_hw_cmd_args isp_hw_cmd_args;
    struct cam_isp_acquire_hw_info *acquire_hw_info = NULL;

    if (!ctx->hw_mgr_intf) {
        CAM_ERR(CAM_ISP, "HW interface is not ready");
        rc = -EFAULT;
        goto end;
    }

    CAM_DBG(CAM_ISP,
        "session_hdl 0x%x, hdl type %d, res %lld",
        cmd->session_handle, cmd->handle_type, cmd->resource_hdl);

    /* for now we only support user pointer */
    if (cmd->handle_type != 1) {
        CAM_ERR(CAM_ISP, "Only user pointer is supported");
        rc = -EINVAL;
        goto end;
    }

    if (cmd->data_size < sizeof(*acquire_hw_info)) {
        CAM_ERR(CAM_ISP, "data_size is not a valid value");
        rc = -EINVAL;
        goto end;
    }

    acquire_hw_info = kzalloc(cmd->data_size, GFP_KERNEL);
    if (!acquire_hw_info) {
        rc = -ENOMEM;
        goto end;
    }

    CAM_DBG(CAM_ISP, "start copy resources from user");

    if (copy_from_user(acquire_hw_info, (void __user *)cmd->resource_hdl,
        cmd->data_size)) {
        rc = -EFAULT;
        goto free_res;
    }

    memset(&param, 0, sizeof(param));
    param.context_data = ctx;
    param.event_cb = ctx->irq_cb_intf;
    param.num_acq = CAM_API_COMPAT_CONSTANT;
    param.acquire_info_size = cmd->data_size;
    param.acquire_info = (uint64_t) acquire_hw_info;

    /* call HW manager to reserve the resource */
    rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
        &param);
    if (rc != 0) {
        CAM_ERR(CAM_ISP, "Acquire device failed");
        goto free_res;
    }

    rc = __cam_isp_ctx_allocate_mem_hw_entries(ctx, &param);
    if (rc) {
        CAM_ERR(CAM_ISP, "Ctx[%d] allocate hw entry fail",
            ctx->ctx_id);
        goto free_hw;
    }

    /*
     * Set feature flag if applicable
     * custom hw is supported only on v2
     */
    ctx_isp->custom_enabled =
        (param.op_flags & CAM_IFE_CTX_CUSTOM_EN);
    ctx_isp->use_frame_header_ts =
        (param.op_flags & CAM_IFE_CTX_FRAME_HEADER_EN);
    ctx_isp->use_default_apply =
        (param.op_flags & CAM_IFE_CTX_APPLY_DEFAULT_CFG);
    ctx_isp->support_consumed_addr =
        (param.op_flags & CAM_IFE_CTX_CONSUME_ADDR_EN);

    /* Query whether the context holds RDI-only resources */
    hw_cmd_args.ctxt_to_hw_map = param.ctxt_to_hw_map;
    hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
    isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_CTX_TYPE;
    hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
    rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
        &hw_cmd_args);
    if (rc) {
        CAM_ERR(CAM_ISP, "HW command failed");
        goto free_hw;
    }

    if (param.valid_acquired_hw) {
        for (i = 0; i < CAM_MAX_ACQ_RES; i++)
            cmd->hw_info.acquired_hw_id[i] =
                param.acquired_hw_id[i];

        for (i = 0; i < CAM_MAX_ACQ_RES; i++)
            for (j = 0; j < CAM_MAX_HW_SPLIT; j++)
                cmd->hw_info.acquired_hw_path[i][j] =
                    param.acquired_hw_path[i][j];
    }
    cmd->hw_info.valid_acquired_hw = param.valid_acquired_hw;

    if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_RDI) {
        /*
         * this context has rdi only resources; assign the rdi only
         * state machine
         */
        CAM_DBG(CAM_ISP, "RDI only session Context");
        ctx_isp->substate_machine_irq =
            cam_isp_ctx_rdi_only_activated_state_machine_irq;
        ctx_isp->substate_machine =
            cam_isp_ctx_rdi_only_activated_state_machine;
        ctx_isp->rdi_only_context = true;
    } else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_FS2) {
        CAM_DBG(CAM_ISP, "FS2 Session has PIX, RD and RDI");
        ctx_isp->substate_machine_irq =
            cam_isp_ctx_fs2_state_machine_irq;
        ctx_isp->substate_machine =
            cam_isp_ctx_fs2_state_machine;
    } else if (isp_hw_cmd_args.u.ctx_type == CAM_ISP_CTX_OFFLINE) {
        CAM_DBG(CAM_ISP, "Offline Session has PIX and RD resources");
        ctx_isp->substate_machine_irq =
            cam_isp_ctx_offline_state_machine_irq;
        ctx_isp->substate_machine = NULL;
        ctx_isp->offline_context = true;

        rc = cam_req_mgr_workq_create("offline_ife", 20,
            &ctx_isp->workq, CRM_WORKQ_USAGE_IRQ, 0,
            cam_req_mgr_process_workq_offline_ife_worker);
        if (rc)
            CAM_ERR(CAM_ISP,
                "Failed to create workq for offline IFE rc:%d",
                rc);
    } else {
        CAM_DBG(CAM_ISP, "Session has PIX or PIX and RDI resources");
        ctx_isp->substate_machine_irq =
            cam_isp_ctx_activated_state_machine_irq;
        ctx_isp->substate_machine =
            cam_isp_ctx_activated_state_machine;
    }

    ctx_isp->hw_ctx = param.ctxt_to_hw_map;
    ctx_isp->hw_acquired = true;
    ctx->ctxt_to_hw_map = param.ctxt_to_hw_map;

    trace_cam_context_state("ISP", ctx);
    CAM_DBG(CAM_ISP,
        "Acquire success on session_hdl 0x%x ctx_type %d ctx_id %u",
        ctx->session_hdl, isp_hw_cmd_args.u.ctx_type, ctx->ctx_id);
    kfree(acquire_hw_info);
    return rc;

free_hw:
    release.ctxt_to_hw_map = ctx_isp->hw_ctx;
    ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &release);
    ctx_isp->hw_ctx = NULL;
    ctx_isp->hw_acquired = false;
free_res:
    kfree(acquire_hw_info);
end:
    return rc;
}
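/*
 * Dispatch ACQUIRE_HW on the api version carried in the first word of
 * the argument blob; only versions 1 and 2 are supported.
 */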
static int __cam_isp_ctx_acquire_hw_in_acquired(struct cam_context *ctx,
    void *args)
{
    int rc = -EINVAL;
    uint32_t api_version;

    if (!ctx || !args) {
        CAM_ERR(CAM_ISP, "Invalid input pointer");
        return rc;
    }

    api_version = *((uint32_t *)args);
    if (api_version == 1)
        rc = __cam_isp_ctx_acquire_hw_v1(ctx, args);
    else if (api_version == 2)
        rc = __cam_isp_ctx_acquire_hw_v2(ctx, args);
    else
        CAM_ERR(CAM_ISP, "Unsupported api version %d", api_version);

    return rc;
}
static int __cam_isp_ctx_config_dev_in_acquired(struct cam_context *ctx,
    struct cam_config_dev_cmd *cmd)
{
    int rc = 0;
    struct cam_isp_context *ctx_isp =
        (struct cam_isp_context *) ctx->ctx_priv;

    if (!ctx_isp->hw_acquired) {
        CAM_ERR(CAM_ISP, "HW is not acquired, reject packet");
        return -EINVAL;
    }

    rc = __cam_isp_ctx_config_dev_in_top_state(ctx, cmd);

    if (!rc && ((ctx->link_hdl >= 0) || ctx_isp->offline_context)) {
        ctx->state = CAM_CTX_READY;
        trace_cam_context_state("ISP", ctx);
    }

    CAM_DBG(CAM_ISP, "next state %d", ctx->state);
    return rc;
}
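/*
 * CONFIG_DEV while FLUSHED: accept the packet as usual and, provided an
 * INIT packet was received earlier, resume the hw manager and restart
 * the device so streaming continues after the flush.
 */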
static int __cam_isp_ctx_config_dev_in_flushed(struct cam_context *ctx,
    struct cam_config_dev_cmd *cmd)
{
    int rc = 0;
    struct cam_start_stop_dev_cmd start_cmd;
    struct cam_hw_cmd_args hw_cmd_args;
    struct cam_isp_hw_cmd_args isp_hw_cmd_args;
    struct cam_isp_context *ctx_isp =
        (struct cam_isp_context *) ctx->ctx_priv;

    if (!ctx_isp->hw_acquired) {
        CAM_ERR(CAM_ISP, "HW is not acquired, reject packet");
        rc = -EINVAL;
        goto end;
    }

    rc = __cam_isp_ctx_config_dev_in_top_state(ctx, cmd);
    if (rc)
        goto end;

    if (!ctx_isp->init_received) {
        CAM_WARN(CAM_ISP,
            "Received update packet in flushed state, skip start");
        goto end;
    }

    hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
    hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
    isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_RESUME_HW;
    hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
    rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
        &hw_cmd_args);
    if (rc) {
        CAM_ERR(CAM_ISP, "Failed to resume HW rc: %d", rc);
        goto end;
    }

    start_cmd.dev_handle = cmd->dev_handle;
    start_cmd.session_handle = cmd->session_handle;
    rc = __cam_isp_ctx_start_dev_in_ready(ctx, &start_cmd);
    if (rc)
        CAM_ERR(CAM_ISP,
            "Failed to re-start HW after flush rc: %d", rc);
    else
        CAM_INFO(CAM_ISP,
            "Received init after flush. Re-start HW complete in ctx:%d",
            ctx->ctx_id);

end:
    CAM_DBG(CAM_ISP, "next state %d sub_state:%d", ctx->state,
        ctx_isp->substate_activated);
    return rc;
}
static int __cam_isp_ctx_link_in_acquired(struct cam_context *ctx,
    struct cam_req_mgr_core_dev_link_setup *link)
{
    int rc = 0;
    struct cam_isp_context *ctx_isp =
        (struct cam_isp_context *) ctx->ctx_priv;

    CAM_DBG(CAM_ISP, "Enter.........");

    ctx->link_hdl = link->link_hdl;
    ctx->ctx_crm_intf = link->crm_cb;
    ctx_isp->subscribe_event =
        CAM_TRIGGER_POINT_SOF | CAM_TRIGGER_POINT_EOF;
    ctx_isp->trigger_id = link->trigger_id;

    /* change state only if we had the init config */
    if (ctx_isp->init_received) {
        ctx->state = CAM_CTX_READY;
        trace_cam_context_state("ISP", ctx);
    }

    CAM_DBG(CAM_ISP, "next state %d", ctx->state);
    return rc;
}
static int __cam_isp_ctx_unlink_in_acquired(struct cam_context *ctx,
    struct cam_req_mgr_core_dev_link_setup *unlink)
{
    int rc = 0;
    struct cam_isp_context *ctx_isp =
        (struct cam_isp_context *) ctx->ctx_priv;

    ctx->link_hdl = -1;
    ctx->ctx_crm_intf = NULL;
    ctx_isp->trigger_id = -1;

    return rc;
}
static int __cam_isp_ctx_get_dev_info_in_acquired(struct cam_context *ctx,
    struct cam_req_mgr_device_info *dev_info)
{
    int rc = 0;

    dev_info->dev_hdl = ctx->dev_hdl;
    strlcpy(dev_info->name, CAM_ISP_DEV_NAME, sizeof(dev_info->name));
    dev_info->dev_id = CAM_REQ_MGR_DEVICE_IFE;
    dev_info->p_delay = 1;
    dev_info->trigger = CAM_TRIGGER_POINT_SOF;
    dev_info->trigger_on = true;

    return rc;
}
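/*
 * START_DEV from READY (or FLUSHED, via start_only): build the start
 * args from the first pending request, pre-position that request on
 * the free/wait/active list the substate machine expects, switch to
 * ACTIVATED before starting the hw since IRQs may fire immediately,
 * and roll the request back to the pending list if hw_start fails.
 */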
static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx,
    struct cam_start_stop_dev_cmd *cmd)
{
    int rc = 0;
    int i;
    struct cam_isp_start_args start_isp;
    struct cam_ctx_request *req;
    struct cam_isp_ctx_req *req_isp;
    struct cam_isp_context *ctx_isp =
        (struct cam_isp_context *) ctx->ctx_priv;

    if (cmd->session_handle != ctx->session_hdl ||
        cmd->dev_handle != ctx->dev_hdl) {
        rc = -EPERM;
        goto end;
    }

    if (list_empty(&ctx->pending_req_list)) {
        /* should never happen */
        CAM_ERR(CAM_ISP, "Start device with empty configuration");
        rc = -EFAULT;
        goto end;
    } else {
        req = list_first_entry(&ctx->pending_req_list,
            struct cam_ctx_request, list);
    }
    req_isp = (struct cam_isp_ctx_req *) req->req_priv;

    if (!ctx_isp->hw_ctx) {
        CAM_ERR(CAM_ISP, "Wrong hw context pointer.");
        rc = -EFAULT;
        goto end;
    }

    start_isp.hw_config.ctxt_to_hw_map = ctx_isp->hw_ctx;
    start_isp.hw_config.request_id = req->request_id;
    start_isp.hw_config.hw_update_entries = req_isp->cfg;
    start_isp.hw_config.num_hw_update_entries = req_isp->num_cfg;
    start_isp.hw_config.priv = &req_isp->hw_update_data;
    start_isp.hw_config.init_packet = 1;
    start_isp.hw_config.reapply = 0;
    start_isp.hw_config.cdm_reset_before_apply = false;

    ctx_isp->last_applied_req_id = req->request_id;

    if (ctx->state == CAM_CTX_FLUSHED)
        start_isp.start_only = true;
    else
        start_isp.start_only = false;

    atomic_set(&ctx_isp->process_bubble, 0);
    atomic_set(&ctx_isp->rxd_epoch, 0);
    ctx_isp->frame_id = 0;
    ctx_isp->active_req_cnt = 0;
    ctx_isp->reported_req_id = 0;
    ctx_isp->bubble_frame_cnt = 0;
    ctx_isp->substate_activated = ctx_isp->rdi_only_context ?
        CAM_ISP_CTX_ACTIVATED_APPLIED :
        (req_isp->num_fence_map_out) ? CAM_ISP_CTX_ACTIVATED_EPOCH :
        CAM_ISP_CTX_ACTIVATED_SOF;

    atomic64_set(&ctx_isp->state_monitor_head, -1);

    for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
        atomic64_set(&ctx_isp->event_record_head[i], -1);

    /*
     * In case of CSID TPG we might receive SOF and RUP IRQs
     * before hw_mgr_intf->hw_start has returned. So move
     * req out of pending list before hw_start and add it
     * back to pending list if hw_start fails.
     */
    list_del_init(&req->list);

    if (ctx_isp->offline_context && !req_isp->num_fence_map_out) {
        list_add_tail(&req->list, &ctx->free_req_list);
        atomic_set(&ctx_isp->rxd_epoch, 1);
        CAM_DBG(CAM_REQ,
            "Move pending req: %lld to free list(cnt: %d) offline ctx %u",
            req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);
    } else if (ctx_isp->rdi_only_context || !req_isp->num_fence_map_out) {
        list_add_tail(&req->list, &ctx->wait_req_list);
        CAM_DBG(CAM_REQ,
            "Move pending req: %lld to wait list(cnt: %d) ctx %u",
            req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);
    } else {
        list_add_tail(&req->list, &ctx->active_req_list);
        ctx_isp->active_req_cnt++;
        CAM_DBG(CAM_REQ,
            "Move pending req: %lld to active list(cnt: %d) ctx %u offline %d",
            req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id,
            ctx_isp->offline_context);
    }

    /*
     * Only place to change state before calling the hw: the hardware
     * tasklet has higher priority and can cause irq handling to come
     * in early.
     */
    ctx->state = CAM_CTX_ACTIVATED;
    trace_cam_context_state("ISP", ctx);
    rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv,
        &start_isp);
    if (rc) {
        /* HW failure; the user needs to clean up the resource */
        CAM_ERR(CAM_ISP, "Start HW failed");
        ctx->state = CAM_CTX_READY;
        if ((rc == -ETIMEDOUT) &&
            (isp_ctx_debug.enable_cdm_cmd_buff_dump))
            rc = cam_isp_ctx_dump_req(req_isp, 0, 0, NULL, false);
        trace_cam_context_state("ISP", ctx);
        list_del_init(&req->list);
        list_add(&req->list, &ctx->pending_req_list);
        goto end;
    }
    CAM_DBG(CAM_ISP, "start device success ctx %u", ctx->ctx_id);

end:
    return rc;
}
static int __cam_isp_ctx_unlink_in_ready(struct cam_context *ctx,
    struct cam_req_mgr_core_dev_link_setup *unlink)
{
    int rc = 0;

    ctx->link_hdl = -1;
    ctx->ctx_crm_intf = NULL;
    ctx->state = CAM_CTX_ACQUIRED;
    trace_cam_context_state("ISP", ctx);

    return rc;
}
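/*
 * Common stop path: park the substate machine in HALT so late IRQs are
 * ignored, stop the hardware, notify CRM, signal CANCEL on the fences
 * of every pending, waiting and active request, and reset the
 * per-context counters. When called without a stop command (internal
 * teardown) the link is also unlinked, dropping back to ACQUIRED.
 */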
static int __cam_isp_ctx_stop_dev_in_activated_unlock(
    struct cam_context *ctx, struct cam_start_stop_dev_cmd *stop_cmd)
{
    int rc = 0;
    uint32_t i;
    struct cam_hw_stop_args stop;
    struct cam_ctx_request *req;
    struct cam_isp_ctx_req *req_isp;
    struct cam_isp_context *ctx_isp =
        (struct cam_isp_context *) ctx->ctx_priv;
    struct cam_isp_stop_args stop_isp;

    /* Mask off all the incoming hardware events */
    spin_lock_bh(&ctx->lock);
    ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;
    spin_unlock_bh(&ctx->lock);

    /* stop hw first */
    if (ctx_isp->hw_ctx) {
        stop.ctxt_to_hw_map = ctx_isp->hw_ctx;
        stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_IMMEDIATELY;
        stop_isp.stop_only = false;
        stop.args = (void *) &stop_isp;
        ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
            &stop);
    }

    CAM_DBG(CAM_ISP, "next Substate[%s]",
        __cam_isp_ctx_substate_val_to_type(
        ctx_isp->substate_activated));

    if (ctx->ctx_crm_intf &&
        ctx->ctx_crm_intf->notify_stop) {
        struct cam_req_mgr_notify_stop notify;

        notify.link_hdl = ctx->link_hdl;
        CAM_DBG(CAM_ISP,
            "Notify CRM about device stop ctx %u link 0x%x",
            ctx->ctx_id, ctx->link_hdl);
        ctx->ctx_crm_intf->notify_stop(&notify);
    } else
        CAM_ERR(CAM_ISP, "cb not present");

    while (!list_empty(&ctx->pending_req_list)) {
        req = list_first_entry(&ctx->pending_req_list,
            struct cam_ctx_request, list);
        list_del_init(&req->list);
        req_isp = (struct cam_isp_ctx_req *) req->req_priv;
        CAM_DBG(CAM_ISP, "signal fence in pending list. fence num %d",
            req_isp->num_fence_map_out);
        for (i = 0; i < req_isp->num_fence_map_out; i++)
            if (req_isp->fence_map_out[i].sync_id != -1) {
                cam_sync_signal(
                    req_isp->fence_map_out[i].sync_id,
                    CAM_SYNC_STATE_SIGNALED_CANCEL,
                    CAM_SYNC_ISP_EVENT_HW_STOP);
            }
        list_add_tail(&req->list, &ctx->free_req_list);
    }

    while (!list_empty(&ctx->wait_req_list)) {
        req = list_first_entry(&ctx->wait_req_list,
            struct cam_ctx_request, list);
        list_del_init(&req->list);
        req_isp = (struct cam_isp_ctx_req *) req->req_priv;
        CAM_DBG(CAM_ISP, "signal fence in wait list. fence num %d",
            req_isp->num_fence_map_out);
        for (i = 0; i < req_isp->num_fence_map_out; i++)
            if (req_isp->fence_map_out[i].sync_id != -1) {
                cam_sync_signal(
                    req_isp->fence_map_out[i].sync_id,
                    CAM_SYNC_STATE_SIGNALED_CANCEL,
                    CAM_SYNC_ISP_EVENT_HW_STOP);
            }
        list_add_tail(&req->list, &ctx->free_req_list);
    }

    while (!list_empty(&ctx->active_req_list)) {
        req = list_first_entry(&ctx->active_req_list,
            struct cam_ctx_request, list);
        list_del_init(&req->list);
        req_isp = (struct cam_isp_ctx_req *) req->req_priv;
        CAM_DBG(CAM_ISP, "signal fence in active list. fence num %d",
            req_isp->num_fence_map_out);
        for (i = 0; i < req_isp->num_fence_map_out; i++)
            if (req_isp->fence_map_out[i].sync_id != -1) {
                cam_sync_signal(
                    req_isp->fence_map_out[i].sync_id,
                    CAM_SYNC_STATE_SIGNALED_CANCEL,
                    CAM_SYNC_ISP_EVENT_HW_STOP);
            }
        list_add_tail(&req->list, &ctx->free_req_list);
    }

    ctx_isp->frame_id = 0;
    ctx_isp->active_req_cnt = 0;
    ctx_isp->reported_req_id = 0;
    ctx_isp->last_applied_req_id = 0;
    ctx_isp->req_info.last_bufdone_req_id = 0;
    ctx_isp->bubble_frame_cnt = 0;
    atomic_set(&ctx_isp->process_bubble, 0);
    atomic_set(&ctx_isp->rxd_epoch, 0);
    atomic64_set(&ctx_isp->state_monitor_head, -1);

    for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
        atomic64_set(&ctx_isp->event_record_head[i], -1);

    CAM_DBG(CAM_ISP, "Stop device success next state %d on ctx %u",
        ctx->state, ctx->ctx_id);

    if (!stop_cmd) {
        rc = __cam_isp_ctx_unlink_in_ready(ctx, NULL);
        if (rc)
            CAM_ERR(CAM_ISP, "Unlink failed rc=%d", rc);
    }
    return rc;
}
static int __cam_isp_ctx_stop_dev_in_activated(struct cam_context *ctx,
    struct cam_start_stop_dev_cmd *cmd)
{
    int rc = 0;
    struct cam_isp_context *ctx_isp =
        (struct cam_isp_context *)ctx->ctx_priv;

    __cam_isp_ctx_stop_dev_in_activated_unlock(ctx, cmd);
    ctx_isp->init_received = false;
    ctx->state = CAM_CTX_ACQUIRED;
    trace_cam_context_state("ISP", ctx);
    return rc;
}
static int __cam_isp_ctx_release_dev_in_activated(struct cam_context *ctx,
    struct cam_release_dev_cmd *cmd)
{
    int rc = 0;

    rc = __cam_isp_ctx_stop_dev_in_activated_unlock(ctx, NULL);
    if (rc)
        CAM_ERR(CAM_ISP, "Stop device failed rc=%d", rc);

    rc = __cam_isp_ctx_release_dev_in_top_state(ctx, cmd);
    if (rc)
        CAM_ERR(CAM_ISP, "Release device failed rc=%d", rc);

    return rc;
}
static int __cam_isp_ctx_release_hw_in_activated(struct cam_context *ctx,
    void *cmd)
{
    int rc = 0;

    rc = __cam_isp_ctx_stop_dev_in_activated_unlock(ctx, NULL);
    if (rc)
        CAM_ERR(CAM_ISP, "Stop device failed rc=%d", rc);

    rc = __cam_isp_ctx_release_hw_in_top_state(ctx, cmd);
    if (rc)
        CAM_ERR(CAM_ISP, "Release hw failed rc=%d", rc);

    return rc;
}
static int __cam_isp_ctx_link_pause(struct cam_context *ctx)
{
    int rc = 0;
    struct cam_hw_cmd_args hw_cmd_args;
    struct cam_isp_hw_cmd_args isp_hw_cmd_args;
    struct cam_isp_context *ctx_isp =
        (struct cam_isp_context *) ctx->ctx_priv;

    hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
    hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
    isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_PAUSE_HW;
    hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
    rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
        &hw_cmd_args);

    return rc;
}
static int __cam_isp_ctx_link_resume(struct cam_context *ctx)
{
    int rc = 0;
    struct cam_hw_cmd_args hw_cmd_args;
    struct cam_isp_hw_cmd_args isp_hw_cmd_args;
    struct cam_isp_context *ctx_isp =
        (struct cam_isp_context *) ctx->ctx_priv;

    hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
    hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
    isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_RESUME_HW;
    hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
    rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
        &hw_cmd_args);

    return rc;
}
static int __cam_isp_ctx_handle_sof_freeze_evt(
    struct cam_context *ctx)
{
    int rc = 0;
    struct cam_hw_cmd_args hw_cmd_args;
    struct cam_isp_hw_cmd_args isp_hw_cmd_args;
    struct cam_isp_context *ctx_isp =
        (struct cam_isp_context *) ctx->ctx_priv;

    hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
    hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
    isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_SOF_DEBUG;
    isp_hw_cmd_args.u.sof_irq_enable = 1;
    hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;

    rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
        &hw_cmd_args);

    return rc;
}
static int __cam_isp_ctx_process_evt(struct cam_context *ctx,
    struct cam_req_mgr_link_evt_data *link_evt_data)
{
    int rc = 0;

    switch (link_evt_data->evt_type) {
    case CAM_REQ_MGR_LINK_EVT_ERR:
        /* No need to handle this message now */
        break;
    case CAM_REQ_MGR_LINK_EVT_PAUSE:
        __cam_isp_ctx_link_pause(ctx);
        break;
    case CAM_REQ_MGR_LINK_EVT_RESUME:
        __cam_isp_ctx_link_resume(ctx);
        break;
    case CAM_REQ_MGR_LINK_EVT_SOF_FREEZE:
        __cam_isp_ctx_handle_sof_freeze_evt(ctx);
        break;
    default:
        CAM_WARN(CAM_ISP, "Unknown event from CRM");
        break;
    }
    return rc;
}
static int __cam_isp_ctx_unlink_in_activated(struct cam_context *ctx,
    struct cam_req_mgr_core_dev_link_setup *unlink)
{
    int rc = 0;

    CAM_WARN(CAM_ISP,
        "Received unlink in activated state. It's unexpected");

    rc = __cam_isp_ctx_stop_dev_in_activated_unlock(ctx, NULL);
    if (rc)
        CAM_WARN(CAM_ISP, "Stop device failed rc=%d", rc);

    rc = __cam_isp_ctx_unlink_in_ready(ctx, unlink);
    if (rc)
        CAM_ERR(CAM_ISP, "Unlink failed rc=%d", rc);

    return rc;
}
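/*
 * CRM apply entry point while ACTIVATED: forward the apply to the
 * current substate's handler. The handler may legitimately be absent
 * in substates that cannot take a new request, in which case the apply
 * is rejected with a rate-limited warning.
 */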
static int __cam_isp_ctx_apply_req(struct cam_context *ctx,
    struct cam_req_mgr_apply_request *apply)
{
    int rc = 0;
    struct cam_ctx_ops *ctx_ops = NULL;
    struct cam_isp_context *ctx_isp =
        (struct cam_isp_context *) ctx->ctx_priv;

    trace_cam_apply_req("ISP", apply->request_id);
    CAM_DBG(CAM_ISP, "Enter: apply req in Substate[%s] request_id:%lld",
        __cam_isp_ctx_substate_val_to_type(
        ctx_isp->substate_activated), apply->request_id);
    ctx_ops = &ctx_isp->substate_machine[ctx_isp->substate_activated];
    if (ctx_ops->crm_ops.apply_req) {
        rc = ctx_ops->crm_ops.apply_req(ctx, apply);
    } else {
        CAM_WARN_RATE_LIMIT(CAM_ISP,
            "No handle function in activated Substate[%s]",
            __cam_isp_ctx_substate_val_to_type(
            ctx_isp->substate_activated));
        rc = -EFAULT;
    }

    if (rc)
        CAM_WARN_RATE_LIMIT(CAM_ISP,
            "Apply failed in active Substate[%s] rc %d",
            __cam_isp_ctx_substate_val_to_type(
            ctx_isp->substate_activated), rc);
    return rc;
}
static int __cam_isp_ctx_apply_default_settings(
    struct cam_context *ctx,
    struct cam_req_mgr_apply_request *apply)
{
    int rc = 0;
    struct cam_ctx_ops *ctx_ops = NULL;
    struct cam_isp_context *ctx_isp =
        (struct cam_isp_context *) ctx->ctx_priv;

    if (!ctx_isp->use_default_apply)
        return 0;

    CAM_DBG(CAM_ISP,
        "Enter: apply req in Substate %d request_id:%lld",
        ctx_isp->substate_activated, apply->request_id);

    ctx_ops = &ctx_isp->substate_machine[
        ctx_isp->substate_activated];
    if (ctx_ops->crm_ops.notify_frame_skip) {
        rc = ctx_ops->crm_ops.notify_frame_skip(ctx, apply);
    } else {
        CAM_WARN_RATE_LIMIT(CAM_ISP,
            "No handle function in activated substate %d",
            ctx_isp->substate_activated);
        rc = -EFAULT;
    }

    if (rc)
        CAM_WARN_RATE_LIMIT(CAM_ISP,
            "Apply default failed in active substate %d rc %d",
            ctx_isp->substate_activated, rc);
    return rc;
}
static int __cam_isp_ctx_handle_irq_in_activated(void *context,
    uint32_t evt_id, void *evt_data)
{
    int rc = 0;
    struct cam_isp_ctx_irq_ops *irq_ops = NULL;
    struct cam_context *ctx = (struct cam_context *)context;
    struct cam_isp_context *ctx_isp =
        (struct cam_isp_context *)ctx->ctx_priv;

    spin_lock(&ctx->lock);
    trace_cam_isp_activated_irq(ctx, ctx_isp->substate_activated, evt_id,
        __cam_isp_ctx_get_event_ts(evt_id, evt_data));

    CAM_DBG(CAM_ISP, "Enter: State %d, Substate[%s], evt id %d",
        ctx->state, __cam_isp_ctx_substate_val_to_type(
        ctx_isp->substate_activated), evt_id);
    irq_ops = &ctx_isp->substate_machine_irq[ctx_isp->substate_activated];
    if (irq_ops->irq_ops[evt_id]) {
        rc = irq_ops->irq_ops[evt_id](ctx_isp, evt_data);
    } else {
        CAM_DBG(CAM_ISP, "No handle function for Substate[%s]",
            __cam_isp_ctx_substate_val_to_type(
            ctx_isp->substate_activated));
        if (isp_ctx_debug.enable_state_monitor_dump)
            __cam_isp_ctx_dump_state_monitor_array(ctx_isp);
    }

    CAM_DBG(CAM_ISP, "Exit: State %d Substate[%s]",
        ctx->state, __cam_isp_ctx_substate_val_to_type(
        ctx_isp->substate_activated));
    spin_unlock(&ctx->lock);
    return rc;
}
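/*
 * Top-level state machine table, indexed by the cam_context state.
 * Only the Activated entry wires an irq handler; the page-fault and
 * info-dump hooks are shared by every state that can hold requests.
 */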
/* top state machine */
static struct cam_ctx_ops
    cam_isp_ctx_top_state_machine[CAM_CTX_STATE_MAX] = {
    /* Uninit */
    {
        .ioctl_ops = {},
        .crm_ops = {},
        .irq_ops = NULL,
    },
    /* Available */
    {
        .ioctl_ops = {
            .acquire_dev = __cam_isp_ctx_acquire_dev_in_available,
        },
        .crm_ops = {},
        .irq_ops = NULL,
    },
    /* Acquired */
    {
        .ioctl_ops = {
            .acquire_hw = __cam_isp_ctx_acquire_hw_in_acquired,
            .release_dev = __cam_isp_ctx_release_dev_in_top_state,
            .config_dev = __cam_isp_ctx_config_dev_in_acquired,
            .release_hw = __cam_isp_ctx_release_hw_in_top_state,
        },
        .crm_ops = {
            .link = __cam_isp_ctx_link_in_acquired,
            .unlink = __cam_isp_ctx_unlink_in_acquired,
            .get_dev_info = __cam_isp_ctx_get_dev_info_in_acquired,
            .flush_req = __cam_isp_ctx_flush_req_in_top_state,
            .dump_req = __cam_isp_ctx_dump_in_top_state,
        },
        .irq_ops = NULL,
        .pagefault_ops = cam_isp_context_dump_requests,
        .dumpinfo_ops = cam_isp_context_info_dump,
    },
    /* Ready */
    {
        .ioctl_ops = {
            .start_dev = __cam_isp_ctx_start_dev_in_ready,
            .release_dev = __cam_isp_ctx_release_dev_in_top_state,
            .config_dev = __cam_isp_ctx_config_dev_in_top_state,
            .release_hw = __cam_isp_ctx_release_hw_in_top_state,
        },
        .crm_ops = {
            .unlink = __cam_isp_ctx_unlink_in_ready,
            .flush_req = __cam_isp_ctx_flush_req_in_ready,
            .dump_req = __cam_isp_ctx_dump_in_top_state,
        },
        .irq_ops = NULL,
        .pagefault_ops = cam_isp_context_dump_requests,
        .dumpinfo_ops = cam_isp_context_info_dump,
    },
    /* Flushed */
    {
        .ioctl_ops = {
            .stop_dev = __cam_isp_ctx_stop_dev_in_activated,
            .release_dev = __cam_isp_ctx_release_dev_in_activated,
            .config_dev = __cam_isp_ctx_config_dev_in_flushed,
            .release_hw = __cam_isp_ctx_release_hw_in_activated,
        },
        .crm_ops = {
            .unlink = __cam_isp_ctx_unlink_in_ready,
            .process_evt = __cam_isp_ctx_process_evt,
        },
        .irq_ops = NULL,
        .pagefault_ops = cam_isp_context_dump_requests,
        .dumpinfo_ops = cam_isp_context_info_dump,
    },
    /* Activated */
    {
        .ioctl_ops = {
            .stop_dev = __cam_isp_ctx_stop_dev_in_activated,
            .release_dev = __cam_isp_ctx_release_dev_in_activated,
            .config_dev = __cam_isp_ctx_config_dev_in_top_state,
            .release_hw = __cam_isp_ctx_release_hw_in_activated,
        },
        .crm_ops = {
            .unlink = __cam_isp_ctx_unlink_in_activated,
            .apply_req = __cam_isp_ctx_apply_req,
            .notify_frame_skip =
                __cam_isp_ctx_apply_default_settings,
            .flush_req = __cam_isp_ctx_flush_req_in_top_state,
            .process_evt = __cam_isp_ctx_process_evt,
            .dump_req = __cam_isp_ctx_dump_in_top_state,
        },
        .irq_ops = __cam_isp_ctx_handle_irq_in_activated,
        .pagefault_ops = cam_isp_context_dump_requests,
        .dumpinfo_ops = cam_isp_context_info_dump,
    },
};
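/*
 * SMMU page-fault callback: walk the active, wait and pending request
 * lists and ask the hw manager to match the faulting address against
 * each request's buffers. If the fault is traced back to this context,
 * log the offending resource and notify user space through CRM.
 */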
  5322. static int cam_isp_context_dump_requests(void *data,
  5323. struct cam_smmu_pf_info *pf_info)
  5324. {
  5325. struct cam_context *ctx = (struct cam_context *)data;
  5326. struct cam_ctx_request *req = NULL;
  5327. struct cam_ctx_request *req_temp = NULL;
  5328. struct cam_isp_ctx_req *req_isp = NULL;
  5329. struct cam_isp_prepare_hw_update_data *hw_update_data = NULL;
  5330. struct cam_hw_mgr_dump_pf_data *pf_dbg_entry = NULL;
  5331. struct cam_req_mgr_message req_msg;
  5332. struct cam_isp_context *ctx_isp;
  5333. uint32_t resource_type = 0;
  5334. bool mem_found = false, ctx_found = false, send_error = false;
  5335. int rc = 0;
  5336. struct cam_isp_context *isp_ctx =
  5337. (struct cam_isp_context *)ctx->ctx_priv;
  5338. if (!isp_ctx) {
  5339. CAM_ERR(CAM_ISP, "Invalid isp ctx");
  5340. return -EINVAL;
  5341. }
  5342. CAM_INFO(CAM_ISP, "iommu fault handler for isp ctx %d state %d",
  5343. ctx->ctx_id, ctx->state);
  5344. list_for_each_entry_safe(req, req_temp,
  5345. &ctx->active_req_list, list) {
  5346. req_isp = (struct cam_isp_ctx_req *) req->req_priv;
  5347. hw_update_data = &req_isp->hw_update_data;
  5348. pf_dbg_entry = &(req->pf_data);
  5349. CAM_INFO(CAM_ISP, "Active List: req_id : %lld ",
  5350. req->request_id);
  5351. rc = cam_context_dump_pf_info_to_hw(ctx, pf_dbg_entry,
  5352. &mem_found, &ctx_found, &resource_type, pf_info);
  5353. if (rc)
  5354. CAM_ERR(CAM_ISP, "Failed to dump pf info");
  5355. if (ctx_found)
  5356. send_error = true;
  5357. }
	CAM_INFO(CAM_ISP, "Iterating over wait_list of isp ctx %d state %d",
		ctx->ctx_id, ctx->state);
	list_for_each_entry_safe(req, req_temp,
		&ctx->wait_req_list, list) {
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		hw_update_data = &req_isp->hw_update_data;
		pf_dbg_entry = &(req->pf_data);
		CAM_INFO(CAM_ISP, "Wait List: req_id : %lld ", req->request_id);

		rc = cam_context_dump_pf_info_to_hw(ctx, pf_dbg_entry,
			&mem_found, &ctx_found, &resource_type, pf_info);
		if (rc)
			CAM_ERR(CAM_ISP, "Failed to dump pf info");

		if (ctx_found)
			send_error = true;
	}
	/*
	 * In certain scenarios both an overflow and an SMMU page fault are
	 * observed for the same request. If the overflow is handled before
	 * the page fault, bubble recovery may have moved the offending
	 * request and all subsequent requests back to the pending list, so
	 * the pending list must be traversed here as well.
	 */
	CAM_INFO(CAM_ISP,
		"Iterating over pending req list of isp ctx %d state %d",
		ctx->ctx_id, ctx->state);
	list_for_each_entry_safe(req, req_temp,
		&ctx->pending_req_list, list) {
		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
		hw_update_data = &req_isp->hw_update_data;
		pf_dbg_entry = &(req->pf_data);
		CAM_INFO(CAM_ISP, "Pending List: req_id : %lld ",
			req->request_id);

		rc = cam_context_dump_pf_info_to_hw(ctx, pf_dbg_entry,
			&mem_found, &ctx_found, &resource_type, pf_info);
		if (rc)
			CAM_ERR(CAM_ISP, "Failed to dump pf info");

		if (ctx_found)
			send_error = true;
	}
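	/*
	 * A non-zero resource_type means the hw mgr matched the fault to a
	 * specific output port; IFE and TFE use separate id-to-name tables.
	 */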
	if (resource_type) {
		ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
		if (ctx_isp->isp_device_type == CAM_IFE_DEVICE_TYPE)
			CAM_ERR(CAM_ISP,
				"Page fault on resource id:%s (0x%x) ctx id:%d frame id:%d reported id:%lld applied id:%lld",
				__cam_isp_resource_handle_id_to_type(
				resource_type),
				resource_type, ctx->ctx_id, ctx_isp->frame_id,
				ctx_isp->reported_req_id,
				ctx_isp->last_applied_req_id);
		else
			CAM_ERR(CAM_ISP,
				"Page fault on resource id:%s (0x%x) ctx id:%d frame id:%d reported id:%lld applied id:%lld",
				__cam_isp_tfe_resource_handle_id_to_type(
				resource_type),
				resource_type, ctx->ctx_id, ctx_isp->frame_id,
				ctx_isp->reported_req_id,
				ctx_isp->last_applied_req_id);
	}
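	/* Notify UMD only if the fault was traced back to this context */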
	if (send_error) {
		CAM_INFO(CAM_ISP,
			"page fault notifying to umd ctx %u session_hdl:%d device_hdl:%d link_hdl:%d",
			ctx->ctx_id, ctx->session_hdl,
			ctx->dev_hdl, ctx->link_hdl);

		req_msg.session_hdl = ctx->session_hdl;
		req_msg.u.err_msg.device_hdl = ctx->dev_hdl;
		req_msg.u.err_msg.error_type =
			CAM_REQ_MGR_ERROR_TYPE_PAGE_FAULT;
		req_msg.u.err_msg.link_hdl = ctx->link_hdl;
		req_msg.u.err_msg.request_id = 0;
		req_msg.u.err_msg.resource_size = 0x0;

		if (cam_req_mgr_notify_message(&req_msg,
			V4L_EVENT_CAM_REQ_MGR_ERROR,
			V4L_EVENT_CAM_REQ_MGR_EVENT))
			CAM_ERR(CAM_ISP,
				"could not send page fault notification ctx %u session_hdl:%d device_hdl:%d link_hdl:%d",
				ctx->ctx_id, ctx->session_hdl,
				ctx->dev_hdl, ctx->link_hdl);
	}

	return rc;
}
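
/*
 * Creates the "camera_isp_ctx" debugfs directory with knobs to enable
 * state-monitor dumps and CDM command-buffer dumps. Registered lazily
 * from the first context init.
 */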
static int cam_isp_context_debug_register(void)
{
	int rc = 0;
	struct dentry *dbgfileptr = NULL;

	dbgfileptr = debugfs_create_dir("camera_isp_ctx", NULL);
	if (IS_ERR(dbgfileptr)) {
		/*
		 * debugfs_create_dir() returns an ERR_PTR on failure, never
		 * NULL, so check for errors before hanging files off of it.
		 */
		if (PTR_ERR(dbgfileptr) == -ENODEV)
			CAM_WARN(CAM_ISP, "DebugFS not enabled in kernel!");
		else
			rc = PTR_ERR(dbgfileptr);
		goto end;
	}

	/* Store parent inode for cleanup in caller */
	isp_ctx_debug.dentry = dbgfileptr;

	debugfs_create_u32("enable_state_monitor_dump", 0644,
		isp_ctx_debug.dentry, &isp_ctx_debug.enable_state_monitor_dump);
	debugfs_create_u8("enable_cdm_cmd_buffer_dump", 0644,
		isp_ctx_debug.dentry, &isp_ctx_debug.enable_cdm_cmd_buff_dump);
end:
	return rc;
}
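
/*
 * Initializes an ISP context on top of the base camera context: zeroes
 * the context, wires each cam_ctx_request to its cam_isp_ctx_req,
 * installs the top-level and activated-substate state machines, and
 * registers debugfs on first use.
 */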
int cam_isp_context_init(struct cam_isp_context *ctx,
	struct cam_context *ctx_base,
	struct cam_req_mgr_kmd_ops *crm_node_intf,
	struct cam_hw_mgr_intf *hw_intf,
	uint32_t ctx_id,
	uint32_t isp_device_type)
{
	/* default to -EINVAL until the base context init succeeds */
	int rc = -EINVAL;
	int i;

	if (!ctx || !ctx_base) {
		CAM_ERR(CAM_ISP, "Invalid Context");
		goto err;
	}

	/* ISP context setup */
	memset(ctx, 0, sizeof(*ctx));
	ctx->base = ctx_base;
	ctx->frame_id = 0;
	ctx->custom_enabled = false;
	ctx->use_frame_header_ts = false;
	ctx->use_default_apply = false;
	ctx->active_req_cnt = 0;
	ctx->reported_req_id = 0;
	ctx->bubble_frame_cnt = 0;
	ctx->req_info.last_bufdone_req_id = 0;
	ctx->hw_ctx = NULL;
	ctx->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
	ctx->substate_machine = cam_isp_ctx_activated_state_machine;
	ctx->substate_machine_irq = cam_isp_ctx_activated_state_machine_irq;
	ctx->init_timestamp = jiffies_to_msecs(jiffies);
	ctx->isp_device_type = isp_device_type;

	for (i = 0; i < CAM_ISP_CTX_REQ_MAX; i++) {
		ctx->req_base[i].req_priv = &ctx->req_isp[i];
		ctx->req_isp[i].base = &ctx->req_base[i];
	}

	/* camera context setup */
	rc = cam_context_init(ctx_base, isp_dev_name, CAM_ISP, ctx_id,
		crm_node_intf, hw_intf, ctx->req_base, CAM_ISP_CTX_REQ_MAX);
	if (rc) {
		CAM_ERR(CAM_ISP, "Camera Context Base init failed");
		goto err;
	}

	/* link camera context with isp context */
	ctx_base->state_machine = cam_isp_ctx_top_state_machine;
	ctx_base->ctx_priv = ctx;

	/* initializing current state for error logging */
	for (i = 0; i < CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES; i++) {
		ctx->cam_isp_ctx_state_monitor[i].curr_state =
			CAM_ISP_CTX_ACTIVATED_MAX;
	}

	atomic64_set(&ctx->state_monitor_head, -1);

	for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++)
		atomic64_set(&ctx->event_record_head[i], -1);

	if (!isp_ctx_debug.dentry)
		cam_isp_context_debug_register();
err:
	return rc;
}
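
/*
 * Tears down the base camera context, warns if the activated substate
 * is not back at SOF, and removes the shared debugfs directory.
 */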
int cam_isp_context_deinit(struct cam_isp_context *ctx)
{
	if (ctx->base)
		cam_context_deinit(ctx->base);

	if (ctx->substate_activated != CAM_ISP_CTX_ACTIVATED_SOF)
		CAM_ERR(CAM_ISP, "ISP context Substate[%s] is invalid",
			__cam_isp_ctx_substate_val_to_type(
			ctx->substate_activated));

	debugfs_remove_recursive(isp_ctx_debug.dentry);
	isp_ctx_debug.dentry = NULL;
	memset(ctx, 0, sizeof(*ctx));

	return 0;
}