qseecom.c 264 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677
76778677967806781678267836784678567866787678867896790679167926793679467956796679767986799680068016802680368046805680668076808680968106811681268136814681568166817681868196820682168226823682468256826682768286829683068316832683368346835683668376838683968406841684268436844684568466847684868496850685168526853685468556856685768586859686068616862686368646865686668676868686968706871687268736874687568766877687868796880688168826883688468856886688768886889689068916892689368946895689668976898689969006901690269036904690569066907690869096910691169126913691469156916691769186919692069216922692369246925692669276928692969306931693269336934693569366937693869396940694169426943694469456946694769486949695069516952695369546955695669576958695969606961696269636964696569666967696869696970697169726973697469756976697769786979698069816982698369846985698669876988698969906991699269936994699569966997699869997000700170027003700470057006700770087009701070117012701370147015701670177018701970207021702270237024702570267027702870297030703170327033703470357036703770387039704070417042704370447045704670477048704970507051705270537054705570567057705870597060706170627063706470657066706770687069707070717072707370747075707670777078707970807081708270837084708570867087708870897090709170927093709470957096709770987099710071017102710371047105710671077108710971107111711271137114711571167117711871197120712171227123712471257126712771287129713071317132713371347135713671377138713971407141714271437144714571467147714871497150715171527153715471557156715771587159716071617162716371647165716671677168716971707171717271737174717571767177717871797180718171827183718471857186718771887189719071917192719371947195719671977198719972007201720272037204720572067207720872097210721172127213721472157216721772187219722072217222722372247225722672277228722972307231723272337234723572367237723872397240724172427243724472457246724772487249725072517252725372547255725672577258725972607261726272637264726572667267726872697270727172727273727472757276727
77278727972807281728272837284728572867287728872897290729172927293729472957296729772987299730073017302730373047305730673077308730973107311731273137314731573167317731873197320732173227323732473257326732773287329733073317332733373347335733673377338733973407341734273437344734573467347734873497350735173527353735473557356735773587359736073617362736373647365736673677368736973707371737273737374737573767377737873797380738173827383738473857386738773887389739073917392739373947395739673977398739974007401740274037404740574067407740874097410741174127413741474157416741774187419742074217422742374247425742674277428742974307431743274337434743574367437743874397440744174427443744474457446744774487449745074517452745374547455745674577458745974607461746274637464746574667467746874697470747174727473747474757476747774787479748074817482748374847485748674877488748974907491749274937494749574967497749874997500750175027503750475057506750775087509751075117512751375147515751675177518751975207521752275237524752575267527752875297530753175327533753475357536753775387539754075417542754375447545754675477548754975507551755275537554755575567557755875597560756175627563756475657566756775687569757075717572757375747575757675777578757975807581758275837584758575867587758875897590759175927593759475957596759775987599760076017602760376047605760676077608760976107611761276137614761576167617761876197620762176227623762476257626762776287629763076317632763376347635763676377638763976407641764276437644764576467647764876497650765176527653765476557656765776587659766076617662766376647665766676677668766976707671767276737674767576767677767876797680768176827683768476857686768776887689769076917692769376947695769676977698769977007701770277037704770577067707770877097710771177127713771477157716771777187719772077217722772377247725772677277728772977307731773277337734773577367737773877397740774177427743774477457746774777487749775077517752775377547755775677577758775977607761776277637764776577667767776877697770777177727773777477757776777
77778777977807781778277837784778577867787778877897790779177927793779477957796779777987799780078017802780378047805780678077808780978107811781278137814781578167817781878197820782178227823782478257826782778287829783078317832783378347835783678377838783978407841784278437844784578467847784878497850785178527853785478557856785778587859786078617862786378647865786678677868786978707871787278737874787578767877787878797880788178827883788478857886788778887889789078917892789378947895789678977898789979007901790279037904790579067907790879097910791179127913791479157916791779187919792079217922792379247925792679277928792979307931793279337934793579367937793879397940794179427943794479457946794779487949795079517952795379547955795679577958795979607961796279637964796579667967796879697970797179727973797479757976797779787979798079817982798379847985798679877988798979907991799279937994799579967997799879998000800180028003800480058006800780088009801080118012801380148015801680178018801980208021802280238024802580268027802880298030803180328033803480358036803780388039804080418042804380448045804680478048804980508051805280538054805580568057805880598060806180628063806480658066806780688069807080718072807380748075807680778078807980808081808280838084808580868087808880898090809180928093809480958096809780988099810081018102810381048105810681078108810981108111811281138114811581168117811881198120812181228123812481258126812781288129813081318132813381348135813681378138813981408141814281438144814581468147814881498150815181528153815481558156815781588159816081618162816381648165816681678168816981708171817281738174817581768177817881798180818181828183818481858186818781888189819081918192819381948195819681978198819982008201820282038204820582068207820882098210821182128213821482158216821782188219822082218222822382248225822682278228822982308231823282338234823582368237823882398240824182428243824482458246824782488249825082518252825382548255825682578258825982608261826282638264826582668267826882698270827182728273827482758276827
78278827982808281828282838284828582868287828882898290829182928293829482958296829782988299830083018302830383048305830683078308830983108311831283138314831583168317831883198320832183228323832483258326832783288329833083318332833383348335833683378338833983408341834283438344834583468347834883498350835183528353835483558356835783588359836083618362836383648365836683678368836983708371837283738374837583768377837883798380838183828383838483858386838783888389839083918392839383948395839683978398839984008401840284038404840584068407840884098410841184128413841484158416841784188419842084218422842384248425842684278428842984308431843284338434843584368437843884398440844184428443844484458446844784488449845084518452845384548455845684578458845984608461846284638464846584668467846884698470847184728473847484758476847784788479848084818482848384848485848684878488848984908491849284938494849584968497849884998500850185028503850485058506850785088509851085118512851385148515851685178518851985208521852285238524852585268527852885298530853185328533853485358536853785388539854085418542854385448545854685478548854985508551855285538554855585568557855885598560856185628563856485658566856785688569857085718572857385748575857685778578857985808581858285838584858585868587858885898590859185928593859485958596859785988599860086018602860386048605860686078608860986108611861286138614861586168617861886198620862186228623862486258626862786288629863086318632863386348635863686378638863986408641864286438644864586468647864886498650865186528653865486558656865786588659866086618662866386648665866686678668866986708671867286738674867586768677867886798680868186828683868486858686868786888689869086918692869386948695869686978698869987008701870287038704870587068707870887098710871187128713871487158716871787188719872087218722872387248725872687278728872987308731873287338734873587368737873887398740874187428743874487458746874787488749875087518752875387548755875687578758875987608761876287638764876587668767876887698770877187728773877487758776877
78778877987808781878287838784878587868787878887898790879187928793879487958796879787988799880088018802880388048805880688078808880988108811881288138814881588168817881888198820882188228823882488258826882788288829883088318832883388348835883688378838883988408841884288438844884588468847884888498850885188528853885488558856885788588859886088618862886388648865886688678868886988708871887288738874887588768877887888798880888188828883888488858886888788888889889088918892889388948895889688978898889989008901890289038904890589068907890889098910891189128913891489158916891789188919892089218922892389248925892689278928892989308931893289338934893589368937893889398940894189428943894489458946894789488949895089518952895389548955895689578958895989608961896289638964896589668967896889698970897189728973897489758976897789788979898089818982898389848985898689878988898989908991899289938994899589968997899889999000900190029003900490059006900790089009901090119012901390149015901690179018901990209021902290239024902590269027902890299030903190329033903490359036903790389039904090419042904390449045904690479048904990509051905290539054905590569057905890599060906190629063906490659066906790689069907090719072907390749075907690779078907990809081908290839084908590869087908890899090909190929093909490959096909790989099910091019102910391049105910691079108910991109111911291139114911591169117911891199120912191229123912491259126912791289129913091319132913391349135913691379138913991409141914291439144914591469147914891499150915191529153915491559156915791589159916091619162916391649165916691679168916991709171917291739174917591769177917891799180918191829183918491859186918791889189919091919192919391949195919691979198919992009201920292039204920592069207920892099210921192129213921492159216921792189219922092219222922392249225922692279228922992309231923292339234923592369237923892399240924192429243924492459246924792489249925092519252925392549255925692579258925992609261926292639264926592669267926892699270927192729273927492759276927
79278927992809281928292839284928592869287928892899290929192929293929492959296929792989299930093019302930393049305930693079308930993109311931293139314931593169317931893199320932193229323932493259326932793289329933093319332933393349335933693379338933993409341934293439344934593469347934893499350935193529353935493559356935793589359936093619362936393649365936693679368936993709371937293739374937593769377937893799380938193829383938493859386938793889389939093919392939393949395939693979398939994009401940294039404940594069407940894099410941194129413941494159416941794189419942094219422942394249425942694279428942994309431943294339434943594369437943894399440944194429443944494459446944794489449945094519452945394549455945694579458945994609461946294639464946594669467946894699470947194729473947494759476947794789479948094819482948394849485948694879488948994909491949294939494949594969497949894999500950195029503950495059506950795089509951095119512951395149515951695179518951995209521952295239524952595269527952895299530953195329533953495359536953795389539954095419542954395449545954695479548954995509551955295539554955595569557955895599560956195629563956495659566956795689569957095719572957395749575957695779578957995809581958295839584958595869587958895899590959195929593959495959596959795989599960096019602960396049605960696079608960996109611961296139614961596169617961896199620962196229623962496259626962796289629963096319632963396349635963696379638963996409641964296439644964596469647964896499650965196529653965496559656965796589659966096619662966396649665966696679668966996709671967296739674967596769677967896799680968196829683968496859686968796889689969096919692969396949695969696979698969997009701970297039704970597069707970897099710971197129713971497159716971797189719972097219722972397249725972697279728972997309731973297339734973597369737973897399740974197429743974497459746974797489749975097519752975397549755975697579758975997609761976297639764976597669767976897699770977197729773977497759776
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * QTI Secure Execution Environment Communicator (QSEECOM) driver
  4. *
  5. * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
  6. * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  7. */
  8. #define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__
  9. #include <linux/kernel.h>
  10. #include <linux/slab.h>
  11. #include <linux/module.h>
  12. #include <linux/fs.h>
  13. #include <linux/platform_device.h>
  14. #include <linux/debugfs.h>
  15. #include <linux/cdev.h>
  16. #include <linux/uaccess.h>
  17. #include <linux/sched.h>
  18. #include <linux/list.h>
  19. #include <linux/mutex.h>
  20. #include <linux/io.h>
  21. #include <linux/dma-buf.h>
  22. #include <linux/ion.h>
  23. #include <linux/msm_ion.h>
  24. #include <linux/types.h>
  25. #include <linux/clk.h>
  26. #include <linux/elf.h>
  27. #include <linux/firmware.h>
  28. #include <linux/freezer.h>
  29. #include <linux/scatterlist.h>
  30. #include <linux/regulator/consumer.h>
  31. #include <linux/dma-mapping.h>
  32. #include <soc/qcom/qseecom_scm.h>
  33. #include <asm/cacheflush.h>
  34. #include <linux/delay.h>
  35. #include <linux/signal.h>
  36. #include <linux/compat.h>
  37. #include <linux/kthread.h>
  38. #include <linux/dma-map-ops.h>
  39. #include <linux/cma.h>
  40. #include <linux/of_platform.h>
  41. #include <linux/interconnect.h>
  42. #include <linux/of_reserved_mem.h>
  43. #include <linux/qtee_shmbridge.h>
  44. #include <linux/mem-buf.h>
  45. #include <linux/version.h>
  46. #include "linux/qseecom_api.h"
  47. #include "ice.h"
  48. #if IS_ENABLED(CONFIG_QSEECOM_PROXY)
  49. #include <linux/qseecom_kernel.h>
  50. #include "misc/qseecom_priv.h"
  51. #else
  52. #include "misc/qseecom_kernel.h"
  53. #endif
  54. #include "misc/qseecomi.h"
  55. #if (LINUX_VERSION_CODE <= KERNEL_VERSION(6,0,0))
  56. #define KERNEL_VERSION_LEGACY
  57. #endif
  58. #define QSEECOM_DEV "qseecom"
  59. #define QSEOS_VERSION_14 0x14
  60. #define QSEEE_VERSION_00 0x400000
  61. #define QSEE_VERSION_01 0x401000
  62. #define QSEE_VERSION_02 0x402000
  63. #define QSEE_VERSION_03 0x403000
  64. #define QSEE_VERSION_04 0x404000
  65. #define QSEE_VERSION_05 0x405000
  66. #define QSEE_VERSION_20 0x800000
  67. #define QSEE_VERSION_40 0x1000000 /* TZ.BF.4.0 */
  68. #define QSEE_CE_CLK_100MHZ 100000000
  69. #define CE_CLK_DIV 1000000
  70. #define QSEECOM_MAX_SG_ENTRY 4096
  71. #define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT \
  72. (QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)
  73. #define QSEECOM_INVALID_KEY_ID 0xff
  74. /* Save partition image hash for authentication check */
  75. #define SCM_SAVE_PARTITION_HASH_ID 0x01
  76. /* Check if enterprise security is activate */
  77. #define SCM_IS_ACTIVATED_ID 0x02
  78. /* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
  79. #define SCM_MDTP_CIPHER_DIP 0x01
  80. /* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
  81. #define MAX_DIP 0x20000
  82. #define RPMB_SERVICE 0x2000
  83. #define SSD_SERVICE 0x3000
  84. #define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT 2000
  85. #define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT 2000
  86. #define TWO 2
  87. #define QSEECOM_UFS_ICE_CE_NUM 10
  88. #define QSEECOM_SDCC_ICE_CE_NUM 20
  89. #define QSEECOM_ICE_FDE_KEY_INDEX 0
  90. #define PHY_ADDR_4G (1ULL<<32)
  91. #define QSEECOM_STATE_NOT_READY 0
  92. #define QSEECOM_STATE_SUSPEND 1
  93. #define QSEECOM_STATE_READY 2
  94. #define QSEECOM_ICE_FDE_KEY_SIZE_MASK 2
  95. /*
  96. * default ce info unit to 0 for
  97. * services which
  98. * support only single instance.
  99. * Most of services are in this category.
  100. */
  101. #define DEFAULT_CE_INFO_UNIT 0
  102. #define DEFAULT_NUM_CE_INFO_UNIT 1
  103. #define FDE_FLAG_POS 4
  104. #define ENABLE_KEY_WRAP_IN_KS (1 << FDE_FLAG_POS)
  105. /*
  106. * sg list buf format version
  107. * 1: Legacy format to support only 512 SG list entries
  108. * 2: new format to support > 512 entries
  109. */
  110. #define QSEECOM_SG_LIST_BUF_FORMAT_VERSION_1 1
  111. #define QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2 2
/*
 * Header placed at the start of a 64-bit scatter/gather list buffer in
 * format version 2 (used when the SG entry count exceeds what the legacy
 * format can carry; see QSEECOM_SG_LIST_BUF_FORMAT_VERSION_* above).
 * The leading blank entry must stay all-zero so TZ can tell this header
 * apart from a real SG entry.
 */
struct qseecom_sg_list_buf_hdr_64bit {
	struct qseecom_sg_entry_64bit blank_entry;	/* must be all 0 */
	__u32 version;		/* sg list buf format version */
	__u64 new_buf_phys_addr;	/* PA of new buffer */
	__u32 nents_total;	/* Total number of SG entries */
} __packed;
  118. #define QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT \
  119. sizeof(struct qseecom_sg_list_buf_hdr_64bit)
  120. #define MAX_CE_PIPE_PAIR_PER_UNIT 3
  121. #define INVALID_CE_INFO_UNIT_NUM 0xffffffff
  122. #define CE_PIPE_PAIR_USE_TYPE_FDE 0
  123. #define CE_PIPE_PAIR_USE_TYPE_PFE 1
  124. #define SG_ENTRY_SZ sizeof(struct qseecom_sg_entry)
  125. #define SG_ENTRY_SZ_64BIT sizeof(struct qseecom_sg_entry_64bit)
/* Bandwidth vote levels used for crypto-engine bus scaling. */
enum qseecom_bandwidth_request_mode {
	INACTIVE = 0,
	LOW,
	MEDIUM,
	HIGH,
};
/* Clock identifiers used by the clock-vote paths. */
enum qseecom_clk_definitions {
	CLK_DFAB = 0,
	CLK_SFPB,
};
/*
 * ICE (Inline Crypto Engine) FDE key-size flag values.
 * NOTE(review): QSEECOM_ICE_FDE_KEY_SIZE_MASK (value 2) is used here as
 * a bit *shift*, despite its "_MASK" name — the encoded values are
 * 0<<2, 1<<2 and 0xF<<2.
 */
enum qseecom_ice_key_size_type {
	QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
		(0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
		(1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
		(0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
};
/* What a struct qseecom_dev_handle represents (selects its union member). */
enum qseecom_client_handle_type {
	QSEECOM_CLIENT_APP = 1,
	QSEECOM_LISTENER_SERVICE,
	QSEECOM_SECURE_SERVICE,
	QSEECOM_GENERIC,
	QSEECOM_UNAVAILABLE_CLIENT_APP,
};
/* Crypto-engine clock instances managed by the driver. */
enum qseecom_ce_hw_instance {
	CLK_QSEE = 0,
	CLK_CE_DRV,
	CLK_INVALID,
};
/* Cache-maintenance operations applied to shared buffers. */
enum qseecom_cache_ops {
	QSEECOM_CACHE_CLEAN,
	QSEECOM_CACHE_INVALIDATE,
};
/* Sleep/wake state of the deferred listener-unregister kthread. */
enum qseecom_listener_unregister_kthread_state {
	LSNR_UNREG_KT_SLEEP = 0,
	LSNR_UNREG_KT_WAKEUP,
};
/* Sleep/wake state of the deferred app-unload kthread. */
enum qseecom_unload_app_kthread_state {
	UNLOAD_APP_KT_SLEEP = 0,
	UNLOAD_APP_KT_WAKEUP,
};
/* Serializes bandwidth/clock vote bookkeeping (qsee_bw_count et al.). */
static DEFINE_MUTEX(qsee_bw_mutex);
/*
 * Big driver lock held across TZ app calls; deliberately released around
 * the busy-retry sleep in __qseecom_scm_call2_locked().
 */
static DEFINE_MUTEX(app_access_lock);
/* Presumably guards clock enable/disable refcounting — confirm at call sites. */
static DEFINE_MUTEX(clk_access_lock);
/* Serializes listener registration, teardown and request handling. */
static DEFINE_MUTEX(listener_access_lock);
/* Protects the pending-app-unload list. */
static DEFINE_MUTEX(unload_app_pending_list_lock);
/*
 * One entry of the table handed to TZ describing shared memory referenced
 * from a request buffer. The bit layout of indexAndFlags and the meaning
 * of sizeOrCount are documented in the comment that follows.
 */
struct sglist_info {
	uint32_t indexAndFlags;
	uint32_t sizeOrCount;
};
  177. /*
  178. * The 31st bit indicates only one or multiple physical address inside
  179. * the request buffer. If it is set, the index locates a single physical addr
  180. * inside the request buffer, and `sizeOrCount` is the size of the memory being
  181. * shared at that physical address.
  182. * Otherwise, the index locates an array of {start, len} pairs (a
  183. * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
  184. * that array.
  185. *
  186. * The 30th bit indicates 64 or 32bit address; when it is set, physical addr
  187. * and scatter gather entry sizes are 64-bit values. Otherwise, 32-bit values.
  188. *
  189. * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
  190. */
  191. #define SGLISTINFO_SET_INDEX_FLAG(c, s, i) \
  192. ((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))
  193. #define SGLISTINFO_TABLE_SIZE (sizeof(struct sglist_info) * MAX_ION_FD)
  194. #define FEATURE_ID_WHITELIST 15 /*whitelist feature id*/
  195. #define MAKE_WHITELIST_VERSION(major, minor, patch) \
  196. (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
  197. #define MAKE_NULL(sgt, attach, dmabuf) do {\
  198. sgt = NULL;\
  199. attach = NULL;\
  200. dmabuf = NULL;\
  201. } while (0)
/* State for one listener service registered with the driver. */
struct qseecom_registered_listener_list {
	struct list_head list;
	struct qseecom_register_listener_req svc;	/* registration request */
	void *user_virt_sb_base;	/* userspace VA of the shared buffer */
	/* dma-buf backing of the listener's shared buffer */
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	u8 *sb_virt;		/* kernel VA of the shared buffer */
	phys_addr_t sb_phys;	/* PA of the shared buffer */
	size_t sb_length;
	wait_queue_head_t rcv_req_wq;
	/* rcv_req_flag: 0: ready and empty; 1: received req */
	int rcv_req_flag;
	int send_resp_flag;
	bool listener_in_use;
	/* wq for thread blocked on this listener */
	wait_queue_head_t listener_block_app_wq;
	/* sglist_info table (and its shm backing) sent with responses */
	struct sglist_info *sglistinfo_ptr;
	struct qtee_shm sglistinfo_shm;
	uint32_t sglist_cnt;
	int abort;
	bool unregister_pending;	/* unregister deferred to kthread */
};
/* Node queueing a handle for deferred listener unregistration. */
struct qseecom_unregister_pending_list {
	struct list_head list;
	struct qseecom_dev_handle *data;
};
/* Bookkeeping for one TZ application loaded through QSEECOM. */
struct qseecom_registered_app_list {
	struct list_head list;
	u32 app_id;		/* TZ application id */
	u32 ref_cnt;		/* number of clients holding this app */
	char app_name[MAX_APP_NAME_SIZE];
	u32 app_arch;		/* 32/64-bit image — confirm at load path */
	bool app_blocked;	/* app currently blocked on a listener */
	u32 check_block;
	u32 blocked_on_listener_id;
};
/* Node tracking an in-kernel (kclient) qseecom handle. */
struct qseecom_registered_kclient_list {
	struct list_head list;
	struct qseecom_handle *handle;
};
/* Usage record for one crypto-engine unit (FDE or PFE). */
struct qseecom_ce_info_use {
	unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
	unsigned int unit_num;
	unsigned int num_ce_pipe_entries;
	struct qseecom_ce_pipe_entry *ce_pipe_entry;
	bool alloc;		/* unit currently handed out to a client */
	uint32_t type;		/* CE_PIPE_PAIR_USE_TYPE_FDE or _PFE */
};
/*
 * Crypto-engine topology for the platform.
 * NOTE(review): presumably populated from device tree — confirm in probe.
 */
struct ce_hw_usage_info {
	uint32_t qsee_ce_hw_instance;
	uint32_t num_fde;	/* number of full-disk-encryption units */
	struct qseecom_ce_info_use *fde;
	uint32_t num_pfe;	/* number of per-file-encryption units */
	struct qseecom_ce_info_use *pfe;
};
/* Clock handles for one crypto-engine instance (QSEE or CE driver). */
struct qseecom_clk {
	enum qseecom_ce_hw_instance instance;
	struct clk *ce_core_clk;
	struct clk *ce_clk;
	struct clk *ce_core_src_clk;
	struct clk *ce_bus_clk;
	uint32_t clk_access_cnt;	/* enable refcount */
};
/*
 * Global driver state. A single file-scope instance ("qseecom") is
 * shared by all device clients, listeners and kernel clients.
 */
struct qseecom_control {
	/* Registered listener services */
	struct list_head registered_listener_list_head;
	/* Loaded TZ applications and the spinlock protecting the list */
	struct list_head registered_app_list_head;
	spinlock_t registered_app_list_lock;
	/* In-kernel clients and their spinlock */
	struct list_head registered_kclient_list_head;
	spinlock_t registered_kclient_list_lock;
	/* Wakes threads waiting on a listener response */
	wait_queue_head_t send_resp_wq;
	int send_resp_flag;
	uint32_t qseos_version;
	uint32_t qsee_version;	/* compared against QSEE_VERSION_* */
	struct device *pdev;	/* class_dev */
	struct device *dev;	/* platform_dev->dev */
	struct class *driver_class;
	dev_t qseecom_device_no;
	bool whitelist_support;
	bool commonlib_loaded;
	bool commonlib64_loaded;
	struct ce_hw_usage_info ce_info;
	/* Bandwidth-vote counters and interconnect path */
	int qsee_bw_count;
	int qsee_sfpb_bw_count;
	uint32_t qsee_perf_client;
	struct icc_path *icc_path;
	uint32_t avg_bw;
	uint32_t peak_bw;
	/* Crypto-engine clocks for the QSEE and CE-driver instances */
	struct qseecom_clk qsee;
	struct qseecom_clk ce_drv;
	bool support_bus_scaling;
	bool support_fde;	/* full disk encryption */
	bool support_pfe;	/* per file encryption */
	bool fde_key_size;
	uint32_t cumulative_mode;
	enum qseecom_bandwidth_request_mode current_mode;
	/* Timer + work used to drop the bandwidth vote after inactivity */
	struct timer_list bw_scale_down_timer;
	struct work_struct bw_inactive_req_ws;
	struct cdev cdev;
	bool timer_running;
	bool no_clock_support;
	unsigned int ce_opp_freq_hz;
	bool appsbl_qseecom_support;
	uint32_t qsee_reentrancy_support;
	bool enable_key_wrap_in_ks;
	/* Apps blocked on listeners (reentrancy handling) */
	uint32_t app_block_ref_cnt;
	wait_queue_head_t app_block_wq;
	atomic_t qseecom_state;	/* QSEECOM_STATE_* */
	int is_apps_region_protected;	/* set from androidboot.keymaster= */
	bool smcinvoke_support;	/* set per listener-register SMC outcome */
	/* shmbridge handles — presumably for static regions; confirm in probe */
	uint64_t qseecom_bridge_handle;
	uint64_t ta_bridge_handle;
	uint64_t user_contig_bridge_handle;
	/* Deferred listener unregistration, performed on a kthread */
	struct list_head unregister_lsnr_pending_list_head;
	wait_queue_head_t register_lsnr_pending_wq;
	struct task_struct *unregister_lsnr_kthread_task;
	wait_queue_head_t unregister_lsnr_kthread_wq;
	atomic_t unregister_lsnr_kthread_state;
	/* Deferred app unload, performed on a kthread */
	struct list_head unload_app_pending_list_head;
	struct task_struct *unload_app_kthread_task;
	wait_queue_head_t unload_app_kthread_wq;
	atomic_t unload_app_kthread_state;
};
/* Node queueing a handle for deferred app unload. */
struct qseecom_unload_app_pending_list {
	struct list_head list;
	struct qseecom_dev_handle *data;
};
/* Per-ion-fd record for a secure buffer passed by file descriptor. */
struct qseecom_sec_buf_fd_info {
	bool is_sec_buf_fd;
	size_t size;
	void *vbase;		/* kernel VA */
	phys_addr_t pbase;	/* PA */
	struct qtee_shm shm;	/* shmbridge backing */
};
/* Memory-reference parameter: 32-bit buffer address/offset plus size. */
struct qseecom_param_memref {
	uint32_t buffer;
	uint32_t size;
};
/* Per-fd state when the handle belongs to a client of a TZ app. */
struct qseecom_client_handle {
	u32 app_id;		/* TZ application id */
	/* dma-buf backing of the client's shared buffer */
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	u8 *sb_virt;		/* kernel VA of the shared buffer */
	phys_addr_t sb_phys;	/* PA of the shared buffer */
	size_t sb_length;
	unsigned long user_virt_sb_base;	/* userspace VA base */
	char app_name[MAX_APP_NAME_SIZE];
	u32 app_arch;
	struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
	bool from_smcinvoke;
	struct qtee_shm shm; /* kernel client's shm for req/rsp buf */
	bool unload_pending;	/* unload deferred to kthread */
	bool from_loadapp;
};
/* Per-fd state when the handle is a listener service. */
struct qseecom_listener_handle {
	u32 id;			/* listener service id */
	bool unregister_pending;
	bool release_called;
};
/* The single global driver-state instance. */
static struct qseecom_control qseecom;
/*
 * Per-open-file driver handle. The anonymous-union member in use is
 * selected by "type" (client app vs. listener service).
 */
struct qseecom_dev_handle {
	enum qseecom_client_handle_type type;
	union {
		struct qseecom_client_handle client;
		struct qseecom_listener_handle listener;
	};
	bool released;
	int abort;
	wait_queue_head_t abort_wq;
	atomic_t ioctl_count;	/* in-flight ioctls on this handle */
	bool perf_enabled;
	bool fast_load_enabled;
	enum qseecom_bandwidth_request_mode mode;
	/* sglist_info table (and its shm backing) for this handle */
	struct sglist_info *sglistinfo_ptr;
	struct qtee_shm sglistinfo_shm;
	uint32_t sglist_cnt;
	bool use_legacy_cmd;
};
/* Printable description of one key-ID usage. */
struct qseecom_key_id_usage_desc {
	uint8_t desc[QSEECOM_KEY_ID_SIZE];
};
/* Selection of a crypto unit, engine and pipe pair. */
struct qseecom_crypto_info {
	unsigned int unit_num;
	unsigned int ce;
	unsigned int pipe_pair;
};
/*
 * Human-readable descriptions of key-ID usages, index 0 = undefined.
 * NOTE(review): ordering presumably mirrors the QSEECOM key-usage enum
 * — verify before reordering or extending.
 */
static struct qseecom_key_id_usage_desc key_id_array[] = {
	{
		.desc = "Undefined Usage Index",
	},
	{
		.desc = "Full Disk Encryption",
	},
	{
		.desc = "Per File Encryption",
	},
	{
		.desc = "UFS ICE Full Disk Encryption",
	},
	{
		.desc = "SDCC ICE Full Disk Encryption",
	},
};
  406. /* Function proto types */
  407. static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
  408. static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
  409. static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
  410. static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
  411. static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
  412. static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
  413. char *cmnlib_name);
  414. static int qseecom_enable_ice_setup(int usage);
  415. static int qseecom_disable_ice_setup(int usage);
  416. static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
  417. static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
  418. void __user *argp);
  419. static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
  420. void __user *argp);
  421. static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
  422. void __user *argp);
  423. static int __qseecom_unload_app(struct qseecom_dev_handle *data,
  424. uint32_t app_id);
/*
 * __setup() hook for "androidboot.keymaster=": parses the integer value
 * into qseecom.is_apps_region_protected. Returning 1 marks the boot
 * option as handled.
 */
static int __maybe_unused get_qseecom_keymaster_status(char *str)
{
	get_option(&str, &qseecom.is_apps_region_protected);
	return 1;
}
  430. __setup("androidboot.keymaster=", get_qseecom_keymaster_status);
  431. static int __qseecom_alloc_coherent_buf(
  432. uint32_t size, u8 **vaddr, phys_addr_t *paddr);
  433. static void __qseecom_free_coherent_buf(uint32_t size,
  434. u8 *vaddr, phys_addr_t paddr);
  435. #define QSEECOM_SCM_EBUSY_WAIT_MS 30
  436. #define QSEECOM_SCM_EBUSY_MAX_RETRY 67
  437. #define QSEE_RESULT_FAIL_APP_BUSY 315
/*
 * Issue an SCM call into TZ, retrying while the secure world reports
 * busy (-EBUSY from the SCM layer, or QSEE_RESULT_FAIL_APP_BUSY in
 * desc->ret[0]). Caller must hold app_access_lock; it is dropped around
 * each retry sleep so other threads can make progress while TZ is busy.
 * Retries up to QSEECOM_SCM_EBUSY_MAX_RETRY times with
 * QSEECOM_SCM_EBUSY_WAIT_MS sleeps, warning once after ~1s (33 * 30ms).
 * Returns the result of the last qcom_scm_qseecom_call().
 */
static int __qseecom_scm_call2_locked(uint32_t smc_id, struct qseecom_scm_desc *desc)
{
	int ret = 0;
	int retry_count = 0;

	do {
		ret = qcom_scm_qseecom_call(smc_id, desc, false);
		if ((ret == -EBUSY) || (desc && (desc->ret[0] == -QSEE_RESULT_FAIL_APP_BUSY))) {
			/* Let other callers run while we wait out the busy TZ. */
			mutex_unlock(&app_access_lock);
			msleep(QSEECOM_SCM_EBUSY_WAIT_MS);
			mutex_lock(&app_access_lock);
		}
		if (retry_count == 33)
			pr_warn("secure world has been busy for 1 second!\n");
	} while (((ret == -EBUSY) || (desc && (desc->ret[0] == -QSEE_RESULT_FAIL_APP_BUSY))) &&
			(retry_count++ < QSEECOM_SCM_EBUSY_MAX_RETRY));
	return ret;
}
  455. static char *__qseecom_alloc_tzbuf(uint32_t size,
  456. phys_addr_t *pa, struct qtee_shm *shm)
  457. {
  458. char *tzbuf = NULL;
  459. int ret = qtee_shmbridge_allocate_shm(size, shm);
  460. if (ret)
  461. return NULL;
  462. tzbuf = shm->vaddr;
  463. memset(tzbuf, 0, size);
  464. *pa = shm->paddr;
  465. return tzbuf;
  466. }
/* Release a TZ buffer obtained from __qseecom_alloc_tzbuf(). */
static void __qseecom_free_tzbuf(struct qtee_shm *shm)
{
	qtee_shmbridge_free_shm(shm);
}
  471. static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
  472. const void *req_buf, void *resp_buf)
  473. {
  474. int ret = 0;
  475. uint32_t smc_id = 0;
  476. uint32_t qseos_cmd_id = 0;
  477. struct qseecom_scm_desc desc = {0};
  478. struct qseecom_command_scm_resp *scm_resp = NULL;
  479. struct qtee_shm shm = {0};
  480. phys_addr_t pa;
  481. if (!req_buf || !resp_buf) {
  482. pr_err("Invalid buffer pointer\n");
  483. return -EINVAL;
  484. }
  485. qseos_cmd_id = *(uint32_t *)req_buf;
  486. scm_resp = (struct qseecom_command_scm_resp *)resp_buf;
  487. switch (svc_id) {
  488. case SCM_SVC_INFO: {
  489. if (tz_cmd_id == 3) {
  490. smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
  491. desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
  492. desc.args[0] = *(uint32_t *)req_buf;
  493. } else {
  494. pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
  495. svc_id, tz_cmd_id);
  496. return -EINVAL;
  497. }
  498. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  499. break;
  500. }
  501. case SCM_SVC_ES: {
  502. switch (tz_cmd_id) {
  503. case SCM_SAVE_PARTITION_HASH_ID: {
  504. u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
  505. struct qseecom_save_partition_hash_req *p_hash_req =
  506. (struct qseecom_save_partition_hash_req *)
  507. req_buf;
  508. char *tzbuf = __qseecom_alloc_tzbuf(
  509. tzbuflen, &pa, &shm);
  510. if (!tzbuf)
  511. return -ENOMEM;
  512. memset(tzbuf, 0, tzbuflen);
  513. memcpy(tzbuf, p_hash_req->digest,
  514. SHA256_DIGEST_LENGTH);
  515. qtee_shmbridge_flush_shm_buf(&shm);
  516. smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
  517. desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
  518. desc.args[0] = p_hash_req->partition_id;
  519. desc.args[1] = pa;
  520. desc.args[2] = SHA256_DIGEST_LENGTH;
  521. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  522. __qseecom_free_tzbuf(&shm);
  523. break;
  524. }
  525. default: {
  526. pr_err("tz_cmd_id %d is not supported\n", tz_cmd_id);
  527. ret = -EINVAL;
  528. break;
  529. }
  530. } /* end of switch (tz_cmd_id) */
  531. break;
  532. } /* end of case SCM_SVC_ES */
  533. case SCM_SVC_TZSCHEDULER: {
  534. switch (qseos_cmd_id) {
  535. case QSEOS_APP_START_COMMAND: {
  536. struct qseecom_load_app_ireq *req;
  537. struct qseecom_load_app_64bit_ireq *req_64bit;
  538. smc_id = TZ_OS_APP_START_ID;
  539. desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
  540. if (qseecom.qsee_version < QSEE_VERSION_40) {
  541. req = (struct qseecom_load_app_ireq *)req_buf;
  542. desc.args[0] = req->mdt_len;
  543. desc.args[1] = req->img_len;
  544. desc.args[2] = req->phy_addr;
  545. } else {
  546. req_64bit =
  547. (struct qseecom_load_app_64bit_ireq *)
  548. req_buf;
  549. desc.args[0] = req_64bit->mdt_len;
  550. desc.args[1] = req_64bit->img_len;
  551. desc.args[2] = req_64bit->phy_addr;
  552. }
  553. __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
  554. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  555. break;
  556. }
  557. case QSEOS_APP_SHUTDOWN_COMMAND: {
  558. struct qseecom_unload_app_ireq *req;
  559. req = (struct qseecom_unload_app_ireq *)req_buf;
  560. smc_id = TZ_OS_APP_SHUTDOWN_ID;
  561. desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
  562. desc.args[0] = req->app_id;
  563. ret = qcom_scm_qseecom_call(smc_id, &desc, true);
  564. break;
  565. }
  566. case QSEOS_APP_LOOKUP_COMMAND: {
  567. struct qseecom_check_app_ireq *req;
  568. u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
  569. char *tzbuf = __qseecom_alloc_tzbuf(
  570. tzbuflen, &pa, &shm);
  571. if (!tzbuf)
  572. return -ENOMEM;
  573. req = (struct qseecom_check_app_ireq *)req_buf;
  574. pr_debug("Lookup app_name = %s\n", req->app_name);
  575. strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
  576. qtee_shmbridge_flush_shm_buf(&shm);
  577. smc_id = TZ_OS_APP_LOOKUP_ID;
  578. desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
  579. desc.args[0] = pa;
  580. desc.args[1] = strlen(req->app_name);
  581. __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
  582. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  583. __qseecom_free_tzbuf(&shm);
  584. break;
  585. }
  586. case QSEOS_APP_REGION_NOTIFICATION: {
  587. struct qsee_apps_region_info_ireq *req;
  588. struct qsee_apps_region_info_64bit_ireq *req_64bit;
  589. smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
  590. desc.arginfo =
  591. TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
  592. if (qseecom.qsee_version < QSEE_VERSION_40) {
  593. req = (struct qsee_apps_region_info_ireq *)
  594. req_buf;
  595. desc.args[0] = req->addr;
  596. desc.args[1] = req->size;
  597. } else {
  598. req_64bit =
  599. (struct qsee_apps_region_info_64bit_ireq *)
  600. req_buf;
  601. desc.args[0] = req_64bit->addr;
  602. desc.args[1] = req_64bit->size;
  603. }
  604. __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
  605. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  606. break;
  607. }
  608. case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
  609. struct qseecom_load_lib_image_ireq *req;
  610. struct qseecom_load_lib_image_64bit_ireq *req_64bit;
  611. smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
  612. desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
  613. if (qseecom.qsee_version < QSEE_VERSION_40) {
  614. req = (struct qseecom_load_lib_image_ireq *)
  615. req_buf;
  616. desc.args[0] = req->mdt_len;
  617. desc.args[1] = req->img_len;
  618. desc.args[2] = req->phy_addr;
  619. } else {
  620. req_64bit =
  621. (struct qseecom_load_lib_image_64bit_ireq *)
  622. req_buf;
  623. desc.args[0] = req_64bit->mdt_len;
  624. desc.args[1] = req_64bit->img_len;
  625. desc.args[2] = req_64bit->phy_addr;
  626. }
  627. __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
  628. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  629. break;
  630. }
  631. case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
  632. smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
  633. desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
  634. __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
  635. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  636. break;
  637. }
  638. case QSEOS_REGISTER_LISTENER: {
  639. struct qseecom_register_listener_ireq *req;
  640. struct qseecom_register_listener_64bit_ireq *req_64bit;
  641. desc.arginfo =
  642. TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
  643. if (qseecom.qsee_version < QSEE_VERSION_40) {
  644. req = (struct qseecom_register_listener_ireq *)
  645. req_buf;
  646. desc.args[0] = req->listener_id;
  647. desc.args[1] = req->sb_ptr;
  648. desc.args[2] = req->sb_len;
  649. } else {
  650. req_64bit =
  651. (struct qseecom_register_listener_64bit_ireq *)
  652. req_buf;
  653. desc.args[0] = req_64bit->listener_id;
  654. desc.args[1] = req_64bit->sb_ptr;
  655. desc.args[2] = req_64bit->sb_len;
  656. }
  657. qseecom.smcinvoke_support = true;
  658. smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
  659. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  660. if (ret == -EIO) {
  661. /* smcinvoke is not supported */
  662. qseecom.smcinvoke_support = false;
  663. smc_id = TZ_OS_REGISTER_LISTENER_ID;
  664. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  665. }
  666. break;
  667. }
  668. case QSEOS_DEREGISTER_LISTENER: {
  669. struct qseecom_unregister_listener_ireq *req;
  670. req = (struct qseecom_unregister_listener_ireq *)
  671. req_buf;
  672. smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
  673. desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
  674. desc.args[0] = req->listener_id;
  675. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  676. break;
  677. }
  678. case QSEOS_LISTENER_DATA_RSP_COMMAND: {
  679. struct qseecom_client_listener_data_irsp *req;
  680. req = (struct qseecom_client_listener_data_irsp *)
  681. req_buf;
  682. smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
  683. desc.arginfo =
  684. TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
  685. desc.args[0] = req->listener_id;
  686. desc.args[1] = req->status;
  687. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  688. break;
  689. }
  690. case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
  691. struct qseecom_client_listener_data_irsp *req;
  692. struct qseecom_client_listener_data_64bit_irsp *req_64;
  693. smc_id =
  694. TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
  695. desc.arginfo =
  696. TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
  697. if (qseecom.qsee_version < QSEE_VERSION_40) {
  698. req =
  699. (struct qseecom_client_listener_data_irsp *)
  700. req_buf;
  701. desc.args[0] = req->listener_id;
  702. desc.args[1] = req->status;
  703. desc.args[2] = req->sglistinfo_ptr;
  704. desc.args[3] = req->sglistinfo_len;
  705. } else {
  706. req_64 =
  707. (struct qseecom_client_listener_data_64bit_irsp *)
  708. req_buf;
  709. desc.args[0] = req_64->listener_id;
  710. desc.args[1] = req_64->status;
  711. desc.args[2] = req_64->sglistinfo_ptr;
  712. desc.args[3] = req_64->sglistinfo_len;
  713. }
  714. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  715. break;
  716. }
  717. case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
  718. struct qseecom_load_app_ireq *req;
  719. struct qseecom_load_app_64bit_ireq *req_64bit;
  720. smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
  721. desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
  722. if (qseecom.qsee_version < QSEE_VERSION_40) {
  723. req = (struct qseecom_load_app_ireq *)req_buf;
  724. desc.args[0] = req->mdt_len;
  725. desc.args[1] = req->img_len;
  726. desc.args[2] = req->phy_addr;
  727. } else {
  728. req_64bit =
  729. (struct qseecom_load_app_64bit_ireq *)req_buf;
  730. desc.args[0] = req_64bit->mdt_len;
  731. desc.args[1] = req_64bit->img_len;
  732. desc.args[2] = req_64bit->phy_addr;
  733. }
  734. __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
  735. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  736. break;
  737. }
  738. case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
  739. smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
  740. desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
  741. __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
  742. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  743. break;
  744. }
  745. case QSEOS_CLIENT_SEND_DATA_COMMAND: {
  746. struct qseecom_client_send_data_ireq *req;
  747. struct qseecom_client_send_data_64bit_ireq *req_64bit;
  748. smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
  749. desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
  750. if (qseecom.qsee_version < QSEE_VERSION_40) {
  751. req = (struct qseecom_client_send_data_ireq *)
  752. req_buf;
  753. desc.args[0] = req->app_id;
  754. desc.args[1] = req->req_ptr;
  755. desc.args[2] = req->req_len;
  756. desc.args[3] = req->rsp_ptr;
  757. desc.args[4] = req->rsp_len;
  758. } else {
  759. req_64bit =
  760. (struct qseecom_client_send_data_64bit_ireq *)
  761. req_buf;
  762. desc.args[0] = req_64bit->app_id;
  763. desc.args[1] = req_64bit->req_ptr;
  764. desc.args[2] = req_64bit->req_len;
  765. desc.args[3] = req_64bit->rsp_ptr;
  766. desc.args[4] = req_64bit->rsp_len;
  767. }
  768. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  769. break;
  770. }
  771. case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
  772. struct qseecom_client_send_data_ireq *req;
  773. struct qseecom_client_send_data_64bit_ireq *req_64bit;
  774. smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
  775. desc.arginfo =
  776. TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
  777. if (qseecom.qsee_version < QSEE_VERSION_40) {
  778. req = (struct qseecom_client_send_data_ireq *)
  779. req_buf;
  780. desc.args[0] = req->app_id;
  781. desc.args[1] = req->req_ptr;
  782. desc.args[2] = req->req_len;
  783. desc.args[3] = req->rsp_ptr;
  784. desc.args[4] = req->rsp_len;
  785. desc.args[5] = req->sglistinfo_ptr;
  786. desc.args[6] = req->sglistinfo_len;
  787. } else {
  788. req_64bit =
  789. (struct qseecom_client_send_data_64bit_ireq *)
  790. req_buf;
  791. desc.args[0] = req_64bit->app_id;
  792. desc.args[1] = req_64bit->req_ptr;
  793. desc.args[2] = req_64bit->req_len;
  794. desc.args[3] = req_64bit->rsp_ptr;
  795. desc.args[4] = req_64bit->rsp_len;
  796. desc.args[5] = req_64bit->sglistinfo_ptr;
  797. desc.args[6] = req_64bit->sglistinfo_len;
  798. }
  799. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  800. break;
  801. }
  802. case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
  803. struct qseecom_client_send_service_ireq *req;
  804. req = (struct qseecom_client_send_service_ireq *)
  805. req_buf;
  806. smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
  807. desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
  808. desc.args[0] = req->key_type;
  809. __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
  810. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  811. break;
  812. }
  813. case QSEOS_RPMB_ERASE_COMMAND: {
  814. smc_id = TZ_OS_RPMB_ERASE_ID;
  815. desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
  816. __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
  817. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  818. break;
  819. }
  820. case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
  821. smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
  822. desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
  823. __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
  824. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  825. break;
  826. }
  827. case QSEOS_DIAG_FUSE_REQ_CMD:
  828. case QSEOS_DIAG_FUSE_REQ_RSP_CMD: {
  829. struct qseecom_client_send_fsm_diag_req *req;
  830. smc_id = TZ_SECBOOT_GET_FUSE_INFO;
  831. desc.arginfo = TZ_SECBOOT_GET_FUSE_INFO_PARAM_ID;
  832. req = (struct qseecom_client_send_fsm_diag_req *) req_buf;
  833. desc.args[0] = req->req_ptr;
  834. desc.args[1] = req->req_len;
  835. desc.args[2] = req->rsp_ptr;
  836. desc.args[3] = req->rsp_len;
  837. __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
  838. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  839. break;
  840. }
  841. case QSEOS_GENERATE_KEY: {
  842. u32 tzbuflen = PAGE_ALIGN(sizeof
  843. (struct qseecom_key_generate_ireq) -
  844. sizeof(uint32_t));
  845. char *tzbuf = __qseecom_alloc_tzbuf(
  846. tzbuflen, &pa, &shm);
  847. if (!tzbuf)
  848. return -ENOMEM;
  849. memset(tzbuf, 0, tzbuflen);
  850. memcpy(tzbuf, req_buf + sizeof(uint32_t),
  851. (sizeof(struct qseecom_key_generate_ireq) -
  852. sizeof(uint32_t)));
  853. qtee_shmbridge_flush_shm_buf(&shm);
  854. smc_id = TZ_OS_KS_GEN_KEY_ID;
  855. desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
  856. desc.args[0] = pa;
  857. desc.args[1] = tzbuflen;
  858. __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
  859. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  860. __qseecom_free_tzbuf(&shm);
  861. break;
  862. }
  863. case QSEOS_DELETE_KEY: {
  864. u32 tzbuflen = PAGE_ALIGN(sizeof
  865. (struct qseecom_key_delete_ireq) -
  866. sizeof(uint32_t));
  867. char *tzbuf = __qseecom_alloc_tzbuf(
  868. tzbuflen, &pa, &shm);
  869. if (!tzbuf)
  870. return -ENOMEM;
  871. memset(tzbuf, 0, tzbuflen);
  872. memcpy(tzbuf, req_buf + sizeof(uint32_t),
  873. (sizeof(struct qseecom_key_delete_ireq) -
  874. sizeof(uint32_t)));
  875. qtee_shmbridge_flush_shm_buf(&shm);
  876. smc_id = TZ_OS_KS_DEL_KEY_ID;
  877. desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
  878. desc.args[0] = pa;
  879. desc.args[1] = tzbuflen;
  880. __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
  881. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  882. __qseecom_free_tzbuf(&shm);
  883. break;
  884. }
  885. case QSEOS_SET_KEY: {
  886. u32 tzbuflen = PAGE_ALIGN(sizeof
  887. (struct qseecom_key_select_ireq) -
  888. sizeof(uint32_t));
  889. char *tzbuf = __qseecom_alloc_tzbuf(
  890. tzbuflen, &pa, &shm);
  891. if (!tzbuf)
  892. return -ENOMEM;
  893. memset(tzbuf, 0, tzbuflen);
  894. memcpy(tzbuf, req_buf + sizeof(uint32_t),
  895. (sizeof(struct qseecom_key_select_ireq) -
  896. sizeof(uint32_t)));
  897. qtee_shmbridge_flush_shm_buf(&shm);
  898. smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
  899. desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
  900. desc.args[0] = pa;
  901. desc.args[1] = tzbuflen;
  902. __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
  903. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  904. __qseecom_free_tzbuf(&shm);
  905. break;
  906. }
  907. case QSEOS_UPDATE_KEY_USERINFO: {
  908. u32 tzbuflen = PAGE_ALIGN(sizeof
  909. (struct qseecom_key_userinfo_update_ireq) -
  910. sizeof(uint32_t));
  911. char *tzbuf = __qseecom_alloc_tzbuf(
  912. tzbuflen, &pa, &shm);
  913. if (!tzbuf)
  914. return -ENOMEM;
  915. memset(tzbuf, 0, tzbuflen);
  916. memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
  917. (struct qseecom_key_userinfo_update_ireq) -
  918. sizeof(uint32_t)));
  919. qtee_shmbridge_flush_shm_buf(&shm);
  920. smc_id = TZ_OS_KS_UPDATE_KEY_ID;
  921. desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
  922. desc.args[0] = pa;
  923. desc.args[1] = tzbuflen;
  924. __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
  925. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  926. __qseecom_free_tzbuf(&shm);
  927. break;
  928. }
  929. case QSEOS_TEE_OPEN_SESSION: {
  930. struct qseecom_qteec_ireq *req;
  931. struct qseecom_qteec_64bit_ireq *req_64bit;
  932. smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
  933. desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
  934. if (qseecom.qsee_version < QSEE_VERSION_40) {
  935. req = (struct qseecom_qteec_ireq *)req_buf;
  936. desc.args[0] = req->app_id;
  937. desc.args[1] = req->req_ptr;
  938. desc.args[2] = req->req_len;
  939. desc.args[3] = req->resp_ptr;
  940. desc.args[4] = req->resp_len;
  941. } else {
  942. req_64bit = (struct qseecom_qteec_64bit_ireq *)
  943. req_buf;
  944. desc.args[0] = req_64bit->app_id;
  945. desc.args[1] = req_64bit->req_ptr;
  946. desc.args[2] = req_64bit->req_len;
  947. desc.args[3] = req_64bit->resp_ptr;
  948. desc.args[4] = req_64bit->resp_len;
  949. }
  950. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  951. break;
  952. }
  953. case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
  954. struct qseecom_qteec_ireq *req;
  955. struct qseecom_qteec_64bit_ireq *req_64bit;
  956. smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
  957. desc.arginfo =
  958. TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
  959. if (qseecom.qsee_version < QSEE_VERSION_40) {
  960. req = (struct qseecom_qteec_ireq *)req_buf;
  961. desc.args[0] = req->app_id;
  962. desc.args[1] = req->req_ptr;
  963. desc.args[2] = req->req_len;
  964. desc.args[3] = req->resp_ptr;
  965. desc.args[4] = req->resp_len;
  966. desc.args[5] = req->sglistinfo_ptr;
  967. desc.args[6] = req->sglistinfo_len;
  968. } else {
  969. req_64bit = (struct qseecom_qteec_64bit_ireq *)
  970. req_buf;
  971. desc.args[0] = req_64bit->app_id;
  972. desc.args[1] = req_64bit->req_ptr;
  973. desc.args[2] = req_64bit->req_len;
  974. desc.args[3] = req_64bit->resp_ptr;
  975. desc.args[4] = req_64bit->resp_len;
  976. desc.args[5] = req_64bit->sglistinfo_ptr;
  977. desc.args[6] = req_64bit->sglistinfo_len;
  978. }
  979. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  980. break;
  981. }
  982. case QSEOS_TEE_INVOKE_COMMAND: {
  983. struct qseecom_qteec_ireq *req;
  984. struct qseecom_qteec_64bit_ireq *req_64bit;
  985. smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
  986. desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
  987. if (qseecom.qsee_version < QSEE_VERSION_40) {
  988. req = (struct qseecom_qteec_ireq *)req_buf;
  989. desc.args[0] = req->app_id;
  990. desc.args[1] = req->req_ptr;
  991. desc.args[2] = req->req_len;
  992. desc.args[3] = req->resp_ptr;
  993. desc.args[4] = req->resp_len;
  994. } else {
  995. req_64bit = (struct qseecom_qteec_64bit_ireq *)
  996. req_buf;
  997. desc.args[0] = req_64bit->app_id;
  998. desc.args[1] = req_64bit->req_ptr;
  999. desc.args[2] = req_64bit->req_len;
  1000. desc.args[3] = req_64bit->resp_ptr;
  1001. desc.args[4] = req_64bit->resp_len;
  1002. }
  1003. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  1004. break;
  1005. }
  1006. case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
  1007. struct qseecom_qteec_ireq *req;
  1008. struct qseecom_qteec_64bit_ireq *req_64bit;
  1009. smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
  1010. desc.arginfo =
  1011. TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
  1012. if (qseecom.qsee_version < QSEE_VERSION_40) {
  1013. req = (struct qseecom_qteec_ireq *)req_buf;
  1014. desc.args[0] = req->app_id;
  1015. desc.args[1] = req->req_ptr;
  1016. desc.args[2] = req->req_len;
  1017. desc.args[3] = req->resp_ptr;
  1018. desc.args[4] = req->resp_len;
  1019. desc.args[5] = req->sglistinfo_ptr;
  1020. desc.args[6] = req->sglistinfo_len;
  1021. } else {
  1022. req_64bit = (struct qseecom_qteec_64bit_ireq *)
  1023. req_buf;
  1024. desc.args[0] = req_64bit->app_id;
  1025. desc.args[1] = req_64bit->req_ptr;
  1026. desc.args[2] = req_64bit->req_len;
  1027. desc.args[3] = req_64bit->resp_ptr;
  1028. desc.args[4] = req_64bit->resp_len;
  1029. desc.args[5] = req_64bit->sglistinfo_ptr;
  1030. desc.args[6] = req_64bit->sglistinfo_len;
  1031. }
  1032. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  1033. break;
  1034. }
  1035. case QSEOS_TEE_CLOSE_SESSION: {
  1036. struct qseecom_qteec_ireq *req;
  1037. struct qseecom_qteec_64bit_ireq *req_64bit;
  1038. smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
  1039. desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
  1040. if (qseecom.qsee_version < QSEE_VERSION_40) {
  1041. req = (struct qseecom_qteec_ireq *)req_buf;
  1042. desc.args[0] = req->app_id;
  1043. desc.args[1] = req->req_ptr;
  1044. desc.args[2] = req->req_len;
  1045. desc.args[3] = req->resp_ptr;
  1046. desc.args[4] = req->resp_len;
  1047. } else {
  1048. req_64bit = (struct qseecom_qteec_64bit_ireq *)
  1049. req_buf;
  1050. desc.args[0] = req_64bit->app_id;
  1051. desc.args[1] = req_64bit->req_ptr;
  1052. desc.args[2] = req_64bit->req_len;
  1053. desc.args[3] = req_64bit->resp_ptr;
  1054. desc.args[4] = req_64bit->resp_len;
  1055. }
  1056. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  1057. break;
  1058. }
  1059. case QSEOS_TEE_REQUEST_CANCELLATION: {
  1060. struct qseecom_qteec_ireq *req;
  1061. struct qseecom_qteec_64bit_ireq *req_64bit;
  1062. smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
  1063. desc.arginfo =
  1064. TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
  1065. if (qseecom.qsee_version < QSEE_VERSION_40) {
  1066. req = (struct qseecom_qteec_ireq *)req_buf;
  1067. desc.args[0] = req->app_id;
  1068. desc.args[1] = req->req_ptr;
  1069. desc.args[2] = req->req_len;
  1070. desc.args[3] = req->resp_ptr;
  1071. desc.args[4] = req->resp_len;
  1072. } else {
  1073. req_64bit = (struct qseecom_qteec_64bit_ireq *)
  1074. req_buf;
  1075. desc.args[0] = req_64bit->app_id;
  1076. desc.args[1] = req_64bit->req_ptr;
  1077. desc.args[2] = req_64bit->req_len;
  1078. desc.args[3] = req_64bit->resp_ptr;
  1079. desc.args[4] = req_64bit->resp_len;
  1080. }
  1081. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  1082. break;
  1083. }
  1084. case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
  1085. struct qseecom_continue_blocked_request_ireq *req =
  1086. (struct qseecom_continue_blocked_request_ireq *)
  1087. req_buf;
  1088. if (qseecom.smcinvoke_support)
  1089. smc_id =
  1090. TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID;
  1091. else
  1092. smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
  1093. desc.arginfo =
  1094. TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
  1095. desc.args[0] = req->app_or_session_id;
  1096. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  1097. break;
  1098. }
  1099. default: {
  1100. pr_err("qseos_cmd_id %d is not supported.\n",
  1101. qseos_cmd_id);
  1102. ret = -EINVAL;
  1103. break;
  1104. }
  1105. } /*end of switch (qsee_cmd_id) */
  1106. break;
  1107. } /*end of case SCM_SVC_TZSCHEDULER*/
  1108. default: {
  1109. pr_err("svc_id 0x%x is not supported.\n", svc_id);
  1110. ret = -EINVAL;
  1111. break;
  1112. }
  1113. } /*end of switch svc_id */
  1114. scm_resp->result = desc.ret[0];
  1115. scm_resp->resp_type = desc.ret[1];
  1116. scm_resp->data = desc.ret[2];
  1117. pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
  1118. svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
  1119. pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
  1120. scm_resp->result, scm_resp->resp_type, scm_resp->data);
  1121. return ret;
  1122. }
/*
 * Thin compatibility wrapper kept for the legacy scm_call-style call sites.
 *
 * cmd_len and resp_len are accepted but unused: qseecom_scm_call2() decodes
 * the request from the command id embedded in cmd_buf and writes a
 * fixed-layout response (result/resp_type/data) into resp_buf.
 */
static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
		size_t cmd_len, void *resp_buf, size_t resp_len)
{
	return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
}
  1128. static struct qseecom_registered_listener_list *__qseecom_find_svc(
  1129. int32_t listener_id)
  1130. {
  1131. struct qseecom_registered_listener_list *entry = NULL;
  1132. list_for_each_entry(entry,
  1133. &qseecom.registered_listener_list_head, list) {
  1134. if (entry->svc.listener_id == listener_id)
  1135. break;
  1136. }
  1137. if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
  1138. pr_debug("Service id: %u is not found\n", listener_id);
  1139. return NULL;
  1140. }
  1141. return entry;
  1142. }
  1143. static int qseecom_dmabuf_cache_operations(struct dma_buf *dmabuf,
  1144. enum qseecom_cache_ops cache_op)
  1145. {
  1146. int ret = 0;
  1147. if (!dmabuf) {
  1148. pr_err("dmabuf is NULL\n");
  1149. ret = -EINVAL;
  1150. goto exit;
  1151. }
  1152. switch (cache_op) {
  1153. case QSEECOM_CACHE_CLEAN: /* Doing CLEAN and INVALIDATE */
  1154. dma_buf_begin_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
  1155. dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
  1156. break;
  1157. case QSEECOM_CACHE_INVALIDATE:
  1158. dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE);
  1159. dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
  1160. break;
  1161. default:
  1162. pr_err("cache (%d) operation not supported\n",
  1163. cache_op);
  1164. ret = -EINVAL;
  1165. goto exit;
  1166. }
  1167. exit:
  1168. return ret;
  1169. }
  1170. static int qseecom_destroy_bridge_callback(void *dtor_data)
  1171. {
  1172. int ret = 0;
  1173. uint64_t handle = (uint64_t)dtor_data;
  1174. pr_debug("to destroy shm bridge %lld\n", handle);
  1175. ret = qtee_shmbridge_deregister(handle);
  1176. if (ret) {
  1177. pr_err("failed to destroy shm bridge %lld\n", handle);
  1178. return ret;
  1179. }
  1180. return ret;
  1181. }
  1182. static int qseecom_create_bridge_for_secbuf(int ion_fd, struct dma_buf *dmabuf,
  1183. struct sg_table *sgt)
  1184. {
  1185. int ret = 0;
  1186. phys_addr_t phys;
  1187. size_t size = 0;
  1188. uint64_t handle = 0;
  1189. int tz_perm = PERM_READ|PERM_WRITE;
  1190. uint32_t *vmid_list;
  1191. uint32_t *perms_list;
  1192. uint32_t nelems = 0;
  1193. struct scatterlist *sg = sgt->sgl;
  1194. if (!qtee_shmbridge_is_enabled())
  1195. return 0;
  1196. phys = sg_phys(sg);
  1197. size = sg->length;
  1198. ret = qtee_shmbridge_query(phys);
  1199. if (ret) {
  1200. pr_debug("bridge exists\n");
  1201. return 0;
  1202. }
  1203. if (mem_buf_dma_buf_exclusive_owner(dmabuf) || (sgt->nents != 1)) {
  1204. pr_debug("just create bridge for contiguous secure buf\n");
  1205. return 0;
  1206. }
  1207. ret = mem_buf_dma_buf_copy_vmperm(dmabuf, (int **)&vmid_list,
  1208. (int **)&perms_list, (int *)&nelems);
  1209. if (ret) {
  1210. pr_err("mem_buf_dma_buf_copy_vmperm failure, err=%d\n", ret);
  1211. return ret;
  1212. }
  1213. ret = qtee_shmbridge_register(phys, size, vmid_list, perms_list, nelems,
  1214. tz_perm, &handle);
  1215. if (ret && ret != -EEXIST) {
  1216. pr_err("creation of shm bridge failed with ret: %d\n",
  1217. ret);
  1218. goto exit;
  1219. }
  1220. pr_debug("created shm bridge %lld\n", handle);
  1221. mem_buf_dma_buf_set_destructor(dmabuf, qseecom_destroy_bridge_callback,
  1222. (void *)handle);
  1223. exit:
  1224. kfree(perms_list);
  1225. kfree(vmid_list);
  1226. return ret;
  1227. }
  1228. static int qseecom_dmabuf_map(int ion_fd, struct sg_table **sgt,
  1229. struct dma_buf_attachment **attach,
  1230. struct dma_buf **dmabuf)
  1231. {
  1232. struct dma_buf *new_dma_buf = NULL;
  1233. struct dma_buf_attachment *new_attach = NULL;
  1234. struct sg_table *new_sgt = NULL;
  1235. int ret = 0;
  1236. new_dma_buf = dma_buf_get(ion_fd);
  1237. if (IS_ERR_OR_NULL(new_dma_buf)) {
  1238. pr_err("dma_buf_get() for ion_fd %d failed\n", ion_fd);
  1239. ret = -ENOMEM;
  1240. goto err;
  1241. }
  1242. new_attach = dma_buf_attach(new_dma_buf, qseecom.dev);
  1243. if (IS_ERR_OR_NULL(new_attach)) {
  1244. pr_err("dma_buf_attach() for ion_fd %d failed\n", ion_fd);
  1245. ret = -ENOMEM;
  1246. goto err_put;
  1247. }
  1248. new_sgt = dma_buf_map_attachment(new_attach, DMA_BIDIRECTIONAL);
  1249. if (IS_ERR_OR_NULL(new_sgt)) {
  1250. ret = PTR_ERR(new_sgt);
  1251. pr_err("dma_buf_map_attachment for ion_fd %d failed ret = %d\n",
  1252. ion_fd, ret);
  1253. goto err_detach;
  1254. }
  1255. ret = qseecom_create_bridge_for_secbuf(ion_fd, new_dma_buf, new_sgt);
  1256. if (ret) {
  1257. pr_err("failed to create bridge for fd %d\n", ion_fd);
  1258. goto err_unmap_attachment;
  1259. }
  1260. *sgt = new_sgt;
  1261. *attach = new_attach;
  1262. *dmabuf = new_dma_buf;
  1263. return ret;
  1264. err_unmap_attachment:
  1265. dma_buf_unmap_attachment(new_attach, new_sgt, DMA_BIDIRECTIONAL);
  1266. err_detach:
  1267. dma_buf_detach(new_dma_buf, new_attach);
  1268. err_put:
  1269. dma_buf_put(new_dma_buf);
  1270. err:
  1271. return ret;
  1272. }
/*
 * Release a mapping produced by qseecom_dmabuf_map(): unmap the
 * attachment, detach it, then drop the dma_buf reference. The three calls
 * mirror the map sequence in reverse and their order must be preserved.
 */
static void qseecom_dmabuf_unmap(struct sg_table *sgt,
		struct dma_buf_attachment *attach,
		struct dma_buf *dmabuf)
{
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(dmabuf, attach);
	dma_buf_put(dmabuf);
}
/*
 * Convert @ion_fd into a physical address, kernel virtual address, and the
 * map/attach handles needed to undo the mapping later (see
 * qseecom_vaddr_unmap()). Output pointers are written only on success.
 */
static int qseecom_vaddr_map(int ion_fd,
		phys_addr_t *paddr, void **vaddr,
		struct sg_table **sgt,
		struct dma_buf_attachment **attach,
		size_t *sb_length, struct dma_buf **dmabuf)
{
	struct dma_buf *new_dma_buf = NULL;
	struct dma_buf_attachment *new_attach = NULL;
/* dma_buf_map was renamed iosys_map in newer kernels; pick per build. */
#ifdef KERNEL_VERSION_LEGACY
	struct dma_buf_map new_dma_buf_map = {0};
#else
	struct iosys_map new_dma_buf_map = {0};
#endif
	struct sg_table *new_sgt = NULL;
	void *new_va = NULL;
	int ret = 0;

	ret = qseecom_dmabuf_map(ion_fd, &new_sgt, &new_attach, &new_dma_buf);
	if (ret) {
		pr_err("qseecom_dmabuf_map for ion_fd %d failed ret = %d\n",
				ion_fd, ret);
		goto err;
	}
	ret = 0;
	/*
	 * NOTE(review): this reads sg_dma_address() while the bridge-creation
	 * path uses sg_phys(); presumably identical for these buffers —
	 * confirm the attachment is mapped with an identity/SMMU-bypassed
	 * mapping.
	 */
	*paddr = sg_dma_address(new_sgt->sgl);
	*sb_length = new_sgt->sgl->length;

	/* begin_cpu_access is balanced by end_cpu_access in the unmap path */
	dma_buf_begin_cpu_access(new_dma_buf, DMA_BIDIRECTIONAL);
	ret = dma_buf_vmap(new_dma_buf, &new_dma_buf_map);
	new_va = ret ? NULL : new_dma_buf_map.vaddr;
	if (!new_va) {
		pr_err("dma_buf_vmap failed\n");
		ret = -ENOMEM;
		goto err_unmap;
	}
	*dmabuf = new_dma_buf;
	*attach = new_attach;
	*sgt = new_sgt;
	*vaddr = new_va;
	return ret;

err_unmap:
	dma_buf_end_cpu_access(new_dma_buf, DMA_BIDIRECTIONAL);
	qseecom_dmabuf_unmap(new_sgt, new_attach, new_dma_buf);
	MAKE_NULL(*sgt, *attach, *dmabuf);
err:
	return ret;
}
  1327. static void qseecom_vaddr_unmap(void *vaddr, struct sg_table *sgt,
  1328. struct dma_buf_attachment *attach,
  1329. struct dma_buf *dmabuf)
  1330. {
  1331. #ifdef KERNEL_VERSION_LEGACY
  1332. struct dma_buf_map dmabufmap = DMA_BUF_MAP_INIT_VADDR(vaddr);
  1333. #else
  1334. struct iosys_map dmabufmap = IOSYS_MAP_INIT_VADDR(vaddr);
  1335. #endif
  1336. if (!dmabuf || !vaddr || !sgt || !attach)
  1337. return;
  1338. pr_err("Trying to unmap vaddr");
  1339. dma_buf_vunmap(dmabuf, &dmabufmap);
  1340. dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
  1341. qseecom_dmabuf_unmap(sgt, attach, dmabuf);
  1342. }
/*
 * Map the listener's shared buffer (ion fd) and register it with QSEE via
 * the QSEOS_REGISTER_LISTENER command.
 *
 * Called with listener_access_lock held; it is dropped and app_access_lock
 * taken around the SCM call, then the original lock is re-acquired. On any
 * failure the buffer mapping is torn down before returning.
 */
static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
		struct qseecom_dev_handle *handle,
		struct qseecom_register_listener_req *listener)
{
	int ret = 0;
	struct qseecom_register_listener_ireq req;
	struct qseecom_register_listener_64bit_ireq req_64bit;
	struct qseecom_command_scm_resp resp;
	void *cmd_buf = NULL;
	size_t cmd_len;

	/* Resolve the client fd into the shared buffer's phys/virt/length. */
	ret = qseecom_vaddr_map(listener->ifd_data_fd,
			&svc->sb_phys, (void **)&svc->sb_virt,
			&svc->sgt, &svc->attach,
			&svc->sb_length, &svc->dmabuf);
	if (ret) {
		pr_err("failed to convert ion_fd %d for lsnr %d with err: %d\n",
			listener->ifd_data_fd, svc->svc.listener_id, ret);
		return -EINVAL;
	}

	/* Build the 32-bit or 64-bit request depending on the QSEE version. */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
		req.listener_id = svc->svc.listener_id;
		req.sb_len = svc->sb_length;
		req.sb_ptr = (uint32_t)svc->sb_phys;
		cmd_buf = (void *)&req;
		cmd_len = sizeof(struct qseecom_register_listener_ireq);
	} else {
		req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
		req_64bit.listener_id = svc->svc.listener_id;
		req_64bit.sb_len = svc->sb_length;
		req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
		cmd_buf = (void *)&req_64bit;
		cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
	}

	resp.result = QSEOS_RESULT_INCOMPLETE;

	/*
	 * Lock-order dance: the SCM call must run under app_access_lock,
	 * which may not be taken while listener_access_lock is held.
	 */
	mutex_unlock(&listener_access_lock);
	mutex_lock(&app_access_lock);
	__qseecom_reentrancy_check_if_no_app_blocked(
			TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID);
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
			&resp, sizeof(resp));
	mutex_unlock(&app_access_lock);
	mutex_lock(&listener_access_lock);
	if (ret) {
		pr_err("qseecom_scm_call failed with err: %d\n", ret);
		ret = -EINVAL;
		goto err;
	}

	if (resp.result != QSEOS_RESULT_SUCCESS) {
		pr_err("Error SB registration req: resp.result = %d\n",
			resp.result);
		ret = -EPERM;
		goto err;
	}
	return 0;

err:
	/* Undo the shared-buffer mapping established above. */
	if (svc->dmabuf) {
		qseecom_vaddr_unmap(svc->sb_virt, svc->sgt, svc->attach,
			svc->dmabuf);
		MAKE_NULL(svc->sgt, svc->attach, svc->dmabuf);
	}
	return ret;
}
  1406. static int qseecom_register_listener(struct qseecom_dev_handle *data,
  1407. void __user *argp)
  1408. {
  1409. int ret = 0;
  1410. struct qseecom_register_listener_req rcvd_lstnr;
  1411. struct qseecom_registered_listener_list *new_entry;
  1412. struct qseecom_registered_listener_list *ptr_svc;
  1413. ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
  1414. if (ret) {
  1415. pr_err("copy_from_user failed\n");
  1416. return ret;
  1417. }
  1418. if (!access_ok((void __user *)rcvd_lstnr.virt_sb_base,
  1419. rcvd_lstnr.sb_size))
  1420. return -EFAULT;
  1421. ptr_svc = __qseecom_find_svc(rcvd_lstnr.listener_id);
  1422. if (ptr_svc) {
  1423. if (!ptr_svc->unregister_pending) {
  1424. pr_err("Service %d is not unique\n",
  1425. rcvd_lstnr.listener_id);
  1426. data->released = true;
  1427. return -EBUSY;
  1428. } else {
  1429. /*wait until listener is unregistered*/
  1430. pr_debug("register %d has to wait\n",
  1431. rcvd_lstnr.listener_id);
  1432. mutex_unlock(&listener_access_lock);
  1433. ret = wait_event_interruptible(
  1434. qseecom.register_lsnr_pending_wq,
  1435. list_empty(
  1436. &qseecom.unregister_lsnr_pending_list_head));
  1437. if (ret) {
  1438. pr_err("interrupted register_pending_wq %d\n",
  1439. rcvd_lstnr.listener_id);
  1440. mutex_lock(&listener_access_lock);
  1441. return -ERESTARTSYS;
  1442. }
  1443. mutex_lock(&listener_access_lock);
  1444. }
  1445. }
  1446. new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
  1447. if (!new_entry)
  1448. return -ENOMEM;
  1449. memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
  1450. new_entry->rcv_req_flag = 0;
  1451. new_entry->sglistinfo_ptr =
  1452. (struct sglist_info *)__qseecom_alloc_tzbuf(
  1453. sizeof(struct sglist_info) * MAX_ION_FD,
  1454. &new_entry->sglistinfo_shm.paddr,
  1455. &new_entry->sglistinfo_shm);
  1456. if (!new_entry->sglistinfo_ptr) {
  1457. kfree(new_entry);
  1458. return -ENOMEM;
  1459. }
  1460. new_entry->svc.listener_id = rcvd_lstnr.listener_id;
  1461. new_entry->sb_length = rcvd_lstnr.sb_size;
  1462. new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
  1463. if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
  1464. pr_err("qseecom_set_sb_memory failed for listener %d, size %d\n",
  1465. rcvd_lstnr.listener_id, rcvd_lstnr.sb_size);
  1466. __qseecom_free_tzbuf(&new_entry->sglistinfo_shm);
  1467. kfree_sensitive(new_entry);
  1468. return -ENOMEM;
  1469. }
  1470. init_waitqueue_head(&new_entry->rcv_req_wq);
  1471. init_waitqueue_head(&new_entry->listener_block_app_wq);
  1472. new_entry->send_resp_flag = 0;
  1473. new_entry->listener_in_use = false;
  1474. list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
  1475. data->listener.id = rcvd_lstnr.listener_id;
  1476. pr_debug("Service %d is registered\n", rcvd_lstnr.listener_id);
  1477. return ret;
  1478. }
/*
 * Send QSEOS_DEREGISTER_LISTENER to QSEE, then free the listener entry.
 *
 * Called with listener_access_lock held; it is dropped and app_access_lock
 * taken around the SCM call, then re-acquired. On SCM transport failure
 * the entry is kept (caller may retry); otherwise the shared exit path
 * unmaps the buffer and frees the entry even when QSEE reported a
 * non-success resp.result.
 */
static int __qseecom_unregister_listener(struct qseecom_dev_handle *data,
		struct qseecom_registered_listener_list *ptr_svc)
{
	int ret = 0;
	struct qseecom_register_listener_ireq req;
	struct qseecom_command_scm_resp resp;

	req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
	req.listener_id = data->listener.id;
	resp.result = QSEOS_RESULT_INCOMPLETE;

	/* Same lock-order dance as __qseecom_set_sb_memory(). */
	mutex_unlock(&listener_access_lock);
	mutex_lock(&app_access_lock);
	__qseecom_reentrancy_check_if_no_app_blocked(
			TZ_OS_DEREGISTER_LISTENER_ID);
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
			sizeof(req), &resp, sizeof(resp));
	mutex_unlock(&app_access_lock);
	mutex_lock(&listener_access_lock);
	if (ret) {
		/* Transport failure: leave the entry intact for a retry. */
		pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
				ret, data->listener.id);
		return ret;
	}

	if (resp.result != QSEOS_RESULT_SUCCESS) {
		pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
				resp.result, data->listener.id);
		ret = -EPERM;
		goto exit;
	}

	/*
	 * Drain other in-flight ioctls on this handle before freeing it.
	 * NOTE(review): if interrupted by a signal this loop spins (no
	 * break) until ioctl_count drops — presumably intentional to
	 * guarantee the drain; confirm.
	 */
	while (atomic_read(&data->ioctl_count) > 1) {
		if (wait_event_interruptible(data->abort_wq,
				atomic_read(&data->ioctl_count) <= 1)) {
			pr_err("Interrupted from abort\n");
			ret = -ERESTARTSYS;
		}
	}

exit:
	if (ptr_svc->dmabuf) {
		qseecom_vaddr_unmap(ptr_svc->sb_virt,
			ptr_svc->sgt, ptr_svc->attach, ptr_svc->dmabuf);
		MAKE_NULL(ptr_svc->sgt, ptr_svc->attach, ptr_svc->dmabuf);
	}
	__qseecom_free_tzbuf(&ptr_svc->sglistinfo_shm);
	list_del(&ptr_svc->list);
	kfree_sensitive(ptr_svc);
	data->released = true;
	pr_debug("Service %d is unregistered\n", data->listener.id);
	return ret;
}
  1527. static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
  1528. {
  1529. struct qseecom_registered_listener_list *ptr_svc = NULL;
  1530. struct qseecom_unregister_pending_list *entry = NULL;
  1531. if (data->released) {
  1532. pr_err("Don't unregister lsnr %d\n", data->listener.id);
  1533. return -EINVAL;
  1534. }
  1535. ptr_svc = __qseecom_find_svc(data->listener.id);
  1536. if (!ptr_svc) {
  1537. pr_err("Unregiser invalid listener ID %d\n", data->listener.id);
  1538. return -ENODATA;
  1539. }
  1540. /* stop CA thread waiting for listener response */
  1541. ptr_svc->abort = 1;
  1542. wake_up_interruptible_all(&qseecom.send_resp_wq);
  1543. /* stop listener thread waiting for listener request */
  1544. data->abort = 1;
  1545. wake_up_all(&ptr_svc->rcv_req_wq);
  1546. /* return directly if pending*/
  1547. if (ptr_svc->unregister_pending)
  1548. return 0;
  1549. /*add unregistration into pending list*/
  1550. entry = kzalloc(sizeof(*entry), GFP_KERNEL);
  1551. if (!entry)
  1552. return -ENOMEM;
  1553. entry->data = data;
  1554. list_add_tail(&entry->list,
  1555. &qseecom.unregister_lsnr_pending_list_head);
  1556. ptr_svc->unregister_pending = true;
  1557. pr_debug("unregister %d pending\n", data->listener.id);
  1558. return 0;
  1559. }
/*
 * Drain the pending-unregister list: for each queued request whose file
 * handle has been released, deregister the listener with QSEE and free
 * the bookkeeping. Entries whose release has not yet happened are rotated
 * to the tail and processing stops. On an SCM failure the entry stays
 * queued and the function bails out to retry later. Finally wakes any
 * registration waiting for the list to empty.
 */
static void __qseecom_processing_pending_lsnr_unregister(void)
{
	struct qseecom_unregister_pending_list *entry = NULL;
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	struct list_head *pos;
	int ret = 0;

	mutex_lock(&listener_access_lock);
	while (!list_empty(&qseecom.unregister_lsnr_pending_list_head)) {
		pos = qseecom.unregister_lsnr_pending_list_head.next;
		entry = list_entry(pos,
				struct qseecom_unregister_pending_list, list);
		if (entry && entry->data) {
			pr_debug("process pending unregister %d\n",
					entry->data->listener.id);
			/* don't process the entry if qseecom_release is not called*/
			if (!entry->data->listener.release_called) {
				/* rotate to the tail and stop processing */
				list_del(pos);
				list_add_tail(&entry->list,
					&qseecom.unregister_lsnr_pending_list_head);
				break;
			}
			ptr_svc = __qseecom_find_svc(
					entry->data->listener.id);
			if (ptr_svc) {
				/*
				 * NOTE: this drops and re-takes
				 * listener_access_lock internally around the
				 * SCM call.
				 */
				ret = __qseecom_unregister_listener(
						entry->data, ptr_svc);
				if (ret) {
					/* keep entry queued; retry later */
					pr_debug("unregister %d pending again\n",
						entry->data->listener.id);
					mutex_unlock(&listener_access_lock);
					return;
				}
			} else
				pr_err("invalid listener %d\n",
					entry->data->listener.id);
			__qseecom_free_tzbuf(&entry->data->sglistinfo_shm);
			kfree_sensitive(entry->data);
		}
		/* remove and free the processed pending-list node */
		list_del(pos);
		kfree_sensitive(entry);
	}
	mutex_unlock(&listener_access_lock);
	wake_up_interruptible(&qseecom.register_lsnr_pending_wq);
}
/*
 * Mark the unregister kthread runnable and wake it. The kthread resets
 * the state back to LSNR_UNREG_KT_SLEEP after draining the pending list.
 */
static void __wakeup_unregister_listener_kthread(void)
{
	atomic_set(&qseecom.unregister_lsnr_kthread_state,
				LSNR_UNREG_KT_WAKEUP);
	wake_up_interruptible(&qseecom.unregister_lsnr_kthread_wq);
}
  1610. static int __qseecom_unregister_listener_kthread_func(void *data)
  1611. {
  1612. while (!kthread_should_stop()) {
  1613. wait_event_interruptible(
  1614. qseecom.unregister_lsnr_kthread_wq,
  1615. atomic_read(&qseecom.unregister_lsnr_kthread_state)
  1616. == LSNR_UNREG_KT_WAKEUP);
  1617. pr_debug("kthread to unregister listener is called %d\n",
  1618. atomic_read(&qseecom.unregister_lsnr_kthread_state));
  1619. __qseecom_processing_pending_lsnr_unregister();
  1620. atomic_set(&qseecom.unregister_lsnr_kthread_state,
  1621. LSNR_UNREG_KT_SLEEP);
  1622. }
  1623. pr_warn("kthread to unregister listener stopped\n");
  1624. return 0;
  1625. }
  1626. static int qseecom_bus_scale_update_request(
  1627. int client, int mode)
  1628. {
  1629. pr_debug("client %d, mode %d\n", client, mode);
  1630. /*TODO: get ab/ib from device tree for different mode*/
  1631. if (!mode)
  1632. return icc_set_bw(qseecom.icc_path, 0, 0);
  1633. else
  1634. return icc_set_bw(qseecom.icc_path,
  1635. qseecom.avg_bw, qseecom.peak_bw);
  1636. }
/*
 * Switch the QSEE bus/clock state to @mode: disable the QSEE clock for
 * INACTIVE, enable it otherwise, then issue the matching bandwidth
 * request if the mode actually changed. On bandwidth failure the clock
 * change is rolled back.
 * Returns 0 on success, negative errno on failure.
 */
static int __qseecom_set_msm_bus_request(uint32_t mode)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	if (qclk->ce_core_src_clk != NULL) {
		if (mode == INACTIVE) {
			__qseecom_disable_clk(CLK_QSEE);
		} else {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				pr_err("CLK enabling failed (%d) MODE (%d)\n",
							ret, mode);
		}
	}

	if ((!ret) && (qseecom.current_mode != mode)) {
		ret = qseecom_bus_scale_update_request(
					qseecom.qsee_perf_client, mode);
		if (ret) {
			pr_err("Bandwidth req failed(%d) MODE (%d)\n",
							ret, mode);
			/* roll clock state back to match the previous mode */
			if (qclk->ce_core_src_clk != NULL) {
				if (mode == INACTIVE) {
					/* NOTE(review): ret is overwritten here,
					 * masking the bandwidth error — confirm
					 * this is intentional. */
					ret = __qseecom_enable_clk(CLK_QSEE);
					if (ret)
						pr_err("CLK enable failed\n");
				} else
					__qseecom_disable_clk(CLK_QSEE);
			}
		}
		/* NOTE(review): current_mode is updated even when the
		 * bandwidth request failed — confirm intentional. */
		qseecom.current_mode = mode;
	}
	return ret;
}
/*
 * Workqueue handler scheduled by the scale-down timer: drop the bus
 * vote to INACTIVE if the timer is still considered running, then
 * clear timer_running. Takes app_access_lock before qsee_bw_mutex —
 * same order as the rest of the driver.
 */
static void qseecom_bw_inactive_req_work(struct work_struct *work)
{
	mutex_lock(&app_access_lock);
	mutex_lock(&qsee_bw_mutex);
	if (qseecom.timer_running)
		__qseecom_set_msm_bus_request(INACTIVE);
	pr_debug("current_mode = %d, cumulative_mode = %d\n",
				qseecom.current_mode, qseecom.cumulative_mode);
	qseecom.timer_running = false;
	mutex_unlock(&qsee_bw_mutex);
	mutex_unlock(&app_access_lock);
}
/*
 * Timer callback (atomic context): defer the actual scale-down to
 * process context via the workqueue, since the work takes mutexes.
 */
static void qseecom_scale_bus_bandwidth_timer_callback(struct timer_list *data)
{
	schedule_work(&qseecom.bw_inactive_req_ws);
}
  1687. static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
  1688. {
  1689. struct qseecom_clk *qclk;
  1690. int ret = 0;
  1691. mutex_lock(&clk_access_lock);
  1692. if (ce == CLK_QSEE)
  1693. qclk = &qseecom.qsee;
  1694. else
  1695. qclk = &qseecom.ce_drv;
  1696. if (qclk->clk_access_cnt > 0) {
  1697. qclk->clk_access_cnt--;
  1698. } else {
  1699. pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
  1700. ret = -EINVAL;
  1701. }
  1702. mutex_unlock(&clk_access_lock);
  1703. return ret;
  1704. }
  1705. static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
  1706. {
  1707. int32_t ret = 0;
  1708. int32_t request_mode = INACTIVE;
  1709. mutex_lock(&qsee_bw_mutex);
  1710. if (mode == 0) {
  1711. if (qseecom.cumulative_mode > MEDIUM)
  1712. request_mode = HIGH;
  1713. else
  1714. request_mode = qseecom.cumulative_mode;
  1715. } else {
  1716. request_mode = mode;
  1717. }
  1718. ret = __qseecom_set_msm_bus_request(request_mode);
  1719. if (ret) {
  1720. pr_err("set msm bus request failed (%d),request_mode (%d)\n",
  1721. ret, request_mode);
  1722. goto err_scale_timer;
  1723. }
  1724. if (qseecom.timer_running) {
  1725. ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
  1726. if (ret) {
  1727. pr_err("Failed to decrease clk ref count.\n");
  1728. goto err_scale_timer;
  1729. }
  1730. del_timer_sync(&(qseecom.bw_scale_down_timer));
  1731. qseecom.timer_running = false;
  1732. }
  1733. err_scale_timer:
  1734. mutex_unlock(&qsee_bw_mutex);
  1735. return ret;
  1736. }
/*
 * Remove this handle's bandwidth vote from the aggregate and mark the
 * handle INACTIVE. Always returns 0.
 * NOTE(review): presumably called with qsee_bw_mutex held — confirm
 * against callers.
 */
static int qseecom_unregister_bus_bandwidth_needs(
					struct qseecom_dev_handle *data)
{
	qseecom.cumulative_mode -= data->mode;
	data->mode = INACTIVE;
	return 0;
}
  1744. static int __qseecom_register_bus_bandwidth_needs(
  1745. struct qseecom_dev_handle *data, uint32_t request_mode)
  1746. {
  1747. if (data->mode == INACTIVE) {
  1748. qseecom.cumulative_mode += request_mode;
  1749. data->mode = request_mode;
  1750. } else {
  1751. if (data->mode != request_mode) {
  1752. qseecom.cumulative_mode -= data->mode;
  1753. qseecom.cumulative_mode += request_mode;
  1754. data->mode = request_mode;
  1755. }
  1756. }
  1757. return 0;
  1758. }
  1759. static int qseecom_perf_enable(struct qseecom_dev_handle *data)
  1760. {
  1761. int ret = 0;
  1762. ret = qsee_vote_for_clock(data, CLK_DFAB);
  1763. if (ret) {
  1764. pr_err("Failed to vote for DFAB clock with err %d\n", ret);
  1765. goto perf_enable_exit;
  1766. }
  1767. ret = qsee_vote_for_clock(data, CLK_SFPB);
  1768. if (ret) {
  1769. qsee_disable_clock_vote(data, CLK_DFAB);
  1770. pr_err("Failed to vote for SFPB clock with err %d\n", ret);
  1771. goto perf_enable_exit;
  1772. }
  1773. perf_enable_exit:
  1774. return ret;
  1775. }
  1776. static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
  1777. {
  1778. if (qseecom.no_clock_support)
  1779. return;
  1780. mutex_lock(&qsee_bw_mutex);
  1781. qseecom.bw_scale_down_timer.expires = jiffies +
  1782. msecs_to_jiffies(duration);
  1783. mod_timer(&(qseecom.bw_scale_down_timer),
  1784. qseecom.bw_scale_down_timer.expires);
  1785. qseecom.timer_running = true;
  1786. mutex_unlock(&qsee_bw_mutex);
  1787. }
  1788. static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
  1789. {
  1790. if (!qseecom.support_bus_scaling)
  1791. qsee_disable_clock_vote(data, CLK_SFPB);
  1792. else
  1793. __qseecom_add_bw_scale_down_timer(
  1794. QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
  1795. }
  1796. static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
  1797. {
  1798. int ret = 0;
  1799. if (qseecom.support_bus_scaling) {
  1800. ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
  1801. if (ret)
  1802. pr_err("Failed to set bw MEDIUM.\n");
  1803. } else {
  1804. ret = qsee_vote_for_clock(data, CLK_SFPB);
  1805. if (ret)
  1806. pr_err("Fail vote for clk SFPB ret %d\n", ret);
  1807. }
  1808. return ret;
  1809. }
/*
 * Handle the set-shared-buffer ioctl: validate the user request, map the
 * supplied dma-buf fd into the kernel, and record the shared buffer's
 * kernel/user addresses and length on this client handle.
 * Returns 0 on success, -EFAULT for invalid user input, -EINVAL when the
 * mapping fails or is smaller than requested (mapping is undone).
 */
static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
						void __user *argp)
{
	int32_t ret;
	struct qseecom_set_sb_mem_param_req req;
	size_t len;

	/* Copy the relevant information needed for loading the image */
	if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
		return -EFAULT;

	if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
					(req.sb_len == 0)) {
		pr_err("Invalid input(s)ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
			req.ifd_data_fd, req.sb_len, req.virt_sb_base);
		return -EFAULT;
	}
	if (!access_ok((void __user *)req.virt_sb_base,
			req.sb_len))
		return -EFAULT;

	ret = qseecom_vaddr_map(req.ifd_data_fd, &data->client.sb_phys,
			(void **)&data->client.sb_virt,
			&data->client.sgt, &data->client.attach,
			&len, &data->client.dmabuf);
	if (ret) {
		pr_err("failed to convert ion_fd %d for lsnr %d with err: %d\n",
			req.ifd_data_fd, data->client.app_id, ret);
		return -EINVAL;
	}

	if (len < req.sb_len) {
		/* NOTE(review): the condition rejects a request larger than
		 * the mapping, though the message reads the other way. */
		pr_err("Requested length (0x%x) is > allocated (%zu)\n",
			req.sb_len, len);
		ret = -EINVAL;
		goto exit;
	}
	data->client.sb_length = req.sb_len;
	data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;

	return ret;
exit:
	/* undo the mapping on validation failure */
	if (data->client.dmabuf) {
		qseecom_vaddr_unmap(data->client.sb_virt, data->client.sgt,
			data->client.attach, data->client.dmabuf);
		MAKE_NULL(data->client.sgt,
			data->client.attach, data->client.dmabuf);
	}
	return ret;
}
  1855. static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data,
  1856. struct qseecom_registered_listener_list *ptr_svc)
  1857. {
  1858. int ret;
  1859. ret = (qseecom.send_resp_flag != 0);
  1860. return ret || data->abort || ptr_svc->abort;
  1861. }
  1862. static int __qseecom_reentrancy_listener_has_sent_rsp(
  1863. struct qseecom_dev_handle *data,
  1864. struct qseecom_registered_listener_list *ptr_svc)
  1865. {
  1866. int ret;
  1867. ret = (ptr_svc->send_resp_flag != 0);
  1868. return ret || data->abort || ptr_svc->abort;
  1869. }
  1870. static void __qseecom_clean_listener_sglistinfo(
  1871. struct qseecom_registered_listener_list *ptr_svc)
  1872. {
  1873. if (ptr_svc->sglist_cnt) {
  1874. memset(ptr_svc->sglistinfo_ptr, 0,
  1875. SGLISTINFO_TABLE_SIZE);
  1876. ptr_svc->sglist_cnt = 0;
  1877. }
  1878. }
/*
 * Service QSEOS_RESULT_INCOMPLETE responses from TZ: each incomplete
 * result names a listener (resp->data) that must handle a request.
 * Wake that listener, wait for it to post a response, then forward the
 * response to TZ via scm_call, looping until TZ returns a final result.
 * Bumps app_block_ref_cnt for the duration so OS-level commands wait.
 * Returns 0 on success, negative errno on failure.
 */
static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
					struct qseecom_command_scm_resp *resp)
{
	int ret = 0;
	int rc = 0;
	uint32_t lstnr;
	struct qseecom_client_listener_data_irsp send_data_rsp = {0};
	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
								= {0};
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	sigset_t new_sigset;
	uint32_t status;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = NULL;

	qseecom.app_block_ref_cnt++;
	while (resp->result == QSEOS_RESULT_INCOMPLETE) {
		lstnr = resp->data;
		/*
		 * Wake up blocking lsitener service with the lstnr id
		 */
		mutex_lock(&listener_access_lock);
		list_for_each_entry(ptr_svc,
				&qseecom.registered_listener_list_head, list) {
			if (ptr_svc->svc.listener_id == lstnr) {
				ptr_svc->listener_in_use = true;
				ptr_svc->rcv_req_flag = 1;
				/* make shared buffer coherent before the
				 * listener reads the request */
				ret = qseecom_dmabuf_cache_operations(
					ptr_svc->dmabuf,
					QSEECOM_CACHE_INVALIDATE);
				if (ret) {
					rc = -EINVAL;
					status = QSEOS_RESULT_FAILURE;
					goto err_resp;
				}
				wake_up_interruptible(&ptr_svc->rcv_req_wq);
				break;
			}
		}

		/*
		 * NOTE(review): after list_for_each_entry() the cursor is
		 * never NULL (on no match it points at the list-head
		 * container), so this check cannot trigger; the listener_id
		 * re-check below is what actually catches a missing service.
		 * The ->dmabuf dereference before that re-check reads through
		 * the bogus cursor on the no-match path — confirm.
		 */
		if (ptr_svc == NULL) {
			pr_err("Listener Svc %d does not exist\n", lstnr);
			rc = -EINVAL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		if (!ptr_svc->dmabuf) {
			pr_err("Client dmabuf is not initialized\n");
			rc = -EINVAL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		if (ptr_svc->svc.listener_id != lstnr) {
			pr_err("Service %d does not exist\n",
						lstnr);
			rc = -ERESTARTSYS;
			ptr_svc = NULL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		if (ptr_svc->abort == 1) {
			pr_debug("Service %d abort %d\n",
						lstnr, ptr_svc->abort);
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");

		/* initialize the new signal mask with all signals*/
		/* NOTE(review): new_sigset is filled but never installed, so
		 * signals are not actually blocked here — confirm intent. */
		sigfillset(&new_sigset);
		/* block all signals */

		mutex_unlock(&listener_access_lock);
		do {
			/*
			 * When reentrancy is not supported, check global
			 * send_resp_flag; otherwise, check this listener's
			 * send_resp_flag.
			 */
			if (!qseecom.qsee_reentrancy_support &&
				!wait_event_interruptible(qseecom.send_resp_wq,
				__qseecom_listener_has_sent_rsp(
						data, ptr_svc))) {
				break;
			}

			if (qseecom.qsee_reentrancy_support &&
				!wait_event_interruptible(qseecom.send_resp_wq,
				__qseecom_reentrancy_listener_has_sent_rsp(
						data, ptr_svc))) {
				break;
			}
		} while (1);
		mutex_lock(&listener_access_lock);
		/* restore signal mask */
		if (data->abort || ptr_svc->abort) {
			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d\n",
				data->client.app_id, lstnr, ret);
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
		} else {
			status = QSEOS_RESULT_SUCCESS;
		}
err_resp:
		qseecom.send_resp_flag = 0;
		if (ptr_svc) {
			ptr_svc->send_resp_flag = 0;
			table = ptr_svc->sglistinfo_ptr;
		}
		/* build the listener response; layout depends on QSEE version */
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			send_data_rsp.listener_id = lstnr;
			send_data_rsp.status = status;
			if (table) {
				send_data_rsp.sglistinfo_ptr =
					(uint32_t)virt_to_phys(table);
				send_data_rsp.sglistinfo_len =
					SGLISTINFO_TABLE_SIZE;
				qtee_shmbridge_flush_shm_buf(
					&ptr_svc->sglistinfo_shm);
			}
			cmd_buf = (void *)&send_data_rsp;
			cmd_len = sizeof(send_data_rsp);
		} else {
			send_data_rsp_64bit.listener_id = lstnr;
			send_data_rsp_64bit.status = status;
			if (table) {
				send_data_rsp_64bit.sglistinfo_ptr =
					virt_to_phys(table);
				send_data_rsp_64bit.sglistinfo_len =
					SGLISTINFO_TABLE_SIZE;
				qtee_shmbridge_flush_shm_buf(
					&ptr_svc->sglistinfo_shm);
			}
			cmd_buf = (void *)&send_data_rsp_64bit;
			cmd_len = sizeof(send_data_rsp_64bit);
		}
		/* first field of either struct is the command id */
		if (!qseecom.whitelist_support || table == NULL)
			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
		else
			*(uint32_t *)cmd_buf =
				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;

		/* RPMB/SSD listeners need the QSEE clock around the scm call */
		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				goto exit;
		}

		if (ptr_svc) {
			ret = qseecom_dmabuf_cache_operations(ptr_svc->dmabuf,
						QSEECOM_CACHE_CLEAN);
			if (ret)
				goto exit;

			ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len, resp, sizeof(*resp));
			ptr_svc->listener_in_use = false;
			__qseecom_clean_listener_sglistinfo(ptr_svc);

			if (ret) {
				pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
				goto exit;
			}
		} else {
			ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len, resp, sizeof(*resp));
			if (ret) {
				pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
				goto exit;
			}
		}

		pr_debug("resp status %d, res= %d, app_id = %d, lstr = %d\n",
			status, resp->result, data->client.app_id, lstnr);
		if ((resp->result != QSEOS_RESULT_SUCCESS) &&
			(resp->result != QSEOS_RESULT_INCOMPLETE)) {
			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
				resp->result, data->client.app_id, lstnr);
			ret = -EINVAL;
		}
exit:
		mutex_unlock(&listener_access_lock);
		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
			__qseecom_disable_clk(CLK_QSEE);

	}
	qseecom.app_block_ref_cnt--;
	wake_up_interruptible_all(&qseecom.app_block_wq);
	if (rc)
		return rc;

	return ret;
}
/*
 * Handle QSEOS_RESULT_BLOCKED_ON_LISTENER (reentrant QSEE): the TA
 * session named by resp->resp_type is blocked because the listener in
 * resp->data is busy. Sleep — dropping both app_access_lock and
 * listener_access_lock — until the listener frees up, then issue
 * QSEOS_CONTINUE_BLOCKED_REQ_COMMAND, repeating while TZ keeps
 * reporting BLOCKED_ON_LISTENER.
 * Returns 0 when TZ reports QSEOS_RESULT_INCOMPLETE, negative errno
 * otherwise. Called with app_access_lock held.
 */
static int __qseecom_process_reentrancy_blocked_on_listener(
				struct qseecom_command_scm_resp *resp,
				struct qseecom_registered_app_list *ptr_app,
				struct qseecom_dev_handle *data)
{
	struct qseecom_registered_listener_list *list_ptr;
	int ret = 0;
	struct qseecom_continue_blocked_request_ireq ireq;
	struct qseecom_command_scm_resp continue_resp;
	unsigned int session_id;
	sigset_t new_sigset;
	unsigned long flags;
	bool found_app = false;
	struct qseecom_registered_app_list dummy_app_entry = { {NULL} };

	if (!resp || !data) {
		pr_err("invalid resp or data pointer\n");
		ret = -EINVAL;
		goto exit;
	}

	/* find app_id & img_name from list */
	if (!ptr_app) {
		if (data->client.from_smcinvoke || data->client.from_loadapp) {
			pr_debug("This request is from %s\n",
				(data->client.from_smcinvoke ? "smcinvoke" : "load_app"));
			/* no registered entry; use a stack dummy that only
			 * carries the app_id */
			ptr_app = &dummy_app_entry;
			ptr_app->app_id = data->client.app_id;
		} else {
			spin_lock_irqsave(&qseecom.registered_app_list_lock,
				flags);
			list_for_each_entry(ptr_app,
				&qseecom.registered_app_list_head, list) {
				if ((ptr_app->app_id == data->client.app_id) &&
					(!strcmp(ptr_app->app_name,
						data->client.app_name))) {
					found_app = true;
					break;
				}
			}
			spin_unlock_irqrestore(
				&qseecom.registered_app_list_lock, flags);
			if (!found_app) {
				pr_err("app_id %d (%s) is not found\n",
					data->client.app_id,
					(char *)data->client.app_name);
				ret = -ENOENT;
				goto exit;
			}
		}
	}

	do {
		session_id = resp->resp_type;
		mutex_lock(&listener_access_lock);
		list_ptr = __qseecom_find_svc(resp->data);
		if (!list_ptr) {
			pr_err("Invalid listener ID %d\n", resp->data);
			ret = -ENODATA;
			mutex_unlock(&listener_access_lock);
			goto exit;
		}
		ptr_app->blocked_on_listener_id = resp->data;

		pr_warn("Lsntr %d in_use %d, block session(%d) app(%d)\n",
			resp->data, list_ptr->listener_in_use,
			session_id, data->client.app_id);

		/* sleep until listener is available */
		/* NOTE(review): new_sigset is filled but never installed —
		 * signals are not actually blocked; confirm intent. */
		sigfillset(&new_sigset);

		do {
			qseecom.app_block_ref_cnt++;
			ptr_app->app_blocked = true;
			/* drop both locks so the listener thread can progress */
			mutex_unlock(&listener_access_lock);
			mutex_unlock(&app_access_lock);
			wait_event_interruptible(
				list_ptr->listener_block_app_wq,
				!list_ptr->listener_in_use);
			mutex_lock(&app_access_lock);
			mutex_lock(&listener_access_lock);
			ptr_app->app_blocked = false;
			qseecom.app_block_ref_cnt--;
		} while (list_ptr->listener_in_use);

		ptr_app->blocked_on_listener_id = 0;

		pr_warn("Lsntr %d is available, unblock session(%d) app(%d)\n",
			resp->data, session_id, data->client.app_id);

		/* notify TZ that listener is available */
		ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;

		if (qseecom.smcinvoke_support)
			ireq.app_or_session_id = session_id;
		else
			ireq.app_or_session_id = data->client.app_id;

		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					&ireq, sizeof(ireq),
					&continue_resp, sizeof(continue_resp));
		if (ret && qseecom.smcinvoke_support) {
			/* retry with legacy cmd */
			pr_warn("falling back to legacy method\n");
			qseecom.smcinvoke_support = false;
			ireq.app_or_session_id = data->client.app_id;
			ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				&ireq, sizeof(ireq),
				&continue_resp, sizeof(continue_resp));
			qseecom.smcinvoke_support = true;
			if (ret) {
				pr_err("unblock app %d or session %d fail\n",
					data->client.app_id, session_id);
				mutex_unlock(&listener_access_lock);
				goto exit;
			}
		}
		mutex_unlock(&listener_access_lock);
		resp->result = continue_resp.result;
		resp->resp_type = continue_resp.resp_type;
		resp->data = continue_resp.data;
		/* NOTE(review): logged at err level on every iteration even
		 * on success — consider pr_debug. */
		pr_err("unblock resp = %d\n", resp->result);
	} while (resp->result == QSEOS_RESULT_BLOCKED_ON_LISTENER);

	if (resp->result != QSEOS_RESULT_INCOMPLETE) {
		pr_err("Unexpected unblock resp %d\n", resp->result);
		ret = -EINVAL;
	}
exit:
	return ret;
}
/*
 * Reentrancy-aware variant of __qseecom_process_incomplete_cmd(): wake
 * the listener named in resp->data, drop BOTH app_access_lock and
 * listener_access_lock while waiting for its response, forward the
 * response to TZ, and additionally handle BLOCKED_ON_LISTENER and
 * CBACK_REQUEST results from the follow-up scm_call.
 * Called with app_access_lock held. Returns 0 on success, negative
 * errno on failure.
 */
static int __qseecom_reentrancy_process_incomplete_cmd(
					struct qseecom_dev_handle *data,
					struct qseecom_command_scm_resp *resp)
{
	int ret = 0;
	int rc = 0;
	uint32_t lstnr;
	struct qseecom_client_listener_data_irsp send_data_rsp = {0};
	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
								= {0};
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	sigset_t new_sigset;
	uint32_t status;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = NULL;

	while (ret == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
		lstnr = resp->data;
		/*
		 * Wake up blocking lsitener service with the lstnr id
		 */
		mutex_lock(&listener_access_lock);
		list_for_each_entry(ptr_svc,
				&qseecom.registered_listener_list_head, list) {
			if (ptr_svc->svc.listener_id == lstnr) {
				ptr_svc->listener_in_use = true;
				ptr_svc->rcv_req_flag = 1;
				/* make shared buffer coherent before the
				 * listener reads the request */
				ret = qseecom_dmabuf_cache_operations(
					ptr_svc->dmabuf,
					QSEECOM_CACHE_INVALIDATE);
				if (ret) {
					rc = -EINVAL;
					status = QSEOS_RESULT_FAILURE;
					goto err_resp;
				}
				wake_up_interruptible(&ptr_svc->rcv_req_wq);
				break;
			}
		}

		/*
		 * NOTE(review): as in __qseecom_process_incomplete_cmd(), the
		 * cursor is never NULL after list_for_each_entry(); the
		 * listener_id re-check below is the effective guard, and the
		 * ->dmabuf dereference precedes it — confirm.
		 */
		if (ptr_svc == NULL) {
			pr_err("Listener Svc %d does not exist\n", lstnr);
			rc = -EINVAL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		if (!ptr_svc->dmabuf) {
			pr_err("Client dmabuf is not initialized\n");
			rc = -EINVAL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		if (ptr_svc->svc.listener_id != lstnr) {
			pr_err("Service %d does not exist\n",
						lstnr);
			rc = -ERESTARTSYS;
			ptr_svc = NULL;
			table = NULL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		if (ptr_svc->abort == 1) {
			pr_debug("Service %d abort %d\n",
						lstnr, ptr_svc->abort);
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");

		/* initialize the new signal mask with all signals*/
		/* NOTE(review): new_sigset is filled but never installed —
		 * signals are not actually blocked; confirm intent. */
		sigfillset(&new_sigset);

		/* block all signals */
		/* unlock mutex btw waking listener and sleep-wait */
		mutex_unlock(&listener_access_lock);
		mutex_unlock(&app_access_lock);
		do {
			if (!wait_event_interruptible(qseecom.send_resp_wq,
				__qseecom_reentrancy_listener_has_sent_rsp(
						data, ptr_svc))) {
				break;
			}
		} while (1);
		/* lock mutex again after resp sent */
		mutex_lock(&app_access_lock);
		mutex_lock(&listener_access_lock);
		ptr_svc->send_resp_flag = 0;
		qseecom.send_resp_flag = 0;

		/* restore signal mask */
		if (data->abort || ptr_svc->abort) {
			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d\n",
				data->client.app_id, lstnr, ret);
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
		} else {
			status = QSEOS_RESULT_SUCCESS;
		}
err_resp:
		if (ptr_svc)
			table = ptr_svc->sglistinfo_ptr;
		/* build the listener response; layout depends on QSEE version */
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			send_data_rsp.listener_id = lstnr;
			send_data_rsp.status = status;
			if (table) {
				send_data_rsp.sglistinfo_ptr =
					(uint32_t)virt_to_phys(table);
				send_data_rsp.sglistinfo_len =
					SGLISTINFO_TABLE_SIZE;
				qtee_shmbridge_flush_shm_buf(
					&ptr_svc->sglistinfo_shm);
			}
			cmd_buf = (void *)&send_data_rsp;
			cmd_len = sizeof(send_data_rsp);
		} else {
			send_data_rsp_64bit.listener_id = lstnr;
			send_data_rsp_64bit.status = status;
			if (table) {
				send_data_rsp_64bit.sglistinfo_ptr =
					virt_to_phys(table);
				send_data_rsp_64bit.sglistinfo_len =
					SGLISTINFO_TABLE_SIZE;
				qtee_shmbridge_flush_shm_buf(
					&ptr_svc->sglistinfo_shm);
			}
			cmd_buf = (void *)&send_data_rsp_64bit;
			cmd_len = sizeof(send_data_rsp_64bit);
		}
		/* first field of either struct is the command id */
		if (!qseecom.whitelist_support || table == NULL)
			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
		else
			*(uint32_t *)cmd_buf =
				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;

		/* RPMB listener needs the QSEE clock around the scm call */
		if (lstnr == RPMB_SERVICE) {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				goto exit;
		}

		if (ptr_svc) {
			ret = qseecom_dmabuf_cache_operations(ptr_svc->dmabuf,
						QSEECOM_CACHE_CLEAN);
			if (ret)
				goto exit;

			ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len, resp, sizeof(*resp));
			ptr_svc->listener_in_use = false;
			__qseecom_clean_listener_sglistinfo(ptr_svc);
			/* unblock any TA session waiting on this listener */
			wake_up_interruptible(&ptr_svc->listener_block_app_wq);

			if (ret) {
				pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
				goto exit;
			}
		} else {
			ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len, resp, sizeof(*resp));
			if (ret) {
				pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
				goto exit;
			}
		}

		switch (resp->result) {
		case QSEOS_RESULT_BLOCKED_ON_LISTENER:
			pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
				lstnr, data->client.app_id, resp->data);
			if (lstnr == resp->data) {
				pr_err("lstnr %d should not be blocked!\n",
					lstnr);
				ret = -EINVAL;
				goto exit;
			}
			mutex_unlock(&listener_access_lock);
			ret = __qseecom_process_reentrancy_blocked_on_listener(
					resp, NULL, data);
			mutex_lock(&listener_access_lock);
			if (ret) {
				pr_err("failed to process App(%d) %s blocked on listener %d\n",
					data->client.app_id,
					data->client.app_name, resp->data);
				goto exit;
			}
			fallthrough;
		case QSEOS_RESULT_SUCCESS:
			break;
		case QSEOS_RESULT_INCOMPLETE:
			break;
		case QSEOS_RESULT_CBACK_REQUEST:
			pr_warn("get cback req app_id = %d, resp->data = %d\n",
				data->client.app_id, resp->data);
			resp->resp_type = SMCINVOKE_RESULT_INBOUND_REQ_NEEDED;
			break;
		default:
			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
				resp->result, data->client.app_id, lstnr);
			ret = -EINVAL;
			goto exit;
		}
exit:
		mutex_unlock(&listener_access_lock);
		if (lstnr == RPMB_SERVICE)
			__qseecom_disable_clk(CLK_QSEE);

	}
	if (rc)
		return rc;

	return ret;
}
  2387. /*
  2388. * QSEE doesn't support OS level cmds reentrancy until RE phase-3,
  2389. * and QSEE OS level scm_call cmds will fail if there is any blocked TZ app.
  2390. * So, needs to first check if no app blocked before sending OS level scm call,
  2391. * then wait until all apps are unblocked.
  2392. */
  2393. static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
  2394. {
  2395. if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
  2396. qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
  2397. IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
  2398. /* thread sleep until this app unblocked */
  2399. while (qseecom.app_block_ref_cnt > 0) {
  2400. mutex_unlock(&app_access_lock);
  2401. wait_event_interruptible(qseecom.app_block_wq,
  2402. (!qseecom.app_block_ref_cnt));
  2403. mutex_lock(&app_access_lock);
  2404. }
  2405. }
  2406. }
  2407. /*
  2408. * scm_call of send data will fail if this TA is blocked or there are more
  2409. * than one TA requesting listener services; So, first check to see if need
  2410. * to wait.
  2411. */
  2412. static void __qseecom_reentrancy_check_if_this_app_blocked(
  2413. struct qseecom_registered_app_list *ptr_app)
  2414. {
  2415. if (qseecom.qsee_reentrancy_support) {
  2416. ptr_app->check_block++;
  2417. while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
  2418. /* thread sleep until this app unblocked */
  2419. mutex_unlock(&app_access_lock);
  2420. wait_event_interruptible(qseecom.app_block_wq,
  2421. (!ptr_app->app_blocked &&
  2422. qseecom.app_block_ref_cnt <= 1));
  2423. mutex_lock(&app_access_lock);
  2424. }
  2425. ptr_app->check_block--;
  2426. }
  2427. }
/*
 * Look up req.app_name: first in the locally registered app list, then,
 * if not found there, by issuing an APP_LOOKUP scm call to QSEE.
 *
 * On return, *app_id holds the app id, or 0 when the app is not loaded
 * anywhere.  Returns 0 on a successful lookup (including "not found"),
 * -EINVAL on bad input or scm failure, -ENODEV on an unknown resp type.
 */
static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
					uint32_t *app_id)
{
	int32_t ret;
	struct qseecom_command_scm_resp resp;
	bool found_app = false;
	struct qseecom_registered_app_list *entry = NULL;
	unsigned long flags = 0;

	if (!app_id) {
		pr_err("Null pointer to app_id\n");
		return -EINVAL;
	}
	*app_id = 0;

	/* check if app exists and has been registered locally */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(entry,
			&qseecom.registered_app_list_head, list) {
		if (!strcmp(entry->app_name, req.app_name)) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
	if (found_app) {
		/* entry is still valid here: the loop broke while it was live */
		pr_debug("Found app with id %d\n", entry->app_id);
		*app_id = entry->app_id;
		return 0;
	}

	memset((void *)&resp, 0, sizeof(resp));

	/* SCM_CALL to check if app_id for the mentioned app exists */
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
				sizeof(struct qseecom_check_app_ireq),
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call to check if app is already loaded failed\n");
		return -EINVAL;
	}

	/* FAILURE here means "app not loaded", which is not an error */
	if (resp.result == QSEOS_RESULT_FAILURE)
		return 0;

	switch (resp.resp_type) {
	/*qsee returned listener type response */
	case QSEOS_LISTENER_ID:
		pr_err("resp type is of listener type instead of app\n");
		return -EINVAL;
	case QSEOS_APP_ID:
		*app_id = resp.data;
		return 0;
	default:
		pr_err("invalid resp type (%d) from qsee\n",
				resp.resp_type);
		return -ENODEV;
	}
}
  2481. static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
  2482. {
  2483. struct qseecom_registered_app_list *entry = NULL;
  2484. unsigned long flags = 0;
  2485. u32 app_id = 0;
  2486. struct qseecom_load_img_req load_img_req;
  2487. int32_t ret = 0;
  2488. phys_addr_t pa = 0;
  2489. void *vaddr = NULL;
  2490. struct dma_buf_attachment *attach = NULL;
  2491. struct dma_buf *dmabuf = NULL;
  2492. struct sg_table *sgt = NULL;
  2493. size_t len;
  2494. struct qseecom_command_scm_resp resp;
  2495. struct qseecom_check_app_ireq req;
  2496. struct qseecom_load_app_ireq load_req;
  2497. struct qseecom_load_app_64bit_ireq load_req_64bit;
  2498. void *cmd_buf = NULL;
  2499. size_t cmd_len;
  2500. bool first_time = false;
  2501. /* Copy the relevant information needed for loading the image */
  2502. if (copy_from_user(&load_img_req,
  2503. (void __user *)argp,
  2504. sizeof(struct qseecom_load_img_req))) {
  2505. pr_err("copy_from_user failed\n");
  2506. return -EFAULT;
  2507. }
  2508. /* Check and load cmnlib */
  2509. if (qseecom.qsee_version > QSEEE_VERSION_00) {
  2510. if (!qseecom.commonlib_loaded &&
  2511. load_img_req.app_arch == ELFCLASS32) {
  2512. ret = qseecom_load_commonlib_image(data, "cmnlib");
  2513. if (ret) {
  2514. pr_err("failed to load cmnlib\n");
  2515. return -EIO;
  2516. }
  2517. qseecom.commonlib_loaded = true;
  2518. pr_debug("cmnlib is loaded\n");
  2519. }
  2520. if (!qseecom.commonlib64_loaded &&
  2521. load_img_req.app_arch == ELFCLASS64) {
  2522. ret = qseecom_load_commonlib_image(data, "cmnlib64");
  2523. if (ret) {
  2524. pr_err("failed to load cmnlib64\n");
  2525. return -EIO;
  2526. }
  2527. qseecom.commonlib64_loaded = true;
  2528. pr_debug("cmnlib64 is loaded\n");
  2529. }
  2530. }
  2531. if (qseecom.support_bus_scaling) {
  2532. mutex_lock(&qsee_bw_mutex);
  2533. ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
  2534. mutex_unlock(&qsee_bw_mutex);
  2535. if (ret)
  2536. return ret;
  2537. }
  2538. /* Vote for the SFPB clock */
  2539. ret = __qseecom_enable_clk_scale_up(data);
  2540. if (ret)
  2541. goto enable_clk_err;
  2542. req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
  2543. load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
  2544. strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
  2545. ret = __qseecom_check_app_exists(req, &app_id);
  2546. if (ret < 0)
  2547. goto checkapp_err;
  2548. if (app_id) {
  2549. pr_debug("App id %d (%s) already exists\n", app_id,
  2550. (char *)(req.app_name));
  2551. spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
  2552. list_for_each_entry(entry,
  2553. &qseecom.registered_app_list_head, list){
  2554. if (entry->app_id == app_id) {
  2555. if (entry->ref_cnt == U32_MAX) {
  2556. pr_err("App %d (%s) ref_cnt overflow\n",
  2557. app_id, req.app_name);
  2558. ret = -EINVAL;
  2559. goto loadapp_err;
  2560. }
  2561. entry->ref_cnt++;
  2562. break;
  2563. }
  2564. }
  2565. spin_unlock_irqrestore(
  2566. &qseecom.registered_app_list_lock, flags);
  2567. ret = 0;
  2568. } else {
  2569. first_time = true;
  2570. pr_warn("App (%s) does'nt exist, loading apps for first time\n",
  2571. (char *)(load_img_req.img_name));
  2572. ret = qseecom_vaddr_map(load_img_req.ifd_data_fd,
  2573. &pa, &vaddr, &sgt, &attach, &len, &dmabuf);
  2574. if (ret) {
  2575. pr_err("Ion client could not retrieve the handle\n");
  2576. ret = -ENOMEM;
  2577. goto loadapp_err;
  2578. }
  2579. if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
  2580. pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
  2581. len, load_img_req.mdt_len,
  2582. load_img_req.img_len);
  2583. ret = -EINVAL;
  2584. goto loadapp_err;
  2585. }
  2586. /* Populate the structure for sending scm call to load image */
  2587. if (qseecom.qsee_version < QSEE_VERSION_40) {
  2588. load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
  2589. load_req.mdt_len = load_img_req.mdt_len;
  2590. load_req.img_len = load_img_req.img_len;
  2591. strlcpy(load_req.app_name, load_img_req.img_name,
  2592. MAX_APP_NAME_SIZE);
  2593. load_req.phy_addr = (uint32_t)pa;
  2594. cmd_buf = (void *)&load_req;
  2595. cmd_len = sizeof(struct qseecom_load_app_ireq);
  2596. } else {
  2597. load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
  2598. load_req_64bit.mdt_len = load_img_req.mdt_len;
  2599. load_req_64bit.img_len = load_img_req.img_len;
  2600. strlcpy(load_req_64bit.app_name, load_img_req.img_name,
  2601. MAX_APP_NAME_SIZE);
  2602. load_req_64bit.phy_addr = (uint64_t)pa;
  2603. cmd_buf = (void *)&load_req_64bit;
  2604. cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
  2605. }
  2606. ret = qseecom_dmabuf_cache_operations(dmabuf,
  2607. QSEECOM_CACHE_CLEAN);
  2608. if (ret) {
  2609. pr_err("cache operation failed %d\n", ret);
  2610. goto loadapp_err;
  2611. }
  2612. /* SCM_CALL to load the app and get the app_id back */
  2613. ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
  2614. cmd_len, &resp, sizeof(resp));
  2615. if (ret) {
  2616. pr_err("scm_call to load app failed\n");
  2617. ret = -EINVAL;
  2618. goto loadapp_err;
  2619. }
  2620. ret = qseecom_dmabuf_cache_operations(dmabuf,
  2621. QSEECOM_CACHE_INVALIDATE);
  2622. if (ret) {
  2623. pr_err("cache operation failed %d\n", ret);
  2624. goto loadapp_err;
  2625. }
  2626. do {
  2627. if (resp.result == QSEOS_RESULT_FAILURE) {
  2628. pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
  2629. ret = -EFAULT;
  2630. goto loadapp_err;
  2631. }
  2632. if (resp.result == QSEOS_RESULT_INCOMPLETE) {
  2633. ret = __qseecom_process_incomplete_cmd(data, &resp);
  2634. if (ret) {
  2635. /* TZ has created app_id, need to unload it */
  2636. pr_err("incomp_cmd err %d, %d, unload %d %s\n",
  2637. ret, resp.result, resp.data,
  2638. load_img_req.img_name);
  2639. __qseecom_unload_app(data, resp.data);
  2640. ret = -EFAULT;
  2641. goto loadapp_err;
  2642. }
  2643. }
  2644. if (resp.result == QSEOS_RESULT_BLOCKED_ON_LISTENER) {
  2645. pr_err("load app blocked on listener\n");
  2646. data->client.app_id = resp.result;
  2647. data->client.from_loadapp = true;
  2648. ret = __qseecom_process_reentrancy_blocked_on_listener(&resp,
  2649. NULL, data);
  2650. if (ret) {
  2651. pr_err("load app fail proc block on listener,ret :%d\n",
  2652. ret);
  2653. ret = -EFAULT;
  2654. goto loadapp_err;
  2655. }
  2656. }
  2657. } while ((resp.result == QSEOS_RESULT_BLOCKED_ON_LISTENER) ||
  2658. (resp.result == QSEOS_RESULT_INCOMPLETE));
  2659. if (resp.result != QSEOS_RESULT_SUCCESS) {
  2660. pr_err("scm_call failed resp.result unknown, %d\n",
  2661. resp.result);
  2662. ret = -EFAULT;
  2663. goto loadapp_err;
  2664. }
  2665. app_id = resp.data;
  2666. entry = kmalloc(sizeof(*entry), GFP_KERNEL);
  2667. if (!entry) {
  2668. ret = -ENOMEM;
  2669. goto loadapp_err;
  2670. }
  2671. entry->app_id = app_id;
  2672. entry->ref_cnt = 1;
  2673. entry->app_arch = load_img_req.app_arch;
  2674. /*
  2675. * keymaster app may be first loaded as "keymaste" by qseecomd,
  2676. * and then used as "keymaster" on some targets. To avoid app
  2677. * name checking error, register "keymaster" into app_list and
  2678. * thread private data.
  2679. */
  2680. if (!strcmp(load_img_req.img_name, "keymaste"))
  2681. strlcpy(entry->app_name, "keymaster",
  2682. MAX_APP_NAME_SIZE);
  2683. else
  2684. strlcpy(entry->app_name, load_img_req.img_name,
  2685. MAX_APP_NAME_SIZE);
  2686. entry->app_blocked = false;
  2687. entry->blocked_on_listener_id = 0;
  2688. entry->check_block = 0;
  2689. spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
  2690. list_add_tail(&entry->list, &qseecom.registered_app_list_head);
  2691. spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
  2692. flags);
  2693. pr_warn("App with id %u (%s) now loaded\n", app_id,
  2694. (char *)(load_img_req.img_name));
  2695. }
  2696. data->client.app_id = app_id;
  2697. data->client.app_arch = load_img_req.app_arch;
  2698. if (!strcmp(load_img_req.img_name, "keymaste"))
  2699. strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
  2700. else
  2701. strlcpy(data->client.app_name, load_img_req.img_name,
  2702. MAX_APP_NAME_SIZE);
  2703. load_img_req.app_id = app_id;
  2704. if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
  2705. pr_err("copy_to_user failed\n");
  2706. ret = -EFAULT;
  2707. if (first_time) {
  2708. spin_lock_irqsave(
  2709. &qseecom.registered_app_list_lock, flags);
  2710. list_del(&entry->list);
  2711. spin_unlock_irqrestore(
  2712. &qseecom.registered_app_list_lock, flags);
  2713. kfree_sensitive(entry);
  2714. }
  2715. }
  2716. loadapp_err:
  2717. if (dmabuf) {
  2718. qseecom_vaddr_unmap(vaddr, sgt, attach, dmabuf);
  2719. MAKE_NULL(sgt, attach, dmabuf);
  2720. }
  2721. checkapp_err:
  2722. __qseecom_disable_clk_scale_down(data);
  2723. enable_clk_err:
  2724. if (qseecom.support_bus_scaling) {
  2725. mutex_lock(&qsee_bw_mutex);
  2726. qseecom_unregister_bus_bandwidth_needs(data);
  2727. mutex_unlock(&qsee_bw_mutex);
  2728. }
  2729. return ret;
  2730. }
/*
 * Drain all other in-flight ioctls on this handle before unloading its
 * app: wake everyone waiting on send_resp_wq, then wait until the
 * handle's ioctl_count drops to <= 1 (the caller itself).
 *
 * When QSEE reentrancy is supported, app_access_lock is dropped while
 * waiting so those ioctls can actually complete, and re-taken after.
 * Returns 0, or -ERESTARTSYS if the wait was interrupted by a signal.
 */
static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
{
	int ret = 0; /* Set unload app */

	wake_up_all(&qseecom.send_resp_wq);
	if (qseecom.qsee_reentrancy_support)
		mutex_unlock(&app_access_lock);
	while (atomic_read(&data->ioctl_count) > 1) {
		if (wait_event_interruptible(data->abort_wq,
			atomic_read(&data->ioctl_count) <= 1)) {
			pr_err("Interrupted from abort\n");
			ret = -ERESTARTSYS;
			break;
		}
	}
	if (qseecom.qsee_reentrancy_support)
		mutex_lock(&app_access_lock);
	return ret;
}
/*
 * Ask TZ to shut down app @app_id via an APP_SHUTDOWN scm call, then
 * loop on the response, servicing INCOMPLETE and BLOCKED_ON_LISTENER
 * results until TZ reports a terminal status.
 *
 * Returns 0 on successful unload, the scm call's error code if the call
 * itself failed, or -EFAULT on a FAILURE/unknown/unresolvable response.
 */
static int __qseecom_unload_app(struct qseecom_dev_handle *data,
				uint32_t app_id)
{
	struct qseecom_unload_app_ireq req;
	struct qseecom_command_scm_resp resp;
	int ret = 0;

	/* Populate the structure for sending scm call to load image */
	req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
	req.app_id = app_id;

	/* SCM_CALL to unload the app */
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
			sizeof(struct qseecom_unload_app_ireq),
			&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call to unload app (id = %d) failed ret: %d\n",
			app_id, ret);
		return ret;
	}
	do {
		switch (resp.result) {
		case QSEOS_RESULT_SUCCESS:
			pr_warn("App (%d) is unloaded\n", app_id);
			break;
		case QSEOS_RESULT_INCOMPLETE:
			/* resp is updated in place; the loop re-dispatches */
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret)
				pr_err("unload app %d fail proc incom cmd: %d,%d,%d\n",
					app_id, ret, resp.result, resp.data);
			else
				pr_warn("App (%d) is unloaded\n", app_id);
			break;
		case QSEOS_RESULT_FAILURE:
			pr_err("app (%d) unload_failed!!\n", app_id);
			ret = -EFAULT;
			break;
		case QSEOS_RESULT_BLOCKED_ON_LISTENER:
			pr_err("unload app (%d) blocked on listener\n", app_id);
			ret = __qseecom_process_reentrancy_blocked_on_listener(&resp, NULL, data);
			if (ret) {
				pr_err("unload app fail proc block on listener cmd,ret :%d\n",
					ret);
				ret = -EFAULT;
			}
			break;
		default:
			pr_err("unload app %d get unknown resp.result %d\n",
				app_id, resp.result);
			ret = -EFAULT;
			break;
		}
	} while ((resp.result == QSEOS_RESULT_INCOMPLETE) ||
		(resp.result == QSEOS_RESULT_BLOCKED_ON_LISTENER));
	return ret;
}
/*
 * Unload the app bound to this handle: drain other ioctls, drop the local
 * ref count, and when it reaches zero shut the app down in TZ and free
 * the list entry.  Apps whose name starts with "keymaste" are never
 * unloaded from TZ (shared keymaster TA).
 *
 * @app_crash: when true, a failed ioctl-drain is tolerated (the process
 * died; unload must proceed anyway).
 *
 * On -EBUSY from TZ the ref count is restored and the handle is left
 * intact so the unload can be retried.  On all other exits the client's
 * shared-buffer dma_buf mapping is torn down and the handle marked
 * released.
 */
static int qseecom_unload_app(struct qseecom_dev_handle *data,
				bool app_crash)
{
	unsigned long flags;
	int ret = 0;
	struct qseecom_registered_app_list *ptr_app = NULL;
	bool found_app = false;

	if (!data) {
		pr_err("Invalid/uninitialized device handle\n");
		return -EINVAL;
	}

	pr_debug("unload app %d(%s), app_crash flag %d\n", data->client.app_id,
		data->client.app_name, app_crash);

	if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
		pr_debug("Do not unload keymaster app from tz\n");
		goto unload_exit;
	}

	ret = __qseecom_cleanup_app(data);
	if (ret && !app_crash) {
		pr_err("cleanup app failed, pending ioctl:%d\n", data->ioctl_count);
		return ret;
	}

	__qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);

	/* ignore app_id 0, it happens when close qseecom_fd if load app fail*/
	if (!data->client.app_id)
		goto unload_exit;

	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
								list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			(!strcmp(ptr_app->app_name, data->client.app_name))) {
			pr_debug("unload app %d (%s), ref_cnt %d\n",
				ptr_app->app_id, ptr_app->app_name,
				ptr_app->ref_cnt);
			ptr_app->ref_cnt--;
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
								flags);
	if (!found_app) {
		pr_err("Cannot find app with id = %d (%s)\n",
			data->client.app_id, data->client.app_name);
		ret = -EINVAL;
		goto unload_exit;
	}

	if (!ptr_app->ref_cnt) {
		ret = __qseecom_unload_app(data, data->client.app_id);
		if (ret == -EBUSY) {
			/*
			 * If unload failed due to EBUSY, don't free mem
			 * just restore app ref_cnt and return -EBUSY
			 */
			pr_warn("unload ta %d(%s) EBUSY\n",
				data->client.app_id, data->client.app_name);
			ptr_app->ref_cnt++;
			return ret;
		}
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
		list_del(&ptr_app->list);
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
				flags);
		kfree_sensitive(ptr_app);
	}

unload_exit:
	if (data->client.dmabuf) {
		qseecom_vaddr_unmap(data->client.sb_virt, data->client.sgt,
			data->client.attach, data->client.dmabuf);
		MAKE_NULL(data->client.sgt,
			data->client.attach, data->client.dmabuf);
	}
	data->released = true;
	return ret;
}
/*
 * Queue this handle for deferred unload by the unload-app kthread.
 * Idempotent: if an unload is already pending for this client, nothing
 * is added.  Returns 0 on success or -ENOMEM.
 *
 * NOTE(review): unload_app_pending_list_head is traversed and modified
 * elsewhere under unload_app_pending_list_lock, but that mutex is not
 * taken around list_add_tail() here - confirm the caller serializes
 * this path (e.g. via app_access_lock), or the lock should be added.
 */
static int qseecom_prepare_unload_app(struct qseecom_dev_handle *data)
{
	struct qseecom_unload_app_pending_list *entry = NULL;

	pr_debug("prepare to unload app(%d)(%s), pending %d\n",
		data->client.app_id, data->client.app_name,
		data->client.unload_pending);
	if (data->client.unload_pending)
		return 0;
	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;
	entry->data = data;
	list_add_tail(&entry->list,
		&qseecom.unload_app_pending_list_head);
	data->client.unload_pending = true;
	pr_debug("unload ta %d pending\n", data->client.app_id);
	return 0;
}
/*
 * Kick the unload-app kthread: flag it WAKEUP (the state the kthread's
 * wait condition tests), then wake its wait queue.
 */
static void __wakeup_unload_app_kthread(void)
{
	atomic_set(&qseecom.unload_app_kthread_state,
				UNLOAD_APP_KT_WAKEUP);
	wake_up_interruptible(&qseecom.unload_app_kthread_wq);
}
  2902. static bool __qseecom_find_pending_unload_app(uint32_t app_id, char *app_name)
  2903. {
  2904. struct qseecom_unload_app_pending_list *entry = NULL;
  2905. bool found = false;
  2906. mutex_lock(&unload_app_pending_list_lock);
  2907. list_for_each_entry(entry, &qseecom.unload_app_pending_list_head,
  2908. list) {
  2909. if ((entry->data->client.app_id == app_id) &&
  2910. (!strcmp(entry->data->client.app_name, app_name))) {
  2911. found = true;
  2912. break;
  2913. }
  2914. }
  2915. mutex_unlock(&unload_app_pending_list_lock);
  2916. return found;
  2917. }
/*
 * Drain the pending-unload list: for each queued handle, unload its app
 * (as a crash-style unload) and free the handle and list entry.
 *
 * Lock order is delicate: the list mutex is dropped before taking
 * app_access_lock for the actual unload, then re-taken to delete the
 * entry - so app_access_lock is never acquired while holding
 * unload_app_pending_list_lock.
 */
static void __qseecom_processing_pending_unload_app(void)
{
	struct qseecom_unload_app_pending_list *entry = NULL;
	struct list_head *pos;
	int ret = 0;

	mutex_lock(&unload_app_pending_list_lock);
	while (!list_empty(&qseecom.unload_app_pending_list_head)) {
		pos = qseecom.unload_app_pending_list_head.next;
		entry = list_entry(pos,
			struct qseecom_unload_app_pending_list, list);
		if (entry && entry->data) {
			pr_debug("process pending unload app %d (%s)\n",
				entry->data->client.app_id,
				entry->data->client.app_name);
			/* drop list lock before taking app_access_lock */
			mutex_unlock(&unload_app_pending_list_lock);
			mutex_lock(&app_access_lock);
			ret = qseecom_unload_app(entry->data, true);
			if (ret)
				pr_err("unload app %d pending failed %d\n",
					entry->data->client.app_id, ret);
			mutex_unlock(&app_access_lock);
			mutex_lock(&unload_app_pending_list_lock);
			__qseecom_free_tzbuf(&entry->data->sglistinfo_shm);
			kfree_sensitive(entry->data);
		}
		list_del(pos);
		kfree_sensitive(entry);
	}
	mutex_unlock(&unload_app_pending_list_lock);
}
  2948. static int __qseecom_unload_app_kthread_func(void *data)
  2949. {
  2950. while (!kthread_should_stop()) {
  2951. wait_event_interruptible(
  2952. qseecom.unload_app_kthread_wq,
  2953. atomic_read(&qseecom.unload_app_kthread_state)
  2954. == UNLOAD_APP_KT_WAKEUP);
  2955. pr_debug("kthread to unload app is called, state %d\n",
  2956. atomic_read(&qseecom.unload_app_kthread_state));
  2957. __qseecom_processing_pending_unload_app();
  2958. atomic_set(&qseecom.unload_app_kthread_state,
  2959. UNLOAD_APP_KT_SLEEP);
  2960. }
  2961. pr_warn("kthread to unload app stopped\n");
  2962. return 0;
  2963. }
  2964. static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
  2965. unsigned long virt)
  2966. {
  2967. return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
  2968. }
  2969. static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
  2970. unsigned long virt)
  2971. {
  2972. return (uintptr_t)data->client.sb_virt +
  2973. (virt - data->client.user_virt_sb_base);
  2974. }
/*
 * Build the TZ service-command request for an RPMB provisioning/erase/
 * status command from the validated userspace request.  The client must
 * have placed its request at the very base of the shared buffer, and
 * the buffer must be large enough to hold the key-type header.
 *
 * Returns 0 on success, -EINVAL on a malformed request.
 */
static int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
		struct qseecom_send_svc_cmd_req *req_ptr,
		struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
{
	int ret = 0;
	void *req_buf = NULL;

	if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
		pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
			req_ptr, send_svc_ireq_ptr);
		return -EINVAL;
	}

	/* Clients need to ensure req_buf is at base offset of shared buffer */
	if ((uintptr_t)req_ptr->cmd_req_buf !=
			data_ptr->client.user_virt_sb_base) {
		pr_err("cmd buf not pointing to base offset of shared buffer\n");
		return -EINVAL;
	}

	if (data_ptr->client.sb_length <
			sizeof(struct qseecom_rpmb_provision_key)) {
		pr_err("shared buffer is too small to hold key type\n");
		return -EINVAL;
	}
	req_buf = data_ptr->client.sb_virt;

	send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
	/* key_type is read from the start of the client's shared buffer */
	send_svc_ireq_ptr->key_type =
		((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
	send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
	/* response pointer is passed to TZ as a physical address */
	send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
			data_ptr, (uintptr_t)req_ptr->resp_buf));
	send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;

	return ret;
}
/*
 * Build the TZ service-command request for FSM / diag-fuse commands.
 * Verifies the combined request+response length fits in the client's
 * shared buffer, then converts both buffer pointers to physical
 * addresses for TZ.
 *
 * Returns 0 on success, -EINVAL on null pointers, -ENOMEM when the
 * shared buffer is too small.
 */
static int __qseecom_process_fsm_key_svc_cmd(
		struct qseecom_dev_handle *data_ptr,
		struct qseecom_send_svc_cmd_req *req_ptr,
		struct qseecom_client_send_fsm_diag_req *send_svc_ireq_ptr)
{
	int ret = 0;
	uint32_t reqd_len_sb_in = 0;

	if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
		pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
			req_ptr, send_svc_ireq_ptr);
		return -EINVAL;
	}

	reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
	if (reqd_len_sb_in > data_ptr->client.sb_length) {
		pr_err("Not enough memory to fit cmd_buf and resp_buf.\n");
		pr_err("Required: %u, Available: %zu\n",
				reqd_len_sb_in, data_ptr->client.sb_length);
		return -ENOMEM;
	}

	send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
	send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
	send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
			data_ptr, (uintptr_t)req_ptr->resp_buf));
	send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;

	send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
			data_ptr, (uintptr_t)req_ptr->cmd_req_buf));

	return ret;
}
  3035. static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
  3036. struct qseecom_send_svc_cmd_req *req)
  3037. {
  3038. if (!req || !req->resp_buf || !req->cmd_req_buf) {
  3039. pr_err("req or cmd buffer or response buffer is null\n");
  3040. return -EINVAL;
  3041. }
  3042. if (!data || !data->client.sb_virt) {
  3043. pr_err("Client or client buf is not initialized\n");
  3044. return -EINVAL;
  3045. }
  3046. if (data->client.sb_virt == NULL) {
  3047. pr_err("sb_virt null\n");
  3048. return -EINVAL;
  3049. }
  3050. if (data->client.user_virt_sb_base == 0) {
  3051. pr_err("user_virt_sb_base is null\n");
  3052. return -EINVAL;
  3053. }
  3054. if (data->client.sb_length == 0) {
  3055. pr_err("sb_length is 0\n");
  3056. return -EINVAL;
  3057. }
  3058. if (((uintptr_t)req->cmd_req_buf <
  3059. data->client.user_virt_sb_base) ||
  3060. ((uintptr_t)req->cmd_req_buf >=
  3061. (data->client.user_virt_sb_base + data->client.sb_length))) {
  3062. pr_err("cmd buffer address not within shared bufffer\n");
  3063. return -EINVAL;
  3064. }
  3065. if (((uintptr_t)req->resp_buf <
  3066. data->client.user_virt_sb_base) ||
  3067. ((uintptr_t)req->resp_buf >=
  3068. (data->client.user_virt_sb_base + data->client.sb_length))) {
  3069. pr_err("response buffer address not within shared bufffer\n");
  3070. return -EINVAL;
  3071. }
  3072. if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
  3073. (req->cmd_req_len > data->client.sb_length) ||
  3074. (req->resp_len > data->client.sb_length)) {
  3075. pr_err("cmd buf length or response buf length not valid\n");
  3076. return -EINVAL;
  3077. }
  3078. if (req->cmd_req_len > UINT_MAX - req->resp_len) {
  3079. pr_err("Integer overflow detected in req_len & rsp_len\n");
  3080. return -EINVAL;
  3081. }
  3082. if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
  3083. pr_debug("Not enough memory to fit cmd_buf.\n");
  3084. pr_debug("resp_buf. Required: %u, Available: %zu\n",
  3085. (req->cmd_req_len + req->resp_len),
  3086. data->client.sb_length);
  3087. return -ENOMEM;
  3088. }
  3089. if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
  3090. pr_err("Integer overflow in req_len & cmd_req_buf\n");
  3091. return -EINVAL;
  3092. }
  3093. if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
  3094. pr_err("Integer overflow in resp_len & resp_buf\n");
  3095. return -EINVAL;
  3096. }
  3097. if (data->client.user_virt_sb_base >
  3098. (ULONG_MAX - data->client.sb_length)) {
  3099. pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
  3100. return -EINVAL;
  3101. }
  3102. if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
  3103. ((uintptr_t)data->client.user_virt_sb_base +
  3104. data->client.sb_length)) ||
  3105. (((uintptr_t)req->resp_buf + req->resp_len) >
  3106. ((uintptr_t)data->client.user_virt_sb_base +
  3107. data->client.sb_length))) {
  3108. pr_err("cmd buf or resp buf is out of shared buffer region\n");
  3109. return -EINVAL;
  3110. }
  3111. return 0;
  3112. }
/*
 * Handle a send-service-command ioctl: validate the request, build the
 * matching TZ request (RPMB or FSM/diag flavor), vote for bandwidth or
 * clocks, make the scm call with cache clean/invalidate around it, and
 * decode the response.  For RPMB status checks, an INCOMPLETE response
 * carries the key status, which is written back to the client's
 * response buffer.
 *
 * Returns 0 on success or a negative errno.
 */
static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
				void __user *argp)
{
	int ret = 0;
	struct qseecom_client_send_service_ireq send_svc_ireq;
	struct qseecom_client_send_fsm_diag_req send_fsm_diag_svc_ireq;
	struct qseecom_command_scm_resp resp;
	struct qseecom_send_svc_cmd_req req;
	void *send_req_ptr;
	size_t req_buf_size;

	if (copy_from_user(&req,
				(void __user *)argp,
				sizeof(req))) {
		pr_err("copy_from_user failed\n");
		return -EFAULT;
	}

	if (__validate_send_service_cmd_inputs(data, &req))
		return -EINVAL;

	data->type = QSEECOM_SECURE_SERVICE;

	/* select the TZ request layout based on the command family */
	switch (req.cmd_id) {
	case QSEOS_RPMB_PROVISION_KEY_COMMAND:
	case QSEOS_RPMB_ERASE_COMMAND:
	case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
		send_req_ptr = &send_svc_ireq;
		req_buf_size = sizeof(send_svc_ireq);
		if (__qseecom_process_rpmb_svc_cmd(data, &req,
				send_req_ptr))
			return -EINVAL;
		break;
	case QSEOS_FSM_LTEOTA_REQ_CMD:
	case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
	case QSEOS_FSM_IKE_REQ_CMD:
	case QSEOS_FSM_IKE_REQ_RSP_CMD:
	case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
	case QSEOS_FSM_OEM_FUSE_READ_ROW:
	case QSEOS_FSM_ENCFS_REQ_CMD:
	case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
	case QSEOS_DIAG_FUSE_REQ_CMD:
	case QSEOS_DIAG_FUSE_REQ_RSP_CMD:
		send_req_ptr = &send_fsm_diag_svc_ireq;
		req_buf_size = sizeof(send_fsm_diag_svc_ireq);
		if (__qseecom_process_fsm_key_svc_cmd(data, &req,
				send_req_ptr))
			return -EINVAL;
		break;
	default:
		pr_err("Unsupported cmd_id %d\n", req.cmd_id);
		return -EINVAL;
	}

	/* flush shared buffer so TZ reads what the client wrote */
	ret = qseecom_dmabuf_cache_operations(data->client.dmabuf,
					QSEECOM_CACHE_CLEAN);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	if (qseecom.support_bus_scaling) {
		ret = qseecom_scale_bus_bandwidth_timer(HIGH);
		if (ret) {
			pr_err("Fail to set bw HIGH\n");
			return ret;
		}
	} else {
		ret = qseecom_perf_enable(data);
		if (ret) {
			pr_err("Failed to vote for clocks with err %d\n", ret);
			return ret;
		}
	}

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				(const void *)send_req_ptr,
				req_buf_size, &resp, sizeof(resp));
	if (ret) {
		pr_err("qseecom_scm_call failed with err: %d\n", ret);
		goto exit;
	}
	/* invalidate so the CPU sees TZ's writes to the shared buffer */
	ret = qseecom_dmabuf_cache_operations(data->client.dmabuf,
					QSEECOM_CACHE_INVALIDATE);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		pr_debug("qseos_result_incomplete\n");
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd fail with result: %d\n",
				resp.result);
		}
		if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
			/* status check: resp.result IS the key status */
			pr_warn("RPMB key status is 0x%x\n", resp.result);
			if (put_user(resp.result,
				(uint32_t __user *)req.resp_buf)) {
				ret = -EINVAL;
				goto exit;
			}
			ret = 0;
		}
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm call failed with resp.result: %d\n", resp.result);
		ret = -EINVAL;
		break;
	default:
		pr_err("Response result %d not supported\n",
				resp.result);
		ret = -EINVAL;
		break;
	}

exit:
	/* undo the clock / bandwidth vote taken above */
	if (!qseecom.support_bus_scaling) {
		qsee_disable_clock_vote(data, CLK_DFAB);
		qsee_disable_clock_vote(data, CLK_SFPB);
	} else {
		__qseecom_add_bw_scale_down_timer(
			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
	}
	return ret;
}
/*
 * Validate a send-command request against the client's shared buffer:
 * bounds, lengths, and overflow checks mirroring
 * __validate_send_service_cmd_inputs(), except a NULL resp_buf is
 * allowed when resp_len is 0.
 *
 * NOTE(review): the first check permits resp_buf == NULL with
 * resp_len == 0, but the later range check ((uintptr_t)resp_buf <
 * user_virt_sb_base) would then reject NULL anyway whenever the base is
 * non-zero - confirm whether callers rely on the NULL+0 combination.
 *
 * Returns 0 when acceptable, -EINVAL/-ENOMEM otherwise.
 */
static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
				struct qseecom_send_cmd_req *req)
{
	if (!data || !data->client.sb_virt) {
		pr_err("Client or client buf is not initialized\n");
		return -EINVAL;
	}
	if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
						(req->cmd_req_buf == NULL)) {
		pr_err("cmd buffer or response buffer is null\n");
		return -EINVAL;
	}
	if (((uintptr_t)req->cmd_req_buf <
			data->client.user_virt_sb_base) ||
		((uintptr_t)req->cmd_req_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if (((uintptr_t)req->resp_buf <
			data->client.user_virt_sb_base)  ||
		((uintptr_t)req->resp_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if ((req->cmd_req_len == 0) ||
		(req->cmd_req_len > data->client.sb_length) ||
		(req->resp_len > data->client.sb_length)) {
		pr_err("cmd buf length or response buf length not valid\n");
		return -EINVAL;
	}
	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
				(req->cmd_req_len + req->resp_len),
					data->client.sb_length);
		return -ENOMEM;
	}
	/* guard the end-of-buffer checks below against wraparound */
	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
		pr_err("Integer overflow in req_len & cmd_req_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* both buffers must also end inside the shared buffer */
	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length)) ||
		(((uintptr_t)req->resp_buf + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
/*
 * Post-process an scm response in reentrancy mode.  BLOCKED_ON_LISTENER
 * is resolved first and then falls through into INCOMPLETE handling,
 * which marks the app blocked (and bumps the global block ref count)
 * for the duration of the incomplete-command processing.
 *
 * NOTE(review): the INCOMPLETE path dereferences ptr_app without a NULL
 * check - callers must pass a valid registered-app entry here.
 *
 * Returns 0 on success/SUCCESS, the processing error otherwise, or
 * -EINVAL for an unrecognized result code.
 */
static int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
			struct qseecom_registered_app_list *ptr_app,
			struct qseecom_dev_handle *data)
{
	int ret = 0;

	switch (resp->result) {
	case QSEOS_RESULT_BLOCKED_ON_LISTENER:
		pr_warn("App(%d) %s is blocked on listener %d\n",
			data->client.app_id, data->client.app_name,
			resp->data);
		ret = __qseecom_process_reentrancy_blocked_on_listener(
					resp, ptr_app, data);
		if (ret) {
			pr_err("failed to process App(%d) %s is blocked on listener %d\n",
			data->client.app_id, data->client.app_name, resp->data);
			return ret;
		}
		fallthrough;
		/* fall through to process incomplete request */
	case QSEOS_RESULT_INCOMPLETE:
		qseecom.app_block_ref_cnt++;
		ptr_app->app_blocked = true;
		ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
		ptr_app->app_blocked = false;
		qseecom.app_block_ref_cnt--;
		/* let any waiters re-check the block condition */
		wake_up_interruptible_all(&qseecom.app_block_wq);
		if (ret)
			pr_err("process_incomplete_cmd failed err: %d\n",
					ret);
		return ret;
	case QSEOS_RESULT_SUCCESS:
		return ret;
	default:
		pr_err("Response result %d not supported\n",
						resp->result);
		return -EINVAL;
	}
	return ret;
}
/*
 * __qseecom_send_cmd() - build and issue a CLIENT_SEND_DATA SCM call to a
 * loaded QSEE application, then post-process the response.
 * @data:        client handle; identifies the target app and its shared
 *               buffer / sglist info.
 * @req:         request/response buffer pointers and lengths. Callers are
 *               expected to have validated these already (see
 *               __validate_send_cmd_inputs()).
 * @is_phys_adr: when true, req->cmd_req_buf/resp_buf already hold physical
 *               addresses; otherwise they are user-virtual addresses that
 *               are translated via __qseecom_uvirt_to_kphys().
 *
 * Return: 0 on success, -ENOENT if the app is unknown or being unloaded,
 * -EFAULT/-EINVAL on address or response errors, or the scm/cache-op error.
 */
static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
			struct qseecom_send_cmd_req *req,
			bool is_phys_adr)
{
	int ret = 0;
	u32 reqd_len_sb_in = 0;
	struct qseecom_client_send_data_ireq send_data_req = {0};
	struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
	struct qseecom_command_scm_resp resp;
	unsigned long flags;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	void *cmd_buf = NULL;
	size_t cmd_len;

	/* NOTE(review): reqd_len_sb_in is computed but never read below. */
	reqd_len_sb_in = req->cmd_req_len + req->resp_len;
	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
			list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			(!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}
	/* Refuse to talk to an app that has an unload in flight. */
	if (__qseecom_find_pending_unload_app(data->client.app_id,
						data->client.app_name)) {
		pr_err("app %d (%s) unload is pending\n",
			data->client.app_id, data->client.app_name);
		return -ENOENT;
	}
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		/* Legacy (<4.0) firmware: 32-bit ireq layout. */
		send_data_req.app_id = data->client.app_id;
		if (!is_phys_adr) {
			send_data_req.req_ptr =
				(uint32_t)(__qseecom_uvirt_to_kphys
				(data, (uintptr_t)req->cmd_req_buf));
			send_data_req.rsp_ptr =
				(uint32_t)(__qseecom_uvirt_to_kphys(
				data, (uintptr_t)req->resp_buf));
		} else {
			send_data_req.req_ptr = (uint32_t)(uintptr_t)req->cmd_req_buf;
			send_data_req.rsp_ptr = (uint32_t)(uintptr_t)req->resp_buf;
		}
		send_data_req.req_len = req->cmd_req_len;
		send_data_req.rsp_len = req->resp_len;
		send_data_req.sglistinfo_ptr =
			(uint32_t)data->sglistinfo_shm.paddr;
		send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		/* Make the sglist table visible to TZ before the SCM call. */
		qtee_shmbridge_flush_shm_buf(&data->sglistinfo_shm);
		cmd_buf = (void *)&send_data_req;
		cmd_len = sizeof(struct qseecom_client_send_data_ireq);
	} else {
		/* QSEE >= 4.0: 64-bit ireq layout. */
		send_data_req_64bit.app_id = data->client.app_id;
		if (!is_phys_adr) {
			send_data_req_64bit.req_ptr =
				__qseecom_uvirt_to_kphys(data,
				(uintptr_t)req->cmd_req_buf);
			send_data_req_64bit.rsp_ptr =
				__qseecom_uvirt_to_kphys(data,
				(uintptr_t)req->resp_buf);
		} else {
			send_data_req_64bit.req_ptr =
				(uintptr_t)req->cmd_req_buf;
			send_data_req_64bit.rsp_ptr =
				(uintptr_t)req->resp_buf;
		}
		send_data_req_64bit.req_len = req->cmd_req_len;
		send_data_req_64bit.rsp_len = req->resp_len;
		/* check if 32bit app's phys_addr region is under 4GB.*/
		if ((data->client.app_arch == ELFCLASS32) &&
			((send_data_req_64bit.req_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.req_len) ||
			(send_data_req_64bit.rsp_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
			pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
				data->client.app_name,
				send_data_req_64bit.req_ptr,
				send_data_req_64bit.req_len,
				send_data_req_64bit.rsp_ptr,
				send_data_req_64bit.rsp_len);
			return -EFAULT;
		}
		send_data_req_64bit.sglistinfo_ptr =
			(uint64_t)data->sglistinfo_shm.paddr;
		send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		qtee_shmbridge_flush_shm_buf(&data->sglistinfo_shm);
		cmd_buf = (void *)&send_data_req_64bit;
		cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
	}
	/* First field of both ireq layouts is the command id. */
	if (!qseecom.whitelist_support || data->use_legacy_cmd)
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
	else
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;
	/* Clean the client buffer so TZ sees the latest request bytes. */
	if (data->client.dmabuf) {
		ret = qseecom_dmabuf_cache_operations(data->client.dmabuf,
			QSEECOM_CACHE_CLEAN);
		if (ret) {
			pr_err("cache operation failed %d\n", ret);
			return ret;
		}
	}
	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
			ret, data->client.app_id);
		goto exit;
	}
	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
		if (ret)
			goto exit;
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
					ret);
				goto exit;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
					resp.result);
				ret = -EINVAL;
				goto exit;
			}
		}
	}
	/* Invalidate so the CPU sees the response TZ wrote into the buffer. */
	if (data->client.dmabuf) {
		ret = qseecom_dmabuf_cache_operations(data->client.dmabuf,
			QSEECOM_CACHE_INVALIDATE);
		if (ret) {
			pr_err("cache operation failed %d\n", ret);
			goto exit;
		}
	}
exit:
	return ret;
}
  3490. static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
  3491. {
  3492. int ret = 0;
  3493. struct qseecom_send_cmd_req req;
  3494. ret = copy_from_user(&req, argp, sizeof(req));
  3495. if (ret) {
  3496. pr_err("copy_from_user failed\n");
  3497. return ret;
  3498. }
  3499. if (__validate_send_cmd_inputs(data, &req))
  3500. return -EINVAL;
  3501. ret = __qseecom_send_cmd(data, &req, false);
  3502. return ret;
  3503. }
/*
 * __boundary_checks_offset() - validate an ion fd's cmd_buf_offset.
 * @req:        modfd command request (used for client-app callers).
 * @lstnr_resp: modfd listener response (used for listener callers).
 * @data:       caller handle; data->type selects which of req/lstnr_resp
 *              is consulted.
 * @i:          index of the ifd_data slot being patched.
 * @size:       number of bytes that will be written at the offset.
 *
 * Ensures the @size-byte patch at ifd_data[i].cmd_buf_offset stays inside
 * the request (or listener response) buffer, and that it does not overlap
 * the patch location of any other active ifd_data slot.
 *
 * Return: 0 if the offset is acceptable, -EINVAL otherwise.
 */
static int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
			struct qseecom_send_modfd_listener_resp *lstnr_resp,
			struct qseecom_dev_handle *data, int i, size_t size)
{
	char *curr_field = NULL;
	char *temp_field = NULL;
	int j = 0;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
		(req->ifd_data[i].fd > 0)) {
		/* Offset must leave room for 'size' bytes in cmd_req_buf. */
		if ((req->cmd_req_len < size) ||
			(req->ifd_data[i].cmd_buf_offset >
			req->cmd_req_len - size)) {
			pr_err("Invalid offset (req len) 0x%x\n",
				req->ifd_data[i].cmd_buf_offset);
			return -EINVAL;
		}
		curr_field = (char *) (req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset);
		/* Reject any other fd whose patch site lands inside ours. */
		for (j = 0; j < MAX_ION_FD; j++) {
			if ((req->ifd_data[j].fd > 0) && i != j) {
				temp_field = (char *) (req->cmd_req_buf +
					req->ifd_data[j].cmd_buf_offset);
				if (temp_field >= curr_field && temp_field <
					(curr_field + size)) {
					pr_err("Invalid field offset 0x%x\n",
						req->ifd_data[i].cmd_buf_offset);
					return -EINVAL;
				}
			}
		}
	} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
		(lstnr_resp->ifd_data[i].fd > 0)) {
		/* Same checks, but against the listener response buffer. */
		if ((lstnr_resp->resp_len < size) ||
			(lstnr_resp->ifd_data[i].cmd_buf_offset >
			lstnr_resp->resp_len - size)) {
			pr_err("Invalid offset (lstnr resp len) 0x%x\n",
				lstnr_resp->ifd_data[i].cmd_buf_offset);
			return -EINVAL;
		}
		curr_field = (char *) (lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset);
		for (j = 0; j < MAX_ION_FD; j++) {
			if ((lstnr_resp->ifd_data[j].fd > 0) && i != j) {
				temp_field = (char *) lstnr_resp->resp_buf_ptr +
					lstnr_resp->ifd_data[j].cmd_buf_offset;
				if (temp_field >= curr_field && temp_field <
					(curr_field + size)) {
					pr_err("Invalid lstnr field offset 0x%x\n",
						lstnr_resp->ifd_data[i].cmd_buf_offset);
					return -EINVAL;
				}
			}
		}
	}
	return 0;
}
/*
 * __qseecom_update_cmd_buf() - patch 32-bit physical addresses of ion fds
 * into the command (or listener response) buffer, or undo the patch.
 * @msg:     struct qseecom_send_modfd_cmd_req (client app) or
 *           struct qseecom_send_modfd_listener_resp (listener), selected by
 *           data->type.
 * @cleanup: false = map each fd, write its phys addr / sg list into the
 *           buffer and record sglist info; true = zero the patched fields
 *           and invalidate caches after the command completed.
 * @data:    caller handle.
 *
 * Return: 0 on success, -EINVAL/-EFAULT on bad input, -ENOMEM on any
 * map/patch failure (all failures funnel through the err label).
 */
static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
			struct qseecom_dev_handle *data)
{
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;
	uint32_t offset;
	struct sg_table *sg_ptr = NULL;
	int ion_fd = -1;
	struct dma_buf *dmabuf = NULL;
	struct dma_buf_attachment *attach = NULL;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP))
		return -EFAULT;
	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	if (data->type == QSEECOM_LISTENER_SERVICE) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		req = (struct qseecom_send_modfd_cmd_req *)msg;
	}
	for (i = 0; i < MAX_ION_FD; i++) {
		/* Locate the field to patch for this fd (if the slot is used). */
		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
				(req->ifd_data[i].fd > 0)) {
			ion_fd = req->ifd_data[i].fd;
			field = (char *) req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset;
		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ion_fd = lstnr_resp->ifd_data[i].fd;
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		ret = qseecom_dmabuf_map(ion_fd, &sg_ptr, &attach, &dmabuf);
		if (ret) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			pr_err("Num of scattered entries\n");
			pr_err(" (%d) is greater than max supported %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			goto err;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			/* Single entry: patch one 32-bit phys address. */
			uint32_t *update;

			if (__boundary_checks_offset(req, lstnr_resp, data, i, sizeof(uint32_t)))
				goto err;
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				/*
				 * Check if sg list phy add region is under 4GB
				 */
				if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
					(!cleanup) &&
					((uint64_t)sg_dma_address(sg_ptr->sgl)
					>= PHY_ADDR_4G - sg->length)) {
					pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
						data->client.app_name,
						&(sg_dma_address(sg_ptr->sgl)),
						sg->length);
					goto err;
				}
				update = (uint32_t *) field;
				*update = cleanup ? 0 :
					(uint32_t)sg_dma_address(sg_ptr->sgl);
			} else {
				pr_err("QSEE app arch %u is not supported\n",
					data->client.app_arch);
				goto err;
			}
			len += (uint32_t)sg->length;
		} else {
			/* Multiple entries: patch an sg-entry array. */
			struct qseecom_sg_entry *update;
			int j = 0;

			if (__boundary_checks_offset(req, lstnr_resp, data, i,
				(SG_ENTRY_SZ * sg_ptr->nents)))
				goto err;
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				update = (struct qseecom_sg_entry *)field;
				for (j = 0; j < sg_ptr->nents; j++) {
					/*
					 * Check if sg list PA is under 4GB
					 */
					if ((qseecom.qsee_version >=
						QSEE_VERSION_40) &&
						(!cleanup) &&
						((uint64_t)(sg_dma_address(sg))
						>= PHY_ADDR_4G - sg->length)) {
						pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
							data->client.app_name,
							&(sg_dma_address(sg)),
							sg->length);
						goto err;
					}
					update->phys_addr = cleanup ? 0 :
						(uint32_t)sg_dma_address(sg);
					update->len = cleanup ? 0 : sg->length;
					update++;
					len += sg->length;
					sg = sg_next(sg);
				}
			} else {
				pr_err("QSEE app arch %u is not supported\n",
					data->client.app_arch);
				goto err;
			}
		}
		if (cleanup) {
			ret = qseecom_dmabuf_cache_operations(dmabuf,
				QSEECOM_CACHE_INVALIDATE);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			ret = qseecom_dmabuf_cache_operations(dmabuf,
				QSEECOM_CACHE_CLEAN);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			/* Record sglist info for the whitelist SCM call. */
			if (data->type == QSEECOM_CLIENT_APP) {
				offset = req->ifd_data[i].cmd_buf_offset;
				data->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				data->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				data->sglist_cnt = i + 1;
			} else {
				/* Offset is relative to the listener's shared buffer. */
				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
					(uintptr_t)this_lstnr->sb_virt);
				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				this_lstnr->sglist_cnt = i + 1;
			}
		}
		/* Deallocate the kbuf */
		qseecom_dmabuf_unmap(sg_ptr, attach, dmabuf);
		sg_ptr = NULL;
		dmabuf = NULL;
		attach = NULL;
	}
	return ret;
err:
	if (!IS_ERR_OR_NULL(sg_ptr)) {
		qseecom_dmabuf_unmap(sg_ptr, attach, dmabuf);
		MAKE_NULL(sg_ptr, attach, dmabuf);
	}
	return -ENOMEM;
}
  3743. static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
  3744. char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
  3745. {
  3746. struct scatterlist *sg = sg_ptr->sgl;
  3747. struct qseecom_sg_entry_64bit *sg_entry;
  3748. struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
  3749. void *buf;
  3750. uint i;
  3751. size_t size;
  3752. dma_addr_t coh_pmem;
  3753. if (fd_idx >= MAX_ION_FD) {
  3754. pr_err("fd_idx [%d] is invalid\n", fd_idx);
  3755. return -ENOMEM;
  3756. }
  3757. buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
  3758. memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
  3759. /* Allocate a contiguous kernel buffer */
  3760. size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
  3761. size = (size + PAGE_SIZE) & PAGE_MASK;
  3762. buf = dma_alloc_coherent(qseecom.dev,
  3763. size, &coh_pmem, GFP_KERNEL);
  3764. if (buf == NULL)
  3765. return -ENOMEM;
  3766. /* update qseecom_sg_list_buf_hdr_64bit */
  3767. buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
  3768. buf_hdr->new_buf_phys_addr = coh_pmem;
  3769. buf_hdr->nents_total = sg_ptr->nents;
  3770. /* save the left sg entries into new allocated buf */
  3771. sg_entry = (struct qseecom_sg_entry_64bit *)buf;
  3772. for (i = 0; i < sg_ptr->nents; i++) {
  3773. sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
  3774. sg_entry->len = sg->length;
  3775. sg_entry++;
  3776. sg = sg_next(sg);
  3777. }
  3778. data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
  3779. data->client.sec_buf_fd[fd_idx].vbase = buf;
  3780. data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
  3781. data->client.sec_buf_fd[fd_idx].size = size;
  3782. return 0;
  3783. }
  3784. static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
  3785. struct qseecom_dev_handle *data)
  3786. {
  3787. char *field;
  3788. int ret = 0;
  3789. int i = 0;
  3790. uint32_t len = 0;
  3791. struct scatterlist *sg;
  3792. struct qseecom_send_modfd_cmd_req *req = NULL;
  3793. struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
  3794. struct qseecom_registered_listener_list *this_lstnr = NULL;
  3795. uint32_t offset;
  3796. struct sg_table *sg_ptr;
  3797. int ion_fd = -1;
  3798. struct dma_buf *dmabuf = NULL;
  3799. struct dma_buf_attachment *attach = NULL;
  3800. if ((data->type != QSEECOM_LISTENER_SERVICE) &&
  3801. (data->type != QSEECOM_CLIENT_APP))
  3802. return -EFAULT;
  3803. if (msg == NULL) {
  3804. pr_err("Invalid address\n");
  3805. return -EINVAL;
  3806. }
  3807. if (data->type == QSEECOM_LISTENER_SERVICE) {
  3808. lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
  3809. this_lstnr = __qseecom_find_svc(data->listener.id);
  3810. if (IS_ERR_OR_NULL(this_lstnr)) {
  3811. pr_err("Invalid listener ID\n");
  3812. return -ENOMEM;
  3813. }
  3814. } else {
  3815. req = (struct qseecom_send_modfd_cmd_req *)msg;
  3816. }
  3817. for (i = 0; i < MAX_ION_FD; i++) {
  3818. if ((data->type != QSEECOM_LISTENER_SERVICE) &&
  3819. (req->ifd_data[i].fd > 0)) {
  3820. ion_fd = req->ifd_data[i].fd;
  3821. field = (char *) req->cmd_req_buf +
  3822. req->ifd_data[i].cmd_buf_offset;
  3823. } else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
  3824. (lstnr_resp->ifd_data[i].fd > 0)) {
  3825. ion_fd = lstnr_resp->ifd_data[i].fd;
  3826. field = lstnr_resp->resp_buf_ptr +
  3827. lstnr_resp->ifd_data[i].cmd_buf_offset;
  3828. } else {
  3829. continue;
  3830. }
  3831. /* Populate the cmd data structure with the phys_addr */
  3832. ret = qseecom_dmabuf_map(ion_fd, &sg_ptr, &attach, &dmabuf);
  3833. if (ret) {
  3834. pr_err("IOn client could not retrieve sg table\n");
  3835. goto err;
  3836. }
  3837. if (sg_ptr->nents == 0) {
  3838. pr_err("Num of scattered entries is 0\n");
  3839. goto err;
  3840. }
  3841. if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
  3842. pr_warn("Num of scattered entries\n");
  3843. pr_warn(" (%d) is greater than %d\n",
  3844. sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
  3845. if (cleanup) {
  3846. if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
  3847. data->client.sec_buf_fd[i].vbase)
  3848. dma_free_coherent(qseecom.dev,
  3849. data->client.sec_buf_fd[i].size,
  3850. data->client.sec_buf_fd[i].vbase,
  3851. data->client.sec_buf_fd[i].pbase);
  3852. } else {
  3853. ret = __qseecom_allocate_sg_list_buffer(data,
  3854. field, i, sg_ptr);
  3855. if (ret) {
  3856. pr_err("Failed to allocate sg list buffer\n");
  3857. goto err;
  3858. }
  3859. }
  3860. len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
  3861. sg = sg_ptr->sgl;
  3862. goto cleanup;
  3863. }
  3864. sg = sg_ptr->sgl;
  3865. if (sg_ptr->nents == 1) {
  3866. uint64_t *update_64bit;
  3867. if (__boundary_checks_offset(req, lstnr_resp, data, i, sizeof(uint64_t)))
  3868. goto err;
  3869. /* 64bit app uses 64bit address */
  3870. update_64bit = (uint64_t *) field;
  3871. *update_64bit = cleanup ? 0 :
  3872. (uint64_t)sg_dma_address(sg_ptr->sgl);
  3873. len += (uint32_t)sg->length;
  3874. } else {
  3875. struct qseecom_sg_entry_64bit *update_64bit;
  3876. int j = 0;
  3877. if (__boundary_checks_offset(req, lstnr_resp, data, i,
  3878. (SG_ENTRY_SZ_64BIT * sg_ptr->nents)))
  3879. goto err;
  3880. /* 64bit app uses 64bit address */
  3881. update_64bit = (struct qseecom_sg_entry_64bit *)field;
  3882. for (j = 0; j < sg_ptr->nents; j++) {
  3883. update_64bit->phys_addr = cleanup ? 0 :
  3884. (uint64_t)sg_dma_address(sg);
  3885. update_64bit->len = cleanup ? 0 :
  3886. (uint32_t)sg->length;
  3887. update_64bit++;
  3888. len += sg->length;
  3889. sg = sg_next(sg);
  3890. }
  3891. }
  3892. cleanup:
  3893. if (cleanup) {
  3894. ret = qseecom_dmabuf_cache_operations(dmabuf,
  3895. QSEECOM_CACHE_INVALIDATE);
  3896. if (ret) {
  3897. pr_err("cache operation failed %d\n", ret);
  3898. goto err;
  3899. }
  3900. } else {
  3901. ret = qseecom_dmabuf_cache_operations(dmabuf,
  3902. QSEECOM_CACHE_CLEAN);
  3903. if (ret) {
  3904. pr_err("cache operation failed %d\n", ret);
  3905. goto err;
  3906. }
  3907. if (data->type == QSEECOM_CLIENT_APP) {
  3908. offset = req->ifd_data[i].cmd_buf_offset;
  3909. data->sglistinfo_ptr[i].indexAndFlags =
  3910. SGLISTINFO_SET_INDEX_FLAG(
  3911. (sg_ptr->nents == 1), 1, offset);
  3912. data->sglistinfo_ptr[i].sizeOrCount =
  3913. (sg_ptr->nents == 1) ?
  3914. sg->length : sg_ptr->nents;
  3915. data->sglist_cnt = i + 1;
  3916. } else {
  3917. offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
  3918. + (uintptr_t)lstnr_resp->resp_buf_ptr -
  3919. (uintptr_t)this_lstnr->sb_virt);
  3920. this_lstnr->sglistinfo_ptr[i].indexAndFlags =
  3921. SGLISTINFO_SET_INDEX_FLAG(
  3922. (sg_ptr->nents == 1), 1, offset);
  3923. this_lstnr->sglistinfo_ptr[i].sizeOrCount =
  3924. (sg_ptr->nents == 1) ?
  3925. sg->length : sg_ptr->nents;
  3926. this_lstnr->sglist_cnt = i + 1;
  3927. }
  3928. }
  3929. /* unmap the dmabuf */
  3930. qseecom_dmabuf_unmap(sg_ptr, attach, dmabuf);
  3931. sg_ptr = NULL;
  3932. dmabuf = NULL;
  3933. attach = NULL;
  3934. }
  3935. return ret;
  3936. err:
  3937. for (i = 0; i < MAX_ION_FD; i++)
  3938. if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
  3939. data->client.sec_buf_fd[i].vbase)
  3940. dma_free_coherent(qseecom.dev,
  3941. data->client.sec_buf_fd[i].size,
  3942. data->client.sec_buf_fd[i].vbase,
  3943. data->client.sec_buf_fd[i].pbase);
  3944. if (!IS_ERR_OR_NULL(sg_ptr)) {
  3945. qseecom_dmabuf_unmap(sg_ptr, attach, dmabuf);
  3946. MAKE_NULL(sg_ptr, attach, dmabuf);
  3947. }
  3948. return -ENOMEM;
  3949. }
  3950. static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
  3951. void __user *argp,
  3952. bool is_64bit_addr)
  3953. {
  3954. int ret = 0;
  3955. int i;
  3956. struct qseecom_send_modfd_cmd_req req;
  3957. struct qseecom_send_cmd_req send_cmd_req;
  3958. void *origin_req_buf_kvirt, *origin_rsp_buf_kvirt;
  3959. phys_addr_t pa;
  3960. u8 *va = NULL;
  3961. ret = copy_from_user(&req, argp, sizeof(req));
  3962. if (ret) {
  3963. pr_err("copy_from_user failed\n");
  3964. return ret;
  3965. }
  3966. send_cmd_req.cmd_req_buf = req.cmd_req_buf;
  3967. send_cmd_req.cmd_req_len = req.cmd_req_len;
  3968. send_cmd_req.resp_buf = req.resp_buf;
  3969. send_cmd_req.resp_len = req.resp_len;
  3970. if (__validate_send_cmd_inputs(data, &send_cmd_req))
  3971. return -EINVAL;
  3972. /* validate offsets */
  3973. for (i = 0; i < MAX_ION_FD; i++) {
  3974. if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
  3975. pr_err("Invalid offset %d = 0x%x\n",
  3976. i, req.ifd_data[i].cmd_buf_offset);
  3977. return -EINVAL;
  3978. }
  3979. }
  3980. /*Back up original address */
  3981. origin_req_buf_kvirt = (void *)__qseecom_uvirt_to_kvirt(data,
  3982. (uintptr_t)req.cmd_req_buf);
  3983. origin_rsp_buf_kvirt = (void *)__qseecom_uvirt_to_kvirt(data,
  3984. (uintptr_t)req.resp_buf);
  3985. /* Allocate kernel buffer for request and response*/
  3986. ret = __qseecom_alloc_coherent_buf(req.cmd_req_len + req.resp_len,
  3987. &va, &pa);
  3988. if (ret) {
  3989. pr_err("Failed to allocate coherent buf, ret %d\n", ret);
  3990. return ret;
  3991. }
  3992. req.cmd_req_buf = va;
  3993. send_cmd_req.cmd_req_buf = (void *)pa;
  3994. req.resp_buf = va + req.cmd_req_len;
  3995. send_cmd_req.resp_buf = (void *)pa + req.cmd_req_len;
  3996. /* Copy the data to kernel request and response buffers*/
  3997. memcpy(req.cmd_req_buf, origin_req_buf_kvirt, req.cmd_req_len);
  3998. memcpy(req.resp_buf, origin_rsp_buf_kvirt, req.resp_len);
  3999. if (!is_64bit_addr) {
  4000. ret = __qseecom_update_cmd_buf(&req, false, data);
  4001. if (ret)
  4002. goto out;
  4003. ret = __qseecom_send_cmd(data, &send_cmd_req, true);
  4004. if (ret)
  4005. goto out;
  4006. ret = __qseecom_update_cmd_buf(&req, true, data);
  4007. if (ret)
  4008. goto out;
  4009. } else {
  4010. ret = __qseecom_update_cmd_buf_64(&req, false, data);
  4011. if (ret)
  4012. goto out;
  4013. ret = __qseecom_send_cmd(data, &send_cmd_req, true);
  4014. if (ret)
  4015. goto out;
  4016. ret = __qseecom_update_cmd_buf_64(&req, true, data);
  4017. if (ret)
  4018. goto out;
  4019. }
  4020. /*Copy the response back to the userspace buffer*/
  4021. memcpy(origin_rsp_buf_kvirt, req.resp_buf, req.resp_len);
  4022. memcpy(origin_req_buf_kvirt, req.cmd_req_buf, req.cmd_req_len);
  4023. out:
  4024. if (req.cmd_req_buf)
  4025. __qseecom_free_coherent_buf(req.cmd_req_len + req.resp_len,
  4026. req.cmd_req_buf, (phys_addr_t)send_cmd_req.cmd_req_buf);
  4027. return ret;
  4028. }
/* ioctl entry: modfd send-cmd using the 32-bit sg patch format. */
static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
				void __user *argp)
{
	return __qseecom_send_modfd_cmd(data, argp, false);
}
/* ioctl entry: modfd send-cmd using the 64-bit sg patch format. */
static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
				void __user *argp)
{
	return __qseecom_send_modfd_cmd(data, argp, true);
}
  4039. static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
  4040. struct qseecom_registered_listener_list *svc)
  4041. {
  4042. int ret;
  4043. ret = (svc->rcv_req_flag == 1);
  4044. return ret || data->abort;
  4045. }
/*
 * qseecom_receive_req() - block a registered listener until TZ delivers a
 * request for it (or the handle is aborted).
 * @data: listener handle; data->listener.id identifies the service.
 *
 * Return: 0 when a request has been received (rcv_req_flag consumed),
 * -ENODATA for an unknown listener id, -ERESTARTSYS if interrupted by a
 * signal, -ENODEV if the handle was aborted.
 */
static int qseecom_receive_req(struct qseecom_dev_handle *data)
{
	int ret = 0;
	struct qseecom_registered_listener_list *this_lstnr;

	mutex_lock(&listener_access_lock);
	this_lstnr = __qseecom_find_svc(data->listener.id);
	if (!this_lstnr) {
		pr_err("Invalid listener ID\n");
		mutex_unlock(&listener_access_lock);
		return -ENODATA;
	}
	/*
	 * NOTE(review): this_lstnr is used after the lock is dropped; this
	 * relies on the listener entry staying alive while its owner is
	 * blocked here — confirm against the unregister path.
	 */
	mutex_unlock(&listener_access_lock);

	while (1) {
		/* Sleep until a request arrives or the handle aborts. */
		if (wait_event_interruptible(this_lstnr->rcv_req_wq,
				__qseecom_listener_has_rcvd_req(data,
				this_lstnr))) {
			pr_debug("Interrupted: exiting Listener Service = %d\n",
				(uint32_t)data->listener.id);
			/* woken up for different reason */
			return -ERESTARTSYS;
		}
		if (data->abort) {
			pr_err("Aborting Listener Service = %d\n",
				(uint32_t)data->listener.id);
			return -ENODEV;
		}
		/* Consume the pending-request flag under the lock. */
		mutex_lock(&listener_access_lock);
		this_lstnr->rcv_req_flag = 0;
		mutex_unlock(&listener_access_lock);
		break;
	}
	return ret;
}
  4079. static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
  4080. {
  4081. unsigned char app_arch = 0;
  4082. struct elf32_hdr *ehdr;
  4083. struct elf64_hdr *ehdr64;
  4084. app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
  4085. switch (app_arch) {
  4086. case ELFCLASS32: {
  4087. ehdr = (struct elf32_hdr *)fw_entry->data;
  4088. if (fw_entry->size < sizeof(*ehdr)) {
  4089. pr_err("%s: Not big enough to be an elf32 header\n",
  4090. qseecom.pdev->init_name);
  4091. return false;
  4092. }
  4093. if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
  4094. pr_err("%s: Not an elf32 header\n",
  4095. qseecom.pdev->init_name);
  4096. return false;
  4097. }
  4098. if (ehdr->e_phnum == 0) {
  4099. pr_err("%s: No loadable segments\n",
  4100. qseecom.pdev->init_name);
  4101. return false;
  4102. }
  4103. if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
  4104. sizeof(struct elf32_hdr) > fw_entry->size) {
  4105. pr_err("%s: Program headers not within mdt\n",
  4106. qseecom.pdev->init_name);
  4107. return false;
  4108. }
  4109. break;
  4110. }
  4111. case ELFCLASS64: {
  4112. ehdr64 = (struct elf64_hdr *)fw_entry->data;
  4113. if (fw_entry->size < sizeof(*ehdr64)) {
  4114. pr_err("%s: Not big enough to be an elf64 header\n",
  4115. qseecom.pdev->init_name);
  4116. return false;
  4117. }
  4118. if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
  4119. pr_err("%s: Not an elf64 header\n",
  4120. qseecom.pdev->init_name);
  4121. return false;
  4122. }
  4123. if (ehdr64->e_phnum == 0) {
  4124. pr_err("%s: No loadable segments\n",
  4125. qseecom.pdev->init_name);
  4126. return false;
  4127. }
  4128. if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
  4129. sizeof(struct elf64_hdr) > fw_entry->size) {
  4130. pr_err("%s: Program headers not within mdt\n",
  4131. qseecom.pdev->init_name);
  4132. return false;
  4133. }
  4134. break;
  4135. }
  4136. default: {
  4137. pr_err("QSEE app arch %u is not supported\n", app_arch);
  4138. return false;
  4139. }
  4140. }
  4141. return true;
  4142. }
/*
 * __qseecom_get_fw_size() - compute the total size of a split TA image
 * (the .mdt plus every .bNN blob) and report its ELF class.
 * @appname:  TA base name used to build "<app>.mdt" / "<app>.bNN" names.
 * @fw_size:  out: sum of all blob sizes; set to 0 on failure.
 * @app_arch: out: ELFCLASS32 or ELFCLASS64 from the mdt header.
 *
 * Return: 0 on success, -EIO/-EINVAL on failure.
 */
static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
					uint32_t *app_arch)
{
	int ret = -1;
	int i = 0, rc = 0;
	const struct firmware *fw_entry = NULL;
	char fw_name[MAX_APP_NAME_SIZE];
	struct elf32_hdr *ehdr;
	struct elf64_hdr *ehdr64;
	int num_images = 0;

	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
	rc = firmware_request_nowarn(&fw_entry, fw_name, qseecom.pdev);
	if (rc) {
		pr_err("error with firmware_request_nowarn, rc = %d\n", rc);
		ret = -EIO;
		goto err;
	}
	if (!__qseecom_is_fw_image_valid(fw_entry)) {
		ret = -EIO;
		goto err;
	}
	*app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
	*fw_size = fw_entry->size;
	if (*app_arch == ELFCLASS32) {
		ehdr = (struct elf32_hdr *)fw_entry->data;
		num_images = ehdr->e_phnum;
	} else if (*app_arch == ELFCLASS64) {
		ehdr64 = (struct elf64_hdr *)fw_entry->data;
		num_images = ehdr64->e_phnum;
	} else {
		pr_err("QSEE %s app, arch %u is not supported\n",
			appname, *app_arch);
		ret = -EIO;
		goto err;
	}
	pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
	release_firmware(fw_entry);
	fw_entry = NULL;
	/*
	 * Accumulate the size of each segment blob. On success the final
	 * loop iteration leaves ret == 0 (from firmware_request_nowarn);
	 * the image validator guarantees num_images > 0, so the loop
	 * always runs at least once.
	 */
	for (i = 0; i < num_images; i++) {
		memset(fw_name, 0, sizeof(fw_name));
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
		ret = firmware_request_nowarn(&fw_entry, fw_name, qseecom.pdev);
		if (ret)
			goto err;
		if (*fw_size > U32_MAX - fw_entry->size) {
			pr_err("QSEE %s app file size overflow\n", appname);
			ret = -EINVAL;
			goto err;
		}
		*fw_size += fw_entry->size;
		release_firmware(fw_entry);
		fw_entry = NULL;
	}
	return ret;
err:
	if (fw_entry)
		release_firmware(fw_entry);
	*fw_size = 0;
	return ret;
}
/*
 * __qseecom_get_fw_data() - read a split firmware image into a caller
 * supplied buffer.
 * @appname:  base name of the app image ("<appname>.mdt" + "<appname>.bNN")
 * @img_data: destination buffer, must hold at least @fw_size bytes
 * @fw_size:  capacity of @img_data (normally from __qseecom_get_fw_size())
 * @load_req: out: ->img_len and ->mdt_len are filled in for the SCM
 *            load command
 *
 * Loads the .mdt header first, then each .bNN blob (count taken from the
 * ELF program-header count in the .mdt), concatenating everything into
 * @img_data.
 *
 * Return: 0 on success, negative errno on failure.
 *
 * NOTE(review): if the ELF reports zero program headers, the blob loop
 * never runs and the initial ret of -1 is returned even though the .mdt
 * copy succeeded — confirm this "treat as invalid image" behavior is
 * intended.  Also, a failed .mdt request exits silently (no pr_err).
 */
static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
		uint32_t fw_size,
		struct qseecom_load_app_ireq *load_req)
{
	int ret = -1;
	int i = 0, rc = 0;
	const struct firmware *fw_entry = NULL;
	char fw_name[MAX_APP_NAME_SIZE];
	u8 *img_data_ptr = img_data;
	struct elf32_hdr *ehdr;
	struct elf64_hdr *ehdr64;
	int num_images = 0;
	unsigned char app_arch = 0;

	/* Fetch the metadata (.mdt) part of the split image first */
	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
	rc = firmware_request_nowarn(&fw_entry, fw_name, qseecom.pdev);
	if (rc) {
		ret = -EIO;
		goto err;
	}

	load_req->img_len = fw_entry->size;
	if (load_req->img_len > fw_size) {
		pr_err("app %s size %zu is larger than buf size %u\n",
			appname, fw_entry->size, fw_size);
		ret = -EINVAL;
		goto err;
	}
	memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
	img_data_ptr = img_data_ptr + fw_entry->size;
	load_req->mdt_len = fw_entry->size; /*Get MDT LEN*/

	/* ELF class byte tells us 32- vs 64-bit layout of the header */
	app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
	if (app_arch == ELFCLASS32) {
		ehdr = (struct elf32_hdr *)fw_entry->data;
		num_images = ehdr->e_phnum;
	} else if (app_arch == ELFCLASS64) {
		ehdr64 = (struct elf64_hdr *)fw_entry->data;
		num_images = ehdr64->e_phnum;
	} else {
		pr_err("QSEE %s app, arch %u is not supported\n",
			appname, app_arch);
		ret = -EIO;
		goto err;
	}

	release_firmware(fw_entry);
	fw_entry = NULL;

	/* Append each segment blob (<appname>.b00, .b01, ...) */
	for (i = 0; i < num_images; i++) {
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
		ret = firmware_request_nowarn(&fw_entry, fw_name, qseecom.pdev);
		if (ret) {
			pr_err("Failed to locate blob %s\n", fw_name);
			goto err;
		}
		/* Reject u32 overflow and writes past the caller's buffer */
		if ((fw_entry->size > U32_MAX - load_req->img_len) ||
			(fw_entry->size + load_req->img_len > fw_size)) {
			pr_err("Invalid file size for %s\n", fw_name);
			ret = -EINVAL;
			goto err;
		}
		memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
		img_data_ptr = img_data_ptr + fw_entry->size;
		load_req->img_len += fw_entry->size;
		release_firmware(fw_entry);
		fw_entry = NULL;
	}
	return ret;
err:
	/* release_firmware(NULL) is a safe no-op */
	release_firmware(fw_entry);
	return ret;
}
/*
 * __qseecom_alloc_coherent_buf() - allocate a DMA-coherent buffer for
 * sharing with QSEE.
 * @size:  requested byte count (rounded up internally, see note)
 * @vaddr: out: kernel virtual address of the buffer
 * @paddr: out: bus/physical address of the buffer
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 *
 * NOTE(review): "(size + PAGE_SIZE) & PAGE_MASK" rounds up to a page
 * boundary but also adds one full extra page whenever @size is already
 * page aligned (PAGE_ALIGN() would not).  __qseecom_free_coherent_buf()
 * applies the identical formula, so the alloc/free pair stays consistent;
 * change both together or not at all.
 */
static int __qseecom_alloc_coherent_buf(
			uint32_t size, u8 **vaddr, phys_addr_t *paddr)
{
	dma_addr_t coh_pmem;
	void *buf = NULL;

	/* Allocate a contiguous kernel buffer */
	size = (size + PAGE_SIZE) & PAGE_MASK;
	buf = dma_alloc_coherent(qseecom.dev,
			size, &coh_pmem, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	*vaddr = buf;
	*paddr = coh_pmem;
	return 0;
}
/*
 * __qseecom_free_coherent_buf() - release a buffer from
 * __qseecom_alloc_coherent_buf().  A NULL @vaddr is a no-op.
 *
 * @size must be the value originally passed to the allocator so that the
 * rounding below reproduces the actual allocated length.
 */
static void __qseecom_free_coherent_buf(uint32_t size,
				u8 *vaddr, phys_addr_t paddr)
{
	if (!vaddr)
		return;
	/* Mirror the rounding done at allocation time exactly */
	size = (size + PAGE_SIZE) & PAGE_MASK;
	dma_free_coherent(qseecom.dev, size, vaddr, paddr);
}
  4294. static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
  4295. uint32_t *app_id)
  4296. {
  4297. int ret = -1;
  4298. uint32_t fw_size = 0;
  4299. struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
  4300. struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
  4301. struct qseecom_command_scm_resp resp;
  4302. u8 *img_data = NULL;
  4303. phys_addr_t pa = 0;
  4304. void *cmd_buf = NULL;
  4305. size_t cmd_len;
  4306. uint32_t app_arch = 0;
  4307. if (!data || !appname || !app_id) {
  4308. pr_err("Null pointer to data or appname or appid\n");
  4309. return -EINVAL;
  4310. }
  4311. *app_id = 0;
  4312. if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
  4313. return -EIO;
  4314. data->client.app_arch = app_arch;
  4315. /* Check and load cmnlib */
  4316. if (qseecom.qsee_version > QSEEE_VERSION_00) {
  4317. if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) {
  4318. ret = qseecom_load_commonlib_image(data, "cmnlib");
  4319. if (ret) {
  4320. pr_err("failed to load cmnlib\n");
  4321. return -EIO;
  4322. }
  4323. qseecom.commonlib_loaded = true;
  4324. pr_debug("cmnlib is loaded\n");
  4325. }
  4326. if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) {
  4327. ret = qseecom_load_commonlib_image(data, "cmnlib64");
  4328. if (ret) {
  4329. pr_err("failed to load cmnlib64\n");
  4330. return -EIO;
  4331. }
  4332. qseecom.commonlib64_loaded = true;
  4333. pr_debug("cmnlib64 is loaded\n");
  4334. }
  4335. }
  4336. ret = __qseecom_alloc_coherent_buf(fw_size, &img_data, &pa);
  4337. if (ret)
  4338. return ret;
  4339. ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
  4340. if (ret) {
  4341. ret = -EIO;
  4342. goto exit_free_img_data;
  4343. }
  4344. /* Populate the load_req parameters */
  4345. if (qseecom.qsee_version < QSEE_VERSION_40) {
  4346. load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
  4347. load_req.mdt_len = load_req.mdt_len;
  4348. load_req.img_len = load_req.img_len;
  4349. strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
  4350. load_req.phy_addr = (uint32_t)pa;
  4351. cmd_buf = (void *)&load_req;
  4352. cmd_len = sizeof(struct qseecom_load_app_ireq);
  4353. } else {
  4354. load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
  4355. load_req_64bit.mdt_len = load_req.mdt_len;
  4356. load_req_64bit.img_len = load_req.img_len;
  4357. strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
  4358. load_req_64bit.phy_addr = (uint64_t)pa;
  4359. cmd_buf = (void *)&load_req_64bit;
  4360. cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
  4361. }
  4362. if (qseecom.support_bus_scaling) {
  4363. mutex_lock(&qsee_bw_mutex);
  4364. ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
  4365. mutex_unlock(&qsee_bw_mutex);
  4366. if (ret) {
  4367. ret = -EIO;
  4368. goto exit_free_img_data;
  4369. }
  4370. }
  4371. ret = __qseecom_enable_clk_scale_up(data);
  4372. if (ret) {
  4373. ret = -EIO;
  4374. goto exit_unregister_bus_bw_need;
  4375. }
  4376. /* SCM_CALL to load the image */
  4377. ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
  4378. &resp, sizeof(resp));
  4379. if (ret) {
  4380. pr_err("scm_call to load failed : ret %d\n", ret);
  4381. ret = -EIO;
  4382. goto exit_disable_clk_vote;
  4383. }
  4384. switch (resp.result) {
  4385. case QSEOS_RESULT_SUCCESS:
  4386. *app_id = resp.data;
  4387. break;
  4388. case QSEOS_RESULT_INCOMPLETE:
  4389. ret = __qseecom_process_incomplete_cmd(data, &resp);
  4390. if (ret) {
  4391. pr_err("incomp_cmd err %d, %d, unload %d %s\n",
  4392. ret, resp.result, resp.data, appname);
  4393. __qseecom_unload_app(data, resp.data);
  4394. ret = -EFAULT;
  4395. } else {
  4396. *app_id = resp.data;
  4397. }
  4398. break;
  4399. case QSEOS_RESULT_FAILURE:
  4400. pr_err("scm call failed with response QSEOS_RESULT FAILURE\n");
  4401. break;
  4402. default:
  4403. pr_err("scm call return unknown response %d\n", resp.result);
  4404. ret = -EINVAL;
  4405. break;
  4406. }
  4407. exit_disable_clk_vote:
  4408. __qseecom_disable_clk_scale_down(data);
  4409. exit_unregister_bus_bw_need:
  4410. if (qseecom.support_bus_scaling) {
  4411. mutex_lock(&qsee_bw_mutex);
  4412. qseecom_unregister_bus_bandwidth_needs(data);
  4413. mutex_unlock(&qsee_bw_mutex);
  4414. }
  4415. exit_free_img_data:
  4416. if (img_data)
  4417. __qseecom_free_coherent_buf(fw_size, img_data, pa);
  4418. return ret;
  4419. }
/*
 * qseecom_load_commonlib_image() - load a QSEE common library
 * (cmnlib / cmnlib64) into the secure side.
 * @data:        client handle used for bus/clock voting
 * @cmnlib_name: library base name; must be shorter than MAX_APP_NAME_SIZE
 *
 * Reads the split library image into a DMA-coherent buffer and issues
 * QSEOS_LOAD_SERV_IMAGE_COMMAND via an SCM call, voting for bus
 * bandwidth and crypto clocks around the call.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
					char *cmnlib_name)
{
	int ret = 0;
	uint32_t fw_size = 0;
	struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
	struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
	struct qseecom_command_scm_resp resp;
	u8 *img_data = NULL;
	phys_addr_t pa = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	uint32_t app_arch = 0;

	if (!cmnlib_name) {
		pr_err("cmnlib_name is NULL\n");
		return -EINVAL;
	}
	if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
		pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
			cmnlib_name, strlen(cmnlib_name));
		return -EINVAL;
	}

	if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
		return -EIO;

	ret = __qseecom_alloc_coherent_buf(fw_size, &img_data, &pa);
	if (ret)
		return -EIO;

	/* Fills load_req.img_len / load_req.mdt_len as a side effect */
	ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
	if (ret) {
		ret = -EIO;
		goto exit_free_img_data;
	}

	if (qseecom.qsee_version < QSEE_VERSION_40) {
		load_req.phy_addr = (uint32_t)pa;
		load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		cmd_buf = (void *)&load_req;
		/*
		 * NOTE(review): cmd_buf points at a qseecom_load_app_ireq
		 * but cmd_len is sizeof(qseecom_load_lib_image_ireq) —
		 * presumably the lib ireq is a layout-compatible prefix;
		 * confirm against the struct definitions.
		 */
		cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
	} else {
		load_req_64bit.phy_addr = (uint64_t)pa;
		load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		load_req_64bit.img_len = load_req.img_len;
		load_req_64bit.mdt_len = load_req.mdt_len;
		cmd_buf = (void *)&load_req_64bit;
		cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
	}

	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
		mutex_unlock(&qsee_bw_mutex);
		if (ret) {
			ret = -EIO;
			goto exit_free_img_data;
		}
	}

	/* Vote for the SFPB clock */
	ret = __qseecom_enable_clk_scale_up(data);
	if (ret) {
		ret = -EIO;
		goto exit_unregister_bus_bw_need;
	}

	/* SCM_CALL to load the image */
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
			&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call to load failed : ret %d\n", ret);
		ret = -EIO;
		goto exit_disable_clk_vote;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm call failed w/response result%d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	case QSEOS_RESULT_INCOMPLETE:
		/* TZ needs a listener serviced before the load completes */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd failed err: %d\n", ret);
			goto exit_disable_clk_vote;
		}
		break;
	default:
		pr_err("scm call return unknown response %d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	}

exit_disable_clk_vote:
	__qseecom_disable_clk_scale_down(data);

exit_unregister_bus_bw_need:
	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		qseecom_unregister_bus_bandwidth_needs(data);
		mutex_unlock(&qsee_bw_mutex);
	}

exit_free_img_data:
	if (img_data)
		__qseecom_free_coherent_buf(fw_size, img_data, pa);
	return ret;
}
  4520. static int qseecom_unload_commonlib_image(void)
  4521. {
  4522. int ret = -EINVAL;
  4523. struct qseecom_unload_lib_image_ireq unload_req = {0};
  4524. struct qseecom_command_scm_resp resp;
  4525. /* Populate the remaining parameters */
  4526. unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
  4527. /* SCM_CALL to load the image */
  4528. ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
  4529. sizeof(struct qseecom_unload_lib_image_ireq),
  4530. &resp, sizeof(resp));
  4531. if (ret) {
  4532. pr_err("scm_call to unload lib failed : ret %d\n", ret);
  4533. ret = -EIO;
  4534. } else {
  4535. switch (resp.result) {
  4536. case QSEOS_RESULT_SUCCESS:
  4537. break;
  4538. case QSEOS_RESULT_FAILURE:
  4539. pr_err("scm fail resp.result QSEOS_RESULT FAILURE\n");
  4540. break;
  4541. default:
  4542. pr_err("scm call return unknown response %d\n",
  4543. resp.result);
  4544. ret = -EINVAL;
  4545. break;
  4546. }
  4547. }
  4548. return ret;
  4549. }
  4550. static int __qseecom_start_app(struct qseecom_handle **handle,
  4551. char *app_name, uint32_t size)
  4552. {
  4553. int32_t ret = 0;
  4554. unsigned long flags = 0;
  4555. struct qseecom_dev_handle *data = NULL;
  4556. struct qseecom_check_app_ireq app_ireq;
  4557. struct qseecom_registered_app_list *entry = NULL;
  4558. struct qseecom_registered_kclient_list *kclient_entry = NULL;
  4559. bool found_app = false;
  4560. phys_addr_t pa = 0;
  4561. u8 *va = NULL;
  4562. uint32_t fw_size, app_arch;
  4563. uint32_t app_id = 0;
  4564. __wakeup_unregister_listener_kthread();
  4565. __wakeup_unload_app_kthread();
  4566. if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
  4567. pr_err("Not allowed to be called in %d state\n",
  4568. atomic_read(&qseecom.qseecom_state));
  4569. return -EPERM;
  4570. }
  4571. if (!app_name) {
  4572. pr_err("failed to get the app name\n");
  4573. return -EINVAL;
  4574. }
  4575. if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) {
  4576. pr_err("The app_name (%s) with length %zu is not valid\n",
  4577. app_name, strnlen(app_name, MAX_APP_NAME_SIZE));
  4578. return -EINVAL;
  4579. }
  4580. *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
  4581. if (!(*handle))
  4582. return -ENOMEM;
  4583. data = kzalloc(sizeof(*data), GFP_KERNEL);
  4584. if (!data) {
  4585. kfree(*handle);
  4586. *handle = NULL;
  4587. return -ENOMEM;
  4588. }
  4589. mutex_lock(&app_access_lock);
  4590. data->abort = 0;
  4591. data->type = QSEECOM_CLIENT_APP;
  4592. data->released = false;
  4593. data->client.sb_length = size;
  4594. data->client.user_virt_sb_base = 0;
  4595. data->sglistinfo_ptr = (struct sglist_info *)__qseecom_alloc_tzbuf(
  4596. sizeof(struct sglist_info) * MAX_ION_FD,
  4597. &data->sglistinfo_shm.paddr,
  4598. &data->sglistinfo_shm);
  4599. if (!data->sglistinfo_ptr) {
  4600. ret = -ENOMEM;
  4601. goto err;
  4602. }
  4603. init_waitqueue_head(&data->abort_wq);
  4604. app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
  4605. strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
  4606. ret = __qseecom_check_app_exists(app_ireq, &app_id);
  4607. if (ret)
  4608. goto err;
  4609. strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
  4610. if (app_id) {
  4611. pr_warn("App id %d for [%s] app exists\n", app_id,
  4612. (char *)app_ireq.app_name);
  4613. spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
  4614. list_for_each_entry(entry,
  4615. &qseecom.registered_app_list_head, list){
  4616. if (entry->app_id == app_id) {
  4617. if (entry->ref_cnt == U32_MAX) {
  4618. pr_err("App %d (%s) ref_cnt overflow\n",
  4619. app_id, app_ireq.app_name);
  4620. ret = -EINVAL;
  4621. goto err;
  4622. }
  4623. entry->ref_cnt++;
  4624. found_app = true;
  4625. break;
  4626. }
  4627. }
  4628. spin_unlock_irqrestore(
  4629. &qseecom.registered_app_list_lock, flags);
  4630. if (!found_app)
  4631. pr_warn("App_id %d [%s] was loaded but not registered\n",
  4632. ret, (char *)app_ireq.app_name);
  4633. } else {
  4634. /* load the app and get the app_id */
  4635. pr_debug("%s: Loading app for the first time'\n",
  4636. qseecom.pdev->init_name);
  4637. ret = __qseecom_load_fw(data, app_name, &app_id);
  4638. if (ret < 0)
  4639. goto err;
  4640. }
  4641. data->client.app_id = app_id;
  4642. if (!found_app) {
  4643. entry = kmalloc(sizeof(*entry), GFP_KERNEL);
  4644. if (!entry) {
  4645. ret = -ENOMEM;
  4646. goto err;
  4647. }
  4648. entry->app_id = app_id;
  4649. entry->ref_cnt = 1;
  4650. strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
  4651. if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
  4652. ret = -EIO;
  4653. kfree(entry);
  4654. goto err;
  4655. }
  4656. entry->app_arch = app_arch;
  4657. entry->app_blocked = false;
  4658. entry->blocked_on_listener_id = 0;
  4659. entry->check_block = 0;
  4660. spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
  4661. list_add_tail(&entry->list, &qseecom.registered_app_list_head);
  4662. spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
  4663. flags);
  4664. }
  4665. /* Get the physical address of the req/resp buffer */
  4666. ret = __qseecom_alloc_coherent_buf(size, &va, &pa);
  4667. if (ret) {
  4668. pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
  4669. ret);
  4670. goto err;
  4671. }
  4672. /* Populate the structure for sending scm call to load image */
  4673. data->client.sb_virt = va;
  4674. data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
  4675. data->client.sb_phys = (phys_addr_t)pa;
  4676. (*handle)->dev = (void *)data;
  4677. (*handle)->sbuf = (unsigned char *)data->client.sb_virt;
  4678. (*handle)->sbuf_len = data->client.sb_length;
  4679. kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
  4680. if (!kclient_entry) {
  4681. ret = -ENOMEM;
  4682. goto err;
  4683. }
  4684. kclient_entry->handle = *handle;
  4685. spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
  4686. list_add_tail(&kclient_entry->list,
  4687. &qseecom.registered_kclient_list_head);
  4688. spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
  4689. mutex_unlock(&app_access_lock);
  4690. __wakeup_unload_app_kthread();
  4691. return 0;
  4692. err:
  4693. __qseecom_free_coherent_buf(size, va, pa);
  4694. __qseecom_free_tzbuf(&data->sglistinfo_shm);
  4695. kfree(data);
  4696. kfree(*handle);
  4697. *handle = NULL;
  4698. mutex_unlock(&app_access_lock);
  4699. __wakeup_unload_app_kthread();
  4700. return ret;
  4701. }
/*
 * __qseecom_shutdown_app() - kernel-client entry point to detach from a
 * trusted app and release the handle created by __qseecom_start_app().
 * @handle: in/out: handle pointer; set to NULL on successful shutdown
 *
 * Removes the handle from the kernel-client list, unloads (or decrements
 * the ref count of) the app, and frees the shared buffer and bookkeeping
 * memory.  If the unload fails, the handle and its resources are
 * deliberately left intact so the caller may retry.
 *
 * Return: 0 on success, -EINVAL if the handle is unknown, or the error
 * from qseecom_unload_app().
 */
static int __qseecom_shutdown_app(struct qseecom_handle **handle)
{
	int ret = -EINVAL;
	struct qseecom_dev_handle *data;
	struct qseecom_registered_kclient_list *kclient = NULL;
	unsigned long flags = 0;
	bool found_handle = false;

	__wakeup_unregister_listener_kthread();
	__wakeup_unload_app_kthread();

	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
		pr_err("Not allowed to be called in %d state\n",
			atomic_read(&qseecom.qseecom_state));
		return -EPERM;
	}
	if ((handle == NULL) || (*handle == NULL)) {
		pr_err("Handle is not initialized\n");
		return -EINVAL;
	}

	data = (struct qseecom_dev_handle *) ((*handle)->dev);
	mutex_lock(&app_access_lock);

	/* Unlink the matching entry from the kernel-client list */
	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
	list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
			list) {
		if (kclient->handle == (*handle)) {
			list_del(&kclient->list);
			found_handle = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);

	if (!found_handle)
		pr_err("Unable to find the handle, exiting\n");
	else
		ret = qseecom_unload_app(data, false);

	mutex_unlock(&app_access_lock);

	/*
	 * Only release resources when the unload really succeeded; on
	 * failure the (already unlinked) handle is kept for a retry.
	 * kfree_sensitive() zeroizes before freeing.
	 */
	if (ret == 0) {
		if (data->client.sb_virt)
			__qseecom_free_coherent_buf(data->client.sb_length,
				data->client.sb_virt, data->client.sb_phys);
		__qseecom_free_tzbuf(&data->sglistinfo_shm);
		kfree_sensitive(data);
		kfree_sensitive(*handle);
		kfree_sensitive(kclient);
		*handle = NULL;
	}
	__wakeup_unload_app_kthread();
	return ret;
}
/*
 * __qseecom_send_command() - kernel-client entry point to send a command
 * to a trusted app and wait for its response.
 * @handle:   handle from __qseecom_start_app()
 * @send_buf: request buffer (inside the handle's shared buffer)
 * @sbuf_len: request length
 * @resp_buf: response buffer (inside the handle's shared buffer)
 * @rbuf_len: response length
 *
 * Votes for bus bandwidth and (when HLOS owns them) crypto clocks around
 * the underlying __qseecom_send_cmd() call.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int __qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
		uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
{
	int ret = 0;
	struct qseecom_send_cmd_req req = {NULL, 0, NULL, 0};
	struct qseecom_dev_handle *data;
	bool perf_enabled = false;

	__wakeup_unregister_listener_kthread();
	__wakeup_unload_app_kthread();

	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
		pr_err("Not allowed to be called in %d state\n",
			atomic_read(&qseecom.qseecom_state));
		return -EPERM;
	}
	if (handle == NULL) {
		pr_err("Handle is not initialized\n");
		return -EINVAL;
	}

	data = handle->dev;
	req.cmd_req_len = sbuf_len;
	req.resp_len = rbuf_len;
	req.cmd_req_buf = send_buf;
	req.resp_buf = resp_buf;
	if (__validate_send_cmd_inputs(data, &req))
		return -EINVAL;

	mutex_lock(&app_access_lock);
	if (qseecom.support_bus_scaling) {
		ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
		if (ret) {
			pr_err("Failed to set bw.\n");
			mutex_unlock(&app_access_lock);
			return ret;
		}
	}
	/*
	 * On targets where crypto clock is handled by HLOS,
	 * if clk_access_cnt is zero and perf_enabled is false,
	 * then the crypto clock was not enabled before sending cmd
	 * to tz, qseecom will enable the clock to avoid service failure.
	 */
	if (!qseecom.no_clock_support &&
		!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
		pr_debug("ce clock is not enabled!\n");
		ret = qseecom_perf_enable(data);
		if (ret) {
			pr_err("Failed to vote for clock with err %d\n",
				ret);
			mutex_unlock(&app_access_lock);
			return -EINVAL;
		}
		/* Remember to drop our temporary vote after the command */
		perf_enabled = true;
	}
	/* The "securemm" app still expects the legacy command format */
	if (!strcmp(data->client.app_name, "securemm"))
		data->use_legacy_cmd = true;

	ret = __qseecom_send_cmd(data, &req, false);

	data->use_legacy_cmd = false;
	if (qseecom.support_bus_scaling)
		__qseecom_add_bw_scale_down_timer(
			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);

	if (perf_enabled) {
		qsee_disable_clock_vote(data, CLK_DFAB);
		qsee_disable_clock_vote(data, CLK_SFPB);
	}

	mutex_unlock(&app_access_lock);

	if (ret)
		return ret;

	pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
			req.resp_len, req.resp_buf);
	return ret;
}
  4820. #if IS_ENABLED(CONFIG_QSEECOM_PROXY)
  4821. const static struct qseecom_drv_ops qseecom_driver_ops = {
  4822. .qseecom_send_command = __qseecom_send_command,
  4823. .qseecom_start_app = __qseecom_start_app,
  4824. .qseecom_shutdown_app = __qseecom_shutdown_app,
  4825. };
  4826. int get_qseecom_kernel_fun_ops(void)
  4827. {
  4828. return provide_qseecom_kernel_fun_ops(&qseecom_driver_ops);
  4829. }
  4830. #else
  4831. int qseecom_start_app(struct qseecom_handle **handle,
  4832. char *app_name, uint32_t size)
  4833. {
  4834. return __qseecom_start_app(handle, app_name, size);
  4835. }
  4836. EXPORT_SYMBOL(qseecom_start_app);
  4837. int qseecom_shutdown_app(struct qseecom_handle **handle)
  4838. {
  4839. return __qseecom_shutdown_app(handle);
  4840. }
  4841. EXPORT_SYMBOL(qseecom_shutdown_app);
  4842. int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
  4843. uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
  4844. {
  4845. return __qseecom_send_command(handle, send_buf, sbuf_len,
  4846. resp_buf, rbuf_len);
  4847. }
  4848. EXPORT_SYMBOL(qseecom_send_command);
  4849. #endif
  4850. int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
  4851. {
  4852. int ret = 0;
  4853. if ((handle == NULL) || (handle->dev == NULL)) {
  4854. pr_err("No valid kernel client\n");
  4855. return -EINVAL;
  4856. }
  4857. if (high) {
  4858. if (qseecom.support_bus_scaling) {
  4859. mutex_lock(&qsee_bw_mutex);
  4860. __qseecom_register_bus_bandwidth_needs(handle->dev,
  4861. HIGH);
  4862. mutex_unlock(&qsee_bw_mutex);
  4863. } else {
  4864. ret = qseecom_perf_enable(handle->dev);
  4865. if (ret)
  4866. pr_err("Failed to vote for clock with err %d\n",
  4867. ret);
  4868. }
  4869. } else {
  4870. if (!qseecom.support_bus_scaling) {
  4871. qsee_disable_clock_vote(handle->dev, CLK_DFAB);
  4872. qsee_disable_clock_vote(handle->dev, CLK_SFPB);
  4873. } else {
  4874. mutex_lock(&qsee_bw_mutex);
  4875. qseecom_unregister_bus_bandwidth_needs(handle->dev);
  4876. mutex_unlock(&qsee_bw_mutex);
  4877. }
  4878. }
  4879. return ret;
  4880. }
  4881. EXPORT_SYMBOL(qseecom_set_bandwidth);
/*
 * qseecom_process_listener_from_smcinvoke() - service a listener request
 * that arrived through the smcinvoke driver rather than qseecom's own
 * SCM path.
 * @result:        in/out: smcinvoke result (qseecom resp_type)
 * @response_type: in/out: smcinvoke response type (qseecom result);
 *                 also carries the app id on entry
 * @data:          in/out: opaque response data word
 *
 * Builds dummy app/client records around the swapped values, runs the
 * normal (reentrant or legacy) incomplete-command processing, then swaps
 * the fields back for smcinvoke.
 *
 * Return: 0 on success, negative errno from the command processing.
 */
int qseecom_process_listener_from_smcinvoke(uint32_t *result,
		u64 *response_type, unsigned int *data)
{
	struct qseecom_registered_app_list dummy_app_entry;
	struct qseecom_dev_handle dummy_private_data = {0};
	struct qseecom_command_scm_resp resp;
	int ret = 0;

	if (!result || !response_type || !data) {
		pr_err("input parameter NULL\n");
		return -EINVAL;
	}

	memset((void *)&dummy_app_entry, 0, sizeof(dummy_app_entry));
	/*
	 * smcinvoke expects result in scm call resp.ret[1] and type in ret[0],
	 * while qseecom expects result in ret[0] and type in ret[1].
	 * To simplify API interface and code changes in smcinvoke, here
	 * internally switch result and resp_type to let qseecom work with
	 * smcinvoke and upstream scm driver protocol.
	 */
	resp.result = *response_type;
	resp.resp_type = *result;
	resp.data = *data;
	/* response_type doubles as the app id for the dummy records */
	dummy_private_data.client.app_id = *response_type;
	dummy_private_data.client.from_smcinvoke = true;
	dummy_app_entry.app_id = *response_type;

	mutex_lock(&app_access_lock);
	if (qseecom.qsee_reentrancy_support)
		ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
			&dummy_private_data);
	else
		ret = __qseecom_process_incomplete_cmd(&dummy_private_data,
			&resp);
	mutex_unlock(&app_access_lock);

	if (ret)
		pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
			resp.result, resp.data, resp.resp_type, ret);

	/* Swap the fields back into smcinvoke's convention */
	*result = resp.resp_type;
	*response_type = resp.result;
	*data = resp.data;
	return ret;
}
EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
/*
 * qseecom_send_resp() - mark the (global) listener response ready and
 * wake any thread waiting on send_resp_wq.  Legacy non-reentrant path;
 * see qseecom_reentrancy_send_resp() for the per-listener variant.
 */
static int qseecom_send_resp(void)
{
	qseecom.send_resp_flag = 1;
	wake_up_interruptible(&qseecom.send_resp_wq);
	return 0;
}
  4930. static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
  4931. {
  4932. struct qseecom_registered_listener_list *this_lstnr = NULL;
  4933. pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
  4934. this_lstnr = __qseecom_find_svc(data->listener.id);
  4935. if (this_lstnr == NULL)
  4936. return -EINVAL;
  4937. qseecom.send_resp_flag = 1;
  4938. this_lstnr->send_resp_flag = 1;
  4939. wake_up_interruptible(&qseecom.send_resp_wq);
  4940. return 0;
  4941. }
  4942. static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
  4943. struct qseecom_send_modfd_listener_resp *resp,
  4944. struct qseecom_registered_listener_list *this_lstnr)
  4945. {
  4946. int i;
  4947. if (!data || !resp || !this_lstnr) {
  4948. pr_err("listener handle or resp msg is null\n");
  4949. return -EINVAL;
  4950. }
  4951. if (resp->resp_buf_ptr == NULL) {
  4952. pr_err("resp buffer is null\n");
  4953. return -EINVAL;
  4954. }
  4955. /* validate resp buf length */
  4956. if ((resp->resp_len == 0) ||
  4957. (resp->resp_len > this_lstnr->sb_length)) {
  4958. pr_err("resp buf length %d not valid\n", resp->resp_len);
  4959. return -EINVAL;
  4960. }
  4961. if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
  4962. pr_err("Integer overflow in resp_len & resp_buf\n");
  4963. return -EINVAL;
  4964. }
  4965. if ((uintptr_t)this_lstnr->user_virt_sb_base >
  4966. (ULONG_MAX - this_lstnr->sb_length)) {
  4967. pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
  4968. return -EINVAL;
  4969. }
  4970. /* validate resp buf */
  4971. if (((uintptr_t)resp->resp_buf_ptr <
  4972. (uintptr_t)this_lstnr->user_virt_sb_base) ||
  4973. ((uintptr_t)resp->resp_buf_ptr >=
  4974. ((uintptr_t)this_lstnr->user_virt_sb_base +
  4975. this_lstnr->sb_length)) ||
  4976. (((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
  4977. ((uintptr_t)this_lstnr->user_virt_sb_base +
  4978. this_lstnr->sb_length))) {
  4979. pr_err("resp buf is out of shared buffer region\n");
  4980. return -EINVAL;
  4981. }
  4982. /* validate offsets */
  4983. for (i = 0; i < MAX_ION_FD; i++) {
  4984. if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
  4985. pr_err("Invalid offset %d = 0x%x\n",
  4986. i, resp->ifd_data[i].cmd_buf_offset);
  4987. return -EINVAL;
  4988. }
  4989. }
  4990. return 0;
  4991. }
  4992. static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
  4993. void __user *argp, bool is_64bit_addr)
  4994. {
  4995. struct qseecom_send_modfd_listener_resp resp;
  4996. struct qseecom_registered_listener_list *this_lstnr = NULL;
  4997. if (copy_from_user(&resp, argp, sizeof(resp))) {
  4998. pr_err("copy_from_user failed\n");
  4999. return -EINVAL;
  5000. }
  5001. this_lstnr = __qseecom_find_svc(data->listener.id);
  5002. if (this_lstnr == NULL)
  5003. return -EINVAL;
  5004. if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
  5005. return -EINVAL;
  5006. resp.resp_buf_ptr = this_lstnr->sb_virt +
  5007. (uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);
  5008. if (!is_64bit_addr)
  5009. __qseecom_update_cmd_buf(&resp, false, data);
  5010. else
  5011. __qseecom_update_cmd_buf_64(&resp, false, data);
  5012. qseecom.send_resp_flag = 1;
  5013. this_lstnr->send_resp_flag = 1;
  5014. wake_up_interruptible(&qseecom.send_resp_wq);
  5015. return 0;
  5016. }
/* Listener response with 32-bit ion-fd buffer addresses. */
static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
				void __user *argp)
{
	return __qseecom_send_modfd_resp(data, argp, false);
}
/* Listener response with 64-bit ion-fd buffer addresses. */
static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
				void __user *argp)
{
	return __qseecom_send_modfd_resp(data, argp, true);
}
  5027. static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
  5028. void __user *argp)
  5029. {
  5030. struct qseecom_qseos_version_req req;
  5031. if (copy_from_user(&req, argp, sizeof(req))) {
  5032. pr_err("copy_from_user failed\n");
  5033. return -EINVAL;
  5034. }
  5035. req.qseos_version = qseecom.qseos_version;
  5036. if (copy_to_user(argp, &req, sizeof(req))) {
  5037. pr_err("copy_to_user failed\n");
  5038. return -EINVAL;
  5039. }
  5040. return 0;
  5041. }
/*
 * __qseecom_enable_clk() - take (or add a reference to) the crypto-engine
 * clock set for the given CE instance.
 * @ce: CLK_QSEE or CLK_CE_DRV
 *
 * Refcounted under clk_access_lock: the first caller prepares/enables the
 * core, interface and bus clocks; later callers just bump the count.
 * Partial failures unwind the already-enabled clocks in reverse order.
 *
 * Return: 0 on success (or when clocks are not supported), -EINVAL for an
 * unknown CE instance, -EIO on enable failure or refcount saturation.
 */
static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
{
	int rc = 0;
	struct qseecom_clk *qclk = NULL;

	if (qseecom.no_clock_support)
		return 0;

	if (ce == CLK_QSEE)
		qclk = &qseecom.qsee;
	if (ce == CLK_CE_DRV)
		qclk = &qseecom.ce_drv;

	if (qclk == NULL) {
		pr_err("CLK type not supported\n");
		return -EINVAL;
	}
	mutex_lock(&clk_access_lock);

	if (qclk->clk_access_cnt == ULONG_MAX) {
		pr_err("clk_access_cnt beyond limitation\n");
		goto err;
	}
	/* Already enabled: just take another reference */
	if (qclk->clk_access_cnt > 0) {
		qclk->clk_access_cnt++;
		mutex_unlock(&clk_access_lock);
		return rc;
	}

	/* Enable CE core clk */
	if (qclk->ce_core_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_core_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE core clk\n");
			goto err;
		}
	}
	/* Enable CE clk */
	if (qclk->ce_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE iface clk\n");
			goto ce_clk_err;
		}
	}
	/* Enable AXI clk */
	if (qclk->ce_bus_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_bus_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE bus clk\n");
			goto ce_bus_clk_err;
		}
	}
	qclk->clk_access_cnt++;
	mutex_unlock(&clk_access_lock);
	return 0;

/* Unwind in reverse order of enabling */
ce_bus_clk_err:
	if (qclk->ce_clk != NULL)
		clk_disable_unprepare(qclk->ce_clk);
ce_clk_err:
	if (qclk->ce_core_clk != NULL)
		clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	return -EIO;
}
  5103. static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
  5104. {
  5105. struct qseecom_clk *qclk;
  5106. if (qseecom.no_clock_support)
  5107. return;
  5108. if (ce == CLK_QSEE)
  5109. qclk = &qseecom.qsee;
  5110. else
  5111. qclk = &qseecom.ce_drv;
  5112. mutex_lock(&clk_access_lock);
  5113. if (qclk->clk_access_cnt == 0) {
  5114. mutex_unlock(&clk_access_lock);
  5115. return;
  5116. }
  5117. if (qclk->clk_access_cnt == 1) {
  5118. if (qclk->ce_clk != NULL)
  5119. clk_disable_unprepare(qclk->ce_clk);
  5120. if (qclk->ce_core_clk != NULL)
  5121. clk_disable_unprepare(qclk->ce_core_clk);
  5122. if (qclk->ce_bus_clk != NULL)
  5123. clk_disable_unprepare(qclk->ce_bus_clk);
  5124. }
  5125. qclk->clk_access_cnt--;
  5126. mutex_unlock(&clk_access_lock);
  5127. }
/*
 * qsee_vote_for_clock() - take a bandwidth/clock vote on behalf of @data.
 *
 * @clk_type selects the vote: CLK_DFAB tracks qseecom.qsee_bw_count and
 * sets data->perf_enabled; CLK_SFPB tracks qseecom.qsee_sfpb_bw_count and
 * sets data->fast_load_enabled.  Only the first voter of a given type
 * issues a bus-scale request; the level depends on whether the other vote
 * type is already active (3 = both active, 1 = DFAB only, 2 = SFPB only).
 * When a CE core source clock exists, the QSEE clocks are enabled together
 * with the first bus vote and dropped again if the bus request fails.
 *
 * All counters and flags are serialized by qsee_bw_mutex.
 *
 * Return: 0 on success or when no perf client / clock support exists;
 * otherwise the bus-scale or clock-enable error code.
 */
static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
						int32_t clk_type)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	if (qseecom.no_clock_support)
		return 0;

	qclk = &qseecom.qsee;
	if (!qseecom.qsee_perf_client)
		return ret;

	switch (clk_type) {
	case CLK_DFAB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_bw_count) {
			/*
			 * First DFAB voter: request level 3 if SFPB is
			 * already voted, otherwise enable the clocks (when
			 * a core source clock exists) and request level 1.
			 */
			if (qseecom.qsee_sfpb_bw_count > 0)
				ret = qseecom_bus_scale_update_request(
					qseecom.qsee_perf_client, 3);
			else {
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					qseecom_bus_scale_update_request(
						qseecom.qsee_perf_client, 1);
					/* Roll back clock enable on failure */
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}
			if (ret)
				pr_err("DFAB Bandwidth req failed (%d)\n",
								ret);
			else {
				qseecom.qsee_bw_count++;
				data->perf_enabled = true;
			}
		} else {
			/* Vote already active: just add a reference */
			qseecom.qsee_bw_count++;
			data->perf_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	case CLK_SFPB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_sfpb_bw_count) {
			/*
			 * First SFPB voter: request level 3 if DFAB is
			 * already voted, otherwise enable the clocks (when
			 * a core source clock exists) and request level 2.
			 */
			if (qseecom.qsee_bw_count > 0)
				ret = qseecom_bus_scale_update_request(
					qseecom.qsee_perf_client, 3);
			else {
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					qseecom_bus_scale_update_request(
						qseecom.qsee_perf_client, 2);
					/* Roll back clock enable on failure */
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}
			if (ret)
				pr_err("SFPB Bandwidth req failed (%d)\n",
								ret);
			else {
				qseecom.qsee_sfpb_bw_count++;
				data->fast_load_enabled = true;
			}
		} else {
			/* Vote already active: just add a reference */
			qseecom.qsee_sfpb_bw_count++;
			data->fast_load_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	default:
		pr_err("Clock type not defined\n");
		break;
	}
	return ret;
}
  5207. static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
  5208. int32_t clk_type)
  5209. {
  5210. int32_t ret = 0;
  5211. struct qseecom_clk *qclk;
  5212. qclk = &qseecom.qsee;
  5213. if (qseecom.no_clock_support)
  5214. return;
  5215. if (!qseecom.qsee_perf_client)
  5216. return;
  5217. switch (clk_type) {
  5218. case CLK_DFAB:
  5219. mutex_lock(&qsee_bw_mutex);
  5220. if (qseecom.qsee_bw_count == 0) {
  5221. pr_err("Client error.Extra call to disable DFAB clk\n");
  5222. mutex_unlock(&qsee_bw_mutex);
  5223. return;
  5224. }
  5225. if (qseecom.qsee_bw_count == 1) {
  5226. if (qseecom.qsee_sfpb_bw_count > 0)
  5227. ret = qseecom_bus_scale_update_request(
  5228. qseecom.qsee_perf_client, 2);
  5229. else {
  5230. ret = qseecom_bus_scale_update_request(
  5231. qseecom.qsee_perf_client, 0);
  5232. if ((!ret) && (qclk->ce_core_src_clk != NULL))
  5233. __qseecom_disable_clk(CLK_QSEE);
  5234. }
  5235. if (ret)
  5236. pr_err("SFPB Bandwidth req fail (%d)\n",
  5237. ret);
  5238. else {
  5239. qseecom.qsee_bw_count--;
  5240. data->perf_enabled = false;
  5241. }
  5242. } else {
  5243. qseecom.qsee_bw_count--;
  5244. data->perf_enabled = false;
  5245. }
  5246. mutex_unlock(&qsee_bw_mutex);
  5247. break;
  5248. case CLK_SFPB:
  5249. mutex_lock(&qsee_bw_mutex);
  5250. if (qseecom.qsee_sfpb_bw_count == 0) {
  5251. pr_err("Client error.Extra call to disable SFPB clk\n");
  5252. mutex_unlock(&qsee_bw_mutex);
  5253. return;
  5254. }
  5255. if (qseecom.qsee_sfpb_bw_count == 1) {
  5256. if (qseecom.qsee_bw_count > 0)
  5257. ret = qseecom_bus_scale_update_request(
  5258. qseecom.qsee_perf_client, 1);
  5259. else {
  5260. ret = qseecom_bus_scale_update_request(
  5261. qseecom.qsee_perf_client, 0);
  5262. if ((!ret) && (qclk->ce_core_src_clk != NULL))
  5263. __qseecom_disable_clk(CLK_QSEE);
  5264. }
  5265. if (ret)
  5266. pr_err("SFPB Bandwidth req fail (%d)\n",
  5267. ret);
  5268. else {
  5269. qseecom.qsee_sfpb_bw_count--;
  5270. data->fast_load_enabled = false;
  5271. }
  5272. } else {
  5273. qseecom.qsee_sfpb_bw_count--;
  5274. data->fast_load_enabled = false;
  5275. }
  5276. mutex_unlock(&qsee_bw_mutex);
  5277. break;
  5278. default:
  5279. pr_err("Clock type not defined\n");
  5280. break;
  5281. }
  5282. }
/*
 * qseecom_load_external_elf() - load a caller-supplied ELF image into QSEE.
 *
 * Copies a qseecom_load_img_req from @argp, maps the dma-buf behind
 * ifd_data_fd to obtain its physical address, validates the user-supplied
 * mdt/img lengths against the mapped size, then issues
 * QSEOS_LOAD_EXTERNAL_ELF_COMMAND via scm (32- or 64-bit request layout
 * depending on qsee_version).  Bus bandwidth is registered and the SFPB
 * clock voted for the duration of the call; the buffer is cache-cleaned
 * before and invalidated after the scm call.  All resources are released
 * via the goto-cleanup chain at the bottom.
 *
 * Return: 0 on success; -EFAULT/-EINVAL/-EIO/-ENOMEM on failure.
 */
static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
				void __user *argp)
{
	struct qseecom_load_img_req load_img_req;
	int uret = 0;
	int ret = 0;
	phys_addr_t pa = 0;
	size_t len;
	struct qseecom_load_app_ireq load_req;
	struct qseecom_load_app_64bit_ireq load_req_64bit;
	struct qseecom_command_scm_resp resp;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sg_table *sgt = NULL;
	struct dma_buf_attachment *attach = NULL;
	struct dma_buf *dmabuf = NULL;
	void *va = NULL;

	/* Copy the relevant information needed for loading the image */
	if (copy_from_user(&load_img_req,
				(void __user *)argp,
				sizeof(struct qseecom_load_img_req))) {
		pr_err("copy_from_user failed\n");
		return -EFAULT;
	}
	/* Get the handle of the shared fd */
	ret = qseecom_vaddr_map(load_img_req.ifd_data_fd, &pa, &va,
					&sgt, &attach, &len, &dmabuf);
	if (ret) {
		pr_err("Failed to map vaddr for ion_fd %d\n",
			load_img_req.ifd_data_fd);
		return -ENOMEM;
	}
	/* Reject user-supplied lengths that exceed the mapped buffer */
	if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
		pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
			len, load_img_req.mdt_len,
			load_img_req.img_len);
		ret = -EINVAL;
		goto exit_cpu_restore;
	}
	/* Populate the structure for sending scm call to load image */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
		load_req.mdt_len = load_img_req.mdt_len;
		load_req.img_len = load_img_req.img_len;
		/* Pre-4.0 TZ takes a 32-bit physical address */
		load_req.phy_addr = (uint32_t)pa;
		cmd_buf = (void *)&load_req;
		cmd_len = sizeof(struct qseecom_load_app_ireq);
	} else {
		load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
		load_req_64bit.mdt_len = load_img_req.mdt_len;
		load_req_64bit.img_len = load_img_req.img_len;
		load_req_64bit.phy_addr = (uint64_t)pa;
		cmd_buf = (void *)&load_req_64bit;
		cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
	}
	/* Register a MEDIUM bandwidth need while loading */
	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
		mutex_unlock(&qsee_bw_mutex);
		if (ret) {
			ret = -EIO;
			goto exit_cpu_restore;
		}
	}
	/* Vote for the SFPB clock */
	ret = __qseecom_enable_clk_scale_up(data);
	if (ret) {
		ret = -EIO;
		goto exit_register_bus_bandwidth_needs;
	}
	/* Flush the image to memory so TZ sees the caller's bytes */
	ret = qseecom_dmabuf_cache_operations(dmabuf,
					QSEECOM_CACHE_CLEAN);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit_disable_clock;
	}
	/* SCM_CALL to load the external elf */
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
			&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call to load failed : ret %d\n",
				ret);
		ret = -EFAULT;
		goto exit_disable_clock;
	}
	/* Discard stale cache lines after TZ may have written the buffer */
	ret = qseecom_dmabuf_cache_operations(dmabuf,
					QSEECOM_CACHE_INVALIDATE);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit_disable_clock;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		/* TZ needs listener servicing before the load completes */
		pr_err("%s: qseos result incomplete\n", __func__);
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret)
			pr_err("process_incomplete_cmd failed: err: %d\n", ret);
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
		ret = -EFAULT;
		break;
	default:
		pr_err("scm_call response result %d not supported\n",
				resp.result);
		ret = -EFAULT;
		break;
	}

	/* Cleanup chain: each label releases what was acquired after it */
exit_disable_clock:
	__qseecom_disable_clk_scale_down(data);

exit_register_bus_bandwidth_needs:
	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		uret = qseecom_unregister_bus_bandwidth_needs(data);
		mutex_unlock(&qsee_bw_mutex);
		if (uret)
			pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
				uret, ret);
	}

exit_cpu_restore:
	if (dmabuf) {
		qseecom_vaddr_unmap(va, sgt, attach, dmabuf);
		MAKE_NULL(sgt, attach, dmabuf);
	}
	return ret;
}
  5411. static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
  5412. {
  5413. int ret = 0;
  5414. struct qseecom_command_scm_resp resp;
  5415. struct qseecom_unload_app_ireq req;
  5416. /* unavailable client app */
  5417. data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
  5418. /* Populate the structure for sending scm call to unload image */
  5419. req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
  5420. /* SCM_CALL to unload the external elf */
  5421. ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
  5422. sizeof(struct qseecom_unload_app_ireq),
  5423. &resp, sizeof(resp));
  5424. if (ret) {
  5425. pr_err("scm_call to unload failed : ret %d\n",
  5426. ret);
  5427. ret = -EFAULT;
  5428. goto qseecom_unload_external_elf_scm_err;
  5429. }
  5430. if (resp.result == QSEOS_RESULT_INCOMPLETE) {
  5431. ret = __qseecom_process_incomplete_cmd(data, &resp);
  5432. if (ret)
  5433. pr_err("process_incomplete_cmd fail err: %d\n",
  5434. ret);
  5435. } else {
  5436. if (resp.result != QSEOS_RESULT_SUCCESS) {
  5437. pr_err("scm_call to unload image failed resp.result =%d\n",
  5438. resp.result);
  5439. ret = -EFAULT;
  5440. }
  5441. }
  5442. qseecom_unload_external_elf_scm_err:
  5443. return ret;
  5444. }
  5445. static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
  5446. void __user *argp)
  5447. {
  5448. int32_t ret = 0;
  5449. struct qseecom_qseos_app_load_query query_req = { {0} };
  5450. struct qseecom_check_app_ireq req;
  5451. struct qseecom_registered_app_list *entry = NULL;
  5452. unsigned long flags = 0;
  5453. uint32_t app_arch = 0, app_id = 0;
  5454. bool found_app = false;
  5455. /* Copy the relevant information needed for loading the image */
  5456. if (copy_from_user(&query_req, (void __user *)argp,
  5457. sizeof(struct qseecom_qseos_app_load_query))) {
  5458. pr_err("copy_from_user failed\n");
  5459. ret = -EFAULT;
  5460. goto exit_free;
  5461. }
  5462. req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
  5463. query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
  5464. strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
  5465. ret = __qseecom_check_app_exists(req, &app_id);
  5466. if (ret) {
  5467. pr_err(" scm call to check if app is loaded failed\n");
  5468. goto exit_free;
  5469. }
  5470. if (app_id) {
  5471. pr_debug("App id %d (%s) already exists\n", app_id,
  5472. (char *)(req.app_name));
  5473. spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
  5474. list_for_each_entry(entry,
  5475. &qseecom.registered_app_list_head, list){
  5476. if (entry->app_id == app_id) {
  5477. app_arch = entry->app_arch;
  5478. if (entry->ref_cnt == U32_MAX) {
  5479. pr_err("App %d (%s) ref_cnt overflow\n",
  5480. app_id, req.app_name);
  5481. ret = -EINVAL;
  5482. spin_unlock_irqrestore(
  5483. &qseecom.registered_app_list_lock,
  5484. flags);
  5485. goto exit_free;
  5486. }
  5487. entry->ref_cnt++;
  5488. found_app = true;
  5489. break;
  5490. }
  5491. }
  5492. spin_unlock_irqrestore(
  5493. &qseecom.registered_app_list_lock, flags);
  5494. data->client.app_id = app_id;
  5495. query_req.app_id = app_id;
  5496. if (app_arch) {
  5497. data->client.app_arch = app_arch;
  5498. query_req.app_arch = app_arch;
  5499. } else {
  5500. data->client.app_arch = 0;
  5501. query_req.app_arch = 0;
  5502. }
  5503. strlcpy(data->client.app_name, query_req.app_name,
  5504. MAX_APP_NAME_SIZE);
  5505. /*
  5506. * If app was loaded by appsbl before and was not registered,
  5507. * regiser this app now.
  5508. */
  5509. if (!found_app) {
  5510. pr_debug("Register app %d [%s] which was loaded before\n",
  5511. ret, (char *)query_req.app_name);
  5512. entry = kmalloc(sizeof(*entry), GFP_KERNEL);
  5513. if (!entry) {
  5514. ret = -ENOMEM;
  5515. goto exit_free;
  5516. }
  5517. entry->app_id = app_id;
  5518. entry->ref_cnt = 1;
  5519. entry->app_arch = data->client.app_arch;
  5520. strlcpy(entry->app_name, data->client.app_name,
  5521. MAX_APP_NAME_SIZE);
  5522. entry->app_blocked = false;
  5523. entry->blocked_on_listener_id = 0;
  5524. entry->check_block = 0;
  5525. spin_lock_irqsave(&qseecom.registered_app_list_lock,
  5526. flags);
  5527. list_add_tail(&entry->list,
  5528. &qseecom.registered_app_list_head);
  5529. spin_unlock_irqrestore(
  5530. &qseecom.registered_app_list_lock, flags);
  5531. }
  5532. if (copy_to_user(argp, &query_req, sizeof(query_req))) {
  5533. pr_err("copy_to_user failed\n");
  5534. ret = -EFAULT;
  5535. goto exit_free;
  5536. }
  5537. ret = -EEXIST; /* app already loaded */
  5538. goto exit_free;
  5539. }
  5540. exit_free:
  5541. return ret; /* app not loaded */
  5542. }
  5543. static int __qseecom_get_ce_pipe_info(
  5544. enum qseecom_key_management_usage_type usage,
  5545. uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
  5546. {
  5547. int ret = -EINVAL;
  5548. int i, j;
  5549. struct qseecom_ce_info_use *p = NULL;
  5550. int total = 0;
  5551. struct qseecom_ce_pipe_entry *pcepipe;
  5552. switch (usage) {
  5553. case QSEOS_KM_USAGE_DISK_ENCRYPTION:
  5554. case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
  5555. case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
  5556. if (qseecom.support_fde) {
  5557. p = qseecom.ce_info.fde;
  5558. total = qseecom.ce_info.num_fde;
  5559. } else {
  5560. pr_err("system does not support fde\n");
  5561. return -EINVAL;
  5562. }
  5563. break;
  5564. case QSEOS_KM_USAGE_FILE_ENCRYPTION:
  5565. if (qseecom.support_pfe) {
  5566. p = qseecom.ce_info.pfe;
  5567. total = qseecom.ce_info.num_pfe;
  5568. } else {
  5569. pr_err("system does not support pfe\n");
  5570. return -EINVAL;
  5571. }
  5572. break;
  5573. default:
  5574. pr_err("unsupported usage %d\n", usage);
  5575. return -EINVAL;
  5576. }
  5577. for (j = 0; j < total; j++) {
  5578. if (p->unit_num == unit) {
  5579. pcepipe = p->ce_pipe_entry;
  5580. for (i = 0; i < p->num_ce_pipe_entries; i++) {
  5581. (*ce_hw)[i] = pcepipe->ce_num;
  5582. *pipe = pcepipe->ce_pipe_pair;
  5583. pcepipe++;
  5584. }
  5585. ret = 0;
  5586. break;
  5587. }
  5588. p++;
  5589. }
  5590. return ret;
  5591. }
/*
 * __qseecom_generate_and_save_key() - ask TZ to generate and store a key.
 *
 * Validates @usage, holds the QSEE clocks across the scm call, and issues
 * the prepared QSEOS_GENERATE_KEY request in @ireq.  A "key ID exists"
 * outcome — reported either as scm -EINVAL with resp.result
 * QSEOS_RESULT_FAIL_KEY_ID_EXISTS, or as that result code directly — is
 * treated as success.  An INCOMPLETE response is finished via
 * __qseecom_process_incomplete_cmd().
 *
 * Return: 0 on success (including "key already exists"), -EFAULT on scm
 * failure, -EINVAL on an unexpected TZ result.
 */
static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_generate_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_generate_ireq),
		&resp, sizeof(resp));
	if (ret) {
		/* TZ signals "already exists" through -EINVAL + result */
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
			pr_debug("Key ID exists.\n");
			ret = 0;
		} else {
			pr_err("scm call to generate key failed : %d\n", ret);
			ret = -EFAULT;
		}
		goto generate_key_exit;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
		/* Existing key is not an error; ret stays 0 */
		pr_debug("Key ID exists.\n");
		break;
	case QSEOS_RESULT_INCOMPLETE:
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
				pr_debug("Key ID exists.\n");
				ret = 0;
			} else {
				pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
			}
		}
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("gen key scm call failed resp.result %d\n", resp.result);
		ret = -EINVAL;
		break;
	}
generate_key_exit:
	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
/*
 * __qseecom_delete_saved_key() - ask TZ to delete a previously saved key.
 *
 * Validates @usage, holds the QSEE clocks across the scm call, and issues
 * the prepared QSEOS_DELETE_KEY request in @ireq.  A "max password
 * attempts" outcome — reported either as scm -EINVAL with resp.result
 * QSEOS_RESULT_FAIL_MAX_ATTEMPT, or as that result code directly — maps
 * to -ERANGE so callers can distinguish it.  An INCOMPLETE response is
 * finished via __qseecom_process_incomplete_cmd().
 *
 * Return: 0 on success, -ERANGE on max attempts, -EFAULT on scm failure,
 * -EINVAL on an unexpected TZ result.
 */
static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_delete_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_delete_ireq),
		&resp, sizeof(struct qseecom_command_scm_resp));
	if (ret) {
		/* TZ signals "max attempts" through -EINVAL + result */
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
			pr_debug("Max attempts to input password reached.\n");
			ret = -ERANGE;
		} else {
			pr_err("scm call to delete key failed : %d\n", ret);
			ret = -EFAULT;
		}
		goto del_key_exit;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
			if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
				pr_debug("Max attempts to input password reached.\n");
				ret = -ERANGE;
			}
		}
		break;
	case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
		pr_debug("Max attempts to input password reached.\n");
		ret = -ERANGE;
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("Delete key scm call failed resp.result %d\n",
			resp.result);
		ret = -EINVAL;
		break;
	}
del_key_exit:
	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
  5705. static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
  5706. enum qseecom_key_management_usage_type usage,
  5707. struct qseecom_key_select_ireq *ireq)
  5708. {
  5709. struct qseecom_command_scm_resp resp;
  5710. int ret;
  5711. if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
  5712. usage >= QSEOS_KM_USAGE_MAX) {
  5713. pr_err("Error:: unsupported usage %d\n", usage);
  5714. return -EFAULT;
  5715. }
  5716. ret = __qseecom_enable_clk(CLK_QSEE);
  5717. if (ret)
  5718. return ret;
  5719. if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
  5720. ret = __qseecom_enable_clk(CLK_CE_DRV);
  5721. if (ret)
  5722. return ret;
  5723. }
  5724. ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
  5725. ireq, sizeof(struct qseecom_key_select_ireq),
  5726. &resp, sizeof(struct qseecom_command_scm_resp));
  5727. if (ret) {
  5728. if (ret == -EINVAL &&
  5729. resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
  5730. pr_debug("Max attempts to input password reached.\n");
  5731. ret = -ERANGE;
  5732. } else if (ret == -EINVAL &&
  5733. resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
  5734. pr_debug("Set Key operation under processing...\n");
  5735. ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
  5736. } else {
  5737. pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
  5738. ret);
  5739. ret = -EFAULT;
  5740. }
  5741. goto set_key_exit;
  5742. }
  5743. switch (resp.result) {
  5744. case QSEOS_RESULT_SUCCESS:
  5745. break;
  5746. case QSEOS_RESULT_INCOMPLETE:
  5747. ret = __qseecom_process_incomplete_cmd(data, &resp);
  5748. if (ret) {
  5749. pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
  5750. resp.result);
  5751. if (resp.result ==
  5752. QSEOS_RESULT_FAIL_PENDING_OPERATION) {
  5753. pr_debug("Set Key operation under processing...\n");
  5754. ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
  5755. }
  5756. if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
  5757. pr_debug("Max attempts to input password reached.\n");
  5758. ret = -ERANGE;
  5759. }
  5760. }
  5761. break;
  5762. case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
  5763. pr_debug("Max attempts to input password reached.\n");
  5764. ret = -ERANGE;
  5765. break;
  5766. case QSEOS_RESULT_FAIL_PENDING_OPERATION:
  5767. pr_debug("Set Key operation under processing...\n");
  5768. ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
  5769. break;
  5770. case QSEOS_RESULT_FAILURE:
  5771. default:
  5772. pr_err("Set key scm call failed resp.result %d\n", resp.result);
  5773. ret = -EINVAL;
  5774. break;
  5775. }
  5776. set_key_exit:
  5777. __qseecom_disable_clk(CLK_QSEE);
  5778. if (qseecom.qsee.instance != qseecom.ce_drv.instance)
  5779. __qseecom_disable_clk(CLK_CE_DRV);
  5780. return ret;
  5781. }
/*
 * __qseecom_update_current_key_user_info() - update key user info in TZ.
 *
 * Validates @usage, holds the QSEE clocks across the scm call, and issues
 * the prepared key-userinfo-update request in @ireq.  A pending-operation
 * outcome (scm -EINVAL with QSEOS_RESULT_FAIL_PENDING_OPERATION, or that
 * result code directly) is returned as the positive value
 * QSEOS_RESULT_FAIL_PENDING_OPERATION so callers can retry.  An
 * INCOMPLETE response is finished via __qseecom_process_incomplete_cmd().
 *
 * Return: 0 on success, QSEOS_RESULT_FAIL_PENDING_OPERATION (positive)
 * to retry, -EFAULT on scm failure, -EINVAL on an unexpected TZ result.
 */
static int __qseecom_update_current_key_user_info(
		struct qseecom_dev_handle *data,
		enum qseecom_key_management_usage_type usage,
		struct qseecom_key_userinfo_update_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
		&resp, sizeof(struct qseecom_command_scm_resp));
	if (ret) {
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			/*
			 * Pending: fall through to the switch below, which
			 * also maps this result to the retry code.
			 */
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		} else {
			pr_err("scm call to update key userinfo failed: %d\n",
					ret);
			__qseecom_disable_clk(CLK_QSEE);
			return -EFAULT;
		}
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		/* Pending takes precedence over the incomplete-cmd status */
		if (resp.result ==
			QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		}
		if (ret)
			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
		break;
	case QSEOS_RESULT_FAIL_PENDING_OPERATION:
		pr_debug("Update Key operation under processing...\n");
		ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("Set key scm call failed resp.result %d\n", resp.result);
		ret = -EINVAL;
		break;
	}

	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
  5839. static int qseecom_enable_ice_setup(int usage)
  5840. {
  5841. int ret = 0;
  5842. if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
  5843. ret = qcom_ice_setup_ice_hw("ufs", true);
  5844. else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
  5845. ret = qcom_ice_setup_ice_hw("sdcc", true);
  5846. return ret;
  5847. }
  5848. static int qseecom_disable_ice_setup(int usage)
  5849. {
  5850. int ret = 0;
  5851. if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
  5852. ret = qcom_ice_setup_ice_hw("ufs", false);
  5853. else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
  5854. ret = qcom_ice_setup_ice_hw("sdcc", false);
  5855. return ret;
  5856. }
  5857. static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
  5858. {
  5859. struct qseecom_ce_info_use *pce_info_use, *p;
  5860. int total = 0;
  5861. int i;
  5862. switch (usage) {
  5863. case QSEOS_KM_USAGE_DISK_ENCRYPTION:
  5864. case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
  5865. case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
  5866. p = qseecom.ce_info.fde;
  5867. total = qseecom.ce_info.num_fde;
  5868. break;
  5869. case QSEOS_KM_USAGE_FILE_ENCRYPTION:
  5870. p = qseecom.ce_info.pfe;
  5871. total = qseecom.ce_info.num_pfe;
  5872. break;
  5873. default:
  5874. pr_err("unsupported usage %d\n", usage);
  5875. return -EINVAL;
  5876. }
  5877. pce_info_use = NULL;
  5878. for (i = 0; i < total; i++) {
  5879. if (p->unit_num == unit) {
  5880. pce_info_use = p;
  5881. break;
  5882. }
  5883. p++;
  5884. }
  5885. if (!pce_info_use) {
  5886. pr_err("can not find %d\n", unit);
  5887. return -EINVAL;
  5888. }
  5889. return pce_info_use->num_ce_pipe_entries;
  5890. }
  5891. static int qseecom_create_key(struct qseecom_dev_handle *data,
  5892. void __user *argp)
  5893. {
  5894. int i;
  5895. uint32_t *ce_hw = NULL;
  5896. uint32_t pipe = 0;
  5897. int ret = 0;
  5898. uint32_t flags = 0;
  5899. struct qseecom_create_key_req create_key_req;
  5900. struct qseecom_key_generate_ireq generate_key_ireq;
  5901. struct qseecom_key_select_ireq set_key_ireq;
  5902. uint32_t entries = 0;
  5903. ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
  5904. if (ret) {
  5905. pr_err("copy_from_user failed\n");
  5906. return ret;
  5907. }
  5908. if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
  5909. create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
  5910. pr_err("unsupported usage %d\n", create_key_req.usage);
  5911. ret = -EFAULT;
  5912. return ret;
  5913. }
  5914. entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
  5915. create_key_req.usage);
  5916. if (entries <= 0) {
  5917. pr_err("no ce instance for usage %d instance %d\n",
  5918. DEFAULT_CE_INFO_UNIT, create_key_req.usage);
  5919. ret = -EINVAL;
  5920. return ret;
  5921. }
  5922. ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
  5923. if (!ce_hw) {
  5924. ret = -ENOMEM;
  5925. return ret;
  5926. }
  5927. ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
  5928. DEFAULT_CE_INFO_UNIT);
  5929. if (ret) {
  5930. pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
  5931. ret = -EINVAL;
  5932. goto free_buf;
  5933. }
  5934. if (qseecom.fde_key_size)
  5935. flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
  5936. else
  5937. flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
  5938. if (qseecom.enable_key_wrap_in_ks)
  5939. flags |= ENABLE_KEY_WRAP_IN_KS;
  5940. generate_key_ireq.flags = flags;
  5941. generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
  5942. memset((void *)generate_key_ireq.key_id,
  5943. 0, QSEECOM_KEY_ID_SIZE);
  5944. memset((void *)generate_key_ireq.hash32,
  5945. 0, QSEECOM_HASH_SIZE);
  5946. memcpy((void *)generate_key_ireq.key_id,
  5947. (void *)key_id_array[create_key_req.usage].desc,
  5948. QSEECOM_KEY_ID_SIZE);
  5949. memcpy((void *)generate_key_ireq.hash32,
  5950. (void *)create_key_req.hash32,
  5951. QSEECOM_HASH_SIZE);
  5952. ret = __qseecom_generate_and_save_key(data,
  5953. create_key_req.usage, &generate_key_ireq);
  5954. if (ret) {
  5955. pr_err("Failed to generate key on storage: %d\n", ret);
  5956. goto free_buf;
  5957. }
  5958. for (i = 0; i < entries; i++) {
  5959. set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
  5960. if (create_key_req.usage ==
  5961. QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
  5962. set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
  5963. set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
  5964. } else if (create_key_req.usage ==
  5965. QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
  5966. set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
  5967. set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
  5968. } else {
  5969. set_key_ireq.ce = ce_hw[i];
  5970. set_key_ireq.pipe = pipe;
  5971. }
  5972. set_key_ireq.flags = flags;
  5973. /* set both PIPE_ENC and PIPE_ENC_XTS*/
  5974. set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
  5975. memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
  5976. memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
  5977. memcpy((void *)set_key_ireq.key_id,
  5978. (void *)key_id_array[create_key_req.usage].desc,
  5979. QSEECOM_KEY_ID_SIZE);
  5980. memcpy((void *)set_key_ireq.hash32,
  5981. (void *)create_key_req.hash32,
  5982. QSEECOM_HASH_SIZE);
  5983. /*
  5984. * It will return false if it is GPCE based crypto instance or
  5985. * ICE is setup properly
  5986. */
  5987. ret = qseecom_enable_ice_setup(create_key_req.usage);
  5988. if (ret)
  5989. goto free_buf;
  5990. do {
  5991. ret = __qseecom_set_clear_ce_key(data,
  5992. create_key_req.usage,
  5993. &set_key_ireq);
  5994. /*
  5995. * wait a little before calling scm again to let other
  5996. * processes run
  5997. */
  5998. if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
  5999. msleep(50);
  6000. } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
  6001. qseecom_disable_ice_setup(create_key_req.usage);
  6002. if (ret) {
  6003. pr_err("Failed to create key: pipe %d, ce %d: %d\n",
  6004. pipe, ce_hw[i], ret);
  6005. goto free_buf;
  6006. } else {
  6007. pr_err("Set the key successfully\n");
  6008. if ((create_key_req.usage ==
  6009. QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
  6010. (create_key_req.usage ==
  6011. QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
  6012. goto free_buf;
  6013. }
  6014. }
  6015. free_buf:
  6016. kfree_sensitive(ce_hw);
  6017. return ret;
  6018. }
  6019. static int qseecom_wipe_key(struct qseecom_dev_handle *data,
  6020. void __user *argp)
  6021. {
  6022. uint32_t *ce_hw = NULL;
  6023. uint32_t pipe = 0;
  6024. int ret = 0;
  6025. uint32_t flags = 0;
  6026. int i, j;
  6027. struct qseecom_wipe_key_req wipe_key_req;
  6028. struct qseecom_key_delete_ireq delete_key_ireq;
  6029. struct qseecom_key_select_ireq clear_key_ireq;
  6030. uint32_t entries = 0;
  6031. ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
  6032. if (ret) {
  6033. pr_err("copy_from_user failed\n");
  6034. return ret;
  6035. }
  6036. if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
  6037. wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
  6038. pr_err("unsupported usage %d\n", wipe_key_req.usage);
  6039. ret = -EFAULT;
  6040. return ret;
  6041. }
  6042. entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
  6043. wipe_key_req.usage);
  6044. if (entries <= 0) {
  6045. pr_err("no ce instance for usage %d instance %d\n",
  6046. DEFAULT_CE_INFO_UNIT, wipe_key_req.usage);
  6047. ret = -EINVAL;
  6048. return ret;
  6049. }
  6050. ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
  6051. if (!ce_hw) {
  6052. ret = -ENOMEM;
  6053. return ret;
  6054. }
  6055. ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
  6056. DEFAULT_CE_INFO_UNIT);
  6057. if (ret) {
  6058. pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
  6059. ret = -EINVAL;
  6060. goto free_buf;
  6061. }
  6062. if (wipe_key_req.wipe_key_flag) {
  6063. delete_key_ireq.flags = flags;
  6064. delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
  6065. memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
  6066. memcpy((void *)delete_key_ireq.key_id,
  6067. (void *)key_id_array[wipe_key_req.usage].desc,
  6068. QSEECOM_KEY_ID_SIZE);
  6069. memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
  6070. ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
  6071. &delete_key_ireq);
  6072. if (ret) {
  6073. pr_err("Failed to delete key from ssd storage: %d\n",
  6074. ret);
  6075. ret = -EFAULT;
  6076. goto free_buf;
  6077. }
  6078. }
  6079. for (j = 0; j < entries; j++) {
  6080. clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
  6081. if (wipe_key_req.usage ==
  6082. QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
  6083. clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
  6084. clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
  6085. } else if (wipe_key_req.usage ==
  6086. QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
  6087. clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
  6088. clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
  6089. } else {
  6090. clear_key_ireq.ce = ce_hw[j];
  6091. clear_key_ireq.pipe = pipe;
  6092. }
  6093. clear_key_ireq.flags = flags;
  6094. clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
  6095. for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
  6096. clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
  6097. memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
  6098. /*
  6099. * It will return false if it is GPCE based crypto instance or
  6100. * ICE is setup properly
  6101. */
  6102. ret = qseecom_enable_ice_setup(wipe_key_req.usage);
  6103. if (ret)
  6104. goto free_buf;
  6105. ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
  6106. &clear_key_ireq);
  6107. qseecom_disable_ice_setup(wipe_key_req.usage);
  6108. if (ret) {
  6109. pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
  6110. pipe, ce_hw[j], ret);
  6111. ret = -EFAULT;
  6112. goto free_buf;
  6113. }
  6114. }
  6115. free_buf:
  6116. kfree_sensitive(ce_hw);
  6117. return ret;
  6118. }
  6119. static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
  6120. void __user *argp)
  6121. {
  6122. int ret = 0;
  6123. uint32_t flags = 0;
  6124. struct qseecom_update_key_userinfo_req update_key_req;
  6125. struct qseecom_key_userinfo_update_ireq ireq;
  6126. ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
  6127. if (ret) {
  6128. pr_err("copy_from_user failed\n");
  6129. return ret;
  6130. }
  6131. if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
  6132. update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
  6133. pr_err("Error:: unsupported usage %d\n", update_key_req.usage);
  6134. return -EFAULT;
  6135. }
  6136. ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;
  6137. if (qseecom.fde_key_size)
  6138. flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
  6139. else
  6140. flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
  6141. ireq.flags = flags;
  6142. memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
  6143. memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
  6144. memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
  6145. memcpy((void *)ireq.key_id,
  6146. (void *)key_id_array[update_key_req.usage].desc,
  6147. QSEECOM_KEY_ID_SIZE);
  6148. memcpy((void *)ireq.current_hash32,
  6149. (void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
  6150. memcpy((void *)ireq.new_hash32,
  6151. (void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);
  6152. do {
  6153. ret = __qseecom_update_current_key_user_info(data,
  6154. update_key_req.usage,
  6155. &ireq);
  6156. /*
  6157. * wait a little before calling scm again to let other
  6158. * processes run
  6159. */
  6160. if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
  6161. msleep(50);
  6162. } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
  6163. if (ret) {
  6164. pr_err("Failed to update key info: %d\n", ret);
  6165. return ret;
  6166. }
  6167. return ret;
  6168. }
  6169. static int qseecom_is_es_activated(void __user *argp)
  6170. {
  6171. struct qseecom_is_es_activated_req req = {0};
  6172. struct qseecom_command_scm_resp resp;
  6173. int ret;
  6174. if (qseecom.qsee_version < QSEE_VERSION_04) {
  6175. pr_err("invalid qsee version\n");
  6176. return -ENODEV;
  6177. }
  6178. if (argp == NULL) {
  6179. pr_err("arg is null\n");
  6180. return -EINVAL;
  6181. }
  6182. ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
  6183. &req, sizeof(req), &resp, sizeof(resp));
  6184. if (ret) {
  6185. pr_err("scm_call failed\n");
  6186. return ret;
  6187. }
  6188. req.is_activated = resp.result;
  6189. ret = copy_to_user(argp, &req, sizeof(req));
  6190. if (ret) {
  6191. pr_err("copy_to_user failed\n");
  6192. return ret;
  6193. }
  6194. return 0;
  6195. }
  6196. static int qseecom_save_partition_hash(void __user *argp)
  6197. {
  6198. struct qseecom_save_partition_hash_req req;
  6199. struct qseecom_command_scm_resp resp;
  6200. int ret;
  6201. memset(&resp, 0x00, sizeof(resp));
  6202. if (qseecom.qsee_version < QSEE_VERSION_04) {
  6203. pr_err("invalid qsee version\n");
  6204. return -ENODEV;
  6205. }
  6206. if (argp == NULL) {
  6207. pr_err("arg is null\n");
  6208. return -EINVAL;
  6209. }
  6210. ret = copy_from_user(&req, argp, sizeof(req));
  6211. if (ret) {
  6212. pr_err("copy_from_user failed\n");
  6213. return ret;
  6214. }
  6215. ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
  6216. (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
  6217. if (ret) {
  6218. pr_err("qseecom_scm_call failed\n");
  6219. return ret;
  6220. }
  6221. return 0;
  6222. }
  6223. static int qseecom_mdtp_cipher_dip(void __user *argp)
  6224. {
  6225. struct qseecom_mdtp_cipher_dip_req req;
  6226. u32 tzbuflenin, tzbuflenout;
  6227. char *tzbufin = NULL, *tzbufout = NULL;
  6228. struct qseecom_scm_desc desc = {0};
  6229. int ret;
  6230. phys_addr_t pain, paout;
  6231. struct qtee_shm shmin = {0}, shmout = {0};
  6232. do {
  6233. /* Copy the parameters from userspace */
  6234. if (argp == NULL) {
  6235. pr_err("arg is null\n");
  6236. ret = -EINVAL;
  6237. break;
  6238. }
  6239. ret = copy_from_user(&req, argp, sizeof(req));
  6240. if (ret) {
  6241. pr_err("copy_from_user failed, ret= %d\n", ret);
  6242. break;
  6243. }
  6244. if (req.in_buf == NULL || req.out_buf == NULL ||
  6245. req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
  6246. req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
  6247. req.direction > 1) {
  6248. pr_err("invalid parameters\n");
  6249. ret = -EINVAL;
  6250. break;
  6251. }
  6252. /* Copy the input buffer from userspace to kernel space */
  6253. tzbuflenin = PAGE_ALIGN(req.in_buf_size);
  6254. tzbufin = __qseecom_alloc_tzbuf(tzbuflenin, &pain, &shmin);
  6255. if (!tzbufin) {
  6256. pr_err("error allocating in buffer\n");
  6257. ret = -ENOMEM;
  6258. break;
  6259. }
  6260. ret = copy_from_user(tzbufin, (void __user *)req.in_buf,
  6261. req.in_buf_size);
  6262. if (ret) {
  6263. pr_err("copy_from_user failed, ret=%d\n", ret);
  6264. break;
  6265. }
  6266. qtee_shmbridge_flush_shm_buf(&shmin);
  6267. /* Prepare the output buffer in kernel space */
  6268. tzbuflenout = PAGE_ALIGN(req.out_buf_size);
  6269. tzbufout = __qseecom_alloc_tzbuf(tzbuflenout, &paout, &shmout);
  6270. if (!tzbufout) {
  6271. pr_err("error allocating out buffer\n");
  6272. ret = -ENOMEM;
  6273. break;
  6274. }
  6275. qtee_shmbridge_flush_shm_buf(&shmout);
  6276. /* Send the command to TZ */
  6277. desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
  6278. desc.args[0] = pain;
  6279. desc.args[1] = req.in_buf_size;
  6280. desc.args[2] = paout;
  6281. desc.args[3] = req.out_buf_size;
  6282. desc.args[4] = req.direction;
  6283. ret = __qseecom_enable_clk(CLK_QSEE);
  6284. if (ret)
  6285. break;
  6286. ret = __qseecom_scm_call2_locked(TZ_MDTP_CIPHER_DIP_ID, &desc);
  6287. __qseecom_disable_clk(CLK_QSEE);
  6288. if (ret) {
  6289. pr_err("failed for SCM_SVC_MDTP, ret=%d\n",
  6290. ret);
  6291. break;
  6292. }
  6293. /* Copy the output buffer from kernel space to userspace */
  6294. qtee_shmbridge_flush_shm_buf(&shmout);
  6295. ret = copy_to_user((void __user *)req.out_buf,
  6296. tzbufout, req.out_buf_size);
  6297. if (ret) {
  6298. pr_err("copy_to_user failed, ret=%d\n", ret);
  6299. break;
  6300. }
  6301. } while (0);
  6302. __qseecom_free_tzbuf(&shmin);
  6303. __qseecom_free_tzbuf(&shmout);
  6304. return ret;
  6305. }
/*
 * Sanity-check a QTEEC request against the client's shared buffer:
 * cmd and response buffers must be non-NULL, non-empty, lie entirely
 * inside the shared buffer, and none of the pointer/length arithmetic
 * may wrap. Returns 0 when valid, negative errno otherwise.
 */
static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
			struct qseecom_qteec_req *req)
{
	if (!data || !data->client.sb_virt) {
		pr_err("Client or client buf is not initialized\n");
		return -EINVAL;
	}

	if (data->type != QSEECOM_CLIENT_APP)
		return -EFAULT;

	/* Guard the req_len + resp_len additions below against wrap-around */
	if (req->req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if (req->req_len + req->resp_len > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
			(req->req_len + req->resp_len), data->client.sb_length);
		return -ENOMEM;
	}

	if (req->req_ptr == NULL || req->resp_ptr == NULL) {
		pr_err("cmd buffer or response buffer is null\n");
		return -EINVAL;
	}

	/* Both user pointers must start inside the shared buffer */
	if (((uintptr_t)req->req_ptr <
			data->client.user_virt_sb_base) ||
		((uintptr_t)req->req_ptr >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}

	if (((uintptr_t)req->resp_ptr <
			data->client.user_virt_sb_base) ||
		((uintptr_t)req->resp_ptr >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}

	if ((req->req_len == 0) || (req->resp_len == 0)) {
		pr_err("cmd buf lengtgh/response buf length not valid\n");
		return -EINVAL;
	}

	/* ptr + len must not wrap the address space */
	if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
		pr_err("Integer overflow in req_len & req_ptr\n");
		return -EINVAL;
	}

	if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_ptr\n");
		return -EINVAL;
	}

	if (data->client.user_virt_sb_base >
		(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}

	/* ...and both buffers must end inside the shared buffer too */
	if ((((uintptr_t)req->req_ptr + req->req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
		data->client.sb_length)) ||
		(((uintptr_t)req->resp_ptr + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
		data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
/*
 * For a pre-allocated (secure-heap) fd: build a DMA-coherent descriptor
 * buffer holding the sg entry count followed by each entry's
 * {phys_addr, len}, and record the allocation on the client handle so
 * it can be freed after the TZ call completes.
 * Returns 0 on success, -ENOMEM on bad index or allocation failure.
 */
static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
			uint32_t fd_idx, struct sg_table *sg_ptr)
{
	struct scatterlist *sg = sg_ptr->sgl;
	struct qseecom_sg_entry *sg_entry;
	void *buf;
	uint i;
	size_t size;
	dma_addr_t coh_pmem;

	if (fd_idx >= MAX_ION_FD) {
		pr_err("fd_idx [%d] is invalid\n", fd_idx);
		return -ENOMEM;
	}
	/*
	 * Allocate a buffer, populate it with number of entry plus
	 * each sg entry's phy addr and length; then return the
	 * phy_addr of the buffer.
	 */
	size = sizeof(uint32_t) +
		sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
	/*
	 * NOTE(review): this rounds up to a page boundary but, unlike
	 * PAGE_ALIGN(), adds a whole extra page when size is already
	 * aligned — confirm whether the overallocation is intentional.
	 */
	size = (size + PAGE_SIZE) & PAGE_MASK;
	buf = dma_alloc_coherent(qseecom.dev,
			size, &coh_pmem, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Layout: [u32 nents][qseecom_sg_entry 0 .. nents-1] */
	*(uint32_t *)buf = sg_ptr->nents;
	sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
	for (i = 0; i < sg_ptr->nents; i++) {
		sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
		sg_entry->len = sg->length;
		sg_entry++;
		sg = sg_next(sg);
	}

	/* Remember the allocation for cleanup after the TZ command */
	data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
	data->client.sec_buf_fd[fd_idx].vbase = buf;
	data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
	data->client.sec_buf_fd[fd_idx].size = size;
	return 0;
}
/*
 * Walk the ion fds attached to a QTEEC modfd request and patch the
 * memref words inside the (already kernel-mapped) request buffer.
 *
 * cleanup == false: map each fd, validate its sg table, then either
 *   build a secure-heap descriptor and point the memref at it
 *   (*update == 0) or write the fd's dma address into *update;
 *   cache-clean the dmabuf and record sglist info for TZ.
 * cleanup == true: free any secure-heap descriptor built earlier for
 *   the fd, zero the memref, and cache-invalidate the dmabuf.
 *
 * Returns 0 on success. Note the err path collapses every failure,
 * including cache-operation errors, to -ENOMEM.
 */
static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
			struct qseecom_dev_handle *data, bool cleanup)
{
	int ret = 0;
	int i = 0;
	uint32_t *update;
	struct sg_table *sg_ptr = NULL;
	struct scatterlist *sg;
	struct qseecom_param_memref *memref;
	int ion_fd = -1;
	struct dma_buf *dmabuf = NULL;
	struct dma_buf_attachment *attach = NULL;

	if (req == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	for (i = 0; i < MAX_ION_FD; i++) {
		if (req->ifd_data[i].fd > 0) {
			ion_fd = req->ifd_data[i].fd;
			/* the memref must fit in the request at that offset */
			if ((req->req_len <
				sizeof(struct qseecom_param_memref)) ||
				(req->ifd_data[i].cmd_buf_offset >
				req->req_len -
				sizeof(struct qseecom_param_memref))) {
				pr_err("Invalid offset/req len 0x%x/0x%x\n",
					req->req_len,
					req->ifd_data[i].cmd_buf_offset);
				return -EINVAL;
			}
			/* req_ptr is a kernel virtual address by now */
			update = (uint32_t *)((char *) req->req_ptr +
				req->ifd_data[i].cmd_buf_offset);
			if (!update) {
				pr_err("update pointer is NULL\n");
				return -EINVAL;
			}
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		ret = qseecom_dmabuf_map(ion_fd, &sg_ptr, &attach, &dmabuf);
		if (ret) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		sg = sg_ptr->sgl;
		if (sg == NULL) {
			pr_err("sg is NULL\n");
			goto err;
		}
		if ((sg_ptr->nents == 0) || (sg->length == 0)) {
			pr_err("Num of scat entr (%d)or length(%d) invalid\n",
					sg_ptr->nents, sg->length);
			goto err;
		}
		/* clean up buf for pre-allocated fd */
		if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
			(*update)) {
			if (data->client.sec_buf_fd[i].vbase)
				dma_free_coherent(qseecom.dev,
					data->client.sec_buf_fd[i].size,
					data->client.sec_buf_fd[i].vbase,
					data->client.sec_buf_fd[i].pbase);
			memset((void *)update, 0,
				sizeof(struct qseecom_param_memref));
			memset(&(data->client.sec_buf_fd[i]), 0,
				sizeof(struct qseecom_sec_buf_fd_info));
			goto clean;
		}

		if (*update == 0) {
			/* update buf for pre-allocated fd from secure heap*/
			ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
				sg_ptr);
			if (ret) {
				pr_err("Failed to handle buf for fd[%d]\n", i);
				goto err;
			}
			memref = (struct qseecom_param_memref *)update;
			memref->buffer =
				(uint32_t)(data->client.sec_buf_fd[i].pbase);
			memref->size =
				(uint32_t)(data->client.sec_buf_fd[i].size);
		} else {
			/* update buf for fd from non-secure qseecom heap */
			if (sg_ptr->nents != 1) {
				pr_err("Num of scat entr (%d) invalid\n",
					sg_ptr->nents);
				goto err;
			}
			if (cleanup)
				*update = 0;
			else
				*update = (uint32_t)sg_dma_address(sg_ptr->sgl);
		}
clean:
		if (cleanup) {
			ret = qseecom_dmabuf_cache_operations(dmabuf,
					QSEECOM_CACHE_INVALIDATE);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			ret = qseecom_dmabuf_cache_operations(dmabuf,
					QSEECOM_CACHE_CLEAN);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			/* record sg info so TZ can locate the buffer pieces */
			data->sglistinfo_ptr[i].indexAndFlags =
				SGLISTINFO_SET_INDEX_FLAG(
				(sg_ptr->nents == 1), 0,
				req->ifd_data[i].cmd_buf_offset);
			data->sglistinfo_ptr[i].sizeOrCount =
				(sg_ptr->nents == 1) ?
				sg->length : sg_ptr->nents;
			data->sglist_cnt = i + 1;
		}
		/* unmap the dmabuf */
		qseecom_dmabuf_unmap(sg_ptr, attach, dmabuf);
		sg_ptr = NULL;
		dmabuf = NULL;
		attach = NULL;
	}
	return ret;
err:
	if (!IS_ERR_OR_NULL(sg_ptr)) {
		qseecom_dmabuf_unmap(sg_ptr, attach, dmabuf);
		MAKE_NULL(sg_ptr, attach, dmabuf);
	}
	return -ENOMEM;
}
/*
 * Common worker for QTEEC open-session / close-session / request-
 * cancellation: validates the request against the client's shared
 * buffer, translates user pointers to kernel/physical addresses,
 * packs the 32-bit or 64-bit ireq, issues the SCM call, and handles
 * incomplete / reentrancy responses. For open-session and
 * cancellation the request buffer is patched (memrefs) before the
 * call and restored on exit. Returns 0 on success or negative errno.
 */
static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
				struct qseecom_qteec_req *req, uint32_t cmd_id)
{
	struct qseecom_command_scm_resp resp;
	struct qseecom_qteec_ireq ireq;
	struct qseecom_qteec_64bit_ireq ireq_64bit;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	unsigned long flags;
	int ret = 0;
	int ret2 = 0;
	uint32_t reqd_len_sb_in = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;
	void *req_ptr = NULL;
	void *resp_ptr = NULL;

	ret = __qseecom_qteec_validate_msg(data, req);
	if (ret)
		return ret;

	/* keep the original user-virtual addresses for phys translation */
	req_ptr = req->req_ptr;
	resp_ptr = req->resp_ptr;

	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
			list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			(!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}
	if (__qseecom_find_pending_unload_app(data->client.app_id,
						data->client.app_name)) {
		pr_err("app %d (%s) unload is pending\n",
			data->client.app_id, data->client.app_name);
		return -ENOENT;
	}

	/* from here req->req_ptr/resp_ptr are kernel virtual addresses */
	req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req->req_ptr);
	req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req->resp_ptr);

	/* open-session and cancellation requests may carry ion fds */
	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
		(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
		ret = __qseecom_update_qteec_req_buf(
			(struct qseecom_qteec_modfd_req *)req, data, false);
		if (ret)
			return ret;
	}

	if (qseecom.qsee_version < QSEE_VERSION_40) {
		ireq.app_id = data->client.app_id;
		ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq.req_len = req->req_len;
		ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq.resp_len = req->resp_len;
		ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
		ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		qtee_shmbridge_flush_shm_buf(&data->sglistinfo_shm);
		cmd_buf = (void *)&ireq;
		cmd_len = sizeof(struct qseecom_qteec_ireq);
	} else {
		ireq_64bit.app_id = data->client.app_id;
		ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq_64bit.req_len = req->req_len;
		ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq_64bit.resp_len = req->resp_len;
		/* a 32-bit TA cannot address physical memory above 4 GB */
		if ((data->client.app_arch == ELFCLASS32) &&
			((ireq_64bit.req_ptr >=
				PHY_ADDR_4G - ireq_64bit.req_len) ||
			(ireq_64bit.resp_ptr >=
				PHY_ADDR_4G - ireq_64bit.resp_len))){
			pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
				data->client.app_name, data->client.app_id);
			pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
				ireq_64bit.req_ptr, ireq_64bit.req_len,
				ireq_64bit.resp_ptr, ireq_64bit.resp_len);
			return -EFAULT;
		}
		ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
		ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		qtee_shmbridge_flush_shm_buf(&data->sglistinfo_shm);
		cmd_buf = (void *)&ireq_64bit;
		cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
	}
	/* the first u32 of the ireq selects the QSEE command */
	if (qseecom.whitelist_support
		&& cmd_id == QSEOS_TEE_OPEN_SESSION)
		*(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
	else
		*(uint32_t *)cmd_buf = cmd_id;

	reqd_len_sb_in = req->req_len + req->resp_len;
	ret = qseecom_dmabuf_cache_operations(data->client.dmabuf,
					QSEECOM_CACHE_CLEAN);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
		goto exit;
	}
	ret = qseecom_dmabuf_cache_operations(data->client.dmabuf,
					QSEECOM_CACHE_INVALIDATE);
	if (ret) {
		/*
		 * NOTE(review): returns directly, skipping the exit-path
		 * request-buffer restore — confirm this is intended.
		 */
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
		if (ret)
			goto exit;
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
						ret);
				goto exit;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
						resp.result);
				ret = -EINVAL;
				goto exit;
			}
		}
	}
exit:
	/* undo the memref patching done before the call */
	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
		(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
		ret2 = __qseecom_update_qteec_req_buf(
			(struct qseecom_qteec_modfd_req *)req, data, true);
		if (ret2)
			return ret2;
	}
	return ret;
}
  6693. static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
  6694. void __user *argp)
  6695. {
  6696. struct qseecom_qteec_modfd_req req;
  6697. int ret = 0;
  6698. ret = copy_from_user(&req, argp,
  6699. sizeof(struct qseecom_qteec_modfd_req));
  6700. if (ret) {
  6701. pr_err("copy_from_user failed\n");
  6702. return ret;
  6703. }
  6704. ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
  6705. QSEOS_TEE_OPEN_SESSION);
  6706. return ret;
  6707. }
  6708. static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
  6709. void __user *argp)
  6710. {
  6711. struct qseecom_qteec_req req;
  6712. int ret = 0;
  6713. ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
  6714. if (ret) {
  6715. pr_err("copy_from_user failed\n");
  6716. return ret;
  6717. }
  6718. ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
  6719. return ret;
  6720. }
  6721. static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
  6722. void __user *argp)
  6723. {
  6724. struct qseecom_qteec_modfd_req req;
  6725. struct qseecom_command_scm_resp resp;
  6726. struct qseecom_qteec_ireq ireq;
  6727. struct qseecom_qteec_64bit_ireq ireq_64bit;
  6728. struct qseecom_registered_app_list *ptr_app;
  6729. bool found_app = false;
  6730. unsigned long flags;
  6731. int ret = 0;
  6732. int i = 0;
  6733. uint32_t reqd_len_sb_in = 0;
  6734. void *cmd_buf = NULL;
  6735. size_t cmd_len;
  6736. struct sglist_info *table = data->sglistinfo_ptr;
  6737. void *req_ptr = NULL;
  6738. void *resp_ptr = NULL;
  6739. ret = copy_from_user(&req, argp,
  6740. sizeof(struct qseecom_qteec_modfd_req));
  6741. if (ret) {
  6742. pr_err("copy_from_user failed\n");
  6743. return ret;
  6744. }
  6745. ret = __qseecom_qteec_validate_msg(data,
  6746. (struct qseecom_qteec_req *)(&req));
  6747. if (ret)
  6748. return ret;
  6749. req_ptr = req.req_ptr;
  6750. resp_ptr = req.resp_ptr;
  6751. /* find app_id & img_name from list */
  6752. spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
  6753. list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
  6754. list) {
  6755. if ((ptr_app->app_id == data->client.app_id) &&
  6756. (!strcmp(ptr_app->app_name, data->client.app_name))) {
  6757. found_app = true;
  6758. break;
  6759. }
  6760. }
  6761. spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
  6762. if (!found_app) {
  6763. pr_err("app_id %d (%s) is not found\n", data->client.app_id,
  6764. (char *)data->client.app_name);
  6765. return -ENOENT;
  6766. }
  6767. if (__qseecom_find_pending_unload_app(data->client.app_id,
  6768. data->client.app_name)) {
  6769. pr_err("app %d (%s) unload is pending\n",
  6770. data->client.app_id, data->client.app_name);
  6771. return -ENOENT;
  6772. }
  6773. /* validate offsets */
  6774. for (i = 0; i < MAX_ION_FD; i++) {
  6775. if (req.ifd_data[i].fd) {
  6776. if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
  6777. return -EINVAL;
  6778. }
  6779. }
  6780. req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
  6781. (uintptr_t)req.req_ptr);
  6782. req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
  6783. (uintptr_t)req.resp_ptr);
  6784. ret = __qseecom_update_qteec_req_buf(&req, data, false);
  6785. if (ret)
  6786. return ret;
  6787. if (qseecom.qsee_version < QSEE_VERSION_40) {
  6788. ireq.app_id = data->client.app_id;
  6789. ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
  6790. (uintptr_t)req_ptr);
  6791. ireq.req_len = req.req_len;
  6792. ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
  6793. (uintptr_t)resp_ptr);
  6794. ireq.resp_len = req.resp_len;
  6795. cmd_buf = (void *)&ireq;
  6796. cmd_len = sizeof(struct qseecom_qteec_ireq);
  6797. ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
  6798. ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
  6799. qtee_shmbridge_flush_shm_buf(&data->sglistinfo_shm);
  6800. } else {
  6801. ireq_64bit.app_id = data->client.app_id;
  6802. ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
  6803. (uintptr_t)req_ptr);
  6804. ireq_64bit.req_len = req.req_len;
  6805. ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
  6806. (uintptr_t)resp_ptr);
  6807. ireq_64bit.resp_len = req.resp_len;
  6808. cmd_buf = (void *)&ireq_64bit;
  6809. cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
  6810. ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
  6811. ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
  6812. qtee_shmbridge_flush_shm_buf(&data->sglistinfo_shm);
  6813. }
  6814. reqd_len_sb_in = req.req_len + req.resp_len;
  6815. if (qseecom.whitelist_support)
  6816. *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
  6817. else
  6818. *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;
  6819. ret = qseecom_dmabuf_cache_operations(data->client.dmabuf,
  6820. QSEECOM_CACHE_CLEAN);
  6821. if (ret) {
  6822. pr_err("cache operation failed %d\n", ret);
  6823. return ret;
  6824. }
  6825. __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
  6826. ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
  6827. cmd_buf, cmd_len,
  6828. &resp, sizeof(resp));
  6829. if (ret) {
  6830. pr_err("scm_call() failed with err: %d (app_id = %d)\n",
  6831. ret, data->client.app_id);
  6832. return ret;
  6833. }
  6834. ret = qseecom_dmabuf_cache_operations(data->client.dmabuf,
  6835. QSEECOM_CACHE_INVALIDATE);
  6836. if (ret) {
  6837. pr_err("cache operation failed %d\n", ret);
  6838. return ret;
  6839. }
  6840. if (qseecom.qsee_reentrancy_support) {
  6841. ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
  6842. } else {
  6843. if (resp.result == QSEOS_RESULT_INCOMPLETE) {
  6844. ret = __qseecom_process_incomplete_cmd(data, &resp);
  6845. if (ret) {
  6846. pr_err("process_incomplete_cmd failed err: %d\n",
  6847. ret);
  6848. return ret;
  6849. }
  6850. } else {
  6851. if (resp.result != QSEOS_RESULT_SUCCESS) {
  6852. pr_err("Response result %d not supported\n",
  6853. resp.result);
  6854. ret = -EINVAL;
  6855. }
  6856. }
  6857. }
  6858. ret = __qseecom_update_qteec_req_buf(&req, data, true);
  6859. if (ret)
  6860. return ret;
  6861. return 0;
  6862. }
  6863. static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
  6864. void __user *argp)
  6865. {
  6866. struct qseecom_qteec_modfd_req req;
  6867. int ret = 0;
  6868. ret = copy_from_user(&req, argp,
  6869. sizeof(struct qseecom_qteec_modfd_req));
  6870. if (ret) {
  6871. pr_err("copy_from_user failed\n");
  6872. return ret;
  6873. }
  6874. ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
  6875. QSEOS_TEE_REQUEST_CANCELLATION);
  6876. return ret;
  6877. }
  6878. static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
  6879. {
  6880. if (data->sglist_cnt) {
  6881. memset(data->sglistinfo_ptr, 0,
  6882. SGLISTINFO_TABLE_SIZE);
  6883. data->sglist_cnt = 0;
  6884. }
  6885. }
/*
 * qseecom_ioctl() - dispatcher for every qseecom user-space request.
 * @file: device file; private_data holds the per-fd qseecom handle.
 * @cmd:  QSEECOM_* ioctl command code.
 * @arg:  user pointer to the command-specific request structure.
 *
 * Each case validates that the fd is in the right role (generic, client
 * app, or listener service), takes the matching lock (app_access_lock for
 * TZ-app traffic, listener_access_lock for listener traffic), brackets the
 * work with data->ioctl_count so the abort path can wait for in-flight
 * ioctls, then calls the per-command helper.
 *
 * Returns 0 on success or a negative errno.
 */
long qseecom_ioctl(struct file *file,
			unsigned int cmd, unsigned long arg)
{
	int ret = 0;
	struct qseecom_dev_handle *data = file->private_data;
	void __user *argp = (void __user *) arg;
	bool perf_enabled = false;

	if (!data) {
		pr_err("Invalid/uninitialized device handle\n");
		return -EINVAL;
	}
	if (data->abort) {
		pr_err("Aborting qseecom driver\n");
		return -ENODEV;
	}
	/*
	 * Do not kick the listener-unregister kthread for the commands a
	 * listener issues while servicing a request, or we could tear the
	 * listener down underneath its own response path.
	 */
	if (cmd != QSEECOM_IOCTL_RECEIVE_REQ &&
		cmd != QSEECOM_IOCTL_SEND_RESP_REQ &&
		cmd != QSEECOM_IOCTL_SEND_MODFD_RESP &&
		cmd != QSEECOM_IOCTL_SEND_MODFD_RESP_64)
		__wakeup_unregister_listener_kthread();
	__wakeup_unload_app_kthread();
	switch (cmd) {
	case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
		/* Only a fresh (generic) fd may become a listener. */
		if (data->type != QSEECOM_GENERIC) {
			pr_err("reg lstnr req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		pr_debug("ioctl register_listener_req()\n");
		mutex_lock(&listener_access_lock);
		atomic_inc(&data->ioctl_count);
		data->type = QSEECOM_LISTENER_SERVICE;
		ret = qseecom_register_listener(data, argp);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&listener_access_lock);
		if (ret)
			pr_err("failed qseecom_register_listener: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
		if ((data->listener.id == 0) ||
			(data->type != QSEECOM_LISTENER_SERVICE)) {
			pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
						data->type, data->listener.id);
			ret = -EINVAL;
			break;
		}
		pr_debug("ioctl unregister_listener_req()\n");
		mutex_lock(&listener_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_unregister_listener(data);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&listener_access_lock);
		if (ret)
			pr_err("failed qseecom_unregister_listener: %d\n",
									ret);
		break;
	}
	case QSEECOM_IOCTL_SEND_CMD_REQ: {
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		if (qseecom.support_bus_scaling) {
			/* register bus bw in case the client doesn't do it */
			if (!data->mode) {
				mutex_lock(&qsee_bw_mutex);
				__qseecom_register_bus_bandwidth_needs(
								data, HIGH);
				mutex_unlock(&qsee_bw_mutex);
			}
			ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
			if (ret) {
				pr_err("Failed to set bw.\n");
				ret = -EINVAL;
				mutex_unlock(&app_access_lock);
				break;
			}
		}
		/*
		 * On targets where crypto clock is handled by HLOS,
		 * if clk_access_cnt is zero and perf_enabled is false,
		 * then the crypto clock was not enabled before sending cmd to
		 * tz, qseecom will enable the clock to avoid service failure.
		 */
		if (!qseecom.no_clock_support &&
			!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
			pr_debug("ce clock is not enabled!\n");
			ret = qseecom_perf_enable(data);
			if (ret) {
				pr_err("Failed to vote for clock with err %d\n",
						ret);
				mutex_unlock(&app_access_lock);
				ret = -EINVAL;
				break;
			}
			perf_enabled = true;
		}
		atomic_inc(&data->ioctl_count);
		ret = qseecom_send_cmd(data, argp);
		if (qseecom.support_bus_scaling)
			__qseecom_add_bw_scale_down_timer(
				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		/* Drop the temporary clock vote taken above, if any. */
		if (perf_enabled) {
			qsee_disable_clock_vote(data, CLK_DFAB);
			qsee_disable_clock_vote(data, CLK_SFPB);
		}
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed qseecom_send_cmd: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
	case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
		/* Same flow as SEND_CMD_REQ, plus ion-fd address fixups. */
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		if (qseecom.support_bus_scaling) {
			if (!data->mode) {
				mutex_lock(&qsee_bw_mutex);
				__qseecom_register_bus_bandwidth_needs(
								data, HIGH);
				mutex_unlock(&qsee_bw_mutex);
			}
			ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
			if (ret) {
				pr_err("Failed to set bw.\n");
				mutex_unlock(&app_access_lock);
				ret = -EINVAL;
				break;
			}
		}
		/*
		 * On targets where crypto clock is handled by HLOS,
		 * if clk_access_cnt is zero and perf_enabled is false,
		 * then the crypto clock was not enabled before sending cmd to
		 * tz, qseecom will enable the clock to avoid service failure.
		 */
		if (!qseecom.no_clock_support &&
			!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
			pr_debug("ce clock is not enabled!\n");
			ret = qseecom_perf_enable(data);
			if (ret) {
				pr_err("Failed to vote for clock with err %d\n",
						ret);
				mutex_unlock(&app_access_lock);
				ret = -EINVAL;
				break;
			}
			perf_enabled = true;
		}
		atomic_inc(&data->ioctl_count);
		if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
			ret = qseecom_send_modfd_cmd(data, argp);
		else
			ret = qseecom_send_modfd_cmd_64(data, argp);
		if (qseecom.support_bus_scaling)
			__qseecom_add_bw_scale_down_timer(
				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		if (perf_enabled) {
			qsee_disable_clock_vote(data, CLK_DFAB);
			qsee_disable_clock_vote(data, CLK_SFPB);
		}
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed qseecom_send_cmd: %d\n", ret);
		/* Clear the sglist table built for this modfd command. */
		__qseecom_clean_data_sglistinfo(data);
		break;
	}
	case QSEECOM_IOCTL_RECEIVE_REQ: {
		/* Listener blocks here until TZ has a request for it. */
		if ((data->listener.id == 0) ||
			(data->type != QSEECOM_LISTENER_SERVICE)) {
			pr_err("receive req: invalid handle (%d), lid(%d)\n",
					data->type, data->listener.id);
			ret = -EINVAL;
			break;
		}
		atomic_inc(&data->ioctl_count);
		ret = qseecom_receive_req(data);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		/* -ERESTARTSYS just means a signal interrupted the wait. */
		if (ret && (ret != -ERESTARTSYS))
			pr_err("failed qseecom_receive_req: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_SEND_RESP_REQ: {
		if ((data->listener.id == 0) ||
			(data->type != QSEECOM_LISTENER_SERVICE)) {
			pr_err("send resp req: invalid handle (%d), lid(%d)\n",
					data->type, data->listener.id);
			ret = -EINVAL;
			break;
		}
		mutex_lock(&listener_access_lock);
		/* Reentrancy-capable QSEE uses a different resume path. */
		if (!qseecom.qsee_reentrancy_support)
			ret = qseecom_send_resp();
		else
			ret = qseecom_reentrancy_send_resp(data);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&listener_access_lock);
		if (ret)
			pr_err("failed qseecom_send_resp: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
		if ((data->type != QSEECOM_CLIENT_APP) &&
			(data->type != QSEECOM_GENERIC) &&
			(data->type != QSEECOM_SECURE_SERVICE)) {
			pr_err("set mem param req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_set_client_mem_param(data, argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed Qqseecom_set_mem_param request: %d\n",
								ret);
		break;
	}
	case QSEECOM_IOCTL_LOAD_APP_REQ: {
		if ((data->type != QSEECOM_GENERIC) &&
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("load app req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		/* The fd becomes a client-app handle from this point on. */
		data->type = QSEECOM_CLIENT_APP;
		pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_load_app(data, argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed load_app request: %d\n", ret);
		__wakeup_unload_app_kthread();
		break;
	}
	case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_unload_app(data, false);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed unload_app request: %d\n", ret);
		__wakeup_unload_app_kthread();
		break;
	}
	case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
		atomic_inc(&data->ioctl_count);
		ret = qseecom_get_qseos_version(data, argp);
		if (ret)
			pr_err("qseecom_get_qseos_version: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		break;
	}
	case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
		if (data->type != QSEECOM_GENERIC) {
			pr_err("load ext elf req: invalid client handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		/*
		 * External-ELF fds are single purpose: mark them released so
		 * the normal app teardown is skipped on close.
		 */
		data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_load_external_elf(data, argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed load_external_elf request: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
		if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
			pr_err("unload ext elf req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_unload_external_elf(data);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed unload_app request: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
		if ((data->type != QSEECOM_GENERIC) &&
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("app loaded query req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		data->type = QSEECOM_CLIENT_APP;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
		ret = qseecom_query_app_loaded(data, argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
		if (data->type != QSEECOM_GENERIC) {
			pr_err("send cmd svc req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		data->type = QSEECOM_SECURE_SERVICE;
		if (qseecom.qsee_version < QSEE_VERSION_03) {
			pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_send_service_cmd(data, argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_CREATE_KEY_REQ: {
		if (!(qseecom.support_pfe || qseecom.support_fde))
			pr_err("Features requiring key init not supported\n");
		if (data->type != QSEECOM_GENERIC) {
			pr_err("create key req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_05) {
			pr_err("Create Key feature unsupported: qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_create_key(data, argp);
		if (ret)
			pr_err("failed to create encryption key: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_WIPE_KEY_REQ: {
		if (!(qseecom.support_pfe || qseecom.support_fde))
			pr_err("Features requiring key init not supported\n");
		if (data->type != QSEECOM_GENERIC) {
			pr_err("wipe key req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_05) {
			pr_err("Wipe Key feature unsupported in qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_wipe_key(data, argp);
		if (ret)
			pr_err("failed to wipe encryption key: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
		if (!(qseecom.support_pfe || qseecom.support_fde))
			pr_err("Features requiring key init not supported\n");
		if (data->type != QSEECOM_GENERIC) {
			pr_err("update key req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_05) {
			pr_err("Update Key feature unsupported in qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_update_key_user_info(data, argp);
		if (ret)
			pr_err("failed to update key user info: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
		if (data->type != QSEECOM_GENERIC) {
			pr_err("save part hash req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_save_partition_hash(argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
		if (data->type != QSEECOM_GENERIC) {
			pr_err("ES activated req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_is_es_activated(argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
		if (data->type != QSEECOM_GENERIC) {
			pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_mdtp_cipher_dip(argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_SEND_MODFD_RESP:
	case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
		if ((data->listener.id == 0) ||
			(data->type != QSEECOM_LISTENER_SERVICE)) {
			pr_err("receive req: invalid handle (%d), lid(%d)\n",
					data->type, data->listener.id);
			ret = -EINVAL;
			break;
		}
		mutex_lock(&listener_access_lock);
		atomic_inc(&data->ioctl_count);
		if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
			ret = qseecom_send_modfd_resp(data, argp);
		else
			ret = qseecom_send_modfd_resp_64(data, argp);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&listener_access_lock);
		if (ret)
			pr_err("failed qseecom_send_mod_resp: %d\n", ret);
		__qseecom_clean_data_sglistinfo(data);
		break;
	}
	case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
		/* GP (GlobalPlatform TEE) calls need QSEE >= 4.0. */
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("Open session: invalid handle (%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			pr_err("GP feature unsupported: qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_qteec_open_session(data, argp);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed open_session_cmd: %d\n", ret);
		__qseecom_clean_data_sglistinfo(data);
		break;
	}
	case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("Close session: invalid handle (%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			pr_err("GP feature unsupported: qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_qteec_close_session(data, argp);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed close_session_cmd: %d\n", ret);
		break;
	}
	case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			pr_err("GP feature unsupported: qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed Invoke cmd: %d\n", ret);
		__qseecom_clean_data_sglistinfo(data);
		break;
	}
	case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			pr_err("GP feature unsupported: qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_qteec_request_cancellation(data, argp);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed request_cancellation: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
		atomic_inc(&data->ioctl_count);
		ret = qseecom_get_ce_info(data, argp);
		if (ret)
			pr_err("failed get fde ce pipe info: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		break;
	}
	case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
		atomic_inc(&data->ioctl_count);
		ret = qseecom_free_ce_info(data, argp);
		if (ret)
			pr_err("failed get fde ce pipe info: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		break;
	}
	case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
		atomic_inc(&data->ioctl_count);
		ret = qseecom_query_ce_info(data, argp);
		if (ret)
			pr_err("failed get fde ce pipe info: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		break;
	}
	case QSEECOM_IOCTL_SET_ICE_INFO: {
		struct qseecom_ice_data_t ice_data;

		ret = copy_from_user(&ice_data, argp, sizeof(ice_data));
		if (ret) {
			pr_err("copy_from_user failed\n");
			return -EFAULT;
		}
		qcom_ice_set_fde_flag(ice_data.flag);
		break;
	}
	case QSEECOM_IOCTL_FBE_CLEAR_KEY: {
		pr_err("QSEECOM_IOCTL_FBE_CLEAR_KEY IOCTL is deprecated\n");
		return -EINVAL;
	}
	default:
		pr_err("Invalid IOCTL: 0x%x\n", cmd);
		return -ENOIOCTLCMD;
	}
	return ret;
}
  7530. static int qseecom_open(struct inode *inode, struct file *file)
  7531. {
  7532. int ret = 0;
  7533. struct qseecom_dev_handle *data;
  7534. data = kzalloc(sizeof(*data), GFP_KERNEL);
  7535. if (!data)
  7536. {
  7537. return -ENOMEM;
  7538. }
  7539. file->private_data = data;
  7540. data->abort = 0;
  7541. data->type = QSEECOM_GENERIC;
  7542. data->released = false;
  7543. memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
  7544. data->mode = INACTIVE;
  7545. init_waitqueue_head(&data->abort_wq);
  7546. atomic_set(&data->ioctl_count, 0);
  7547. data->sglistinfo_ptr = (struct sglist_info *)__qseecom_alloc_tzbuf(
  7548. sizeof(struct sglist_info) * MAX_ION_FD,
  7549. &data->sglistinfo_shm.paddr,
  7550. &data->sglistinfo_shm);
  7551. if (!data->sglistinfo_ptr)
  7552. {
  7553. return -ENOMEM;
  7554. }
  7555. return ret;
  7556. }
  7557. static void __qseecom_release_disable_clk(struct qseecom_dev_handle *data)
  7558. {
  7559. if (qseecom.no_clock_support)
  7560. return;
  7561. if (qseecom.support_bus_scaling) {
  7562. mutex_lock(&qsee_bw_mutex);
  7563. if (data->mode != INACTIVE) {
  7564. qseecom_unregister_bus_bandwidth_needs(data);
  7565. if (qseecom.cumulative_mode == INACTIVE)
  7566. __qseecom_set_msm_bus_request(INACTIVE);
  7567. }
  7568. mutex_unlock(&qsee_bw_mutex);
  7569. } else {
  7570. if (data->fast_load_enabled)
  7571. qsee_disable_clock_vote(data, CLK_SFPB);
  7572. if (data->perf_enabled)
  7573. qsee_disable_clock_vote(data, CLK_DFAB);
  7574. }
  7575. }
/*
 * qseecom_release() - file release hook for /dev/qseecom.
 *
 * Drops clock/bandwidth votes, then performs role-specific teardown.
 * For listeners and loaded client apps the actual cleanup may be deferred
 * to a kthread; in that case ownership of @data transfers to the deferred
 * path and it must NOT be freed here (free_private_data = false).
 */
static int qseecom_release(struct inode *inode, struct file *file)
{
	struct qseecom_dev_handle *data = file->private_data;
	int ret = 0;
	bool free_private_data = true;

	__qseecom_release_disable_clk(data);
	if (!data->released) {
		pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
			data->type, data->mode, data);
		switch (data->type) {
		case QSEECOM_LISTENER_SERVICE:
			pr_debug("release lsnr svc %d\n", data->listener.id);
			mutex_lock(&listener_access_lock);
			ret = qseecom_unregister_listener(data);
			/*
			 * On success the unregister kthread owns @data and
			 * will free it; only keep local ownership on failure.
			 */
			if (!ret)
				free_private_data = false;
			data->listener.release_called = true;
			mutex_unlock(&listener_access_lock);
			__wakeup_unregister_listener_kthread();
			break;
		case QSEECOM_CLIENT_APP:
			pr_debug("release app %d (%s)\n",
				data->client.app_id, data->client.app_name);
			if (data->client.app_id) {
				/* Deferred unload path takes over @data. */
				free_private_data = false;
				mutex_lock(&unload_app_pending_list_lock);
				ret = qseecom_prepare_unload_app(data);
				mutex_unlock(&unload_app_pending_list_lock);
				__wakeup_unload_app_kthread();
			}
			break;
		case QSEECOM_SECURE_SERVICE:
		case QSEECOM_GENERIC:
			/* Unmap the shared buffer set up by SET_MEM_PARAM. */
			if (data->client.dmabuf) {
				qseecom_vaddr_unmap(data->client.sb_virt,
					data->client.sgt, data->client.attach,
					data->client.dmabuf);
				MAKE_NULL(data->client.sgt, data->client.attach,
					data->client.dmabuf);
			}
			break;
		case QSEECOM_UNAVAILABLE_CLIENT_APP:
			break;
		default:
			pr_err("Unsupported clnt_handle_type %d\n",
				data->type);
			break;
		}
	}
	if (free_private_data) {
		__qseecom_free_tzbuf(&data->sglistinfo_shm);
		kfree(data);
	}
	return ret;
}
/* Character-device entry points for the qseecom device node. */
static const struct file_operations qseecom_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = qseecom_ioctl,
	.open = qseecom_open,
	.release = qseecom_release
};
  7637. static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
  7638. {
  7639. int rc = 0;
  7640. struct device *pdev;
  7641. struct qseecom_clk *qclk;
  7642. char *core_clk_src = NULL;
  7643. char *core_clk = NULL;
  7644. char *iface_clk = NULL;
  7645. char *bus_clk = NULL;
  7646. switch (ce) {
  7647. case CLK_QSEE: {
  7648. core_clk_src = "core_clk_src";
  7649. core_clk = "core_clk";
  7650. iface_clk = "iface_clk";
  7651. bus_clk = "bus_clk";
  7652. qclk = &qseecom.qsee;
  7653. qclk->instance = CLK_QSEE;
  7654. break;
  7655. };
  7656. case CLK_CE_DRV: {
  7657. core_clk_src = "ce_drv_core_clk_src";
  7658. core_clk = "ce_drv_core_clk";
  7659. iface_clk = "ce_drv_iface_clk";
  7660. bus_clk = "ce_drv_bus_clk";
  7661. qclk = &qseecom.ce_drv;
  7662. qclk->instance = CLK_CE_DRV;
  7663. break;
  7664. };
  7665. default:
  7666. pr_err("Invalid ce hw instance: %d!\n", ce);
  7667. return -EIO;
  7668. }
  7669. if (qseecom.no_clock_support) {
  7670. qclk->ce_core_clk = NULL;
  7671. qclk->ce_clk = NULL;
  7672. qclk->ce_bus_clk = NULL;
  7673. qclk->ce_core_src_clk = NULL;
  7674. return 0;
  7675. }
  7676. pdev = qseecom.pdev;
  7677. /* Get CE3 src core clk. */
  7678. qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
  7679. if (!IS_ERR(qclk->ce_core_src_clk)) {
  7680. rc = clk_set_rate(qclk->ce_core_src_clk,
  7681. qseecom.ce_opp_freq_hz);
  7682. if (rc) {
  7683. clk_put(qclk->ce_core_src_clk);
  7684. qclk->ce_core_src_clk = NULL;
  7685. pr_err("Unable to set the core src clk @%uMhz.\n",
  7686. qseecom.ce_opp_freq_hz/CE_CLK_DIV);
  7687. return -EIO;
  7688. }
  7689. } else {
  7690. pr_warn("Unable to get CE core src clk, set to NULL\n");
  7691. qclk->ce_core_src_clk = NULL;
  7692. }
  7693. /* Get CE core clk */
  7694. qclk->ce_core_clk = clk_get(pdev, core_clk);
  7695. if (IS_ERR(qclk->ce_core_clk)) {
  7696. rc = PTR_ERR(qclk->ce_core_clk);
  7697. pr_err("Unable to get CE core clk\n");
  7698. if (qclk->ce_core_src_clk != NULL)
  7699. clk_put(qclk->ce_core_src_clk);
  7700. return -EIO;
  7701. }
  7702. /* Get CE Interface clk */
  7703. qclk->ce_clk = clk_get(pdev, iface_clk);
  7704. if (IS_ERR(qclk->ce_clk)) {
  7705. rc = PTR_ERR(qclk->ce_clk);
  7706. pr_err("Unable to get CE interface clk\n");
  7707. if (qclk->ce_core_src_clk != NULL)
  7708. clk_put(qclk->ce_core_src_clk);
  7709. clk_put(qclk->ce_core_clk);
  7710. return -EIO;
  7711. }
  7712. /* Get CE AXI clk */
  7713. qclk->ce_bus_clk = clk_get(pdev, bus_clk);
  7714. if (IS_ERR(qclk->ce_bus_clk)) {
  7715. rc = PTR_ERR(qclk->ce_bus_clk);
  7716. pr_err("Unable to get CE BUS interface clk\n");
  7717. if (qclk->ce_core_src_clk != NULL)
  7718. clk_put(qclk->ce_core_src_clk);
  7719. clk_put(qclk->ce_core_clk);
  7720. clk_put(qclk->ce_clk);
  7721. return -EIO;
  7722. }
  7723. return rc;
  7724. }
  7725. static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
  7726. {
  7727. struct qseecom_clk *qclk;
  7728. if (ce == CLK_QSEE)
  7729. qclk = &qseecom.qsee;
  7730. else
  7731. qclk = &qseecom.ce_drv;
  7732. if (qclk->ce_clk != NULL) {
  7733. clk_put(qclk->ce_clk);
  7734. qclk->ce_clk = NULL;
  7735. }
  7736. if (qclk->ce_core_clk != NULL) {
  7737. clk_put(qclk->ce_core_clk);
  7738. qclk->ce_core_clk = NULL;
  7739. }
  7740. if (qclk->ce_bus_clk != NULL) {
  7741. clk_put(qclk->ce_bus_clk);
  7742. qclk->ce_bus_clk = NULL;
  7743. }
  7744. if (qclk->ce_core_src_clk != NULL) {
  7745. clk_put(qclk->ce_core_src_clk);
  7746. qclk->ce_core_src_clk = NULL;
  7747. }
  7748. qclk->instance = CLK_INVALID;
  7749. }
  7750. static int qseecom_retrieve_ce_data(struct platform_device *pdev)
  7751. {
  7752. int rc = 0;
  7753. uint32_t hlos_num_ce_hw_instances;
  7754. uint32_t disk_encrypt_pipe;
  7755. uint32_t file_encrypt_pipe;
  7756. uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT] = {0};
  7757. int i;
  7758. const int *tbl;
  7759. int size;
  7760. int entry;
  7761. struct qseecom_crypto_info *pfde_tbl = NULL;
  7762. struct qseecom_crypto_info *p;
  7763. int tbl_size;
  7764. int j;
  7765. bool old_db = true;
  7766. struct qseecom_ce_info_use *pce_info_use;
  7767. uint32_t *unit_tbl = NULL;
  7768. int total_units = 0;
  7769. struct qseecom_ce_pipe_entry *pce_entry;
  7770. qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
  7771. qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;
  7772. if (of_property_read_u32((&pdev->dev)->of_node,
  7773. "qcom,qsee-ce-hw-instance",
  7774. &qseecom.ce_info.qsee_ce_hw_instance)) {
  7775. pr_err("Fail to get qsee ce hw instance information.\n");
  7776. rc = -EINVAL;
  7777. goto out;
  7778. } else {
  7779. pr_debug("qsee-ce-hw-instance=0x%x\n",
  7780. qseecom.ce_info.qsee_ce_hw_instance);
  7781. }
  7782. qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
  7783. "qcom,support-fde");
  7784. qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
  7785. "qcom,support-pfe");
  7786. if (!qseecom.support_pfe && !qseecom.support_fde) {
  7787. pr_warn("Device does not support PFE/FDE\n");
  7788. goto out;
  7789. }
  7790. if (qseecom.support_fde)
  7791. tbl = of_get_property((&pdev->dev)->of_node,
  7792. "qcom,full-disk-encrypt-info", &size);
  7793. else
  7794. tbl = NULL;
  7795. if (tbl) {
  7796. old_db = false;
  7797. if (size % sizeof(struct qseecom_crypto_info)) {
  7798. pr_err("full-disk-encrypt-info tbl size(%d)\n",
  7799. size);
  7800. rc = -EINVAL;
  7801. goto out;
  7802. }
  7803. tbl_size = size / sizeof
  7804. (struct qseecom_crypto_info);
  7805. pfde_tbl = kzalloc(size, GFP_KERNEL);
  7806. unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
  7807. total_units = 0;
  7808. if (!pfde_tbl || !unit_tbl) {
  7809. rc = -ENOMEM;
  7810. goto out;
  7811. }
  7812. if (of_property_read_u32_array((&pdev->dev)->of_node,
  7813. "qcom,full-disk-encrypt-info",
  7814. (u32 *)pfde_tbl, size/sizeof(u32))) {
  7815. pr_err("failed to read full-disk-encrypt-info tbl\n");
  7816. rc = -EINVAL;
  7817. goto out;
  7818. }
  7819. for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
  7820. for (j = 0; j < total_units; j++) {
  7821. if (p->unit_num == *(unit_tbl + j))
  7822. break;
  7823. }
  7824. if (j == total_units) {
  7825. *(unit_tbl + total_units) = p->unit_num;
  7826. total_units++;
  7827. }
  7828. }
  7829. qseecom.ce_info.num_fde = total_units;
  7830. pce_info_use = qseecom.ce_info.fde = kcalloc(
  7831. total_units, sizeof(struct qseecom_ce_info_use),
  7832. GFP_KERNEL);
  7833. if (!pce_info_use) {
  7834. rc = -ENOMEM;
  7835. goto out;
  7836. }
  7837. for (j = 0; j < total_units; j++, pce_info_use++) {
  7838. pce_info_use->unit_num = *(unit_tbl + j);
  7839. pce_info_use->alloc = false;
  7840. pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
  7841. pce_info_use->num_ce_pipe_entries = 0;
  7842. pce_info_use->ce_pipe_entry = NULL;
  7843. for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
  7844. if (p->unit_num == pce_info_use->unit_num)
  7845. pce_info_use->num_ce_pipe_entries++;
  7846. }
  7847. entry = pce_info_use->num_ce_pipe_entries;
  7848. pce_entry = pce_info_use->ce_pipe_entry =
  7849. kcalloc(entry,
  7850. sizeof(struct qseecom_ce_pipe_entry),
  7851. GFP_KERNEL);
  7852. if (pce_entry == NULL) {
  7853. rc = -ENOMEM;
  7854. goto out;
  7855. }
  7856. for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
  7857. if (p->unit_num == pce_info_use->unit_num) {
  7858. pce_entry->ce_num = p->ce;
  7859. pce_entry->ce_pipe_pair =
  7860. p->pipe_pair;
  7861. pce_entry->valid = true;
  7862. pce_entry++;
  7863. }
  7864. }
  7865. }
  7866. kfree(unit_tbl);
  7867. unit_tbl = NULL;
  7868. kfree(pfde_tbl);
  7869. pfde_tbl = NULL;
  7870. }
  7871. if (qseecom.support_pfe)
  7872. tbl = of_get_property((&pdev->dev)->of_node,
  7873. "qcom,per-file-encrypt-info", &size);
  7874. else
  7875. tbl = NULL;
  7876. if (tbl) {
  7877. old_db = false;
  7878. if (size % sizeof(struct qseecom_crypto_info)) {
  7879. pr_err("per-file-encrypt-info tbl size(%d)\n",
  7880. size);
  7881. rc = -EINVAL;
  7882. goto out;
  7883. }
  7884. tbl_size = size / sizeof
  7885. (struct qseecom_crypto_info);
  7886. pfde_tbl = kzalloc(size, GFP_KERNEL);
  7887. unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
  7888. total_units = 0;
  7889. if (!pfde_tbl || !unit_tbl) {
  7890. rc = -ENOMEM;
  7891. goto out;
  7892. }
  7893. if (of_property_read_u32_array((&pdev->dev)->of_node,
  7894. "qcom,per-file-encrypt-info",
  7895. (u32 *)pfde_tbl, size/sizeof(u32))) {
  7896. pr_err("failed to read per-file-encrypt-info tbl\n");
  7897. rc = -EINVAL;
  7898. goto out;
  7899. }
  7900. for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
  7901. for (j = 0; j < total_units; j++) {
  7902. if (p->unit_num == *(unit_tbl + j))
  7903. break;
  7904. }
  7905. if (j == total_units) {
  7906. *(unit_tbl + total_units) = p->unit_num;
  7907. total_units++;
  7908. }
  7909. }
  7910. qseecom.ce_info.num_pfe = total_units;
  7911. pce_info_use = qseecom.ce_info.pfe = kcalloc(
  7912. total_units, sizeof(struct qseecom_ce_info_use),
  7913. GFP_KERNEL);
  7914. if (!pce_info_use) {
  7915. rc = -ENOMEM;
  7916. goto out;
  7917. }
  7918. for (j = 0; j < total_units; j++, pce_info_use++) {
  7919. pce_info_use->unit_num = *(unit_tbl + j);
  7920. pce_info_use->alloc = false;
  7921. pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
  7922. pce_info_use->num_ce_pipe_entries = 0;
  7923. pce_info_use->ce_pipe_entry = NULL;
  7924. for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
  7925. if (p->unit_num == pce_info_use->unit_num)
  7926. pce_info_use->num_ce_pipe_entries++;
  7927. }
  7928. entry = pce_info_use->num_ce_pipe_entries;
  7929. pce_entry = pce_info_use->ce_pipe_entry =
  7930. kcalloc(entry,
  7931. sizeof(struct qseecom_ce_pipe_entry),
  7932. GFP_KERNEL);
  7933. if (pce_entry == NULL) {
  7934. rc = -ENOMEM;
  7935. goto out;
  7936. }
  7937. for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
  7938. if (p->unit_num == pce_info_use->unit_num) {
  7939. pce_entry->ce_num = p->ce;
  7940. pce_entry->ce_pipe_pair =
  7941. p->pipe_pair;
  7942. pce_entry->valid = true;
  7943. pce_entry++;
  7944. }
  7945. }
  7946. }
  7947. kfree(unit_tbl);
  7948. unit_tbl = NULL;
  7949. kfree(pfde_tbl);
  7950. pfde_tbl = NULL;
  7951. }
  7952. if (!old_db)
  7953. goto out1;
  7954. if (of_property_read_bool((&pdev->dev)->of_node,
  7955. "qcom,support-multiple-ce-hw-instance")) {
  7956. if (of_property_read_u32((&pdev->dev)->of_node,
  7957. "qcom,hlos-num-ce-hw-instances",
  7958. &hlos_num_ce_hw_instances)) {
  7959. pr_err("Fail: get hlos number of ce hw instance\n");
  7960. rc = -EINVAL;
  7961. goto out;
  7962. }
  7963. } else {
  7964. hlos_num_ce_hw_instances = 1;
  7965. }
  7966. if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
  7967. pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
  7968. MAX_CE_PIPE_PAIR_PER_UNIT);
  7969. rc = -EINVAL;
  7970. goto out;
  7971. }
  7972. if (of_property_read_u32_array((&pdev->dev)->of_node,
  7973. "qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
  7974. hlos_num_ce_hw_instances)) {
  7975. pr_err("Fail: get hlos ce hw instance info\n");
  7976. rc = -EINVAL;
  7977. goto out;
  7978. }
  7979. if (qseecom.support_fde) {
  7980. pce_info_use = qseecom.ce_info.fde =
  7981. kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
  7982. if (!pce_info_use) {
  7983. rc = -ENOMEM;
  7984. goto out;
  7985. }
  7986. /* by default for old db */
  7987. qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
  7988. pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
  7989. pce_info_use->alloc = false;
  7990. pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
  7991. pce_info_use->ce_pipe_entry = NULL;
  7992. if (of_property_read_u32((&pdev->dev)->of_node,
  7993. "qcom,disk-encrypt-pipe-pair",
  7994. &disk_encrypt_pipe)) {
  7995. pr_err("Fail to get FDE pipe information.\n");
  7996. rc = -EINVAL;
  7997. goto out;
  7998. } else {
  7999. pr_debug("disk-encrypt-pipe-pair=0x%x\n",
  8000. disk_encrypt_pipe);
  8001. }
  8002. entry = pce_info_use->num_ce_pipe_entries =
  8003. hlos_num_ce_hw_instances;
  8004. pce_entry = pce_info_use->ce_pipe_entry =
  8005. kcalloc(entry,
  8006. sizeof(struct qseecom_ce_pipe_entry),
  8007. GFP_KERNEL);
  8008. if (pce_entry == NULL) {
  8009. rc = -ENOMEM;
  8010. goto out;
  8011. }
  8012. for (i = 0; i < entry; i++) {
  8013. pce_entry->ce_num = hlos_ce_hw_instance[i];
  8014. pce_entry->ce_pipe_pair = disk_encrypt_pipe;
  8015. pce_entry->valid = 1;
  8016. pce_entry++;
  8017. }
  8018. } else {
  8019. pr_warn("Device does not support FDE\n");
  8020. disk_encrypt_pipe = 0xff;
  8021. }
  8022. if (qseecom.support_pfe) {
  8023. pce_info_use = qseecom.ce_info.pfe =
  8024. kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
  8025. if (!pce_info_use) {
  8026. rc = -ENOMEM;
  8027. goto out;
  8028. }
  8029. /* by default for old db */
  8030. qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
  8031. pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
  8032. pce_info_use->alloc = false;
  8033. pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
  8034. pce_info_use->ce_pipe_entry = NULL;
  8035. if (of_property_read_u32((&pdev->dev)->of_node,
  8036. "qcom,file-encrypt-pipe-pair",
  8037. &file_encrypt_pipe)) {
  8038. pr_err("Fail to get PFE pipe information.\n");
  8039. rc = -EINVAL;
  8040. goto out;
  8041. } else {
  8042. pr_debug("file-encrypt-pipe-pair=0x%x\n",
  8043. file_encrypt_pipe);
  8044. }
  8045. entry = pce_info_use->num_ce_pipe_entries =
  8046. hlos_num_ce_hw_instances;
  8047. pce_entry = pce_info_use->ce_pipe_entry =
  8048. kcalloc(entry,
  8049. sizeof(struct qseecom_ce_pipe_entry),
  8050. GFP_KERNEL);
  8051. if (pce_entry == NULL) {
  8052. rc = -ENOMEM;
  8053. goto out;
  8054. }
  8055. for (i = 0; i < entry; i++) {
  8056. pce_entry->ce_num = hlos_ce_hw_instance[i];
  8057. pce_entry->ce_pipe_pair = file_encrypt_pipe;
  8058. pce_entry->valid = 1;
  8059. pce_entry++;
  8060. }
  8061. } else {
  8062. pr_warn("Device does not support PFE\n");
  8063. file_encrypt_pipe = 0xff;
  8064. }
  8065. out1:
  8066. qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
  8067. qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
  8068. out:
  8069. if (rc) {
  8070. if (qseecom.ce_info.fde) {
  8071. pce_info_use = qseecom.ce_info.fde;
  8072. for (i = 0; i < qseecom.ce_info.num_fde; i++) {
  8073. pce_entry = pce_info_use->ce_pipe_entry;
  8074. kfree(pce_entry);
  8075. pce_info_use++;
  8076. }
  8077. }
  8078. kfree(qseecom.ce_info.fde);
  8079. qseecom.ce_info.fde = NULL;
  8080. if (qseecom.ce_info.pfe) {
  8081. pce_info_use = qseecom.ce_info.pfe;
  8082. for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
  8083. pce_entry = pce_info_use->ce_pipe_entry;
  8084. kfree(pce_entry);
  8085. pce_info_use++;
  8086. }
  8087. }
  8088. kfree(qseecom.ce_info.pfe);
  8089. qseecom.ce_info.pfe = NULL;
  8090. }
  8091. kfree(unit_tbl);
  8092. kfree(pfde_tbl);
  8093. return rc;
  8094. }
  8095. static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
  8096. void __user *argp)
  8097. {
  8098. struct qseecom_ce_info_req req;
  8099. struct qseecom_ce_info_req *pinfo = &req;
  8100. int ret = 0;
  8101. int i;
  8102. unsigned int entries;
  8103. struct qseecom_ce_info_use *pce_info_use, *p;
  8104. int total = 0;
  8105. bool found = false;
  8106. struct qseecom_ce_pipe_entry *pce_entry;
  8107. ret = copy_from_user(pinfo, argp,
  8108. sizeof(struct qseecom_ce_info_req));
  8109. if (ret) {
  8110. pr_err("copy_from_user failed\n");
  8111. return ret;
  8112. }
  8113. switch (pinfo->usage) {
  8114. case QSEOS_KM_USAGE_DISK_ENCRYPTION:
  8115. case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
  8116. case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
  8117. if (qseecom.support_fde) {
  8118. p = qseecom.ce_info.fde;
  8119. total = qseecom.ce_info.num_fde;
  8120. } else {
  8121. pr_err("system does not support fde\n");
  8122. return -EINVAL;
  8123. }
  8124. break;
  8125. case QSEOS_KM_USAGE_FILE_ENCRYPTION:
  8126. if (qseecom.support_pfe) {
  8127. p = qseecom.ce_info.pfe;
  8128. total = qseecom.ce_info.num_pfe;
  8129. } else {
  8130. pr_err("system does not support pfe\n");
  8131. return -EINVAL;
  8132. }
  8133. break;
  8134. default:
  8135. pr_err("unsupported usage %d\n", pinfo->usage);
  8136. return -EINVAL;
  8137. }
  8138. pce_info_use = NULL;
  8139. for (i = 0; i < total; i++) {
  8140. if (!p->alloc)
  8141. pce_info_use = p;
  8142. else if (!memcmp(p->handle, pinfo->handle,
  8143. MAX_CE_INFO_HANDLE_SIZE)) {
  8144. pce_info_use = p;
  8145. found = true;
  8146. break;
  8147. }
  8148. p++;
  8149. }
  8150. if (pce_info_use == NULL)
  8151. return -EBUSY;
  8152. pinfo->unit_num = pce_info_use->unit_num;
  8153. if (!pce_info_use->alloc) {
  8154. pce_info_use->alloc = true;
  8155. memcpy(pce_info_use->handle,
  8156. pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
  8157. }
  8158. if (pce_info_use->num_ce_pipe_entries >
  8159. MAX_CE_PIPE_PAIR_PER_UNIT)
  8160. entries = MAX_CE_PIPE_PAIR_PER_UNIT;
  8161. else
  8162. entries = pce_info_use->num_ce_pipe_entries;
  8163. pinfo->num_ce_pipe_entries = entries;
  8164. pce_entry = pce_info_use->ce_pipe_entry;
  8165. for (i = 0; i < entries; i++, pce_entry++)
  8166. pinfo->ce_pipe_entry[i] = *pce_entry;
  8167. for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
  8168. pinfo->ce_pipe_entry[i].valid = 0;
  8169. if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
  8170. pr_err("copy_to_user failed\n");
  8171. ret = -EFAULT;
  8172. }
  8173. return ret;
  8174. }
  8175. static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
  8176. void __user *argp)
  8177. {
  8178. struct qseecom_ce_info_req req;
  8179. struct qseecom_ce_info_req *pinfo = &req;
  8180. int ret = 0;
  8181. struct qseecom_ce_info_use *p;
  8182. int total = 0;
  8183. int i;
  8184. bool found = false;
  8185. ret = copy_from_user(pinfo, argp,
  8186. sizeof(struct qseecom_ce_info_req));
  8187. if (ret)
  8188. return ret;
  8189. switch (pinfo->usage) {
  8190. case QSEOS_KM_USAGE_DISK_ENCRYPTION:
  8191. case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
  8192. case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
  8193. if (qseecom.support_fde) {
  8194. p = qseecom.ce_info.fde;
  8195. total = qseecom.ce_info.num_fde;
  8196. } else {
  8197. pr_err("system does not support fde\n");
  8198. return -EINVAL;
  8199. }
  8200. break;
  8201. case QSEOS_KM_USAGE_FILE_ENCRYPTION:
  8202. if (qseecom.support_pfe) {
  8203. p = qseecom.ce_info.pfe;
  8204. total = qseecom.ce_info.num_pfe;
  8205. } else {
  8206. pr_err("system does not support pfe\n");
  8207. return -EINVAL;
  8208. }
  8209. break;
  8210. default:
  8211. pr_err("unsupported usage %d\n", pinfo->usage);
  8212. return -EINVAL;
  8213. }
  8214. for (i = 0; i < total; i++) {
  8215. if (p->alloc &&
  8216. !memcmp(p->handle, pinfo->handle,
  8217. MAX_CE_INFO_HANDLE_SIZE)) {
  8218. memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
  8219. p->alloc = false;
  8220. found = true;
  8221. break;
  8222. }
  8223. p++;
  8224. }
  8225. return ret;
  8226. }
  8227. static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
  8228. void __user *argp)
  8229. {
  8230. struct qseecom_ce_info_req req;
  8231. struct qseecom_ce_info_req *pinfo = &req;
  8232. int ret = 0;
  8233. int i;
  8234. unsigned int entries;
  8235. struct qseecom_ce_info_use *pce_info_use, *p;
  8236. int total = 0;
  8237. bool found = false;
  8238. struct qseecom_ce_pipe_entry *pce_entry;
  8239. ret = copy_from_user(pinfo, argp,
  8240. sizeof(struct qseecom_ce_info_req));
  8241. if (ret)
  8242. return ret;
  8243. switch (pinfo->usage) {
  8244. case QSEOS_KM_USAGE_DISK_ENCRYPTION:
  8245. case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
  8246. case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
  8247. if (qseecom.support_fde) {
  8248. p = qseecom.ce_info.fde;
  8249. total = qseecom.ce_info.num_fde;
  8250. } else {
  8251. pr_err("system does not support fde\n");
  8252. return -EINVAL;
  8253. }
  8254. break;
  8255. case QSEOS_KM_USAGE_FILE_ENCRYPTION:
  8256. if (qseecom.support_pfe) {
  8257. p = qseecom.ce_info.pfe;
  8258. total = qseecom.ce_info.num_pfe;
  8259. } else {
  8260. pr_err("system does not support pfe\n");
  8261. return -EINVAL;
  8262. }
  8263. break;
  8264. default:
  8265. pr_err("unsupported usage %d\n", pinfo->usage);
  8266. return -EINVAL;
  8267. }
  8268. pce_info_use = NULL;
  8269. pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
  8270. pinfo->num_ce_pipe_entries = 0;
  8271. for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
  8272. pinfo->ce_pipe_entry[i].valid = 0;
  8273. for (i = 0; i < total; i++) {
  8274. if (p->alloc && !memcmp(p->handle,
  8275. pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
  8276. pce_info_use = p;
  8277. found = true;
  8278. break;
  8279. }
  8280. p++;
  8281. }
  8282. if (!pce_info_use)
  8283. goto out;
  8284. pinfo->unit_num = pce_info_use->unit_num;
  8285. if (pce_info_use->num_ce_pipe_entries >
  8286. MAX_CE_PIPE_PAIR_PER_UNIT)
  8287. entries = MAX_CE_PIPE_PAIR_PER_UNIT;
  8288. else
  8289. entries = pce_info_use->num_ce_pipe_entries;
  8290. pinfo->num_ce_pipe_entries = entries;
  8291. pce_entry = pce_info_use->ce_pipe_entry;
  8292. for (i = 0; i < entries; i++, pce_entry++)
  8293. pinfo->ce_pipe_entry[i] = *pce_entry;
  8294. for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
  8295. pinfo->ce_pipe_entry[i].valid = 0;
  8296. out:
  8297. if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
  8298. pr_err("copy_to_user failed\n");
  8299. ret = -EFAULT;
  8300. }
  8301. return ret;
  8302. }
  8303. /*
  8304. * Check whitelist feature, and if TZ feature version is < 1.0.0,
  8305. * then whitelist feature is not supported.
  8306. */
  8307. #define GET_FEAT_VERSION_CMD 3
  8308. static int qseecom_check_whitelist_feature(void)
  8309. {
  8310. struct qseecom_scm_desc desc = {0};
  8311. int version = 0;
  8312. int ret = 0;
  8313. desc.args[0] = FEATURE_ID_WHITELIST;
  8314. desc.arginfo = SCM_ARGS(1);
  8315. mutex_lock(&app_access_lock);
  8316. ret = __qseecom_scm_call2_locked(SCM_SIP_FNID(SCM_SVC_INFO,
  8317. GET_FEAT_VERSION_CMD), &desc);
  8318. mutex_unlock(&app_access_lock);
  8319. if (!ret)
  8320. version = desc.ret[0];
  8321. return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
  8322. }
  8323. static int qseecom_init_clk(void)
  8324. {
  8325. int rc;
  8326. if (qseecom.no_clock_support)
  8327. return 0;
  8328. rc = __qseecom_init_clk(CLK_QSEE);
  8329. if (rc)
  8330. return rc;
  8331. if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
  8332. (qseecom.support_pfe || qseecom.support_fde)) {
  8333. rc = __qseecom_init_clk(CLK_CE_DRV);
  8334. if (rc) {
  8335. __qseecom_deinit_clk(CLK_QSEE);
  8336. return rc;
  8337. }
  8338. } else {
  8339. qseecom.ce_drv.ce_core_clk = qseecom.qsee.ce_core_clk;
  8340. qseecom.ce_drv.ce_clk = qseecom.qsee.ce_clk;
  8341. qseecom.ce_drv.ce_core_src_clk = qseecom.qsee.ce_core_src_clk;
  8342. qseecom.ce_drv.ce_bus_clk = qseecom.qsee.ce_bus_clk;
  8343. }
  8344. return rc;
  8345. }
  8346. static void qseecom_deinit_clk(void)
  8347. {
  8348. if (qseecom.no_clock_support)
  8349. return;
  8350. __qseecom_deinit_clk(CLK_QSEE);
  8351. if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
  8352. (qseecom.support_pfe || qseecom.support_fde))
  8353. __qseecom_deinit_clk(CLK_CE_DRV);
  8354. }
  8355. static int qseecom_init_bus(struct platform_device *pdev)
  8356. {
  8357. int ret = 0;
  8358. if (!qseecom.support_bus_scaling)
  8359. return 0;
  8360. if (qseecom.no_clock_support) {
  8361. pr_err("Can not support bus_scalling if no clock support\n");
  8362. return -EINVAL;
  8363. }
  8364. timer_setup(&(qseecom.bw_scale_down_timer),
  8365. qseecom_scale_bus_bandwidth_timer_callback, 0);
  8366. INIT_WORK(&qseecom.bw_inactive_req_ws,
  8367. qseecom_bw_inactive_req_work);
  8368. qseecom.timer_running = false;
  8369. qseecom.icc_path = of_icc_get(&pdev->dev, "data_path");
  8370. if (IS_ERR(qseecom.icc_path)) {
  8371. ret = PTR_ERR(qseecom.icc_path);
  8372. if (ret != -EPROBE_DEFER)
  8373. pr_err("Unable to get Interconnect path\n");
  8374. return ret;
  8375. }
  8376. return 0;
  8377. }
  8378. static void qseecom_deinit_bus(void)
  8379. {
  8380. if (!qseecom.support_bus_scaling || qseecom.no_clock_support)
  8381. return;
  8382. qseecom_bus_scale_update_request(qseecom.qsee_perf_client, 0);
  8383. icc_put(qseecom.icc_path);
  8384. cancel_work_sync(&qseecom.bw_inactive_req_ws);
  8385. del_timer_sync(&qseecom.bw_scale_down_timer);
  8386. }
/*
 * qseecom_send_app_region() - notify QSEE of the secure app memory region.
 *
 * Reads the "secapp-region" memory resource from the platform device and
 * sends its base/size to TZ via a QSEOS_APP_REGION_NOTIFICATION scm call.
 * Skipped (returns 0) when the QSEE version predates the notification,
 * when the region is already protected, or when appsbl handles qseecom.
 *
 * Return: 0 on success or when skipped, -ENOMEM if the resource is
 * missing, -EINVAL on scm-call or TZ failure.
 */
static int qseecom_send_app_region(struct platform_device *pdev)
{
	struct resource *resource = NULL;
	struct qsee_apps_region_info_64bit_ireq req_64bit;
	struct qseecom_command_scm_resp resp;
	void *cmd_buf = NULL;
	size_t cmd_len;
	int rc = 0;

	/* Nothing to do when TZ already knows about the region. */
	if (qseecom.qsee_version < QSEE_VERSION_02 ||
		qseecom.is_apps_region_protected ||
		qseecom.appsbl_qseecom_support)
		return 0;

	resource = platform_get_resource_byname(pdev,
		IORESOURCE_MEM, "secapp-region");
	if (!resource) {
		pr_err("Fail to get secure app region info\n");
		return -ENOMEM;
	}

	req_64bit.qsee_cmd_id = QSEOS_APP_REGION_NOTIFICATION;
	req_64bit.addr = resource->start;
	req_64bit.size = resource_size(resource);
	cmd_buf = (void *)&req_64bit;
	cmd_len = sizeof(struct qsee_apps_region_info_64bit_ireq);
	pr_warn("secure app region addr=0x%llx size=0x%x\n",
		req_64bit.addr, req_64bit.size);

	/* The QSEE clock must be enabled around the scm call. */
	rc = __qseecom_enable_clk(CLK_QSEE);
	if (rc) {
		pr_err("CLK_QSEE enabling failed (%d)\n", rc);
		return rc;
	}
	mutex_lock(&app_access_lock);
	rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		cmd_buf, cmd_len,
		&resp, sizeof(resp));
	mutex_unlock(&app_access_lock);
	__qseecom_disable_clk(CLK_QSEE);

	/* Both the transport and the TZ-level result must succeed. */
	if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
		pr_err("send secapp reg fail %d resp.res %d\n",
			rc, resp.result);
		return -EINVAL;
	}
	return rc;
}
  8430. static void qseecom_release_ce_data(void)
  8431. {
  8432. int i;
  8433. struct qseecom_ce_info_use *pce_info_use = NULL;
  8434. if (qseecom.ce_info.fde) {
  8435. pce_info_use = qseecom.ce_info.fde;
  8436. for (i = 0; i < qseecom.ce_info.num_fde; i++) {
  8437. kfree_sensitive(pce_info_use->ce_pipe_entry);
  8438. pce_info_use++;
  8439. }
  8440. kfree(qseecom.ce_info.fde);
  8441. }
  8442. if (qseecom.ce_info.pfe) {
  8443. pce_info_use = qseecom.ce_info.pfe;
  8444. for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
  8445. kfree_sensitive(pce_info_use->ce_pipe_entry);
  8446. pce_info_use++;
  8447. }
  8448. kfree(qseecom.ce_info.pfe);
  8449. }
  8450. }
/*
 * qseecom_init_dev() - create the /dev/qseecom character device and set
 * up DMA for the platform device.
 *
 * Creation order: chrdev region -> class -> device node -> cdev ->
 * DMA mask/params -> reserved memory. The goto labels unwind in exact
 * reverse order on failure.
 *
 * Return: 0 on success, negative errno on failure (everything created so
 * far is torn down).
 */
static int qseecom_init_dev(struct platform_device *pdev)
{
	int rc = 0;

	rc = alloc_chrdev_region(&qseecom.qseecom_device_no,
		0, 1, QSEECOM_DEV);
	if (rc < 0) {
		pr_err("alloc_chrdev_region failed %d\n", rc);
		return rc;
	}
	qseecom.driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
	if (IS_ERR(qseecom.driver_class)) {
		rc = PTR_ERR(qseecom.driver_class);
		pr_err("class_create failed %x\n", rc);
		goto exit_unreg_chrdev_region;
	}
	qseecom.pdev = device_create(qseecom.driver_class, NULL,
		qseecom.qseecom_device_no, NULL,
		QSEECOM_DEV);
	if (IS_ERR(qseecom.pdev)) {
		pr_err("class_device_create failed %d\n", rc);
		rc = PTR_ERR(qseecom.pdev);
		goto exit_destroy_class;
	}
	cdev_init(&qseecom.cdev, &qseecom_fops);
	qseecom.cdev.owner = THIS_MODULE;
	rc = cdev_add(&qseecom.cdev,
		MKDEV(MAJOR(qseecom.qseecom_device_no), 0), 1);
	if (rc < 0) {
		pr_err("cdev_add failed %d\n", rc);
		goto exit_destroy_device;
	}
	qseecom.dev = &pdev->dev;
	rc = dma_set_mask(qseecom.dev, DMA_BIT_MASK(64));
	if (rc) {
		pr_err("qseecom failed to set dma mask %d\n", rc);
		goto exit_del_cdev;
	}
	/* Allocate dma_parms only if the core hasn't provided them. */
	if (!qseecom.dev->dma_parms) {
		qseecom.dev->dma_parms =
			kzalloc(sizeof(*qseecom.dev->dma_parms), GFP_KERNEL);
		if (!qseecom.dev->dma_parms) {
			rc = -ENOMEM;
			goto exit_del_cdev;
		}
	}
	dma_set_max_seg_size(qseecom.dev, DMA_BIT_MASK(32));
	rc = of_reserved_mem_device_init_by_idx(&pdev->dev,
		(&pdev->dev)->of_node, 0);
	if (rc) {
		pr_err("Failed to initialize reserved mem, ret %d\n", rc);
		goto exit_del_cdev;
	}
	return 0;

exit_del_cdev:
	cdev_del(&qseecom.cdev);
exit_destroy_device:
	device_destroy(qseecom.driver_class, qseecom.qseecom_device_no);
exit_destroy_class:
	class_destroy(qseecom.driver_class);
exit_unreg_chrdev_region:
	unregister_chrdev_region(qseecom.qseecom_device_no, 1);
	return rc;
}
/*
 * qseecom_deinit_dev() - tear down everything created by
 * qseecom_init_dev(), in reverse order of creation.
 */
static void qseecom_deinit_dev(void)
{
	kfree(qseecom.dev->dma_parms);
	qseecom.dev->dma_parms = NULL;
	cdev_del(&qseecom.cdev);
	device_destroy(qseecom.driver_class, qseecom.qseecom_device_no);
	class_destroy(qseecom.driver_class);
	unregister_chrdev_region(qseecom.qseecom_device_no, 1);
}
  8523. static int qseecom_init_control(void)
  8524. {
  8525. uint32_t feature = 10;
  8526. struct qseecom_command_scm_resp resp;
  8527. int rc = 0;
  8528. qseecom.qsee_version = QSEEE_VERSION_00;
  8529. mutex_lock(&app_access_lock);
  8530. rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
  8531. &resp, sizeof(resp));
  8532. mutex_unlock(&app_access_lock);
  8533. pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
  8534. if (rc) {
  8535. pr_err("Failed to get QSEE version info %d\n", rc);
  8536. return rc;
  8537. }
  8538. qseecom.qsee_version = resp.result;
  8539. atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
  8540. init_waitqueue_head(&qseecom.app_block_wq);
  8541. qseecom.whitelist_support = true;
  8542. INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
  8543. INIT_LIST_HEAD(&qseecom.registered_app_list_head);
  8544. spin_lock_init(&qseecom.registered_app_list_lock);
  8545. INIT_LIST_HEAD(&qseecom.unregister_lsnr_pending_list_head);
  8546. INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
  8547. spin_lock_init(&qseecom.registered_kclient_list_lock);
  8548. init_waitqueue_head(&qseecom.send_resp_wq);
  8549. init_waitqueue_head(&qseecom.register_lsnr_pending_wq);
  8550. init_waitqueue_head(&qseecom.unregister_lsnr_kthread_wq);
  8551. INIT_LIST_HEAD(&qseecom.unload_app_pending_list_head);
  8552. init_waitqueue_head(&qseecom.unload_app_kthread_wq);
  8553. qseecom.send_resp_flag = 0;
  8554. qseecom.qseos_version = QSEOS_VERSION_14;
  8555. qseecom.commonlib_loaded = false;
  8556. qseecom.commonlib64_loaded = false;
  8557. qseecom.whitelist_support = qseecom_check_whitelist_feature();
  8558. return rc;
  8559. }
  8560. static int qseecom_parse_dt(struct platform_device *pdev)
  8561. {
  8562. if (!pdev->dev.of_node) {
  8563. pr_err("NULL of_node\n");
  8564. return -ENODEV;
  8565. }
  8566. qseecom.pdev->of_node = pdev->dev.of_node;
  8567. qseecom.support_bus_scaling =
  8568. of_property_read_bool((&pdev->dev)->of_node,
  8569. "qcom,support-bus-scaling");
  8570. qseecom.appsbl_qseecom_support =
  8571. of_property_read_bool((&pdev->dev)->of_node,
  8572. "qcom,appsbl-qseecom-support");
  8573. qseecom.commonlib64_loaded =
  8574. of_property_read_bool((&pdev->dev)->of_node,
  8575. "qcom,commonlib64-loaded-by-uefi");
  8576. qseecom.fde_key_size =
  8577. of_property_read_bool((&pdev->dev)->of_node,
  8578. "qcom,fde-key-size");
  8579. qseecom.no_clock_support =
  8580. of_property_read_bool((&pdev->dev)->of_node,
  8581. "qcom,no-clock-support");
  8582. qseecom.enable_key_wrap_in_ks =
  8583. of_property_read_bool((&pdev->dev)->of_node,
  8584. "qcom,enable-key-wrap-in-ks");
  8585. if (of_property_read_u32((&pdev->dev)->of_node,
  8586. "qcom,qsee-reentrancy-support",
  8587. &qseecom.qsee_reentrancy_support)) {
  8588. pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
  8589. qseecom.qsee_reentrancy_support = 0;
  8590. }
  8591. if (of_property_read_u32((&pdev->dev)->of_node,
  8592. "qcom,ce-opp-freq", &qseecom.ce_opp_freq_hz)) {
  8593. pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
  8594. qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
  8595. }
  8596. /*
  8597. * By default, appsbl only loads cmnlib. If OEM changes appsbl to
  8598. * load cmnlib64 too, while cmnlib64 img is not present in non_hlos.bin,
  8599. * Pls add "qseecom.commonlib64_loaded = true" here too.
  8600. */
  8601. if (qseecom.is_apps_region_protected ||
  8602. qseecom.appsbl_qseecom_support)
  8603. qseecom.commonlib_loaded = true;
  8604. return 0;
  8605. }
  8606. static int qseecom_create_kthreads(void)
  8607. {
  8608. int rc = 0;
  8609. qseecom.unregister_lsnr_kthread_task = kthread_run(
  8610. __qseecom_unregister_listener_kthread_func,
  8611. NULL, "qseecom-unreg-lsnr");
  8612. if (IS_ERR(qseecom.unregister_lsnr_kthread_task)) {
  8613. rc = PTR_ERR(qseecom.unregister_lsnr_kthread_task);
  8614. pr_err("fail to create kthread to unreg lsnr, rc = %x\n", rc);
  8615. return rc;
  8616. }
  8617. atomic_set(&qseecom.unregister_lsnr_kthread_state,
  8618. LSNR_UNREG_KT_SLEEP);
  8619. /*create a kthread to process pending ta unloading task */
  8620. qseecom.unload_app_kthread_task = kthread_run(
  8621. __qseecom_unload_app_kthread_func,
  8622. NULL, "qseecom-unload-ta");
  8623. if (IS_ERR(qseecom.unload_app_kthread_task)) {
  8624. rc = PTR_ERR(qseecom.unload_app_kthread_task);
  8625. pr_err("failed to create kthread to unload ta, rc = %x\n", rc);
  8626. kthread_stop(qseecom.unregister_lsnr_kthread_task);
  8627. return rc;
  8628. }
  8629. atomic_set(&qseecom.unload_app_kthread_state,
  8630. UNLOAD_APP_KT_SLEEP);
  8631. return 0;
  8632. }
  8633. static int qseecom_register_heap_shmbridge(struct platform_device *pdev,
  8634. char *heap_mem_region_name,
  8635. uint64_t *handle)
  8636. {
  8637. phys_addr_t heap_pa = 0;
  8638. size_t heap_size = 0;
  8639. struct device_node *node = NULL;
  8640. struct reserved_mem *rmem = NULL;
  8641. uint32_t ns_vmids[] = {VMID_HLOS};
  8642. uint32_t ns_vm_perms[] = {PERM_READ | PERM_WRITE};
  8643. node = of_parse_phandle(pdev->dev.of_node, heap_mem_region_name, 0);
  8644. if (!node) {
  8645. pr_err("unable to parse memory-region of heap %d\n", heap_mem_region_name);
  8646. return -EINVAL;
  8647. }
  8648. rmem = of_reserved_mem_lookup(node);
  8649. if (!rmem) {
  8650. pr_err("unable to acquire memory-region of heap %d\n", heap_mem_region_name);
  8651. return -EINVAL;
  8652. }
  8653. heap_pa = rmem->base;
  8654. heap_size = (size_t)rmem->size;
  8655. pr_debug("get heap %d info: shmbridge created\n", heap_mem_region_name);
  8656. return qtee_shmbridge_register(heap_pa,
  8657. heap_size, ns_vmids, ns_vm_perms, 1,
  8658. PERM_READ | PERM_WRITE, handle);
  8659. }
  8660. static int qseecom_register_shmbridge(struct platform_device *pdev)
  8661. {
  8662. int ret = 0;
  8663. if (!qtee_shmbridge_is_enabled())
  8664. return 0;
  8665. ret = qseecom_register_heap_shmbridge(pdev, "qseecom_ta_mem",
  8666. &qseecom.ta_bridge_handle);
  8667. if (ret)
  8668. return ret;
  8669. ret = qseecom_register_heap_shmbridge(pdev, "qseecom_mem",
  8670. &qseecom.qseecom_bridge_handle);
  8671. if (ret) {
  8672. qtee_shmbridge_deregister(qseecom.ta_bridge_handle);
  8673. return ret;
  8674. }
  8675. ret = qseecom_register_heap_shmbridge(pdev, "user_contig_mem",
  8676. &qseecom.user_contig_bridge_handle);
  8677. if (ret) {
  8678. qtee_shmbridge_deregister(qseecom.qseecom_bridge_handle);
  8679. qtee_shmbridge_deregister(qseecom.ta_bridge_handle);
  8680. return ret;
  8681. }
  8682. return 0;
  8683. }
/*
 * qseecom_deregister_shmbridge() - deregister the three heap bridges in
 * reverse order of their registration.
 */
static void qseecom_deregister_shmbridge(void)
{
	qtee_shmbridge_deregister(qseecom.user_contig_bridge_handle);
	qtee_shmbridge_deregister(qseecom.qseecom_bridge_handle);
	qtee_shmbridge_deregister(qseecom.ta_bridge_handle);
}
/*
 * qseecom_probe() - driver probe: bring up shmbridge, char device, global
 * state, DT config, CE tables, clocks, bus scaling, app-region
 * notification, and the worker kthreads — in that order. The error labels
 * unwind in exact reverse order.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int qseecom_probe(struct platform_device *pdev)
{
	int rc;

	rc = qseecom_register_shmbridge(pdev);
	if (rc)
		return rc;
	rc = qseecom_init_dev(pdev);
	if (rc)
		goto exit_unregister_bridge;
	rc = qseecom_init_control();
	if (rc)
		goto exit_deinit_dev;
	rc = qseecom_parse_dt(pdev);
	if (rc)
		goto exit_deinit_dev;
	rc = qseecom_retrieve_ce_data(pdev);
	if (rc)
		goto exit_deinit_dev;
	rc = qseecom_init_clk();
	if (rc)
		goto exit_release_ce_data;
	rc = qseecom_init_bus(pdev);
	if (rc)
		goto exit_deinit_clock;
	rc = qseecom_send_app_region(pdev);
	if (rc)
		goto exit_deinit_bus;
	rc = qseecom_create_kthreads();
	if (rc)
		goto exit_deinit_bus;
#if IS_ENABLED(CONFIG_QSEECOM_PROXY)
	/*If the api fails to get the func ops, print the error and continue
	 * Do not treat it as fatal*/
	rc = get_qseecom_kernel_fun_ops();
	if (rc)
		pr_err("failed to provide qseecom ops %d", rc);
#endif
	/* Driver fully up; ioctls are accepted from this point. */
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
	return 0;

exit_deinit_bus:
	qseecom_deinit_bus();
exit_deinit_clock:
	qseecom_deinit_clk();
exit_release_ce_data:
	qseecom_release_ce_data();
exit_deinit_dev:
	qseecom_deinit_dev();
exit_unregister_bridge:
	qseecom_deregister_shmbridge();
	return rc;
}
/*
 * qseecom_remove() - driver removal: unload every registered kernel
 * client's TA, unload the common library, then tear down bridges,
 * kthreads, bus, clocks, CE tables, and the char device.
 *
 * NOTE(review): qseecom_unload_app() takes app_access_lock (a mutex, may
 * sleep) while registered_kclient_list_lock is held via
 * spin_lock_irqsave — this looks like a sleep-in-atomic-context hazard;
 * confirm against lockdep / the original driver history before relying
 * on this path.
 *
 * Return: the last qseecom_unload_app() status (0 when the list is empty
 * or ends with the NULL-handle sentinel).
 */
static int qseecom_remove(struct platform_device *pdev)
{
	struct qseecom_registered_kclient_list *kclient = NULL;
	struct qseecom_registered_kclient_list *kclient_tmp = NULL;
	unsigned long flags = 0;
	int ret = 0;

	/* Stop accepting new ioctls before tearing anything down. */
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
	list_for_each_entry_safe(kclient, kclient_tmp,
		&qseecom.registered_kclient_list_head, list) {
		/* Break the loop if client handle is NULL */
		if (!kclient->handle) {
			list_del(&kclient->list);
			kfree_sensitive(kclient);
			break;
		}
		list_del(&kclient->list);
		mutex_lock(&app_access_lock);
		ret = qseecom_unload_app(kclient->handle->dev, false);
		mutex_unlock(&app_access_lock);
		/* Only free the client when its TA unloaded cleanly. */
		if (!ret) {
			kfree_sensitive(kclient->handle->dev);
			kfree_sensitive(kclient->handle);
			kfree_sensitive(kclient);
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);

	if (qseecom.qseos_version > QSEEE_VERSION_00)
		qseecom_unload_commonlib_image();

	/* Teardown mirrors the probe sequence in reverse. */
	qseecom_deregister_shmbridge();
	kthread_stop(qseecom.unload_app_kthread_task);
	kthread_stop(qseecom.unregister_lsnr_kthread_task);
	qseecom_deinit_bus();
	qseecom_deinit_clk();
	qseecom_release_ce_data();
	qseecom_deinit_dev();
	return ret;
}
/*
 * qseecom_suspend() - legacy platform suspend callback.
 *
 * Marks the driver SUSPENDED, drops the bus bandwidth vote to INACTIVE,
 * gates the CE clocks if they are currently enabled, and stops the
 * bandwidth scale-down timer.  Statement order is deliberate: both
 * qsee_bw_mutex and clk_access_lock are held (in that order) around the
 * bus/clock/timer manipulation, and the workqueue item is only cancelled
 * after the locks are released to avoid deadlocking against a running
 * bw_inactive_req_ws work item that takes the same locks.
 *
 * Always returns 0; a failed bus-scale request is logged but does not
 * fail the suspend.
 */
static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
	/* Nothing to gate on targets without clock support. */
	if (qseecom.no_clock_support)
		return 0;
	mutex_lock(&qsee_bw_mutex);
	mutex_lock(&clk_access_lock);
	if (qseecom.current_mode != INACTIVE) {
		ret = qseecom_bus_scale_update_request(
			qseecom.qsee_perf_client, INACTIVE);
		if (ret)
			pr_err("Fail to scale down bus\n");
		else
			qseecom.current_mode = INACTIVE;
	}
	/* Gate whichever CE clocks were left enabled by active clients. */
	if (qclk->clk_access_cnt) {
		if (qclk->ce_clk != NULL)
			clk_disable_unprepare(qclk->ce_clk);
		if (qclk->ce_core_clk != NULL)
			clk_disable_unprepare(qclk->ce_core_clk);
		if (qclk->ce_bus_clk != NULL)
			clk_disable_unprepare(qclk->ce_bus_clk);
	}
	del_timer_sync(&(qseecom.bw_scale_down_timer));
	qseecom.timer_running = false;
	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	/* Cancel outside the locks; the work item takes qsee_bw_mutex. */
	cancel_work_sync(&qseecom.bw_inactive_req_ws);
	return 0;
}
/*
 * qseecom_resume() - legacy platform resume callback.
 *
 * Restores the bus bandwidth vote accumulated in cumulative_mode (capped
 * at HIGH), re-enables the CE clocks if clients held them at suspend time,
 * and re-arms the bandwidth scale-down timer.  The error labels form a
 * fallthrough unwind chain in reverse enable order: a bus-clk failure
 * disables the iface clk, then the core clk, then releases the locks.
 *
 * NOTE(review): the driver state is set to QSEECOM_STATE_READY even on
 * the -EIO error path — presumably intentional so clients are not wedged
 * after a partial resume; confirm before changing.
 *
 * Return: 0 on success, -EIO if any CE clock failed to re-enable.
 */
static int qseecom_resume(struct platform_device *pdev)
{
	int mode = 0;
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	if (qseecom.no_clock_support)
		goto exit;
	mutex_lock(&qsee_bw_mutex);
	mutex_lock(&clk_access_lock);
	/* Cap the restored vote at HIGH. */
	if (qseecom.cumulative_mode >= HIGH)
		mode = HIGH;
	else
		mode = qseecom.cumulative_mode;
	if (qseecom.cumulative_mode != INACTIVE) {
		ret = qseecom_bus_scale_update_request(
			qseecom.qsee_perf_client, mode);
		if (ret)
			pr_err("Fail to scale up bus to %d\n", mode);
		else
			qseecom.current_mode = mode;
	}
	/* Re-enable CE clocks in core -> iface -> bus order; on any
	 * failure, clk_access_cnt is zeroed so later teardown does not
	 * try to disable clocks that never came back up.
	 */
	if (qclk->clk_access_cnt) {
		if (qclk->ce_core_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_core_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE core clk\n");
				qclk->clk_access_cnt = 0;
				goto err;
			}
		}
		if (qclk->ce_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE iface clk\n");
				qclk->clk_access_cnt = 0;
				goto ce_clk_err;
			}
		}
		if (qclk->ce_bus_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_bus_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE bus clk\n");
				qclk->clk_access_cnt = 0;
				goto ce_bus_clk_err;
			}
		}
	}
	/* Re-arm the scale-down timer if anything is active again. */
	if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
		qseecom.bw_scale_down_timer.expires = jiffies +
			msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		mod_timer(&(qseecom.bw_scale_down_timer),
			qseecom.bw_scale_down_timer.expires);
		qseecom.timer_running = true;
	}
	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	goto exit;
	/* Unwind chain: each label falls through to undo earlier enables. */
ce_bus_clk_err:
	if (qclk->ce_clk)
		clk_disable_unprepare(qclk->ce_clk);
ce_clk_err:
	if (qclk->ce_core_clk)
		clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	ret = -EIO;
exit:
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
	return ret;
}
/* Device-tree match table: binds this driver to "qcom,qseecom" nodes. */
static const struct of_device_id qseecom_match[] = {
	{
		.compatible = "qcom,qseecom",
	},
	{}
};
/*
 * Platform driver descriptor.
 *
 * NOTE(review): this uses the legacy platform_driver suspend/resume
 * callbacks rather than a dev_pm_ops structure — presumably kept for
 * parity with the downstream tree; confirm before converting.
 */
static struct platform_driver qseecom_plat_driver = {
	.probe = qseecom_probe,
	.remove = qseecom_remove,
	.suspend = qseecom_suspend,
	.resume = qseecom_resume,
	.driver = {
		.name = "qseecom",
		.of_match_table = qseecom_match,
	},
};
  8900. static int qseecom_init(void)
  8901. {
  8902. return platform_driver_register(&qseecom_plat_driver);
  8903. }
/* Module exit point: unregister the qseecom platform driver. */
static void qseecom_exit(void)
{
	platform_driver_unregister(&qseecom_plat_driver);
}
/* Module metadata and init/exit hookup. */
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");
MODULE_IMPORT_NS(DMA_BUF);
module_init(qseecom_init);
module_exit(qseecom_exit);