qseecom.c 265 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677
76778677967806781678267836784678567866787678867896790679167926793679467956796679767986799680068016802680368046805680668076808680968106811681268136814681568166817681868196820682168226823682468256826682768286829683068316832683368346835683668376838683968406841684268436844684568466847684868496850685168526853685468556856685768586859686068616862686368646865686668676868686968706871687268736874687568766877687868796880688168826883688468856886688768886889689068916892689368946895689668976898689969006901690269036904690569066907690869096910691169126913691469156916691769186919692069216922692369246925692669276928692969306931693269336934693569366937693869396940694169426943694469456946694769486949695069516952695369546955695669576958695969606961696269636964696569666967696869696970697169726973697469756976697769786979698069816982698369846985698669876988698969906991699269936994699569966997699869997000700170027003700470057006700770087009701070117012701370147015701670177018701970207021702270237024702570267027702870297030703170327033703470357036703770387039704070417042704370447045704670477048704970507051705270537054705570567057705870597060706170627063706470657066706770687069707070717072707370747075707670777078707970807081708270837084708570867087708870897090709170927093709470957096709770987099710071017102710371047105710671077108710971107111711271137114711571167117711871197120712171227123712471257126712771287129713071317132713371347135713671377138713971407141714271437144714571467147714871497150715171527153715471557156715771587159716071617162716371647165716671677168716971707171717271737174717571767177717871797180718171827183718471857186718771887189719071917192719371947195719671977198719972007201720272037204720572067207720872097210721172127213721472157216721772187219722072217222722372247225722672277228722972307231723272337234723572367237723872397240724172427243724472457246724772487249725072517252725372547255725672577258725972607261726272637264726572667267726872697270727172727273727472757276727
77278727972807281728272837284728572867287728872897290729172927293729472957296729772987299730073017302730373047305730673077308730973107311731273137314731573167317731873197320732173227323732473257326732773287329733073317332733373347335733673377338733973407341734273437344734573467347734873497350735173527353735473557356735773587359736073617362736373647365736673677368736973707371737273737374737573767377737873797380738173827383738473857386738773887389739073917392739373947395739673977398739974007401740274037404740574067407740874097410741174127413741474157416741774187419742074217422742374247425742674277428742974307431743274337434743574367437743874397440744174427443744474457446744774487449745074517452745374547455745674577458745974607461746274637464746574667467746874697470747174727473747474757476747774787479748074817482748374847485748674877488748974907491749274937494749574967497749874997500750175027503750475057506750775087509751075117512751375147515751675177518751975207521752275237524752575267527752875297530753175327533753475357536753775387539754075417542754375447545754675477548754975507551755275537554755575567557755875597560756175627563756475657566756775687569757075717572757375747575757675777578757975807581758275837584758575867587758875897590759175927593759475957596759775987599760076017602760376047605760676077608760976107611761276137614761576167617761876197620762176227623762476257626762776287629763076317632763376347635763676377638763976407641764276437644764576467647764876497650765176527653765476557656765776587659766076617662766376647665766676677668766976707671767276737674767576767677767876797680768176827683768476857686768776887689769076917692769376947695769676977698769977007701770277037704770577067707770877097710771177127713771477157716771777187719772077217722772377247725772677277728772977307731773277337734773577367737773877397740774177427743774477457746774777487749775077517752775377547755775677577758775977607761776277637764776577667767776877697770777177727773777477757776777
77778777977807781778277837784778577867787778877897790779177927793779477957796779777987799780078017802780378047805780678077808780978107811781278137814781578167817781878197820782178227823782478257826782778287829783078317832783378347835783678377838783978407841784278437844784578467847784878497850785178527853785478557856785778587859786078617862786378647865786678677868786978707871787278737874787578767877787878797880788178827883788478857886788778887889789078917892789378947895789678977898789979007901790279037904790579067907790879097910791179127913791479157916791779187919792079217922792379247925792679277928792979307931793279337934793579367937793879397940794179427943794479457946794779487949795079517952795379547955795679577958795979607961796279637964796579667967796879697970797179727973797479757976797779787979798079817982798379847985798679877988798979907991799279937994799579967997799879998000800180028003800480058006800780088009801080118012801380148015801680178018801980208021802280238024802580268027802880298030803180328033803480358036803780388039804080418042804380448045804680478048804980508051805280538054805580568057805880598060806180628063806480658066806780688069807080718072807380748075807680778078807980808081808280838084808580868087808880898090809180928093809480958096809780988099810081018102810381048105810681078108810981108111811281138114811581168117811881198120812181228123812481258126812781288129813081318132813381348135813681378138813981408141814281438144814581468147814881498150815181528153815481558156815781588159816081618162816381648165816681678168816981708171817281738174817581768177817881798180818181828183818481858186818781888189819081918192819381948195819681978198819982008201820282038204820582068207820882098210821182128213821482158216821782188219822082218222822382248225822682278228822982308231823282338234823582368237823882398240824182428243824482458246824782488249825082518252825382548255825682578258825982608261826282638264826582668267826882698270827182728273827482758276827
78278827982808281828282838284828582868287828882898290829182928293829482958296829782988299830083018302830383048305830683078308830983108311831283138314831583168317831883198320832183228323832483258326832783288329833083318332833383348335833683378338833983408341834283438344834583468347834883498350835183528353835483558356835783588359836083618362836383648365836683678368836983708371837283738374837583768377837883798380838183828383838483858386838783888389839083918392839383948395839683978398839984008401840284038404840584068407840884098410841184128413841484158416841784188419842084218422842384248425842684278428842984308431843284338434843584368437843884398440844184428443844484458446844784488449845084518452845384548455845684578458845984608461846284638464846584668467846884698470847184728473847484758476847784788479848084818482848384848485848684878488848984908491849284938494849584968497849884998500850185028503850485058506850785088509851085118512851385148515851685178518851985208521852285238524852585268527852885298530853185328533853485358536853785388539854085418542854385448545854685478548854985508551855285538554855585568557855885598560856185628563856485658566856785688569857085718572857385748575857685778578857985808581858285838584858585868587858885898590859185928593859485958596859785988599860086018602860386048605860686078608860986108611861286138614861586168617861886198620862186228623862486258626862786288629863086318632863386348635863686378638863986408641864286438644864586468647864886498650865186528653865486558656865786588659866086618662866386648665866686678668866986708671867286738674867586768677867886798680868186828683868486858686868786888689869086918692869386948695869686978698869987008701870287038704870587068707870887098710871187128713871487158716871787188719872087218722872387248725872687278728872987308731873287338734873587368737873887398740874187428743874487458746874787488749875087518752875387548755875687578758875987608761876287638764876587668767876887698770877187728773877487758776877
78778877987808781878287838784878587868787878887898790879187928793879487958796879787988799880088018802880388048805880688078808880988108811881288138814881588168817881888198820882188228823882488258826882788288829883088318832883388348835883688378838883988408841884288438844884588468847884888498850885188528853885488558856885788588859886088618862886388648865886688678868886988708871887288738874887588768877887888798880888188828883888488858886888788888889889088918892889388948895889688978898889989008901890289038904890589068907890889098910891189128913891489158916891789188919892089218922892389248925892689278928892989308931893289338934893589368937893889398940894189428943894489458946894789488949895089518952895389548955895689578958895989608961896289638964896589668967896889698970897189728973897489758976897789788979898089818982898389848985898689878988898989908991899289938994899589968997899889999000900190029003900490059006900790089009901090119012901390149015901690179018901990209021902290239024902590269027902890299030903190329033903490359036903790389039904090419042904390449045904690479048904990509051905290539054905590569057905890599060906190629063906490659066906790689069907090719072907390749075907690779078907990809081908290839084908590869087908890899090909190929093909490959096909790989099910091019102910391049105910691079108910991109111911291139114911591169117911891199120912191229123912491259126912791289129913091319132913391349135913691379138913991409141914291439144914591469147914891499150915191529153915491559156915791589159916091619162916391649165916691679168916991709171917291739174917591769177917891799180918191829183918491859186918791889189919091919192919391949195919691979198919992009201920292039204920592069207920892099210921192129213921492159216921792189219922092219222922392249225922692279228922992309231923292339234923592369237923892399240924192429243924492459246924792489249925092519252925392549255925692579258925992609261926292639264926592669267926892699270927192729273927492759276927
79278927992809281928292839284928592869287928892899290929192929293929492959296929792989299930093019302930393049305930693079308930993109311931293139314931593169317931893199320932193229323932493259326932793289329933093319332933393349335933693379338933993409341934293439344934593469347934893499350935193529353935493559356935793589359936093619362936393649365936693679368936993709371937293739374937593769377937893799380938193829383938493859386938793889389939093919392939393949395939693979398939994009401940294039404940594069407940894099410941194129413941494159416941794189419942094219422942394249425942694279428942994309431943294339434943594369437943894399440944194429443944494459446944794489449945094519452945394549455945694579458945994609461946294639464946594669467946894699470947194729473947494759476947794789479948094819482948394849485948694879488948994909491949294939494949594969497949894999500950195029503950495059506950795089509951095119512951395149515951695179518951995209521952295239524952595269527952895299530953195329533953495359536953795389539954095419542954395449545954695479548954995509551955295539554955595569557955895599560956195629563956495659566956795689569957095719572957395749575957695779578957995809581958295839584958595869587958895899590959195929593959495959596959795989599960096019602960396049605960696079608960996109611961296139614961596169617961896199620962196229623962496259626962796289629963096319632963396349635963696379638963996409641964296439644964596469647964896499650965196529653965496559656965796589659966096619662966396649665966696679668966996709671967296739674967596769677967896799680968196829683968496859686968796889689969096919692969396949695969696979698969997009701970297039704970597069707970897099710971197129713971497159716971797189719972097219722972397249725972697279728972997309731973297339734973597369737973897399740974197429743974497459746974797489749975097519752975397549755975697579758975997609761976297639764976597669767976897699770977197729773977497759776977
797789779978097819782978397849785978697879788978997909791979297939794979597969797979897999800980198029803980498059806980798089809981098119812981398149815981698179818981998209821982298239824982598269827982898299830983198329833983498359836983798389839984098419842
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * QTI Secure Execution Environment Communicator (QSEECOM) driver
  4. *
  5. * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
  6. * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  7. */
  8. #define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__
  9. #include <linux/kernel.h>
  10. #include <linux/slab.h>
  11. #include <linux/module.h>
  12. #include <linux/fs.h>
  13. #include <linux/platform_device.h>
  14. #include <linux/debugfs.h>
  15. #include <linux/cdev.h>
  16. #include <linux/uaccess.h>
  17. #include <linux/sched.h>
  18. #include <linux/list.h>
  19. #include <linux/mutex.h>
  20. #include <linux/io.h>
  21. #include <linux/dma-buf.h>
  22. #include <linux/ion.h>
  23. #include <linux/msm_ion.h>
  24. #include <linux/types.h>
  25. #include <linux/clk.h>
  26. #include <linux/qseecom.h>
  27. #include <linux/elf.h>
  28. #include <linux/firmware.h>
  29. #include <linux/freezer.h>
  30. #include <linux/scatterlist.h>
  31. #include <linux/regulator/consumer.h>
  32. #include <linux/dma-mapping.h>
  33. #include <soc/qcom/qseecom_scm.h>
  34. #include <soc/qcom/qseecomi.h>
  35. #include <asm/cacheflush.h>
  36. #include <linux/delay.h>
  37. #include <linux/signal.h>
  38. #include <linux/compat.h>
  39. #include <linux/kthread.h>
  40. #include <linux/dma-map-ops.h>
  41. #include <linux/cma.h>
  42. #include <linux/of_platform.h>
  43. #include <linux/interconnect.h>
  44. #include <linux/of_reserved_mem.h>
  45. #include <linux/qtee_shmbridge.h>
  46. #include <linux/mem-buf.h>
  47. #include "ice.h"
  48. #if IS_ENABLED(CONFIG_QSEECOM_PROXY)
  49. #include <linux/qseecom_kernel.h>
  50. #include "misc/qseecom_priv.h"
  51. #else
  52. #include "misc/qseecom_kernel.h"
  53. #endif
#define QSEECOM_DEV "qseecom"

/* QSEOS / QSEE firmware interface version codes (compared against
 * qseecom.qsee_version / qseos_version at runtime).
 */
#define QSEOS_VERSION_14 0x14
#define QSEEE_VERSION_00 0x400000
#define QSEE_VERSION_01 0x401000
#define QSEE_VERSION_02 0x402000
#define QSEE_VERSION_03 0x403000
#define QSEE_VERSION_04 0x404000
#define QSEE_VERSION_05 0x405000
#define QSEE_VERSION_20 0x800000
#define QSEE_VERSION_40 0x1000000 /* TZ.BF.4.0 */

#define QSEE_CE_CLK_100MHZ 100000000
#define CE_CLK_DIV 1000000

/* Upper bound on scatter/gather entries passed to TZ in one message */
#define QSEECOM_MAX_SG_ENTRY 4096
#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT \
	(QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)

#define QSEECOM_INVALID_KEY_ID 0xff

/* Save partition image hash for authentication check */
#define SCM_SAVE_PARTITION_HASH_ID 0x01

/* Check if enterprise security is activate */
#define SCM_IS_ACTIVATED_ID 0x02

/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
#define SCM_MDTP_CIPHER_DIP 0x01

/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
#define MAX_DIP 0x20000

#define RPMB_SERVICE 0x2000
#define SSD_SERVICE 0x3000

/* Crypto clock vote timeouts, in ms (presumably — TODO confirm unit) */
#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT 2000
#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT 2000
#define TWO 2

/* Inline Crypto Engine (ICE) instance numbers for UFS and SDCC storage */
#define QSEECOM_UFS_ICE_CE_NUM 10
#define QSEECOM_SDCC_ICE_CE_NUM 20
#define QSEECOM_ICE_FDE_KEY_INDEX 0

#define PHY_ADDR_4G (1ULL<<32)

/* Driver lifecycle states tracked in qseecom.qseecom_state */
#define QSEECOM_STATE_NOT_READY 0
#define QSEECOM_STATE_SUSPEND 1
#define QSEECOM_STATE_READY 2

/* Bit position used by enum qseecom_ice_key_size_type below */
#define QSEECOM_ICE_FDE_KEY_SIZE_MASK 2

/*
 * default ce info unit to 0 for
 * services which
 * support only single instance.
 * Most of services are in this category.
 */
#define DEFAULT_CE_INFO_UNIT 0
#define DEFAULT_NUM_CE_INFO_UNIT 1

/* Full-disk-encryption flag bit: enable key wrapping in keystore */
#define FDE_FLAG_POS 4
#define ENABLE_KEY_WRAP_IN_KS (1 << FDE_FLAG_POS)
/* Bus clock identifiers (NOTE(review): DFAB/SFPB bus fabrics — confirm) */
enum qseecom_clk_definitions {
	CLK_DFAB = 0,
	CLK_SFPB,
};

/* ICE full-disk-encryption key sizes, encoded as flag bits shifted by
 * QSEECOM_ICE_FDE_KEY_SIZE_MASK.
 */
enum qseecom_ice_key_size_type {
	QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
		(0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
		(1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
	QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
		(0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
};

/* Role of a qseecom_dev_handle (stored in its ->type field) */
enum qseecom_client_handle_type {
	QSEECOM_CLIENT_APP = 1,
	QSEECOM_LISTENER_SERVICE,
	QSEECOM_SECURE_SERVICE,
	QSEECOM_GENERIC,
	QSEECOM_UNAVAILABLE_CLIENT_APP,
};

/* Crypto Engine hardware clock instance selector */
enum qseecom_ce_hw_instance {
	CLK_QSEE = 0,
	CLK_CE_DRV,
	CLK_INVALID,
};

/* Cache maintenance operation selector */
enum qseecom_cache_ops {
	QSEECOM_CACHE_CLEAN,
	QSEECOM_CACHE_INVALIDATE,
};

/* Wake state of the listener-unregister worker kthread */
enum qseecom_listener_unregister_kthread_state {
	LSNR_UNREG_KT_SLEEP = 0,
	LSNR_UNREG_KT_WAKEUP,
};

/* Wake state of the app-unload worker kthread */
enum qseecom_unload_app_kthread_state {
	UNLOAD_APP_KT_SLEEP = 0,
	UNLOAD_APP_KT_WAKEUP,
};
/* Driver-wide locks: bandwidth voting, TZ app access, clock access,
 * listener bookkeeping, and the pending app-unload list.
 */
static DEFINE_MUTEX(qsee_bw_mutex);
static DEFINE_MUTEX(app_access_lock);
static DEFINE_MUTEX(clk_access_lock);
static DEFINE_MUTEX(listener_access_lock);
static DEFINE_MUTEX(unload_app_pending_list_lock);

/* One shared-buffer descriptor passed to TZ; field layout explained in the
 * comment below. Mixed-case field names match the TZ-side interface.
 */
struct sglist_info {
	uint32_t indexAndFlags;
	uint32_t sizeOrCount;
};

/*
 * The 31st bit indicates only one or multiple physical address inside
 * the request buffer. If it is set, the index locates a single physical addr
 * inside the request buffer, and `sizeOrCount` is the size of the memory being
 * shared at that physical address.
 * Otherwise, the index locates an array of {start, len} pairs (a
 * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
 * that array.
 *
 * The 30th bit indicates 64 or 32bit address; when it is set, physical addr
 * and scatter gather entry sizes are 64-bit values. Otherwise, 32-bit values.
 *
 * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
 */
#define SGLISTINFO_SET_INDEX_FLAG(c, s, i) \
	((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))

#define SGLISTINFO_TABLE_SIZE (sizeof(struct sglist_info) * MAX_ION_FD)

#define FEATURE_ID_WHITELIST 15 /*whitelist feature id*/

#define MAKE_WHITELIST_VERSION(major, minor, patch) \
	(((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))

/* Reset a dma-buf attachment triple in one statement-safe macro */
#define MAKE_NULL(sgt, attach, dmabuf) do {\
	sgt = NULL;\
	attach = NULL;\
	dmabuf = NULL;\
	} while (0)
/* Per-listener bookkeeping: registration request, shared buffer (dma-buf
 * mapping plus kernel virtual/physical view), request/response wait queues,
 * and the sglist table handed to TZ.
 */
struct qseecom_registered_listener_list {
	struct list_head list;
	struct qseecom_register_listener_req svc;
	void *user_virt_sb_base;	/* userspace view of the shared buffer */
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	u8 *sb_virt;			/* kernel virtual address of shared buffer */
	phys_addr_t sb_phys;
	size_t sb_length;
	wait_queue_head_t rcv_req_wq;
	/* rcv_req_flag: 0: ready and empty; 1: received req */
	int rcv_req_flag;
	int send_resp_flag;
	bool listener_in_use;
	/* wq for thread blocked on this listener*/
	wait_queue_head_t listener_block_app_wq;
	struct sglist_info *sglistinfo_ptr;
	struct qtee_shm sglistinfo_shm;	/* shmbridge backing for sglistinfo_ptr */
	uint32_t sglist_cnt;
	int abort;
	bool unregister_pending;
};

/* Node queued while a listener unregister is deferred to the worker kthread */
struct qseecom_unregister_pending_list {
	struct list_head list;
	struct qseecom_dev_handle *data;
};
/* One loaded TZ application: identity, refcount, and reentrancy-block state
 * (app_blocked/blocked_on_listener_id track an app stalled on a listener).
 */
struct qseecom_registered_app_list {
	struct list_head list;
	u32 app_id;
	u32 ref_cnt;
	char app_name[MAX_APP_NAME_SIZE];
	u32 app_arch;
	bool app_blocked;
	u32 check_block;
	u32 blocked_on_listener_id;
};

/* Kernel-API client registered through the qseecom kernel interface */
struct qseecom_registered_kclient_list {
	struct list_head list;
	struct qseecom_handle *handle;
};
/* One Crypto Engine usage unit (FDE or PFE) and its pipe-pair table */
struct qseecom_ce_info_use {
	unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
	unsigned int unit_num;
	unsigned int num_ce_pipe_entries;
	struct qseecom_ce_pipe_entry *ce_pipe_entry;
	bool alloc;		/* unit currently allocated to a client */
	uint32_t type;
};

/* CE hardware topology: full-disk-encryption (fde) and per-file-encryption
 * (pfe) unit arrays for one CE instance.
 */
struct ce_hw_usage_info {
	uint32_t qsee_ce_hw_instance;
	uint32_t num_fde;
	struct qseecom_ce_info_use *fde;
	uint32_t num_pfe;
	struct qseecom_ce_info_use *pfe;
};

/* Clock handles for one CE instance plus an enable refcount */
struct qseecom_clk {
	enum qseecom_ce_hw_instance instance;
	struct clk *ce_core_clk;
	struct clk *ce_clk;
	struct clk *ce_core_src_clk;
	struct clk *ce_bus_clk;
	uint32_t clk_access_cnt;
};
/* Global driver state (single instance: the static `qseecom` below).
 * Groups: client/listener/app registries and their locks, device plumbing,
 * firmware version/capability flags, CE clock and bandwidth-scaling state,
 * and the two deferred-work kthreads (listener unregister, app unload).
 */
struct qseecom_control {
	/* Registries of listeners, loaded apps, and kernel clients */
	struct list_head registered_listener_list_head;
	struct list_head registered_app_list_head;
	spinlock_t registered_app_list_lock;
	struct list_head registered_kclient_list_head;
	spinlock_t registered_kclient_list_lock;
	wait_queue_head_t send_resp_wq;
	int send_resp_flag;

	/* Firmware versions and capabilities reported by TZ */
	uint32_t qseos_version;
	uint32_t qsee_version;

	/* Device / char-dev plumbing */
	struct device *pdev;        /* class_dev */
	struct device *dev;         /* platform_dev->dev */
	struct class *driver_class;
	dev_t qseecom_device_no;

	bool whitelist_support;
	bool commonlib_loaded;
	bool commonlib64_loaded;
	struct ce_hw_usage_info ce_info;

	/* Bandwidth voting / interconnect state */
	int qsee_bw_count;
	int qsee_sfpb_bw_count;
	uint32_t qsee_perf_client;
	struct icc_path *icc_path;
	uint32_t avg_bw;
	uint32_t peak_bw;

	/* CE clocks for QSEE and the CE driver instance */
	struct qseecom_clk qsee;
	struct qseecom_clk ce_drv;

	/* Bus-scaling timer/work: scale down after inactivity */
	bool support_bus_scaling;
	bool support_fde;
	bool support_pfe;
	bool fde_key_size;
	uint32_t cumulative_mode;
	enum qseecom_bandwidth_request_mode current_mode;
	struct timer_list bw_scale_down_timer;
	struct work_struct bw_inactive_req_ws;
	struct cdev cdev;
	bool timer_running;
	bool no_clock_support;
	unsigned int ce_opp_freq_hz;
	bool appsbl_qseecom_support;
	uint32_t qsee_reentrancy_support;
	bool enable_key_wrap_in_ks;

	/* Reentrancy: count of apps blocked on listeners, and waiters on it */
	uint32_t app_block_ref_cnt;
	wait_queue_head_t app_block_wq;
	atomic_t qseecom_state;		/* QSEECOM_STATE_* */
	int is_apps_region_protected;	/* set from androidboot.keymaster= */
	bool smcinvoke_support;

	/* shmbridge handles for the driver's static memory regions */
	uint64_t qseecom_bridge_handle;
	uint64_t ta_bridge_handle;
	uint64_t user_contig_bridge_handle;

	/* Deferred listener-unregister worker */
	struct list_head unregister_lsnr_pending_list_head;
	wait_queue_head_t register_lsnr_pending_wq;
	struct task_struct *unregister_lsnr_kthread_task;
	wait_queue_head_t unregister_lsnr_kthread_wq;
	atomic_t unregister_lsnr_kthread_state;

	/* Deferred app-unload worker */
	struct list_head unload_app_pending_list_head;
	struct task_struct *unload_app_kthread_task;
	wait_queue_head_t unload_app_kthread_wq;
	atomic_t unload_app_kthread_state;
};
/* Node queued while an app unload is deferred to the worker kthread */
struct qseecom_unload_app_pending_list {
	struct list_head list;
	struct qseecom_dev_handle *data;
};

/* Secure buffer passed by fd: allocation info plus its shmbridge handle */
struct qseecom_sec_buf_fd_info {
	bool is_sec_buf_fd;
	size_t size;
	void *vbase;
	phys_addr_t pbase;
	struct qtee_shm shm;
};

/* 32-bit {buffer, size} memref as laid out in TZ command buffers */
struct qseecom_param_memref {
	uint32_t buffer;
	uint32_t size;
};
/* Per-client state for a QSEECOM_CLIENT_APP handle: the TZ app it talks to
 * and the shared buffer (dma-buf mapping plus kernel/user views) used for
 * request/response exchange.
 */
struct qseecom_client_handle {
	u32 app_id;
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	u8 *sb_virt;			/* kernel virtual address of shared buffer */
	phys_addr_t sb_phys;
	size_t sb_length;
	unsigned long user_virt_sb_base;	/* userspace base of shared buffer */
	char app_name[MAX_APP_NAME_SIZE];
	u32 app_arch;
	struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
	bool from_smcinvoke;
	struct qtee_shm shm; /* kernel client's shm for req/rsp buf */
	bool unload_pending;	/* unload deferred to the worker kthread */
	bool from_loadapp;
};

/* Per-listener state for a QSEECOM_LISTENER_SERVICE handle */
struct qseecom_listener_handle {
	u32 id;
	bool unregister_pending;
	bool release_called;
};
/* The single global driver-state instance */
static struct qseecom_control qseecom;

/* Per-open-file state: `type` selects which union member is live.
 * Also carries abort/ioctl accounting, bandwidth-vote flags, and the
 * per-handle sglist table shared with TZ.
 */
struct qseecom_dev_handle {
	enum qseecom_client_handle_type type;
	union {
		struct qseecom_client_handle client;
		struct qseecom_listener_handle listener;
	};
	bool released;
	int abort;
	wait_queue_head_t abort_wq;
	atomic_t ioctl_count;	/* in-flight ioctls, for safe release */
	bool perf_enabled;
	bool fast_load_enabled;
	enum qseecom_bandwidth_request_mode mode;
	struct sglist_info *sglistinfo_ptr;
	struct qtee_shm sglistinfo_shm;	/* shmbridge backing for sglistinfo_ptr */
	uint32_t sglist_cnt;
	bool use_legacy_cmd;
};
/* Human-readable description for one key-usage index */
struct qseecom_key_id_usage_desc {
	uint8_t desc[QSEECOM_KEY_ID_SIZE];
};

/* One crypto pipe assignment: CE unit, engine, and pipe pair */
struct qseecom_crypto_info {
	unsigned int unit_num;
	unsigned int ce;
	unsigned int pipe_pair;
};

/* Descriptions indexed by key-usage id; index 0 is the invalid/undefined slot */
static struct qseecom_key_id_usage_desc key_id_array[] = {
	{
		.desc = "Undefined Usage Index",
	},
	{
		.desc = "Full Disk Encryption",
	},
	{
		.desc = "Per File Encryption",
	},
	{
		.desc = "UFS ICE Full Disk Encryption",
	},
	{
		.desc = "SDCC ICE Full Disk Encryption",
	},
};
/* Function proto types (definitions appear later in this file) */
static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
					char *cmnlib_name);
static int qseecom_enable_ice_setup(int usage);
static int qseecom_disable_ice_setup(int usage);
static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
					void __user *argp);
static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
					void __user *argp);
static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
					void __user *argp);
static int __qseecom_unload_app(struct qseecom_dev_handle *data,
				uint32_t app_id);
/*
 * Early-boot parser for the "androidboot.keymaster=" kernel command-line
 * option: stores the parsed integer into qseecom.is_apps_region_protected.
 * Returns 1 to tell the __setup machinery the option was consumed.
 */
static int __maybe_unused get_qseecom_keymaster_status(char *str)
{
	get_option(&str, &qseecom.is_apps_region_protected);
	return 1;
}
__setup("androidboot.keymaster=", get_qseecom_keymaster_status);
/* Coherent DMA buffer helpers (defined later in this file) */
static int __qseecom_alloc_coherent_buf(
			uint32_t size, u8 **vaddr, phys_addr_t *paddr);
static void __qseecom_free_coherent_buf(uint32_t size,
			u8 *vaddr, phys_addr_t paddr);

/* SCM busy-retry policy: sleep 30 ms per retry, up to 67 retries (~2 s) */
#define QSEECOM_SCM_EBUSY_WAIT_MS 30
#define QSEECOM_SCM_EBUSY_MAX_RETRY 67
/* TZ result code meaning the target app is busy (returned positive; the
 * retry loop below compares against its negation)
 */
#define QSEE_RESULT_FAIL_APP_BUSY 315
/*
 * Issue an SCM call to the secure world, retrying while TZ reports busy.
 *
 * Must be called with app_access_lock held. On -EBUSY (or when the TZ
 * response code in desc->ret[0] indicates the app is busy), the lock is
 * dropped for QSEECOM_SCM_EBUSY_WAIT_MS before retrying, so other clients
 * can make progress; callers must therefore tolerate the lock being
 * released and re-taken inside this function.
 *
 * Retries up to QSEECOM_SCM_EBUSY_MAX_RETRY times (~2 s total); a warning
 * is logged once after ~1 s (33 retries * 30 ms).
 *
 * Returns the result of the final qcom_scm_qseecom_call() attempt.
 */
static int __qseecom_scm_call2_locked(uint32_t smc_id, struct qseecom_scm_desc *desc)
{
	int ret = 0;
	int retry_count = 0;

	do {
		ret = qcom_scm_qseecom_call(smc_id, desc, false);
		if ((ret == -EBUSY) || (desc && (desc->ret[0] == -QSEE_RESULT_FAIL_APP_BUSY))) {
			/* Let other threads in while we back off */
			mutex_unlock(&app_access_lock);
			msleep(QSEECOM_SCM_EBUSY_WAIT_MS);
			mutex_lock(&app_access_lock);
		}
		if (retry_count == 33)
			pr_warn("secure world has been busy for 1 second!\n");
	} while (((ret == -EBUSY) || (desc && (desc->ret[0] == -QSEE_RESULT_FAIL_APP_BUSY))) &&
			(retry_count++ < QSEECOM_SCM_EBUSY_MAX_RETRY));
	return ret;
}
  424. static char *__qseecom_alloc_tzbuf(uint32_t size,
  425. phys_addr_t *pa, struct qtee_shm *shm)
  426. {
  427. char *tzbuf = NULL;
  428. int ret = qtee_shmbridge_allocate_shm(size, shm);
  429. if (ret)
  430. return NULL;
  431. tzbuf = shm->vaddr;
  432. memset(tzbuf, 0, size);
  433. *pa = shm->paddr;
  434. return tzbuf;
  435. }
/* Release a buffer previously obtained from __qseecom_alloc_tzbuf() */
static void __qseecom_free_tzbuf(struct qtee_shm *shm)
{
	qtee_shmbridge_free_shm(shm);
}
  440. static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
  441. const void *req_buf, void *resp_buf)
  442. {
  443. int ret = 0;
  444. uint32_t smc_id = 0;
  445. uint32_t qseos_cmd_id = 0;
  446. struct qseecom_scm_desc desc = {0};
  447. struct qseecom_command_scm_resp *scm_resp = NULL;
  448. struct qtee_shm shm = {0};
  449. phys_addr_t pa;
  450. if (!req_buf || !resp_buf) {
  451. pr_err("Invalid buffer pointer\n");
  452. return -EINVAL;
  453. }
  454. qseos_cmd_id = *(uint32_t *)req_buf;
  455. scm_resp = (struct qseecom_command_scm_resp *)resp_buf;
  456. switch (svc_id) {
  457. case SCM_SVC_INFO: {
  458. if (tz_cmd_id == 3) {
  459. smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
  460. desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
  461. desc.args[0] = *(uint32_t *)req_buf;
  462. } else {
  463. pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
  464. svc_id, tz_cmd_id);
  465. return -EINVAL;
  466. }
  467. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  468. break;
  469. }
  470. case SCM_SVC_ES: {
  471. switch (tz_cmd_id) {
  472. case SCM_SAVE_PARTITION_HASH_ID: {
  473. u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
  474. struct qseecom_save_partition_hash_req *p_hash_req =
  475. (struct qseecom_save_partition_hash_req *)
  476. req_buf;
  477. char *tzbuf = __qseecom_alloc_tzbuf(
  478. tzbuflen, &pa, &shm);
  479. if (!tzbuf)
  480. return -ENOMEM;
  481. memset(tzbuf, 0, tzbuflen);
  482. memcpy(tzbuf, p_hash_req->digest,
  483. SHA256_DIGEST_LENGTH);
  484. qtee_shmbridge_flush_shm_buf(&shm);
  485. smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
  486. desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
  487. desc.args[0] = p_hash_req->partition_id;
  488. desc.args[1] = pa;
  489. desc.args[2] = SHA256_DIGEST_LENGTH;
  490. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  491. __qseecom_free_tzbuf(&shm);
  492. break;
  493. }
  494. default: {
  495. pr_err("tz_cmd_id %d is not supported\n", tz_cmd_id);
  496. ret = -EINVAL;
  497. break;
  498. }
  499. } /* end of switch (tz_cmd_id) */
  500. break;
  501. } /* end of case SCM_SVC_ES */
  502. case SCM_SVC_TZSCHEDULER: {
  503. switch (qseos_cmd_id) {
  504. case QSEOS_APP_START_COMMAND: {
  505. struct qseecom_load_app_ireq *req;
  506. struct qseecom_load_app_64bit_ireq *req_64bit;
  507. smc_id = TZ_OS_APP_START_ID;
  508. desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
  509. if (qseecom.qsee_version < QSEE_VERSION_40) {
  510. req = (struct qseecom_load_app_ireq *)req_buf;
  511. desc.args[0] = req->mdt_len;
  512. desc.args[1] = req->img_len;
  513. desc.args[2] = req->phy_addr;
  514. } else {
  515. req_64bit =
  516. (struct qseecom_load_app_64bit_ireq *)
  517. req_buf;
  518. desc.args[0] = req_64bit->mdt_len;
  519. desc.args[1] = req_64bit->img_len;
  520. desc.args[2] = req_64bit->phy_addr;
  521. }
  522. __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
  523. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  524. break;
  525. }
  526. case QSEOS_APP_SHUTDOWN_COMMAND: {
  527. struct qseecom_unload_app_ireq *req;
  528. req = (struct qseecom_unload_app_ireq *)req_buf;
  529. smc_id = TZ_OS_APP_SHUTDOWN_ID;
  530. desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
  531. desc.args[0] = req->app_id;
  532. ret = qcom_scm_qseecom_call(smc_id, &desc, true);
  533. break;
  534. }
  535. case QSEOS_APP_LOOKUP_COMMAND: {
  536. struct qseecom_check_app_ireq *req;
  537. u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
  538. char *tzbuf = __qseecom_alloc_tzbuf(
  539. tzbuflen, &pa, &shm);
  540. if (!tzbuf)
  541. return -ENOMEM;
  542. req = (struct qseecom_check_app_ireq *)req_buf;
  543. pr_debug("Lookup app_name = %s\n", req->app_name);
  544. strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
  545. qtee_shmbridge_flush_shm_buf(&shm);
  546. smc_id = TZ_OS_APP_LOOKUP_ID;
  547. desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
  548. desc.args[0] = pa;
  549. desc.args[1] = strlen(req->app_name);
  550. __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
  551. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  552. __qseecom_free_tzbuf(&shm);
  553. break;
  554. }
  555. case QSEOS_APP_REGION_NOTIFICATION: {
  556. struct qsee_apps_region_info_ireq *req;
  557. struct qsee_apps_region_info_64bit_ireq *req_64bit;
  558. smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
  559. desc.arginfo =
  560. TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
  561. if (qseecom.qsee_version < QSEE_VERSION_40) {
  562. req = (struct qsee_apps_region_info_ireq *)
  563. req_buf;
  564. desc.args[0] = req->addr;
  565. desc.args[1] = req->size;
  566. } else {
  567. req_64bit =
  568. (struct qsee_apps_region_info_64bit_ireq *)
  569. req_buf;
  570. desc.args[0] = req_64bit->addr;
  571. desc.args[1] = req_64bit->size;
  572. }
  573. __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
  574. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  575. break;
  576. }
  577. case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
  578. struct qseecom_load_lib_image_ireq *req;
  579. struct qseecom_load_lib_image_64bit_ireq *req_64bit;
  580. smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
  581. desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
  582. if (qseecom.qsee_version < QSEE_VERSION_40) {
  583. req = (struct qseecom_load_lib_image_ireq *)
  584. req_buf;
  585. desc.args[0] = req->mdt_len;
  586. desc.args[1] = req->img_len;
  587. desc.args[2] = req->phy_addr;
  588. } else {
  589. req_64bit =
  590. (struct qseecom_load_lib_image_64bit_ireq *)
  591. req_buf;
  592. desc.args[0] = req_64bit->mdt_len;
  593. desc.args[1] = req_64bit->img_len;
  594. desc.args[2] = req_64bit->phy_addr;
  595. }
  596. __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
  597. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  598. break;
  599. }
  600. case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
  601. smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
  602. desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
  603. __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
  604. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  605. break;
  606. }
  607. case QSEOS_REGISTER_LISTENER: {
  608. struct qseecom_register_listener_ireq *req;
  609. struct qseecom_register_listener_64bit_ireq *req_64bit;
  610. desc.arginfo =
  611. TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
  612. if (qseecom.qsee_version < QSEE_VERSION_40) {
  613. req = (struct qseecom_register_listener_ireq *)
  614. req_buf;
  615. desc.args[0] = req->listener_id;
  616. desc.args[1] = req->sb_ptr;
  617. desc.args[2] = req->sb_len;
  618. } else {
  619. req_64bit =
  620. (struct qseecom_register_listener_64bit_ireq *)
  621. req_buf;
  622. desc.args[0] = req_64bit->listener_id;
  623. desc.args[1] = req_64bit->sb_ptr;
  624. desc.args[2] = req_64bit->sb_len;
  625. }
  626. qseecom.smcinvoke_support = true;
  627. smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
  628. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  629. if (ret == -EIO) {
  630. /* smcinvoke is not supported */
  631. qseecom.smcinvoke_support = false;
  632. smc_id = TZ_OS_REGISTER_LISTENER_ID;
  633. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  634. }
  635. break;
  636. }
  637. case QSEOS_DEREGISTER_LISTENER: {
  638. struct qseecom_unregister_listener_ireq *req;
  639. req = (struct qseecom_unregister_listener_ireq *)
  640. req_buf;
  641. smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
  642. desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
  643. desc.args[0] = req->listener_id;
  644. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  645. break;
  646. }
  647. case QSEOS_LISTENER_DATA_RSP_COMMAND: {
  648. struct qseecom_client_listener_data_irsp *req;
  649. req = (struct qseecom_client_listener_data_irsp *)
  650. req_buf;
  651. smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
  652. desc.arginfo =
  653. TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
  654. desc.args[0] = req->listener_id;
  655. desc.args[1] = req->status;
  656. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  657. break;
  658. }
  659. case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
  660. struct qseecom_client_listener_data_irsp *req;
  661. struct qseecom_client_listener_data_64bit_irsp *req_64;
  662. smc_id =
  663. TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
  664. desc.arginfo =
  665. TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
  666. if (qseecom.qsee_version < QSEE_VERSION_40) {
  667. req =
  668. (struct qseecom_client_listener_data_irsp *)
  669. req_buf;
  670. desc.args[0] = req->listener_id;
  671. desc.args[1] = req->status;
  672. desc.args[2] = req->sglistinfo_ptr;
  673. desc.args[3] = req->sglistinfo_len;
  674. } else {
  675. req_64 =
  676. (struct qseecom_client_listener_data_64bit_irsp *)
  677. req_buf;
  678. desc.args[0] = req_64->listener_id;
  679. desc.args[1] = req_64->status;
  680. desc.args[2] = req_64->sglistinfo_ptr;
  681. desc.args[3] = req_64->sglistinfo_len;
  682. }
  683. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  684. break;
  685. }
  686. case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
  687. struct qseecom_load_app_ireq *req;
  688. struct qseecom_load_app_64bit_ireq *req_64bit;
  689. smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
  690. desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
  691. if (qseecom.qsee_version < QSEE_VERSION_40) {
  692. req = (struct qseecom_load_app_ireq *)req_buf;
  693. desc.args[0] = req->mdt_len;
  694. desc.args[1] = req->img_len;
  695. desc.args[2] = req->phy_addr;
  696. } else {
  697. req_64bit =
  698. (struct qseecom_load_app_64bit_ireq *)req_buf;
  699. desc.args[0] = req_64bit->mdt_len;
  700. desc.args[1] = req_64bit->img_len;
  701. desc.args[2] = req_64bit->phy_addr;
  702. }
  703. __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
  704. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  705. break;
  706. }
  707. case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
  708. smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
  709. desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
  710. __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
  711. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  712. break;
  713. }
  714. case QSEOS_CLIENT_SEND_DATA_COMMAND: {
  715. struct qseecom_client_send_data_ireq *req;
  716. struct qseecom_client_send_data_64bit_ireq *req_64bit;
  717. smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
  718. desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
  719. if (qseecom.qsee_version < QSEE_VERSION_40) {
  720. req = (struct qseecom_client_send_data_ireq *)
  721. req_buf;
  722. desc.args[0] = req->app_id;
  723. desc.args[1] = req->req_ptr;
  724. desc.args[2] = req->req_len;
  725. desc.args[3] = req->rsp_ptr;
  726. desc.args[4] = req->rsp_len;
  727. } else {
  728. req_64bit =
  729. (struct qseecom_client_send_data_64bit_ireq *)
  730. req_buf;
  731. desc.args[0] = req_64bit->app_id;
  732. desc.args[1] = req_64bit->req_ptr;
  733. desc.args[2] = req_64bit->req_len;
  734. desc.args[3] = req_64bit->rsp_ptr;
  735. desc.args[4] = req_64bit->rsp_len;
  736. }
  737. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  738. break;
  739. }
  740. case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
  741. struct qseecom_client_send_data_ireq *req;
  742. struct qseecom_client_send_data_64bit_ireq *req_64bit;
  743. smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
  744. desc.arginfo =
  745. TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
  746. if (qseecom.qsee_version < QSEE_VERSION_40) {
  747. req = (struct qseecom_client_send_data_ireq *)
  748. req_buf;
  749. desc.args[0] = req->app_id;
  750. desc.args[1] = req->req_ptr;
  751. desc.args[2] = req->req_len;
  752. desc.args[3] = req->rsp_ptr;
  753. desc.args[4] = req->rsp_len;
  754. desc.args[5] = req->sglistinfo_ptr;
  755. desc.args[6] = req->sglistinfo_len;
  756. } else {
  757. req_64bit =
  758. (struct qseecom_client_send_data_64bit_ireq *)
  759. req_buf;
  760. desc.args[0] = req_64bit->app_id;
  761. desc.args[1] = req_64bit->req_ptr;
  762. desc.args[2] = req_64bit->req_len;
  763. desc.args[3] = req_64bit->rsp_ptr;
  764. desc.args[4] = req_64bit->rsp_len;
  765. desc.args[5] = req_64bit->sglistinfo_ptr;
  766. desc.args[6] = req_64bit->sglistinfo_len;
  767. }
  768. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  769. break;
  770. }
  771. case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
  772. struct qseecom_client_send_service_ireq *req;
  773. req = (struct qseecom_client_send_service_ireq *)
  774. req_buf;
  775. smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
  776. desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
  777. desc.args[0] = req->key_type;
  778. __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
  779. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  780. break;
  781. }
  782. case QSEOS_RPMB_ERASE_COMMAND: {
  783. smc_id = TZ_OS_RPMB_ERASE_ID;
  784. desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
  785. __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
  786. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  787. break;
  788. }
  789. case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
  790. smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
  791. desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
  792. __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
  793. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  794. break;
  795. }
  796. case QSEOS_DIAG_FUSE_REQ_CMD:
  797. case QSEOS_DIAG_FUSE_REQ_RSP_CMD: {
  798. struct qseecom_client_send_fsm_diag_req *req;
  799. smc_id = TZ_SECBOOT_GET_FUSE_INFO;
  800. desc.arginfo = TZ_SECBOOT_GET_FUSE_INFO_PARAM_ID;
  801. req = (struct qseecom_client_send_fsm_diag_req *) req_buf;
  802. desc.args[0] = req->req_ptr;
  803. desc.args[1] = req->req_len;
  804. desc.args[2] = req->rsp_ptr;
  805. desc.args[3] = req->rsp_len;
  806. __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
  807. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  808. break;
  809. }
  810. case QSEOS_GENERATE_KEY: {
  811. u32 tzbuflen = PAGE_ALIGN(sizeof
  812. (struct qseecom_key_generate_ireq) -
  813. sizeof(uint32_t));
  814. char *tzbuf = __qseecom_alloc_tzbuf(
  815. tzbuflen, &pa, &shm);
  816. if (!tzbuf)
  817. return -ENOMEM;
  818. memset(tzbuf, 0, tzbuflen);
  819. memcpy(tzbuf, req_buf + sizeof(uint32_t),
  820. (sizeof(struct qseecom_key_generate_ireq) -
  821. sizeof(uint32_t)));
  822. qtee_shmbridge_flush_shm_buf(&shm);
  823. smc_id = TZ_OS_KS_GEN_KEY_ID;
  824. desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
  825. desc.args[0] = pa;
  826. desc.args[1] = tzbuflen;
  827. __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
  828. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  829. __qseecom_free_tzbuf(&shm);
  830. break;
  831. }
  832. case QSEOS_DELETE_KEY: {
  833. u32 tzbuflen = PAGE_ALIGN(sizeof
  834. (struct qseecom_key_delete_ireq) -
  835. sizeof(uint32_t));
  836. char *tzbuf = __qseecom_alloc_tzbuf(
  837. tzbuflen, &pa, &shm);
  838. if (!tzbuf)
  839. return -ENOMEM;
  840. memset(tzbuf, 0, tzbuflen);
  841. memcpy(tzbuf, req_buf + sizeof(uint32_t),
  842. (sizeof(struct qseecom_key_delete_ireq) -
  843. sizeof(uint32_t)));
  844. qtee_shmbridge_flush_shm_buf(&shm);
  845. smc_id = TZ_OS_KS_DEL_KEY_ID;
  846. desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
  847. desc.args[0] = pa;
  848. desc.args[1] = tzbuflen;
  849. __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
  850. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  851. __qseecom_free_tzbuf(&shm);
  852. break;
  853. }
  854. case QSEOS_SET_KEY: {
  855. u32 tzbuflen = PAGE_ALIGN(sizeof
  856. (struct qseecom_key_select_ireq) -
  857. sizeof(uint32_t));
  858. char *tzbuf = __qseecom_alloc_tzbuf(
  859. tzbuflen, &pa, &shm);
  860. if (!tzbuf)
  861. return -ENOMEM;
  862. memset(tzbuf, 0, tzbuflen);
  863. memcpy(tzbuf, req_buf + sizeof(uint32_t),
  864. (sizeof(struct qseecom_key_select_ireq) -
  865. sizeof(uint32_t)));
  866. qtee_shmbridge_flush_shm_buf(&shm);
  867. smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
  868. desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
  869. desc.args[0] = pa;
  870. desc.args[1] = tzbuflen;
  871. __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
  872. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  873. __qseecom_free_tzbuf(&shm);
  874. break;
  875. }
  876. case QSEOS_UPDATE_KEY_USERINFO: {
  877. u32 tzbuflen = PAGE_ALIGN(sizeof
  878. (struct qseecom_key_userinfo_update_ireq) -
  879. sizeof(uint32_t));
  880. char *tzbuf = __qseecom_alloc_tzbuf(
  881. tzbuflen, &pa, &shm);
  882. if (!tzbuf)
  883. return -ENOMEM;
  884. memset(tzbuf, 0, tzbuflen);
  885. memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
  886. (struct qseecom_key_userinfo_update_ireq) -
  887. sizeof(uint32_t)));
  888. qtee_shmbridge_flush_shm_buf(&shm);
  889. smc_id = TZ_OS_KS_UPDATE_KEY_ID;
  890. desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
  891. desc.args[0] = pa;
  892. desc.args[1] = tzbuflen;
  893. __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
  894. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  895. __qseecom_free_tzbuf(&shm);
  896. break;
  897. }
  898. case QSEOS_TEE_OPEN_SESSION: {
  899. struct qseecom_qteec_ireq *req;
  900. struct qseecom_qteec_64bit_ireq *req_64bit;
  901. smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
  902. desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
  903. if (qseecom.qsee_version < QSEE_VERSION_40) {
  904. req = (struct qseecom_qteec_ireq *)req_buf;
  905. desc.args[0] = req->app_id;
  906. desc.args[1] = req->req_ptr;
  907. desc.args[2] = req->req_len;
  908. desc.args[3] = req->resp_ptr;
  909. desc.args[4] = req->resp_len;
  910. } else {
  911. req_64bit = (struct qseecom_qteec_64bit_ireq *)
  912. req_buf;
  913. desc.args[0] = req_64bit->app_id;
  914. desc.args[1] = req_64bit->req_ptr;
  915. desc.args[2] = req_64bit->req_len;
  916. desc.args[3] = req_64bit->resp_ptr;
  917. desc.args[4] = req_64bit->resp_len;
  918. }
  919. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  920. break;
  921. }
  922. case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
  923. struct qseecom_qteec_ireq *req;
  924. struct qseecom_qteec_64bit_ireq *req_64bit;
  925. smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
  926. desc.arginfo =
  927. TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
  928. if (qseecom.qsee_version < QSEE_VERSION_40) {
  929. req = (struct qseecom_qteec_ireq *)req_buf;
  930. desc.args[0] = req->app_id;
  931. desc.args[1] = req->req_ptr;
  932. desc.args[2] = req->req_len;
  933. desc.args[3] = req->resp_ptr;
  934. desc.args[4] = req->resp_len;
  935. desc.args[5] = req->sglistinfo_ptr;
  936. desc.args[6] = req->sglistinfo_len;
  937. } else {
  938. req_64bit = (struct qseecom_qteec_64bit_ireq *)
  939. req_buf;
  940. desc.args[0] = req_64bit->app_id;
  941. desc.args[1] = req_64bit->req_ptr;
  942. desc.args[2] = req_64bit->req_len;
  943. desc.args[3] = req_64bit->resp_ptr;
  944. desc.args[4] = req_64bit->resp_len;
  945. desc.args[5] = req_64bit->sglistinfo_ptr;
  946. desc.args[6] = req_64bit->sglistinfo_len;
  947. }
  948. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  949. break;
  950. }
  951. case QSEOS_TEE_INVOKE_COMMAND: {
  952. struct qseecom_qteec_ireq *req;
  953. struct qseecom_qteec_64bit_ireq *req_64bit;
  954. smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
  955. desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
  956. if (qseecom.qsee_version < QSEE_VERSION_40) {
  957. req = (struct qseecom_qteec_ireq *)req_buf;
  958. desc.args[0] = req->app_id;
  959. desc.args[1] = req->req_ptr;
  960. desc.args[2] = req->req_len;
  961. desc.args[3] = req->resp_ptr;
  962. desc.args[4] = req->resp_len;
  963. } else {
  964. req_64bit = (struct qseecom_qteec_64bit_ireq *)
  965. req_buf;
  966. desc.args[0] = req_64bit->app_id;
  967. desc.args[1] = req_64bit->req_ptr;
  968. desc.args[2] = req_64bit->req_len;
  969. desc.args[3] = req_64bit->resp_ptr;
  970. desc.args[4] = req_64bit->resp_len;
  971. }
  972. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  973. break;
  974. }
  975. case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
  976. struct qseecom_qteec_ireq *req;
  977. struct qseecom_qteec_64bit_ireq *req_64bit;
  978. smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
  979. desc.arginfo =
  980. TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
  981. if (qseecom.qsee_version < QSEE_VERSION_40) {
  982. req = (struct qseecom_qteec_ireq *)req_buf;
  983. desc.args[0] = req->app_id;
  984. desc.args[1] = req->req_ptr;
  985. desc.args[2] = req->req_len;
  986. desc.args[3] = req->resp_ptr;
  987. desc.args[4] = req->resp_len;
  988. desc.args[5] = req->sglistinfo_ptr;
  989. desc.args[6] = req->sglistinfo_len;
  990. } else {
  991. req_64bit = (struct qseecom_qteec_64bit_ireq *)
  992. req_buf;
  993. desc.args[0] = req_64bit->app_id;
  994. desc.args[1] = req_64bit->req_ptr;
  995. desc.args[2] = req_64bit->req_len;
  996. desc.args[3] = req_64bit->resp_ptr;
  997. desc.args[4] = req_64bit->resp_len;
  998. desc.args[5] = req_64bit->sglistinfo_ptr;
  999. desc.args[6] = req_64bit->sglistinfo_len;
  1000. }
  1001. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  1002. break;
  1003. }
  1004. case QSEOS_TEE_CLOSE_SESSION: {
  1005. struct qseecom_qteec_ireq *req;
  1006. struct qseecom_qteec_64bit_ireq *req_64bit;
  1007. smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
  1008. desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
  1009. if (qseecom.qsee_version < QSEE_VERSION_40) {
  1010. req = (struct qseecom_qteec_ireq *)req_buf;
  1011. desc.args[0] = req->app_id;
  1012. desc.args[1] = req->req_ptr;
  1013. desc.args[2] = req->req_len;
  1014. desc.args[3] = req->resp_ptr;
  1015. desc.args[4] = req->resp_len;
  1016. } else {
  1017. req_64bit = (struct qseecom_qteec_64bit_ireq *)
  1018. req_buf;
  1019. desc.args[0] = req_64bit->app_id;
  1020. desc.args[1] = req_64bit->req_ptr;
  1021. desc.args[2] = req_64bit->req_len;
  1022. desc.args[3] = req_64bit->resp_ptr;
  1023. desc.args[4] = req_64bit->resp_len;
  1024. }
  1025. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  1026. break;
  1027. }
  1028. case QSEOS_TEE_REQUEST_CANCELLATION: {
  1029. struct qseecom_qteec_ireq *req;
  1030. struct qseecom_qteec_64bit_ireq *req_64bit;
  1031. smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
  1032. desc.arginfo =
  1033. TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
  1034. if (qseecom.qsee_version < QSEE_VERSION_40) {
  1035. req = (struct qseecom_qteec_ireq *)req_buf;
  1036. desc.args[0] = req->app_id;
  1037. desc.args[1] = req->req_ptr;
  1038. desc.args[2] = req->req_len;
  1039. desc.args[3] = req->resp_ptr;
  1040. desc.args[4] = req->resp_len;
  1041. } else {
  1042. req_64bit = (struct qseecom_qteec_64bit_ireq *)
  1043. req_buf;
  1044. desc.args[0] = req_64bit->app_id;
  1045. desc.args[1] = req_64bit->req_ptr;
  1046. desc.args[2] = req_64bit->req_len;
  1047. desc.args[3] = req_64bit->resp_ptr;
  1048. desc.args[4] = req_64bit->resp_len;
  1049. }
  1050. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  1051. break;
  1052. }
  1053. case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
  1054. struct qseecom_continue_blocked_request_ireq *req =
  1055. (struct qseecom_continue_blocked_request_ireq *)
  1056. req_buf;
  1057. if (qseecom.smcinvoke_support)
  1058. smc_id =
  1059. TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID;
  1060. else
  1061. smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
  1062. desc.arginfo =
  1063. TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
  1064. desc.args[0] = req->app_or_session_id;
  1065. ret = __qseecom_scm_call2_locked(smc_id, &desc);
  1066. break;
  1067. }
  1068. default: {
  1069. pr_err("qseos_cmd_id %d is not supported.\n",
  1070. qseos_cmd_id);
  1071. ret = -EINVAL;
  1072. break;
  1073. }
  1074. } /*end of switch (qsee_cmd_id) */
  1075. break;
  1076. } /*end of case SCM_SVC_TZSCHEDULER*/
  1077. default: {
  1078. pr_err("svc_id 0x%x is not supported.\n", svc_id);
  1079. ret = -EINVAL;
  1080. break;
  1081. }
  1082. } /*end of switch svc_id */
  1083. scm_resp->result = desc.ret[0];
  1084. scm_resp->resp_type = desc.ret[1];
  1085. scm_resp->data = desc.ret[2];
  1086. pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
  1087. svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
  1088. pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
  1089. scm_resp->result, scm_resp->resp_type, scm_resp->data);
  1090. return ret;
  1091. }
  1092. static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
  1093. size_t cmd_len, void *resp_buf, size_t resp_len)
  1094. {
  1095. return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
  1096. }
  1097. static struct qseecom_registered_listener_list *__qseecom_find_svc(
  1098. int32_t listener_id)
  1099. {
  1100. struct qseecom_registered_listener_list *entry = NULL;
  1101. list_for_each_entry(entry,
  1102. &qseecom.registered_listener_list_head, list) {
  1103. if (entry->svc.listener_id == listener_id)
  1104. break;
  1105. }
  1106. if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
  1107. pr_debug("Service id: %u is not found\n", listener_id);
  1108. return NULL;
  1109. }
  1110. return entry;
  1111. }
  1112. static int qseecom_dmabuf_cache_operations(struct dma_buf *dmabuf,
  1113. enum qseecom_cache_ops cache_op)
  1114. {
  1115. int ret = 0;
  1116. if (!dmabuf) {
  1117. pr_err("dmabuf is NULL\n");
  1118. ret = -EINVAL;
  1119. goto exit;
  1120. }
  1121. switch (cache_op) {
  1122. case QSEECOM_CACHE_CLEAN: /* Doing CLEAN and INVALIDATE */
  1123. dma_buf_begin_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
  1124. dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
  1125. break;
  1126. case QSEECOM_CACHE_INVALIDATE:
  1127. dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE);
  1128. dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
  1129. break;
  1130. default:
  1131. pr_err("cache (%d) operation not supported\n",
  1132. cache_op);
  1133. ret = -EINVAL;
  1134. goto exit;
  1135. }
  1136. exit:
  1137. return ret;
  1138. }
  1139. static int qseecom_destroy_bridge_callback(void *dtor_data)
  1140. {
  1141. int ret = 0;
  1142. uint64_t handle = (uint64_t)dtor_data;
  1143. pr_debug("to destroy shm bridge %lld\n", handle);
  1144. ret = qtee_shmbridge_deregister(handle);
  1145. if (ret) {
  1146. pr_err("failed to destroy shm bridge %lld\n", handle);
  1147. return ret;
  1148. }
  1149. return ret;
  1150. }
  1151. static int qseecom_create_bridge_for_secbuf(int ion_fd, struct dma_buf *dmabuf,
  1152. struct sg_table *sgt)
  1153. {
  1154. int ret = 0;
  1155. phys_addr_t phys;
  1156. size_t size = 0;
  1157. uint64_t handle = 0;
  1158. int tz_perm = PERM_READ|PERM_WRITE;
  1159. uint32_t *vmid_list;
  1160. uint32_t *perms_list;
  1161. uint32_t nelems = 0;
  1162. struct scatterlist *sg = sgt->sgl;
  1163. if (!qtee_shmbridge_is_enabled())
  1164. return 0;
  1165. phys = sg_phys(sg);
  1166. size = sg->length;
  1167. ret = qtee_shmbridge_query(phys);
  1168. if (ret) {
  1169. pr_debug("bridge exists\n");
  1170. return 0;
  1171. }
  1172. if (mem_buf_dma_buf_exclusive_owner(dmabuf) || (sgt->nents != 1)) {
  1173. pr_debug("just create bridge for contiguous secure buf\n");
  1174. return 0;
  1175. }
  1176. ret = mem_buf_dma_buf_copy_vmperm(dmabuf, (int **)&vmid_list,
  1177. (int **)&perms_list, (int *)&nelems);
  1178. if (ret) {
  1179. pr_err("mem_buf_dma_buf_copy_vmperm failure, err=%d\n", ret);
  1180. return ret;
  1181. }
  1182. ret = qtee_shmbridge_register(phys, size, vmid_list, perms_list, nelems,
  1183. tz_perm, &handle);
  1184. if (ret && ret != -EEXIST) {
  1185. pr_err("creation of shm bridge failed with ret: %d\n",
  1186. ret);
  1187. goto exit;
  1188. }
  1189. pr_debug("created shm bridge %lld\n", handle);
  1190. mem_buf_dma_buf_set_destructor(dmabuf, qseecom_destroy_bridge_callback,
  1191. (void *)handle);
  1192. exit:
  1193. kfree(perms_list);
  1194. kfree(vmid_list);
  1195. return ret;
  1196. }
  1197. static int qseecom_dmabuf_map(int ion_fd, struct sg_table **sgt,
  1198. struct dma_buf_attachment **attach,
  1199. struct dma_buf **dmabuf)
  1200. {
  1201. struct dma_buf *new_dma_buf = NULL;
  1202. struct dma_buf_attachment *new_attach = NULL;
  1203. struct sg_table *new_sgt = NULL;
  1204. int ret = 0;
  1205. new_dma_buf = dma_buf_get(ion_fd);
  1206. if (IS_ERR_OR_NULL(new_dma_buf)) {
  1207. pr_err("dma_buf_get() for ion_fd %d failed\n", ion_fd);
  1208. ret = -ENOMEM;
  1209. goto err;
  1210. }
  1211. new_attach = dma_buf_attach(new_dma_buf, qseecom.dev);
  1212. if (IS_ERR_OR_NULL(new_attach)) {
  1213. pr_err("dma_buf_attach() for ion_fd %d failed\n", ion_fd);
  1214. ret = -ENOMEM;
  1215. goto err_put;
  1216. }
  1217. new_sgt = dma_buf_map_attachment(new_attach, DMA_BIDIRECTIONAL);
  1218. if (IS_ERR_OR_NULL(new_sgt)) {
  1219. ret = PTR_ERR(new_sgt);
  1220. pr_err("dma_buf_map_attachment for ion_fd %d failed ret = %d\n",
  1221. ion_fd, ret);
  1222. goto err_detach;
  1223. }
  1224. ret = qseecom_create_bridge_for_secbuf(ion_fd, new_dma_buf, new_sgt);
  1225. if (ret) {
  1226. pr_err("failed to create bridge for fd %d\n", ion_fd);
  1227. goto err_unmap_attachment;
  1228. }
  1229. *sgt = new_sgt;
  1230. *attach = new_attach;
  1231. *dmabuf = new_dma_buf;
  1232. return ret;
  1233. err_unmap_attachment:
  1234. dma_buf_unmap_attachment(new_attach, new_sgt, DMA_BIDIRECTIONAL);
  1235. err_detach:
  1236. dma_buf_detach(new_dma_buf, new_attach);
  1237. err_put:
  1238. dma_buf_put(new_dma_buf);
  1239. err:
  1240. return ret;
  1241. }
  1242. static void qseecom_dmabuf_unmap(struct sg_table *sgt,
  1243. struct dma_buf_attachment *attach,
  1244. struct dma_buf *dmabuf)
  1245. {
  1246. dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
  1247. dma_buf_detach(dmabuf, attach);
  1248. dma_buf_put(dmabuf);
  1249. }
  1250. /* convert ion_fd to phys_adds and virt_addr*/
  1251. static int qseecom_vaddr_map(int ion_fd,
  1252. phys_addr_t *paddr, void **vaddr,
  1253. struct sg_table **sgt,
  1254. struct dma_buf_attachment **attach,
  1255. size_t *sb_length, struct dma_buf **dmabuf)
  1256. {
  1257. struct dma_buf *new_dma_buf = NULL;
  1258. struct dma_buf_attachment *new_attach = NULL;
  1259. struct dma_buf_map new_dma_buf_map = {0};
  1260. struct sg_table *new_sgt = NULL;
  1261. void *new_va = NULL;
  1262. int ret = 0;
  1263. ret = qseecom_dmabuf_map(ion_fd, &new_sgt, &new_attach, &new_dma_buf);
  1264. if (ret) {
  1265. pr_err("qseecom_dmabuf_map for ion_fd %d failed ret = %d\n",
  1266. ion_fd, ret);
  1267. goto err;
  1268. }
  1269. ret = 0;
  1270. *paddr = sg_dma_address(new_sgt->sgl);
  1271. *sb_length = new_sgt->sgl->length;
  1272. dma_buf_begin_cpu_access(new_dma_buf, DMA_BIDIRECTIONAL);
  1273. ret = dma_buf_vmap(new_dma_buf, &new_dma_buf_map);
  1274. new_va = ret ? NULL : new_dma_buf_map.vaddr;
  1275. if (!new_va) {
  1276. pr_err("dma_buf_vmap failed\n");
  1277. ret = -ENOMEM;
  1278. goto err_unmap;
  1279. }
  1280. *dmabuf = new_dma_buf;
  1281. *attach = new_attach;
  1282. *sgt = new_sgt;
  1283. *vaddr = new_va;
  1284. return ret;
  1285. err_unmap:
  1286. dma_buf_end_cpu_access(new_dma_buf, DMA_BIDIRECTIONAL);
  1287. qseecom_dmabuf_unmap(new_sgt, new_attach, new_dma_buf);
  1288. MAKE_NULL(*sgt, *attach, *dmabuf);
  1289. err:
  1290. return ret;
  1291. }
  1292. static void qseecom_vaddr_unmap(void *vaddr, struct sg_table *sgt,
  1293. struct dma_buf_attachment *attach,
  1294. struct dma_buf *dmabuf)
  1295. {
  1296. struct dma_buf_map dmabufmap = DMA_BUF_MAP_INIT_VADDR(vaddr);
  1297. if (!dmabuf || !vaddr || !sgt || !attach)
  1298. return;
  1299. pr_err("SMITA trying to unmap vaddr");
  1300. dma_buf_vunmap(dmabuf, &dmabufmap);
  1301. dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
  1302. qseecom_dmabuf_unmap(sgt, attach, dmabuf);
  1303. }
  1304. static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
  1305. struct qseecom_dev_handle *handle,
  1306. struct qseecom_register_listener_req *listener)
  1307. {
  1308. int ret = 0;
  1309. struct qseecom_register_listener_ireq req;
  1310. struct qseecom_register_listener_64bit_ireq req_64bit;
  1311. struct qseecom_command_scm_resp resp;
  1312. void *cmd_buf = NULL;
  1313. size_t cmd_len;
  1314. ret = qseecom_vaddr_map(listener->ifd_data_fd,
  1315. &svc->sb_phys, (void **)&svc->sb_virt,
  1316. &svc->sgt, &svc->attach,
  1317. &svc->sb_length, &svc->dmabuf);
  1318. if (ret) {
  1319. pr_err("failed to convert ion_fd %d for lsnr %d with err: %d\n",
  1320. listener->ifd_data_fd, svc->svc.listener_id, ret);
  1321. return -EINVAL;
  1322. }
  1323. if (qseecom.qsee_version < QSEE_VERSION_40) {
  1324. req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
  1325. req.listener_id = svc->svc.listener_id;
  1326. req.sb_len = svc->sb_length;
  1327. req.sb_ptr = (uint32_t)svc->sb_phys;
  1328. cmd_buf = (void *)&req;
  1329. cmd_len = sizeof(struct qseecom_register_listener_ireq);
  1330. } else {
  1331. req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
  1332. req_64bit.listener_id = svc->svc.listener_id;
  1333. req_64bit.sb_len = svc->sb_length;
  1334. req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
  1335. cmd_buf = (void *)&req_64bit;
  1336. cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
  1337. }
  1338. resp.result = QSEOS_RESULT_INCOMPLETE;
  1339. mutex_unlock(&listener_access_lock);
  1340. mutex_lock(&app_access_lock);
  1341. __qseecom_reentrancy_check_if_no_app_blocked(
  1342. TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID);
  1343. ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
  1344. &resp, sizeof(resp));
  1345. mutex_unlock(&app_access_lock);
  1346. mutex_lock(&listener_access_lock);
  1347. if (ret) {
  1348. pr_err("qseecom_scm_call failed with err: %d\n", ret);
  1349. ret = -EINVAL;
  1350. goto err;
  1351. }
  1352. if (resp.result != QSEOS_RESULT_SUCCESS) {
  1353. pr_err("Error SB registration req: resp.result = %d\n",
  1354. resp.result);
  1355. ret = -EPERM;
  1356. goto err;
  1357. }
  1358. return 0;
  1359. err:
  1360. if (svc->dmabuf) {
  1361. qseecom_vaddr_unmap(svc->sb_virt, svc->sgt, svc->attach,
  1362. svc->dmabuf);
  1363. MAKE_NULL(svc->sgt, svc->attach, svc->dmabuf);
  1364. }
  1365. return ret;
  1366. }
  1367. static int qseecom_register_listener(struct qseecom_dev_handle *data,
  1368. void __user *argp)
  1369. {
  1370. int ret = 0;
  1371. struct qseecom_register_listener_req rcvd_lstnr;
  1372. struct qseecom_registered_listener_list *new_entry;
  1373. struct qseecom_registered_listener_list *ptr_svc;
  1374. ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
  1375. if (ret) {
  1376. pr_err("copy_from_user failed\n");
  1377. return ret;
  1378. }
  1379. if (!access_ok((void __user *)rcvd_lstnr.virt_sb_base,
  1380. rcvd_lstnr.sb_size))
  1381. return -EFAULT;
  1382. ptr_svc = __qseecom_find_svc(rcvd_lstnr.listener_id);
  1383. if (ptr_svc) {
  1384. if (!ptr_svc->unregister_pending) {
  1385. pr_err("Service %d is not unique\n",
  1386. rcvd_lstnr.listener_id);
  1387. data->released = true;
  1388. return -EBUSY;
  1389. } else {
  1390. /*wait until listener is unregistered*/
  1391. pr_debug("register %d has to wait\n",
  1392. rcvd_lstnr.listener_id);
  1393. mutex_unlock(&listener_access_lock);
  1394. ret = wait_event_interruptible(
  1395. qseecom.register_lsnr_pending_wq,
  1396. list_empty(
  1397. &qseecom.unregister_lsnr_pending_list_head));
  1398. if (ret) {
  1399. pr_err("interrupted register_pending_wq %d\n",
  1400. rcvd_lstnr.listener_id);
  1401. mutex_lock(&listener_access_lock);
  1402. return -ERESTARTSYS;
  1403. }
  1404. mutex_lock(&listener_access_lock);
  1405. }
  1406. }
  1407. new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
  1408. if (!new_entry)
  1409. return -ENOMEM;
  1410. memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
  1411. new_entry->rcv_req_flag = 0;
  1412. new_entry->sglistinfo_ptr =
  1413. (struct sglist_info *)__qseecom_alloc_tzbuf(
  1414. sizeof(struct sglist_info) * MAX_ION_FD,
  1415. &new_entry->sglistinfo_shm.paddr,
  1416. &new_entry->sglistinfo_shm);
  1417. if (!new_entry->sglistinfo_ptr) {
  1418. kfree(new_entry);
  1419. return -ENOMEM;
  1420. }
  1421. new_entry->svc.listener_id = rcvd_lstnr.listener_id;
  1422. new_entry->sb_length = rcvd_lstnr.sb_size;
  1423. new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
  1424. if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
  1425. pr_err("qseecom_set_sb_memory failed for listener %d, size %d\n",
  1426. rcvd_lstnr.listener_id, rcvd_lstnr.sb_size);
  1427. __qseecom_free_tzbuf(&new_entry->sglistinfo_shm);
  1428. kfree_sensitive(new_entry);
  1429. return -ENOMEM;
  1430. }
  1431. init_waitqueue_head(&new_entry->rcv_req_wq);
  1432. init_waitqueue_head(&new_entry->listener_block_app_wq);
  1433. new_entry->send_resp_flag = 0;
  1434. new_entry->listener_in_use = false;
  1435. list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
  1436. data->listener.id = rcvd_lstnr.listener_id;
  1437. pr_debug("Service %d is registered\n", rcvd_lstnr.listener_id);
  1438. return ret;
  1439. }
  1440. static int __qseecom_unregister_listener(struct qseecom_dev_handle *data,
  1441. struct qseecom_registered_listener_list *ptr_svc)
  1442. {
  1443. int ret = 0;
  1444. struct qseecom_register_listener_ireq req;
  1445. struct qseecom_command_scm_resp resp;
  1446. req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
  1447. req.listener_id = data->listener.id;
  1448. resp.result = QSEOS_RESULT_INCOMPLETE;
  1449. mutex_unlock(&listener_access_lock);
  1450. mutex_lock(&app_access_lock);
  1451. __qseecom_reentrancy_check_if_no_app_blocked(
  1452. TZ_OS_DEREGISTER_LISTENER_ID);
  1453. ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
  1454. sizeof(req), &resp, sizeof(resp));
  1455. mutex_unlock(&app_access_lock);
  1456. mutex_lock(&listener_access_lock);
  1457. if (ret) {
  1458. pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
  1459. ret, data->listener.id);
  1460. return ret;
  1461. }
  1462. if (resp.result != QSEOS_RESULT_SUCCESS) {
  1463. pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
  1464. resp.result, data->listener.id);
  1465. ret = -EPERM;
  1466. goto exit;
  1467. }
  1468. while (atomic_read(&data->ioctl_count) > 1) {
  1469. if (wait_event_interruptible(data->abort_wq,
  1470. atomic_read(&data->ioctl_count) <= 1)) {
  1471. pr_err("Interrupted from abort\n");
  1472. ret = -ERESTARTSYS;
  1473. }
  1474. }
  1475. exit:
  1476. if (ptr_svc->dmabuf) {
  1477. qseecom_vaddr_unmap(ptr_svc->sb_virt,
  1478. ptr_svc->sgt, ptr_svc->attach, ptr_svc->dmabuf);
  1479. MAKE_NULL(ptr_svc->sgt, ptr_svc->attach, ptr_svc->dmabuf);
  1480. }
  1481. __qseecom_free_tzbuf(&ptr_svc->sglistinfo_shm);
  1482. list_del(&ptr_svc->list);
  1483. kfree_sensitive(ptr_svc);
  1484. data->released = true;
  1485. pr_debug("Service %d is unregistered\n", data->listener.id);
  1486. return ret;
  1487. }
  1488. static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
  1489. {
  1490. struct qseecom_registered_listener_list *ptr_svc = NULL;
  1491. struct qseecom_unregister_pending_list *entry = NULL;
  1492. if (data->released) {
  1493. pr_err("Don't unregister lsnr %d\n", data->listener.id);
  1494. return -EINVAL;
  1495. }
  1496. ptr_svc = __qseecom_find_svc(data->listener.id);
  1497. if (!ptr_svc) {
  1498. pr_err("Unregiser invalid listener ID %d\n", data->listener.id);
  1499. return -ENODATA;
  1500. }
  1501. /* stop CA thread waiting for listener response */
  1502. ptr_svc->abort = 1;
  1503. wake_up_interruptible_all(&qseecom.send_resp_wq);
  1504. /* stop listener thread waiting for listener request */
  1505. data->abort = 1;
  1506. wake_up_all(&ptr_svc->rcv_req_wq);
  1507. /* return directly if pending*/
  1508. if (ptr_svc->unregister_pending)
  1509. return 0;
  1510. /*add unregistration into pending list*/
  1511. entry = kzalloc(sizeof(*entry), GFP_KERNEL);
  1512. if (!entry)
  1513. return -ENOMEM;
  1514. entry->data = data;
  1515. list_add_tail(&entry->list,
  1516. &qseecom.unregister_lsnr_pending_list_head);
  1517. ptr_svc->unregister_pending = true;
  1518. pr_debug("unregister %d pending\n", data->listener.id);
  1519. return 0;
  1520. }
/*
 * Drain qseecom.unregister_lsnr_pending_list_head: for every queued client
 * whose qseecom_release() has run, unregister its listener from TZ and free
 * the client data.  Called from the unregister kthread with no locks held;
 * takes listener_access_lock for the whole pass.
 */
static void __qseecom_processing_pending_lsnr_unregister(void)
{
	struct qseecom_unregister_pending_list *entry = NULL;
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	struct list_head *pos;
	int ret = 0;

	mutex_lock(&listener_access_lock);
	while (!list_empty(&qseecom.unregister_lsnr_pending_list_head)) {
		pos = qseecom.unregister_lsnr_pending_list_head.next;
		entry = list_entry(pos,
			struct qseecom_unregister_pending_list, list);
		if (entry && entry->data) {
			pr_debug("process pending unregister %d\n",
				entry->data->listener.id);
			/* don't process the entry if qseecom_release is not called*/
			if (!entry->data->listener.release_called) {
				/* rotate to the tail and stop this pass */
				list_del(pos);
				list_add_tail(&entry->list,
					&qseecom.unregister_lsnr_pending_list_head);
				break;
			}
			ptr_svc = __qseecom_find_svc(
					entry->data->listener.id);
			if (ptr_svc) {
				ret = __qseecom_unregister_listener(
					entry->data, ptr_svc);
				if (ret) {
					/* TZ still busy: leave entry queued and retry later */
					pr_debug("unregister %d pending again\n",
						entry->data->listener.id);
					mutex_unlock(&listener_access_lock);
					return;
				}
			} else
				pr_err("invalid listener %d\n",
					entry->data->listener.id);
			/* client is done: release its tz buffer and handle */
			__qseecom_free_tzbuf(&entry->data->sglistinfo_shm);
			kfree_sensitive(entry->data);
		}
		list_del(pos);
		kfree_sensitive(entry);
	}
	mutex_unlock(&listener_access_lock);
	/* let any registration waiting on this unregister proceed */
	wake_up_interruptible(&qseecom.register_lsnr_pending_wq);
}
/*
 * Kick the listener-unregister kthread: set its state to WAKEUP before
 * waking so the wait_event condition in the kthread observes it.
 */
static void __wakeup_unregister_listener_kthread(void)
{
	atomic_set(&qseecom.unregister_lsnr_kthread_state,
				LSNR_UNREG_KT_WAKEUP);
	wake_up_interruptible(&qseecom.unregister_lsnr_kthread_wq);
}
/*
 * Kthread main loop: sleep until woken via
 * __wakeup_unregister_listener_kthread(), process the pending unregister
 * list, then go back to sleep.  Exits when kthread_should_stop() is set.
 * @data: unused kthread argument.
 */
static int __qseecom_unregister_listener_kthread_func(void *data)
{
	while (!kthread_should_stop()) {
		wait_event_interruptible(
			qseecom.unregister_lsnr_kthread_wq,
			atomic_read(&qseecom.unregister_lsnr_kthread_state)
				== LSNR_UNREG_KT_WAKEUP);
		pr_debug("kthread to unregister listener is called %d\n",
			atomic_read(&qseecom.unregister_lsnr_kthread_state));
		__qseecom_processing_pending_lsnr_unregister();
		/* re-arm: next wakeup must set WAKEUP again */
		atomic_set(&qseecom.unregister_lsnr_kthread_state,
				LSNR_UNREG_KT_SLEEP);
	}
	pr_warn("kthread to unregister listener stopped\n");
	return 0;
}
  1587. static int qseecom_bus_scale_update_request(
  1588. int client, int mode)
  1589. {
  1590. pr_debug("client %d, mode %d\n", client, mode);
  1591. /*TODO: get ab/ib from device tree for different mode*/
  1592. if (!mode)
  1593. return icc_set_bw(qseecom.icc_path, 0, 0);
  1594. else
  1595. return icc_set_bw(qseecom.icc_path,
  1596. qseecom.avg_bw, qseecom.peak_bw);
  1597. }
/*
 * Switch the QSEE crypto clock and bus bandwidth to @mode.  Enables the
 * QSEE clock for any active mode (disables it for INACTIVE), then issues
 * the bandwidth request; on bandwidth failure the clock change is rolled
 * back.  Returns 0 on success or a negative errno.
 */
static int __qseecom_set_msm_bus_request(uint32_t mode)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	if (qclk->ce_core_src_clk != NULL) {
		if (mode == INACTIVE) {
			__qseecom_disable_clk(CLK_QSEE);
		} else {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				pr_err("CLK enabling failed (%d) MODE (%d)\n",
							ret, mode);
		}
	}

	if ((!ret) && (qseecom.current_mode != mode)) {
		ret = qseecom_bus_scale_update_request(
					qseecom.qsee_perf_client, mode);
		if (ret) {
			pr_err("Bandwidth req failed(%d) MODE (%d)\n",
							ret, mode);
			/* roll back the clock transition done above */
			if (qclk->ce_core_src_clk != NULL) {
				if (mode == INACTIVE) {
					ret = __qseecom_enable_clk(CLK_QSEE);
					if (ret)
						pr_err("CLK enable failed\n");
				} else
					__qseecom_disable_clk(CLK_QSEE);
			}
		}
		/*
		 * NOTE(review): current_mode is updated even when the
		 * bandwidth request failed above — confirm this is the
		 * intended bookkeeping (it means a later retry with the
		 * same mode would be skipped by the != mode check).
		 */
		qseecom.current_mode = mode;
	}
	return ret;
}
/*
 * Workqueue handler scheduled by the bw scale-down timer: drop the bus
 * request to INACTIVE if the timer was still armed, and clear
 * timer_running.  Lock order (app_access_lock then qsee_bw_mutex) must
 * match the rest of the driver.
 */
static void qseecom_bw_inactive_req_work(struct work_struct *work)
{
	mutex_lock(&app_access_lock);
	mutex_lock(&qsee_bw_mutex);
	/* only scale down if nobody cancelled the timer meanwhile */
	if (qseecom.timer_running)
		__qseecom_set_msm_bus_request(INACTIVE);
	pr_debug("current_mode = %d, cumulative_mode = %d\n",
				qseecom.current_mode, qseecom.cumulative_mode);
	qseecom.timer_running = false;
	mutex_unlock(&qsee_bw_mutex);
	mutex_unlock(&app_access_lock);
}
/*
 * Timer callback (atomic context): defer the bandwidth scale-down to
 * process context via the bw_inactive_req_ws work item, since it takes
 * mutexes.
 */
static void qseecom_scale_bus_bandwidth_timer_callback(struct timer_list *data)
{
	schedule_work(&qseecom.bw_inactive_req_ws);
}
  1648. static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
  1649. {
  1650. struct qseecom_clk *qclk;
  1651. int ret = 0;
  1652. mutex_lock(&clk_access_lock);
  1653. if (ce == CLK_QSEE)
  1654. qclk = &qseecom.qsee;
  1655. else
  1656. qclk = &qseecom.ce_drv;
  1657. if (qclk->clk_access_cnt > 0) {
  1658. qclk->clk_access_cnt--;
  1659. } else {
  1660. pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
  1661. ret = -EINVAL;
  1662. }
  1663. mutex_unlock(&clk_access_lock);
  1664. return ret;
  1665. }
/*
 * Apply a bandwidth mode immediately and cancel any pending scale-down
 * timer.  @mode == 0 means "recompute from cumulative client needs"
 * (capped at HIGH); otherwise @mode is used directly.  Returns 0 on
 * success or a negative errno.
 */
static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
{
	int32_t ret = 0;
	int32_t request_mode = INACTIVE;

	mutex_lock(&qsee_bw_mutex);
	if (mode == 0) {
		/* derive the effective mode from all clients' registered needs */
		if (qseecom.cumulative_mode > MEDIUM)
			request_mode = HIGH;
		else
			request_mode = qseecom.cumulative_mode;
	} else {
		request_mode = mode;
	}

	ret = __qseecom_set_msm_bus_request(request_mode);
	if (ret) {
		pr_err("set msm bus request failed (%d),request_mode (%d)\n",
			ret, request_mode);
		goto err_scale_timer;
	}

	if (qseecom.timer_running) {
		/* cancel the pending scale-down and drop its clock vote */
		ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
		if (ret) {
			pr_err("Failed to decrease clk ref count.\n");
			goto err_scale_timer;
		}
		del_timer_sync(&(qseecom.bw_scale_down_timer));
		qseecom.timer_running = false;
	}
err_scale_timer:
	mutex_unlock(&qsee_bw_mutex);
	return ret;
}
  1698. static int qseecom_unregister_bus_bandwidth_needs(
  1699. struct qseecom_dev_handle *data)
  1700. {
  1701. qseecom.cumulative_mode -= data->mode;
  1702. data->mode = INACTIVE;
  1703. return 0;
  1704. }
  1705. static int __qseecom_register_bus_bandwidth_needs(
  1706. struct qseecom_dev_handle *data, uint32_t request_mode)
  1707. {
  1708. if (data->mode == INACTIVE) {
  1709. qseecom.cumulative_mode += request_mode;
  1710. data->mode = request_mode;
  1711. } else {
  1712. if (data->mode != request_mode) {
  1713. qseecom.cumulative_mode -= data->mode;
  1714. qseecom.cumulative_mode += request_mode;
  1715. data->mode = request_mode;
  1716. }
  1717. }
  1718. return 0;
  1719. }
  1720. static int qseecom_perf_enable(struct qseecom_dev_handle *data)
  1721. {
  1722. int ret = 0;
  1723. ret = qsee_vote_for_clock(data, CLK_DFAB);
  1724. if (ret) {
  1725. pr_err("Failed to vote for DFAB clock with err %d\n", ret);
  1726. goto perf_enable_exit;
  1727. }
  1728. ret = qsee_vote_for_clock(data, CLK_SFPB);
  1729. if (ret) {
  1730. qsee_disable_clock_vote(data, CLK_DFAB);
  1731. pr_err("Failed to vote for SFPB clock with err %d\n", ret);
  1732. goto perf_enable_exit;
  1733. }
  1734. perf_enable_exit:
  1735. return ret;
  1736. }
  1737. static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
  1738. void __user *argp)
  1739. {
  1740. int32_t ret = 0;
  1741. int32_t req_mode;
  1742. if (qseecom.no_clock_support)
  1743. return 0;
  1744. ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
  1745. if (ret) {
  1746. pr_err("copy_from_user failed\n");
  1747. return ret;
  1748. }
  1749. if (req_mode > HIGH) {
  1750. pr_err("Invalid bandwidth mode (%d)\n", req_mode);
  1751. return -EINVAL;
  1752. }
  1753. /*
  1754. * Register bus bandwidth needs if bus scaling feature is enabled;
  1755. * otherwise, qseecom enable/disable clocks for the client directly.
  1756. */
  1757. if (qseecom.support_bus_scaling) {
  1758. mutex_lock(&qsee_bw_mutex);
  1759. ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
  1760. mutex_unlock(&qsee_bw_mutex);
  1761. } else {
  1762. pr_debug("Bus scaling feature is NOT enabled\n");
  1763. pr_debug("request bandwidth mode %d for the client\n",
  1764. req_mode);
  1765. if (req_mode != INACTIVE) {
  1766. ret = qseecom_perf_enable(data);
  1767. if (ret)
  1768. pr_err("Failed to vote for clock with err %d\n",
  1769. ret);
  1770. } else {
  1771. qsee_disable_clock_vote(data, CLK_DFAB);
  1772. qsee_disable_clock_vote(data, CLK_SFPB);
  1773. }
  1774. }
  1775. return ret;
  1776. }
  1777. static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
  1778. {
  1779. if (qseecom.no_clock_support)
  1780. return;
  1781. mutex_lock(&qsee_bw_mutex);
  1782. qseecom.bw_scale_down_timer.expires = jiffies +
  1783. msecs_to_jiffies(duration);
  1784. mod_timer(&(qseecom.bw_scale_down_timer),
  1785. qseecom.bw_scale_down_timer.expires);
  1786. qseecom.timer_running = true;
  1787. mutex_unlock(&qsee_bw_mutex);
  1788. }
  1789. static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
  1790. {
  1791. if (!qseecom.support_bus_scaling)
  1792. qsee_disable_clock_vote(data, CLK_SFPB);
  1793. else
  1794. __qseecom_add_bw_scale_down_timer(
  1795. QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
  1796. }
  1797. static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
  1798. {
  1799. int ret = 0;
  1800. if (qseecom.support_bus_scaling) {
  1801. ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
  1802. if (ret)
  1803. pr_err("Failed to set bw MEDIUM.\n");
  1804. } else {
  1805. ret = qsee_vote_for_clock(data, CLK_SFPB);
  1806. if (ret)
  1807. pr_err("Fail vote for clk SFPB ret %d\n", ret);
  1808. }
  1809. return ret;
  1810. }
/*
 * Map the client's shared buffer (an ion/dma-buf fd plus userspace vaddr
 * and length from @argp) into the kernel and record it in data->client.
 * On length mismatch the mapping is undone.  Returns 0 on success,
 * -EFAULT on bad user input, -EINVAL on mapping/length failure.
 */
static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
					void __user *argp)
{
	int32_t ret;
	struct qseecom_set_sb_mem_param_req req;
	size_t len;

	/* Copy the relevant information needed for loading the image */
	if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
		return -EFAULT;

	if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
					(req.sb_len == 0)) {
		pr_err("Invalid input(s)ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
			req.ifd_data_fd, req.sb_len, req.virt_sb_base);
		return -EFAULT;
	}
	/* the user range must be readable/writable by the caller */
	if (!access_ok((void __user *)req.virt_sb_base,
			req.sb_len))
		return -EFAULT;

	/* map the dma-buf fd; fills sb_phys/sb_virt/sgt/attach/dmabuf */
	ret = qseecom_vaddr_map(req.ifd_data_fd, &data->client.sb_phys,
				(void **)&data->client.sb_virt,
				&data->client.sgt, &data->client.attach,
				&len, &data->client.dmabuf);
	if (ret) {
		pr_err("failed to convert ion_fd %d for lsnr %d with err: %d\n",
			req.ifd_data_fd, data->client.app_id, ret);
		return -EINVAL;
	}

	/* the mapped buffer must cover the requested length */
	if (len < req.sb_len) {
		pr_err("Requested length (0x%x) is > allocated (%zu)\n",
			req.sb_len, len);
		ret = -EINVAL;
		goto exit;
	}
	data->client.sb_length = req.sb_len;
	data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;

	return ret;
exit:
	/* unwind the mapping created above */
	if (data->client.dmabuf) {
		qseecom_vaddr_unmap(data->client.sb_virt, data->client.sgt,
			data->client.attach, data->client.dmabuf);
		MAKE_NULL(data->client.sgt,
			data->client.attach, data->client.dmabuf);
	}
	return ret;
}
  1856. static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data,
  1857. struct qseecom_registered_listener_list *ptr_svc)
  1858. {
  1859. int ret;
  1860. ret = (qseecom.send_resp_flag != 0);
  1861. return ret || data->abort || ptr_svc->abort;
  1862. }
  1863. static int __qseecom_reentrancy_listener_has_sent_rsp(
  1864. struct qseecom_dev_handle *data,
  1865. struct qseecom_registered_listener_list *ptr_svc)
  1866. {
  1867. int ret;
  1868. ret = (ptr_svc->send_resp_flag != 0);
  1869. return ret || data->abort || ptr_svc->abort;
  1870. }
  1871. static void __qseecom_clean_listener_sglistinfo(
  1872. struct qseecom_registered_listener_list *ptr_svc)
  1873. {
  1874. if (ptr_svc->sglist_cnt) {
  1875. memset(ptr_svc->sglistinfo_ptr, 0,
  1876. SGLISTINFO_TABLE_SIZE);
  1877. ptr_svc->sglist_cnt = 0;
  1878. }
  1879. }
/*
 * Service QSEOS_RESULT_INCOMPLETE responses from TZ: each one names a
 * listener (resp->data) that must handle a request.  Wake that listener,
 * wait for its userspace response, send the response back to TZ, and
 * repeat until TZ stops returning INCOMPLETE.  Returns 0 on success or a
 * negative errno; rc (listener lookup/abort failure) takes precedence
 * over ret (cache/scm failure).
 */
static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
					struct qseecom_command_scm_resp *resp)
{
	int ret = 0;
	int rc = 0;
	uint32_t lstnr;
	struct qseecom_client_listener_data_irsp send_data_rsp = {0};
	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
									= {0};
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	sigset_t new_sigset;
	uint32_t status;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = NULL;

	/* hold off OS-level cmds from other threads while a listener runs */
	qseecom.app_block_ref_cnt++;
	while (resp->result == QSEOS_RESULT_INCOMPLETE) {
		lstnr = resp->data;
		/*
		 * Wake up blocking lsitener service with the lstnr id
		 */
		mutex_lock(&listener_access_lock);
		list_for_each_entry(ptr_svc,
				&qseecom.registered_listener_list_head, list) {
			if (ptr_svc->svc.listener_id == lstnr) {
				ptr_svc->listener_in_use = true;
				ptr_svc->rcv_req_flag = 1;
				/* make TZ-written request data visible to CPU */
				ret = qseecom_dmabuf_cache_operations(
					ptr_svc->dmabuf,
					QSEECOM_CACHE_INVALIDATE);
				if (ret) {
					rc = -EINVAL;
					status = QSEOS_RESULT_FAILURE;
					goto err_resp;
				}
				wake_up_interruptible(&ptr_svc->rcv_req_wq);
				break;
			}
		}

		/*
		 * NOTE(review): after a full list_for_each_entry() pass
		 * ptr_svc points at the list head's container, not NULL, so
		 * this check never fires; the listener_id mismatch check
		 * below is what actually catches an unknown listener.
		 */
		if (ptr_svc == NULL) {
			pr_err("Listener Svc %d does not exist\n", lstnr);
			rc = -EINVAL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		if (!ptr_svc->dmabuf) {
			pr_err("Client dmabuf is not initialized\n");
			rc = -EINVAL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		if (ptr_svc->svc.listener_id != lstnr) {
			pr_err("Service %d does not exist\n",
						lstnr);
			rc = -ERESTARTSYS;
			ptr_svc = NULL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		if (ptr_svc->abort == 1) {
			pr_debug("Service %d abort %d\n",
						lstnr, ptr_svc->abort);
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");

		/* initialize the new signal mask with all signals*/
		sigfillset(&new_sigset);
		/* block all signals */

		/* drop the lock so listener ioctls can post the response */
		mutex_unlock(&listener_access_lock);
		do {
			/*
			 * When reentrancy is not supported, check global
			 * send_resp_flag; otherwise, check this listener's
			 * send_resp_flag.
			 */
			if (!qseecom.qsee_reentrancy_support &&
				!wait_event_interruptible(qseecom.send_resp_wq,
				__qseecom_listener_has_sent_rsp(
						data, ptr_svc))) {
				break;
			}

			if (qseecom.qsee_reentrancy_support &&
				!wait_event_interruptible(qseecom.send_resp_wq,
				__qseecom_reentrancy_listener_has_sent_rsp(
						data, ptr_svc))) {
				break;
			}
		} while (1);
		mutex_lock(&listener_access_lock);
		/* restore signal mask */
		if (data->abort || ptr_svc->abort) {
			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d\n",
				data->client.app_id, lstnr, ret);
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
		} else {
			status = QSEOS_RESULT_SUCCESS;
		}
err_resp:
		qseecom.send_resp_flag = 0;
		if (ptr_svc) {
			ptr_svc->send_resp_flag = 0;
			table = ptr_svc->sglistinfo_ptr;
		}
		/* build the listener response for TZ (32- vs 64-bit layout) */
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			send_data_rsp.listener_id = lstnr;
			send_data_rsp.status = status;
			if (table) {
				send_data_rsp.sglistinfo_ptr =
					(uint32_t)virt_to_phys(table);
				send_data_rsp.sglistinfo_len =
					SGLISTINFO_TABLE_SIZE;
				qtee_shmbridge_flush_shm_buf(
					&ptr_svc->sglistinfo_shm);
			}
			cmd_buf = (void *)&send_data_rsp;
			cmd_len = sizeof(send_data_rsp);
		} else {
			send_data_rsp_64bit.listener_id = lstnr;
			send_data_rsp_64bit.status = status;
			if (table) {
				send_data_rsp_64bit.sglistinfo_ptr =
					virt_to_phys(table);
				send_data_rsp_64bit.sglistinfo_len =
					SGLISTINFO_TABLE_SIZE;
				qtee_shmbridge_flush_shm_buf(
					&ptr_svc->sglistinfo_shm);
			}
			cmd_buf = (void *)&send_data_rsp_64bit;
			cmd_len = sizeof(send_data_rsp_64bit);
		}
		if (!qseecom.whitelist_support || table == NULL)
			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
		else
			*(uint32_t *)cmd_buf =
				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;

		/* RPMB/SSD listeners need the QSEE clock around the scm call */
		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				goto exit;
		}

		if (ptr_svc) {
			/* flush listener-written data before handing it to TZ */
			ret = qseecom_dmabuf_cache_operations(ptr_svc->dmabuf,
						QSEECOM_CACHE_CLEAN);
			if (ret)
				goto exit;

			ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len, resp, sizeof(*resp));
			ptr_svc->listener_in_use = false;
			__qseecom_clean_listener_sglistinfo(ptr_svc);
			if (ret) {
				pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
				goto exit;
			}
		} else {
			/* listener lookup failed: still report status to TZ */
			ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len, resp, sizeof(*resp));
			if (ret) {
				pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
				goto exit;
			}
		}

		pr_debug("resp status %d, res= %d, app_id = %d, lstr = %d\n",
			status, resp->result, data->client.app_id, lstnr);
		if ((resp->result != QSEOS_RESULT_SUCCESS) &&
			(resp->result != QSEOS_RESULT_INCOMPLETE)) {
			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
				resp->result, data->client.app_id, lstnr);
			ret = -EINVAL;
		}
exit:
		mutex_unlock(&listener_access_lock);
		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
			__qseecom_disable_clk(CLK_QSEE);

	}
	qseecom.app_block_ref_cnt--;
	/* let threads waiting in the reentrancy checks proceed */
	wake_up_interruptible_all(&qseecom.app_block_wq);
	if (rc)
		return rc;

	return ret;
}
/*
 * Handle QSEOS_RESULT_BLOCKED_ON_LISTENER for a reentrant TA: sleep until
 * the listener named in resp->data is free, then tell TZ to continue the
 * blocked app/session and loop while TZ keeps reporting BLOCKED.  @ptr_app
 * may be NULL (smcinvoke/load_app callers get a dummy entry; otherwise the
 * registered app is looked up).  Both app_access_lock and
 * listener_access_lock are dropped while sleeping.  Returns 0 on success
 * or a negative errno.
 */
static int __qseecom_process_reentrancy_blocked_on_listener(
				struct qseecom_command_scm_resp *resp,
				struct qseecom_registered_app_list *ptr_app,
				struct qseecom_dev_handle *data)
{
	struct qseecom_registered_listener_list *list_ptr;
	int ret = 0;
	struct qseecom_continue_blocked_request_ireq ireq;
	struct qseecom_command_scm_resp continue_resp;
	unsigned int session_id;
	sigset_t new_sigset;
	unsigned long flags;
	bool found_app = false;
	struct qseecom_registered_app_list dummy_app_entry = { {NULL} };

	if (!resp || !data) {
		pr_err("invalid resp or data pointer\n");
		ret = -EINVAL;
		goto exit;
	}

	/* find app_id & img_name from list */
	if (!ptr_app) {
		if (data->client.from_smcinvoke || data->client.from_loadapp) {
			/* no registered-list entry for these callers: use a stack dummy */
			pr_debug("This request is from %s\n",
				(data->client.from_smcinvoke ? "smcinvoke" : "load_app"));
			ptr_app = &dummy_app_entry;
			ptr_app->app_id = data->client.app_id;
		} else {
			spin_lock_irqsave(&qseecom.registered_app_list_lock,
					flags);
			list_for_each_entry(ptr_app,
				&qseecom.registered_app_list_head, list) {
				if ((ptr_app->app_id == data->client.app_id) &&
					(!strcmp(ptr_app->app_name,
						data->client.app_name))) {
					found_app = true;
					break;
				}
			}
			spin_unlock_irqrestore(
				&qseecom.registered_app_list_lock, flags);
			if (!found_app) {
				pr_err("app_id %d (%s) is not found\n",
					data->client.app_id,
					(char *)data->client.app_name);
				ret = -ENOENT;
				goto exit;
			}
		}
	}

	do {
		/* for smcinvoke-capable TZ, resp_type carries the session id */
		session_id = resp->resp_type;
		mutex_lock(&listener_access_lock);
		list_ptr = __qseecom_find_svc(resp->data);
		if (!list_ptr) {
			pr_err("Invalid listener ID %d\n", resp->data);
			ret = -ENODATA;
			mutex_unlock(&listener_access_lock);
			goto exit;
		}
		ptr_app->blocked_on_listener_id = resp->data;

		pr_warn("Lsntr %d in_use %d, block session(%d) app(%d)\n",
			resp->data, list_ptr->listener_in_use,
			session_id, data->client.app_id);

		/* sleep until listener is available */
		sigfillset(&new_sigset);

		do {
			qseecom.app_block_ref_cnt++;
			ptr_app->app_blocked = true;
			/* drop both locks while sleeping; reacquire in same order */
			mutex_unlock(&listener_access_lock);
			mutex_unlock(&app_access_lock);
			wait_event_interruptible(
				list_ptr->listener_block_app_wq,
				!list_ptr->listener_in_use);
			mutex_lock(&app_access_lock);
			mutex_lock(&listener_access_lock);
			ptr_app->app_blocked = false;
			qseecom.app_block_ref_cnt--;
		} while (list_ptr->listener_in_use);

		ptr_app->blocked_on_listener_id = 0;
		pr_warn("Lsntr %d is available, unblock session(%d) app(%d)\n",
			resp->data, session_id, data->client.app_id);
		/* notify TZ that listener is available */
		ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;

		if (qseecom.smcinvoke_support)
			ireq.app_or_session_id = session_id;
		else
			ireq.app_or_session_id = data->client.app_id;

		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				&ireq, sizeof(ireq),
				&continue_resp, sizeof(continue_resp));
		/*
		 * NOTE(review): a failure when smcinvoke is NOT supported
		 * falls through and still consumes continue_resp below —
		 * confirm that is intended.
		 */
		if (ret && qseecom.smcinvoke_support) {
			/* retry with legacy cmd */
			pr_warn("falling back to legacy method\n");
			qseecom.smcinvoke_support = false;
			ireq.app_or_session_id = data->client.app_id;
			ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				&ireq, sizeof(ireq),
				&continue_resp, sizeof(continue_resp));
			qseecom.smcinvoke_support = true;
			if (ret) {
				pr_err("unblock app %d or session %d fail\n",
					data->client.app_id, session_id);
				mutex_unlock(&listener_access_lock);
				goto exit;
			}
		}
		mutex_unlock(&listener_access_lock);
		/* propagate TZ's continuation result to the caller's resp */
		resp->result = continue_resp.result;
		resp->resp_type = continue_resp.resp_type;
		resp->data = continue_resp.data;
		pr_err("unblock resp = %d\n", resp->result);
	} while (resp->result == QSEOS_RESULT_BLOCKED_ON_LISTENER);

	if (resp->result != QSEOS_RESULT_INCOMPLETE) {
		pr_err("Unexpected unblock resp %d\n", resp->result);
		ret = -EINVAL;
	}
exit:
	return ret;
}
/*
 * Reentrancy-aware variant of __qseecom_process_incomplete_cmd(): service
 * QSEOS_RESULT_INCOMPLETE responses by waking the named listener, waiting
 * (with app_access_lock dropped so other TAs can run) for its response,
 * and sending it back to TZ.  Also handles TZ replying
 * BLOCKED_ON_LISTENER or CBACK_REQUEST to the listener response.
 * Returns 0 on success or a negative errno; rc takes precedence.
 */
static int __qseecom_reentrancy_process_incomplete_cmd(
					struct qseecom_dev_handle *data,
					struct qseecom_command_scm_resp *resp)
{
	int ret = 0;
	int rc = 0;
	uint32_t lstnr;
	struct qseecom_client_listener_data_irsp send_data_rsp = {0};
	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
									= {0};
	struct qseecom_registered_listener_list *ptr_svc = NULL;
	sigset_t new_sigset;
	uint32_t status;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = NULL;

	while (ret == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
		lstnr = resp->data;
		/*
		 * Wake up blocking lsitener service with the lstnr id
		 */
		mutex_lock(&listener_access_lock);
		list_for_each_entry(ptr_svc,
				&qseecom.registered_listener_list_head, list) {
			if (ptr_svc->svc.listener_id == lstnr) {
				ptr_svc->listener_in_use = true;
				ptr_svc->rcv_req_flag = 1;
				/* make TZ-written request data visible to CPU */
				ret = qseecom_dmabuf_cache_operations(
					ptr_svc->dmabuf,
					QSEECOM_CACHE_INVALIDATE);
				if (ret) {
					rc = -EINVAL;
					status = QSEOS_RESULT_FAILURE;
					goto err_resp;
				}
				wake_up_interruptible(&ptr_svc->rcv_req_wq);
				break;
			}
		}

		/*
		 * NOTE(review): after a full list_for_each_entry() pass
		 * ptr_svc points at the list head's container, not NULL; the
		 * listener_id mismatch check below is the effective guard.
		 */
		if (ptr_svc == NULL) {
			pr_err("Listener Svc %d does not exist\n", lstnr);
			rc = -EINVAL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		if (!ptr_svc->dmabuf) {
			pr_err("Client dmabuf is not initialized\n");
			rc = -EINVAL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		if (ptr_svc->svc.listener_id != lstnr) {
			pr_err("Service %d does not exist\n",
						lstnr);
			rc = -ERESTARTSYS;
			ptr_svc = NULL;
			table = NULL;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		if (ptr_svc->abort == 1) {
			pr_debug("Service %d abort %d\n",
						lstnr, ptr_svc->abort);
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
			goto err_resp;
		}

		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");

		/* initialize the new signal mask with all signals*/
		sigfillset(&new_sigset);

		/* block all signals */
		/* unlock mutex btw waking listener and sleep-wait */
		mutex_unlock(&listener_access_lock);
		mutex_unlock(&app_access_lock);
		do {
			if (!wait_event_interruptible(qseecom.send_resp_wq,
				__qseecom_reentrancy_listener_has_sent_rsp(
						data, ptr_svc))) {
				break;
			}
		} while (1);
		/* lock mutex again after resp sent */
		mutex_lock(&app_access_lock);
		mutex_lock(&listener_access_lock);
		ptr_svc->send_resp_flag = 0;
		qseecom.send_resp_flag = 0;

		/* restore signal mask */
		if (data->abort || ptr_svc->abort) {
			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d\n",
				data->client.app_id, lstnr, ret);
			rc = -ENODEV;
			status = QSEOS_RESULT_FAILURE;
		} else {
			status = QSEOS_RESULT_SUCCESS;
		}
err_resp:
		if (ptr_svc)
			table = ptr_svc->sglistinfo_ptr;
		/* build the listener response for TZ (32- vs 64-bit layout) */
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			send_data_rsp.listener_id = lstnr;
			send_data_rsp.status = status;
			if (table) {
				send_data_rsp.sglistinfo_ptr =
					(uint32_t)virt_to_phys(table);
				send_data_rsp.sglistinfo_len =
					SGLISTINFO_TABLE_SIZE;
				qtee_shmbridge_flush_shm_buf(
					&ptr_svc->sglistinfo_shm);
			}
			cmd_buf = (void *)&send_data_rsp;
			cmd_len = sizeof(send_data_rsp);
		} else {
			send_data_rsp_64bit.listener_id = lstnr;
			send_data_rsp_64bit.status = status;
			if (table) {
				send_data_rsp_64bit.sglistinfo_ptr =
					virt_to_phys(table);
				send_data_rsp_64bit.sglistinfo_len =
					SGLISTINFO_TABLE_SIZE;
				qtee_shmbridge_flush_shm_buf(
					&ptr_svc->sglistinfo_shm);
			}
			cmd_buf = (void *)&send_data_rsp_64bit;
			cmd_len = sizeof(send_data_rsp_64bit);
		}
		if (!qseecom.whitelist_support || table == NULL)
			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
		else
			*(uint32_t *)cmd_buf =
				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;

		/* the RPMB listener needs the QSEE clock around the scm call */
		if (lstnr == RPMB_SERVICE) {
			ret = __qseecom_enable_clk(CLK_QSEE);
			if (ret)
				goto exit;
		}

		if (ptr_svc) {
			/* flush listener-written data before handing it to TZ */
			ret = qseecom_dmabuf_cache_operations(ptr_svc->dmabuf,
						QSEECOM_CACHE_CLEAN);
			if (ret)
				goto exit;

			ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len, resp, sizeof(*resp));
			ptr_svc->listener_in_use = false;
			__qseecom_clean_listener_sglistinfo(ptr_svc);
			/* unblock any TA sleeping on this listener */
			wake_up_interruptible(&ptr_svc->listener_block_app_wq);

			if (ret) {
				pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
				goto exit;
			}
		} else {
			/* listener lookup failed: still report status to TZ */
			ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
					cmd_buf, cmd_len, resp, sizeof(*resp));
			if (ret) {
				pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
				goto exit;
			}
		}

		switch (resp->result) {
		case QSEOS_RESULT_BLOCKED_ON_LISTENER:
			pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
				lstnr, data->client.app_id, resp->data);
			if (lstnr == resp->data) {
				pr_err("lstnr %d should not be blocked!\n",
					lstnr);
				ret = -EINVAL;
				goto exit;
			}
			mutex_unlock(&listener_access_lock);
			ret = __qseecom_process_reentrancy_blocked_on_listener(
					resp, NULL, data);
			mutex_lock(&listener_access_lock);
			if (ret) {
				pr_err("failed to process App(%d) %s blocked on listener %d\n",
					data->client.app_id,
					data->client.app_name, resp->data);
				goto exit;
			}
			/* fall through: the unblocked resp is handled like SUCCESS/INCOMPLETE */
		case QSEOS_RESULT_SUCCESS:
		case QSEOS_RESULT_INCOMPLETE:
			break;
		case QSEOS_RESULT_CBACK_REQUEST:
			pr_warn("get cback req app_id = %d, resp->data = %d\n",
				data->client.app_id, resp->data);
			resp->resp_type = SMCINVOKE_RESULT_INBOUND_REQ_NEEDED;
			break;
		default:
			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
				resp->result, data->client.app_id, lstnr);
			ret = -EINVAL;
			goto exit;
		}
exit:
		mutex_unlock(&listener_access_lock);
		if (lstnr == RPMB_SERVICE)
			__qseecom_disable_clk(CLK_QSEE);

	}
	if (rc)
		return rc;

	return ret;
}
/*
 * QSEE doesn't support reentrancy of OS-level cmds until RE phase-3,
 * and QSEE OS-level scm_call cmds will fail if there is any blocked TZ app.
 * So we must first check that no app is blocked before sending an OS-level
 * scm call, waiting until all apps are unblocked.
 */
/*
 * Before an OS-level scm call (reentrancy phases 1-2, trusted-OS owner
 * only): sleep until no TZ app is blocked.  app_access_lock is held on
 * entry and is dropped/retaken around each wait so blocked apps can make
 * progress.
 */
static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
{
	if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
		qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
		IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
		/* thread sleep until this app unblocked */
		while (qseecom.app_block_ref_cnt > 0) {
			/* re-check the count after regaining the lock */
			mutex_unlock(&app_access_lock);
			wait_event_interruptible(qseecom.app_block_wq,
				(!qseecom.app_block_ref_cnt));
			mutex_lock(&app_access_lock);
		}
	}
}
/*
 * An scm_call to send data will fail if this TA is blocked or if more than
 * one TA is requesting listener services; so first check whether we need
 * to wait.
 */
/*
 * Before sending data to @ptr_app (reentrancy enabled): sleep while this
 * app is blocked or more than one listener request is outstanding.
 * app_access_lock is held on entry and dropped/retaken around each wait;
 * check_block marks that this app is parked in the check.
 */
static void __qseecom_reentrancy_check_if_this_app_blocked(
			struct qseecom_registered_app_list *ptr_app)
{
	if (qseecom.qsee_reentrancy_support) {
		ptr_app->check_block++;
		while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
			/* thread sleep until this app unblocked */
			mutex_unlock(&app_access_lock);
			wait_event_interruptible(qseecom.app_block_wq,
				(!ptr_app->app_blocked &&
				qseecom.app_block_ref_cnt <= 1));
			mutex_lock(&app_access_lock);
		}
		ptr_app->check_block--;
	}
}
/*
 * Resolve the app id for req.app_name: first consult the locally
 * registered app list, then ask TZ via scm call.  On success *app_id is
 * set (0 means "not loaded").  Returns 0 on success, -EINVAL on bad
 * arguments/scm failure/listener-type response, -ENODEV on an unknown
 * response type.
 */
static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
					uint32_t *app_id)
{
	int32_t ret;
	struct qseecom_command_scm_resp resp;
	bool found_app = false;
	struct qseecom_registered_app_list *entry = NULL;
	unsigned long flags = 0;

	if (!app_id) {
		pr_err("Null pointer to app_id\n");
		return -EINVAL;
	}
	*app_id = 0;

	/* check if app exists and has been registered locally */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(entry,
			&qseecom.registered_app_list_head, list) {
		if (!strcmp(entry->app_name, req.app_name)) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
	if (found_app) {
		/* entry is only valid here because the loop broke early */
		pr_debug("Found app with id %d\n", entry->app_id);
		*app_id = entry->app_id;
		return 0;
	}

	memset((void *)&resp, 0, sizeof(resp));

	/* SCM_CALL to check if app_id for the mentioned app exists */
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
				sizeof(struct qseecom_check_app_ireq),
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call to check if app is already loaded failed\n");
		return -EINVAL;
	}

	/* TZ reports FAILURE when the app simply is not loaded */
	if (resp.result == QSEOS_RESULT_FAILURE)
		return 0;

	switch (resp.resp_type) {
	/*qsee returned listener type response */
	case QSEOS_LISTENER_ID:
		pr_err("resp type is of listener type instead of app\n");
		return -EINVAL;
	case QSEOS_APP_ID:
		*app_id = resp.data;
		return 0;
	default:
		pr_err("invalid resp type (%d) from qsee\n",
				resp.resp_type);
		return -ENODEV;
	}
}
  2480. static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
  2481. {
  2482. struct qseecom_registered_app_list *entry = NULL;
  2483. unsigned long flags = 0;
  2484. u32 app_id = 0;
  2485. struct qseecom_load_img_req load_img_req;
  2486. int32_t ret = 0;
  2487. phys_addr_t pa = 0;
  2488. void *vaddr = NULL;
  2489. struct dma_buf_attachment *attach = NULL;
  2490. struct dma_buf *dmabuf = NULL;
  2491. struct sg_table *sgt = NULL;
  2492. size_t len;
  2493. struct qseecom_command_scm_resp resp;
  2494. struct qseecom_check_app_ireq req;
  2495. struct qseecom_load_app_ireq load_req;
  2496. struct qseecom_load_app_64bit_ireq load_req_64bit;
  2497. void *cmd_buf = NULL;
  2498. size_t cmd_len;
  2499. bool first_time = false;
  2500. /* Copy the relevant information needed for loading the image */
  2501. if (copy_from_user(&load_img_req,
  2502. (void __user *)argp,
  2503. sizeof(struct qseecom_load_img_req))) {
  2504. pr_err("copy_from_user failed\n");
  2505. return -EFAULT;
  2506. }
  2507. /* Check and load cmnlib */
  2508. if (qseecom.qsee_version > QSEEE_VERSION_00) {
  2509. if (!qseecom.commonlib_loaded &&
  2510. load_img_req.app_arch == ELFCLASS32) {
  2511. ret = qseecom_load_commonlib_image(data, "cmnlib");
  2512. if (ret) {
  2513. pr_err("failed to load cmnlib\n");
  2514. return -EIO;
  2515. }
  2516. qseecom.commonlib_loaded = true;
  2517. pr_debug("cmnlib is loaded\n");
  2518. }
  2519. if (!qseecom.commonlib64_loaded &&
  2520. load_img_req.app_arch == ELFCLASS64) {
  2521. ret = qseecom_load_commonlib_image(data, "cmnlib64");
  2522. if (ret) {
  2523. pr_err("failed to load cmnlib64\n");
  2524. return -EIO;
  2525. }
  2526. qseecom.commonlib64_loaded = true;
  2527. pr_debug("cmnlib64 is loaded\n");
  2528. }
  2529. }
  2530. if (qseecom.support_bus_scaling) {
  2531. mutex_lock(&qsee_bw_mutex);
  2532. ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
  2533. mutex_unlock(&qsee_bw_mutex);
  2534. if (ret)
  2535. return ret;
  2536. }
  2537. /* Vote for the SFPB clock */
  2538. ret = __qseecom_enable_clk_scale_up(data);
  2539. if (ret)
  2540. goto enable_clk_err;
  2541. req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
  2542. load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
  2543. strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
  2544. ret = __qseecom_check_app_exists(req, &app_id);
  2545. if (ret < 0)
  2546. goto checkapp_err;
  2547. if (app_id) {
  2548. pr_debug("App id %d (%s) already exists\n", app_id,
  2549. (char *)(req.app_name));
  2550. spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
  2551. list_for_each_entry(entry,
  2552. &qseecom.registered_app_list_head, list){
  2553. if (entry->app_id == app_id) {
  2554. if (entry->ref_cnt == U32_MAX) {
  2555. pr_err("App %d (%s) ref_cnt overflow\n",
  2556. app_id, req.app_name);
  2557. ret = -EINVAL;
  2558. goto loadapp_err;
  2559. }
  2560. entry->ref_cnt++;
  2561. break;
  2562. }
  2563. }
  2564. spin_unlock_irqrestore(
  2565. &qseecom.registered_app_list_lock, flags);
  2566. ret = 0;
  2567. } else {
  2568. first_time = true;
  2569. pr_warn("App (%s) does'nt exist, loading apps for first time\n",
  2570. (char *)(load_img_req.img_name));
  2571. ret = qseecom_vaddr_map(load_img_req.ifd_data_fd,
  2572. &pa, &vaddr, &sgt, &attach, &len, &dmabuf);
  2573. if (ret) {
  2574. pr_err("Ion client could not retrieve the handle\n");
  2575. ret = -ENOMEM;
  2576. goto loadapp_err;
  2577. }
  2578. if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
  2579. pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
  2580. len, load_img_req.mdt_len,
  2581. load_img_req.img_len);
  2582. ret = -EINVAL;
  2583. goto loadapp_err;
  2584. }
  2585. /* Populate the structure for sending scm call to load image */
  2586. if (qseecom.qsee_version < QSEE_VERSION_40) {
  2587. load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
  2588. load_req.mdt_len = load_img_req.mdt_len;
  2589. load_req.img_len = load_img_req.img_len;
  2590. strlcpy(load_req.app_name, load_img_req.img_name,
  2591. MAX_APP_NAME_SIZE);
  2592. load_req.phy_addr = (uint32_t)pa;
  2593. cmd_buf = (void *)&load_req;
  2594. cmd_len = sizeof(struct qseecom_load_app_ireq);
  2595. } else {
  2596. load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
  2597. load_req_64bit.mdt_len = load_img_req.mdt_len;
  2598. load_req_64bit.img_len = load_img_req.img_len;
  2599. strlcpy(load_req_64bit.app_name, load_img_req.img_name,
  2600. MAX_APP_NAME_SIZE);
  2601. load_req_64bit.phy_addr = (uint64_t)pa;
  2602. cmd_buf = (void *)&load_req_64bit;
  2603. cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
  2604. }
  2605. ret = qseecom_dmabuf_cache_operations(dmabuf,
  2606. QSEECOM_CACHE_CLEAN);
  2607. if (ret) {
  2608. pr_err("cache operation failed %d\n", ret);
  2609. goto loadapp_err;
  2610. }
  2611. /* SCM_CALL to load the app and get the app_id back */
  2612. ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
  2613. cmd_len, &resp, sizeof(resp));
  2614. if (ret) {
  2615. pr_err("scm_call to load app failed\n");
  2616. ret = -EINVAL;
  2617. goto loadapp_err;
  2618. }
  2619. ret = qseecom_dmabuf_cache_operations(dmabuf,
  2620. QSEECOM_CACHE_INVALIDATE);
  2621. if (ret) {
  2622. pr_err("cache operation failed %d\n", ret);
  2623. goto loadapp_err;
  2624. }
  2625. do {
  2626. if (resp.result == QSEOS_RESULT_FAILURE) {
  2627. pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
  2628. ret = -EFAULT;
  2629. goto loadapp_err;
  2630. }
  2631. if (resp.result == QSEOS_RESULT_INCOMPLETE) {
  2632. ret = __qseecom_process_incomplete_cmd(data, &resp);
  2633. if (ret) {
  2634. /* TZ has created app_id, need to unload it */
  2635. pr_err("incomp_cmd err %d, %d, unload %d %s\n",
  2636. ret, resp.result, resp.data,
  2637. load_img_req.img_name);
  2638. __qseecom_unload_app(data, resp.data);
  2639. ret = -EFAULT;
  2640. goto loadapp_err;
  2641. }
  2642. }
  2643. if (resp.result == QSEOS_RESULT_BLOCKED_ON_LISTENER) {
  2644. pr_err("load app blocked on listener\n");
  2645. data->client.app_id = resp.result;
  2646. data->client.from_loadapp = true;
  2647. ret = __qseecom_process_reentrancy_blocked_on_listener(&resp,
  2648. NULL, data);
  2649. if (ret) {
  2650. pr_err("load app fail proc block on listener,ret :%d\n",
  2651. ret);
  2652. ret = -EFAULT;
  2653. goto loadapp_err;
  2654. }
  2655. }
  2656. } while ((resp.result == QSEOS_RESULT_BLOCKED_ON_LISTENER) ||
  2657. (resp.result == QSEOS_RESULT_INCOMPLETE));
  2658. if (resp.result != QSEOS_RESULT_SUCCESS) {
  2659. pr_err("scm_call failed resp.result unknown, %d\n",
  2660. resp.result);
  2661. ret = -EFAULT;
  2662. goto loadapp_err;
  2663. }
  2664. app_id = resp.data;
  2665. entry = kmalloc(sizeof(*entry), GFP_KERNEL);
  2666. if (!entry) {
  2667. ret = -ENOMEM;
  2668. goto loadapp_err;
  2669. }
  2670. entry->app_id = app_id;
  2671. entry->ref_cnt = 1;
  2672. entry->app_arch = load_img_req.app_arch;
  2673. /*
  2674. * keymaster app may be first loaded as "keymaste" by qseecomd,
  2675. * and then used as "keymaster" on some targets. To avoid app
  2676. * name checking error, register "keymaster" into app_list and
  2677. * thread private data.
  2678. */
  2679. if (!strcmp(load_img_req.img_name, "keymaste"))
  2680. strlcpy(entry->app_name, "keymaster",
  2681. MAX_APP_NAME_SIZE);
  2682. else
  2683. strlcpy(entry->app_name, load_img_req.img_name,
  2684. MAX_APP_NAME_SIZE);
  2685. entry->app_blocked = false;
  2686. entry->blocked_on_listener_id = 0;
  2687. entry->check_block = 0;
  2688. spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
  2689. list_add_tail(&entry->list, &qseecom.registered_app_list_head);
  2690. spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
  2691. flags);
  2692. pr_warn("App with id %u (%s) now loaded\n", app_id,
  2693. (char *)(load_img_req.img_name));
  2694. }
  2695. data->client.app_id = app_id;
  2696. data->client.app_arch = load_img_req.app_arch;
  2697. if (!strcmp(load_img_req.img_name, "keymaste"))
  2698. strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
  2699. else
  2700. strlcpy(data->client.app_name, load_img_req.img_name,
  2701. MAX_APP_NAME_SIZE);
  2702. load_img_req.app_id = app_id;
  2703. if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
  2704. pr_err("copy_to_user failed\n");
  2705. ret = -EFAULT;
  2706. if (first_time) {
  2707. spin_lock_irqsave(
  2708. &qseecom.registered_app_list_lock, flags);
  2709. list_del(&entry->list);
  2710. spin_unlock_irqrestore(
  2711. &qseecom.registered_app_list_lock, flags);
  2712. kfree_sensitive(entry);
  2713. }
  2714. }
  2715. loadapp_err:
  2716. if (dmabuf) {
  2717. qseecom_vaddr_unmap(vaddr, sgt, attach, dmabuf);
  2718. MAKE_NULL(sgt, attach, dmabuf);
  2719. }
  2720. checkapp_err:
  2721. __qseecom_disable_clk_scale_down(data);
  2722. enable_clk_err:
  2723. if (qseecom.support_bus_scaling) {
  2724. mutex_lock(&qsee_bw_mutex);
  2725. qseecom_unregister_bus_bandwidth_needs(data);
  2726. mutex_unlock(&qsee_bw_mutex);
  2727. }
  2728. return ret;
  2729. }
/*
 * Drain in-flight ioctls on this handle ahead of an app unload: wake any
 * listener-response waiters, then wait for data->ioctl_count to drop to
 * <= 1 (this caller).  With QSEE reentrancy support, app_access_lock is
 * released around the wait so the remaining ioctls can finish, and
 * re-taken before returning.
 *
 * Return: 0 once drained, -ERESTARTSYS if interrupted by a signal.
 */
static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
{
	int ret = 0; /* Set unload app */

	wake_up_all(&qseecom.send_resp_wq);
	if (qseecom.qsee_reentrancy_support)
		mutex_unlock(&app_access_lock);
	while (atomic_read(&data->ioctl_count) > 1) {
		if (wait_event_interruptible(data->abort_wq,
					atomic_read(&data->ioctl_count) <= 1)) {
			pr_err("Interrupted from abort\n");
			ret = -ERESTARTSYS;
			break;
		}
	}
	if (qseecom.qsee_reentrancy_support)
		mutex_lock(&app_access_lock);
	return ret;
}
/*
 * Issue the TZ APP_SHUTDOWN SCM call for @app_id and process the
 * response, looping while TZ reports INCOMPLETE or BLOCKED_ON_LISTENER.
 *
 * Return: 0 when the app is unloaded, negative errno otherwise (the SCM
 * call's own error code, or -EFAULT for failure/unknown responses).
 */
static int __qseecom_unload_app(struct qseecom_dev_handle *data,
				uint32_t app_id)
{
	struct qseecom_unload_app_ireq req;
	struct qseecom_command_scm_resp resp;
	int ret = 0;

	/* Populate the structure for sending scm call to unload image */
	req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
	req.app_id = app_id;

	/* SCM_CALL to unload the app */
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
			sizeof(struct qseecom_unload_app_ireq),
			&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call to unload app (id = %d) failed ret: %d\n",
			app_id, ret);
		return ret;
	}

	/* Keep servicing intermediate results until TZ gives a final one. */
	do {
		switch (resp.result) {
		case QSEOS_RESULT_SUCCESS:
			pr_warn("App (%d) is unloaded\n", app_id);
			break;
		case QSEOS_RESULT_INCOMPLETE:
			/* may rewrite resp with a new result, looping again */
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret)
				pr_err("unload app %d fail proc incom cmd: %d,%d,%d\n",
					app_id, ret, resp.result, resp.data);
			else
				pr_warn("App (%d) is unloaded\n", app_id);
			break;
		case QSEOS_RESULT_FAILURE:
			pr_err("app (%d) unload_failed!!\n", app_id);
			ret = -EFAULT;
			break;
		case QSEOS_RESULT_BLOCKED_ON_LISTENER:
			pr_err("unload app (%d) blocked on listener\n", app_id);
			ret = __qseecom_process_reentrancy_blocked_on_listener(&resp, NULL, data);
			if (ret) {
				pr_err("unload app fail proc block on listener cmd,ret :%d\n",
					ret);
				ret = -EFAULT;
			}
			break;
		default:
			pr_err("unload app %d get unknown resp.result %d\n",
				app_id, resp.result);
			ret = -EFAULT;
			break;
		}
	} while ((resp.result == QSEOS_RESULT_INCOMPLETE) ||
		(resp.result == QSEOS_RESULT_BLOCKED_ON_LISTENER));
	return ret;
}
/*
 * Unload the trusted app bound to @data: drop a reference from the
 * registered-app list and, when the reference count hits zero, shut the
 * app down in TZ and free its list entry.  Always releases the client
 * shared-buffer dma-buf mapping and marks the handle released — except
 * on the -EBUSY path, where the app entry and mapping are kept and the
 * reference is restored so the caller can retry.
 *
 * @data:      client handle (app_id 0 means load failed; skip TZ unload).
 * @app_crash: when true, a failed ioctl drain is ignored and unload
 *             proceeds anyway.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int qseecom_unload_app(struct qseecom_dev_handle *data,
				bool app_crash)
{
	unsigned long flags;
	int ret = 0;
	struct qseecom_registered_app_list *ptr_app = NULL;
	bool found_app = false;

	if (!data) {
		pr_err("Invalid/uninitialized device handle\n");
		return -EINVAL;
	}
	pr_debug("unload app %d(%s), app_crash flag %d\n", data->client.app_id,
		data->client.app_name, app_crash);

	/* keymaster stays resident in TZ; skip TZ unload but still clean up */
	if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
		pr_debug("Do not unload keymaster app from tz\n");
		goto unload_exit;
	}

	/* Wait for other in-flight ioctls on this handle to finish. */
	ret = __qseecom_cleanup_app(data);
	if (ret && !app_crash) {
		pr_err("cleanup app failed, pending ioctl:%d\n", data->ioctl_count);
		return ret;
	}

	__qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);

	/* ignore app_id 0, it happens when close qseecom_fd if load app fail*/
	if (!data->client.app_id)
		goto unload_exit;

	/* Find the matching entry (by id AND name) and drop one reference. */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
								list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			(!strcmp(ptr_app->app_name, data->client.app_name))) {
			pr_debug("unload app %d (%s), ref_cnt %d\n",
				ptr_app->app_id, ptr_app->app_name,
				ptr_app->ref_cnt);
			ptr_app->ref_cnt--;
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
								flags);
	if (!found_app) {
		pr_err("Cannot find app with id = %d (%s)\n",
			data->client.app_id, data->client.app_name);
		ret = -EINVAL;
		goto unload_exit;
	}

	/* Last reference gone: actually shut the app down in TZ. */
	if (!ptr_app->ref_cnt) {
		ret = __qseecom_unload_app(data, data->client.app_id);
		if (ret == -EBUSY) {
			/*
			 * If unload failed due to EBUSY, don't free mem
			 * just restore app ref_cnt and return -EBUSY
			 */
			pr_warn("unload ta %d(%s) EBUSY\n",
				data->client.app_id, data->client.app_name);
			ptr_app->ref_cnt++;
			return ret;
		}
		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
		list_del(&ptr_app->list);
		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
								flags);
		kfree_sensitive(ptr_app);
	}

unload_exit:
	/* Release the client shared buffer mapping, if one was set up. */
	if (data->client.dmabuf) {
		qseecom_vaddr_unmap(data->client.sb_virt, data->client.sgt,
			data->client.attach, data->client.dmabuf);
		MAKE_NULL(data->client.sgt,
			data->client.attach, data->client.dmabuf);
	}
	data->released = true;
	return ret;
}
  2877. static int qseecom_prepare_unload_app(struct qseecom_dev_handle *data)
  2878. {
  2879. struct qseecom_unload_app_pending_list *entry = NULL;
  2880. pr_debug("prepare to unload app(%d)(%s), pending %d\n",
  2881. data->client.app_id, data->client.app_name,
  2882. data->client.unload_pending);
  2883. if (data->client.unload_pending)
  2884. return 0;
  2885. entry = kzalloc(sizeof(*entry), GFP_KERNEL);
  2886. if (!entry)
  2887. return -ENOMEM;
  2888. entry->data = data;
  2889. list_add_tail(&entry->list,
  2890. &qseecom.unload_app_pending_list_head);
  2891. data->client.unload_pending = true;
  2892. pr_debug("unload ta %d pending\n", data->client.app_id);
  2893. return 0;
  2894. }
/*
 * Set the unload kthread's state to WAKEUP, then wake it on its wait
 * queue.  State must be set before the wake-up so the kthread's
 * wait_event condition observes it.
 */
static void __wakeup_unload_app_kthread(void)
{
	atomic_set(&qseecom.unload_app_kthread_state,
				UNLOAD_APP_KT_WAKEUP);
	wake_up_interruptible(&qseecom.unload_app_kthread_wq);
}
  2901. static bool __qseecom_find_pending_unload_app(uint32_t app_id, char *app_name)
  2902. {
  2903. struct qseecom_unload_app_pending_list *entry = NULL;
  2904. bool found = false;
  2905. mutex_lock(&unload_app_pending_list_lock);
  2906. list_for_each_entry(entry, &qseecom.unload_app_pending_list_head,
  2907. list) {
  2908. if ((entry->data->client.app_id == app_id) &&
  2909. (!strcmp(entry->data->client.app_name, app_name))) {
  2910. found = true;
  2911. break;
  2912. }
  2913. }
  2914. mutex_unlock(&unload_app_pending_list_lock);
  2915. return found;
  2916. }
/*
 * Drain the pending-unload list: for each queued handle, unload its app
 * (under app_access_lock) and free the handle and list node.  The list
 * lock is dropped around the unload itself to respect lock ordering
 * (app_access_lock must not be taken inside unload_app_pending_list_lock),
 * then re-taken before the entry is removed.
 */
static void __qseecom_processing_pending_unload_app(void)
{
	struct qseecom_unload_app_pending_list *entry = NULL;
	struct list_head *pos;
	int ret = 0;

	mutex_lock(&unload_app_pending_list_lock);
	while (!list_empty(&qseecom.unload_app_pending_list_head)) {
		pos = qseecom.unload_app_pending_list_head.next;
		entry = list_entry(pos,
			struct qseecom_unload_app_pending_list, list);
		if (entry && entry->data) {
			pr_debug("process pending unload app %d (%s)\n",
				entry->data->client.app_id,
				entry->data->client.app_name);
			/* drop list lock before taking app_access_lock */
			mutex_unlock(&unload_app_pending_list_lock);
			mutex_lock(&app_access_lock);
			ret = qseecom_unload_app(entry->data, true);
			if (ret)
				pr_err("unload app %d pending failed %d\n",
					entry->data->client.app_id, ret);
			mutex_unlock(&app_access_lock);
			mutex_lock(&unload_app_pending_list_lock);
			__qseecom_free_tzbuf(&entry->data->sglistinfo_shm);
			kfree_sensitive(entry->data);
		}
		list_del(pos);
		kfree_sensitive(entry);
	}
	mutex_unlock(&unload_app_pending_list_lock);
}
/*
 * Kthread main loop for deferred app unloads: sleep until woken with
 * state UNLOAD_APP_KT_WAKEUP, drain the pending-unload list, then go
 * back to sleep (state UNLOAD_APP_KT_SLEEP) until stopped.
 */
static int __qseecom_unload_app_kthread_func(void *data)
{
	while (!kthread_should_stop()) {
		wait_event_interruptible(
			qseecom.unload_app_kthread_wq,
			atomic_read(&qseecom.unload_app_kthread_state)
				== UNLOAD_APP_KT_WAKEUP);
		pr_debug("kthread to unload app is called, state %d\n",
			atomic_read(&qseecom.unload_app_kthread_state));
		__qseecom_processing_pending_unload_app();
		atomic_set(&qseecom.unload_app_kthread_state,
				UNLOAD_APP_KT_SLEEP);
	}
	pr_warn("kthread to unload app stopped\n");
	return 0;
}
  2963. static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
  2964. unsigned long virt)
  2965. {
  2966. return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
  2967. }
  2968. static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
  2969. unsigned long virt)
  2970. {
  2971. return (uintptr_t)data->client.sb_virt +
  2972. (virt - data->client.user_virt_sb_base);
  2973. }
  2974. static int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
  2975. struct qseecom_send_svc_cmd_req *req_ptr,
  2976. struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
  2977. {
  2978. int ret = 0;
  2979. void *req_buf = NULL;
  2980. if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
  2981. pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
  2982. req_ptr, send_svc_ireq_ptr);
  2983. return -EINVAL;
  2984. }
  2985. /* Clients need to ensure req_buf is at base offset of shared buffer */
  2986. if ((uintptr_t)req_ptr->cmd_req_buf !=
  2987. data_ptr->client.user_virt_sb_base) {
  2988. pr_err("cmd buf not pointing to base offset of shared buffer\n");
  2989. return -EINVAL;
  2990. }
  2991. if (data_ptr->client.sb_length <
  2992. sizeof(struct qseecom_rpmb_provision_key)) {
  2993. pr_err("shared buffer is too small to hold key type\n");
  2994. return -EINVAL;
  2995. }
  2996. req_buf = data_ptr->client.sb_virt;
  2997. send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
  2998. send_svc_ireq_ptr->key_type =
  2999. ((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
  3000. send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
  3001. send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
  3002. data_ptr, (uintptr_t)req_ptr->resp_buf));
  3003. send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
  3004. return ret;
  3005. }
  3006. static int __qseecom_process_fsm_key_svc_cmd(
  3007. struct qseecom_dev_handle *data_ptr,
  3008. struct qseecom_send_svc_cmd_req *req_ptr,
  3009. struct qseecom_client_send_fsm_diag_req *send_svc_ireq_ptr)
  3010. {
  3011. int ret = 0;
  3012. uint32_t reqd_len_sb_in = 0;
  3013. if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
  3014. pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
  3015. req_ptr, send_svc_ireq_ptr);
  3016. return -EINVAL;
  3017. }
  3018. reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
  3019. if (reqd_len_sb_in > data_ptr->client.sb_length) {
  3020. pr_err("Not enough memory to fit cmd_buf and resp_buf.\n");
  3021. pr_err("Required: %u, Available: %zu\n",
  3022. reqd_len_sb_in, data_ptr->client.sb_length);
  3023. return -ENOMEM;
  3024. }
  3025. send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
  3026. send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
  3027. send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
  3028. data_ptr, (uintptr_t)req_ptr->resp_buf));
  3029. send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
  3030. send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
  3031. data_ptr, (uintptr_t)req_ptr->cmd_req_buf));
  3032. return ret;
  3033. }
  3034. static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
  3035. struct qseecom_send_svc_cmd_req *req)
  3036. {
  3037. if (!req || !req->resp_buf || !req->cmd_req_buf) {
  3038. pr_err("req or cmd buffer or response buffer is null\n");
  3039. return -EINVAL;
  3040. }
  3041. if (!data || !data->client.sb_virt) {
  3042. pr_err("Client or client buf is not initialized\n");
  3043. return -EINVAL;
  3044. }
  3045. if (data->client.sb_virt == NULL) {
  3046. pr_err("sb_virt null\n");
  3047. return -EINVAL;
  3048. }
  3049. if (data->client.user_virt_sb_base == 0) {
  3050. pr_err("user_virt_sb_base is null\n");
  3051. return -EINVAL;
  3052. }
  3053. if (data->client.sb_length == 0) {
  3054. pr_err("sb_length is 0\n");
  3055. return -EINVAL;
  3056. }
  3057. if (((uintptr_t)req->cmd_req_buf <
  3058. data->client.user_virt_sb_base) ||
  3059. ((uintptr_t)req->cmd_req_buf >=
  3060. (data->client.user_virt_sb_base + data->client.sb_length))) {
  3061. pr_err("cmd buffer address not within shared bufffer\n");
  3062. return -EINVAL;
  3063. }
  3064. if (((uintptr_t)req->resp_buf <
  3065. data->client.user_virt_sb_base) ||
  3066. ((uintptr_t)req->resp_buf >=
  3067. (data->client.user_virt_sb_base + data->client.sb_length))) {
  3068. pr_err("response buffer address not within shared bufffer\n");
  3069. return -EINVAL;
  3070. }
  3071. if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
  3072. (req->cmd_req_len > data->client.sb_length) ||
  3073. (req->resp_len > data->client.sb_length)) {
  3074. pr_err("cmd buf length or response buf length not valid\n");
  3075. return -EINVAL;
  3076. }
  3077. if (req->cmd_req_len > UINT_MAX - req->resp_len) {
  3078. pr_err("Integer overflow detected in req_len & rsp_len\n");
  3079. return -EINVAL;
  3080. }
  3081. if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
  3082. pr_debug("Not enough memory to fit cmd_buf.\n");
  3083. pr_debug("resp_buf. Required: %u, Available: %zu\n",
  3084. (req->cmd_req_len + req->resp_len),
  3085. data->client.sb_length);
  3086. return -ENOMEM;
  3087. }
  3088. if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
  3089. pr_err("Integer overflow in req_len & cmd_req_buf\n");
  3090. return -EINVAL;
  3091. }
  3092. if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
  3093. pr_err("Integer overflow in resp_len & resp_buf\n");
  3094. return -EINVAL;
  3095. }
  3096. if (data->client.user_virt_sb_base >
  3097. (ULONG_MAX - data->client.sb_length)) {
  3098. pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
  3099. return -EINVAL;
  3100. }
  3101. if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
  3102. ((uintptr_t)data->client.user_virt_sb_base +
  3103. data->client.sb_length)) ||
  3104. (((uintptr_t)req->resp_buf + req->resp_len) >
  3105. ((uintptr_t)data->client.user_virt_sb_base +
  3106. data->client.sb_length))) {
  3107. pr_err("cmd buf or resp buf is out of shared buffer region\n");
  3108. return -EINVAL;
  3109. }
  3110. return 0;
  3111. }
/*
 * Handle the send-service-cmd ioctl: validate the request, build the
 * appropriate TZ request (RPMB or FSM/diag family, chosen by cmd_id),
 * vote for bandwidth/clocks, issue the SCM call, and process the result.
 * Cache-clean before and cache-invalidate after the SCM call keep the
 * client shared buffer coherent with TZ.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
				void __user *argp)
{
	int ret = 0;
	struct qseecom_client_send_service_ireq send_svc_ireq;
	struct qseecom_client_send_fsm_diag_req send_fsm_diag_svc_ireq;
	struct qseecom_command_scm_resp resp;
	struct qseecom_send_svc_cmd_req req;
	void *send_req_ptr;
	size_t req_buf_size;

	if (copy_from_user(&req,
				(void __user *)argp,
				sizeof(req))) {
		pr_err("copy_from_user failed\n");
		return -EFAULT;
	}

	if (__validate_send_service_cmd_inputs(data, &req))
		return -EINVAL;

	data->type = QSEECOM_SECURE_SERVICE;

	/* Pick the TZ request layout based on the command family. */
	switch (req.cmd_id) {
	case QSEOS_RPMB_PROVISION_KEY_COMMAND:
	case QSEOS_RPMB_ERASE_COMMAND:
	case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
		send_req_ptr = &send_svc_ireq;
		req_buf_size = sizeof(send_svc_ireq);
		if (__qseecom_process_rpmb_svc_cmd(data, &req,
				send_req_ptr))
			return -EINVAL;
		break;
	case QSEOS_FSM_LTEOTA_REQ_CMD:
	case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
	case QSEOS_FSM_IKE_REQ_CMD:
	case QSEOS_FSM_IKE_REQ_RSP_CMD:
	case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
	case QSEOS_FSM_OEM_FUSE_READ_ROW:
	case QSEOS_FSM_ENCFS_REQ_CMD:
	case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
	case QSEOS_DIAG_FUSE_REQ_CMD:
	case QSEOS_DIAG_FUSE_REQ_RSP_CMD:
		send_req_ptr = &send_fsm_diag_svc_ireq;
		req_buf_size = sizeof(send_fsm_diag_svc_ireq);
		if (__qseecom_process_fsm_key_svc_cmd(data, &req,
				send_req_ptr))
			return -EINVAL;
		break;
	default:
		pr_err("Unsupported cmd_id %d\n", req.cmd_id);
		return -EINVAL;
	}

	/* Flush client buffer to memory so TZ reads current contents. */
	ret = qseecom_dmabuf_cache_operations(data->client.dmabuf,
					QSEECOM_CACHE_CLEAN);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	/* Vote for bandwidth (scaled) or directly for clocks. */
	if (qseecom.support_bus_scaling) {
		ret = qseecom_scale_bus_bandwidth_timer(HIGH);
		if (ret) {
			pr_err("Fail to set bw HIGH\n");
			return ret;
		}
	} else {
		ret = qseecom_perf_enable(data);
		if (ret) {
			pr_err("Failed to vote for clocks with err %d\n", ret);
			return ret;
		}
	}

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				(const void *)send_req_ptr,
				req_buf_size, &resp, sizeof(resp));
	if (ret) {
		pr_err("qseecom_scm_call failed with err: %d\n", ret);
		goto exit;
	}

	/* Invalidate so the CPU sees TZ's writes into the client buffer. */
	ret = qseecom_dmabuf_cache_operations(data->client.dmabuf,
					QSEECOM_CACHE_INVALIDATE);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		pr_debug("qseos_result_incomplete\n");
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd fail with result: %d\n",
				resp.result);
		}
		if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
			/* for this command the raw result IS the status */
			pr_warn("RPMB key status is 0x%x\n", resp.result);
			if (put_user(resp.result,
				(uint32_t __user *)req.resp_buf)) {
				ret = -EINVAL;
				goto exit;
			}
			ret = 0;
		}
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm call failed with resp.result: %d\n", resp.result);
		ret = -EINVAL;
		break;
	default:
		pr_err("Response result %d not supported\n",
				resp.result);
		ret = -EINVAL;
		break;
	}

exit:
	/* Undo the clock/bandwidth vote taken above. */
	if (!qseecom.support_bus_scaling) {
		qsee_disable_clock_vote(data, CLK_DFAB);
		qsee_disable_clock_vote(data, CLK_SFPB);
	} else {
		__qseecom_add_bw_scale_down_timer(
			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
	}
	return ret;
}
/*
 * Validate the userspace send-cmd request against the client's shared
 * buffer: buffer addresses within [user_virt_sb_base, base + sb_length),
 * valid lengths, no overflow in the address/length arithmetic, and
 * combined lengths that fit in the shared buffer.
 *
 * Return: 0 if valid, -EINVAL on validation failure, -ENOMEM when
 * cmd + resp don't fit in the shared buffer.
 */
static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
				struct qseecom_send_cmd_req *req)
{
	if (!data || !data->client.sb_virt) {
		pr_err("Client or client buf is not initialized\n");
		return -EINVAL;
	}
	/* resp_buf may be NULL only when resp_len is 0 */
	if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
						(req->cmd_req_buf == NULL)) {
		pr_err("cmd buffer or response buffer is null\n");
		return -EINVAL;
	}
	if (((uintptr_t)req->cmd_req_buf <
				data->client.user_virt_sb_base) ||
		((uintptr_t)req->cmd_req_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	/*
	 * NOTE(review): a NULL resp_buf with resp_len == 0 is accepted by
	 * the check above but will be rejected by this range check
	 * (NULL < user_virt_sb_base).  Looks intentional-by-accretion;
	 * confirm before relying on the NULL-resp_buf allowance.
	 */
	if (((uintptr_t)req->resp_buf <
				data->client.user_virt_sb_base)  ||
		((uintptr_t)req->resp_buf >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}
	if ((req->cmd_req_len == 0) ||
		(req->cmd_req_len > data->client.sb_length) ||
		(req->resp_len > data->client.sb_length)) {
		pr_err("cmd buf length or response buf length not valid\n");
		return -EINVAL;
	}
	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}
	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
				(req->cmd_req_len + req->resp_len),
					data->client.sb_length);
		return -ENOMEM;
	}
	/* Guard against wrap-around in the end-address computations below. */
	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
		pr_err("Integer overflow in req_len & cmd_req_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length)) ||
		(((uintptr_t)req->resp_buf + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
					data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
/*
 * Handle the TZ response for a reentrant send-cmd: service blocked-on-
 * listener states (then fall through to finish the still-incomplete
 * request), process INCOMPLETE responses while marking the app blocked
 * and bumping the global block refcount, or return directly on SUCCESS.
 *
 * Return: 0 on success, negative errno on failure or unsupported result.
 */
static int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
			struct qseecom_registered_app_list *ptr_app,
			struct qseecom_dev_handle *data)
{
	int ret = 0;

	switch (resp->result) {
	case QSEOS_RESULT_BLOCKED_ON_LISTENER:
		pr_warn("App(%d) %s is blocked on listener %d\n",
			data->client.app_id, data->client.app_name,
			resp->data);
		ret = __qseecom_process_reentrancy_blocked_on_listener(
					resp, ptr_app, data);
		if (ret) {
			pr_err("failed to process App(%d) %s is blocked on listener %d\n",
			data->client.app_id, data->client.app_name, resp->data);
			return ret;
		}
		/* fall through to process incomplete request */
	case QSEOS_RESULT_INCOMPLETE:
		/* mark the app blocked for the duration of the retry */
		qseecom.app_block_ref_cnt++;
		ptr_app->app_blocked = true;
		ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
		ptr_app->app_blocked = false;
		qseecom.app_block_ref_cnt--;
		/* wake threads parked in the app-blocked wait loop */
		wake_up_interruptible_all(&qseecom.app_block_wq);
		if (ret)
			pr_err("process_incomplete_cmd failed err: %d\n",
					ret);
		return ret;
	case QSEOS_RESULT_SUCCESS:
		return ret;
	default:
		pr_err("Response result %d not supported\n",
						resp->result);
		return -EINVAL;
	}
}
/*
 * Core send-command path: build a QSEOS_CLIENT_SEND_DATA ireq (32-bit or
 * 64-bit layout depending on QSEE version) and issue it to TZ via SCM.
 *
 * @data:        client handle; identifies the loaded TZ app and shared buf.
 * @req:         validated request; cmd_req_buf/resp_buf are either client
 *               user-virtual addresses or already-physical addresses,
 *               selected by @is_phys_adr.
 * @is_phys_adr: true when req buffers are physical addresses (modfd path).
 *
 * Returns 0 on success or a negative errno.
 */
static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
				struct qseecom_send_cmd_req *req,
				bool is_phys_adr)
{
	int ret = 0;
	u32 reqd_len_sb_in = 0;
	struct qseecom_client_send_data_ireq send_data_req = {0};
	struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
	struct qseecom_command_scm_resp resp;
	unsigned long flags;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	void *cmd_buf = NULL;
	size_t cmd_len;

	/* NOTE(review): computed but not otherwise used in this function */
	reqd_len_sb_in = req->cmd_req_len + req->resp_len;

	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
							list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			(!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}

	/* Refuse to talk to an app whose unload is already queued. */
	if (__qseecom_find_pending_unload_app(data->client.app_id,
						data->client.app_name)) {
		pr_err("app %d (%s) unload is pending\n",
			data->client.app_id, data->client.app_name);
		return -ENOENT;
	}

	if (qseecom.qsee_version < QSEE_VERSION_40) {
		/* Legacy 32-bit ireq layout. */
		send_data_req.app_id = data->client.app_id;
		if (!is_phys_adr) {
			send_data_req.req_ptr =
				(uint32_t)(__qseecom_uvirt_to_kphys
				(data, (uintptr_t)req->cmd_req_buf));
			send_data_req.rsp_ptr =
				(uint32_t)(__qseecom_uvirt_to_kphys(
				data, (uintptr_t)req->resp_buf));
		} else {
			send_data_req.req_ptr = (uint32_t)(uintptr_t)req->cmd_req_buf;
			send_data_req.rsp_ptr = (uint32_t)(uintptr_t)req->resp_buf;
		}
		send_data_req.req_len = req->cmd_req_len;
		send_data_req.rsp_len = req->resp_len;
		send_data_req.sglistinfo_ptr =
			(uint32_t)data->sglistinfo_shm.paddr;
		send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		/* Make the sglist table visible to TZ before the SCM call. */
		qtee_shmbridge_flush_shm_buf(&data->sglistinfo_shm);
		cmd_buf = (void *)&send_data_req;
		cmd_len = sizeof(struct qseecom_client_send_data_ireq);
	} else {
		/* QSEE >= 4.0 uses the 64-bit ireq layout. */
		send_data_req_64bit.app_id = data->client.app_id;
		if (!is_phys_adr) {
			send_data_req_64bit.req_ptr =
				__qseecom_uvirt_to_kphys(data,
				(uintptr_t)req->cmd_req_buf);
			send_data_req_64bit.rsp_ptr =
				__qseecom_uvirt_to_kphys(data,
				(uintptr_t)req->resp_buf);
		} else {
			send_data_req_64bit.req_ptr =
				(uintptr_t)req->cmd_req_buf;
			send_data_req_64bit.rsp_ptr =
				(uintptr_t)req->resp_buf;
		}
		send_data_req_64bit.req_len = req->cmd_req_len;
		send_data_req_64bit.rsp_len = req->resp_len;
		/* check if 32bit app's phys_addr region is under 4GB.*/
		if ((data->client.app_arch == ELFCLASS32) &&
			((send_data_req_64bit.req_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.req_len) ||
			(send_data_req_64bit.rsp_ptr >=
				PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
			pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
				data->client.app_name,
				send_data_req_64bit.req_ptr,
				send_data_req_64bit.req_len,
				send_data_req_64bit.rsp_ptr,
				send_data_req_64bit.rsp_len);
			return -EFAULT;
		}
		send_data_req_64bit.sglistinfo_ptr =
			(uint64_t)data->sglistinfo_shm.paddr;
		send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		qtee_shmbridge_flush_shm_buf(&data->sglistinfo_shm);
		cmd_buf = (void *)&send_data_req_64bit;
		cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
	}

	/* First field of either ireq is the command id; pick by whitelist. */
	if (!qseecom.whitelist_support || data->use_legacy_cmd)
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
	else
		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;

	/* Clean the client's dmabuf so TZ sees the latest request data. */
	if (data->client.dmabuf) {
		ret = qseecom_dmabuf_cache_operations(data->client.dmabuf,
			QSEECOM_CACHE_CLEAN);
		if (ret) {
			pr_err("cache operation failed %d\n", ret);
			return ret;
		}
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
			ret, data->client.app_id);
		goto exit;
	}

	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
		if (ret)
			goto exit;
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
					ret);
				goto exit;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
								resp.result);
				ret = -EINVAL;
				goto exit;
			}
		}
	}

	/* Invalidate so the CPU sees the response TZ wrote. */
	if (data->client.dmabuf) {
		ret = qseecom_dmabuf_cache_operations(data->client.dmabuf,
			QSEECOM_CACHE_INVALIDATE);
		if (ret) {
			pr_err("cache operation failed %d\n", ret);
			goto exit;
		}
	}
exit:
	return ret;
}
  3487. static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
  3488. {
  3489. int ret = 0;
  3490. struct qseecom_send_cmd_req req;
  3491. ret = copy_from_user(&req, argp, sizeof(req));
  3492. if (ret) {
  3493. pr_err("copy_from_user failed\n");
  3494. return ret;
  3495. }
  3496. if (__validate_send_cmd_inputs(data, &req))
  3497. return -EINVAL;
  3498. ret = __qseecom_send_cmd(data, &req, false);
  3499. return ret;
  3500. }
/*
 * Validate the cmd_buf_offset of ion-fd entry @i before a @size-byte
 * physical address (or sg-entry table) is patched at that offset.
 *
 * For a client app the offset must leave @size bytes of room inside the
 * request buffer; for a listener, inside the response buffer.  The patch
 * region must also not overlap the patch region of any other fd entry,
 * otherwise one patch would corrupt another.
 *
 * Returns 0 if the offset is acceptable, -EINVAL otherwise.
 */
static int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
			struct qseecom_send_modfd_listener_resp *lstnr_resp,
			struct qseecom_dev_handle *data, int i, size_t size)
{
	char *curr_field = NULL;
	char *temp_field = NULL;
	int j = 0;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
						(req->ifd_data[i].fd > 0)) {
		/*
		 * "len < size" is checked first so "len - size" below cannot
		 * underflow; together they bound offset to [0, len - size].
		 */
		if ((req->cmd_req_len < size) ||
			(req->ifd_data[i].cmd_buf_offset >
			req->cmd_req_len - size)) {
			pr_err("Invalid offset (req len) 0x%x\n",
					req->ifd_data[i].cmd_buf_offset);
			return -EINVAL;
		}
		curr_field = (char *) (req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset);
		/* Reject any other fd whose patch start lands inside ours. */
		for (j = 0; j < MAX_ION_FD; j++) {
			if ((req->ifd_data[j].fd > 0) && i != j) {
				temp_field = (char *) (req->cmd_req_buf +
					req->ifd_data[j].cmd_buf_offset);
				if (temp_field >= curr_field && temp_field <
					(curr_field + size)) {
					pr_err("Invalid field offset 0x%x\n",
					req->ifd_data[i].cmd_buf_offset);
					return -EINVAL;
				}
			}
		}
	} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
					(lstnr_resp->ifd_data[i].fd > 0)) {
		/* Same two-part bound check against the listener resp buf. */
		if ((lstnr_resp->resp_len < size) ||
			(lstnr_resp->ifd_data[i].cmd_buf_offset >
			lstnr_resp->resp_len - size)) {
			pr_err("Invalid offset (lstnr resp len) 0x%x\n",
					lstnr_resp->ifd_data[i].cmd_buf_offset);
			return -EINVAL;
		}
		curr_field = (char *) (lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset);
		for (j = 0; j < MAX_ION_FD; j++) {
			if ((lstnr_resp->ifd_data[j].fd > 0) && i != j) {
				temp_field = (char *) lstnr_resp->resp_buf_ptr +
					lstnr_resp->ifd_data[j].cmd_buf_offset;
				if (temp_field >= curr_field && temp_field <
					(curr_field + size)) {
					pr_err("Invalid lstnr field offset 0x%x\n",
					lstnr_resp->ifd_data[i].cmd_buf_offset);
					return -EINVAL;
				}
			}
		}
	}
	return 0;
}
/*
 * Patch (or, on cleanup, un-patch) 32-bit physical addresses of the
 * ion fds referenced by a modfd command into the command/response buffer.
 *
 * For each valid fd: map the dmabuf, then at the fd's cmd_buf_offset
 * write either a single 32-bit phys addr (one sg entry) or an array of
 * qseecom_sg_entry records (multiple entries).  When @cleanup is true
 * the patched fields are zeroed instead and caches are invalidated;
 * otherwise caches are cleaned and the per-fd sglistinfo table (used by
 * the whitelist SCM command) is filled in.
 *
 * @msg:     a qseecom_send_modfd_cmd_req (client app) or
 *           qseecom_send_modfd_listener_resp (listener), chosen by
 *           data->type.
 * @cleanup: false = patch phys addrs in, true = zero them back out.
 *
 * Returns 0 on success.  NOTE(review): the err path reports every
 * failure as -ENOMEM, even when the underlying cause was different.
 */
static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
			struct qseecom_dev_handle *data)
{
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;
	uint32_t offset;
	struct sg_table *sg_ptr = NULL;
	int ion_fd = -1;
	struct dma_buf *dmabuf = NULL;
	struct dma_buf_attachment *attach = NULL;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP))
		return -EFAULT;

	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}

	/* Interpret @msg according to the handle type. */
	if (data->type == QSEECOM_LISTENER_SERVICE) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		req = (struct qseecom_send_modfd_cmd_req *)msg;
	}

	for (i = 0; i < MAX_ION_FD; i++) {
		/* Locate this fd's patch target, or skip an unused slot. */
		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
						(req->ifd_data[i].fd > 0)) {
			ion_fd = req->ifd_data[i].fd;
			field = (char *) req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset;
		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ion_fd = lstnr_resp->ifd_data[i].fd;
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		ret = qseecom_dmabuf_map(ion_fd, &sg_ptr, &attach, &dmabuf);
		if (ret) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			pr_err("Num of scattered entries\n");
			pr_err(" (%d) is greater than max supported %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			goto err;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			/* Single entry: patch one 32-bit phys address. */
			uint32_t *update;

			if (__boundary_checks_offset(req, lstnr_resp, data, i, sizeof(uint32_t)))
				goto err;
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				/*
				 * Check if sg list phy add region is under 4GB
				 */
				if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
					(!cleanup) &&
					((uint64_t)sg_dma_address(sg_ptr->sgl)
					>= PHY_ADDR_4G - sg->length)) {
					pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
						data->client.app_name,
						&(sg_dma_address(sg_ptr->sgl)),
						sg->length);
					goto err;
				}
				update = (uint32_t *) field;
				*update = cleanup ? 0 :
					(uint32_t)sg_dma_address(sg_ptr->sgl);
			} else {
				pr_err("QSEE app arch %u is not supported\n",
					data->client.app_arch);
				goto err;
			}
			len += (uint32_t)sg->length;
		} else {
			/* Multiple entries: patch an sg-entry table. */
			struct qseecom_sg_entry *update;
			int j = 0;

			if (__boundary_checks_offset(req, lstnr_resp, data, i,
				(SG_ENTRY_SZ * sg_ptr->nents)))
				goto err;
			if ((data->type == QSEECOM_CLIENT_APP &&
				(data->client.app_arch == ELFCLASS32 ||
				data->client.app_arch == ELFCLASS64)) ||
				(data->type == QSEECOM_LISTENER_SERVICE)) {
				update = (struct qseecom_sg_entry *)field;
				for (j = 0; j < sg_ptr->nents; j++) {
					/*
					 * Check if sg list PA is under 4GB
					 */
					if ((qseecom.qsee_version >=
						QSEE_VERSION_40) &&
						(!cleanup) &&
						((uint64_t)(sg_dma_address(sg))
						>= PHY_ADDR_4G - sg->length)) {
						pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
							data->client.app_name,
							&(sg_dma_address(sg)),
							sg->length);
						goto err;
					}
					update->phys_addr = cleanup ? 0 :
						(uint32_t)sg_dma_address(sg);
					update->len = cleanup ? 0 : sg->length;
					update++;
					len += sg->length;
					sg = sg_next(sg);
				}
			} else {
				pr_err("QSEE app arch %u is not supported\n",
					data->client.app_arch);
				goto err;
			}
		}
		if (cleanup) {
			/* TZ is done writing: invalidate for the CPU. */
			ret = qseecom_dmabuf_cache_operations(dmabuf,
					QSEECOM_CACHE_INVALIDATE);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			/* Clean so TZ sees the buffer contents. */
			ret = qseecom_dmabuf_cache_operations(dmabuf,
					QSEECOM_CACHE_CLEAN);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			/* Record sglist info for the whitelist command. */
			if (data->type == QSEECOM_CLIENT_APP) {
				offset = req->ifd_data[i].cmd_buf_offset;
				data->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				data->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				data->sglist_cnt = i + 1;
			} else {
				/* Offset relative to the listener's shared buffer. */
				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
					(uintptr_t)this_lstnr->sb_virt);
				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 0, offset);
				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				this_lstnr->sglist_cnt = i + 1;
			}
		}
		/* Deallocate the kbuf */
		qseecom_dmabuf_unmap(sg_ptr, attach, dmabuf);
		sg_ptr = NULL;
		dmabuf = NULL;
		attach = NULL;
	}
	return ret;
err:
	/* Unmap whatever is still mapped from the failing iteration. */
	if (!IS_ERR_OR_NULL(sg_ptr)) {
		qseecom_dmabuf_unmap(sg_ptr, attach, dmabuf);
		MAKE_NULL(sg_ptr, attach, dmabuf);
	}
	return -ENOMEM;
}
/*
 * Overflow handling for > QSEECOM_MAX_SG_ENTRY scatter entries: allocate
 * a DMA-coherent side buffer holding all 64-bit sg entries and write a
 * qseecom_sg_list_buf_hdr_64bit at @field that points TZ at it.
 *
 * The allocation is recorded in data->client.sec_buf_fd[fd_idx] so the
 * cleanup pass (and the err path of the caller) can free it later.
 *
 * Returns 0 on success, -ENOMEM on bad index or allocation failure.
 */
static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
		char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
{
	struct scatterlist *sg = sg_ptr->sgl;
	struct qseecom_sg_entry_64bit *sg_entry;
	struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
	void *buf;
	uint i;
	size_t size;
	dma_addr_t coh_pmem;

	if (fd_idx >= MAX_ION_FD) {
		pr_err("fd_idx [%d] is invalid\n", fd_idx);
		return -ENOMEM;
	}
	buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
	memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
	/* Allocate a contiguous kernel buffer */
	size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
	/*
	 * Round up to a whole page.  NOTE(review): this adds a full extra
	 * page when size is already page-aligned; PAGE_ALIGN(size) would
	 * suffice, but the over-allocation is harmless since the same size
	 * is used for the matching dma_free_coherent().
	 */
	size = (size + PAGE_SIZE) & PAGE_MASK;
	buf = dma_alloc_coherent(qseecom.dev,
			size, &coh_pmem, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;
	/* update qseecom_sg_list_buf_hdr_64bit */
	buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
	buf_hdr->new_buf_phys_addr = coh_pmem;
	buf_hdr->nents_total = sg_ptr->nents;
	/* save the left sg entries into new allocated buf */
	sg_entry = (struct qseecom_sg_entry_64bit *)buf;
	for (i = 0; i < sg_ptr->nents; i++) {
		sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
		sg_entry->len = sg->length;
		sg_entry++;
		sg = sg_next(sg);
	}
	/* Track the allocation for the later cleanup/free pass. */
	data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
	data->client.sec_buf_fd[fd_idx].vbase = buf;
	data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
	data->client.sec_buf_fd[fd_idx].size = size;
	return 0;
}
/*
 * 64-bit variant of __qseecom_update_cmd_buf(): patch (or un-patch on
 * cleanup) 64-bit physical addresses of the ion fds referenced by a
 * modfd command into the command/response buffer.
 *
 * Differences from the 32-bit path: entries are qseecom_sg_entry_64bit,
 * no 4GB bound check is needed, and when an fd has more scatter entries
 * than QSEECOM_MAX_SG_ENTRY a coherent side buffer is allocated
 * (__qseecom_allocate_sg_list_buffer) and referenced via a list-buf
 * header instead of failing.
 *
 * @msg:     qseecom_send_modfd_cmd_req or qseecom_send_modfd_listener_resp,
 *           chosen by data->type.
 * @cleanup: false = patch phys addrs in, true = zero them and free any
 *           side buffers.
 *
 * Returns 0 on success.  NOTE(review): the err path frees ALL recorded
 * sec_buf_fd side buffers and reports every failure as -ENOMEM.
 */
static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
			struct qseecom_dev_handle *data)
{
	char *field;
	int ret = 0;
	int i = 0;
	uint32_t len = 0;
	struct scatterlist *sg;
	struct qseecom_send_modfd_cmd_req *req = NULL;
	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
	struct qseecom_registered_listener_list *this_lstnr = NULL;
	uint32_t offset;
	struct sg_table *sg_ptr;
	int ion_fd = -1;
	struct dma_buf *dmabuf = NULL;
	struct dma_buf_attachment *attach = NULL;

	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP))
		return -EFAULT;

	if (msg == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}

	/* Interpret @msg according to the handle type. */
	if (data->type == QSEECOM_LISTENER_SERVICE) {
		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
		this_lstnr = __qseecom_find_svc(data->listener.id);
		if (IS_ERR_OR_NULL(this_lstnr)) {
			pr_err("Invalid listener ID\n");
			return -ENOMEM;
		}
	} else {
		req = (struct qseecom_send_modfd_cmd_req *)msg;
	}

	for (i = 0; i < MAX_ION_FD; i++) {
		/* Locate this fd's patch target, or skip an unused slot. */
		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
						(req->ifd_data[i].fd > 0)) {
			ion_fd = req->ifd_data[i].fd;
			field = (char *) req->cmd_req_buf +
				req->ifd_data[i].cmd_buf_offset;
		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
				(lstnr_resp->ifd_data[i].fd > 0)) {
			ion_fd = lstnr_resp->ifd_data[i].fd;
			field = lstnr_resp->resp_buf_ptr +
				lstnr_resp->ifd_data[i].cmd_buf_offset;
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		ret = qseecom_dmabuf_map(ion_fd, &sg_ptr, &attach, &dmabuf);
		if (ret) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		if (sg_ptr->nents == 0) {
			pr_err("Num of scattered entries is 0\n");
			goto err;
		}
		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
			/* Too many entries: fall back to a side buffer. */
			pr_warn("Num of scattered entries\n");
			pr_warn(" (%d) is greater than %d\n",
				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
			if (cleanup) {
				if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
					data->client.sec_buf_fd[i].vbase)
					dma_free_coherent(qseecom.dev,
					data->client.sec_buf_fd[i].size,
					data->client.sec_buf_fd[i].vbase,
					data->client.sec_buf_fd[i].pbase);
			} else {
				ret = __qseecom_allocate_sg_list_buffer(data,
						field, i, sg_ptr);
				if (ret) {
					pr_err("Failed to allocate sg list buffer\n");
					goto err;
				}
			}
			len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
			sg = sg_ptr->sgl;
			goto cleanup;
		}
		sg = sg_ptr->sgl;
		if (sg_ptr->nents == 1) {
			/* Single entry: patch one 64-bit phys address. */
			uint64_t *update_64bit;

			if (__boundary_checks_offset(req, lstnr_resp, data, i, sizeof(uint64_t)))
				goto err;
			/* 64bit app uses 64bit address */
			update_64bit = (uint64_t *) field;
			*update_64bit = cleanup ? 0 :
					(uint64_t)sg_dma_address(sg_ptr->sgl);
			len += (uint32_t)sg->length;
		} else {
			/* Multiple entries: patch a 64-bit sg-entry table. */
			struct qseecom_sg_entry_64bit *update_64bit;
			int j = 0;

			if (__boundary_checks_offset(req, lstnr_resp, data, i,
				(SG_ENTRY_SZ_64BIT * sg_ptr->nents)))
				goto err;
			/* 64bit app uses 64bit address */
			update_64bit = (struct qseecom_sg_entry_64bit *)field;
			for (j = 0; j < sg_ptr->nents; j++) {
				update_64bit->phys_addr = cleanup ? 0 :
					(uint64_t)sg_dma_address(sg);
				update_64bit->len = cleanup ? 0 :
					(uint32_t)sg->length;
				update_64bit++;
				len += sg->length;
				sg = sg_next(sg);
			}
		}
cleanup:
		if (cleanup) {
			/* TZ is done writing: invalidate for the CPU. */
			ret = qseecom_dmabuf_cache_operations(dmabuf,
					QSEECOM_CACHE_INVALIDATE);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			/* Clean so TZ sees the buffer contents. */
			ret = qseecom_dmabuf_cache_operations(dmabuf,
					QSEECOM_CACHE_CLEAN);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			/* Record sglist info for the whitelist command. */
			if (data->type == QSEECOM_CLIENT_APP) {
				offset = req->ifd_data[i].cmd_buf_offset;
				data->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 1, offset);
				data->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				data->sglist_cnt = i + 1;
			} else {
				/* Offset relative to the listener's shared buffer. */
				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
					(uintptr_t)this_lstnr->sb_virt);
				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
					SGLISTINFO_SET_INDEX_FLAG(
					(sg_ptr->nents == 1), 1, offset);
				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
					(sg_ptr->nents == 1) ?
					sg->length : sg_ptr->nents;
				this_lstnr->sglist_cnt = i + 1;
			}
		}
		/* unmap the dmabuf */
		qseecom_dmabuf_unmap(sg_ptr, attach, dmabuf);
		sg_ptr = NULL;
		dmabuf = NULL;
		attach = NULL;
	}
	return ret;
err:
	/* Free every recorded side buffer and unmap the current dmabuf. */
	for (i = 0; i < MAX_ION_FD; i++)
		if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
			data->client.sec_buf_fd[i].vbase)
			dma_free_coherent(qseecom.dev,
				data->client.sec_buf_fd[i].size,
				data->client.sec_buf_fd[i].vbase,
				data->client.sec_buf_fd[i].pbase);
	if (!IS_ERR_OR_NULL(sg_ptr)) {
		qseecom_dmabuf_unmap(sg_ptr, attach, dmabuf);
		MAKE_NULL(sg_ptr, attach, dmabuf);
	}
	return -ENOMEM;
}
  3947. static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
  3948. void __user *argp,
  3949. bool is_64bit_addr)
  3950. {
  3951. int ret = 0;
  3952. int i;
  3953. struct qseecom_send_modfd_cmd_req req;
  3954. struct qseecom_send_cmd_req send_cmd_req;
  3955. void *origin_req_buf_kvirt, *origin_rsp_buf_kvirt;
  3956. phys_addr_t pa;
  3957. u8 *va = NULL;
  3958. ret = copy_from_user(&req, argp, sizeof(req));
  3959. if (ret) {
  3960. pr_err("copy_from_user failed\n");
  3961. return ret;
  3962. }
  3963. send_cmd_req.cmd_req_buf = req.cmd_req_buf;
  3964. send_cmd_req.cmd_req_len = req.cmd_req_len;
  3965. send_cmd_req.resp_buf = req.resp_buf;
  3966. send_cmd_req.resp_len = req.resp_len;
  3967. if (__validate_send_cmd_inputs(data, &send_cmd_req))
  3968. return -EINVAL;
  3969. /* validate offsets */
  3970. for (i = 0; i < MAX_ION_FD; i++) {
  3971. if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
  3972. pr_err("Invalid offset %d = 0x%x\n",
  3973. i, req.ifd_data[i].cmd_buf_offset);
  3974. return -EINVAL;
  3975. }
  3976. }
  3977. /*Back up original address */
  3978. origin_req_buf_kvirt = (void *)__qseecom_uvirt_to_kvirt(data,
  3979. (uintptr_t)req.cmd_req_buf);
  3980. origin_rsp_buf_kvirt = (void *)__qseecom_uvirt_to_kvirt(data,
  3981. (uintptr_t)req.resp_buf);
  3982. /* Allocate kernel buffer for request and response*/
  3983. ret = __qseecom_alloc_coherent_buf(req.cmd_req_len + req.resp_len,
  3984. &va, &pa);
  3985. if (ret) {
  3986. pr_err("Failed to allocate coherent buf, ret %d\n", ret);
  3987. return ret;
  3988. }
  3989. req.cmd_req_buf = va;
  3990. send_cmd_req.cmd_req_buf = (void *)pa;
  3991. req.resp_buf = va + req.cmd_req_len;
  3992. send_cmd_req.resp_buf = (void *)pa + req.cmd_req_len;
  3993. /* Copy the data to kernel request and response buffers*/
  3994. memcpy(req.cmd_req_buf, origin_req_buf_kvirt, req.cmd_req_len);
  3995. memcpy(req.resp_buf, origin_rsp_buf_kvirt, req.resp_len);
  3996. if (!is_64bit_addr) {
  3997. ret = __qseecom_update_cmd_buf(&req, false, data);
  3998. if (ret)
  3999. goto out;
  4000. ret = __qseecom_send_cmd(data, &send_cmd_req, true);
  4001. if (ret)
  4002. goto out;
  4003. ret = __qseecom_update_cmd_buf(&req, true, data);
  4004. if (ret)
  4005. goto out;
  4006. } else {
  4007. ret = __qseecom_update_cmd_buf_64(&req, false, data);
  4008. if (ret)
  4009. goto out;
  4010. ret = __qseecom_send_cmd(data, &send_cmd_req, true);
  4011. if (ret)
  4012. goto out;
  4013. ret = __qseecom_update_cmd_buf_64(&req, true, data);
  4014. if (ret)
  4015. goto out;
  4016. }
  4017. /*Copy the response back to the userspace buffer*/
  4018. memcpy(origin_rsp_buf_kvirt, req.resp_buf, req.resp_len);
  4019. memcpy(origin_req_buf_kvirt, req.cmd_req_buf, req.cmd_req_len);
  4020. out:
  4021. if (req.cmd_req_buf)
  4022. __qseecom_free_coherent_buf(req.cmd_req_len + req.resp_len,
  4023. req.cmd_req_buf, (phys_addr_t)send_cmd_req.cmd_req_buf);
  4024. return ret;
  4025. }
  4026. static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
  4027. void __user *argp)
  4028. {
  4029. return __qseecom_send_modfd_cmd(data, argp, false);
  4030. }
  4031. static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
  4032. void __user *argp)
  4033. {
  4034. return __qseecom_send_modfd_cmd(data, argp, true);
  4035. }
  4036. static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
  4037. struct qseecom_registered_listener_list *svc)
  4038. {
  4039. int ret;
  4040. ret = (svc->rcv_req_flag == 1);
  4041. return ret || data->abort;
  4042. }
  4043. static int qseecom_receive_req(struct qseecom_dev_handle *data)
  4044. {
  4045. int ret = 0;
  4046. struct qseecom_registered_listener_list *this_lstnr;
  4047. mutex_lock(&listener_access_lock);
  4048. this_lstnr = __qseecom_find_svc(data->listener.id);
  4049. if (!this_lstnr) {
  4050. pr_err("Invalid listener ID\n");
  4051. mutex_unlock(&listener_access_lock);
  4052. return -ENODATA;
  4053. }
  4054. mutex_unlock(&listener_access_lock);
  4055. while (1) {
  4056. if (wait_event_interruptible(this_lstnr->rcv_req_wq,
  4057. __qseecom_listener_has_rcvd_req(data,
  4058. this_lstnr))) {
  4059. pr_debug("Interrupted: exiting Listener Service = %d\n",
  4060. (uint32_t)data->listener.id);
  4061. /* woken up for different reason */
  4062. return -ERESTARTSYS;
  4063. }
  4064. if (data->abort) {
  4065. pr_err("Aborting Listener Service = %d\n",
  4066. (uint32_t)data->listener.id);
  4067. return -ENODEV;
  4068. }
  4069. mutex_lock(&listener_access_lock);
  4070. this_lstnr->rcv_req_flag = 0;
  4071. mutex_unlock(&listener_access_lock);
  4072. break;
  4073. }
  4074. return ret;
  4075. }
  4076. static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
  4077. {
  4078. unsigned char app_arch = 0;
  4079. struct elf32_hdr *ehdr;
  4080. struct elf64_hdr *ehdr64;
  4081. app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
  4082. switch (app_arch) {
  4083. case ELFCLASS32: {
  4084. ehdr = (struct elf32_hdr *)fw_entry->data;
  4085. if (fw_entry->size < sizeof(*ehdr)) {
  4086. pr_err("%s: Not big enough to be an elf32 header\n",
  4087. qseecom.pdev->init_name);
  4088. return false;
  4089. }
  4090. if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
  4091. pr_err("%s: Not an elf32 header\n",
  4092. qseecom.pdev->init_name);
  4093. return false;
  4094. }
  4095. if (ehdr->e_phnum == 0) {
  4096. pr_err("%s: No loadable segments\n",
  4097. qseecom.pdev->init_name);
  4098. return false;
  4099. }
  4100. if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
  4101. sizeof(struct elf32_hdr) > fw_entry->size) {
  4102. pr_err("%s: Program headers not within mdt\n",
  4103. qseecom.pdev->init_name);
  4104. return false;
  4105. }
  4106. break;
  4107. }
  4108. case ELFCLASS64: {
  4109. ehdr64 = (struct elf64_hdr *)fw_entry->data;
  4110. if (fw_entry->size < sizeof(*ehdr64)) {
  4111. pr_err("%s: Not big enough to be an elf64 header\n",
  4112. qseecom.pdev->init_name);
  4113. return false;
  4114. }
  4115. if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
  4116. pr_err("%s: Not an elf64 header\n",
  4117. qseecom.pdev->init_name);
  4118. return false;
  4119. }
  4120. if (ehdr64->e_phnum == 0) {
  4121. pr_err("%s: No loadable segments\n",
  4122. qseecom.pdev->init_name);
  4123. return false;
  4124. }
  4125. if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
  4126. sizeof(struct elf64_hdr) > fw_entry->size) {
  4127. pr_err("%s: Program headers not within mdt\n",
  4128. qseecom.pdev->init_name);
  4129. return false;
  4130. }
  4131. break;
  4132. }
  4133. default: {
  4134. pr_err("QSEE app arch %u is not supported\n", app_arch);
  4135. return false;
  4136. }
  4137. }
  4138. return true;
  4139. }
/*
 * Compute the total size of a TZ app's split firmware image and report
 * its ELF class.
 *
 * Loads "<appname>.mdt" to validate it and read the program-header
 * count, then sums the sizes of the mdt plus every "<appname>.bNN"
 * blob into *fw_size.
 *
 * Returns 0 on success (the mdt validator guarantees e_phnum >= 1, so
 * the loop runs at least once and leaves ret == 0 on the success path);
 * on failure returns a negative errno and zeroes *fw_size.
 */
static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
					uint32_t *app_arch)
{
	int ret = -1;
	int i = 0, rc = 0;
	const struct firmware *fw_entry = NULL;
	char fw_name[MAX_APP_NAME_SIZE];
	struct elf32_hdr *ehdr;
	struct elf64_hdr *ehdr64;
	int num_images = 0;

	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
	rc = firmware_request_nowarn(&fw_entry, fw_name, qseecom.pdev);
	if (rc) {
		pr_err("error with firmware_request_nowarn, rc = %d\n", rc);
		ret = -EIO;
		goto err;
	}
	if (!__qseecom_is_fw_image_valid(fw_entry)) {
		ret = -EIO;
		goto err;
	}
	*app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
	*fw_size = fw_entry->size;
	if (*app_arch == ELFCLASS32) {
		ehdr = (struct elf32_hdr *)fw_entry->data;
		num_images = ehdr->e_phnum;
	} else if (*app_arch == ELFCLASS64) {
		ehdr64 = (struct elf64_hdr *)fw_entry->data;
		num_images = ehdr64->e_phnum;
	} else {
		pr_err("QSEE %s app, arch %u is not supported\n",
			appname, *app_arch);
		ret = -EIO;
		goto err;
	}
	pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
	release_firmware(fw_entry);
	fw_entry = NULL;

	/* Accumulate each segment blob's size, guarding against overflow. */
	for (i = 0; i < num_images; i++) {
		memset(fw_name, 0, sizeof(fw_name));
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
		ret = firmware_request_nowarn(&fw_entry, fw_name, qseecom.pdev);
		if (ret)
			goto err;
		if (*fw_size > U32_MAX - fw_entry->size) {
			pr_err("QSEE %s app file size overflow\n", appname);
			ret = -EINVAL;
			goto err;
		}
		*fw_size += fw_entry->size;
		release_firmware(fw_entry);
		fw_entry = NULL;
	}

	return ret;
err:
	if (fw_entry)
		release_firmware(fw_entry);
	*fw_size = 0;
	return ret;
}
/*
 * __qseecom_get_fw_data() - read a split firmware image into @img_data.
 * @appname:  TA basename; blobs are "<appname>.mdt" plus "<appname>.bNN".
 * @img_data: destination buffer, at least @fw_size bytes.
 * @fw_size:  capacity of @img_data (as computed by __qseecom_get_fw_size()).
 * @load_req: out: img_len and mdt_len are filled in from the loaded blobs.
 *
 * Loads the .mdt metadata blob first, derives the number of program
 * segments from its ELF header (32- or 64-bit, per e_ident[EI_CLASS]),
 * then appends each .bNN segment blob.  Every copy is bounds-checked
 * against @fw_size.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): if e_phnum is 0 the loop never runs and the initial
 * ret = -1 is returned even though all copies succeeded — confirm this is
 * intended (a zero-segment image is malformed anyway).
 */
static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
		uint32_t fw_size,
		struct qseecom_load_app_ireq *load_req)
{
	int ret = -1;
	int i = 0, rc = 0;
	const struct firmware *fw_entry = NULL;
	char fw_name[MAX_APP_NAME_SIZE];
	u8 *img_data_ptr = img_data;	/* write cursor into img_data */
	struct elf32_hdr *ehdr;
	struct elf64_hdr *ehdr64;
	int num_images = 0;
	unsigned char app_arch = 0;

	/* Load the metadata (.mdt) blob first; it carries the ELF header. */
	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
	rc = firmware_request_nowarn(&fw_entry, fw_name, qseecom.pdev);
	if (rc) {
		ret = -EIO;
		goto err;
	}
	load_req->img_len = fw_entry->size;
	if (load_req->img_len > fw_size) {
		pr_err("app %s size %zu is larger than buf size %u\n",
			appname, fw_entry->size, fw_size);
		ret = -EINVAL;
		goto err;
	}
	memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
	img_data_ptr = img_data_ptr + fw_entry->size;
	load_req->mdt_len = fw_entry->size; /*Get MDT LEN*/
	/* e_ident[EI_CLASS] selects the 32-bit vs 64-bit ELF header layout. */
	app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
	if (app_arch == ELFCLASS32) {
		ehdr = (struct elf32_hdr *)fw_entry->data;
		num_images = ehdr->e_phnum;
	} else if (app_arch == ELFCLASS64) {
		ehdr64 = (struct elf64_hdr *)fw_entry->data;
		num_images = ehdr64->e_phnum;
	} else {
		pr_err("QSEE %s app, arch %u is not supported\n",
			appname, app_arch);
		ret = -EIO;
		goto err;
	}
	release_firmware(fw_entry);
	fw_entry = NULL;
	/* Append each program-segment blob (<appname>.b00, .b01, ...). */
	for (i = 0; i < num_images; i++) {
		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
		ret = firmware_request_nowarn(&fw_entry, fw_name, qseecom.pdev);
		if (ret) {
			pr_err("Failed to locate blob %s\n", fw_name);
			goto err;
		}
		/* Guard both u32 wraparound and overrun of the caller's buf. */
		if ((fw_entry->size > U32_MAX - load_req->img_len) ||
			(fw_entry->size + load_req->img_len > fw_size)) {
			pr_err("Invalid file size for %s\n", fw_name);
			ret = -EINVAL;
			goto err;
		}
		memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
		img_data_ptr = img_data_ptr + fw_entry->size;
		load_req->img_len += fw_entry->size;
		release_firmware(fw_entry);
		fw_entry = NULL;
	}
	return ret;
err:
	/* release_firmware(NULL) is a no-op, so this is safe on all paths. */
	release_firmware(fw_entry);
	return ret;
}
/*
 * __qseecom_alloc_coherent_buf() - allocate a DMA-coherent kernel buffer.
 * @size:  requested byte count; rounded up before allocation (see note).
 * @vaddr: out: kernel virtual address of the buffer.
 * @paddr: out: bus/physical address of the buffer.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 *
 * NOTE(review): "(size + PAGE_SIZE) & PAGE_MASK" always rounds up by a full
 * page, even when @size is already page aligned (unlike PAGE_ALIGN()).  The
 * identical expression in __qseecom_free_coherent_buf() must stay in sync,
 * so the two sites can only ever be changed together.
 */
static int __qseecom_alloc_coherent_buf(
			uint32_t size, u8 **vaddr, phys_addr_t *paddr)
{
	dma_addr_t coh_pmem;
	void *buf = NULL;

	/* Allocate a contiguous kernel buffer */
	size = (size + PAGE_SIZE) & PAGE_MASK;
	buf = dma_alloc_coherent(qseecom.dev,
			size, &coh_pmem, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;
	*vaddr = buf;
	*paddr = coh_pmem;
	return 0;
}
/*
 * __qseecom_free_coherent_buf() - release a buffer obtained from
 * __qseecom_alloc_coherent_buf().
 * @size:  the size originally passed to the allocator (re-rounded here with
 *         the same expression so dma_free_coherent() sees the same length).
 * @vaddr: kernel virtual address; NULL is tolerated (no-op).
 * @paddr: matching bus/physical address.
 */
static void __qseecom_free_coherent_buf(uint32_t size,
				u8 *vaddr, phys_addr_t paddr)
{
	if (!vaddr)
		return;
	size = (size + PAGE_SIZE) & PAGE_MASK;
	dma_free_coherent(qseecom.dev, size, vaddr, paddr);
}
  4291. static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
  4292. uint32_t *app_id)
  4293. {
  4294. int ret = -1;
  4295. uint32_t fw_size = 0;
  4296. struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
  4297. struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
  4298. struct qseecom_command_scm_resp resp;
  4299. u8 *img_data = NULL;
  4300. phys_addr_t pa = 0;
  4301. void *cmd_buf = NULL;
  4302. size_t cmd_len;
  4303. uint32_t app_arch = 0;
  4304. if (!data || !appname || !app_id) {
  4305. pr_err("Null pointer to data or appname or appid\n");
  4306. return -EINVAL;
  4307. }
  4308. *app_id = 0;
  4309. if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
  4310. return -EIO;
  4311. data->client.app_arch = app_arch;
  4312. /* Check and load cmnlib */
  4313. if (qseecom.qsee_version > QSEEE_VERSION_00) {
  4314. if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) {
  4315. ret = qseecom_load_commonlib_image(data, "cmnlib");
  4316. if (ret) {
  4317. pr_err("failed to load cmnlib\n");
  4318. return -EIO;
  4319. }
  4320. qseecom.commonlib_loaded = true;
  4321. pr_debug("cmnlib is loaded\n");
  4322. }
  4323. if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) {
  4324. ret = qseecom_load_commonlib_image(data, "cmnlib64");
  4325. if (ret) {
  4326. pr_err("failed to load cmnlib64\n");
  4327. return -EIO;
  4328. }
  4329. qseecom.commonlib64_loaded = true;
  4330. pr_debug("cmnlib64 is loaded\n");
  4331. }
  4332. }
  4333. ret = __qseecom_alloc_coherent_buf(fw_size, &img_data, &pa);
  4334. if (ret)
  4335. return ret;
  4336. ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
  4337. if (ret) {
  4338. ret = -EIO;
  4339. goto exit_free_img_data;
  4340. }
  4341. /* Populate the load_req parameters */
  4342. if (qseecom.qsee_version < QSEE_VERSION_40) {
  4343. load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
  4344. load_req.mdt_len = load_req.mdt_len;
  4345. load_req.img_len = load_req.img_len;
  4346. strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
  4347. load_req.phy_addr = (uint32_t)pa;
  4348. cmd_buf = (void *)&load_req;
  4349. cmd_len = sizeof(struct qseecom_load_app_ireq);
  4350. } else {
  4351. load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
  4352. load_req_64bit.mdt_len = load_req.mdt_len;
  4353. load_req_64bit.img_len = load_req.img_len;
  4354. strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
  4355. load_req_64bit.phy_addr = (uint64_t)pa;
  4356. cmd_buf = (void *)&load_req_64bit;
  4357. cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
  4358. }
  4359. if (qseecom.support_bus_scaling) {
  4360. mutex_lock(&qsee_bw_mutex);
  4361. ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
  4362. mutex_unlock(&qsee_bw_mutex);
  4363. if (ret) {
  4364. ret = -EIO;
  4365. goto exit_free_img_data;
  4366. }
  4367. }
  4368. ret = __qseecom_enable_clk_scale_up(data);
  4369. if (ret) {
  4370. ret = -EIO;
  4371. goto exit_unregister_bus_bw_need;
  4372. }
  4373. /* SCM_CALL to load the image */
  4374. ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
  4375. &resp, sizeof(resp));
  4376. if (ret) {
  4377. pr_err("scm_call to load failed : ret %d\n", ret);
  4378. ret = -EIO;
  4379. goto exit_disable_clk_vote;
  4380. }
  4381. switch (resp.result) {
  4382. case QSEOS_RESULT_SUCCESS:
  4383. *app_id = resp.data;
  4384. break;
  4385. case QSEOS_RESULT_INCOMPLETE:
  4386. ret = __qseecom_process_incomplete_cmd(data, &resp);
  4387. if (ret) {
  4388. pr_err("incomp_cmd err %d, %d, unload %d %s\n",
  4389. ret, resp.result, resp.data, appname);
  4390. __qseecom_unload_app(data, resp.data);
  4391. ret = -EFAULT;
  4392. } else {
  4393. *app_id = resp.data;
  4394. }
  4395. break;
  4396. case QSEOS_RESULT_FAILURE:
  4397. pr_err("scm call failed with response QSEOS_RESULT FAILURE\n");
  4398. break;
  4399. default:
  4400. pr_err("scm call return unknown response %d\n", resp.result);
  4401. ret = -EINVAL;
  4402. break;
  4403. }
  4404. exit_disable_clk_vote:
  4405. __qseecom_disable_clk_scale_down(data);
  4406. exit_unregister_bus_bw_need:
  4407. if (qseecom.support_bus_scaling) {
  4408. mutex_lock(&qsee_bw_mutex);
  4409. qseecom_unregister_bus_bandwidth_needs(data);
  4410. mutex_unlock(&qsee_bw_mutex);
  4411. }
  4412. exit_free_img_data:
  4413. if (img_data)
  4414. __qseecom_free_coherent_buf(fw_size, img_data, pa);
  4415. return ret;
  4416. }
/*
 * qseecom_load_commonlib_image() - load a QSEE common library into TZ.
 * @data:        client handle used for bus-bandwidth and clock voting.
 * @cmnlib_name: library basename ("cmnlib" or "cmnlib64" at call sites);
 *               its split firmware blobs are read from the filesystem.
 *
 * Reads the library image into a DMA-coherent buffer and issues
 * QSEOS_LOAD_SERV_IMAGE_COMMAND over SCM, with bandwidth/clock votes held
 * around the call.  Returns 0 on success or a negative errno.
 */
static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
					char *cmnlib_name)
{
	int ret = 0;
	uint32_t fw_size = 0;
	struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
	struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
	struct qseecom_command_scm_resp resp;
	u8 *img_data = NULL;
	phys_addr_t pa = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	uint32_t app_arch = 0;

	if (!cmnlib_name) {
		pr_err("cmnlib_name is NULL\n");
		return -EINVAL;
	}
	if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
		pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
			cmnlib_name, strlen(cmnlib_name));
		return -EINVAL;
	}

	if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
		return -EIO;

	ret = __qseecom_alloc_coherent_buf(fw_size, &img_data, &pa);
	if (ret)
		return -EIO;

	ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
	if (ret) {
		ret = -EIO;
		goto exit_free_img_data;
	}

	/* Pick the 32-bit or 64-bit request layout based on QSEE version. */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		load_req.phy_addr = (uint32_t)pa;
		load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		cmd_buf = (void *)&load_req;
		cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
	} else {
		load_req_64bit.phy_addr = (uint64_t)pa;
		load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
		load_req_64bit.img_len = load_req.img_len;
		load_req_64bit.mdt_len = load_req.mdt_len;
		cmd_buf = (void *)&load_req_64bit;
		cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
	}

	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
		mutex_unlock(&qsee_bw_mutex);
		if (ret) {
			ret = -EIO;
			goto exit_free_img_data;
		}
	}

	/* Vote for the SFPB clock */
	ret = __qseecom_enable_clk_scale_up(data);
	if (ret) {
		ret = -EIO;
		goto exit_unregister_bus_bw_need;
	}

	/* SCM_CALL to load the image */
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call to load failed : ret %d\n", ret);
		ret = -EIO;
		goto exit_disable_clk_vote;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm call failed w/response result%d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	case QSEOS_RESULT_INCOMPLETE:
		/* TZ needs listener services before loading can complete. */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd failed err: %d\n", ret);
			goto exit_disable_clk_vote;
		}
		break;
	default:
		pr_err("scm call return unknown response %d\n", resp.result);
		ret = -EINVAL;
		goto exit_disable_clk_vote;
	}

/* Unwind in reverse order of acquisition; success path also falls through. */
exit_disable_clk_vote:
	__qseecom_disable_clk_scale_down(data);

exit_unregister_bus_bw_need:
	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		qseecom_unregister_bus_bandwidth_needs(data);
		mutex_unlock(&qsee_bw_mutex);
	}

exit_free_img_data:
	if (img_data)
		__qseecom_free_coherent_buf(fw_size, img_data, pa);
	return ret;
}
  4517. static int qseecom_unload_commonlib_image(void)
  4518. {
  4519. int ret = -EINVAL;
  4520. struct qseecom_unload_lib_image_ireq unload_req = {0};
  4521. struct qseecom_command_scm_resp resp;
  4522. /* Populate the remaining parameters */
  4523. unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
  4524. /* SCM_CALL to load the image */
  4525. ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
  4526. sizeof(struct qseecom_unload_lib_image_ireq),
  4527. &resp, sizeof(resp));
  4528. if (ret) {
  4529. pr_err("scm_call to unload lib failed : ret %d\n", ret);
  4530. ret = -EIO;
  4531. } else {
  4532. switch (resp.result) {
  4533. case QSEOS_RESULT_SUCCESS:
  4534. break;
  4535. case QSEOS_RESULT_FAILURE:
  4536. pr_err("scm fail resp.result QSEOS_RESULT FAILURE\n");
  4537. break;
  4538. default:
  4539. pr_err("scm call return unknown response %d\n",
  4540. resp.result);
  4541. ret = -EINVAL;
  4542. break;
  4543. }
  4544. }
  4545. return ret;
  4546. }
  4547. static int __qseecom_start_app(struct qseecom_handle **handle,
  4548. char *app_name, uint32_t size)
  4549. {
  4550. int32_t ret = 0;
  4551. unsigned long flags = 0;
  4552. struct qseecom_dev_handle *data = NULL;
  4553. struct qseecom_check_app_ireq app_ireq;
  4554. struct qseecom_registered_app_list *entry = NULL;
  4555. struct qseecom_registered_kclient_list *kclient_entry = NULL;
  4556. bool found_app = false;
  4557. phys_addr_t pa = 0;
  4558. u8 *va = NULL;
  4559. uint32_t fw_size, app_arch;
  4560. uint32_t app_id = 0;
  4561. __wakeup_unregister_listener_kthread();
  4562. __wakeup_unload_app_kthread();
  4563. if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
  4564. pr_err("Not allowed to be called in %d state\n",
  4565. atomic_read(&qseecom.qseecom_state));
  4566. return -EPERM;
  4567. }
  4568. if (!app_name) {
  4569. pr_err("failed to get the app name\n");
  4570. return -EINVAL;
  4571. }
  4572. if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) {
  4573. pr_err("The app_name (%s) with length %zu is not valid\n",
  4574. app_name, strnlen(app_name, MAX_APP_NAME_SIZE));
  4575. return -EINVAL;
  4576. }
  4577. *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
  4578. if (!(*handle))
  4579. return -ENOMEM;
  4580. data = kzalloc(sizeof(*data), GFP_KERNEL);
  4581. if (!data) {
  4582. kfree(*handle);
  4583. *handle = NULL;
  4584. return -ENOMEM;
  4585. }
  4586. mutex_lock(&app_access_lock);
  4587. data->abort = 0;
  4588. data->type = QSEECOM_CLIENT_APP;
  4589. data->released = false;
  4590. data->client.sb_length = size;
  4591. data->client.user_virt_sb_base = 0;
  4592. data->sglistinfo_ptr = (struct sglist_info *)__qseecom_alloc_tzbuf(
  4593. sizeof(struct sglist_info) * MAX_ION_FD,
  4594. &data->sglistinfo_shm.paddr,
  4595. &data->sglistinfo_shm);
  4596. if (!data->sglistinfo_ptr) {
  4597. ret = -ENOMEM;
  4598. goto err;
  4599. }
  4600. init_waitqueue_head(&data->abort_wq);
  4601. app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
  4602. strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
  4603. ret = __qseecom_check_app_exists(app_ireq, &app_id);
  4604. if (ret)
  4605. goto err;
  4606. strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
  4607. if (app_id) {
  4608. pr_warn("App id %d for [%s] app exists\n", app_id,
  4609. (char *)app_ireq.app_name);
  4610. spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
  4611. list_for_each_entry(entry,
  4612. &qseecom.registered_app_list_head, list){
  4613. if (entry->app_id == app_id) {
  4614. if (entry->ref_cnt == U32_MAX) {
  4615. pr_err("App %d (%s) ref_cnt overflow\n",
  4616. app_id, app_ireq.app_name);
  4617. ret = -EINVAL;
  4618. goto err;
  4619. }
  4620. entry->ref_cnt++;
  4621. found_app = true;
  4622. break;
  4623. }
  4624. }
  4625. spin_unlock_irqrestore(
  4626. &qseecom.registered_app_list_lock, flags);
  4627. if (!found_app)
  4628. pr_warn("App_id %d [%s] was loaded but not registered\n",
  4629. ret, (char *)app_ireq.app_name);
  4630. } else {
  4631. /* load the app and get the app_id */
  4632. pr_debug("%s: Loading app for the first time'\n",
  4633. qseecom.pdev->init_name);
  4634. ret = __qseecom_load_fw(data, app_name, &app_id);
  4635. if (ret < 0)
  4636. goto err;
  4637. }
  4638. data->client.app_id = app_id;
  4639. if (!found_app) {
  4640. entry = kmalloc(sizeof(*entry), GFP_KERNEL);
  4641. if (!entry) {
  4642. ret = -ENOMEM;
  4643. goto err;
  4644. }
  4645. entry->app_id = app_id;
  4646. entry->ref_cnt = 1;
  4647. strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
  4648. if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
  4649. ret = -EIO;
  4650. kfree(entry);
  4651. goto err;
  4652. }
  4653. entry->app_arch = app_arch;
  4654. entry->app_blocked = false;
  4655. entry->blocked_on_listener_id = 0;
  4656. entry->check_block = 0;
  4657. spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
  4658. list_add_tail(&entry->list, &qseecom.registered_app_list_head);
  4659. spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
  4660. flags);
  4661. }
  4662. /* Get the physical address of the req/resp buffer */
  4663. ret = __qseecom_alloc_coherent_buf(size, &va, &pa);
  4664. if (ret) {
  4665. pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
  4666. ret);
  4667. goto err;
  4668. }
  4669. /* Populate the structure for sending scm call to load image */
  4670. data->client.sb_virt = va;
  4671. data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
  4672. data->client.sb_phys = (phys_addr_t)pa;
  4673. (*handle)->dev = (void *)data;
  4674. (*handle)->sbuf = (unsigned char *)data->client.sb_virt;
  4675. (*handle)->sbuf_len = data->client.sb_length;
  4676. kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
  4677. if (!kclient_entry) {
  4678. ret = -ENOMEM;
  4679. goto err;
  4680. }
  4681. kclient_entry->handle = *handle;
  4682. spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
  4683. list_add_tail(&kclient_entry->list,
  4684. &qseecom.registered_kclient_list_head);
  4685. spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
  4686. mutex_unlock(&app_access_lock);
  4687. __wakeup_unload_app_kthread();
  4688. return 0;
  4689. err:
  4690. __qseecom_free_coherent_buf(size, va, pa);
  4691. __qseecom_free_tzbuf(&data->sglistinfo_shm);
  4692. kfree(data);
  4693. kfree(*handle);
  4694. *handle = NULL;
  4695. mutex_unlock(&app_access_lock);
  4696. __wakeup_unload_app_kthread();
  4697. return ret;
  4698. }
/*
 * __qseecom_shutdown_app() - kernel-client entry point: unload a TA and
 * free the handle created by __qseecom_start_app().
 * @handle: in/out: handle pointer; set to NULL only on successful shutdown.
 *
 * Returns 0 on success; -EINVAL if the handle is unknown or the unload
 * fails; -EPERM when the driver is not in the READY state.
 */
static int __qseecom_shutdown_app(struct qseecom_handle **handle)
{
	int ret = -EINVAL;
	struct qseecom_dev_handle *data;
	struct qseecom_registered_kclient_list *kclient = NULL;
	unsigned long flags = 0;
	bool found_handle = false;

	__wakeup_unregister_listener_kthread();
	__wakeup_unload_app_kthread();

	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
		pr_err("Not allowed to be called in %d state\n",
			atomic_read(&qseecom.qseecom_state));
		return -EPERM;
	}
	if ((handle == NULL) || (*handle == NULL)) {
		pr_err("Handle is not initialized\n");
		return -EINVAL;
	}
	data = (struct qseecom_dev_handle *) ((*handle)->dev);
	mutex_lock(&app_access_lock);

	/* Unlink the handle from the registered kernel-client list. */
	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
	list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
				list) {
		if (kclient->handle == (*handle)) {
			list_del(&kclient->list);
			found_handle = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
	if (!found_handle)
		pr_err("Unable to find the handle, exiting\n");
	else
		ret = qseecom_unload_app(data, false);

	mutex_unlock(&app_access_lock);
	/*
	 * Free everything only after a successful unload; on failure the
	 * handle stays valid for the caller.
	 * NOTE(review): if qseecom_unload_app() fails, kclient has already
	 * been removed from the list but is neither freed nor re-added —
	 * possible leak; confirm against qseecom_unload_app() semantics.
	 */
	if (ret == 0) {
		if (data->client.sb_virt)
			__qseecom_free_coherent_buf(data->client.sb_length,
				data->client.sb_virt, data->client.sb_phys);
		__qseecom_free_tzbuf(&data->sglistinfo_shm);
		kfree_sensitive(data);
		kfree_sensitive(*handle);
		kfree_sensitive(kclient);
		*handle = NULL;
	}
	__wakeup_unload_app_kthread();
	return ret;
}
/*
 * __qseecom_send_command() - send a command buffer to a loaded TA.
 * @handle:   kernel-client handle from __qseecom_start_app().
 * @send_buf: request buffer (must lie inside the handle's shared buffer;
 *            enforced by __validate_send_cmd_inputs()).
 * @sbuf_len: request length in bytes.
 * @resp_buf: response buffer (also inside the shared buffer).
 * @rbuf_len: response length in bytes.
 *
 * Takes app_access_lock for the duration of the TZ call, voting bus
 * bandwidth and crypto clocks up as needed and releasing those votes
 * afterwards.  Returns 0 on success or a negative errno.
 */
static int __qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
			uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
{
	int ret = 0;
	struct qseecom_send_cmd_req req = {NULL, 0, NULL, 0};
	struct qseecom_dev_handle *data;
	bool perf_enabled = false;	/* true if clocks were voted locally */

	__wakeup_unregister_listener_kthread();
	__wakeup_unload_app_kthread();

	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
		pr_err("Not allowed to be called in %d state\n",
			atomic_read(&qseecom.qseecom_state));
		return -EPERM;
	}

	if (handle == NULL) {
		pr_err("Handle is not initialized\n");
		return -EINVAL;
	}
	data = handle->dev;

	req.cmd_req_len = sbuf_len;
	req.resp_len = rbuf_len;
	req.cmd_req_buf = send_buf;
	req.resp_buf = resp_buf;

	if (__validate_send_cmd_inputs(data, &req))
		return -EINVAL;

	mutex_lock(&app_access_lock);
	if (qseecom.support_bus_scaling) {
		ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
		if (ret) {
			pr_err("Failed to set bw.\n");
			mutex_unlock(&app_access_lock);
			return ret;
		}
	}
	/*
	 * On targets where crypto clock is handled by HLOS,
	 * if clk_access_cnt is zero and perf_enabled is false,
	 * then the crypto clock was not enabled before sending cmd
	 * to tz, qseecom will enable the clock to avoid service failure.
	 */
	if (!qseecom.no_clock_support &&
		!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
		pr_debug("ce clock is not enabled!\n");
		ret = qseecom_perf_enable(data);
		if (ret) {
			pr_err("Failed to vote for clock with err %d\n",
				ret);
			mutex_unlock(&app_access_lock);
			return -EINVAL;
		}
		perf_enabled = true;
	}
	/* The "securemm" TA still uses the legacy command format. */
	if (!strcmp(data->client.app_name, "securemm"))
		data->use_legacy_cmd = true;

	ret = __qseecom_send_cmd(data, &req, false);

	data->use_legacy_cmd = false;
	if (qseecom.support_bus_scaling)
		__qseecom_add_bw_scale_down_timer(
			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);

	/* Drop the clock votes taken above for this call only. */
	if (perf_enabled) {
		qsee_disable_clock_vote(data, CLK_DFAB);
		qsee_disable_clock_vote(data, CLK_SFPB);
	}

	mutex_unlock(&app_access_lock);

	if (ret)
		return ret;

	pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
			req.resp_len, req.resp_buf);
	return ret;
}
  4817. #if IS_ENABLED(CONFIG_QSEECOM_PROXY)
  4818. const static struct qseecom_drv_ops qseecom_driver_ops = {
  4819. .qseecom_send_command = __qseecom_send_command,
  4820. .qseecom_start_app = __qseecom_start_app,
  4821. .qseecom_shutdown_app = __qseecom_shutdown_app,
  4822. };
  4823. int get_qseecom_kernel_fun_ops(void)
  4824. {
  4825. return provide_qseecom_kernel_fun_ops(&qseecom_driver_ops);
  4826. }
  4827. #else
  4828. int qseecom_start_app(struct qseecom_handle **handle,
  4829. char *app_name, uint32_t size)
  4830. {
  4831. return __qseecom_start_app(handle, app_name, size);
  4832. }
  4833. EXPORT_SYMBOL(qseecom_start_app);
  4834. int qseecom_shutdown_app(struct qseecom_handle **handle)
  4835. {
  4836. return __qseecom_shutdown_app(handle);
  4837. }
  4838. EXPORT_SYMBOL(qseecom_shutdown_app);
  4839. int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
  4840. uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
  4841. {
  4842. return __qseecom_send_command(handle, send_buf, sbuf_len,
  4843. resp_buf, rbuf_len);
  4844. }
  4845. EXPORT_SYMBOL(qseecom_send_command);
  4846. #endif
  4847. int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
  4848. {
  4849. int ret = 0;
  4850. if ((handle == NULL) || (handle->dev == NULL)) {
  4851. pr_err("No valid kernel client\n");
  4852. return -EINVAL;
  4853. }
  4854. if (high) {
  4855. if (qseecom.support_bus_scaling) {
  4856. mutex_lock(&qsee_bw_mutex);
  4857. __qseecom_register_bus_bandwidth_needs(handle->dev,
  4858. HIGH);
  4859. mutex_unlock(&qsee_bw_mutex);
  4860. } else {
  4861. ret = qseecom_perf_enable(handle->dev);
  4862. if (ret)
  4863. pr_err("Failed to vote for clock with err %d\n",
  4864. ret);
  4865. }
  4866. } else {
  4867. if (!qseecom.support_bus_scaling) {
  4868. qsee_disable_clock_vote(handle->dev, CLK_DFAB);
  4869. qsee_disable_clock_vote(handle->dev, CLK_SFPB);
  4870. } else {
  4871. mutex_lock(&qsee_bw_mutex);
  4872. qseecom_unregister_bus_bandwidth_needs(handle->dev);
  4873. mutex_unlock(&qsee_bw_mutex);
  4874. }
  4875. }
  4876. return ret;
  4877. }
  4878. EXPORT_SYMBOL(qseecom_set_bandwidth);
/*
 * qseecom_process_listener_from_smcinvoke() - let the smcinvoke driver hand
 * a pending listener request over to qseecom for processing.
 * @result:        in/out: SCM result word (smcinvoke's ret[1] slot).
 * @response_type: in/out: SCM response-type word (smcinvoke's ret[0] slot);
 *                 also reused below as the app id for the dummy contexts.
 * @data:          in/out: SCM data word (e.g. listener id).
 *
 * Builds dummy app/client contexts around the three words, runs qseecom's
 * listener machinery under app_access_lock, then writes the (re-swapped)
 * words back.  Returns 0 on success or a negative errno.
 */
int qseecom_process_listener_from_smcinvoke(uint32_t *result,
		u64 *response_type, unsigned int *data)
{
	struct qseecom_registered_app_list dummy_app_entry;
	struct qseecom_dev_handle dummy_private_data = {0};
	struct qseecom_command_scm_resp resp;
	int ret = 0;

	if (!result || !response_type || !data) {
		pr_err("input parameter NULL\n");
		return -EINVAL;
	}

	memset((void *)&dummy_app_entry, 0, sizeof(dummy_app_entry));
	/*
	 * smcinvoke expects result in scm call resp.ret[1] and type in ret[0],
	 * while qseecom expects result in ret[0] and type in ret[1].
	 * To simplify API interface and code changes in smcinvoke, here
	 * internally switch result and resp_type to let qseecom work with
	 * smcinvoke and upstream scm driver protocol.
	 */
	resp.result = *response_type;
	resp.resp_type = *result;
	resp.data = *data;
	/* The response_type word doubles as the app id for the dummies. */
	dummy_private_data.client.app_id = *response_type;
	dummy_private_data.client.from_smcinvoke = true;
	dummy_app_entry.app_id = *response_type;

	mutex_lock(&app_access_lock);
	if (qseecom.qsee_reentrancy_support)
		ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
					&dummy_private_data);
	else
		ret = __qseecom_process_incomplete_cmd(&dummy_private_data,
					&resp);
	mutex_unlock(&app_access_lock);
	if (ret)
		pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
			resp.result, resp.data, resp.resp_type, ret);
	/* Swap back into smcinvoke's view of the result tuple. */
	*result = resp.resp_type;
	*response_type = resp.result;
	*data = resp.data;
	return ret;
}
EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
/*
 * qseecom_send_resp() - signal that a listener response is ready: set the
 * global flag, then wake any thread blocked on send_resp_wq.  Always
 * returns 0.
 */
static int qseecom_send_resp(void)
{
	qseecom.send_resp_flag = 1;
	wake_up_interruptible(&qseecom.send_resp_wq);
	return 0;
}
  4927. static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
  4928. {
  4929. struct qseecom_registered_listener_list *this_lstnr = NULL;
  4930. pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
  4931. this_lstnr = __qseecom_find_svc(data->listener.id);
  4932. if (this_lstnr == NULL)
  4933. return -EINVAL;
  4934. qseecom.send_resp_flag = 1;
  4935. this_lstnr->send_resp_flag = 1;
  4936. wake_up_interruptible(&qseecom.send_resp_wq);
  4937. return 0;
  4938. }
/*
 * __validate_send_modfd_resp_inputs() - sanity-check a listener's
 * modified-fd response before it is applied to the shared buffer.
 * @data:       listener device handle (only NULL-checked here).
 * @resp:       user-supplied response descriptor.
 * @this_lstnr: registered listener owning the shared buffer.
 *
 * Verifies that the response buffer pointer is non-NULL, its length fits
 * the listener's shared buffer, the [ptr, ptr+len) range lies entirely
 * inside the user-space mapping of that buffer, and every ion-fd
 * cmd_buf_offset falls within the response.  Returns 0 if valid,
 * -EINVAL otherwise.
 */
static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
			struct qseecom_send_modfd_listener_resp *resp,
			struct qseecom_registered_listener_list *this_lstnr)
{
	int i;

	if (!data || !resp || !this_lstnr) {
		pr_err("listener handle or resp msg is null\n");
		return -EINVAL;
	}

	if (resp->resp_buf_ptr == NULL) {
		pr_err("resp buffer is null\n");
		return -EINVAL;
	}
	/* validate resp buf length */
	if ((resp->resp_len == 0) ||
			(resp->resp_len > this_lstnr->sb_length)) {
		pr_err("resp buf length %d not valid\n", resp->resp_len);
		return -EINVAL;
	}

	/* Reject arithmetic that would wrap before the range checks below. */
	if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_buf\n");
		return -EINVAL;
	}
	if ((uintptr_t)this_lstnr->user_virt_sb_base >
					(ULONG_MAX - this_lstnr->sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}
	/* validate resp buf */
	if (((uintptr_t)resp->resp_buf_ptr <
		(uintptr_t)this_lstnr->user_virt_sb_base) ||
		((uintptr_t)resp->resp_buf_ptr >=
		((uintptr_t)this_lstnr->user_virt_sb_base +
			this_lstnr->sb_length)) ||
		(((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
		((uintptr_t)this_lstnr->user_virt_sb_base +
			this_lstnr->sb_length))) {
		pr_err("resp buf is out of shared buffer region\n");
		return -EINVAL;
	}

	/* validate offsets */
	for (i = 0; i < MAX_ION_FD; i++) {
		if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
			pr_err("Invalid offset %d = 0x%x\n",
				i, resp->ifd_data[i].cmd_buf_offset);
			return -EINVAL;
		}
	}
	return 0;
}
/*
 * __qseecom_send_modfd_resp() - deliver a listener response that carries
 * ion-fd references.
 * @data:          listener device handle.
 * @argp:          user pointer to a qseecom_send_modfd_listener_resp.
 * @is_64bit_addr: select the 64-bit address variant of the cmd-buf update.
 *
 * Copies in and validates the descriptor, translates the user-space
 * response pointer into the kernel mapping of the listener's shared
 * buffer, patches the fd addresses into the command buffer, then raises
 * the response flags and wakes waiters.  Returns 0 on success, -EINVAL on
 * any validation/copy failure.
 */
static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
				void __user *argp, bool is_64bit_addr)
{
	struct qseecom_send_modfd_listener_resp resp;
	struct qseecom_registered_listener_list *this_lstnr = NULL;

	if (copy_from_user(&resp, argp, sizeof(resp))) {
		pr_err("copy_from_user failed\n");
		return -EINVAL;
	}

	this_lstnr = __qseecom_find_svc(data->listener.id);
	if (this_lstnr == NULL)
		return -EINVAL;

	if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
		return -EINVAL;

	/* Translate the user-space resp pointer into the kernel mapping. */
	resp.resp_buf_ptr = this_lstnr->sb_virt +
		(uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);

	if (!is_64bit_addr)
		__qseecom_update_cmd_buf(&resp, false, data);
	else
		__qseecom_update_cmd_buf_64(&resp, false, data);

	qseecom.send_resp_flag = 1;
	this_lstnr->send_resp_flag = 1;
	wake_up_interruptible(&qseecom.send_resp_wq);
	return 0;
}
/* ioctl backend: listener modfd response using 32-bit buffer addresses. */
static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
				void __user *argp)
{
	return __qseecom_send_modfd_resp(data, argp, false);
}
/* ioctl backend: listener modfd response using 64-bit buffer addresses. */
static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
				void __user *argp)
{
	return __qseecom_send_modfd_resp(data, argp, true);
}
  5024. static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
  5025. void __user *argp)
  5026. {
  5027. struct qseecom_qseos_version_req req;
  5028. if (copy_from_user(&req, argp, sizeof(req))) {
  5029. pr_err("copy_from_user failed\n");
  5030. return -EINVAL;
  5031. }
  5032. req.qseos_version = qseecom.qseos_version;
  5033. if (copy_to_user(argp, &req, sizeof(req))) {
  5034. pr_err("copy_to_user failed\n");
  5035. return -EINVAL;
  5036. }
  5037. return 0;
  5038. }
/*
 * __qseecom_enable_clk() - take a reference on the crypto-engine clocks.
 * @ce: CLK_QSEE or CLK_CE_DRV, selecting which clock bundle to enable.
 *
 * Reference-counted under clk_access_lock: only the 0 -> 1 transition
 * actually prepares/enables the core, interface, and bus clocks.
 * Returns 0 on success, -EINVAL for an unknown @ce, -EIO if any clock
 * fails to enable (or the refcount would overflow).
 */
static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
{
	int rc = 0;
	struct qseecom_clk *qclk = NULL;

	if (qseecom.no_clock_support)
		return 0;

	if (ce == CLK_QSEE)
		qclk = &qseecom.qsee;
	if (ce == CLK_CE_DRV)
		qclk = &qseecom.ce_drv;

	if (qclk == NULL) {
		pr_err("CLK type not supported\n");
		return -EINVAL;
	}
	mutex_lock(&clk_access_lock);

	if (qclk->clk_access_cnt == ULONG_MAX) {
		pr_err("clk_access_cnt beyond limitation\n");
		goto err;
	}
	if (qclk->clk_access_cnt > 0) {
		/* Clocks already running: just bump the refcount. */
		qclk->clk_access_cnt++;
		mutex_unlock(&clk_access_lock);
		return rc;
	}

	/* Enable CE core clk */
	if (qclk->ce_core_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_core_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE core clk\n");
			goto err;
		}
	}
	/* Enable CE clk */
	if (qclk->ce_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE iface clk\n");
			goto ce_clk_err;
		}
	}
	/* Enable AXI clk */
	if (qclk->ce_bus_clk != NULL) {
		rc = clk_prepare_enable(qclk->ce_bus_clk);
		if (rc) {
			pr_err("Unable to enable/prepare CE bus clk\n");
			goto ce_bus_clk_err;
		}
	}
	qclk->clk_access_cnt++;
	mutex_unlock(&clk_access_lock);
	return 0;

/* Error unwind: disable in reverse order of the enables above. */
ce_bus_clk_err:
	if (qclk->ce_clk != NULL)
		clk_disable_unprepare(qclk->ce_clk);
ce_clk_err:
	if (qclk->ce_core_clk != NULL)
		clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	return -EIO;
}
  5100. static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
  5101. {
  5102. struct qseecom_clk *qclk;
  5103. if (qseecom.no_clock_support)
  5104. return;
  5105. if (ce == CLK_QSEE)
  5106. qclk = &qseecom.qsee;
  5107. else
  5108. qclk = &qseecom.ce_drv;
  5109. mutex_lock(&clk_access_lock);
  5110. if (qclk->clk_access_cnt == 0) {
  5111. mutex_unlock(&clk_access_lock);
  5112. return;
  5113. }
  5114. if (qclk->clk_access_cnt == 1) {
  5115. if (qclk->ce_clk != NULL)
  5116. clk_disable_unprepare(qclk->ce_clk);
  5117. if (qclk->ce_core_clk != NULL)
  5118. clk_disable_unprepare(qclk->ce_core_clk);
  5119. if (qclk->ce_bus_clk != NULL)
  5120. clk_disable_unprepare(qclk->ce_bus_clk);
  5121. }
  5122. qclk->clk_access_cnt--;
  5123. mutex_unlock(&clk_access_lock);
  5124. }
/*
 * Take a bus-bandwidth/clock vote on behalf of one client.
 *
 * @data:     per-client handle; its perf_enabled / fast_load_enabled flag
 *            records which vote this client holds.
 * @clk_type: CLK_DFAB (perf/bandwidth vote) or CLK_SFPB (fast-load vote).
 *
 * Returns 0 on success (or when clock/bus-scale support is absent) and a
 * negative value when the bus-scale request fails.
 *
 * Only the first voter of each type issues the actual bus-scale request;
 * later voters just bump the count.  When both vote types are held the
 * combined bandwidth level 3 is requested; a lone DFAB vote uses level 1,
 * a lone SFPB vote level 2.  All counters are protected by qsee_bw_mutex.
 */
static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
						int32_t clk_type)
{
	int ret = 0;
	struct qseecom_clk *qclk;

	if (qseecom.no_clock_support)
		return 0;

	qclk = &qseecom.qsee;
	/* No bus-scale client registered: nothing to vote on. */
	if (!qseecom.qsee_perf_client)
		return ret;

	switch (clk_type) {
	case CLK_DFAB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_bw_count) {
			if (qseecom.qsee_sfpb_bw_count > 0)
				/* SFPB vote active too: combined level 3. */
				ret = qseecom_bus_scale_update_request(
					qseecom.qsee_perf_client, 3);
			else {
				/* Core source clock must be on for the vote. */
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					qseecom_bus_scale_update_request(
						qseecom.qsee_perf_client, 1);
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}
			if (ret)
				pr_err("DFAB Bandwidth req failed (%d)\n",
								ret);
			else {
				qseecom.qsee_bw_count++;
				data->perf_enabled = true;
			}
		} else {
			/* Vote already held: only add a reference. */
			qseecom.qsee_bw_count++;
			data->perf_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	case CLK_SFPB:
		mutex_lock(&qsee_bw_mutex);
		if (!qseecom.qsee_sfpb_bw_count) {
			if (qseecom.qsee_bw_count > 0)
				/* DFAB vote active too: combined level 3. */
				ret = qseecom_bus_scale_update_request(
					qseecom.qsee_perf_client, 3);
			else {
				if (qclk->ce_core_src_clk != NULL)
					ret = __qseecom_enable_clk(CLK_QSEE);
				if (!ret) {
					ret =
					qseecom_bus_scale_update_request(
						qseecom.qsee_perf_client, 2);
					if ((ret) &&
						(qclk->ce_core_src_clk != NULL))
						__qseecom_disable_clk(CLK_QSEE);
				}
			}
			if (ret)
				pr_err("SFPB Bandwidth req failed (%d)\n",
								ret);
			else {
				qseecom.qsee_sfpb_bw_count++;
				data->fast_load_enabled = true;
			}
		} else {
			/* Vote already held: only add a reference. */
			qseecom.qsee_sfpb_bw_count++;
			data->fast_load_enabled = true;
		}
		mutex_unlock(&qsee_bw_mutex);
		break;
	default:
		pr_err("Clock type not defined\n");
		break;
	}
	return ret;
}
  5204. static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
  5205. int32_t clk_type)
  5206. {
  5207. int32_t ret = 0;
  5208. struct qseecom_clk *qclk;
  5209. qclk = &qseecom.qsee;
  5210. if (qseecom.no_clock_support)
  5211. return;
  5212. if (!qseecom.qsee_perf_client)
  5213. return;
  5214. switch (clk_type) {
  5215. case CLK_DFAB:
  5216. mutex_lock(&qsee_bw_mutex);
  5217. if (qseecom.qsee_bw_count == 0) {
  5218. pr_err("Client error.Extra call to disable DFAB clk\n");
  5219. mutex_unlock(&qsee_bw_mutex);
  5220. return;
  5221. }
  5222. if (qseecom.qsee_bw_count == 1) {
  5223. if (qseecom.qsee_sfpb_bw_count > 0)
  5224. ret = qseecom_bus_scale_update_request(
  5225. qseecom.qsee_perf_client, 2);
  5226. else {
  5227. ret = qseecom_bus_scale_update_request(
  5228. qseecom.qsee_perf_client, 0);
  5229. if ((!ret) && (qclk->ce_core_src_clk != NULL))
  5230. __qseecom_disable_clk(CLK_QSEE);
  5231. }
  5232. if (ret)
  5233. pr_err("SFPB Bandwidth req fail (%d)\n",
  5234. ret);
  5235. else {
  5236. qseecom.qsee_bw_count--;
  5237. data->perf_enabled = false;
  5238. }
  5239. } else {
  5240. qseecom.qsee_bw_count--;
  5241. data->perf_enabled = false;
  5242. }
  5243. mutex_unlock(&qsee_bw_mutex);
  5244. break;
  5245. case CLK_SFPB:
  5246. mutex_lock(&qsee_bw_mutex);
  5247. if (qseecom.qsee_sfpb_bw_count == 0) {
  5248. pr_err("Client error.Extra call to disable SFPB clk\n");
  5249. mutex_unlock(&qsee_bw_mutex);
  5250. return;
  5251. }
  5252. if (qseecom.qsee_sfpb_bw_count == 1) {
  5253. if (qseecom.qsee_bw_count > 0)
  5254. ret = qseecom_bus_scale_update_request(
  5255. qseecom.qsee_perf_client, 1);
  5256. else {
  5257. ret = qseecom_bus_scale_update_request(
  5258. qseecom.qsee_perf_client, 0);
  5259. if ((!ret) && (qclk->ce_core_src_clk != NULL))
  5260. __qseecom_disable_clk(CLK_QSEE);
  5261. }
  5262. if (ret)
  5263. pr_err("SFPB Bandwidth req fail (%d)\n",
  5264. ret);
  5265. else {
  5266. qseecom.qsee_sfpb_bw_count--;
  5267. data->fast_load_enabled = false;
  5268. }
  5269. } else {
  5270. qseecom.qsee_sfpb_bw_count--;
  5271. data->fast_load_enabled = false;
  5272. }
  5273. mutex_unlock(&qsee_bw_mutex);
  5274. break;
  5275. default:
  5276. pr_err("Clock type not defined\n");
  5277. break;
  5278. }
  5279. }
/*
 * Load an external ELF image (shared via a dma-buf fd) into the secure
 * environment through an SCM call.
 *
 * Flow: copy the request from userspace, map the dma-buf to obtain its
 * physical address, build a 32- or 64-bit load command depending on the
 * QSEE version, vote for bus bandwidth and clocks, clean the cache so TZ
 * sees the image, issue the SCM call, then invalidate the cache and
 * unwind all resources in reverse order.
 *
 * Returns 0 on success; -EFAULT on user-copy or SCM failure, -ENOMEM if
 * the dma-buf cannot be mapped, -EINVAL for bad lengths, -EIO for
 * bandwidth/clock failures.
 */
static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
						void __user *argp)
{
	struct qseecom_load_img_req load_img_req;
	int uret = 0;
	int ret = 0;
	phys_addr_t pa = 0;
	size_t len;
	struct qseecom_load_app_ireq load_req;
	struct qseecom_load_app_64bit_ireq load_req_64bit;
	struct qseecom_command_scm_resp resp;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sg_table *sgt = NULL;
	struct dma_buf_attachment *attach = NULL;
	struct dma_buf *dmabuf = NULL;
	void *va = NULL;

	/* Copy the relevant information needed for loading the image */
	if (copy_from_user(&load_img_req,
				(void __user *)argp,
				sizeof(struct qseecom_load_img_req))) {
		pr_err("copy_from_user failed\n");
		return -EFAULT;
	}
	/* Get the handle of the shared fd */
	ret = qseecom_vaddr_map(load_img_req.ifd_data_fd, &pa, &va,
					&sgt, &attach, &len, &dmabuf);
	if (ret) {
		pr_err("Failed to map vaddr for ion_fd %d\n",
				load_img_req.ifd_data_fd);
		return -ENOMEM;
	}
	/* The user-supplied lengths must fit inside the mapped buffer. */
	if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
		pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
				len, load_img_req.mdt_len,
				load_img_req.img_len);
		ret = -EINVAL;
		goto exit_cpu_restore;
	}
	/* Populate the structure for sending scm call to load image */
	if (qseecom.qsee_version < QSEE_VERSION_40) {
		/* Legacy QSEE: 32-bit physical address command layout. */
		load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
		load_req.mdt_len = load_img_req.mdt_len;
		load_req.img_len = load_img_req.img_len;
		load_req.phy_addr = (uint32_t)pa;
		cmd_buf = (void *)&load_req;
		cmd_len = sizeof(struct qseecom_load_app_ireq);
	} else {
		/* QSEE >= 4.0: 64-bit physical address command layout. */
		load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
		load_req_64bit.mdt_len = load_img_req.mdt_len;
		load_req_64bit.img_len = load_img_req.img_len;
		load_req_64bit.phy_addr = (uint64_t)pa;
		cmd_buf = (void *)&load_req_64bit;
		cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
	}
	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
		mutex_unlock(&qsee_bw_mutex);
		if (ret) {
			ret = -EIO;
			goto exit_cpu_restore;
		}
	}
	/* Vote for the SFPB clock */
	ret = __qseecom_enable_clk_scale_up(data);
	if (ret) {
		ret = -EIO;
		goto exit_register_bus_bandwidth_needs;
	}
	/* Clean the cache so TZ reads a coherent image. */
	ret = qseecom_dmabuf_cache_operations(dmabuf,
					QSEECOM_CACHE_CLEAN);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit_disable_clock;
	}
	/* SCM_CALL to load the external elf */
	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
			&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call to load failed : ret %d\n",
				ret);
		ret = -EFAULT;
		goto exit_disable_clock;
	}
	/* Invalidate so the CPU sees any data TZ wrote back. */
	ret = qseecom_dmabuf_cache_operations(dmabuf,
					QSEECOM_CACHE_INVALIDATE);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		goto exit_disable_clock;
	}
	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		/* TZ needs listener servicing before the load completes. */
		pr_err("%s: qseos result incomplete\n", __func__);
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret)
			pr_err("process_incomplete_cmd failed: err: %d\n", ret);
		break;
	case QSEOS_RESULT_FAILURE:
		pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
		ret = -EFAULT;
		break;
	default:
		pr_err("scm_call response result %d not supported\n",
				resp.result);
		ret = -EFAULT;
		break;
	}

	/* Unwind: clocks, then bandwidth registration, then the mapping. */
exit_disable_clock:
	__qseecom_disable_clk_scale_down(data);
exit_register_bus_bandwidth_needs:
	if (qseecom.support_bus_scaling) {
		mutex_lock(&qsee_bw_mutex);
		uret = qseecom_unregister_bus_bandwidth_needs(data);
		mutex_unlock(&qsee_bw_mutex);
		if (uret)
			pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
					uret, ret);
	}
exit_cpu_restore:
	if (dmabuf) {
		qseecom_vaddr_unmap(va, sgt, attach, dmabuf);
		MAKE_NULL(sgt, attach, dmabuf);
	}
	return ret;
}
  5408. static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
  5409. {
  5410. int ret = 0;
  5411. struct qseecom_command_scm_resp resp;
  5412. struct qseecom_unload_app_ireq req;
  5413. /* unavailable client app */
  5414. data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
  5415. /* Populate the structure for sending scm call to unload image */
  5416. req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
  5417. /* SCM_CALL to unload the external elf */
  5418. ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
  5419. sizeof(struct qseecom_unload_app_ireq),
  5420. &resp, sizeof(resp));
  5421. if (ret) {
  5422. pr_err("scm_call to unload failed : ret %d\n",
  5423. ret);
  5424. ret = -EFAULT;
  5425. goto qseecom_unload_external_elf_scm_err;
  5426. }
  5427. if (resp.result == QSEOS_RESULT_INCOMPLETE) {
  5428. ret = __qseecom_process_incomplete_cmd(data, &resp);
  5429. if (ret)
  5430. pr_err("process_incomplete_cmd fail err: %d\n",
  5431. ret);
  5432. } else {
  5433. if (resp.result != QSEOS_RESULT_SUCCESS) {
  5434. pr_err("scm_call to unload image failed resp.result =%d\n",
  5435. resp.result);
  5436. ret = -EFAULT;
  5437. }
  5438. }
  5439. qseecom_unload_external_elf_scm_err:
  5440. return ret;
  5441. }
  5442. static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
  5443. void __user *argp)
  5444. {
  5445. int32_t ret = 0;
  5446. struct qseecom_qseos_app_load_query query_req = { {0} };
  5447. struct qseecom_check_app_ireq req;
  5448. struct qseecom_registered_app_list *entry = NULL;
  5449. unsigned long flags = 0;
  5450. uint32_t app_arch = 0, app_id = 0;
  5451. bool found_app = false;
  5452. /* Copy the relevant information needed for loading the image */
  5453. if (copy_from_user(&query_req, (void __user *)argp,
  5454. sizeof(struct qseecom_qseos_app_load_query))) {
  5455. pr_err("copy_from_user failed\n");
  5456. ret = -EFAULT;
  5457. goto exit_free;
  5458. }
  5459. req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
  5460. query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
  5461. strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
  5462. ret = __qseecom_check_app_exists(req, &app_id);
  5463. if (ret) {
  5464. pr_err(" scm call to check if app is loaded failed\n");
  5465. goto exit_free;
  5466. }
  5467. if (app_id) {
  5468. pr_debug("App id %d (%s) already exists\n", app_id,
  5469. (char *)(req.app_name));
  5470. spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
  5471. list_for_each_entry(entry,
  5472. &qseecom.registered_app_list_head, list){
  5473. if (entry->app_id == app_id) {
  5474. app_arch = entry->app_arch;
  5475. if (entry->ref_cnt == U32_MAX) {
  5476. pr_err("App %d (%s) ref_cnt overflow\n",
  5477. app_id, req.app_name);
  5478. ret = -EINVAL;
  5479. spin_unlock_irqrestore(
  5480. &qseecom.registered_app_list_lock,
  5481. flags);
  5482. goto exit_free;
  5483. }
  5484. entry->ref_cnt++;
  5485. found_app = true;
  5486. break;
  5487. }
  5488. }
  5489. spin_unlock_irqrestore(
  5490. &qseecom.registered_app_list_lock, flags);
  5491. data->client.app_id = app_id;
  5492. query_req.app_id = app_id;
  5493. if (app_arch) {
  5494. data->client.app_arch = app_arch;
  5495. query_req.app_arch = app_arch;
  5496. } else {
  5497. data->client.app_arch = 0;
  5498. query_req.app_arch = 0;
  5499. }
  5500. strlcpy(data->client.app_name, query_req.app_name,
  5501. MAX_APP_NAME_SIZE);
  5502. /*
  5503. * If app was loaded by appsbl before and was not registered,
  5504. * regiser this app now.
  5505. */
  5506. if (!found_app) {
  5507. pr_debug("Register app %d [%s] which was loaded before\n",
  5508. ret, (char *)query_req.app_name);
  5509. entry = kmalloc(sizeof(*entry), GFP_KERNEL);
  5510. if (!entry) {
  5511. ret = -ENOMEM;
  5512. goto exit_free;
  5513. }
  5514. entry->app_id = app_id;
  5515. entry->ref_cnt = 1;
  5516. entry->app_arch = data->client.app_arch;
  5517. strlcpy(entry->app_name, data->client.app_name,
  5518. MAX_APP_NAME_SIZE);
  5519. entry->app_blocked = false;
  5520. entry->blocked_on_listener_id = 0;
  5521. entry->check_block = 0;
  5522. spin_lock_irqsave(&qseecom.registered_app_list_lock,
  5523. flags);
  5524. list_add_tail(&entry->list,
  5525. &qseecom.registered_app_list_head);
  5526. spin_unlock_irqrestore(
  5527. &qseecom.registered_app_list_lock, flags);
  5528. }
  5529. if (copy_to_user(argp, &query_req, sizeof(query_req))) {
  5530. pr_err("copy_to_user failed\n");
  5531. ret = -EFAULT;
  5532. goto exit_free;
  5533. }
  5534. ret = -EEXIST; /* app already loaded */
  5535. goto exit_free;
  5536. }
  5537. exit_free:
  5538. return ret; /* app not loaded */
  5539. }
  5540. static int __qseecom_get_ce_pipe_info(
  5541. enum qseecom_key_management_usage_type usage,
  5542. uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
  5543. {
  5544. int ret = -EINVAL;
  5545. int i, j;
  5546. struct qseecom_ce_info_use *p = NULL;
  5547. int total = 0;
  5548. struct qseecom_ce_pipe_entry *pcepipe;
  5549. switch (usage) {
  5550. case QSEOS_KM_USAGE_DISK_ENCRYPTION:
  5551. case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
  5552. case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
  5553. if (qseecom.support_fde) {
  5554. p = qseecom.ce_info.fde;
  5555. total = qseecom.ce_info.num_fde;
  5556. } else {
  5557. pr_err("system does not support fde\n");
  5558. return -EINVAL;
  5559. }
  5560. break;
  5561. case QSEOS_KM_USAGE_FILE_ENCRYPTION:
  5562. if (qseecom.support_pfe) {
  5563. p = qseecom.ce_info.pfe;
  5564. total = qseecom.ce_info.num_pfe;
  5565. } else {
  5566. pr_err("system does not support pfe\n");
  5567. return -EINVAL;
  5568. }
  5569. break;
  5570. default:
  5571. pr_err("unsupported usage %d\n", usage);
  5572. return -EINVAL;
  5573. }
  5574. for (j = 0; j < total; j++) {
  5575. if (p->unit_num == unit) {
  5576. pcepipe = p->ce_pipe_entry;
  5577. for (i = 0; i < p->num_ce_pipe_entries; i++) {
  5578. (*ce_hw)[i] = pcepipe->ce_num;
  5579. *pipe = pcepipe->ce_pipe_pair;
  5580. pcepipe++;
  5581. }
  5582. ret = 0;
  5583. break;
  5584. }
  5585. p++;
  5586. }
  5587. return ret;
  5588. }
/*
 * Ask TZ to generate and persist a key for the given usage.
 *
 * @data:  client handle (needed for incomplete-command processing).
 * @usage: key-management usage; validated against the supported range.
 * @ireq:  pre-filled QSEOS_GENERATE_KEY request.
 *
 * Returns 0 on success or when the key id already exists (treated as
 * success), -EFAULT on bad usage or SCM failure, -EINVAL on a TZ-level
 * failure.  CLK_QSEE is held around the SCM call.
 */
static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_generate_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_generate_ireq),
		&resp, sizeof(resp));
	if (ret) {
		/*
		 * -EINVAL with KEY_ID_EXISTS means TZ rejected the request
		 * because the key is already there — not an error.
		 */
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
			pr_debug("Key ID exists.\n");
			ret = 0;
		} else {
			pr_err("scm call to generate key failed : %d\n", ret);
			ret = -EFAULT;
		}
		goto generate_key_exit;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
		/* Existing key id is fine; keep ret == 0. */
		pr_debug("Key ID exists.\n");
		break;
	case QSEOS_RESULT_INCOMPLETE:
		/* Service listener requests, then re-check the result. */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
				pr_debug("Key ID exists.\n");
				ret = 0;
			} else {
				pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
			}
		}
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("gen key scm call failed resp.result %d\n", resp.result);
		ret = -EINVAL;
		break;
	}
generate_key_exit:
	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
/*
 * Ask TZ to delete a previously saved key for the given usage.
 *
 * @data:  client handle (needed for incomplete-command processing).
 * @usage: key-management usage; validated against the supported range.
 * @ireq:  pre-filled QSEOS_DELETE_KEY request.
 *
 * Returns 0 on success, -EFAULT on bad usage or SCM failure, -ERANGE when
 * TZ reports the maximum password attempts were reached, -EINVAL on other
 * TZ-level failures.  CLK_QSEE is held around the SCM call.
 */
static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_delete_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_delete_ireq),
		&resp, sizeof(struct qseecom_command_scm_resp));
	if (ret) {
		/* Distinguish TZ's max-attempt rejection from real errors. */
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
			pr_debug("Max attempts to input password reached.\n");
			ret = -ERANGE;
		} else {
			pr_err("scm call to delete key failed : %d\n", ret);
			ret = -EFAULT;
		}
		goto del_key_exit;
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		/* Service listener requests, then re-check the result. */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (ret) {
			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
			if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
				pr_debug("Max attempts to input password reached.\n");
				ret = -ERANGE;
			}
		}
		break;
	case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
		pr_debug("Max attempts to input password reached.\n");
		ret = -ERANGE;
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("Delete key scm call failed resp.result %d\n",
			resp.result);
		ret = -EINVAL;
		break;
	}
del_key_exit:
	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
  5702. static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
  5703. enum qseecom_key_management_usage_type usage,
  5704. struct qseecom_key_select_ireq *ireq)
  5705. {
  5706. struct qseecom_command_scm_resp resp;
  5707. int ret;
  5708. if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
  5709. usage >= QSEOS_KM_USAGE_MAX) {
  5710. pr_err("Error:: unsupported usage %d\n", usage);
  5711. return -EFAULT;
  5712. }
  5713. ret = __qseecom_enable_clk(CLK_QSEE);
  5714. if (ret)
  5715. return ret;
  5716. if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
  5717. ret = __qseecom_enable_clk(CLK_CE_DRV);
  5718. if (ret)
  5719. return ret;
  5720. }
  5721. ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
  5722. ireq, sizeof(struct qseecom_key_select_ireq),
  5723. &resp, sizeof(struct qseecom_command_scm_resp));
  5724. if (ret) {
  5725. if (ret == -EINVAL &&
  5726. resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
  5727. pr_debug("Max attempts to input password reached.\n");
  5728. ret = -ERANGE;
  5729. } else if (ret == -EINVAL &&
  5730. resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
  5731. pr_debug("Set Key operation under processing...\n");
  5732. ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
  5733. } else {
  5734. pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
  5735. ret);
  5736. ret = -EFAULT;
  5737. }
  5738. goto set_key_exit;
  5739. }
  5740. switch (resp.result) {
  5741. case QSEOS_RESULT_SUCCESS:
  5742. break;
  5743. case QSEOS_RESULT_INCOMPLETE:
  5744. ret = __qseecom_process_incomplete_cmd(data, &resp);
  5745. if (ret) {
  5746. pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
  5747. resp.result);
  5748. if (resp.result ==
  5749. QSEOS_RESULT_FAIL_PENDING_OPERATION) {
  5750. pr_debug("Set Key operation under processing...\n");
  5751. ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
  5752. }
  5753. if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
  5754. pr_debug("Max attempts to input password reached.\n");
  5755. ret = -ERANGE;
  5756. }
  5757. }
  5758. break;
  5759. case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
  5760. pr_debug("Max attempts to input password reached.\n");
  5761. ret = -ERANGE;
  5762. break;
  5763. case QSEOS_RESULT_FAIL_PENDING_OPERATION:
  5764. pr_debug("Set Key operation under processing...\n");
  5765. ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
  5766. break;
  5767. case QSEOS_RESULT_FAILURE:
  5768. default:
  5769. pr_err("Set key scm call failed resp.result %d\n", resp.result);
  5770. ret = -EINVAL;
  5771. break;
  5772. }
  5773. set_key_exit:
  5774. __qseecom_disable_clk(CLK_QSEE);
  5775. if (qseecom.qsee.instance != qseecom.ce_drv.instance)
  5776. __qseecom_disable_clk(CLK_CE_DRV);
  5777. return ret;
  5778. }
/*
 * Ask TZ to update the user info bound to the current key for a usage.
 *
 * @data:  client handle (needed for incomplete-command processing).
 * @usage: key-management usage; validated against the supported range.
 * @ireq:  pre-filled key-userinfo-update request.
 *
 * Returns 0 on success, -EFAULT on bad usage or SCM failure,
 * QSEOS_RESULT_FAIL_PENDING_OPERATION (positive) when the caller should
 * retry later, -EINVAL on other TZ-level failures.  CLK_QSEE is held
 * around the SCM call.
 *
 * NOTE(review): when the SCM call fails with -EINVAL and a pending
 * operation, control deliberately falls through into the result switch
 * below rather than returning — confirm this matches the original intent.
 */
static int __qseecom_update_current_key_user_info(
			struct qseecom_dev_handle *data,
			enum qseecom_key_management_usage_type usage,
			struct qseecom_key_userinfo_update_ireq *ireq)
{
	struct qseecom_command_scm_resp resp;
	int ret;

	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
		usage >= QSEOS_KM_USAGE_MAX) {
		pr_err("Error:: unsupported usage %d\n", usage);
		return -EFAULT;
	}
	ret = __qseecom_enable_clk(CLK_QSEE);
	if (ret)
		return ret;

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
		ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
		&resp, sizeof(struct qseecom_command_scm_resp));
	if (ret) {
		if (ret == -EINVAL &&
			resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			/* Retryable: falls through to the switch below. */
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		} else {
			pr_err("scm call to update key userinfo failed: %d\n",
					ret);
			__qseecom_disable_clk(CLK_QSEE);
			return -EFAULT;
		}
	}

	switch (resp.result) {
	case QSEOS_RESULT_SUCCESS:
		break;
	case QSEOS_RESULT_INCOMPLETE:
		/* Service listener requests, then re-check the result. */
		ret = __qseecom_process_incomplete_cmd(data, &resp);
		if (resp.result ==
			QSEOS_RESULT_FAIL_PENDING_OPERATION) {
			pr_debug("Set Key operation under processing...\n");
			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		}
		if (ret)
			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
					resp.result);
		break;
	case QSEOS_RESULT_FAIL_PENDING_OPERATION:
		pr_debug("Update Key operation under processing...\n");
		ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
		break;
	case QSEOS_RESULT_FAILURE:
	default:
		pr_err("Set key scm call failed resp.result %d\n", resp.result);
		ret = -EINVAL;
		break;
	}
	__qseecom_disable_clk(CLK_QSEE);
	return ret;
}
  5836. static int qseecom_enable_ice_setup(int usage)
  5837. {
  5838. int ret = 0;
  5839. if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
  5840. ret = qcom_ice_setup_ice_hw("ufs", true);
  5841. else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
  5842. ret = qcom_ice_setup_ice_hw("sdcc", true);
  5843. return ret;
  5844. }
  5845. static int qseecom_disable_ice_setup(int usage)
  5846. {
  5847. int ret = 0;
  5848. if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
  5849. ret = qcom_ice_setup_ice_hw("ufs", false);
  5850. else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
  5851. ret = qcom_ice_setup_ice_hw("sdcc", false);
  5852. return ret;
  5853. }
  5854. static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
  5855. {
  5856. struct qseecom_ce_info_use *pce_info_use, *p;
  5857. int total = 0;
  5858. int i;
  5859. switch (usage) {
  5860. case QSEOS_KM_USAGE_DISK_ENCRYPTION:
  5861. case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
  5862. case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
  5863. p = qseecom.ce_info.fde;
  5864. total = qseecom.ce_info.num_fde;
  5865. break;
  5866. case QSEOS_KM_USAGE_FILE_ENCRYPTION:
  5867. p = qseecom.ce_info.pfe;
  5868. total = qseecom.ce_info.num_pfe;
  5869. break;
  5870. default:
  5871. pr_err("unsupported usage %d\n", usage);
  5872. return -EINVAL;
  5873. }
  5874. pce_info_use = NULL;
  5875. for (i = 0; i < total; i++) {
  5876. if (p->unit_num == unit) {
  5877. pce_info_use = p;
  5878. break;
  5879. }
  5880. p++;
  5881. }
  5882. if (!pce_info_use) {
  5883. pr_err("can not find %d\n", unit);
  5884. return -EINVAL;
  5885. }
  5886. return pce_info_use->num_ce_pipe_entries;
  5887. }
  5888. static int qseecom_create_key(struct qseecom_dev_handle *data,
  5889. void __user *argp)
  5890. {
  5891. int i;
  5892. uint32_t *ce_hw = NULL;
  5893. uint32_t pipe = 0;
  5894. int ret = 0;
  5895. uint32_t flags = 0;
  5896. struct qseecom_create_key_req create_key_req;
  5897. struct qseecom_key_generate_ireq generate_key_ireq;
  5898. struct qseecom_key_select_ireq set_key_ireq;
  5899. uint32_t entries = 0;
  5900. ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
  5901. if (ret) {
  5902. pr_err("copy_from_user failed\n");
  5903. return ret;
  5904. }
  5905. if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
  5906. create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
  5907. pr_err("unsupported usage %d\n", create_key_req.usage);
  5908. ret = -EFAULT;
  5909. return ret;
  5910. }
  5911. entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
  5912. create_key_req.usage);
  5913. if (entries <= 0) {
  5914. pr_err("no ce instance for usage %d instance %d\n",
  5915. DEFAULT_CE_INFO_UNIT, create_key_req.usage);
  5916. ret = -EINVAL;
  5917. return ret;
  5918. }
  5919. ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
  5920. if (!ce_hw) {
  5921. ret = -ENOMEM;
  5922. return ret;
  5923. }
  5924. ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
  5925. DEFAULT_CE_INFO_UNIT);
  5926. if (ret) {
  5927. pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
  5928. ret = -EINVAL;
  5929. goto free_buf;
  5930. }
  5931. if (qseecom.fde_key_size)
  5932. flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
  5933. else
  5934. flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
  5935. if (qseecom.enable_key_wrap_in_ks)
  5936. flags |= ENABLE_KEY_WRAP_IN_KS;
  5937. generate_key_ireq.flags = flags;
  5938. generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
  5939. memset((void *)generate_key_ireq.key_id,
  5940. 0, QSEECOM_KEY_ID_SIZE);
  5941. memset((void *)generate_key_ireq.hash32,
  5942. 0, QSEECOM_HASH_SIZE);
  5943. memcpy((void *)generate_key_ireq.key_id,
  5944. (void *)key_id_array[create_key_req.usage].desc,
  5945. QSEECOM_KEY_ID_SIZE);
  5946. memcpy((void *)generate_key_ireq.hash32,
  5947. (void *)create_key_req.hash32,
  5948. QSEECOM_HASH_SIZE);
  5949. ret = __qseecom_generate_and_save_key(data,
  5950. create_key_req.usage, &generate_key_ireq);
  5951. if (ret) {
  5952. pr_err("Failed to generate key on storage: %d\n", ret);
  5953. goto free_buf;
  5954. }
  5955. for (i = 0; i < entries; i++) {
  5956. set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
  5957. if (create_key_req.usage ==
  5958. QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
  5959. set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
  5960. set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
  5961. } else if (create_key_req.usage ==
  5962. QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
  5963. set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
  5964. set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
  5965. } else {
  5966. set_key_ireq.ce = ce_hw[i];
  5967. set_key_ireq.pipe = pipe;
  5968. }
  5969. set_key_ireq.flags = flags;
  5970. /* set both PIPE_ENC and PIPE_ENC_XTS*/
  5971. set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
  5972. memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
  5973. memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
  5974. memcpy((void *)set_key_ireq.key_id,
  5975. (void *)key_id_array[create_key_req.usage].desc,
  5976. QSEECOM_KEY_ID_SIZE);
  5977. memcpy((void *)set_key_ireq.hash32,
  5978. (void *)create_key_req.hash32,
  5979. QSEECOM_HASH_SIZE);
  5980. /*
  5981. * It will return false if it is GPCE based crypto instance or
  5982. * ICE is setup properly
  5983. */
  5984. ret = qseecom_enable_ice_setup(create_key_req.usage);
  5985. if (ret)
  5986. goto free_buf;
  5987. do {
  5988. ret = __qseecom_set_clear_ce_key(data,
  5989. create_key_req.usage,
  5990. &set_key_ireq);
  5991. /*
  5992. * wait a little before calling scm again to let other
  5993. * processes run
  5994. */
  5995. if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
  5996. msleep(50);
  5997. } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
  5998. qseecom_disable_ice_setup(create_key_req.usage);
  5999. if (ret) {
  6000. pr_err("Failed to create key: pipe %d, ce %d: %d\n",
  6001. pipe, ce_hw[i], ret);
  6002. goto free_buf;
  6003. } else {
  6004. pr_err("Set the key successfully\n");
  6005. if ((create_key_req.usage ==
  6006. QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
  6007. (create_key_req.usage ==
  6008. QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
  6009. goto free_buf;
  6010. }
  6011. }
  6012. free_buf:
  6013. kfree_sensitive(ce_hw);
  6014. return ret;
  6015. }
  6016. static int qseecom_wipe_key(struct qseecom_dev_handle *data,
  6017. void __user *argp)
  6018. {
  6019. uint32_t *ce_hw = NULL;
  6020. uint32_t pipe = 0;
  6021. int ret = 0;
  6022. uint32_t flags = 0;
  6023. int i, j;
  6024. struct qseecom_wipe_key_req wipe_key_req;
  6025. struct qseecom_key_delete_ireq delete_key_ireq;
  6026. struct qseecom_key_select_ireq clear_key_ireq;
  6027. uint32_t entries = 0;
  6028. ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
  6029. if (ret) {
  6030. pr_err("copy_from_user failed\n");
  6031. return ret;
  6032. }
  6033. if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
  6034. wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
  6035. pr_err("unsupported usage %d\n", wipe_key_req.usage);
  6036. ret = -EFAULT;
  6037. return ret;
  6038. }
  6039. entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
  6040. wipe_key_req.usage);
  6041. if (entries <= 0) {
  6042. pr_err("no ce instance for usage %d instance %d\n",
  6043. DEFAULT_CE_INFO_UNIT, wipe_key_req.usage);
  6044. ret = -EINVAL;
  6045. return ret;
  6046. }
  6047. ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
  6048. if (!ce_hw) {
  6049. ret = -ENOMEM;
  6050. return ret;
  6051. }
  6052. ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
  6053. DEFAULT_CE_INFO_UNIT);
  6054. if (ret) {
  6055. pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
  6056. ret = -EINVAL;
  6057. goto free_buf;
  6058. }
  6059. if (wipe_key_req.wipe_key_flag) {
  6060. delete_key_ireq.flags = flags;
  6061. delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
  6062. memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
  6063. memcpy((void *)delete_key_ireq.key_id,
  6064. (void *)key_id_array[wipe_key_req.usage].desc,
  6065. QSEECOM_KEY_ID_SIZE);
  6066. memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
  6067. ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
  6068. &delete_key_ireq);
  6069. if (ret) {
  6070. pr_err("Failed to delete key from ssd storage: %d\n",
  6071. ret);
  6072. ret = -EFAULT;
  6073. goto free_buf;
  6074. }
  6075. }
  6076. for (j = 0; j < entries; j++) {
  6077. clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
  6078. if (wipe_key_req.usage ==
  6079. QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
  6080. clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
  6081. clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
  6082. } else if (wipe_key_req.usage ==
  6083. QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
  6084. clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
  6085. clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
  6086. } else {
  6087. clear_key_ireq.ce = ce_hw[j];
  6088. clear_key_ireq.pipe = pipe;
  6089. }
  6090. clear_key_ireq.flags = flags;
  6091. clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
  6092. for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
  6093. clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
  6094. memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
  6095. /*
  6096. * It will return false if it is GPCE based crypto instance or
  6097. * ICE is setup properly
  6098. */
  6099. ret = qseecom_enable_ice_setup(wipe_key_req.usage);
  6100. if (ret)
  6101. goto free_buf;
  6102. ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
  6103. &clear_key_ireq);
  6104. qseecom_disable_ice_setup(wipe_key_req.usage);
  6105. if (ret) {
  6106. pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
  6107. pipe, ce_hw[j], ret);
  6108. ret = -EFAULT;
  6109. goto free_buf;
  6110. }
  6111. }
  6112. free_buf:
  6113. kfree_sensitive(ce_hw);
  6114. return ret;
  6115. }
  6116. static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
  6117. void __user *argp)
  6118. {
  6119. int ret = 0;
  6120. uint32_t flags = 0;
  6121. struct qseecom_update_key_userinfo_req update_key_req;
  6122. struct qseecom_key_userinfo_update_ireq ireq;
  6123. ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
  6124. if (ret) {
  6125. pr_err("copy_from_user failed\n");
  6126. return ret;
  6127. }
  6128. if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
  6129. update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
  6130. pr_err("Error:: unsupported usage %d\n", update_key_req.usage);
  6131. return -EFAULT;
  6132. }
  6133. ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;
  6134. if (qseecom.fde_key_size)
  6135. flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
  6136. else
  6137. flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
  6138. ireq.flags = flags;
  6139. memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
  6140. memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
  6141. memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
  6142. memcpy((void *)ireq.key_id,
  6143. (void *)key_id_array[update_key_req.usage].desc,
  6144. QSEECOM_KEY_ID_SIZE);
  6145. memcpy((void *)ireq.current_hash32,
  6146. (void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
  6147. memcpy((void *)ireq.new_hash32,
  6148. (void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);
  6149. do {
  6150. ret = __qseecom_update_current_key_user_info(data,
  6151. update_key_req.usage,
  6152. &ireq);
  6153. /*
  6154. * wait a little before calling scm again to let other
  6155. * processes run
  6156. */
  6157. if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
  6158. msleep(50);
  6159. } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
  6160. if (ret) {
  6161. pr_err("Failed to update key info: %d\n", ret);
  6162. return ret;
  6163. }
  6164. return ret;
  6165. }
  6166. static int qseecom_is_es_activated(void __user *argp)
  6167. {
  6168. struct qseecom_is_es_activated_req req = {0};
  6169. struct qseecom_command_scm_resp resp;
  6170. int ret;
  6171. if (qseecom.qsee_version < QSEE_VERSION_04) {
  6172. pr_err("invalid qsee version\n");
  6173. return -ENODEV;
  6174. }
  6175. if (argp == NULL) {
  6176. pr_err("arg is null\n");
  6177. return -EINVAL;
  6178. }
  6179. ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
  6180. &req, sizeof(req), &resp, sizeof(resp));
  6181. if (ret) {
  6182. pr_err("scm_call failed\n");
  6183. return ret;
  6184. }
  6185. req.is_activated = resp.result;
  6186. ret = copy_to_user(argp, &req, sizeof(req));
  6187. if (ret) {
  6188. pr_err("copy_to_user failed\n");
  6189. return ret;
  6190. }
  6191. return 0;
  6192. }
  6193. static int qseecom_save_partition_hash(void __user *argp)
  6194. {
  6195. struct qseecom_save_partition_hash_req req;
  6196. struct qseecom_command_scm_resp resp;
  6197. int ret;
  6198. memset(&resp, 0x00, sizeof(resp));
  6199. if (qseecom.qsee_version < QSEE_VERSION_04) {
  6200. pr_err("invalid qsee version\n");
  6201. return -ENODEV;
  6202. }
  6203. if (argp == NULL) {
  6204. pr_err("arg is null\n");
  6205. return -EINVAL;
  6206. }
  6207. ret = copy_from_user(&req, argp, sizeof(req));
  6208. if (ret) {
  6209. pr_err("copy_from_user failed\n");
  6210. return ret;
  6211. }
  6212. ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
  6213. (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
  6214. if (ret) {
  6215. pr_err("qseecom_scm_call failed\n");
  6216. return ret;
  6217. }
  6218. return 0;
  6219. }
  6220. static int qseecom_mdtp_cipher_dip(void __user *argp)
  6221. {
  6222. struct qseecom_mdtp_cipher_dip_req req;
  6223. u32 tzbuflenin, tzbuflenout;
  6224. char *tzbufin = NULL, *tzbufout = NULL;
  6225. struct qseecom_scm_desc desc = {0};
  6226. int ret;
  6227. phys_addr_t pain, paout;
  6228. struct qtee_shm shmin = {0}, shmout = {0};
  6229. do {
  6230. /* Copy the parameters from userspace */
  6231. if (argp == NULL) {
  6232. pr_err("arg is null\n");
  6233. ret = -EINVAL;
  6234. break;
  6235. }
  6236. ret = copy_from_user(&req, argp, sizeof(req));
  6237. if (ret) {
  6238. pr_err("copy_from_user failed, ret= %d\n", ret);
  6239. break;
  6240. }
  6241. if (req.in_buf == NULL || req.out_buf == NULL ||
  6242. req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
  6243. req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
  6244. req.direction > 1) {
  6245. pr_err("invalid parameters\n");
  6246. ret = -EINVAL;
  6247. break;
  6248. }
  6249. /* Copy the input buffer from userspace to kernel space */
  6250. tzbuflenin = PAGE_ALIGN(req.in_buf_size);
  6251. tzbufin = __qseecom_alloc_tzbuf(tzbuflenin, &pain, &shmin);
  6252. if (!tzbufin) {
  6253. pr_err("error allocating in buffer\n");
  6254. ret = -ENOMEM;
  6255. break;
  6256. }
  6257. ret = copy_from_user(tzbufin, (void __user *)req.in_buf,
  6258. req.in_buf_size);
  6259. if (ret) {
  6260. pr_err("copy_from_user failed, ret=%d\n", ret);
  6261. break;
  6262. }
  6263. qtee_shmbridge_flush_shm_buf(&shmin);
  6264. /* Prepare the output buffer in kernel space */
  6265. tzbuflenout = PAGE_ALIGN(req.out_buf_size);
  6266. tzbufout = __qseecom_alloc_tzbuf(tzbuflenout, &paout, &shmout);
  6267. if (!tzbufout) {
  6268. pr_err("error allocating out buffer\n");
  6269. ret = -ENOMEM;
  6270. break;
  6271. }
  6272. qtee_shmbridge_flush_shm_buf(&shmout);
  6273. /* Send the command to TZ */
  6274. desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
  6275. desc.args[0] = pain;
  6276. desc.args[1] = req.in_buf_size;
  6277. desc.args[2] = paout;
  6278. desc.args[3] = req.out_buf_size;
  6279. desc.args[4] = req.direction;
  6280. ret = __qseecom_enable_clk(CLK_QSEE);
  6281. if (ret)
  6282. break;
  6283. ret = __qseecom_scm_call2_locked(TZ_MDTP_CIPHER_DIP_ID, &desc);
  6284. __qseecom_disable_clk(CLK_QSEE);
  6285. if (ret) {
  6286. pr_err("failed for SCM_SVC_MDTP, ret=%d\n",
  6287. ret);
  6288. break;
  6289. }
  6290. /* Copy the output buffer from kernel space to userspace */
  6291. qtee_shmbridge_flush_shm_buf(&shmout);
  6292. ret = copy_to_user((void __user *)req.out_buf,
  6293. tzbufout, req.out_buf_size);
  6294. if (ret) {
  6295. pr_err("copy_to_user failed, ret=%d\n", ret);
  6296. break;
  6297. }
  6298. } while (0);
  6299. __qseecom_free_tzbuf(&shmin);
  6300. __qseecom_free_tzbuf(&shmout);
  6301. return ret;
  6302. }
/*
 * __qseecom_qteec_validate_msg() - sanity-check a GP TEE request against
 * the client's registered shared buffer.
 * @data: client handle; must be a QSEECOM_CLIENT_APP with a mapped shared
 *        buffer (client.sb_virt).
 * @req:  request whose req_ptr/resp_ptr are user-virtual addresses and
 *        req_len/resp_len byte lengths supplied by userspace.
 *
 * Both buffers must be non-NULL, non-empty, fit together inside the shared
 * buffer, and lie entirely within [user_virt_sb_base, user_virt_sb_base +
 * sb_length).  Every pointer+length sum is checked for wraparound before
 * it is computed.  The order of checks is significant: each overflow guard
 * precedes the arithmetic it protects.
 *
 * Return: 0 if the request is acceptable, negative errno otherwise.
 */
static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
				struct qseecom_qteec_req *req)
{
	if (!data || !data->client.sb_virt) {
		pr_err("Client or client buf is not initialized\n");
		return -EINVAL;
	}

	if (data->type != QSEECOM_CLIENT_APP)
		return -EFAULT;

	/* Guard the req_len + resp_len sum used by the size check below. */
	if (req->req_len > UINT_MAX - req->resp_len) {
		pr_err("Integer overflow detected in req_len & rsp_len\n");
		return -EINVAL;
	}

	if (req->req_len + req->resp_len > data->client.sb_length) {
		pr_debug("Not enough memory to fit cmd_buf.\n");
		pr_debug("resp_buf. Required: %u, Available: %zu\n",
			(req->req_len + req->resp_len), data->client.sb_length);
		return -ENOMEM;
	}

	if (req->req_ptr == NULL || req->resp_ptr == NULL) {
		pr_err("cmd buffer or response buffer is null\n");
		return -EINVAL;
	}
	/* Each buffer must start inside the client's shared buffer. */
	if (((uintptr_t)req->req_ptr <
			data->client.user_virt_sb_base) ||
		((uintptr_t)req->req_ptr >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("cmd buffer address not within shared bufffer\n");
		return -EINVAL;
	}

	if (((uintptr_t)req->resp_ptr <
			data->client.user_virt_sb_base)  ||
		((uintptr_t)req->resp_ptr >=
		(data->client.user_virt_sb_base + data->client.sb_length))) {
		pr_err("response buffer address not within shared bufffer\n");
		return -EINVAL;
	}

	if ((req->req_len == 0) || (req->resp_len == 0)) {
		pr_err("cmd buf lengtgh/response buf length not valid\n");
		return -EINVAL;
	}

	/* Guard the ptr + len sums used by the range check below. */
	if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
		pr_err("Integer overflow in req_len & req_ptr\n");
		return -EINVAL;
	}

	if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
		pr_err("Integer overflow in resp_len & resp_ptr\n");
		return -EINVAL;
	}

	if (data->client.user_virt_sb_base >
					(ULONG_MAX - data->client.sb_length)) {
		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
		return -EINVAL;
	}

	/* Each buffer must also end inside the shared buffer. */
	if ((((uintptr_t)req->req_ptr + req->req_len) >
		((uintptr_t)data->client.user_virt_sb_base +
						data->client.sb_length)) ||
		(((uintptr_t)req->resp_ptr + req->resp_len) >
		((uintptr_t)data->client.user_virt_sb_base +
						data->client.sb_length))) {
		pr_err("cmd buf or resp buf is out of shared buffer region\n");
		return -EINVAL;
	}
	return 0;
}
/*
 * __qseecom_qteec_handle_pre_alc_fd() - build a physical scatter list for a
 * pre-allocated ion fd and record the allocation on the client handle.
 * @data:   client handle; sec_buf_fd[fd_idx] is filled in on success so the
 *          cleanup pass can free the coherent buffer later.
 * @fd_idx: index of the ifd_data slot being processed (< MAX_ION_FD).
 * @sg_ptr: mapped scatter-gather table of the dma-buf.
 *
 * Allocates a DMA-coherent buffer laid out as
 * [u32 nents][struct qseecom_sg_entry x nents] describing each segment's
 * physical address and length, for consumption by TZ.
 *
 * Return: 0 on success, -ENOMEM on bad index or allocation failure.
 */
static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
			uint32_t fd_idx, struct sg_table *sg_ptr)
{
	struct scatterlist *sg = sg_ptr->sgl;
	struct qseecom_sg_entry *sg_entry;
	void *buf;
	uint i;
	size_t size;
	dma_addr_t coh_pmem;

	if (fd_idx >= MAX_ION_FD) {
		pr_err("fd_idx [%d] is invalid\n", fd_idx);
		return -ENOMEM;
	}
	/*
	 * Allocate a buffer, populate it with number of entry plus
	 * each sg entry's phy addr and length; then return the
	 * phy_addr of the buffer.
	 */
	size = sizeof(uint32_t) +
		sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
	/*
	 * NOTE(review): this rounds up to a page boundary, but unlike
	 * PAGE_ALIGN() it adds a whole extra page when size is already
	 * aligned.  The recorded size is later reported to TZ, so it is
	 * left as-is — confirm before changing.
	 */
	size = (size + PAGE_SIZE) & PAGE_MASK;
	buf = dma_alloc_coherent(qseecom.dev,
			size, &coh_pmem, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;
	/* Header word: number of scatter entries that follow. */
	*(uint32_t *)buf = sg_ptr->nents;
	sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
	for (i = 0; i < sg_ptr->nents; i++) {
		/* Physical address truncated to 32 bits for the TZ ABI. */
		sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
		sg_entry->len = sg->length;
		sg_entry++;
		sg = sg_next(sg);
	}
	/* Record the allocation so the cleanup pass can free it. */
	data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
	data->client.sec_buf_fd[fd_idx].vbase = buf;
	data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
	data->client.sec_buf_fd[fd_idx].size = size;
	return 0;
}
/*
 * __qseecom_update_qteec_req_buf() - patch (or un-patch) the memref slots of
 * a GP TEE modfd request with dma-buf physical addresses.
 * @req:     modfd request; req_ptr must already be a kernel-virtual address
 *           and each ifd_data[i].cmd_buf_offset points at a u32 slot inside
 *           the request buffer.
 * @data:    client handle (holds sec_buf_fd[] state and the sglist table).
 * @cleanup: false before the SCM call (populate slots, clean caches, record
 *           sglist entries); true afterwards (free pre-alloc buffers, zero
 *           slots, invalidate caches).
 *
 * For each fd: a slot currently holding 0 is treated as a pre-allocated
 * secure-heap buffer (a coherent descriptor buffer is built and its
 * phys addr/size written into the struct qseecom_param_memref at the slot);
 * a non-zero slot gets the single-segment dma address written directly.
 *
 * Return: 0 on success; -EINVAL for bad arguments/offsets.
 * NOTE(review): every failure after mapping funnels through 'err' and
 * returns -ENOMEM even when the underlying error was different (e.g. a
 * cache-op failure) — confirm before relying on the exact errno.
 */
static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
			struct qseecom_dev_handle *data, bool cleanup)
{
	int ret = 0;
	int i = 0;
	uint32_t *update;
	struct sg_table *sg_ptr = NULL;
	struct scatterlist *sg;
	struct qseecom_param_memref *memref;
	int ion_fd = -1;
	struct dma_buf *dmabuf = NULL;
	struct dma_buf_attachment *attach = NULL;

	if (req == NULL) {
		pr_err("Invalid address\n");
		return -EINVAL;
	}
	for (i = 0; i < MAX_ION_FD; i++) {
		if (req->ifd_data[i].fd > 0) {
			ion_fd = req->ifd_data[i].fd;
			/* The slot must fit a full memref inside req buf. */
			if ((req->req_len <
				sizeof(struct qseecom_param_memref)) ||
				(req->ifd_data[i].cmd_buf_offset >
				req->req_len -
				sizeof(struct qseecom_param_memref))) {
				pr_err("Invalid offset/req len 0x%x/0x%x\n",
					req->req_len,
					req->ifd_data[i].cmd_buf_offset);
				return -EINVAL;
			}
			update = (uint32_t *)((char *) req->req_ptr +
				req->ifd_data[i].cmd_buf_offset);
			if (!update) {
				pr_err("update pointer is NULL\n");
				return -EINVAL;
			}
		} else {
			continue;
		}
		/* Populate the cmd data structure with the phys_addr */
		ret = qseecom_dmabuf_map(ion_fd, &sg_ptr, &attach, &dmabuf);
		if (ret) {
			pr_err("IOn client could not retrieve sg table\n");
			goto err;
		}
		sg = sg_ptr->sgl;
		if (sg == NULL) {
			pr_err("sg is NULL\n");
			goto err;
		}
		if ((sg_ptr->nents == 0) || (sg->length == 0)) {
			pr_err("Num of scat entr (%d)or length(%d) invalid\n",
					sg_ptr->nents, sg->length);
			goto err;
		}
		/* clean up buf for pre-allocated fd */
		if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
			(*update)) {
			if (data->client.sec_buf_fd[i].vbase)
				dma_free_coherent(qseecom.dev,
					data->client.sec_buf_fd[i].size,
					data->client.sec_buf_fd[i].vbase,
					data->client.sec_buf_fd[i].pbase);
			/* Zero both the request slot and the bookkeeping. */
			memset((void *)update, 0,
				sizeof(struct qseecom_param_memref));
			memset(&(data->client.sec_buf_fd[i]), 0,
				sizeof(struct qseecom_sec_buf_fd_info));
			goto clean;
		}
		if (*update == 0) {
			/* update buf for pre-allocated fd from secure heap*/
			ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
				sg_ptr);
			if (ret) {
				pr_err("Failed to handle buf for fd[%d]\n", i);
				goto err;
			}
			memref = (struct qseecom_param_memref *)update;
			memref->buffer =
				(uint32_t)(data->client.sec_buf_fd[i].pbase);
			memref->size =
				(uint32_t)(data->client.sec_buf_fd[i].size);
		} else {
			/* update buf for fd from non-secure qseecom heap */
			if (sg_ptr->nents != 1) {
				pr_err("Num of scat entr (%d) invalid\n",
					sg_ptr->nents);
				goto err;
			}
			if (cleanup)
				*update = 0;
			else
				*update = (uint32_t)sg_dma_address(sg_ptr->sgl);
		}
clean:
		if (cleanup) {
			ret = qseecom_dmabuf_cache_operations(dmabuf,
					QSEECOM_CACHE_INVALIDATE);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
		} else {
			ret = qseecom_dmabuf_cache_operations(dmabuf,
					QSEECOM_CACHE_CLEAN);
			if (ret) {
				pr_err("cache operation failed %d\n", ret);
				goto err;
			}
			/* Record this fd in the sglist table passed to TZ. */
			data->sglistinfo_ptr[i].indexAndFlags =
				SGLISTINFO_SET_INDEX_FLAG(
				(sg_ptr->nents == 1), 0,
				req->ifd_data[i].cmd_buf_offset);
			data->sglistinfo_ptr[i].sizeOrCount =
				(sg_ptr->nents == 1) ?
				sg->length : sg_ptr->nents;
			data->sglist_cnt = i + 1;
		}
		/* unmap the dmabuf */
		qseecom_dmabuf_unmap(sg_ptr, attach, dmabuf);
		sg_ptr = NULL;
		dmabuf = NULL;
		attach = NULL;
	}
	return ret;
err:
	if (!IS_ERR_OR_NULL(sg_ptr)) {
		qseecom_dmabuf_unmap(sg_ptr, attach, dmabuf);
		MAKE_NULL(sg_ptr, attach, dmabuf);
	}
	return -ENOMEM;
}
  6538. static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
  6539. struct qseecom_qteec_req *req, uint32_t cmd_id)
  6540. {
  6541. struct qseecom_command_scm_resp resp;
  6542. struct qseecom_qteec_ireq ireq;
  6543. struct qseecom_qteec_64bit_ireq ireq_64bit;
  6544. struct qseecom_registered_app_list *ptr_app;
  6545. bool found_app = false;
  6546. unsigned long flags;
  6547. int ret = 0;
  6548. int ret2 = 0;
  6549. uint32_t reqd_len_sb_in = 0;
  6550. void *cmd_buf = NULL;
  6551. size_t cmd_len;
  6552. struct sglist_info *table = data->sglistinfo_ptr;
  6553. void *req_ptr = NULL;
  6554. void *resp_ptr = NULL;
  6555. ret = __qseecom_qteec_validate_msg(data, req);
  6556. if (ret)
  6557. return ret;
  6558. req_ptr = req->req_ptr;
  6559. resp_ptr = req->resp_ptr;
  6560. /* find app_id & img_name from list */
  6561. spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
  6562. list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
  6563. list) {
  6564. if ((ptr_app->app_id == data->client.app_id) &&
  6565. (!strcmp(ptr_app->app_name, data->client.app_name))) {
  6566. found_app = true;
  6567. break;
  6568. }
  6569. }
  6570. spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
  6571. if (!found_app) {
  6572. pr_err("app_id %d (%s) is not found\n", data->client.app_id,
  6573. (char *)data->client.app_name);
  6574. return -ENOENT;
  6575. }
  6576. if (__qseecom_find_pending_unload_app(data->client.app_id,
  6577. data->client.app_name)) {
  6578. pr_err("app %d (%s) unload is pending\n",
  6579. data->client.app_id, data->client.app_name);
  6580. return -ENOENT;
  6581. }
  6582. req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
  6583. (uintptr_t)req->req_ptr);
  6584. req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
  6585. (uintptr_t)req->resp_ptr);
  6586. if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
  6587. (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
  6588. ret = __qseecom_update_qteec_req_buf(
  6589. (struct qseecom_qteec_modfd_req *)req, data, false);
  6590. if (ret)
  6591. return ret;
  6592. }
  6593. if (qseecom.qsee_version < QSEE_VERSION_40) {
  6594. ireq.app_id = data->client.app_id;
  6595. ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
  6596. (uintptr_t)req_ptr);
  6597. ireq.req_len = req->req_len;
  6598. ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
  6599. (uintptr_t)resp_ptr);
  6600. ireq.resp_len = req->resp_len;
  6601. ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
  6602. ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
  6603. qtee_shmbridge_flush_shm_buf(&data->sglistinfo_shm);
  6604. cmd_buf = (void *)&ireq;
  6605. cmd_len = sizeof(struct qseecom_qteec_ireq);
  6606. } else {
  6607. ireq_64bit.app_id = data->client.app_id;
  6608. ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
  6609. (uintptr_t)req_ptr);
  6610. ireq_64bit.req_len = req->req_len;
  6611. ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
  6612. (uintptr_t)resp_ptr);
  6613. ireq_64bit.resp_len = req->resp_len;
  6614. if ((data->client.app_arch == ELFCLASS32) &&
  6615. ((ireq_64bit.req_ptr >=
  6616. PHY_ADDR_4G - ireq_64bit.req_len) ||
  6617. (ireq_64bit.resp_ptr >=
  6618. PHY_ADDR_4G - ireq_64bit.resp_len))){
  6619. pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
  6620. data->client.app_name, data->client.app_id);
  6621. pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
  6622. ireq_64bit.req_ptr, ireq_64bit.req_len,
  6623. ireq_64bit.resp_ptr, ireq_64bit.resp_len);
  6624. return -EFAULT;
  6625. }
  6626. ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
  6627. ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
  6628. qtee_shmbridge_flush_shm_buf(&data->sglistinfo_shm);
  6629. cmd_buf = (void *)&ireq_64bit;
  6630. cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
  6631. }
  6632. if (qseecom.whitelist_support
  6633. && cmd_id == QSEOS_TEE_OPEN_SESSION)
  6634. *(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
  6635. else
  6636. *(uint32_t *)cmd_buf = cmd_id;
  6637. reqd_len_sb_in = req->req_len + req->resp_len;
  6638. ret = qseecom_dmabuf_cache_operations(data->client.dmabuf,
  6639. QSEECOM_CACHE_CLEAN);
  6640. if (ret) {
  6641. pr_err("cache operation failed %d\n", ret);
  6642. return ret;
  6643. }
  6644. __qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
  6645. ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
  6646. cmd_buf, cmd_len,
  6647. &resp, sizeof(resp));
  6648. if (ret) {
  6649. pr_err("scm_call() failed with err: %d (app_id = %d)\n",
  6650. ret, data->client.app_id);
  6651. goto exit;
  6652. }
  6653. ret = qseecom_dmabuf_cache_operations(data->client.dmabuf,
  6654. QSEECOM_CACHE_INVALIDATE);
  6655. if (ret) {
  6656. pr_err("cache operation failed %d\n", ret);
  6657. return ret;
  6658. }
  6659. if (qseecom.qsee_reentrancy_support) {
  6660. ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
  6661. if (ret)
  6662. goto exit;
  6663. } else {
  6664. if (resp.result == QSEOS_RESULT_INCOMPLETE) {
  6665. ret = __qseecom_process_incomplete_cmd(data, &resp);
  6666. if (ret) {
  6667. pr_err("process_incomplete_cmd failed err: %d\n",
  6668. ret);
  6669. goto exit;
  6670. }
  6671. } else {
  6672. if (resp.result != QSEOS_RESULT_SUCCESS) {
  6673. pr_err("Response result %d not supported\n",
  6674. resp.result);
  6675. ret = -EINVAL;
  6676. goto exit;
  6677. }
  6678. }
  6679. }
  6680. exit:
  6681. if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
  6682. (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
  6683. ret2 = __qseecom_update_qteec_req_buf(
  6684. (struct qseecom_qteec_modfd_req *)req, data, true);
  6685. if (ret2)
  6686. return ret2;
  6687. }
  6688. return ret;
  6689. }
  6690. static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
  6691. void __user *argp)
  6692. {
  6693. struct qseecom_qteec_modfd_req req;
  6694. int ret = 0;
  6695. ret = copy_from_user(&req, argp,
  6696. sizeof(struct qseecom_qteec_modfd_req));
  6697. if (ret) {
  6698. pr_err("copy_from_user failed\n");
  6699. return ret;
  6700. }
  6701. ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
  6702. QSEOS_TEE_OPEN_SESSION);
  6703. return ret;
  6704. }
  6705. static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
  6706. void __user *argp)
  6707. {
  6708. struct qseecom_qteec_req req;
  6709. int ret = 0;
  6710. ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
  6711. if (ret) {
  6712. pr_err("copy_from_user failed\n");
  6713. return ret;
  6714. }
  6715. ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
  6716. return ret;
  6717. }
/*
 * qseecom_qteec_invoke_modfd_cmd() - handle the GP TEE invoke-command
 * ioctl with optional ion-fd buffer references.
 * @data: client handle (must be a registered QSEECOM_CLIENT_APP).
 * @argp: userspace pointer to a struct qseecom_qteec_modfd_req.
 *
 * Validates the request against the client's shared buffer, patches ion-fd
 * memrefs in, builds the 32-bit or 64-bit ireq depending on QSEE version,
 * performs cache maintenance around the SCM call, processes the response,
 * then un-patches the memrefs.
 *
 * NOTE(review): the early `return ret` paths after the buffers were patched
 * in (scm_call failure, cache-invalidate failure, incomplete-cmd failure)
 * skip the final cleanup pass, presumably leaking any pre-allocated
 * descriptor buffers — confirm against __qseecom_qteec_issue_cmd(), whose
 * error paths do run cleanup.
 *
 * Return: 0 on success, negative errno on failure (a positive
 * bytes-not-copied count can leak through from copy_from_user()).
 */
static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
				void __user *argp)
{
	struct qseecom_qteec_modfd_req req;
	struct qseecom_command_scm_resp resp;
	struct qseecom_qteec_ireq ireq;
	struct qseecom_qteec_64bit_ireq ireq_64bit;
	struct qseecom_registered_app_list *ptr_app;
	bool found_app = false;
	unsigned long flags;
	int ret = 0;
	int i = 0;
	uint32_t reqd_len_sb_in = 0;
	void *cmd_buf = NULL;
	size_t cmd_len;
	struct sglist_info *table = data->sglistinfo_ptr;
	void *req_ptr = NULL;
	void *resp_ptr = NULL;

	ret = copy_from_user(&req, argp,
			sizeof(struct qseecom_qteec_modfd_req));
	if (ret) {
		pr_err("copy_from_user failed\n");
		return ret;
	}
	ret = __qseecom_qteec_validate_msg(data,
					(struct qseecom_qteec_req *)(&req));
	if (ret)
		return ret;
	/* Keep the original user-virtual pointers for phys translation. */
	req_ptr = req.req_ptr;
	resp_ptr = req.resp_ptr;

	/* find app_id & img_name from list */
	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
			list) {
		if ((ptr_app->app_id == data->client.app_id) &&
			(!strcmp(ptr_app->app_name, data->client.app_name))) {
			found_app = true;
			break;
		}
	}
	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
	if (!found_app) {
		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
			(char *)data->client.app_name);
		return -ENOENT;
	}
	if (__qseecom_find_pending_unload_app(data->client.app_id,
						data->client.app_name)) {
		pr_err("app %d (%s) unload is pending\n",
			data->client.app_id, data->client.app_name);
		return -ENOENT;
	}

	/* validate offsets */
	for (i = 0; i < MAX_ION_FD; i++) {
		if (req.ifd_data[i].fd) {
			if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
				return -EINVAL;
		}
	}
	/* TZ dereferences kernel-virtual addresses within the shared buf. */
	req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req.req_ptr);
	req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
						(uintptr_t)req.resp_ptr);
	ret = __qseecom_update_qteec_req_buf(&req, data, false);
	if (ret)
		return ret;

	if (qseecom.qsee_version < QSEE_VERSION_40) {
		ireq.app_id = data->client.app_id;
		ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq.req_len = req.req_len;
		ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq.resp_len = req.resp_len;
		cmd_buf = (void *)&ireq;
		cmd_len = sizeof(struct qseecom_qteec_ireq);
		ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
		ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		qtee_shmbridge_flush_shm_buf(&data->sglistinfo_shm);
	} else {
		ireq_64bit.app_id = data->client.app_id;
		ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)req_ptr);
		ireq_64bit.req_len = req.req_len;
		ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
						(uintptr_t)resp_ptr);
		ireq_64bit.resp_len = req.resp_len;
		cmd_buf = (void *)&ireq_64bit;
		cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
		ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
		ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
		qtee_shmbridge_flush_shm_buf(&data->sglistinfo_shm);
	}
	reqd_len_sb_in = req.req_len + req.resp_len;
	if (qseecom.whitelist_support)
		*(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
	else
		*(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;

	ret = qseecom_dmabuf_cache_operations(data->client.dmabuf,
					QSEECOM_CACHE_CLEAN);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);

	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
				cmd_buf, cmd_len,
				&resp, sizeof(resp));
	if (ret) {
		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
					ret, data->client.app_id);
		return ret;
	}
	ret = qseecom_dmabuf_cache_operations(data->client.dmabuf,
					QSEECOM_CACHE_INVALIDATE);
	if (ret) {
		pr_err("cache operation failed %d\n", ret);
		return ret;
	}

	if (qseecom.qsee_reentrancy_support) {
		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
	} else {
		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
			ret = __qseecom_process_incomplete_cmd(data, &resp);
			if (ret) {
				pr_err("process_incomplete_cmd failed err: %d\n",
						ret);
				return ret;
			}
		} else {
			if (resp.result != QSEOS_RESULT_SUCCESS) {
				pr_err("Response result %d not supported\n",
								resp.result);
				ret = -EINVAL;
			}
		}
	}
	/* Un-patch the memrefs and free any pre-allocated descriptors. */
	ret = __qseecom_update_qteec_req_buf(&req, data, true);
	if (ret)
		return ret;

	return 0;
}
  6860. static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
  6861. void __user *argp)
  6862. {
  6863. struct qseecom_qteec_modfd_req req;
  6864. int ret = 0;
  6865. ret = copy_from_user(&req, argp,
  6866. sizeof(struct qseecom_qteec_modfd_req));
  6867. if (ret) {
  6868. pr_err("copy_from_user failed\n");
  6869. return ret;
  6870. }
  6871. ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
  6872. QSEOS_TEE_REQUEST_CANCELLATION);
  6873. return ret;
  6874. }
  6875. static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
  6876. {
  6877. if (data->sglist_cnt) {
  6878. memset(data->sglistinfo_ptr, 0,
  6879. SGLISTINFO_TABLE_SIZE);
  6880. data->sglist_cnt = 0;
  6881. }
  6882. }
/*
 * Main ioctl dispatcher for the qseecom character device.
 *
 * Validates the per-fd handle, refuses service once an abort is in
 * progress, then fans out to the per-command handlers.
 *
 * Locking conventions visible in this function:
 *  - app-facing commands serialize on app_access_lock;
 *  - listener commands serialize on listener_access_lock;
 *  - each command brackets its handler with ioctl_count inc/dec and wakes
 *    abort_wq afterwards, so abort/release paths can wait for in-flight
 *    ioctls to drain.
 *
 * Returns 0 on success or a negative errno.
 */
long qseecom_ioctl(struct file *file,
			unsigned int cmd, unsigned long arg)
{
	int ret = 0;
	struct qseecom_dev_handle *data = file->private_data;
	void __user *argp = (void __user *) arg;
	bool perf_enabled = false;

	if (!data) {
		pr_err("Invalid/uninitialized device handle\n");
		return -EINVAL;
	}
	if (data->abort) {
		pr_err("Aborting qseecom driver\n");
		return -ENODEV;
	}
	/*
	 * Kick the deferred listener-unregister worker for every command
	 * except the listener request/response path itself (presumably so
	 * those don't race an in-progress unregister — TODO confirm against
	 * the kthread implementation, which is outside this view).
	 */
	if (cmd != QSEECOM_IOCTL_RECEIVE_REQ &&
		cmd != QSEECOM_IOCTL_SEND_RESP_REQ &&
		cmd != QSEECOM_IOCTL_SEND_MODFD_RESP &&
		cmd != QSEECOM_IOCTL_SEND_MODFD_RESP_64)
		__wakeup_unregister_listener_kthread();
	__wakeup_unload_app_kthread();
	switch (cmd) {
	case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
		/* Only a fresh (GENERIC) fd may become a listener. */
		if (data->type != QSEECOM_GENERIC) {
			pr_err("reg lstnr req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		pr_debug("ioctl register_listener_req()\n");
		mutex_lock(&listener_access_lock);
		atomic_inc(&data->ioctl_count);
		data->type = QSEECOM_LISTENER_SERVICE;
		ret = qseecom_register_listener(data, argp);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&listener_access_lock);
		if (ret)
			pr_err("failed qseecom_register_listener: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
		if ((data->listener.id == 0) ||
			(data->type != QSEECOM_LISTENER_SERVICE)) {
			pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
						data->type, data->listener.id);
			ret = -EINVAL;
			break;
		}
		pr_debug("ioctl unregister_listener_req()\n");
		mutex_lock(&listener_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_unregister_listener(data);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&listener_access_lock);
		if (ret)
			pr_err("failed qseecom_unregister_listener: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_SEND_CMD_REQ: {
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		if (qseecom.support_bus_scaling) {
			/* register bus bw in case the client doesn't do it */
			if (!data->mode) {
				mutex_lock(&qsee_bw_mutex);
				__qseecom_register_bus_bandwidth_needs(
								data, HIGH);
				mutex_unlock(&qsee_bw_mutex);
			}
			ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
			if (ret) {
				pr_err("Failed to set bw.\n");
				ret = -EINVAL;
				mutex_unlock(&app_access_lock);
				break;
			}
		}
		/*
		 * On targets where crypto clock is handled by HLOS,
		 * if clk_access_cnt is zero and perf_enabled is false,
		 * then the crypto clock was not enabled before sending cmd to
		 * tz, qseecom will enable the clock to avoid service failure.
		 */
		if (!qseecom.no_clock_support &&
			!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
			pr_debug("ce clock is not enabled!\n");
			ret = qseecom_perf_enable(data);
			if (ret) {
				pr_err("Failed to vote for clock with err %d\n",
						ret);
				mutex_unlock(&app_access_lock);
				ret = -EINVAL;
				break;
			}
			perf_enabled = true;
		}
		atomic_inc(&data->ioctl_count);
		ret = qseecom_send_cmd(data, argp);
		if (qseecom.support_bus_scaling)
			__qseecom_add_bw_scale_down_timer(
				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		/* Drop the vote taken above only if WE enabled the clocks. */
		if (perf_enabled) {
			qsee_disable_clock_vote(data, CLK_DFAB);
			qsee_disable_clock_vote(data, CLK_SFPB);
		}
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed qseecom_send_cmd: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
	case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
		/* Same flow as SEND_CMD_REQ, plus ion-fd buffer fixups. */
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		if (qseecom.support_bus_scaling) {
			if (!data->mode) {
				mutex_lock(&qsee_bw_mutex);
				__qseecom_register_bus_bandwidth_needs(
								data, HIGH);
				mutex_unlock(&qsee_bw_mutex);
			}
			ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
			if (ret) {
				pr_err("Failed to set bw.\n");
				mutex_unlock(&app_access_lock);
				ret = -EINVAL;
				break;
			}
		}
		/*
		 * On targets where crypto clock is handled by HLOS,
		 * if clk_access_cnt is zero and perf_enabled is false,
		 * then the crypto clock was not enabled before sending cmd to
		 * tz, qseecom will enable the clock to avoid service failure.
		 */
		if (!qseecom.no_clock_support &&
			!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
			pr_debug("ce clock is not enabled!\n");
			ret = qseecom_perf_enable(data);
			if (ret) {
				pr_err("Failed to vote for clock with err %d\n",
						ret);
				mutex_unlock(&app_access_lock);
				ret = -EINVAL;
				break;
			}
			perf_enabled = true;
		}
		atomic_inc(&data->ioctl_count);
		if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
			ret = qseecom_send_modfd_cmd(data, argp);
		else
			ret = qseecom_send_modfd_cmd_64(data, argp);
		if (qseecom.support_bus_scaling)
			__qseecom_add_bw_scale_down_timer(
				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		if (perf_enabled) {
			qsee_disable_clock_vote(data, CLK_DFAB);
			qsee_disable_clock_vote(data, CLK_SFPB);
		}
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed qseecom_send_cmd: %d\n", ret);
		/* Clear the sglist table populated for the modfd buffers. */
		__qseecom_clean_data_sglistinfo(data);
		break;
	}
	case QSEECOM_IOCTL_RECEIVE_REQ: {
		/* Listener side: block until a request arrives from TZ. */
		if ((data->listener.id == 0) ||
			(data->type != QSEECOM_LISTENER_SERVICE)) {
			pr_err("receive req: invalid handle (%d), lid(%d)\n",
						data->type, data->listener.id);
			ret = -EINVAL;
			break;
		}
		atomic_inc(&data->ioctl_count);
		ret = qseecom_receive_req(data);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		/* -ERESTARTSYS just means we were interrupted; not an error. */
		if (ret && (ret != -ERESTARTSYS))
			pr_err("failed qseecom_receive_req: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_SEND_RESP_REQ: {
		if ((data->listener.id == 0) ||
			(data->type != QSEECOM_LISTENER_SERVICE)) {
			pr_err("send resp req: invalid handle (%d), lid(%d)\n",
						data->type, data->listener.id);
			ret = -EINVAL;
			break;
		}
		mutex_lock(&listener_access_lock);
		atomic_inc(&data->ioctl_count);
		/* Reentrancy-capable QSEE takes the per-handle resp path. */
		if (!qseecom.qsee_reentrancy_support)
			ret = qseecom_send_resp();
		else
			ret = qseecom_reentrancy_send_resp(data);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&listener_access_lock);
		if (ret)
			pr_err("failed qseecom_send_resp: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
		if ((data->type != QSEECOM_CLIENT_APP) &&
			(data->type != QSEECOM_GENERIC) &&
			(data->type != QSEECOM_SECURE_SERVICE)) {
			pr_err("set mem param req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_set_client_mem_param(data, argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed Qqseecom_set_mem_param request: %d\n",
								ret);
		break;
	}
	case QSEECOM_IOCTL_LOAD_APP_REQ: {
		if ((data->type != QSEECOM_GENERIC) &&
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("load app req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		/* This fd now represents a client app. */
		data->type = QSEECOM_CLIENT_APP;
		pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_load_app(data, argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed load_app request: %d\n", ret);
		__wakeup_unload_app_kthread();
		break;
	}
	case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_unload_app(data, false);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed unload_app request: %d\n", ret);
		__wakeup_unload_app_kthread();
		break;
	}
	case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
		atomic_inc(&data->ioctl_count);
		ret = qseecom_get_qseos_version(data, argp);
		if (ret)
			pr_err("qseecom_get_qseos_version: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		break;
	}
	case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
		if ((data->type != QSEECOM_GENERIC) &&
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("perf enable req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		if ((data->type == QSEECOM_CLIENT_APP) &&
			(data->client.app_id == 0)) {
			pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		atomic_inc(&data->ioctl_count);
		/* Bus-scaling targets register bw needs; others vote clocks. */
		if (qseecom.support_bus_scaling) {
			mutex_lock(&qsee_bw_mutex);
			__qseecom_register_bus_bandwidth_needs(data, HIGH);
			mutex_unlock(&qsee_bw_mutex);
		} else {
			ret = qseecom_perf_enable(data);
			if (ret)
				pr_err("Fail to vote for clocks %d\n", ret);
		}
		atomic_dec(&data->ioctl_count);
		break;
	}
	case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
		if ((data->type != QSEECOM_SECURE_SERVICE) &&
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("perf disable req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		if ((data->type == QSEECOM_CLIENT_APP) &&
			(data->client.app_id == 0)) {
			pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		atomic_inc(&data->ioctl_count);
		if (!qseecom.support_bus_scaling) {
			qsee_disable_clock_vote(data, CLK_DFAB);
			qsee_disable_clock_vote(data, CLK_SFPB);
		} else {
			mutex_lock(&qsee_bw_mutex);
			qseecom_unregister_bus_bandwidth_needs(data);
			mutex_unlock(&qsee_bw_mutex);
		}
		atomic_dec(&data->ioctl_count);
		break;
	}

	case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
		/* If crypto clock is not handled by HLOS, return directly. */
		if (qseecom.no_clock_support) {
			pr_debug("crypto clock is not handled by HLOS\n");
			break;
		}
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		atomic_inc(&data->ioctl_count);
		ret = qseecom_scale_bus_bandwidth(data, argp);
		atomic_dec(&data->ioctl_count);
		break;
	}
	case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
		if (data->type != QSEECOM_GENERIC) {
			pr_err("load ext elf req: invalid client handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		/* Marked released: no further app ioctls on this fd. */
		data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_load_external_elf(data, argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed load_external_elf request: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
		if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
			pr_err("unload ext elf req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_unload_external_elf(data);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed unload_app request: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
		if ((data->type != QSEECOM_GENERIC) &&
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("app loaded query req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		data->type = QSEECOM_CLIENT_APP;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
		ret = qseecom_query_app_loaded(data, argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
		if (data->type != QSEECOM_GENERIC) {
			pr_err("send cmd svc req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		data->type = QSEECOM_SECURE_SERVICE;
		if (qseecom.qsee_version < QSEE_VERSION_03) {
			pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_send_service_cmd(data, argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_CREATE_KEY_REQ: {
		if (!(qseecom.support_pfe || qseecom.support_fde))
			pr_err("Features requiring key init not supported\n");
		if (data->type != QSEECOM_GENERIC) {
			pr_err("create key req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_05) {
			pr_err("Create Key feature unsupported: qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_create_key(data, argp);
		if (ret)
			pr_err("failed to create encryption key: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_WIPE_KEY_REQ: {
		if (!(qseecom.support_pfe || qseecom.support_fde))
			pr_err("Features requiring key init not supported\n");
		if (data->type != QSEECOM_GENERIC) {
			pr_err("wipe key req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_05) {
			pr_err("Wipe Key feature unsupported in qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_wipe_key(data, argp);
		if (ret)
			pr_err("failed to wipe encryption key: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
		if (!(qseecom.support_pfe || qseecom.support_fde))
			pr_err("Features requiring key init not supported\n");
		if (data->type != QSEECOM_GENERIC) {
			pr_err("update key req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_05) {
			pr_err("Update Key feature unsupported in qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_update_key_user_info(data, argp);
		if (ret)
			pr_err("failed to update key user info: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
		if (data->type != QSEECOM_GENERIC) {
			pr_err("save part hash req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_save_partition_hash(argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
		if (data->type != QSEECOM_GENERIC) {
			pr_err("ES activated req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_is_es_activated(argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
		if (data->type != QSEECOM_GENERIC) {
			pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
								data->type);
			ret = -EINVAL;
			break;
		}
		data->released = true;
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_mdtp_cipher_dip(argp);
		atomic_dec(&data->ioctl_count);
		mutex_unlock(&app_access_lock);
		break;
	}
	case QSEECOM_IOCTL_SEND_MODFD_RESP:
	case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
		if ((data->listener.id == 0) ||
			(data->type != QSEECOM_LISTENER_SERVICE)) {
			pr_err("receive req: invalid handle (%d), lid(%d)\n",
						data->type, data->listener.id);
			ret = -EINVAL;
			break;
		}
		mutex_lock(&listener_access_lock);
		atomic_inc(&data->ioctl_count);
		if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
			ret = qseecom_send_modfd_resp(data, argp);
		else
			ret = qseecom_send_modfd_resp_64(data, argp);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&listener_access_lock);
		if (ret)
			pr_err("failed qseecom_send_mod_resp: %d\n", ret);
		__qseecom_clean_data_sglistinfo(data);
		break;
	}
	case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("Open session: invalid handle (%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		/* GP (GlobalPlatform) API needs QSEE >= 4.0. */
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			pr_err("GP feature unsupported: qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_qteec_open_session(data, argp);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed open_session_cmd: %d\n", ret);
		__qseecom_clean_data_sglistinfo(data);
		break;
	}
	case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("Close session: invalid handle (%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			pr_err("GP feature unsupported: qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_qteec_close_session(data, argp);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed close_session_cmd: %d\n", ret);
		break;
	}
	case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			pr_err("GP feature unsupported: qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed Invoke cmd: %d\n", ret);
		__qseecom_clean_data_sglistinfo(data);
		break;
	}
	case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
		if ((data->client.app_id == 0) ||
			(data->type != QSEECOM_CLIENT_APP)) {
			pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
					data->type, data->client.app_id);
			ret = -EINVAL;
			break;
		}
		if (qseecom.qsee_version < QSEE_VERSION_40) {
			pr_err("GP feature unsupported: qsee ver %u\n",
				qseecom.qsee_version);
			return -EINVAL;
		}
		/* Only one client allowed here at a time */
		mutex_lock(&app_access_lock);
		atomic_inc(&data->ioctl_count);
		ret = qseecom_qteec_request_cancellation(data, argp);
		atomic_dec(&data->ioctl_count);
		wake_up_all(&data->abort_wq);
		mutex_unlock(&app_access_lock);
		if (ret)
			pr_err("failed request_cancellation: %d\n", ret);
		break;
	}
	case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
		atomic_inc(&data->ioctl_count);
		ret = qseecom_get_ce_info(data, argp);
		if (ret)
			pr_err("failed get fde ce pipe info: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		break;
	}
	case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
		atomic_inc(&data->ioctl_count);
		ret = qseecom_free_ce_info(data, argp);
		if (ret)
			pr_err("failed get fde ce pipe info: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		break;
	}
	case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
		atomic_inc(&data->ioctl_count);
		ret = qseecom_query_ce_info(data, argp);
		if (ret)
			pr_err("failed get fde ce pipe info: %d\n", ret);
		atomic_dec(&data->ioctl_count);
		break;
	}
	case QSEECOM_IOCTL_SET_ICE_INFO: {
		struct qseecom_ice_data_t ice_data;

		ret = copy_from_user(&ice_data, argp, sizeof(ice_data));
		if (ret) {
			pr_err("copy_from_user failed\n");
			return -EFAULT;
		}
		qcom_ice_set_fde_flag(ice_data.flag);
		break;
	}
	case QSEECOM_IOCTL_FBE_CLEAR_KEY: {
		pr_err("QSEECOM_IOCTL_FBE_CLEAR_KEY IOCTL is deprecated\n");
		return -EINVAL;
	}
	default:
		pr_err("Invalid IOCTL: 0x%x\n", cmd);
		return -ENOIOCTLCMD;
	}
	return ret;
}
  7600. static int qseecom_open(struct inode *inode, struct file *file)
  7601. {
  7602. int ret = 0;
  7603. struct qseecom_dev_handle *data;
  7604. data = kzalloc(sizeof(*data), GFP_KERNEL);
  7605. if (!data)
  7606. {
  7607. return -ENOMEM;
  7608. }
  7609. file->private_data = data;
  7610. data->abort = 0;
  7611. data->type = QSEECOM_GENERIC;
  7612. data->released = false;
  7613. memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
  7614. data->mode = INACTIVE;
  7615. init_waitqueue_head(&data->abort_wq);
  7616. atomic_set(&data->ioctl_count, 0);
  7617. data->sglistinfo_ptr = (struct sglist_info *)__qseecom_alloc_tzbuf(
  7618. sizeof(struct sglist_info) * MAX_ION_FD,
  7619. &data->sglistinfo_shm.paddr,
  7620. &data->sglistinfo_shm);
  7621. if (!data->sglistinfo_ptr)
  7622. {
  7623. return -ENOMEM;
  7624. }
  7625. return ret;
  7626. }
  7627. static void __qseecom_release_disable_clk(struct qseecom_dev_handle *data)
  7628. {
  7629. if (qseecom.no_clock_support)
  7630. return;
  7631. if (qseecom.support_bus_scaling) {
  7632. mutex_lock(&qsee_bw_mutex);
  7633. if (data->mode != INACTIVE) {
  7634. qseecom_unregister_bus_bandwidth_needs(data);
  7635. if (qseecom.cumulative_mode == INACTIVE)
  7636. __qseecom_set_msm_bus_request(INACTIVE);
  7637. }
  7638. mutex_unlock(&qsee_bw_mutex);
  7639. } else {
  7640. if (data->fast_load_enabled)
  7641. qsee_disable_clock_vote(data, CLK_SFPB);
  7642. if (data->perf_enabled)
  7643. qsee_disable_clock_vote(data, CLK_DFAB);
  7644. }
  7645. }
/*
 * ->release() for the qseecom character device.
 *
 * Drops outstanding clock/bandwidth votes, then performs type-specific
 * teardown.  Ownership note: when cleanup is handed off to a worker
 * (listener unregister, app unload), free_private_data is cleared and the
 * handle must NOT be freed here — presumably the worker kthread frees it
 * once the deferred work completes (the kthreads are outside this view;
 * confirm there).
 */
static int qseecom_release(struct inode *inode, struct file *file)
{
	struct qseecom_dev_handle *data = file->private_data;
	int ret = 0;
	bool free_private_data = true;

	__qseecom_release_disable_clk(data);
	if (!data->released) {
		pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
			data->type, data->mode, data);
		switch (data->type) {
		case QSEECOM_LISTENER_SERVICE:
			pr_debug("release lsnr svc %d\n", data->listener.id);
			mutex_lock(&listener_access_lock);
			ret = qseecom_unregister_listener(data);
			/* On success the handle is owned elsewhere now. */
			if (!ret)
				free_private_data = false;
			data->listener.release_called = true;
			mutex_unlock(&listener_access_lock);
			__wakeup_unregister_listener_kthread();
			break;
		case QSEECOM_CLIENT_APP:
			pr_debug("release app %d (%s)\n",
				data->client.app_id, data->client.app_name);
			if (data->client.app_id) {
				/* Defer unload to the worker; it frees data. */
				free_private_data = false;
				mutex_lock(&unload_app_pending_list_lock);
				ret = qseecom_prepare_unload_app(data);
				mutex_unlock(&unload_app_pending_list_lock);
				__wakeup_unload_app_kthread();
			}
			break;
		case QSEECOM_SECURE_SERVICE:
		case QSEECOM_GENERIC:
			/* Unmap the client shared buffer, if one was set up. */
			if (data->client.dmabuf) {
				qseecom_vaddr_unmap(data->client.sb_virt,
					data->client.sgt, data->client.attach,
					data->client.dmabuf);
				MAKE_NULL(data->client.sgt, data->client.attach,
					data->client.dmabuf);
			}
			break;
		case QSEECOM_UNAVAILABLE_CLIENT_APP:
			break;
		default:
			pr_err("Unsupported clnt_handle_type %d\n",
				data->type);
			break;
		}
	}
	if (free_private_data) {
		__qseecom_free_tzbuf(&data->sglistinfo_shm);
		kfree(data);
	}
	return ret;
}
/* Character-device entry points for the qseecom device node. */
static const struct file_operations qseecom_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = qseecom_ioctl,
	.open = qseecom_open,
	.release = qseecom_release
};
  7707. static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
  7708. {
  7709. int rc = 0;
  7710. struct device *pdev;
  7711. struct qseecom_clk *qclk;
  7712. char *core_clk_src = NULL;
  7713. char *core_clk = NULL;
  7714. char *iface_clk = NULL;
  7715. char *bus_clk = NULL;
  7716. switch (ce) {
  7717. case CLK_QSEE: {
  7718. core_clk_src = "core_clk_src";
  7719. core_clk = "core_clk";
  7720. iface_clk = "iface_clk";
  7721. bus_clk = "bus_clk";
  7722. qclk = &qseecom.qsee;
  7723. qclk->instance = CLK_QSEE;
  7724. break;
  7725. };
  7726. case CLK_CE_DRV: {
  7727. core_clk_src = "ce_drv_core_clk_src";
  7728. core_clk = "ce_drv_core_clk";
  7729. iface_clk = "ce_drv_iface_clk";
  7730. bus_clk = "ce_drv_bus_clk";
  7731. qclk = &qseecom.ce_drv;
  7732. qclk->instance = CLK_CE_DRV;
  7733. break;
  7734. };
  7735. default:
  7736. pr_err("Invalid ce hw instance: %d!\n", ce);
  7737. return -EIO;
  7738. }
  7739. if (qseecom.no_clock_support) {
  7740. qclk->ce_core_clk = NULL;
  7741. qclk->ce_clk = NULL;
  7742. qclk->ce_bus_clk = NULL;
  7743. qclk->ce_core_src_clk = NULL;
  7744. return 0;
  7745. }
  7746. pdev = qseecom.pdev;
  7747. /* Get CE3 src core clk. */
  7748. qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
  7749. if (!IS_ERR(qclk->ce_core_src_clk)) {
  7750. rc = clk_set_rate(qclk->ce_core_src_clk,
  7751. qseecom.ce_opp_freq_hz);
  7752. if (rc) {
  7753. clk_put(qclk->ce_core_src_clk);
  7754. qclk->ce_core_src_clk = NULL;
  7755. pr_err("Unable to set the core src clk @%uMhz.\n",
  7756. qseecom.ce_opp_freq_hz/CE_CLK_DIV);
  7757. return -EIO;
  7758. }
  7759. } else {
  7760. pr_warn("Unable to get CE core src clk, set to NULL\n");
  7761. qclk->ce_core_src_clk = NULL;
  7762. }
  7763. /* Get CE core clk */
  7764. qclk->ce_core_clk = clk_get(pdev, core_clk);
  7765. if (IS_ERR(qclk->ce_core_clk)) {
  7766. rc = PTR_ERR(qclk->ce_core_clk);
  7767. pr_err("Unable to get CE core clk\n");
  7768. if (qclk->ce_core_src_clk != NULL)
  7769. clk_put(qclk->ce_core_src_clk);
  7770. return -EIO;
  7771. }
  7772. /* Get CE Interface clk */
  7773. qclk->ce_clk = clk_get(pdev, iface_clk);
  7774. if (IS_ERR(qclk->ce_clk)) {
  7775. rc = PTR_ERR(qclk->ce_clk);
  7776. pr_err("Unable to get CE interface clk\n");
  7777. if (qclk->ce_core_src_clk != NULL)
  7778. clk_put(qclk->ce_core_src_clk);
  7779. clk_put(qclk->ce_core_clk);
  7780. return -EIO;
  7781. }
  7782. /* Get CE AXI clk */
  7783. qclk->ce_bus_clk = clk_get(pdev, bus_clk);
  7784. if (IS_ERR(qclk->ce_bus_clk)) {
  7785. rc = PTR_ERR(qclk->ce_bus_clk);
  7786. pr_err("Unable to get CE BUS interface clk\n");
  7787. if (qclk->ce_core_src_clk != NULL)
  7788. clk_put(qclk->ce_core_src_clk);
  7789. clk_put(qclk->ce_core_clk);
  7790. clk_put(qclk->ce_clk);
  7791. return -EIO;
  7792. }
  7793. return rc;
  7794. }
  7795. static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
  7796. {
  7797. struct qseecom_clk *qclk;
  7798. if (ce == CLK_QSEE)
  7799. qclk = &qseecom.qsee;
  7800. else
  7801. qclk = &qseecom.ce_drv;
  7802. if (qclk->ce_clk != NULL) {
  7803. clk_put(qclk->ce_clk);
  7804. qclk->ce_clk = NULL;
  7805. }
  7806. if (qclk->ce_core_clk != NULL) {
  7807. clk_put(qclk->ce_core_clk);
  7808. qclk->ce_core_clk = NULL;
  7809. }
  7810. if (qclk->ce_bus_clk != NULL) {
  7811. clk_put(qclk->ce_bus_clk);
  7812. qclk->ce_bus_clk = NULL;
  7813. }
  7814. if (qclk->ce_core_src_clk != NULL) {
  7815. clk_put(qclk->ce_core_src_clk);
  7816. qclk->ce_core_src_clk = NULL;
  7817. }
  7818. qclk->instance = CLK_INVALID;
  7819. }
/*
 * qseecom_retrieve_ce_data() - read crypto-engine (CE) configuration from
 * device tree and build the FDE/PFE unit tables in qseecom.ce_info.
 *
 * Two DT schemas are supported:
 *  - new format: "qcom,full-disk-encrypt-info" / "qcom,per-file-encrypt-info"
 *    arrays of struct qseecom_crypto_info entries, grouped by unit_num;
 *  - old format ("old db"): single pipe-pair properties
 *    "qcom,disk-encrypt-pipe-pair" / "qcom,file-encrypt-pipe-pair", used only
 *    when neither new-format property is present.
 *
 * Returns 0 on success or a negative errno; on failure all partially built
 * ce_info allocations are freed before returning.
 */
static int qseecom_retrieve_ce_data(struct platform_device *pdev)
{
	int rc = 0;
	uint32_t hlos_num_ce_hw_instances;
	uint32_t disk_encrypt_pipe;
	uint32_t file_encrypt_pipe;
	uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT] = {0};
	int i;
	const int *tbl;
	int size;
	int entry;
	struct qseecom_crypto_info *pfde_tbl = NULL;
	struct qseecom_crypto_info *p;
	int tbl_size;
	int j;
	bool old_db = true;	/* assume old-format DT until a new-format table is found */
	struct qseecom_ce_info_use *pce_info_use;
	uint32_t *unit_tbl = NULL;	/* scratch list of distinct unit numbers */
	int total_units = 0;
	struct qseecom_ce_pipe_entry *pce_entry;

	qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
	qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;

	/* The QSEE CE HW instance is mandatory regardless of FDE/PFE support. */
	if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,qsee-ce-hw-instance",
				&qseecom.ce_info.qsee_ce_hw_instance)) {
		pr_err("Fail to get qsee ce hw instance information.\n");
		rc = -EINVAL;
		goto out;
	} else {
		pr_debug("qsee-ce-hw-instance=0x%x\n",
			qseecom.ce_info.qsee_ce_hw_instance);
	}

	qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
						"qcom,support-fde");
	qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
						"qcom,support-pfe");

	/* No encryption support at all: nothing more to parse (rc stays 0). */
	if (!qseecom.support_pfe && !qseecom.support_fde) {
		pr_warn("Device does not support PFE/FDE\n");
		goto out;
	}

	/* ---- New-format FDE table, if present ---- */
	if (qseecom.support_fde)
		tbl = of_get_property((&pdev->dev)->of_node,
			"qcom,full-disk-encrypt-info", &size);
	else
		tbl = NULL;
	if (tbl) {
		old_db = false;
		/* The property must be a whole number of crypto_info records. */
		if (size % sizeof(struct qseecom_crypto_info)) {
			pr_err("full-disk-encrypt-info tbl size(%d)\n",
				size);
			rc = -EINVAL;
			goto out;
		}
		tbl_size = size / sizeof
				(struct qseecom_crypto_info);
		pfde_tbl = kzalloc(size, GFP_KERNEL);
		unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
		total_units = 0;
		if (!pfde_tbl || !unit_tbl) {
			rc = -ENOMEM;
			goto out;
		}
		if (of_property_read_u32_array((&pdev->dev)->of_node,
			"qcom,full-disk-encrypt-info",
			(u32 *)pfde_tbl, size/sizeof(u32))) {
			pr_err("failed to read full-disk-encrypt-info tbl\n");
			rc = -EINVAL;
			goto out;
		}
		/* Collect the distinct unit numbers referenced by the table. */
		for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
			for (j = 0; j < total_units; j++) {
				if (p->unit_num == *(unit_tbl + j))
					break;
			}
			if (j == total_units) {
				*(unit_tbl + total_units) = p->unit_num;
				total_units++;
			}
		}
		qseecom.ce_info.num_fde = total_units;
		pce_info_use = qseecom.ce_info.fde = kcalloc(
			total_units, sizeof(struct qseecom_ce_info_use),
				GFP_KERNEL);
		if (!pce_info_use) {
			rc = -ENOMEM;
			goto out;
		}
		/* One ce_info_use per unit, holding all its pipe entries. */
		for (j = 0; j < total_units; j++, pce_info_use++) {
			pce_info_use->unit_num = *(unit_tbl + j);
			pce_info_use->alloc = false;
			pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
			pce_info_use->num_ce_pipe_entries = 0;
			pce_info_use->ce_pipe_entry = NULL;
			/* First pass: count the entries belonging to this unit. */
			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
				if (p->unit_num == pce_info_use->unit_num)
					pce_info_use->num_ce_pipe_entries++;
			}
			entry = pce_info_use->num_ce_pipe_entries;
			pce_entry = pce_info_use->ce_pipe_entry =
				kcalloc(entry,
					sizeof(struct qseecom_ce_pipe_entry),
					GFP_KERNEL);
			if (pce_entry == NULL) {
				rc = -ENOMEM;
				goto out;
			}
			/* Second pass: copy the matching entries. */
			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
				if (p->unit_num == pce_info_use->unit_num) {
					pce_entry->ce_num = p->ce;
					pce_entry->ce_pipe_pair =
							p->pipe_pair;
					pce_entry->valid = true;
					pce_entry++;
				}
			}
		}
		kfree(unit_tbl);
		unit_tbl = NULL;
		kfree(pfde_tbl);
		pfde_tbl = NULL;
	}

	/* ---- New-format PFE table, if present (mirrors the FDE path) ---- */
	if (qseecom.support_pfe)
		tbl = of_get_property((&pdev->dev)->of_node,
			"qcom,per-file-encrypt-info", &size);
	else
		tbl = NULL;
	if (tbl) {
		old_db = false;
		if (size % sizeof(struct qseecom_crypto_info)) {
			pr_err("per-file-encrypt-info tbl size(%d)\n",
				size);
			rc = -EINVAL;
			goto out;
		}
		tbl_size = size / sizeof
				(struct qseecom_crypto_info);
		pfde_tbl = kzalloc(size, GFP_KERNEL);
		unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
		total_units = 0;
		if (!pfde_tbl || !unit_tbl) {
			rc = -ENOMEM;
			goto out;
		}
		if (of_property_read_u32_array((&pdev->dev)->of_node,
			"qcom,per-file-encrypt-info",
			(u32 *)pfde_tbl, size/sizeof(u32))) {
			pr_err("failed to read per-file-encrypt-info tbl\n");
			rc = -EINVAL;
			goto out;
		}
		/* Collect the distinct unit numbers referenced by the table. */
		for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
			for (j = 0; j < total_units; j++) {
				if (p->unit_num == *(unit_tbl + j))
					break;
			}
			if (j == total_units) {
				*(unit_tbl + total_units) = p->unit_num;
				total_units++;
			}
		}
		qseecom.ce_info.num_pfe = total_units;
		pce_info_use = qseecom.ce_info.pfe = kcalloc(
			total_units, sizeof(struct qseecom_ce_info_use),
				GFP_KERNEL);
		if (!pce_info_use) {
			rc = -ENOMEM;
			goto out;
		}
		for (j = 0; j < total_units; j++, pce_info_use++) {
			pce_info_use->unit_num = *(unit_tbl + j);
			pce_info_use->alloc = false;
			pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
			pce_info_use->num_ce_pipe_entries = 0;
			pce_info_use->ce_pipe_entry = NULL;
			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
				if (p->unit_num == pce_info_use->unit_num)
					pce_info_use->num_ce_pipe_entries++;
			}
			entry = pce_info_use->num_ce_pipe_entries;
			pce_entry = pce_info_use->ce_pipe_entry =
				kcalloc(entry,
					sizeof(struct qseecom_ce_pipe_entry),
					GFP_KERNEL);
			if (pce_entry == NULL) {
				rc = -ENOMEM;
				goto out;
			}
			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
				if (p->unit_num == pce_info_use->unit_num) {
					pce_entry->ce_num = p->ce;
					pce_entry->ce_pipe_pair =
							p->pipe_pair;
					pce_entry->valid = true;
					pce_entry++;
				}
			}
		}
		kfree(unit_tbl);
		unit_tbl = NULL;
		kfree(pfde_tbl);
		pfde_tbl = NULL;
	}

	/*
	 * New-format tables were found: skip the legacy parsing.  NOTE(review):
	 * in this case hlos_ce_hw_instance[] is still all-zero, so
	 * ce_drv.instance is set to 0 at out1 — presumably intentional for
	 * new-format targets; confirm against the DT bindings.
	 */
	if (!old_db)
		goto out1;

	/* ---- Legacy ("old db") format ---- */
	if (of_property_read_bool((&pdev->dev)->of_node,
			"qcom,support-multiple-ce-hw-instance")) {
		if (of_property_read_u32((&pdev->dev)->of_node,
			"qcom,hlos-num-ce-hw-instances",
				&hlos_num_ce_hw_instances)) {
			pr_err("Fail: get hlos number of ce hw instance\n");
			rc = -EINVAL;
			goto out;
		}
	} else {
		hlos_num_ce_hw_instances = 1;
	}

	/* Bound check before filling the fixed-size instance array below. */
	if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
		pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
			MAX_CE_PIPE_PAIR_PER_UNIT);
		rc = -EINVAL;
		goto out;
	}

	if (of_property_read_u32_array((&pdev->dev)->of_node,
		"qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
		hlos_num_ce_hw_instances)) {
		pr_err("Fail: get hlos ce hw instance info\n");
		rc = -EINVAL;
		goto out;
	}

	if (qseecom.support_fde) {
		/* Legacy FDE: a single default unit covering all HW instances. */
		pce_info_use = qseecom.ce_info.fde =
			kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
		if (!pce_info_use) {
			rc = -ENOMEM;
			goto out;
		}
		/* by default for old db */
		qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
		pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
		pce_info_use->alloc = false;
		pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
		pce_info_use->ce_pipe_entry = NULL;
		if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,disk-encrypt-pipe-pair",
				&disk_encrypt_pipe)) {
			pr_err("Fail to get FDE pipe information.\n");
			rc = -EINVAL;
			goto out;
		} else {
			pr_debug("disk-encrypt-pipe-pair=0x%x\n",
				disk_encrypt_pipe);
		}
		entry = pce_info_use->num_ce_pipe_entries =
				hlos_num_ce_hw_instances;
		pce_entry = pce_info_use->ce_pipe_entry =
			kcalloc(entry,
				sizeof(struct qseecom_ce_pipe_entry),
				GFP_KERNEL);
		if (pce_entry == NULL) {
			rc = -ENOMEM;
			goto out;
		}
		/* Same pipe pair on every HLOS CE HW instance. */
		for (i = 0; i < entry; i++) {
			pce_entry->ce_num = hlos_ce_hw_instance[i];
			pce_entry->ce_pipe_pair = disk_encrypt_pipe;
			pce_entry->valid = 1;
			pce_entry++;
		}
	} else {
		pr_warn("Device does not support FDE\n");
		disk_encrypt_pipe = 0xff;
	}

	if (qseecom.support_pfe) {
		/* Legacy PFE: mirrors the legacy FDE path above. */
		pce_info_use = qseecom.ce_info.pfe =
			kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
		if (!pce_info_use) {
			rc = -ENOMEM;
			goto out;
		}
		/* by default for old db */
		qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
		pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
		pce_info_use->alloc = false;
		pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
		pce_info_use->ce_pipe_entry = NULL;
		if (of_property_read_u32((&pdev->dev)->of_node,
				"qcom,file-encrypt-pipe-pair",
				&file_encrypt_pipe)) {
			pr_err("Fail to get PFE pipe information.\n");
			rc = -EINVAL;
			goto out;
		} else {
			pr_debug("file-encrypt-pipe-pair=0x%x\n",
				file_encrypt_pipe);
		}
		entry = pce_info_use->num_ce_pipe_entries =
				hlos_num_ce_hw_instances;
		pce_entry = pce_info_use->ce_pipe_entry =
			kcalloc(entry,
				sizeof(struct qseecom_ce_pipe_entry),
				GFP_KERNEL);
		if (pce_entry == NULL) {
			rc = -ENOMEM;
			goto out;
		}
		for (i = 0; i < entry; i++) {
			pce_entry->ce_num = hlos_ce_hw_instance[i];
			pce_entry->ce_pipe_pair = file_encrypt_pipe;
			pce_entry->valid = 1;
			pce_entry++;
		}
	} else {
		pr_warn("Device does not support PFE\n");
		file_encrypt_pipe = 0xff;
	}

out1:
	qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
	qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
out:
	/* On failure, free everything built so far; kfree(NULL) is a no-op. */
	if (rc) {
		if (qseecom.ce_info.fde) {
			pce_info_use = qseecom.ce_info.fde;
			for (i = 0; i < qseecom.ce_info.num_fde; i++) {
				pce_entry = pce_info_use->ce_pipe_entry;
				kfree(pce_entry);
				pce_info_use++;
			}
		}
		kfree(qseecom.ce_info.fde);
		qseecom.ce_info.fde = NULL;
		if (qseecom.ce_info.pfe) {
			pce_info_use = qseecom.ce_info.pfe;
			for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
				pce_entry = pce_info_use->ce_pipe_entry;
				kfree(pce_entry);
				pce_info_use++;
			}
		}
		kfree(qseecom.ce_info.pfe);
		qseecom.ce_info.pfe = NULL;
	}
	kfree(unit_tbl);
	kfree(pfde_tbl);
	return rc;
}
  8165. static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
  8166. void __user *argp)
  8167. {
  8168. struct qseecom_ce_info_req req;
  8169. struct qseecom_ce_info_req *pinfo = &req;
  8170. int ret = 0;
  8171. int i;
  8172. unsigned int entries;
  8173. struct qseecom_ce_info_use *pce_info_use, *p;
  8174. int total = 0;
  8175. bool found = false;
  8176. struct qseecom_ce_pipe_entry *pce_entry;
  8177. ret = copy_from_user(pinfo, argp,
  8178. sizeof(struct qseecom_ce_info_req));
  8179. if (ret) {
  8180. pr_err("copy_from_user failed\n");
  8181. return ret;
  8182. }
  8183. switch (pinfo->usage) {
  8184. case QSEOS_KM_USAGE_DISK_ENCRYPTION:
  8185. case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
  8186. case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
  8187. if (qseecom.support_fde) {
  8188. p = qseecom.ce_info.fde;
  8189. total = qseecom.ce_info.num_fde;
  8190. } else {
  8191. pr_err("system does not support fde\n");
  8192. return -EINVAL;
  8193. }
  8194. break;
  8195. case QSEOS_KM_USAGE_FILE_ENCRYPTION:
  8196. if (qseecom.support_pfe) {
  8197. p = qseecom.ce_info.pfe;
  8198. total = qseecom.ce_info.num_pfe;
  8199. } else {
  8200. pr_err("system does not support pfe\n");
  8201. return -EINVAL;
  8202. }
  8203. break;
  8204. default:
  8205. pr_err("unsupported usage %d\n", pinfo->usage);
  8206. return -EINVAL;
  8207. }
  8208. pce_info_use = NULL;
  8209. for (i = 0; i < total; i++) {
  8210. if (!p->alloc)
  8211. pce_info_use = p;
  8212. else if (!memcmp(p->handle, pinfo->handle,
  8213. MAX_CE_INFO_HANDLE_SIZE)) {
  8214. pce_info_use = p;
  8215. found = true;
  8216. break;
  8217. }
  8218. p++;
  8219. }
  8220. if (pce_info_use == NULL)
  8221. return -EBUSY;
  8222. pinfo->unit_num = pce_info_use->unit_num;
  8223. if (!pce_info_use->alloc) {
  8224. pce_info_use->alloc = true;
  8225. memcpy(pce_info_use->handle,
  8226. pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
  8227. }
  8228. if (pce_info_use->num_ce_pipe_entries >
  8229. MAX_CE_PIPE_PAIR_PER_UNIT)
  8230. entries = MAX_CE_PIPE_PAIR_PER_UNIT;
  8231. else
  8232. entries = pce_info_use->num_ce_pipe_entries;
  8233. pinfo->num_ce_pipe_entries = entries;
  8234. pce_entry = pce_info_use->ce_pipe_entry;
  8235. for (i = 0; i < entries; i++, pce_entry++)
  8236. pinfo->ce_pipe_entry[i] = *pce_entry;
  8237. for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
  8238. pinfo->ce_pipe_entry[i].valid = 0;
  8239. if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
  8240. pr_err("copy_to_user failed\n");
  8241. ret = -EFAULT;
  8242. }
  8243. return ret;
  8244. }
  8245. static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
  8246. void __user *argp)
  8247. {
  8248. struct qseecom_ce_info_req req;
  8249. struct qseecom_ce_info_req *pinfo = &req;
  8250. int ret = 0;
  8251. struct qseecom_ce_info_use *p;
  8252. int total = 0;
  8253. int i;
  8254. bool found = false;
  8255. ret = copy_from_user(pinfo, argp,
  8256. sizeof(struct qseecom_ce_info_req));
  8257. if (ret)
  8258. return ret;
  8259. switch (pinfo->usage) {
  8260. case QSEOS_KM_USAGE_DISK_ENCRYPTION:
  8261. case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
  8262. case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
  8263. if (qseecom.support_fde) {
  8264. p = qseecom.ce_info.fde;
  8265. total = qseecom.ce_info.num_fde;
  8266. } else {
  8267. pr_err("system does not support fde\n");
  8268. return -EINVAL;
  8269. }
  8270. break;
  8271. case QSEOS_KM_USAGE_FILE_ENCRYPTION:
  8272. if (qseecom.support_pfe) {
  8273. p = qseecom.ce_info.pfe;
  8274. total = qseecom.ce_info.num_pfe;
  8275. } else {
  8276. pr_err("system does not support pfe\n");
  8277. return -EINVAL;
  8278. }
  8279. break;
  8280. default:
  8281. pr_err("unsupported usage %d\n", pinfo->usage);
  8282. return -EINVAL;
  8283. }
  8284. for (i = 0; i < total; i++) {
  8285. if (p->alloc &&
  8286. !memcmp(p->handle, pinfo->handle,
  8287. MAX_CE_INFO_HANDLE_SIZE)) {
  8288. memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
  8289. p->alloc = false;
  8290. found = true;
  8291. break;
  8292. }
  8293. p++;
  8294. }
  8295. return ret;
  8296. }
  8297. static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
  8298. void __user *argp)
  8299. {
  8300. struct qseecom_ce_info_req req;
  8301. struct qseecom_ce_info_req *pinfo = &req;
  8302. int ret = 0;
  8303. int i;
  8304. unsigned int entries;
  8305. struct qseecom_ce_info_use *pce_info_use, *p;
  8306. int total = 0;
  8307. bool found = false;
  8308. struct qseecom_ce_pipe_entry *pce_entry;
  8309. ret = copy_from_user(pinfo, argp,
  8310. sizeof(struct qseecom_ce_info_req));
  8311. if (ret)
  8312. return ret;
  8313. switch (pinfo->usage) {
  8314. case QSEOS_KM_USAGE_DISK_ENCRYPTION:
  8315. case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
  8316. case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
  8317. if (qseecom.support_fde) {
  8318. p = qseecom.ce_info.fde;
  8319. total = qseecom.ce_info.num_fde;
  8320. } else {
  8321. pr_err("system does not support fde\n");
  8322. return -EINVAL;
  8323. }
  8324. break;
  8325. case QSEOS_KM_USAGE_FILE_ENCRYPTION:
  8326. if (qseecom.support_pfe) {
  8327. p = qseecom.ce_info.pfe;
  8328. total = qseecom.ce_info.num_pfe;
  8329. } else {
  8330. pr_err("system does not support pfe\n");
  8331. return -EINVAL;
  8332. }
  8333. break;
  8334. default:
  8335. pr_err("unsupported usage %d\n", pinfo->usage);
  8336. return -EINVAL;
  8337. }
  8338. pce_info_use = NULL;
  8339. pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
  8340. pinfo->num_ce_pipe_entries = 0;
  8341. for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
  8342. pinfo->ce_pipe_entry[i].valid = 0;
  8343. for (i = 0; i < total; i++) {
  8344. if (p->alloc && !memcmp(p->handle,
  8345. pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
  8346. pce_info_use = p;
  8347. found = true;
  8348. break;
  8349. }
  8350. p++;
  8351. }
  8352. if (!pce_info_use)
  8353. goto out;
  8354. pinfo->unit_num = pce_info_use->unit_num;
  8355. if (pce_info_use->num_ce_pipe_entries >
  8356. MAX_CE_PIPE_PAIR_PER_UNIT)
  8357. entries = MAX_CE_PIPE_PAIR_PER_UNIT;
  8358. else
  8359. entries = pce_info_use->num_ce_pipe_entries;
  8360. pinfo->num_ce_pipe_entries = entries;
  8361. pce_entry = pce_info_use->ce_pipe_entry;
  8362. for (i = 0; i < entries; i++, pce_entry++)
  8363. pinfo->ce_pipe_entry[i] = *pce_entry;
  8364. for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
  8365. pinfo->ce_pipe_entry[i].valid = 0;
  8366. out:
  8367. if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
  8368. pr_err("copy_to_user failed\n");
  8369. ret = -EFAULT;
  8370. }
  8371. return ret;
  8372. }
  8373. /*
  8374. * Check whitelist feature, and if TZ feature version is < 1.0.0,
  8375. * then whitelist feature is not supported.
  8376. */
  8377. #define GET_FEAT_VERSION_CMD 3
  8378. static int qseecom_check_whitelist_feature(void)
  8379. {
  8380. struct qseecom_scm_desc desc = {0};
  8381. int version = 0;
  8382. int ret = 0;
  8383. desc.args[0] = FEATURE_ID_WHITELIST;
  8384. desc.arginfo = SCM_ARGS(1);
  8385. mutex_lock(&app_access_lock);
  8386. ret = __qseecom_scm_call2_locked(SCM_SIP_FNID(SCM_SVC_INFO,
  8387. GET_FEAT_VERSION_CMD), &desc);
  8388. mutex_unlock(&app_access_lock);
  8389. if (!ret)
  8390. version = desc.ret[0];
  8391. return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
  8392. }
  8393. static int qseecom_init_clk(void)
  8394. {
  8395. int rc;
  8396. if (qseecom.no_clock_support)
  8397. return 0;
  8398. rc = __qseecom_init_clk(CLK_QSEE);
  8399. if (rc)
  8400. return rc;
  8401. if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
  8402. (qseecom.support_pfe || qseecom.support_fde)) {
  8403. rc = __qseecom_init_clk(CLK_CE_DRV);
  8404. if (rc) {
  8405. __qseecom_deinit_clk(CLK_QSEE);
  8406. return rc;
  8407. }
  8408. } else {
  8409. qseecom.ce_drv.ce_core_clk = qseecom.qsee.ce_core_clk;
  8410. qseecom.ce_drv.ce_clk = qseecom.qsee.ce_clk;
  8411. qseecom.ce_drv.ce_core_src_clk = qseecom.qsee.ce_core_src_clk;
  8412. qseecom.ce_drv.ce_bus_clk = qseecom.qsee.ce_bus_clk;
  8413. }
  8414. return rc;
  8415. }
  8416. static void qseecom_deinit_clk(void)
  8417. {
  8418. if (qseecom.no_clock_support)
  8419. return;
  8420. __qseecom_deinit_clk(CLK_QSEE);
  8421. if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
  8422. (qseecom.support_pfe || qseecom.support_fde))
  8423. __qseecom_deinit_clk(CLK_CE_DRV);
  8424. }
  8425. static int qseecom_init_bus(struct platform_device *pdev)
  8426. {
  8427. int ret = 0;
  8428. if (!qseecom.support_bus_scaling)
  8429. return 0;
  8430. if (qseecom.no_clock_support) {
  8431. pr_err("Can not support bus_scalling if no clock support\n");
  8432. return -EINVAL;
  8433. }
  8434. timer_setup(&(qseecom.bw_scale_down_timer),
  8435. qseecom_scale_bus_bandwidth_timer_callback, 0);
  8436. INIT_WORK(&qseecom.bw_inactive_req_ws,
  8437. qseecom_bw_inactive_req_work);
  8438. qseecom.timer_running = false;
  8439. qseecom.icc_path = of_icc_get(&pdev->dev, "data_path");
  8440. if (IS_ERR(qseecom.icc_path)) {
  8441. ret = PTR_ERR(qseecom.icc_path);
  8442. if (ret != -EPROBE_DEFER)
  8443. pr_err("Unable to get Interconnect path\n");
  8444. return ret;
  8445. }
  8446. return 0;
  8447. }
  8448. static void qseecom_deinit_bus(void)
  8449. {
  8450. if (!qseecom.support_bus_scaling || qseecom.no_clock_support)
  8451. return;
  8452. qseecom_bus_scale_update_request(qseecom.qsee_perf_client, 0);
  8453. icc_put(qseecom.icc_path);
  8454. cancel_work_sync(&qseecom.bw_inactive_req_ws);
  8455. del_timer_sync(&qseecom.bw_scale_down_timer);
  8456. }
  8457. static int qseecom_send_app_region(struct platform_device *pdev)
  8458. {
  8459. struct resource *resource = NULL;
  8460. struct qsee_apps_region_info_64bit_ireq req_64bit;
  8461. struct qseecom_command_scm_resp resp;
  8462. void *cmd_buf = NULL;
  8463. size_t cmd_len;
  8464. int rc = 0;
  8465. if (qseecom.qsee_version < QSEE_VERSION_02 ||
  8466. qseecom.is_apps_region_protected ||
  8467. qseecom.appsbl_qseecom_support)
  8468. return 0;
  8469. resource = platform_get_resource_byname(pdev,
  8470. IORESOURCE_MEM, "secapp-region");
  8471. if (!resource) {
  8472. pr_err("Fail to get secure app region info\n");
  8473. return -ENOMEM;
  8474. }
  8475. req_64bit.qsee_cmd_id = QSEOS_APP_REGION_NOTIFICATION;
  8476. req_64bit.addr = resource->start;
  8477. req_64bit.size = resource_size(resource);
  8478. cmd_buf = (void *)&req_64bit;
  8479. cmd_len = sizeof(struct qsee_apps_region_info_64bit_ireq);
  8480. pr_warn("secure app region addr=0x%llx size=0x%x\n",
  8481. req_64bit.addr, req_64bit.size);
  8482. rc = __qseecom_enable_clk(CLK_QSEE);
  8483. if (rc) {
  8484. pr_err("CLK_QSEE enabling failed (%d)\n", rc);
  8485. return rc;
  8486. }
  8487. mutex_lock(&app_access_lock);
  8488. rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
  8489. cmd_buf, cmd_len,
  8490. &resp, sizeof(resp));
  8491. mutex_unlock(&app_access_lock);
  8492. __qseecom_disable_clk(CLK_QSEE);
  8493. if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
  8494. pr_err("send secapp reg fail %d resp.res %d\n",
  8495. rc, resp.result);
  8496. return -EINVAL;
  8497. }
  8498. return rc;
  8499. }
  8500. static void qseecom_release_ce_data(void)
  8501. {
  8502. int i;
  8503. struct qseecom_ce_info_use *pce_info_use = NULL;
  8504. if (qseecom.ce_info.fde) {
  8505. pce_info_use = qseecom.ce_info.fde;
  8506. for (i = 0; i < qseecom.ce_info.num_fde; i++) {
  8507. kfree_sensitive(pce_info_use->ce_pipe_entry);
  8508. pce_info_use++;
  8509. }
  8510. kfree(qseecom.ce_info.fde);
  8511. }
  8512. if (qseecom.ce_info.pfe) {
  8513. pce_info_use = qseecom.ce_info.pfe;
  8514. for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
  8515. kfree_sensitive(pce_info_use->ce_pipe_entry);
  8516. pce_info_use++;
  8517. }
  8518. kfree(qseecom.ce_info.pfe);
  8519. }
  8520. }
  8521. static int qseecom_init_dev(struct platform_device *pdev)
  8522. {
  8523. int rc = 0;
  8524. rc = alloc_chrdev_region(&qseecom.qseecom_device_no,
  8525. 0, 1, QSEECOM_DEV);
  8526. if (rc < 0) {
  8527. pr_err("alloc_chrdev_region failed %d\n", rc);
  8528. return rc;
  8529. }
  8530. qseecom.driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
  8531. if (IS_ERR(qseecom.driver_class)) {
  8532. rc = PTR_ERR(qseecom.driver_class);
  8533. pr_err("class_create failed %x\n", rc);
  8534. goto exit_unreg_chrdev_region;
  8535. }
  8536. qseecom.pdev = device_create(qseecom.driver_class, NULL,
  8537. qseecom.qseecom_device_no, NULL,
  8538. QSEECOM_DEV);
  8539. if (IS_ERR(qseecom.pdev)) {
  8540. pr_err("class_device_create failed %d\n", rc);
  8541. rc = PTR_ERR(qseecom.pdev);
  8542. goto exit_destroy_class;
  8543. }
  8544. cdev_init(&qseecom.cdev, &qseecom_fops);
  8545. qseecom.cdev.owner = THIS_MODULE;
  8546. rc = cdev_add(&qseecom.cdev,
  8547. MKDEV(MAJOR(qseecom.qseecom_device_no), 0), 1);
  8548. if (rc < 0) {
  8549. pr_err("cdev_add failed %d\n", rc);
  8550. goto exit_destroy_device;
  8551. }
  8552. qseecom.dev = &pdev->dev;
  8553. rc = dma_set_mask(qseecom.dev, DMA_BIT_MASK(64));
  8554. if (rc) {
  8555. pr_err("qseecom failed to set dma mask %d\n", rc);
  8556. goto exit_del_cdev;
  8557. }
  8558. if (!qseecom.dev->dma_parms) {
  8559. qseecom.dev->dma_parms =
  8560. kzalloc(sizeof(*qseecom.dev->dma_parms), GFP_KERNEL);
  8561. if (!qseecom.dev->dma_parms) {
  8562. rc = -ENOMEM;
  8563. goto exit_del_cdev;
  8564. }
  8565. }
  8566. dma_set_max_seg_size(qseecom.dev, DMA_BIT_MASK(32));
  8567. rc = of_reserved_mem_device_init_by_idx(&pdev->dev,
  8568. (&pdev->dev)->of_node, 0);
  8569. if (rc) {
  8570. pr_err("Failed to initialize reserved mem, ret %d\n", rc);
  8571. goto exit_del_cdev;
  8572. }
  8573. return 0;
  8574. exit_del_cdev:
  8575. cdev_del(&qseecom.cdev);
  8576. exit_destroy_device:
  8577. device_destroy(qseecom.driver_class, qseecom.qseecom_device_no);
  8578. exit_destroy_class:
  8579. class_destroy(qseecom.driver_class);
  8580. exit_unreg_chrdev_region:
  8581. unregister_chrdev_region(qseecom.qseecom_device_no, 1);
  8582. return rc;
  8583. }
/*
 * Tear down everything created by qseecom_init_dev(), in strict reverse
 * order of creation: DMA parms, cdev, device node, class, chrdev region.
 */
static void qseecom_deinit_dev(void)
{
	/* kfree(NULL) is a no-op, so this is safe if dma_parms was never set. */
	kfree(qseecom.dev->dma_parms);
	qseecom.dev->dma_parms = NULL;
	cdev_del(&qseecom.cdev);
	device_destroy(qseecom.driver_class, qseecom.qseecom_device_no);
	class_destroy(qseecom.driver_class);
	unregister_chrdev_region(qseecom.qseecom_device_no, 1);
}
  8593. static int qseecom_init_control(void)
  8594. {
  8595. uint32_t feature = 10;
  8596. struct qseecom_command_scm_resp resp;
  8597. int rc = 0;
  8598. qseecom.qsee_version = QSEEE_VERSION_00;
  8599. mutex_lock(&app_access_lock);
  8600. rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
  8601. &resp, sizeof(resp));
  8602. mutex_unlock(&app_access_lock);
  8603. pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
  8604. if (rc) {
  8605. pr_err("Failed to get QSEE version info %d\n", rc);
  8606. return rc;
  8607. }
  8608. qseecom.qsee_version = resp.result;
  8609. atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
  8610. init_waitqueue_head(&qseecom.app_block_wq);
  8611. qseecom.whitelist_support = true;
  8612. INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
  8613. INIT_LIST_HEAD(&qseecom.registered_app_list_head);
  8614. spin_lock_init(&qseecom.registered_app_list_lock);
  8615. INIT_LIST_HEAD(&qseecom.unregister_lsnr_pending_list_head);
  8616. INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
  8617. spin_lock_init(&qseecom.registered_kclient_list_lock);
  8618. init_waitqueue_head(&qseecom.send_resp_wq);
  8619. init_waitqueue_head(&qseecom.register_lsnr_pending_wq);
  8620. init_waitqueue_head(&qseecom.unregister_lsnr_kthread_wq);
  8621. INIT_LIST_HEAD(&qseecom.unload_app_pending_list_head);
  8622. init_waitqueue_head(&qseecom.unload_app_kthread_wq);
  8623. qseecom.send_resp_flag = 0;
  8624. qseecom.qseos_version = QSEOS_VERSION_14;
  8625. qseecom.commonlib_loaded = false;
  8626. qseecom.commonlib64_loaded = false;
  8627. qseecom.whitelist_support = qseecom_check_whitelist_feature();
  8628. return rc;
  8629. }
  8630. static int qseecom_parse_dt(struct platform_device *pdev)
  8631. {
  8632. if (!pdev->dev.of_node) {
  8633. pr_err("NULL of_node\n");
  8634. return -ENODEV;
  8635. }
  8636. qseecom.pdev->of_node = pdev->dev.of_node;
  8637. qseecom.support_bus_scaling =
  8638. of_property_read_bool((&pdev->dev)->of_node,
  8639. "qcom,support-bus-scaling");
  8640. qseecom.appsbl_qseecom_support =
  8641. of_property_read_bool((&pdev->dev)->of_node,
  8642. "qcom,appsbl-qseecom-support");
  8643. qseecom.commonlib64_loaded =
  8644. of_property_read_bool((&pdev->dev)->of_node,
  8645. "qcom,commonlib64-loaded-by-uefi");
  8646. qseecom.fde_key_size =
  8647. of_property_read_bool((&pdev->dev)->of_node,
  8648. "qcom,fde-key-size");
  8649. qseecom.no_clock_support =
  8650. of_property_read_bool((&pdev->dev)->of_node,
  8651. "qcom,no-clock-support");
  8652. qseecom.enable_key_wrap_in_ks =
  8653. of_property_read_bool((&pdev->dev)->of_node,
  8654. "qcom,enable-key-wrap-in-ks");
  8655. if (of_property_read_u32((&pdev->dev)->of_node,
  8656. "qcom,qsee-reentrancy-support",
  8657. &qseecom.qsee_reentrancy_support)) {
  8658. pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
  8659. qseecom.qsee_reentrancy_support = 0;
  8660. }
  8661. if (of_property_read_u32((&pdev->dev)->of_node,
  8662. "qcom,ce-opp-freq", &qseecom.ce_opp_freq_hz)) {
  8663. pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
  8664. qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
  8665. }
  8666. /*
  8667. * By default, appsbl only loads cmnlib. If OEM changes appsbl to
  8668. * load cmnlib64 too, while cmnlib64 img is not present in non_hlos.bin,
  8669. * Pls add "qseecom.commonlib64_loaded = true" here too.
  8670. */
  8671. if (qseecom.is_apps_region_protected ||
  8672. qseecom.appsbl_qseecom_support)
  8673. qseecom.commonlib_loaded = true;
  8674. return 0;
  8675. }
  8676. static int qseecom_create_kthreads(void)
  8677. {
  8678. int rc = 0;
  8679. qseecom.unregister_lsnr_kthread_task = kthread_run(
  8680. __qseecom_unregister_listener_kthread_func,
  8681. NULL, "qseecom-unreg-lsnr");
  8682. if (IS_ERR(qseecom.unregister_lsnr_kthread_task)) {
  8683. rc = PTR_ERR(qseecom.unregister_lsnr_kthread_task);
  8684. pr_err("fail to create kthread to unreg lsnr, rc = %x\n", rc);
  8685. return rc;
  8686. }
  8687. atomic_set(&qseecom.unregister_lsnr_kthread_state,
  8688. LSNR_UNREG_KT_SLEEP);
  8689. /*create a kthread to process pending ta unloading task */
  8690. qseecom.unload_app_kthread_task = kthread_run(
  8691. __qseecom_unload_app_kthread_func,
  8692. NULL, "qseecom-unload-ta");
  8693. if (IS_ERR(qseecom.unload_app_kthread_task)) {
  8694. rc = PTR_ERR(qseecom.unload_app_kthread_task);
  8695. pr_err("failed to create kthread to unload ta, rc = %x\n", rc);
  8696. kthread_stop(qseecom.unregister_lsnr_kthread_task);
  8697. return rc;
  8698. }
  8699. atomic_set(&qseecom.unload_app_kthread_state,
  8700. UNLOAD_APP_KT_SLEEP);
  8701. return 0;
  8702. }
  8703. static int qseecom_register_heap_shmbridge(struct platform_device *pdev,
  8704. char *heap_mem_region_name,
  8705. uint64_t *handle)
  8706. {
  8707. phys_addr_t heap_pa = 0;
  8708. size_t heap_size = 0;
  8709. struct device_node *node = NULL;
  8710. struct reserved_mem *rmem = NULL;
  8711. uint32_t ns_vmids[] = {VMID_HLOS};
  8712. uint32_t ns_vm_perms[] = {PERM_READ | PERM_WRITE};
  8713. node = of_parse_phandle(pdev->dev.of_node, heap_mem_region_name, 0);
  8714. if (!node) {
  8715. pr_err("unable to parse memory-region of heap %d\n", heap_mem_region_name);
  8716. return -EINVAL;
  8717. }
  8718. rmem = of_reserved_mem_lookup(node);
  8719. if (!rmem) {
  8720. pr_err("unable to acquire memory-region of heap %d\n", heap_mem_region_name);
  8721. return -EINVAL;
  8722. }
  8723. heap_pa = rmem->base;
  8724. heap_size = (size_t)rmem->size;
  8725. pr_debug("get heap %d info: shmbridge created\n", heap_mem_region_name);
  8726. return qtee_shmbridge_register(heap_pa,
  8727. heap_size, ns_vmids, ns_vm_perms, 1,
  8728. PERM_READ | PERM_WRITE, handle);
  8729. }
  8730. static int qseecom_register_shmbridge(struct platform_device *pdev)
  8731. {
  8732. int ret = 0;
  8733. if (!qtee_shmbridge_is_enabled())
  8734. return 0;
  8735. ret = qseecom_register_heap_shmbridge(pdev, "qseecom_ta_mem",
  8736. &qseecom.ta_bridge_handle);
  8737. if (ret)
  8738. return ret;
  8739. ret = qseecom_register_heap_shmbridge(pdev, "qseecom_mem",
  8740. &qseecom.qseecom_bridge_handle);
  8741. if (ret) {
  8742. qtee_shmbridge_deregister(qseecom.ta_bridge_handle);
  8743. return ret;
  8744. }
  8745. ret = qseecom_register_heap_shmbridge(pdev, "user_contig_mem",
  8746. &qseecom.user_contig_bridge_handle);
  8747. if (ret) {
  8748. qtee_shmbridge_deregister(qseecom.qseecom_bridge_handle);
  8749. qtee_shmbridge_deregister(qseecom.ta_bridge_handle);
  8750. return ret;
  8751. }
  8752. return 0;
  8753. }
/*
 * Tear down the three shmbridges created by qseecom_register_shmbridge(),
 * in reverse order of their registration.
 */
static void qseecom_deregister_shmbridge(void)
{
	qtee_shmbridge_deregister(qseecom.user_contig_bridge_handle);
	qtee_shmbridge_deregister(qseecom.qseecom_bridge_handle);
	qtee_shmbridge_deregister(qseecom.ta_bridge_handle);
}
  8760. static int qseecom_probe(struct platform_device *pdev)
  8761. {
  8762. int rc;
  8763. rc = qseecom_register_shmbridge(pdev);
  8764. if (rc)
  8765. return rc;
  8766. rc = qseecom_init_dev(pdev);
  8767. if (rc)
  8768. goto exit_unregister_bridge;
  8769. rc = qseecom_init_control();
  8770. if (rc)
  8771. goto exit_deinit_dev;
  8772. rc = qseecom_parse_dt(pdev);
  8773. if (rc)
  8774. goto exit_deinit_dev;
  8775. rc = qseecom_retrieve_ce_data(pdev);
  8776. if (rc)
  8777. goto exit_deinit_dev;
  8778. rc = qseecom_init_clk();
  8779. if (rc)
  8780. goto exit_release_ce_data;
  8781. rc = qseecom_init_bus(pdev);
  8782. if (rc)
  8783. goto exit_deinit_clock;
  8784. rc = qseecom_send_app_region(pdev);
  8785. if (rc)
  8786. goto exit_deinit_bus;
  8787. rc = qseecom_create_kthreads();
  8788. if (rc)
  8789. goto exit_deinit_bus;
  8790. #if IS_ENABLED(CONFIG_QSEECOM_PROXY)
  8791. /*If the api fails to get the func ops, print the error and continue
  8792. * Do not treat it as fatal*/
  8793. rc = get_qseecom_kernel_fun_ops();
  8794. if (rc)
  8795. pr_err("failed to provide qseecom ops %d", rc);
  8796. #endif
  8797. atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
  8798. return 0;
  8799. exit_deinit_bus:
  8800. qseecom_deinit_bus();
  8801. exit_deinit_clock:
  8802. qseecom_deinit_clk();
  8803. exit_release_ce_data:
  8804. qseecom_release_ce_data();
  8805. exit_deinit_dev:
  8806. qseecom_deinit_dev();
  8807. exit_unregister_bridge:
  8808. qseecom_deregister_shmbridge();
  8809. return rc;
  8810. }
  8811. static int qseecom_remove(struct platform_device *pdev)
  8812. {
  8813. struct qseecom_registered_kclient_list *kclient = NULL;
  8814. struct qseecom_registered_kclient_list *kclient_tmp = NULL;
  8815. unsigned long flags = 0;
  8816. int ret = 0;
  8817. atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
  8818. spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
  8819. list_for_each_entry_safe(kclient, kclient_tmp,
  8820. &qseecom.registered_kclient_list_head, list) {
  8821. /* Break the loop if client handle is NULL */
  8822. if (!kclient->handle) {
  8823. list_del(&kclient->list);
  8824. kfree_sensitive(kclient);
  8825. break;
  8826. }
  8827. list_del(&kclient->list);
  8828. mutex_lock(&app_access_lock);
  8829. ret = qseecom_unload_app(kclient->handle->dev, false);
  8830. mutex_unlock(&app_access_lock);
  8831. if (!ret) {
  8832. kfree_sensitive(kclient->handle->dev);
  8833. kfree_sensitive(kclient->handle);
  8834. kfree_sensitive(kclient);
  8835. }
  8836. }
  8837. spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
  8838. if (qseecom.qseos_version > QSEEE_VERSION_00)
  8839. qseecom_unload_commonlib_image();
  8840. qseecom_deregister_shmbridge();
  8841. kthread_stop(qseecom.unload_app_kthread_task);
  8842. kthread_stop(qseecom.unregister_lsnr_kthread_task);
  8843. qseecom_deinit_bus();
  8844. qseecom_deinit_clk();
  8845. qseecom_release_ce_data();
  8846. qseecom_deinit_dev();
  8847. return ret;
  8848. }
  8849. static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
  8850. {
  8851. int ret = 0;
  8852. struct qseecom_clk *qclk;
  8853. qclk = &qseecom.qsee;
  8854. atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
  8855. if (qseecom.no_clock_support)
  8856. return 0;
  8857. mutex_lock(&qsee_bw_mutex);
  8858. mutex_lock(&clk_access_lock);
  8859. if (qseecom.current_mode != INACTIVE) {
  8860. ret = qseecom_bus_scale_update_request(
  8861. qseecom.qsee_perf_client, INACTIVE);
  8862. if (ret)
  8863. pr_err("Fail to scale down bus\n");
  8864. else
  8865. qseecom.current_mode = INACTIVE;
  8866. }
  8867. if (qclk->clk_access_cnt) {
  8868. if (qclk->ce_clk != NULL)
  8869. clk_disable_unprepare(qclk->ce_clk);
  8870. if (qclk->ce_core_clk != NULL)
  8871. clk_disable_unprepare(qclk->ce_core_clk);
  8872. if (qclk->ce_bus_clk != NULL)
  8873. clk_disable_unprepare(qclk->ce_bus_clk);
  8874. }
  8875. del_timer_sync(&(qseecom.bw_scale_down_timer));
  8876. qseecom.timer_running = false;
  8877. mutex_unlock(&clk_access_lock);
  8878. mutex_unlock(&qsee_bw_mutex);
  8879. cancel_work_sync(&qseecom.bw_inactive_req_ws);
  8880. return 0;
  8881. }
/*
 * Legacy platform resume callback: restore the bus bandwidth vote and
 * re-enable the crypto-engine clocks gated in qseecom_suspend(), restart
 * the bus scale-down timer, and mark the driver READY again.
 *
 * Returns 0 on success, -EIO if any clock fails to re-enable.
 */
static int qseecom_resume(struct platform_device *pdev)
{
	int mode = 0;
	int ret = 0;
	struct qseecom_clk *qclk;

	qclk = &qseecom.qsee;
	if (qseecom.no_clock_support)
		goto exit;

	mutex_lock(&qsee_bw_mutex);
	mutex_lock(&clk_access_lock);

	/* Clamp the restored vote at HIGH */
	if (qseecom.cumulative_mode >= HIGH)
		mode = HIGH;
	else
		mode = qseecom.cumulative_mode;

	/* Re-apply the bus vote dropped during suspend */
	if (qseecom.cumulative_mode != INACTIVE) {
		ret = qseecom_bus_scale_update_request(
			qseecom.qsee_perf_client, mode);
		if (ret)
			pr_err("Fail to scale up bus to %d\n", mode);
		else
			qseecom.current_mode = mode;
	}

	/* Re-enable the CE clocks if clients held them at suspend time */
	if (qclk->clk_access_cnt) {
		if (qclk->ce_core_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_core_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE core clk\n");
				qclk->clk_access_cnt = 0;
				goto err;
			}
		}
		if (qclk->ce_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE iface clk\n");
				qclk->clk_access_cnt = 0;
				goto ce_clk_err;
			}
		}
		if (qclk->ce_bus_clk != NULL) {
			ret = clk_prepare_enable(qclk->ce_bus_clk);
			if (ret) {
				pr_err("Unable to enable/prep CE bus clk\n");
				qclk->clk_access_cnt = 0;
				goto ce_bus_clk_err;
			}
		}
	}

	/* Restart the inactivity timer that scales the bus back down */
	if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
		qseecom.bw_scale_down_timer.expires = jiffies +
			msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
		mod_timer(&(qseecom.bw_scale_down_timer),
				qseecom.bw_scale_down_timer.expires);
		qseecom.timer_running = true;
	}

	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	goto exit;

	/* Error unwind: disable clocks in reverse order of enabling */
ce_bus_clk_err:
	if (qclk->ce_clk)
		clk_disable_unprepare(qclk->ce_clk);
ce_clk_err:
	if (qclk->ce_core_clk)
		clk_disable_unprepare(qclk->ce_core_clk);
err:
	mutex_unlock(&clk_access_lock);
	mutex_unlock(&qsee_bw_mutex);
	ret = -EIO;
exit:
	/* NOTE(review): state is set READY even on the -EIO path — confirm intended */
	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
	return ret;
}
/* Device-tree match table: binds this driver to "qcom,qseecom" nodes */
static const struct of_device_id qseecom_match[] = {
	{
		.compatible = "qcom,qseecom",
	},
	{}
};
/*
 * Platform driver glue. Uses the legacy platform-level suspend/resume
 * callbacks rather than dev_pm_ops.
 */
static struct platform_driver qseecom_plat_driver = {
	.probe = qseecom_probe,
	.remove = qseecom_remove,
	.suspend = qseecom_suspend,
	.resume = qseecom_resume,
	.driver = {
		.name = "qseecom",
		.of_match_table = qseecom_match,
	},
};
/* Module entry point: register the qseecom platform driver. */
static int qseecom_init(void)
{
	return platform_driver_register(&qseecom_plat_driver);
}
/* Module exit point: unregister the qseecom platform driver. */
static void qseecom_exit(void)
{
	platform_driver_unregister(&qseecom_plat_driver);
}
/* Module metadata and init/exit hookup */
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");
module_init(qseecom_init);
module_exit(qseecom_exit);