qce50.c 178 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * QTI Crypto Engine driver.
  4. *
  5. * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
  6. */
  7. #define pr_fmt(fmt) "QCE50: %s: " fmt, __func__
  8. #include <linux/types.h>
  9. #include <linux/kernel.h>
  10. #include <linux/module.h>
  11. #include <linux/mod_devicetable.h>
  12. #include <linux/device.h>
  13. #include <linux/clk.h>
  14. #include <linux/err.h>
  15. #include <linux/dma-mapping.h>
  16. #include <linux/io.h>
  17. #include <linux/platform_device.h>
  18. #include <linux/spinlock.h>
  19. #include <linux/delay.h>
  20. #include <linux/crypto.h>
  21. #include <linux/bitops.h>
  22. #include "linux/qcrypto.h"
  23. #include <crypto/hash.h>
  24. #include <crypto/sha1.h>
  25. #include <soc/qcom/socinfo.h>
  26. #include <linux/dma-iommu.h>
  27. #include <linux/iommu.h>
  28. #include "qce.h"
  29. #include "qce50.h"
  30. #include "qcryptohw_50.h"
  31. #include "qce_ota.h"
/* IOVA window used when the CE is mapped through a stage-1 SMMU */
#define CRYPTO_SMMU_IOVA_START 0x10000000
#define CRYPTO_SMMU_IOVA_SIZE 0x40000000
/* Reset value for the crypto CONFIG register — NOTE(review): use site not visible in this chunk */
#define CRYPTO_CONFIG_RESET 0xE01EF
/* Upper bound on the SPS descriptor FIFO size, in bytes */
#define MAX_SPS_DESC_FIFO_SIZE 0xfff0
/* Max number of SPS descriptors per pipe (also sizes the per-pipe iovec pool) */
#define QCE_MAX_NUM_DSCR 0x200
#define QCE_SECTOR_SIZE 0x200
/* Clock bookkeeping: 100 MHz reference, and Hz -> MHz divisor */
#define CE_CLK_100MHZ 100000000
#define CE_CLK_DIV 1000000
/*
 * CE core revision the driver is tuned for (5.3.1). The major number must
 * match exactly; minor/step select HW-bug workarounds (see _probe_ce_engine).
 */
#define CRYPTO_CORE_MAJOR_VER_NUM 0x05
#define CRYPTO_CORE_MINOR_VER_NUM 0x03
#define CRYPTO_CORE_STEP_VER_NUM 0x1
/* Tag pattern for request bookkeeping — NOTE(review): use site not visible in this chunk */
#define CRYPTO_REQ_USER_PAT 0xdead0000
/* Serializes BAM registration/deregistration (protects qce50_bam_list) */
static DEFINE_MUTEX(bam_register_lock);
/* Serializes CE register iomap/iounmap — NOTE(review): use site not visible in this chunk */
static DEFINE_MUTEX(qce_iomap_mutex);
/*
 * Book-keeping for one registered BAM instance, shared between CE devices
 * that sit behind the same BAM. Entries live on qce50_bam_list.
 */
struct bam_registration_info {
	struct list_head qlist;		/* node on qce50_bam_list */
	unsigned long handle;		/* BAM device handle — presumably from SPS registration; confirm */
	uint32_t cnt;			/* reference count — NOTE(review): inferred from name, verify at use sites */
	uint32_t bam_mem;		/* BAM physical address (cf. qce_device.bam_mem) */
	void __iomem *bam_iobase;	/* mapped BAM register space */
	bool support_cmd_dscr;		/* BAM supports command descriptors */
};
/* All registered BAM instances (entries are struct bam_registration_info) */
static LIST_HEAD(qce50_bam_list);
/* Outstanding-request count at which the driver switches to bunch mode */
#define MAX_BUNCH_MODE_REQ 2
/* Max number of requests supported concurrently */
#define MAX_QCE_BAM_REQ 8
/* Interrupt flag will be set on every SET_INTR_AT_REQ-th request */
#define SET_INTR_AT_REQ (MAX_QCE_BAM_REQ / 2)
/* Extra request slot reserved for the dummy request */
#define MAX_QCE_BAM_REQ_WITH_DUMMY_REQ (MAX_QCE_BAM_REQ + 1)
/* Allocate memory for MAX_QCE_BAM_REQ + 1 (the +1 is the dummy request) */
#define MAX_QCE_ALLOC_BAM_REQ MAX_QCE_BAM_REQ_WITH_DUMMY_REQ
/* QCE driver modes (qce_device.mode) */
#define IN_INTERRUPT_MODE 0
#define IN_BUNCH_MODE 1
/* Dummy request data length, in bytes */
#define DUMMY_REQ_DATA_LEN 64
/* Delay timer expiry, in jiffies, while in bunch mode */
#define DELAY_IN_JIFFIES 5
/* Index of the dummy request slot (last slot in ce_request_info[]) */
#define DUMMY_REQ_INDEX MAX_QCE_BAM_REQ
/* Bytes of iovec space reserved per pipe */
#define TOTAL_IOVEC_SPACE_PER_PIPE (QCE_MAX_NUM_DSCR * sizeof(struct sps_iovec))
/* Who currently owns the engine (qce_device.owner) */
enum qce_owner {
	QCE_OWNER_NONE = 0,	/* engine idle / unowned */
	QCE_OWNER_CLIENT = 1,	/* owned by a client request */
	QCE_OWNER_TIMEOUT = 2	/* reclaimed on timeout — NOTE(review): transition sites not visible in this chunk */
};
/*
 * Pre-built SHA request used as filler traffic (see qce_dummy_req(),
 * DUMMY_REQ_INDEX and qce_device.dummyreq).
 */
struct dummy_request {
	struct qce_sha_req sreq;	/* the SHA request itself */
	struct scatterlist sg;		/* single-entry source scatterlist */
	struct ahash_request areq;	/* crypto-API request wrapper */
};
/*
 * CE HW device structure.
 * Each engine has an instance of the structure.
 * Each engine can only handle one crypto operation at one time. It is up to
 * the sw above to ensure single threading of operation on an engine.
 */
struct qce_device {
	struct device *pdev;		/* Handle to platform_device structure */
	struct bam_registration_info *pbam;	/* shared BAM bookkeeping (qce50_bam_list entry) */
	unsigned char *coh_vmem;	/* Allocated coherent virtual memory */
	dma_addr_t coh_pmem;		/* Allocated coherent physical memory */
	int memsize;			/* Memory allocated */
	unsigned char *iovec_vmem;	/* Allocated iovec virtual memory */
	int iovec_memsize;		/* Memory allocated */
	uint32_t bam_mem;		/* bam physical address, from DT */
	uint32_t bam_mem_size;		/* bam io size, from DT */
	int is_shared;			/* CE HW is shared */
	bool support_cmd_dscr;		/* BAM command descriptors supported */
	bool support_hw_key;		/* HW key supported — NOTE(review): use site not visible in this chunk */
	bool support_clk_mgmt_sus_res;	/* clock mgmt across suspend/resume */
	bool support_only_core_src_clk;	/* only the core src clk is present */
	bool request_bw_before_clk;	/* request bus bandwidth before enabling clocks */
	void __iomem *iobase;		/* Virtual io base of CE HW */
	unsigned int phy_iobase;	/* Physical io base of CE HW */
	struct clk *ce_core_src_clk;	/* Handle to CE src clk */
	struct clk *ce_core_clk;	/* Handle to CE clk */
	struct clk *ce_clk;		/* Handle to CE clk */
	struct clk *ce_bus_clk;		/* Handle to CE AXI clk */
	/* true on HW >= 5.3: one sps transfer per pipe per request (see _probe_ce_engine) */
	bool no_get_around;
	/* true on HW >= 5.3.1: CCM MAC_FAILED result-dump issue fixed (see _probe_ce_engine) */
	bool no_ccm_mac_status_get_around;
	unsigned int ce_opp_freq_hz;	/* operating frequency, Hz */
	bool use_sw_aes_cbc_ecb_ctr_algo;
	bool use_sw_aead_algo;
	bool use_sw_aes_xts_algo;
	bool use_sw_ahash_algo;
	bool use_sw_hmac_algo;
	bool use_sw_aes_ccm_algo;
	uint32_t engines_avail;		/* CRYPTO_ENGINES_AVAIL register snapshot */
	struct qce_ce_cfg_reg_setting reg;
	struct ce_bam_info ce_bam_info;
	struct ce_request_info ce_request_info[MAX_QCE_ALLOC_BAM_REQ];
	unsigned int ce_request_index;	/* next slot to try in ce_request_info[] */
	enum qce_owner owner;
	atomic_t no_of_queued_req;
	struct timer_list timer;	/* bunch-mode delay timer (DELAY_IN_JIFFIES) */
	struct dummy_request dummyreq;
	unsigned int mode;		/* IN_INTERRUPT_MODE or IN_BUNCH_MODE */
	unsigned int intr_cadence;	/* requests since last interrupt (cf. SET_INTR_AT_REQ) */
	unsigned int dev_no;
	struct qce_driver_stats qce_stats;
	atomic_t bunch_cmd_seq;
	atomic_t last_intr_seq;
	bool cadence_flag;
	uint8_t *dummyreq_in_buf;	/* input buffer for the dummy request */
	struct dma_iommu_mapping *smmu_mapping;
	bool enable_s1_smmu;		/* stage-1 SMMU enabled (CRYPTO_SMMU_IOVA_*) */
	bool no_clock_support;
};
static void print_notify_debug(struct sps_event_notify *notify);
static void _sps_producer_callback(struct sps_event_notify *notify);
static int qce_dummy_req(struct qce_device *pce_dev);
/* Non-zero enables driver statistics display — NOTE(review): writer not visible in this chunk */
static int _qce50_disp_stats;
/* Standard initialization vector for SHA-1, source: FIPS 180-2 */
static uint32_t _std_init_vector_sha1[] = {
	0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
};
/* Standard initialization vector for SHA-256, source: FIPS 180-2 */
static uint32_t _std_init_vector_sha256[] = {
	0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
	0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
};
  156. static void _byte_stream_to_net_words(uint32_t *iv, unsigned char *b,
  157. unsigned int len)
  158. {
  159. unsigned int n;
  160. n = len / sizeof(uint32_t);
  161. for (; n > 0; n--) {
  162. *iv = ((*b << 24) & 0xff000000) |
  163. (((*(b+1)) << 16) & 0xff0000) |
  164. (((*(b+2)) << 8) & 0xff00) |
  165. (*(b+3) & 0xff);
  166. b += sizeof(uint32_t);
  167. iv++;
  168. }
  169. n = len % sizeof(uint32_t);
  170. if (n == 3) {
  171. *iv = ((*b << 24) & 0xff000000) |
  172. (((*(b+1)) << 16) & 0xff0000) |
  173. (((*(b+2)) << 8) & 0xff00);
  174. } else if (n == 2) {
  175. *iv = ((*b << 24) & 0xff000000) |
  176. (((*(b+1)) << 16) & 0xff0000);
  177. } else if (n == 1) {
  178. *iv = ((*b << 24) & 0xff000000);
  179. }
  180. }
  181. static void _byte_stream_swap_to_net_words(uint32_t *iv, unsigned char *b,
  182. unsigned int len)
  183. {
  184. unsigned int i, j;
  185. unsigned char swap_iv[AES_IV_LENGTH];
  186. memset(swap_iv, 0, AES_IV_LENGTH);
  187. for (i = (AES_IV_LENGTH-len), j = len-1; i < AES_IV_LENGTH; i++, j--)
  188. swap_iv[i] = b[j];
  189. _byte_stream_to_net_words(iv, swap_iv, AES_IV_LENGTH);
  190. }
  191. static int count_sg(struct scatterlist *sg, int nbytes)
  192. {
  193. int i;
  194. for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
  195. nbytes -= sg->length;
  196. return i;
  197. }
/*
 * DMA-map a scatterlist one entry at a time and return nents.
 *
 * NOTE(review): the return value of dma_map_sg() is ignored, so a failed
 * mapping of any entry goes undetected and nents is returned regardless.
 * Confirm callers tolerate this before relying on the return value as a
 * success indicator.
 */
static int qce_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction direction)
{
	int i;

	for (i = 0; i < nents; ++i) {
		dma_map_sg(dev, sg, 1, direction);
		sg = sg_next(sg);
	}
	return nents;
}
  208. static int qce_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
  209. int nents, enum dma_data_direction direction)
  210. {
  211. int i;
  212. for (i = 0; i < nents; ++i) {
  213. dma_unmap_sg(dev, sg, 1, direction);
  214. sg = sg_next(sg);
  215. }
  216. return nents;
  217. }
  218. static int _probe_ce_engine(struct qce_device *pce_dev)
  219. {
  220. unsigned int rev;
  221. unsigned int maj_rev, min_rev, step_rev;
  222. rev = readl_relaxed(pce_dev->iobase + CRYPTO_VERSION_REG);
  223. /*
  224. * Ensure previous instructions (setting the GO register)
  225. * was completed before checking the version.
  226. */
  227. mb();
  228. maj_rev = (rev & CRYPTO_CORE_MAJOR_REV_MASK) >> CRYPTO_CORE_MAJOR_REV;
  229. min_rev = (rev & CRYPTO_CORE_MINOR_REV_MASK) >> CRYPTO_CORE_MINOR_REV;
  230. step_rev = (rev & CRYPTO_CORE_STEP_REV_MASK) >> CRYPTO_CORE_STEP_REV;
  231. if (maj_rev != CRYPTO_CORE_MAJOR_VER_NUM) {
  232. pr_err("Unsupported QTI crypto device at 0x%x, rev %d.%d.%d\n",
  233. pce_dev->phy_iobase, maj_rev, min_rev, step_rev);
  234. return -EIO;
  235. }
  236. /*
  237. * The majority of crypto HW bugs have been fixed in 5.3.0 and
  238. * above. That allows a single sps transfer of consumer
  239. * pipe, and a single sps transfer of producer pipe
  240. * for a crypto request. no_get_around flag indicates this.
  241. *
  242. * In 5.3.1, the CCM MAC_FAILED in result dump issue is
  243. * fixed. no_ccm_mac_status_get_around flag indicates this.
  244. */
  245. pce_dev->no_get_around = (min_rev >=
  246. CRYPTO_CORE_MINOR_VER_NUM) ? true : false;
  247. if (min_rev > CRYPTO_CORE_MINOR_VER_NUM)
  248. pce_dev->no_ccm_mac_status_get_around = true;
  249. else if ((min_rev == CRYPTO_CORE_MINOR_VER_NUM) &&
  250. (step_rev >= CRYPTO_CORE_STEP_VER_NUM))
  251. pce_dev->no_ccm_mac_status_get_around = true;
  252. else
  253. pce_dev->no_ccm_mac_status_get_around = false;
  254. pce_dev->ce_bam_info.minor_version = min_rev;
  255. pce_dev->engines_avail = readl_relaxed(pce_dev->iobase +
  256. CRYPTO_ENGINES_AVAIL);
  257. dev_info(pce_dev->pdev, "QTI Crypto %d.%d.%d device found @0x%x\n",
  258. maj_rev, min_rev, step_rev, pce_dev->phy_iobase);
  259. pce_dev->ce_bam_info.ce_burst_size = MAX_CE_BAM_BURST_SIZE;
  260. dev_dbg(pce_dev->pdev, "CE device = %#x IO base, CE = %pK Consumer (IN) PIPE %d,\nProducer (OUT) PIPE %d IO base BAM = %pK\nBAM IRQ %d Engines Availability = %#x\n",
  261. pce_dev->ce_bam_info.ce_device, pce_dev->iobase,
  262. pce_dev->ce_bam_info.dest_pipe_index,
  263. pce_dev->ce_bam_info.src_pipe_index,
  264. pce_dev->ce_bam_info.bam_iobase,
  265. pce_dev->ce_bam_info.bam_irq, pce_dev->engines_avail);
  266. return 0;
  267. };
  268. static struct qce_cmdlist_info *_ce_get_hash_cmdlistinfo(
  269. struct qce_device *pce_dev,
  270. int req_info, struct qce_sha_req *sreq)
  271. {
  272. struct ce_sps_data *pce_sps_data;
  273. struct qce_cmdlistptr_ops *cmdlistptr;
  274. pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
  275. cmdlistptr = &pce_sps_data->cmdlistptr;
  276. switch (sreq->alg) {
  277. case QCE_HASH_SHA1:
  278. return &cmdlistptr->auth_sha1;
  279. case QCE_HASH_SHA256:
  280. return &cmdlistptr->auth_sha256;
  281. case QCE_HASH_SHA1_HMAC:
  282. return &cmdlistptr->auth_sha1_hmac;
  283. case QCE_HASH_SHA256_HMAC:
  284. return &cmdlistptr->auth_sha256_hmac;
  285. case QCE_HASH_AES_CMAC:
  286. if (sreq->authklen == AES128_KEY_SIZE)
  287. return &cmdlistptr->auth_aes_128_cmac;
  288. return &cmdlistptr->auth_aes_256_cmac;
  289. default:
  290. return NULL;
  291. }
  292. return NULL;
  293. }
/*
 * Populate the hash/HMAC/CMAC command list @cmdlistinfo from @sreq.
 *
 * For HMAC/CMAC, the auth key is written into the command list unless the
 * request selects the hardware key or pipe key (flag-based, keys are not
 * NULL-checked). For SHA1/SHA256, the standard init vector is loaded on
 * the first block; on continuation blocks the running digest from
 * sreq->digest is restored instead.
 *
 * Returns 0 on success, -EIO when a non-final block size is not a
 * multiple of SHA256_BLOCK_SIZE, -EINVAL for an unknown algorithm.
 */
static int _ce_setup_hash(struct qce_device *pce_dev,
				struct qce_sha_req *sreq,
				struct qce_cmdlist_info *cmdlistinfo)
{
	uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
	uint32_t diglen;
	int i;
	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	bool sha1 = false;
	struct sps_command_element *pce = NULL;
	bool use_hw_key = false;
	bool use_pipe_key = false;
	uint32_t authk_size_in_word = sreq->authklen/sizeof(uint32_t);
	uint32_t auth_cfg;

	if ((sreq->alg == QCE_HASH_SHA1_HMAC) ||
			(sreq->alg == QCE_HASH_SHA256_HMAC) ||
			(sreq->alg == QCE_HASH_AES_CMAC)) {
		/* no more check for null key. use flag */
		if ((sreq->flags & QCRYPTO_CTX_USE_HW_KEY)
				== QCRYPTO_CTX_USE_HW_KEY)
			use_hw_key = true;
		else if ((sreq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
				QCRYPTO_CTX_USE_PIPE_KEY)
			use_pipe_key = true;
		pce = cmdlistinfo->go_proc;
		if (use_hw_key) {
			/* GO via the HW-key register; no key is written */
			pce->addr = (uint32_t)(CRYPTO_GOPROC_QC_KEY_REG +
						pce_dev->phy_iobase);
		} else {
			pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
						pce_dev->phy_iobase);
			pce = cmdlistinfo->auth_key;
			/* a pipe key is provisioned out of band; only a
			 * software key is written into the command list
			 */
			if (!use_pipe_key) {
				_byte_stream_to_net_words(mackey32,
						sreq->authkey,
						sreq->authklen);
				for (i = 0; i < authk_size_in_word; i++, pce++)
					pce->data = mackey32[i];
			}
		}
	}
	/* CMAC needs no IV/bytecount/seg-cfg setup below */
	if (sreq->alg == QCE_HASH_AES_CMAC)
		goto go_proc;
	/* if not the last, the size has to be on the block boundary */
	if (!sreq->last_blk && (sreq->size % SHA256_BLOCK_SIZE))
		return -EIO;
	switch (sreq->alg) {
	case QCE_HASH_SHA1:
	case QCE_HASH_SHA1_HMAC:
		diglen = SHA1_DIGEST_SIZE;
		sha1 = true;
		break;
	case QCE_HASH_SHA256:
	case QCE_HASH_SHA256_HMAC:
		diglen = SHA256_DIGEST_SIZE;
		break;
	default:
		return -EINVAL;
	}
	/* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */
	if (sreq->first_blk) {
		if (sha1) {
			for (i = 0; i < 5; i++)
				auth32[i] = _std_init_vector_sha1[i];
		} else {
			for (i = 0; i < 8; i++)
				auth32[i] = _std_init_vector_sha256[i];
		}
	} else {
		/* continuation block: restore the running digest */
		_byte_stream_to_net_words(auth32, sreq->digest, diglen);
	}
	/* first 5 IV words are common to SHA1 and SHA256 */
	pce = cmdlistinfo->auth_iv;
	for (i = 0; i < 5; i++, pce++)
		pce->data = auth32[i];
	if ((sreq->alg == QCE_HASH_SHA256) ||
			(sreq->alg == QCE_HASH_SHA256_HMAC)) {
		for (i = 5; i < 8; i++, pce++)
			pce->data = auth32[i];
	}
	/* write auth_bytecnt 0/1, start with 0 */
	pce = cmdlistinfo->auth_bytecount;
	for (i = 0; i < 2; i++, pce++)
		pce->data = sreq->auth_data[i];
	/* Set/reset last bit in CFG register */
	pce = cmdlistinfo->auth_seg_cfg;
	auth_cfg = pce->data & ~(1 << CRYPTO_LAST |
				1 << CRYPTO_FIRST |
				1 << CRYPTO_USE_PIPE_KEY_AUTH |
				1 << CRYPTO_USE_HW_KEY_AUTH);
	if (sreq->last_blk)
		auth_cfg |= 1 << CRYPTO_LAST;
	if (sreq->first_blk)
		auth_cfg |= 1 << CRYPTO_FIRST;
	if (use_hw_key)
		auth_cfg |= 1 << CRYPTO_USE_HW_KEY_AUTH;
	if (use_pipe_key)
		auth_cfg |= 1 << CRYPTO_USE_PIPE_KEY_AUTH;
	pce->data = auth_cfg;
go_proc:
	/* write auth seg size */
	pce = cmdlistinfo->auth_seg_size;
	pce->data = sreq->size;
	pce = cmdlistinfo->encr_seg_cfg;
	pce->data = 0;
	/* write auth seg size start*/
	pce = cmdlistinfo->auth_seg_start;
	pce->data = 0;
	/* write seg size */
	pce = cmdlistinfo->seg_size;
	/* always ensure there is input data. ZLT does not work for bam-ndp */
	if (sreq->size)
		pce->data = sreq->size;
	else
		pce->data = pce_dev->ce_bam_info.ce_burst_size;
	return 0;
}
  411. static struct qce_cmdlist_info *_ce_get_aead_cmdlistinfo(
  412. struct qce_device *pce_dev,
  413. int req_info, struct qce_req *creq)
  414. {
  415. struct ce_sps_data *pce_sps_data;
  416. struct qce_cmdlistptr_ops *cmdlistptr;
  417. pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
  418. cmdlistptr = &pce_sps_data->cmdlistptr;
  419. switch (creq->alg) {
  420. case CIPHER_ALG_DES:
  421. switch (creq->mode) {
  422. case QCE_MODE_CBC:
  423. if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
  424. return &cmdlistptr->aead_hmac_sha1_cbc_des;
  425. else if (creq->auth_alg == QCE_HASH_SHA256_HMAC)
  426. return &cmdlistptr->aead_hmac_sha256_cbc_des;
  427. else
  428. return NULL;
  429. break;
  430. default:
  431. return NULL;
  432. }
  433. break;
  434. case CIPHER_ALG_3DES:
  435. switch (creq->mode) {
  436. case QCE_MODE_CBC:
  437. if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
  438. return &cmdlistptr->aead_hmac_sha1_cbc_3des;
  439. else if (creq->auth_alg == QCE_HASH_SHA256_HMAC)
  440. return &cmdlistptr->aead_hmac_sha256_cbc_3des;
  441. else
  442. return NULL;
  443. break;
  444. default:
  445. return NULL;
  446. }
  447. break;
  448. case CIPHER_ALG_AES:
  449. switch (creq->mode) {
  450. case QCE_MODE_CBC:
  451. if (creq->encklen == AES128_KEY_SIZE) {
  452. if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
  453. return
  454. &cmdlistptr->aead_hmac_sha1_cbc_aes_128;
  455. else if (creq->auth_alg == QCE_HASH_SHA256_HMAC)
  456. return
  457. &cmdlistptr->aead_hmac_sha256_cbc_aes_128;
  458. else
  459. return NULL;
  460. } else if (creq->encklen == AES256_KEY_SIZE) {
  461. if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
  462. return &cmdlistptr->aead_hmac_sha1_cbc_aes_256;
  463. else if (creq->auth_alg == QCE_HASH_SHA256_HMAC)
  464. return
  465. &cmdlistptr->aead_hmac_sha256_cbc_aes_256;
  466. else
  467. return NULL;
  468. } else
  469. return NULL;
  470. break;
  471. default:
  472. return NULL;
  473. }
  474. break;
  475. default:
  476. return NULL;
  477. }
  478. return NULL;
  479. }
  480. static int _ce_setup_aead(struct qce_device *pce_dev, struct qce_req *q_req,
  481. uint32_t totallen_in, uint32_t coffset,
  482. struct qce_cmdlist_info *cmdlistinfo)
  483. {
  484. int32_t authk_size_in_word = SHA_HMAC_KEY_SIZE/sizeof(uint32_t);
  485. int i;
  486. uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {0};
  487. struct sps_command_element *pce;
  488. uint32_t a_cfg;
  489. uint32_t enckey32[(MAX_CIPHER_KEY_SIZE*2)/sizeof(uint32_t)] = {0};
  490. uint32_t enciv32[MAX_IV_LENGTH/sizeof(uint32_t)] = {0};
  491. uint32_t enck_size_in_word = 0;
  492. uint32_t enciv_in_word;
  493. uint32_t key_size;
  494. uint32_t encr_cfg = 0;
  495. uint32_t ivsize = q_req->ivsize;
  496. key_size = q_req->encklen;
  497. enck_size_in_word = key_size/sizeof(uint32_t);
  498. switch (q_req->alg) {
  499. case CIPHER_ALG_DES:
  500. enciv_in_word = 2;
  501. break;
  502. case CIPHER_ALG_3DES:
  503. enciv_in_word = 2;
  504. break;
  505. case CIPHER_ALG_AES:
  506. if ((key_size != AES128_KEY_SIZE) &&
  507. (key_size != AES256_KEY_SIZE))
  508. return -EINVAL;
  509. enciv_in_word = 4;
  510. break;
  511. default:
  512. return -EINVAL;
  513. }
  514. /* only support cbc mode */
  515. if (q_req->mode != QCE_MODE_CBC)
  516. return -EINVAL;
  517. _byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
  518. pce = cmdlistinfo->encr_cntr_iv;
  519. for (i = 0; i < enciv_in_word; i++, pce++)
  520. pce->data = enciv32[i];
  521. /*
  522. * write encr key
  523. * do not use hw key or pipe key
  524. */
  525. _byte_stream_to_net_words(enckey32, q_req->enckey, key_size);
  526. pce = cmdlistinfo->encr_key;
  527. for (i = 0; i < enck_size_in_word; i++, pce++)
  528. pce->data = enckey32[i];
  529. /* write encr seg cfg */
  530. pce = cmdlistinfo->encr_seg_cfg;
  531. encr_cfg = pce->data;
  532. if (q_req->dir == QCE_ENCRYPT)
  533. encr_cfg |= (1 << CRYPTO_ENCODE);
  534. else
  535. encr_cfg &= ~(1 << CRYPTO_ENCODE);
  536. pce->data = encr_cfg;
  537. /* we only support sha1-hmac and sha256-hmac at this point */
  538. _byte_stream_to_net_words(mackey32, q_req->authkey,
  539. q_req->authklen);
  540. pce = cmdlistinfo->auth_key;
  541. for (i = 0; i < authk_size_in_word; i++, pce++)
  542. pce->data = mackey32[i];
  543. pce = cmdlistinfo->auth_iv;
  544. if (q_req->auth_alg == QCE_HASH_SHA1_HMAC)
  545. for (i = 0; i < 5; i++, pce++)
  546. pce->data = _std_init_vector_sha1[i];
  547. else
  548. for (i = 0; i < 8; i++, pce++)
  549. pce->data = _std_init_vector_sha256[i];
  550. /* write auth_bytecnt 0/1, start with 0 */
  551. pce = cmdlistinfo->auth_bytecount;
  552. for (i = 0; i < 2; i++, pce++)
  553. pce->data = 0;
  554. pce = cmdlistinfo->auth_seg_cfg;
  555. a_cfg = pce->data;
  556. a_cfg &= ~(CRYPTO_AUTH_POS_MASK);
  557. if (q_req->dir == QCE_ENCRYPT)
  558. a_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
  559. else
  560. a_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
  561. pce->data = a_cfg;
  562. /* write auth seg size */
  563. pce = cmdlistinfo->auth_seg_size;
  564. pce->data = totallen_in;
  565. /* write auth seg size start*/
  566. pce = cmdlistinfo->auth_seg_start;
  567. pce->data = 0;
  568. /* write seg size */
  569. pce = cmdlistinfo->seg_size;
  570. pce->data = totallen_in;
  571. /* write encr seg size */
  572. pce = cmdlistinfo->encr_seg_size;
  573. pce->data = q_req->cryptlen;
  574. /* write encr seg start */
  575. pce = cmdlistinfo->encr_seg_start;
  576. pce->data = (coffset & 0xffff);
  577. return 0;
  578. }
  579. static struct qce_cmdlist_info *_ce_get_cipher_cmdlistinfo(
  580. struct qce_device *pce_dev,
  581. int req_info, struct qce_req *creq)
  582. {
  583. struct ce_request_info *preq_info;
  584. struct ce_sps_data *pce_sps_data;
  585. struct qce_cmdlistptr_ops *cmdlistptr;
  586. preq_info = &pce_dev->ce_request_info[req_info];
  587. pce_sps_data = &preq_info->ce_sps;
  588. cmdlistptr = &pce_sps_data->cmdlistptr;
  589. if (creq->alg != CIPHER_ALG_AES) {
  590. switch (creq->alg) {
  591. case CIPHER_ALG_DES:
  592. if (creq->mode == QCE_MODE_ECB)
  593. return &cmdlistptr->cipher_des_ecb;
  594. return &cmdlistptr->cipher_des_cbc;
  595. case CIPHER_ALG_3DES:
  596. if (creq->mode == QCE_MODE_ECB)
  597. return &cmdlistptr->cipher_3des_ecb;
  598. return &cmdlistptr->cipher_3des_cbc;
  599. default:
  600. return NULL;
  601. }
  602. } else {
  603. switch (creq->mode) {
  604. case QCE_MODE_ECB:
  605. if (creq->encklen == AES128_KEY_SIZE)
  606. return &cmdlistptr->cipher_aes_128_ecb;
  607. return &cmdlistptr->cipher_aes_256_ecb;
  608. case QCE_MODE_CBC:
  609. case QCE_MODE_CTR:
  610. if (creq->encklen == AES128_KEY_SIZE)
  611. return &cmdlistptr->cipher_aes_128_cbc_ctr;
  612. return &cmdlistptr->cipher_aes_256_cbc_ctr;
  613. case QCE_MODE_XTS:
  614. if (creq->encklen/2 == AES128_KEY_SIZE)
  615. return &cmdlistptr->cipher_aes_128_xts;
  616. return &cmdlistptr->cipher_aes_256_xts;
  617. case QCE_MODE_CCM:
  618. if (creq->encklen == AES128_KEY_SIZE)
  619. return &cmdlistptr->aead_aes_128_ccm;
  620. return &cmdlistptr->aead_aes_256_ccm;
  621. default:
  622. return NULL;
  623. }
  624. }
  625. return NULL;
  626. }
/*
 * Populate the cipher command list @cmdlistinfo from @creq, covering
 * AES (ECB/CBC/CTR/XTS/CCM), DES and 3DES. For AES-CCM the auth segment
 * (nonce, key, sizes) is programmed as well.
 *
 * @totallen_in: total byte count written to the seg-size register
 * @coffset:     cipher offset within the segment
 *
 * Key/IV handling: a software key is written only when neither the HW key
 * nor the pipe key flag is set; IVs are converted to big-endian words
 * (byte-swapped first for XTS).
 *
 * Returns 0 on success, -EINVAL when a non-ECB request carries an IV
 * larger than MAX_IV_LENGTH.
 */
static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq,
		uint32_t totallen_in, uint32_t coffset,
		struct qce_cmdlist_info *cmdlistinfo)
{
	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = {
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
			0, 0, 0, 0};
	uint32_t enck_size_in_word = 0;
	uint32_t key_size;
	bool use_hw_key = false;
	bool use_pipe_key = false;
	uint32_t encr_cfg = 0;
	uint32_t ivsize = creq->ivsize;
	int i;
	struct sps_command_element *pce = NULL;

	/* XTS carries two concatenated keys; each half is key_size */
	if (creq->mode == QCE_MODE_XTS)
		key_size = creq->encklen/2;
	else
		key_size = creq->encklen;
	/* NOTE(review): go_proc is fetched again below; this first fetch
	 * appears redundant
	 */
	pce = cmdlistinfo->go_proc;
	if ((creq->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
		use_hw_key = true;
	} else {
		if ((creq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
				QCRYPTO_CTX_USE_PIPE_KEY)
			use_pipe_key = true;
	}
	pce = cmdlistinfo->go_proc;
	/* the HW key uses a dedicated GO register */
	if (use_hw_key)
		pce->addr = (uint32_t)(CRYPTO_GOPROC_QC_KEY_REG +
						pce_dev->phy_iobase);
	else
		pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
						pce_dev->phy_iobase);
	/* only a software key is converted for writing */
	if (!use_pipe_key && !use_hw_key) {
		_byte_stream_to_net_words(enckey32, creq->enckey, key_size);
		enck_size_in_word = key_size/sizeof(uint32_t);
	}
	if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) {
		uint32_t authklen32 = creq->encklen/sizeof(uint32_t);
		uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t);
		uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0};
		uint32_t auth_cfg = 0;

		/* write nonce */
		_byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE);
		pce = cmdlistinfo->auth_nonce_info;
		for (i = 0; i < noncelen32; i++, pce++)
			pce->data = nonce32[i];
		if (creq->authklen == AES128_KEY_SIZE)
			auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_128;
		else {
			if (creq->authklen == AES256_KEY_SIZE)
				auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_256;
		}
		/* MAC is computed before decrypt, after encrypt */
		if (creq->dir == QCE_ENCRYPT)
			auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
		else
			auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
		auth_cfg |= ((creq->authsize - 1) << CRYPTO_AUTH_SIZE);
		if (use_hw_key) {
			auth_cfg |= (1 << CRYPTO_USE_HW_KEY_AUTH);
		} else {
			auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
			/* write auth key (CCM reuses the cipher key) */
			pce = cmdlistinfo->auth_key;
			for (i = 0; i < authklen32; i++, pce++)
				pce->data = enckey32[i];
		}
		pce = cmdlistinfo->auth_seg_cfg;
		pce->data = auth_cfg;
		pce = cmdlistinfo->auth_seg_size;
		/* on decrypt the trailing MAC is excluded from auth input */
		if (creq->dir == QCE_ENCRYPT)
			pce->data = totallen_in;
		else
			pce->data = totallen_in - creq->authsize;
		pce = cmdlistinfo->auth_seg_start;
		pce->data = 0;
	} else {
		if (creq->op != QCE_REQ_AEAD) {
			pce = cmdlistinfo->auth_seg_cfg;
			pce->data = 0;
		}
	}
	/* pick the base encr config for the mode and key size */
	switch (creq->mode) {
	case QCE_MODE_ECB:
		if (key_size == AES128_KEY_SIZE)
			encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_128;
		else
			encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_256;
		break;
	case QCE_MODE_CBC:
		if (key_size == AES128_KEY_SIZE)
			encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128;
		else
			encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256;
		break;
	case QCE_MODE_XTS:
		if (key_size == AES128_KEY_SIZE)
			encr_cfg = pce_dev->reg.encr_cfg_aes_xts_128;
		else
			encr_cfg = pce_dev->reg.encr_cfg_aes_xts_256;
		break;
	case QCE_MODE_CCM:
		if (key_size == AES128_KEY_SIZE)
			encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_128;
		else
			encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_256;
		encr_cfg |= (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE) |
				(CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM);
		break;
	case QCE_MODE_CTR:
	default:
		if (key_size == AES128_KEY_SIZE)
			encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_128;
		else
			encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_256;
		break;
	}
	switch (creq->alg) {
	case CIPHER_ALG_DES:
		if (creq->mode != QCE_MODE_ECB) {
			if (ivsize > MAX_IV_LENGTH) {
				pr_err("%s: error: Invalid length parameter\n",
					__func__);
				return -EINVAL;
			}
			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
			pce = cmdlistinfo->encr_cntr_iv;
			pce->data = enciv32[0];
			pce++;
			pce->data = enciv32[1];
		}
		if (!use_hw_key) {
			/* DES key is 2 words */
			pce = cmdlistinfo->encr_key;
			pce->data = enckey32[0];
			pce++;
			pce->data = enckey32[1];
		}
		break;
	case CIPHER_ALG_3DES:
		if (creq->mode != QCE_MODE_ECB) {
			if (ivsize > MAX_IV_LENGTH) {
				pr_err("%s: error: Invalid length parameter\n",
					__func__);
				return -EINVAL;
			}
			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
			pce = cmdlistinfo->encr_cntr_iv;
			pce->data = enciv32[0];
			pce++;
			pce->data = enciv32[1];
		}
		if (!use_hw_key) {
			/* write encr key */
			pce = cmdlistinfo->encr_key;
			for (i = 0; i < 6; i++, pce++)
				pce->data = enckey32[i];
		}
		break;
	case CIPHER_ALG_AES:
	default:
		if (creq->mode == QCE_MODE_XTS) {
			uint32_t xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)]
					= {0, 0, 0, 0, 0, 0, 0, 0};
			uint32_t xtsklen =
					creq->encklen/(2 * sizeof(uint32_t));

			/* second key half goes into the XTS key registers */
			if (!use_hw_key && !use_pipe_key) {
				_byte_stream_to_net_words(xtskey32,
					(creq->enckey + creq->encklen/2),
							creq->encklen/2);
				/* write xts encr key */
				pce = cmdlistinfo->encr_xts_key;
				for (i = 0; i < xtsklen; i++, pce++)
					pce->data = xtskey32[i];
			}
			/* write xts du size */
			pce = cmdlistinfo->encr_xts_du_size;
			switch (creq->flags & QCRYPTO_CTX_XTS_MASK) {
			case QCRYPTO_CTX_XTS_DU_SIZE_512B:
				pce->data = min((unsigned int)QCE_SECTOR_SIZE,
						creq->cryptlen);
				break;
			case QCRYPTO_CTX_XTS_DU_SIZE_1KB:
				pce->data =
					min((unsigned int)QCE_SECTOR_SIZE * 2,
					creq->cryptlen);
				break;
			default:
				pce->data = creq->cryptlen;
				break;
			}
		}
		if (creq->mode != QCE_MODE_ECB) {
			if (ivsize > MAX_IV_LENGTH) {
				pr_err("%s: error: Invalid length parameter\n",
					__func__);
				return -EINVAL;
			}
			/* XTS IVs are byte-swapped before word conversion */
			if (creq->mode == QCE_MODE_XTS)
				_byte_stream_swap_to_net_words(enciv32,
						creq->iv, ivsize);
			else
				_byte_stream_to_net_words(enciv32, creq->iv,
						ivsize);
			/* write encr cntr iv */
			pce = cmdlistinfo->encr_cntr_iv;
			for (i = 0; i < 4; i++, pce++)
				pce->data = enciv32[i];
			if (creq->mode == QCE_MODE_CCM) {
				/* write cntr iv for ccm */
				pce = cmdlistinfo->encr_ccm_cntr_iv;
				for (i = 0; i < 4; i++, pce++)
					pce->data = enciv32[i];
				/* update cntr_iv[3] by one */
				pce = cmdlistinfo->encr_cntr_iv;
				pce += 3;
				pce->data += 1;
			}
		}
		if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
			/* keyless request: force the 128-bit key-size field */
			encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
					CRYPTO_ENCR_KEY_SZ);
		} else {
			if (!use_hw_key) {
				/* write encr key */
				pce = cmdlistinfo->encr_key;
				for (i = 0; i < enck_size_in_word; i++, pce++)
					pce->data = enckey32[i];
			}
		} /* else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
		break;
	} /* end of switch (creq->mode) */
	if (use_pipe_key)
		encr_cfg |= (CRYPTO_USE_PIPE_KEY_ENCR_ENABLED
					<< CRYPTO_USE_PIPE_KEY_ENCR);
	/* write encr seg cfg */
	pce = cmdlistinfo->encr_seg_cfg;
	/* DES/3DES reuse the command list's existing cfg word; AES builds
	 * encr_cfg from scratch
	 */
	if ((creq->alg == CIPHER_ALG_DES) || (creq->alg == CIPHER_ALG_3DES)) {
		if (creq->dir == QCE_ENCRYPT)
			pce->data |= (1 << CRYPTO_ENCODE);
		else
			pce->data &= ~(1 << CRYPTO_ENCODE);
		encr_cfg = pce->data;
	} else {
		encr_cfg |=
			((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;
	}
	if (use_hw_key)
		encr_cfg |= (CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
	else
		encr_cfg &= ~(CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
	pce->data = encr_cfg;
	/* write encr seg size */
	pce = cmdlistinfo->encr_seg_size;
	/* CCM decrypt input includes the trailing MAC */
	if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT))
		pce->data = (creq->cryptlen + creq->authsize);
	else
		pce->data = creq->cryptlen;
	/* write encr seg start */
	pce = cmdlistinfo->encr_seg_start;
	pce->data = (coffset & 0xffff);
	/* write seg size */
	pce = cmdlistinfo->seg_size;
	pce->data = totallen_in;
	return 0;
}
/*
 * Program the OTA f9 (integrity/MAC) command list from @req.
 *
 * The integrity key is written into the AUTH_IV registers, followed by
 * the last-bits word, FRESH and COUNT-I values, the direction bit, and
 * the message size. Always returns 0.
 */
static int _ce_f9_setup(struct qce_device *pce_dev, struct qce_f9_req *req,
		struct qce_cmdlist_info *cmdlistinfo)
{
	uint32_t ikey32[OTA_KEY_SIZE/sizeof(uint32_t)];
	uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
	uint32_t cfg;
	struct sps_command_element *pce;
	int i;

	/* base auth config by algorithm (SNOW3G is also the fallback) */
	switch (req->algorithm) {
	case QCE_OTA_ALGO_KASUMI:
		cfg = pce_dev->reg.auth_cfg_kasumi;
		break;
	case QCE_OTA_ALGO_SNOW3G:
	default:
		cfg = pce_dev->reg.auth_cfg_snow3g;
		break;
	}
	/* write key in CRYPTO_AUTH_IV0-3_REG */
	_byte_stream_to_net_words(ikey32, &req->ikey[0], OTA_KEY_SIZE);
	pce = cmdlistinfo->auth_iv;
	for (i = 0; i < key_size_in_word; i++, pce++)
		pce->data = ikey32[i];
	/* write last bits in CRYPTO_AUTH_IV4_REG */
	pce->data = req->last_bits;
	/* write fresh to CRYPTO_AUTH_BYTECNT0_REG */
	pce = cmdlistinfo->auth_bytecount;
	pce->data = req->fresh;
	/* write count-i to CRYPTO_AUTH_BYTECNT1_REG */
	pce++;
	pce->data = req->count_i;
	/* write auth seg cfg */
	pce = cmdlistinfo->auth_seg_cfg;
	if (req->direction == QCE_OTA_DIR_DOWNLINK)
		cfg |= BIT(CRYPTO_F9_DIRECTION);
	pce->data = cfg;
	/* write auth seg size */
	pce = cmdlistinfo->auth_seg_size;
	pce->data = req->msize;
	/* write auth seg start*/
	pce = cmdlistinfo->auth_seg_start;
	pce->data = 0;
	/* write seg size */
	pce = cmdlistinfo->seg_size;
	pce->data = req->msize;
	/* write go */
	pce = cmdlistinfo->go_proc;
	pce->addr = (uint32_t)(CRYPTO_GOPROC_REG + pce_dev->phy_iobase);
	return 0;
}
  943. static int _ce_f8_setup(struct qce_device *pce_dev, struct qce_f8_req *req,
  944. bool key_stream_mode, uint16_t npkts, uint16_t cipher_offset,
  945. uint16_t cipher_size,
  946. struct qce_cmdlist_info *cmdlistinfo)
  947. {
  948. uint32_t ckey32[OTA_KEY_SIZE/sizeof(uint32_t)];
  949. uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
  950. uint32_t cfg;
  951. struct sps_command_element *pce;
  952. int i;
  953. switch (req->algorithm) {
  954. case QCE_OTA_ALGO_KASUMI:
  955. cfg = pce_dev->reg.encr_cfg_kasumi;
  956. break;
  957. case QCE_OTA_ALGO_SNOW3G:
  958. default:
  959. cfg = pce_dev->reg.encr_cfg_snow3g;
  960. break;
  961. }
  962. /* write key */
  963. _byte_stream_to_net_words(ckey32, &req->ckey[0], OTA_KEY_SIZE);
  964. pce = cmdlistinfo->encr_key;
  965. for (i = 0; i < key_size_in_word; i++, pce++)
  966. pce->data = ckey32[i];
  967. /* write encr seg cfg */
  968. pce = cmdlistinfo->encr_seg_cfg;
  969. if (key_stream_mode)
  970. cfg |= BIT(CRYPTO_F8_KEYSTREAM_ENABLE);
  971. if (req->direction == QCE_OTA_DIR_DOWNLINK)
  972. cfg |= BIT(CRYPTO_F8_DIRECTION);
  973. pce->data = cfg;
  974. /* write encr seg start */
  975. pce = cmdlistinfo->encr_seg_start;
  976. pce->data = (cipher_offset & 0xffff);
  977. /* write encr seg size */
  978. pce = cmdlistinfo->encr_seg_size;
  979. pce->data = cipher_size;
  980. /* write seg size */
  981. pce = cmdlistinfo->seg_size;
  982. pce->data = req->data_len;
  983. /* write cntr0_iv0 for countC */
  984. pce = cmdlistinfo->encr_cntr_iv;
  985. pce->data = req->count_c;
  986. /* write cntr1_iv1 for nPkts, and bearer */
  987. pce++;
  988. if (npkts == 1)
  989. npkts = 0;
  990. pce->data = req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER |
  991. npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT;
  992. /* write go */
  993. pce = cmdlistinfo->go_proc;
  994. pce->addr = (uint32_t)(CRYPTO_GOPROC_REG + pce_dev->phy_iobase);
  995. return 0;
  996. }
  997. static void _qce_dump_descr_fifos(struct qce_device *pce_dev, int req_info)
  998. {
  999. int i, j, ents;
  1000. struct ce_sps_data *pce_sps_data;
  1001. struct sps_iovec *iovec;
  1002. uint32_t cmd_flags = SPS_IOVEC_FLAG_CMD;
  1003. pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
  1004. iovec = pce_sps_data->in_transfer.iovec;
  1005. pr_info("==============================================\n");
  1006. pr_info("CONSUMER (TX/IN/DEST) PIPE DESCRIPTOR\n");
  1007. pr_info("==============================================\n");
  1008. for (i = 0; i < pce_sps_data->in_transfer.iovec_count; i++) {
  1009. pr_info(" [%d] addr=0x%x size=0x%x flags=0x%x\n", i,
  1010. iovec->addr, iovec->size, iovec->flags);
  1011. if (iovec->flags & cmd_flags) {
  1012. struct sps_command_element *pced;
  1013. pced = (struct sps_command_element *)
  1014. (GET_VIRT_ADDR(iovec->addr));
  1015. ents = iovec->size/(sizeof(struct sps_command_element));
  1016. for (j = 0; j < ents; j++) {
  1017. pr_info(" [%d] [0x%x] 0x%x\n", j,
  1018. pced->addr, pced->data);
  1019. pced++;
  1020. }
  1021. }
  1022. iovec++;
  1023. }
  1024. pr_info("==============================================\n");
  1025. pr_info("PRODUCER (RX/OUT/SRC) PIPE DESCRIPTOR\n");
  1026. pr_info("==============================================\n");
  1027. iovec = pce_sps_data->out_transfer.iovec;
  1028. for (i = 0; i < pce_sps_data->out_transfer.iovec_count; i++) {
  1029. pr_info(" [%d] addr=0x%x size=0x%x flags=0x%x\n", i,
  1030. iovec->addr, iovec->size, iovec->flags);
  1031. iovec++;
  1032. }
  1033. }
#ifdef QCE_DEBUG
/* Debug build: dump the SPS descriptor FIFOs for @req_info. */
static void _qce_dump_descr_fifos_dbg(struct qce_device *pce_dev, int req_info)
{
	_qce_dump_descr_fifos(pce_dev, req_info);
}

/* Debug build: log every register write before issuing it. */
#define QCE_WRITE_REG(val, addr) \
{ \
	pr_info(" [0x%pK] 0x%x\n", addr, (uint32_t)val); \
	writel_relaxed(val, addr); \
}

#else

/* Non-debug build: descriptor dumping is a no-op. */
static void _qce_dump_descr_fifos_dbg(struct qce_device *pce_dev, int req_info)
{
}

/* Non-debug build: plain relaxed MMIO write, no logging. */
#define QCE_WRITE_REG(val, addr) \
	writel_relaxed(val, addr)

#endif
  1051. static int _ce_setup_hash_direct(struct qce_device *pce_dev,
  1052. struct qce_sha_req *sreq)
  1053. {
  1054. uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
  1055. uint32_t diglen;
  1056. bool use_hw_key = false;
  1057. bool use_pipe_key = false;
  1058. int i;
  1059. uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
  1060. 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
  1061. uint32_t authk_size_in_word = sreq->authklen/sizeof(uint32_t);
  1062. bool sha1 = false;
  1063. uint32_t auth_cfg = 0;
  1064. /* clear status */
  1065. QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
  1066. QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
  1067. CRYPTO_CONFIG_REG));
  1068. /*
  1069. * Ensure previous instructions (setting the CONFIG register)
  1070. * was completed before issuing starting to set other config register
  1071. * This is to ensure the configurations are done in correct endian-ness
  1072. * as set in the CONFIG registers
  1073. */
  1074. mb();
  1075. if (sreq->alg == QCE_HASH_AES_CMAC) {
  1076. /* write seg_cfg */
  1077. QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
  1078. /* write seg_cfg */
  1079. QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
  1080. /* write seg_cfg */
  1081. QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
  1082. /* Clear auth_ivn, auth_keyn registers */
  1083. for (i = 0; i < 16; i++) {
  1084. QCE_WRITE_REG(0, (pce_dev->iobase +
  1085. (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
  1086. QCE_WRITE_REG(0, (pce_dev->iobase +
  1087. (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t))));
  1088. }
  1089. /* write auth_bytecnt 0/1/2/3, start with 0 */
  1090. for (i = 0; i < 4; i++)
  1091. QCE_WRITE_REG(0, pce_dev->iobase +
  1092. CRYPTO_AUTH_BYTECNT0_REG +
  1093. i * sizeof(uint32_t));
  1094. if (sreq->authklen == AES128_KEY_SIZE)
  1095. auth_cfg = pce_dev->reg.auth_cfg_cmac_128;
  1096. else
  1097. auth_cfg = pce_dev->reg.auth_cfg_cmac_256;
  1098. }
  1099. if ((sreq->alg == QCE_HASH_SHA1_HMAC) ||
  1100. (sreq->alg == QCE_HASH_SHA256_HMAC) ||
  1101. (sreq->alg == QCE_HASH_AES_CMAC)) {
  1102. _byte_stream_to_net_words(mackey32, sreq->authkey,
  1103. sreq->authklen);
  1104. /* no more check for null key. use flag to check*/
  1105. if ((sreq->flags & QCRYPTO_CTX_USE_HW_KEY) ==
  1106. QCRYPTO_CTX_USE_HW_KEY) {
  1107. use_hw_key = true;
  1108. } else if ((sreq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
  1109. QCRYPTO_CTX_USE_PIPE_KEY) {
  1110. use_pipe_key = true;
  1111. } else {
  1112. /* setup key */
  1113. for (i = 0; i < authk_size_in_word; i++)
  1114. QCE_WRITE_REG(mackey32[i], (pce_dev->iobase +
  1115. (CRYPTO_AUTH_KEY0_REG +
  1116. i*sizeof(uint32_t))));
  1117. }
  1118. }
  1119. if (sreq->alg == QCE_HASH_AES_CMAC)
  1120. goto go_proc;
  1121. /* if not the last, the size has to be on the block boundary */
  1122. if (!sreq->last_blk && (sreq->size % SHA256_BLOCK_SIZE))
  1123. return -EIO;
  1124. switch (sreq->alg) {
  1125. case QCE_HASH_SHA1:
  1126. auth_cfg = pce_dev->reg.auth_cfg_sha1;
  1127. diglen = SHA1_DIGEST_SIZE;
  1128. sha1 = true;
  1129. break;
  1130. case QCE_HASH_SHA1_HMAC:
  1131. auth_cfg = pce_dev->reg.auth_cfg_hmac_sha1;
  1132. diglen = SHA1_DIGEST_SIZE;
  1133. sha1 = true;
  1134. break;
  1135. case QCE_HASH_SHA256:
  1136. auth_cfg = pce_dev->reg.auth_cfg_sha256;
  1137. diglen = SHA256_DIGEST_SIZE;
  1138. break;
  1139. case QCE_HASH_SHA256_HMAC:
  1140. auth_cfg = pce_dev->reg.auth_cfg_hmac_sha256;
  1141. diglen = SHA256_DIGEST_SIZE;
  1142. break;
  1143. default:
  1144. return -EINVAL;
  1145. }
  1146. /* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */
  1147. if (sreq->first_blk) {
  1148. if (sha1) {
  1149. for (i = 0; i < 5; i++)
  1150. auth32[i] = _std_init_vector_sha1[i];
  1151. } else {
  1152. for (i = 0; i < 8; i++)
  1153. auth32[i] = _std_init_vector_sha256[i];
  1154. }
  1155. } else {
  1156. _byte_stream_to_net_words(auth32, sreq->digest, diglen);
  1157. }
  1158. /* Set auth_ivn, auth_keyn registers */
  1159. for (i = 0; i < 5; i++)
  1160. QCE_WRITE_REG(auth32[i], (pce_dev->iobase +
  1161. (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
  1162. if ((sreq->alg == QCE_HASH_SHA256) ||
  1163. (sreq->alg == QCE_HASH_SHA256_HMAC)) {
  1164. for (i = 5; i < 8; i++)
  1165. QCE_WRITE_REG(auth32[i], (pce_dev->iobase +
  1166. (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
  1167. }
  1168. /* write auth_bytecnt 0/1/2/3, start with 0 */
  1169. for (i = 0; i < 2; i++)
  1170. QCE_WRITE_REG(sreq->auth_data[i], pce_dev->iobase +
  1171. CRYPTO_AUTH_BYTECNT0_REG +
  1172. i * sizeof(uint32_t));
  1173. /* Set/reset last bit in CFG register */
  1174. if (sreq->last_blk)
  1175. auth_cfg |= 1 << CRYPTO_LAST;
  1176. else
  1177. auth_cfg &= ~(1 << CRYPTO_LAST);
  1178. if (sreq->first_blk)
  1179. auth_cfg |= 1 << CRYPTO_FIRST;
  1180. else
  1181. auth_cfg &= ~(1 << CRYPTO_FIRST);
  1182. if (use_hw_key)
  1183. auth_cfg |= 1 << CRYPTO_USE_HW_KEY_AUTH;
  1184. if (use_pipe_key)
  1185. auth_cfg |= 1 << CRYPTO_USE_PIPE_KEY_AUTH;
  1186. go_proc:
  1187. /* write seg_cfg */
  1188. QCE_WRITE_REG(auth_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
  1189. /* write auth seg_size */
  1190. QCE_WRITE_REG(sreq->size, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
  1191. /* write auth_seg_start */
  1192. QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
  1193. /* reset encr seg_cfg */
  1194. QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
  1195. /* write seg_size */
  1196. QCE_WRITE_REG(sreq->size, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
  1197. QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
  1198. CRYPTO_CONFIG_REG));
  1199. /* issue go to crypto */
  1200. if (!use_hw_key) {
  1201. QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
  1202. (1 << CRYPTO_CLR_CNTXT)),
  1203. pce_dev->iobase + CRYPTO_GOPROC_REG);
  1204. } else {
  1205. QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
  1206. pce_dev->iobase + CRYPTO_GOPROC_QC_KEY_REG);
  1207. }
  1208. /*
  1209. * Ensure previous instructions (setting the GO register)
  1210. * was completed before issuing a DMA transfer request
  1211. */
  1212. mb();
  1213. return 0;
  1214. }
/*
 * _ce_setup_aead_direct() - program the crypto engine registers, via
 * direct register writes, for an AEAD request that pairs a CBC block
 * cipher (DES, 3DES, or AES-128/256) with SHA1-HMAC or SHA256-HMAC.
 *
 * @pce_dev:     crypto engine device; iobase must be mapped
 * @q_req:       the AEAD request (alg/mode/dir, cipher key, auth key, iv)
 * @totallen_in: total length to authenticate; written to both the auth
 *               segment size and the overall segment size registers
 * @coffset:     byte offset of the cipher data within the segment
 *
 * Return: 0 on success, -EINVAL for an unsupported alg/mode combination.
 *
 * NOTE(review): the register write sequence and the mb() barriers are
 * order-sensitive; the engine is put in big-endian configuration first
 * and switched back to little-endian just before GO. Do not reorder.
 */
static int _ce_setup_aead_direct(struct qce_device *pce_dev,
		struct qce_req *q_req, uint32_t totallen_in, uint32_t coffset)
{
	int32_t authk_size_in_word = SHA_HMAC_KEY_SIZE/sizeof(uint32_t);
	int i;
	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {0};
	uint32_t a_cfg;
	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE*2)/sizeof(uint32_t)] = {0};
	uint32_t enciv32[MAX_IV_LENGTH/sizeof(uint32_t)] = {0};
	uint32_t enck_size_in_word = 0;
	uint32_t enciv_in_word;
	uint32_t key_size;
	uint32_t ivsize = q_req->ivsize;
	uint32_t encr_cfg;

	/* clear status */
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);

	/* switch the engine into big-endian configuration */
	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
							CRYPTO_CONFIG_REG));
	/*
	 * Ensure previous instructions (setting the CONFIG register)
	 * was completed before issuing starting to set other config register
	 * This is to ensure the configurations are done in correct endian-ness
	 * as set in the CONFIG registers
	 */
	mb();

	key_size = q_req->encklen;
	enck_size_in_word = key_size/sizeof(uint32_t);

	/* select encr_cfg and IV length (in 32-bit words) per cipher alg */
	switch (q_req->alg) {
	case CIPHER_ALG_DES:
		switch (q_req->mode) {
		case QCE_MODE_CBC:
			encr_cfg = pce_dev->reg.encr_cfg_des_cbc;
			break;
		default:
			return -EINVAL;
		}
		enciv_in_word = 2;
		break;
	case CIPHER_ALG_3DES:
		switch (q_req->mode) {
		case QCE_MODE_CBC:
			encr_cfg = pce_dev->reg.encr_cfg_3des_cbc;
			break;
		default:
			return -EINVAL;
		}
		enciv_in_word = 2;
		break;
	case CIPHER_ALG_AES:
		switch (q_req->mode) {
		case QCE_MODE_CBC:
			if (key_size == AES128_KEY_SIZE)
				encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128;
			else if (key_size == AES256_KEY_SIZE)
				encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256;
			else
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
		enciv_in_word = 4;
		break;
	default:
		return -EINVAL;
	}

	/* write CNTR0_IV0_REG (all supported modes here are CBC, so this
	 * always runs; the check mirrors the other setup paths)
	 */
	if (q_req->mode != QCE_MODE_ECB) {
		_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
		for (i = 0; i < enciv_in_word; i++)
			QCE_WRITE_REG(enciv32[i], pce_dev->iobase +
				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)));
	}

	/*
	 * write encr key
	 * do not use hw key or pipe key
	 */
	_byte_stream_to_net_words(enckey32, q_req->enckey, key_size);
	for (i = 0; i < enck_size_in_word; i++)
		QCE_WRITE_REG(enckey32[i], pce_dev->iobase +
			(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)));

	/* write encr seg cfg */
	if (q_req->dir == QCE_ENCRYPT)
		encr_cfg |= (1 << CRYPTO_ENCODE);
	QCE_WRITE_REG(encr_cfg, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);

	/* we only support sha1-hmac and sha256-hmac at this point */
	_byte_stream_to_net_words(mackey32, q_req->authkey,
					q_req->authklen);
	for (i = 0; i < authk_size_in_word; i++)
		QCE_WRITE_REG(mackey32[i], pce_dev->iobase +
			(CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)));

	/* seed auth IV registers with the standard SHA initial vector */
	if (q_req->auth_alg == QCE_HASH_SHA1_HMAC) {
		for (i = 0; i < 5; i++)
			QCE_WRITE_REG(_std_init_vector_sha1[i],
				pce_dev->iobase +
				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)));
	} else {
		for (i = 0; i < 8; i++)
			QCE_WRITE_REG(_std_init_vector_sha256[i],
				pce_dev->iobase +
				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)));
	}

	/* write auth_bytecnt 0/1, start with 0 */
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT0_REG);
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT1_REG);

	/* write encr seg size */
	QCE_WRITE_REG(q_req->cryptlen, pce_dev->iobase +
			CRYPTO_ENCR_SEG_SIZE_REG);

	/* write encr start */
	QCE_WRITE_REG(coffset & 0xffff, pce_dev->iobase +
			CRYPTO_ENCR_SEG_START_REG);

	if (q_req->auth_alg == QCE_HASH_SHA1_HMAC)
		a_cfg = pce_dev->reg.auth_cfg_aead_sha1_hmac;
	else
		a_cfg = pce_dev->reg.auth_cfg_aead_sha256_hmac;

	/* MAC is computed after encryption, verified before decryption */
	if (q_req->dir == QCE_ENCRYPT)
		a_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
	else
		a_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);

	/* write auth seg_cfg */
	QCE_WRITE_REG(a_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);

	/* write auth seg_size */
	QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);

	/* write auth_seg_start */
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);

	/* write seg_size */
	QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);

	/* restore little-endian configuration before kicking off */
	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
							CRYPTO_CONFIG_REG));
	/* issue go to crypto */
	QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
				(1 << CRYPTO_CLR_CNTXT)),
				pce_dev->iobase + CRYPTO_GOPROC_REG);
	/*
	 * Ensure previous instructions (setting the GO register)
	 * was completed before issuing a DMA transfer request
	 */
	mb();
	return 0;
}
  1355. static int _ce_setup_cipher_direct(struct qce_device *pce_dev,
  1356. struct qce_req *creq, uint32_t totallen_in, uint32_t coffset)
  1357. {
  1358. uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = {
  1359. 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
  1360. uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
  1361. 0, 0, 0, 0};
  1362. uint32_t enck_size_in_word = 0;
  1363. uint32_t key_size;
  1364. bool use_hw_key = false;
  1365. bool use_pipe_key = false;
  1366. uint32_t encr_cfg = 0;
  1367. uint32_t ivsize = creq->ivsize;
  1368. int i;
  1369. /* clear status */
  1370. QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
  1371. QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
  1372. CRYPTO_CONFIG_REG));
  1373. /*
  1374. * Ensure previous instructions (setting the CONFIG register)
  1375. * was completed before issuing starting to set other config register
  1376. * This is to ensure the configurations are done in correct endian-ness
  1377. * as set in the CONFIG registers
  1378. */
  1379. mb();
  1380. if (creq->mode == QCE_MODE_XTS)
  1381. key_size = creq->encklen/2;
  1382. else
  1383. key_size = creq->encklen;
  1384. if ((creq->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
  1385. use_hw_key = true;
  1386. } else {
  1387. if ((creq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
  1388. QCRYPTO_CTX_USE_PIPE_KEY)
  1389. use_pipe_key = true;
  1390. }
  1391. if (!use_pipe_key && !use_hw_key) {
  1392. _byte_stream_to_net_words(enckey32, creq->enckey, key_size);
  1393. enck_size_in_word = key_size/sizeof(uint32_t);
  1394. }
  1395. if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) {
  1396. uint32_t authklen32 = creq->encklen/sizeof(uint32_t);
  1397. uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t);
  1398. uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0};
  1399. uint32_t auth_cfg = 0;
  1400. /* Clear auth_ivn, auth_keyn registers */
  1401. for (i = 0; i < 16; i++) {
  1402. QCE_WRITE_REG(0, (pce_dev->iobase +
  1403. (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
  1404. QCE_WRITE_REG(0, (pce_dev->iobase +
  1405. (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t))));
  1406. }
  1407. /* write auth_bytecnt 0/1/2/3, start with 0 */
  1408. for (i = 0; i < 4; i++)
  1409. QCE_WRITE_REG(0, pce_dev->iobase +
  1410. CRYPTO_AUTH_BYTECNT0_REG +
  1411. i * sizeof(uint32_t));
  1412. /* write nonce */
  1413. _byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE);
  1414. for (i = 0; i < noncelen32; i++)
  1415. QCE_WRITE_REG(nonce32[i], pce_dev->iobase +
  1416. CRYPTO_AUTH_INFO_NONCE0_REG +
  1417. (i*sizeof(uint32_t)));
  1418. if (creq->authklen == AES128_KEY_SIZE)
  1419. auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_128;
  1420. else {
  1421. if (creq->authklen == AES256_KEY_SIZE)
  1422. auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_256;
  1423. }
  1424. if (creq->dir == QCE_ENCRYPT)
  1425. auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
  1426. else
  1427. auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
  1428. auth_cfg |= ((creq->authsize - 1) << CRYPTO_AUTH_SIZE);
  1429. if (use_hw_key) {
  1430. auth_cfg |= (1 << CRYPTO_USE_HW_KEY_AUTH);
  1431. } else {
  1432. auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
  1433. /* write auth key */
  1434. for (i = 0; i < authklen32; i++)
  1435. QCE_WRITE_REG(enckey32[i], pce_dev->iobase +
  1436. CRYPTO_AUTH_KEY0_REG + (i*sizeof(uint32_t)));
  1437. }
  1438. QCE_WRITE_REG(auth_cfg, pce_dev->iobase +
  1439. CRYPTO_AUTH_SEG_CFG_REG);
  1440. if (creq->dir == QCE_ENCRYPT) {
  1441. QCE_WRITE_REG(totallen_in, pce_dev->iobase +
  1442. CRYPTO_AUTH_SEG_SIZE_REG);
  1443. } else {
  1444. QCE_WRITE_REG((totallen_in - creq->authsize),
  1445. pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
  1446. }
  1447. QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
  1448. } else {
  1449. if (creq->op != QCE_REQ_AEAD)
  1450. QCE_WRITE_REG(0, pce_dev->iobase +
  1451. CRYPTO_AUTH_SEG_CFG_REG);
  1452. }
  1453. /*
  1454. * Ensure previous instructions (write to all AUTH registers)
  1455. * was completed before accessing a register that is not in
  1456. * in the same 1K range.
  1457. */
  1458. mb();
  1459. switch (creq->mode) {
  1460. case QCE_MODE_ECB:
  1461. if (key_size == AES128_KEY_SIZE)
  1462. encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_128;
  1463. else
  1464. encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_256;
  1465. break;
  1466. case QCE_MODE_CBC:
  1467. if (key_size == AES128_KEY_SIZE)
  1468. encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128;
  1469. else
  1470. encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256;
  1471. break;
  1472. case QCE_MODE_XTS:
  1473. if (key_size == AES128_KEY_SIZE)
  1474. encr_cfg = pce_dev->reg.encr_cfg_aes_xts_128;
  1475. else
  1476. encr_cfg = pce_dev->reg.encr_cfg_aes_xts_256;
  1477. break;
  1478. case QCE_MODE_CCM:
  1479. if (key_size == AES128_KEY_SIZE)
  1480. encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_128;
  1481. else
  1482. encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_256;
  1483. break;
  1484. case QCE_MODE_CTR:
  1485. default:
  1486. if (key_size == AES128_KEY_SIZE)
  1487. encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_128;
  1488. else
  1489. encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_256;
  1490. break;
  1491. }
  1492. switch (creq->alg) {
  1493. case CIPHER_ALG_DES:
  1494. if (creq->mode != QCE_MODE_ECB) {
  1495. encr_cfg = pce_dev->reg.encr_cfg_des_cbc;
  1496. _byte_stream_to_net_words(enciv32, creq->iv, ivsize);
  1497. QCE_WRITE_REG(enciv32[0], pce_dev->iobase +
  1498. CRYPTO_CNTR0_IV0_REG);
  1499. QCE_WRITE_REG(enciv32[1], pce_dev->iobase +
  1500. CRYPTO_CNTR1_IV1_REG);
  1501. } else {
  1502. encr_cfg = pce_dev->reg.encr_cfg_des_ecb;
  1503. }
  1504. if (!use_hw_key) {
  1505. QCE_WRITE_REG(enckey32[0], pce_dev->iobase +
  1506. CRYPTO_ENCR_KEY0_REG);
  1507. QCE_WRITE_REG(enckey32[1], pce_dev->iobase +
  1508. CRYPTO_ENCR_KEY1_REG);
  1509. }
  1510. break;
  1511. case CIPHER_ALG_3DES:
  1512. if (creq->mode != QCE_MODE_ECB) {
  1513. _byte_stream_to_net_words(enciv32, creq->iv, ivsize);
  1514. QCE_WRITE_REG(enciv32[0], pce_dev->iobase +
  1515. CRYPTO_CNTR0_IV0_REG);
  1516. QCE_WRITE_REG(enciv32[1], pce_dev->iobase +
  1517. CRYPTO_CNTR1_IV1_REG);
  1518. encr_cfg = pce_dev->reg.encr_cfg_3des_cbc;
  1519. } else {
  1520. encr_cfg = pce_dev->reg.encr_cfg_3des_ecb;
  1521. }
  1522. if (!use_hw_key) {
  1523. /* write encr key */
  1524. for (i = 0; i < 6; i++)
  1525. QCE_WRITE_REG(enckey32[0], (pce_dev->iobase +
  1526. (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t))));
  1527. }
  1528. break;
  1529. case CIPHER_ALG_AES:
  1530. default:
  1531. if (creq->mode == QCE_MODE_XTS) {
  1532. uint32_t xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)]
  1533. = {0, 0, 0, 0, 0, 0, 0, 0};
  1534. uint32_t xtsklen =
  1535. creq->encklen/(2 * sizeof(uint32_t));
  1536. if (!use_hw_key && !use_pipe_key) {
  1537. _byte_stream_to_net_words(xtskey32,
  1538. (creq->enckey + creq->encklen/2),
  1539. creq->encklen/2);
  1540. /* write xts encr key */
  1541. for (i = 0; i < xtsklen; i++)
  1542. QCE_WRITE_REG(xtskey32[i],
  1543. pce_dev->iobase +
  1544. CRYPTO_ENCR_XTS_KEY0_REG +
  1545. (i * sizeof(uint32_t)));
  1546. }
  1547. /* write xts du size */
  1548. switch (creq->flags & QCRYPTO_CTX_XTS_MASK) {
  1549. case QCRYPTO_CTX_XTS_DU_SIZE_512B:
  1550. QCE_WRITE_REG(
  1551. min((uint32_t)QCE_SECTOR_SIZE,
  1552. creq->cryptlen), pce_dev->iobase +
  1553. CRYPTO_ENCR_XTS_DU_SIZE_REG);
  1554. break;
  1555. case QCRYPTO_CTX_XTS_DU_SIZE_1KB:
  1556. QCE_WRITE_REG(
  1557. min((uint32_t)(QCE_SECTOR_SIZE * 2),
  1558. creq->cryptlen), pce_dev->iobase +
  1559. CRYPTO_ENCR_XTS_DU_SIZE_REG);
  1560. break;
  1561. default:
  1562. QCE_WRITE_REG(creq->cryptlen,
  1563. pce_dev->iobase +
  1564. CRYPTO_ENCR_XTS_DU_SIZE_REG);
  1565. break;
  1566. }
  1567. }
  1568. if (creq->mode != QCE_MODE_ECB) {
  1569. if (creq->mode == QCE_MODE_XTS)
  1570. _byte_stream_swap_to_net_words(enciv32,
  1571. creq->iv, ivsize);
  1572. else
  1573. _byte_stream_to_net_words(enciv32, creq->iv,
  1574. ivsize);
  1575. /* write encr cntr iv */
  1576. for (i = 0; i <= 3; i++)
  1577. QCE_WRITE_REG(enciv32[i], pce_dev->iobase +
  1578. CRYPTO_CNTR0_IV0_REG +
  1579. (i * sizeof(uint32_t)));
  1580. if (creq->mode == QCE_MODE_CCM) {
  1581. /* write cntr iv for ccm */
  1582. for (i = 0; i <= 3; i++)
  1583. QCE_WRITE_REG(enciv32[i],
  1584. pce_dev->iobase +
  1585. CRYPTO_ENCR_CCM_INT_CNTR0_REG +
  1586. (i * sizeof(uint32_t)));
  1587. /* update cntr_iv[3] by one */
  1588. QCE_WRITE_REG((enciv32[3] + 1),
  1589. pce_dev->iobase +
  1590. CRYPTO_CNTR0_IV0_REG +
  1591. (3 * sizeof(uint32_t)));
  1592. }
  1593. }
  1594. if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
  1595. encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
  1596. CRYPTO_ENCR_KEY_SZ);
  1597. } else {
  1598. if (!use_hw_key && !use_pipe_key) {
  1599. for (i = 0; i < enck_size_in_word; i++)
  1600. QCE_WRITE_REG(enckey32[i],
  1601. pce_dev->iobase +
  1602. CRYPTO_ENCR_KEY0_REG +
  1603. (i * sizeof(uint32_t)));
  1604. }
  1605. } /* else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
  1606. break;
  1607. } /* end of switch (creq->mode) */
  1608. if (use_pipe_key)
  1609. encr_cfg |= (CRYPTO_USE_PIPE_KEY_ENCR_ENABLED
  1610. << CRYPTO_USE_PIPE_KEY_ENCR);
  1611. /* write encr seg cfg */
  1612. encr_cfg |= ((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;
  1613. if (use_hw_key)
  1614. encr_cfg |= (CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
  1615. else
  1616. encr_cfg &= ~(CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
  1617. /* write encr seg cfg */
  1618. QCE_WRITE_REG(encr_cfg, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
  1619. /* write encr seg size */
  1620. if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT)) {
  1621. QCE_WRITE_REG((creq->cryptlen + creq->authsize),
  1622. pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
  1623. } else {
  1624. QCE_WRITE_REG(creq->cryptlen,
  1625. pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
  1626. }
  1627. /* write encr seg start */
  1628. QCE_WRITE_REG((coffset & 0xffff),
  1629. pce_dev->iobase + CRYPTO_ENCR_SEG_START_REG);
  1630. /* write encr counter mask */
  1631. QCE_WRITE_REG(0xffffffff,
  1632. pce_dev->iobase + CRYPTO_CNTR_MASK_REG);
  1633. QCE_WRITE_REG(0xffffffff,
  1634. pce_dev->iobase + CRYPTO_CNTR_MASK_REG0);
  1635. QCE_WRITE_REG(0xffffffff,
  1636. pce_dev->iobase + CRYPTO_CNTR_MASK_REG1);
  1637. QCE_WRITE_REG(0xffffffff,
  1638. pce_dev->iobase + CRYPTO_CNTR_MASK_REG2);
  1639. /* write seg size */
  1640. QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
  1641. QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
  1642. CRYPTO_CONFIG_REG));
  1643. /* issue go to crypto */
  1644. if (!use_hw_key) {
  1645. QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
  1646. (1 << CRYPTO_CLR_CNTXT)),
  1647. pce_dev->iobase + CRYPTO_GOPROC_REG);
  1648. } else {
  1649. QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
  1650. pce_dev->iobase + CRYPTO_GOPROC_QC_KEY_REG);
  1651. }
  1652. /*
  1653. * Ensure previous instructions (setting the GO register)
  1654. * was completed before issuing a DMA transfer request
  1655. */
  1656. mb();
  1657. return 0;
  1658. }
/*
 * _ce_f9_setup_direct() - program the crypto engine registers, via
 * direct register writes, for an OTA f9 (integrity) operation using
 * Kasumi or (default) Snow3G.
 *
 * @pce_dev: crypto engine device; iobase must be mapped
 * @req:     f9 request (algorithm, ikey, fresh, count-i, last bits,
 *           direction, message size)
 *
 * Return: 0.
 *
 * NOTE(review): the register write sequence and the mb() barrier are
 * order-sensitive; do not reorder.
 */
static int _ce_f9_setup_direct(struct qce_device *pce_dev,
				struct qce_f9_req *req)
{
	uint32_t ikey32[OTA_KEY_SIZE/sizeof(uint32_t)];
	uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
	uint32_t auth_cfg;
	int i;

	switch (req->algorithm) {
	case QCE_OTA_ALGO_KASUMI:
		auth_cfg = pce_dev->reg.auth_cfg_kasumi;
		break;
	case QCE_OTA_ALGO_SNOW3G:
	default:
		auth_cfg = pce_dev->reg.auth_cfg_snow3g;
		break;
	}

	/* clear status */
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);

	/* set big endian configuration */
	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
							CRYPTO_CONFIG_REG));
	/*
	 * Ensure previous instructions (setting the CONFIG register)
	 * was completed before issuing starting to set other config register
	 * This is to ensure the configurations are done in correct endian-ness
	 * as set in the CONFIG registers
	 */
	mb();

	/* write enc_seg_cfg (no cipher segment for an integrity-only op) */
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);

	/* write ecn_seg_size */
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);

	/* write key in CRYPTO_AUTH_IV0-3_REG */
	_byte_stream_to_net_words(ikey32, &req->ikey[0], OTA_KEY_SIZE);
	for (i = 0; i < key_size_in_word; i++)
		QCE_WRITE_REG(ikey32[i], (pce_dev->iobase +
			(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));

	/* write last bits in CRYPTO_AUTH_IV4_REG */
	QCE_WRITE_REG(req->last_bits, (pce_dev->iobase +
					CRYPTO_AUTH_IV4_REG));

	/* write fresh to CRYPTO_AUTH_BYTECNT0_REG */
	QCE_WRITE_REG(req->fresh, (pce_dev->iobase +
					CRYPTO_AUTH_BYTECNT0_REG));

	/* write count-i to CRYPTO_AUTH_BYTECNT1_REG */
	QCE_WRITE_REG(req->count_i, (pce_dev->iobase +
					CRYPTO_AUTH_BYTECNT1_REG));

	/* write auth seg cfg */
	if (req->direction == QCE_OTA_DIR_DOWNLINK)
		auth_cfg |= BIT(CRYPTO_F9_DIRECTION);
	QCE_WRITE_REG(auth_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);

	/* write auth seg size */
	QCE_WRITE_REG(req->msize, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);

	/* write auth seg start*/
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);

	/* write seg size */
	QCE_WRITE_REG(req->msize, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);

	/* set little endian configuration before go*/
	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
							CRYPTO_CONFIG_REG));
	/* write go */
	QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
				(1 << CRYPTO_CLR_CNTXT)),
				pce_dev->iobase + CRYPTO_GOPROC_REG);
	/*
	 * Ensure previous instructions (setting the GO register)
	 * was completed before issuing a DMA transfer request
	 */
	mb();
	return 0;
}
/*
 * _ce_f8_setup_direct() - program the crypto engine registers, via
 * direct register writes, for an OTA f8 (ciphering) operation using
 * Kasumi or (default) Snow3G.
 *
 * @pce_dev:         crypto engine device; iobase must be mapped
 * @req:             f8 request (algorithm, ckey, count-c, bearer,
 *                   direction, data length)
 * @key_stream_mode: enable key-stream generation mode
 * @npkts:           packet count for CNTR1; a value of 1 is written as 0
 * @cipher_offset:   byte offset of cipher data within the segment
 * @cipher_size:     cipher segment size in bytes
 *
 * Return: 0.
 *
 * NOTE(review): unlike the other direct-setup paths there is no mb()
 * after the big-endian CONFIG write here — presumably intentional, but
 * verify against the other _ce_*_setup_direct variants.
 */
static int _ce_f8_setup_direct(struct qce_device *pce_dev,
		struct qce_f8_req *req, bool key_stream_mode,
		uint16_t npkts, uint16_t cipher_offset, uint16_t cipher_size)
{
	int i = 0;
	uint32_t encr_cfg = 0;
	uint32_t ckey32[OTA_KEY_SIZE/sizeof(uint32_t)];
	uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);

	switch (req->algorithm) {
	case QCE_OTA_ALGO_KASUMI:
		encr_cfg = pce_dev->reg.encr_cfg_kasumi;
		break;
	case QCE_OTA_ALGO_SNOW3G:
	default:
		encr_cfg = pce_dev->reg.encr_cfg_snow3g;
		break;
	}

	/* clear status */
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);

	/* set big endian configuration */
	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
							CRYPTO_CONFIG_REG));

	/* write auth seg configuration (no auth segment for f8) */
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);

	/* write auth seg size */
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);

	/* write key */
	_byte_stream_to_net_words(ckey32, &req->ckey[0], OTA_KEY_SIZE);
	for (i = 0; i < key_size_in_word; i++)
		QCE_WRITE_REG(ckey32[i], (pce_dev->iobase +
			(CRYPTO_ENCR_KEY0_REG + i*sizeof(uint32_t))));

	/* write encr seg cfg */
	if (key_stream_mode)
		encr_cfg |= BIT(CRYPTO_F8_KEYSTREAM_ENABLE);
	if (req->direction == QCE_OTA_DIR_DOWNLINK)
		encr_cfg |= BIT(CRYPTO_F8_DIRECTION);
	QCE_WRITE_REG(encr_cfg, pce_dev->iobase +
						CRYPTO_ENCR_SEG_CFG_REG);

	/* write encr seg start */
	QCE_WRITE_REG((cipher_offset & 0xffff), pce_dev->iobase +
						CRYPTO_ENCR_SEG_START_REG);

	/* write encr seg size  */
	QCE_WRITE_REG(cipher_size, pce_dev->iobase +
						CRYPTO_ENCR_SEG_SIZE_REG);

	/* write seg size  */
	QCE_WRITE_REG(req->data_len, pce_dev->iobase +
						CRYPTO_SEG_SIZE_REG);

	/* write cntr0_iv0 for countC */
	QCE_WRITE_REG(req->count_c, pce_dev->iobase +
						CRYPTO_CNTR0_IV0_REG);

	/* write cntr1_iv1 for nPkts, and bearer */
	if (npkts == 1)
		npkts = 0;
	QCE_WRITE_REG(req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER |
				npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT,
			pce_dev->iobase + CRYPTO_CNTR1_IV1_REG);

	/* set little endian configuration before go*/
	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
							CRYPTO_CONFIG_REG));
	/* write go */
	QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
				(1 << CRYPTO_CLR_CNTXT)),
				pce_dev->iobase + CRYPTO_GOPROC_REG);
	/*
	 * Ensure previous instructions (setting the GO register)
	 * was completed before issuing a DMA transfer request
	 */
	mb();
	return 0;
}
  1799. static int _qce_unlock_other_pipes(struct qce_device *pce_dev, int req_info)
  1800. {
  1801. int rc = 0;
  1802. struct ce_sps_data *pce_sps_data = &pce_dev->ce_request_info
  1803. [req_info].ce_sps;
  1804. if (pce_dev->no_get_around || !pce_dev->support_cmd_dscr)
  1805. return rc;
  1806. rc = sps_transfer_one(pce_dev->ce_bam_info.consumer.pipe,
  1807. GET_PHYS_ADDR(
  1808. pce_sps_data->cmdlistptr.unlock_all_pipes.cmdlist),
  1809. 0, NULL, (SPS_IOVEC_FLAG_CMD | SPS_IOVEC_FLAG_UNLOCK));
  1810. if (rc) {
  1811. pr_err("sps_xfr_one() fail rc=%d\n", rc);
  1812. rc = -EINVAL;
  1813. }
  1814. return rc;
  1815. }
  1816. static inline void qce_free_req_info(struct qce_device *pce_dev, int req_info,
  1817. bool is_complete);
/*
 * _aead_complete() - completion handler for an AEAD request.
 *
 * Unmaps the DMA buffers, snapshots the MAC from the result dump,
 * captures the device status (CCM decrypt MAC check) BEFORE unlocking
 * the other pipes, then reports the outcome through the registered
 * callback. The request slot is always freed before the callback runs.
 *
 * Return: 0 on normal completion (even if the operation itself failed;
 * the failure is reported via the callback status), -ENXIO if the
 * pipe unlock fails.
 */
static int _aead_complete(struct qce_device *pce_dev, int req_info)
{
	struct aead_request *areq;
	unsigned char mac[SHA256_DIGEST_SIZE];
	uint32_t ccm_fail_status = 0;
	uint32_t result_dump_status = 0;
	int32_t result_status = 0;
	struct ce_request_info *preq_info;
	struct ce_sps_data *pce_sps_data;
	qce_comp_func_ptr_t qce_callback;

	preq_info = &pce_dev->ce_request_info[req_info];
	pce_sps_data = &preq_info->ce_sps;
	qce_callback = preq_info->qce_cb;
	areq = (struct aead_request *) preq_info->areq;

	/* dst is mapped separately only when it differs from src */
	if (areq->src != areq->dst) {
		qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
					DMA_FROM_DEVICE);
	}
	qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
							DMA_TO_DEVICE);

	if (preq_info->asg)
		qce_dma_unmap_sg(pce_dev->pdev, preq_info->asg,
			preq_info->assoc_nents, DMA_TO_DEVICE);

	/* check MAC */
	memcpy(mac, (char *)(&pce_sps_data->result->auth_iv[0]),
						SHA256_DIGEST_SIZE);

	/* read status before unlock */
	if (preq_info->dir == QCE_DECRYPT) {
		/*
		 * NOTE(review): no braces below — the inner else pairs
		 * with the no_ccm_mac_status_get_around test, the outer
		 * else with no_get_around, as standard C dangling-else
		 * binding dictates.
		 */
		if (pce_dev->no_get_around)
			if (pce_dev->no_ccm_mac_status_get_around)
				ccm_fail_status =
				be32_to_cpu(pce_sps_data->result->status);
			else
				ccm_fail_status =
				be32_to_cpu(pce_sps_data->result_null->status);
		else
			ccm_fail_status = readl_relaxed(pce_dev->iobase +
					CRYPTO_STATUS_REG);
	}
	if (_qce_unlock_other_pipes(pce_dev, req_info)) {
		qce_free_req_info(pce_dev, req_info, true);
		qce_callback(areq, mac, NULL, -ENXIO);
		return -ENXIO;
	}
	result_dump_status = be32_to_cpu(pce_sps_data->result->status);
	/* clear the dump so a stale status is never re-read for this slot */
	pce_sps_data->result->status = 0;

	/* map hardware/SPS error bits to an -ENXIO result */
	if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
			| (1 <<  CRYPTO_HSD_ERR))) {
		pr_err("aead operation error. Status %x\n", result_dump_status);
		result_status = -ENXIO;
	} else if (pce_sps_data->consumer_status |
			pce_sps_data->producer_status)  {
		pr_err("aead sps operation error. sps status %x %x\n",
				pce_sps_data->consumer_status,
				pce_sps_data->producer_status);
		result_status = -ENXIO;
	}

	if (preq_info->mode == QCE_MODE_CCM) {
		/*
		 * Not from result dump, instead, use the status we just
		 * read of device for MAC_FAILED.
		 */
		if (result_status == 0 && (preq_info->dir == QCE_DECRYPT) &&
				(ccm_fail_status & (1 << CRYPTO_MAC_FAILED)))
			result_status = -EBADMSG;
		qce_free_req_info(pce_dev, req_info, true);
		qce_callback(areq, mac, NULL, result_status);
	} else {
		uint32_t ivsize = 0;
		struct crypto_aead *aead;
		unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE];

		/* non-CCM: also hand the final counter/IV back to the caller */
		aead = crypto_aead_reqtfm(areq);
		ivsize = crypto_aead_ivsize(aead);
		memcpy(iv, (char *)(pce_sps_data->result->encr_cntr_iv),
			sizeof(iv));
		qce_free_req_info(pce_dev, req_info, true);
		qce_callback(areq, mac, iv, result_status);
	}
	return 0;
}
/*
 * Completion handler for a hash (SHA) request.
 *
 * Unmaps the source scatterlist, snapshots the digest and byte counts
 * from the result dump region, checks for hardware/SPS errors, frees
 * the request slot and invokes the client completion callback.
 *
 * Returns 0 on normal completion, -ENXIO if areq is missing or the
 * other pipes could not be unlocked (callback is still invoked with
 * -ENXIO in the latter case).
 */
static int _sha_complete(struct qce_device *pce_dev, int req_info)
{
	struct ahash_request *areq;
	unsigned char digest[SHA256_DIGEST_SIZE];
	uint32_t bytecount32[2];
	int32_t result_status = 0;
	uint32_t result_dump_status;
	struct ce_request_info *preq_info;
	struct ce_sps_data *pce_sps_data;
	qce_comp_func_ptr_t qce_callback;

	preq_info = &pce_dev->ce_request_info[req_info];
	pce_sps_data = &preq_info->ce_sps;
	qce_callback = preq_info->qce_cb;
	areq = (struct ahash_request *) preq_info->areq;
	if (!areq) {
		pr_err("sha operation error. areq is NULL\n");
		return -ENXIO;
	}
	qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
				DMA_TO_DEVICE);
	/* Copy results out before the request slot can be recycled */
	memcpy(digest, (char *)(&pce_sps_data->result->auth_iv[0]),
						SHA256_DIGEST_SIZE);
	_byte_stream_to_net_words(bytecount32,
		(unsigned char *)pce_sps_data->result->auth_byte_count,
		2 * CRYPTO_REG_SIZE);
	if (_qce_unlock_other_pipes(pce_dev, req_info)) {
		/* Unlock failure: still complete the request, with -ENXIO */
		qce_free_req_info(pce_dev, req_info, true);
		qce_callback(areq, digest, (char *)bytecount32,
				-ENXIO);
		return -ENXIO;
	}
	result_dump_status = be32_to_cpu(pce_sps_data->result->status);
	pce_sps_data->result->status = 0;	/* clear for the next request */
	if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
				| (1 << CRYPTO_HSD_ERR))) {
		pr_err("sha operation error. Status %x\n", result_dump_status);
		result_status = -ENXIO;
	} else if (pce_sps_data->consumer_status) {
		pr_err("sha sps operation error. sps status %x\n",
			pce_sps_data->consumer_status);
		result_status = -ENXIO;
	}
	qce_free_req_info(pce_dev, req_info, true);
	qce_callback(areq, digest, (char *)bytecount32, result_status);
	return 0;
}
/*
 * Completion handler for an OTA f9 (integrity/MAC) request.
 *
 * Unmaps the single source buffer, extracts the 32-bit MAC-I value
 * from the result dump, checks hardware and SPS error status, frees
 * the request slot and invokes the client callback with the MAC.
 */
static int _f9_complete(struct qce_device *pce_dev, int req_info)
{
	uint32_t mac_i;
	int32_t result_status = 0;
	uint32_t result_dump_status;
	struct ce_request_info *preq_info;
	struct ce_sps_data *pce_sps_data;
	qce_comp_func_ptr_t qce_callback;
	void *areq;

	preq_info = &pce_dev->ce_request_info[req_info];
	pce_sps_data = &preq_info->ce_sps;
	qce_callback = preq_info->qce_cb;
	areq = preq_info->areq;
	dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
				preq_info->ota_size, DMA_TO_DEVICE);
	/* MAC-I is the first word of the auth IV area of the result dump */
	_byte_stream_to_net_words(&mac_i,
		(char *)(&pce_sps_data->result->auth_iv[0]),
		CRYPTO_REG_SIZE);
	if (_qce_unlock_other_pipes(pce_dev, req_info)) {
		qce_free_req_info(pce_dev, req_info, true);
		qce_callback(areq, NULL, NULL, -ENXIO);
		return -ENXIO;
	}
	result_dump_status = be32_to_cpu(pce_sps_data->result->status);
	pce_sps_data->result->status = 0;	/* clear for the next request */
	if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
				| (1 << CRYPTO_HSD_ERR))) {
		pr_err("f9 operation error. Status %x\n", result_dump_status);
		result_status = -ENXIO;
	} else if (pce_sps_data->consumer_status |
				pce_sps_data->producer_status) {
		/* bitwise OR: non-zero if either pipe reported an error */
		pr_err("f9 sps operation error. sps status %x %x\n",
				pce_sps_data->consumer_status,
				pce_sps_data->producer_status);
		result_status = -ENXIO;
	}
	qce_free_req_info(pce_dev, req_info, true);
	qce_callback(areq, (char *)&mac_i, NULL, result_status);
	return 0;
}
/*
 * Completion handler for a symmetric-cipher (skcipher) request.
 *
 * Unmaps src/dst scatterlists, checks hardware/SPS error status, and
 * returns the next-IV to the client callback for chained modes.  For
 * ECB no IV is returned.  On older cores (minor_version == 0) the
 * next-IV is reconstructed in software: for CBC it is the saved
 * decrypt IV or the last ciphertext block, for CTR/XTS the low 32 bits
 * of the counter are incremented by the number of processed blocks.
 * Newer cores provide the updated counter/IV directly in the result
 * dump.
 */
static int _ablk_cipher_complete(struct qce_device *pce_dev, int req_info)
{
	struct skcipher_request *areq;
	unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE];
	int32_t result_status = 0;
	uint32_t result_dump_status;
	struct ce_request_info *preq_info;
	struct ce_sps_data *pce_sps_data;
	qce_comp_func_ptr_t qce_callback;

	preq_info = &pce_dev->ce_request_info[req_info];
	pce_sps_data = &preq_info->ce_sps;
	qce_callback = preq_info->qce_cb;
	areq = (struct skcipher_request *) preq_info->areq;
	if (areq->src != areq->dst) {
		qce_dma_unmap_sg(pce_dev->pdev, areq->dst,
			preq_info->dst_nents, DMA_FROM_DEVICE);
	}
	/* In-place requests were mapped bidirectionally */
	qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
		(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
						DMA_TO_DEVICE);
	if (_qce_unlock_other_pipes(pce_dev, req_info)) {
		qce_free_req_info(pce_dev, req_info, true);
		qce_callback(areq, NULL, NULL, -ENXIO);
		return -ENXIO;
	}
	result_dump_status = be32_to_cpu(pce_sps_data->result->status);
	pce_sps_data->result->status = 0;	/* clear for the next request */
	if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
			| (1 << CRYPTO_HSD_ERR))) {
		pr_err("ablk_cipher operation error. Status %x\n",
				result_dump_status);
		result_status = -ENXIO;
	} else if (pce_sps_data->consumer_status |
				pce_sps_data->producer_status) {
		pr_err("ablk_cipher sps operation error. sps status %x %x\n",
				pce_sps_data->consumer_status,
				pce_sps_data->producer_status);
		result_status = -ENXIO;
	}
	if (preq_info->mode == QCE_MODE_ECB) {
		/* ECB has no chaining value to hand back */
		qce_free_req_info(pce_dev, req_info, true);
		qce_callback(areq, NULL, NULL, pce_sps_data->consumer_status |
					result_status);
	} else {
		if (pce_dev->ce_bam_info.minor_version == 0) {
			if (preq_info->mode == QCE_MODE_CBC) {
				if (preq_info->dir == QCE_DECRYPT)
					memcpy(iv, (char *)preq_info->dec_iv,
								sizeof(iv));
				else
					/* next IV = last ciphertext block */
					memcpy(iv, (unsigned char *)
						(sg_virt(areq->src) +
						areq->src->length - 16),
						sizeof(iv));
			}
			if ((preq_info->mode == QCE_MODE_CTR) ||
				(preq_info->mode == QCE_MODE_XTS)) {
				uint32_t num_blk = 0;
				uint32_t cntr_iv3 = 0;
				unsigned long long cntr_iv64 = 0;
				unsigned char *b = (unsigned char *)(&cntr_iv3);

				memcpy(iv, areq->iv, sizeof(iv));
				if (preq_info->mode != QCE_MODE_XTS)
					num_blk = areq->cryptlen/16;
				else
					num_blk = 1;
				/* Big-endian counter in iv[12..15] -> u32 */
				cntr_iv3 = ((*(iv + 12) << 24) & 0xff000000) |
					(((*(iv + 13)) << 16) & 0xff0000) |
					(((*(iv + 14)) << 8) & 0xff00) |
					(*(iv + 15) & 0xff);
				/* Advance counter mod 2^32 */
				cntr_iv64 =
					(((unsigned long long)cntr_iv3 &
					0xFFFFFFFFULL) +
					(unsigned long long)num_blk) %
					(unsigned long long)(0x100000000ULL);
				cntr_iv3 = (u32)(cntr_iv64 & 0xFFFFFFFF);
				/*
				 * NOTE(review): byte write-back via *b reads
				 * cntr_iv3 in host byte order; this ordering
				 * assumes a little-endian host — confirm.
				 */
				*(iv + 15) = (char)(*b);
				*(iv + 14) = (char)(*(b + 1));
				*(iv + 13) = (char)(*(b + 2));
				*(iv + 12) = (char)(*(b + 3));
			}
		} else {
			/* Newer cores report the updated counter/IV directly */
			memcpy(iv,
				(char *)(pce_sps_data->result->encr_cntr_iv),
				sizeof(iv));
		}
		qce_free_req_info(pce_dev, req_info, true);
		qce_callback(areq, NULL, iv, result_status);
	}
	return 0;
}
/*
 * Completion handler for an OTA f8 (ciphering) request.
 *
 * Unmaps the destination (if used) and source buffers — an in-place
 * operation (no separate destination) was mapped bidirectionally —
 * checks hardware/SPS error status, frees the request slot and calls
 * the client back.  f8 returns no IV/MAC to the caller.
 */
static int _f8_complete(struct qce_device *pce_dev, int req_info)
{
	int32_t result_status = 0;
	uint32_t result_dump_status;
	uint32_t result_dump_status2;
	struct ce_request_info *preq_info;
	struct ce_sps_data *pce_sps_data;
	qce_comp_func_ptr_t qce_callback;
	void *areq;

	preq_info = &pce_dev->ce_request_info[req_info];
	pce_sps_data = &preq_info->ce_sps;
	qce_callback = preq_info->qce_cb;
	areq = preq_info->areq;
	if (preq_info->phy_ota_dst)
		dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst,
				preq_info->ota_size, DMA_FROM_DEVICE);
	if (preq_info->phy_ota_src)
		dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
				preq_info->ota_size, (preq_info->phy_ota_dst) ?
				DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
	if (_qce_unlock_other_pipes(pce_dev, req_info)) {
		qce_free_req_info(pce_dev, req_info, true);
		qce_callback(areq, NULL, NULL, -ENXIO);
		return -ENXIO;
	}
	result_dump_status = be32_to_cpu(pce_sps_data->result->status);
	result_dump_status2 = be32_to_cpu(pce_sps_data->result->status2);
	if ((result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
			| (1 << CRYPTO_HSD_ERR)))) {
		pr_err(
			"f8 oper error. Dump Sta %x Sta2 %x req %d\n",
			result_dump_status, result_dump_status2, req_info);
		result_status = -ENXIO;
	} else if (pce_sps_data->consumer_status |
				pce_sps_data->producer_status) {
		pr_err("f8 sps operation error. sps status %x %x\n",
				pce_sps_data->consumer_status,
				pce_sps_data->producer_status);
		result_status = -ENXIO;
	}
	/* Clear both status words for the next request */
	pce_sps_data->result->status = 0;
	pce_sps_data->result->status2 = 0;
	qce_free_req_info(pce_dev, req_info, true);
	qce_callback(areq, NULL, NULL, result_status);
	return 0;
}
  2122. static void _qce_sps_iovec_count_init(struct qce_device *pce_dev, int req_info)
  2123. {
  2124. struct ce_sps_data *pce_sps_data = &pce_dev->ce_request_info[req_info]
  2125. .ce_sps;
  2126. pce_sps_data->in_transfer.iovec_count = 0;
  2127. pce_sps_data->out_transfer.iovec_count = 0;
  2128. }
  2129. static void _qce_set_flag(struct sps_transfer *sps_bam_pipe, uint32_t flag)
  2130. {
  2131. struct sps_iovec *iovec;
  2132. if (sps_bam_pipe->iovec_count == 0)
  2133. return;
  2134. iovec = sps_bam_pipe->iovec + (sps_bam_pipe->iovec_count - 1);
  2135. iovec->flags |= flag;
  2136. }
  2137. static int _qce_sps_add_data(dma_addr_t paddr, uint32_t len,
  2138. struct sps_transfer *sps_bam_pipe)
  2139. {
  2140. struct sps_iovec *iovec = sps_bam_pipe->iovec +
  2141. sps_bam_pipe->iovec_count;
  2142. uint32_t data_cnt;
  2143. while (len > 0) {
  2144. if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
  2145. pr_err("Num of descrptor %d exceed max (%d)\n",
  2146. sps_bam_pipe->iovec_count,
  2147. (uint32_t)QCE_MAX_NUM_DSCR);
  2148. return -ENOMEM;
  2149. }
  2150. if (len > SPS_MAX_PKT_SIZE)
  2151. data_cnt = SPS_MAX_PKT_SIZE;
  2152. else
  2153. data_cnt = len;
  2154. iovec->size = data_cnt;
  2155. iovec->addr = SPS_GET_LOWER_ADDR(paddr);
  2156. iovec->flags = SPS_GET_UPPER_ADDR(paddr);
  2157. sps_bam_pipe->iovec_count++;
  2158. iovec++;
  2159. paddr += data_cnt;
  2160. len -= data_cnt;
  2161. }
  2162. return 0;
  2163. }
  2164. static int _qce_sps_add_sg_data(struct qce_device *pce_dev,
  2165. struct scatterlist *sg_src, uint32_t nbytes,
  2166. struct sps_transfer *sps_bam_pipe)
  2167. {
  2168. uint32_t data_cnt, len;
  2169. dma_addr_t addr;
  2170. struct sps_iovec *iovec = sps_bam_pipe->iovec +
  2171. sps_bam_pipe->iovec_count;
  2172. while (nbytes > 0 && sg_src) {
  2173. len = min(nbytes, sg_dma_len(sg_src));
  2174. nbytes -= len;
  2175. addr = sg_dma_address(sg_src);
  2176. if (pce_dev->ce_bam_info.minor_version == 0)
  2177. len = ALIGN(len, pce_dev->ce_bam_info.ce_burst_size);
  2178. while (len > 0) {
  2179. if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
  2180. pr_err("Num of descrptor %d exceed max (%d)\n",
  2181. sps_bam_pipe->iovec_count,
  2182. (uint32_t)QCE_MAX_NUM_DSCR);
  2183. return -ENOMEM;
  2184. }
  2185. if (len > SPS_MAX_PKT_SIZE) {
  2186. data_cnt = SPS_MAX_PKT_SIZE;
  2187. iovec->size = data_cnt;
  2188. iovec->addr = SPS_GET_LOWER_ADDR(addr);
  2189. iovec->flags = SPS_GET_UPPER_ADDR(addr);
  2190. } else {
  2191. data_cnt = len;
  2192. iovec->size = data_cnt;
  2193. iovec->addr = SPS_GET_LOWER_ADDR(addr);
  2194. iovec->flags = SPS_GET_UPPER_ADDR(addr);
  2195. }
  2196. iovec++;
  2197. sps_bam_pipe->iovec_count++;
  2198. addr += data_cnt;
  2199. len -= data_cnt;
  2200. }
  2201. sg_src = sg_next(sg_src);
  2202. }
  2203. return 0;
  2204. }
/*
 * Queue @nbytes of a DMA-mapped scatterlist on @sps_bam_pipe, starting
 * @off bytes into the list.  The first loop walks past whole entries
 * covered by @off; the second queues descriptors exactly like
 * _qce_sps_add_sg_data(), except the first entry starts at the
 * residual offset.
 *
 * Returns 0 on success, -ENOENT if the list is empty/too short for
 * @off, -ENOMEM if the descriptor FIFO would overflow or the list
 * ends before @nbytes are queued.
 */
static int _qce_sps_add_sg_data_off(struct qce_device *pce_dev,
		struct scatterlist *sg_src, uint32_t nbytes, uint32_t off,
		struct sps_transfer *sps_bam_pipe)
{
	uint32_t data_cnt, len;
	dma_addr_t addr;
	struct sps_iovec *iovec = sps_bam_pipe->iovec +
					sps_bam_pipe->iovec_count;
	unsigned int res_within_sg;

	if (!sg_src)
		return -ENOENT;
	res_within_sg = sg_dma_len(sg_src);

	/* Skip whole scatterlist entries consumed by the offset */
	while (off > 0) {
		if (!sg_src) {
			pr_err("broken sg list off %d nbytes %d\n",
				off, nbytes);
			return -ENOENT;
		}
		len = sg_dma_len(sg_src);
		if (off < len) {
			/* Offset lands inside this entry */
			res_within_sg = len - off;
			break;
		}
		off -= len;
		sg_src = sg_next(sg_src);
		if (sg_src)
			res_within_sg = sg_dma_len(sg_src);
	}
	while (nbytes > 0 && sg_src) {
		len = min(nbytes, res_within_sg);
		nbytes -= len;
		addr = sg_dma_address(sg_src) + off;
		if (pce_dev->ce_bam_info.minor_version == 0)
			len = ALIGN(len, pce_dev->ce_bam_info.ce_burst_size);
		while (len > 0) {
			if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
				pr_err("Num of descrptor %d exceed max (%d)\n",
					sps_bam_pipe->iovec_count,
					(uint32_t)QCE_MAX_NUM_DSCR);
				return -ENOMEM;
			}
			if (len > SPS_MAX_PKT_SIZE) {
				data_cnt = SPS_MAX_PKT_SIZE;
				iovec->size = data_cnt;
				iovec->addr = SPS_GET_LOWER_ADDR(addr);
				iovec->flags = SPS_GET_UPPER_ADDR(addr);
			} else {
				data_cnt = len;
				iovec->size = data_cnt;
				iovec->addr = SPS_GET_LOWER_ADDR(addr);
				iovec->flags = SPS_GET_UPPER_ADDR(addr);
			}
			iovec++;
			sps_bam_pipe->iovec_count++;
			addr += data_cnt;
			len -= data_cnt;
		}
		if (nbytes) {
			sg_src = sg_next(sg_src);
			if (!sg_src) {
				pr_err("more data bytes %d\n", nbytes);
				return -ENOMEM;
			}
			res_within_sg = sg_dma_len(sg_src);
			off = 0;	/* offset applies to first entry only */
		}
	}
	return 0;
}
  2274. static int _qce_sps_add_cmd(struct qce_device *pce_dev, uint32_t flag,
  2275. struct qce_cmdlist_info *cmdptr,
  2276. struct sps_transfer *sps_bam_pipe)
  2277. {
  2278. dma_addr_t paddr = GET_PHYS_ADDR(cmdptr->cmdlist);
  2279. struct sps_iovec *iovec = sps_bam_pipe->iovec +
  2280. sps_bam_pipe->iovec_count;
  2281. iovec->size = cmdptr->size;
  2282. iovec->addr = SPS_GET_LOWER_ADDR(paddr);
  2283. iovec->flags = SPS_GET_UPPER_ADDR(paddr) | SPS_IOVEC_FLAG_CMD | flag;
  2284. sps_bam_pipe->iovec_count++;
  2285. if (sps_bam_pipe->iovec_count >= QCE_MAX_NUM_DSCR) {
  2286. pr_err("Num of descrptor %d exceed max (%d)\n",
  2287. sps_bam_pipe->iovec_count, (uint32_t)QCE_MAX_NUM_DSCR);
  2288. return -ENOMEM;
  2289. }
  2290. return 0;
  2291. }
/*
 * Submit the queued consumer (in) and producer (out) descriptor lists
 * for @req_info to the SPS driver.  The request index is encoded into
 * each transfer's user cookie (tagged with CRYPTO_REQ_USER_PAT) so the
 * producer callback can recover it.
 *
 * Returns 0 on success or the sps_transfer() error; on failure the
 * descriptor FIFOs are dumped for debugging.
 */
static int _qce_sps_transfer(struct qce_device *pce_dev, int req_info)
{
	int rc = 0;
	struct ce_sps_data *pce_sps_data;

	pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
	/* Tag both transfers with the request index for the callback */
	pce_sps_data->out_transfer.user =
		(void *)((uintptr_t)(CRYPTO_REQ_USER_PAT |
					(unsigned int) req_info));
	pce_sps_data->in_transfer.user =
		(void *)((uintptr_t)(CRYPTO_REQ_USER_PAT |
					(unsigned int) req_info));
	_qce_dump_descr_fifos_dbg(pce_dev, req_info);

	if (pce_sps_data->in_transfer.iovec_count) {
		rc = sps_transfer(pce_dev->ce_bam_info.consumer.pipe,
					&pce_sps_data->in_transfer);
		if (rc) {
			pr_err("sps_xfr() fail (consumer pipe=0x%lx) rc = %d\n",
				(uintptr_t)pce_dev->ce_bam_info.consumer.pipe,
				rc);
			goto ret;
		}
	}
	rc = sps_transfer(pce_dev->ce_bam_info.producer.pipe,
				&pce_sps_data->out_transfer);
	if (rc)
		pr_err("sps_xfr() fail (producer pipe=0x%lx) rc = %d\n",
			(uintptr_t)pce_dev->ce_bam_info.producer.pipe, rc);
ret:
	if (rc)
		_qce_dump_descr_fifos(pce_dev, req_info);
	return rc;
}
  2324. /**
  2325. * Allocate and Connect a CE peripheral's SPS endpoint
  2326. *
  2327. * This function allocates endpoint context and
  2328. * connect it with memory endpoint by calling
  2329. * appropriate SPS driver APIs.
  2330. *
  2331. * Also registers a SPS callback function with
  2332. * SPS driver
  2333. *
  2334. * This function should only be called once typically
  2335. * during driver probe.
  2336. *
  2337. * @pce_dev - Pointer to qce_device structure
  2338. * @ep - Pointer to sps endpoint data structure
 * @is_producer - true means Producer endpoint
 *                false means Consumer endpoint
  2341. *
  2342. * @return - 0 if successful else negative value.
  2343. *
  2344. */
static int qce_sps_init_ep_conn(struct qce_device *pce_dev,
				struct qce_sps_ep_conn_data *ep,
				bool is_producer)
{
	int rc = 0;
	struct sps_pipe *sps_pipe_info;
	struct sps_connect *sps_connect_info = &ep->connect;
	struct sps_register_event *sps_event = &ep->event;

	/* Allocate endpoint context */
	sps_pipe_info = sps_alloc_endpoint();
	if (!sps_pipe_info) {
		pr_err("sps_alloc_endpoint() failed!!! is_producer=%d\n",
			is_producer);
		rc = -ENOMEM;
		goto out;
	}
	/* Now save the sps pipe handle */
	ep->pipe = sps_pipe_info;
	/* Get default connection configuration for an endpoint */
	rc = sps_get_config(sps_pipe_info, sps_connect_info);
	if (rc) {
		pr_err("sps_get_config() fail pipe_handle=0x%lx, rc = %d\n",
			(uintptr_t)sps_pipe_info, rc);
		goto get_config_err;
	}
	/* Modify the default connection configuration */
	if (is_producer) {
		/*
		 * For CE producer transfer, source should be
		 * CE peripheral where as destination should
		 * be system memory.
		 */
		sps_connect_info->source = pce_dev->ce_bam_info.bam_handle;
		sps_connect_info->destination = SPS_DEV_HANDLE_MEM;
		/* Producer pipe will handle this connection */
		sps_connect_info->mode = SPS_MODE_SRC;
		sps_connect_info->options =
			SPS_O_AUTO_ENABLE | SPS_O_DESC_DONE;
	} else {
		/* For CE consumer transfer, source should be
		 * system memory where as destination should
		 * CE peripheral
		 */
		sps_connect_info->source = SPS_DEV_HANDLE_MEM;
		sps_connect_info->destination = pce_dev->ce_bam_info.bam_handle;
		sps_connect_info->mode = SPS_MODE_DEST;
		sps_connect_info->options =
			SPS_O_AUTO_ENABLE;
	}
	/* Producer pipe index */
	sps_connect_info->src_pipe_index =
			pce_dev->ce_bam_info.src_pipe_index;
	/* Consumer pipe index */
	sps_connect_info->dest_pipe_index =
			pce_dev->ce_bam_info.dest_pipe_index;
	/* Set pipe group */
	sps_connect_info->lock_group = pce_dev->ce_bam_info.pipe_pair_index;
	sps_connect_info->event_thresh = 0x10;
	/*
	 * Max. no of scatter/gather buffers that can
	 * be passed by block layer = 32 (NR_SG).
	 * Each BAM descriptor needs 64 bits (8 bytes).
	 * One BAM descriptor is required per buffer transfer.
	 * So we would require total 256 (32 * 8) bytes of descriptor FIFO.
	 * But due to HW limitation we need to allocate at least one extra
	 * descriptor memory (256 bytes + 8 bytes). But in order to be
	 * in power of 2, we are allocating 512 bytes of memory.
	 */
	sps_connect_info->desc.size = QCE_MAX_NUM_DSCR * MAX_QCE_ALLOC_BAM_REQ *
					sizeof(struct sps_iovec);
	if (sps_connect_info->desc.size > MAX_SPS_DESC_FIFO_SIZE)
		sps_connect_info->desc.size = MAX_SPS_DESC_FIFO_SIZE;
	/* Descriptor FIFO must be DMA-coherent; freed on error paths below */
	sps_connect_info->desc.base = dma_alloc_coherent(pce_dev->pdev,
					sps_connect_info->desc.size,
					&sps_connect_info->desc.phys_base,
					GFP_KERNEL | __GFP_ZERO);
	if (sps_connect_info->desc.base == NULL) {
		rc = -ENOMEM;
		pr_err("Can not allocate coherent memory for sps data\n");
		goto get_config_err;
	}
	/* Establish connection between peripheral and memory endpoint */
	rc = sps_connect(sps_pipe_info, sps_connect_info);
	if (rc) {
		pr_err("sps_connect() fail pipe_handle=0x%lx, rc = %d\n",
			(uintptr_t)sps_pipe_info, rc);
		goto sps_connect_err;
	}
	sps_event->mode = SPS_TRIGGER_CALLBACK;
	sps_event->xfer_done = NULL;
	sps_event->user = (void *)pce_dev;
	if (is_producer) {
		/* Only the producer pipe gets a completion callback */
		sps_event->options = SPS_O_EOT | SPS_O_DESC_DONE;
		sps_event->callback = _sps_producer_callback;
		rc = sps_register_event(ep->pipe, sps_event);
		if (rc) {
			pr_err("Producer callback registration failed rc=%d\n",
				rc);
			goto sps_connect_err;
		}
	} else {
		sps_event->options = SPS_O_EOT;
		sps_event->callback = NULL;
	}
	pr_debug("success, %s : pipe_handle=0x%lx, desc fifo base (phy) = 0x%pK\n",
		is_producer ? "PRODUCER(RX/OUT)" : "CONSUMER(TX/IN)",
		(uintptr_t)sps_pipe_info, &sps_connect_info->desc.phys_base);
	goto out;

sps_connect_err:
	dma_free_coherent(pce_dev->pdev,
			sps_connect_info->desc.size,
			sps_connect_info->desc.base,
			sps_connect_info->desc.phys_base);
get_config_err:
	sps_free_endpoint(sps_pipe_info);
out:
	return rc;
}
  2463. /**
  2464. * Disconnect and Deallocate a CE peripheral's SPS endpoint
  2465. *
  2466. * This function disconnect endpoint and deallocates
  2467. * endpoint context.
  2468. *
  2469. * This function should only be called once typically
  2470. * during driver remove.
  2471. *
  2472. * @pce_dev - Pointer to qce_device structure
  2473. * @ep - Pointer to sps endpoint data structure
  2474. *
  2475. */
  2476. static void qce_sps_exit_ep_conn(struct qce_device *pce_dev,
  2477. struct qce_sps_ep_conn_data *ep)
  2478. {
  2479. struct sps_pipe *sps_pipe_info = ep->pipe;
  2480. struct sps_connect *sps_connect_info = &ep->connect;
  2481. sps_disconnect(sps_pipe_info);
  2482. dma_free_coherent(pce_dev->pdev,
  2483. sps_connect_info->desc.size,
  2484. sps_connect_info->desc.base,
  2485. sps_connect_info->desc.phys_base);
  2486. sps_free_endpoint(sps_pipe_info);
  2487. }
/*
 * Drop this device's reference on its shared BAM registration.  The
 * BAM is deregistered, unmapped and its bookkeeping freed only when
 * the last user releases it; all of this runs under
 * bam_register_lock, which also protects qce50_bam_list.
 */
static void qce_sps_release_bam(struct qce_device *pce_dev)
{
	struct bam_registration_info *pbam;

	mutex_lock(&bam_register_lock);
	pbam = pce_dev->pbam;
	if (pbam == NULL)
		goto ret;
	pbam->cnt--;
	if (pbam->cnt > 0)
		goto ret;	/* other devices still use this BAM */
	if (pce_dev->ce_bam_info.bam_handle) {
		sps_deregister_bam_device(pce_dev->ce_bam_info.bam_handle);
		pr_debug("deregister bam handle 0x%lx\n",
			pce_dev->ce_bam_info.bam_handle);
		pce_dev->ce_bam_info.bam_handle = 0;
	}
	iounmap(pbam->bam_iobase);
	pr_debug("delete bam 0x%x\n", pbam->bam_mem);
	list_del(&pbam->qlist);
	kfree(pbam);
ret:
	pce_dev->pbam = NULL;
	mutex_unlock(&bam_register_lock);
}
/*
 * Look up or create the BAM registration for this device.  If another
 * qce device already registered the same BAM (matched by bam_mem), its
 * handle and iobase are reused and the refcount bumped.  Otherwise the
 * BAM register space is mapped, capability bits probed, and the BAM is
 * registered with the SPS driver.  All list/refcount manipulation runs
 * under bam_register_lock.
 *
 * Returns 0 on success, -ENOMEM/-EIO on allocation, mapping or
 * registration failure.
 */
static int qce_sps_get_bam(struct qce_device *pce_dev)
{
	int rc = 0;
	struct sps_bam_props bam = {0};
	struct bam_registration_info *pbam = NULL;
	struct bam_registration_info *p;
	uint32_t bam_cfg = 0;

	mutex_lock(&bam_register_lock);
	list_for_each_entry(p, &qce50_bam_list, qlist) {
		if (p->bam_mem == pce_dev->bam_mem) {
			pbam = p; /* found */
			break;
		}
	}
	if (pbam) {
		/* Reuse existing registration; just take a reference */
		pr_debug("found bam 0x%x\n", pbam->bam_mem);
		pbam->cnt++;
		pce_dev->ce_bam_info.bam_handle = pbam->handle;
		pce_dev->ce_bam_info.bam_mem = pbam->bam_mem;
		pce_dev->ce_bam_info.bam_iobase = pbam->bam_iobase;
		pce_dev->pbam = pbam;
		pce_dev->support_cmd_dscr = pbam->support_cmd_dscr;
		goto ret;
	}
	pbam = kzalloc(sizeof(struct bam_registration_info), GFP_KERNEL);
	if (!pbam) {
		rc = -ENOMEM;
		goto ret;
	}
	pbam->cnt = 1;
	pbam->bam_mem = pce_dev->bam_mem;
	pbam->bam_iobase = ioremap(pce_dev->bam_mem,
				pce_dev->bam_mem_size);
	if (!pbam->bam_iobase) {
		kfree(pbam);
		rc = -ENOMEM;
		pr_err("Can not map BAM io memory\n");
		goto ret;
	}
	pce_dev->ce_bam_info.bam_mem = pbam->bam_mem;
	pce_dev->ce_bam_info.bam_iobase = pbam->bam_iobase;
	pbam->handle = 0;
	pr_debug("allocate bam 0x%x\n", pbam->bam_mem);
	/* Probe whether this BAM supports command descriptors */
	bam_cfg = readl_relaxed(pce_dev->ce_bam_info.bam_iobase +
				CRYPTO_BAM_CNFG_BITS_REG);
	pbam->support_cmd_dscr = (bam_cfg & CRYPTO_BAM_CD_ENABLE_MASK) ?
					true : false;
	if (!pbam->support_cmd_dscr) {
		pr_info("qce50 don't support command descriptor. bam_cfg%x\n",
			bam_cfg);
		pce_dev->no_get_around = false;
	}
	pce_dev->support_cmd_dscr = pbam->support_cmd_dscr;

	bam.phys_addr = pce_dev->ce_bam_info.bam_mem;
	bam.virt_addr = pce_dev->ce_bam_info.bam_iobase;
	/*
	 * This event threshold value is only significant for BAM-to-BAM
	 * transfer. It's ignored for BAM-to-System mode transfer.
	 */
	bam.event_threshold = 0x10; /* Pipe event threshold */
	/*
	 * This threshold controls when the BAM publish
	 * the descriptor size on the sideband interface.
	 * SPS HW will only be used when
	 * data transfer size > 64 bytes.
	 */
	bam.summing_threshold = 64;
	/* SPS driver will handle the crypto BAM IRQ */
	bam.irq = (u32)pce_dev->ce_bam_info.bam_irq;
	/*
	 * Set flag to indicate BAM global device control is managed
	 * remotely.
	 */
	if (!pce_dev->support_cmd_dscr || pce_dev->is_shared)
		bam.manage = SPS_BAM_MGR_DEVICE_REMOTE;
	else
		bam.manage = SPS_BAM_MGR_LOCAL;

	bam.ee = pce_dev->ce_bam_info.bam_ee;
	bam.ipc_loglevel = QCE_BAM_DEFAULT_IPC_LOGLVL;
	bam.options |= SPS_BAM_CACHED_WP;
	pr_debug("bam physical base=0x%lx\n", (uintptr_t)bam.phys_addr);
	pr_debug("bam virtual base=0x%pK\n", bam.virt_addr);

	/* Register CE Peripheral BAM device to SPS driver */
	rc = sps_register_bam_device(&bam, &pbam->handle);
	if (rc) {
		pr_err("sps_register_bam_device() failed! err=%d\n", rc);
		rc = -EIO;
		iounmap(pbam->bam_iobase);
		kfree(pbam);
		goto ret;
	}
	pce_dev->pbam = pbam;
	list_add_tail(&pbam->qlist, &qce50_bam_list);
	pce_dev->ce_bam_info.bam_handle = pbam->handle;
ret:
	mutex_unlock(&bam_register_lock);
	return rc;
}
  2610. /**
  2611. * Initialize SPS HW connected with CE core
  2612. *
  2613. * This function register BAM HW resources with
  2614. * SPS driver and then initialize 2 SPS endpoints
  2615. *
  2616. * This function should only be called once typically
  2617. * during driver probe.
  2618. *
  2619. * @pce_dev - Pointer to qce_device structure
  2620. *
  2621. * @return - 0 if successful else negative value.
  2622. *
  2623. */
  2624. static int qce_sps_init(struct qce_device *pce_dev)
  2625. {
  2626. int rc = 0;
  2627. rc = qce_sps_get_bam(pce_dev);
  2628. if (rc)
  2629. return rc;
  2630. pr_debug("BAM device registered. bam_handle=0x%lx\n",
  2631. pce_dev->ce_bam_info.bam_handle);
  2632. rc = qce_sps_init_ep_conn(pce_dev,
  2633. &pce_dev->ce_bam_info.producer, true);
  2634. if (rc)
  2635. goto sps_connect_producer_err;
  2636. rc = qce_sps_init_ep_conn(pce_dev,
  2637. &pce_dev->ce_bam_info.consumer, false);
  2638. if (rc)
  2639. goto sps_connect_consumer_err;
  2640. pr_info(" QTI MSM CE-BAM at 0x%016llx irq %d\n",
  2641. (unsigned long long)pce_dev->ce_bam_info.bam_mem,
  2642. (unsigned int)pce_dev->ce_bam_info.bam_irq);
  2643. return rc;
  2644. sps_connect_consumer_err:
  2645. qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.producer);
  2646. sps_connect_producer_err:
  2647. qce_sps_release_bam(pce_dev);
  2648. return rc;
  2649. }
  2650. static inline int qce_alloc_req_info(struct qce_device *pce_dev)
  2651. {
  2652. int i;
  2653. int request_index = pce_dev->ce_request_index;
  2654. for (i = 0; i < MAX_QCE_BAM_REQ; i++) {
  2655. request_index++;
  2656. if (request_index >= MAX_QCE_BAM_REQ)
  2657. request_index = 0;
  2658. if (!atomic_xchg(
  2659. &pce_dev->ce_request_info[request_index].in_use,
  2660. true)) {
  2661. pce_dev->ce_request_index = request_index;
  2662. return request_index;
  2663. }
  2664. }
  2665. pr_warn("pcedev %d no reqs available no_of_queued_req %d\n",
  2666. pce_dev->dev_no, atomic_read(
  2667. &pce_dev->no_of_queued_req));
  2668. return -EBUSY;
  2669. }
  2670. static inline void qce_free_req_info(struct qce_device *pce_dev, int req_info,
  2671. bool is_complete)
  2672. {
  2673. pce_dev->ce_request_info[req_info].xfer_type = QCE_XFER_TYPE_LAST;
  2674. if (atomic_xchg(&pce_dev->ce_request_info[req_info].in_use,
  2675. false)) {
  2676. if (req_info < MAX_QCE_BAM_REQ && is_complete)
  2677. atomic_dec(&pce_dev->no_of_queued_req);
  2678. } else
  2679. pr_warn("request info %d free already\n", req_info);
  2680. }
  2681. static void print_notify_debug(struct sps_event_notify *notify)
  2682. {
  2683. phys_addr_t addr =
  2684. DESC_FULL_ADDR((phys_addr_t) notify->data.transfer.iovec.flags,
  2685. notify->data.transfer.iovec.addr);
  2686. pr_debug("sps ev_id=%d, addr=0x%pa, size=0x%x, flags=0x%x user=0x%pK\n",
  2687. notify->event_id, &addr,
  2688. notify->data.transfer.iovec.size,
  2689. notify->data.transfer.iovec.flags,
  2690. notify->data.transfer.user);
  2691. }
  2692. static void _qce_req_complete(struct qce_device *pce_dev, unsigned int req_info)
  2693. {
  2694. struct ce_request_info *preq_info;
  2695. preq_info = &pce_dev->ce_request_info[req_info];
  2696. switch (preq_info->xfer_type) {
  2697. case QCE_XFER_CIPHERING:
  2698. _ablk_cipher_complete(pce_dev, req_info);
  2699. break;
  2700. case QCE_XFER_HASHING:
  2701. _sha_complete(pce_dev, req_info);
  2702. break;
  2703. case QCE_XFER_AEAD:
  2704. _aead_complete(pce_dev, req_info);
  2705. break;
  2706. case QCE_XFER_F8:
  2707. _f8_complete(pce_dev, req_info);
  2708. break;
  2709. case QCE_XFER_F9:
  2710. _f9_complete(pce_dev, req_info);
  2711. break;
  2712. default:
  2713. qce_free_req_info(pce_dev, req_info, true);
  2714. break;
  2715. }
  2716. }
/*
 * Bunch-mode watchdog.  While new commands keep arriving
 * (bunch_cmd_seq advancing), the timer is simply re-armed.  Once the
 * sequence stalls, a dummy request is issued to flush the last bunch
 * and the engine is switched back to interrupt mode.
 */
static void qce_multireq_timeout(struct timer_list *data)
{
	struct qce_device *pce_dev = from_timer(pce_dev, data, timer);
	int ret = 0;
	int last_seq;
	unsigned long flags;

	last_seq = atomic_read(&pce_dev->bunch_cmd_seq);
	if (last_seq == 0 ||
		last_seq != atomic_read(&pce_dev->last_intr_seq)) {
		/* Still making progress (or idle): re-arm and retry later */
		atomic_set(&pce_dev->last_intr_seq, last_seq);
		mod_timer(&(pce_dev->timer), (jiffies + DELAY_IN_JIFFIES));
		return;
	}
	/* last bunch mode command time out */

	/*
	 * From here to dummy request finish sps request and set owner back
	 * to none, we disable interrupt.
	 * So it won't get preempted or interrupted. If bam inerrupts happen
	 * between, and completion callback gets called from BAM, a new
	 * request may be issued by the client driver. Deadlock may happen.
	 */
	local_irq_save(flags);
	if (cmpxchg(&pce_dev->owner, QCE_OWNER_NONE, QCE_OWNER_TIMEOUT)
						!= QCE_OWNER_NONE) {
		/* Engine busy with another owner: back off and re-arm */
		local_irq_restore(flags);
		mod_timer(&(pce_dev->timer), (jiffies + DELAY_IN_JIFFIES));
		return;
	}

	ret = qce_dummy_req(pce_dev);
	if (ret)
		pr_warn("pcedev %d: Failed to insert dummy req\n",
				pce_dev->dev_no);
	cmpxchg(&pce_dev->owner, QCE_OWNER_TIMEOUT, QCE_OWNER_NONE);
	pce_dev->mode = IN_INTERRUPT_MODE;
	local_irq_restore(flags);

	del_timer(&(pce_dev->timer));
	pce_dev->qce_stats.no_of_timeouts++;
	pr_debug("pcedev %d mode switch to INTR\n", pce_dev->dev_no);
}
  2756. void qce_get_driver_stats(void *handle)
  2757. {
  2758. struct qce_device *pce_dev = (struct qce_device *) handle;
  2759. if (!_qce50_disp_stats)
  2760. return;
  2761. pr_info("Engine %d timeout occuured %d\n", pce_dev->dev_no,
  2762. pce_dev->qce_stats.no_of_timeouts);
  2763. pr_info("Engine %d dummy request inserted %d\n", pce_dev->dev_no,
  2764. pce_dev->qce_stats.no_of_dummy_reqs);
  2765. if (pce_dev->mode)
  2766. pr_info("Engine %d is in BUNCH MODE\n", pce_dev->dev_no);
  2767. else
  2768. pr_info("Engine %d is in INTERRUPT MODE\n", pce_dev->dev_no);
  2769. pr_info("Engine %d outstanding request %d\n", pce_dev->dev_no,
  2770. atomic_read(&pce_dev->no_of_queued_req));
  2771. }
  2772. EXPORT_SYMBOL(qce_get_driver_stats);
  2773. void qce_clear_driver_stats(void *handle)
  2774. {
  2775. struct qce_device *pce_dev = (struct qce_device *) handle;
  2776. pce_dev->qce_stats.no_of_timeouts = 0;
  2777. pce_dev->qce_stats.no_of_dummy_reqs = 0;
  2778. }
  2779. EXPORT_SYMBOL(qce_clear_driver_stats);
  2780. static void _sps_producer_callback(struct sps_event_notify *notify)
  2781. {
  2782. struct qce_device *pce_dev = (struct qce_device *)
  2783. ((struct sps_event_notify *)notify)->user;
  2784. int rc = 0;
  2785. unsigned int req_info;
  2786. struct ce_sps_data *pce_sps_data;
  2787. struct ce_request_info *preq_info;
  2788. print_notify_debug(notify);
  2789. req_info = (unsigned int)((uintptr_t)notify->data.transfer.user);
  2790. if ((req_info & 0xffff0000) != CRYPTO_REQ_USER_PAT) {
  2791. pr_warn("request information %d out of range\n", req_info);
  2792. return;
  2793. }
  2794. req_info = req_info & 0x00ff;
  2795. if (req_info < 0 || req_info >= MAX_QCE_ALLOC_BAM_REQ) {
  2796. pr_warn("request information %d out of range\n", req_info);
  2797. return;
  2798. }
  2799. preq_info = &pce_dev->ce_request_info[req_info];
  2800. pce_sps_data = &preq_info->ce_sps;
  2801. if ((preq_info->xfer_type == QCE_XFER_CIPHERING ||
  2802. preq_info->xfer_type == QCE_XFER_AEAD) &&
  2803. pce_sps_data->producer_state == QCE_PIPE_STATE_IDLE) {
  2804. pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
  2805. pce_sps_data->out_transfer.iovec_count = 0;
  2806. _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
  2807. CRYPTO_RESULT_DUMP_SIZE,
  2808. &pce_sps_data->out_transfer);
  2809. _qce_set_flag(&pce_sps_data->out_transfer,
  2810. SPS_IOVEC_FLAG_INT);
  2811. rc = sps_transfer(pce_dev->ce_bam_info.producer.pipe,
  2812. &pce_sps_data->out_transfer);
  2813. if (rc) {
  2814. pr_err("sps_xfr() fail (producer pipe=0x%lx) rc = %d\n",
  2815. (uintptr_t)pce_dev->ce_bam_info.producer.pipe,
  2816. rc);
  2817. }
  2818. return;
  2819. }
  2820. _qce_req_complete(pce_dev, req_info);
  2821. }
  2822. /**
  2823. * De-initialize SPS HW connected with CE core
  2824. *
  2825. * This function deinitialize SPS endpoints and then
  2826. * deregisters BAM resources from SPS driver.
  2827. *
  2828. * This function should only be called once typically
  2829. * during driver remove.
  2830. *
  2831. * @pce_dev - Pointer to qce_device structure
  2832. *
  2833. */
static void qce_sps_exit(struct qce_device *pce_dev)
{
	/* Tear down both pipe endpoints first, then deregister the BAM */
	qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.consumer);
	qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.producer);
	qce_sps_release_bam(pce_dev);
}
  2840. static void qce_add_cmd_element(struct qce_device *pdev,
  2841. struct sps_command_element **cmd_ptr, u32 addr,
  2842. u32 data, struct sps_command_element **populate)
  2843. {
  2844. (*cmd_ptr)->addr = (uint32_t)(addr + pdev->phy_iobase);
  2845. (*cmd_ptr)->command = 0;
  2846. (*cmd_ptr)->data = data;
  2847. (*cmd_ptr)->mask = 0xFFFFFFFF;
  2848. (*cmd_ptr)->reserved = 0;
  2849. if (populate != NULL)
  2850. *populate = *cmd_ptr;
  2851. (*cmd_ptr)++;
  2852. }
/*
 * Build the prebaked command list for one AES cipher configuration.
 *
 * Carves command memory out of *pvaddr (aligned to the CE burst size),
 * fills it with sps_command_element register writes for the given
 * @mode and key width, and records pointers to the per-request patchable
 * elements (segment sizes, key, IV, GO) in the matching
 * qce_cmdlist_info entry of ce_request_info[cri_index].
 *
 * @pdev      - qce device
 * @cri_index - index into pdev->ce_request_info
 * @pvaddr    - in/out: free command memory; advanced past what is used
 * @mode      - AES mode (CBC/CTR/ECB/XTS)
 * @key_128   - true for AES-128, false for AES-256
 *
 * Returns 0 on success, -EINVAL for an unknown mode.
 *
 * NOTE: the CE hardware consumes these register writes in order; do
 * not reorder the qce_add_cmd_element() calls.
 */
static int _setup_cipher_aes_cmdlistptrs(struct qce_device *pdev, int cri_index,
		unsigned char **pvaddr, enum qce_cipher_mode_enum mode,
		bool key_128)
{
	struct sps_command_element *ce_vaddr;
	uintptr_t ce_vaddr_start;
	struct qce_cmdlistptr_ops *cmdlistptr;
	struct qce_cmdlist_info *pcl_info = NULL;
	int i = 0;
	uint32_t encr_cfg = 0;
	uint32_t key_reg = 0;	/* number of 32-bit encr key registers */
	uint32_t xts_key_reg = 0;	/* number of 32-bit XTS key registers */
	uint32_t iv_reg = 0;	/* number of 32-bit IV/counter registers */

	cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
					pdev->ce_bam_info.ce_burst_size);
	ce_vaddr = (struct sps_command_element *)(*pvaddr);
	ce_vaddr_start = (uintptr_t)(*pvaddr);
	/*
	 * Designate chunks of the allocated memory to various
	 * command list pointers related to AES cipher operations defined
	 * in ce_cmdlistptrs_ops structure.
	 */
	switch (mode) {
	case QCE_MODE_CBC:
	case QCE_MODE_CTR:
		if (key_128) {
			cmdlistptr->cipher_aes_128_cbc_ctr.cmdlist =
						(uintptr_t)ce_vaddr;
			pcl_info = &(cmdlistptr->cipher_aes_128_cbc_ctr);
			if (mode == QCE_MODE_CBC)
				encr_cfg = pdev->reg.encr_cfg_aes_cbc_128;
			else
				encr_cfg = pdev->reg.encr_cfg_aes_ctr_128;
			iv_reg = 4;
			key_reg = 4;
			xts_key_reg = 0;
		} else {
			cmdlistptr->cipher_aes_256_cbc_ctr.cmdlist =
						(uintptr_t)ce_vaddr;
			pcl_info = &(cmdlistptr->cipher_aes_256_cbc_ctr);
			if (mode == QCE_MODE_CBC)
				encr_cfg = pdev->reg.encr_cfg_aes_cbc_256;
			else
				encr_cfg = pdev->reg.encr_cfg_aes_ctr_256;
			iv_reg = 4;
			key_reg = 8;
			xts_key_reg = 0;
		}
		break;
	case QCE_MODE_ECB:
		if (key_128) {
			cmdlistptr->cipher_aes_128_ecb.cmdlist =
						(uintptr_t)ce_vaddr;
			pcl_info = &(cmdlistptr->cipher_aes_128_ecb);
			encr_cfg = pdev->reg.encr_cfg_aes_ecb_128;
			iv_reg = 0;	/* ECB uses no IV */
			key_reg = 4;
			xts_key_reg = 0;
		} else {
			cmdlistptr->cipher_aes_256_ecb.cmdlist =
						(uintptr_t)ce_vaddr;
			pcl_info = &(cmdlistptr->cipher_aes_256_ecb);
			encr_cfg = pdev->reg.encr_cfg_aes_ecb_256;
			iv_reg = 0;
			key_reg = 8;
			xts_key_reg = 0;
		}
		break;
	case QCE_MODE_XTS:
		if (key_128) {
			cmdlistptr->cipher_aes_128_xts.cmdlist =
						(uintptr_t)ce_vaddr;
			pcl_info = &(cmdlistptr->cipher_aes_128_xts);
			encr_cfg = pdev->reg.encr_cfg_aes_xts_128;
			iv_reg = 4;
			key_reg = 4;
			xts_key_reg = 4;	/* XTS needs a second key */
		} else {
			cmdlistptr->cipher_aes_256_xts.cmdlist =
						(uintptr_t)ce_vaddr;
			pcl_info = &(cmdlistptr->cipher_aes_256_xts);
			encr_cfg = pdev->reg.encr_cfg_aes_xts_256;
			iv_reg = 4;
			key_reg = 8;
			xts_key_reg = 8;
		}
		break;
	default:
		pr_err("Unknown mode of operation %d received, exiting now\n",
			mode);
		return -EINVAL;
		break;	/* unreachable; kept for defensive symmetry */
	}

	/* clear status register */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
				pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
						&pcl_info->seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
						&pcl_info->encr_seg_cfg);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
						&pcl_info->encr_seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
						&pcl_info->encr_seg_start);
	/* open up the full counter mask (all four words) */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG,
				(uint32_t)0xffffffff, &pcl_info->encr_mask);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG0,
				(uint32_t)0xffffffff, NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG1,
				(uint32_t)0xffffffff, NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG2,
				(uint32_t)0xffffffff, NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
						&pcl_info->auth_seg_cfg);
	/* first key word is patchable; the rest are zero-filled here */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
						&pcl_info->encr_key);
	for (i = 1; i < key_reg; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
				0, NULL);
	if (xts_key_reg) {
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_XTS_KEY0_REG,
					0, &pcl_info->encr_xts_key);
		for (i = 1; i < xts_key_reg; i++)
			qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_ENCR_XTS_KEY0_REG +
						i * sizeof(uint32_t)), 0, NULL);
		qce_add_cmd_element(pdev, &ce_vaddr,
				CRYPTO_ENCR_XTS_DU_SIZE_REG, 0,
					&pcl_info->encr_xts_du_size);
	}
	if (iv_reg) {
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
						&pcl_info->encr_cntr_iv);
		for (i = 1; i < iv_reg; i++)
			qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
				0, NULL);
	}
	/* Add dummy to align size to burst-size multiple */
	if (mode == QCE_MODE_XTS) {
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
						0, &pcl_info->auth_seg_size);
	} else {
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
						0, &pcl_info->auth_seg_size);
		/*
		 * NOTE(review): this element also records its location in
		 * auth_seg_size, overwriting the pointer stored just above.
		 * Verify whether it should be NULL (pure dummy) or a
		 * dedicated auth_seg_start pointer.
		 */
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG,
						0, &pcl_info->auth_seg_size);
	}
	/* restore little-endian config, then kick off the engine */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
				pdev->reg.crypto_cfg_le, NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);

	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
	*pvaddr = (unsigned char *) ce_vaddr;

	return 0;
}
/*
 * Build the prebaked command list for one DES/3DES cipher configuration.
 *
 * Same structure as _setup_cipher_aes_cmdlistptrs(): carves aligned
 * command memory out of *pvaddr, emits the register-write sequence for
 * @alg in ECB or CBC mode, and records the patchable elements in the
 * matching qce_cmdlist_info.
 *
 * @pdev      - qce device
 * @cri_index - index into pdev->ce_request_info
 * @pvaddr    - in/out: free command memory; advanced past what is used
 * @alg       - CIPHER_ALG_DES or CIPHER_ALG_3DES
 * @mode_cbc  - true for CBC, false for ECB
 *
 * Returns 0 on success, -EINVAL for an unknown algorithm.
 *
 * NOTE: register-write order is consumed by hardware; do not reorder.
 */
static int _setup_cipher_des_cmdlistptrs(struct qce_device *pdev, int cri_index,
		unsigned char **pvaddr, enum qce_cipher_alg_enum alg,
		bool mode_cbc)
{
	struct sps_command_element *ce_vaddr;
	uintptr_t ce_vaddr_start;
	struct qce_cmdlistptr_ops *cmdlistptr;
	struct qce_cmdlist_info *pcl_info = NULL;
	int i = 0;
	uint32_t encr_cfg = 0;
	uint32_t key_reg = 0;	/* 32-bit key words: 2 for DES, 6 for 3DES */
	uint32_t iv_reg = 0;	/* 2 IV words for CBC, 0 for ECB */

	cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
					pdev->ce_bam_info.ce_burst_size);
	ce_vaddr = (struct sps_command_element *)(*pvaddr);
	ce_vaddr_start = (uintptr_t)(*pvaddr);
	/*
	 * Designate chunks of the allocated memory to various
	 * command list pointers related to cipher operations defined
	 * in ce_cmdlistptrs_ops structure.
	 */
	switch (alg) {
	case CIPHER_ALG_DES:
		if (mode_cbc) {
			cmdlistptr->cipher_des_cbc.cmdlist =
						(uintptr_t)ce_vaddr;
			pcl_info = &(cmdlistptr->cipher_des_cbc);
			encr_cfg = pdev->reg.encr_cfg_des_cbc;
			iv_reg = 2;
			key_reg = 2;
		} else {
			cmdlistptr->cipher_des_ecb.cmdlist =
						(uintptr_t)ce_vaddr;
			pcl_info = &(cmdlistptr->cipher_des_ecb);
			encr_cfg = pdev->reg.encr_cfg_des_ecb;
			iv_reg = 0;
			key_reg = 2;
		}
		break;
	case CIPHER_ALG_3DES:
		if (mode_cbc) {
			cmdlistptr->cipher_3des_cbc.cmdlist =
						(uintptr_t)ce_vaddr;
			pcl_info = &(cmdlistptr->cipher_3des_cbc);
			encr_cfg = pdev->reg.encr_cfg_3des_cbc;
			iv_reg = 2;
			key_reg = 6;
		} else {
			cmdlistptr->cipher_3des_ecb.cmdlist =
						(uintptr_t)ce_vaddr;
			pcl_info = &(cmdlistptr->cipher_3des_ecb);
			encr_cfg = pdev->reg.encr_cfg_3des_ecb;
			iv_reg = 0;
			key_reg = 6;
		}
		break;
	default:
		pr_err("Unknown algorithms %d received, exiting now\n", alg);
		return -EINVAL;
		break;	/* unreachable; kept for defensive symmetry */
	}

	/* clear status register */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
				pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
						&pcl_info->seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
						&pcl_info->encr_seg_cfg);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
						&pcl_info->encr_seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
						&pcl_info->encr_seg_start);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
						&pcl_info->auth_seg_cfg);
	/* first key word is patchable; remaining words zero-filled here */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
						&pcl_info->encr_key);
	for (i = 1; i < key_reg; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
				0, NULL);
	if (iv_reg) {
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
						&pcl_info->encr_cntr_iv);
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR1_IV1_REG, 0,
								NULL);
	}
	/* restore little-endian config, then kick off the engine */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
				pdev->reg.crypto_cfg_le, NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);

	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
	*pvaddr = (unsigned char *) ce_vaddr;

	return 0;
}
/*
 * Build the command list for a "null cipher" pass-through operation.
 *
 * Programs a minimal register sequence (AES-128 ECB encr config with
 * zero segment sizes) and the GO trigger.  Used where data must flow
 * through the engine without transformation; no elements here are
 * patched per-request except implicitly through pcl_info->go_proc.
 *
 * @pdev      - qce device
 * @cri_index - index into pdev->ce_request_info
 * @pvaddr    - in/out: free command memory; advanced past what is used
 *
 * Always returns 0.
 */
static int _setup_cipher_null_cmdlistptrs(struct qce_device *pdev,
		int cri_index, unsigned char **pvaddr)
{
	struct sps_command_element *ce_vaddr;
	uintptr_t ce_vaddr_start;
	struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_request_info
						[cri_index].ce_sps.cmdlistptr;
	struct qce_cmdlist_info *pcl_info = NULL;

	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
					pdev->ce_bam_info.ce_burst_size);
	ce_vaddr_start = (uintptr_t)(*pvaddr);
	ce_vaddr = (struct sps_command_element *)(*pvaddr);

	cmdlistptr->cipher_null.cmdlist = (uintptr_t)ce_vaddr;
	pcl_info = &(cmdlistptr->cipher_null);

	/* segment size fixed to one burst; everything else zeroed */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG,
			pdev->ce_bam_info.ce_burst_size, NULL);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG,
			pdev->reg.encr_cfg_aes_ecb_128, NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
			NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
			NULL);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
			0, NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
			0, NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
			NULL);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);

	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
	*pvaddr = (unsigned char *) ce_vaddr;
	return 0;
}
/*
 * Build the prebaked command list for one hash/HMAC/CMAC configuration.
 *
 * Carves aligned command memory out of *pvaddr and emits the register
 * writes for @alg (SHA1/SHA256, their HMAC variants, or AES-CMAC).
 * Patchable elements (segment sizes, auth IV, byte counts, auth key)
 * are recorded in the matching qce_cmdlist_info.
 *
 * @pdev      - qce device
 * @cri_index - index into pdev->ce_request_info
 * @pvaddr    - in/out: free command memory; advanced past what is used
 * @alg       - hash algorithm selector
 * @key_128   - for AES-CMAC only: true = 128-bit key, false = 256-bit
 *
 * Returns 0 on success, -EINVAL for an unknown algorithm.
 *
 * NOTE: register-write order is consumed by hardware; do not reorder.
 */
static int _setup_auth_cmdlistptrs(struct qce_device *pdev, int cri_index,
		unsigned char **pvaddr, enum qce_hash_alg_enum alg,
		bool key_128)
{
	struct sps_command_element *ce_vaddr;
	uintptr_t ce_vaddr_start;
	struct qce_cmdlistptr_ops *cmdlistptr;
	struct qce_cmdlist_info *pcl_info = NULL;
	int i = 0;
	uint32_t key_reg = 0;	/* 32-bit auth key words (0 = keyless hash) */
	uint32_t auth_cfg = 0;
	uint32_t iv_reg = 0;	/* digest/IV words: 5 for SHA1, 8 for SHA256 */

	cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
					pdev->ce_bam_info.ce_burst_size);
	ce_vaddr_start = (uintptr_t)(*pvaddr);
	ce_vaddr = (struct sps_command_element *)(*pvaddr);
	/*
	 * Designate chunks of the allocated memory to various
	 * command list pointers related to authentication operations
	 * defined in ce_cmdlistptrs_ops structure.
	 */
	switch (alg) {
	case QCE_HASH_SHA1:
		cmdlistptr->auth_sha1.cmdlist = (uintptr_t)ce_vaddr;
		pcl_info = &(cmdlistptr->auth_sha1);

		auth_cfg = pdev->reg.auth_cfg_sha1;
		iv_reg = 5;

		/* clear status register */
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
					0, NULL);

		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
				pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);

		break;
	case QCE_HASH_SHA256:
		cmdlistptr->auth_sha256.cmdlist = (uintptr_t)ce_vaddr;
		pcl_info = &(cmdlistptr->auth_sha256);

		auth_cfg = pdev->reg.auth_cfg_sha256;
		iv_reg = 8;

		/* clear status register */
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
					0, NULL);

		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
				pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
		/* 1 dummy write */
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
								0, NULL);
		break;
	case QCE_HASH_SHA1_HMAC:
		cmdlistptr->auth_sha1_hmac.cmdlist = (uintptr_t)ce_vaddr;
		pcl_info = &(cmdlistptr->auth_sha1_hmac);

		auth_cfg = pdev->reg.auth_cfg_hmac_sha1;
		key_reg = 16;
		iv_reg = 5;

		/* clear status register */
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
					0, NULL);

		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
				pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
		break;
	case QCE_HASH_SHA256_HMAC:
		cmdlistptr->auth_sha256_hmac.cmdlist = (uintptr_t)ce_vaddr;
		pcl_info = &(cmdlistptr->auth_sha256_hmac);

		auth_cfg = pdev->reg.auth_cfg_hmac_sha256;
		key_reg = 16;
		iv_reg = 8;

		/* clear status register */
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0,
									NULL);

		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
				pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
		/* 1 dummy write */
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
								0, NULL);
		break;
	case QCE_HASH_AES_CMAC:
		if (key_128) {
			cmdlistptr->auth_aes_128_cmac.cmdlist =
						(uintptr_t)ce_vaddr;
			pcl_info = &(cmdlistptr->auth_aes_128_cmac);

			auth_cfg = pdev->reg.auth_cfg_cmac_128;
			key_reg = 4;
		} else {
			cmdlistptr->auth_aes_256_cmac.cmdlist =
						(uintptr_t)ce_vaddr;
			pcl_info = &(cmdlistptr->auth_aes_256_cmac);

			auth_cfg = pdev->reg.auth_cfg_cmac_256;
			key_reg = 8;
		}

		/* clear status register */
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0,
									NULL);

		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
				pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
		/* 1 dummy write */
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
								0, NULL);
		break;
	default:
		pr_err("Unknown algorithms %d received, exiting now\n", alg);
		return -EINVAL;
		break;	/* unreachable; kept for defensive symmetry */
	}

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
						&pcl_info->seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0,
						&pcl_info->encr_seg_cfg);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
					auth_cfg, &pcl_info->auth_seg_cfg);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
						&pcl_info->auth_seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
						&pcl_info->auth_seg_start);

	if (alg == QCE_HASH_AES_CMAC) {
		/* reset auth iv, bytecount and key  registers */
		for (i = 0; i < 16; i++)
			qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)),
				0, NULL);
		for (i = 0; i < 16; i++)
			qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)),
				0, NULL);
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
						0, NULL);
	} else {
		/* auth IV is patchable per-request for plain/HMAC hashes */
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
							&pcl_info->auth_iv);
		for (i = 1; i < iv_reg; i++)
			qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
				0, NULL);
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
						0, &pcl_info->auth_bytecount);
	}
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);

	if (key_reg) {
		qce_add_cmd_element(pdev, &ce_vaddr,
				CRYPTO_AUTH_KEY0_REG, 0, &pcl_info->auth_key);
		for (i = 1; i < key_reg; i++)
			qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)),
				0, NULL);
	}
	/* restore little-endian config, then kick off the engine */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
				pdev->reg.crypto_cfg_le, NULL);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);

	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
	*pvaddr = (unsigned char *) ce_vaddr;

	return 0;
}
/*
 * Build the prebaked command list for one AEAD (cipher + HMAC)
 * configuration: {DES, 3DES, AES-128, AES-256} in CBC mode paired with
 * HMAC-SHA1 or HMAC-SHA256.
 *
 * Carves aligned command memory out of *pvaddr, emits the combined
 * cipher and auth register-write sequence, and records the per-request
 * patchable elements in the matching qce_cmdlist_info.
 *
 * @pdev      - qce device
 * @cri_index - index into pdev->ce_request_info
 * @pvaddr    - in/out: free command memory; advanced past what is used
 * @alg       - CIPHER_ALG_DES / CIPHER_ALG_3DES / CIPHER_ALG_AES
 * @mode      - cipher mode; only QCE_MODE_CBC is supported
 * @key_size  - cipher key size in bytes (AES: 128- or 256-bit only)
 * @sha1      - true for HMAC-SHA1, false for HMAC-SHA256
 *
 * Returns 0 on success, -EINVAL for unsupported alg/mode/key_size.
 *
 * NOTE: register-write order is consumed by hardware; do not reorder.
 */
static int _setup_aead_cmdlistptrs(struct qce_device *pdev,
				int cri_index,
				unsigned char **pvaddr,
				uint32_t alg,
				uint32_t mode,
				uint32_t key_size,
				bool     sha1)
{
	struct sps_command_element *ce_vaddr;
	uintptr_t ce_vaddr_start;
	struct qce_cmdlistptr_ops *cmd;
	struct qce_cmdlist_info *pcl_info = NULL;
	uint32_t key_reg;
	uint32_t iv_reg;
	uint32_t i;
	uint32_t  enciv_in_word;	/* cipher IV length in 32-bit words */
	uint32_t encr_cfg;

	cmd = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
					pdev->ce_bam_info.ce_burst_size);

	ce_vaddr_start = (uintptr_t)(*pvaddr);
	ce_vaddr = (struct sps_command_element *)(*pvaddr);

	switch (alg) {
	case CIPHER_ALG_DES:
		switch (mode) {
		case QCE_MODE_CBC:
			if (sha1) {
				cmd->aead_hmac_sha1_cbc_des.cmdlist =
							(uintptr_t)ce_vaddr;
				pcl_info =
				&(cmd->aead_hmac_sha1_cbc_des);
			} else {
				cmd->aead_hmac_sha256_cbc_des.cmdlist =
							(uintptr_t)ce_vaddr;
				pcl_info =
				&(cmd->aead_hmac_sha256_cbc_des);
			}
			encr_cfg = pdev->reg.encr_cfg_des_cbc;
			break;
		default:
			return -EINVAL;
		}
		enciv_in_word = 2;	/* DES block/IV = 64 bits */
		break;
	case CIPHER_ALG_3DES:
		switch (mode) {
		case QCE_MODE_CBC:
			if (sha1) {
				cmd->aead_hmac_sha1_cbc_3des.cmdlist =
							(uintptr_t)ce_vaddr;
				pcl_info =
				&(cmd->aead_hmac_sha1_cbc_3des);
			} else {
				cmd->aead_hmac_sha256_cbc_3des.cmdlist =
							(uintptr_t)ce_vaddr;
				pcl_info =
				&(cmd->aead_hmac_sha256_cbc_3des);
			}
			encr_cfg = pdev->reg.encr_cfg_3des_cbc;
			break;
		default:
			return -EINVAL;
		}
		enciv_in_word = 2;
		break;
	case CIPHER_ALG_AES:
		switch (mode) {
		case QCE_MODE_CBC:
			if (key_size ==  AES128_KEY_SIZE) {
				if (sha1) {
					cmd->aead_hmac_sha1_cbc_aes_128.cmdlist =
							(uintptr_t)ce_vaddr;
					pcl_info =
					&(cmd->aead_hmac_sha1_cbc_aes_128);
				} else {
					cmd->aead_hmac_sha256_cbc_aes_128.cmdlist
							= (uintptr_t)ce_vaddr;
					pcl_info =
					&(cmd->aead_hmac_sha256_cbc_aes_128);
				}
				encr_cfg = pdev->reg.encr_cfg_aes_cbc_128;
			} else if (key_size ==  AES256_KEY_SIZE) {
				if (sha1) {
					cmd->aead_hmac_sha1_cbc_aes_256.cmdlist =
							(uintptr_t)ce_vaddr;
					pcl_info =
					&(cmd->aead_hmac_sha1_cbc_aes_256);
				} else {
					cmd->aead_hmac_sha256_cbc_aes_256.cmdlist =
							(uintptr_t)ce_vaddr;
					pcl_info =
					&(cmd->aead_hmac_sha256_cbc_aes_256);
				}
				encr_cfg = pdev->reg.encr_cfg_aes_cbc_256;
			} else {
				return -EINVAL;
			}
			break;
		default:
			return -EINVAL;
		}
		enciv_in_word = 4;	/* AES block/IV = 128 bits */
		break;
	default:
		return -EINVAL;
	}

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);

	/* cipher key: first word patchable, rest zero-filled */
	key_reg = key_size/sizeof(uint32_t);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
			&pcl_info->encr_key);
	for (i = 1; i < key_reg; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
			(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
			0, NULL);

	if (mode != QCE_MODE_ECB) {
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
			&pcl_info->encr_cntr_iv);
		for (i = 1; i < enciv_in_word; i++)
			qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
				0, NULL);
	}

	if (sha1)
		iv_reg = 5;	/* SHA1 digest = 5 words */
	else
		iv_reg = 8;	/* SHA256 digest = 8 words */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
				&pcl_info->auth_iv);
	for (i = 1; i < iv_reg; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
				0, NULL);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
				0, &pcl_info->auth_bytecount);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);

	/* HMAC key occupies the full key register file */
	key_reg = SHA_HMAC_KEY_SIZE/sizeof(uint32_t);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_KEY0_REG, 0,
			 &pcl_info->auth_key);
	for (i = 1; i < key_reg; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
			(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)), 0, NULL);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
			&pcl_info->seg_size);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
			&pcl_info->encr_seg_cfg);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
			&pcl_info->encr_seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
			&pcl_info->encr_seg_start);

	if (sha1)
		qce_add_cmd_element(
			pdev,
			&ce_vaddr,
			CRYPTO_AUTH_SEG_CFG_REG,
			pdev->reg.auth_cfg_aead_sha1_hmac,
			&pcl_info->auth_seg_cfg);
	else
		qce_add_cmd_element(
			pdev,
			&ce_vaddr,
			CRYPTO_AUTH_SEG_CFG_REG,
			pdev->reg.auth_cfg_aead_sha256_hmac,
			&pcl_info->auth_seg_cfg);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
			&pcl_info->auth_seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
			&pcl_info->auth_seg_start);

	/* restore little-endian config, then kick off the engine */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
			pdev->reg.crypto_cfg_le, NULL);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);

	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
	*pvaddr = (unsigned char *) ce_vaddr;
	return 0;
}
/*
 * Build the prebaked command list for AES-CCM AEAD (128- or 256-bit key).
 *
 * Carves aligned command memory out of *pvaddr, emits the combined
 * CCM cipher/auth register-write sequence (including a full reset of
 * the auth IV/key/bytecount register files), and records the patchable
 * elements (keys, nonce, counter IVs, GO) in the matching
 * qce_cmdlist_info.
 *
 * @pdev      - qce device
 * @cri_index - index into pdev->ce_request_info
 * @pvaddr    - in/out: free command memory; advanced past what is used
 * @key_128   - true for AES-128-CCM, false for AES-256-CCM
 *
 * Always returns 0.
 *
 * NOTE: register-write order is consumed by hardware; do not reorder.
 */
static int _setup_aead_ccm_cmdlistptrs(struct qce_device *pdev, int cri_index,
				unsigned char **pvaddr, bool key_128)
{
	struct sps_command_element *ce_vaddr;
	uintptr_t ce_vaddr_start;
	struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_request_info
						[cri_index].ce_sps.cmdlistptr;
	struct qce_cmdlist_info *pcl_info = NULL;
	int i = 0;
	uint32_t encr_cfg = 0;
	uint32_t auth_cfg = 0;
	uint32_t key_reg = 0;	/* 32-bit key words: 4 or 8 */

	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
					pdev->ce_bam_info.ce_burst_size);
	ce_vaddr_start = (uintptr_t)(*pvaddr);
	ce_vaddr = (struct sps_command_element *)(*pvaddr);
	/*
	 * Designate chunks of the allocated memory to various
	 * command list pointers related to aead operations
	 * defined in ce_cmdlistptrs_ops structure.
	 */
	if (key_128) {
		cmdlistptr->aead_aes_128_ccm.cmdlist =
						(uintptr_t)ce_vaddr;
		pcl_info = &(cmdlistptr->aead_aes_128_ccm);

		auth_cfg = pdev->reg.auth_cfg_aes_ccm_128;
		encr_cfg = pdev->reg.encr_cfg_aes_ccm_128;
		key_reg = 4;
	} else {
		cmdlistptr->aead_aes_256_ccm.cmdlist =
						(uintptr_t)ce_vaddr;
		pcl_info = &(cmdlistptr->aead_aes_256_ccm);

		auth_cfg = pdev->reg.auth_cfg_aes_ccm_256;
		encr_cfg = pdev->reg.encr_cfg_aes_ccm_256;

		key_reg = 8;
	}

	/* clear status register */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);

	/* ENCR_SEG_CFG is written twice: cleared here, programmed with
	 * encr_cfg (and recorded as patchable) a few elements below.
	 */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0, NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
									NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
						&pcl_info->seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG,
					encr_cfg, &pcl_info->encr_seg_cfg);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
						&pcl_info->encr_seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
						&pcl_info->encr_seg_start);
	/* open up the full counter mask (all four words) */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG,
				(uint32_t)0xffffffff, &pcl_info->encr_mask);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG0,
				(uint32_t)0xffffffff, NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG1,
				(uint32_t)0xffffffff, NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG2,
				(uint32_t)0xffffffff, NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
					auth_cfg, &pcl_info->auth_seg_cfg);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
						&pcl_info->auth_seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
						&pcl_info->auth_seg_start);
	/* reset auth iv, bytecount and key  registers */
	for (i = 0; i < 8; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)),
				0, NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
					0, NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG,
					0, NULL);
	for (i = 0; i < 16; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)),
				0, NULL);
	/* set auth key */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_KEY0_REG, 0,
							&pcl_info->auth_key);
	for (i = 1; i < key_reg; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)),
				0, NULL);
	/* set NONCE info */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_INFO_NONCE0_REG, 0,
						&pcl_info->auth_nonce_info);
	for (i = 1; i < 4; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_AUTH_INFO_NONCE0_REG +
				i * sizeof(uint32_t)), 0, NULL);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
						&pcl_info->encr_key);
	for (i = 1; i < key_reg; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
				0, NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
						&pcl_info->encr_cntr_iv);
	for (i = 1; i < 4; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
				0, NULL);
	/* CCM keeps a separate internal counter alongside CNTR0_IV0 */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_CCM_INT_CNTR0_REG, 0,
						&pcl_info->encr_ccm_cntr_iv);
	for (i = 1; i < 4; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
			(CRYPTO_ENCR_CCM_INT_CNTR0_REG + i * sizeof(uint32_t)),
			0, NULL);
	/* restore little-endian config, then kick off the engine */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
				pdev->reg.crypto_cfg_le, NULL);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);

	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
	*pvaddr = (unsigned char *) ce_vaddr;

	return 0;
}
/*
 * Pre-build the SPS command list used to program the crypto engine for an
 * OTA f8 (cipher) operation with the given algorithm (Kasumi, or Snow3G
 * for any other value).
 *
 * The list is carved out of the memory at *pvaddr (aligned to the CE
 * burst size first); on return *pvaddr points just past the generated
 * command elements.  Elements whose values vary per request have their
 * addresses recorded in pcl_info so they can be patched at submit time.
 *
 * Always returns 0.
 */
static int _setup_f8_cmdlistptrs(struct qce_device *pdev, int cri_index,
		unsigned char **pvaddr, enum qce_ota_algo_enum alg)
{
	struct sps_command_element *ce_vaddr;
	uintptr_t ce_vaddr_start;
	struct qce_cmdlistptr_ops *cmdlistptr;
	struct qce_cmdlist_info *pcl_info = NULL;
	int i = 0;
	uint32_t encr_cfg = 0;
	uint32_t key_reg = 4;	/* number of 32-bit ENCR_KEY registers */

	cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
	/* Command lists must start on a CE burst-size boundary. */
	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
					pdev->ce_bam_info.ce_burst_size);
	ce_vaddr = (struct sps_command_element *)(*pvaddr);
	ce_vaddr_start = (uintptr_t)(*pvaddr);

	/*
	 * Designate chunks of the allocated memory to various
	 * command list pointers related to f8 cipher algorithm defined
	 * in ce_cmdlistptrs_ops structure.
	 */
	switch (alg) {
	case QCE_OTA_ALGO_KASUMI:
		cmdlistptr->f8_kasumi.cmdlist = (uintptr_t)ce_vaddr;
		pcl_info = &(cmdlistptr->f8_kasumi);
		encr_cfg = pdev->reg.encr_cfg_kasumi;
		break;
	case QCE_OTA_ALGO_SNOW3G:
	default:
		cmdlistptr->f8_snow3g.cmdlist = (uintptr_t)ce_vaddr;
		pcl_info = &(cmdlistptr->f8_snow3g);
		encr_cfg = pdev->reg.encr_cfg_snow3g;
		break;
	}

	/* clear status register */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
						0, NULL);
	/* set config to big endian */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);

	/* segment registers: actual sizes/offsets patched per request */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
						&pcl_info->seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
						&pcl_info->encr_seg_cfg);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
						&pcl_info->encr_seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
						&pcl_info->encr_seg_start);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
						&pcl_info->auth_seg_cfg);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
						0, &pcl_info->auth_seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG,
						0, &pcl_info->auth_seg_start);

	/* cipher key: first word recorded for patching, rest zeroed here */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
						&pcl_info->encr_key);
	for (i = 1; i < key_reg; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
				0, NULL);

	/* cipher counter/IV words */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
						&pcl_info->encr_cntr_iv);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR1_IV1_REG, 0,
						NULL);

	/* restore little-endian config before results are read back */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
			pdev->reg.crypto_cfg_le, NULL);

	/* kick the engine: GO, dump results, clear context when done */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);

	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
	*pvaddr = (unsigned char *) ce_vaddr;

	return 0;
}
/*
 * Pre-build the SPS command list used to program the crypto engine for an
 * OTA f9 (integrity/authentication) operation with the given algorithm
 * (Kasumi, or Snow3G for any other value).
 *
 * The list is carved out of the memory at *pvaddr (aligned to the CE
 * burst size first); on return *pvaddr points just past the generated
 * command elements.  Elements whose values vary per request have their
 * addresses recorded in pcl_info so they can be patched at submit time.
 *
 * Always returns 0.
 */
static int _setup_f9_cmdlistptrs(struct qce_device *pdev, int cri_index,
		unsigned char **pvaddr, enum qce_ota_algo_enum alg)
{
	struct sps_command_element *ce_vaddr;
	uintptr_t ce_vaddr_start;
	struct qce_cmdlistptr_ops *cmdlistptr;
	struct qce_cmdlist_info *pcl_info = NULL;
	int i = 0;
	uint32_t auth_cfg = 0;
	uint32_t iv_reg = 0;

	cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
	/* Command lists must start on a CE burst-size boundary. */
	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
					pdev->ce_bam_info.ce_burst_size);
	ce_vaddr_start = (uintptr_t)(*pvaddr);
	ce_vaddr = (struct sps_command_element *)(*pvaddr);

	/*
	 * Designate chunks of the allocated memory to various
	 * command list pointers related to authentication operations
	 * defined in ce_cmdlistptrs_ops structure.
	 */
	switch (alg) {
	case QCE_OTA_ALGO_KASUMI:
		cmdlistptr->f9_kasumi.cmdlist = (uintptr_t)ce_vaddr;
		pcl_info = &(cmdlistptr->f9_kasumi);
		auth_cfg = pdev->reg.auth_cfg_kasumi;
		break;
	case QCE_OTA_ALGO_SNOW3G:
	default:
		cmdlistptr->f9_snow3g.cmdlist = (uintptr_t)ce_vaddr;
		pcl_info = &(cmdlistptr->f9_snow3g);
		auth_cfg = pdev->reg.auth_cfg_snow3g;
	}

	/* clear status register */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
						0, NULL);
	/* set config to big endian */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);

	iv_reg = 5;	/* f9 uses 5 of the 32-bit AUTH_IV registers */

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
						&pcl_info->seg_size);
	/* f9 performs no cipher pass: encr segment config written as 0 */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0,
						&pcl_info->encr_seg_cfg);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
					auth_cfg, &pcl_info->auth_seg_cfg);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
						&pcl_info->auth_seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
						&pcl_info->auth_seg_start);

	/* auth IV: first word recorded for patching, rest zeroed here */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
						&pcl_info->auth_iv);
	for (i = 1; i < iv_reg; i++) {
		qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
				0, NULL);
	}
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
					0, &pcl_info->auth_bytecount);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);

	/* restore little-endian config before results are read back */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
			pdev->reg.crypto_cfg_le, NULL);

	/* kick the engine: GO, dump results, clear context when done */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);

	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
	*pvaddr = (unsigned char *) ce_vaddr;

	return 0;
}
  3735. static int _setup_unlock_pipe_cmdlistptrs(struct qce_device *pdev,
  3736. int cri_index, unsigned char **pvaddr)
  3737. {
  3738. struct sps_command_element *ce_vaddr;
  3739. uintptr_t ce_vaddr_start = (uintptr_t)(*pvaddr);
  3740. struct qce_cmdlistptr_ops *cmdlistptr;
  3741. struct qce_cmdlist_info *pcl_info = NULL;
  3742. cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
  3743. *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
  3744. pdev->ce_bam_info.ce_burst_size);
  3745. ce_vaddr = (struct sps_command_element *)(*pvaddr);
  3746. cmdlistptr->unlock_all_pipes.cmdlist = (uintptr_t)ce_vaddr;
  3747. pcl_info = &(cmdlistptr->unlock_all_pipes);
  3748. /*
  3749. * Designate chunks of the allocated memory to command list
  3750. * to unlock pipes.
  3751. */
  3752. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
  3753. CRYPTO_CONFIG_RESET, NULL);
  3754. pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
  3755. *pvaddr = (unsigned char *) ce_vaddr;
  3756. return 0;
  3757. }
  3758. static int qce_setup_cmdlistptrs(struct qce_device *pdev, int cri_index,
  3759. unsigned char **pvaddr)
  3760. {
  3761. struct sps_command_element *ce_vaddr =
  3762. (struct sps_command_element *)(*pvaddr);
  3763. /*
  3764. * Designate chunks of the allocated memory to various
  3765. * command list pointers related to operations defined
  3766. * in ce_cmdlistptrs_ops structure.
  3767. */
  3768. ce_vaddr =
  3769. (struct sps_command_element *)ALIGN(((uintptr_t) ce_vaddr),
  3770. pdev->ce_bam_info.ce_burst_size);
  3771. *pvaddr = (unsigned char *) ce_vaddr;
  3772. _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CBC,
  3773. true);
  3774. _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CTR,
  3775. true);
  3776. _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_ECB,
  3777. true);
  3778. _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_XTS,
  3779. true);
  3780. _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CBC,
  3781. false);
  3782. _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CTR,
  3783. false);
  3784. _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_ECB,
  3785. false);
  3786. _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_XTS,
  3787. false);
  3788. _setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
  3789. true);
  3790. _setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
  3791. false);
  3792. _setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
  3793. true);
  3794. _setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
  3795. false);
  3796. _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA1,
  3797. false);
  3798. _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA256,
  3799. false);
  3800. _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA1_HMAC,
  3801. false);
  3802. _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA256_HMAC,
  3803. false);
  3804. _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_AES_CMAC,
  3805. true);
  3806. _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_AES_CMAC,
  3807. false);
  3808. _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
  3809. QCE_MODE_CBC, DES_KEY_SIZE, true);
  3810. _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
  3811. QCE_MODE_CBC, DES3_EDE_KEY_SIZE, true);
  3812. _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
  3813. QCE_MODE_CBC, AES128_KEY_SIZE, true);
  3814. _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
  3815. QCE_MODE_CBC, AES256_KEY_SIZE, true);
  3816. _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
  3817. QCE_MODE_CBC, DES_KEY_SIZE, false);
  3818. _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
  3819. QCE_MODE_CBC, DES3_EDE_KEY_SIZE, false);
  3820. _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
  3821. QCE_MODE_CBC, AES128_KEY_SIZE, false);
  3822. _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
  3823. QCE_MODE_CBC, AES256_KEY_SIZE, false);
  3824. _setup_cipher_null_cmdlistptrs(pdev, cri_index, pvaddr);
  3825. _setup_aead_ccm_cmdlistptrs(pdev, cri_index, pvaddr, true);
  3826. _setup_aead_ccm_cmdlistptrs(pdev, cri_index, pvaddr, false);
  3827. _setup_f8_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_KASUMI);
  3828. _setup_f8_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_SNOW3G);
  3829. _setup_f9_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_KASUMI);
  3830. _setup_f9_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_SNOW3G);
  3831. _setup_unlock_pipe_cmdlistptrs(pdev, cri_index, pvaddr);
  3832. return 0;
  3833. }
/*
 * Partition the device's pre-allocated coherent memory (coh_vmem) and
 * iovec memory (iovec_vmem) among the MAX_QCE_ALLOC_BAM_REQ request
 * slots: per-slot input/output SPS iovec arrays, pre-built command lists
 * (when command descriptors are supported), two result-dump areas and a
 * scratch "ignore" buffer.  Panics if the carved regions exceed what was
 * allocated.  Returns 0 on success.
 */
static int qce_setup_ce_sps_data(struct qce_device *pce_dev)
{
	unsigned char *vaddr;
	int i;
	unsigned char *iovec_vaddr;
	int iovec_memsize;

	vaddr = pce_dev->coh_vmem;
	vaddr = (unsigned char *)ALIGN(((uintptr_t)vaddr),
					pce_dev->ce_bam_info.ce_burst_size);
	iovec_vaddr = pce_dev->iovec_vmem;
	iovec_memsize = pce_dev->iovec_memsize;
	for (i = 0; i < MAX_QCE_ALLOC_BAM_REQ; i++) {
		/* Allow for 256 descriptor (cmd and data) entries per pipe */
		pce_dev->ce_request_info[i].ce_sps.in_transfer.iovec =
				(struct sps_iovec *)iovec_vaddr;
		pce_dev->ce_request_info[i].ce_sps.in_transfer.iovec_phys =
			virt_to_phys(
			pce_dev->ce_request_info[i].ce_sps.in_transfer.iovec);
		iovec_vaddr += TOTAL_IOVEC_SPACE_PER_PIPE;
		iovec_memsize -= TOTAL_IOVEC_SPACE_PER_PIPE;
		pce_dev->ce_request_info[i].ce_sps.out_transfer.iovec =
				(struct sps_iovec *)iovec_vaddr;
		pce_dev->ce_request_info[i].ce_sps.out_transfer.iovec_phys =
			virt_to_phys(
			pce_dev->ce_request_info[i].ce_sps.out_transfer.iovec);
		iovec_vaddr += TOTAL_IOVEC_SPACE_PER_PIPE;
		iovec_memsize -= TOTAL_IOVEC_SPACE_PER_PIPE;
		/* per-slot pre-built command lists, advances vaddr */
		if (pce_dev->support_cmd_dscr)
			qce_setup_cmdlistptrs(pce_dev, i, &vaddr);
		vaddr = (unsigned char *)ALIGN(((uintptr_t)vaddr),
				pce_dev->ce_bam_info.ce_burst_size);
		/* primary result dump area */
		pce_dev->ce_request_info[i].ce_sps.result_dump =
				(uintptr_t)vaddr;
		pce_dev->ce_request_info[i].ce_sps.result_dump_phy =
				GET_PHYS_ADDR((uintptr_t)vaddr);
		pce_dev->ce_request_info[i].ce_sps.result =
				(struct ce_result_dump_format *)vaddr;
		vaddr += CRYPTO_RESULT_DUMP_SIZE;
		/* secondary ("null") result dump area for discarded results */
		pce_dev->ce_request_info[i].ce_sps.result_dump_null =
				(uintptr_t)vaddr;
		pce_dev->ce_request_info[i].ce_sps.result_dump_null_phy =
				GET_PHYS_ADDR((uintptr_t)vaddr);
		pce_dev->ce_request_info[i].ce_sps.result_null =
				(struct ce_result_dump_format *)vaddr;
		vaddr += CRYPTO_RESULT_DUMP_SIZE;
		/* scratch buffer used to sink pass-through/ignored data */
		pce_dev->ce_request_info[i].ce_sps.ignore_buffer =
				(uintptr_t)vaddr;
		vaddr += pce_dev->ce_bam_info.ce_burst_size * 2;
	}
	/* carving must not overrun either allocation */
	if ((vaddr - pce_dev->coh_vmem) > pce_dev->memsize ||
							iovec_memsize < 0)
		panic("qce50: Not enough coherent memory. Allocate %x , need %lx\n",
				 pce_dev->memsize, (uintptr_t)vaddr -
				(uintptr_t)pce_dev->coh_vmem);
	return 0;
}
/*
 * Pre-compute the CRYPTO register configuration words that the command
 * lists and direct-programming paths write into the engine: the
 * big/little-endian CRYPTO_CONFIG values (burst beats, interrupt masks,
 * pipe-pair select) and the per-algorithm encr_cfg / auth_cfg words for
 * AES (CBC/CTR/XTS/ECB/CCM), DES/3DES, Kasumi, Snow3G, CMAC, SHA1/SHA256
 * and their HMAC/AEAD variants.  Always returns 0.
 */
static int qce_init_ce_cfg_val(struct qce_device *pce_dev)
{
	/* beats per burst: burst size is in bytes, 8 bytes per beat */
	uint32_t beats = (pce_dev->ce_bam_info.ce_burst_size >> 3) - 1;
	uint32_t pipe_pair = pce_dev->ce_bam_info.pipe_pair_index;

	pce_dev->reg.crypto_cfg_be = (beats << CRYPTO_REQ_SIZE) |
		BIT(CRYPTO_MASK_DOUT_INTR) | BIT(CRYPTO_MASK_DIN_INTR) |
		BIT(CRYPTO_MASK_OP_DONE_INTR) | (0 << CRYPTO_HIGH_SPD_EN_N) |
		(pipe_pair << CRYPTO_PIPE_SET_SELECT);
	/* little-endian variant only adds the endianness mask bits */
	pce_dev->reg.crypto_cfg_le =
		(pce_dev->reg.crypto_cfg_be | CRYPTO_LITTLE_ENDIAN_MASK);

	/* Initialize encr_cfg register for AES alg */
	pce_dev->reg.encr_cfg_aes_cbc_128 =
		(CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
		(CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);

	pce_dev->reg.encr_cfg_aes_cbc_256 =
		(CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
		(CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);

	pce_dev->reg.encr_cfg_aes_ctr_128 =
		(CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
		(CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);

	pce_dev->reg.encr_cfg_aes_ctr_256 =
		(CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
		(CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);

	pce_dev->reg.encr_cfg_aes_xts_128 =
		(CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
		(CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE);

	pce_dev->reg.encr_cfg_aes_xts_256 =
		(CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
		(CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE);

	pce_dev->reg.encr_cfg_aes_ecb_128 =
		(CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
		(CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);

	pce_dev->reg.encr_cfg_aes_ecb_256 =
		(CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
		(CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);

	/* CCM additionally flags the transfer as the last CCM transfer */
	pce_dev->reg.encr_cfg_aes_ccm_128 =
		(CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
		(CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE)|
		(CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM);

	pce_dev->reg.encr_cfg_aes_ccm_256 =
		(CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
		(CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE) |
		(CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM);

	/* Initialize encr_cfg register for DES alg */
	pce_dev->reg.encr_cfg_des_ecb =
		(CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) |
		(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
		(CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);

	pce_dev->reg.encr_cfg_des_cbc =
		(CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) |
		(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
		(CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);

	pce_dev->reg.encr_cfg_3des_ecb =
		(CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) |
		(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
		(CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);

	pce_dev->reg.encr_cfg_3des_cbc =
		(CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) |
		(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
		(CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);

	/* Initialize encr_cfg register for kasumi/snow3g alg */
	pce_dev->reg.encr_cfg_kasumi =
		(CRYPTO_ENCR_ALG_KASUMI << CRYPTO_ENCR_ALG);

	pce_dev->reg.encr_cfg_snow3g =
		(CRYPTO_ENCR_ALG_SNOW_3G << CRYPTO_ENCR_ALG);

	/* Initialize auth_cfg register for CMAC alg */
	pce_dev->reg.auth_cfg_cmac_128 =
		(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
		(CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE)|
		(CRYPTO_AUTH_SIZE_ENUM_16_BYTES << CRYPTO_AUTH_SIZE) |
		(CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
		(CRYPTO_AUTH_KEY_SZ_AES128 << CRYPTO_AUTH_KEY_SIZE);

	pce_dev->reg.auth_cfg_cmac_256 =
		(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
		(CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE)|
		(CRYPTO_AUTH_SIZE_ENUM_16_BYTES << CRYPTO_AUTH_SIZE) |
		(CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
		(CRYPTO_AUTH_KEY_SZ_AES256 << CRYPTO_AUTH_KEY_SIZE);

	/* Initialize auth_cfg register for HMAC alg */
	pce_dev->reg.auth_cfg_hmac_sha1 =
		(CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
		(CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
		(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
		(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);

	pce_dev->reg.auth_cfg_hmac_sha256 =
		(CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
		(CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
		(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
		(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);

	/* Initialize auth_cfg register for SHA1/256 alg */
	pce_dev->reg.auth_cfg_sha1 =
		(CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE)|
		(CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
		(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
		(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);

	pce_dev->reg.auth_cfg_sha256 =
		(CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE)|
		(CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
		(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
		(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);

	/* Initialize auth_cfg register for AEAD alg */
	pce_dev->reg.auth_cfg_aead_sha1_hmac =
		(CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
		(CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
		(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
		(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST);

	pce_dev->reg.auth_cfg_aead_sha256_hmac =
		(CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
		(CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
		(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
		(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST);

	pce_dev->reg.auth_cfg_aes_ccm_128 =
		(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
		(CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE)|
		(CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
		(CRYPTO_AUTH_KEY_SZ_AES128 << CRYPTO_AUTH_KEY_SIZE) |
		((MAX_NONCE/sizeof(uint32_t)) << CRYPTO_AUTH_NONCE_NUM_WORDS);
	/* CCM auth key always comes from software, never the HW key */
	pce_dev->reg.auth_cfg_aes_ccm_128 &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);

	pce_dev->reg.auth_cfg_aes_ccm_256 =
		(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
		(CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE)|
		(CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
		(CRYPTO_AUTH_KEY_SZ_AES256 << CRYPTO_AUTH_KEY_SIZE) |
		((MAX_NONCE/sizeof(uint32_t)) << CRYPTO_AUTH_NONCE_NUM_WORDS);
	pce_dev->reg.auth_cfg_aes_ccm_256 &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);

	/* Initialize auth_cfg register for kasumi/snow3g */
	pce_dev->reg.auth_cfg_kasumi =
			(CRYPTO_AUTH_ALG_KASUMI << CRYPTO_AUTH_ALG) |
				BIT(CRYPTO_FIRST) | BIT(CRYPTO_LAST);
	pce_dev->reg.auth_cfg_snow3g =
			(CRYPTO_AUTH_ALG_SNOW3G << CRYPTO_AUTH_ALG) |
				BIT(CRYPTO_FIRST) | BIT(CRYPTO_LAST);
	return 0;
}
  4034. static void _qce_ccm_get_around_input(struct qce_device *pce_dev,
  4035. struct ce_request_info *preq_info, enum qce_cipher_dir_enum dir)
  4036. {
  4037. struct qce_cmdlist_info *cmdlistinfo;
  4038. struct ce_sps_data *pce_sps_data;
  4039. pce_sps_data = &preq_info->ce_sps;
  4040. if ((dir == QCE_DECRYPT) && pce_dev->no_get_around &&
  4041. !(pce_dev->no_ccm_mac_status_get_around)) {
  4042. cmdlistinfo = &pce_sps_data->cmdlistptr.cipher_null;
  4043. _qce_sps_add_cmd(pce_dev, 0, cmdlistinfo,
  4044. &pce_sps_data->in_transfer);
  4045. _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
  4046. pce_dev->ce_bam_info.ce_burst_size,
  4047. &pce_sps_data->in_transfer);
  4048. _qce_set_flag(&pce_sps_data->in_transfer,
  4049. SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD);
  4050. }
  4051. }
  4052. static void _qce_ccm_get_around_output(struct qce_device *pce_dev,
  4053. struct ce_request_info *preq_info, enum qce_cipher_dir_enum dir)
  4054. {
  4055. struct ce_sps_data *pce_sps_data;
  4056. pce_sps_data = &preq_info->ce_sps;
  4057. if ((dir == QCE_DECRYPT) && pce_dev->no_get_around &&
  4058. !(pce_dev->no_ccm_mac_status_get_around)) {
  4059. _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
  4060. pce_dev->ce_bam_info.ce_burst_size,
  4061. &pce_sps_data->out_transfer);
  4062. _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump_null),
  4063. CRYPTO_RESULT_DUMP_SIZE, &pce_sps_data->out_transfer);
  4064. }
  4065. }
  4066. /* QCE_DUMMY_REQ */
  4067. static void qce_dummy_complete(void *cookie, unsigned char *digest,
  4068. unsigned char *authdata, int ret)
  4069. {
  4070. if (!cookie)
  4071. pr_err("invalid cookie\n");
  4072. }
  4073. static int qce_dummy_req(struct qce_device *pce_dev)
  4074. {
  4075. int ret = 0;
  4076. if (atomic_xchg(
  4077. &pce_dev->ce_request_info[DUMMY_REQ_INDEX].in_use, true))
  4078. return -EBUSY;
  4079. ret = qce_process_sha_req(pce_dev, NULL);
  4080. pce_dev->qce_stats.no_of_dummy_reqs++;
  4081. return ret;
  4082. }
/*
 * Decide how completion notification is requested for this transfer and
 * claim the device for submission.
 *
 * Without the "get around" capability every request gets an interrupt
 * (SPS_IOVEC_FLAG_INT).  Otherwise the driver first spins to take
 * ownership of the device (the caller releases it after
 * _qce_sps_transfer), then throttles interrupts: in interrupt mode it
 * switches to bunch mode once MAX_BUNCH_MODE_REQ requests are queued and
 * arms a fallback timer; in bunch mode it requests an interrupt only
 * every "cadence" requests, where cadence scales with the request length
 * (capped at SET_INTR_AT_REQ).  Always returns 0.
 */
static int select_mode(struct qce_device *pce_dev,
		struct ce_request_info *preq_info)
{
	struct ce_sps_data *pce_sps_data = &preq_info->ce_sps;
	unsigned int no_of_queued_req;
	unsigned int cadence;

	if (!pce_dev->no_get_around) {
		_qce_set_flag(&pce_sps_data->out_transfer, SPS_IOVEC_FLAG_INT);
		return 0;
	}

	/*
	 * claim ownership of device
	 */
again:
	if (cmpxchg(&pce_dev->owner, QCE_OWNER_NONE, QCE_OWNER_CLIENT)
							!= QCE_OWNER_NONE) {
		ndelay(40);
		goto again;
	}
	no_of_queued_req = atomic_inc_return(&pce_dev->no_of_queued_req);
	if (pce_dev->mode == IN_INTERRUPT_MODE) {
		if (no_of_queued_req >= MAX_BUNCH_MODE_REQ) {
			/* enough backlog: switch to bunch mode */
			pce_dev->mode = IN_BUNCH_MODE;
			pr_debug("pcedev %d mode switch to BUNCH\n",
					pce_dev->dev_no);
			_qce_set_flag(&pce_sps_data->out_transfer,
					SPS_IOVEC_FLAG_INT);
			pce_dev->intr_cadence = 0;
			atomic_set(&pce_dev->bunch_cmd_seq, 1);
			atomic_set(&pce_dev->last_intr_seq, 1);
			/* timer guards against a stalled bunch */
			mod_timer(&(pce_dev->timer),
					(jiffies + DELAY_IN_JIFFIES));
		} else {
			_qce_set_flag(&pce_sps_data->out_transfer,
					SPS_IOVEC_FLAG_INT);
		}
	} else {
		pce_dev->intr_cadence++;
		/* larger requests interrupt more often (>>7 = /128 bytes) */
		cadence = (preq_info->req_len >> 7) + 1;
		if (cadence > SET_INTR_AT_REQ)
			cadence = SET_INTR_AT_REQ;
		if (pce_dev->intr_cadence < cadence || ((pce_dev->intr_cadence
					== cadence) && pce_dev->cadence_flag))
			atomic_inc(&pce_dev->bunch_cmd_seq);
		else {
			/* cadence reached: request an interrupt and reset */
			_qce_set_flag(&pce_sps_data->out_transfer,
					SPS_IOVEC_FLAG_INT);
			pce_dev->intr_cadence = 0;
			atomic_set(&pce_dev->bunch_cmd_seq, 0);
			atomic_set(&pce_dev->last_intr_seq, 0);
			pce_dev->cadence_flag = !pce_dev->cadence_flag;
		}
	}

	return 0;
}
  4138. static int _qce_aead_ccm_req(void *handle, struct qce_req *q_req)
  4139. {
  4140. int rc = 0;
  4141. struct qce_device *pce_dev = (struct qce_device *) handle;
  4142. struct aead_request *areq = (struct aead_request *) q_req->areq;
  4143. uint32_t authsize = q_req->authsize;
  4144. uint32_t totallen_in, out_len;
  4145. uint32_t hw_pad_out = 0;
  4146. int ce_burst_size;
  4147. struct qce_cmdlist_info *cmdlistinfo = NULL;
  4148. int req_info = -1;
  4149. struct ce_request_info *preq_info;
  4150. struct ce_sps_data *pce_sps_data;
  4151. req_info = qce_alloc_req_info(pce_dev);
  4152. if (req_info < 0)
  4153. return -EBUSY;
  4154. preq_info = &pce_dev->ce_request_info[req_info];
  4155. pce_sps_data = &preq_info->ce_sps;
  4156. ce_burst_size = pce_dev->ce_bam_info.ce_burst_size;
  4157. totallen_in = areq->cryptlen + q_req->assoclen;
  4158. if (q_req->dir == QCE_ENCRYPT) {
  4159. q_req->cryptlen = areq->cryptlen;
  4160. out_len = areq->cryptlen + authsize;
  4161. hw_pad_out = ALIGN(authsize, ce_burst_size) - authsize;
  4162. } else {
  4163. q_req->cryptlen = areq->cryptlen - authsize;
  4164. out_len = q_req->cryptlen;
  4165. hw_pad_out = authsize;
  4166. }
  4167. /*
  4168. * For crypto 5.0 that has burst size alignment requirement
  4169. * for data descritpor,
  4170. * the agent above(qcrypto) prepares the src scatter list with
  4171. * memory starting with associated data, followed by
  4172. * data stream to be ciphered.
  4173. * The destination scatter list is pointing to the same
  4174. * data area as source.
  4175. */
  4176. if (pce_dev->ce_bam_info.minor_version == 0)
  4177. preq_info->src_nents = count_sg(areq->src, totallen_in);
  4178. else
  4179. preq_info->src_nents = count_sg(areq->src, areq->cryptlen +
  4180. areq->assoclen);
  4181. if (q_req->assoclen) {
  4182. preq_info->assoc_nents = count_sg(q_req->asg, q_req->assoclen);
  4183. /* formatted associated data input */
  4184. qce_dma_map_sg(pce_dev->pdev, q_req->asg,
  4185. preq_info->assoc_nents, DMA_TO_DEVICE);
  4186. preq_info->asg = q_req->asg;
  4187. } else {
  4188. preq_info->assoc_nents = 0;
  4189. preq_info->asg = NULL;
  4190. }
  4191. /* cipher input */
  4192. qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
  4193. (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
  4194. DMA_TO_DEVICE);
  4195. /* cipher + mac output for encryption */
  4196. if (areq->src != areq->dst) {
  4197. /*
  4198. * The destination scatter list is pointing to the same
  4199. * data area as src.
  4200. * Note, the associated data will be pass-through
  4201. * at the beginning of destination area.
  4202. */
  4203. preq_info->dst_nents = count_sg(areq->dst,
  4204. out_len + areq->assoclen);
  4205. qce_dma_map_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
  4206. DMA_FROM_DEVICE);
  4207. } else {
  4208. preq_info->dst_nents = preq_info->src_nents;
  4209. }
  4210. if (pce_dev->support_cmd_dscr) {
  4211. cmdlistinfo = _ce_get_cipher_cmdlistinfo(pce_dev, req_info,
  4212. q_req);
  4213. if (cmdlistinfo == NULL) {
  4214. pr_err("Unsupported cipher algorithm %d, mode %d\n",
  4215. q_req->alg, q_req->mode);
  4216. qce_free_req_info(pce_dev, req_info, false);
  4217. return -EINVAL;
  4218. }
  4219. /* set up crypto device */
  4220. rc = _ce_setup_cipher(pce_dev, q_req, totallen_in,
  4221. q_req->assoclen, cmdlistinfo);
  4222. } else {
  4223. /* set up crypto device */
  4224. rc = _ce_setup_cipher_direct(pce_dev, q_req, totallen_in,
  4225. q_req->assoclen);
  4226. }
  4227. if (rc < 0)
  4228. goto bad;
  4229. preq_info->mode = q_req->mode;
  4230. /* setup for callback, and issue command to bam */
  4231. preq_info->areq = q_req->areq;
  4232. preq_info->qce_cb = q_req->qce_cb;
  4233. preq_info->dir = q_req->dir;
  4234. /* setup xfer type for producer callback handling */
  4235. preq_info->xfer_type = QCE_XFER_AEAD;
  4236. preq_info->req_len = totallen_in;
  4237. _qce_sps_iovec_count_init(pce_dev, req_info);
  4238. if (pce_dev->support_cmd_dscr && cmdlistinfo)
  4239. _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
  4240. &pce_sps_data->in_transfer);
  4241. if (pce_dev->ce_bam_info.minor_version == 0) {
  4242. goto bad;
  4243. } else {
  4244. if (q_req->assoclen && (_qce_sps_add_sg_data(
  4245. pce_dev, q_req->asg, q_req->assoclen,
  4246. &pce_sps_data->in_transfer)))
  4247. goto bad;
  4248. if (_qce_sps_add_sg_data_off(pce_dev, areq->src, areq->cryptlen,
  4249. areq->assoclen,
  4250. &pce_sps_data->in_transfer))
  4251. goto bad;
  4252. _qce_set_flag(&pce_sps_data->in_transfer,
  4253. SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
  4254. _qce_ccm_get_around_input(pce_dev, preq_info, q_req->dir);
  4255. if (pce_dev->no_get_around)
  4256. _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
  4257. &pce_sps_data->cmdlistptr.unlock_all_pipes,
  4258. &pce_sps_data->in_transfer);
  4259. /* Pass through to ignore associated data*/
  4260. if (_qce_sps_add_data(
  4261. GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
  4262. q_req->assoclen,
  4263. &pce_sps_data->out_transfer))
  4264. goto bad;
  4265. if (_qce_sps_add_sg_data_off(pce_dev, areq->dst, out_len,
  4266. areq->assoclen,
  4267. &pce_sps_data->out_transfer))
  4268. goto bad;
  4269. /* Pass through to ignore hw_pad (padding of the MAC data) */
  4270. if (_qce_sps_add_data(
  4271. GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
  4272. hw_pad_out, &pce_sps_data->out_transfer))
  4273. goto bad;
  4274. if (pce_dev->no_get_around ||
  4275. totallen_in <= SPS_MAX_PKT_SIZE) {
  4276. if (_qce_sps_add_data(
  4277. GET_PHYS_ADDR(pce_sps_data->result_dump),
  4278. CRYPTO_RESULT_DUMP_SIZE,
  4279. &pce_sps_data->out_transfer))
  4280. goto bad;
  4281. pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
  4282. } else {
  4283. pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
  4284. }
  4285. _qce_ccm_get_around_output(pce_dev, preq_info, q_req->dir);
  4286. select_mode(pce_dev, preq_info);
  4287. rc = _qce_sps_transfer(pce_dev, req_info);
  4288. cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
  4289. }
  4290. if (rc)
  4291. goto bad;
  4292. return 0;
  4293. bad:
  4294. if (preq_info->assoc_nents) {
  4295. qce_dma_unmap_sg(pce_dev->pdev, q_req->asg,
  4296. preq_info->assoc_nents, DMA_TO_DEVICE);
  4297. }
  4298. if (preq_info->src_nents) {
  4299. qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
  4300. (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
  4301. DMA_TO_DEVICE);
  4302. }
  4303. if (areq->src != areq->dst) {
  4304. qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
  4305. DMA_FROM_DEVICE);
  4306. }
  4307. qce_free_req_info(pce_dev, req_info, false);
  4308. return rc;
  4309. }
  4310. static int _qce_suspend(void *handle)
  4311. {
  4312. struct qce_device *pce_dev = (struct qce_device *)handle;
  4313. struct sps_pipe *sps_pipe_info;
  4314. if (handle == NULL)
  4315. return -ENODEV;
  4316. sps_pipe_info = pce_dev->ce_bam_info.consumer.pipe;
  4317. sps_disconnect(sps_pipe_info);
  4318. sps_pipe_info = pce_dev->ce_bam_info.producer.pipe;
  4319. sps_disconnect(sps_pipe_info);
  4320. return 0;
  4321. }
  4322. static int _qce_resume(void *handle)
  4323. {
  4324. struct qce_device *pce_dev = (struct qce_device *)handle;
  4325. struct sps_pipe *sps_pipe_info;
  4326. struct sps_connect *sps_connect_info;
  4327. int rc;
  4328. if (handle == NULL)
  4329. return -ENODEV;
  4330. sps_pipe_info = pce_dev->ce_bam_info.consumer.pipe;
  4331. sps_connect_info = &pce_dev->ce_bam_info.consumer.connect;
  4332. memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size);
  4333. rc = sps_connect(sps_pipe_info, sps_connect_info);
  4334. if (rc) {
  4335. pr_err("sps_connect() fail pipe_handle=0x%lx, rc = %d\n",
  4336. (uintptr_t)sps_pipe_info, rc);
  4337. return rc;
  4338. }
  4339. sps_pipe_info = pce_dev->ce_bam_info.producer.pipe;
  4340. sps_connect_info = &pce_dev->ce_bam_info.producer.connect;
  4341. memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size);
  4342. rc = sps_connect(sps_pipe_info, sps_connect_info);
  4343. if (rc)
  4344. pr_err("sps_connect() fail pipe_handle=0x%lx, rc = %d\n",
  4345. (uintptr_t)sps_pipe_info, rc);
  4346. rc = sps_register_event(sps_pipe_info,
  4347. &pce_dev->ce_bam_info.producer.event);
  4348. if (rc)
  4349. pr_err("Producer callback registration failed rc = %d\n", rc);
  4350. return rc;
  4351. }
/*
 * Suspend/resume hooks exported to QCE client drivers; invoked around
 * system power transitions to disconnect/reconnect the BAM pipes.
 */
struct qce_pm_table qce_pm_table = {_qce_suspend, _qce_resume};
EXPORT_SYMBOL(qce_pm_table);
  4354. int qce_aead_req(void *handle, struct qce_req *q_req)
  4355. {
  4356. struct qce_device *pce_dev = (struct qce_device *)handle;
  4357. struct aead_request *areq;
  4358. uint32_t authsize;
  4359. struct crypto_aead *aead;
  4360. uint32_t ivsize;
  4361. uint32_t totallen;
  4362. int rc = 0;
  4363. struct qce_cmdlist_info *cmdlistinfo = NULL;
  4364. int req_info = -1;
  4365. struct ce_sps_data *pce_sps_data;
  4366. struct ce_request_info *preq_info;
  4367. if (q_req->mode == QCE_MODE_CCM)
  4368. return _qce_aead_ccm_req(handle, q_req);
  4369. req_info = qce_alloc_req_info(pce_dev);
  4370. if (req_info < 0)
  4371. return -EBUSY;
  4372. preq_info = &pce_dev->ce_request_info[req_info];
  4373. pce_sps_data = &preq_info->ce_sps;
  4374. areq = (struct aead_request *) q_req->areq;
  4375. aead = crypto_aead_reqtfm(areq);
  4376. ivsize = crypto_aead_ivsize(aead);
  4377. q_req->ivsize = ivsize;
  4378. authsize = q_req->authsize;
  4379. if (q_req->dir == QCE_ENCRYPT)
  4380. q_req->cryptlen = areq->cryptlen;
  4381. else
  4382. q_req->cryptlen = areq->cryptlen - authsize;
  4383. if (q_req->cryptlen > UINT_MAX - areq->assoclen) {
  4384. pr_err("Integer overflow on total aead req length.\n");
  4385. return -EINVAL;
  4386. }
  4387. totallen = q_req->cryptlen + areq->assoclen;
  4388. if (pce_dev->support_cmd_dscr) {
  4389. cmdlistinfo = _ce_get_aead_cmdlistinfo(pce_dev,
  4390. req_info, q_req);
  4391. if (cmdlistinfo == NULL) {
  4392. pr_err("Unsupported aead ciphering algorithm %d, mode %d, ciphering key length %d, auth digest size %d\n",
  4393. q_req->alg, q_req->mode, q_req->encklen,
  4394. q_req->authsize);
  4395. qce_free_req_info(pce_dev, req_info, false);
  4396. return -EINVAL;
  4397. }
  4398. /* set up crypto device */
  4399. rc = _ce_setup_aead(pce_dev, q_req, totallen,
  4400. areq->assoclen, cmdlistinfo);
  4401. if (rc < 0) {
  4402. qce_free_req_info(pce_dev, req_info, false);
  4403. return -EINVAL;
  4404. }
  4405. }
  4406. /*
  4407. * For crypto 5.0 that has burst size alignment requirement
  4408. * for data descritpor,
  4409. * the agent above(qcrypto) prepares the src scatter list with
  4410. * memory starting with associated data, followed by
  4411. * iv, and data stream to be ciphered.
  4412. */
  4413. preq_info->src_nents = count_sg(areq->src, totallen);
  4414. /* cipher input */
  4415. qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
  4416. (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
  4417. DMA_TO_DEVICE);
  4418. /* cipher output for encryption */
  4419. if (areq->src != areq->dst) {
  4420. preq_info->dst_nents = count_sg(areq->dst, totallen);
  4421. qce_dma_map_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
  4422. DMA_FROM_DEVICE);
  4423. }
  4424. /* setup for callback, and issue command to bam */
  4425. preq_info->areq = q_req->areq;
  4426. preq_info->qce_cb = q_req->qce_cb;
  4427. preq_info->dir = q_req->dir;
  4428. preq_info->asg = NULL;
  4429. /* setup xfer type for producer callback handling */
  4430. preq_info->xfer_type = QCE_XFER_AEAD;
  4431. preq_info->req_len = totallen;
  4432. _qce_sps_iovec_count_init(pce_dev, req_info);
  4433. if (pce_dev->support_cmd_dscr) {
  4434. _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
  4435. &pce_sps_data->in_transfer);
  4436. } else {
  4437. rc = _ce_setup_aead_direct(pce_dev, q_req, totallen,
  4438. areq->assoclen);
  4439. if (rc)
  4440. goto bad;
  4441. }
  4442. preq_info->mode = q_req->mode;
  4443. if (pce_dev->ce_bam_info.minor_version == 0) {
  4444. if (_qce_sps_add_sg_data(pce_dev, areq->src, totallen,
  4445. &pce_sps_data->in_transfer))
  4446. goto bad;
  4447. _qce_set_flag(&pce_sps_data->in_transfer,
  4448. SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
  4449. if (_qce_sps_add_sg_data(pce_dev, areq->dst, totallen,
  4450. &pce_sps_data->out_transfer))
  4451. goto bad;
  4452. if (totallen > SPS_MAX_PKT_SIZE) {
  4453. _qce_set_flag(&pce_sps_data->out_transfer,
  4454. SPS_IOVEC_FLAG_INT);
  4455. pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
  4456. } else {
  4457. if (_qce_sps_add_data(GET_PHYS_ADDR(
  4458. pce_sps_data->result_dump),
  4459. CRYPTO_RESULT_DUMP_SIZE,
  4460. &pce_sps_data->out_transfer))
  4461. goto bad;
  4462. _qce_set_flag(&pce_sps_data->out_transfer,
  4463. SPS_IOVEC_FLAG_INT);
  4464. pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
  4465. }
  4466. rc = _qce_sps_transfer(pce_dev, req_info);
  4467. } else {
  4468. if (_qce_sps_add_sg_data(pce_dev, areq->src, totallen,
  4469. &pce_sps_data->in_transfer))
  4470. goto bad;
  4471. _qce_set_flag(&pce_sps_data->in_transfer,
  4472. SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
  4473. if (pce_dev->no_get_around)
  4474. _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
  4475. &pce_sps_data->cmdlistptr.unlock_all_pipes,
  4476. &pce_sps_data->in_transfer);
  4477. if (_qce_sps_add_sg_data(pce_dev, areq->dst, totallen,
  4478. &pce_sps_data->out_transfer))
  4479. goto bad;
  4480. if (pce_dev->no_get_around || totallen <= SPS_MAX_PKT_SIZE) {
  4481. if (_qce_sps_add_data(
  4482. GET_PHYS_ADDR(pce_sps_data->result_dump),
  4483. CRYPTO_RESULT_DUMP_SIZE,
  4484. &pce_sps_data->out_transfer))
  4485. goto bad;
  4486. pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
  4487. } else {
  4488. pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
  4489. }
  4490. select_mode(pce_dev, preq_info);
  4491. rc = _qce_sps_transfer(pce_dev, req_info);
  4492. cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
  4493. }
  4494. if (rc)
  4495. goto bad;
  4496. return 0;
  4497. bad:
  4498. if (preq_info->src_nents)
  4499. qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
  4500. (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
  4501. DMA_TO_DEVICE);
  4502. if (areq->src != areq->dst)
  4503. qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
  4504. DMA_FROM_DEVICE);
  4505. qce_free_req_info(pce_dev, req_info, false);
  4506. return rc;
  4507. }
  4508. EXPORT_SYMBOL(qce_aead_req);
  4509. int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
  4510. {
  4511. int rc = 0;
  4512. struct qce_device *pce_dev = (struct qce_device *) handle;
  4513. struct skcipher_request *areq = (struct skcipher_request *)
  4514. c_req->areq;
  4515. struct qce_cmdlist_info *cmdlistinfo = NULL;
  4516. int req_info = -1;
  4517. struct ce_sps_data *pce_sps_data;
  4518. struct ce_request_info *preq_info;
  4519. req_info = qce_alloc_req_info(pce_dev);
  4520. if (req_info < 0)
  4521. return -EBUSY;
  4522. preq_info = &pce_dev->ce_request_info[req_info];
  4523. pce_sps_data = &preq_info->ce_sps;
  4524. preq_info->src_nents = 0;
  4525. preq_info->dst_nents = 0;
  4526. /* cipher input */
  4527. preq_info->src_nents = count_sg(areq->src, areq->cryptlen);
  4528. qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
  4529. (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
  4530. DMA_TO_DEVICE);
  4531. /* cipher output */
  4532. if (areq->src != areq->dst) {
  4533. preq_info->dst_nents = count_sg(areq->dst, areq->cryptlen);
  4534. qce_dma_map_sg(pce_dev->pdev, areq->dst,
  4535. preq_info->dst_nents, DMA_FROM_DEVICE);
  4536. } else {
  4537. preq_info->dst_nents = preq_info->src_nents;
  4538. }
  4539. preq_info->dir = c_req->dir;
  4540. if ((pce_dev->ce_bam_info.minor_version == 0) &&
  4541. (preq_info->dir == QCE_DECRYPT) &&
  4542. (c_req->mode == QCE_MODE_CBC)) {
  4543. memcpy(preq_info->dec_iv, (unsigned char *)
  4544. sg_virt(areq->src) + areq->src->length - 16,
  4545. NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE);
  4546. }
  4547. /* set up crypto device */
  4548. if (pce_dev->support_cmd_dscr) {
  4549. cmdlistinfo = _ce_get_cipher_cmdlistinfo(pce_dev,
  4550. req_info, c_req);
  4551. if (cmdlistinfo == NULL) {
  4552. pr_err("Unsupported cipher algorithm %d, mode %d\n",
  4553. c_req->alg, c_req->mode);
  4554. qce_free_req_info(pce_dev, req_info, false);
  4555. return -EINVAL;
  4556. }
  4557. rc = _ce_setup_cipher(pce_dev, c_req, areq->cryptlen, 0,
  4558. cmdlistinfo);
  4559. } else {
  4560. rc = _ce_setup_cipher_direct(pce_dev, c_req, areq->cryptlen, 0);
  4561. }
  4562. if (rc < 0)
  4563. goto bad;
  4564. preq_info->mode = c_req->mode;
  4565. /* setup for client callback, and issue command to BAM */
  4566. preq_info->areq = areq;
  4567. preq_info->qce_cb = c_req->qce_cb;
  4568. /* setup xfer type for producer callback handling */
  4569. preq_info->xfer_type = QCE_XFER_CIPHERING;
  4570. preq_info->req_len = areq->cryptlen;
  4571. _qce_sps_iovec_count_init(pce_dev, req_info);
  4572. if (pce_dev->support_cmd_dscr && cmdlistinfo)
  4573. _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
  4574. &pce_sps_data->in_transfer);
  4575. if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->cryptlen,
  4576. &pce_sps_data->in_transfer))
  4577. goto bad;
  4578. _qce_set_flag(&pce_sps_data->in_transfer,
  4579. SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
  4580. if (pce_dev->no_get_around)
  4581. _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
  4582. &pce_sps_data->cmdlistptr.unlock_all_pipes,
  4583. &pce_sps_data->in_transfer);
  4584. if (_qce_sps_add_sg_data(pce_dev, areq->dst, areq->cryptlen,
  4585. &pce_sps_data->out_transfer))
  4586. goto bad;
  4587. if (pce_dev->no_get_around || areq->cryptlen <= SPS_MAX_PKT_SIZE) {
  4588. pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
  4589. if (_qce_sps_add_data(
  4590. GET_PHYS_ADDR(pce_sps_data->result_dump),
  4591. CRYPTO_RESULT_DUMP_SIZE,
  4592. &pce_sps_data->out_transfer))
  4593. goto bad;
  4594. } else {
  4595. pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
  4596. }
  4597. select_mode(pce_dev, preq_info);
  4598. rc = _qce_sps_transfer(pce_dev, req_info);
  4599. cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
  4600. if (rc)
  4601. goto bad;
  4602. return 0;
  4603. bad:
  4604. if (areq->src != areq->dst) {
  4605. if (preq_info->dst_nents) {
  4606. qce_dma_unmap_sg(pce_dev->pdev, areq->dst,
  4607. preq_info->dst_nents, DMA_FROM_DEVICE);
  4608. }
  4609. }
  4610. if (preq_info->src_nents) {
  4611. qce_dma_unmap_sg(pce_dev->pdev, areq->src,
  4612. preq_info->src_nents,
  4613. (areq->src == areq->dst) ?
  4614. DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
  4615. }
  4616. qce_free_req_info(pce_dev, req_info, false);
  4617. return rc;
  4618. }
  4619. EXPORT_SYMBOL(qce_ablk_cipher_req);
  4620. int qce_process_sha_req(void *handle, struct qce_sha_req *sreq)
  4621. {
  4622. struct qce_device *pce_dev = (struct qce_device *) handle;
  4623. int rc;
  4624. struct ahash_request *areq;
  4625. struct qce_cmdlist_info *cmdlistinfo = NULL;
  4626. int req_info = -1;
  4627. struct ce_sps_data *pce_sps_data;
  4628. struct ce_request_info *preq_info;
  4629. bool is_dummy = false;
  4630. if (!sreq) {
  4631. sreq = &(pce_dev->dummyreq.sreq);
  4632. req_info = DUMMY_REQ_INDEX;
  4633. is_dummy = true;
  4634. } else {
  4635. req_info = qce_alloc_req_info(pce_dev);
  4636. if (req_info < 0)
  4637. return -EBUSY;
  4638. }
  4639. areq = (struct ahash_request *)sreq->areq;
  4640. preq_info = &pce_dev->ce_request_info[req_info];
  4641. pce_sps_data = &preq_info->ce_sps;
  4642. preq_info->src_nents = count_sg(sreq->src, sreq->size);
  4643. qce_dma_map_sg(pce_dev->pdev, sreq->src, preq_info->src_nents,
  4644. DMA_TO_DEVICE);
  4645. if (pce_dev->support_cmd_dscr) {
  4646. cmdlistinfo = _ce_get_hash_cmdlistinfo(pce_dev, req_info, sreq);
  4647. if (cmdlistinfo == NULL) {
  4648. pr_err("Unsupported hash algorithm %d\n", sreq->alg);
  4649. qce_free_req_info(pce_dev, req_info, false);
  4650. return -EINVAL;
  4651. }
  4652. rc = _ce_setup_hash(pce_dev, sreq, cmdlistinfo);
  4653. } else {
  4654. rc = _ce_setup_hash_direct(pce_dev, sreq);
  4655. }
  4656. if (rc < 0)
  4657. goto bad;
  4658. preq_info->areq = areq;
  4659. preq_info->qce_cb = sreq->qce_cb;
  4660. /* setup xfer type for producer callback handling */
  4661. preq_info->xfer_type = QCE_XFER_HASHING;
  4662. preq_info->req_len = sreq->size;
  4663. _qce_sps_iovec_count_init(pce_dev, req_info);
  4664. if (pce_dev->support_cmd_dscr && cmdlistinfo)
  4665. _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
  4666. &pce_sps_data->in_transfer);
  4667. if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes,
  4668. &pce_sps_data->in_transfer))
  4669. goto bad;
  4670. /* always ensure there is input data. ZLT does not work for bam-ndp */
  4671. if (!areq->nbytes)
  4672. _qce_sps_add_data(
  4673. GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
  4674. pce_dev->ce_bam_info.ce_burst_size,
  4675. &pce_sps_data->in_transfer);
  4676. _qce_set_flag(&pce_sps_data->in_transfer,
  4677. SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
  4678. if (pce_dev->no_get_around)
  4679. _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
  4680. &pce_sps_data->cmdlistptr.unlock_all_pipes,
  4681. &pce_sps_data->in_transfer);
  4682. if (_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
  4683. CRYPTO_RESULT_DUMP_SIZE,
  4684. &pce_sps_data->out_transfer))
  4685. goto bad;
  4686. if (is_dummy) {
  4687. _qce_set_flag(&pce_sps_data->out_transfer, SPS_IOVEC_FLAG_INT);
  4688. rc = _qce_sps_transfer(pce_dev, req_info);
  4689. } else {
  4690. select_mode(pce_dev, preq_info);
  4691. rc = _qce_sps_transfer(pce_dev, req_info);
  4692. cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
  4693. }
  4694. if (rc)
  4695. goto bad;
  4696. return 0;
  4697. bad:
  4698. if (preq_info->src_nents) {
  4699. qce_dma_unmap_sg(pce_dev->pdev, sreq->src,
  4700. preq_info->src_nents, DMA_TO_DEVICE);
  4701. }
  4702. qce_free_req_info(pce_dev, req_info, false);
  4703. return rc;
  4704. }
  4705. EXPORT_SYMBOL(qce_process_sha_req);
/*
 * qce_f8_req() - queue a single-packet OTA F8 (confidentiality) request.
 * @handle: qce device handle returned by qce_open().
 * @req:    F8 request (KASUMI or SNOW3G).
 * @cookie: opaque pointer handed back to @qce_cb on completion.
 * @qce_cb: completion callback.
 *
 * Key-stream mode (data_in == NULL) is explicitly rejected. data_in and
 * data_out may alias for in-place ciphering (mapped DMA_BIDIRECTIONAL).
 * The descriptor order below (LOCK cmd, input data, EOT|NWD, UNLOCK cmd,
 * output data, result dump) matches the engine's expectations — do not
 * reorder.
 *
 * Returns 0 on success, -EBUSY when no request slot is free, -EINVAL for
 * unsupported algorithm/bearer, negative errno otherwise.
 */
int qce_f8_req(void *handle, struct qce_f8_req *req,
			void *cookie, qce_comp_func_ptr_t qce_cb)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	bool key_stream_mode;
	dma_addr_t dst;
	int rc;
	struct qce_cmdlist_info *cmdlistinfo;
	int req_info = -1;
	struct ce_request_info *preq_info;
	struct ce_sps_data *pce_sps_data;

	req_info = qce_alloc_req_info(pce_dev);
	if (req_info < 0)
		return -EBUSY;
	preq_info = &pce_dev->ce_request_info[req_info];
	pce_sps_data = &preq_info->ce_sps;

	/* pick the pre-built command list for the requested algorithm */
	switch (req->algorithm) {
	case QCE_OTA_ALGO_KASUMI:
		cmdlistinfo = &pce_sps_data->cmdlistptr.f8_kasumi;
		break;
	case QCE_OTA_ALGO_SNOW3G:
		cmdlistinfo = &pce_sps_data->cmdlistptr.f8_snow3g;
		break;
	default:
		qce_free_req_info(pce_dev, req_info, false);
		return -EINVAL;
	}

	key_stream_mode = (req->data_in == NULL);

	/* don't support key stream mode */
	if (key_stream_mode || (req->bearer >= QCE_OTA_MAX_BEARER)) {
		qce_free_req_info(pce_dev, req_info, false);
		return -EINVAL;
	}

	/* F8 cipher input */
	preq_info->phy_ota_src = dma_map_single(pce_dev->pdev,
					req->data_in, req->data_len,
					(req->data_in == req->data_out) ?
					DMA_BIDIRECTIONAL : DMA_TO_DEVICE);

	/* F8 cipher output */
	if (req->data_in != req->data_out) {
		dst = dma_map_single(pce_dev->pdev, req->data_out,
				req->data_len, DMA_FROM_DEVICE);
		preq_info->phy_ota_dst = dst;
	} else {
		/* in place ciphering */
		dst = preq_info->phy_ota_src;
		preq_info->phy_ota_dst = 0;
	}
	preq_info->ota_size = req->data_len;

	/* set up crypto device */
	if (pce_dev->support_cmd_dscr)
		rc = _ce_f8_setup(pce_dev, req, key_stream_mode, 1, 0,
				req->data_len, cmdlistinfo);
	else
		rc = _ce_f8_setup_direct(pce_dev, req, key_stream_mode, 1, 0,
				req->data_len);
	if (rc < 0)
		goto bad;

	/* setup for callback, and issue command to sps */
	preq_info->areq = cookie;
	preq_info->qce_cb = qce_cb;

	/* setup xfer type for producer callback handling */
	preq_info->xfer_type = QCE_XFER_F8;
	preq_info->req_len = req->data_len;

	_qce_sps_iovec_count_init(pce_dev, req_info);
	if (pce_dev->support_cmd_dscr)
		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
					&pce_sps_data->in_transfer);

	_qce_sps_add_data((uint32_t)preq_info->phy_ota_src, req->data_len,
					&pce_sps_data->in_transfer);

	_qce_set_flag(&pce_sps_data->in_transfer,
			SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);

	_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
			&pce_sps_data->cmdlistptr.unlock_all_pipes,
			&pce_sps_data->in_transfer);

	_qce_sps_add_data((uint32_t)dst, req->data_len,
					&pce_sps_data->out_transfer);

	_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
					CRYPTO_RESULT_DUMP_SIZE,
					&pce_sps_data->out_transfer);

	select_mode(pce_dev, preq_info);
	rc = _qce_sps_transfer(pce_dev, req_info);
	cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
	if (rc)
		goto bad;
	return 0;

bad:
	/* phy_ota_dst is 0 for in-place requests, so only unmap when set */
	if (preq_info->phy_ota_dst != 0)
		dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst,
				req->data_len, DMA_FROM_DEVICE);
	if (preq_info->phy_ota_src != 0)
		dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
				req->data_len,
				(req->data_in == req->data_out) ?
					DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	qce_free_req_info(pce_dev, req_info, false);
	return rc;
}
EXPORT_SYMBOL(qce_f8_req);
  4805. int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *mreq,
  4806. void *cookie, qce_comp_func_ptr_t qce_cb)
  4807. {
  4808. struct qce_device *pce_dev = (struct qce_device *) handle;
  4809. uint16_t num_pkt = mreq->num_pkt;
  4810. uint16_t cipher_start = mreq->cipher_start;
  4811. uint16_t cipher_size = mreq->cipher_size;
  4812. struct qce_f8_req *req = &mreq->qce_f8_req;
  4813. uint32_t total;
  4814. dma_addr_t dst = 0;
  4815. int rc = 0;
  4816. struct qce_cmdlist_info *cmdlistinfo;
  4817. int req_info = -1;
  4818. struct ce_request_info *preq_info;
  4819. struct ce_sps_data *pce_sps_data;
  4820. req_info = qce_alloc_req_info(pce_dev);
  4821. if (req_info < 0)
  4822. return -EBUSY;
  4823. preq_info = &pce_dev->ce_request_info[req_info];
  4824. pce_sps_data = &preq_info->ce_sps;
  4825. switch (req->algorithm) {
  4826. case QCE_OTA_ALGO_KASUMI:
  4827. cmdlistinfo = &pce_sps_data->cmdlistptr.f8_kasumi;
  4828. break;
  4829. case QCE_OTA_ALGO_SNOW3G:
  4830. cmdlistinfo = &pce_sps_data->cmdlistptr.f8_snow3g;
  4831. break;
  4832. default:
  4833. qce_free_req_info(pce_dev, req_info, false);
  4834. return -EINVAL;
  4835. }
  4836. total = num_pkt * req->data_len;
  4837. /* F8 cipher input */
  4838. preq_info->phy_ota_src = dma_map_single(pce_dev->pdev,
  4839. req->data_in, total,
  4840. (req->data_in == req->data_out) ?
  4841. DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
  4842. /* F8 cipher output */
  4843. if (req->data_in != req->data_out) {
  4844. dst = dma_map_single(pce_dev->pdev, req->data_out, total,
  4845. DMA_FROM_DEVICE);
  4846. preq_info->phy_ota_dst = dst;
  4847. } else {
  4848. /* in place ciphering */
  4849. dst = preq_info->phy_ota_src;
  4850. preq_info->phy_ota_dst = 0;
  4851. }
  4852. preq_info->ota_size = total;
  4853. /* set up crypto device */
  4854. if (pce_dev->support_cmd_dscr)
  4855. rc = _ce_f8_setup(pce_dev, req, false, num_pkt, cipher_start,
  4856. cipher_size, cmdlistinfo);
  4857. else
  4858. rc = _ce_f8_setup_direct(pce_dev, req, false, num_pkt,
  4859. cipher_start, cipher_size);
  4860. if (rc)
  4861. goto bad;
  4862. /* setup for callback, and issue command to sps */
  4863. preq_info->areq = cookie;
  4864. preq_info->qce_cb = qce_cb;
  4865. /* setup xfer type for producer callback handling */
  4866. preq_info->xfer_type = QCE_XFER_F8;
  4867. preq_info->req_len = total;
  4868. _qce_sps_iovec_count_init(pce_dev, req_info);
  4869. if (pce_dev->support_cmd_dscr)
  4870. _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
  4871. &pce_sps_data->in_transfer);
  4872. _qce_sps_add_data((uint32_t)preq_info->phy_ota_src, total,
  4873. &pce_sps_data->in_transfer);
  4874. _qce_set_flag(&pce_sps_data->in_transfer,
  4875. SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
  4876. _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
  4877. &pce_sps_data->cmdlistptr.unlock_all_pipes,
  4878. &pce_sps_data->in_transfer);
  4879. _qce_sps_add_data((uint32_t)dst, total,
  4880. &pce_sps_data->out_transfer);
  4881. _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
  4882. CRYPTO_RESULT_DUMP_SIZE,
  4883. &pce_sps_data->out_transfer);
  4884. select_mode(pce_dev, preq_info);
  4885. rc = _qce_sps_transfer(pce_dev, req_info);
  4886. cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
  4887. if (rc == 0)
  4888. return 0;
  4889. bad:
  4890. if (preq_info->phy_ota_dst)
  4891. dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst, total,
  4892. DMA_FROM_DEVICE);
  4893. dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src, total,
  4894. (req->data_in == req->data_out) ?
  4895. DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
  4896. qce_free_req_info(pce_dev, req_info, false);
  4897. return rc;
  4898. }
  4899. EXPORT_SYMBOL(qce_f8_multi_pkt_req);
/*
 * qce_f9_req() - queue an OTA F9 (integrity/MAC) request.
 * @handle: qce device handle returned by qce_open().
 * @req:    F9 request (KASUMI or SNOW3G) over req->message / req->msize.
 * @cookie: opaque pointer handed back to @qce_cb on completion.
 * @qce_cb: completion callback; the computed MAC is read from the result
 *          dump by the producer callback path.
 *
 * Returns 0 on success, -EBUSY when no request slot is free, -EINVAL for
 * an unsupported algorithm, negative errno otherwise.
 */
int qce_f9_req(void *handle, struct qce_f9_req *req, void *cookie,
			qce_comp_func_ptr_t qce_cb)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	int rc;
	struct qce_cmdlist_info *cmdlistinfo;
	int req_info = -1;
	struct ce_sps_data *pce_sps_data;
	struct ce_request_info *preq_info;

	req_info = qce_alloc_req_info(pce_dev);
	if (req_info < 0)
		return -EBUSY;
	preq_info = &pce_dev->ce_request_info[req_info];
	pce_sps_data = &preq_info->ce_sps;

	/* pick the pre-built command list for the requested algorithm */
	switch (req->algorithm) {
	case QCE_OTA_ALGO_KASUMI:
		cmdlistinfo = &pce_sps_data->cmdlistptr.f9_kasumi;
		break;
	case QCE_OTA_ALGO_SNOW3G:
		cmdlistinfo = &pce_sps_data->cmdlistptr.f9_snow3g;
		break;
	default:
		qce_free_req_info(pce_dev, req_info, false);
		return -EINVAL;
	}

	/* map the message for device input only; output is the result dump */
	preq_info->phy_ota_src = dma_map_single(pce_dev->pdev, req->message,
			req->msize, DMA_TO_DEVICE);

	preq_info->ota_size = req->msize;

	if (pce_dev->support_cmd_dscr)
		rc = _ce_f9_setup(pce_dev, req, cmdlistinfo);
	else
		rc = _ce_f9_setup_direct(pce_dev, req);
	if (rc < 0)
		goto bad;

	/* setup for callback, and issue command to sps */
	preq_info->areq = cookie;
	preq_info->qce_cb = qce_cb;

	/* setup xfer type for producer callback handling */
	preq_info->xfer_type = QCE_XFER_F9;
	preq_info->req_len = req->msize;

	_qce_sps_iovec_count_init(pce_dev, req_info);
	if (pce_dev->support_cmd_dscr)
		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
					&pce_sps_data->in_transfer);
	_qce_sps_add_data((uint32_t)preq_info->phy_ota_src, req->msize,
					&pce_sps_data->in_transfer);
	_qce_set_flag(&pce_sps_data->in_transfer,
				SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);

	_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
			&pce_sps_data->cmdlistptr.unlock_all_pipes,
			&pce_sps_data->in_transfer);

	_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
					CRYPTO_RESULT_DUMP_SIZE,
					&pce_sps_data->out_transfer);

	select_mode(pce_dev, preq_info);
	rc = _qce_sps_transfer(pce_dev, req_info);
	cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
	if (rc)
		goto bad;
	return 0;

bad:
	dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
				req->msize, DMA_TO_DEVICE);
	qce_free_req_info(pce_dev, req_info, false);
	return rc;
}
EXPORT_SYMBOL(qce_f9_req);
  4966. EXPORT_SYMBOL(qce_f9_req);
  4967. static int __qce_get_device_tree_data(struct platform_device *pdev,
  4968. struct qce_device *pce_dev)
  4969. {
  4970. struct resource *resource;
  4971. int rc = 0;
  4972. pce_dev->is_shared = of_property_read_bool((&pdev->dev)->of_node,
  4973. "qcom,ce-hw-shared");
  4974. pce_dev->support_hw_key = of_property_read_bool((&pdev->dev)->of_node,
  4975. "qcom,ce-hw-key");
  4976. pce_dev->use_sw_aes_cbc_ecb_ctr_algo =
  4977. of_property_read_bool((&pdev->dev)->of_node,
  4978. "qcom,use-sw-aes-cbc-ecb-ctr-algo");
  4979. pce_dev->use_sw_aead_algo =
  4980. of_property_read_bool((&pdev->dev)->of_node,
  4981. "qcom,use-sw-aead-algo");
  4982. pce_dev->use_sw_aes_xts_algo =
  4983. of_property_read_bool((&pdev->dev)->of_node,
  4984. "qcom,use-sw-aes-xts-algo");
  4985. pce_dev->use_sw_ahash_algo =
  4986. of_property_read_bool((&pdev->dev)->of_node,
  4987. "qcom,use-sw-ahash-algo");
  4988. pce_dev->use_sw_hmac_algo =
  4989. of_property_read_bool((&pdev->dev)->of_node,
  4990. "qcom,use-sw-hmac-algo");
  4991. pce_dev->use_sw_aes_ccm_algo =
  4992. of_property_read_bool((&pdev->dev)->of_node,
  4993. "qcom,use-sw-aes-ccm-algo");
  4994. pce_dev->support_clk_mgmt_sus_res = of_property_read_bool(
  4995. (&pdev->dev)->of_node, "qcom,clk-mgmt-sus-res");
  4996. pce_dev->support_only_core_src_clk = of_property_read_bool(
  4997. (&pdev->dev)->of_node, "qcom,support-core-clk-only");
  4998. pce_dev->request_bw_before_clk = of_property_read_bool(
  4999. (&pdev->dev)->of_node, "qcom,request-bw-before-clk");
  5000. if (of_property_read_u32((&pdev->dev)->of_node,
  5001. "qcom,bam-pipe-pair",
  5002. &pce_dev->ce_bam_info.pipe_pair_index)) {
  5003. pr_err("Fail to get bam pipe pair information.\n");
  5004. return -EINVAL;
  5005. }
  5006. if (of_property_read_u32((&pdev->dev)->of_node,
  5007. "qcom,ce-device",
  5008. &pce_dev->ce_bam_info.ce_device)) {
  5009. pr_err("Fail to get CE device information.\n");
  5010. return -EINVAL;
  5011. }
  5012. if (of_property_read_u32((&pdev->dev)->of_node,
  5013. "qcom,ce-hw-instance",
  5014. &pce_dev->ce_bam_info.ce_hw_instance)) {
  5015. pr_err("Fail to get CE hw instance information.\n");
  5016. return -EINVAL;
  5017. }
  5018. if (of_property_read_u32((&pdev->dev)->of_node,
  5019. "qcom,bam-ee",
  5020. &pce_dev->ce_bam_info.bam_ee)) {
  5021. pr_info("BAM Apps EE is not defined, setting to default 1\n");
  5022. pce_dev->ce_bam_info.bam_ee = 1;
  5023. }
  5024. if (of_property_read_u32((&pdev->dev)->of_node,
  5025. "qcom,ce-opp-freq",
  5026. &pce_dev->ce_opp_freq_hz)) {
  5027. pr_info("CE operating frequency is not defined, setting to default 100MHZ\n");
  5028. pce_dev->ce_opp_freq_hz = CE_CLK_100MHZ;
  5029. }
  5030. if (of_property_read_bool((&pdev->dev)->of_node, "qcom,smmu-s1-enable"))
  5031. pce_dev->enable_s1_smmu = true;
  5032. pce_dev->no_clock_support = of_property_read_bool((&pdev->dev)->of_node,
  5033. "qcom,no-clock-support");
  5034. pce_dev->ce_bam_info.dest_pipe_index =
  5035. 2 * pce_dev->ce_bam_info.pipe_pair_index;
  5036. pce_dev->ce_bam_info.src_pipe_index =
  5037. pce_dev->ce_bam_info.dest_pipe_index + 1;
  5038. resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
  5039. "crypto-base");
  5040. if (resource) {
  5041. pce_dev->phy_iobase = resource->start;
  5042. pce_dev->iobase = ioremap(resource->start,
  5043. resource_size(resource));
  5044. if (!pce_dev->iobase) {
  5045. pr_err("Can not map CRYPTO io memory\n");
  5046. return -ENOMEM;
  5047. }
  5048. } else {
  5049. pr_err("CRYPTO HW mem unavailable.\n");
  5050. return -ENODEV;
  5051. }
  5052. resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
  5053. "crypto-bam-base");
  5054. if (resource) {
  5055. pce_dev->bam_mem = resource->start;
  5056. pce_dev->bam_mem_size = resource_size(resource);
  5057. } else {
  5058. pr_err("CRYPTO BAM mem unavailable.\n");
  5059. rc = -ENODEV;
  5060. goto err_getting_bam_info;
  5061. }
  5062. resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
  5063. if (resource) {
  5064. pce_dev->ce_bam_info.bam_irq = resource->start;
  5065. } else {
  5066. pr_err("CRYPTO BAM IRQ unavailable.\n");
  5067. goto err_dev;
  5068. }
  5069. return rc;
  5070. err_dev:
  5071. if (pce_dev->ce_bam_info.bam_iobase)
  5072. iounmap(pce_dev->ce_bam_info.bam_iobase);
  5073. err_getting_bam_info:
  5074. if (pce_dev->iobase)
  5075. iounmap(pce_dev->iobase);
  5076. return rc;
  5077. }
/*
 * __qce_init_clk() - acquire the CE clock handles and set the core src rate.
 * @pce_dev: device state; reads no_clock_support, request_bw_before_clk,
 *           support_only_core_src_clk and ce_opp_freq_hz.
 *
 * Clock set acquired: core_clk_src (optional unless
 * support_only_core_src_clk), then core_clk, iface_clk and bus_clk.
 * Handles that are not acquired are left NULL so that later
 * enable/disable/deinit code can test them. Uses goto-unwind so every
 * clk_get() is balanced by clk_put() on failure.
 *
 * Returns 0 on success or a negative errno from clk_get()/clk_set_rate().
 */
static int __qce_init_clk(struct qce_device *pce_dev)
{
	int rc = 0;

	if (pce_dev->no_clock_support) {
		pr_debug("No clock support defined in dts\n");
		return rc;
	}

	pce_dev->ce_core_src_clk = clk_get(pce_dev->pdev, "core_clk_src");
	if (!IS_ERR(pce_dev->ce_core_src_clk)) {
		/* when BW must be voted first, the rate is set later */
		if (pce_dev->request_bw_before_clk)
			goto skip_set_rate;

		rc = clk_set_rate(pce_dev->ce_core_src_clk,
						pce_dev->ce_opp_freq_hz);
		if (rc) {
			pr_err("Unable to set the core src clk @%uMhz.\n",
					pce_dev->ce_opp_freq_hz/CE_CLK_DIV);
			goto exit_put_core_src_clk;
		}
	} else {
		if (pce_dev->support_only_core_src_clk) {
			/* this clock is mandatory in core-src-only mode */
			rc = PTR_ERR(pce_dev->ce_core_src_clk);
			pce_dev->ce_core_src_clk = NULL;
			pr_err("Unable to get CE core src clk\n");
			return rc;
		}
		pr_warn("Unable to get CE core src clk, set to NULL\n");
		pce_dev->ce_core_src_clk = NULL;
	}

skip_set_rate:
	if (pce_dev->support_only_core_src_clk) {
		pce_dev->ce_core_clk = NULL;
		pce_dev->ce_clk = NULL;
		pce_dev->ce_bus_clk = NULL;
	} else {
		pce_dev->ce_core_clk = clk_get(pce_dev->pdev, "core_clk");
		if (IS_ERR(pce_dev->ce_core_clk)) {
			rc = PTR_ERR(pce_dev->ce_core_clk);
			pr_err("Unable to get CE core clk\n");
			goto exit_put_core_src_clk;
		}
		pce_dev->ce_clk = clk_get(pce_dev->pdev, "iface_clk");
		if (IS_ERR(pce_dev->ce_clk)) {
			rc = PTR_ERR(pce_dev->ce_clk);
			pr_err("Unable to get CE interface clk\n");
			goto exit_put_core_clk;
		}
		pce_dev->ce_bus_clk = clk_get(pce_dev->pdev, "bus_clk");
		if (IS_ERR(pce_dev->ce_bus_clk)) {
			rc = PTR_ERR(pce_dev->ce_bus_clk);
			pr_err("Unable to get CE BUS interface clk\n");
			goto exit_put_iface_clk;
		}
	}
	return rc;

exit_put_iface_clk:
	if (pce_dev->ce_clk)
		clk_put(pce_dev->ce_clk);
exit_put_core_clk:
	if (pce_dev->ce_core_clk)
		clk_put(pce_dev->ce_core_clk);
exit_put_core_src_clk:
	if (pce_dev->ce_core_src_clk)
		clk_put(pce_dev->ce_core_src_clk);
	pr_err("Unable to init CE clks, rc = %d\n", rc);
	return rc;
}
  5144. static void __qce_deinit_clk(struct qce_device *pce_dev)
  5145. {
  5146. if (pce_dev->no_clock_support) {
  5147. pr_debug("No clock support defined in dts\n");
  5148. return;
  5149. }
  5150. if (pce_dev->ce_bus_clk)
  5151. clk_put(pce_dev->ce_bus_clk);
  5152. if (pce_dev->ce_clk)
  5153. clk_put(pce_dev->ce_clk);
  5154. if (pce_dev->ce_core_clk)
  5155. clk_put(pce_dev->ce_core_clk);
  5156. if (pce_dev->ce_core_src_clk)
  5157. clk_put(pce_dev->ce_core_src_clk);
  5158. }
  5159. int qce_enable_clk(void *handle)
  5160. {
  5161. struct qce_device *pce_dev = (struct qce_device *)handle;
  5162. int rc = 0;
  5163. if (pce_dev->no_clock_support) {
  5164. pr_debug("No clock support defined in dts\n");
  5165. return rc;
  5166. }
  5167. if (pce_dev->ce_core_src_clk) {
  5168. rc = clk_prepare_enable(pce_dev->ce_core_src_clk);
  5169. if (rc) {
  5170. pr_err("Unable to enable/prepare CE core src clk\n");
  5171. return rc;
  5172. }
  5173. }
  5174. if (pce_dev->support_only_core_src_clk)
  5175. return rc;
  5176. if (pce_dev->ce_core_clk) {
  5177. rc = clk_prepare_enable(pce_dev->ce_core_clk);
  5178. if (rc) {
  5179. pr_err("Unable to enable/prepare CE core clk\n");
  5180. goto exit_disable_core_src_clk;
  5181. }
  5182. }
  5183. if (pce_dev->ce_clk) {
  5184. rc = clk_prepare_enable(pce_dev->ce_clk);
  5185. if (rc) {
  5186. pr_err("Unable to enable/prepare CE iface clk\n");
  5187. goto exit_disable_core_clk;
  5188. }
  5189. }
  5190. if (pce_dev->ce_bus_clk) {
  5191. rc = clk_prepare_enable(pce_dev->ce_bus_clk);
  5192. if (rc) {
  5193. pr_err("Unable to enable/prepare CE BUS clk\n");
  5194. goto exit_disable_ce_clk;
  5195. }
  5196. }
  5197. return rc;
  5198. exit_disable_ce_clk:
  5199. if (pce_dev->ce_clk)
  5200. clk_disable_unprepare(pce_dev->ce_clk);
  5201. exit_disable_core_clk:
  5202. if (pce_dev->ce_core_clk)
  5203. clk_disable_unprepare(pce_dev->ce_core_clk);
  5204. exit_disable_core_src_clk:
  5205. if (pce_dev->ce_core_src_clk)
  5206. clk_disable_unprepare(pce_dev->ce_core_src_clk);
  5207. return rc;
  5208. }
  5209. EXPORT_SYMBOL(qce_enable_clk);
  5210. int qce_disable_clk(void *handle)
  5211. {
  5212. struct qce_device *pce_dev = (struct qce_device *) handle;
  5213. if (pce_dev->no_clock_support) {
  5214. pr_debug("No clock support defined in dts\n");
  5215. return 0;
  5216. }
  5217. if (pce_dev->ce_bus_clk)
  5218. clk_disable_unprepare(pce_dev->ce_bus_clk);
  5219. if (pce_dev->ce_clk)
  5220. clk_disable_unprepare(pce_dev->ce_clk);
  5221. if (pce_dev->ce_core_clk)
  5222. clk_disable_unprepare(pce_dev->ce_core_clk);
  5223. if (pce_dev->ce_core_src_clk)
  5224. clk_disable_unprepare(pce_dev->ce_core_src_clk);
  5225. return 0;
  5226. }
  5227. EXPORT_SYMBOL(qce_disable_clk);
  5228. /* dummy req setup */
  5229. static int setup_dummy_req(struct qce_device *pce_dev)
  5230. {
  5231. char *input =
  5232. "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopqopqrpqrs";
  5233. int len = DUMMY_REQ_DATA_LEN;
  5234. memcpy(pce_dev->dummyreq_in_buf, input, len);
  5235. sg_init_one(&pce_dev->dummyreq.sg, pce_dev->dummyreq_in_buf, len);
  5236. pce_dev->dummyreq.sreq.alg = QCE_HASH_SHA1;
  5237. pce_dev->dummyreq.sreq.qce_cb = qce_dummy_complete;
  5238. pce_dev->dummyreq.sreq.src = &pce_dev->dummyreq.sg;
  5239. pce_dev->dummyreq.sreq.auth_data[0] = 0;
  5240. pce_dev->dummyreq.sreq.auth_data[1] = 0;
  5241. pce_dev->dummyreq.sreq.auth_data[2] = 0;
  5242. pce_dev->dummyreq.sreq.auth_data[3] = 0;
  5243. pce_dev->dummyreq.sreq.first_blk = true;
  5244. pce_dev->dummyreq.sreq.last_blk = true;
  5245. pce_dev->dummyreq.sreq.size = len;
  5246. pce_dev->dummyreq.sreq.areq = &pce_dev->dummyreq.areq;
  5247. pce_dev->dummyreq.sreq.flags = 0;
  5248. pce_dev->dummyreq.sreq.authkey = NULL;
  5249. pce_dev->dummyreq.areq.src = pce_dev->dummyreq.sreq.src;
  5250. pce_dev->dummyreq.areq.nbytes = pce_dev->dummyreq.sreq.size;
  5251. return 0;
  5252. }
  5253. static int qce_smmu_init(struct qce_device *pce_dev)
  5254. {
  5255. struct device *dev = pce_dev->pdev;
  5256. if (!dev->dma_parms) {
  5257. dev->dma_parms = devm_kzalloc(dev,
  5258. sizeof(*dev->dma_parms), GFP_KERNEL);
  5259. if (!dev->dma_parms)
  5260. return -ENOMEM;
  5261. }
  5262. dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
  5263. dma_set_seg_boundary(dev, (unsigned long)DMA_BIT_MASK(64));
  5264. return 0;
  5265. }
  5266. /* crypto engine open function. */
  5267. void *qce_open(struct platform_device *pdev, int *rc)
  5268. {
  5269. struct qce_device *pce_dev;
  5270. int i;
  5271. static int pcedev_no = 1;
  5272. pce_dev = kzalloc(sizeof(struct qce_device), GFP_KERNEL);
  5273. if (!pce_dev) {
  5274. *rc = -ENOMEM;
  5275. pr_err("Can not allocate memory: %d\n", *rc);
  5276. return NULL;
  5277. }
  5278. pce_dev->pdev = &pdev->dev;
  5279. mutex_lock(&qce_iomap_mutex);
  5280. if (pdev->dev.of_node) {
  5281. *rc = __qce_get_device_tree_data(pdev, pce_dev);
  5282. if (*rc)
  5283. goto err_pce_dev;
  5284. } else {
  5285. *rc = -EINVAL;
  5286. pr_err("Device Node not found.\n");
  5287. goto err_pce_dev;
  5288. }
  5289. if (pce_dev->enable_s1_smmu) {
  5290. if (qce_smmu_init(pce_dev)) {
  5291. *rc = -EIO;
  5292. goto err_pce_dev;
  5293. }
  5294. }
  5295. for (i = 0; i < MAX_QCE_ALLOC_BAM_REQ; i++)
  5296. atomic_set(&pce_dev->ce_request_info[i].in_use, false);
  5297. pce_dev->ce_request_index = 0;
  5298. pce_dev->memsize = 10 * PAGE_SIZE * MAX_QCE_ALLOC_BAM_REQ;
  5299. pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev,
  5300. pce_dev->memsize, &pce_dev->coh_pmem, GFP_KERNEL);
  5301. if (pce_dev->coh_vmem == NULL) {
  5302. *rc = -ENOMEM;
  5303. pr_err("Can not allocate coherent memory for sps data\n");
  5304. goto err_iobase;
  5305. }
  5306. pce_dev->iovec_memsize = TOTAL_IOVEC_SPACE_PER_PIPE *
  5307. MAX_QCE_ALLOC_BAM_REQ * 2;
  5308. pce_dev->iovec_vmem = kzalloc(pce_dev->iovec_memsize, GFP_KERNEL);
  5309. if (pce_dev->iovec_vmem == NULL)
  5310. goto err_mem;
  5311. pce_dev->dummyreq_in_buf = kzalloc(DUMMY_REQ_DATA_LEN, GFP_KERNEL);
  5312. if (pce_dev->dummyreq_in_buf == NULL)
  5313. goto err_mem;
  5314. *rc = __qce_init_clk(pce_dev);
  5315. if (*rc)
  5316. goto err_mem;
  5317. *rc = qce_enable_clk(pce_dev);
  5318. if (*rc)
  5319. goto err_enable_clk;
  5320. if (_probe_ce_engine(pce_dev)) {
  5321. *rc = -ENXIO;
  5322. goto err;
  5323. }
  5324. *rc = 0;
  5325. qce_init_ce_cfg_val(pce_dev);
  5326. *rc = qce_sps_init(pce_dev);
  5327. if (*rc)
  5328. goto err;
  5329. qce_setup_ce_sps_data(pce_dev);
  5330. qce_disable_clk(pce_dev);
  5331. setup_dummy_req(pce_dev);
  5332. atomic_set(&pce_dev->no_of_queued_req, 0);
  5333. pce_dev->mode = IN_INTERRUPT_MODE;
  5334. timer_setup(&(pce_dev->timer), qce_multireq_timeout, 0);
  5335. //pce_dev->timer.function = qce_multireq_timeout;
  5336. //pce_dev->timer.data = (unsigned long)pce_dev;
  5337. pce_dev->timer.expires = jiffies + DELAY_IN_JIFFIES;
  5338. pce_dev->intr_cadence = 0;
  5339. pce_dev->dev_no = pcedev_no;
  5340. pcedev_no++;
  5341. pce_dev->owner = QCE_OWNER_NONE;
  5342. mutex_unlock(&qce_iomap_mutex);
  5343. return pce_dev;
  5344. err:
  5345. qce_disable_clk(pce_dev);
  5346. err_enable_clk:
  5347. __qce_deinit_clk(pce_dev);
  5348. err_mem:
  5349. kfree(pce_dev->dummyreq_in_buf);
  5350. kfree(pce_dev->iovec_vmem);
  5351. if (pce_dev->coh_vmem)
  5352. dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
  5353. pce_dev->coh_vmem, pce_dev->coh_pmem);
  5354. err_iobase:
  5355. if (pce_dev->iobase)
  5356. iounmap(pce_dev->iobase);
  5357. err_pce_dev:
  5358. mutex_unlock(&qce_iomap_mutex);
  5359. kfree(pce_dev);
  5360. return NULL;
  5361. }
  5362. EXPORT_SYMBOL(qce_open);
  5363. /* crypto engine close function. */
  5364. int qce_close(void *handle)
  5365. {
  5366. struct qce_device *pce_dev = (struct qce_device *) handle;
  5367. if (handle == NULL)
  5368. return -ENODEV;
  5369. mutex_lock(&qce_iomap_mutex);
  5370. qce_enable_clk(pce_dev);
  5371. qce_sps_exit(pce_dev);
  5372. if (pce_dev->iobase)
  5373. iounmap(pce_dev->iobase);
  5374. if (pce_dev->coh_vmem)
  5375. dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
  5376. pce_dev->coh_vmem, pce_dev->coh_pmem);
  5377. kfree(pce_dev->dummyreq_in_buf);
  5378. kfree(pce_dev->iovec_vmem);
  5379. qce_disable_clk(pce_dev);
  5380. __qce_deinit_clk(pce_dev);
  5381. mutex_unlock(&qce_iomap_mutex);
  5382. kfree(handle);
  5383. return 0;
  5384. }
  5385. EXPORT_SYMBOL(qce_close);
  5386. #define OTA_SUPPORT_MASK (1 << CRYPTO_ENCR_SNOW3G_SEL |\
  5387. 1 << CRYPTO_ENCR_KASUMI_SEL |\
  5388. 1 << CRYPTO_AUTH_SNOW3G_SEL |\
  5389. 1 << CRYPTO_AUTH_KASUMI_SEL)
  5390. int qce_hw_support(void *handle, struct ce_hw_support *ce_support)
  5391. {
  5392. struct qce_device *pce_dev = (struct qce_device *)handle;
  5393. if (ce_support == NULL)
  5394. return -EINVAL;
  5395. ce_support->sha1_hmac_20 = false;
  5396. ce_support->sha1_hmac = false;
  5397. ce_support->sha256_hmac = false;
  5398. ce_support->sha_hmac = true;
  5399. ce_support->cmac = true;
  5400. ce_support->aes_key_192 = false;
  5401. ce_support->aes_xts = true;
  5402. if ((pce_dev->engines_avail & OTA_SUPPORT_MASK) == OTA_SUPPORT_MASK)
  5403. ce_support->ota = true;
  5404. else
  5405. ce_support->ota = false;
  5406. ce_support->bam = true;
  5407. ce_support->is_shared = (pce_dev->is_shared == 1) ? true : false;
  5408. ce_support->hw_key = pce_dev->support_hw_key;
  5409. ce_support->aes_ccm = true;
  5410. ce_support->clk_mgmt_sus_res = pce_dev->support_clk_mgmt_sus_res;
  5411. ce_support->req_bw_before_clk = pce_dev->request_bw_before_clk;
  5412. if (pce_dev->ce_bam_info.minor_version)
  5413. ce_support->aligned_only = false;
  5414. else
  5415. ce_support->aligned_only = true;
  5416. ce_support->use_sw_aes_cbc_ecb_ctr_algo =
  5417. pce_dev->use_sw_aes_cbc_ecb_ctr_algo;
  5418. ce_support->use_sw_aead_algo =
  5419. pce_dev->use_sw_aead_algo;
  5420. ce_support->use_sw_aes_xts_algo =
  5421. pce_dev->use_sw_aes_xts_algo;
  5422. ce_support->use_sw_ahash_algo =
  5423. pce_dev->use_sw_ahash_algo;
  5424. ce_support->use_sw_hmac_algo =
  5425. pce_dev->use_sw_hmac_algo;
  5426. ce_support->use_sw_aes_ccm_algo =
  5427. pce_dev->use_sw_aes_ccm_algo;
  5428. ce_support->ce_device = pce_dev->ce_bam_info.ce_device;
  5429. ce_support->ce_hw_instance = pce_dev->ce_bam_info.ce_hw_instance;
  5430. if (pce_dev->no_get_around)
  5431. ce_support->max_request = MAX_QCE_BAM_REQ;
  5432. else
  5433. ce_support->max_request = 1;
  5434. return 0;
  5435. }
  5436. EXPORT_SYMBOL(qce_hw_support);
  5437. void qce_dump_req(void *handle)
  5438. {
  5439. int i;
  5440. bool req_in_use;
  5441. struct qce_device *pce_dev = (struct qce_device *)handle;
  5442. for (i = 0; i < MAX_QCE_BAM_REQ; i++) {
  5443. req_in_use = atomic_read(&pce_dev->ce_request_info[i].in_use);
  5444. pr_info("%s: %d %d\n", __func__, i, req_in_use);
  5445. if (req_in_use)
  5446. _qce_dump_descr_fifos(pce_dev, i);
  5447. }
  5448. }
  5449. EXPORT_SYMBOL(qce_dump_req);
  5450. MODULE_LICENSE("GPL v2");
  5451. MODULE_DESCRIPTION("Crypto Engine driver");