qce50.c 192 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
762786279628062816282628362846285628662876288628962906291629262936294629562966297629862996300630163026303630463056306630763086309631063116312631363146315631663176318631963206321632263236324632563266327632863296330633163326333633463356336633763386339634063416342634363446345634663476348634963506351635263536354635563566357635863596360636163626363636463656366636763686369637063716372637363746375637663776378637963806381638263836384638563866387638863896390639163926393639463956396639763986399640064016402640364046405640664076408640964106411641264136414641564166417641864196420642164226423642464256426642764286429643064316432643364346435643664376438643964406441644264436444644564466447644864496450645164526453645464556456645764586459646064616462646364646465646664676468646964706471647264736474647564766477647864796480648164826483648464856486648764886489649064916492649364946495649664976498649965006501650265036504650565066507650865096510651165126513651465156516651765186519652065216522652365246525652665276528652965306531653265336534653565366537653865396540654165426543654465456546654765486549655065516552655365546555655665576558655965606561656265636564656565666567656865696570657165726573657465756576657765786579658065816582658365846585658665876588658965906591659265936594659565966597659865996600660166026603660466056606660766086609661066116612661366146615661666176618661966206621662266236624662566266627662866296630663166326633663466356636663766386639664066416642664366446645664666476648664966506651665266536654665566566657665866596660666166626663666466656666666766686669667066716672667366746675667666776678667966806681668266836684668566866687668866896690669166926693669466956696669766986699670067016702670367046705670667076708670967106711671267136714671567166717671867196720672167226723672467256726
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * QTI Crypto Engine driver.
  4. *
  5. * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
  6. */
  7. #define pr_fmt(fmt) "QCE50: %s: " fmt, __func__
  8. #include <linux/types.h>
  9. #include <linux/kernel.h>
  10. #include <linux/module.h>
  11. #include <linux/mod_devicetable.h>
  12. #include <linux/device.h>
  13. #include <linux/clk.h>
  14. #include <linux/err.h>
  15. #include <linux/dma-mapping.h>
  16. #include <linux/io.h>
  17. #include <linux/platform_device.h>
  18. #include <linux/spinlock.h>
  19. #include <linux/delay.h>
  20. #include <linux/crypto.h>
  21. #include <linux/bitops.h>
  22. #include "linux/qcrypto.h"
  23. #include <crypto/hash.h>
  24. #include <crypto/sha1.h>
  25. #include <soc/qcom/socinfo.h>
  26. #include <linux/iommu.h>
  27. #include "qce.h"
  28. #include "qce50.h"
  29. #include "qcryptohw_50.h"
  30. #include "qce_ota.h"
  31. #define CRYPTO_SMMU_IOVA_START 0x10000000
  32. #define CRYPTO_SMMU_IOVA_SIZE 0x40000000
  33. #define CRYPTO_CONFIG_RESET 0xE01EF
  34. #define MAX_SPS_DESC_FIFO_SIZE 0xfff0
  35. #define QCE_MAX_NUM_DSCR 0x200
  36. #define QCE_SECTOR_SIZE 0x200
  37. #define CE_CLK_100MHZ 100000000
  38. #define CE_CLK_DIV 1000000
  39. #define CRYPTO_CORE_MAJOR_VER_NUM 0x05
  40. #define CRYPTO_CORE_MINOR_VER_NUM 0x03
  41. #define CRYPTO_CORE_STEP_VER_NUM 0x1
  42. #define CRYPTO_REQ_USER_PAT 0xdead0000
  43. static DEFINE_MUTEX(bam_register_lock);
  44. static DEFINE_MUTEX(qce_iomap_mutex);
/*
 * Per-BAM registration record, kept on the global qce50_bam_list and
 * shared by all engines that sit behind the same BAM (see bam_register_lock).
 */
struct bam_registration_info {
	struct list_head qlist;		/* node in qce50_bam_list */
	unsigned long handle;		/* BAM handle — presumably returned by SPS registration; verify */
	uint32_t cnt;			/* reference count — presumably engines sharing this BAM; verify */
	uint32_t bam_mem;		/* BAM physical base address */
	void __iomem *bam_iobase;	/* mapped virtual base of BAM registers */
	bool support_cmd_dscr;		/* BAM command-descriptor support flag */
};
  53. static LIST_HEAD(qce50_bam_list);
  54. /* Used to determine the mode */
  55. #define MAX_BUNCH_MODE_REQ 2
  56. /* Max number of request supported */
  57. #define MAX_QCE_BAM_REQ 8
  58. /* Interrupt flag will be set for every SET_INTR_AT_REQ request */
  59. #define SET_INTR_AT_REQ (MAX_QCE_BAM_REQ / 2)
  60. /* To create extra request space to hold dummy request */
  61. #define MAX_QCE_BAM_REQ_WITH_DUMMY_REQ (MAX_QCE_BAM_REQ + 1)
  62. /* Allocate the memory for MAX_QCE_BAM_REQ + 1 (for dummy request) */
  63. #define MAX_QCE_ALLOC_BAM_REQ MAX_QCE_BAM_REQ_WITH_DUMMY_REQ
  64. /* QCE driver modes */
  65. #define IN_INTERRUPT_MODE 0
  66. #define IN_BUNCH_MODE 1
  67. /* Dummy request data length */
  68. #define DUMMY_REQ_DATA_LEN 64
  69. /* Delay timer to expire when in bunch mode */
  70. #define DELAY_IN_JIFFIES 5
  71. /* Index to point the dummy request */
  72. #define DUMMY_REQ_INDEX MAX_QCE_BAM_REQ
  73. #define TOTAL_IOVEC_SPACE_PER_PIPE (QCE_MAX_NUM_DSCR * sizeof(struct sps_iovec))
  74. #define AES_CTR_IV_CTR_SIZE 64
  75. #define STATUS1_ERR_INTR_MASK 0x10
/*
 * Current owner of the crypto engine; stored in qce_device::owner.
 * NOTE(review): semantics inferred from names — NONE = unclaimed,
 * CLIENT = claimed by a client request, TIMEOUT = reclaimed on timeout;
 * confirm against the owner-transition code elsewhere in the file.
 */
enum qce_owner {
	QCE_OWNER_NONE = 0,
	QCE_OWNER_CLIENT = 1,
	QCE_OWNER_TIMEOUT = 2
};
/*
 * Container for the driver-generated dummy request (see qce_dummy_req(),
 * DUMMY_REQ_INDEX and DUMMY_REQ_DATA_LEN). Holds the SHA request, its
 * single scatterlist entry and the ahash request it wraps.
 */
struct dummy_request {
	struct qce_sha_req sreq;	/* internal SHA request descriptor */
	struct scatterlist sg;		/* single-entry sg for the dummy input buffer */
	struct ahash_request areq;	/* crypto-API ahash request backing sreq */
};
  86. /*
  87. * CE HW device structure.
  88. * Each engine has an instance of the structure.
  89. * Each engine can only handle one crypto operation at one time. It is up to
  90. * the sw above to ensure single threading of operation on an engine.
  91. */
struct qce_device {
	struct device *pdev;		/* Handle to platform_device structure */
	struct bam_registration_info *pbam;	/* shared BAM record (qce50_bam_list) */
	unsigned char *coh_vmem;	/* Allocated coherent virtual memory */
	dma_addr_t coh_pmem;		/* Allocated coherent physical memory */
	int memsize;			/* Size of coherent memory allocated */
	unsigned char *iovec_vmem;	/* Allocated iovec virtual memory */
	int iovec_memsize;		/* Size of iovec memory allocated */
	uint32_t bam_mem;		/* bam physical address, from DT */
	uint32_t bam_mem_size;		/* bam io size, from DT */
	int is_shared;			/* CE HW is shared */
	bool support_cmd_dscr;		/* engine supports BAM command descriptors */
	bool support_hw_key;		/* engine supports hardware-backed keys */
	bool support_clk_mgmt_sus_res;	/* clock management across suspend/resume */
	bool support_only_core_src_clk;	/* only the core source clock is present */
	bool request_bw_before_clk;	/* bandwidth must be voted before clocks */
	void __iomem *iobase;		/* Virtual io base of CE HW */
	unsigned int phy_iobase;	/* Physical io base of CE HW */
	struct clk *ce_core_src_clk;	/* Handle to CE source clock */
	struct clk *ce_core_clk;	/* Handle to CE core clock */
	struct clk *ce_clk;		/* Handle to CE interface clock */
	struct clk *ce_bus_clk;		/* Handle to CE AXI (bus) clock */
	bool no_get_around;		/* HW quirk flag — semantics set elsewhere; verify */
	bool no_ccm_mac_status_get_around;	/* CCM MAC status quirk flag — verify */
	unsigned int ce_opp_freq_hz;	/* operating frequency in Hz (see CE_CLK_*) */
	bool use_sw_aes_cbc_ecb_ctr_algo;	/* fall back to SW for AES CBC/ECB/CTR */
	bool use_sw_aead_algo;		/* fall back to SW for AEAD */
	bool use_sw_aes_xts_algo;	/* fall back to SW for AES-XTS */
	bool use_sw_ahash_algo;		/* fall back to SW for ahash */
	bool use_sw_hmac_algo;		/* fall back to SW for HMAC */
	bool use_sw_aes_ccm_algo;	/* fall back to SW for AES-CCM */
	uint32_t engines_avail;		/* bitmask of available engines — verify encoding */
	struct qce_ce_cfg_reg_setting reg;	/* cached register programming values */
	struct ce_bam_info ce_bam_info;		/* BAM pipe/burst configuration */
	/* one slot per in-flight request, plus one for the dummy request
	 * at DUMMY_REQ_INDEX (MAX_QCE_ALLOC_BAM_REQ = MAX_QCE_BAM_REQ + 1)
	 */
	struct ce_request_info ce_request_info[MAX_QCE_ALLOC_BAM_REQ];
	unsigned int ce_request_index;	/* next slot to try in ce_request_info[] */
	enum qce_owner owner;		/* current engine owner */
	atomic_t no_of_queued_req;	/* count of queued requests */
	struct timer_list timer;	/* bunch-mode expiry timer (DELAY_IN_JIFFIES) */
	struct dummy_request dummyreq;	/* preallocated dummy request */
	unsigned int mode;		/* IN_INTERRUPT_MODE or IN_BUNCH_MODE */
	unsigned int intr_cadence;	/* request counter for SET_INTR_AT_REQ pacing */
	unsigned int dev_no;		/* engine instance number */
	struct qce_driver_stats qce_stats;	/* per-engine driver statistics */
	atomic_t bunch_cmd_seq;		/* sequence no. of bunched commands */
	atomic_t last_intr_seq;		/* sequence no. at last interrupt */
	bool cadence_flag;		/* interrupt-cadence state flag — verify */
	uint8_t *dummyreq_in_buf;	/* input buffer for the dummy request */
	struct dma_iommu_mapping *smmu_mapping;	/* SMMU mapping (CRYPTO_SMMU_IOVA_*) */
	bool enable_s1_smmu;		/* stage-1 SMMU translation enabled */
	bool no_clock_support;		/* platform has no controllable clocks */
	bool kernel_pipes_support;	/* kernel-owned BAM pipes available */
	bool offload_pipes_support;	/* offload BAM pipes available */
};
  146. static void print_notify_debug(struct sps_event_notify *notify);
  147. static void _sps_producer_callback(struct sps_event_notify *notify);
  148. static int qce_dummy_req(struct qce_device *pce_dev);
  149. static int _qce50_disp_stats;
/* Standard initialization vector for SHA-1, source: FIPS 180-2 */
/* NOTE(review): never modified by this file's visible code — could be
 * made "static const" if no caller takes a non-const pointer; verify.
 */
static uint32_t _std_init_vector_sha1[] = {
	0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
};
/* Standard initialization vector for SHA-256, source: FIPS 180-2 */
static uint32_t _std_init_vector_sha256[] = {
	0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
	0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
};
  159. /*
  160. * Requests for offload operations do not require explicit dma operations
  161. * as they already have SMMU mapped source/destination buffers.
  162. */
  163. static bool is_offload_op(int op)
  164. {
  165. return (op == QCE_OFFLOAD_HLOS_HLOS || op == QCE_OFFLOAD_HLOS_CPB ||
  166. op == QCE_OFFLOAD_CPB_HLOS);
  167. }
  168. static uint32_t qce_get_config_be(struct qce_device *pce_dev,
  169. uint32_t pipe_pair)
  170. {
  171. uint32_t beats = (pce_dev->ce_bam_info.ce_burst_size >> 3) - 1;
  172. return (beats << CRYPTO_REQ_SIZE |
  173. BIT(CRYPTO_MASK_DOUT_INTR) | BIT(CRYPTO_MASK_DIN_INTR) |
  174. BIT(CRYPTO_MASK_OP_DONE_INTR) | 0 << CRYPTO_HIGH_SPD_EN_N |
  175. pipe_pair << CRYPTO_PIPE_SET_SELECT);
  176. }
/* Print a snapshot of the six CRYPTO_STATUSx register values at info level. */
static void dump_status_regs(unsigned int s1, unsigned int s2,unsigned int s3,
			unsigned int s4, unsigned int s5,unsigned int s6)
{
	pr_info("%s: CRYPTO_STATUS_REG = 0x%x\n", __func__, s1);
	pr_info("%s: CRYPTO_STATUS2_REG = 0x%x\n", __func__, s2);
	pr_info("%s: CRYPTO_STATUS3_REG = 0x%x\n", __func__, s3);
	pr_info("%s: CRYPTO_STATUS4_REG = 0x%x\n", __func__, s4);
	pr_info("%s: CRYPTO_STATUS5_REG = 0x%x\n", __func__, s5);
	pr_info("%s: CRYPTO_STATUS6_REG = 0x%x\n", __func__, s6);
}
  187. void qce_get_crypto_status(void *handle, unsigned int *s1, unsigned int *s2,
  188. unsigned int *s3, unsigned int *s4,
  189. unsigned int *s5, unsigned int *s6)
  190. {
  191. struct qce_device *pce_dev = (struct qce_device *) handle;
  192. *s1 = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
  193. *s2 = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS2_REG);
  194. *s3 = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS3_REG);
  195. *s4 = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS4_REG);
  196. *s5 = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS5_REG);
  197. *s6 = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS6_REG);
  198. #ifdef QCE_DEBUG
  199. dump_status_regs(*s1, *s2, *s3, *s4, *s5, *s6);
  200. #else
  201. if (*s1 & STATUS1_ERR_INTR_MASK)
  202. dump_status_regs(*s1, *s2, *s3, *s4, *s5, *s6);
  203. #endif
  204. return;
  205. }
  206. EXPORT_SYMBOL(qce_get_crypto_status);
  207. static int qce_crypto_config(struct qce_device *pce_dev,
  208. enum qce_offload_op_enum offload_op)
  209. {
  210. uint32_t config_be = 0;
  211. switch (offload_op) {
  212. case QCE_OFFLOAD_NONE:
  213. config_be = qce_get_config_be(pce_dev,
  214. pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_NONE]);
  215. break;
  216. case QCE_OFFLOAD_HLOS_HLOS:
  217. config_be = qce_get_config_be(pce_dev,
  218. pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_HLOS_HLOS]);
  219. break;
  220. case QCE_OFFLOAD_HLOS_CPB:
  221. config_be = qce_get_config_be(pce_dev,
  222. pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_HLOS_CPB]);
  223. break;
  224. case QCE_OFFLOAD_CPB_HLOS:
  225. config_be = qce_get_config_be(pce_dev,
  226. pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_CPB_HLOS]);
  227. break;
  228. default:
  229. pr_err("%s: Valid pipe config not set, offload op = %d\n",
  230. __func__, offload_op);
  231. return -EINVAL;
  232. }
  233. pce_dev->reg.crypto_cfg_be = config_be;
  234. pce_dev->reg.crypto_cfg_le = (config_be |
  235. CRYPTO_LITTLE_ENDIAN_MASK);
  236. return 0;
  237. }
/*
 * Intentionally a no-op: hardware clock gating triggered HW issues, so it
 * stays disabled until the underlying problem is resolved.
 */
static void qce_enable_clock_gating(struct qce_device *pce_dev)
{
	/* This feature might cause some HW issues, noop till resolved. */
	return;
}
  243. /*
  244. * IV counter mask is be set based on the values sent through the offload ioctl
  245. * calls. Currently for offload operations, it is 64 bytes of mask for AES CTR,
  246. * and 128 bytes of mask for AES CBC.
  247. */
  248. static void qce_set_iv_ctr_mask(struct qce_device *pce_dev,
  249. struct qce_req *creq)
  250. {
  251. if (creq->iv_ctr_size == AES_CTR_IV_CTR_SIZE) {
  252. pce_dev->reg.encr_cntr_mask_0 = 0x0;
  253. pce_dev->reg.encr_cntr_mask_1 = 0x0;
  254. pce_dev->reg.encr_cntr_mask_2 = 0xFFFFFFFF;
  255. pce_dev->reg.encr_cntr_mask_3 = 0xFFFFFFFF;
  256. } else {
  257. pce_dev->reg.encr_cntr_mask_0 = 0xFFFFFFFF;
  258. pce_dev->reg.encr_cntr_mask_1 = 0xFFFFFFFF;
  259. pce_dev->reg.encr_cntr_mask_2 = 0xFFFFFFFF;
  260. pce_dev->reg.encr_cntr_mask_3 = 0xFFFFFFFF;
  261. }
  262. return;
  263. }
  264. static void _byte_stream_to_net_words(uint32_t *iv, unsigned char *b,
  265. unsigned int len)
  266. {
  267. unsigned int n;
  268. n = len / sizeof(uint32_t);
  269. for (; n > 0; n--) {
  270. *iv = ((*b << 24) & 0xff000000) |
  271. (((*(b+1)) << 16) & 0xff0000) |
  272. (((*(b+2)) << 8) & 0xff00) |
  273. (*(b+3) & 0xff);
  274. b += sizeof(uint32_t);
  275. iv++;
  276. }
  277. n = len % sizeof(uint32_t);
  278. if (n == 3) {
  279. *iv = ((*b << 24) & 0xff000000) |
  280. (((*(b+1)) << 16) & 0xff0000) |
  281. (((*(b+2)) << 8) & 0xff00);
  282. } else if (n == 2) {
  283. *iv = ((*b << 24) & 0xff000000) |
  284. (((*(b+1)) << 16) & 0xff0000);
  285. } else if (n == 1) {
  286. *iv = ((*b << 24) & 0xff000000);
  287. }
  288. }
  289. static void _byte_stream_swap_to_net_words(uint32_t *iv, unsigned char *b,
  290. unsigned int len)
  291. {
  292. unsigned int i, j;
  293. unsigned char swap_iv[AES_IV_LENGTH];
  294. memset(swap_iv, 0, AES_IV_LENGTH);
  295. for (i = (AES_IV_LENGTH-len), j = len-1; i < AES_IV_LENGTH; i++, j--)
  296. swap_iv[i] = b[j];
  297. _byte_stream_to_net_words(iv, swap_iv, AES_IV_LENGTH);
  298. }
  299. static int count_sg(struct scatterlist *sg, int nbytes)
  300. {
  301. int i;
  302. for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
  303. nbytes -= sg->length;
  304. return i;
  305. }
/*
 * DMA-map @nents scatterlist entries one at a time; always returns @nents.
 *
 * NOTE(review): the return value of dma_map_sg() is ignored, so a mapping
 * failure is not reported to the caller — confirm callers tolerate this.
 */
static int qce_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	for (i = 0; i < nents; ++i) {
		dma_map_sg(dev, sg, 1, direction);
		sg = sg_next(sg);
	}

	return nents;
}
  316. static int qce_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
  317. int nents, enum dma_data_direction direction)
  318. {
  319. int i;
  320. for (i = 0; i < nents; ++i) {
  321. dma_unmap_sg(dev, sg, 1, direction);
  322. sg = sg_next(sg);
  323. }
  324. return nents;
  325. }
  326. static int _probe_ce_engine(struct qce_device *pce_dev)
  327. {
  328. unsigned int rev;
  329. unsigned int maj_rev, min_rev, step_rev;
  330. rev = readl_relaxed(pce_dev->iobase + CRYPTO_VERSION_REG);
  331. /*
  332. * Ensure previous instructions (setting the GO register)
  333. * was completed before checking the version.
  334. */
  335. mb();
  336. maj_rev = (rev & CRYPTO_CORE_MAJOR_REV_MASK) >> CRYPTO_CORE_MAJOR_REV;
  337. min_rev = (rev & CRYPTO_CORE_MINOR_REV_MASK) >> CRYPTO_CORE_MINOR_REV;
  338. step_rev = (rev & CRYPTO_CORE_STEP_REV_MASK) >> CRYPTO_CORE_STEP_REV;
  339. if (maj_rev != CRYPTO_CORE_MAJOR_VER_NUM) {
  340. pr_err("Unsupported QTI crypto device at 0x%x, rev %d.%d.%d\n",
  341. pce_dev->phy_iobase, maj_rev, min_rev, step_rev);
  342. return -EIO;
  343. }
  344. /*
  345. * The majority of crypto HW bugs have been fixed in 5.3.0 and
  346. * above. That allows a single sps transfer of consumer
  347. * pipe, and a single sps transfer of producer pipe
  348. * for a crypto request. no_get_around flag indicates this.
  349. *
  350. * In 5.3.1, the CCM MAC_FAILED in result dump issue is
  351. * fixed. no_ccm_mac_status_get_around flag indicates this.
  352. */
  353. pce_dev->no_get_around = (min_rev >=
  354. CRYPTO_CORE_MINOR_VER_NUM) ? true : false;
  355. if (min_rev > CRYPTO_CORE_MINOR_VER_NUM)
  356. pce_dev->no_ccm_mac_status_get_around = true;
  357. else if ((min_rev == CRYPTO_CORE_MINOR_VER_NUM) &&
  358. (step_rev >= CRYPTO_CORE_STEP_VER_NUM))
  359. pce_dev->no_ccm_mac_status_get_around = true;
  360. else
  361. pce_dev->no_ccm_mac_status_get_around = false;
  362. pce_dev->ce_bam_info.minor_version = min_rev;
  363. pce_dev->engines_avail = readl_relaxed(pce_dev->iobase +
  364. CRYPTO_ENGINES_AVAIL);
  365. dev_info(pce_dev->pdev, "QTI Crypto %d.%d.%d device found @0x%x\n",
  366. maj_rev, min_rev, step_rev, pce_dev->phy_iobase);
  367. pce_dev->ce_bam_info.ce_burst_size = MAX_CE_BAM_BURST_SIZE;
  368. dev_dbg(pce_dev->pdev, "CE device = %#x IO base, CE = %pK Consumer (IN) PIPE %d,\nProducer (OUT) PIPE %d IO base BAM = %pK\nBAM IRQ %d Engines Availability = %#x\n",
  369. pce_dev->ce_bam_info.ce_device, pce_dev->iobase,
  370. pce_dev->ce_bam_info.dest_pipe_index,
  371. pce_dev->ce_bam_info.src_pipe_index,
  372. pce_dev->ce_bam_info.bam_iobase,
  373. pce_dev->ce_bam_info.bam_irq, pce_dev->engines_avail);
  374. return 0;
  375. };
  376. static struct qce_cmdlist_info *_ce_get_hash_cmdlistinfo(
  377. struct qce_device *pce_dev,
  378. int req_info, struct qce_sha_req *sreq)
  379. {
  380. struct ce_sps_data *pce_sps_data;
  381. struct qce_cmdlistptr_ops *cmdlistptr;
  382. pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
  383. cmdlistptr = &pce_sps_data->cmdlistptr;
  384. switch (sreq->alg) {
  385. case QCE_HASH_SHA1:
  386. return &cmdlistptr->auth_sha1;
  387. case QCE_HASH_SHA256:
  388. return &cmdlistptr->auth_sha256;
  389. case QCE_HASH_SHA1_HMAC:
  390. return &cmdlistptr->auth_sha1_hmac;
  391. case QCE_HASH_SHA256_HMAC:
  392. return &cmdlistptr->auth_sha256_hmac;
  393. case QCE_HASH_AES_CMAC:
  394. if (sreq->authklen == AES128_KEY_SIZE)
  395. return &cmdlistptr->auth_aes_128_cmac;
  396. return &cmdlistptr->auth_aes_256_cmac;
  397. default:
  398. return NULL;
  399. }
  400. return NULL;
  401. }
/*
 * Populate the hash command list for a SHA1/SHA256 (plain or HMAC) or
 * AES-CMAC request: crypto config, MAC key, IV/intermediate digest,
 * byte counts, and segment sizes.
 *
 * Returns 0 on success, -EINVAL for a bad config/algorithm, -EIO when a
 * non-final block is not a multiple of the SHA block size.
 *
 * NOTE(review): the MAC-key copy loop is bounded by sreq->authklen with no
 * check against sizeof(mackey32) — confirm callers cap authklen at
 * SHA_HMAC_KEY_SIZE.
 */
static int _ce_setup_hash(struct qce_device *pce_dev,
				struct qce_sha_req *sreq,
				struct qce_cmdlist_info *cmdlistinfo)
{
	uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
	uint32_t diglen;
	int i;
	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	bool sha1 = false;
	struct sps_command_element *pce = NULL;
	bool use_hw_key = false;
	bool use_pipe_key = false;
	uint32_t authk_size_in_word = sreq->authklen/sizeof(uint32_t);
	uint32_t auth_cfg;

	if (qce_crypto_config(pce_dev, QCE_OFFLOAD_NONE))
		return -EINVAL;

	/* Program the cached BE/LE crypto config words. */
	pce = cmdlistinfo->crypto_cfg;
	pce->data = pce_dev->reg.crypto_cfg_be;
	pce = cmdlistinfo->crypto_cfg_le;
	pce->data = pce_dev->reg.crypto_cfg_le;

	/* Keyed algorithms: pick HW key, pipe key, or an explicit SW key. */
	if ((sreq->alg == QCE_HASH_SHA1_HMAC) ||
			(sreq->alg == QCE_HASH_SHA256_HMAC) ||
			(sreq->alg == QCE_HASH_AES_CMAC)) {
		/* no more check for null key. use flag */
		if ((sreq->flags & QCRYPTO_CTX_USE_HW_KEY)
						== QCRYPTO_CTX_USE_HW_KEY)
			use_hw_key = true;
		else if ((sreq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
						QCRYPTO_CTX_USE_PIPE_KEY)
			use_pipe_key = true;
		pce = cmdlistinfo->go_proc;
		if (use_hw_key) {
			/* HW key: GO must hit the QC key register. */
			pce->addr = (uint32_t)(CRYPTO_GOPROC_QC_KEY_REG +
						pce_dev->phy_iobase);
		} else {
			pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
						pce_dev->phy_iobase);
			pce = cmdlistinfo->auth_key;
			if (!use_pipe_key) {
				_byte_stream_to_net_words(mackey32,
						sreq->authkey,
						sreq->authklen);
				for (i = 0; i < authk_size_in_word; i++, pce++)
					pce->data = mackey32[i];
			}
		}
	}

	/* CMAC needs no IV/byte-count programming. */
	if (sreq->alg == QCE_HASH_AES_CMAC)
		goto go_proc;

	/* if not the last, the size has to be on the block boundary */
	if (!sreq->last_blk && (sreq->size % SHA256_BLOCK_SIZE))
		return -EIO;

	switch (sreq->alg) {
	case QCE_HASH_SHA1:
	case QCE_HASH_SHA1_HMAC:
		diglen = SHA1_DIGEST_SIZE;
		sha1 = true;
		break;
	case QCE_HASH_SHA256:
	case QCE_HASH_SHA256_HMAC:
		diglen = SHA256_DIGEST_SIZE;
		break;
	default:
		return -EINVAL;
	}

	/* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */
	if (sreq->first_blk) {
		/* First block: load the FIPS 180-2 initial hash values. */
		if (sha1) {
			for (i = 0; i < 5; i++)
				auth32[i] = _std_init_vector_sha1[i];
		} else {
			for (i = 0; i < 8; i++)
				auth32[i] = _std_init_vector_sha256[i];
		}
	} else {
		/* Continuation: resume from the intermediate digest. */
		_byte_stream_to_net_words(auth32, sreq->digest, diglen);
	}

	pce = cmdlistinfo->auth_iv;
	for (i = 0; i < 5; i++, pce++)
		pce->data = auth32[i];

	if ((sreq->alg == QCE_HASH_SHA256) ||
			(sreq->alg == QCE_HASH_SHA256_HMAC)) {
		for (i = 5; i < 8; i++, pce++)
			pce->data = auth32[i];
	}

	/* write auth_bytecnt 0/1, start with 0 */
	pce = cmdlistinfo->auth_bytecount;
	for (i = 0; i < 2; i++, pce++)
		pce->data = sreq->auth_data[i];

	/* Set/reset last bit in CFG register */
	pce = cmdlistinfo->auth_seg_cfg;
	auth_cfg = pce->data & ~(1 << CRYPTO_LAST |
				1 << CRYPTO_FIRST |
				1 << CRYPTO_USE_PIPE_KEY_AUTH |
				1 << CRYPTO_USE_HW_KEY_AUTH);
	if (sreq->last_blk)
		auth_cfg |= 1 << CRYPTO_LAST;
	if (sreq->first_blk)
		auth_cfg |= 1 << CRYPTO_FIRST;
	if (use_hw_key)
		auth_cfg |= 1 << CRYPTO_USE_HW_KEY_AUTH;
	if (use_pipe_key)
		auth_cfg |= 1 << CRYPTO_USE_PIPE_KEY_AUTH;
	pce->data = auth_cfg;
go_proc:
	/* write auth seg size */
	pce = cmdlistinfo->auth_seg_size;
	pce->data = sreq->size;

	pce = cmdlistinfo->encr_seg_cfg;
	pce->data = 0;

	/* write auth seg size start*/
	pce = cmdlistinfo->auth_seg_start;
	pce->data = 0;

	/* write seg size */
	pce = cmdlistinfo->seg_size;

	/* always ensure there is input data. ZLT does not work for bam-ndp */
	if (sreq->size)
		pce->data = sreq->size;
	else
		pce->data = pce_dev->ce_bam_info.ce_burst_size;

	return 0;
}
  525. static struct qce_cmdlist_info *_ce_get_aead_cmdlistinfo(
  526. struct qce_device *pce_dev,
  527. int req_info, struct qce_req *creq)
  528. {
  529. struct ce_sps_data *pce_sps_data;
  530. struct qce_cmdlistptr_ops *cmdlistptr;
  531. pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
  532. cmdlistptr = &pce_sps_data->cmdlistptr;
  533. switch (creq->alg) {
  534. case CIPHER_ALG_DES:
  535. switch (creq->mode) {
  536. case QCE_MODE_CBC:
  537. if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
  538. return &cmdlistptr->aead_hmac_sha1_cbc_des;
  539. else if (creq->auth_alg == QCE_HASH_SHA256_HMAC)
  540. return &cmdlistptr->aead_hmac_sha256_cbc_des;
  541. else
  542. return NULL;
  543. break;
  544. default:
  545. return NULL;
  546. }
  547. break;
  548. case CIPHER_ALG_3DES:
  549. switch (creq->mode) {
  550. case QCE_MODE_CBC:
  551. if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
  552. return &cmdlistptr->aead_hmac_sha1_cbc_3des;
  553. else if (creq->auth_alg == QCE_HASH_SHA256_HMAC)
  554. return &cmdlistptr->aead_hmac_sha256_cbc_3des;
  555. else
  556. return NULL;
  557. break;
  558. default:
  559. return NULL;
  560. }
  561. break;
  562. case CIPHER_ALG_AES:
  563. switch (creq->mode) {
  564. case QCE_MODE_CBC:
  565. if (creq->encklen == AES128_KEY_SIZE) {
  566. if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
  567. return
  568. &cmdlistptr->aead_hmac_sha1_cbc_aes_128;
  569. else if (creq->auth_alg == QCE_HASH_SHA256_HMAC)
  570. return
  571. &cmdlistptr->aead_hmac_sha256_cbc_aes_128;
  572. else
  573. return NULL;
  574. } else if (creq->encklen == AES256_KEY_SIZE) {
  575. if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
  576. return &cmdlistptr->aead_hmac_sha1_cbc_aes_256;
  577. else if (creq->auth_alg == QCE_HASH_SHA256_HMAC)
  578. return
  579. &cmdlistptr->aead_hmac_sha256_cbc_aes_256;
  580. else
  581. return NULL;
  582. } else
  583. return NULL;
  584. break;
  585. default:
  586. return NULL;
  587. }
  588. break;
  589. default:
  590. return NULL;
  591. }
  592. return NULL;
  593. }
/*
 * Program the AEAD (CBC cipher + HMAC) command list: crypto config,
 * cipher IV and key, HMAC key and standard IVs, auth position, and the
 * segment sizes/offsets.
 * @totallen_in: total bytes covered by the auth segment.
 * @coffset: byte offset of the cipher segment within the request.
 *
 * Returns 0 on success, -EINVAL for unsupported alg/key-size/mode.
 * Only CBC mode is supported; keys are always software keys here.
 */
static int _ce_setup_aead(struct qce_device *pce_dev, struct qce_req *q_req,
		uint32_t totallen_in, uint32_t coffset,
		struct qce_cmdlist_info *cmdlistinfo)
{
	int32_t authk_size_in_word = SHA_HMAC_KEY_SIZE/sizeof(uint32_t);
	int i;
	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {0};
	struct sps_command_element *pce;
	uint32_t a_cfg;
	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE*2)/sizeof(uint32_t)] = {0};
	uint32_t enciv32[MAX_IV_LENGTH/sizeof(uint32_t)] = {0};
	uint32_t enck_size_in_word = 0;
	uint32_t enciv_in_word;
	uint32_t key_size;
	uint32_t encr_cfg = 0;
	uint32_t ivsize = q_req->ivsize;

	key_size = q_req->encklen;
	enck_size_in_word = key_size/sizeof(uint32_t);

	if (qce_crypto_config(pce_dev, q_req->offload_op))
		return -EINVAL;

	pce = cmdlistinfo->crypto_cfg;
	pce->data = pce_dev->reg.crypto_cfg_be;
	pce = cmdlistinfo->crypto_cfg_le;
	pce->data = pce_dev->reg.crypto_cfg_le;

	/* DES/3DES use a 64-bit (2-word) IV, AES a 128-bit (4-word) one. */
	switch (q_req->alg) {
	case CIPHER_ALG_DES:
		enciv_in_word = 2;
		break;
	case CIPHER_ALG_3DES:
		enciv_in_word = 2;
		break;
	case CIPHER_ALG_AES:
		if ((key_size != AES128_KEY_SIZE) &&
				(key_size != AES256_KEY_SIZE))
			return -EINVAL;
		enciv_in_word = 4;
		break;
	default:
		return -EINVAL;
	}

	/* only support cbc mode */
	if (q_req->mode != QCE_MODE_CBC)
		return -EINVAL;

	_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
	pce = cmdlistinfo->encr_cntr_iv;
	for (i = 0; i < enciv_in_word; i++, pce++)
		pce->data = enciv32[i];

	/*
	 * write encr key
	 * do not use  hw key or pipe key
	 */
	_byte_stream_to_net_words(enckey32, q_req->enckey, key_size);
	pce = cmdlistinfo->encr_key;
	for (i = 0; i < enck_size_in_word; i++, pce++)
		pce->data = enckey32[i];

	/* write encr seg cfg */
	pce = cmdlistinfo->encr_seg_cfg;
	encr_cfg = pce->data;
	if (q_req->dir == QCE_ENCRYPT)
		encr_cfg |= (1 << CRYPTO_ENCODE);
	else
		encr_cfg &= ~(1 << CRYPTO_ENCODE);
	pce->data = encr_cfg;

	/* we only support sha1-hmac and sha256-hmac at this point */
	_byte_stream_to_net_words(mackey32, q_req->authkey,
					q_req->authklen);
	pce = cmdlistinfo->auth_key;
	for (i = 0; i < authk_size_in_word; i++, pce++)
		pce->data = mackey32[i];
	pce = cmdlistinfo->auth_iv;

	/* Load the FIPS 180-2 initial hash values for the HMAC digest. */
	if (q_req->auth_alg == QCE_HASH_SHA1_HMAC)
		for (i = 0; i < 5; i++, pce++)
			pce->data = _std_init_vector_sha1[i];
	else
		for (i = 0; i < 8; i++, pce++)
			pce->data = _std_init_vector_sha256[i];

	/* write auth_bytecnt 0/1, start with 0 */
	pce = cmdlistinfo->auth_bytecount;
	for (i = 0; i < 2; i++, pce++)
		pce->data = 0;

	/* Auth runs after encrypt, before decrypt. */
	pce = cmdlistinfo->auth_seg_cfg;
	a_cfg = pce->data;
	a_cfg &= ~(CRYPTO_AUTH_POS_MASK);
	if (q_req->dir == QCE_ENCRYPT)
		a_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
	else
		a_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
	pce->data = a_cfg;

	/* write auth seg size */
	pce = cmdlistinfo->auth_seg_size;
	pce->data = totallen_in;

	/* write auth seg size start*/
	pce = cmdlistinfo->auth_seg_start;
	pce->data = 0;

	/* write seg size */
	pce = cmdlistinfo->seg_size;
	pce->data = totallen_in;

	/* write encr seg size */
	pce = cmdlistinfo->encr_seg_size;
	pce->data = q_req->cryptlen;

	/* write encr seg start */
	pce = cmdlistinfo->encr_seg_start;
	pce->data = (coffset & 0xffff);

	return 0;
}
  699. static struct qce_cmdlist_info *_ce_get_cipher_cmdlistinfo(
  700. struct qce_device *pce_dev,
  701. int req_info, struct qce_req *creq)
  702. {
  703. struct ce_request_info *preq_info;
  704. struct ce_sps_data *pce_sps_data;
  705. struct qce_cmdlistptr_ops *cmdlistptr;
  706. preq_info = &pce_dev->ce_request_info[req_info];
  707. pce_sps_data = &preq_info->ce_sps;
  708. cmdlistptr = &pce_sps_data->cmdlistptr;
  709. if (creq->alg != CIPHER_ALG_AES) {
  710. switch (creq->alg) {
  711. case CIPHER_ALG_DES:
  712. if (creq->mode == QCE_MODE_ECB)
  713. return &cmdlistptr->cipher_des_ecb;
  714. return &cmdlistptr->cipher_des_cbc;
  715. case CIPHER_ALG_3DES:
  716. if (creq->mode == QCE_MODE_ECB)
  717. return &cmdlistptr->cipher_3des_ecb;
  718. return &cmdlistptr->cipher_3des_cbc;
  719. default:
  720. return NULL;
  721. }
  722. } else {
  723. switch (creq->mode) {
  724. case QCE_MODE_ECB:
  725. if (creq->encklen == AES128_KEY_SIZE)
  726. return &cmdlistptr->cipher_aes_128_ecb;
  727. return &cmdlistptr->cipher_aes_256_ecb;
  728. case QCE_MODE_CBC:
  729. case QCE_MODE_CTR:
  730. if (creq->encklen == AES128_KEY_SIZE)
  731. return &cmdlistptr->cipher_aes_128_cbc_ctr;
  732. return &cmdlistptr->cipher_aes_256_cbc_ctr;
  733. case QCE_MODE_XTS:
  734. if (creq->encklen/2 == AES128_KEY_SIZE)
  735. return &cmdlistptr->cipher_aes_128_xts;
  736. return &cmdlistptr->cipher_aes_256_xts;
  737. case QCE_MODE_CCM:
  738. if (creq->encklen == AES128_KEY_SIZE)
  739. return &cmdlistptr->aead_aes_128_ccm;
  740. return &cmdlistptr->aead_aes_256_ccm;
  741. default:
  742. return NULL;
  743. }
  744. }
  745. return NULL;
  746. }
/*
 * Program the cipher command list for an AES/DES/3DES request (including
 * the AES-CCM AEAD case): crypto config, GO register address, keys, IVs,
 * counter masks, and segment sizes/offsets.
 * @totallen_in: total segment size; @coffset: cipher segment byte offset.
 *
 * Returns 0 on success, -EINVAL for bad config, key size, or IV length.
 */
static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq,
		uint32_t totallen_in, uint32_t coffset,
		struct qce_cmdlist_info *cmdlistinfo)
{
	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = {
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
			0, 0, 0, 0};
	uint32_t enck_size_in_word = 0;
	uint32_t key_size;
	bool use_hw_key = false;
	bool use_pipe_key = false;
	uint32_t encr_cfg = 0;
	uint32_t ivsize = creq->ivsize;
	int i;
	struct sps_command_element *pce = NULL;
	bool is_des_cipher = false;

	/* XTS splits encklen into a data key and a tweak key. */
	if (creq->mode == QCE_MODE_XTS)
		key_size = creq->encklen/2;
	else
		key_size = creq->encklen;

	if (qce_crypto_config(pce_dev, creq->offload_op))
		return -EINVAL;

	pce = cmdlistinfo->crypto_cfg;
	pce->data = pce_dev->reg.crypto_cfg_be;
	pce = cmdlistinfo->crypto_cfg_le;
	pce->data = pce_dev->reg.crypto_cfg_le;

	pce = cmdlistinfo->go_proc;
	if ((creq->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
		use_hw_key = true;
	} else {
		if ((creq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
						QCRYPTO_CTX_USE_PIPE_KEY)
			use_pipe_key = true;
	}
	/* HW-key requests must kick off via the QC key GO register. */
	if (use_hw_key)
		pce->addr = (uint32_t)(CRYPTO_GOPROC_QC_KEY_REG +
						pce_dev->phy_iobase);
	else
		pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
						pce_dev->phy_iobase);
	if (!use_pipe_key && !use_hw_key) {
		_byte_stream_to_net_words(enckey32, creq->enckey, key_size);
		enck_size_in_word = key_size/sizeof(uint32_t);
	}

	if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) {
		uint32_t authklen32 = creq->encklen/sizeof(uint32_t);
		uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t);
		uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0};
		uint32_t auth_cfg = 0;

		/* write nonce */
		_byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE);
		pce = cmdlistinfo->auth_nonce_info;
		for (i = 0; i < noncelen32; i++, pce++)
			pce->data = nonce32[i];

		/* CCM auth uses the cipher key, sized 128 or 256 bits. */
		if (creq->authklen ==  AES128_KEY_SIZE)
			auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_128;
		else {
			if (creq->authklen ==  AES256_KEY_SIZE)
				auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_256;
		}
		if (creq->dir == QCE_ENCRYPT)
			auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
		else
			auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
		auth_cfg |= ((creq->authsize - 1) << CRYPTO_AUTH_SIZE);

		if (use_hw_key)	{
			auth_cfg |= (1 << CRYPTO_USE_HW_KEY_AUTH);
		} else {
			auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
			/* write auth key */
			pce = cmdlistinfo->auth_key;
			for (i = 0; i < authklen32; i++, pce++)
				pce->data = enckey32[i];
		}

		pce = cmdlistinfo->auth_seg_cfg;
		pce->data = auth_cfg;

		pce = cmdlistinfo->auth_seg_size;
		/* Decrypt input still carries the MAC; exclude it from auth. */
		if (creq->dir == QCE_ENCRYPT)
			pce->data = totallen_in;
		else
			pce->data = totallen_in - creq->authsize;
		pce = cmdlistinfo->auth_seg_start;
		pce->data = 0;
	} else {
		if (creq->op != QCE_REQ_AEAD) {
			pce = cmdlistinfo->auth_seg_cfg;
			pce->data = 0;
		}
	}

	/* Select the base encr config for the mode and key size. */
	switch (creq->mode) {
	case QCE_MODE_ECB:
		if (key_size == AES128_KEY_SIZE)
			encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_128;
		else
			encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_256;
		break;
	case QCE_MODE_CBC:
		if (key_size == AES128_KEY_SIZE)
			encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128;
		else
			encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256;
		break;
	case QCE_MODE_XTS:
		if (key_size == AES128_KEY_SIZE)
			encr_cfg = pce_dev->reg.encr_cfg_aes_xts_128;
		else
			encr_cfg = pce_dev->reg.encr_cfg_aes_xts_256;
		break;
	case QCE_MODE_CCM:
		if (key_size == AES128_KEY_SIZE)
			encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_128;
		else
			encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_256;
		encr_cfg |= (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE) |
				(CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM);
		break;
	case QCE_MODE_CTR:
	default:
		if (key_size == AES128_KEY_SIZE)
			encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_128;
		else
			encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_256;
		break;
	}

	switch (creq->alg) {
	case CIPHER_ALG_DES:
		if (creq->mode !=  QCE_MODE_ECB) {
			if (ivsize > MAX_IV_LENGTH) {
				pr_err("%s: error: Invalid length parameter\n",
					 __func__);
				return -EINVAL;
			}
			/* DES IV is two words. */
			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
			pce = cmdlistinfo->encr_cntr_iv;
			pce->data = enciv32[0];
			pce++;
			pce->data = enciv32[1];
		}
		if (!use_hw_key) {
			pce = cmdlistinfo->encr_key;
			pce->data = enckey32[0];
			pce++;
			pce->data = enckey32[1];
		}
		is_des_cipher = true;
		break;
	case CIPHER_ALG_3DES:
		if (creq->mode !=  QCE_MODE_ECB) {
			if (ivsize > MAX_IV_LENGTH) {
				pr_err("%s: error: Invalid length parameter\n",
					 __func__);
				return -EINVAL;
			}
			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
			pce = cmdlistinfo->encr_cntr_iv;
			pce->data = enciv32[0];
			pce++;
			pce->data = enciv32[1];
		}
		if (!use_hw_key) {
			/* write encr key */
			pce = cmdlistinfo->encr_key;
			for (i = 0; i < 6; i++, pce++)
				pce->data = enckey32[i];
		}
		is_des_cipher = true;
		break;
	case CIPHER_ALG_AES:
	default:
		if (creq->mode ==  QCE_MODE_XTS) {
			uint32_t xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)]
					= {0, 0, 0, 0, 0, 0, 0, 0};
			uint32_t xtsklen =
					creq->encklen/(2 * sizeof(uint32_t));

			if (!use_hw_key && !use_pipe_key) {
				/* Second half of encklen is the tweak key. */
				_byte_stream_to_net_words(xtskey32,
					(creq->enckey + creq->encklen/2),
							creq->encklen/2);
				/* write xts encr key */
				pce = cmdlistinfo->encr_xts_key;
				for (i = 0; i < xtsklen; i++, pce++)
					pce->data = xtskey32[i];
			}
			/* write xts du size */
			pce = cmdlistinfo->encr_xts_du_size;
			switch (creq->flags & QCRYPTO_CTX_XTS_MASK) {
			case QCRYPTO_CTX_XTS_DU_SIZE_512B:
				pce->data = min((unsigned int)QCE_SECTOR_SIZE,
						creq->cryptlen);
				break;
			case QCRYPTO_CTX_XTS_DU_SIZE_1KB:
				pce->data =
					min((unsigned int)QCE_SECTOR_SIZE * 2,
					creq->cryptlen);
				break;
			default:
				pce->data = creq->cryptlen;
				break;
			}
		}
		if (creq->mode !=  QCE_MODE_ECB) {
			if (ivsize > MAX_IV_LENGTH) {
				pr_err("%s: error: Invalid length parameter\n",
					 __func__);
				return -EINVAL;
			}
			/* XTS tweaks are loaded byte-reversed. */
			if (creq->mode ==  QCE_MODE_XTS)
				_byte_stream_swap_to_net_words(enciv32,
							creq->iv, ivsize);
			else
				_byte_stream_to_net_words(enciv32, creq->iv,
								ivsize);

			/* write encr cntr iv */
			pce = cmdlistinfo->encr_cntr_iv;
			for (i = 0; i < 4; i++, pce++)
				pce->data = enciv32[i];

			if (creq->mode ==  QCE_MODE_CCM) {
				/* write cntr iv for ccm */
				pce = cmdlistinfo->encr_ccm_cntr_iv;
				for (i = 0; i < 4; i++, pce++)
					pce->data = enciv32[i];
				/* update cntr_iv[3] by one */
				pce = cmdlistinfo->encr_cntr_iv;
				pce += 3;
				pce->data += 1;
			}
		}

		if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
			encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
					CRYPTO_ENCR_KEY_SZ);
		} else {
			if (!use_hw_key) {
				/* write encr key */
				pce = cmdlistinfo->encr_key;
				for (i = 0; i < enck_size_in_word; i++, pce++)
					pce->data = enckey32[i];
			}
		} /* else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
		break;
	} /* end of switch (creq->mode)  */

	if (use_pipe_key)
		encr_cfg |= (CRYPTO_USE_PIPE_KEY_ENCR_ENABLED
					<< CRYPTO_USE_PIPE_KEY_ENCR);

	/* write encr seg cfg */
	pce = cmdlistinfo->encr_seg_cfg;
	if ((creq->alg == CIPHER_ALG_DES) || (creq->alg == CIPHER_ALG_3DES)) {
		if (creq->dir == QCE_ENCRYPT)
			pce->data |= (1 << CRYPTO_ENCODE);
		else
			pce->data &= ~(1 << CRYPTO_ENCODE);
		encr_cfg = pce->data;
	}  else	{
		encr_cfg |=
			((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;
	}
	if (use_hw_key)
		encr_cfg |= (CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
	else
		encr_cfg &= ~(CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
	pce->data = encr_cfg;

	/* write encr seg size */
	pce = cmdlistinfo->encr_seg_size;
	if (creq->is_copy_op) {
		pce->data = 0;
	} else {
		/* CCM decrypt input includes the trailing MAC bytes. */
		if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT))
			pce->data = (creq->cryptlen + creq->authsize);
		else
			pce->data = creq->cryptlen;
	}

	/* write encr seg start */
	pce = cmdlistinfo->encr_seg_start;
	pce->data = (coffset & 0xffff);

	/* write seg size  */
	pce = cmdlistinfo->seg_size;
	pce->data = totallen_in;

	if (!is_des_cipher) {
		/* pattern info */
		pce = cmdlistinfo->pattern_info;
		pce->data = creq->pattern_info;

		/* block offset */
		pce = cmdlistinfo->block_offset;
		pce->data = (creq->block_offset << 4) |
				(creq->block_offset ? 1: 0);

		/* IV counter size */
		qce_set_iv_ctr_mask(pce_dev, creq);

		pce = cmdlistinfo->encr_mask_3;
		pce->data = pce_dev->reg.encr_cntr_mask_3;
		pce = cmdlistinfo->encr_mask_2;
		pce->data = pce_dev->reg.encr_cntr_mask_2;
		pce = cmdlistinfo->encr_mask_1;
		pce->data = pce_dev->reg.encr_cntr_mask_1;
		pce = cmdlistinfo->encr_mask_0;
		pce->data = pce_dev->reg.encr_cntr_mask_0;
	}

	pce = cmdlistinfo->go_proc;
	pce->data = 0;
	/* Offload ops skip RESULTS_DUMP; see is_offload_op(). */
	if (is_offload_op(creq->offload_op))
		pce->data = ((1 << CRYPTO_GO) | (1 << CRYPTO_CLR_CNTXT));
	else
		pce->data = ((1 << CRYPTO_GO) | (1 << CRYPTO_CLR_CNTXT) |
				(1 << CRYPTO_RESULTS_DUMP));

	return 0;
}
/*
 * Program the command list for an OTA f9 (integrity/MAC) request using
 * Kasumi or Snow3G: integrity key, last-bits word, fresh/count values,
 * direction bit, and segment sizes.
 *
 * Returns 0 on success, -EINVAL if the crypto config cannot be set.
 */
static int _ce_f9_setup(struct qce_device *pce_dev, struct qce_f9_req *req,
		struct qce_cmdlist_info *cmdlistinfo)
{
	uint32_t ikey32[OTA_KEY_SIZE/sizeof(uint32_t)];
	uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
	uint32_t cfg;
	struct sps_command_element *pce;
	int i;

	/* Snow3G is the default auth config for unknown algorithms. */
	switch (req->algorithm) {
	case QCE_OTA_ALGO_KASUMI:
		cfg = pce_dev->reg.auth_cfg_kasumi;
		break;
	case QCE_OTA_ALGO_SNOW3G:
	default:
		cfg = pce_dev->reg.auth_cfg_snow3g;
		break;
	}

	if (qce_crypto_config(pce_dev, QCE_OFFLOAD_NONE))
		return -EINVAL;

	pce = cmdlistinfo->crypto_cfg;
	pce->data = pce_dev->reg.crypto_cfg_be;
	pce = cmdlistinfo->crypto_cfg_le;
	pce->data = pce_dev->reg.crypto_cfg_le;

	/* write key in CRYPTO_AUTH_IV0-3_REG */
	_byte_stream_to_net_words(ikey32, &req->ikey[0], OTA_KEY_SIZE);
	pce = cmdlistinfo->auth_iv;
	for (i = 0; i < key_size_in_word; i++, pce++)
		pce->data = ikey32[i];

	/* write last bits  in CRYPTO_AUTH_IV4_REG  */
	pce->data = req->last_bits;

	/* write fresh to  CRYPTO_AUTH_BYTECNT0_REG */
	pce = cmdlistinfo->auth_bytecount;
	pce->data = req->fresh;

	/* write count-i  to  CRYPTO_AUTH_BYTECNT1_REG */
	pce++;
	pce->data = req->count_i;

	/* write auth seg cfg */
	pce = cmdlistinfo->auth_seg_cfg;
	if (req->direction == QCE_OTA_DIR_DOWNLINK)
		cfg |= BIT(CRYPTO_F9_DIRECTION);
	pce->data = cfg;

	/* write auth seg size */
	pce = cmdlistinfo->auth_seg_size;
	pce->data = req->msize;

	/* write auth seg start*/
	pce = cmdlistinfo->auth_seg_start;
	pce->data = 0;

	/* write seg size  */
	pce = cmdlistinfo->seg_size;
	pce->data = req->msize;

	/* write go */
	pce = cmdlistinfo->go_proc;
	pce->addr = (uint32_t)(CRYPTO_GOPROC_REG + pce_dev->phy_iobase);
	return 0;
}
  1107. static int _ce_f8_setup(struct qce_device *pce_dev, struct qce_f8_req *req,
  1108. bool key_stream_mode, uint16_t npkts, uint16_t cipher_offset,
  1109. uint16_t cipher_size,
  1110. struct qce_cmdlist_info *cmdlistinfo)
  1111. {
  1112. uint32_t ckey32[OTA_KEY_SIZE/sizeof(uint32_t)];
  1113. uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
  1114. uint32_t cfg;
  1115. struct sps_command_element *pce;
  1116. int i;
  1117. switch (req->algorithm) {
  1118. case QCE_OTA_ALGO_KASUMI:
  1119. cfg = pce_dev->reg.encr_cfg_kasumi;
  1120. break;
  1121. case QCE_OTA_ALGO_SNOW3G:
  1122. default:
  1123. cfg = pce_dev->reg.encr_cfg_snow3g;
  1124. break;
  1125. }
  1126. if (qce_crypto_config(pce_dev, QCE_OFFLOAD_NONE))
  1127. return -EINVAL;
  1128. pce = cmdlistinfo->crypto_cfg;
  1129. pce->data = pce_dev->reg.crypto_cfg_be;
  1130. pce = cmdlistinfo->crypto_cfg_le;
  1131. pce->data = pce_dev->reg.crypto_cfg_le;
  1132. /* write key */
  1133. _byte_stream_to_net_words(ckey32, &req->ckey[0], OTA_KEY_SIZE);
  1134. pce = cmdlistinfo->encr_key;
  1135. for (i = 0; i < key_size_in_word; i++, pce++)
  1136. pce->data = ckey32[i];
  1137. /* write encr seg cfg */
  1138. pce = cmdlistinfo->encr_seg_cfg;
  1139. if (key_stream_mode)
  1140. cfg |= BIT(CRYPTO_F8_KEYSTREAM_ENABLE);
  1141. if (req->direction == QCE_OTA_DIR_DOWNLINK)
  1142. cfg |= BIT(CRYPTO_F8_DIRECTION);
  1143. pce->data = cfg;
  1144. /* write encr seg start */
  1145. pce = cmdlistinfo->encr_seg_start;
  1146. pce->data = (cipher_offset & 0xffff);
  1147. /* write encr seg size */
  1148. pce = cmdlistinfo->encr_seg_size;
  1149. pce->data = cipher_size;
  1150. /* write seg size */
  1151. pce = cmdlistinfo->seg_size;
  1152. pce->data = req->data_len;
  1153. /* write cntr0_iv0 for countC */
  1154. pce = cmdlistinfo->encr_cntr_iv;
  1155. pce->data = req->count_c;
  1156. /* write cntr1_iv1 for nPkts, and bearer */
  1157. pce++;
  1158. if (npkts == 1)
  1159. npkts = 0;
  1160. pce->data = req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER |
  1161. npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT;
  1162. /* write go */
  1163. pce = cmdlistinfo->go_proc;
  1164. pce->addr = (uint32_t)(CRYPTO_GOPROC_REG + pce_dev->phy_iobase);
  1165. return 0;
  1166. }
  1167. static void _qce_dump_descr_fifos(struct qce_device *pce_dev, int req_info)
  1168. {
  1169. int i, j, ents;
  1170. struct ce_sps_data *pce_sps_data;
  1171. struct sps_iovec *iovec;
  1172. uint32_t cmd_flags = SPS_IOVEC_FLAG_CMD;
  1173. pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
  1174. iovec = pce_sps_data->in_transfer.iovec;
  1175. pr_info("==============================================\n");
  1176. pr_info("CONSUMER (TX/IN/DEST) PIPE DESCRIPTOR\n");
  1177. pr_info("==============================================\n");
  1178. for (i = 0; i < pce_sps_data->in_transfer.iovec_count; i++) {
  1179. pr_info(" [%d] addr=0x%x size=0x%x flags=0x%x\n", i,
  1180. iovec->addr, iovec->size, iovec->flags);
  1181. if (iovec->flags & cmd_flags) {
  1182. struct sps_command_element *pced;
  1183. pced = (struct sps_command_element *)
  1184. (GET_VIRT_ADDR(iovec->addr));
  1185. ents = iovec->size/(sizeof(struct sps_command_element));
  1186. for (j = 0; j < ents; j++) {
  1187. pr_info(" [%d] [0x%x] 0x%x\n", j,
  1188. pced->addr, pced->data);
  1189. pced++;
  1190. }
  1191. }
  1192. iovec++;
  1193. }
  1194. pr_info("==============================================\n");
  1195. pr_info("PRODUCER (RX/OUT/SRC) PIPE DESCRIPTOR\n");
  1196. pr_info("==============================================\n");
  1197. iovec = pce_sps_data->out_transfer.iovec;
  1198. for (i = 0; i < pce_sps_data->out_transfer.iovec_count; i++) {
  1199. pr_info(" [%d] addr=0x%x size=0x%x flags=0x%x\n", i,
  1200. iovec->addr, iovec->size, iovec->flags);
  1201. iovec++;
  1202. }
  1203. }
  1204. #ifdef QCE_DEBUG
  1205. static void _qce_dump_descr_fifos_dbg(struct qce_device *pce_dev, int req_info)
  1206. {
  1207. _qce_dump_descr_fifos(pce_dev, req_info);
  1208. }
  1209. #define QCE_WRITE_REG(val, addr) \
  1210. { \
  1211. pr_info(" [0x%pK] 0x%x\n", addr, (uint32_t)val); \
  1212. writel_relaxed(val, addr); \
  1213. }
  1214. #else
  1215. static void _qce_dump_descr_fifos_dbg(struct qce_device *pce_dev, int req_info)
  1216. {
  1217. }
  1218. #define QCE_WRITE_REG(val, addr) \
  1219. writel_relaxed(val, addr)
  1220. #endif
/*
 * Program the crypto engine registers directly (no BAM command list)
 * for a hash request: SHA1/SHA256, their HMAC variants, or AES-CMAC.
 *
 * For CMAC the IV/key/bytecount registers are cleared and the key-size
 * specific CMAC config is selected. For keyed algorithms the MAC key is
 * written unless a HW-bound or pipe key is requested via sreq->flags.
 * SHA paths load either the standard initial vector (first block) or
 * the caller-supplied intermediate digest, then set FIRST/LAST bits.
 *
 * Returns 0 on success, -EINVAL on bad config or algorithm, -EIO when a
 * non-final block is not a multiple of the SHA block size.
 */
static int _ce_setup_hash_direct(struct qce_device *pce_dev,
				struct qce_sha_req *sreq)
{
	uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
	uint32_t diglen;
	bool use_hw_key = false;
	bool use_pipe_key = false;
	int i;
	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	uint32_t authk_size_in_word = sreq->authklen/sizeof(uint32_t);
	bool sha1 = false;
	uint32_t auth_cfg = 0;

	/* clear status */
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);

	if (qce_crypto_config(pce_dev, QCE_OFFLOAD_NONE))
		return -EINVAL;
	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
							CRYPTO_CONFIG_REG));
	/*
	 * Ensure previous instructions (setting the CONFIG register)
	 * was completed before issuing starting to set other config register
	 * This is to ensure the configurations are done in correct endian-ness
	 * as set in the CONFIG registers
	 */
	mb();

	if (sreq->alg == QCE_HASH_AES_CMAC) {
		/* reset auth seg cfg */
		QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
		/* reset encr seg cfg */
		QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
		/* reset encr seg size */
		QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);

		/* Clear auth_ivn, auth_keyn registers */
		for (i = 0; i < 16; i++) {
			QCE_WRITE_REG(0, (pce_dev->iobase +
				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
			QCE_WRITE_REG(0, (pce_dev->iobase +
				(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t))));
		}

		/* write auth_bytecnt 0/1/2/3, start with 0 */
		for (i = 0; i < 4; i++)
			QCE_WRITE_REG(0, pce_dev->iobase +
						CRYPTO_AUTH_BYTECNT0_REG +
						i * sizeof(uint32_t));

		/* CMAC config is keyed by AES key length (128 vs 256) */
		if (sreq->authklen == AES128_KEY_SIZE)
			auth_cfg = pce_dev->reg.auth_cfg_cmac_128;
		else
			auth_cfg = pce_dev->reg.auth_cfg_cmac_256;
	}

	if ((sreq->alg == QCE_HASH_SHA1_HMAC) ||
			(sreq->alg == QCE_HASH_SHA256_HMAC) ||
			(sreq->alg == QCE_HASH_AES_CMAC)) {
		_byte_stream_to_net_words(mackey32, sreq->authkey,
						sreq->authklen);

		/* no more check for null key. use flag to check */
		if ((sreq->flags & QCRYPTO_CTX_USE_HW_KEY) ==
					QCRYPTO_CTX_USE_HW_KEY) {
			use_hw_key = true;
		} else if ((sreq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
						QCRYPTO_CTX_USE_PIPE_KEY) {
			use_pipe_key = true;
		} else {
			/* setup key: software key written to AUTH_KEYn */
			for (i = 0; i < authk_size_in_word; i++)
				QCE_WRITE_REG(mackey32[i], (pce_dev->iobase +
					(CRYPTO_AUTH_KEY0_REG +
						i*sizeof(uint32_t))));
		}
	}

	/* CMAC needs no IV/bytecount/digest handling below */
	if (sreq->alg == QCE_HASH_AES_CMAC)
		goto go_proc;

	/* if not the last, the size has to be on the block boundary */
	if (!sreq->last_blk && (sreq->size % SHA256_BLOCK_SIZE))
		return -EIO;

	switch (sreq->alg) {
	case QCE_HASH_SHA1:
		auth_cfg = pce_dev->reg.auth_cfg_sha1;
		diglen = SHA1_DIGEST_SIZE;
		sha1 = true;
		break;
	case QCE_HASH_SHA1_HMAC:
		auth_cfg = pce_dev->reg.auth_cfg_hmac_sha1;
		diglen = SHA1_DIGEST_SIZE;
		sha1 = true;
		break;
	case QCE_HASH_SHA256:
		auth_cfg = pce_dev->reg.auth_cfg_sha256;
		diglen = SHA256_DIGEST_SIZE;
		break;
	case QCE_HASH_SHA256_HMAC:
		auth_cfg = pce_dev->reg.auth_cfg_hmac_sha256;
		diglen = SHA256_DIGEST_SIZE;
		break;
	default:
		return -EINVAL;
	}

	/* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */
	if (sreq->first_blk) {
		/* first block: load the standard initial hash vector */
		if (sha1) {
			for (i = 0; i < 5; i++)
				auth32[i] = _std_init_vector_sha1[i];
		} else {
			for (i = 0; i < 8; i++)
				auth32[i] = _std_init_vector_sha256[i];
		}
	} else {
		/* continuation: resume from the intermediate digest */
		_byte_stream_to_net_words(auth32, sreq->digest, diglen);
	}

	/* Set auth_ivn registers: first 5 words always ... */
	for (i = 0; i < 5; i++)
		QCE_WRITE_REG(auth32[i], (pce_dev->iobase +
			(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));

	/* ... and the remaining 3 words for the SHA256 variants */
	if ((sreq->alg == QCE_HASH_SHA256) ||
			(sreq->alg == QCE_HASH_SHA256_HMAC)) {
		for (i = 5; i < 8; i++)
			QCE_WRITE_REG(auth32[i], (pce_dev->iobase +
				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
	}

	/* write auth_bytecnt 0/1, carrying the running byte count */
	for (i = 0; i < 2; i++)
		QCE_WRITE_REG(sreq->auth_data[i], pce_dev->iobase +
					CRYPTO_AUTH_BYTECNT0_REG +
					i * sizeof(uint32_t));

	/* Set/reset last bit in CFG register */
	if (sreq->last_blk)
		auth_cfg |= 1 << CRYPTO_LAST;
	else
		auth_cfg &= ~(1 << CRYPTO_LAST);
	if (sreq->first_blk)
		auth_cfg |= 1 << CRYPTO_FIRST;
	else
		auth_cfg &= ~(1 << CRYPTO_FIRST);
	if (use_hw_key)
		auth_cfg |= 1 << CRYPTO_USE_HW_KEY_AUTH;
	if (use_pipe_key)
		auth_cfg |= 1 << CRYPTO_USE_PIPE_KEY_AUTH;
go_proc:
	/* write seg_cfg */
	QCE_WRITE_REG(auth_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
	/* write auth seg_size */
	QCE_WRITE_REG(sreq->size, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);

	/* write auth_seg_start */
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);

	/* reset encr seg_cfg */
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);

	/* write seg_size */
	QCE_WRITE_REG(sreq->size, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);

	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
							CRYPTO_CONFIG_REG));
	/* issue go to crypto; HW-bound keys go through the QC_KEY register */
	if (!use_hw_key) {
		QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
				(1 << CRYPTO_CLR_CNTXT)),
				pce_dev->iobase + CRYPTO_GOPROC_REG);
	} else {
		QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
				pce_dev->iobase + CRYPTO_GOPROC_QC_KEY_REG);
	}
	/*
	 * Ensure previous instructions (setting the GO register)
	 * was completed before issuing a DMA transfer request
	 */
	mb();
	return 0;
}
/*
 * Program the engine registers directly for an AEAD (cipher + HMAC)
 * request: DES/3DES/AES in CBC mode combined with SHA1/SHA256 HMAC.
 *
 * totallen_in covers the whole authenticated region; coffset is the
 * byte offset where the cipher segment starts inside it. HW/pipe keys
 * are not used on this path — both cipher and MAC keys are written.
 *
 * Returns 0 on success, -EINVAL for an unsupported algorithm, mode or
 * AES key size.
 */
static int _ce_setup_aead_direct(struct qce_device *pce_dev,
		struct qce_req *q_req, uint32_t totallen_in, uint32_t coffset)
{
	int32_t authk_size_in_word = SHA_HMAC_KEY_SIZE/sizeof(uint32_t);
	int i;
	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {0};
	uint32_t a_cfg;
	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE*2)/sizeof(uint32_t)] = {0};
	uint32_t enciv32[MAX_IV_LENGTH/sizeof(uint32_t)] = {0};
	uint32_t enck_size_in_word = 0;
	uint32_t enciv_in_word;
	uint32_t key_size;
	uint32_t ivsize = q_req->ivsize;
	uint32_t encr_cfg;

	/* clear status */
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);

	if (qce_crypto_config(pce_dev, q_req->offload_op))
		return -EINVAL;
	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
							CRYPTO_CONFIG_REG));
	/*
	 * Ensure previous instructions (setting the CONFIG register)
	 * was completed before issuing starting to set other config register
	 * This is to ensure the configurations are done in correct endian-ness
	 * as set in the CONFIG registers
	 */
	mb();

	key_size = q_req->encklen;
	enck_size_in_word = key_size/sizeof(uint32_t);

	/* only CBC mode is supported for each cipher algorithm here */
	switch (q_req->alg) {
	case CIPHER_ALG_DES:
		switch (q_req->mode) {
		case QCE_MODE_CBC:
			encr_cfg = pce_dev->reg.encr_cfg_des_cbc;
			break;
		default:
			return -EINVAL;
		}
		enciv_in_word = 2;
		break;
	case CIPHER_ALG_3DES:
		switch (q_req->mode) {
		case QCE_MODE_CBC:
			encr_cfg = pce_dev->reg.encr_cfg_3des_cbc;
			break;
		default:
			return -EINVAL;
		}
		enciv_in_word = 2;
		break;
	case CIPHER_ALG_AES:
		switch (q_req->mode) {
		case QCE_MODE_CBC:
			if (key_size == AES128_KEY_SIZE)
				encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128;
			else if (key_size == AES256_KEY_SIZE)
				encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256;
			else
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
		enciv_in_word = 4;
		break;
	default:
		return -EINVAL;
	}

	/* write CNTR0_IV0_REG */
	if (q_req->mode != QCE_MODE_ECB) {
		_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
		for (i = 0; i < enciv_in_word; i++)
			QCE_WRITE_REG(enciv32[i], pce_dev->iobase +
				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)));
	}

	/*
	 * write encr key
	 * do not use hw key or pipe key
	 */
	_byte_stream_to_net_words(enckey32, q_req->enckey, key_size);
	for (i = 0; i < enck_size_in_word; i++)
		QCE_WRITE_REG(enckey32[i], pce_dev->iobase +
			(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)));

	/* write encr seg cfg */
	if (q_req->dir == QCE_ENCRYPT)
		encr_cfg |= (1 << CRYPTO_ENCODE);
	QCE_WRITE_REG(encr_cfg, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);

	/* we only support sha1-hmac and sha256-hmac at this point */
	_byte_stream_to_net_words(mackey32, q_req->authkey,
					q_req->authklen);
	for (i = 0; i < authk_size_in_word; i++)
		QCE_WRITE_REG(mackey32[i], pce_dev->iobase +
			(CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)));

	/* load the standard initial vector: 5 words SHA1, 8 words SHA256 */
	if (q_req->auth_alg == QCE_HASH_SHA1_HMAC) {
		for (i = 0; i < 5; i++)
			QCE_WRITE_REG(_std_init_vector_sha1[i],
				pce_dev->iobase +
				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)));
	} else {
		for (i = 0; i < 8; i++)
			QCE_WRITE_REG(_std_init_vector_sha256[i],
				pce_dev->iobase +
				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)));
	}

	/* write auth_bytecnt 0/1, start with 0 */
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT0_REG);
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT1_REG);

	/* write encr seg size */
	QCE_WRITE_REG(q_req->cryptlen, pce_dev->iobase +
			CRYPTO_ENCR_SEG_SIZE_REG);

	/* write encr start */
	QCE_WRITE_REG(coffset & 0xffff, pce_dev->iobase +
			CRYPTO_ENCR_SEG_START_REG);

	if (q_req->auth_alg == QCE_HASH_SHA1_HMAC)
		a_cfg = pce_dev->reg.auth_cfg_aead_sha1_hmac;
	else
		a_cfg = pce_dev->reg.auth_cfg_aead_sha256_hmac;

	/* MAC is computed after the cipher on encrypt, before on decrypt */
	if (q_req->dir == QCE_ENCRYPT)
		a_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
	else
		a_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);

	/* write auth seg_cfg */
	QCE_WRITE_REG(a_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);

	/* write auth seg_size */
	QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);

	/* write auth_seg_start */
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);

	/* write seg_size */
	QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);

	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
							CRYPTO_CONFIG_REG));
	/* issue go to crypto */
	QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
			(1 << CRYPTO_CLR_CNTXT)),
			pce_dev->iobase + CRYPTO_GOPROC_REG);
	/*
	 * Ensure previous instructions (setting the GO register)
	 * was completed before issuing a DMA transfer request
	 */
	mb();
	return 0;
}
  1529. static int _ce_setup_cipher_direct(struct qce_device *pce_dev,
  1530. struct qce_req *creq, uint32_t totallen_in, uint32_t coffset)
  1531. {
  1532. uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = {
  1533. 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
  1534. uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
  1535. 0, 0, 0, 0};
  1536. uint32_t enck_size_in_word = 0;
  1537. uint32_t key_size;
  1538. bool use_hw_key = false;
  1539. bool use_pipe_key = false;
  1540. uint32_t encr_cfg = 0;
  1541. uint32_t ivsize = creq->ivsize;
  1542. int i;
  1543. /* clear status */
  1544. QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
  1545. if (qce_crypto_config(pce_dev, creq->offload_op))
  1546. return -EINVAL;
  1547. QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be,
  1548. (pce_dev->iobase + CRYPTO_CONFIG_REG));
  1549. QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le,
  1550. (pce_dev->iobase + CRYPTO_CONFIG_REG));
  1551. /*
  1552. * Ensure previous instructions (setting the CONFIG register)
  1553. * was completed before issuing starting to set other config register
  1554. * This is to ensure the configurations are done in correct endian-ness
  1555. * as set in the CONFIG registers
  1556. */
  1557. mb();
  1558. if (creq->mode == QCE_MODE_XTS)
  1559. key_size = creq->encklen/2;
  1560. else
  1561. key_size = creq->encklen;
  1562. if ((creq->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
  1563. use_hw_key = true;
  1564. } else {
  1565. if ((creq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
  1566. QCRYPTO_CTX_USE_PIPE_KEY)
  1567. use_pipe_key = true;
  1568. }
  1569. if (!use_pipe_key && !use_hw_key) {
  1570. _byte_stream_to_net_words(enckey32, creq->enckey, key_size);
  1571. enck_size_in_word = key_size/sizeof(uint32_t);
  1572. }
  1573. if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) {
  1574. uint32_t authklen32 = creq->encklen/sizeof(uint32_t);
  1575. uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t);
  1576. uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0};
  1577. uint32_t auth_cfg = 0;
  1578. /* Clear auth_ivn, auth_keyn registers */
  1579. for (i = 0; i < 16; i++) {
  1580. QCE_WRITE_REG(0, (pce_dev->iobase +
  1581. (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
  1582. QCE_WRITE_REG(0, (pce_dev->iobase +
  1583. (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t))));
  1584. }
  1585. /* write auth_bytecnt 0/1/2/3, start with 0 */
  1586. for (i = 0; i < 4; i++)
  1587. QCE_WRITE_REG(0, pce_dev->iobase +
  1588. CRYPTO_AUTH_BYTECNT0_REG +
  1589. i * sizeof(uint32_t));
  1590. /* write nonce */
  1591. _byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE);
  1592. for (i = 0; i < noncelen32; i++)
  1593. QCE_WRITE_REG(nonce32[i], pce_dev->iobase +
  1594. CRYPTO_AUTH_INFO_NONCE0_REG +
  1595. (i*sizeof(uint32_t)));
  1596. if (creq->authklen == AES128_KEY_SIZE)
  1597. auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_128;
  1598. else {
  1599. if (creq->authklen == AES256_KEY_SIZE)
  1600. auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_256;
  1601. }
  1602. if (creq->dir == QCE_ENCRYPT)
  1603. auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
  1604. else
  1605. auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
  1606. auth_cfg |= ((creq->authsize - 1) << CRYPTO_AUTH_SIZE);
  1607. if (use_hw_key) {
  1608. auth_cfg |= (1 << CRYPTO_USE_HW_KEY_AUTH);
  1609. } else {
  1610. auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
  1611. /* write auth key */
  1612. for (i = 0; i < authklen32; i++)
  1613. QCE_WRITE_REG(enckey32[i], pce_dev->iobase +
  1614. CRYPTO_AUTH_KEY0_REG + (i*sizeof(uint32_t)));
  1615. }
  1616. QCE_WRITE_REG(auth_cfg, pce_dev->iobase +
  1617. CRYPTO_AUTH_SEG_CFG_REG);
  1618. if (creq->dir == QCE_ENCRYPT) {
  1619. QCE_WRITE_REG(totallen_in, pce_dev->iobase +
  1620. CRYPTO_AUTH_SEG_SIZE_REG);
  1621. } else {
  1622. QCE_WRITE_REG((totallen_in - creq->authsize),
  1623. pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
  1624. }
  1625. QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
  1626. } else {
  1627. if (creq->op != QCE_REQ_AEAD)
  1628. QCE_WRITE_REG(0, pce_dev->iobase +
  1629. CRYPTO_AUTH_SEG_CFG_REG);
  1630. }
  1631. /*
  1632. * Ensure previous instructions (write to all AUTH registers)
  1633. * was completed before accessing a register that is not in
  1634. * in the same 1K range.
  1635. */
  1636. mb();
  1637. switch (creq->mode) {
  1638. case QCE_MODE_ECB:
  1639. if (key_size == AES128_KEY_SIZE)
  1640. encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_128;
  1641. else
  1642. encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_256;
  1643. break;
  1644. case QCE_MODE_CBC:
  1645. if (key_size == AES128_KEY_SIZE)
  1646. encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128;
  1647. else
  1648. encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256;
  1649. break;
  1650. case QCE_MODE_XTS:
  1651. if (key_size == AES128_KEY_SIZE)
  1652. encr_cfg = pce_dev->reg.encr_cfg_aes_xts_128;
  1653. else
  1654. encr_cfg = pce_dev->reg.encr_cfg_aes_xts_256;
  1655. break;
  1656. case QCE_MODE_CCM:
  1657. if (key_size == AES128_KEY_SIZE)
  1658. encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_128;
  1659. else
  1660. encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_256;
  1661. break;
  1662. case QCE_MODE_CTR:
  1663. default:
  1664. if (key_size == AES128_KEY_SIZE)
  1665. encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_128;
  1666. else
  1667. encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_256;
  1668. break;
  1669. }
  1670. switch (creq->alg) {
  1671. case CIPHER_ALG_DES:
  1672. if (creq->mode != QCE_MODE_ECB) {
  1673. encr_cfg = pce_dev->reg.encr_cfg_des_cbc;
  1674. _byte_stream_to_net_words(enciv32, creq->iv, ivsize);
  1675. QCE_WRITE_REG(enciv32[0], pce_dev->iobase +
  1676. CRYPTO_CNTR0_IV0_REG);
  1677. QCE_WRITE_REG(enciv32[1], pce_dev->iobase +
  1678. CRYPTO_CNTR1_IV1_REG);
  1679. } else {
  1680. encr_cfg = pce_dev->reg.encr_cfg_des_ecb;
  1681. }
  1682. if (!use_hw_key) {
  1683. QCE_WRITE_REG(enckey32[0], pce_dev->iobase +
  1684. CRYPTO_ENCR_KEY0_REG);
  1685. QCE_WRITE_REG(enckey32[1], pce_dev->iobase +
  1686. CRYPTO_ENCR_KEY1_REG);
  1687. }
  1688. break;
  1689. case CIPHER_ALG_3DES:
  1690. if (creq->mode != QCE_MODE_ECB) {
  1691. _byte_stream_to_net_words(enciv32, creq->iv, ivsize);
  1692. QCE_WRITE_REG(enciv32[0], pce_dev->iobase +
  1693. CRYPTO_CNTR0_IV0_REG);
  1694. QCE_WRITE_REG(enciv32[1], pce_dev->iobase +
  1695. CRYPTO_CNTR1_IV1_REG);
  1696. encr_cfg = pce_dev->reg.encr_cfg_3des_cbc;
  1697. } else {
  1698. encr_cfg = pce_dev->reg.encr_cfg_3des_ecb;
  1699. }
  1700. if (!use_hw_key) {
  1701. /* write encr key */
  1702. for (i = 0; i < 6; i++)
  1703. QCE_WRITE_REG(enckey32[0], (pce_dev->iobase +
  1704. (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t))));
  1705. }
  1706. break;
  1707. case CIPHER_ALG_AES:
  1708. default:
  1709. if (creq->mode == QCE_MODE_XTS) {
  1710. uint32_t xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)]
  1711. = {0, 0, 0, 0, 0, 0, 0, 0};
  1712. uint32_t xtsklen =
  1713. creq->encklen/(2 * sizeof(uint32_t));
  1714. if (!use_hw_key && !use_pipe_key) {
  1715. _byte_stream_to_net_words(xtskey32,
  1716. (creq->enckey + creq->encklen/2),
  1717. creq->encklen/2);
  1718. /* write xts encr key */
  1719. for (i = 0; i < xtsklen; i++)
  1720. QCE_WRITE_REG(xtskey32[i],
  1721. pce_dev->iobase +
  1722. CRYPTO_ENCR_XTS_KEY0_REG +
  1723. (i * sizeof(uint32_t)));
  1724. }
  1725. /* write xts du size */
  1726. switch (creq->flags & QCRYPTO_CTX_XTS_MASK) {
  1727. case QCRYPTO_CTX_XTS_DU_SIZE_512B:
  1728. QCE_WRITE_REG(
  1729. min((uint32_t)QCE_SECTOR_SIZE,
  1730. creq->cryptlen), pce_dev->iobase +
  1731. CRYPTO_ENCR_XTS_DU_SIZE_REG);
  1732. break;
  1733. case QCRYPTO_CTX_XTS_DU_SIZE_1KB:
  1734. QCE_WRITE_REG(
  1735. min((uint32_t)(QCE_SECTOR_SIZE * 2),
  1736. creq->cryptlen), pce_dev->iobase +
  1737. CRYPTO_ENCR_XTS_DU_SIZE_REG);
  1738. break;
  1739. default:
  1740. QCE_WRITE_REG(creq->cryptlen,
  1741. pce_dev->iobase +
  1742. CRYPTO_ENCR_XTS_DU_SIZE_REG);
  1743. break;
  1744. }
  1745. }
  1746. if (creq->mode != QCE_MODE_ECB) {
  1747. if (creq->mode == QCE_MODE_XTS)
  1748. _byte_stream_swap_to_net_words(enciv32,
  1749. creq->iv, ivsize);
  1750. else
  1751. _byte_stream_to_net_words(enciv32, creq->iv,
  1752. ivsize);
  1753. /* write encr cntr iv */
  1754. for (i = 0; i <= 3; i++)
  1755. QCE_WRITE_REG(enciv32[i], pce_dev->iobase +
  1756. CRYPTO_CNTR0_IV0_REG +
  1757. (i * sizeof(uint32_t)));
  1758. if (creq->mode == QCE_MODE_CCM) {
  1759. /* write cntr iv for ccm */
  1760. for (i = 0; i <= 3; i++)
  1761. QCE_WRITE_REG(enciv32[i],
  1762. pce_dev->iobase +
  1763. CRYPTO_ENCR_CCM_INT_CNTR0_REG +
  1764. (i * sizeof(uint32_t)));
  1765. /* update cntr_iv[3] by one */
  1766. QCE_WRITE_REG((enciv32[3] + 1),
  1767. pce_dev->iobase +
  1768. CRYPTO_CNTR0_IV0_REG +
  1769. (3 * sizeof(uint32_t)));
  1770. }
  1771. }
  1772. if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
  1773. encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
  1774. CRYPTO_ENCR_KEY_SZ);
  1775. } else {
  1776. if (!use_hw_key && !use_pipe_key) {
  1777. for (i = 0; i < enck_size_in_word; i++)
  1778. QCE_WRITE_REG(enckey32[i],
  1779. pce_dev->iobase +
  1780. CRYPTO_ENCR_KEY0_REG +
  1781. (i * sizeof(uint32_t)));
  1782. }
  1783. } /* else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
  1784. break;
  1785. } /* end of switch (creq->mode) */
  1786. if (use_pipe_key)
  1787. encr_cfg |= (CRYPTO_USE_PIPE_KEY_ENCR_ENABLED
  1788. << CRYPTO_USE_PIPE_KEY_ENCR);
  1789. /* write encr seg cfg */
  1790. encr_cfg |= ((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;
  1791. if (use_hw_key)
  1792. encr_cfg |= (CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
  1793. else
  1794. encr_cfg &= ~(CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
  1795. /* write encr seg cfg */
  1796. QCE_WRITE_REG(encr_cfg, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
  1797. /* write encr seg size */
  1798. if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT)) {
  1799. QCE_WRITE_REG((creq->cryptlen + creq->authsize),
  1800. pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
  1801. } else {
  1802. QCE_WRITE_REG(creq->cryptlen,
  1803. pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
  1804. }
  1805. /* write pattern */
  1806. if (creq->is_pattern_valid)
  1807. QCE_WRITE_REG(creq->pattern_info, pce_dev->iobase +
  1808. CRYPTO_DATA_PATT_PROC_CFG_REG);
  1809. /* write block offset to CRYPTO_DATA_PARTIAL_BLOCK_PROC_CFG? */
  1810. QCE_WRITE_REG(((creq->block_offset << 4) |
  1811. (creq->block_offset ? 1 : 0)),
  1812. pce_dev->iobase + CRYPTO_DATA_PARTIAL_BLOCK_PROC_CFG_REG);
  1813. /* write encr seg start */
  1814. QCE_WRITE_REG((coffset & 0xffff),
  1815. pce_dev->iobase + CRYPTO_ENCR_SEG_START_REG);
  1816. /* write encr counter mask */
  1817. qce_set_iv_ctr_mask(pce_dev, creq);
  1818. QCE_WRITE_REG(pce_dev->reg.encr_cntr_mask_3,
  1819. pce_dev->iobase + CRYPTO_CNTR_MASK_REG);
  1820. QCE_WRITE_REG(pce_dev->reg.encr_cntr_mask_2,
  1821. pce_dev->iobase + CRYPTO_CNTR_MASK_REG2);
  1822. QCE_WRITE_REG(pce_dev->reg.encr_cntr_mask_1,
  1823. pce_dev->iobase + CRYPTO_CNTR_MASK_REG1);
  1824. QCE_WRITE_REG(pce_dev->reg.encr_cntr_mask_0,
  1825. pce_dev->iobase + CRYPTO_CNTR_MASK_REG0);
  1826. /* write seg size */
  1827. QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
  1828. /* issue go to crypto */
  1829. if (!use_hw_key) {
  1830. QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
  1831. (1 << CRYPTO_CLR_CNTXT)),
  1832. pce_dev->iobase + CRYPTO_GOPROC_REG);
  1833. } else {
  1834. QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
  1835. pce_dev->iobase + CRYPTO_GOPROC_QC_KEY_REG);
  1836. }
  1837. /*
  1838. * Ensure previous instructions (setting the GO register)
  1839. * was completed before issuing a DMA transfer request
  1840. */
  1841. mb();
  1842. return 0;
  1843. }
/*
 * Program the engine registers directly (no command list) for an OTA F9
 * (integrity/MAC) request using Kasumi or Snow3G.
 *
 * Writes the integrity key into AUTH_IV0-3, the last-bits/fresh/count-i
 * values into their registers, configures the auth segment, and issues
 * GO. The encr segment is zeroed since F9 performs no ciphering.
 *
 * Returns 0 on success, -EINVAL if the crypto config cannot be applied.
 */
static int _ce_f9_setup_direct(struct qce_device *pce_dev,
				struct qce_f9_req *req)
{
	uint32_t ikey32[OTA_KEY_SIZE/sizeof(uint32_t)];
	uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
	uint32_t auth_cfg;
	int i;

	/* pick the auth configuration for the requested OTA algorithm */
	switch (req->algorithm) {
	case QCE_OTA_ALGO_KASUMI:
		auth_cfg = pce_dev->reg.auth_cfg_kasumi;
		break;
	case QCE_OTA_ALGO_SNOW3G:
	default:
		auth_cfg = pce_dev->reg.auth_cfg_snow3g;
		break;
	}

	if (qce_crypto_config(pce_dev, QCE_OFFLOAD_NONE))
		return -EINVAL;

	/* clear status */
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);

	/* set big endian configuration */
	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
							CRYPTO_CONFIG_REG));
	/*
	 * Ensure previous instructions (setting the CONFIG register)
	 * was completed before issuing starting to set other config register
	 * This is to ensure the configurations are done in correct endian-ness
	 * as set in the CONFIG registers
	 */
	mb();

	/* write enc_seg_cfg */
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
	/* write ecn_seg_size */
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);

	/* write key in CRYPTO_AUTH_IV0-3_REG */
	_byte_stream_to_net_words(ikey32, &req->ikey[0], OTA_KEY_SIZE);
	for (i = 0; i < key_size_in_word; i++)
		QCE_WRITE_REG(ikey32[i], (pce_dev->iobase +
			(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));

	/* write last bits in CRYPTO_AUTH_IV4_REG */
	QCE_WRITE_REG(req->last_bits, (pce_dev->iobase +
					CRYPTO_AUTH_IV4_REG));

	/* write fresh to CRYPTO_AUTH_BYTECNT0_REG */
	QCE_WRITE_REG(req->fresh, (pce_dev->iobase +
					CRYPTO_AUTH_BYTECNT0_REG));

	/* write count-i to CRYPTO_AUTH_BYTECNT1_REG */
	QCE_WRITE_REG(req->count_i, (pce_dev->iobase +
					CRYPTO_AUTH_BYTECNT1_REG));

	/* write auth seg cfg */
	if (req->direction == QCE_OTA_DIR_DOWNLINK)
		auth_cfg |= BIT(CRYPTO_F9_DIRECTION);
	QCE_WRITE_REG(auth_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);

	/* write auth seg size */
	QCE_WRITE_REG(req->msize, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);

	/* write auth seg start*/
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);

	/* write seg size */
	QCE_WRITE_REG(req->msize, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);

	/* set little endian configuration before go*/
	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
							CRYPTO_CONFIG_REG));
	/* write go */
	QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
				(1 << CRYPTO_CLR_CNTXT)),
				pce_dev->iobase + CRYPTO_GOPROC_REG);
	/*
	 * Ensure previous instructions (setting the GO register)
	 * was completed before issuing a DMA transfer request
	 */
	mb();
	return 0;
}
/*
 * Program the crypto engine registers directly (register writes, no BAM
 * command list) for an OTA f8 (ciphering) request and start processing.
 *
 * @pce_dev:         crypto engine device whose iobase is programmed
 * @req:             f8 request carrying algorithm, key, count-C, bearer,
 *                   direction and data length
 * @key_stream_mode: true to enable keystream generation mode
 * @npkts:           packet count written to CNTR1_IV1 (1 is encoded as 0)
 * @cipher_offset:   byte offset of ciphered region within the segment
 * @cipher_size:     size of the ciphered region
 *
 * Return: 0 on success, -EINVAL if the non-offload crypto config
 * cannot be applied.
 */
static int _ce_f8_setup_direct(struct qce_device *pce_dev,
		struct qce_f8_req *req, bool key_stream_mode,
		uint16_t npkts, uint16_t cipher_offset, uint16_t cipher_size)
{
	int i = 0;
	uint32_t encr_cfg = 0;
	uint32_t ckey32[OTA_KEY_SIZE/sizeof(uint32_t)];
	uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);

	/* Pick encr config for the requested algorithm; anything
	 * unrecognized falls back to SNOW3G. */
	switch (req->algorithm) {
	case QCE_OTA_ALGO_KASUMI:
		encr_cfg = pce_dev->reg.encr_cfg_kasumi;
		break;
	case QCE_OTA_ALGO_SNOW3G:
	default:
		encr_cfg = pce_dev->reg.encr_cfg_snow3g;
		break;
	}

	/* clear status */
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);

	/* set big endian configuration */
	if (qce_crypto_config(pce_dev, QCE_OFFLOAD_NONE))
		return -EINVAL;
	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
				CRYPTO_CONFIG_REG));
	/*
	 * NOTE(review): unlike _ce_f9_setup_direct(), there is no mb()
	 * here between the big-endian CONFIG write and the register writes
	 * below - confirm whether that barrier is intentionally omitted.
	 */

	/* write auth seg configuration: no auth for an f8 (cipher-only) op */
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
	/* write auth seg size */
	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);

	/* write key */
	_byte_stream_to_net_words(ckey32, &req->ckey[0], OTA_KEY_SIZE);

	for (i = 0; i < key_size_in_word; i++)
		QCE_WRITE_REG(ckey32[i], (pce_dev->iobase +
			(CRYPTO_ENCR_KEY0_REG + i*sizeof(uint32_t))));
	/* write encr seg cfg */
	if (key_stream_mode)
		encr_cfg |= BIT(CRYPTO_F8_KEYSTREAM_ENABLE);
	if (req->direction == QCE_OTA_DIR_DOWNLINK)
		encr_cfg |= BIT(CRYPTO_F8_DIRECTION);
	QCE_WRITE_REG(encr_cfg, pce_dev->iobase +
				CRYPTO_ENCR_SEG_CFG_REG);

	/* write encr seg start */
	QCE_WRITE_REG((cipher_offset & 0xffff), pce_dev->iobase +
				CRYPTO_ENCR_SEG_START_REG);
	/* write encr seg size  */
	QCE_WRITE_REG(cipher_size, pce_dev->iobase +
				CRYPTO_ENCR_SEG_SIZE_REG);

	/* write seg size  */
	QCE_WRITE_REG(req->data_len, pce_dev->iobase +
				CRYPTO_SEG_SIZE_REG);

	/* write cntr0_iv0 for countC */
	QCE_WRITE_REG(req->count_c, pce_dev->iobase +
				CRYPTO_CNTR0_IV0_REG);
	/* write cntr1_iv1 for nPkts, and bearer */
	if (npkts == 1)
		npkts = 0;
	QCE_WRITE_REG(req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER |
				npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT,
			pce_dev->iobase + CRYPTO_CNTR1_IV1_REG);

	/* set little endian configuration before go*/
	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
				CRYPTO_CONFIG_REG));
	/* write go */
	QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
				(1 << CRYPTO_CLR_CNTXT)),
				pce_dev->iobase + CRYPTO_GOPROC_REG);
	/*
	 * Ensure previous instructions (setting the GO register)
	 * was completed before issuing a DMA transfer request
	 */
	mb();
	return 0;
}
  1988. static int _qce_unlock_other_pipes(struct qce_device *pce_dev, int req_info)
  1989. {
  1990. int rc = 0;
  1991. struct ce_sps_data *pce_sps_data = &pce_dev->ce_request_info
  1992. [req_info].ce_sps;
  1993. uint16_t op = pce_dev->ce_request_info[req_info].offload_op;
  1994. if (pce_dev->no_get_around || !pce_dev->support_cmd_dscr)
  1995. return rc;
  1996. rc = sps_transfer_one(pce_dev->ce_bam_info.consumer[op].pipe,
  1997. GET_PHYS_ADDR(
  1998. pce_sps_data->cmdlistptr.unlock_all_pipes.cmdlist),
  1999. 0, NULL, (SPS_IOVEC_FLAG_CMD | SPS_IOVEC_FLAG_UNLOCK));
  2000. if (rc) {
  2001. pr_err("sps_xfr_one() fail rc=%d\n", rc);
  2002. rc = -EINVAL;
  2003. }
  2004. return rc;
  2005. }
  2006. static inline void qce_free_req_info(struct qce_device *pce_dev, int req_info,
  2007. bool is_complete);
  2008. static int qce_sps_pipe_reset(struct qce_device *pce_dev, int op)
  2009. {
  2010. int rc = -1;
  2011. struct sps_pipe *sps_pipe_info = NULL;
  2012. struct sps_connect *sps_connect_info = NULL;
  2013. /* Reset both the pipe sets in the pipe group */
  2014. sps_pipe_reset(pce_dev->ce_bam_info.bam_handle,
  2015. pce_dev->ce_bam_info.dest_pipe_index[op]);
  2016. sps_pipe_reset(pce_dev->ce_bam_info.bam_handle,
  2017. pce_dev->ce_bam_info.src_pipe_index[op]);
  2018. /* Reconnect to consumer pipe */
  2019. sps_pipe_info = pce_dev->ce_bam_info.consumer[op].pipe;
  2020. sps_connect_info = &pce_dev->ce_bam_info.consumer[op].connect;
  2021. rc = sps_disconnect(sps_pipe_info);
  2022. if (rc) {
  2023. pr_err("sps_disconnect() fail pipe=0x%lx, rc = %d\n",
  2024. (uintptr_t)sps_pipe_info, rc);
  2025. goto exit;
  2026. }
  2027. memset(sps_connect_info->desc.base, 0x00,
  2028. sps_connect_info->desc.size);
  2029. rc = sps_connect(sps_pipe_info, sps_connect_info);
  2030. if (rc) {
  2031. pr_err("sps_connect() fail pipe=0x%lx, rc = %d\n",
  2032. (uintptr_t)sps_pipe_info, rc);
  2033. goto exit;
  2034. }
  2035. /* Reconnect to producer pipe */
  2036. sps_pipe_info = pce_dev->ce_bam_info.producer[op].pipe;
  2037. sps_connect_info = &pce_dev->ce_bam_info.producer[op].connect;
  2038. rc = sps_disconnect(sps_pipe_info);
  2039. if (rc) {
  2040. pr_err("sps_connect() fail pipe=0x%lx, rc = %d\n",
  2041. (uintptr_t)sps_pipe_info, rc);
  2042. goto exit;
  2043. }
  2044. memset(sps_connect_info->desc.base, 0x00,
  2045. sps_connect_info->desc.size);
  2046. rc = sps_connect(sps_pipe_info, sps_connect_info);
  2047. if (rc) {
  2048. pr_err("sps_connect() fail pipe=0x%lx, rc = %d\n",
  2049. (uintptr_t)sps_pipe_info, rc);
  2050. goto exit;
  2051. }
  2052. /* Register producer callback */
  2053. rc = sps_register_event(sps_pipe_info,
  2054. &pce_dev->ce_bam_info.producer[op].event);
  2055. if (rc)
  2056. pr_err("Producer cb registration failed rc = %d\n",
  2057. rc);
  2058. exit:
  2059. return rc;
  2060. }
/*
 * Recover a crypto engine request that has timed out.
 *
 * Resets the BAM pipe pair used by the request's offload operation,
 * re-enables clock gating, unlocks any other pipes held by this
 * request, and finally releases the request slot. Recovery continues
 * past individual step failures; they are only logged.
 *
 * @handle:   qce device handle as returned to the client
 * @req_info: index of the timed-out request
 *
 * Return: 0 on success, -ENXIO if the request already completed.
 */
int qce_manage_timeout(void *handle, int req_info)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	struct skcipher_request *areq;
	struct ce_request_info *preq_info;
	qce_comp_func_ptr_t qce_callback;
	uint16_t op = pce_dev->ce_request_info[req_info].offload_op;

	preq_info = &pce_dev->ce_request_info[req_info];
	/* NOTE(review): qce_callback and areq are fetched but unused here */
	qce_callback = preq_info->qce_cb;
	areq = (struct skcipher_request *) preq_info->areq;

	pr_info("%s: req info = %d, offload op = %d\n", __func__, req_info, op);
	if (qce_sps_pipe_reset(pce_dev, op))
		pr_err("%s: pipe reset failed\n", __func__);
	qce_enable_clock_gating(pce_dev);
	if (_qce_unlock_other_pipes(pce_dev, req_info))
		pr_err("%s: fail unlock other pipes\n", __func__);

	/* Bail out if the request completed while we were recovering */
	if (!atomic_read(&preq_info->in_use)) {
		pr_err("request information %d already done\n", req_info);
		return -ENXIO;
	}
	qce_free_req_info(pce_dev, req_info, true);
	return 0;
}
EXPORT_SYMBOL(qce_manage_timeout);
/*
 * Completion handler for an AEAD request: unmap DMA buffers, capture the
 * MAC and status from the result dump, unlock the other pipes, and invoke
 * the client callback with the outcome.
 *
 * Return: 0 if the callback was invoked, -ENXIO on unlock failure or if
 * the request slot was already released.
 */
static int _aead_complete(struct qce_device *pce_dev, int req_info)
{
	struct aead_request *areq;
	unsigned char mac[SHA256_DIGEST_SIZE];
	uint32_t ccm_fail_status = 0;
	uint32_t result_dump_status = 0;
	int32_t result_status = 0;
	struct ce_request_info *preq_info;
	struct ce_sps_data *pce_sps_data;
	qce_comp_func_ptr_t qce_callback;

	preq_info = &pce_dev->ce_request_info[req_info];
	pce_sps_data = &preq_info->ce_sps;
	qce_callback = preq_info->qce_cb;
	areq = (struct aead_request *) preq_info->areq;

	/* Unmap dst separately only for out-of-place operations */
	if (areq->src != areq->dst) {
		qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
					DMA_FROM_DEVICE);
	}
	qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
							DMA_TO_DEVICE);

	if (preq_info->asg)
		qce_dma_unmap_sg(pce_dev->pdev, preq_info->asg,
			preq_info->assoc_nents, DMA_TO_DEVICE);
	/* check MAC */
	memcpy(mac, (char *)(&pce_sps_data->result->auth_iv[0]),
						SHA256_DIGEST_SIZE);

	/* read status before unlock */
	/*
	 * For decrypt, pick the CCM MAC status from the appropriate source:
	 * result dump, the null-result dump (workaround path), or a direct
	 * read of the STATUS register on older targets. The else branches
	 * below pair with the nearest if (standard dangling-else rules),
	 * matching the indentation.
	 */
	if (preq_info->dir == QCE_DECRYPT) {
		if (pce_dev->no_get_around)
			if (pce_dev->no_ccm_mac_status_get_around)
				ccm_fail_status =
				be32_to_cpu(pce_sps_data->result->status);
			else
				ccm_fail_status =
				be32_to_cpu(pce_sps_data->result_null->status);
		else
			ccm_fail_status = readl_relaxed(pce_dev->iobase +
					CRYPTO_STATUS_REG);
	}
	if (_qce_unlock_other_pipes(pce_dev, req_info)) {
		qce_free_req_info(pce_dev, req_info, true);
		qce_callback(areq, mac, NULL, -ENXIO);
		return -ENXIO;
	}
	result_dump_status = be32_to_cpu(pce_sps_data->result->status);
	pce_sps_data->result->status = 0;

	/* Any engine error bit or SPS pipe error maps to -ENXIO */
	if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
			| (1 <<  CRYPTO_HSD_ERR))) {
		pr_err("aead operation error. Status %x\n", result_dump_status);
		result_status = -ENXIO;
	} else if (pce_sps_data->consumer_status |
			pce_sps_data->producer_status)  {
		pr_err("aead sps operation error. sps status %x %x\n",
				pce_sps_data->consumer_status,
				pce_sps_data->producer_status);
		result_status = -ENXIO;
	}

	if (!atomic_read(&preq_info->in_use)) {
		pr_err("request information %d already done\n", req_info);
		return -ENXIO;
	}
	if (preq_info->mode == QCE_MODE_CCM) {
		/*
		 * Not from result dump, instead, use the status we just
		 * read of device for MAC_FAILED.
		 */
		if (result_status == 0 && (preq_info->dir == QCE_DECRYPT) &&
				(ccm_fail_status & (1 << CRYPTO_MAC_FAILED)))
			result_status = -EBADMSG;
		qce_free_req_info(pce_dev, req_info, true);
		qce_callback(areq, mac, NULL, result_status);

	} else {
		uint32_t ivsize = 0;
		struct crypto_aead *aead;
		unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE];

		/* Non-CCM modes hand the updated counter/IV back to the client */
		aead = crypto_aead_reqtfm(areq);
		ivsize = crypto_aead_ivsize(aead);
		memcpy(iv, (char *)(pce_sps_data->result->encr_cntr_iv),
			sizeof(iv));
		qce_free_req_info(pce_dev, req_info, true);
		qce_callback(areq, mac, iv, result_status);

	}
	return 0;
}
/*
 * Completion handler for a hash (SHA) request: unmap the source
 * scatterlist, copy out the digest and byte counts from the result
 * dump, unlock the other pipes, and invoke the client callback.
 *
 * Return: 0 if the callback was invoked, -ENXIO on a NULL request,
 * unlock failure, or if the request slot was already released.
 */
static int _sha_complete(struct qce_device *pce_dev, int req_info)
{
	struct ahash_request *areq;
	unsigned char digest[SHA256_DIGEST_SIZE];
	uint32_t bytecount32[2];
	int32_t result_status = 0;
	uint32_t result_dump_status;
	struct ce_request_info *preq_info;
	struct ce_sps_data *pce_sps_data;
	qce_comp_func_ptr_t qce_callback;

	preq_info = &pce_dev->ce_request_info[req_info];
	pce_sps_data = &preq_info->ce_sps;
	qce_callback = preq_info->qce_cb;
	areq = (struct ahash_request *) preq_info->areq;
	if (!areq) {
		pr_err("sha operation error. areq is NULL\n");
		return -ENXIO;
	}
	qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
				DMA_TO_DEVICE);
	/* Digest is delivered in the auth_iv words of the result dump */
	memcpy(digest, (char *)(&pce_sps_data->result->auth_iv[0]),
						SHA256_DIGEST_SIZE);
	_byte_stream_to_net_words(bytecount32,
		(unsigned char *)pce_sps_data->result->auth_byte_count,
					2 * CRYPTO_REG_SIZE);

	if (_qce_unlock_other_pipes(pce_dev, req_info)) {
		qce_free_req_info(pce_dev, req_info, true);
		qce_callback(areq, digest, (char *)bytecount32,
				-ENXIO);
		return -ENXIO;
	}

	result_dump_status = be32_to_cpu(pce_sps_data->result->status);
	pce_sps_data->result->status = 0;
	/* Engine error bits or a consumer pipe error map to -ENXIO */
	if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
			| (1 <<  CRYPTO_HSD_ERR))) {

		pr_err("sha operation error. Status %x\n", result_dump_status);
		result_status = -ENXIO;
	} else if (pce_sps_data->consumer_status) {
		pr_err("sha sps operation error. sps status %x\n",
			pce_sps_data->consumer_status);
		result_status = -ENXIO;
	}

	if (!atomic_read(&preq_info->in_use)) {
		pr_err("request information %d already done\n", req_info);
		return -ENXIO;
	}
	qce_free_req_info(pce_dev, req_info, true);
	qce_callback(areq, digest, (char *)bytecount32, result_status);
	return 0;
}
  2220. static int _f9_complete(struct qce_device *pce_dev, int req_info)
  2221. {
  2222. uint32_t mac_i;
  2223. int32_t result_status = 0;
  2224. uint32_t result_dump_status;
  2225. struct ce_request_info *preq_info;
  2226. struct ce_sps_data *pce_sps_data;
  2227. qce_comp_func_ptr_t qce_callback;
  2228. void *areq;
  2229. preq_info = &pce_dev->ce_request_info[req_info];
  2230. pce_sps_data = &preq_info->ce_sps;
  2231. qce_callback = preq_info->qce_cb;
  2232. areq = preq_info->areq;
  2233. dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
  2234. preq_info->ota_size, DMA_TO_DEVICE);
  2235. _byte_stream_to_net_words(&mac_i,
  2236. (char *)(&pce_sps_data->result->auth_iv[0]),
  2237. CRYPTO_REG_SIZE);
  2238. if (_qce_unlock_other_pipes(pce_dev, req_info)) {
  2239. qce_free_req_info(pce_dev, req_info, true);
  2240. qce_callback(areq, NULL, NULL, -ENXIO);
  2241. return -ENXIO;
  2242. }
  2243. result_dump_status = be32_to_cpu(pce_sps_data->result->status);
  2244. pce_sps_data->result->status = 0;
  2245. if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
  2246. | (1 << CRYPTO_HSD_ERR))) {
  2247. pr_err("f9 operation error. Status %x\n", result_dump_status);
  2248. result_status = -ENXIO;
  2249. } else if (pce_sps_data->consumer_status |
  2250. pce_sps_data->producer_status) {
  2251. pr_err("f9 sps operation error. sps status %x %x\n",
  2252. pce_sps_data->consumer_status,
  2253. pce_sps_data->producer_status);
  2254. result_status = -ENXIO;
  2255. }
  2256. qce_free_req_info(pce_dev, req_info, true);
  2257. qce_callback(areq, (char *)&mac_i, NULL, result_status);
  2258. return 0;
  2259. }
/*
 * Completion handler for a skcipher request: unmap DMA buffers (for
 * non-offload operations), unlock the other pipes, evaluate the result
 * dump, reconstruct the IV to return to the client, and invoke the
 * client callback.
 *
 * Return: 0 if the callback was invoked, -ENXIO on unlock failure or
 * if the request slot was already released.
 */
static int _ablk_cipher_complete(struct qce_device *pce_dev, int req_info)
{
	struct skcipher_request *areq;
	unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE];
	int32_t result_status = 0;
	uint32_t result_dump_status;
	struct ce_request_info *preq_info;
	struct ce_sps_data *pce_sps_data;
	qce_comp_func_ptr_t qce_callback;

	preq_info = &pce_dev->ce_request_info[req_info];
	pce_sps_data = &preq_info->ce_sps;
	qce_callback = preq_info->qce_cb;
	areq = (struct skcipher_request *) preq_info->areq;

	/* Offload operations manage their own buffers; skip unmapping */
	if (!is_offload_op(preq_info->offload_op)) {
		if (areq->src != areq->dst)
			qce_dma_unmap_sg(pce_dev->pdev, areq->dst,
				preq_info->dst_nents, DMA_FROM_DEVICE);
		qce_dma_unmap_sg(pce_dev->pdev, areq->src,
				preq_info->src_nents,
				(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
				DMA_TO_DEVICE);
	}

	if (_qce_unlock_other_pipes(pce_dev, req_info)) {
		qce_free_req_info(pce_dev, req_info, true);
		qce_callback(areq, NULL, NULL, -ENXIO);
		return -ENXIO;
	}
	result_dump_status = be32_to_cpu(pce_sps_data->result->status);
	pce_sps_data->result->status = 0;

	/* Engine error bits are only consulted for non-offload operations */
	if (!is_offload_op(preq_info->offload_op)) {
		if (result_dump_status & ((1 << CRYPTO_SW_ERR) |
			(1 << CRYPTO_AXI_ERR) | (1 << CRYPTO_HSD_ERR))) {
			pr_err("ablk_cipher operation error. Status %x\n",
				result_dump_status);
			result_status = -ENXIO;
		}
	}

	if (pce_sps_data->consumer_status |
		pce_sps_data->producer_status)  {
		pr_err("ablk_cipher sps operation error. sps status %x %x\n",
				pce_sps_data->consumer_status,
				pce_sps_data->producer_status);
		result_status = -ENXIO;
	}

	if (preq_info->mode == QCE_MODE_ECB) {
		/* ECB has no IV to return */
		qce_free_req_info(pce_dev, req_info, true);
		qce_callback(areq, NULL, NULL, pce_sps_data->consumer_status |
					result_status);
	} else {
		if (pce_dev->ce_bam_info.minor_version == 0) {
			/*
			 * Old hardware does not dump the updated IV;
			 * reconstruct it in software.
			 */
			if (preq_info->mode == QCE_MODE_CBC) {
				/* CBC: decrypt uses the saved IV, encrypt
				 * takes the last ciphertext block. */
				if  (preq_info->dir == QCE_DECRYPT)
					memcpy(iv, (char *)preq_info->dec_iv,
								sizeof(iv));
				else
					memcpy(iv, (unsigned char *)
						(sg_virt(areq->src) +
						areq->src->length - 16),
						sizeof(iv));
			}
			if ((preq_info->mode == QCE_MODE_CTR) ||
				(preq_info->mode == QCE_MODE_XTS)) {
				uint32_t num_blk = 0;
				uint32_t cntr_iv3 = 0;
				unsigned long long cntr_iv64 = 0;
				unsigned char *b = (unsigned char *)(&cntr_iv3);

				memcpy(iv, areq->iv, sizeof(iv));
				if (preq_info->mode != QCE_MODE_XTS)
					num_blk = areq->cryptlen/16;
				else
					num_blk = 1;
				/*
				 * Advance the big-endian 32-bit counter in
				 * iv[12..15] by num_blk, with 32-bit
				 * wrap-around, then write it back
				 * byte-reversed via *b.
				 */
				cntr_iv3 = ((*(iv + 12) << 24) & 0xff000000) |
					(((*(iv + 13)) << 16) & 0xff0000) |
					(((*(iv + 14)) << 8) & 0xff00) |
					(*(iv + 15) & 0xff);
				cntr_iv64 =
					(((unsigned long long)cntr_iv3 &
					0xFFFFFFFFULL) +
					(unsigned long long)num_blk) %
					(unsigned long long)(0x100000000ULL);
				cntr_iv3 = (u32)(cntr_iv64 & 0xFFFFFFFF);
				*(iv + 15) = (char)(*b);
				*(iv + 14) = (char)(*(b + 1));
				*(iv + 13) = (char)(*(b + 2));
				*(iv + 12) = (char)(*(b + 3));
			}
		} else {
			/* Newer hardware dumps the updated counter/IV */
			memcpy(iv,
				(char *)(pce_sps_data->result->encr_cntr_iv),
				sizeof(iv));
		}

		if (!atomic_read(&preq_info->in_use)) {
			pr_err("request information %d already done\n", req_info);
			return -ENXIO;
		}
		qce_free_req_info(pce_dev, req_info, true);
		qce_callback(areq, NULL, iv, result_status);
	}
	return 0;
}
  2360. static int _f8_complete(struct qce_device *pce_dev, int req_info)
  2361. {
  2362. int32_t result_status = 0;
  2363. uint32_t result_dump_status;
  2364. uint32_t result_dump_status2;
  2365. struct ce_request_info *preq_info;
  2366. struct ce_sps_data *pce_sps_data;
  2367. qce_comp_func_ptr_t qce_callback;
  2368. void *areq;
  2369. preq_info = &pce_dev->ce_request_info[req_info];
  2370. pce_sps_data = &preq_info->ce_sps;
  2371. qce_callback = preq_info->qce_cb;
  2372. areq = preq_info->areq;
  2373. if (preq_info->phy_ota_dst)
  2374. dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst,
  2375. preq_info->ota_size, DMA_FROM_DEVICE);
  2376. if (preq_info->phy_ota_src)
  2377. dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
  2378. preq_info->ota_size, (preq_info->phy_ota_dst) ?
  2379. DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
  2380. if (_qce_unlock_other_pipes(pce_dev, req_info)) {
  2381. qce_free_req_info(pce_dev, req_info, true);
  2382. qce_callback(areq, NULL, NULL, -ENXIO);
  2383. return -ENXIO;
  2384. }
  2385. result_dump_status = be32_to_cpu(pce_sps_data->result->status);
  2386. result_dump_status2 = be32_to_cpu(pce_sps_data->result->status2);
  2387. if ((result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
  2388. | (1 << CRYPTO_HSD_ERR)))) {
  2389. pr_err(
  2390. "f8 oper error. Dump Sta %x Sta2 %x req %d\n",
  2391. result_dump_status, result_dump_status2, req_info);
  2392. result_status = -ENXIO;
  2393. } else if (pce_sps_data->consumer_status |
  2394. pce_sps_data->producer_status) {
  2395. pr_err("f8 sps operation error. sps status %x %x\n",
  2396. pce_sps_data->consumer_status,
  2397. pce_sps_data->producer_status);
  2398. result_status = -ENXIO;
  2399. }
  2400. pce_sps_data->result->status = 0;
  2401. pce_sps_data->result->status2 = 0;
  2402. qce_free_req_info(pce_dev, req_info, true);
  2403. qce_callback(areq, NULL, NULL, result_status);
  2404. return 0;
  2405. }
  2406. static void _qce_sps_iovec_count_init(struct qce_device *pce_dev, int req_info)
  2407. {
  2408. struct ce_sps_data *pce_sps_data = &pce_dev->ce_request_info[req_info]
  2409. .ce_sps;
  2410. pce_sps_data->in_transfer.iovec_count = 0;
  2411. pce_sps_data->out_transfer.iovec_count = 0;
  2412. }
  2413. static void _qce_set_flag(struct sps_transfer *sps_bam_pipe, uint32_t flag)
  2414. {
  2415. struct sps_iovec *iovec;
  2416. if (sps_bam_pipe->iovec_count == 0)
  2417. return;
  2418. iovec = sps_bam_pipe->iovec + (sps_bam_pipe->iovec_count - 1);
  2419. iovec->flags |= flag;
  2420. }
  2421. static int _qce_sps_add_data(dma_addr_t paddr, uint32_t len,
  2422. struct sps_transfer *sps_bam_pipe)
  2423. {
  2424. struct sps_iovec *iovec = sps_bam_pipe->iovec +
  2425. sps_bam_pipe->iovec_count;
  2426. uint32_t data_cnt;
  2427. while (len > 0) {
  2428. if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
  2429. pr_err("Num of descrptor %d exceed max (%d)\n",
  2430. sps_bam_pipe->iovec_count,
  2431. (uint32_t)QCE_MAX_NUM_DSCR);
  2432. return -ENOMEM;
  2433. }
  2434. if (len > SPS_MAX_PKT_SIZE)
  2435. data_cnt = SPS_MAX_PKT_SIZE;
  2436. else
  2437. data_cnt = len;
  2438. iovec->size = data_cnt;
  2439. iovec->addr = SPS_GET_LOWER_ADDR(paddr);
  2440. iovec->flags = SPS_GET_UPPER_ADDR(paddr);
  2441. sps_bam_pipe->iovec_count++;
  2442. iovec++;
  2443. paddr += data_cnt;
  2444. len -= data_cnt;
  2445. }
  2446. return 0;
  2447. }
  2448. static int _qce_sps_add_sg_data(struct qce_device *pce_dev,
  2449. struct scatterlist *sg_src, uint32_t nbytes,
  2450. struct sps_transfer *sps_bam_pipe)
  2451. {
  2452. uint32_t data_cnt, len;
  2453. dma_addr_t addr;
  2454. struct sps_iovec *iovec = sps_bam_pipe->iovec +
  2455. sps_bam_pipe->iovec_count;
  2456. while (nbytes > 0 && sg_src) {
  2457. len = min(nbytes, sg_dma_len(sg_src));
  2458. nbytes -= len;
  2459. addr = sg_dma_address(sg_src);
  2460. if (pce_dev->ce_bam_info.minor_version == 0)
  2461. len = ALIGN(len, pce_dev->ce_bam_info.ce_burst_size);
  2462. while (len > 0) {
  2463. if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
  2464. pr_err("Num of descrptor %d exceed max (%d)\n",
  2465. sps_bam_pipe->iovec_count,
  2466. (uint32_t)QCE_MAX_NUM_DSCR);
  2467. return -ENOMEM;
  2468. }
  2469. if (len > SPS_MAX_PKT_SIZE) {
  2470. data_cnt = SPS_MAX_PKT_SIZE;
  2471. iovec->size = data_cnt;
  2472. iovec->addr = SPS_GET_LOWER_ADDR(addr);
  2473. iovec->flags = SPS_GET_UPPER_ADDR(addr);
  2474. } else {
  2475. data_cnt = len;
  2476. iovec->size = data_cnt;
  2477. iovec->addr = SPS_GET_LOWER_ADDR(addr);
  2478. iovec->flags = SPS_GET_UPPER_ADDR(addr);
  2479. }
  2480. iovec++;
  2481. sps_bam_pipe->iovec_count++;
  2482. addr += data_cnt;
  2483. len -= data_cnt;
  2484. }
  2485. sg_src = sg_next(sg_src);
  2486. }
  2487. return 0;
  2488. }
/*
 * Like _qce_sps_add_sg_data(), but skips the first @off bytes of the
 * scatterlist before describing @nbytes of data.
 *
 * Returns 0 on success, -ENOENT on a NULL/short scatterlist, -ENOMEM
 * when the descriptor list fills up or the list ends before @nbytes
 * are described.
 */
static int _qce_sps_add_sg_data_off(struct qce_device *pce_dev,
		struct scatterlist *sg_src, uint32_t nbytes, uint32_t off,
		struct sps_transfer *sps_bam_pipe)
{
	uint32_t data_cnt, len;
	dma_addr_t addr;
	struct sps_iovec *iovec = sps_bam_pipe->iovec +
				sps_bam_pipe->iovec_count;
	/* bytes usable in the current entry after skipping the offset */
	unsigned int res_within_sg;

	if (!sg_src)
		return -ENOENT;
	res_within_sg = sg_dma_len(sg_src);

	/* Advance to the entry containing the offset; on exit, off is the
	 * remaining intra-entry offset and res_within_sg the usable bytes. */
	while (off > 0) {
		if (!sg_src) {
			pr_err("broken sg list off %d nbytes %d\n",
					off, nbytes);
			return -ENOENT;
		}
		len = sg_dma_len(sg_src);
		if (off < len) {
			res_within_sg = len - off;
			break;
		}
		off -= len;
		sg_src = sg_next(sg_src);
		if (sg_src)
			res_within_sg = sg_dma_len(sg_src);
	}
	while (nbytes > 0 && sg_src) {
		len = min(nbytes, res_within_sg);
		nbytes -= len;
		addr = sg_dma_address(sg_src) + off;
		if (pce_dev->ce_bam_info.minor_version == 0)
			len = ALIGN(len, pce_dev->ce_bam_info.ce_burst_size);
		while (len > 0) {
			if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
				pr_err("Num of descrptor %d exceed max (%d)\n",
						sps_bam_pipe->iovec_count,
						(uint32_t)QCE_MAX_NUM_DSCR);
				return -ENOMEM;
			}
			/* Split into chunks of at most SPS_MAX_PKT_SIZE */
			if (len > SPS_MAX_PKT_SIZE) {
				data_cnt = SPS_MAX_PKT_SIZE;
				iovec->size = data_cnt;
				iovec->addr = SPS_GET_LOWER_ADDR(addr);
				iovec->flags = SPS_GET_UPPER_ADDR(addr);
			} else {
				data_cnt = len;
				iovec->size = data_cnt;
				iovec->addr = SPS_GET_LOWER_ADDR(addr);
				iovec->flags = SPS_GET_UPPER_ADDR(addr);
			}
			iovec++;
			sps_bam_pipe->iovec_count++;
			addr += data_cnt;
			len -= data_cnt;
		}
		if (nbytes) {
			/* Continue at the start of the next entry */
			sg_src = sg_next(sg_src);
			if (!sg_src) {
				pr_err("more data bytes %d\n", nbytes);
				return -ENOMEM;
			}
			res_within_sg = sg_dma_len(sg_src);
			off = 0;
		}
	}
	return 0;
}
  2558. static int _qce_sps_add_cmd(struct qce_device *pce_dev, uint32_t flag,
  2559. struct qce_cmdlist_info *cmdptr,
  2560. struct sps_transfer *sps_bam_pipe)
  2561. {
  2562. dma_addr_t paddr = GET_PHYS_ADDR(cmdptr->cmdlist);
  2563. struct sps_iovec *iovec = sps_bam_pipe->iovec +
  2564. sps_bam_pipe->iovec_count;
  2565. iovec->size = cmdptr->size;
  2566. iovec->addr = SPS_GET_LOWER_ADDR(paddr);
  2567. iovec->flags = SPS_GET_UPPER_ADDR(paddr) | SPS_IOVEC_FLAG_CMD | flag;
  2568. sps_bam_pipe->iovec_count++;
  2569. if (sps_bam_pipe->iovec_count >= QCE_MAX_NUM_DSCR) {
  2570. pr_err("Num of descrptor %d exceed max (%d)\n",
  2571. sps_bam_pipe->iovec_count, (uint32_t)QCE_MAX_NUM_DSCR);
  2572. return -ENOMEM;
  2573. }
  2574. return 0;
  2575. }
/*
 * Queue the request's accumulated in/out descriptor lists on the BAM
 * pipes of its offload operation: the consumer (in) transfer first,
 * then the producer (out) transfer. Tags both transfers with a user
 * cookie encoding the request index for the completion callbacks.
 *
 * Return: 0 on success, or the sps_transfer() error code; on failure
 * the descriptor FIFOs are dumped for debugging.
 */
static int _qce_sps_transfer(struct qce_device *pce_dev, int req_info)
{
	int rc = 0;
	struct ce_sps_data *pce_sps_data;
	uint16_t op = pce_dev->ce_request_info[req_info].offload_op;

	pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
	/* Cookie = pattern | request index, recovered by the callbacks */
	pce_sps_data->out_transfer.user =
		(void *)((uintptr_t)(CRYPTO_REQ_USER_PAT |
					(unsigned int) req_info));
	pce_sps_data->in_transfer.user =
		(void *)((uintptr_t)(CRYPTO_REQ_USER_PAT |
					(unsigned int) req_info));
	_qce_dump_descr_fifos_dbg(pce_dev, req_info);

	if (pce_sps_data->in_transfer.iovec_count) {
		rc = sps_transfer(pce_dev->ce_bam_info.consumer[op].pipe,
					&pce_sps_data->in_transfer);
		if (rc) {
			pr_err("sps_xfr() fail (cons pipe=0x%lx) rc = %d\n",
				(uintptr_t)pce_dev->ce_bam_info.consumer[op].pipe,
				rc);
			goto ret;
		}
	}
	rc = sps_transfer(pce_dev->ce_bam_info.producer[op].pipe,
					&pce_sps_data->out_transfer);
	if (rc)
		pr_err("sps_xfr() fail (producer pipe=0x%lx) rc = %d\n",
			(uintptr_t)pce_dev->ce_bam_info.producer[op].pipe, rc);
ret:
	if (rc)
		_qce_dump_descr_fifos(pce_dev, req_info);
	return rc;
}
  2609. /**
  2610. * Allocate and Connect a CE peripheral's SPS endpoint
  2611. *
  2612. * This function allocates endpoint context and
  2613. * connect it with memory endpoint by calling
  2614. * appropriate SPS driver APIs.
  2615. *
  2616. * Also registers a SPS callback function with
  2617. * SPS driver
  2618. *
  2619. * This function should only be called once typically
  2620. * during driver probe.
  2621. *
  2622. * @pce_dev - Pointer to qce_device structure
  2623. * @ep - Pointer to sps endpoint data structure
  2624. * @index - Points to crypto use case
 * @is_producer - 1 means Producer endpoint
  2626. * 0 means Consumer endpoint
  2627. *
  2628. * @return - 0 if successful else negative value.
  2629. *
  2630. */
  2631. static int qce_sps_init_ep_conn(struct qce_device *pce_dev,
  2632. struct qce_sps_ep_conn_data *ep,
  2633. int index,
  2634. bool is_producer)
  2635. {
  2636. int rc = 0;
  2637. struct sps_pipe *sps_pipe_info;
  2638. struct sps_connect *sps_connect_info = &ep->connect;
  2639. struct sps_register_event *sps_event = &ep->event;
  2640. /* Allocate endpoint context */
  2641. sps_pipe_info = sps_alloc_endpoint();
  2642. if (!sps_pipe_info) {
  2643. pr_err("sps_alloc_endpoint() failed!!! is_producer=%d\n",
  2644. is_producer);
  2645. rc = -ENOMEM;
  2646. goto out;
  2647. }
  2648. /* Now save the sps pipe handle */
  2649. ep->pipe = sps_pipe_info;
  2650. /* Get default connection configuration for an endpoint */
  2651. rc = sps_get_config(sps_pipe_info, sps_connect_info);
  2652. if (rc) {
  2653. pr_err("sps_get_config() fail pipe_handle=0x%lx, rc = %d\n",
  2654. (uintptr_t)sps_pipe_info, rc);
  2655. goto get_config_err;
  2656. }
  2657. /* Modify the default connection configuration */
  2658. if (is_producer) {
  2659. /*
  2660. * For CE producer transfer, source should be
  2661. * CE peripheral where as destination should
  2662. * be system memory.
  2663. */
  2664. sps_connect_info->source = pce_dev->ce_bam_info.bam_handle;
  2665. sps_connect_info->destination = SPS_DEV_HANDLE_MEM;
  2666. /* Producer pipe will handle this connection */
  2667. sps_connect_info->mode = SPS_MODE_SRC;
  2668. sps_connect_info->options =
  2669. SPS_O_AUTO_ENABLE | SPS_O_DESC_DONE;
  2670. } else {
  2671. /* For CE consumer transfer, source should be
  2672. * system memory where as destination should
  2673. * CE peripheral
  2674. */
  2675. sps_connect_info->source = SPS_DEV_HANDLE_MEM;
  2676. sps_connect_info->destination = pce_dev->ce_bam_info.bam_handle;
  2677. sps_connect_info->mode = SPS_MODE_DEST;
  2678. sps_connect_info->options =
  2679. SPS_O_AUTO_ENABLE;
  2680. }
  2681. /* Producer pipe index */
  2682. sps_connect_info->src_pipe_index =
  2683. pce_dev->ce_bam_info.src_pipe_index[index];
  2684. /* Consumer pipe index */
  2685. sps_connect_info->dest_pipe_index =
  2686. pce_dev->ce_bam_info.dest_pipe_index[index];
  2687. /* Set pipe group */
  2688. sps_connect_info->lock_group =
  2689. pce_dev->ce_bam_info.pipe_pair_index[index];
  2690. sps_connect_info->event_thresh = 0x10;
  2691. /*
  2692. * Max. no of scatter/gather buffers that can
  2693. * be passed by block layer = 32 (NR_SG).
 * Each BAM descriptor needs 64 bits (8 bytes).
  2695. * One BAM descriptor is required per buffer transfer.
  2696. * So we would require total 256 (32 * 8) bytes of descriptor FIFO.
  2697. * But due to HW limitation we need to allocate atleast one extra
  2698. * descriptor memory (256 bytes + 8 bytes). But in order to be
  2699. * in power of 2, we are allocating 512 bytes of memory.
  2700. */
  2701. sps_connect_info->desc.size = QCE_MAX_NUM_DSCR * MAX_QCE_ALLOC_BAM_REQ *
  2702. sizeof(struct sps_iovec);
  2703. if (sps_connect_info->desc.size > MAX_SPS_DESC_FIFO_SIZE)
  2704. sps_connect_info->desc.size = MAX_SPS_DESC_FIFO_SIZE;
  2705. sps_connect_info->desc.base = dma_alloc_coherent(pce_dev->pdev,
  2706. sps_connect_info->desc.size,
  2707. &sps_connect_info->desc.phys_base,
  2708. GFP_KERNEL | __GFP_ZERO);
  2709. if (sps_connect_info->desc.base == NULL) {
  2710. rc = -ENOMEM;
  2711. pr_err("Can not allocate coherent memory for sps data\n");
  2712. goto get_config_err;
  2713. }
  2714. /* Establish connection between peripheral and memory endpoint */
  2715. rc = sps_connect(sps_pipe_info, sps_connect_info);
  2716. if (rc) {
  2717. pr_err("sps_connect() fail pipe_handle=0x%lx, rc = %d\n",
  2718. (uintptr_t)sps_pipe_info, rc);
  2719. goto sps_connect_err;
  2720. }
  2721. sps_event->mode = SPS_TRIGGER_CALLBACK;
  2722. sps_event->xfer_done = NULL;
  2723. sps_event->user = (void *)pce_dev;
  2724. if (is_producer) {
  2725. sps_event->options = SPS_O_EOT | SPS_O_DESC_DONE;
  2726. sps_event->callback = _sps_producer_callback;
  2727. rc = sps_register_event(ep->pipe, sps_event);
  2728. if (rc) {
  2729. pr_err("Producer callback registration failed rc=%d\n",
  2730. rc);
  2731. goto sps_connect_err;
  2732. }
  2733. } else {
  2734. sps_event->options = SPS_O_EOT;
  2735. sps_event->callback = NULL;
  2736. }
  2737. pr_debug("success, %s : pipe_handle=0x%lx, desc fifo base (phy) = 0x%pK\n",
  2738. is_producer ? "PRODUCER(RX/OUT)" : "CONSUMER(TX/IN)",
  2739. (uintptr_t)sps_pipe_info, &sps_connect_info->desc.phys_base);
  2740. goto out;
  2741. sps_connect_err:
  2742. dma_free_coherent(pce_dev->pdev,
  2743. sps_connect_info->desc.size,
  2744. sps_connect_info->desc.base,
  2745. sps_connect_info->desc.phys_base);
  2746. get_config_err:
  2747. sps_free_endpoint(sps_pipe_info);
  2748. out:
  2749. return rc;
  2750. }
  2751. /**
  2752. * Disconnect and Deallocate a CE peripheral's SPS endpoint
  2753. *
 * This function disconnects the endpoint and deallocates the
 * endpoint context.
  2756. *
  2757. * This function should only be called once typically
  2758. * during driver remove.
  2759. *
  2760. * @pce_dev - Pointer to qce_device structure
  2761. * @ep - Pointer to sps endpoint data structure
  2762. *
  2763. */
  2764. static void qce_sps_exit_ep_conn(struct qce_device *pce_dev,
  2765. struct qce_sps_ep_conn_data *ep)
  2766. {
  2767. struct sps_pipe *sps_pipe_info = ep->pipe;
  2768. struct sps_connect *sps_connect_info = &ep->connect;
  2769. sps_disconnect(sps_pipe_info);
  2770. dma_free_coherent(pce_dev->pdev,
  2771. sps_connect_info->desc.size,
  2772. sps_connect_info->desc.base,
  2773. sps_connect_info->desc.phys_base);
  2774. sps_free_endpoint(sps_pipe_info);
  2775. }
  2776. static void qce_sps_release_bam(struct qce_device *pce_dev)
  2777. {
  2778. struct bam_registration_info *pbam;
  2779. mutex_lock(&bam_register_lock);
  2780. pbam = pce_dev->pbam;
  2781. if (pbam == NULL)
  2782. goto ret;
  2783. pbam->cnt--;
  2784. if (pbam->cnt > 0)
  2785. goto ret;
  2786. if (pce_dev->ce_bam_info.bam_handle) {
  2787. sps_deregister_bam_device(pce_dev->ce_bam_info.bam_handle);
  2788. pr_debug("deregister bam handle 0x%lx\n",
  2789. pce_dev->ce_bam_info.bam_handle);
  2790. pce_dev->ce_bam_info.bam_handle = 0;
  2791. }
  2792. iounmap(pbam->bam_iobase);
  2793. pr_debug("delete bam 0x%x\n", pbam->bam_mem);
  2794. list_del(&pbam->qlist);
  2795. kfree(pbam);
  2796. ret:
  2797. pce_dev->pbam = NULL;
  2798. mutex_unlock(&bam_register_lock);
  2799. }
  2800. static int qce_sps_get_bam(struct qce_device *pce_dev)
  2801. {
  2802. int rc = 0;
  2803. struct sps_bam_props bam = {0};
  2804. struct bam_registration_info *pbam = NULL;
  2805. struct bam_registration_info *p;
  2806. uint32_t bam_cfg = 0;
  2807. mutex_lock(&bam_register_lock);
  2808. list_for_each_entry(p, &qce50_bam_list, qlist) {
  2809. if (p->bam_mem == pce_dev->bam_mem) {
  2810. pbam = p; /* found */
  2811. break;
  2812. }
  2813. }
  2814. if (pbam) {
  2815. pr_debug("found bam 0x%x\n", pbam->bam_mem);
  2816. pbam->cnt++;
  2817. pce_dev->ce_bam_info.bam_handle = pbam->handle;
  2818. pce_dev->ce_bam_info.bam_mem = pbam->bam_mem;
  2819. pce_dev->ce_bam_info.bam_iobase = pbam->bam_iobase;
  2820. pce_dev->pbam = pbam;
  2821. pce_dev->support_cmd_dscr = pbam->support_cmd_dscr;
  2822. goto ret;
  2823. }
  2824. pbam = kzalloc(sizeof(struct bam_registration_info), GFP_KERNEL);
  2825. if (!pbam) {
  2826. rc = -ENOMEM;
  2827. goto ret;
  2828. }
  2829. pbam->cnt = 1;
  2830. pbam->bam_mem = pce_dev->bam_mem;
  2831. pbam->bam_iobase = ioremap(pce_dev->bam_mem,
  2832. pce_dev->bam_mem_size);
  2833. if (!pbam->bam_iobase) {
  2834. kfree(pbam);
  2835. rc = -ENOMEM;
  2836. pr_err("Can not map BAM io memory\n");
  2837. goto ret;
  2838. }
  2839. pce_dev->ce_bam_info.bam_mem = pbam->bam_mem;
  2840. pce_dev->ce_bam_info.bam_iobase = pbam->bam_iobase;
  2841. pbam->handle = 0;
  2842. pr_debug("allocate bam 0x%x\n", pbam->bam_mem);
  2843. bam_cfg = readl_relaxed(pce_dev->ce_bam_info.bam_iobase +
  2844. CRYPTO_BAM_CNFG_BITS_REG);
  2845. pbam->support_cmd_dscr = (bam_cfg & CRYPTO_BAM_CD_ENABLE_MASK) ?
  2846. true : false;
  2847. if (!pbam->support_cmd_dscr) {
  2848. pr_info("qce50 don't support command descriptor. bam_cfg%x\n",
  2849. bam_cfg);
  2850. pce_dev->no_get_around = false;
  2851. }
  2852. pce_dev->support_cmd_dscr = pbam->support_cmd_dscr;
  2853. bam.phys_addr = pce_dev->ce_bam_info.bam_mem;
  2854. bam.virt_addr = pce_dev->ce_bam_info.bam_iobase;
  2855. /*
  2856. * This event threshold value is only significant for BAM-to-BAM
  2857. * transfer. It's ignored for BAM-to-System mode transfer.
  2858. */
  2859. bam.event_threshold = 0x10; /* Pipe event threshold */
  2860. /*
  2861. * This threshold controls when the BAM publish
  2862. * the descriptor size on the sideband interface.
  2863. * SPS HW will only be used when
  2864. * data transfer size > 64 bytes.
  2865. */
  2866. bam.summing_threshold = 64;
  2867. /* SPS driver wll handle the crypto BAM IRQ */
  2868. bam.irq = (u32)pce_dev->ce_bam_info.bam_irq;
  2869. /*
  2870. * Set flag to indicate BAM global device control is managed
  2871. * remotely.
  2872. */
  2873. if (!pce_dev->support_cmd_dscr || pce_dev->is_shared)
  2874. bam.manage = SPS_BAM_MGR_DEVICE_REMOTE;
  2875. else
  2876. bam.manage = SPS_BAM_MGR_LOCAL;
  2877. bam.ee = pce_dev->ce_bam_info.bam_ee;
  2878. bam.ipc_loglevel = QCE_BAM_DEFAULT_IPC_LOGLVL;
  2879. bam.options |= SPS_BAM_CACHED_WP;
  2880. pr_debug("bam physical base=0x%lx\n", (uintptr_t)bam.phys_addr);
  2881. pr_debug("bam virtual base=0x%pK\n", bam.virt_addr);
  2882. /* Register CE Peripheral BAM device to SPS driver */
  2883. rc = sps_register_bam_device(&bam, &pbam->handle);
  2884. if (rc) {
  2885. pr_err("sps_register_bam_device() failed! err=%d\n", rc);
  2886. rc = -EIO;
  2887. iounmap(pbam->bam_iobase);
  2888. kfree(pbam);
  2889. goto ret;
  2890. }
  2891. pce_dev->pbam = pbam;
  2892. list_add_tail(&pbam->qlist, &qce50_bam_list);
  2893. pce_dev->ce_bam_info.bam_handle = pbam->handle;
  2894. ret:
  2895. mutex_unlock(&bam_register_lock);
  2896. return rc;
  2897. }
  2898. /**
  2899. * Initialize SPS HW connected with CE core
  2900. *
 * This function registers BAM HW resources with the
 * SPS driver and then initializes 2 SPS endpoints.
  2903. *
  2904. * This function should only be called once typically
  2905. * during driver probe.
  2906. *
  2907. * @pce_dev - Pointer to qce_device structure
  2908. *
  2909. * @return - 0 if successful else negative value.
  2910. *
  2911. */
  2912. static int qce_sps_init(struct qce_device *pce_dev)
  2913. {
  2914. int rc = 0, i = 0;
  2915. rc = qce_sps_get_bam(pce_dev);
  2916. if (rc)
  2917. return rc;
  2918. pr_debug("BAM device registered. bam_handle=0x%lx\n",
  2919. pce_dev->ce_bam_info.bam_handle);
  2920. for (i = 0; i < QCE_OFFLOAD_OPER_LAST; i++) {
  2921. if (i == QCE_OFFLOAD_NONE && !(pce_dev->kernel_pipes_support))
  2922. continue;
  2923. else if ((i > 0) && !(pce_dev->offload_pipes_support))
  2924. break;
  2925. rc = qce_sps_init_ep_conn(pce_dev,
  2926. &pce_dev->ce_bam_info.producer[i], i, true);
  2927. if (rc)
  2928. goto sps_connect_producer_err;
  2929. rc = qce_sps_init_ep_conn(pce_dev,
  2930. &pce_dev->ce_bam_info.consumer[i], i, false);
  2931. if (rc)
  2932. goto sps_connect_consumer_err;
  2933. }
  2934. pr_info(" QTI MSM CE-BAM at 0x%016llx irq %d\n",
  2935. (unsigned long long)pce_dev->ce_bam_info.bam_mem,
  2936. (unsigned int)pce_dev->ce_bam_info.bam_irq);
  2937. return rc;
  2938. sps_connect_consumer_err:
  2939. qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.producer[i]);
  2940. sps_connect_producer_err:
  2941. qce_sps_release_bam(pce_dev);
  2942. return rc;
  2943. }
  2944. static inline int qce_alloc_req_info(struct qce_device *pce_dev)
  2945. {
  2946. int i;
  2947. int request_index = pce_dev->ce_request_index;
  2948. for (i = 0; i < MAX_QCE_BAM_REQ; i++) {
  2949. request_index++;
  2950. if (request_index >= MAX_QCE_BAM_REQ)
  2951. request_index = 0;
  2952. if (!atomic_xchg(
  2953. &pce_dev->ce_request_info[request_index].in_use,
  2954. true)) {
  2955. pce_dev->ce_request_index = request_index;
  2956. return request_index;
  2957. }
  2958. }
  2959. pr_warn("pcedev %d no reqs available no_of_queued_req %d\n",
  2960. pce_dev->dev_no, atomic_read(
  2961. &pce_dev->no_of_queued_req));
  2962. return -EBUSY;
  2963. }
  2964. static inline void qce_free_req_info(struct qce_device *pce_dev, int req_info,
  2965. bool is_complete)
  2966. {
  2967. pce_dev->ce_request_info[req_info].xfer_type = QCE_XFER_TYPE_LAST;
  2968. if (atomic_xchg(&pce_dev->ce_request_info[req_info].in_use,
  2969. false)) {
  2970. if (req_info < MAX_QCE_BAM_REQ && is_complete)
  2971. atomic_dec(&pce_dev->no_of_queued_req);
  2972. } else
  2973. pr_warn("request info %d free already\n", req_info);
  2974. }
  2975. static void print_notify_debug(struct sps_event_notify *notify)
  2976. {
  2977. phys_addr_t addr =
  2978. DESC_FULL_ADDR((phys_addr_t) notify->data.transfer.iovec.flags,
  2979. notify->data.transfer.iovec.addr);
  2980. pr_debug("sps ev_id=%d, addr=0x%pa, size=0x%x, flags=0x%x user=0x%pK\n",
  2981. notify->event_id, &addr,
  2982. notify->data.transfer.iovec.size,
  2983. notify->data.transfer.iovec.flags,
  2984. notify->data.transfer.user);
  2985. }
  2986. static void _qce_req_complete(struct qce_device *pce_dev, unsigned int req_info)
  2987. {
  2988. struct ce_request_info *preq_info;
  2989. preq_info = &pce_dev->ce_request_info[req_info];
  2990. switch (preq_info->xfer_type) {
  2991. case QCE_XFER_CIPHERING:
  2992. _ablk_cipher_complete(pce_dev, req_info);
  2993. break;
  2994. case QCE_XFER_HASHING:
  2995. _sha_complete(pce_dev, req_info);
  2996. break;
  2997. case QCE_XFER_AEAD:
  2998. _aead_complete(pce_dev, req_info);
  2999. break;
  3000. case QCE_XFER_F8:
  3001. _f8_complete(pce_dev, req_info);
  3002. break;
  3003. case QCE_XFER_F9:
  3004. _f9_complete(pce_dev, req_info);
  3005. break;
  3006. default:
  3007. qce_free_req_info(pce_dev, req_info, true);
  3008. break;
  3009. }
  3010. }
/*
 * Timer callback used while the engine runs in bunch mode.
 *
 * If the command sequence number still advances between ticks, progress is
 * being made: record it and re-arm the timer. If it has stalled, take
 * ownership of the engine, push a dummy request to flush the pipeline,
 * then switch the device back to interrupt mode and stop the timer.
 */
static void qce_multireq_timeout(struct timer_list *data)
{
	struct qce_device *pce_dev = from_timer(pce_dev, data, timer);
	int ret = 0;
	int last_seq;
	unsigned long flags;

	last_seq = atomic_read(&pce_dev->bunch_cmd_seq);
	/* Still making progress: remember the sequence and re-arm. */
	if (last_seq == 0 ||
		last_seq != atomic_read(&pce_dev->last_intr_seq)) {
		atomic_set(&pce_dev->last_intr_seq, last_seq);
		mod_timer(&(pce_dev->timer), (jiffies + DELAY_IN_JIFFIES));
		return;
	}
	/* last bunch mode command time out */

	/*
	 * From here to dummy request finish sps request and set owner back
	 * to none, we disable interrupt.
	 * So it won't get preempted or interrupted. If bam inerrupts happen
	 * between, and completion callback gets called from BAM, a new
	 * request may be issued by the client driver. Deadlock may happen.
	 */
	local_irq_save(flags);
	/* Someone else owns the engine right now: try again next tick. */
	if (cmpxchg(&pce_dev->owner, QCE_OWNER_NONE, QCE_OWNER_TIMEOUT)
						!= QCE_OWNER_NONE) {
		local_irq_restore(flags);
		mod_timer(&(pce_dev->timer), (jiffies + DELAY_IN_JIFFIES));
		return;
	}

	ret = qce_dummy_req(pce_dev);
	if (ret)
		pr_warn("pcedev %d: Failed to insert dummy req\n",
				pce_dev->dev_no);
	cmpxchg(&pce_dev->owner, QCE_OWNER_TIMEOUT, QCE_OWNER_NONE);
	/* Fall back to interrupt-driven operation and stop polling. */
	pce_dev->mode = IN_INTERRUPT_MODE;
	local_irq_restore(flags);

	del_timer(&(pce_dev->timer));
	pce_dev->qce_stats.no_of_timeouts++;
	pr_debug("pcedev %d mode switch to INTR\n", pce_dev->dev_no);
}
  3050. void qce_get_driver_stats(void *handle)
  3051. {
  3052. struct qce_device *pce_dev = (struct qce_device *) handle;
  3053. if (!_qce50_disp_stats)
  3054. return;
  3055. pr_info("Engine %d timeout occuured %d\n", pce_dev->dev_no,
  3056. pce_dev->qce_stats.no_of_timeouts);
  3057. pr_info("Engine %d dummy request inserted %d\n", pce_dev->dev_no,
  3058. pce_dev->qce_stats.no_of_dummy_reqs);
  3059. if (pce_dev->mode)
  3060. pr_info("Engine %d is in BUNCH MODE\n", pce_dev->dev_no);
  3061. else
  3062. pr_info("Engine %d is in INTERRUPT MODE\n", pce_dev->dev_no);
  3063. pr_info("Engine %d outstanding request %d\n", pce_dev->dev_no,
  3064. atomic_read(&pce_dev->no_of_queued_req));
  3065. }
  3066. EXPORT_SYMBOL(qce_get_driver_stats);
  3067. void qce_clear_driver_stats(void *handle)
  3068. {
  3069. struct qce_device *pce_dev = (struct qce_device *) handle;
  3070. pce_dev->qce_stats.no_of_timeouts = 0;
  3071. pce_dev->qce_stats.no_of_dummy_reqs = 0;
  3072. }
  3073. EXPORT_SYMBOL(qce_clear_driver_stats);
  3074. static void _sps_producer_callback(struct sps_event_notify *notify)
  3075. {
  3076. struct qce_device *pce_dev = (struct qce_device *)
  3077. ((struct sps_event_notify *)notify)->user;
  3078. int rc = 0;
  3079. unsigned int req_info;
  3080. struct ce_sps_data *pce_sps_data;
  3081. struct ce_request_info *preq_info;
  3082. uint16_t op;
  3083. print_notify_debug(notify);
  3084. req_info = (unsigned int)((uintptr_t)notify->data.transfer.user);
  3085. if ((req_info & 0xffff0000) != CRYPTO_REQ_USER_PAT) {
  3086. pr_warn("request information %d out of range\n", req_info);
  3087. return;
  3088. }
  3089. req_info = req_info & 0x00ff;
  3090. if (req_info < 0 || req_info >= MAX_QCE_ALLOC_BAM_REQ) {
  3091. pr_warn("request information %d out of range\n", req_info);
  3092. return;
  3093. }
  3094. preq_info = &pce_dev->ce_request_info[req_info];
  3095. if (!atomic_read(&preq_info->in_use)) {
  3096. pr_err("request information %d already done\n", req_info);
  3097. return;
  3098. }
  3099. op = pce_dev->ce_request_info[req_info].offload_op;
  3100. pce_sps_data = &preq_info->ce_sps;
  3101. if ((preq_info->xfer_type == QCE_XFER_CIPHERING ||
  3102. preq_info->xfer_type == QCE_XFER_AEAD) &&
  3103. pce_sps_data->producer_state == QCE_PIPE_STATE_IDLE) {
  3104. pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
  3105. if (!is_offload_op(op)) {
  3106. pce_sps_data->out_transfer.iovec_count = 0;
  3107. _qce_sps_add_data(GET_PHYS_ADDR(
  3108. pce_sps_data->result_dump),
  3109. CRYPTO_RESULT_DUMP_SIZE,
  3110. &pce_sps_data->out_transfer);
  3111. _qce_set_flag(&pce_sps_data->out_transfer,
  3112. SPS_IOVEC_FLAG_INT);
  3113. rc = sps_transfer(
  3114. pce_dev->ce_bam_info.producer[op].pipe,
  3115. &pce_sps_data->out_transfer);
  3116. if (rc) {
  3117. pr_err("sps_xfr fail (prod pipe=0x%lx) rc = %d\n",
  3118. (uintptr_t)pce_dev->ce_bam_info.producer[op].pipe,
  3119. rc);
  3120. }
  3121. }
  3122. return;
  3123. }
  3124. _qce_req_complete(pce_dev, req_info);
  3125. }
  3126. /**
  3127. * De-initialize SPS HW connected with CE core
  3128. *
 * This function deinitializes SPS endpoints and then
 * deregisters BAM resources from the SPS driver.
  3131. *
  3132. * This function should only be called once typically
  3133. * during driver remove.
  3134. *
  3135. * @pce_dev - Pointer to qce_device structure
  3136. *
  3137. */
  3138. static void qce_sps_exit(struct qce_device *pce_dev)
  3139. {
  3140. int i = 0;
  3141. for (i = 0; i < QCE_OFFLOAD_OPER_LAST; i++) {
  3142. if (i == QCE_OFFLOAD_NONE && !(pce_dev->kernel_pipes_support))
  3143. continue;
  3144. else if ((i > 0) && !(pce_dev->offload_pipes_support))
  3145. break;
  3146. qce_sps_exit_ep_conn(pce_dev,
  3147. &pce_dev->ce_bam_info.consumer[i]);
  3148. qce_sps_exit_ep_conn(pce_dev,
  3149. &pce_dev->ce_bam_info.producer[i]);
  3150. }
  3151. qce_sps_release_bam(pce_dev);
  3152. }
  3153. static void qce_add_cmd_element(struct qce_device *pdev,
  3154. struct sps_command_element **cmd_ptr, u32 addr,
  3155. u32 data, struct sps_command_element **populate)
  3156. {
  3157. (*cmd_ptr)->addr = (uint32_t)(addr + pdev->phy_iobase);
  3158. (*cmd_ptr)->command = 0;
  3159. (*cmd_ptr)->data = data;
  3160. (*cmd_ptr)->mask = 0xFFFFFFFF;
  3161. (*cmd_ptr)->reserved = 0;
  3162. if (populate != NULL)
  3163. *populate = *cmd_ptr;
  3164. (*cmd_ptr)++;
  3165. }
/*
 * Build the reusable SPS command list that programs the CE block for one
 * AES cipher configuration (mode x key size) in request slot @cri_index.
 *
 * @pdev      - qce device
 * @cri_index - index into ce_request_info whose cmdlistptr table is filled
 * @pvaddr    - in/out cursor into command-list memory; advanced past the
 *              elements written here
 * @mode      - AES mode of operation (CBC/CTR/ECB/XTS)
 * @key_128   - true for the 128-bit key variant, false for 256-bit
 *
 * Returns 0 on success, -EINVAL for an unknown mode.
 */
static int _setup_cipher_aes_cmdlistptrs(struct qce_device *pdev, int cri_index,
		unsigned char **pvaddr, enum qce_cipher_mode_enum mode,
		bool key_128)
{
	struct sps_command_element *ce_vaddr;
	uintptr_t ce_vaddr_start;
	struct qce_cmdlistptr_ops *cmdlistptr;
	struct qce_cmdlist_info *pcl_info = NULL;
	int i = 0;
	uint32_t encr_cfg = 0;
	uint32_t key_reg = 0;
	uint32_t xts_key_reg = 0;
	uint32_t iv_reg = 0;

	cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
	/* Command lists must start on a CE burst-size boundary. */
	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
					pdev->ce_bam_info.ce_burst_size);
	ce_vaddr = (struct sps_command_element *)(*pvaddr);
	ce_vaddr_start = (uintptr_t)(*pvaddr);
	/*
	 * Designate chunks of the allocated memory to various
	 * command list pointers related to AES cipher operations defined
	 * in ce_cmdlistptrs_ops structure.
	 */
	switch (mode) {
	case QCE_MODE_CBC:
	case QCE_MODE_CTR:
		if (key_128) {
			cmdlistptr->cipher_aes_128_cbc_ctr.cmdlist =
						(uintptr_t)ce_vaddr;
			pcl_info = &(cmdlistptr->cipher_aes_128_cbc_ctr);
			if (mode == QCE_MODE_CBC)
				encr_cfg = pdev->reg.encr_cfg_aes_cbc_128;
			else
				encr_cfg = pdev->reg.encr_cfg_aes_ctr_128;
			/* 4 IV words, 4 key words (128-bit key), no XTS key */
			iv_reg = 4;
			key_reg = 4;
			xts_key_reg = 0;
		} else {
			cmdlistptr->cipher_aes_256_cbc_ctr.cmdlist =
						(uintptr_t)ce_vaddr;
			pcl_info = &(cmdlistptr->cipher_aes_256_cbc_ctr);
			if (mode == QCE_MODE_CBC)
				encr_cfg = pdev->reg.encr_cfg_aes_cbc_256;
			else
				encr_cfg = pdev->reg.encr_cfg_aes_ctr_256;
			/* 4 IV words, 8 key words (256-bit key), no XTS key */
			iv_reg = 4;
			key_reg = 8;
			xts_key_reg = 0;
		}
		break;
	case QCE_MODE_ECB:
		if (key_128) {
			cmdlistptr->cipher_aes_128_ecb.cmdlist =
						(uintptr_t)ce_vaddr;
			pcl_info = &(cmdlistptr->cipher_aes_128_ecb);
			encr_cfg = pdev->reg.encr_cfg_aes_ecb_128;
			/* ECB uses no IV */
			iv_reg = 0;
			key_reg = 4;
			xts_key_reg = 0;
		} else {
			cmdlistptr->cipher_aes_256_ecb.cmdlist =
						(uintptr_t)ce_vaddr;
			pcl_info = &(cmdlistptr->cipher_aes_256_ecb);
			encr_cfg = pdev->reg.encr_cfg_aes_ecb_256;
			iv_reg = 0;
			key_reg = 8;
			xts_key_reg = 0;
		}
		break;
	case QCE_MODE_XTS:
		if (key_128) {
			cmdlistptr->cipher_aes_128_xts.cmdlist =
						(uintptr_t)ce_vaddr;
			pcl_info = &(cmdlistptr->cipher_aes_128_xts);
			encr_cfg = pdev->reg.encr_cfg_aes_xts_128;
			/* XTS needs a second (tweak) key of the same size */
			iv_reg = 4;
			key_reg = 4;
			xts_key_reg = 4;
		} else {
			cmdlistptr->cipher_aes_256_xts.cmdlist =
						(uintptr_t)ce_vaddr;
			pcl_info = &(cmdlistptr->cipher_aes_256_xts);
			encr_cfg = pdev->reg.encr_cfg_aes_xts_256;
			iv_reg = 4;
			key_reg = 8;
			xts_key_reg = 8;
		}
		break;
	default:
		pr_err("Unknown mode of operation %d received, exiting now\n",
			mode);
		return -EINVAL;
	break;
	}

	/* clear status register */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS2_REG, 0, NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS3_REG, 0, NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS4_REG, 0, NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS5_REG, 0, NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS6_REG, 0, NULL);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);

	/* Segment sizes/offsets are patched per-request via pcl_info. */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
						&pcl_info->seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
						&pcl_info->encr_seg_cfg);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
						&pcl_info->encr_seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
						&pcl_info->encr_seg_start);
	/* CTR counter mask registers (128-bit mask split over 4 regs). */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG,
				pdev->reg.encr_cntr_mask_3, &pcl_info->encr_mask_3);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG2,
				pdev->reg.encr_cntr_mask_2, &pcl_info->encr_mask_2);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG1,
				pdev->reg.encr_cntr_mask_1, &pcl_info->encr_mask_1);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG0,
				pdev->reg.encr_cntr_mask_0, &pcl_info->encr_mask_0);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
						&pcl_info->auth_seg_cfg);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_DATA_PATT_PROC_CFG_REG, 0,
						&pcl_info->pattern_info);
	qce_add_cmd_element(pdev, &ce_vaddr,
			CRYPTO_DATA_PARTIAL_BLOCK_PROC_CFG_REG, 0,
			&pcl_info->block_offset);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
						&pcl_info->encr_key);
	/* Reserve elements for the remaining key words. */
	for (i = 1; i < key_reg; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
				0, NULL);
	if (xts_key_reg) {
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_XTS_KEY0_REG,
					0, &pcl_info->encr_xts_key);
		for (i = 1; i < xts_key_reg; i++)
			qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_ENCR_XTS_KEY0_REG +
						i * sizeof(uint32_t)), 0, NULL);
		qce_add_cmd_element(pdev, &ce_vaddr,
				CRYPTO_ENCR_XTS_DU_SIZE_REG, 0,
					&pcl_info->encr_xts_du_size);
	}
	if (iv_reg) {
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
						&pcl_info->encr_cntr_iv);
		for (i = 1; i < iv_reg; i++)
			qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
				0, NULL);
	}
	/* Add dummy to  align size to burst-size multiple */
	if (mode == QCE_MODE_XTS) {
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
						0, &pcl_info->auth_seg_size);
	} else {
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
						0, &pcl_info->auth_seg_size);
		/*
		 * NOTE(review): this element writes AUTH_SEG_START but its
		 * populate pointer overwrites pcl_info->auth_seg_size captured
		 * just above — confirm whether auth_seg_start (or NULL) was
		 * intended here.
		 */
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG,
						0, &pcl_info->auth_seg_size);
	}
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
			pdev->reg.crypto_cfg_le, &pcl_info->crypto_cfg_le);

	/* GO pulse with results dump and context clear to launch the op. */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);

	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
	*pvaddr = (unsigned char *) ce_vaddr;

	return 0;
}
/*
 * Build the reusable SPS command list that programs the CE block for one
 * DES/3DES cipher configuration in request slot @cri_index.
 *
 * @pdev      - qce device
 * @cri_index - index into ce_request_info whose cmdlistptr table is filled
 * @pvaddr    - in/out cursor into command-list memory; advanced past the
 *              elements written here
 * @alg       - CIPHER_ALG_DES or CIPHER_ALG_3DES
 * @mode_cbc  - true for CBC, false for ECB
 *
 * Returns 0 on success, -EINVAL for an unknown algorithm.
 */
static int _setup_cipher_des_cmdlistptrs(struct qce_device *pdev, int cri_index,
		unsigned char **pvaddr, enum qce_cipher_alg_enum alg,
		bool mode_cbc)
{
	struct sps_command_element *ce_vaddr;
	uintptr_t ce_vaddr_start;
	struct qce_cmdlistptr_ops *cmdlistptr;
	struct qce_cmdlist_info *pcl_info = NULL;
	int i = 0;
	uint32_t encr_cfg = 0;
	uint32_t key_reg = 0;
	uint32_t iv_reg = 0;

	cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
	/* Command lists must start on a CE burst-size boundary. */
	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
					pdev->ce_bam_info.ce_burst_size);
	ce_vaddr = (struct sps_command_element *)(*pvaddr);
	ce_vaddr_start = (uintptr_t)(*pvaddr);

	/*
	 * Designate chunks of the allocated memory to various
	 * command list pointers related to cipher operations defined
	 * in ce_cmdlistptrs_ops structure.
	 */
	switch (alg) {
	case CIPHER_ALG_DES:
		if (mode_cbc) {
			cmdlistptr->cipher_des_cbc.cmdlist =
						(uintptr_t)ce_vaddr;
			pcl_info = &(cmdlistptr->cipher_des_cbc);

			encr_cfg = pdev->reg.encr_cfg_des_cbc;
			/* 2 IV words (64-bit block), 2 key words */
			iv_reg = 2;
			key_reg = 2;
		} else {
			cmdlistptr->cipher_des_ecb.cmdlist =
						(uintptr_t)ce_vaddr;
			pcl_info = &(cmdlistptr->cipher_des_ecb);

			encr_cfg = pdev->reg.encr_cfg_des_ecb;
			/* ECB uses no IV */
			iv_reg = 0;
			key_reg = 2;
		}
		break;
	case CIPHER_ALG_3DES:
		if (mode_cbc) {
			cmdlistptr->cipher_3des_cbc.cmdlist =
						(uintptr_t)ce_vaddr;
			pcl_info = &(cmdlistptr->cipher_3des_cbc);

			encr_cfg = pdev->reg.encr_cfg_3des_cbc;
			/* 2 IV words, 6 key words (three 64-bit DES keys) */
			iv_reg = 2;
			key_reg = 6;
		} else {
			cmdlistptr->cipher_3des_ecb.cmdlist =
						(uintptr_t)ce_vaddr;
			pcl_info = &(cmdlistptr->cipher_3des_ecb);

			encr_cfg = pdev->reg.encr_cfg_3des_ecb;
			iv_reg = 0;
			key_reg = 6;
		}
		break;
	default:
		pr_err("Unknown algorithms %d received, exiting now\n", alg);
		return -EINVAL;
	break;
	}

	/* clear status register */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);

	/* Segment sizes/offsets are patched per-request via pcl_info. */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
						&pcl_info->seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
						&pcl_info->encr_seg_cfg);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
						&pcl_info->encr_seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
						&pcl_info->encr_seg_start);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
						&pcl_info->auth_seg_cfg);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
						&pcl_info->encr_key);
	/* Reserve elements for the remaining key words. */
	for (i = 1; i < key_reg; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
				0, NULL);
	if (iv_reg) {
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
						&pcl_info->encr_cntr_iv);
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR1_IV1_REG, 0,
								NULL);
	}
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
			pdev->reg.crypto_cfg_le, &pcl_info->crypto_cfg_le);

	/* GO pulse with results dump and context clear to launch the op. */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);

	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
	*pvaddr = (unsigned char *) ce_vaddr;

	return 0;
}
/*
 * Build the command list for a null-cipher (pass-through) operation in
 * request slot @cri_index. No populate pointers are captured except for
 * the GO element, since nothing is patched per request.
 *
 * Returns 0.
 */
static int _setup_cipher_null_cmdlistptrs(struct qce_device *pdev,
		int cri_index, unsigned char **pvaddr)
{
	struct sps_command_element *ce_vaddr;
	uintptr_t ce_vaddr_start;
	struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_request_info
						[cri_index].ce_sps.cmdlistptr;
	struct qce_cmdlist_info *pcl_info = NULL;

	/* Command lists must start on a CE burst-size boundary. */
	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
					pdev->ce_bam_info.ce_burst_size);
	ce_vaddr_start = (uintptr_t)(*pvaddr);
	ce_vaddr = (struct sps_command_element *)(*pvaddr);

	cmdlistptr->cipher_null.cmdlist = (uintptr_t)ce_vaddr;
	pcl_info = &(cmdlistptr->cipher_null);

	/* Fixed burst-size segment; AES-ECB config used as a no-op cipher. */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG,
			pdev->ce_bam_info.ce_burst_size, NULL);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG,
			pdev->reg.encr_cfg_aes_ecb_128, NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
			NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
			NULL);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
			0, NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
			0, NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
			NULL);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);

	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
	*pvaddr = (unsigned char *) ce_vaddr;
	return 0;
}
  3468. static int _setup_auth_cmdlistptrs(struct qce_device *pdev, int cri_index,
  3469. unsigned char **pvaddr, enum qce_hash_alg_enum alg,
  3470. bool key_128)
  3471. {
  3472. struct sps_command_element *ce_vaddr;
  3473. uintptr_t ce_vaddr_start;
  3474. struct qce_cmdlistptr_ops *cmdlistptr;
  3475. struct qce_cmdlist_info *pcl_info = NULL;
  3476. int i = 0;
  3477. uint32_t key_reg = 0;
  3478. uint32_t auth_cfg = 0;
  3479. uint32_t iv_reg = 0;
  3480. cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
  3481. *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
  3482. pdev->ce_bam_info.ce_burst_size);
  3483. ce_vaddr_start = (uintptr_t)(*pvaddr);
  3484. ce_vaddr = (struct sps_command_element *)(*pvaddr);
  3485. /*
  3486. * Designate chunks of the allocated memory to various
  3487. * command list pointers related to authentication operations
  3488. * defined in ce_cmdlistptrs_ops structure.
  3489. */
  3490. switch (alg) {
  3491. case QCE_HASH_SHA1:
  3492. cmdlistptr->auth_sha1.cmdlist = (uintptr_t)ce_vaddr;
  3493. pcl_info = &(cmdlistptr->auth_sha1);
  3494. auth_cfg = pdev->reg.auth_cfg_sha1;
  3495. iv_reg = 5;
  3496. /* clear status register */
  3497. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
  3498. 0, NULL);
  3499. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
  3500. pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
  3501. break;
  3502. case QCE_HASH_SHA256:
  3503. cmdlistptr->auth_sha256.cmdlist = (uintptr_t)ce_vaddr;
  3504. pcl_info = &(cmdlistptr->auth_sha256);
  3505. auth_cfg = pdev->reg.auth_cfg_sha256;
  3506. iv_reg = 8;
  3507. /* clear status register */
  3508. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
  3509. 0, NULL);
  3510. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
  3511. pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
  3512. /* 1 dummy write */
  3513. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
  3514. 0, NULL);
  3515. break;
  3516. case QCE_HASH_SHA1_HMAC:
  3517. cmdlistptr->auth_sha1_hmac.cmdlist = (uintptr_t)ce_vaddr;
  3518. pcl_info = &(cmdlistptr->auth_sha1_hmac);
  3519. auth_cfg = pdev->reg.auth_cfg_hmac_sha1;
  3520. key_reg = 16;
  3521. iv_reg = 5;
  3522. /* clear status register */
  3523. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
  3524. 0, NULL);
  3525. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
  3526. pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
  3527. break;
  3528. case QCE_HASH_SHA256_HMAC:
  3529. cmdlistptr->auth_sha256_hmac.cmdlist = (uintptr_t)ce_vaddr;
  3530. pcl_info = &(cmdlistptr->auth_sha256_hmac);
  3531. auth_cfg = pdev->reg.auth_cfg_hmac_sha256;
  3532. key_reg = 16;
  3533. iv_reg = 8;
  3534. /* clear status register */
  3535. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0,
  3536. NULL);
  3537. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
  3538. pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
  3539. /* 1 dummy write */
  3540. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
  3541. 0, NULL);
  3542. break;
  3543. case QCE_HASH_AES_CMAC:
  3544. if (key_128) {
  3545. cmdlistptr->auth_aes_128_cmac.cmdlist =
  3546. (uintptr_t)ce_vaddr;
  3547. pcl_info = &(cmdlistptr->auth_aes_128_cmac);
  3548. auth_cfg = pdev->reg.auth_cfg_cmac_128;
  3549. key_reg = 4;
  3550. } else {
  3551. cmdlistptr->auth_aes_256_cmac.cmdlist =
  3552. (uintptr_t)ce_vaddr;
  3553. pcl_info = &(cmdlistptr->auth_aes_256_cmac);
  3554. auth_cfg = pdev->reg.auth_cfg_cmac_256;
  3555. key_reg = 8;
  3556. }
  3557. /* clear status register */
  3558. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0,
  3559. NULL);
  3560. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
  3561. pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
  3562. /* 1 dummy write */
  3563. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
  3564. 0, NULL);
  3565. break;
  3566. default:
  3567. pr_err("Unknown algorithms %d received, exiting now\n", alg);
  3568. return -EINVAL;
  3569. break;
  3570. }
  3571. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
  3572. &pcl_info->seg_size);
  3573. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0,
  3574. &pcl_info->encr_seg_cfg);
  3575. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
  3576. auth_cfg, &pcl_info->auth_seg_cfg);
  3577. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
  3578. &pcl_info->auth_seg_size);
  3579. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
  3580. &pcl_info->auth_seg_start);
  3581. if (alg == QCE_HASH_AES_CMAC) {
  3582. /* reset auth iv, bytecount and key registers */
  3583. for (i = 0; i < 16; i++)
  3584. qce_add_cmd_element(pdev, &ce_vaddr,
  3585. (CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)),
  3586. 0, NULL);
  3587. for (i = 0; i < 16; i++)
  3588. qce_add_cmd_element(pdev, &ce_vaddr,
  3589. (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)),
  3590. 0, NULL);
  3591. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
  3592. 0, NULL);
  3593. } else {
  3594. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
  3595. &pcl_info->auth_iv);
  3596. for (i = 1; i < iv_reg; i++)
  3597. qce_add_cmd_element(pdev, &ce_vaddr,
  3598. (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
  3599. 0, NULL);
  3600. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
  3601. 0, &pcl_info->auth_bytecount);
  3602. }
  3603. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);
  3604. if (key_reg) {
  3605. qce_add_cmd_element(pdev, &ce_vaddr,
  3606. CRYPTO_AUTH_KEY0_REG, 0, &pcl_info->auth_key);
  3607. for (i = 1; i < key_reg; i++)
  3608. qce_add_cmd_element(pdev, &ce_vaddr,
  3609. (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)),
  3610. 0, NULL);
  3611. }
  3612. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
  3613. pdev->reg.crypto_cfg_le, &pcl_info->crypto_cfg_le);
  3614. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
  3615. ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
  3616. (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
  3617. pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
  3618. *pvaddr = (unsigned char *) ce_vaddr;
  3619. return 0;
  3620. }
/*
 * _setup_aead_cmdlistptrs() - build the reusable SPS command list for an
 * AEAD (cipher + HMAC) operation.
 * @pdev:      crypto engine device
 * @cri_index: index into pdev->ce_request_info[]
 * @pvaddr:    in/out cursor into coherent command-list memory; aligned to
 *             the CE burst size on entry, advanced past the list on return
 * @alg:       cipher algorithm (CIPHER_ALG_DES / _3DES / _AES)
 * @mode:      cipher mode; only QCE_MODE_CBC is supported here
 * @key_size:  cipher key length in bytes (selects AES-128 vs AES-256)
 * @sha1:      true for HMAC-SHA1, false for HMAC-SHA256
 *
 * The element emission order below mirrors the hardware register
 * programming sequence and must not be reordered.  Patchable element
 * addresses are recorded in the selected qce_cmdlist_info.
 *
 * Return: 0 on success, -EINVAL for an unsupported alg/mode/key_size combo.
 */
static int _setup_aead_cmdlistptrs(struct qce_device *pdev,
				int cri_index,
				unsigned char **pvaddr,
				uint32_t alg,
				uint32_t mode,
				uint32_t key_size,
				bool sha1)
{
	struct sps_command_element *ce_vaddr;
	uintptr_t ce_vaddr_start;
	struct qce_cmdlistptr_ops *cmd;
	struct qce_cmdlist_info *pcl_info = NULL;
	uint32_t key_reg;
	uint32_t iv_reg;
	uint32_t i;
	uint32_t enciv_in_word;
	uint32_t encr_cfg;

	cmd = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
	/* command lists must start on a CE burst-size boundary */
	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
					pdev->ce_bam_info.ce_burst_size);

	ce_vaddr_start = (uintptr_t)(*pvaddr);
	ce_vaddr = (struct sps_command_element *)(*pvaddr);

	/* pick the cmdlist slot and encryption config for the combination */
	switch (alg) {
	case CIPHER_ALG_DES:
		switch (mode) {
		case QCE_MODE_CBC:
			if (sha1) {
				cmd->aead_hmac_sha1_cbc_des.cmdlist =
					(uintptr_t)ce_vaddr;
				pcl_info =
				&(cmd->aead_hmac_sha1_cbc_des);
			} else {
				cmd->aead_hmac_sha256_cbc_des.cmdlist =
					(uintptr_t)ce_vaddr;
				pcl_info =
				&(cmd->aead_hmac_sha256_cbc_des);
			}
			encr_cfg = pdev->reg.encr_cfg_des_cbc;
			break;
		default:
			return -EINVAL;
		}

		enciv_in_word = 2;	/* DES IV: 8 bytes = 2 words */

		break;
	case CIPHER_ALG_3DES:
		switch (mode) {
		case QCE_MODE_CBC:
			if (sha1) {
				cmd->aead_hmac_sha1_cbc_3des.cmdlist =
					(uintptr_t)ce_vaddr;
				pcl_info =
				&(cmd->aead_hmac_sha1_cbc_3des);
			} else {
				cmd->aead_hmac_sha256_cbc_3des.cmdlist =
					(uintptr_t)ce_vaddr;
				pcl_info =
				&(cmd->aead_hmac_sha256_cbc_3des);
			}
			encr_cfg = pdev->reg.encr_cfg_3des_cbc;
			break;
		default:
			return -EINVAL;
		}

		enciv_in_word = 2;	/* 3DES IV: 8 bytes = 2 words */

		break;
	case CIPHER_ALG_AES:
		switch (mode) {
		case QCE_MODE_CBC:
			if (key_size ==  AES128_KEY_SIZE) {
				if (sha1) {
					cmd->aead_hmac_sha1_cbc_aes_128.cmdlist =
						(uintptr_t)ce_vaddr;
					pcl_info =
					&(cmd->aead_hmac_sha1_cbc_aes_128);
				} else {
					cmd->aead_hmac_sha256_cbc_aes_128.cmdlist
						= (uintptr_t)ce_vaddr;
					pcl_info =
					&(cmd->aead_hmac_sha256_cbc_aes_128);
				}
				encr_cfg = pdev->reg.encr_cfg_aes_cbc_128;
			} else if (key_size ==  AES256_KEY_SIZE) {
				if (sha1) {
					cmd->aead_hmac_sha1_cbc_aes_256.cmdlist =
						(uintptr_t)ce_vaddr;
					pcl_info =
					&(cmd->aead_hmac_sha1_cbc_aes_256);
				} else {
					cmd->aead_hmac_sha256_cbc_aes_256.cmdlist =
						(uintptr_t)ce_vaddr;
					pcl_info =
					&(cmd->aead_hmac_sha256_cbc_aes_256);
				}
				encr_cfg = pdev->reg.encr_cfg_aes_cbc_256;
			} else {
				return -EINVAL;
			}
			break;
		default:
			return -EINVAL;
		}

		enciv_in_word = 4;	/* AES IV: 16 bytes = 4 words */

		break;

	default:
		return -EINVAL;
	}

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);

	/* patchable cipher key element + zero-fill for the remaining words */
	key_reg = key_size/sizeof(uint32_t);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
			&pcl_info->encr_key);
	for (i = 1; i < key_reg; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
			(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
			0, NULL);

	if (mode != QCE_MODE_ECB) {
		/* cipher IV/counter registers (ECB has no IV) */
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
				&pcl_info->encr_cntr_iv);
		for (i = 1; i < enciv_in_word; i++)
			qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
				0, NULL);
	}

	if (sha1)
		iv_reg = 5;	/* SHA1 digest state: 5 words */
	else
		iv_reg = 8;	/* SHA256 digest state: 8 words */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
				&pcl_info->auth_iv);
	for (i = 1; i < iv_reg; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
				0, NULL);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
				0, &pcl_info->auth_bytecount);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);

	/* HMAC key occupies the full key register bank */
	key_reg = SHA_HMAC_KEY_SIZE/sizeof(uint32_t);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_KEY0_REG, 0,
			 &pcl_info->auth_key);
	for (i = 1; i < key_reg; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
			(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)), 0, NULL);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
			&pcl_info->seg_size);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
			&pcl_info->encr_seg_cfg);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
			&pcl_info->encr_seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
			&pcl_info->encr_seg_start);

	if (sha1)
		qce_add_cmd_element(
			pdev,
			&ce_vaddr,
			CRYPTO_AUTH_SEG_CFG_REG,
			pdev->reg.auth_cfg_aead_sha1_hmac,
			&pcl_info->auth_seg_cfg);
	else
		qce_add_cmd_element(
			pdev,
			&ce_vaddr,
			CRYPTO_AUTH_SEG_CFG_REG,
			pdev->reg.auth_cfg_aead_sha256_hmac,
			&pcl_info->auth_seg_cfg);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
			&pcl_info->auth_seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
			&pcl_info->auth_seg_start);

	/* restore little-endian config and kick off the engine */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
			pdev->reg.crypto_cfg_le, &pcl_info->crypto_cfg_le);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);

	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
	*pvaddr = (unsigned char *) ce_vaddr;
	return 0;
}
/*
 * _setup_aead_ccm_cmdlistptrs() - build the reusable SPS command list for
 * an AES-CCM AEAD operation.
 * @pdev:      crypto engine device
 * @cri_index: index into pdev->ce_request_info[]
 * @pvaddr:    in/out cursor into coherent command-list memory; aligned to
 *             the CE burst size on entry, advanced past the list on return
 * @key_128:   true for AES-128-CCM, false for AES-256-CCM
 *
 * Emits one sps_command_element per register write in hardware programming
 * order (do not reorder) and records the patchable element addresses in
 * the selected qce_cmdlist_info.
 *
 * Return: 0 (always succeeds).
 */
static int _setup_aead_ccm_cmdlistptrs(struct qce_device *pdev, int cri_index,
				unsigned char **pvaddr, bool key_128)
{
	struct sps_command_element *ce_vaddr;
	uintptr_t ce_vaddr_start;
	struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_request_info
						[cri_index].ce_sps.cmdlistptr;
	struct qce_cmdlist_info *pcl_info = NULL;
	int i = 0;
	uint32_t encr_cfg = 0;
	uint32_t auth_cfg = 0;
	uint32_t key_reg = 0;

	/* command lists must start on a CE burst-size boundary */
	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
					pdev->ce_bam_info.ce_burst_size);

	ce_vaddr_start = (uintptr_t)(*pvaddr);
	ce_vaddr = (struct sps_command_element *)(*pvaddr);

	/*
	 * Designate chunks of the allocated memory to various
	 * command list pointers related to aead operations
	 * defined in ce_cmdlistptrs_ops structure.
	 */
	if (key_128) {
		cmdlistptr->aead_aes_128_ccm.cmdlist =
						(uintptr_t)ce_vaddr;
		pcl_info = &(cmdlistptr->aead_aes_128_ccm);

		auth_cfg = pdev->reg.auth_cfg_aes_ccm_128;
		encr_cfg = pdev->reg.encr_cfg_aes_ccm_128;
		key_reg = 4;	/* AES-128 key: 4 words */
	} else {
		cmdlistptr->aead_aes_256_ccm.cmdlist =
						(uintptr_t)ce_vaddr;
		pcl_info = &(cmdlistptr->aead_aes_256_ccm);

		auth_cfg = pdev->reg.auth_cfg_aes_ccm_256;
		encr_cfg = pdev->reg.encr_cfg_aes_ccm_256;

		key_reg = 8;	/* AES-256 key: 8 words */
	}

	/* clear status register */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);

	/* two dummy writes before the real segment programming below */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0, NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
									NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
						&pcl_info->seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG,
					encr_cfg, &pcl_info->encr_seg_cfg);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
						&pcl_info->encr_seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
						&pcl_info->encr_seg_start);
	/* counter mask words, most-significant first */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG,
				pdev->reg.encr_cntr_mask_3, &pcl_info->encr_mask_3);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG0,
				pdev->reg.encr_cntr_mask_2, &pcl_info->encr_mask_2);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG1,
				pdev->reg.encr_cntr_mask_1, &pcl_info->encr_mask_1);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG2,
				pdev->reg.encr_cntr_mask_0, &pcl_info->encr_mask_0);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
					auth_cfg, &pcl_info->auth_seg_cfg);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
						&pcl_info->auth_seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
						&pcl_info->auth_seg_start);
	/* reset auth iv, bytecount and key  registers */
	for (i = 0; i < 8; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)),
				0, NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
					0, NULL);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG,
					0, NULL);
	for (i = 0; i < 16; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)),
				0, NULL);
	/* set auth key */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_KEY0_REG, 0,
							&pcl_info->auth_key);
	for (i = 1; i < key_reg; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)),
				0, NULL);
	/* set NONCE info */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_INFO_NONCE0_REG, 0,
						&pcl_info->auth_nonce_info);
	for (i = 1; i < 4; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_AUTH_INFO_NONCE0_REG +
				i * sizeof(uint32_t)), 0, NULL);

	/* patchable cipher key + remaining words zero-filled */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
						&pcl_info->encr_key);
	for (i = 1; i < key_reg; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
				0, NULL);
	/* CCM counter IV (4 words) */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
						&pcl_info->encr_cntr_iv);
	for (i = 1; i < 4; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
				0, NULL);
	/* CCM internal counter IV (4 words) */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_CCM_INT_CNTR0_REG, 0,
						&pcl_info->encr_ccm_cntr_iv);
	for (i = 1; i < 4; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
			(CRYPTO_ENCR_CCM_INT_CNTR0_REG + i * sizeof(uint32_t)),
			0, NULL);
	/* restore little-endian config and kick off the engine */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
			pdev->reg.crypto_cfg_le, &pcl_info->crypto_cfg_le);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);

	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
	*pvaddr = (unsigned char *) ce_vaddr;

	return 0;
}
/*
 * _setup_f8_cmdlistptrs() - build the reusable SPS command list for an OTA
 * f8 (confidentiality / cipher) operation using Kasumi or Snow3G.
 * @pdev:      crypto engine device
 * @cri_index: index into pdev->ce_request_info[]
 * @pvaddr:    in/out cursor into coherent command-list memory; aligned to
 *             the CE burst size on entry, advanced past the list on return
 * @alg:       QCE_OTA_ALGO_KASUMI or QCE_OTA_ALGO_SNOW3G (any other value
 *             falls through to the Snow3G list)
 *
 * Return: 0 (always succeeds).
 */
static int _setup_f8_cmdlistptrs(struct qce_device *pdev, int cri_index,
	unsigned char **pvaddr, enum qce_ota_algo_enum alg)
{
	struct sps_command_element *ce_vaddr;
	uintptr_t ce_vaddr_start;
	struct qce_cmdlistptr_ops *cmdlistptr;
	struct qce_cmdlist_info *pcl_info = NULL;
	int i = 0;
	uint32_t encr_cfg = 0;
	uint32_t key_reg = 4;	/* both OTA ciphers use a 4-word (128-bit) key */

	cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
	/* command lists must start on a CE burst-size boundary */
	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
					pdev->ce_bam_info.ce_burst_size);
	ce_vaddr = (struct sps_command_element *)(*pvaddr);
	ce_vaddr_start = (uintptr_t)(*pvaddr);

	/*
	 * Designate chunks of the allocated memory to various
	 * command list pointers related to f8 cipher algorithm defined
	 * in ce_cmdlistptrs_ops structure.
	 */

	switch (alg) {
	case QCE_OTA_ALGO_KASUMI:
		cmdlistptr->f8_kasumi.cmdlist = (uintptr_t)ce_vaddr;
		pcl_info = &(cmdlistptr->f8_kasumi);
		encr_cfg = pdev->reg.encr_cfg_kasumi;
		break;

	case QCE_OTA_ALGO_SNOW3G:
	default:
		cmdlistptr->f8_snow3g.cmdlist = (uintptr_t)ce_vaddr;
		pcl_info = &(cmdlistptr->f8_snow3g);
		encr_cfg = pdev->reg.encr_cfg_snow3g;
		break;
	}
	/* clear status register */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
							0, NULL);
	/* set config to big endian */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
						&pcl_info->seg_size);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
						&pcl_info->encr_seg_cfg);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
						&pcl_info->encr_seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
						&pcl_info->encr_seg_start);

	/* no authentication component in an f8 list */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
						&pcl_info->auth_seg_cfg);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
						0, &pcl_info->auth_seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG,
						0, &pcl_info->auth_seg_start);

	/* patchable cipher key + remaining words zero-filled */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
						 &pcl_info->encr_key);
	for (i = 1; i < key_reg; i++)
		qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
				0, NULL);

	/* 64-bit OTA counter/IV: word 0 patchable, word 1 cleared */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
						&pcl_info->encr_cntr_iv);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR1_IV1_REG, 0,
								NULL);
	/* restore little-endian config and kick off the engine */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
			pdev->reg.crypto_cfg_le, &pcl_info->crypto_cfg_le);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);

	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
	*pvaddr = (unsigned char *) ce_vaddr;

	return 0;
}
/*
 * _setup_f9_cmdlistptrs() - build the reusable SPS command list for an OTA
 * f9 (integrity / MAC) operation using Kasumi or Snow3G.
 * @pdev:      crypto engine device
 * @cri_index: index into pdev->ce_request_info[]
 * @pvaddr:    in/out cursor into coherent command-list memory; aligned to
 *             the CE burst size on entry, advanced past the list on return
 * @alg:       QCE_OTA_ALGO_KASUMI or QCE_OTA_ALGO_SNOW3G (any other value
 *             falls through to the Snow3G list)
 *
 * Return: 0 (always succeeds).
 */
static int _setup_f9_cmdlistptrs(struct qce_device *pdev, int cri_index,
	unsigned char **pvaddr, enum qce_ota_algo_enum alg)
{
	struct sps_command_element *ce_vaddr;
	uintptr_t ce_vaddr_start;
	struct qce_cmdlistptr_ops *cmdlistptr;
	struct qce_cmdlist_info *pcl_info = NULL;
	int i = 0;
	uint32_t auth_cfg = 0;
	uint32_t iv_reg = 0;

	cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
	/* command lists must start on a CE burst-size boundary */
	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
					pdev->ce_bam_info.ce_burst_size);
	ce_vaddr_start = (uintptr_t)(*pvaddr);
	ce_vaddr = (struct sps_command_element *)(*pvaddr);

	/*
	 * Designate chunks of the allocated memory to various
	 * command list pointers related to authentication operations
	 * defined in ce_cmdlistptrs_ops structure.
	 */
	switch (alg) {
	case QCE_OTA_ALGO_KASUMI:
		cmdlistptr->f9_kasumi.cmdlist = (uintptr_t)ce_vaddr;
		pcl_info = &(cmdlistptr->f9_kasumi);
		auth_cfg = pdev->reg.auth_cfg_kasumi;
		break;

	case QCE_OTA_ALGO_SNOW3G:
	default:
		cmdlistptr->f9_snow3g.cmdlist = (uintptr_t)ce_vaddr;
		pcl_info = &(cmdlistptr->f9_snow3g);
		auth_cfg = pdev->reg.auth_cfg_snow3g;
	}

	/* clear status register */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
							0, NULL);
	/* set config to big endian */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);

	iv_reg = 5;	/* f9 auth IV occupies 5 words for both algorithms */

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
						&pcl_info->seg_size);
	/* no cipher component in an f9 list */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0,
						&pcl_info->encr_seg_cfg);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
					auth_cfg, &pcl_info->auth_seg_cfg);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
						&pcl_info->auth_seg_size);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
						&pcl_info->auth_seg_start);

	/* patchable auth IV element + zero-fill of the remaining words */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
							&pcl_info->auth_iv);
	for (i = 1; i < iv_reg; i++) {
		qce_add_cmd_element(pdev, &ce_vaddr,
				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
				0, NULL);
	}
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
					0, &pcl_info->auth_bytecount);
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);

	/* restore little-endian config and kick off the engine */
	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
			pdev->reg.crypto_cfg_le, &pcl_info->crypto_cfg_le);

	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);

	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
	*pvaddr = (unsigned char *) ce_vaddr;

	return 0;
}
  4058. static int _setup_unlock_pipe_cmdlistptrs(struct qce_device *pdev,
  4059. int cri_index, unsigned char **pvaddr)
  4060. {
  4061. struct sps_command_element *ce_vaddr;
  4062. uintptr_t ce_vaddr_start = (uintptr_t)(*pvaddr);
  4063. struct qce_cmdlistptr_ops *cmdlistptr;
  4064. struct qce_cmdlist_info *pcl_info = NULL;
  4065. cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
  4066. *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
  4067. pdev->ce_bam_info.ce_burst_size);
  4068. ce_vaddr = (struct sps_command_element *)(*pvaddr);
  4069. cmdlistptr->unlock_all_pipes.cmdlist = (uintptr_t)ce_vaddr;
  4070. pcl_info = &(cmdlistptr->unlock_all_pipes);
  4071. /*
  4072. * Designate chunks of the allocated memory to command list
  4073. * to unlock pipes.
  4074. */
  4075. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
  4076. CRYPTO_CONFIG_RESET, NULL);
  4077. pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
  4078. *pvaddr = (unsigned char *) ce_vaddr;
  4079. return 0;
  4080. }
/*
 * qce_setup_cmdlistptrs() - carve the per-request coherent memory region
 * into one pre-built SPS command list for every operation the driver
 * supports (cipher, hash/HMAC/CMAC, AEAD, CCM, OTA f8/f9, pipe unlock).
 * @pdev:      crypto engine device
 * @cri_index: index into pdev->ce_request_info[]
 * @pvaddr:    in/out cursor into coherent memory; each _setup_* helper
 *             advances it past the list it just built
 *
 * The helpers' return values are not checked: every argument combination
 * passed below is a valid constant, so the -EINVAL paths inside the
 * helpers are unreachable from here.
 *
 * Return: 0 (always succeeds).
 */
static int qce_setup_cmdlistptrs(struct qce_device *pdev, int cri_index,
		unsigned char **pvaddr)
{
	struct sps_command_element *ce_vaddr =
				(struct sps_command_element *)(*pvaddr);
	/*
	 * Designate chunks of the allocated memory to various
	 * command list pointers related to operations defined
	 * in ce_cmdlistptrs_ops structure.
	 */
	ce_vaddr =
		(struct sps_command_element *)ALIGN(((uintptr_t) ce_vaddr),
					pdev->ce_bam_info.ce_burst_size);
	*pvaddr = (unsigned char *) ce_vaddr;

	/* AES ciphers: each mode, 128-bit (true) then 256-bit (false) keys */
	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CBC,
								true);
	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CTR,
								true);
	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_ECB,
								true);
	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_XTS,
								true);
	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CBC,
								false);
	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CTR,
								false);
	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_ECB,
								false);
	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_XTS,
								false);

	/* DES/3DES ciphers: CBC (true) and ECB (false) variants */
	_setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
								true);
	_setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
								false);
	_setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
								true);
	_setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
								false);

	/* plain hash, HMAC, and AES-CMAC lists */
	_setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA1,
								false);
	_setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA256,
								false);

	_setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA1_HMAC,
								false);
	_setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA256_HMAC,
								false);

	_setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_AES_CMAC,
								true);
	_setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_AES_CMAC,
								false);

	/* AEAD lists: HMAC-SHA1 (true) then HMAC-SHA256 (false) variants */
	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
					QCE_MODE_CBC, DES_KEY_SIZE, true);
	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
					QCE_MODE_CBC, DES3_EDE_KEY_SIZE, true);
	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
					QCE_MODE_CBC, AES128_KEY_SIZE, true);
	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
					QCE_MODE_CBC, AES256_KEY_SIZE, true);
	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
					QCE_MODE_CBC, DES_KEY_SIZE, false);
	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
					QCE_MODE_CBC, DES3_EDE_KEY_SIZE, false);
	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
					QCE_MODE_CBC, AES128_KEY_SIZE, false);
	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
					QCE_MODE_CBC, AES256_KEY_SIZE, false);

	/* null cipher, CCM, OTA f8/f9, and the unlock-pipes list */
	_setup_cipher_null_cmdlistptrs(pdev, cri_index, pvaddr);

	_setup_aead_ccm_cmdlistptrs(pdev, cri_index, pvaddr, true);
	_setup_aead_ccm_cmdlistptrs(pdev, cri_index, pvaddr, false);

	_setup_f8_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_KASUMI);
	_setup_f8_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_SNOW3G);
	_setup_f9_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_KASUMI);
	_setup_f9_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_SNOW3G);
	_setup_unlock_pipe_cmdlistptrs(pdev, cri_index, pvaddr);

	return 0;
}
/*
 * qce_setup_ce_sps_data() - partition the pre-allocated coherent memory
 * (pce_dev->coh_vmem) and iovec memory (pce_dev->iovec_vmem) among all
 * MAX_QCE_ALLOC_BAM_REQ request slots.
 *
 * For each slot this assigns: in/out SPS iovec arrays (with their physical
 * addresses), the pre-built command lists (when command descriptors are
 * supported), a result-dump area, a null result-dump area, and an
 * ignore/scratch buffer of two burst sizes.
 *
 * Panics if the carve-up overruns either memory region — this indicates a
 * sizing bug at allocation time, not a runtime condition that can be
 * handled.
 *
 * Return: 0 (always succeeds; overrun is fatal via panic()).
 */
static int qce_setup_ce_sps_data(struct qce_device *pce_dev)
{
	unsigned char *vaddr;
	int i;
	unsigned char *iovec_vaddr;
	int iovec_memsize;

	vaddr = pce_dev->coh_vmem;
	vaddr = (unsigned char *)ALIGN(((uintptr_t)vaddr),
					pce_dev->ce_bam_info.ce_burst_size);
	iovec_vaddr = pce_dev->iovec_vmem;
	iovec_memsize = pce_dev->iovec_memsize;
	for (i = 0; i < MAX_QCE_ALLOC_BAM_REQ; i++) {
		/* Allow for 256 descriptor (cmd and data) entries per pipe */
		pce_dev->ce_request_info[i].ce_sps.in_transfer.iovec =
				(struct sps_iovec *)iovec_vaddr;
		pce_dev->ce_request_info[i].ce_sps.in_transfer.iovec_phys =
			virt_to_phys(
			pce_dev->ce_request_info[i].ce_sps.in_transfer.iovec);
		iovec_vaddr += TOTAL_IOVEC_SPACE_PER_PIPE;
		iovec_memsize -= TOTAL_IOVEC_SPACE_PER_PIPE;
		pce_dev->ce_request_info[i].ce_sps.out_transfer.iovec =
				(struct sps_iovec *)iovec_vaddr;
		pce_dev->ce_request_info[i].ce_sps.out_transfer.iovec_phys =
			virt_to_phys(
			pce_dev->ce_request_info[i].ce_sps.out_transfer.iovec);
		iovec_vaddr += TOTAL_IOVEC_SPACE_PER_PIPE;
		iovec_memsize -= TOTAL_IOVEC_SPACE_PER_PIPE;
		/* pre-built command lists only when cmd descriptors in use */
		if (pce_dev->support_cmd_dscr)
			qce_setup_cmdlistptrs(pce_dev, i, &vaddr);
		vaddr = (unsigned char *)ALIGN(((uintptr_t)vaddr),
				pce_dev->ce_bam_info.ce_burst_size);
		/* result-dump area for the engine's status/digest output */
		pce_dev->ce_request_info[i].ce_sps.result_dump =
				(uintptr_t)vaddr;
		pce_dev->ce_request_info[i].ce_sps.result_dump_phy =
				GET_PHYS_ADDR((uintptr_t)vaddr);
		pce_dev->ce_request_info[i].ce_sps.result =
				(struct ce_result_dump_format *)vaddr;
		vaddr += CRYPTO_RESULT_DUMP_SIZE;

		/* secondary ("null") result-dump area */
		pce_dev->ce_request_info[i].ce_sps.result_dump_null =
				(uintptr_t)vaddr;
		pce_dev->ce_request_info[i].ce_sps.result_dump_null_phy =
				GET_PHYS_ADDR((uintptr_t)vaddr);
		pce_dev->ce_request_info[i].ce_sps.result_null =
				(struct ce_result_dump_format *)vaddr;
		vaddr += CRYPTO_RESULT_DUMP_SIZE;

		/* scratch buffer for data the driver needs to discard */
		pce_dev->ce_request_info[i].ce_sps.ignore_buffer =
				(uintptr_t)vaddr;
		vaddr += pce_dev->ce_bam_info.ce_burst_size * 2;
	}
	if ((vaddr - pce_dev->coh_vmem) > pce_dev->memsize ||
							iovec_memsize < 0)
		panic("qce50: Not enough coherent memory. Allocate %x , need %lx\n",
				 pce_dev->memsize, (uintptr_t)vaddr -
				(uintptr_t)pce_dev->coh_vmem);
	return 0;
}
  4213. static int qce_init_ce_cfg_val(struct qce_device *pce_dev)
  4214. {
  4215. uint32_t pipe_pair =
  4216. pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_NONE];
  4217. pce_dev->reg.crypto_cfg_be = qce_get_config_be(pce_dev, pipe_pair);
  4218. pce_dev->reg.crypto_cfg_le =
  4219. (pce_dev->reg.crypto_cfg_be | CRYPTO_LITTLE_ENDIAN_MASK);
  4220. /* Initialize encr_cfg register for AES alg */
  4221. pce_dev->reg.encr_cfg_aes_cbc_128 =
  4222. (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
  4223. (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
  4224. (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
  4225. pce_dev->reg.encr_cfg_aes_cbc_256 =
  4226. (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
  4227. (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
  4228. (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
  4229. pce_dev->reg.encr_cfg_aes_ctr_128 =
  4230. (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
  4231. (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
  4232. (CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
  4233. pce_dev->reg.encr_cfg_aes_ctr_256 =
  4234. (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
  4235. (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
  4236. (CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
  4237. pce_dev->reg.encr_cfg_aes_xts_128 =
  4238. (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
  4239. (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
  4240. (CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE);
  4241. pce_dev->reg.encr_cfg_aes_xts_256 =
  4242. (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
  4243. (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
  4244. (CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE);
  4245. pce_dev->reg.encr_cfg_aes_ecb_128 =
  4246. (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
  4247. (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
  4248. (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
  4249. pce_dev->reg.encr_cfg_aes_ecb_256 =
  4250. (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
  4251. (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
  4252. (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
  4253. pce_dev->reg.encr_cfg_aes_ccm_128 =
  4254. (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
  4255. (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
  4256. (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE)|
  4257. (CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM);
  4258. pce_dev->reg.encr_cfg_aes_ccm_256 =
  4259. (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
  4260. (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
  4261. (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE) |
  4262. (CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM);
  4263. /* Initialize encr_cfg register for DES alg */
  4264. pce_dev->reg.encr_cfg_des_ecb =
  4265. (CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) |
  4266. (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
  4267. (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
  4268. pce_dev->reg.encr_cfg_des_cbc =
  4269. (CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) |
  4270. (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
  4271. (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
  4272. pce_dev->reg.encr_cfg_3des_ecb =
  4273. (CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) |
  4274. (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
  4275. (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
  4276. pce_dev->reg.encr_cfg_3des_cbc =
  4277. (CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) |
  4278. (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
  4279. (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
  4280. /* Initialize encr_cfg register for kasumi/snow3g alg */
  4281. pce_dev->reg.encr_cfg_kasumi =
  4282. (CRYPTO_ENCR_ALG_KASUMI << CRYPTO_ENCR_ALG);
  4283. pce_dev->reg.encr_cfg_snow3g =
  4284. (CRYPTO_ENCR_ALG_SNOW_3G << CRYPTO_ENCR_ALG);
  4285. /* Initialize auth_cfg register for CMAC alg */
  4286. pce_dev->reg.auth_cfg_cmac_128 =
  4287. (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
  4288. (CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE)|
  4289. (CRYPTO_AUTH_SIZE_ENUM_16_BYTES << CRYPTO_AUTH_SIZE) |
  4290. (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
  4291. (CRYPTO_AUTH_KEY_SZ_AES128 << CRYPTO_AUTH_KEY_SIZE);
  4292. pce_dev->reg.auth_cfg_cmac_256 =
  4293. (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
  4294. (CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE)|
  4295. (CRYPTO_AUTH_SIZE_ENUM_16_BYTES << CRYPTO_AUTH_SIZE) |
  4296. (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
  4297. (CRYPTO_AUTH_KEY_SZ_AES256 << CRYPTO_AUTH_KEY_SIZE);
  4298. /* Initialize auth_cfg register for HMAC alg */
  4299. pce_dev->reg.auth_cfg_hmac_sha1 =
  4300. (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
  4301. (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
  4302. (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
  4303. (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
  4304. pce_dev->reg.auth_cfg_hmac_sha256 =
  4305. (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
  4306. (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
  4307. (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
  4308. (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
  4309. /* Initialize auth_cfg register for SHA1/256 alg */
  4310. pce_dev->reg.auth_cfg_sha1 =
  4311. (CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE)|
  4312. (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
  4313. (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
  4314. (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
  4315. pce_dev->reg.auth_cfg_sha256 =
  4316. (CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE)|
  4317. (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
  4318. (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
  4319. (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
  4320. /* Initialize auth_cfg register for AEAD alg */
  4321. pce_dev->reg.auth_cfg_aead_sha1_hmac =
  4322. (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
  4323. (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
  4324. (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
  4325. (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST);
  4326. pce_dev->reg.auth_cfg_aead_sha256_hmac =
  4327. (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
  4328. (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
  4329. (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
  4330. (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST);
  4331. pce_dev->reg.auth_cfg_aes_ccm_128 =
  4332. (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
  4333. (CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE)|
  4334. (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
  4335. (CRYPTO_AUTH_KEY_SZ_AES128 << CRYPTO_AUTH_KEY_SIZE) |
  4336. ((MAX_NONCE/sizeof(uint32_t)) << CRYPTO_AUTH_NONCE_NUM_WORDS);
  4337. pce_dev->reg.auth_cfg_aes_ccm_128 &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
  4338. pce_dev->reg.auth_cfg_aes_ccm_256 =
  4339. (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
  4340. (CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE)|
  4341. (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
  4342. (CRYPTO_AUTH_KEY_SZ_AES256 << CRYPTO_AUTH_KEY_SIZE) |
  4343. ((MAX_NONCE/sizeof(uint32_t)) << CRYPTO_AUTH_NONCE_NUM_WORDS);
  4344. pce_dev->reg.auth_cfg_aes_ccm_256 &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
  4345. /* Initialize auth_cfg register for kasumi/snow3g */
  4346. pce_dev->reg.auth_cfg_kasumi =
  4347. (CRYPTO_AUTH_ALG_KASUMI << CRYPTO_AUTH_ALG) |
  4348. BIT(CRYPTO_FIRST) | BIT(CRYPTO_LAST);
  4349. pce_dev->reg.auth_cfg_snow3g =
  4350. (CRYPTO_AUTH_ALG_SNOW3G << CRYPTO_AUTH_ALG) |
  4351. BIT(CRYPTO_FIRST) | BIT(CRYPTO_LAST);
  4352. /* Initialize IV counter mask values */
  4353. pce_dev->reg.encr_cntr_mask_3 = 0xFFFFFFFF;
  4354. pce_dev->reg.encr_cntr_mask_2 = 0xFFFFFFFF;
  4355. pce_dev->reg.encr_cntr_mask_1 = 0xFFFFFFFF;
  4356. pce_dev->reg.encr_cntr_mask_0 = 0xFFFFFFFF;
  4357. return 0;
  4358. }
  4359. static void _qce_ccm_get_around_input(struct qce_device *pce_dev,
  4360. struct ce_request_info *preq_info, enum qce_cipher_dir_enum dir)
  4361. {
  4362. struct qce_cmdlist_info *cmdlistinfo;
  4363. struct ce_sps_data *pce_sps_data;
  4364. pce_sps_data = &preq_info->ce_sps;
  4365. if ((dir == QCE_DECRYPT) && pce_dev->no_get_around &&
  4366. !(pce_dev->no_ccm_mac_status_get_around)) {
  4367. cmdlistinfo = &pce_sps_data->cmdlistptr.cipher_null;
  4368. _qce_sps_add_cmd(pce_dev, 0, cmdlistinfo,
  4369. &pce_sps_data->in_transfer);
  4370. _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
  4371. pce_dev->ce_bam_info.ce_burst_size,
  4372. &pce_sps_data->in_transfer);
  4373. _qce_set_flag(&pce_sps_data->in_transfer,
  4374. SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD);
  4375. }
  4376. }
  4377. static void _qce_ccm_get_around_output(struct qce_device *pce_dev,
  4378. struct ce_request_info *preq_info, enum qce_cipher_dir_enum dir)
  4379. {
  4380. struct ce_sps_data *pce_sps_data;
  4381. pce_sps_data = &preq_info->ce_sps;
  4382. if ((dir == QCE_DECRYPT) && pce_dev->no_get_around &&
  4383. !(pce_dev->no_ccm_mac_status_get_around)) {
  4384. _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
  4385. pce_dev->ce_bam_info.ce_burst_size,
  4386. &pce_sps_data->out_transfer);
  4387. _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump_null),
  4388. CRYPTO_RESULT_DUMP_SIZE, &pce_sps_data->out_transfer);
  4389. }
  4390. }
  4391. /* QCE_DUMMY_REQ */
  4392. static void qce_dummy_complete(void *cookie, unsigned char *digest,
  4393. unsigned char *authdata, int ret)
  4394. {
  4395. if (!cookie)
  4396. pr_err("invalid cookie\n");
  4397. }
  4398. static int qce_dummy_req(struct qce_device *pce_dev)
  4399. {
  4400. int ret = 0;
  4401. if (atomic_xchg(
  4402. &pce_dev->ce_request_info[DUMMY_REQ_INDEX].in_use, true))
  4403. return -EBUSY;
  4404. ret = qce_process_sha_req(pce_dev, NULL);
  4405. pce_dev->qce_stats.no_of_dummy_reqs++;
  4406. return ret;
  4407. }
/*
 * select_mode() - decide whether this request's transfer raises a BAM
 * interrupt, and take ownership of the device for the submit window.
 *
 * Without the no_get_around flow every request interrupts.  Otherwise the
 * caller's submit is serialized by spinning on pce_dev->owner; then, in
 * interrupt mode, the driver switches to bunch mode once enough requests
 * are queued, and in bunch mode it only flags an interrupt once a
 * length-derived cadence threshold is reached.
 *
 * Always returns 0.  The matching owner release (cmpxchg back to
 * QCE_OWNER_NONE) is done by the caller after _qce_sps_transfer().
 */
static int select_mode(struct qce_device *pce_dev,
		struct ce_request_info *preq_info)
{
	struct ce_sps_data *pce_sps_data = &preq_info->ce_sps;
	unsigned int no_of_queued_req;
	unsigned int cadence;

	/* Legacy flow: interrupt on every request, no ownership needed. */
	if (!pce_dev->no_get_around) {
		_qce_set_flag(&pce_sps_data->out_transfer, SPS_IOVEC_FLAG_INT);
		return 0;
	}

	/*
	 * claim ownership of device
	 */
again:
	if (cmpxchg(&pce_dev->owner, QCE_OWNER_NONE, QCE_OWNER_CLIENT)
							!= QCE_OWNER_NONE) {
		ndelay(40);
		goto again;
	}
	no_of_queued_req = atomic_inc_return(&pce_dev->no_of_queued_req);
	if (pce_dev->mode == IN_INTERRUPT_MODE) {
		if (no_of_queued_req >= MAX_BUNCH_MODE_REQ) {
			/* Sustained traffic: switch to bunch mode and arm
			 * the timer that will later drain/interrupt it.
			 */
			pce_dev->mode = IN_BUNCH_MODE;
			pr_debug("pcedev %d mode switch to BUNCH\n",
					pce_dev->dev_no);
			_qce_set_flag(&pce_sps_data->out_transfer,
					SPS_IOVEC_FLAG_INT);
			pce_dev->intr_cadence = 0;
			atomic_set(&pce_dev->bunch_cmd_seq, 1);
			atomic_set(&pce_dev->last_intr_seq, 1);
			mod_timer(&(pce_dev->timer),
					(jiffies + DELAY_IN_JIFFIES));
		} else {
			_qce_set_flag(&pce_sps_data->out_transfer,
					SPS_IOVEC_FLAG_INT);
		}
	} else {
		/* Bunch mode: larger requests (req_len >> 7) reach the
		 * interrupt cadence sooner; cadence_flag alternates the
		 * boundary case so interrupts stay evenly spread.
		 */
		pce_dev->intr_cadence++;
		cadence = (preq_info->req_len >> 7) + 1;
		if (cadence > SET_INTR_AT_REQ)
			cadence = SET_INTR_AT_REQ;
		if (pce_dev->intr_cadence < cadence || ((pce_dev->intr_cadence
					== cadence) && pce_dev->cadence_flag))
			atomic_inc(&pce_dev->bunch_cmd_seq);
		else {
			_qce_set_flag(&pce_sps_data->out_transfer,
					SPS_IOVEC_FLAG_INT);
			pce_dev->intr_cadence = 0;
			atomic_set(&pce_dev->bunch_cmd_seq, 0);
			atomic_set(&pce_dev->last_intr_seq, 0);
			pce_dev->cadence_flag = !pce_dev->cadence_flag;
		}
	}
	return 0;
}
  4463. static int _qce_aead_ccm_req(void *handle, struct qce_req *q_req)
  4464. {
  4465. int rc = 0;
  4466. struct qce_device *pce_dev = (struct qce_device *) handle;
  4467. struct aead_request *areq = (struct aead_request *) q_req->areq;
  4468. uint32_t authsize = q_req->authsize;
  4469. uint32_t totallen_in, out_len;
  4470. uint32_t hw_pad_out = 0;
  4471. int ce_burst_size;
  4472. struct qce_cmdlist_info *cmdlistinfo = NULL;
  4473. int req_info = -1;
  4474. struct ce_request_info *preq_info;
  4475. struct ce_sps_data *pce_sps_data;
  4476. req_info = qce_alloc_req_info(pce_dev);
  4477. if (req_info < 0)
  4478. return -EBUSY;
  4479. q_req->current_req_info = req_info;
  4480. preq_info = &pce_dev->ce_request_info[req_info];
  4481. pce_sps_data = &preq_info->ce_sps;
  4482. ce_burst_size = pce_dev->ce_bam_info.ce_burst_size;
  4483. totallen_in = areq->cryptlen + q_req->assoclen;
  4484. if (q_req->dir == QCE_ENCRYPT) {
  4485. q_req->cryptlen = areq->cryptlen;
  4486. out_len = areq->cryptlen + authsize;
  4487. hw_pad_out = ALIGN(authsize, ce_burst_size) - authsize;
  4488. } else {
  4489. q_req->cryptlen = areq->cryptlen - authsize;
  4490. out_len = q_req->cryptlen;
  4491. hw_pad_out = authsize;
  4492. }
  4493. /*
  4494. * For crypto 5.0 that has burst size alignment requirement
  4495. * for data descritpor,
  4496. * the agent above(qcrypto) prepares the src scatter list with
  4497. * memory starting with associated data, followed by
  4498. * data stream to be ciphered.
  4499. * The destination scatter list is pointing to the same
  4500. * data area as source.
  4501. */
  4502. if (pce_dev->ce_bam_info.minor_version == 0)
  4503. preq_info->src_nents = count_sg(areq->src, totallen_in);
  4504. else
  4505. preq_info->src_nents = count_sg(areq->src, areq->cryptlen +
  4506. areq->assoclen);
  4507. if (q_req->assoclen) {
  4508. preq_info->assoc_nents = count_sg(q_req->asg, q_req->assoclen);
  4509. /* formatted associated data input */
  4510. qce_dma_map_sg(pce_dev->pdev, q_req->asg,
  4511. preq_info->assoc_nents, DMA_TO_DEVICE);
  4512. preq_info->asg = q_req->asg;
  4513. } else {
  4514. preq_info->assoc_nents = 0;
  4515. preq_info->asg = NULL;
  4516. }
  4517. /* cipher input */
  4518. qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
  4519. (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
  4520. DMA_TO_DEVICE);
  4521. /* cipher + mac output for encryption */
  4522. if (areq->src != areq->dst) {
  4523. /*
  4524. * The destination scatter list is pointing to the same
  4525. * data area as src.
  4526. * Note, the associated data will be pass-through
  4527. * at the beginning of destination area.
  4528. */
  4529. preq_info->dst_nents = count_sg(areq->dst,
  4530. out_len + areq->assoclen);
  4531. qce_dma_map_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
  4532. DMA_FROM_DEVICE);
  4533. } else {
  4534. preq_info->dst_nents = preq_info->src_nents;
  4535. }
  4536. if (pce_dev->support_cmd_dscr) {
  4537. cmdlistinfo = _ce_get_cipher_cmdlistinfo(pce_dev, req_info,
  4538. q_req);
  4539. if (cmdlistinfo == NULL) {
  4540. pr_err("Unsupported cipher algorithm %d, mode %d\n",
  4541. q_req->alg, q_req->mode);
  4542. qce_free_req_info(pce_dev, req_info, false);
  4543. return -EINVAL;
  4544. }
  4545. /* set up crypto device */
  4546. rc = _ce_setup_cipher(pce_dev, q_req, totallen_in,
  4547. q_req->assoclen, cmdlistinfo);
  4548. } else {
  4549. /* set up crypto device */
  4550. rc = _ce_setup_cipher_direct(pce_dev, q_req, totallen_in,
  4551. q_req->assoclen);
  4552. }
  4553. if (rc < 0)
  4554. goto bad;
  4555. preq_info->mode = q_req->mode;
  4556. /* setup for callback, and issue command to bam */
  4557. preq_info->areq = q_req->areq;
  4558. preq_info->qce_cb = q_req->qce_cb;
  4559. preq_info->dir = q_req->dir;
  4560. /* setup xfer type for producer callback handling */
  4561. preq_info->xfer_type = QCE_XFER_AEAD;
  4562. preq_info->req_len = totallen_in;
  4563. _qce_sps_iovec_count_init(pce_dev, req_info);
  4564. if (pce_dev->support_cmd_dscr && cmdlistinfo) {
  4565. rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK,
  4566. cmdlistinfo, &pce_sps_data->in_transfer);
  4567. if (rc)
  4568. goto bad;
  4569. }
  4570. if (pce_dev->ce_bam_info.minor_version == 0) {
  4571. goto bad;
  4572. } else {
  4573. if (q_req->assoclen) {
  4574. rc = _qce_sps_add_sg_data(pce_dev, q_req->asg,
  4575. q_req->assoclen, &pce_sps_data->in_transfer);
  4576. if (rc)
  4577. goto bad;
  4578. }
  4579. rc = _qce_sps_add_sg_data_off(pce_dev, areq->src, areq->cryptlen,
  4580. areq->assoclen,
  4581. &pce_sps_data->in_transfer);
  4582. if (rc)
  4583. goto bad;
  4584. _qce_set_flag(&pce_sps_data->in_transfer,
  4585. SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
  4586. _qce_ccm_get_around_input(pce_dev, preq_info, q_req->dir);
  4587. if (pce_dev->no_get_around) {
  4588. rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
  4589. &pce_sps_data->cmdlistptr.unlock_all_pipes,
  4590. &pce_sps_data->in_transfer);
  4591. if (rc)
  4592. goto bad;
  4593. }
  4594. /* Pass through to ignore associated data*/
  4595. rc = _qce_sps_add_data(
  4596. GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
  4597. q_req->assoclen,
  4598. &pce_sps_data->out_transfer);
  4599. if (rc)
  4600. goto bad;
  4601. rc = _qce_sps_add_sg_data_off(pce_dev, areq->dst, out_len,
  4602. areq->assoclen,
  4603. &pce_sps_data->out_transfer);
  4604. if (rc)
  4605. goto bad;
  4606. /* Pass through to ignore hw_pad (padding of the MAC data) */
  4607. rc = _qce_sps_add_data(
  4608. GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
  4609. hw_pad_out, &pce_sps_data->out_transfer);
  4610. if (rc)
  4611. goto bad;
  4612. if (pce_dev->no_get_around ||
  4613. totallen_in <= SPS_MAX_PKT_SIZE) {
  4614. rc = _qce_sps_add_data(
  4615. GET_PHYS_ADDR(pce_sps_data->result_dump),
  4616. CRYPTO_RESULT_DUMP_SIZE,
  4617. &pce_sps_data->out_transfer);
  4618. if (rc)
  4619. goto bad;
  4620. pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
  4621. } else {
  4622. pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
  4623. }
  4624. _qce_ccm_get_around_output(pce_dev, preq_info, q_req->dir);
  4625. select_mode(pce_dev, preq_info);
  4626. rc = _qce_sps_transfer(pce_dev, req_info);
  4627. cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
  4628. }
  4629. if (rc)
  4630. goto bad;
  4631. return 0;
  4632. bad:
  4633. if (preq_info->assoc_nents) {
  4634. qce_dma_unmap_sg(pce_dev->pdev, q_req->asg,
  4635. preq_info->assoc_nents, DMA_TO_DEVICE);
  4636. }
  4637. if (preq_info->src_nents) {
  4638. qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
  4639. (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
  4640. DMA_TO_DEVICE);
  4641. }
  4642. if (areq->src != areq->dst) {
  4643. qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
  4644. DMA_FROM_DEVICE);
  4645. }
  4646. qce_free_req_info(pce_dev, req_info, false);
  4647. return rc;
  4648. }
  4649. static int _qce_suspend(void *handle)
  4650. {
  4651. struct qce_device *pce_dev = (struct qce_device *)handle;
  4652. struct sps_pipe *sps_pipe_info;
  4653. int i = 0;
  4654. if (handle == NULL)
  4655. return -ENODEV;
  4656. for (i = 0; i < QCE_OFFLOAD_OPER_LAST; i++) {
  4657. if (i == QCE_OFFLOAD_NONE && !(pce_dev->kernel_pipes_support))
  4658. continue;
  4659. else if ((i > 0) && !(pce_dev->offload_pipes_support))
  4660. break;
  4661. sps_pipe_info = pce_dev->ce_bam_info.consumer[i].pipe;
  4662. sps_disconnect(sps_pipe_info);
  4663. sps_pipe_info = pce_dev->ce_bam_info.producer[i].pipe;
  4664. sps_disconnect(sps_pipe_info);
  4665. }
  4666. return 0;
  4667. }
  4668. static int _qce_resume(void *handle)
  4669. {
  4670. struct qce_device *pce_dev = (struct qce_device *)handle;
  4671. struct sps_pipe *sps_pipe_info;
  4672. struct sps_connect *sps_connect_info;
  4673. int rc, i;
  4674. if (handle == NULL)
  4675. return -ENODEV;
  4676. for (i = 0; i < QCE_OFFLOAD_OPER_LAST; i++) {
  4677. if (i == QCE_OFFLOAD_NONE && !(pce_dev->kernel_pipes_support))
  4678. continue;
  4679. else if ((i > 0) && !(pce_dev->offload_pipes_support))
  4680. break;
  4681. sps_pipe_info = pce_dev->ce_bam_info.consumer[i].pipe;
  4682. sps_connect_info = &pce_dev->ce_bam_info.consumer[i].connect;
  4683. memset(sps_connect_info->desc.base, 0x00,
  4684. sps_connect_info->desc.size);
  4685. rc = sps_connect(sps_pipe_info, sps_connect_info);
  4686. if (rc) {
  4687. pr_err("sps_connect() fail pipe=0x%lx, rc = %d\n",
  4688. (uintptr_t)sps_pipe_info, rc);
  4689. return rc;
  4690. }
  4691. sps_pipe_info = pce_dev->ce_bam_info.producer[i].pipe;
  4692. sps_connect_info = &pce_dev->ce_bam_info.producer[i].connect;
  4693. memset(sps_connect_info->desc.base, 0x00,
  4694. sps_connect_info->desc.size);
  4695. rc = sps_connect(sps_pipe_info, sps_connect_info);
  4696. if (rc)
  4697. pr_err("sps_connect() fail pipe=0x%lx, rc = %d\n",
  4698. (uintptr_t)sps_pipe_info, rc);
  4699. rc = sps_register_event(sps_pipe_info,
  4700. &pce_dev->ce_bam_info.producer[i].event);
  4701. if (rc)
  4702. pr_err("Producer cb registration failed rc = %d\n",
  4703. rc);
  4704. }
  4705. qce_enable_clock_gating(pce_dev);
  4706. return rc;
  4707. }
/* Suspend/resume callbacks exported for the platform power-management glue. */
struct qce_pm_table qce_pm_table = {_qce_suspend, _qce_resume};
EXPORT_SYMBOL(qce_pm_table);
  4710. int qce_aead_req(void *handle, struct qce_req *q_req)
  4711. {
  4712. struct qce_device *pce_dev = (struct qce_device *)handle;
  4713. struct aead_request *areq;
  4714. uint32_t authsize;
  4715. struct crypto_aead *aead;
  4716. uint32_t ivsize;
  4717. uint32_t totallen;
  4718. int rc = 0;
  4719. struct qce_cmdlist_info *cmdlistinfo = NULL;
  4720. int req_info = -1;
  4721. struct ce_sps_data *pce_sps_data;
  4722. struct ce_request_info *preq_info;
  4723. if (q_req->mode == QCE_MODE_CCM)
  4724. return _qce_aead_ccm_req(handle, q_req);
  4725. req_info = qce_alloc_req_info(pce_dev);
  4726. if (req_info < 0)
  4727. return -EBUSY;
  4728. q_req->current_req_info = req_info;
  4729. preq_info = &pce_dev->ce_request_info[req_info];
  4730. pce_sps_data = &preq_info->ce_sps;
  4731. areq = (struct aead_request *) q_req->areq;
  4732. aead = crypto_aead_reqtfm(areq);
  4733. ivsize = crypto_aead_ivsize(aead);
  4734. q_req->ivsize = ivsize;
  4735. authsize = q_req->authsize;
  4736. if (q_req->dir == QCE_ENCRYPT)
  4737. q_req->cryptlen = areq->cryptlen;
  4738. else
  4739. q_req->cryptlen = areq->cryptlen - authsize;
  4740. if (q_req->cryptlen > UINT_MAX - areq->assoclen) {
  4741. pr_err("Integer overflow on total aead req length.\n");
  4742. return -EINVAL;
  4743. }
  4744. totallen = q_req->cryptlen + areq->assoclen;
  4745. if (pce_dev->support_cmd_dscr) {
  4746. cmdlistinfo = _ce_get_aead_cmdlistinfo(pce_dev,
  4747. req_info, q_req);
  4748. if (cmdlistinfo == NULL) {
  4749. pr_err("Unsupported aead ciphering algorithm %d, mode %d, ciphering key length %d, auth digest size %d\n",
  4750. q_req->alg, q_req->mode, q_req->encklen,
  4751. q_req->authsize);
  4752. qce_free_req_info(pce_dev, req_info, false);
  4753. return -EINVAL;
  4754. }
  4755. /* set up crypto device */
  4756. rc = _ce_setup_aead(pce_dev, q_req, totallen,
  4757. areq->assoclen, cmdlistinfo);
  4758. if (rc < 0) {
  4759. qce_free_req_info(pce_dev, req_info, false);
  4760. return -EINVAL;
  4761. }
  4762. }
  4763. /*
  4764. * For crypto 5.0 that has burst size alignment requirement
  4765. * for data descritpor,
  4766. * the agent above(qcrypto) prepares the src scatter list with
  4767. * memory starting with associated data, followed by
  4768. * iv, and data stream to be ciphered.
  4769. */
  4770. preq_info->src_nents = count_sg(areq->src, totallen);
  4771. /* cipher input */
  4772. qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
  4773. (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
  4774. DMA_TO_DEVICE);
  4775. /* cipher output for encryption */
  4776. if (areq->src != areq->dst) {
  4777. preq_info->dst_nents = count_sg(areq->dst, totallen);
  4778. qce_dma_map_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
  4779. DMA_FROM_DEVICE);
  4780. }
  4781. /* setup for callback, and issue command to bam */
  4782. preq_info->areq = q_req->areq;
  4783. preq_info->qce_cb = q_req->qce_cb;
  4784. preq_info->dir = q_req->dir;
  4785. preq_info->asg = NULL;
  4786. preq_info->offload_op = QCE_OFFLOAD_NONE;
  4787. /* setup xfer type for producer callback handling */
  4788. preq_info->xfer_type = QCE_XFER_AEAD;
  4789. preq_info->req_len = totallen;
  4790. _qce_sps_iovec_count_init(pce_dev, req_info);
  4791. if (pce_dev->support_cmd_dscr) {
  4792. rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK,
  4793. cmdlistinfo, &pce_sps_data->in_transfer);
  4794. if (rc)
  4795. goto bad;
  4796. } else {
  4797. rc = _ce_setup_aead_direct(pce_dev, q_req, totallen,
  4798. areq->assoclen);
  4799. if (rc)
  4800. goto bad;
  4801. }
  4802. preq_info->mode = q_req->mode;
  4803. if (pce_dev->ce_bam_info.minor_version == 0) {
  4804. rc = _qce_sps_add_sg_data(pce_dev, areq->src, totallen,
  4805. &pce_sps_data->in_transfer);
  4806. if (rc)
  4807. goto bad;
  4808. _qce_set_flag(&pce_sps_data->in_transfer,
  4809. SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
  4810. rc = _qce_sps_add_sg_data(pce_dev, areq->dst, totallen,
  4811. &pce_sps_data->out_transfer);
  4812. if (rc)
  4813. goto bad;
  4814. if (totallen > SPS_MAX_PKT_SIZE) {
  4815. _qce_set_flag(&pce_sps_data->out_transfer,
  4816. SPS_IOVEC_FLAG_INT);
  4817. pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
  4818. } else {
  4819. rc = _qce_sps_add_data(GET_PHYS_ADDR(
  4820. pce_sps_data->result_dump),
  4821. CRYPTO_RESULT_DUMP_SIZE,
  4822. &pce_sps_data->out_transfer);
  4823. if (rc)
  4824. goto bad;
  4825. _qce_set_flag(&pce_sps_data->out_transfer,
  4826. SPS_IOVEC_FLAG_INT);
  4827. pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
  4828. }
  4829. rc = _qce_sps_transfer(pce_dev, req_info);
  4830. } else {
  4831. rc = _qce_sps_add_sg_data(pce_dev, areq->src, totallen,
  4832. &pce_sps_data->in_transfer);
  4833. if (rc)
  4834. goto bad;
  4835. _qce_set_flag(&pce_sps_data->in_transfer,
  4836. SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
  4837. if (pce_dev->no_get_around) {
  4838. rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
  4839. &pce_sps_data->cmdlistptr.unlock_all_pipes,
  4840. &pce_sps_data->in_transfer);
  4841. if (rc)
  4842. goto bad;
  4843. }
  4844. rc = _qce_sps_add_sg_data(pce_dev, areq->dst, totallen,
  4845. &pce_sps_data->out_transfer);
  4846. if (rc)
  4847. goto bad;
  4848. if (pce_dev->no_get_around || totallen <= SPS_MAX_PKT_SIZE) {
  4849. rc = _qce_sps_add_data(
  4850. GET_PHYS_ADDR(pce_sps_data->result_dump),
  4851. CRYPTO_RESULT_DUMP_SIZE,
  4852. &pce_sps_data->out_transfer);
  4853. if (rc)
  4854. goto bad;
  4855. pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
  4856. } else {
  4857. pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
  4858. }
  4859. select_mode(pce_dev, preq_info);
  4860. rc = _qce_sps_transfer(pce_dev, req_info);
  4861. cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
  4862. }
  4863. if (rc)
  4864. goto bad;
  4865. return 0;
  4866. bad:
  4867. if (preq_info->src_nents)
  4868. qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
  4869. (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
  4870. DMA_TO_DEVICE);
  4871. if (areq->src != areq->dst)
  4872. qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
  4873. DMA_FROM_DEVICE);
  4874. qce_free_req_info(pce_dev, req_info, false);
  4875. return rc;
  4876. }
  4877. EXPORT_SYMBOL(qce_aead_req);
/*
 * qce_ablk_cipher_req() - submit a symmetric skcipher request to the CE.
 *
 * DMA-maps source/destination (skipped for offload pipes, whose buffers
 * are pre-mapped by the offload client), programs the engine via command
 * descriptors or direct register writes, builds the SPS in/out transfer
 * vectors, and submits.  Completion is delivered via c_req->qce_cb.
 *
 * Returns 0 on successful submission, -EBUSY when no request slot is
 * free, or a negative errno on failure (slot and mappings released).
 */
int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
{
	int rc = 0;
	struct qce_device *pce_dev = (struct qce_device *) handle;
	struct skcipher_request *areq = (struct skcipher_request *)
						c_req->areq;
	struct qce_cmdlist_info *cmdlistinfo = NULL;
	int req_info = -1;
	struct ce_sps_data *pce_sps_data;
	struct ce_request_info *preq_info;

	req_info = qce_alloc_req_info(pce_dev);
	if (req_info < 0)
		return -EBUSY;
	c_req->current_req_info = req_info;
	preq_info = &pce_dev->ce_request_info[req_info];
	pce_sps_data = &preq_info->ce_sps;

	preq_info->src_nents = 0;
	preq_info->dst_nents = 0;

	/* cipher input */
	preq_info->src_nents = count_sg(areq->src, areq->cryptlen);

	/* Offload pipes come with pre-mapped buffers; map only for
	 * kernel-owned requests.
	 */
	if (!is_offload_op(c_req->offload_op))
		qce_dma_map_sg(pce_dev->pdev, areq->src,
			preq_info->src_nents,
			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
							DMA_TO_DEVICE);
	/* cipher output */
	if (areq->src != areq->dst) {
		preq_info->dst_nents = count_sg(areq->dst, areq->cryptlen);
		if (!is_offload_op(c_req->offload_op))
			qce_dma_map_sg(pce_dev->pdev, areq->dst,
				preq_info->dst_nents, DMA_FROM_DEVICE);
	} else {
		preq_info->dst_nents = preq_info->src_nents;
	}
	preq_info->dir = c_req->dir;
	if ((pce_dev->ce_bam_info.minor_version == 0) &&
			(preq_info->dir == QCE_DECRYPT) &&
			(c_req->mode == QCE_MODE_CBC)) {
		/* Legacy HW CBC-decrypt: save the last ciphertext block as
		 * the chaining IV before the engine overwrites the buffer.
		 * NOTE(review): reads the final 16 bytes of the *first* SG
		 * entry — assumes src is a single entry here; confirm.
		 */
		memcpy(preq_info->dec_iv, (unsigned char *)
			sg_virt(areq->src) + areq->src->length - 16,
			NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE);
	}

	/* set up crypto device */
	if (pce_dev->support_cmd_dscr) {
		cmdlistinfo = _ce_get_cipher_cmdlistinfo(pce_dev,
							req_info, c_req);
		if (cmdlistinfo == NULL) {
			pr_err("Unsupported cipher algorithm %d, mode %d\n",
						c_req->alg, c_req->mode);
			qce_free_req_info(pce_dev, req_info, false);
			return -EINVAL;
		}
		rc = _ce_setup_cipher(pce_dev, c_req, areq->cryptlen, 0,
							cmdlistinfo);
	} else {
		rc = _ce_setup_cipher_direct(pce_dev, c_req, areq->cryptlen, 0);
	}
	if (rc < 0)
		goto bad;

	preq_info->mode = c_req->mode;
	preq_info->offload_op = c_req->offload_op;

	/* setup for client callback, and issue command to BAM */
	preq_info->areq = areq;
	preq_info->qce_cb = c_req->qce_cb;

	/* setup xfer type for producer callback handling */
	preq_info->xfer_type = QCE_XFER_CIPHERING;
	preq_info->req_len = areq->cryptlen;

	_qce_sps_iovec_count_init(pce_dev, req_info);
	if (pce_dev->support_cmd_dscr && cmdlistinfo) {
		rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK,
					cmdlistinfo, &pce_sps_data->in_transfer);
		if (rc)
			goto bad;
	}
	/* NOTE(review): uses sg->dma_address directly — presumes a single
	 * (or first-entry-sufficient) mapped segment; verify against callers.
	 */
	rc = _qce_sps_add_data(areq->src->dma_address, areq->cryptlen,
					&pce_sps_data->in_transfer);
	if (rc)
		goto bad;
	_qce_set_flag(&pce_sps_data->in_transfer,
			SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
	if (pce_dev->no_get_around) {
		rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
			&pce_sps_data->cmdlistptr.unlock_all_pipes,
			&pce_sps_data->in_transfer);
		if (rc)
			goto bad;
	}
	rc = _qce_sps_add_data(areq->dst->dma_address, areq->cryptlen,
					&pce_sps_data->out_transfer);
	if (rc)
		goto bad;
	if (pce_dev->no_get_around || areq->cryptlen <= SPS_MAX_PKT_SIZE) {
		pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
		/* Offload requests skip the result dump descriptor. */
		if (!is_offload_op(c_req->offload_op)) {
			rc = _qce_sps_add_data(
				GET_PHYS_ADDR(pce_sps_data->result_dump),
				CRYPTO_RESULT_DUMP_SIZE,
				&pce_sps_data->out_transfer);
			if (rc)
				goto bad;
		}
	} else {
		pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
	}

	select_mode(pce_dev, preq_info);
	rc = _qce_sps_transfer(pce_dev, req_info);
	cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
	if (rc)
		goto bad;

	return 0;
bad:
	if (!is_offload_op(c_req->offload_op)) {
		if (areq->src != areq->dst)
			if (preq_info->dst_nents)
				qce_dma_unmap_sg(pce_dev->pdev, areq->dst,
				preq_info->dst_nents, DMA_FROM_DEVICE);
		if (preq_info->src_nents)
			qce_dma_unmap_sg(pce_dev->pdev, areq->src,
				preq_info->src_nents,
				(areq->src == areq->dst) ?
				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	}
	qce_free_req_info(pce_dev, req_info, false);
	return rc;
}
EXPORT_SYMBOL(qce_ablk_cipher_req);
/*
 * qce_process_sha_req() - submit a hash/HMAC request to the crypto engine.
 *
 * A NULL sreq selects the internal dummy request (reserved slot
 * DUMMY_REQ_INDEX, used to flush completions in bunch mode).  Otherwise
 * a slot is allocated, the source scatterlist is DMA-mapped, the engine
 * is programmed (command descriptors or direct writes), and the SPS
 * transfer is submitted.  Completion is delivered via sreq->qce_cb.
 *
 * Returns 0 on successful submission, -EBUSY when no slot is free, or a
 * negative errno on failure (slot and mapping released).
 */
int qce_process_sha_req(void *handle, struct qce_sha_req *sreq)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	int rc;

	struct ahash_request *areq;
	struct qce_cmdlist_info *cmdlistinfo = NULL;
	int req_info = -1;
	struct ce_sps_data *pce_sps_data;
	struct ce_request_info *preq_info;
	bool is_dummy = false;

	if (!sreq) {
		/* Internal dummy request: use the reserved slot. */
		sreq = &(pce_dev->dummyreq.sreq);
		req_info = DUMMY_REQ_INDEX;
		is_dummy = true;
	} else {
		req_info = qce_alloc_req_info(pce_dev);
		if (req_info < 0)
			return -EBUSY;
	}

	sreq->current_req_info = req_info;
	areq = (struct ahash_request *)sreq->areq;
	preq_info = &pce_dev->ce_request_info[req_info];
	pce_sps_data = &preq_info->ce_sps;

	preq_info->src_nents = count_sg(sreq->src, sreq->size);
	qce_dma_map_sg(pce_dev->pdev, sreq->src, preq_info->src_nents,
							DMA_TO_DEVICE);

	if (pce_dev->support_cmd_dscr) {
		cmdlistinfo = _ce_get_hash_cmdlistinfo(pce_dev, req_info, sreq);
		if (cmdlistinfo == NULL) {
			pr_err("Unsupported hash algorithm %d\n", sreq->alg);
			qce_free_req_info(pce_dev, req_info, false);
			return -EINVAL;
		}
		rc = _ce_setup_hash(pce_dev, sreq, cmdlistinfo);
	} else {
		rc = _ce_setup_hash_direct(pce_dev, sreq);
	}
	if (rc < 0)
		goto bad;

	preq_info->areq = areq;
	preq_info->qce_cb = sreq->qce_cb;
	preq_info->offload_op = QCE_OFFLOAD_NONE;

	/* setup xfer type for producer callback handling */
	preq_info->xfer_type = QCE_XFER_HASHING;
	preq_info->req_len = sreq->size;

	_qce_sps_iovec_count_init(pce_dev, req_info);
	if (pce_dev->support_cmd_dscr && cmdlistinfo) {
		rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK,
					cmdlistinfo, &pce_sps_data->in_transfer);
		if (rc)
			goto bad;
	}
	rc = _qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes,
						&pce_sps_data->in_transfer);
	if (rc)
		goto bad;

	/* always ensure there is input data. ZLT does not work for bam-ndp */
	if (!areq->nbytes) {
		rc = _qce_sps_add_data(
			GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
			pce_dev->ce_bam_info.ce_burst_size,
			&pce_sps_data->in_transfer);
		if (rc)
			goto bad;
	}
	_qce_set_flag(&pce_sps_data->in_transfer,
				SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
	if (pce_dev->no_get_around) {
		rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
			&pce_sps_data->cmdlistptr.unlock_all_pipes,
			&pce_sps_data->in_transfer);
		if (rc)
			goto bad;
	}

	rc = _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
					CRYPTO_RESULT_DUMP_SIZE,
					&pce_sps_data->out_transfer);
	if (rc)
		goto bad;

	if (is_dummy) {
		/* Dummy reqs always interrupt and skip ownership handoff. */
		_qce_set_flag(&pce_sps_data->out_transfer, SPS_IOVEC_FLAG_INT);
		rc = _qce_sps_transfer(pce_dev, req_info);
	} else {
		select_mode(pce_dev, preq_info);
		rc = _qce_sps_transfer(pce_dev, req_info);
		cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
	}
	if (rc)
		goto bad;
	return 0;
bad:
	if (preq_info->src_nents) {
		qce_dma_unmap_sg(pce_dev->pdev, sreq->src,
				preq_info->src_nents, DMA_TO_DEVICE);
	}
	qce_free_req_info(pce_dev, req_info, false);
	return rc;
}
EXPORT_SYMBOL(qce_process_sha_req);
/*
 * qce_f8_req() - issue one OTA f8 (confidentiality) ciphering request.
 * @handle: qce device handle from qce_open().
 * @req:    f8 request (KASUMI or SNOW3G); in-place ciphering is selected
 *          when data_in == data_out.
 * @cookie: opaque pointer handed back to @qce_cb on completion.
 * @qce_cb: client completion callback.
 *
 * Returns 0 on success, -EBUSY when no request slot is free, -EINVAL for
 * an unknown algorithm, key-stream mode (data_in == NULL, not supported),
 * or an out-of-range bearer; otherwise a negative error from the
 * setup/transfer helpers. On failure DMA mappings and the slot are freed.
 */
int qce_f8_req(void *handle, struct qce_f8_req *req,
			void *cookie, qce_comp_func_ptr_t qce_cb)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	bool key_stream_mode;
	dma_addr_t dst;
	int rc;
	struct qce_cmdlist_info *cmdlistinfo;
	int req_info = -1;
	struct ce_request_info *preq_info;
	struct ce_sps_data *pce_sps_data;

	req_info = qce_alloc_req_info(pce_dev);
	if (req_info < 0)
		return -EBUSY;
	req->current_req_info = req_info;
	preq_info = &pce_dev->ce_request_info[req_info];
	pce_sps_data = &preq_info->ce_sps;

	/* Select the prebuilt command list for the requested algorithm. */
	switch (req->algorithm) {
	case QCE_OTA_ALGO_KASUMI:
		cmdlistinfo = &pce_sps_data->cmdlistptr.f8_kasumi;
		break;
	case QCE_OTA_ALGO_SNOW3G:
		cmdlistinfo = &pce_sps_data->cmdlistptr.f8_snow3g;
		break;
	default:
		qce_free_req_info(pce_dev, req_info, false);
		return -EINVAL;
	}

	key_stream_mode = (req->data_in == NULL);

	/* don't support key stream mode */
	if (key_stream_mode || (req->bearer >= QCE_OTA_MAX_BEARER)) {
		qce_free_req_info(pce_dev, req_info, false);
		return -EINVAL;
	}

	/* F8 cipher input */
	preq_info->phy_ota_src = dma_map_single(pce_dev->pdev,
					req->data_in, req->data_len,
					(req->data_in == req->data_out) ?
					DMA_BIDIRECTIONAL : DMA_TO_DEVICE);

	/* F8 cipher output */
	if (req->data_in != req->data_out) {
		dst = dma_map_single(pce_dev->pdev, req->data_out,
				req->data_len, DMA_FROM_DEVICE);
		preq_info->phy_ota_dst = dst;
	} else {
		/* in place ciphering */
		dst = preq_info->phy_ota_src;
		preq_info->phy_ota_dst = 0;
	}
	preq_info->ota_size = req->data_len;

	/* set up crypto device */
	if (pce_dev->support_cmd_dscr)
		rc = _ce_f8_setup(pce_dev, req, key_stream_mode, 1, 0,
				req->data_len, cmdlistinfo);
	else
		rc = _ce_f8_setup_direct(pce_dev, req, key_stream_mode, 1, 0,
				req->data_len);
	if (rc < 0)
		goto bad;

	/* setup for callback, and issue command to sps */
	preq_info->areq = cookie;
	preq_info->qce_cb = qce_cb;
	preq_info->offload_op = QCE_OFFLOAD_NONE;

	/* setup xfer type for producer callback handling */
	preq_info->xfer_type = QCE_XFER_F8;
	preq_info->req_len = req->data_len;

	_qce_sps_iovec_count_init(pce_dev, req_info);
	if (pce_dev->support_cmd_dscr) {
		rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK,
				cmdlistinfo, &pce_sps_data->in_transfer);
		if (rc)
			goto bad;
	}
	rc = _qce_sps_add_data((uint32_t)preq_info->phy_ota_src, req->data_len,
					&pce_sps_data->in_transfer);
	if (rc)
		goto bad;
	_qce_set_flag(&pce_sps_data->in_transfer,
			SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);

	/*
	 * NOTE(review): the UNLOCK command is queued even when
	 * !support_cmd_dscr (unlike the hash path, which guards it) —
	 * confirm this is intended for the OTA pipes.
	 */
	rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
			&pce_sps_data->cmdlistptr.unlock_all_pipes,
			&pce_sps_data->in_transfer);
	if (rc)
		goto bad;

	rc = _qce_sps_add_data((uint32_t)dst, req->data_len,
					&pce_sps_data->out_transfer);
	if (rc)
		goto bad;
	rc = _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
					CRYPTO_RESULT_DUMP_SIZE,
					&pce_sps_data->out_transfer);
	if (rc)
		goto bad;

	select_mode(pce_dev, preq_info);
	rc = _qce_sps_transfer(pce_dev, req_info);
	cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
	if (rc)
		goto bad;
	return 0;

bad:
	/* Unmap whatever was mapped; phy_ota_dst == 0 means in-place. */
	if (preq_info->phy_ota_dst != 0)
		dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst,
				req->data_len, DMA_FROM_DEVICE);
	if (preq_info->phy_ota_src != 0)
		dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
				req->data_len,
				(req->data_in == req->data_out) ?
				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	qce_free_req_info(pce_dev, req_info, false);
	return rc;
}
EXPORT_SYMBOL(qce_f8_req);
  5215. int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *mreq,
  5216. void *cookie, qce_comp_func_ptr_t qce_cb)
  5217. {
  5218. struct qce_device *pce_dev = (struct qce_device *) handle;
  5219. uint16_t num_pkt = mreq->num_pkt;
  5220. uint16_t cipher_start = mreq->cipher_start;
  5221. uint16_t cipher_size = mreq->cipher_size;
  5222. struct qce_f8_req *req = &mreq->qce_f8_req;
  5223. uint32_t total;
  5224. dma_addr_t dst = 0;
  5225. int rc = 0;
  5226. struct qce_cmdlist_info *cmdlistinfo;
  5227. int req_info = -1;
  5228. struct ce_request_info *preq_info;
  5229. struct ce_sps_data *pce_sps_data;
  5230. req_info = qce_alloc_req_info(pce_dev);
  5231. if (req_info < 0)
  5232. return -EBUSY;
  5233. req->current_req_info = req_info;
  5234. preq_info = &pce_dev->ce_request_info[req_info];
  5235. pce_sps_data = &preq_info->ce_sps;
  5236. switch (req->algorithm) {
  5237. case QCE_OTA_ALGO_KASUMI:
  5238. cmdlistinfo = &pce_sps_data->cmdlistptr.f8_kasumi;
  5239. break;
  5240. case QCE_OTA_ALGO_SNOW3G:
  5241. cmdlistinfo = &pce_sps_data->cmdlistptr.f8_snow3g;
  5242. break;
  5243. default:
  5244. qce_free_req_info(pce_dev, req_info, false);
  5245. return -EINVAL;
  5246. }
  5247. total = num_pkt * req->data_len;
  5248. /* F8 cipher input */
  5249. preq_info->phy_ota_src = dma_map_single(pce_dev->pdev,
  5250. req->data_in, total,
  5251. (req->data_in == req->data_out) ?
  5252. DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
  5253. /* F8 cipher output */
  5254. if (req->data_in != req->data_out) {
  5255. dst = dma_map_single(pce_dev->pdev, req->data_out, total,
  5256. DMA_FROM_DEVICE);
  5257. preq_info->phy_ota_dst = dst;
  5258. } else {
  5259. /* in place ciphering */
  5260. dst = preq_info->phy_ota_src;
  5261. preq_info->phy_ota_dst = 0;
  5262. }
  5263. preq_info->ota_size = total;
  5264. /* set up crypto device */
  5265. if (pce_dev->support_cmd_dscr)
  5266. rc = _ce_f8_setup(pce_dev, req, false, num_pkt, cipher_start,
  5267. cipher_size, cmdlistinfo);
  5268. else
  5269. rc = _ce_f8_setup_direct(pce_dev, req, false, num_pkt,
  5270. cipher_start, cipher_size);
  5271. if (rc)
  5272. goto bad;
  5273. /* setup for callback, and issue command to sps */
  5274. preq_info->areq = cookie;
  5275. preq_info->qce_cb = qce_cb;
  5276. preq_info->offload_op = QCE_OFFLOAD_NONE;
  5277. /* setup xfer type for producer callback handling */
  5278. preq_info->xfer_type = QCE_XFER_F8;
  5279. preq_info->req_len = total;
  5280. _qce_sps_iovec_count_init(pce_dev, req_info);
  5281. if (pce_dev->support_cmd_dscr) {
  5282. rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK,
  5283. cmdlistinfo, &pce_sps_data->in_transfer);
  5284. goto bad;
  5285. }
  5286. rc = _qce_sps_add_data((uint32_t)preq_info->phy_ota_src, total,
  5287. &pce_sps_data->in_transfer);
  5288. if (rc)
  5289. goto bad;
  5290. _qce_set_flag(&pce_sps_data->in_transfer,
  5291. SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
  5292. rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
  5293. &pce_sps_data->cmdlistptr.unlock_all_pipes,
  5294. &pce_sps_data->in_transfer);
  5295. if (rc)
  5296. goto bad;
  5297. rc = _qce_sps_add_data((uint32_t)dst, total,
  5298. &pce_sps_data->out_transfer);
  5299. if (rc)
  5300. goto bad;
  5301. rc = _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
  5302. CRYPTO_RESULT_DUMP_SIZE,
  5303. &pce_sps_data->out_transfer);
  5304. if (rc)
  5305. goto bad;
  5306. select_mode(pce_dev, preq_info);
  5307. rc = _qce_sps_transfer(pce_dev, req_info);
  5308. cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
  5309. if (rc == 0)
  5310. return 0;
  5311. bad:
  5312. if (preq_info->phy_ota_dst)
  5313. dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst, total,
  5314. DMA_FROM_DEVICE);
  5315. dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src, total,
  5316. (req->data_in == req->data_out) ?
  5317. DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
  5318. qce_free_req_info(pce_dev, req_info, false);
  5319. return rc;
  5320. }
  5321. EXPORT_SYMBOL(qce_f8_multi_pkt_req);
/*
 * qce_f9_req() - issue one OTA f9 (integrity/MAC) request.
 * @handle: qce device handle from qce_open().
 * @req:    f9 request (KASUMI or SNOW3G) over req->message / req->msize.
 * @cookie: opaque pointer handed back to @qce_cb on completion.
 * @qce_cb: client completion callback.
 *
 * Returns 0 on success, -EBUSY when no request slot is free, -EINVAL for
 * an unknown algorithm, or a negative error from the setup/transfer
 * helpers. The computed MAC is returned via the result-dump area to the
 * completion path. On failure the mapping and the slot are released.
 */
int qce_f9_req(void *handle, struct qce_f9_req *req, void *cookie,
			qce_comp_func_ptr_t qce_cb)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	int rc;
	struct qce_cmdlist_info *cmdlistinfo;
	int req_info = -1;
	struct ce_sps_data *pce_sps_data;
	struct ce_request_info *preq_info;

	req_info = qce_alloc_req_info(pce_dev);
	if (req_info < 0)
		return -EBUSY;
	req->current_req_info = req_info;
	preq_info = &pce_dev->ce_request_info[req_info];
	pce_sps_data = &preq_info->ce_sps;

	/* Select the prebuilt command list for the requested algorithm. */
	switch (req->algorithm) {
	case QCE_OTA_ALGO_KASUMI:
		cmdlistinfo = &pce_sps_data->cmdlistptr.f9_kasumi;
		break;
	case QCE_OTA_ALGO_SNOW3G:
		cmdlistinfo = &pce_sps_data->cmdlistptr.f9_snow3g;
		break;
	default:
		qce_free_req_info(pce_dev, req_info, false);
		return -EINVAL;
	}

	/* Map the message for device reads; no output buffer is needed. */
	preq_info->phy_ota_src = dma_map_single(pce_dev->pdev, req->message,
			req->msize, DMA_TO_DEVICE);

	preq_info->ota_size = req->msize;

	if (pce_dev->support_cmd_dscr)
		rc = _ce_f9_setup(pce_dev, req, cmdlistinfo);
	else
		rc = _ce_f9_setup_direct(pce_dev, req);
	if (rc < 0)
		goto bad;

	/* setup for callback, and issue command to sps */
	preq_info->areq = cookie;
	preq_info->qce_cb = qce_cb;
	preq_info->offload_op = QCE_OFFLOAD_NONE;

	/* setup xfer type for producer callback handling */
	preq_info->xfer_type = QCE_XFER_F9;
	preq_info->req_len = req->msize;

	_qce_sps_iovec_count_init(pce_dev, req_info);
	if (pce_dev->support_cmd_dscr) {
		rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK,
				cmdlistinfo, &pce_sps_data->in_transfer);
		if (rc)
			goto bad;
	}
	rc = _qce_sps_add_data((uint32_t)preq_info->phy_ota_src, req->msize,
					&pce_sps_data->in_transfer);
	if (rc)
		goto bad;
	_qce_set_flag(&pce_sps_data->in_transfer,
			SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);

	/*
	 * NOTE(review): unlock is queued even when !support_cmd_dscr —
	 * same pattern as qce_f8_req(); confirm intended.
	 */
	rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
			&pce_sps_data->cmdlistptr.unlock_all_pipes,
			&pce_sps_data->in_transfer);
	if (rc)
		goto bad;

	/* Producer pipe: engine writes MAC/status into result_dump. */
	rc = _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
					CRYPTO_RESULT_DUMP_SIZE,
					&pce_sps_data->out_transfer);
	if (rc)
		goto bad;

	select_mode(pce_dev, preq_info);
	rc = _qce_sps_transfer(pce_dev, req_info);
	cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
	if (rc)
		goto bad;
	return 0;

bad:
	dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
			req->msize, DMA_TO_DEVICE);
	qce_free_req_info(pce_dev, req_info, false);
	return rc;
}
EXPORT_SYMBOL(qce_f9_req);
  5400. static int __qce_get_device_tree_data(struct platform_device *pdev,
  5401. struct qce_device *pce_dev)
  5402. {
  5403. struct resource *resource;
  5404. int rc = 0, i = 0;
  5405. pce_dev->is_shared = of_property_read_bool((&pdev->dev)->of_node,
  5406. "qcom,ce-hw-shared");
  5407. pce_dev->support_hw_key = of_property_read_bool((&pdev->dev)->of_node,
  5408. "qcom,ce-hw-key");
  5409. pce_dev->use_sw_aes_cbc_ecb_ctr_algo =
  5410. of_property_read_bool((&pdev->dev)->of_node,
  5411. "qcom,use-sw-aes-cbc-ecb-ctr-algo");
  5412. pce_dev->use_sw_aead_algo =
  5413. of_property_read_bool((&pdev->dev)->of_node,
  5414. "qcom,use-sw-aead-algo");
  5415. pce_dev->use_sw_aes_xts_algo =
  5416. of_property_read_bool((&pdev->dev)->of_node,
  5417. "qcom,use-sw-aes-xts-algo");
  5418. pce_dev->use_sw_ahash_algo =
  5419. of_property_read_bool((&pdev->dev)->of_node,
  5420. "qcom,use-sw-ahash-algo");
  5421. pce_dev->use_sw_hmac_algo =
  5422. of_property_read_bool((&pdev->dev)->of_node,
  5423. "qcom,use-sw-hmac-algo");
  5424. pce_dev->use_sw_aes_ccm_algo =
  5425. of_property_read_bool((&pdev->dev)->of_node,
  5426. "qcom,use-sw-aes-ccm-algo");
  5427. pce_dev->support_clk_mgmt_sus_res = of_property_read_bool(
  5428. (&pdev->dev)->of_node, "qcom,clk-mgmt-sus-res");
  5429. pce_dev->support_only_core_src_clk = of_property_read_bool(
  5430. (&pdev->dev)->of_node, "qcom,support-core-clk-only");
  5431. pce_dev->request_bw_before_clk = of_property_read_bool(
  5432. (&pdev->dev)->of_node, "qcom,request-bw-before-clk");
  5433. pce_dev->kernel_pipes_support = true;
  5434. if (of_property_read_u32((&pdev->dev)->of_node,
  5435. "qcom,bam-pipe-pair",
  5436. &pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_NONE])) {
  5437. pr_warn("Kernel pipes not supported.\n");
  5438. //Unused pipe, just as failsafe.
  5439. pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_NONE] = 2;
  5440. pce_dev->kernel_pipes_support = false;
  5441. }
  5442. if (of_property_read_bool((&pdev->dev)->of_node,
  5443. "qcom,offload-ops-support")) {
  5444. pce_dev->offload_pipes_support = true;
  5445. if (of_property_read_u32((&pdev->dev)->of_node,
  5446. "qcom,bam-pipe-offload-cpb-hlos",
  5447. &pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_CPB_HLOS])) {
  5448. pr_err("Fail to get bam offload cpb-hlos pipe pair info.\n");
  5449. return -EINVAL;
  5450. }
  5451. if (of_property_read_u32((&pdev->dev)->of_node,
  5452. "qcom,bam-pipe-offload-hlos-hlos",
  5453. &pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_HLOS_HLOS])) {
  5454. pr_err("Fail to get bam offload hlos-hlos info.\n");
  5455. return -EINVAL;
  5456. }
  5457. if (of_property_read_u32((&pdev->dev)->of_node,
  5458. "qcom,bam-pipe-offload-hlos-cpb",
  5459. &pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_HLOS_CPB])) {
  5460. pr_err("Fail to get bam offload hlos-cpb info\n");
  5461. return -EINVAL;
  5462. }
  5463. }
  5464. if (of_property_read_u32((&pdev->dev)->of_node,
  5465. "qcom,ce-device",
  5466. &pce_dev->ce_bam_info.ce_device)) {
  5467. pr_err("Fail to get CE device information.\n");
  5468. return -EINVAL;
  5469. }
  5470. if (of_property_read_u32((&pdev->dev)->of_node,
  5471. "qcom,ce-hw-instance",
  5472. &pce_dev->ce_bam_info.ce_hw_instance)) {
  5473. pr_err("Fail to get CE hw instance information.\n");
  5474. return -EINVAL;
  5475. }
  5476. if (of_property_read_u32((&pdev->dev)->of_node,
  5477. "qcom,bam-ee",
  5478. &pce_dev->ce_bam_info.bam_ee)) {
  5479. pr_info("BAM Apps EE is not defined, setting to default 1\n");
  5480. pce_dev->ce_bam_info.bam_ee = 1;
  5481. }
  5482. if (of_property_read_u32((&pdev->dev)->of_node,
  5483. "qcom,ce-opp-freq",
  5484. &pce_dev->ce_opp_freq_hz)) {
  5485. pr_info("CE operating frequency is not defined, setting to default 100MHZ\n");
  5486. pce_dev->ce_opp_freq_hz = CE_CLK_100MHZ;
  5487. }
  5488. if (of_property_read_bool((&pdev->dev)->of_node, "qcom,smmu-s1-enable"))
  5489. pce_dev->enable_s1_smmu = true;
  5490. pce_dev->no_clock_support = of_property_read_bool((&pdev->dev)->of_node,
  5491. "qcom,no-clock-support");
  5492. for (i = 0; i < QCE_OFFLOAD_OPER_LAST; i++) {
  5493. /* Source/destination pipes for all usecases */
  5494. pce_dev->ce_bam_info.dest_pipe_index[i] =
  5495. 2 * pce_dev->ce_bam_info.pipe_pair_index[i];
  5496. pce_dev->ce_bam_info.src_pipe_index[i] =
  5497. pce_dev->ce_bam_info.dest_pipe_index[i] + 1;
  5498. }
  5499. resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
  5500. "crypto-base");
  5501. if (resource) {
  5502. pce_dev->phy_iobase = resource->start;
  5503. pce_dev->iobase = ioremap(resource->start,
  5504. resource_size(resource));
  5505. if (!pce_dev->iobase) {
  5506. pr_err("Can not map CRYPTO io memory\n");
  5507. return -ENOMEM;
  5508. }
  5509. } else {
  5510. pr_err("CRYPTO HW mem unavailable.\n");
  5511. return -ENODEV;
  5512. }
  5513. resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
  5514. "crypto-bam-base");
  5515. if (resource) {
  5516. pce_dev->bam_mem = resource->start;
  5517. pce_dev->bam_mem_size = resource_size(resource);
  5518. } else {
  5519. pr_err("CRYPTO BAM mem unavailable.\n");
  5520. rc = -ENODEV;
  5521. goto err_getting_bam_info;
  5522. }
  5523. pce_dev->ce_bam_info.bam_irq = platform_get_irq(pdev,0);
  5524. if (pce_dev->ce_bam_info.bam_irq < 0) {
  5525. pr_err("CRYPTO BAM IRQ unavailable.\n");
  5526. goto err_dev;
  5527. }
  5528. return rc;
  5529. err_dev:
  5530. if (pce_dev->ce_bam_info.bam_iobase)
  5531. iounmap(pce_dev->ce_bam_info.bam_iobase);
  5532. err_getting_bam_info:
  5533. if (pce_dev->iobase)
  5534. iounmap(pce_dev->iobase);
  5535. return rc;
  5536. }
/*
 * __qce_init_clk() - look up and configure the CE clocks named in DT.
 *
 * Obtains "core_clk_src", and unless support_only_core_src_clk is set,
 * also "core_clk", "iface_clk" and "bus_clk". The source-clock rate is
 * set to ce_opp_freq_hz unless bandwidth must be voted before clock
 * setup (request_bw_before_clk). Returns 0 on success or a negative
 * errno; on failure, clock handles acquired so far are released.
 */
static int __qce_init_clk(struct qce_device *pce_dev)
{
	int rc = 0;

	if (pce_dev->no_clock_support) {
		pr_debug("No clock support defined in dts\n");
		return rc;
	}

	pce_dev->ce_core_src_clk = clk_get(pce_dev->pdev, "core_clk_src");
	if (!IS_ERR(pce_dev->ce_core_src_clk)) {
		/* Defer rate setting when bandwidth must be voted first. */
		if (pce_dev->request_bw_before_clk)
			goto skip_set_rate;
		rc = clk_set_rate(pce_dev->ce_core_src_clk,
						pce_dev->ce_opp_freq_hz);
		if (rc) {
			pr_err("Unable to set the core src clk @%uMhz.\n",
					pce_dev->ce_opp_freq_hz/CE_CLK_DIV);
			goto exit_put_core_src_clk;
		}
	} else {
		/* A missing src clk is fatal only when it is the sole clock. */
		if (pce_dev->support_only_core_src_clk) {
			rc = PTR_ERR(pce_dev->ce_core_src_clk);
			pce_dev->ce_core_src_clk = NULL;
			pr_err("Unable to get CE core src clk\n");
			return rc;
		}
		pr_warn("Unable to get CE core src clk, set to NULL\n");
		pce_dev->ce_core_src_clk = NULL;
		/* falls through into skip_set_rate */
	}

skip_set_rate:
	if (pce_dev->support_only_core_src_clk) {
		pce_dev->ce_core_clk = NULL;
		pce_dev->ce_clk = NULL;
		pce_dev->ce_bus_clk = NULL;
	} else {
		pce_dev->ce_core_clk = clk_get(pce_dev->pdev, "core_clk");
		if (IS_ERR(pce_dev->ce_core_clk)) {
			rc = PTR_ERR(pce_dev->ce_core_clk);
			pr_err("Unable to get CE core clk\n");
			goto exit_put_core_src_clk;
		}
		pce_dev->ce_clk = clk_get(pce_dev->pdev, "iface_clk");
		if (IS_ERR(pce_dev->ce_clk)) {
			rc = PTR_ERR(pce_dev->ce_clk);
			pr_err("Unable to get CE interface clk\n");
			goto exit_put_core_clk;
		}
		pce_dev->ce_bus_clk = clk_get(pce_dev->pdev, "bus_clk");
		if (IS_ERR(pce_dev->ce_bus_clk)) {
			rc = PTR_ERR(pce_dev->ce_bus_clk);
			pr_err("Unable to get CE BUS interface clk\n");
			goto exit_put_iface_clk;
		}
	}
	return rc;

	/* Error unwind: release handles in reverse acquisition order. */
exit_put_iface_clk:
	if (pce_dev->ce_clk)
		clk_put(pce_dev->ce_clk);
exit_put_core_clk:
	if (pce_dev->ce_core_clk)
		clk_put(pce_dev->ce_core_clk);
exit_put_core_src_clk:
	if (pce_dev->ce_core_src_clk)
		clk_put(pce_dev->ce_core_src_clk);
	pr_err("Unable to init CE clks, rc = %d\n", rc);
	return rc;
}
  5603. static void __qce_deinit_clk(struct qce_device *pce_dev)
  5604. {
  5605. if (pce_dev->no_clock_support) {
  5606. pr_debug("No clock support defined in dts\n");
  5607. return;
  5608. }
  5609. if (pce_dev->ce_bus_clk)
  5610. clk_put(pce_dev->ce_bus_clk);
  5611. if (pce_dev->ce_clk)
  5612. clk_put(pce_dev->ce_clk);
  5613. if (pce_dev->ce_core_clk)
  5614. clk_put(pce_dev->ce_core_clk);
  5615. if (pce_dev->ce_core_src_clk)
  5616. clk_put(pce_dev->ce_core_src_clk);
  5617. }
  5618. int qce_enable_clk(void *handle)
  5619. {
  5620. struct qce_device *pce_dev = (struct qce_device *)handle;
  5621. int rc = 0;
  5622. if (pce_dev->no_clock_support) {
  5623. pr_debug("No clock support defined in dts\n");
  5624. return rc;
  5625. }
  5626. if (pce_dev->ce_core_src_clk) {
  5627. rc = clk_prepare_enable(pce_dev->ce_core_src_clk);
  5628. if (rc) {
  5629. pr_err("Unable to enable/prepare CE core src clk\n");
  5630. return rc;
  5631. }
  5632. }
  5633. if (pce_dev->support_only_core_src_clk)
  5634. return rc;
  5635. if (pce_dev->ce_core_clk) {
  5636. rc = clk_prepare_enable(pce_dev->ce_core_clk);
  5637. if (rc) {
  5638. pr_err("Unable to enable/prepare CE core clk\n");
  5639. goto exit_disable_core_src_clk;
  5640. }
  5641. }
  5642. if (pce_dev->ce_clk) {
  5643. rc = clk_prepare_enable(pce_dev->ce_clk);
  5644. if (rc) {
  5645. pr_err("Unable to enable/prepare CE iface clk\n");
  5646. goto exit_disable_core_clk;
  5647. }
  5648. }
  5649. if (pce_dev->ce_bus_clk) {
  5650. rc = clk_prepare_enable(pce_dev->ce_bus_clk);
  5651. if (rc) {
  5652. pr_err("Unable to enable/prepare CE BUS clk\n");
  5653. goto exit_disable_ce_clk;
  5654. }
  5655. }
  5656. return rc;
  5657. exit_disable_ce_clk:
  5658. if (pce_dev->ce_clk)
  5659. clk_disable_unprepare(pce_dev->ce_clk);
  5660. exit_disable_core_clk:
  5661. if (pce_dev->ce_core_clk)
  5662. clk_disable_unprepare(pce_dev->ce_core_clk);
  5663. exit_disable_core_src_clk:
  5664. if (pce_dev->ce_core_src_clk)
  5665. clk_disable_unprepare(pce_dev->ce_core_src_clk);
  5666. return rc;
  5667. }
  5668. EXPORT_SYMBOL(qce_enable_clk);
  5669. int qce_disable_clk(void *handle)
  5670. {
  5671. struct qce_device *pce_dev = (struct qce_device *) handle;
  5672. if (pce_dev->no_clock_support) {
  5673. pr_debug("No clock support defined in dts\n");
  5674. return 0;
  5675. }
  5676. if (pce_dev->ce_bus_clk)
  5677. clk_disable_unprepare(pce_dev->ce_bus_clk);
  5678. if (pce_dev->ce_clk)
  5679. clk_disable_unprepare(pce_dev->ce_clk);
  5680. if (pce_dev->ce_core_clk)
  5681. clk_disable_unprepare(pce_dev->ce_core_clk);
  5682. if (pce_dev->ce_core_src_clk)
  5683. clk_disable_unprepare(pce_dev->ce_core_src_clk);
  5684. return 0;
  5685. }
  5686. EXPORT_SYMBOL(qce_disable_clk);
  5687. /* dummy req setup */
  5688. static int setup_dummy_req(struct qce_device *pce_dev)
  5689. {
  5690. char *input =
  5691. "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopqopqrpqrs";
  5692. int len = DUMMY_REQ_DATA_LEN;
  5693. memcpy(pce_dev->dummyreq_in_buf, input, len);
  5694. sg_init_one(&pce_dev->dummyreq.sg, pce_dev->dummyreq_in_buf, len);
  5695. pce_dev->dummyreq.sreq.alg = QCE_HASH_SHA1;
  5696. pce_dev->dummyreq.sreq.qce_cb = qce_dummy_complete;
  5697. pce_dev->dummyreq.sreq.src = &pce_dev->dummyreq.sg;
  5698. pce_dev->dummyreq.sreq.auth_data[0] = 0;
  5699. pce_dev->dummyreq.sreq.auth_data[1] = 0;
  5700. pce_dev->dummyreq.sreq.auth_data[2] = 0;
  5701. pce_dev->dummyreq.sreq.auth_data[3] = 0;
  5702. pce_dev->dummyreq.sreq.first_blk = true;
  5703. pce_dev->dummyreq.sreq.last_blk = true;
  5704. pce_dev->dummyreq.sreq.size = len;
  5705. pce_dev->dummyreq.sreq.areq = &pce_dev->dummyreq.areq;
  5706. pce_dev->dummyreq.sreq.flags = 0;
  5707. pce_dev->dummyreq.sreq.authkey = NULL;
  5708. pce_dev->dummyreq.areq.src = pce_dev->dummyreq.sreq.src;
  5709. pce_dev->dummyreq.areq.nbytes = pce_dev->dummyreq.sreq.size;
  5710. return 0;
  5711. }
  5712. static int qce_smmu_init(struct qce_device *pce_dev)
  5713. {
  5714. struct device *dev = pce_dev->pdev;
  5715. if (!dev->dma_parms) {
  5716. dev->dma_parms = devm_kzalloc(dev,
  5717. sizeof(*dev->dma_parms), GFP_KERNEL);
  5718. if (!dev->dma_parms)
  5719. return -ENOMEM;
  5720. }
  5721. dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
  5722. dma_set_seg_boundary(dev, (unsigned long)DMA_BIT_MASK(64));
  5723. return 0;
  5724. }
  5725. /* crypto engine open function. */
  5726. void *qce_open(struct platform_device *pdev, int *rc)
  5727. {
  5728. struct qce_device *pce_dev;
  5729. int i;
  5730. static int pcedev_no = 1;
  5731. pce_dev = kzalloc(sizeof(struct qce_device), GFP_KERNEL);
  5732. if (!pce_dev) {
  5733. *rc = -ENOMEM;
  5734. pr_err("Can not allocate memory: %d\n", *rc);
  5735. return NULL;
  5736. }
  5737. pce_dev->pdev = &pdev->dev;
  5738. mutex_lock(&qce_iomap_mutex);
  5739. if (pdev->dev.of_node) {
  5740. *rc = __qce_get_device_tree_data(pdev, pce_dev);
  5741. if (*rc)
  5742. goto err_pce_dev;
  5743. } else {
  5744. *rc = -EINVAL;
  5745. pr_err("Device Node not found.\n");
  5746. goto err_pce_dev;
  5747. }
  5748. if (pce_dev->enable_s1_smmu) {
  5749. if (qce_smmu_init(pce_dev)) {
  5750. *rc = -EIO;
  5751. goto err_pce_dev;
  5752. }
  5753. }
  5754. for (i = 0; i < MAX_QCE_ALLOC_BAM_REQ; i++)
  5755. atomic_set(&pce_dev->ce_request_info[i].in_use, false);
  5756. pce_dev->ce_request_index = 0;
  5757. pce_dev->memsize = 10 * PAGE_SIZE * MAX_QCE_ALLOC_BAM_REQ;
  5758. pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev,
  5759. pce_dev->memsize, &pce_dev->coh_pmem, GFP_KERNEL);
  5760. if (pce_dev->coh_vmem == NULL) {
  5761. *rc = -ENOMEM;
  5762. pr_err("Can not allocate coherent memory for sps data\n");
  5763. goto err_iobase;
  5764. }
  5765. pce_dev->iovec_memsize = TOTAL_IOVEC_SPACE_PER_PIPE *
  5766. MAX_QCE_ALLOC_BAM_REQ * 2;
  5767. pce_dev->iovec_vmem = kzalloc(pce_dev->iovec_memsize, GFP_KERNEL);
  5768. if (pce_dev->iovec_vmem == NULL)
  5769. goto err_mem;
  5770. pce_dev->dummyreq_in_buf = kzalloc(DUMMY_REQ_DATA_LEN, GFP_KERNEL);
  5771. if (pce_dev->dummyreq_in_buf == NULL)
  5772. goto err_mem;
  5773. *rc = __qce_init_clk(pce_dev);
  5774. if (*rc)
  5775. goto err_mem;
  5776. *rc = qce_enable_clk(pce_dev);
  5777. if (*rc)
  5778. goto err_enable_clk;
  5779. if (_probe_ce_engine(pce_dev)) {
  5780. *rc = -ENXIO;
  5781. goto err;
  5782. }
  5783. *rc = 0;
  5784. qce_init_ce_cfg_val(pce_dev);
  5785. *rc = qce_sps_init(pce_dev);
  5786. if (*rc)
  5787. goto err;
  5788. qce_setup_ce_sps_data(pce_dev);
  5789. qce_disable_clk(pce_dev);
  5790. setup_dummy_req(pce_dev);
  5791. atomic_set(&pce_dev->no_of_queued_req, 0);
  5792. pce_dev->mode = IN_INTERRUPT_MODE;
  5793. timer_setup(&(pce_dev->timer), qce_multireq_timeout, 0);
  5794. //pce_dev->timer.function = qce_multireq_timeout;
  5795. //pce_dev->timer.data = (unsigned long)pce_dev;
  5796. pce_dev->timer.expires = jiffies + DELAY_IN_JIFFIES;
  5797. pce_dev->intr_cadence = 0;
  5798. pce_dev->dev_no = pcedev_no;
  5799. pcedev_no++;
  5800. pce_dev->owner = QCE_OWNER_NONE;
  5801. qce_enable_clock_gating(pce_dev);
  5802. mutex_unlock(&qce_iomap_mutex);
  5803. return pce_dev;
  5804. err:
  5805. qce_disable_clk(pce_dev);
  5806. err_enable_clk:
  5807. __qce_deinit_clk(pce_dev);
  5808. err_mem:
  5809. kfree(pce_dev->dummyreq_in_buf);
  5810. kfree(pce_dev->iovec_vmem);
  5811. if (pce_dev->coh_vmem)
  5812. dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
  5813. pce_dev->coh_vmem, pce_dev->coh_pmem);
  5814. err_iobase:
  5815. if (pce_dev->iobase)
  5816. iounmap(pce_dev->iobase);
  5817. err_pce_dev:
  5818. mutex_unlock(&qce_iomap_mutex);
  5819. kfree(pce_dev);
  5820. return NULL;
  5821. }
  5822. EXPORT_SYMBOL(qce_open);
  5823. /* crypto engine close function. */
  5824. int qce_close(void *handle)
  5825. {
  5826. struct qce_device *pce_dev = (struct qce_device *) handle;
  5827. if (handle == NULL)
  5828. return -ENODEV;
  5829. mutex_lock(&qce_iomap_mutex);
  5830. qce_enable_clk(pce_dev);
  5831. qce_sps_exit(pce_dev);
  5832. if (pce_dev->iobase)
  5833. iounmap(pce_dev->iobase);
  5834. if (pce_dev->coh_vmem)
  5835. dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
  5836. pce_dev->coh_vmem, pce_dev->coh_pmem);
  5837. kfree(pce_dev->dummyreq_in_buf);
  5838. kfree(pce_dev->iovec_vmem);
  5839. qce_disable_clk(pce_dev);
  5840. __qce_deinit_clk(pce_dev);
  5841. mutex_unlock(&qce_iomap_mutex);
  5842. kfree(handle);
  5843. return 0;
  5844. }
  5845. EXPORT_SYMBOL(qce_close);
  5846. #define OTA_SUPPORT_MASK (1 << CRYPTO_ENCR_SNOW3G_SEL |\
  5847. 1 << CRYPTO_ENCR_KASUMI_SEL |\
  5848. 1 << CRYPTO_AUTH_SNOW3G_SEL |\
  5849. 1 << CRYPTO_AUTH_KASUMI_SEL)
  5850. int qce_hw_support(void *handle, struct ce_hw_support *ce_support)
  5851. {
  5852. struct qce_device *pce_dev = (struct qce_device *)handle;
  5853. if (ce_support == NULL)
  5854. return -EINVAL;
  5855. ce_support->sha1_hmac_20 = false;
  5856. ce_support->sha1_hmac = false;
  5857. ce_support->sha256_hmac = false;
  5858. ce_support->sha_hmac = true;
  5859. ce_support->cmac = true;
  5860. ce_support->aes_key_192 = false;
  5861. ce_support->aes_xts = true;
  5862. if ((pce_dev->engines_avail & OTA_SUPPORT_MASK) == OTA_SUPPORT_MASK)
  5863. ce_support->ota = true;
  5864. else
  5865. ce_support->ota = false;
  5866. ce_support->bam = true;
  5867. ce_support->is_shared = (pce_dev->is_shared == 1) ? true : false;
  5868. ce_support->hw_key = pce_dev->support_hw_key;
  5869. ce_support->aes_ccm = true;
  5870. ce_support->clk_mgmt_sus_res = pce_dev->support_clk_mgmt_sus_res;
  5871. ce_support->req_bw_before_clk = pce_dev->request_bw_before_clk;
  5872. if (pce_dev->ce_bam_info.minor_version)
  5873. ce_support->aligned_only = false;
  5874. else
  5875. ce_support->aligned_only = true;
  5876. ce_support->use_sw_aes_cbc_ecb_ctr_algo =
  5877. pce_dev->use_sw_aes_cbc_ecb_ctr_algo;
  5878. ce_support->use_sw_aead_algo =
  5879. pce_dev->use_sw_aead_algo;
  5880. ce_support->use_sw_aes_xts_algo =
  5881. pce_dev->use_sw_aes_xts_algo;
  5882. ce_support->use_sw_ahash_algo =
  5883. pce_dev->use_sw_ahash_algo;
  5884. ce_support->use_sw_hmac_algo =
  5885. pce_dev->use_sw_hmac_algo;
  5886. ce_support->use_sw_aes_ccm_algo =
  5887. pce_dev->use_sw_aes_ccm_algo;
  5888. ce_support->ce_device = pce_dev->ce_bam_info.ce_device;
  5889. ce_support->ce_hw_instance = pce_dev->ce_bam_info.ce_hw_instance;
  5890. if (pce_dev->no_get_around)
  5891. ce_support->max_request = MAX_QCE_BAM_REQ;
  5892. else
  5893. ce_support->max_request = 1;
  5894. return 0;
  5895. }
  5896. EXPORT_SYMBOL(qce_hw_support);
  5897. void qce_dump_req(void *handle)
  5898. {
  5899. int i;
  5900. bool req_in_use;
  5901. struct qce_device *pce_dev = (struct qce_device *)handle;
  5902. for (i = 0; i < MAX_QCE_BAM_REQ; i++) {
  5903. req_in_use = atomic_read(&pce_dev->ce_request_info[i].in_use);
  5904. pr_info("%s: %d %d\n", __func__, i, req_in_use);
  5905. if (req_in_use)
  5906. _qce_dump_descr_fifos(pce_dev, i);
  5907. }
  5908. }
  5909. EXPORT_SYMBOL(qce_dump_req);
  5910. MODULE_LICENSE("GPL v2");
  5911. MODULE_DESCRIPTION("Crypto Engine driver");