qce50.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * QTI Crypto Engine driver.
 *
 * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#define pr_fmt(fmt) "QCE50: %s: " fmt, __func__

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/bitops.h>
#include <crypto/hash.h>
#include <crypto/sha1.h>
#include <soc/qcom/socinfo.h>
#include <linux/iommu.h>

#include "qcrypto.h"
#include "qce.h"
#include "qce50.h"
#include "qcryptohw_50.h"
#include "qce_ota.h"

#define CRYPTO_SMMU_IOVA_START 0x10000000
#define CRYPTO_SMMU_IOVA_SIZE 0x40000000

#define CRYPTO_CONFIG_RESET 0xE01EF
#define MAX_SPS_DESC_FIFO_SIZE 0xfff0
#define QCE_MAX_NUM_DSCR 0x200
#define QCE_SECTOR_SIZE 0x200
#define CE_CLK_100MHZ 100000000
#define CE_CLK_DIV 1000000

#define CRYPTO_CORE_MAJOR_VER_NUM 0x05
#define CRYPTO_CORE_MINOR_VER_NUM 0x03
#define CRYPTO_CORE_STEP_VER_NUM 0x1

#define CRYPTO_REQ_USER_PAT 0xdead0000
static DEFINE_MUTEX(bam_register_lock);
static DEFINE_MUTEX(qce_iomap_mutex);

struct bam_registration_info {
	struct list_head qlist;
	unsigned long handle;
	uint32_t cnt;
	uint32_t bam_mem;
	void __iomem *bam_iobase;
	bool support_cmd_dscr;
};
static LIST_HEAD(qce50_bam_list);

/* Used to determine the mode */
#define MAX_BUNCH_MODE_REQ 2
/* Max number of requests supported */
#define MAX_QCE_BAM_REQ 8
/* Interrupt flag will be set for every SET_INTR_AT_REQ request */
#define SET_INTR_AT_REQ (MAX_QCE_BAM_REQ / 2)
/* To create extra request space to hold dummy request */
#define MAX_QCE_BAM_REQ_WITH_DUMMY_REQ (MAX_QCE_BAM_REQ + 1)
/* Allocate the memory for MAX_QCE_BAM_REQ + 1 (for dummy request) */
#define MAX_QCE_ALLOC_BAM_REQ MAX_QCE_BAM_REQ_WITH_DUMMY_REQ
/* QCE driver modes */
#define IN_INTERRUPT_MODE 0
#define IN_BUNCH_MODE 1
/* Dummy request data length */
#define DUMMY_REQ_DATA_LEN 64
/* Delay timer to expire when in bunch mode */
#define DELAY_IN_JIFFIES 5
/* Index of the dummy request */
#define DUMMY_REQ_INDEX MAX_QCE_BAM_REQ

#define TOTAL_IOVEC_SPACE_PER_PIPE (QCE_MAX_NUM_DSCR * sizeof(struct sps_iovec))

#define AES_CTR_IV_CTR_SIZE 64

#define QCE_NO_ERROR_VAL1 0x2000006
#define QCE_NO_ERROR_VAL2 0x2000004

// Crypto Engines 5.7 and below
// Key timer expiry for pipes 1-15 (Status3)
#define CRYPTO5_LEGACY_TIMER_EXPIRED_STATUS3 0x0000FF00
// Key timer expiry for pipes 16-19 (Status6)
#define CRYPTO5_LEGACY_TIMER_EXPIRED_STATUS6 0x00000300
// Key pause for pipes 1-15 (Status3)
#define CRYPTO5_LEGACY_KEY_PAUSE_STATUS3 0xFF000000
// Key pause for pipes 16-19 (Status6)
#define CRYPTO5_LEGACY_KEY_PAUSE_STATUS6 0x3000000

// Crypto Engines 5.8 and above
// Key timer expiry for all pipes (Status3)
#define CRYPTO58_TIMER_EXPIRED 0x00000010
// Key pause for all pipes (Status3)
#define CRYPTO58_KEY_PAUSE 0x00001000
// Key index for Status3 (Timer and Key Pause)
#define KEY_INDEX_SHIFT 16
enum qce_owner {
	QCE_OWNER_NONE = 0,
	QCE_OWNER_CLIENT = 1,
	QCE_OWNER_TIMEOUT = 2
};

struct dummy_request {
	struct qce_sha_req sreq;
	struct scatterlist sg;
	struct ahash_request areq;
};

/*
 * CE HW device structure.
 * Each engine has an instance of the structure.
 * Each engine can only handle one crypto operation at a time. It is up to
 * the sw above to ensure single threading of operation on an engine.
 */
struct qce_device {
	struct device *pdev;		/* Handle to platform_device structure */
	struct bam_registration_info *pbam;
	unsigned char *coh_vmem;	/* Allocated coherent virtual memory */
	dma_addr_t coh_pmem;		/* Allocated coherent physical memory */
	int memsize;			/* Memory allocated */
	unsigned char *iovec_vmem;	/* Allocated iovec virtual memory */
	int iovec_memsize;		/* Memory allocated */
	uint32_t bam_mem;		/* bam physical address, from DT */
	uint32_t bam_mem_size;		/* bam io size, from DT */
	int is_shared;			/* CE HW is shared */
	bool support_cmd_dscr;
	bool support_hw_key;
	bool support_clk_mgmt_sus_res;
	bool support_only_core_src_clk;
	bool request_bw_before_clk;
	void __iomem *iobase;		/* Virtual io base of CE HW */
	unsigned int phy_iobase;	/* Physical io base of CE HW */
	struct clk *ce_core_src_clk;	/* Handle to CE src clk */
	struct clk *ce_core_clk;	/* Handle to CE clk */
	struct clk *ce_clk;		/* Handle to CE clk */
	struct clk *ce_bus_clk;		/* Handle to CE AXI clk */
	bool no_get_around;
	bool no_ccm_mac_status_get_around;
	unsigned int ce_opp_freq_hz;
	bool use_sw_aes_cbc_ecb_ctr_algo;
	bool use_sw_aead_algo;
	bool use_sw_aes_xts_algo;
	bool use_sw_ahash_algo;
	bool use_sw_hmac_algo;
	bool use_sw_aes_ccm_algo;
	uint32_t engines_avail;
	struct qce_ce_cfg_reg_setting reg;
	struct ce_bam_info ce_bam_info;
	struct ce_request_info ce_request_info[MAX_QCE_ALLOC_BAM_REQ];
	unsigned int ce_request_index;
	enum qce_owner owner;
	atomic_t no_of_queued_req;
	struct timer_list timer;
	struct dummy_request dummyreq;
	unsigned int mode;
	unsigned int intr_cadence;
	unsigned int dev_no;
	struct qce_driver_stats qce_stats;
	atomic_t bunch_cmd_seq;
	atomic_t last_intr_seq;
	bool cadence_flag;
	uint8_t *dummyreq_in_buf;
	struct dma_iommu_mapping *smmu_mapping;
	bool enable_s1_smmu;
	bool no_clock_support;
	bool kernel_pipes_support;
	bool offload_pipes_support;
};
static void print_notify_debug(struct sps_event_notify *notify);
static void _sps_producer_callback(struct sps_event_notify *notify);
static int qce_dummy_req(struct qce_device *pce_dev);

static int _qce50_disp_stats;

/* Standard initialization vector for SHA-1, source: FIPS 180-2 */
static uint32_t _std_init_vector_sha1[] = {
	0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
};

/* Standard initialization vector for SHA-256, source: FIPS 180-2 */
static uint32_t _std_init_vector_sha256[] = {
	0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
	0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19
};

/*
 * Requests for offload operations do not require explicit dma operations
 * as they already have SMMU mapped source/destination buffers.
 */
static bool is_offload_op(int op)
{
	return (op == QCE_OFFLOAD_HLOS_HLOS || op == QCE_OFFLOAD_HLOS_HLOS_1 ||
		op == QCE_OFFLOAD_CPB_HLOS || op == QCE_OFFLOAD_HLOS_CPB ||
		op == QCE_OFFLOAD_HLOS_CPB_1);
}

static uint32_t qce_get_config_be(struct qce_device *pce_dev,
		uint32_t pipe_pair)
{
	uint32_t beats = (pce_dev->ce_bam_info.ce_burst_size >> 3) - 1;

	return (beats << CRYPTO_REQ_SIZE |
		BIT(CRYPTO_MASK_DOUT_INTR) | BIT(CRYPTO_MASK_DIN_INTR) |
		BIT(CRYPTO_MASK_OP_DONE_INTR) | 0 << CRYPTO_HIGH_SPD_EN_N |
		pipe_pair << CRYPTO_PIPE_SET_SELECT);
}
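
/*
 * Illustrative example (values are assumptions, not taken from this file):
 * with a BAM burst size of 64 bytes, beats = (64 >> 3) - 1 = 7. The returned
 * config word packs that beat count, enables the DIN/DOUT/OP_DONE interrupt
 * masks, and selects the pipe pair the request will be routed through.
 */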
static void dump_status_regs(unsigned int *status)
{
	pr_info("%s: CRYPTO_STATUS_REG = 0x%x\n", __func__, status[0]);
	pr_info("%s: CRYPTO_STATUS2_REG = 0x%x\n", __func__, status[1]);
	pr_info("%s: CRYPTO_STATUS3_REG = 0x%x\n", __func__, status[2]);
	pr_info("%s: CRYPTO_STATUS4_REG = 0x%x\n", __func__, status[3]);
	pr_info("%s: CRYPTO_STATUS5_REG = 0x%x\n", __func__, status[4]);
	pr_info("%s: CRYPTO_STATUS6_REG = 0x%x\n", __func__, status[5]);
}

void qce_get_crypto_status(void *handle, struct qce_error *error)
{
	struct qce_device *pce_dev = (struct qce_device *) handle;
	unsigned int status[6] = {0};

	status[0] = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
	status[1] = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS2_REG);
	status[2] = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS3_REG);
	status[3] = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS4_REG);
	status[4] = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS5_REG);
	status[5] = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS6_REG);

#ifdef QCE_DEBUG
	dump_status_regs(status);
#endif

	if (status[0] != QCE_NO_ERROR_VAL1 && status[0] != QCE_NO_ERROR_VAL2) {
		if (pce_dev->ce_bam_info.minor_version >= 8) {
			if (status[2] & CRYPTO58_TIMER_EXPIRED) {
				error->timer_error = true;
				pr_err("%s: timer expired, index = 0x%x\n",
					__func__, (status[2] >> KEY_INDEX_SHIFT));
			} else if (status[2] & CRYPTO58_KEY_PAUSE) {
				error->key_paused = true;
				pr_err("%s: key paused, index = 0x%x\n",
					__func__, (status[2] >> KEY_INDEX_SHIFT));
			} else {
				pr_err("%s: generic error, refer all status\n",
					__func__);
				error->generic_error = true;
			}
		} else {
			if ((status[2] & CRYPTO5_LEGACY_TIMER_EXPIRED_STATUS3) ||
				(status[5] & CRYPTO5_LEGACY_TIMER_EXPIRED_STATUS6)) {
				error->timer_error = true;
				pr_err("%s: timer expired, refer status 3 and 6\n",
					__func__);
			} else if ((status[2] & CRYPTO5_LEGACY_KEY_PAUSE_STATUS3) ||
				(status[5] & CRYPTO5_LEGACY_KEY_PAUSE_STATUS6)) {
				error->key_paused = true;
				pr_err("%s: key paused, refer status 3 and 6\n",
					__func__);
			} else {
				pr_err("%s: generic error, refer all status\n",
					__func__);
				error->generic_error = true;
			}
		}
		dump_status_regs(status);
		return;
	}

	error->no_error = true;
	pr_info("%s: No crypto error, status1 = 0x%x\n",
		__func__, status[0]);
	return;
}
EXPORT_SYMBOL(qce_get_crypto_status);
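
/*
 * Illustrative usage sketch (a hypothetical caller, not part of this driver):
 * a client holding a qce handle can inspect the engine state after a failed
 * request. The fields come from struct qce_error as used above.
 *
 *	struct qce_error err = {0};
 *
 *	qce_get_crypto_status(qce_handle, &err);
 *	if (err.timer_error || err.key_paused)
 *		// the key slot reported in the log needs attention
 *	else if (err.generic_error)
 *		// consult the dumped status registers
 */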
static int qce_crypto_config(struct qce_device *pce_dev,
		enum qce_offload_op_enum offload_op)
{
	uint32_t config_be = 0;

	config_be = qce_get_config_be(pce_dev,
			pce_dev->ce_bam_info.pipe_pair_index[offload_op]);
	pce_dev->reg.crypto_cfg_be = config_be;
	pce_dev->reg.crypto_cfg_le = (config_be |
			CRYPTO_LITTLE_ENDIAN_MASK);
	return 0;
}

static void qce_enable_clock_gating(struct qce_device *pce_dev)
{
	/* This feature might cause some HW issues, noop till resolved. */
	return;
}

/*
 * The IV counter mask is set based on the values sent through the offload
 * ioctl calls. Currently, for offload operations, it is a 64-bit counter
 * mask for AES CTR and a 128-bit mask for AES CBC.
 */
static void qce_set_iv_ctr_mask(struct qce_device *pce_dev,
		struct qce_req *creq)
{
	if (creq->iv_ctr_size == AES_CTR_IV_CTR_SIZE) {
		pce_dev->reg.encr_cntr_mask_0 = 0x0;
		pce_dev->reg.encr_cntr_mask_1 = 0x0;
		pce_dev->reg.encr_cntr_mask_2 = 0xFFFFFFFF;
		pce_dev->reg.encr_cntr_mask_3 = 0xFFFFFFFF;
	} else {
		pce_dev->reg.encr_cntr_mask_0 = 0xFFFFFFFF;
		pce_dev->reg.encr_cntr_mask_1 = 0xFFFFFFFF;
		pce_dev->reg.encr_cntr_mask_2 = 0xFFFFFFFF;
		pce_dev->reg.encr_cntr_mask_3 = 0xFFFFFFFF;
	}
	return;
}
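
/*
 * For a 64-bit counter only two of the four 32-bit mask words are all-ones,
 * restricting the increment to a 64-bit field of the counter block; in every
 * other case all four words are set and the full 128-bit block may roll over.
 */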
static void _byte_stream_to_net_words(uint32_t *iv, unsigned char *b,
		unsigned int len)
{
	unsigned int n;

	n = len / sizeof(uint32_t);
	for (; n > 0; n--) {
		*iv = ((*b << 24) & 0xff000000) |
			(((*(b+1)) << 16) & 0xff0000) |
			(((*(b+2)) << 8) & 0xff00) |
			(*(b+3) & 0xff);
		b += sizeof(uint32_t);
		iv++;
	}

	n = len % sizeof(uint32_t);
	if (n == 3) {
		*iv = ((*b << 24) & 0xff000000) |
			(((*(b+1)) << 16) & 0xff0000) |
			(((*(b+2)) << 8) & 0xff00);
	} else if (n == 2) {
		*iv = ((*b << 24) & 0xff000000) |
			(((*(b+1)) << 16) & 0xff0000);
	} else if (n == 1) {
		*iv = ((*b << 24) & 0xff000000);
	}
}
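
/*
 * Worked example: the byte stream {0x01, 0x02, 0x03, 0x04} becomes the single
 * word 0x01020304, i.e. each group of four bytes is packed big-endian
 * ("network order") into a uint32_t; a trailing partial group is
 * left-justified and zero-padded.
 */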
static void _byte_stream_swap_to_net_words(uint32_t *iv, unsigned char *b,
		unsigned int len)
{
	unsigned int i, j;
	unsigned char swap_iv[AES_IV_LENGTH];

	memset(swap_iv, 0, AES_IV_LENGTH);
	for (i = (AES_IV_LENGTH-len), j = len-1; i < AES_IV_LENGTH; i++, j--)
		swap_iv[i] = b[j];
	_byte_stream_to_net_words(iv, swap_iv, AES_IV_LENGTH);
}

static int count_sg(struct scatterlist *sg, int nbytes)
{
	int i;

	for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
		nbytes -= sg->length;
	return i;
}
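
/*
 * count_sg() walks the (possibly chained) scatterlist and returns how many
 * entries are needed to cover nbytes of payload; that count is what the
 * per-entry map/unmap helpers below expect as nents.
 */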
static int qce_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	for (i = 0; i < nents; ++i) {
		dma_map_sg(dev, sg, 1, direction);
		sg = sg_next(sg);
	}

	return nents;
}

static int qce_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
	int nents, enum dma_data_direction direction)
{
	int i;

	for (i = 0; i < nents; ++i) {
		dma_unmap_sg(dev, sg, 1, direction);
		sg = sg_next(sg);
	}

	return nents;
}
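
/*
 * These wrappers map/unmap one scatterlist entry at a time (length 1 per
 * dma_map_sg() call) while advancing with sg_next(), so chained scatterlists
 * are handled entry by entry rather than as a single contiguous array.
 */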
static int _probe_ce_engine(struct qce_device *pce_dev)
{
	unsigned int rev;
	unsigned int maj_rev, min_rev, step_rev;
	int i = 0;

	rev = readl_relaxed(pce_dev->iobase + CRYPTO_VERSION_REG);
	/*
	 * Ensure previous instructions (setting the GO register)
	 * were completed before checking the version.
	 */
	mb();
	maj_rev = (rev & CRYPTO_CORE_MAJOR_REV_MASK) >> CRYPTO_CORE_MAJOR_REV;
	min_rev = (rev & CRYPTO_CORE_MINOR_REV_MASK) >> CRYPTO_CORE_MINOR_REV;
	step_rev = (rev & CRYPTO_CORE_STEP_REV_MASK) >> CRYPTO_CORE_STEP_REV;

	if (maj_rev != CRYPTO_CORE_MAJOR_VER_NUM) {
		pr_err("Unsupported QTI crypto device at 0x%x, rev %d.%d.%d\n",
			pce_dev->phy_iobase, maj_rev, min_rev, step_rev);
		return -EIO;
	}

	/*
	 * The majority of crypto HW bugs have been fixed in 5.3.0 and
	 * above. That allows a single sps transfer of consumer
	 * pipe, and a single sps transfer of producer pipe
	 * for a crypto request. no_get_around flag indicates this.
	 *
	 * In 5.3.1, the CCM MAC_FAILED in result dump issue is
	 * fixed. no_ccm_mac_status_get_around flag indicates this.
	 */
	pce_dev->no_get_around = (min_rev >=
			CRYPTO_CORE_MINOR_VER_NUM) ? true : false;
	if (min_rev > CRYPTO_CORE_MINOR_VER_NUM)
		pce_dev->no_ccm_mac_status_get_around = true;
	else if ((min_rev == CRYPTO_CORE_MINOR_VER_NUM) &&
			(step_rev >= CRYPTO_CORE_STEP_VER_NUM))
		pce_dev->no_ccm_mac_status_get_around = true;
	else
		pce_dev->no_ccm_mac_status_get_around = false;

	pce_dev->ce_bam_info.minor_version = min_rev;
	pce_dev->ce_bam_info.major_version = maj_rev;

	pce_dev->engines_avail = readl_relaxed(pce_dev->iobase +
				CRYPTO_ENGINES_AVAIL);
	dev_info(pce_dev->pdev, "QTI Crypto %d.%d.%d device found @0x%x\n",
			maj_rev, min_rev, step_rev, pce_dev->phy_iobase);

	pce_dev->ce_bam_info.ce_burst_size = MAX_CE_BAM_BURST_SIZE;

	dev_dbg(pce_dev->pdev, "CE device = %#x IO base, CE = %pK, IO base BAM = %pK\nBAM IRQ %d Engines Availability = %#x\n",
		pce_dev->ce_bam_info.ce_device, pce_dev->iobase,
		pce_dev->ce_bam_info.bam_iobase,
		pce_dev->ce_bam_info.bam_irq, pce_dev->engines_avail);

	for (i = 0; i < QCE_OFFLOAD_OPER_LAST; i++) {
		dev_dbg(pce_dev->pdev, "Consumer pipe IN [%d] = %d, Producer Pipe OUT [%d] = %d\n",
			i, pce_dev->ce_bam_info.src_pipe_index[i],
			i, pce_dev->ce_bam_info.dest_pipe_index[i]);
	}

	return 0;
}
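
/*
 * Worked example: a CRYPTO_VERSION_REG value decoding to 5.3.1 gives
 * maj_rev = 5, min_rev = 3, step_rev = 1, so no_get_around is true (>= 5.3)
 * and no_ccm_mac_status_get_around is true (>= 5.3.1). On an older 5.2.x part
 * both flags stay false and the driver keeps applying the corresponding
 * workarounds.
 */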
static struct qce_cmdlist_info *_ce_get_hash_cmdlistinfo(
		struct qce_device *pce_dev,
		int req_info, struct qce_sha_req *sreq)
{
	struct ce_sps_data *pce_sps_data;
	struct qce_cmdlistptr_ops *cmdlistptr;

	pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
	cmdlistptr = &pce_sps_data->cmdlistptr;
	switch (sreq->alg) {
	case QCE_HASH_SHA1:
		return &cmdlistptr->auth_sha1;
	case QCE_HASH_SHA256:
		return &cmdlistptr->auth_sha256;
	case QCE_HASH_SHA1_HMAC:
		return &cmdlistptr->auth_sha1_hmac;
	case QCE_HASH_SHA256_HMAC:
		return &cmdlistptr->auth_sha256_hmac;
	case QCE_HASH_AES_CMAC:
		if (sreq->authklen == AES128_KEY_SIZE)
			return &cmdlistptr->auth_aes_128_cmac;
		return &cmdlistptr->auth_aes_256_cmac;
	default:
		return NULL;
	}
	return NULL;
}
static int _ce_setup_hash(struct qce_device *pce_dev,
		struct qce_sha_req *sreq,
		struct qce_cmdlist_info *cmdlistinfo)
{
	uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
	uint32_t diglen;
	int i;
	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	bool sha1 = false;
	struct sps_command_element *pce = NULL;
	bool use_hw_key = false;
	bool use_pipe_key = false;
	uint32_t authk_size_in_word = sreq->authklen/sizeof(uint32_t);
	uint32_t auth_cfg;

	if (qce_crypto_config(pce_dev, QCE_OFFLOAD_NONE))
		return -EINVAL;

	pce = cmdlistinfo->crypto_cfg;
	pce->data = pce_dev->reg.crypto_cfg_be;
	pce = cmdlistinfo->crypto_cfg_le;
	pce->data = pce_dev->reg.crypto_cfg_le;

	if ((sreq->alg == QCE_HASH_SHA1_HMAC) ||
			(sreq->alg == QCE_HASH_SHA256_HMAC) ||
			(sreq->alg == QCE_HASH_AES_CMAC)) {

		/* no more check for null key. use flag */
		if ((sreq->flags & QCRYPTO_CTX_USE_HW_KEY)
				== QCRYPTO_CTX_USE_HW_KEY)
			use_hw_key = true;
		else if ((sreq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
				QCRYPTO_CTX_USE_PIPE_KEY)
			use_pipe_key = true;

		pce = cmdlistinfo->go_proc;
		if (use_hw_key) {
			pce->addr = (uint32_t)(CRYPTO_GOPROC_QC_KEY_REG +
					pce_dev->phy_iobase);
		} else {
			pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
					pce_dev->phy_iobase);
			pce = cmdlistinfo->auth_key;
			if (!use_pipe_key) {
				_byte_stream_to_net_words(mackey32,
						sreq->authkey,
						sreq->authklen);
				for (i = 0; i < authk_size_in_word; i++, pce++)
					pce->data = mackey32[i];
			}
		}
	}

	if (sreq->alg == QCE_HASH_AES_CMAC)
		goto go_proc;

	/* if not the last, the size has to be on the block boundary */
	if (!sreq->last_blk && (sreq->size % SHA256_BLOCK_SIZE))
		return -EIO;

	switch (sreq->alg) {
	case QCE_HASH_SHA1:
	case QCE_HASH_SHA1_HMAC:
		diglen = SHA1_DIGEST_SIZE;
		sha1 = true;
		break;
	case QCE_HASH_SHA256:
	case QCE_HASH_SHA256_HMAC:
		diglen = SHA256_DIGEST_SIZE;
		break;
	default:
		return -EINVAL;
	}

	/* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */
	if (sreq->first_blk) {
		if (sha1) {
			for (i = 0; i < 5; i++)
				auth32[i] = _std_init_vector_sha1[i];
		} else {
			for (i = 0; i < 8; i++)
				auth32[i] = _std_init_vector_sha256[i];
		}
	} else {
		_byte_stream_to_net_words(auth32, sreq->digest, diglen);
	}

	pce = cmdlistinfo->auth_iv;
	for (i = 0; i < 5; i++, pce++)
		pce->data = auth32[i];

	if ((sreq->alg == QCE_HASH_SHA256) ||
			(sreq->alg == QCE_HASH_SHA256_HMAC)) {
		for (i = 5; i < 8; i++, pce++)
			pce->data = auth32[i];
	}

	/* write auth_bytecnt 0/1, start with 0 */
	pce = cmdlistinfo->auth_bytecount;
	for (i = 0; i < 2; i++, pce++)
		pce->data = sreq->auth_data[i];

	/* Set/reset last bit in CFG register */
	pce = cmdlistinfo->auth_seg_cfg;
	auth_cfg = pce->data & ~(1 << CRYPTO_LAST |
				1 << CRYPTO_FIRST |
				1 << CRYPTO_USE_PIPE_KEY_AUTH |
				1 << CRYPTO_USE_HW_KEY_AUTH);
	if (sreq->last_blk)
		auth_cfg |= 1 << CRYPTO_LAST;
	if (sreq->first_blk)
		auth_cfg |= 1 << CRYPTO_FIRST;
	if (use_hw_key)
		auth_cfg |= 1 << CRYPTO_USE_HW_KEY_AUTH;
	if (use_pipe_key)
		auth_cfg |= 1 << CRYPTO_USE_PIPE_KEY_AUTH;
	pce->data = auth_cfg;
go_proc:
	/* write auth seg size */
	pce = cmdlistinfo->auth_seg_size;
	pce->data = sreq->size;

	pce = cmdlistinfo->encr_seg_cfg;
	pce->data = 0;

	/* write auth seg start */
	pce = cmdlistinfo->auth_seg_start;
	pce->data = 0;

	/* write seg size */
	pce = cmdlistinfo->seg_size;

	/* always ensure there is input data. ZLT does not work for bam-ndp */
	if (sreq->size)
		pce->data = sreq->size;
	else
		pce->data = pce_dev->ce_bam_info.ce_burst_size;

	return 0;
}
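
/*
 * sreq->auth_data[0/1] carry the running byte count between partial-hash
 * invocations: the first block loads the FIPS 180-2 initial vectors, while
 * later blocks reload the previously produced intermediate digest, so a large
 * message can be hashed across several requests.
 */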
static struct qce_cmdlist_info *_ce_get_aead_cmdlistinfo(
		struct qce_device *pce_dev,
		int req_info, struct qce_req *creq)
{
	struct ce_sps_data *pce_sps_data;
	struct qce_cmdlistptr_ops *cmdlistptr;

	pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
	cmdlistptr = &pce_sps_data->cmdlistptr;
	switch (creq->alg) {
	case CIPHER_ALG_DES:
		switch (creq->mode) {
		case QCE_MODE_CBC:
			if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
				return &cmdlistptr->aead_hmac_sha1_cbc_des;
			else if (creq->auth_alg == QCE_HASH_SHA256_HMAC)
				return &cmdlistptr->aead_hmac_sha256_cbc_des;
			else
				return NULL;
			break;
		default:
			return NULL;
		}
		break;
	case CIPHER_ALG_3DES:
		switch (creq->mode) {
		case QCE_MODE_CBC:
			if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
				return &cmdlistptr->aead_hmac_sha1_cbc_3des;
			else if (creq->auth_alg == QCE_HASH_SHA256_HMAC)
				return &cmdlistptr->aead_hmac_sha256_cbc_3des;
			else
				return NULL;
			break;
		default:
			return NULL;
		}
		break;
	case CIPHER_ALG_AES:
		switch (creq->mode) {
		case QCE_MODE_CBC:
			if (creq->encklen == AES128_KEY_SIZE) {
				if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
					return &cmdlistptr->aead_hmac_sha1_cbc_aes_128;
				else if (creq->auth_alg == QCE_HASH_SHA256_HMAC)
					return &cmdlistptr->aead_hmac_sha256_cbc_aes_128;
				else
					return NULL;
			} else if (creq->encklen == AES256_KEY_SIZE) {
				if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
					return &cmdlistptr->aead_hmac_sha1_cbc_aes_256;
				else if (creq->auth_alg == QCE_HASH_SHA256_HMAC)
					return &cmdlistptr->aead_hmac_sha256_cbc_aes_256;
				else
					return NULL;
			} else
				return NULL;
			break;
		default:
			return NULL;
		}
		break;
	default:
		return NULL;
	}
	return NULL;
}
static int _ce_setup_aead(struct qce_device *pce_dev, struct qce_req *q_req,
		uint32_t totallen_in, uint32_t coffset,
		struct qce_cmdlist_info *cmdlistinfo)
{
	int32_t authk_size_in_word = SHA_HMAC_KEY_SIZE/sizeof(uint32_t);
	int i;
	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {0};
	struct sps_command_element *pce;
	uint32_t a_cfg;
	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE*2)/sizeof(uint32_t)] = {0};
	uint32_t enciv32[MAX_IV_LENGTH/sizeof(uint32_t)] = {0};
	uint32_t enck_size_in_word = 0;
	uint32_t enciv_in_word;
	uint32_t key_size;
	uint32_t encr_cfg = 0;
	uint32_t ivsize = q_req->ivsize;

	key_size = q_req->encklen;
	enck_size_in_word = key_size/sizeof(uint32_t);

	if (qce_crypto_config(pce_dev, q_req->offload_op))
		return -EINVAL;

	pce = cmdlistinfo->crypto_cfg;
	pce->data = pce_dev->reg.crypto_cfg_be;
	pce = cmdlistinfo->crypto_cfg_le;
	pce->data = pce_dev->reg.crypto_cfg_le;

	switch (q_req->alg) {
	case CIPHER_ALG_DES:
		enciv_in_word = 2;
		break;
	case CIPHER_ALG_3DES:
		enciv_in_word = 2;
		break;
	case CIPHER_ALG_AES:
		if ((key_size != AES128_KEY_SIZE) &&
				(key_size != AES256_KEY_SIZE))
			return -EINVAL;
		enciv_in_word = 4;
		break;
	default:
		return -EINVAL;
	}

	/* only support cbc mode */
	if (q_req->mode != QCE_MODE_CBC)
		return -EINVAL;

	_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
	pce = cmdlistinfo->encr_cntr_iv;
	for (i = 0; i < enciv_in_word; i++, pce++)
		pce->data = enciv32[i];

	/*
	 * write encr key
	 * do not use hw key or pipe key
	 */
	_byte_stream_to_net_words(enckey32, q_req->enckey, key_size);
	pce = cmdlistinfo->encr_key;
	for (i = 0; i < enck_size_in_word; i++, pce++)
		pce->data = enckey32[i];

	/* write encr seg cfg */
	pce = cmdlistinfo->encr_seg_cfg;
	encr_cfg = pce->data;
	if (q_req->dir == QCE_ENCRYPT)
		encr_cfg |= (1 << CRYPTO_ENCODE);
	else
		encr_cfg &= ~(1 << CRYPTO_ENCODE);
	pce->data = encr_cfg;

	/* we only support sha1-hmac and sha256-hmac at this point */
	_byte_stream_to_net_words(mackey32, q_req->authkey,
					q_req->authklen);
	pce = cmdlistinfo->auth_key;
	for (i = 0; i < authk_size_in_word; i++, pce++)
		pce->data = mackey32[i];
	pce = cmdlistinfo->auth_iv;

	if (q_req->auth_alg == QCE_HASH_SHA1_HMAC)
		for (i = 0; i < 5; i++, pce++)
			pce->data = _std_init_vector_sha1[i];
	else
		for (i = 0; i < 8; i++, pce++)
			pce->data = _std_init_vector_sha256[i];

	/* write auth_bytecnt 0/1, start with 0 */
	pce = cmdlistinfo->auth_bytecount;
	for (i = 0; i < 2; i++, pce++)
		pce->data = 0;

	pce = cmdlistinfo->auth_seg_cfg;
	a_cfg = pce->data;
	a_cfg &= ~(CRYPTO_AUTH_POS_MASK);
	if (q_req->dir == QCE_ENCRYPT)
		a_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
	else
		a_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
	pce->data = a_cfg;

	/* write auth seg size */
	pce = cmdlistinfo->auth_seg_size;
	pce->data = totallen_in;

	/* write auth seg start */
	pce = cmdlistinfo->auth_seg_start;
	pce->data = 0;

	/* write seg size */
	pce = cmdlistinfo->seg_size;
	pce->data = totallen_in;

	/* write encr seg size */
	pce = cmdlistinfo->encr_seg_size;
	pce->data = q_req->cryptlen;

	/* write encr seg start */
	pce = cmdlistinfo->encr_seg_start;
	pce->data = (coffset & 0xffff);

	return 0;
}
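
/*
 * For these CBC AEAD requests the authentication segment spans the whole
 * totallen_in, while the encryption segment covers only cryptlen bytes
 * starting at coffset, i.e. the data that precedes the cipher region is
 * authenticated but not encrypted.
 */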
static struct qce_cmdlist_info *_ce_get_cipher_cmdlistinfo(
		struct qce_device *pce_dev,
		int req_info, struct qce_req *creq)
{
	struct ce_request_info *preq_info;
	struct ce_sps_data *pce_sps_data;
	struct qce_cmdlistptr_ops *cmdlistptr;

	preq_info = &pce_dev->ce_request_info[req_info];
	pce_sps_data = &preq_info->ce_sps;
	cmdlistptr = &pce_sps_data->cmdlistptr;
	if (creq->alg != CIPHER_ALG_AES) {
		switch (creq->alg) {
		case CIPHER_ALG_DES:
			if (creq->mode == QCE_MODE_ECB)
				return &cmdlistptr->cipher_des_ecb;
			return &cmdlistptr->cipher_des_cbc;
		case CIPHER_ALG_3DES:
			if (creq->mode == QCE_MODE_ECB)
				return &cmdlistptr->cipher_3des_ecb;
			return &cmdlistptr->cipher_3des_cbc;
		default:
			return NULL;
		}
	} else {
		switch (creq->mode) {
		case QCE_MODE_ECB:
			if (creq->encklen == AES128_KEY_SIZE)
				return &cmdlistptr->cipher_aes_128_ecb;
			return &cmdlistptr->cipher_aes_256_ecb;
		case QCE_MODE_CBC:
		case QCE_MODE_CTR:
			if (creq->encklen == AES128_KEY_SIZE)
				return &cmdlistptr->cipher_aes_128_cbc_ctr;
			return &cmdlistptr->cipher_aes_256_cbc_ctr;
		case QCE_MODE_XTS:
			if (creq->encklen/2 == AES128_KEY_SIZE)
				return &cmdlistptr->cipher_aes_128_xts;
			return &cmdlistptr->cipher_aes_256_xts;
		case QCE_MODE_CCM:
			if (creq->encklen == AES128_KEY_SIZE)
				return &cmdlistptr->aead_aes_128_ccm;
			return &cmdlistptr->aead_aes_256_ccm;
		default:
			return NULL;
		}
	}
	return NULL;
}
static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq,
		uint32_t totallen_in, uint32_t coffset,
		struct qce_cmdlist_info *cmdlistinfo)
{
	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = {
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
	uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
			0, 0, 0, 0};
	uint32_t enck_size_in_word = 0;
	uint32_t key_size;
	bool use_hw_key = false;
	bool use_pipe_key = false;
	uint32_t encr_cfg = 0;
	uint32_t ivsize = creq->ivsize;
	int i;
	struct sps_command_element *pce = NULL;
	bool is_des_cipher = false;

	if (creq->mode == QCE_MODE_XTS)
		key_size = creq->encklen/2;
	else
		key_size = creq->encklen;

	if (qce_crypto_config(pce_dev, creq->offload_op))
		return -EINVAL;

	pce = cmdlistinfo->crypto_cfg;
	pce->data = pce_dev->reg.crypto_cfg_be;
	pce = cmdlistinfo->crypto_cfg_le;
	pce->data = pce_dev->reg.crypto_cfg_le;

	pce = cmdlistinfo->go_proc;
	if ((creq->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
		use_hw_key = true;
	} else {
		if ((creq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
					QCRYPTO_CTX_USE_PIPE_KEY)
			use_pipe_key = true;
	}
	if (use_hw_key)
		pce->addr = (uint32_t)(CRYPTO_GOPROC_QC_KEY_REG +
						pce_dev->phy_iobase);
	else
		pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
						pce_dev->phy_iobase);
	if (!use_pipe_key && !use_hw_key) {
		_byte_stream_to_net_words(enckey32, creq->enckey, key_size);
		enck_size_in_word = key_size/sizeof(uint32_t);
	}

	if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) {
		uint32_t authklen32 = creq->encklen/sizeof(uint32_t);
		uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t);
		uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0};
		uint32_t auth_cfg = 0;

		/* write nonce */
		_byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE);
		pce = cmdlistinfo->auth_nonce_info;
		for (i = 0; i < noncelen32; i++, pce++)
			pce->data = nonce32[i];

		if (creq->authklen == AES128_KEY_SIZE)
			auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_128;
		else {
			if (creq->authklen == AES256_KEY_SIZE)
				auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_256;
		}
		if (creq->dir == QCE_ENCRYPT)
			auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
		else
			auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
		auth_cfg |= ((creq->authsize - 1) << CRYPTO_AUTH_SIZE);

		if (use_hw_key) {
			auth_cfg |= (1 << CRYPTO_USE_HW_KEY_AUTH);
		} else {
			auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
			/* write auth key */
			pce = cmdlistinfo->auth_key;
			for (i = 0; i < authklen32; i++, pce++)
				pce->data = enckey32[i];
		}

		pce = cmdlistinfo->auth_seg_cfg;
		pce->data = auth_cfg;

		pce = cmdlistinfo->auth_seg_size;
		if (creq->dir == QCE_ENCRYPT)
			pce->data = totallen_in;
		else
			pce->data = totallen_in - creq->authsize;

		pce = cmdlistinfo->auth_seg_start;
		pce->data = 0;
	} else {
		if (creq->op != QCE_REQ_AEAD) {
			pce = cmdlistinfo->auth_seg_cfg;
			pce->data = 0;
		}
	}

	switch (creq->mode) {
	case QCE_MODE_ECB:
		if (key_size == AES128_KEY_SIZE)
			encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_128;
		else
			encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_256;
		break;
	case QCE_MODE_CBC:
		if (key_size == AES128_KEY_SIZE)
			encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128;
		else
			encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256;
		break;
	case QCE_MODE_XTS:
		if (key_size == AES128_KEY_SIZE)
			encr_cfg = pce_dev->reg.encr_cfg_aes_xts_128;
		else
			encr_cfg = pce_dev->reg.encr_cfg_aes_xts_256;
		break;
	case QCE_MODE_CCM:
		if (key_size == AES128_KEY_SIZE)
			encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_128;
		else
			encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_256;
		encr_cfg |= (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE) |
			(CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM);
		break;
	case QCE_MODE_CTR:
	default:
		if (key_size == AES128_KEY_SIZE)
			encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_128;
		else
			encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_256;
		break;
	}

	switch (creq->alg) {
	case CIPHER_ALG_DES:
		if (creq->mode != QCE_MODE_ECB) {
			if (ivsize > MAX_IV_LENGTH) {
				pr_err("%s: error: Invalid length parameter\n",
					__func__);
				return -EINVAL;
			}
			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
  919. pce = cmdlistinfo->encr_cntr_iv;
  920. pce->data = enciv32[0];
  921. pce++;
  922. pce->data = enciv32[1];
  923. }
  924. if (!use_hw_key) {
  925. pce = cmdlistinfo->encr_key;
  926. pce->data = enckey32[0];
  927. pce++;
  928. pce->data = enckey32[1];
  929. }
  930. is_des_cipher = true;
  931. break;
  932. case CIPHER_ALG_3DES:
  933. if (creq->mode != QCE_MODE_ECB) {
  934. if (ivsize > MAX_IV_LENGTH) {
  935. pr_err("%s: error: Invalid length parameter\n",
  936. __func__);
  937. return -EINVAL;
  938. }
  939. _byte_stream_to_net_words(enciv32, creq->iv, ivsize);
  940. pce = cmdlistinfo->encr_cntr_iv;
  941. pce->data = enciv32[0];
  942. pce++;
  943. pce->data = enciv32[1];
  944. }
  945. if (!use_hw_key) {
  946. /* write encr key */
  947. pce = cmdlistinfo->encr_key;
  948. for (i = 0; i < 6; i++, pce++)
  949. pce->data = enckey32[i];
  950. }
  951. is_des_cipher = true;
  952. break;
  953. case CIPHER_ALG_AES:
  954. default:
  955. if (creq->mode == QCE_MODE_XTS) {
  956. uint32_t xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)]
  957. = {0, 0, 0, 0, 0, 0, 0, 0};
  958. uint32_t xtsklen =
  959. creq->encklen/(2 * sizeof(uint32_t));
  960. if (!use_hw_key && !use_pipe_key) {
  961. _byte_stream_to_net_words(xtskey32,
  962. (creq->enckey + creq->encklen/2),
  963. creq->encklen/2);
  964. /* write xts encr key */
  965. pce = cmdlistinfo->encr_xts_key;
  966. for (i = 0; i < xtsklen; i++, pce++)
  967. pce->data = xtskey32[i];
  968. }
  969. /* write xts du size */
  970. pce = cmdlistinfo->encr_xts_du_size;
  971. switch (creq->flags & QCRYPTO_CTX_XTS_MASK) {
  972. case QCRYPTO_CTX_XTS_DU_SIZE_512B:
  973. pce->data = min((unsigned int)QCE_SECTOR_SIZE,
  974. creq->cryptlen);
  975. break;
  976. case QCRYPTO_CTX_XTS_DU_SIZE_1KB:
  977. pce->data =
  978. min((unsigned int)QCE_SECTOR_SIZE * 2,
  979. creq->cryptlen);
  980. break;
  981. default:
  982. pce->data = creq->cryptlen;
  983. break;
  984. }
  985. }
  986. if (creq->mode != QCE_MODE_ECB) {
  987. if (ivsize > MAX_IV_LENGTH) {
  988. pr_err("%s: error: Invalid length parameter\n",
  989. __func__);
  990. return -EINVAL;
  991. }
  992. if (creq->mode == QCE_MODE_XTS)
  993. _byte_stream_swap_to_net_words(enciv32,
  994. creq->iv, ivsize);
  995. else
  996. _byte_stream_to_net_words(enciv32, creq->iv,
  997. ivsize);
  998. /* write encr cntr iv */
  999. pce = cmdlistinfo->encr_cntr_iv;
  1000. for (i = 0; i < 4; i++, pce++)
  1001. pce->data = enciv32[i];
  1002. if (creq->mode == QCE_MODE_CCM) {
  1003. /* write cntr iv for ccm */
  1004. pce = cmdlistinfo->encr_ccm_cntr_iv;
  1005. for (i = 0; i < 4; i++, pce++)
  1006. pce->data = enciv32[i];
  1007. /* update cntr_iv[3] by one */
  1008. pce = cmdlistinfo->encr_cntr_iv;
  1009. pce += 3;
  1010. pce->data += 1;
  1011. }
  1012. }
  1013. if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
  1014. encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
  1015. CRYPTO_ENCR_KEY_SZ);
  1016. } else {
  1017. if (!use_hw_key) {
  1018. /* write encr key */
  1019. pce = cmdlistinfo->encr_key;
  1020. for (i = 0; i < enck_size_in_word; i++, pce++)
  1021. pce->data = enckey32[i];
  1022. }
  1023. } /* else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
  1024. break;
  1025. } /* end of switch (creq->mode) */
  1026. if (use_pipe_key)
  1027. encr_cfg |= (CRYPTO_USE_PIPE_KEY_ENCR_ENABLED
  1028. << CRYPTO_USE_PIPE_KEY_ENCR);
  1029. /* write encr seg cfg */
  1030. pce = cmdlistinfo->encr_seg_cfg;
  1031. if ((creq->alg == CIPHER_ALG_DES) || (creq->alg == CIPHER_ALG_3DES)) {
  1032. if (creq->dir == QCE_ENCRYPT)
  1033. pce->data |= (1 << CRYPTO_ENCODE);
  1034. else
  1035. pce->data &= ~(1 << CRYPTO_ENCODE);
  1036. encr_cfg = pce->data;
  1037. } else {
  1038. encr_cfg |=
  1039. ((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;
  1040. }
  1041. if (use_hw_key)
  1042. encr_cfg |= (CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
  1043. else
  1044. encr_cfg &= ~(CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
  1045. pce->data = encr_cfg;
  1046. /* write encr seg size */
  1047. pce = cmdlistinfo->encr_seg_size;
  1048. if (creq->is_copy_op) {
  1049. pce->data = 0;
  1050. } else {
  1051. if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT))
  1052. pce->data = (creq->cryptlen + creq->authsize);
  1053. else
  1054. pce->data = creq->cryptlen;
  1055. }
  1056. /* write encr seg start */
  1057. pce = cmdlistinfo->encr_seg_start;
  1058. pce->data = (coffset & 0xffff);
  1059. /* write seg size */
  1060. pce = cmdlistinfo->seg_size;
  1061. pce->data = totallen_in;
  1062. if (!is_des_cipher) {
  1063. /* pattern info */
  1064. pce = cmdlistinfo->pattern_info;
  1065. pce->data = creq->pattern_info;
  1066. /* block offset */
  1067. pce = cmdlistinfo->block_offset;
  1068. pce->data = (creq->block_offset << 4) |
1069. (creq->block_offset ? 1 : 0);
  1070. /* IV counter size */
  1071. qce_set_iv_ctr_mask(pce_dev, creq);
  1072. pce = cmdlistinfo->encr_mask_3;
  1073. pce->data = pce_dev->reg.encr_cntr_mask_3;
  1074. pce = cmdlistinfo->encr_mask_2;
  1075. pce->data = pce_dev->reg.encr_cntr_mask_2;
  1076. pce = cmdlistinfo->encr_mask_1;
  1077. pce->data = pce_dev->reg.encr_cntr_mask_1;
  1078. pce = cmdlistinfo->encr_mask_0;
  1079. pce->data = pce_dev->reg.encr_cntr_mask_0;
  1080. }
  1081. pce = cmdlistinfo->go_proc;
  1082. pce->data = 0;
  1083. if (is_offload_op(creq->offload_op))
  1084. pce->data = ((1 << CRYPTO_GO) | (1 << CRYPTO_CLR_CNTXT));
  1085. else
  1086. pce->data = ((1 << CRYPTO_GO) | (1 << CRYPTO_CLR_CNTXT) |
  1087. (1 << CRYPTO_RESULTS_DUMP));
  1088. return 0;
  1089. }
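/*
 * Program the command list for an OTA f9 (integrity) request: Kasumi
 * or SNOW 3G auth config, integrity key, fresh and count-i values,
 * and the auth segment covering the whole message.
 */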
  1090. static int _ce_f9_setup(struct qce_device *pce_dev, struct qce_f9_req *req,
  1091. struct qce_cmdlist_info *cmdlistinfo)
  1092. {
  1093. uint32_t ikey32[OTA_KEY_SIZE/sizeof(uint32_t)];
  1094. uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
  1095. uint32_t cfg;
  1096. struct sps_command_element *pce;
  1097. int i;
  1098. switch (req->algorithm) {
  1099. case QCE_OTA_ALGO_KASUMI:
  1100. cfg = pce_dev->reg.auth_cfg_kasumi;
  1101. break;
  1102. case QCE_OTA_ALGO_SNOW3G:
  1103. default:
  1104. cfg = pce_dev->reg.auth_cfg_snow3g;
  1105. break;
  1106. }
  1107. if (qce_crypto_config(pce_dev, QCE_OFFLOAD_NONE))
  1108. return -EINVAL;
  1109. pce = cmdlistinfo->crypto_cfg;
  1110. pce->data = pce_dev->reg.crypto_cfg_be;
  1111. pce = cmdlistinfo->crypto_cfg_le;
  1112. pce->data = pce_dev->reg.crypto_cfg_le;
  1113. /* write key in CRYPTO_AUTH_IV0-3_REG */
  1114. _byte_stream_to_net_words(ikey32, &req->ikey[0], OTA_KEY_SIZE);
  1115. pce = cmdlistinfo->auth_iv;
  1116. for (i = 0; i < key_size_in_word; i++, pce++)
  1117. pce->data = ikey32[i];
  1118. /* write last bits in CRYPTO_AUTH_IV4_REG */
  1119. pce->data = req->last_bits;
  1120. /* write fresh to CRYPTO_AUTH_BYTECNT0_REG */
  1121. pce = cmdlistinfo->auth_bytecount;
  1122. pce->data = req->fresh;
  1123. /* write count-i to CRYPTO_AUTH_BYTECNT1_REG */
  1124. pce++;
  1125. pce->data = req->count_i;
  1126. /* write auth seg cfg */
  1127. pce = cmdlistinfo->auth_seg_cfg;
  1128. if (req->direction == QCE_OTA_DIR_DOWNLINK)
  1129. cfg |= BIT(CRYPTO_F9_DIRECTION);
  1130. pce->data = cfg;
  1131. /* write auth seg size */
  1132. pce = cmdlistinfo->auth_seg_size;
  1133. pce->data = req->msize;
  1134. /* write auth seg start*/
  1135. pce = cmdlistinfo->auth_seg_start;
  1136. pce->data = 0;
  1137. /* write seg size */
  1138. pce = cmdlistinfo->seg_size;
  1139. pce->data = req->msize;
  1140. /* write go */
  1141. pce = cmdlistinfo->go_proc;
  1142. pce->addr = (uint32_t)(CRYPTO_GOPROC_REG + pce_dev->phy_iobase);
  1143. return 0;
  1144. }
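/*
 * Program the command list for an OTA f8 (ciphering) request: Kasumi
 * or SNOW 3G encr config, cipher key, count-C/bearer/packet-count IV
 * words, and the encrypt segment offset and size.
 */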
  1145. static int _ce_f8_setup(struct qce_device *pce_dev, struct qce_f8_req *req,
  1146. bool key_stream_mode, uint16_t npkts, uint16_t cipher_offset,
  1147. uint16_t cipher_size,
  1148. struct qce_cmdlist_info *cmdlistinfo)
  1149. {
  1150. uint32_t ckey32[OTA_KEY_SIZE/sizeof(uint32_t)];
  1151. uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
  1152. uint32_t cfg;
  1153. struct sps_command_element *pce;
  1154. int i;
  1155. switch (req->algorithm) {
  1156. case QCE_OTA_ALGO_KASUMI:
  1157. cfg = pce_dev->reg.encr_cfg_kasumi;
  1158. break;
  1159. case QCE_OTA_ALGO_SNOW3G:
  1160. default:
  1161. cfg = pce_dev->reg.encr_cfg_snow3g;
  1162. break;
  1163. }
  1164. if (qce_crypto_config(pce_dev, QCE_OFFLOAD_NONE))
  1165. return -EINVAL;
  1166. pce = cmdlistinfo->crypto_cfg;
  1167. pce->data = pce_dev->reg.crypto_cfg_be;
  1168. pce = cmdlistinfo->crypto_cfg_le;
  1169. pce->data = pce_dev->reg.crypto_cfg_le;
  1170. /* write key */
  1171. _byte_stream_to_net_words(ckey32, &req->ckey[0], OTA_KEY_SIZE);
  1172. pce = cmdlistinfo->encr_key;
  1173. for (i = 0; i < key_size_in_word; i++, pce++)
  1174. pce->data = ckey32[i];
  1175. /* write encr seg cfg */
  1176. pce = cmdlistinfo->encr_seg_cfg;
  1177. if (key_stream_mode)
  1178. cfg |= BIT(CRYPTO_F8_KEYSTREAM_ENABLE);
  1179. if (req->direction == QCE_OTA_DIR_DOWNLINK)
  1180. cfg |= BIT(CRYPTO_F8_DIRECTION);
  1181. pce->data = cfg;
  1182. /* write encr seg start */
  1183. pce = cmdlistinfo->encr_seg_start;
  1184. pce->data = (cipher_offset & 0xffff);
  1185. /* write encr seg size */
  1186. pce = cmdlistinfo->encr_seg_size;
  1187. pce->data = cipher_size;
  1188. /* write seg size */
  1189. pce = cmdlistinfo->seg_size;
  1190. pce->data = req->data_len;
  1191. /* write cntr0_iv0 for countC */
  1192. pce = cmdlistinfo->encr_cntr_iv;
  1193. pce->data = req->count_c;
  1194. /* write cntr1_iv1 for nPkts, and bearer */
  1195. pce++;
  1196. if (npkts == 1)
  1197. npkts = 0;
  1198. pce->data = req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER |
  1199. npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT;
  1200. /* write go */
  1201. pce = cmdlistinfo->go_proc;
  1202. pce->addr = (uint32_t)(CRYPTO_GOPROC_REG + pce_dev->phy_iobase);
  1203. return 0;
  1204. }
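/*
 * Debug helper: dump the consumer and producer SPS descriptor FIFOs
 * of a request, expanding command descriptors into their individual
 * register/value pairs.
 */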
  1205. static void _qce_dump_descr_fifos(struct qce_device *pce_dev, int req_info)
  1206. {
  1207. int i, j, ents;
  1208. struct ce_sps_data *pce_sps_data;
  1209. struct sps_iovec *iovec;
  1210. uint32_t cmd_flags = SPS_IOVEC_FLAG_CMD;
  1211. pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
  1212. iovec = pce_sps_data->in_transfer.iovec;
  1213. pr_info("==============================================\n");
  1214. pr_info("CONSUMER (TX/IN/DEST) PIPE DESCRIPTOR\n");
  1215. pr_info("==============================================\n");
  1216. for (i = 0; i < pce_sps_data->in_transfer.iovec_count; i++) {
  1217. pr_info(" [%d] addr=0x%x size=0x%x flags=0x%x\n", i,
  1218. iovec->addr, iovec->size, iovec->flags);
  1219. if (iovec->flags & cmd_flags) {
  1220. struct sps_command_element *pced;
  1221. pced = (struct sps_command_element *)
  1222. (GET_VIRT_ADDR(iovec->addr));
  1223. ents = iovec->size/(sizeof(struct sps_command_element));
  1224. for (j = 0; j < ents; j++) {
  1225. pr_info(" [%d] [0x%x] 0x%x\n", j,
  1226. pced->addr, pced->data);
  1227. pced++;
  1228. }
  1229. }
  1230. iovec++;
  1231. }
  1232. pr_info("==============================================\n");
  1233. pr_info("PRODUCER (RX/OUT/SRC) PIPE DESCRIPTOR\n");
  1234. pr_info("==============================================\n");
  1235. iovec = pce_sps_data->out_transfer.iovec;
  1236. for (i = 0; i < pce_sps_data->out_transfer.iovec_count; i++) {
  1237. pr_info(" [%d] addr=0x%x size=0x%x flags=0x%x\n", i,
  1238. iovec->addr, iovec->size, iovec->flags);
  1239. iovec++;
  1240. }
  1241. }
  1242. #ifdef QCE_DEBUG
  1243. static void _qce_dump_descr_fifos_dbg(struct qce_device *pce_dev, int req_info)
  1244. {
  1245. _qce_dump_descr_fifos(pce_dev, req_info);
  1246. }
  1247. #define QCE_WRITE_REG(val, addr) \
  1248. { \
  1249. pr_info(" [0x%pK] 0x%x\n", addr, (uint32_t)val); \
  1250. writel_relaxed(val, addr); \
  1251. }
  1252. #else
  1253. static void _qce_dump_descr_fifos_dbg(struct qce_device *pce_dev, int req_info)
  1254. {
  1255. }
  1256. #define QCE_WRITE_REG(val, addr) \
  1257. writel_relaxed(val, addr)
  1258. #endif
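/*
 * Direct (register-write) setup for SHA1/SHA256, HMAC and AES-CMAC
 * hash requests: programs auth IVs, keys and byte counts, the auth
 * and seg size registers, then issues GO.
 */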
  1259. static int _ce_setup_hash_direct(struct qce_device *pce_dev,
  1260. struct qce_sha_req *sreq)
  1261. {
  1262. uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
  1263. uint32_t diglen;
  1264. bool use_hw_key = false;
  1265. bool use_pipe_key = false;
  1266. int i;
  1267. uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
  1268. 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
  1269. uint32_t authk_size_in_word = sreq->authklen/sizeof(uint32_t);
  1270. bool sha1 = false;
  1271. uint32_t auth_cfg = 0;
  1272. /* clear status */
  1273. QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
  1274. if (qce_crypto_config(pce_dev, QCE_OFFLOAD_NONE))
  1275. return -EINVAL;
  1276. QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
  1277. CRYPTO_CONFIG_REG));
  1278. /*
  1279. * Ensure previous instructions (setting the CONFIG register)
1280. * have completed before starting to set the other config registers.
  1281. * This is to ensure the configurations are done in correct endian-ness
  1282. * as set in the CONFIG registers
  1283. */
  1284. mb();
  1285. if (sreq->alg == QCE_HASH_AES_CMAC) {
1286. /* write auth seg_cfg */
1287. QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
1288. /* write encr seg_cfg */
1289. QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
1290. /* write encr seg_size */
1291. QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
  1292. /* Clear auth_ivn, auth_keyn registers */
  1293. for (i = 0; i < 16; i++) {
  1294. QCE_WRITE_REG(0, (pce_dev->iobase +
  1295. (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
  1296. QCE_WRITE_REG(0, (pce_dev->iobase +
  1297. (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t))));
  1298. }
  1299. /* write auth_bytecnt 0/1/2/3, start with 0 */
  1300. for (i = 0; i < 4; i++)
  1301. QCE_WRITE_REG(0, pce_dev->iobase +
  1302. CRYPTO_AUTH_BYTECNT0_REG +
  1303. i * sizeof(uint32_t));
  1304. if (sreq->authklen == AES128_KEY_SIZE)
  1305. auth_cfg = pce_dev->reg.auth_cfg_cmac_128;
  1306. else
  1307. auth_cfg = pce_dev->reg.auth_cfg_cmac_256;
  1308. }
  1309. if ((sreq->alg == QCE_HASH_SHA1_HMAC) ||
  1310. (sreq->alg == QCE_HASH_SHA256_HMAC) ||
  1311. (sreq->alg == QCE_HASH_AES_CMAC)) {
  1312. _byte_stream_to_net_words(mackey32, sreq->authkey,
  1313. sreq->authklen);
1314. /* no null key check any more; use the flags to decide */
  1315. if ((sreq->flags & QCRYPTO_CTX_USE_HW_KEY) ==
  1316. QCRYPTO_CTX_USE_HW_KEY) {
  1317. use_hw_key = true;
  1318. } else if ((sreq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
  1319. QCRYPTO_CTX_USE_PIPE_KEY) {
  1320. use_pipe_key = true;
  1321. } else {
  1322. /* setup key */
  1323. for (i = 0; i < authk_size_in_word; i++)
  1324. QCE_WRITE_REG(mackey32[i], (pce_dev->iobase +
  1325. (CRYPTO_AUTH_KEY0_REG +
  1326. i*sizeof(uint32_t))));
  1327. }
  1328. }
  1329. if (sreq->alg == QCE_HASH_AES_CMAC)
  1330. goto go_proc;
  1331. /* if not the last, the size has to be on the block boundary */
  1332. if (!sreq->last_blk && (sreq->size % SHA256_BLOCK_SIZE))
  1333. return -EIO;
  1334. switch (sreq->alg) {
  1335. case QCE_HASH_SHA1:
  1336. auth_cfg = pce_dev->reg.auth_cfg_sha1;
  1337. diglen = SHA1_DIGEST_SIZE;
  1338. sha1 = true;
  1339. break;
  1340. case QCE_HASH_SHA1_HMAC:
  1341. auth_cfg = pce_dev->reg.auth_cfg_hmac_sha1;
  1342. diglen = SHA1_DIGEST_SIZE;
  1343. sha1 = true;
  1344. break;
  1345. case QCE_HASH_SHA256:
  1346. auth_cfg = pce_dev->reg.auth_cfg_sha256;
  1347. diglen = SHA256_DIGEST_SIZE;
  1348. break;
  1349. case QCE_HASH_SHA256_HMAC:
  1350. auth_cfg = pce_dev->reg.auth_cfg_hmac_sha256;
  1351. diglen = SHA256_DIGEST_SIZE;
  1352. break;
  1353. default:
  1354. return -EINVAL;
  1355. }
  1356. /* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */
  1357. if (sreq->first_blk) {
  1358. if (sha1) {
  1359. for (i = 0; i < 5; i++)
  1360. auth32[i] = _std_init_vector_sha1[i];
  1361. } else {
  1362. for (i = 0; i < 8; i++)
  1363. auth32[i] = _std_init_vector_sha256[i];
  1364. }
  1365. } else {
  1366. _byte_stream_to_net_words(auth32, sreq->digest, diglen);
  1367. }
  1368. /* Set auth_ivn, auth_keyn registers */
  1369. for (i = 0; i < 5; i++)
  1370. QCE_WRITE_REG(auth32[i], (pce_dev->iobase +
  1371. (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
  1372. if ((sreq->alg == QCE_HASH_SHA256) ||
  1373. (sreq->alg == QCE_HASH_SHA256_HMAC)) {
  1374. for (i = 5; i < 8; i++)
  1375. QCE_WRITE_REG(auth32[i], (pce_dev->iobase +
  1376. (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
  1377. }
  1378. /* write auth_bytecnt 0/1/2/3, start with 0 */
  1379. for (i = 0; i < 2; i++)
  1380. QCE_WRITE_REG(sreq->auth_data[i], pce_dev->iobase +
  1381. CRYPTO_AUTH_BYTECNT0_REG +
  1382. i * sizeof(uint32_t));
  1383. /* Set/reset last bit in CFG register */
  1384. if (sreq->last_blk)
  1385. auth_cfg |= 1 << CRYPTO_LAST;
  1386. else
  1387. auth_cfg &= ~(1 << CRYPTO_LAST);
  1388. if (sreq->first_blk)
  1389. auth_cfg |= 1 << CRYPTO_FIRST;
  1390. else
  1391. auth_cfg &= ~(1 << CRYPTO_FIRST);
  1392. if (use_hw_key)
  1393. auth_cfg |= 1 << CRYPTO_USE_HW_KEY_AUTH;
  1394. if (use_pipe_key)
  1395. auth_cfg |= 1 << CRYPTO_USE_PIPE_KEY_AUTH;
  1396. go_proc:
  1397. /* write seg_cfg */
  1398. QCE_WRITE_REG(auth_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
  1399. /* write auth seg_size */
  1400. QCE_WRITE_REG(sreq->size, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
  1401. /* write auth_seg_start */
  1402. QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
  1403. /* reset encr seg_cfg */
  1404. QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
  1405. /* write seg_size */
  1406. QCE_WRITE_REG(sreq->size, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
  1407. QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
  1408. CRYPTO_CONFIG_REG));
  1409. /* issue go to crypto */
  1410. if (!use_hw_key) {
  1411. QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
  1412. (1 << CRYPTO_CLR_CNTXT)),
  1413. pce_dev->iobase + CRYPTO_GOPROC_REG);
  1414. } else {
  1415. QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
  1416. pce_dev->iobase + CRYPTO_GOPROC_QC_KEY_REG);
  1417. }
  1418. /*
  1419. * Ensure previous instructions (setting the GO register)
1420. * have completed before issuing a DMA transfer request
  1421. */
  1422. mb();
  1423. return 0;
  1424. }
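/*
 * Direct (register-write) setup for an AEAD request using a CBC
 * cipher with SHA1/SHA256 HMAC: programs the IV, cipher and auth
 * keys, segment sizes and start offsets, then issues GO.
 */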
  1425. static int _ce_setup_aead_direct(struct qce_device *pce_dev,
  1426. struct qce_req *q_req, uint32_t totallen_in, uint32_t coffset)
  1427. {
  1428. int32_t authk_size_in_word = SHA_HMAC_KEY_SIZE/sizeof(uint32_t);
  1429. int i;
  1430. uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {0};
  1431. uint32_t a_cfg;
  1432. uint32_t enckey32[(MAX_CIPHER_KEY_SIZE*2)/sizeof(uint32_t)] = {0};
  1433. uint32_t enciv32[MAX_IV_LENGTH/sizeof(uint32_t)] = {0};
  1434. uint32_t enck_size_in_word = 0;
  1435. uint32_t enciv_in_word;
  1436. uint32_t key_size;
  1437. uint32_t ivsize = q_req->ivsize;
  1438. uint32_t encr_cfg;
  1439. /* clear status */
  1440. QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
  1441. if (qce_crypto_config(pce_dev, q_req->offload_op))
  1442. return -EINVAL;
  1443. QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
  1444. CRYPTO_CONFIG_REG));
  1445. /*
  1446. * Ensure previous instructions (setting the CONFIG register)
1447. * have completed before starting to set the other config registers.
  1448. * This is to ensure the configurations are done in correct endian-ness
  1449. * as set in the CONFIG registers
  1450. */
  1451. mb();
  1452. key_size = q_req->encklen;
  1453. enck_size_in_word = key_size/sizeof(uint32_t);
  1454. switch (q_req->alg) {
  1455. case CIPHER_ALG_DES:
  1456. switch (q_req->mode) {
  1457. case QCE_MODE_CBC:
  1458. encr_cfg = pce_dev->reg.encr_cfg_des_cbc;
  1459. break;
  1460. default:
  1461. return -EINVAL;
  1462. }
  1463. enciv_in_word = 2;
  1464. break;
  1465. case CIPHER_ALG_3DES:
  1466. switch (q_req->mode) {
  1467. case QCE_MODE_CBC:
  1468. encr_cfg = pce_dev->reg.encr_cfg_3des_cbc;
  1469. break;
  1470. default:
  1471. return -EINVAL;
  1472. }
  1473. enciv_in_word = 2;
  1474. break;
  1475. case CIPHER_ALG_AES:
  1476. switch (q_req->mode) {
  1477. case QCE_MODE_CBC:
  1478. if (key_size == AES128_KEY_SIZE)
  1479. encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128;
  1480. else if (key_size == AES256_KEY_SIZE)
  1481. encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256;
  1482. else
  1483. return -EINVAL;
  1484. break;
  1485. default:
  1486. return -EINVAL;
  1487. }
  1488. enciv_in_word = 4;
  1489. break;
  1490. default:
  1491. return -EINVAL;
  1492. }
  1493. /* write CNTR0_IV0_REG */
  1494. if (q_req->mode != QCE_MODE_ECB) {
  1495. _byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
  1496. for (i = 0; i < enciv_in_word; i++)
  1497. QCE_WRITE_REG(enciv32[i], pce_dev->iobase +
  1498. (CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)));
  1499. }
  1500. /*
  1501. * write encr key
  1502. * do not use hw key or pipe key
  1503. */
  1504. _byte_stream_to_net_words(enckey32, q_req->enckey, key_size);
  1505. for (i = 0; i < enck_size_in_word; i++)
  1506. QCE_WRITE_REG(enckey32[i], pce_dev->iobase +
  1507. (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)));
  1508. /* write encr seg cfg */
  1509. if (q_req->dir == QCE_ENCRYPT)
  1510. encr_cfg |= (1 << CRYPTO_ENCODE);
  1511. QCE_WRITE_REG(encr_cfg, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
  1512. /* we only support sha1-hmac and sha256-hmac at this point */
  1513. _byte_stream_to_net_words(mackey32, q_req->authkey,
  1514. q_req->authklen);
  1515. for (i = 0; i < authk_size_in_word; i++)
  1516. QCE_WRITE_REG(mackey32[i], pce_dev->iobase +
  1517. (CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)));
  1518. if (q_req->auth_alg == QCE_HASH_SHA1_HMAC) {
  1519. for (i = 0; i < 5; i++)
  1520. QCE_WRITE_REG(_std_init_vector_sha1[i],
  1521. pce_dev->iobase +
  1522. (CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)));
  1523. } else {
  1524. for (i = 0; i < 8; i++)
  1525. QCE_WRITE_REG(_std_init_vector_sha256[i],
  1526. pce_dev->iobase +
  1527. (CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)));
  1528. }
  1529. /* write auth_bytecnt 0/1, start with 0 */
  1530. QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT0_REG);
  1531. QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT1_REG);
  1532. /* write encr seg size */
  1533. QCE_WRITE_REG(q_req->cryptlen, pce_dev->iobase +
  1534. CRYPTO_ENCR_SEG_SIZE_REG);
  1535. /* write encr start */
  1536. QCE_WRITE_REG(coffset & 0xffff, pce_dev->iobase +
  1537. CRYPTO_ENCR_SEG_START_REG);
  1538. if (q_req->auth_alg == QCE_HASH_SHA1_HMAC)
  1539. a_cfg = pce_dev->reg.auth_cfg_aead_sha1_hmac;
  1540. else
  1541. a_cfg = pce_dev->reg.auth_cfg_aead_sha256_hmac;
  1542. if (q_req->dir == QCE_ENCRYPT)
  1543. a_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
  1544. else
  1545. a_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
  1546. /* write auth seg_cfg */
  1547. QCE_WRITE_REG(a_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
  1548. /* write auth seg_size */
  1549. QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
  1550. /* write auth_seg_start */
  1551. QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
  1552. /* write seg_size */
  1553. QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
  1554. QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
  1555. CRYPTO_CONFIG_REG));
  1556. /* issue go to crypto */
  1557. QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
  1558. (1 << CRYPTO_CLR_CNTXT)),
  1559. pce_dev->iobase + CRYPTO_GOPROC_REG);
  1560. /*
  1561. * Ensure previous instructions (setting the GO register)
1562. * have completed before issuing a DMA transfer request
  1563. */
  1564. mb();
  1565. return 0;
  1566. }
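/*
 * Direct (register-write) setup for cipher and AES-CCM AEAD requests;
 * the register-level counterpart of _ce_setup_cipher().
 */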
  1567. static int _ce_setup_cipher_direct(struct qce_device *pce_dev,
  1568. struct qce_req *creq, uint32_t totallen_in, uint32_t coffset)
  1569. {
  1570. uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = {
  1571. 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
  1572. uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
  1573. 0, 0, 0, 0};
  1574. uint32_t enck_size_in_word = 0;
  1575. uint32_t key_size;
  1576. bool use_hw_key = false;
  1577. bool use_pipe_key = false;
  1578. uint32_t encr_cfg = 0;
  1579. uint32_t ivsize = creq->ivsize;
  1580. int i;
  1581. /* clear status */
  1582. QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
  1583. if (qce_crypto_config(pce_dev, creq->offload_op))
  1584. return -EINVAL;
  1585. QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be,
  1586. (pce_dev->iobase + CRYPTO_CONFIG_REG));
  1587. QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le,
  1588. (pce_dev->iobase + CRYPTO_CONFIG_REG));
  1589. /*
  1590. * Ensure previous instructions (setting the CONFIG register)
1591. * have completed before starting to set the other config registers.
  1592. * This is to ensure the configurations are done in correct endian-ness
  1593. * as set in the CONFIG registers
  1594. */
  1595. mb();
  1596. if (creq->mode == QCE_MODE_XTS)
  1597. key_size = creq->encklen/2;
  1598. else
  1599. key_size = creq->encklen;
  1600. if ((creq->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
  1601. use_hw_key = true;
  1602. } else {
  1603. if ((creq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
  1604. QCRYPTO_CTX_USE_PIPE_KEY)
  1605. use_pipe_key = true;
  1606. }
  1607. if (!use_pipe_key && !use_hw_key) {
  1608. _byte_stream_to_net_words(enckey32, creq->enckey, key_size);
  1609. enck_size_in_word = key_size/sizeof(uint32_t);
  1610. }
  1611. if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) {
  1612. uint32_t authklen32 = creq->encklen/sizeof(uint32_t);
  1613. uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t);
  1614. uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0};
  1615. uint32_t auth_cfg = 0;
  1616. /* Clear auth_ivn, auth_keyn registers */
  1617. for (i = 0; i < 16; i++) {
  1618. QCE_WRITE_REG(0, (pce_dev->iobase +
  1619. (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
  1620. QCE_WRITE_REG(0, (pce_dev->iobase +
  1621. (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t))));
  1622. }
  1623. /* write auth_bytecnt 0/1/2/3, start with 0 */
  1624. for (i = 0; i < 4; i++)
  1625. QCE_WRITE_REG(0, pce_dev->iobase +
  1626. CRYPTO_AUTH_BYTECNT0_REG +
  1627. i * sizeof(uint32_t));
  1628. /* write nonce */
  1629. _byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE);
  1630. for (i = 0; i < noncelen32; i++)
  1631. QCE_WRITE_REG(nonce32[i], pce_dev->iobase +
  1632. CRYPTO_AUTH_INFO_NONCE0_REG +
  1633. (i*sizeof(uint32_t)));
  1634. if (creq->authklen == AES128_KEY_SIZE)
  1635. auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_128;
  1636. else {
  1637. if (creq->authklen == AES256_KEY_SIZE)
  1638. auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_256;
  1639. }
  1640. if (creq->dir == QCE_ENCRYPT)
  1641. auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
  1642. else
  1643. auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
  1644. auth_cfg |= ((creq->authsize - 1) << CRYPTO_AUTH_SIZE);
  1645. if (use_hw_key) {
  1646. auth_cfg |= (1 << CRYPTO_USE_HW_KEY_AUTH);
  1647. } else {
  1648. auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
  1649. /* write auth key */
  1650. for (i = 0; i < authklen32; i++)
  1651. QCE_WRITE_REG(enckey32[i], pce_dev->iobase +
  1652. CRYPTO_AUTH_KEY0_REG + (i*sizeof(uint32_t)));
  1653. }
  1654. QCE_WRITE_REG(auth_cfg, pce_dev->iobase +
  1655. CRYPTO_AUTH_SEG_CFG_REG);
  1656. if (creq->dir == QCE_ENCRYPT) {
  1657. QCE_WRITE_REG(totallen_in, pce_dev->iobase +
  1658. CRYPTO_AUTH_SEG_SIZE_REG);
  1659. } else {
  1660. QCE_WRITE_REG((totallen_in - creq->authsize),
  1661. pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
  1662. }
  1663. QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
  1664. } else {
  1665. if (creq->op != QCE_REQ_AEAD)
  1666. QCE_WRITE_REG(0, pce_dev->iobase +
  1667. CRYPTO_AUTH_SEG_CFG_REG);
  1668. }
  1669. /*
  1670. * Ensure previous instructions (write to all AUTH registers)
1671. * have completed before accessing a register that is not
1672. * in the same 1K range.
  1673. */
  1674. mb();
  1675. switch (creq->mode) {
  1676. case QCE_MODE_ECB:
  1677. if (key_size == AES128_KEY_SIZE)
  1678. encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_128;
  1679. else
  1680. encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_256;
  1681. break;
  1682. case QCE_MODE_CBC:
  1683. if (key_size == AES128_KEY_SIZE)
  1684. encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128;
  1685. else
  1686. encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256;
  1687. break;
  1688. case QCE_MODE_XTS:
  1689. if (key_size == AES128_KEY_SIZE)
  1690. encr_cfg = pce_dev->reg.encr_cfg_aes_xts_128;
  1691. else
  1692. encr_cfg = pce_dev->reg.encr_cfg_aes_xts_256;
  1693. break;
  1694. case QCE_MODE_CCM:
  1695. if (key_size == AES128_KEY_SIZE)
  1696. encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_128;
  1697. else
  1698. encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_256;
  1699. break;
  1700. case QCE_MODE_CTR:
  1701. default:
  1702. if (key_size == AES128_KEY_SIZE)
  1703. encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_128;
  1704. else
  1705. encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_256;
  1706. break;
  1707. }
  1708. switch (creq->alg) {
  1709. case CIPHER_ALG_DES:
  1710. if (creq->mode != QCE_MODE_ECB) {
  1711. encr_cfg = pce_dev->reg.encr_cfg_des_cbc;
  1712. _byte_stream_to_net_words(enciv32, creq->iv, ivsize);
  1713. QCE_WRITE_REG(enciv32[0], pce_dev->iobase +
  1714. CRYPTO_CNTR0_IV0_REG);
  1715. QCE_WRITE_REG(enciv32[1], pce_dev->iobase +
  1716. CRYPTO_CNTR1_IV1_REG);
  1717. } else {
  1718. encr_cfg = pce_dev->reg.encr_cfg_des_ecb;
  1719. }
  1720. if (!use_hw_key) {
  1721. QCE_WRITE_REG(enckey32[0], pce_dev->iobase +
  1722. CRYPTO_ENCR_KEY0_REG);
  1723. QCE_WRITE_REG(enckey32[1], pce_dev->iobase +
  1724. CRYPTO_ENCR_KEY1_REG);
  1725. }
  1726. break;
  1727. case CIPHER_ALG_3DES:
  1728. if (creq->mode != QCE_MODE_ECB) {
  1729. _byte_stream_to_net_words(enciv32, creq->iv, ivsize);
  1730. QCE_WRITE_REG(enciv32[0], pce_dev->iobase +
  1731. CRYPTO_CNTR0_IV0_REG);
  1732. QCE_WRITE_REG(enciv32[1], pce_dev->iobase +
  1733. CRYPTO_CNTR1_IV1_REG);
  1734. encr_cfg = pce_dev->reg.encr_cfg_3des_cbc;
  1735. } else {
  1736. encr_cfg = pce_dev->reg.encr_cfg_3des_ecb;
  1737. }
  1738. if (!use_hw_key) {
  1739. /* write encr key */
  1740. for (i = 0; i < 6; i++)
1741. QCE_WRITE_REG(enckey32[i], (pce_dev->iobase +
  1742. (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t))));
  1743. }
  1744. break;
  1745. case CIPHER_ALG_AES:
  1746. default:
  1747. if (creq->mode == QCE_MODE_XTS) {
  1748. uint32_t xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)]
  1749. = {0, 0, 0, 0, 0, 0, 0, 0};
  1750. uint32_t xtsklen =
  1751. creq->encklen/(2 * sizeof(uint32_t));
  1752. if (!use_hw_key && !use_pipe_key) {
  1753. _byte_stream_to_net_words(xtskey32,
  1754. (creq->enckey + creq->encklen/2),
  1755. creq->encklen/2);
  1756. /* write xts encr key */
  1757. for (i = 0; i < xtsklen; i++)
  1758. QCE_WRITE_REG(xtskey32[i],
  1759. pce_dev->iobase +
  1760. CRYPTO_ENCR_XTS_KEY0_REG +
  1761. (i * sizeof(uint32_t)));
  1762. }
  1763. /* write xts du size */
  1764. switch (creq->flags & QCRYPTO_CTX_XTS_MASK) {
  1765. case QCRYPTO_CTX_XTS_DU_SIZE_512B:
  1766. QCE_WRITE_REG(
  1767. min((uint32_t)QCE_SECTOR_SIZE,
  1768. creq->cryptlen), pce_dev->iobase +
  1769. CRYPTO_ENCR_XTS_DU_SIZE_REG);
  1770. break;
  1771. case QCRYPTO_CTX_XTS_DU_SIZE_1KB:
  1772. QCE_WRITE_REG(
  1773. min((uint32_t)(QCE_SECTOR_SIZE * 2),
  1774. creq->cryptlen), pce_dev->iobase +
  1775. CRYPTO_ENCR_XTS_DU_SIZE_REG);
  1776. break;
  1777. default:
  1778. QCE_WRITE_REG(creq->cryptlen,
  1779. pce_dev->iobase +
  1780. CRYPTO_ENCR_XTS_DU_SIZE_REG);
  1781. break;
  1782. }
  1783. }
  1784. if (creq->mode != QCE_MODE_ECB) {
  1785. if (creq->mode == QCE_MODE_XTS)
  1786. _byte_stream_swap_to_net_words(enciv32,
  1787. creq->iv, ivsize);
  1788. else
  1789. _byte_stream_to_net_words(enciv32, creq->iv,
  1790. ivsize);
  1791. /* write encr cntr iv */
  1792. for (i = 0; i <= 3; i++)
  1793. QCE_WRITE_REG(enciv32[i], pce_dev->iobase +
  1794. CRYPTO_CNTR0_IV0_REG +
  1795. (i * sizeof(uint32_t)));
  1796. if (creq->mode == QCE_MODE_CCM) {
  1797. /* write cntr iv for ccm */
  1798. for (i = 0; i <= 3; i++)
  1799. QCE_WRITE_REG(enciv32[i],
  1800. pce_dev->iobase +
  1801. CRYPTO_ENCR_CCM_INT_CNTR0_REG +
  1802. (i * sizeof(uint32_t)));
  1803. /* update cntr_iv[3] by one */
  1804. QCE_WRITE_REG((enciv32[3] + 1),
  1805. pce_dev->iobase +
  1806. CRYPTO_CNTR0_IV0_REG +
  1807. (3 * sizeof(uint32_t)));
  1808. }
  1809. }
  1810. if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
  1811. encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
  1812. CRYPTO_ENCR_KEY_SZ);
  1813. } else {
  1814. if (!use_hw_key && !use_pipe_key) {
  1815. for (i = 0; i < enck_size_in_word; i++)
  1816. QCE_WRITE_REG(enckey32[i],
  1817. pce_dev->iobase +
  1818. CRYPTO_ENCR_KEY0_REG +
  1819. (i * sizeof(uint32_t)));
  1820. }
  1821. } /* else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
  1822. break;
  1823. } /* end of switch (creq->mode) */
  1824. if (use_pipe_key)
  1825. encr_cfg |= (CRYPTO_USE_PIPE_KEY_ENCR_ENABLED
  1826. << CRYPTO_USE_PIPE_KEY_ENCR);
  1827. /* write encr seg cfg */
  1828. encr_cfg |= ((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;
  1829. if (use_hw_key)
  1830. encr_cfg |= (CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
  1831. else
  1832. encr_cfg &= ~(CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
  1833. /* write encr seg cfg */
  1834. QCE_WRITE_REG(encr_cfg, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
  1835. /* write encr seg size */
  1836. if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT)) {
  1837. QCE_WRITE_REG((creq->cryptlen + creq->authsize),
  1838. pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
  1839. } else {
  1840. QCE_WRITE_REG(creq->cryptlen,
  1841. pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
  1842. }
  1843. /* write pattern */
  1844. if (creq->is_pattern_valid)
  1845. QCE_WRITE_REG(creq->pattern_info, pce_dev->iobase +
  1846. CRYPTO_DATA_PATT_PROC_CFG_REG);
1847. /* write block offset to CRYPTO_DATA_PARTIAL_BLOCK_PROC_CFG_REG */
  1848. QCE_WRITE_REG(((creq->block_offset << 4) |
  1849. (creq->block_offset ? 1 : 0)),
  1850. pce_dev->iobase + CRYPTO_DATA_PARTIAL_BLOCK_PROC_CFG_REG);
  1851. /* write encr seg start */
  1852. QCE_WRITE_REG((coffset & 0xffff),
  1853. pce_dev->iobase + CRYPTO_ENCR_SEG_START_REG);
  1854. /* write encr counter mask */
  1855. qce_set_iv_ctr_mask(pce_dev, creq);
  1856. QCE_WRITE_REG(pce_dev->reg.encr_cntr_mask_3,
  1857. pce_dev->iobase + CRYPTO_CNTR_MASK_REG);
  1858. QCE_WRITE_REG(pce_dev->reg.encr_cntr_mask_2,
  1859. pce_dev->iobase + CRYPTO_CNTR_MASK_REG2);
  1860. QCE_WRITE_REG(pce_dev->reg.encr_cntr_mask_1,
  1861. pce_dev->iobase + CRYPTO_CNTR_MASK_REG1);
  1862. QCE_WRITE_REG(pce_dev->reg.encr_cntr_mask_0,
  1863. pce_dev->iobase + CRYPTO_CNTR_MASK_REG0);
  1864. /* write seg size */
  1865. QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
  1866. /* issue go to crypto */
  1867. if (!use_hw_key) {
  1868. QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
  1869. (1 << CRYPTO_CLR_CNTXT)),
  1870. pce_dev->iobase + CRYPTO_GOPROC_REG);
  1871. } else {
  1872. QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
  1873. pce_dev->iobase + CRYPTO_GOPROC_QC_KEY_REG);
  1874. }
  1875. /*
  1876. * Ensure previous instructions (setting the GO register)
1877. * have completed before issuing a DMA transfer request
  1878. */
  1879. mb();
  1880. return 0;
  1881. }
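/* Direct (register-write) counterpart of _ce_f9_setup(). */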
  1882. static int _ce_f9_setup_direct(struct qce_device *pce_dev,
  1883. struct qce_f9_req *req)
  1884. {
  1885. uint32_t ikey32[OTA_KEY_SIZE/sizeof(uint32_t)];
  1886. uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
  1887. uint32_t auth_cfg;
  1888. int i;
  1889. switch (req->algorithm) {
  1890. case QCE_OTA_ALGO_KASUMI:
  1891. auth_cfg = pce_dev->reg.auth_cfg_kasumi;
  1892. break;
  1893. case QCE_OTA_ALGO_SNOW3G:
  1894. default:
  1895. auth_cfg = pce_dev->reg.auth_cfg_snow3g;
  1896. break;
  1897. }
  1898. if (qce_crypto_config(pce_dev, QCE_OFFLOAD_NONE))
  1899. return -EINVAL;
  1900. /* clear status */
  1901. QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
  1902. /* set big endian configuration */
  1903. QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
  1904. CRYPTO_CONFIG_REG));
  1905. /*
  1906. * Ensure previous instructions (setting the CONFIG register)
1907. * have completed before starting to set the other config registers.
  1908. * This is to ensure the configurations are done in correct endian-ness
  1909. * as set in the CONFIG registers
  1910. */
  1911. mb();
  1912. /* write enc_seg_cfg */
  1913. QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
1914. /* write encr_seg_size */
  1915. QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
  1916. /* write key in CRYPTO_AUTH_IV0-3_REG */
  1917. _byte_stream_to_net_words(ikey32, &req->ikey[0], OTA_KEY_SIZE);
  1918. for (i = 0; i < key_size_in_word; i++)
  1919. QCE_WRITE_REG(ikey32[i], (pce_dev->iobase +
  1920. (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
  1921. /* write last bits in CRYPTO_AUTH_IV4_REG */
  1922. QCE_WRITE_REG(req->last_bits, (pce_dev->iobase +
  1923. CRYPTO_AUTH_IV4_REG));
  1924. /* write fresh to CRYPTO_AUTH_BYTECNT0_REG */
  1925. QCE_WRITE_REG(req->fresh, (pce_dev->iobase +
  1926. CRYPTO_AUTH_BYTECNT0_REG));
  1927. /* write count-i to CRYPTO_AUTH_BYTECNT1_REG */
  1928. QCE_WRITE_REG(req->count_i, (pce_dev->iobase +
  1929. CRYPTO_AUTH_BYTECNT1_REG));
  1930. /* write auth seg cfg */
  1931. if (req->direction == QCE_OTA_DIR_DOWNLINK)
  1932. auth_cfg |= BIT(CRYPTO_F9_DIRECTION);
  1933. QCE_WRITE_REG(auth_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
  1934. /* write auth seg size */
  1935. QCE_WRITE_REG(req->msize, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
  1936. /* write auth seg start*/
  1937. QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
  1938. /* write seg size */
  1939. QCE_WRITE_REG(req->msize, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
  1940. /* set little endian configuration before go*/
  1941. QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
  1942. CRYPTO_CONFIG_REG));
  1943. /* write go */
  1944. QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
  1945. (1 << CRYPTO_CLR_CNTXT)),
  1946. pce_dev->iobase + CRYPTO_GOPROC_REG);
  1947. /*
  1948. * Ensure previous instructions (setting the GO register)
1949. * have completed before issuing a DMA transfer request
  1950. */
  1951. mb();
  1952. return 0;
  1953. }
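/* Direct (register-write) counterpart of _ce_f8_setup(). */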
  1954. static int _ce_f8_setup_direct(struct qce_device *pce_dev,
  1955. struct qce_f8_req *req, bool key_stream_mode,
  1956. uint16_t npkts, uint16_t cipher_offset, uint16_t cipher_size)
  1957. {
  1958. int i = 0;
  1959. uint32_t encr_cfg = 0;
  1960. uint32_t ckey32[OTA_KEY_SIZE/sizeof(uint32_t)];
  1961. uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
  1962. switch (req->algorithm) {
  1963. case QCE_OTA_ALGO_KASUMI:
  1964. encr_cfg = pce_dev->reg.encr_cfg_kasumi;
  1965. break;
  1966. case QCE_OTA_ALGO_SNOW3G:
  1967. default:
  1968. encr_cfg = pce_dev->reg.encr_cfg_snow3g;
  1969. break;
  1970. }
  1971. /* clear status */
  1972. QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
  1973. /* set big endian configuration */
  1974. if (qce_crypto_config(pce_dev, QCE_OFFLOAD_NONE))
  1975. return -EINVAL;
  1976. QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
  1977. CRYPTO_CONFIG_REG));
  1978. /* write auth seg configuration */
  1979. QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
  1980. /* write auth seg size */
  1981. QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
  1982. /* write key */
  1983. _byte_stream_to_net_words(ckey32, &req->ckey[0], OTA_KEY_SIZE);
  1984. for (i = 0; i < key_size_in_word; i++)
  1985. QCE_WRITE_REG(ckey32[i], (pce_dev->iobase +
  1986. (CRYPTO_ENCR_KEY0_REG + i*sizeof(uint32_t))));
  1987. /* write encr seg cfg */
  1988. if (key_stream_mode)
  1989. encr_cfg |= BIT(CRYPTO_F8_KEYSTREAM_ENABLE);
  1990. if (req->direction == QCE_OTA_DIR_DOWNLINK)
  1991. encr_cfg |= BIT(CRYPTO_F8_DIRECTION);
  1992. QCE_WRITE_REG(encr_cfg, pce_dev->iobase +
  1993. CRYPTO_ENCR_SEG_CFG_REG);
  1994. /* write encr seg start */
  1995. QCE_WRITE_REG((cipher_offset & 0xffff), pce_dev->iobase +
  1996. CRYPTO_ENCR_SEG_START_REG);
  1997. /* write encr seg size */
  1998. QCE_WRITE_REG(cipher_size, pce_dev->iobase +
  1999. CRYPTO_ENCR_SEG_SIZE_REG);
  2000. /* write seg size */
  2001. QCE_WRITE_REG(req->data_len, pce_dev->iobase +
  2002. CRYPTO_SEG_SIZE_REG);
  2003. /* write cntr0_iv0 for countC */
  2004. QCE_WRITE_REG(req->count_c, pce_dev->iobase +
  2005. CRYPTO_CNTR0_IV0_REG);
  2006. /* write cntr1_iv1 for nPkts, and bearer */
  2007. if (npkts == 1)
  2008. npkts = 0;
  2009. QCE_WRITE_REG(req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER |
  2010. npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT,
  2011. pce_dev->iobase + CRYPTO_CNTR1_IV1_REG);
  2012. /* set little endian configuration before go*/
  2013. QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
  2014. CRYPTO_CONFIG_REG));
  2015. /* write go */
  2016. QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
  2017. (1 << CRYPTO_CLR_CNTXT)),
  2018. pce_dev->iobase + CRYPTO_GOPROC_REG);
  2019. /*
  2020. * Ensure previous instructions (setting the GO register)
2021. * have completed before issuing a DMA transfer request
  2022. */
  2023. mb();
  2024. return 0;
  2025. }
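/*
 * Queue the "unlock all pipes" command list on the consumer pipe so
 * other pipes may use the crypto engine again. Skipped when pipe
 * locking is not used (no_get_around) or command lists are not
 * supported.
 */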
  2026. static int _qce_unlock_other_pipes(struct qce_device *pce_dev, int req_info)
  2027. {
  2028. int rc = 0;
  2029. struct ce_sps_data *pce_sps_data = &pce_dev->ce_request_info
  2030. [req_info].ce_sps;
  2031. uint16_t op = pce_dev->ce_request_info[req_info].offload_op;
  2032. if (pce_dev->no_get_around || !pce_dev->support_cmd_dscr)
  2033. return rc;
  2034. rc = sps_transfer_one(pce_dev->ce_bam_info.consumer[op].pipe,
  2035. GET_PHYS_ADDR(
  2036. pce_sps_data->cmdlistptr.unlock_all_pipes.cmdlist),
  2037. 0, NULL, (SPS_IOVEC_FLAG_CMD | SPS_IOVEC_FLAG_UNLOCK));
  2038. if (rc) {
  2039. pr_err("sps_xfr_one() fail rc=%d\n", rc);
  2040. rc = -EINVAL;
  2041. }
  2042. return rc;
  2043. }
  2044. static int qce_sps_set_irqs(struct qce_device *pce_dev, bool enable)
  2045. {
  2046. if (enable)
  2047. return sps_bam_enable_irqs(pce_dev->ce_bam_info.bam_handle);
  2048. else
  2049. return sps_bam_disable_irqs(pce_dev->ce_bam_info.bam_handle);
  2050. }
  2051. int qce_set_irqs(void *handle, bool enable)
  2052. {
  2053. return qce_sps_set_irqs(handle, enable);
  2054. }
  2055. EXPORT_SYMBOL(qce_set_irqs);
  2056. static inline void qce_free_req_info(struct qce_device *pce_dev, int req_info,
  2057. bool is_complete);
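/*
 * Reset both SPS pipes of an offload pipe pair, reconnect the
 * consumer and producer pipes, and re-register the producer event
 * callback.
 */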
  2058. static int qce_sps_pipe_reset(struct qce_device *pce_dev, int op)
  2059. {
  2060. int rc = -1;
  2061. struct sps_pipe *sps_pipe_info = NULL;
  2062. struct sps_connect *sps_connect_info = NULL;
  2063. /* Reset both the pipe sets in the pipe group */
  2064. sps_pipe_reset(pce_dev->ce_bam_info.bam_handle,
  2065. pce_dev->ce_bam_info.dest_pipe_index[op]);
  2066. sps_pipe_reset(pce_dev->ce_bam_info.bam_handle,
  2067. pce_dev->ce_bam_info.src_pipe_index[op]);
  2068. /* Reconnect to consumer pipe */
  2069. sps_pipe_info = pce_dev->ce_bam_info.consumer[op].pipe;
  2070. sps_connect_info = &pce_dev->ce_bam_info.consumer[op].connect;
  2071. rc = sps_disconnect(sps_pipe_info);
  2072. if (rc) {
  2073. pr_err("sps_disconnect() fail pipe=0x%lx, rc = %d\n",
  2074. (uintptr_t)sps_pipe_info, rc);
  2075. goto exit;
  2076. }
  2077. memset(sps_connect_info->desc.base, 0x00,
  2078. sps_connect_info->desc.size);
  2079. rc = sps_connect(sps_pipe_info, sps_connect_info);
  2080. if (rc) {
  2081. pr_err("sps_connect() fail pipe=0x%lx, rc = %d\n",
  2082. (uintptr_t)sps_pipe_info, rc);
  2083. goto exit;
  2084. }
  2085. /* Reconnect to producer pipe */
  2086. sps_pipe_info = pce_dev->ce_bam_info.producer[op].pipe;
  2087. sps_connect_info = &pce_dev->ce_bam_info.producer[op].connect;
  2088. rc = sps_disconnect(sps_pipe_info);
  2089. if (rc) {
  2090. pr_err("sps_connect() fail pipe=0x%lx, rc = %d\n",
  2091. (uintptr_t)sps_pipe_info, rc);
  2092. goto exit;
  2093. }
  2094. memset(sps_connect_info->desc.base, 0x00,
  2095. sps_connect_info->desc.size);
  2096. rc = sps_connect(sps_pipe_info, sps_connect_info);
  2097. if (rc) {
  2098. pr_err("sps_connect() fail pipe=0x%lx, rc = %d\n",
  2099. (uintptr_t)sps_pipe_info, rc);
  2100. goto exit;
  2101. }
  2102. /* Register producer callback */
  2103. rc = sps_register_event(sps_pipe_info,
  2104. &pce_dev->ce_bam_info.producer[op].event);
  2105. if (rc)
  2106. pr_err("Producer cb registration failed rc = %d\n",
  2107. rc);
  2108. exit:
  2109. return rc;
  2110. }
  2111. #define MAX_RESET_TIME_RETRIES 1000
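/*
 * Recover from a request timeout: reset the SPS pipes for the
 * request's offload op, poll until the engine no longer reports an
 * error, unlock the other pipes and release the request slot.
 */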
  2112. int qce_manage_timeout(void *handle, int req_info)
  2113. {
  2114. struct qce_device *pce_dev = (struct qce_device *) handle;
  2115. struct skcipher_request *areq;
  2116. struct ce_request_info *preq_info;
  2117. qce_comp_func_ptr_t qce_callback;
  2118. uint16_t op = pce_dev->ce_request_info[req_info].offload_op;
  2119. struct qce_error error = {0};
  2120. int retries = 0;
  2121. preq_info = &pce_dev->ce_request_info[req_info];
  2122. qce_callback = preq_info->qce_cb;
  2123. areq = (struct skcipher_request *) preq_info->areq;
  2124. pr_info("%s: req info = %d, offload op = %d\n", __func__, req_info, op);
  2125. if (qce_sps_pipe_reset(pce_dev, op))
  2126. pr_err("%s: pipe reset failed\n", __func__);
  2127. qce_get_crypto_status(pce_dev, &error);
  2128. while (!error.no_error && retries < MAX_RESET_TIME_RETRIES) {
  2129. usleep_range(3000, 5000);
  2130. retries++;
  2131. qce_get_crypto_status(pce_dev, &error);
  2132. pr_info("%s: waiting for reset to complete\n", __func__);
  2133. }
2134. /* Write memory barrier */
  2135. wmb();
  2136. if (_qce_unlock_other_pipes(pce_dev, req_info))
  2137. pr_err("%s: fail unlock other pipes\n", __func__);
  2138. qce_enable_clock_gating(pce_dev);
  2139. if (!atomic_read(&preq_info->in_use)) {
  2140. pr_err("request information %d already done\n", req_info);
  2141. return -ENXIO;
  2142. }
  2143. qce_free_req_info(pce_dev, req_info, true);
  2144. return 0;
  2145. }
  2146. EXPORT_SYMBOL(qce_manage_timeout);
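/*
 * Completion handler for AEAD requests: unmap DMA buffers, read the
 * MAC and status from the result dump, translate errors (a CCM MAC
 * failure becomes -EBADMSG) and invoke the client callback.
 */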
  2147. static int _aead_complete(struct qce_device *pce_dev, int req_info)
  2148. {
  2149. struct aead_request *areq;
  2150. unsigned char mac[SHA256_DIGEST_SIZE];
  2151. uint32_t ccm_fail_status = 0;
  2152. uint32_t result_dump_status = 0;
  2153. int32_t result_status = 0;
  2154. struct ce_request_info *preq_info;
  2155. struct ce_sps_data *pce_sps_data;
  2156. qce_comp_func_ptr_t qce_callback;
  2157. preq_info = &pce_dev->ce_request_info[req_info];
  2158. pce_sps_data = &preq_info->ce_sps;
  2159. qce_callback = preq_info->qce_cb;
  2160. areq = (struct aead_request *) preq_info->areq;
  2161. if (areq->src != areq->dst) {
  2162. qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
  2163. DMA_FROM_DEVICE);
  2164. }
  2165. qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
  2166. (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
  2167. DMA_TO_DEVICE);
  2168. if (preq_info->asg)
  2169. qce_dma_unmap_sg(pce_dev->pdev, preq_info->asg,
  2170. preq_info->assoc_nents, DMA_TO_DEVICE);
  2171. /* check MAC */
  2172. memcpy(mac, (char *)(&pce_sps_data->result->auth_iv[0]),
  2173. SHA256_DIGEST_SIZE);
  2174. /* read status before unlock */
  2175. if (preq_info->dir == QCE_DECRYPT) {
  2176. if (pce_dev->no_get_around)
  2177. if (pce_dev->no_ccm_mac_status_get_around)
  2178. ccm_fail_status =
  2179. be32_to_cpu(pce_sps_data->result->status);
  2180. else
  2181. ccm_fail_status =
  2182. be32_to_cpu(pce_sps_data->result_null->status);
  2183. else
  2184. ccm_fail_status = readl_relaxed(pce_dev->iobase +
  2185. CRYPTO_STATUS_REG);
  2186. }
  2187. if (_qce_unlock_other_pipes(pce_dev, req_info)) {
  2188. qce_free_req_info(pce_dev, req_info, true);
  2189. qce_callback(areq, mac, NULL, -ENXIO);
  2190. return -ENXIO;
  2191. }
  2192. result_dump_status = be32_to_cpu(pce_sps_data->result->status);
  2193. pce_sps_data->result->status = 0;
  2194. if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
  2195. | (1 << CRYPTO_HSD_ERR))) {
  2196. pr_err("aead operation error. Status %x\n", result_dump_status);
  2197. result_status = -ENXIO;
  2198. } else if (pce_sps_data->consumer_status |
  2199. pce_sps_data->producer_status) {
  2200. pr_err("aead sps operation error. sps status %x %x\n",
  2201. pce_sps_data->consumer_status,
  2202. pce_sps_data->producer_status);
  2203. result_status = -ENXIO;
  2204. }
  2205. if (!atomic_read(&preq_info->in_use)) {
  2206. pr_err("request information %d already done\n", req_info);
  2207. return -ENXIO;
  2208. }
  2209. if (preq_info->mode == QCE_MODE_CCM) {
  2210. /*
  2211. * Not from result dump, instead, use the status we just
  2212. * read of device for MAC_FAILED.
  2213. */
  2214. if (result_status == 0 && (preq_info->dir == QCE_DECRYPT) &&
  2215. (ccm_fail_status & (1 << CRYPTO_MAC_FAILED)))
  2216. result_status = -EBADMSG;
  2217. qce_free_req_info(pce_dev, req_info, true);
  2218. qce_callback(areq, mac, NULL, result_status);
  2219. } else {
  2220. uint32_t ivsize = 0;
  2221. struct crypto_aead *aead;
  2222. unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE];
  2223. aead = crypto_aead_reqtfm(areq);
  2224. ivsize = crypto_aead_ivsize(aead);
  2225. memcpy(iv, (char *)(pce_sps_data->result->encr_cntr_iv),
  2226. sizeof(iv));
  2227. qce_free_req_info(pce_dev, req_info, true);
  2228. qce_callback(areq, mac, iv, result_status);
  2229. }
  2230. return 0;
  2231. }
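/*
 * Completion handler for hash requests: unmap the source, copy the
 * digest and byte counts from the result dump and invoke the
 * callback.
 */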
  2232. static int _sha_complete(struct qce_device *pce_dev, int req_info)
  2233. {
  2234. struct ahash_request *areq;
  2235. unsigned char digest[SHA256_DIGEST_SIZE];
  2236. uint32_t bytecount32[2];
  2237. int32_t result_status = 0;
  2238. uint32_t result_dump_status;
  2239. struct ce_request_info *preq_info;
  2240. struct ce_sps_data *pce_sps_data;
  2241. qce_comp_func_ptr_t qce_callback;
  2242. preq_info = &pce_dev->ce_request_info[req_info];
  2243. pce_sps_data = &preq_info->ce_sps;
  2244. qce_callback = preq_info->qce_cb;
  2245. areq = (struct ahash_request *) preq_info->areq;
  2246. if (!areq) {
  2247. pr_err("sha operation error. areq is NULL\n");
  2248. return -ENXIO;
  2249. }
  2250. qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
  2251. DMA_TO_DEVICE);
  2252. memcpy(digest, (char *)(&pce_sps_data->result->auth_iv[0]),
  2253. SHA256_DIGEST_SIZE);
  2254. _byte_stream_to_net_words(bytecount32,
  2255. (unsigned char *)pce_sps_data->result->auth_byte_count,
  2256. 2 * CRYPTO_REG_SIZE);
  2257. if (_qce_unlock_other_pipes(pce_dev, req_info)) {
  2258. qce_free_req_info(pce_dev, req_info, true);
  2259. qce_callback(areq, digest, (char *)bytecount32,
  2260. -ENXIO);
  2261. return -ENXIO;
  2262. }
  2263. result_dump_status = be32_to_cpu(pce_sps_data->result->status);
  2264. pce_sps_data->result->status = 0;
  2265. if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
  2266. | (1 << CRYPTO_HSD_ERR))) {
  2267. pr_err("sha operation error. Status %x\n", result_dump_status);
  2268. result_status = -ENXIO;
  2269. } else if (pce_sps_data->consumer_status) {
  2270. pr_err("sha sps operation error. sps status %x\n",
  2271. pce_sps_data->consumer_status);
  2272. result_status = -ENXIO;
  2273. }
  2274. if (!atomic_read(&preq_info->in_use)) {
  2275. pr_err("request information %d already done\n", req_info);
  2276. return -ENXIO;
  2277. }
  2278. qce_free_req_info(pce_dev, req_info, true);
  2279. qce_callback(areq, digest, (char *)bytecount32, result_status);
  2280. return 0;
  2281. }
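/*
 * Completion handler for OTA f9 requests: unmap the message buffer,
 * extract MAC-I from the result dump and invoke the callback.
 */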
  2282. static int _f9_complete(struct qce_device *pce_dev, int req_info)
  2283. {
  2284. uint32_t mac_i;
  2285. int32_t result_status = 0;
  2286. uint32_t result_dump_status;
  2287. struct ce_request_info *preq_info;
  2288. struct ce_sps_data *pce_sps_data;
  2289. qce_comp_func_ptr_t qce_callback;
  2290. void *areq;
  2291. preq_info = &pce_dev->ce_request_info[req_info];
  2292. pce_sps_data = &preq_info->ce_sps;
  2293. qce_callback = preq_info->qce_cb;
  2294. areq = preq_info->areq;
  2295. dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
  2296. preq_info->ota_size, DMA_TO_DEVICE);
  2297. _byte_stream_to_net_words(&mac_i,
  2298. (char *)(&pce_sps_data->result->auth_iv[0]),
  2299. CRYPTO_REG_SIZE);
  2300. if (_qce_unlock_other_pipes(pce_dev, req_info)) {
  2301. qce_free_req_info(pce_dev, req_info, true);
  2302. qce_callback(areq, NULL, NULL, -ENXIO);
  2303. return -ENXIO;
  2304. }
  2305. result_dump_status = be32_to_cpu(pce_sps_data->result->status);
  2306. pce_sps_data->result->status = 0;
  2307. if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
  2308. | (1 << CRYPTO_HSD_ERR))) {
  2309. pr_err("f9 operation error. Status %x\n", result_dump_status);
  2310. result_status = -ENXIO;
  2311. } else if (pce_sps_data->consumer_status |
  2312. pce_sps_data->producer_status) {
  2313. pr_err("f9 sps operation error. sps status %x %x\n",
  2314. pce_sps_data->consumer_status,
  2315. pce_sps_data->producer_status);
  2316. result_status = -ENXIO;
  2317. }
  2318. qce_free_req_info(pce_dev, req_info, true);
  2319. qce_callback(areq, (char *)&mac_i, NULL, result_status);
  2320. return 0;
  2321. }
  2322. static int _ablk_cipher_complete(struct qce_device *pce_dev, int req_info)
  2323. {
  2324. struct skcipher_request *areq;
  2325. unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE];
  2326. int32_t result_status = 0;
  2327. uint32_t result_dump_status;
  2328. struct ce_request_info *preq_info;
  2329. struct ce_sps_data *pce_sps_data;
  2330. qce_comp_func_ptr_t qce_callback;
  2331. preq_info = &pce_dev->ce_request_info[req_info];
  2332. pce_sps_data = &preq_info->ce_sps;
  2333. qce_callback = preq_info->qce_cb;
  2334. areq = (struct skcipher_request *) preq_info->areq;
  2335. if (!is_offload_op(preq_info->offload_op)) {
  2336. if (areq->src != areq->dst)
  2337. qce_dma_unmap_sg(pce_dev->pdev, areq->dst,
  2338. preq_info->dst_nents, DMA_FROM_DEVICE);
  2339. qce_dma_unmap_sg(pce_dev->pdev, areq->src,
  2340. preq_info->src_nents,
  2341. (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
  2342. DMA_TO_DEVICE);
  2343. }
  2344. if (_qce_unlock_other_pipes(pce_dev, req_info)) {
  2345. qce_free_req_info(pce_dev, req_info, true);
  2346. qce_callback(areq, NULL, NULL, -ENXIO);
  2347. return -ENXIO;
  2348. }
  2349. result_dump_status = be32_to_cpu(pce_sps_data->result->status);
  2350. pce_sps_data->result->status = 0;
  2351. if (!is_offload_op(preq_info->offload_op)) {
  2352. if (result_dump_status & ((1 << CRYPTO_SW_ERR) |
  2353. (1 << CRYPTO_AXI_ERR) | (1 << CRYPTO_HSD_ERR))) {
  2354. pr_err("ablk_cipher operation error. Status %x\n",
  2355. result_dump_status);
  2356. result_status = -ENXIO;
  2357. }
  2358. }
  2359. if (pce_sps_data->consumer_status |
  2360. pce_sps_data->producer_status) {
  2361. pr_err("ablk_cipher sps operation error. sps status %x %x\n",
  2362. pce_sps_data->consumer_status,
  2363. pce_sps_data->producer_status);
  2364. result_status = -ENXIO;
  2365. }
  2366. if (preq_info->mode == QCE_MODE_ECB) {
  2367. qce_free_req_info(pce_dev, req_info, true);
  2368. qce_callback(areq, NULL, NULL, pce_sps_data->consumer_status |
  2369. result_status);
  2370. } else {
  2371. if (pce_dev->ce_bam_info.minor_version == 0) {
  2372. if (preq_info->mode == QCE_MODE_CBC) {
  2373. if (preq_info->dir == QCE_DECRYPT)
  2374. memcpy(iv, (char *)preq_info->dec_iv,
  2375. sizeof(iv));
  2376. else
  2377. memcpy(iv, (unsigned char *)
  2378. (sg_virt(areq->src) +
  2379. areq->src->length - 16),
  2380. sizeof(iv));
  2381. }
  2382. if ((preq_info->mode == QCE_MODE_CTR) ||
  2383. (preq_info->mode == QCE_MODE_XTS)) {
  2384. uint32_t num_blk = 0;
  2385. uint32_t cntr_iv3 = 0;
  2386. unsigned long long cntr_iv64 = 0;
  2387. unsigned char *b = (unsigned char *)(&cntr_iv3);
  2388. memcpy(iv, areq->iv, sizeof(iv));
  2389. if (preq_info->mode != QCE_MODE_XTS)
  2390. num_blk = areq->cryptlen/16;
  2391. else
  2392. num_blk = 1;
  2393. cntr_iv3 = ((*(iv + 12) << 24) & 0xff000000) |
  2394. (((*(iv + 13)) << 16) & 0xff0000) |
  2395. (((*(iv + 14)) << 8) & 0xff00) |
  2396. (*(iv + 15) & 0xff);
  2397. cntr_iv64 =
  2398. (((unsigned long long)cntr_iv3 &
  2399. 0xFFFFFFFFULL) +
  2400. (unsigned long long)num_blk) %
  2401. (unsigned long long)(0x100000000ULL);
  2402. cntr_iv3 = (u32)(cntr_iv64 & 0xFFFFFFFF);
  2403. *(iv + 15) = (char)(*b);
  2404. *(iv + 14) = (char)(*(b + 1));
  2405. *(iv + 13) = (char)(*(b + 2));
  2406. *(iv + 12) = (char)(*(b + 3));
  2407. }
  2408. } else {
  2409. memcpy(iv,
  2410. (char *)(pce_sps_data->result->encr_cntr_iv),
  2411. sizeof(iv));
  2412. }
  2413. if (!atomic_read(&preq_info->in_use)) {
  2414. pr_err("request information %d already done\n", req_info);
  2415. return -ENXIO;
  2416. }
  2417. qce_free_req_info(pce_dev, req_info, true);
  2418. qce_callback(areq, NULL, iv, result_status);
  2419. }
  2420. return 0;
  2421. }
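/*
 * On CE revisions with minor_version == 0 the hardware result dump does
 * not carry the final counter IV, so _ablk_cipher_complete() advances it
 * in software: the last four IV bytes are read big-endian, incremented by
 * the number of 16-byte blocks consumed (cryptlen / 16 for CTR, 1 for
 * XTS), reduced modulo 2^32, and written back big-endian. Sketch of the
 * equivalent logic, for illustration only:
 *
 *	u32 ctr = get_unaligned_be32(iv + 12);
 *	ctr = (u32)(((u64)ctr + num_blk) & 0xffffffffULL);
 *	put_unaligned_be32(ctr, iv + 12);
 */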
  2422. static int _f8_complete(struct qce_device *pce_dev, int req_info)
  2423. {
  2424. int32_t result_status = 0;
  2425. uint32_t result_dump_status;
  2426. uint32_t result_dump_status2;
  2427. struct ce_request_info *preq_info;
  2428. struct ce_sps_data *pce_sps_data;
  2429. qce_comp_func_ptr_t qce_callback;
  2430. void *areq;
  2431. preq_info = &pce_dev->ce_request_info[req_info];
  2432. pce_sps_data = &preq_info->ce_sps;
  2433. qce_callback = preq_info->qce_cb;
  2434. areq = preq_info->areq;
  2435. if (preq_info->phy_ota_dst)
  2436. dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst,
  2437. preq_info->ota_size, DMA_FROM_DEVICE);
  2438. if (preq_info->phy_ota_src)
  2439. dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
  2440. preq_info->ota_size, (preq_info->phy_ota_dst) ?
  2441. DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
  2442. if (_qce_unlock_other_pipes(pce_dev, req_info)) {
  2443. qce_free_req_info(pce_dev, req_info, true);
  2444. qce_callback(areq, NULL, NULL, -ENXIO);
  2445. return -ENXIO;
  2446. }
  2447. result_dump_status = be32_to_cpu(pce_sps_data->result->status);
  2448. result_dump_status2 = be32_to_cpu(pce_sps_data->result->status2);
  2449. if ((result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
  2450. | (1 << CRYPTO_HSD_ERR)))) {
  2451. pr_err(
  2452. "f8 oper error. Dump Sta %x Sta2 %x req %d\n",
  2453. result_dump_status, result_dump_status2, req_info);
  2454. result_status = -ENXIO;
  2455. } else if (pce_sps_data->consumer_status |
  2456. pce_sps_data->producer_status) {
  2457. pr_err("f8 sps operation error. sps status %x %x\n",
  2458. pce_sps_data->consumer_status,
  2459. pce_sps_data->producer_status);
  2460. result_status = -ENXIO;
  2461. }
  2462. pce_sps_data->result->status = 0;
  2463. pce_sps_data->result->status2 = 0;
  2464. qce_free_req_info(pce_dev, req_info, true);
  2465. qce_callback(areq, NULL, NULL, result_status);
  2466. return 0;
  2467. }
  2468. static void _qce_sps_iovec_count_init(struct qce_device *pce_dev, int req_info)
  2469. {
  2470. struct ce_sps_data *pce_sps_data = &pce_dev->ce_request_info[req_info]
  2471. .ce_sps;
  2472. pce_sps_data->in_transfer.iovec_count = 0;
  2473. pce_sps_data->out_transfer.iovec_count = 0;
  2474. }
  2475. static void _qce_set_flag(struct sps_transfer *sps_bam_pipe, uint32_t flag)
  2476. {
  2477. struct sps_iovec *iovec;
  2478. if (sps_bam_pipe->iovec_count == 0)
  2479. return;
  2480. iovec = sps_bam_pipe->iovec + (sps_bam_pipe->iovec_count - 1);
  2481. iovec->flags |= flag;
  2482. }
  2483. static int _qce_sps_add_data(dma_addr_t paddr, uint32_t len,
  2484. struct sps_transfer *sps_bam_pipe)
  2485. {
  2486. struct sps_iovec *iovec = sps_bam_pipe->iovec +
  2487. sps_bam_pipe->iovec_count;
  2488. uint32_t data_cnt;
  2489. while (len > 0) {
  2490. if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
2491. pr_err("Number of descriptors %d exceeds max (%d)\n",
  2492. sps_bam_pipe->iovec_count,
  2493. (uint32_t)QCE_MAX_NUM_DSCR);
  2494. return -ENOMEM;
  2495. }
  2496. if (len > SPS_MAX_PKT_SIZE)
  2497. data_cnt = SPS_MAX_PKT_SIZE;
  2498. else
  2499. data_cnt = len;
  2500. iovec->size = data_cnt;
  2501. iovec->addr = SPS_GET_LOWER_ADDR(paddr);
  2502. iovec->flags = SPS_GET_UPPER_ADDR(paddr);
  2503. sps_bam_pipe->iovec_count++;
  2504. iovec++;
  2505. paddr += data_cnt;
  2506. len -= data_cnt;
  2507. }
  2508. return 0;
  2509. }
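/*
 * _qce_sps_add_data() fans a flat DMA region out into BAM iovecs: each
 * descriptor is capped at SPS_MAX_PKT_SIZE, the physical address is split
 * between iovec->addr and the upper-address bits stored in iovec->flags,
 * and the call fails with -ENOMEM once QCE_MAX_NUM_DSCR descriptors have
 * been queued on the pipe. A buffer of length len therefore consumes
 * roughly len / SPS_MAX_PKT_SIZE (rounded up) consecutive descriptors.
 */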
  2510. static int _qce_sps_add_sg_data(struct qce_device *pce_dev,
  2511. struct scatterlist *sg_src, uint32_t nbytes,
  2512. struct sps_transfer *sps_bam_pipe)
  2513. {
  2514. uint32_t data_cnt, len;
  2515. dma_addr_t addr;
  2516. struct sps_iovec *iovec = sps_bam_pipe->iovec +
  2517. sps_bam_pipe->iovec_count;
  2518. while (nbytes > 0 && sg_src) {
  2519. len = min(nbytes, sg_dma_len(sg_src));
  2520. nbytes -= len;
  2521. addr = sg_dma_address(sg_src);
  2522. if (pce_dev->ce_bam_info.minor_version == 0)
  2523. len = ALIGN(len, pce_dev->ce_bam_info.ce_burst_size);
  2524. while (len > 0) {
  2525. if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
2526. pr_err("Number of descriptors %d exceeds max (%d)\n",
  2527. sps_bam_pipe->iovec_count,
  2528. (uint32_t)QCE_MAX_NUM_DSCR);
  2529. return -ENOMEM;
  2530. }
  2531. if (len > SPS_MAX_PKT_SIZE) {
  2532. data_cnt = SPS_MAX_PKT_SIZE;
  2533. iovec->size = data_cnt;
  2534. iovec->addr = SPS_GET_LOWER_ADDR(addr);
  2535. iovec->flags = SPS_GET_UPPER_ADDR(addr);
  2536. } else {
  2537. data_cnt = len;
  2538. iovec->size = data_cnt;
  2539. iovec->addr = SPS_GET_LOWER_ADDR(addr);
  2540. iovec->flags = SPS_GET_UPPER_ADDR(addr);
  2541. }
  2542. iovec++;
  2543. sps_bam_pipe->iovec_count++;
  2544. addr += data_cnt;
  2545. len -= data_cnt;
  2546. }
  2547. sg_src = sg_next(sg_src);
  2548. }
  2549. return 0;
  2550. }
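/*
 * _qce_sps_add_sg_data() is the scatterlist counterpart of
 * _qce_sps_add_data(). On CE revisions with minor_version == 0 each
 * fragment length is rounded up to the engine burst size with ALIGN(),
 * so the queued descriptors may cover slightly more than nbytes of the
 * mapped fragment; newer revisions use the fragment length as-is.
 */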
  2551. static int _qce_sps_add_sg_data_off(struct qce_device *pce_dev,
  2552. struct scatterlist *sg_src, uint32_t nbytes, uint32_t off,
  2553. struct sps_transfer *sps_bam_pipe)
  2554. {
  2555. uint32_t data_cnt, len;
  2556. dma_addr_t addr;
  2557. struct sps_iovec *iovec = sps_bam_pipe->iovec +
  2558. sps_bam_pipe->iovec_count;
  2559. unsigned int res_within_sg;
  2560. if (!sg_src)
  2561. return -ENOENT;
  2562. res_within_sg = sg_dma_len(sg_src);
  2563. while (off > 0) {
  2564. if (!sg_src) {
  2565. pr_err("broken sg list off %d nbytes %d\n",
  2566. off, nbytes);
  2567. return -ENOENT;
  2568. }
  2569. len = sg_dma_len(sg_src);
  2570. if (off < len) {
  2571. res_within_sg = len - off;
  2572. break;
  2573. }
  2574. off -= len;
  2575. sg_src = sg_next(sg_src);
  2576. if (sg_src)
  2577. res_within_sg = sg_dma_len(sg_src);
  2578. }
  2579. while (nbytes > 0 && sg_src) {
  2580. len = min(nbytes, res_within_sg);
  2581. nbytes -= len;
  2582. addr = sg_dma_address(sg_src) + off;
  2583. if (pce_dev->ce_bam_info.minor_version == 0)
  2584. len = ALIGN(len, pce_dev->ce_bam_info.ce_burst_size);
  2585. while (len > 0) {
  2586. if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
2587. pr_err("Number of descriptors %d exceeds max (%d)\n",
  2588. sps_bam_pipe->iovec_count,
  2589. (uint32_t)QCE_MAX_NUM_DSCR);
  2590. return -ENOMEM;
  2591. }
  2592. if (len > SPS_MAX_PKT_SIZE) {
  2593. data_cnt = SPS_MAX_PKT_SIZE;
  2594. iovec->size = data_cnt;
  2595. iovec->addr = SPS_GET_LOWER_ADDR(addr);
  2596. iovec->flags = SPS_GET_UPPER_ADDR(addr);
  2597. } else {
  2598. data_cnt = len;
  2599. iovec->size = data_cnt;
  2600. iovec->addr = SPS_GET_LOWER_ADDR(addr);
  2601. iovec->flags = SPS_GET_UPPER_ADDR(addr);
  2602. }
  2603. iovec++;
  2604. sps_bam_pipe->iovec_count++;
  2605. addr += data_cnt;
  2606. len -= data_cnt;
  2607. }
  2608. if (nbytes) {
  2609. sg_src = sg_next(sg_src);
  2610. if (!sg_src) {
  2611. pr_err("more data bytes %d\n", nbytes);
  2612. return -ENOMEM;
  2613. }
  2614. res_within_sg = sg_dma_len(sg_src);
  2615. off = 0;
  2616. }
  2617. }
  2618. return 0;
  2619. }
  2620. static int _qce_sps_add_cmd(struct qce_device *pce_dev, uint32_t flag,
  2621. struct qce_cmdlist_info *cmdptr,
  2622. struct sps_transfer *sps_bam_pipe)
  2623. {
  2624. dma_addr_t paddr = GET_PHYS_ADDR(cmdptr->cmdlist);
  2625. struct sps_iovec *iovec = sps_bam_pipe->iovec +
  2626. sps_bam_pipe->iovec_count;
  2627. iovec->size = cmdptr->size;
  2628. iovec->addr = SPS_GET_LOWER_ADDR(paddr);
  2629. iovec->flags = SPS_GET_UPPER_ADDR(paddr) | SPS_IOVEC_FLAG_CMD | flag;
  2630. sps_bam_pipe->iovec_count++;
  2631. if (sps_bam_pipe->iovec_count >= QCE_MAX_NUM_DSCR) {
2632. pr_err("Number of descriptors %d exceeds max (%d)\n",
  2633. sps_bam_pipe->iovec_count, (uint32_t)QCE_MAX_NUM_DSCR);
  2634. return -ENOMEM;
  2635. }
  2636. return 0;
  2637. }
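/*
 * Command descriptors differ from data descriptors only in the
 * SPS_IOVEC_FLAG_CMD bit: the iovec points at the pre-built register
 * write list (cmdptr->cmdlist) and its size covers the whole list, so a
 * single descriptor programs all CE registers needed for the request.
 * The caller-supplied flag is OR-ed in alongside the command flag.
 */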
  2638. static int _qce_sps_transfer(struct qce_device *pce_dev, int req_info)
  2639. {
  2640. int rc = 0;
  2641. struct ce_sps_data *pce_sps_data;
  2642. uint16_t op = pce_dev->ce_request_info[req_info].offload_op;
  2643. pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
  2644. pce_sps_data->out_transfer.user =
  2645. (void *)((uintptr_t)(CRYPTO_REQ_USER_PAT |
  2646. (unsigned int) req_info));
  2647. pce_sps_data->in_transfer.user =
  2648. (void *)((uintptr_t)(CRYPTO_REQ_USER_PAT |
  2649. (unsigned int) req_info));
  2650. _qce_dump_descr_fifos_dbg(pce_dev, req_info);
  2651. if (pce_sps_data->in_transfer.iovec_count) {
  2652. rc = sps_transfer(pce_dev->ce_bam_info.consumer[op].pipe,
  2653. &pce_sps_data->in_transfer);
  2654. if (rc) {
  2655. pr_err("sps_xfr() fail (cons pipe=0x%lx) rc = %d\n",
  2656. (uintptr_t)pce_dev->ce_bam_info.consumer[op].pipe,
  2657. rc);
  2658. goto ret;
  2659. }
  2660. }
  2661. rc = sps_transfer(pce_dev->ce_bam_info.producer[op].pipe,
  2662. &pce_sps_data->out_transfer);
  2663. if (rc)
  2664. pr_err("sps_xfr() fail (producer pipe=0x%lx) rc = %d\n",
  2665. (uintptr_t)pce_dev->ce_bam_info.producer[op].pipe, rc);
  2666. ret:
  2667. return rc;
  2668. }
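/*
 * The "user" cookie attached to both transfers encodes CRYPTO_REQ_USER_PAT
 * in the upper bits and the request index in the low byte. The producer
 * callback later validates the pattern and recovers the slot, roughly:
 *
 *	cookie = (unsigned int)(uintptr_t)notify->data.transfer.user;
 *	if ((cookie & 0xffff0000) != CRYPTO_REQ_USER_PAT)
 *		return;				// not a qce transfer
 *	req_info = cookie & 0x00ff;		// index into ce_request_info[]
 */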
  2669. /**
  2670. * Allocate and Connect a CE peripheral's SPS endpoint
  2671. *
2672. * This function allocates an endpoint context and
2673. * connects it with a memory endpoint by calling the
2674. * appropriate SPS driver APIs.
2675. *
2676. * It also registers an SPS callback function with the
2677. * SPS driver.
  2678. *
2679. * This function should typically be called only once,
2680. * during driver probe.
  2681. *
  2682. * @pce_dev - Pointer to qce_device structure
  2683. * @ep - Pointer to sps endpoint data structure
  2684. * @index - Points to crypto use case
2685. * @is_producer - 1 means Producer endpoint
  2686. * 0 means Consumer endpoint
  2687. *
  2688. * @return - 0 if successful else negative value.
  2689. *
  2690. */
  2691. static int qce_sps_init_ep_conn(struct qce_device *pce_dev,
  2692. struct qce_sps_ep_conn_data *ep,
  2693. int index,
  2694. bool is_producer)
  2695. {
  2696. int rc = 0;
  2697. struct sps_pipe *sps_pipe_info;
  2698. struct sps_connect *sps_connect_info = &ep->connect;
  2699. struct sps_register_event *sps_event = &ep->event;
  2700. /* Allocate endpoint context */
  2701. sps_pipe_info = sps_alloc_endpoint();
  2702. if (!sps_pipe_info) {
  2703. pr_err("sps_alloc_endpoint() failed!!! is_producer=%d\n",
  2704. is_producer);
  2705. rc = -ENOMEM;
  2706. goto out;
  2707. }
  2708. /* Now save the sps pipe handle */
  2709. ep->pipe = sps_pipe_info;
  2710. /* Get default connection configuration for an endpoint */
  2711. rc = sps_get_config(sps_pipe_info, sps_connect_info);
  2712. if (rc) {
  2713. pr_err("sps_get_config() fail pipe_handle=0x%lx, rc = %d\n",
  2714. (uintptr_t)sps_pipe_info, rc);
  2715. goto get_config_err;
  2716. }
  2717. /* Modify the default connection configuration */
  2718. if (is_producer) {
  2719. /*
2720. * For a CE producer transfer, the source should be the
2721. * CE peripheral, whereas the destination should
2722. * be system memory.
  2723. */
  2724. sps_connect_info->source = pce_dev->ce_bam_info.bam_handle;
  2725. sps_connect_info->destination = SPS_DEV_HANDLE_MEM;
  2726. /* Producer pipe will handle this connection */
  2727. sps_connect_info->mode = SPS_MODE_SRC;
  2728. sps_connect_info->options =
  2729. SPS_O_AUTO_ENABLE | SPS_O_DESC_DONE;
  2730. } else {
2731. /* For a CE consumer transfer, the source should be
2732. * system memory, whereas the destination should be
2733. * the CE peripheral.
  2734. */
  2735. sps_connect_info->source = SPS_DEV_HANDLE_MEM;
  2736. sps_connect_info->destination = pce_dev->ce_bam_info.bam_handle;
  2737. sps_connect_info->mode = SPS_MODE_DEST;
  2738. sps_connect_info->options =
  2739. SPS_O_AUTO_ENABLE;
  2740. }
  2741. /* Producer pipe index */
  2742. sps_connect_info->src_pipe_index =
  2743. pce_dev->ce_bam_info.src_pipe_index[index];
  2744. /* Consumer pipe index */
  2745. sps_connect_info->dest_pipe_index =
  2746. pce_dev->ce_bam_info.dest_pipe_index[index];
  2747. /* Set pipe group */
  2748. sps_connect_info->lock_group =
  2749. pce_dev->ce_bam_info.pipe_pair_index[index];
  2750. sps_connect_info->event_thresh = 0x10;
  2751. /*
  2752. * Max. no of scatter/gather buffers that can
  2753. * be passed by block layer = 32 (NR_SG).
2754. * Each BAM descriptor needs 64 bits (8 bytes).
2755. * One BAM descriptor is required per buffer transfer.
2756. * So we would require a total of 256 (32 * 8) bytes of descriptor FIFO.
2757. * But due to a HW limitation we need to allocate at least one extra
2758. * descriptor's worth of memory (256 bytes + 8 bytes). To keep the size
2759. * a power of 2, we allocate 512 bytes of memory.
  2760. */
  2761. sps_connect_info->desc.size = QCE_MAX_NUM_DSCR * MAX_QCE_ALLOC_BAM_REQ *
  2762. sizeof(struct sps_iovec);
  2763. if (sps_connect_info->desc.size > MAX_SPS_DESC_FIFO_SIZE)
  2764. sps_connect_info->desc.size = MAX_SPS_DESC_FIFO_SIZE;
  2765. sps_connect_info->desc.base = dma_alloc_coherent(pce_dev->pdev,
  2766. sps_connect_info->desc.size,
  2767. &sps_connect_info->desc.phys_base,
  2768. GFP_KERNEL | __GFP_ZERO);
  2769. if (sps_connect_info->desc.base == NULL) {
  2770. rc = -ENOMEM;
  2771. pr_err("Can not allocate coherent memory for sps data\n");
  2772. goto get_config_err;
  2773. }
  2774. /* Establish connection between peripheral and memory endpoint */
  2775. rc = sps_connect(sps_pipe_info, sps_connect_info);
  2776. if (rc) {
  2777. pr_err("sps_connect() fail pipe_handle=0x%lx, rc = %d\n",
  2778. (uintptr_t)sps_pipe_info, rc);
  2779. goto sps_connect_err;
  2780. }
  2781. sps_event->mode = SPS_TRIGGER_CALLBACK;
  2782. sps_event->xfer_done = NULL;
  2783. sps_event->user = (void *)pce_dev;
  2784. if (is_producer) {
  2785. sps_event->options = SPS_O_EOT | SPS_O_DESC_DONE;
  2786. sps_event->callback = _sps_producer_callback;
  2787. rc = sps_register_event(ep->pipe, sps_event);
  2788. if (rc) {
  2789. pr_err("Producer callback registration failed rc=%d\n",
  2790. rc);
  2791. goto sps_connect_err;
  2792. }
  2793. } else {
  2794. sps_event->options = SPS_O_EOT;
  2795. sps_event->callback = NULL;
  2796. }
  2797. pr_debug("success, %s : pipe_handle=0x%lx, desc fifo base (phy) = 0x%pK\n",
  2798. is_producer ? "PRODUCER(RX/OUT)" : "CONSUMER(TX/IN)",
  2799. (uintptr_t)sps_pipe_info, &sps_connect_info->desc.phys_base);
  2800. goto out;
  2801. sps_connect_err:
  2802. dma_free_coherent(pce_dev->pdev,
  2803. sps_connect_info->desc.size,
  2804. sps_connect_info->desc.base,
  2805. sps_connect_info->desc.phys_base);
  2806. get_config_err:
  2807. sps_free_endpoint(sps_pipe_info);
  2808. out:
  2809. return rc;
  2810. }
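/*
 * The descriptor FIFO allocated above is sized for the worst case of
 * QCE_MAX_NUM_DSCR descriptors per request across MAX_QCE_ALLOC_BAM_REQ
 * outstanding requests (8 bytes per sps_iovec), clamped to
 * MAX_SPS_DESC_FIFO_SIZE. Only the producer pipe registers a completion
 * callback; the consumer pipe is configured with SPS_O_EOT but no
 * handler, so completions are driven entirely from the producer side.
 */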
  2811. /**
  2812. * Disconnect and Deallocate a CE peripheral's SPS endpoint
  2813. *
2814. * This function disconnects the endpoint and deallocates
2815. * the endpoint context.
  2816. *
2817. * This function should typically be called only once,
2818. * during driver remove.
  2819. *
  2820. * @pce_dev - Pointer to qce_device structure
  2821. * @ep - Pointer to sps endpoint data structure
  2822. *
  2823. */
  2824. static void qce_sps_exit_ep_conn(struct qce_device *pce_dev,
  2825. struct qce_sps_ep_conn_data *ep)
  2826. {
  2827. struct sps_pipe *sps_pipe_info = ep->pipe;
  2828. struct sps_connect *sps_connect_info = &ep->connect;
  2829. sps_disconnect(sps_pipe_info);
  2830. dma_free_coherent(pce_dev->pdev,
  2831. sps_connect_info->desc.size,
  2832. sps_connect_info->desc.base,
  2833. sps_connect_info->desc.phys_base);
  2834. sps_free_endpoint(sps_pipe_info);
  2835. }
  2836. static void qce_sps_release_bam(struct qce_device *pce_dev)
  2837. {
  2838. struct bam_registration_info *pbam;
  2839. mutex_lock(&bam_register_lock);
  2840. pbam = pce_dev->pbam;
  2841. if (pbam == NULL)
  2842. goto ret;
  2843. pbam->cnt--;
  2844. if (pbam->cnt > 0)
  2845. goto ret;
  2846. if (pce_dev->ce_bam_info.bam_handle) {
  2847. sps_deregister_bam_device(pce_dev->ce_bam_info.bam_handle);
  2848. pr_debug("deregister bam handle 0x%lx\n",
  2849. pce_dev->ce_bam_info.bam_handle);
  2850. pce_dev->ce_bam_info.bam_handle = 0;
  2851. }
  2852. iounmap(pbam->bam_iobase);
  2853. pr_debug("delete bam 0x%x\n", pbam->bam_mem);
  2854. list_del(&pbam->qlist);
  2855. kfree(pbam);
  2856. ret:
  2857. pce_dev->pbam = NULL;
  2858. mutex_unlock(&bam_register_lock);
  2859. }
  2860. static int qce_sps_get_bam(struct qce_device *pce_dev)
  2861. {
  2862. int rc = 0;
  2863. struct sps_bam_props bam = {0};
  2864. struct bam_registration_info *pbam = NULL;
  2865. struct bam_registration_info *p;
  2866. uint32_t bam_cfg = 0;
  2867. mutex_lock(&bam_register_lock);
  2868. list_for_each_entry(p, &qce50_bam_list, qlist) {
  2869. if (p->bam_mem == pce_dev->bam_mem) {
  2870. pbam = p; /* found */
  2871. break;
  2872. }
  2873. }
  2874. if (pbam) {
  2875. pr_debug("found bam 0x%x\n", pbam->bam_mem);
  2876. pbam->cnt++;
  2877. pce_dev->ce_bam_info.bam_handle = pbam->handle;
  2878. pce_dev->ce_bam_info.bam_mem = pbam->bam_mem;
  2879. pce_dev->ce_bam_info.bam_iobase = pbam->bam_iobase;
  2880. pce_dev->pbam = pbam;
  2881. pce_dev->support_cmd_dscr = pbam->support_cmd_dscr;
  2882. goto ret;
  2883. }
  2884. pbam = kzalloc(sizeof(struct bam_registration_info), GFP_KERNEL);
  2885. if (!pbam) {
  2886. rc = -ENOMEM;
  2887. goto ret;
  2888. }
  2889. pbam->cnt = 1;
  2890. pbam->bam_mem = pce_dev->bam_mem;
  2891. pbam->bam_iobase = ioremap(pce_dev->bam_mem,
  2892. pce_dev->bam_mem_size);
  2893. if (!pbam->bam_iobase) {
  2894. kfree(pbam);
  2895. rc = -ENOMEM;
  2896. pr_err("Can not map BAM io memory\n");
  2897. goto ret;
  2898. }
  2899. pce_dev->ce_bam_info.bam_mem = pbam->bam_mem;
  2900. pce_dev->ce_bam_info.bam_iobase = pbam->bam_iobase;
  2901. pbam->handle = 0;
  2902. pr_debug("allocate bam 0x%x\n", pbam->bam_mem);
  2903. bam_cfg = readl_relaxed(pce_dev->ce_bam_info.bam_iobase +
  2904. CRYPTO_BAM_CNFG_BITS_REG);
  2905. pbam->support_cmd_dscr = (bam_cfg & CRYPTO_BAM_CD_ENABLE_MASK) ?
  2906. true : false;
  2907. if (!pbam->support_cmd_dscr) {
  2908. pr_info("qce50 don't support command descriptor. bam_cfg%x\n",
  2909. bam_cfg);
  2910. pce_dev->no_get_around = false;
  2911. }
  2912. pce_dev->support_cmd_dscr = pbam->support_cmd_dscr;
  2913. bam.phys_addr = pce_dev->ce_bam_info.bam_mem;
  2914. bam.virt_addr = pce_dev->ce_bam_info.bam_iobase;
  2915. /*
  2916. * This event threshold value is only significant for BAM-to-BAM
  2917. * transfer. It's ignored for BAM-to-System mode transfer.
  2918. */
  2919. bam.event_threshold = 0x10; /* Pipe event threshold */
  2920. /*
2921. * This threshold controls when the BAM publishes
  2922. * the descriptor size on the sideband interface.
  2923. * SPS HW will only be used when
  2924. * data transfer size > 64 bytes.
  2925. */
  2926. bam.summing_threshold = 64;
2927. /* The SPS driver will handle the crypto BAM IRQ */
  2928. bam.irq = (u32)pce_dev->ce_bam_info.bam_irq;
  2929. /*
  2930. * Set flag to indicate BAM global device control is managed
  2931. * remotely.
  2932. */
  2933. if (!pce_dev->support_cmd_dscr || pce_dev->is_shared)
  2934. bam.manage = SPS_BAM_MGR_DEVICE_REMOTE;
  2935. else
  2936. bam.manage = SPS_BAM_MGR_LOCAL;
  2937. bam.ee = pce_dev->ce_bam_info.bam_ee;
  2938. bam.ipc_loglevel = QCE_BAM_DEFAULT_IPC_LOGLVL;
  2939. bam.options |= SPS_BAM_CACHED_WP;
  2940. pr_debug("bam physical base=0x%lx\n", (uintptr_t)bam.phys_addr);
  2941. pr_debug("bam virtual base=0x%pK\n", bam.virt_addr);
  2942. /* Register CE Peripheral BAM device to SPS driver */
  2943. rc = sps_register_bam_device(&bam, &pbam->handle);
  2944. if (rc) {
  2945. pr_err("sps_register_bam_device() failed! err=%d\n", rc);
  2946. rc = -EIO;
  2947. iounmap(pbam->bam_iobase);
  2948. kfree(pbam);
  2949. goto ret;
  2950. }
  2951. pce_dev->pbam = pbam;
  2952. list_add_tail(&pbam->qlist, &qce50_bam_list);
  2953. pce_dev->ce_bam_info.bam_handle = pbam->handle;
  2954. ret:
  2955. mutex_unlock(&bam_register_lock);
  2956. return rc;
  2957. }
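/*
 * BAM registration is reference counted per physical BAM. The first
 * qce_device that maps a given bam_mem registers it with the SPS driver
 * and adds a bam_registration_info entry to qce50_bam_list; subsequent
 * devices sharing the same BAM only bump pbam->cnt and reuse the existing
 * handle and iomapping. qce_sps_release_bam() undoes this, deregistering
 * and unmapping only once the count drops to zero.
 */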
  2958. /**
  2959. * Initialize SPS HW connected with CE core
  2960. *
2961. * This function registers BAM HW resources with the
2962. * SPS driver and then initializes 2 SPS endpoints.
  2963. *
2964. * This function should typically be called only once,
2965. * during driver probe.
  2966. *
  2967. * @pce_dev - Pointer to qce_device structure
  2968. *
  2969. * @return - 0 if successful else negative value.
  2970. *
  2971. */
  2972. static int qce_sps_init(struct qce_device *pce_dev)
  2973. {
  2974. int rc = 0, i = 0;
  2975. rc = qce_sps_get_bam(pce_dev);
  2976. if (rc)
  2977. return rc;
  2978. pr_debug("BAM device registered. bam_handle=0x%lx\n",
  2979. pce_dev->ce_bam_info.bam_handle);
  2980. for (i = 0; i < QCE_OFFLOAD_OPER_LAST; i++) {
  2981. if (i == QCE_OFFLOAD_NONE && !(pce_dev->kernel_pipes_support))
  2982. continue;
  2983. else if ((i > 0) && !(pce_dev->offload_pipes_support))
  2984. break;
  2985. if (!pce_dev->ce_bam_info.pipe_pair_index[i])
  2986. continue;
  2987. rc = qce_sps_init_ep_conn(pce_dev,
  2988. &pce_dev->ce_bam_info.producer[i], i, true);
  2989. if (rc)
  2990. goto sps_connect_producer_err;
  2991. rc = qce_sps_init_ep_conn(pce_dev,
  2992. &pce_dev->ce_bam_info.consumer[i], i, false);
  2993. if (rc)
  2994. goto sps_connect_consumer_err;
  2995. }
  2996. pr_info(" QTI MSM CE-BAM at 0x%016llx irq %d\n",
  2997. (unsigned long long)pce_dev->ce_bam_info.bam_mem,
  2998. (unsigned int)pce_dev->ce_bam_info.bam_irq);
  2999. return rc;
  3000. sps_connect_consumer_err:
  3001. qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.producer[i]);
  3002. sps_connect_producer_err:
  3003. qce_sps_release_bam(pce_dev);
  3004. return rc;
  3005. }
  3006. static inline int qce_alloc_req_info(struct qce_device *pce_dev)
  3007. {
  3008. int i;
  3009. int request_index = pce_dev->ce_request_index;
  3010. for (i = 0; i < MAX_QCE_BAM_REQ; i++) {
  3011. request_index++;
  3012. if (request_index >= MAX_QCE_BAM_REQ)
  3013. request_index = 0;
  3014. if (!atomic_xchg(
  3015. &pce_dev->ce_request_info[request_index].in_use,
  3016. true)) {
  3017. pce_dev->ce_request_index = request_index;
  3018. return request_index;
  3019. }
  3020. }
  3021. pr_warn("pcedev %d no reqs available no_of_queued_req %d\n",
  3022. pce_dev->dev_no, atomic_read(
  3023. &pce_dev->no_of_queued_req));
  3024. return -EBUSY;
  3025. }
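/*
 * Request slots are claimed with a lock-free round-robin scan:
 * atomic_xchg() on in_use tests and sets the slot in one step, so two
 * contexts racing for the same index cannot both win, and the scan
 * resumes from the last successful index (ce_request_index) to spread
 * load across the MAX_QCE_BAM_REQ slots.
 */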
  3026. static inline void qce_free_req_info(struct qce_device *pce_dev, int req_info,
  3027. bool is_complete)
  3028. {
  3029. pce_dev->ce_request_info[req_info].xfer_type = QCE_XFER_TYPE_LAST;
  3030. if (atomic_xchg(&pce_dev->ce_request_info[req_info].in_use,
  3031. false)) {
  3032. if (req_info < MAX_QCE_BAM_REQ && is_complete)
  3033. atomic_dec(&pce_dev->no_of_queued_req);
  3034. } else
3035. pr_warn("request info %d already freed\n", req_info);
  3036. }
  3037. static void print_notify_debug(struct sps_event_notify *notify)
  3038. {
  3039. phys_addr_t addr =
  3040. DESC_FULL_ADDR((phys_addr_t) notify->data.transfer.iovec.flags,
  3041. notify->data.transfer.iovec.addr);
  3042. pr_debug("sps ev_id=%d, addr=0x%pa, size=0x%x, flags=0x%x user=0x%pK\n",
  3043. notify->event_id, &addr,
  3044. notify->data.transfer.iovec.size,
  3045. notify->data.transfer.iovec.flags,
  3046. notify->data.transfer.user);
  3047. }
  3048. static void _qce_req_complete(struct qce_device *pce_dev, unsigned int req_info)
  3049. {
  3050. struct ce_request_info *preq_info;
  3051. preq_info = &pce_dev->ce_request_info[req_info];
  3052. switch (preq_info->xfer_type) {
  3053. case QCE_XFER_CIPHERING:
  3054. _ablk_cipher_complete(pce_dev, req_info);
  3055. break;
  3056. case QCE_XFER_HASHING:
  3057. _sha_complete(pce_dev, req_info);
  3058. break;
  3059. case QCE_XFER_AEAD:
  3060. _aead_complete(pce_dev, req_info);
  3061. break;
  3062. case QCE_XFER_F8:
  3063. _f8_complete(pce_dev, req_info);
  3064. break;
  3065. case QCE_XFER_F9:
  3066. _f9_complete(pce_dev, req_info);
  3067. break;
  3068. default:
  3069. qce_free_req_info(pce_dev, req_info, true);
  3070. break;
  3071. }
  3072. }
  3073. static void qce_multireq_timeout(struct timer_list *data)
  3074. {
  3075. struct qce_device *pce_dev = from_timer(pce_dev, data, timer);
  3076. int ret = 0;
  3077. int last_seq;
  3078. unsigned long flags;
  3079. last_seq = atomic_read(&pce_dev->bunch_cmd_seq);
  3080. if (last_seq == 0 ||
  3081. last_seq != atomic_read(&pce_dev->last_intr_seq)) {
  3082. atomic_set(&pce_dev->last_intr_seq, last_seq);
  3083. mod_timer(&(pce_dev->timer), (jiffies + DELAY_IN_JIFFIES));
  3084. return;
  3085. }
3086. /* The last bunch-mode command has timed out */
  3087. /*
3088. * From here until the dummy request finishes its sps request and the
3089. * owner is set back to none, we disable interrupts so this section
3090. * cannot be preempted or interrupted. If a BAM interrupt fired in
3091. * between and the completion callback ran from BAM, a new request
3092. * could be issued by the client driver and a deadlock could occur.
  3093. */
  3094. local_irq_save(flags);
  3095. if (cmpxchg(&pce_dev->owner, QCE_OWNER_NONE, QCE_OWNER_TIMEOUT)
  3096. != QCE_OWNER_NONE) {
  3097. local_irq_restore(flags);
  3098. mod_timer(&(pce_dev->timer), (jiffies + DELAY_IN_JIFFIES));
  3099. return;
  3100. }
  3101. ret = qce_dummy_req(pce_dev);
  3102. if (ret)
  3103. pr_warn("pcedev %d: Failed to insert dummy req\n",
  3104. pce_dev->dev_no);
  3105. cmpxchg(&pce_dev->owner, QCE_OWNER_TIMEOUT, QCE_OWNER_NONE);
  3106. pce_dev->mode = IN_INTERRUPT_MODE;
  3107. local_irq_restore(flags);
  3108. del_timer(&(pce_dev->timer));
  3109. pce_dev->qce_stats.no_of_timeouts++;
  3110. pr_debug("pcedev %d mode switch to INTR\n", pce_dev->dev_no);
  3111. }
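/*
 * In bunch mode the driver relies on this timer rather than a per-request
 * interrupt: if bunch_cmd_seq has not advanced since the previous tick,
 * the last bunch-mode command is treated as timed out, a dummy request is
 * pushed (with local interrupts disabled around the ownership handoff) to
 * flush outstanding completions, and the engine is switched back to
 * IN_INTERRUPT_MODE.
 */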
  3112. void qce_get_driver_stats(void *handle)
  3113. {
  3114. struct qce_device *pce_dev = (struct qce_device *) handle;
  3115. if (!_qce50_disp_stats)
  3116. return;
  3117. pr_info("Engine %d timeout occuured %d\n", pce_dev->dev_no,
  3118. pce_dev->qce_stats.no_of_timeouts);
  3119. pr_info("Engine %d dummy request inserted %d\n", pce_dev->dev_no,
  3120. pce_dev->qce_stats.no_of_dummy_reqs);
  3121. if (pce_dev->mode)
  3122. pr_info("Engine %d is in BUNCH MODE\n", pce_dev->dev_no);
  3123. else
  3124. pr_info("Engine %d is in INTERRUPT MODE\n", pce_dev->dev_no);
  3125. pr_info("Engine %d outstanding request %d\n", pce_dev->dev_no,
  3126. atomic_read(&pce_dev->no_of_queued_req));
  3127. }
  3128. EXPORT_SYMBOL(qce_get_driver_stats);
  3129. void qce_clear_driver_stats(void *handle)
  3130. {
  3131. struct qce_device *pce_dev = (struct qce_device *) handle;
  3132. pce_dev->qce_stats.no_of_timeouts = 0;
  3133. pce_dev->qce_stats.no_of_dummy_reqs = 0;
  3134. }
  3135. EXPORT_SYMBOL(qce_clear_driver_stats);
  3136. static void _sps_producer_callback(struct sps_event_notify *notify)
  3137. {
  3138. struct qce_device *pce_dev = (struct qce_device *)
  3139. ((struct sps_event_notify *)notify)->user;
  3140. int rc = 0;
  3141. unsigned int req_info;
  3142. struct ce_sps_data *pce_sps_data;
  3143. struct ce_request_info *preq_info;
  3144. uint16_t op;
  3145. print_notify_debug(notify);
  3146. req_info = (unsigned int)((uintptr_t)notify->data.transfer.user);
  3147. if ((req_info & 0xffff0000) != CRYPTO_REQ_USER_PAT) {
3148. pr_warn("request information %d has an invalid pattern\n", req_info);
  3149. return;
  3150. }
  3151. req_info = req_info & 0x00ff;
3152. if (req_info >= MAX_QCE_ALLOC_BAM_REQ) {
  3153. pr_warn("request information %d out of range\n", req_info);
  3154. return;
  3155. }
  3156. preq_info = &pce_dev->ce_request_info[req_info];
  3157. if (!atomic_read(&preq_info->in_use)) {
  3158. pr_err("request information %d already done\n", req_info);
  3159. return;
  3160. }
  3161. op = pce_dev->ce_request_info[req_info].offload_op;
  3162. pce_sps_data = &preq_info->ce_sps;
  3163. if ((preq_info->xfer_type == QCE_XFER_CIPHERING ||
  3164. preq_info->xfer_type == QCE_XFER_AEAD) &&
  3165. pce_sps_data->producer_state == QCE_PIPE_STATE_IDLE) {
  3166. pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
  3167. if (!is_offload_op(op) && (op < QCE_OFFLOAD_OPER_LAST)) {
  3168. pce_sps_data->out_transfer.iovec_count = 0;
  3169. _qce_sps_add_data(GET_PHYS_ADDR(
  3170. pce_sps_data->result_dump),
  3171. CRYPTO_RESULT_DUMP_SIZE,
  3172. &pce_sps_data->out_transfer);
  3173. _qce_set_flag(&pce_sps_data->out_transfer,
  3174. SPS_IOVEC_FLAG_INT);
  3175. rc = sps_transfer(
  3176. pce_dev->ce_bam_info.producer[op].pipe,
  3177. &pce_sps_data->out_transfer);
  3178. if (rc) {
  3179. pr_err("sps_xfr fail (prod pipe=0x%lx) rc = %d\n",
  3180. (uintptr_t)pce_dev->ce_bam_info.producer[op].pipe,
  3181. rc);
  3182. }
  3183. }
  3184. return;
  3185. }
  3186. _qce_req_complete(pce_dev, req_info);
  3187. }
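/*
 * Ciphering and AEAD requests complete over two producer events. The
 * first event (producer_state == QCE_PIPE_STATE_IDLE) only moves the
 * state to QCE_PIPE_STATE_COMP and, for non-offload operations, queues
 * one more producer descriptor pointing at the result dump with
 * SPS_IOVEC_FLAG_INT set; the request itself is finished on the second
 * event, when _qce_req_complete() dispatches the per-type completion
 * handler.
 */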
  3188. /**
  3189. * De-initialize SPS HW connected with CE core
  3190. *
3191. * This function deinitializes SPS endpoints and then
3192. * deregisters BAM resources from the SPS driver.
  3193. *
3194. * This function should typically be called only once,
3195. * during driver remove.
  3196. *
  3197. * @pce_dev - Pointer to qce_device structure
  3198. *
  3199. */
  3200. static void qce_sps_exit(struct qce_device *pce_dev)
  3201. {
  3202. int i = 0;
  3203. for (i = 0; i < QCE_OFFLOAD_OPER_LAST; i++) {
  3204. if (i == QCE_OFFLOAD_NONE && !(pce_dev->kernel_pipes_support))
  3205. continue;
  3206. else if ((i > 0) && !(pce_dev->offload_pipes_support))
  3207. break;
  3208. if (!pce_dev->ce_bam_info.pipe_pair_index[i])
  3209. continue;
  3210. qce_sps_exit_ep_conn(pce_dev,
  3211. &pce_dev->ce_bam_info.consumer[i]);
  3212. qce_sps_exit_ep_conn(pce_dev,
  3213. &pce_dev->ce_bam_info.producer[i]);
  3214. }
  3215. qce_sps_release_bam(pce_dev);
  3216. }
  3217. static void qce_add_cmd_element(struct qce_device *pdev,
  3218. struct sps_command_element **cmd_ptr, u32 addr,
  3219. u32 data, struct sps_command_element **populate)
  3220. {
  3221. (*cmd_ptr)->addr = (uint32_t)(addr + pdev->phy_iobase);
  3222. (*cmd_ptr)->command = 0;
  3223. (*cmd_ptr)->data = data;
  3224. (*cmd_ptr)->mask = 0xFFFFFFFF;
  3225. (*cmd_ptr)->reserved = 0;
  3226. if (populate != NULL)
  3227. *populate = *cmd_ptr;
  3228. (*cmd_ptr)++;
  3229. }
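/*
 * Each command element is a full-word register write: the target address
 * is the register offset rebased onto the CE's physical I/O base, the
 * mask is 0xFFFFFFFF, and "populate" optionally returns the element's
 * address so later setup code can patch the data word without rebuilding
 * the list. Hypothetical usage, mirroring the callers below:
 *
 *	struct sps_command_element *seg_size;
 *	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0, &seg_size);
 *	...
 *	seg_size->data = totallen;	// patched per request before issue
 */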
  3230. static int _setup_cipher_aes_cmdlistptrs(struct qce_device *pdev, int cri_index,
  3231. unsigned char **pvaddr, enum qce_cipher_mode_enum mode,
  3232. bool key_128)
  3233. {
  3234. struct sps_command_element *ce_vaddr;
  3235. uintptr_t ce_vaddr_start;
  3236. struct qce_cmdlistptr_ops *cmdlistptr;
  3237. struct qce_cmdlist_info *pcl_info = NULL;
  3238. int i = 0;
  3239. uint32_t encr_cfg = 0;
  3240. uint32_t key_reg = 0;
  3241. uint32_t xts_key_reg = 0;
  3242. uint32_t iv_reg = 0;
  3243. cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
  3244. *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
  3245. pdev->ce_bam_info.ce_burst_size);
  3246. ce_vaddr = (struct sps_command_element *)(*pvaddr);
  3247. ce_vaddr_start = (uintptr_t)(*pvaddr);
  3248. /*
  3249. * Designate chunks of the allocated memory to various
  3250. * command list pointers related to AES cipher operations defined
  3251. * in ce_cmdlistptrs_ops structure.
  3252. */
  3253. switch (mode) {
  3254. case QCE_MODE_CBC:
  3255. case QCE_MODE_CTR:
  3256. if (key_128) {
  3257. cmdlistptr->cipher_aes_128_cbc_ctr.cmdlist =
  3258. (uintptr_t)ce_vaddr;
  3259. pcl_info = &(cmdlistptr->cipher_aes_128_cbc_ctr);
  3260. if (mode == QCE_MODE_CBC)
  3261. encr_cfg = pdev->reg.encr_cfg_aes_cbc_128;
  3262. else
  3263. encr_cfg = pdev->reg.encr_cfg_aes_ctr_128;
  3264. iv_reg = 4;
  3265. key_reg = 4;
  3266. xts_key_reg = 0;
  3267. } else {
  3268. cmdlistptr->cipher_aes_256_cbc_ctr.cmdlist =
  3269. (uintptr_t)ce_vaddr;
  3270. pcl_info = &(cmdlistptr->cipher_aes_256_cbc_ctr);
  3271. if (mode == QCE_MODE_CBC)
  3272. encr_cfg = pdev->reg.encr_cfg_aes_cbc_256;
  3273. else
  3274. encr_cfg = pdev->reg.encr_cfg_aes_ctr_256;
  3275. iv_reg = 4;
  3276. key_reg = 8;
  3277. xts_key_reg = 0;
  3278. }
  3279. break;
  3280. case QCE_MODE_ECB:
  3281. if (key_128) {
  3282. cmdlistptr->cipher_aes_128_ecb.cmdlist =
  3283. (uintptr_t)ce_vaddr;
  3284. pcl_info = &(cmdlistptr->cipher_aes_128_ecb);
  3285. encr_cfg = pdev->reg.encr_cfg_aes_ecb_128;
  3286. iv_reg = 0;
  3287. key_reg = 4;
  3288. xts_key_reg = 0;
  3289. } else {
  3290. cmdlistptr->cipher_aes_256_ecb.cmdlist =
  3291. (uintptr_t)ce_vaddr;
  3292. pcl_info = &(cmdlistptr->cipher_aes_256_ecb);
  3293. encr_cfg = pdev->reg.encr_cfg_aes_ecb_256;
  3294. iv_reg = 0;
  3295. key_reg = 8;
  3296. xts_key_reg = 0;
  3297. }
  3298. break;
  3299. case QCE_MODE_XTS:
  3300. if (key_128) {
  3301. cmdlistptr->cipher_aes_128_xts.cmdlist =
  3302. (uintptr_t)ce_vaddr;
  3303. pcl_info = &(cmdlistptr->cipher_aes_128_xts);
  3304. encr_cfg = pdev->reg.encr_cfg_aes_xts_128;
  3305. iv_reg = 4;
  3306. key_reg = 4;
  3307. xts_key_reg = 4;
  3308. } else {
  3309. cmdlistptr->cipher_aes_256_xts.cmdlist =
  3310. (uintptr_t)ce_vaddr;
  3311. pcl_info = &(cmdlistptr->cipher_aes_256_xts);
  3312. encr_cfg = pdev->reg.encr_cfg_aes_xts_256;
  3313. iv_reg = 4;
  3314. key_reg = 8;
  3315. xts_key_reg = 8;
  3316. }
  3317. break;
  3318. default:
  3319. pr_err("Unknown mode of operation %d received, exiting now\n",
  3320. mode);
  3321. return -EINVAL;
  3322. break;
  3323. }
  3324. /* clear status register */
  3325. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
  3326. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS2_REG, 0, NULL);
  3327. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS3_REG, 0, NULL);
  3328. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS4_REG, 0, NULL);
  3329. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS5_REG, 0, NULL);
  3330. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS6_REG, 0, NULL);
  3331. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
  3332. pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
  3333. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
  3334. &pcl_info->seg_size);
  3335. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
  3336. &pcl_info->encr_seg_cfg);
  3337. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
  3338. &pcl_info->encr_seg_size);
  3339. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
  3340. &pcl_info->encr_seg_start);
  3341. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG,
  3342. pdev->reg.encr_cntr_mask_3, &pcl_info->encr_mask_3);
  3343. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG2,
  3344. pdev->reg.encr_cntr_mask_2, &pcl_info->encr_mask_2);
  3345. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG1,
  3346. pdev->reg.encr_cntr_mask_1, &pcl_info->encr_mask_1);
  3347. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG0,
  3348. pdev->reg.encr_cntr_mask_0, &pcl_info->encr_mask_0);
  3349. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
  3350. &pcl_info->auth_seg_cfg);
  3351. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_DATA_PATT_PROC_CFG_REG, 0,
  3352. &pcl_info->pattern_info);
  3353. qce_add_cmd_element(pdev, &ce_vaddr,
  3354. CRYPTO_DATA_PARTIAL_BLOCK_PROC_CFG_REG, 0,
  3355. &pcl_info->block_offset);
  3356. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
  3357. &pcl_info->encr_key);
  3358. for (i = 1; i < key_reg; i++)
  3359. qce_add_cmd_element(pdev, &ce_vaddr,
  3360. (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
  3361. 0, NULL);
  3362. if (xts_key_reg) {
  3363. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_XTS_KEY0_REG,
  3364. 0, &pcl_info->encr_xts_key);
  3365. for (i = 1; i < xts_key_reg; i++)
  3366. qce_add_cmd_element(pdev, &ce_vaddr,
  3367. (CRYPTO_ENCR_XTS_KEY0_REG +
  3368. i * sizeof(uint32_t)), 0, NULL);
  3369. qce_add_cmd_element(pdev, &ce_vaddr,
  3370. CRYPTO_ENCR_XTS_DU_SIZE_REG, 0,
  3371. &pcl_info->encr_xts_du_size);
  3372. }
  3373. if (iv_reg) {
  3374. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
  3375. &pcl_info->encr_cntr_iv);
  3376. for (i = 1; i < iv_reg; i++)
  3377. qce_add_cmd_element(pdev, &ce_vaddr,
  3378. (CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
  3379. 0, NULL);
  3380. }
  3381. /* Add dummy to align size to burst-size multiple */
  3382. if (mode == QCE_MODE_XTS) {
  3383. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
  3384. 0, &pcl_info->auth_seg_size);
  3385. } else {
  3386. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
  3387. 0, &pcl_info->auth_seg_size);
  3388. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG,
  3389. 0, &pcl_info->auth_seg_size);
  3390. }
  3391. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
  3392. pdev->reg.crypto_cfg_le, &pcl_info->crypto_cfg_le);
  3393. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
  3394. ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
  3395. (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
  3396. pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
  3397. *pvaddr = (unsigned char *) ce_vaddr;
  3398. return 0;
  3399. }
  3400. static int _setup_cipher_des_cmdlistptrs(struct qce_device *pdev, int cri_index,
  3401. unsigned char **pvaddr, enum qce_cipher_alg_enum alg,
  3402. bool mode_cbc)
  3403. {
  3404. struct sps_command_element *ce_vaddr;
  3405. uintptr_t ce_vaddr_start;
  3406. struct qce_cmdlistptr_ops *cmdlistptr;
  3407. struct qce_cmdlist_info *pcl_info = NULL;
  3408. int i = 0;
  3409. uint32_t encr_cfg = 0;
  3410. uint32_t key_reg = 0;
  3411. uint32_t iv_reg = 0;
  3412. cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
  3413. *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
  3414. pdev->ce_bam_info.ce_burst_size);
  3415. ce_vaddr = (struct sps_command_element *)(*pvaddr);
  3416. ce_vaddr_start = (uintptr_t)(*pvaddr);
  3417. /*
  3418. * Designate chunks of the allocated memory to various
  3419. * command list pointers related to cipher operations defined
  3420. * in ce_cmdlistptrs_ops structure.
  3421. */
  3422. switch (alg) {
  3423. case CIPHER_ALG_DES:
  3424. if (mode_cbc) {
  3425. cmdlistptr->cipher_des_cbc.cmdlist =
  3426. (uintptr_t)ce_vaddr;
  3427. pcl_info = &(cmdlistptr->cipher_des_cbc);
  3428. encr_cfg = pdev->reg.encr_cfg_des_cbc;
  3429. iv_reg = 2;
  3430. key_reg = 2;
  3431. } else {
  3432. cmdlistptr->cipher_des_ecb.cmdlist =
  3433. (uintptr_t)ce_vaddr;
  3434. pcl_info = &(cmdlistptr->cipher_des_ecb);
  3435. encr_cfg = pdev->reg.encr_cfg_des_ecb;
  3436. iv_reg = 0;
  3437. key_reg = 2;
  3438. }
  3439. break;
  3440. case CIPHER_ALG_3DES:
  3441. if (mode_cbc) {
  3442. cmdlistptr->cipher_3des_cbc.cmdlist =
  3443. (uintptr_t)ce_vaddr;
  3444. pcl_info = &(cmdlistptr->cipher_3des_cbc);
  3445. encr_cfg = pdev->reg.encr_cfg_3des_cbc;
  3446. iv_reg = 2;
  3447. key_reg = 6;
  3448. } else {
  3449. cmdlistptr->cipher_3des_ecb.cmdlist =
  3450. (uintptr_t)ce_vaddr;
  3451. pcl_info = &(cmdlistptr->cipher_3des_ecb);
  3452. encr_cfg = pdev->reg.encr_cfg_3des_ecb;
  3453. iv_reg = 0;
  3454. key_reg = 6;
  3455. }
  3456. break;
  3457. default:
  3458. pr_err("Unknown algorithms %d received, exiting now\n", alg);
  3459. return -EINVAL;
  3460. break;
  3461. }
  3462. /* clear status register */
  3463. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
  3464. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
  3465. pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
  3466. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
  3467. &pcl_info->seg_size);
  3468. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
  3469. &pcl_info->encr_seg_cfg);
  3470. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
  3471. &pcl_info->encr_seg_size);
  3472. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
  3473. &pcl_info->encr_seg_start);
  3474. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
  3475. &pcl_info->auth_seg_cfg);
  3476. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
  3477. &pcl_info->encr_key);
  3478. for (i = 1; i < key_reg; i++)
  3479. qce_add_cmd_element(pdev, &ce_vaddr,
  3480. (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
  3481. 0, NULL);
  3482. if (iv_reg) {
  3483. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
  3484. &pcl_info->encr_cntr_iv);
  3485. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR1_IV1_REG, 0,
  3486. NULL);
  3487. }
  3488. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
  3489. pdev->reg.crypto_cfg_le, &pcl_info->crypto_cfg_le);
  3490. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
  3491. ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
  3492. (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
  3493. pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
  3494. *pvaddr = (unsigned char *) ce_vaddr;
  3495. return 0;
  3496. }
  3497. static int _setup_cipher_null_cmdlistptrs(struct qce_device *pdev,
  3498. int cri_index, unsigned char **pvaddr)
  3499. {
  3500. struct sps_command_element *ce_vaddr;
  3501. uintptr_t ce_vaddr_start;
  3502. struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_request_info
  3503. [cri_index].ce_sps.cmdlistptr;
  3504. struct qce_cmdlist_info *pcl_info = NULL;
  3505. *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
  3506. pdev->ce_bam_info.ce_burst_size);
  3507. ce_vaddr_start = (uintptr_t)(*pvaddr);
  3508. ce_vaddr = (struct sps_command_element *)(*pvaddr);
  3509. cmdlistptr->cipher_null.cmdlist = (uintptr_t)ce_vaddr;
  3510. pcl_info = &(cmdlistptr->cipher_null);
  3511. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG,
  3512. pdev->ce_bam_info.ce_burst_size, NULL);
  3513. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG,
  3514. pdev->reg.encr_cfg_aes_ecb_128, NULL);
  3515. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
  3516. NULL);
  3517. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
  3518. NULL);
  3519. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
  3520. 0, NULL);
  3521. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
  3522. 0, NULL);
  3523. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
  3524. NULL);
  3525. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
  3526. ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
  3527. (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
  3528. pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
  3529. *pvaddr = (unsigned char *) ce_vaddr;
  3530. return 0;
  3531. }
  3532. static int _setup_auth_cmdlistptrs(struct qce_device *pdev, int cri_index,
  3533. unsigned char **pvaddr, enum qce_hash_alg_enum alg,
  3534. bool key_128)
  3535. {
  3536. struct sps_command_element *ce_vaddr;
  3537. uintptr_t ce_vaddr_start;
  3538. struct qce_cmdlistptr_ops *cmdlistptr;
  3539. struct qce_cmdlist_info *pcl_info = NULL;
  3540. int i = 0;
  3541. uint32_t key_reg = 0;
  3542. uint32_t auth_cfg = 0;
  3543. uint32_t iv_reg = 0;
  3544. cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
  3545. *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
  3546. pdev->ce_bam_info.ce_burst_size);
  3547. ce_vaddr_start = (uintptr_t)(*pvaddr);
  3548. ce_vaddr = (struct sps_command_element *)(*pvaddr);
  3549. /*
  3550. * Designate chunks of the allocated memory to various
  3551. * command list pointers related to authentication operations
  3552. * defined in ce_cmdlistptrs_ops structure.
  3553. */
  3554. switch (alg) {
  3555. case QCE_HASH_SHA1:
  3556. cmdlistptr->auth_sha1.cmdlist = (uintptr_t)ce_vaddr;
  3557. pcl_info = &(cmdlistptr->auth_sha1);
  3558. auth_cfg = pdev->reg.auth_cfg_sha1;
  3559. iv_reg = 5;
  3560. /* clear status register */
  3561. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
  3562. 0, NULL);
  3563. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
  3564. pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
  3565. break;
  3566. case QCE_HASH_SHA256:
  3567. cmdlistptr->auth_sha256.cmdlist = (uintptr_t)ce_vaddr;
  3568. pcl_info = &(cmdlistptr->auth_sha256);
  3569. auth_cfg = pdev->reg.auth_cfg_sha256;
  3570. iv_reg = 8;
  3571. /* clear status register */
  3572. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
  3573. 0, NULL);
  3574. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
  3575. pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
  3576. /* 1 dummy write */
  3577. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
  3578. 0, NULL);
  3579. break;
  3580. case QCE_HASH_SHA1_HMAC:
  3581. cmdlistptr->auth_sha1_hmac.cmdlist = (uintptr_t)ce_vaddr;
  3582. pcl_info = &(cmdlistptr->auth_sha1_hmac);
  3583. auth_cfg = pdev->reg.auth_cfg_hmac_sha1;
  3584. key_reg = 16;
  3585. iv_reg = 5;
  3586. /* clear status register */
  3587. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
  3588. 0, NULL);
  3589. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
  3590. pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
  3591. break;
  3592. case QCE_HASH_SHA256_HMAC:
  3593. cmdlistptr->auth_sha256_hmac.cmdlist = (uintptr_t)ce_vaddr;
  3594. pcl_info = &(cmdlistptr->auth_sha256_hmac);
  3595. auth_cfg = pdev->reg.auth_cfg_hmac_sha256;
  3596. key_reg = 16;
  3597. iv_reg = 8;
  3598. /* clear status register */
  3599. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0,
  3600. NULL);
  3601. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
  3602. pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
  3603. /* 1 dummy write */
  3604. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
  3605. 0, NULL);
  3606. break;
  3607. case QCE_HASH_AES_CMAC:
  3608. if (key_128) {
  3609. cmdlistptr->auth_aes_128_cmac.cmdlist =
  3610. (uintptr_t)ce_vaddr;
  3611. pcl_info = &(cmdlistptr->auth_aes_128_cmac);
  3612. auth_cfg = pdev->reg.auth_cfg_cmac_128;
  3613. key_reg = 4;
  3614. } else {
  3615. cmdlistptr->auth_aes_256_cmac.cmdlist =
  3616. (uintptr_t)ce_vaddr;
  3617. pcl_info = &(cmdlistptr->auth_aes_256_cmac);
  3618. auth_cfg = pdev->reg.auth_cfg_cmac_256;
  3619. key_reg = 8;
  3620. }
  3621. /* clear status register */
  3622. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0,
  3623. NULL);
  3624. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
  3625. pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
  3626. /* 1 dummy write */
  3627. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
  3628. 0, NULL);
  3629. break;
  3630. default:
  3631. pr_err("Unknown algorithms %d received, exiting now\n", alg);
  3632. return -EINVAL;
  3633. break;
  3634. }
  3635. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
  3636. &pcl_info->seg_size);
  3637. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0,
  3638. &pcl_info->encr_seg_cfg);
  3639. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
  3640. auth_cfg, &pcl_info->auth_seg_cfg);
  3641. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
  3642. &pcl_info->auth_seg_size);
  3643. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
  3644. &pcl_info->auth_seg_start);
  3645. if (alg == QCE_HASH_AES_CMAC) {
  3646. /* reset auth iv, bytecount and key registers */
  3647. for (i = 0; i < 16; i++)
  3648. qce_add_cmd_element(pdev, &ce_vaddr,
  3649. (CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)),
  3650. 0, NULL);
  3651. for (i = 0; i < 16; i++)
  3652. qce_add_cmd_element(pdev, &ce_vaddr,
  3653. (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)),
  3654. 0, NULL);
  3655. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
  3656. 0, NULL);
  3657. } else {
  3658. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
  3659. &pcl_info->auth_iv);
  3660. for (i = 1; i < iv_reg; i++)
  3661. qce_add_cmd_element(pdev, &ce_vaddr,
  3662. (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
  3663. 0, NULL);
  3664. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
  3665. 0, &pcl_info->auth_bytecount);
  3666. }
  3667. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);
  3668. if (key_reg) {
  3669. qce_add_cmd_element(pdev, &ce_vaddr,
  3670. CRYPTO_AUTH_KEY0_REG, 0, &pcl_info->auth_key);
  3671. for (i = 1; i < key_reg; i++)
  3672. qce_add_cmd_element(pdev, &ce_vaddr,
  3673. (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)),
  3674. 0, NULL);
  3675. }
  3676. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
  3677. pdev->reg.crypto_cfg_le, &pcl_info->crypto_cfg_le);
  3678. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
  3679. ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
  3680. (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
  3681. pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
  3682. *pvaddr = (unsigned char *) ce_vaddr;
  3683. return 0;
  3684. }
  3685. static int _setup_aead_cmdlistptrs(struct qce_device *pdev,
  3686. int cri_index,
  3687. unsigned char **pvaddr,
  3688. uint32_t alg,
  3689. uint32_t mode,
  3690. uint32_t key_size,
  3691. bool sha1)
  3692. {
  3693. struct sps_command_element *ce_vaddr;
  3694. uintptr_t ce_vaddr_start;
  3695. struct qce_cmdlistptr_ops *cmd;
  3696. struct qce_cmdlist_info *pcl_info = NULL;
  3697. uint32_t key_reg;
  3698. uint32_t iv_reg;
  3699. uint32_t i;
  3700. uint32_t enciv_in_word;
  3701. uint32_t encr_cfg;
  3702. cmd = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
  3703. *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
  3704. pdev->ce_bam_info.ce_burst_size);
  3705. ce_vaddr_start = (uintptr_t)(*pvaddr);
  3706. ce_vaddr = (struct sps_command_element *)(*pvaddr);
  3707. switch (alg) {
  3708. case CIPHER_ALG_DES:
  3709. switch (mode) {
  3710. case QCE_MODE_CBC:
  3711. if (sha1) {
  3712. cmd->aead_hmac_sha1_cbc_des.cmdlist =
  3713. (uintptr_t)ce_vaddr;
  3714. pcl_info =
  3715. &(cmd->aead_hmac_sha1_cbc_des);
  3716. } else {
  3717. cmd->aead_hmac_sha256_cbc_des.cmdlist =
  3718. (uintptr_t)ce_vaddr;
  3719. pcl_info =
  3720. &(cmd->aead_hmac_sha256_cbc_des);
  3721. }
  3722. encr_cfg = pdev->reg.encr_cfg_des_cbc;
  3723. break;
  3724. default:
  3725. return -EINVAL;
  3726. }
  3727. enciv_in_word = 2;
  3728. break;
  3729. case CIPHER_ALG_3DES:
  3730. switch (mode) {
  3731. case QCE_MODE_CBC:
  3732. if (sha1) {
  3733. cmd->aead_hmac_sha1_cbc_3des.cmdlist =
  3734. (uintptr_t)ce_vaddr;
  3735. pcl_info =
  3736. &(cmd->aead_hmac_sha1_cbc_3des);
  3737. } else {
  3738. cmd->aead_hmac_sha256_cbc_3des.cmdlist =
  3739. (uintptr_t)ce_vaddr;
  3740. pcl_info =
  3741. &(cmd->aead_hmac_sha256_cbc_3des);
  3742. }
  3743. encr_cfg = pdev->reg.encr_cfg_3des_cbc;
  3744. break;
  3745. default:
  3746. return -EINVAL;
  3747. }
  3748. enciv_in_word = 2;
  3749. break;
  3750. case CIPHER_ALG_AES:
  3751. switch (mode) {
  3752. case QCE_MODE_CBC:
  3753. if (key_size == AES128_KEY_SIZE) {
  3754. if (sha1) {
  3755. cmd->aead_hmac_sha1_cbc_aes_128.cmdlist =
  3756. (uintptr_t)ce_vaddr;
  3757. pcl_info =
  3758. &(cmd->aead_hmac_sha1_cbc_aes_128);
  3759. } else {
  3760. cmd->aead_hmac_sha256_cbc_aes_128.cmdlist
  3761. = (uintptr_t)ce_vaddr;
  3762. pcl_info =
  3763. &(cmd->aead_hmac_sha256_cbc_aes_128);
  3764. }
  3765. encr_cfg = pdev->reg.encr_cfg_aes_cbc_128;
  3766. } else if (key_size == AES256_KEY_SIZE) {
  3767. if (sha1) {
  3768. cmd->aead_hmac_sha1_cbc_aes_256.cmdlist =
  3769. (uintptr_t)ce_vaddr;
  3770. pcl_info =
  3771. &(cmd->aead_hmac_sha1_cbc_aes_256);
  3772. } else {
  3773. cmd->aead_hmac_sha256_cbc_aes_256.cmdlist =
  3774. (uintptr_t)ce_vaddr;
  3775. pcl_info =
  3776. &(cmd->aead_hmac_sha256_cbc_aes_256);
  3777. }
  3778. encr_cfg = pdev->reg.encr_cfg_aes_cbc_256;
  3779. } else {
  3780. return -EINVAL;
  3781. }
  3782. break;
  3783. default:
  3784. return -EINVAL;
  3785. }
  3786. enciv_in_word = 4;
  3787. break;
  3788. default:
  3789. return -EINVAL;
  3790. }
  3791. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
  3792. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
  3793. pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
  3794. key_reg = key_size/sizeof(uint32_t);
  3795. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
  3796. &pcl_info->encr_key);
  3797. for (i = 1; i < key_reg; i++)
  3798. qce_add_cmd_element(pdev, &ce_vaddr,
  3799. (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
  3800. 0, NULL);
  3801. if (mode != QCE_MODE_ECB) {
  3802. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
  3803. &pcl_info->encr_cntr_iv);
  3804. for (i = 1; i < enciv_in_word; i++)
  3805. qce_add_cmd_element(pdev, &ce_vaddr,
  3806. (CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
  3807. 0, NULL);
  3808. }
  3809. if (sha1)
  3810. iv_reg = 5;
  3811. else
  3812. iv_reg = 8;
  3813. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
  3814. &pcl_info->auth_iv);
  3815. for (i = 1; i < iv_reg; i++)
  3816. qce_add_cmd_element(pdev, &ce_vaddr,
  3817. (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
  3818. 0, NULL);
  3819. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
  3820. 0, &pcl_info->auth_bytecount);
  3821. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);
  3822. key_reg = SHA_HMAC_KEY_SIZE/sizeof(uint32_t);
  3823. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_KEY0_REG, 0,
  3824. &pcl_info->auth_key);
  3825. for (i = 1; i < key_reg; i++)
  3826. qce_add_cmd_element(pdev, &ce_vaddr,
  3827. (CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)), 0, NULL);
  3828. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
  3829. &pcl_info->seg_size);
  3830. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
  3831. &pcl_info->encr_seg_cfg);
  3832. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
  3833. &pcl_info->encr_seg_size);
  3834. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
  3835. &pcl_info->encr_seg_start);
	if (sha1)
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
			pdev->reg.auth_cfg_aead_sha1_hmac,
			&pcl_info->auth_seg_cfg);
	else
		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
			pdev->reg.auth_cfg_aead_sha256_hmac,
			&pcl_info->auth_seg_cfg);
  3850. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
  3851. &pcl_info->auth_seg_size);
  3852. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
  3853. &pcl_info->auth_seg_start);
  3854. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
  3855. pdev->reg.crypto_cfg_le, &pcl_info->crypto_cfg_le);
  3856. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
  3857. ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
  3858. (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
  3859. pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
  3860. *pvaddr = (unsigned char *) ce_vaddr;
  3861. return 0;
  3862. }
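/*
 * Build the command list for AES-CCM AEAD operations, with either a
 * 128-bit or a 256-bit key depending on key_128.
 */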
  3863. static int _setup_aead_ccm_cmdlistptrs(struct qce_device *pdev, int cri_index,
  3864. unsigned char **pvaddr, bool key_128)
  3865. {
  3866. struct sps_command_element *ce_vaddr;
  3867. uintptr_t ce_vaddr_start;
  3868. struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_request_info
  3869. [cri_index].ce_sps.cmdlistptr;
  3870. struct qce_cmdlist_info *pcl_info = NULL;
  3871. int i = 0;
  3872. uint32_t encr_cfg = 0;
  3873. uint32_t auth_cfg = 0;
  3874. uint32_t key_reg = 0;
  3875. *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
  3876. pdev->ce_bam_info.ce_burst_size);
  3877. ce_vaddr_start = (uintptr_t)(*pvaddr);
  3878. ce_vaddr = (struct sps_command_element *)(*pvaddr);
	/*
	 * Designate chunks of the allocated memory to the various
	 * command list pointers for AEAD CCM operations defined in
	 * the qce_cmdlistptr_ops structure.
	 */
  3884. if (key_128) {
  3885. cmdlistptr->aead_aes_128_ccm.cmdlist =
  3886. (uintptr_t)ce_vaddr;
  3887. pcl_info = &(cmdlistptr->aead_aes_128_ccm);
  3888. auth_cfg = pdev->reg.auth_cfg_aes_ccm_128;
  3889. encr_cfg = pdev->reg.encr_cfg_aes_ccm_128;
  3890. key_reg = 4;
  3891. } else {
  3892. cmdlistptr->aead_aes_256_ccm.cmdlist =
  3893. (uintptr_t)ce_vaddr;
  3894. pcl_info = &(cmdlistptr->aead_aes_256_ccm);
  3895. auth_cfg = pdev->reg.auth_cfg_aes_ccm_256;
  3896. encr_cfg = pdev->reg.encr_cfg_aes_ccm_256;
  3897. key_reg = 8;
  3898. }
  3899. /* clear status register */
  3900. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
  3901. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
  3902. pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
  3903. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0, NULL);
  3904. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
  3905. NULL);
  3906. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
  3907. &pcl_info->seg_size);
  3908. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG,
  3909. encr_cfg, &pcl_info->encr_seg_cfg);
  3910. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
  3911. &pcl_info->encr_seg_size);
  3912. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
  3913. &pcl_info->encr_seg_start);
  3914. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG,
  3915. pdev->reg.encr_cntr_mask_3, &pcl_info->encr_mask_3);
  3916. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG0,
  3917. pdev->reg.encr_cntr_mask_2, &pcl_info->encr_mask_2);
  3918. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG1,
  3919. pdev->reg.encr_cntr_mask_1, &pcl_info->encr_mask_1);
  3920. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG2,
  3921. pdev->reg.encr_cntr_mask_0, &pcl_info->encr_mask_0);
  3922. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
  3923. auth_cfg, &pcl_info->auth_seg_cfg);
  3924. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
  3925. &pcl_info->auth_seg_size);
  3926. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
  3927. &pcl_info->auth_seg_start);
  3928. /* reset auth iv, bytecount and key registers */
  3929. for (i = 0; i < 8; i++)
  3930. qce_add_cmd_element(pdev, &ce_vaddr,
  3931. (CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)),
  3932. 0, NULL);
  3933. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
  3934. 0, NULL);
  3935. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG,
  3936. 0, NULL);
  3937. for (i = 0; i < 16; i++)
  3938. qce_add_cmd_element(pdev, &ce_vaddr,
  3939. (CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)),
  3940. 0, NULL);
  3941. /* set auth key */
  3942. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_KEY0_REG, 0,
  3943. &pcl_info->auth_key);
  3944. for (i = 1; i < key_reg; i++)
  3945. qce_add_cmd_element(pdev, &ce_vaddr,
  3946. (CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)),
  3947. 0, NULL);
  3948. /* set NONCE info */
  3949. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_INFO_NONCE0_REG, 0,
  3950. &pcl_info->auth_nonce_info);
  3951. for (i = 1; i < 4; i++)
  3952. qce_add_cmd_element(pdev, &ce_vaddr,
  3953. (CRYPTO_AUTH_INFO_NONCE0_REG +
  3954. i * sizeof(uint32_t)), 0, NULL);
  3955. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
  3956. &pcl_info->encr_key);
  3957. for (i = 1; i < key_reg; i++)
  3958. qce_add_cmd_element(pdev, &ce_vaddr,
  3959. (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
  3960. 0, NULL);
  3961. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
  3962. &pcl_info->encr_cntr_iv);
  3963. for (i = 1; i < 4; i++)
  3964. qce_add_cmd_element(pdev, &ce_vaddr,
  3965. (CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
  3966. 0, NULL);
  3967. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_CCM_INT_CNTR0_REG, 0,
  3968. &pcl_info->encr_ccm_cntr_iv);
  3969. for (i = 1; i < 4; i++)
  3970. qce_add_cmd_element(pdev, &ce_vaddr,
  3971. (CRYPTO_ENCR_CCM_INT_CNTR0_REG + i * sizeof(uint32_t)),
  3972. 0, NULL);
  3973. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
  3974. pdev->reg.crypto_cfg_le, &pcl_info->crypto_cfg_le);
  3975. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
  3976. ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
  3977. (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
  3978. pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
  3979. *pvaddr = (unsigned char *) ce_vaddr;
  3980. return 0;
  3981. }
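/*
 * Build the command list for OTA f8 (ciphering) requests using either
 * the Kasumi or the Snow3G algorithm.
 */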
  3982. static int _setup_f8_cmdlistptrs(struct qce_device *pdev, int cri_index,
  3983. unsigned char **pvaddr, enum qce_ota_algo_enum alg)
  3984. {
  3985. struct sps_command_element *ce_vaddr;
  3986. uintptr_t ce_vaddr_start;
  3987. struct qce_cmdlistptr_ops *cmdlistptr;
  3988. struct qce_cmdlist_info *pcl_info = NULL;
  3989. int i = 0;
  3990. uint32_t encr_cfg = 0;
  3991. uint32_t key_reg = 4;
  3992. cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
  3993. *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
  3994. pdev->ce_bam_info.ce_burst_size);
  3995. ce_vaddr = (struct sps_command_element *)(*pvaddr);
  3996. ce_vaddr_start = (uintptr_t)(*pvaddr);
	/*
	 * Designate chunks of the allocated memory to the various
	 * command list pointers for the f8 cipher algorithm defined
	 * in the qce_cmdlistptr_ops structure.
	 */
  4002. switch (alg) {
  4003. case QCE_OTA_ALGO_KASUMI:
  4004. cmdlistptr->f8_kasumi.cmdlist = (uintptr_t)ce_vaddr;
  4005. pcl_info = &(cmdlistptr->f8_kasumi);
  4006. encr_cfg = pdev->reg.encr_cfg_kasumi;
  4007. break;
  4008. case QCE_OTA_ALGO_SNOW3G:
  4009. default:
  4010. cmdlistptr->f8_snow3g.cmdlist = (uintptr_t)ce_vaddr;
  4011. pcl_info = &(cmdlistptr->f8_snow3g);
  4012. encr_cfg = pdev->reg.encr_cfg_snow3g;
  4013. break;
  4014. }
  4015. /* clear status register */
  4016. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
  4017. 0, NULL);
  4018. /* set config to big endian */
  4019. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
  4020. pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
  4021. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
  4022. &pcl_info->seg_size);
  4023. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
  4024. &pcl_info->encr_seg_cfg);
  4025. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
  4026. &pcl_info->encr_seg_size);
  4027. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
  4028. &pcl_info->encr_seg_start);
  4029. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
  4030. &pcl_info->auth_seg_cfg);
  4031. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
  4032. 0, &pcl_info->auth_seg_size);
  4033. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG,
  4034. 0, &pcl_info->auth_seg_start);
  4035. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
  4036. &pcl_info->encr_key);
  4037. for (i = 1; i < key_reg; i++)
  4038. qce_add_cmd_element(pdev, &ce_vaddr,
  4039. (CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
  4040. 0, NULL);
  4041. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
  4042. &pcl_info->encr_cntr_iv);
  4043. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR1_IV1_REG, 0,
  4044. NULL);
  4045. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
  4046. pdev->reg.crypto_cfg_le, &pcl_info->crypto_cfg_le);
  4047. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
  4048. ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
  4049. (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
  4050. pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
  4051. *pvaddr = (unsigned char *) ce_vaddr;
  4052. return 0;
  4053. }
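/*
 * Build the command list for OTA f9 (authentication) requests using
 * either the Kasumi or the Snow3G algorithm.
 */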
  4054. static int _setup_f9_cmdlistptrs(struct qce_device *pdev, int cri_index,
  4055. unsigned char **pvaddr, enum qce_ota_algo_enum alg)
  4056. {
  4057. struct sps_command_element *ce_vaddr;
  4058. uintptr_t ce_vaddr_start;
  4059. struct qce_cmdlistptr_ops *cmdlistptr;
  4060. struct qce_cmdlist_info *pcl_info = NULL;
  4061. int i = 0;
  4062. uint32_t auth_cfg = 0;
  4063. uint32_t iv_reg = 0;
  4064. cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
  4065. *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
  4066. pdev->ce_bam_info.ce_burst_size);
  4067. ce_vaddr_start = (uintptr_t)(*pvaddr);
  4068. ce_vaddr = (struct sps_command_element *)(*pvaddr);
	/*
	 * Designate chunks of the allocated memory to the various
	 * command list pointers for authentication (f9) operations
	 * defined in the qce_cmdlistptr_ops structure.
	 */
  4074. switch (alg) {
  4075. case QCE_OTA_ALGO_KASUMI:
  4076. cmdlistptr->f9_kasumi.cmdlist = (uintptr_t)ce_vaddr;
  4077. pcl_info = &(cmdlistptr->f9_kasumi);
  4078. auth_cfg = pdev->reg.auth_cfg_kasumi;
  4079. break;
  4080. case QCE_OTA_ALGO_SNOW3G:
  4081. default:
  4082. cmdlistptr->f9_snow3g.cmdlist = (uintptr_t)ce_vaddr;
  4083. pcl_info = &(cmdlistptr->f9_snow3g);
  4084. auth_cfg = pdev->reg.auth_cfg_snow3g;
  4085. }
  4086. /* clear status register */
  4087. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
  4088. 0, NULL);
  4089. /* set config to big endian */
  4090. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
  4091. pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
  4092. iv_reg = 5;
  4093. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
  4094. &pcl_info->seg_size);
  4095. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0,
  4096. &pcl_info->encr_seg_cfg);
  4097. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
  4098. auth_cfg, &pcl_info->auth_seg_cfg);
  4099. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
  4100. &pcl_info->auth_seg_size);
  4101. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
  4102. &pcl_info->auth_seg_start);
  4103. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
  4104. &pcl_info->auth_iv);
  4105. for (i = 1; i < iv_reg; i++) {
  4106. qce_add_cmd_element(pdev, &ce_vaddr,
  4107. (CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
  4108. 0, NULL);
  4109. }
  4110. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
  4111. 0, &pcl_info->auth_bytecount);
  4112. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);
  4113. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
  4114. pdev->reg.crypto_cfg_le, &pcl_info->crypto_cfg_le);
  4115. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
  4116. ((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
  4117. (1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
  4118. pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
  4119. *pvaddr = (unsigned char *) ce_vaddr;
  4120. return 0;
  4121. }
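/*
 * Build the single-element command list that resets CRYPTO_CONFIG_REG;
 * it is issued with SPS_IOVEC_FLAG_UNLOCK to unlock all pipes.
 */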
  4122. static int _setup_unlock_pipe_cmdlistptrs(struct qce_device *pdev,
  4123. int cri_index, unsigned char **pvaddr)
  4124. {
  4125. struct sps_command_element *ce_vaddr;
  4126. uintptr_t ce_vaddr_start = (uintptr_t)(*pvaddr);
  4127. struct qce_cmdlistptr_ops *cmdlistptr;
  4128. struct qce_cmdlist_info *pcl_info = NULL;
  4129. cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
  4130. *pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
  4131. pdev->ce_bam_info.ce_burst_size);
  4132. ce_vaddr = (struct sps_command_element *)(*pvaddr);
  4133. cmdlistptr->unlock_all_pipes.cmdlist = (uintptr_t)ce_vaddr;
  4134. pcl_info = &(cmdlistptr->unlock_all_pipes);
	/*
	 * Designate a chunk of the allocated memory to the command
	 * list used to unlock all pipes.
	 */
  4139. qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
  4140. CRYPTO_CONFIG_RESET, NULL);
  4141. pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
  4142. *pvaddr = (unsigned char *) ce_vaddr;
  4143. return 0;
  4144. }
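/*
 * Populate every per-request command list (ciphers, hashes, AEAD, CCM,
 * f8/f9 and pipe unlock) for request slot cri_index, consuming memory
 * from *pvaddr.
 */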
  4145. static int qce_setup_cmdlistptrs(struct qce_device *pdev, int cri_index,
  4146. unsigned char **pvaddr)
  4147. {
  4148. struct sps_command_element *ce_vaddr =
  4149. (struct sps_command_element *)(*pvaddr);
	/*
	 * Designate chunks of the allocated memory to the various
	 * command list pointers for the operations defined in the
	 * qce_cmdlistptr_ops structure.
	 */
  4155. ce_vaddr =
  4156. (struct sps_command_element *)ALIGN(((uintptr_t) ce_vaddr),
  4157. pdev->ce_bam_info.ce_burst_size);
  4158. *pvaddr = (unsigned char *) ce_vaddr;
  4159. _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CBC,
  4160. true);
  4161. _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CTR,
  4162. true);
  4163. _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_ECB,
  4164. true);
  4165. _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_XTS,
  4166. true);
  4167. _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CBC,
  4168. false);
  4169. _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CTR,
  4170. false);
  4171. _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_ECB,
  4172. false);
  4173. _setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_XTS,
  4174. false);
  4175. _setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
  4176. true);
  4177. _setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
  4178. false);
  4179. _setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
  4180. true);
  4181. _setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
  4182. false);
  4183. _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA1,
  4184. false);
  4185. _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA256,
  4186. false);
  4187. _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA1_HMAC,
  4188. false);
  4189. _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA256_HMAC,
  4190. false);
  4191. _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_AES_CMAC,
  4192. true);
  4193. _setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_AES_CMAC,
  4194. false);
  4195. _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
  4196. QCE_MODE_CBC, DES_KEY_SIZE, true);
  4197. _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
  4198. QCE_MODE_CBC, DES3_EDE_KEY_SIZE, true);
  4199. _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
  4200. QCE_MODE_CBC, AES128_KEY_SIZE, true);
  4201. _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
  4202. QCE_MODE_CBC, AES256_KEY_SIZE, true);
  4203. _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
  4204. QCE_MODE_CBC, DES_KEY_SIZE, false);
  4205. _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
  4206. QCE_MODE_CBC, DES3_EDE_KEY_SIZE, false);
  4207. _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
  4208. QCE_MODE_CBC, AES128_KEY_SIZE, false);
  4209. _setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
  4210. QCE_MODE_CBC, AES256_KEY_SIZE, false);
  4211. _setup_cipher_null_cmdlistptrs(pdev, cri_index, pvaddr);
  4212. _setup_aead_ccm_cmdlistptrs(pdev, cri_index, pvaddr, true);
  4213. _setup_aead_ccm_cmdlistptrs(pdev, cri_index, pvaddr, false);
  4214. _setup_f8_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_KASUMI);
  4215. _setup_f8_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_SNOW3G);
  4216. _setup_f9_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_KASUMI);
  4217. _setup_f9_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_SNOW3G);
  4218. _setup_unlock_pipe_cmdlistptrs(pdev, cri_index, pvaddr);
  4219. return 0;
  4220. }
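/*
 * Carve the coherent and iovec memory pools into per-request regions:
 * input/output iovec arrays, command lists (when command descriptors
 * are supported), result dump buffers and the ignore buffer. Panics if
 * the pre-allocated pools turn out to be too small.
 */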
  4221. static int qce_setup_ce_sps_data(struct qce_device *pce_dev)
  4222. {
  4223. unsigned char *vaddr;
  4224. int i;
  4225. unsigned char *iovec_vaddr;
  4226. int iovec_memsize;
  4227. vaddr = pce_dev->coh_vmem;
  4228. vaddr = (unsigned char *)ALIGN(((uintptr_t)vaddr),
  4229. pce_dev->ce_bam_info.ce_burst_size);
  4230. iovec_vaddr = pce_dev->iovec_vmem;
  4231. iovec_memsize = pce_dev->iovec_memsize;
  4232. for (i = 0; i < MAX_QCE_ALLOC_BAM_REQ; i++) {
  4233. /* Allow for 256 descriptor (cmd and data) entries per pipe */
  4234. pce_dev->ce_request_info[i].ce_sps.in_transfer.iovec =
  4235. (struct sps_iovec *)iovec_vaddr;
  4236. pce_dev->ce_request_info[i].ce_sps.in_transfer.iovec_phys =
  4237. virt_to_phys(
  4238. pce_dev->ce_request_info[i].ce_sps.in_transfer.iovec);
  4239. iovec_vaddr += TOTAL_IOVEC_SPACE_PER_PIPE;
  4240. iovec_memsize -= TOTAL_IOVEC_SPACE_PER_PIPE;
  4241. pce_dev->ce_request_info[i].ce_sps.out_transfer.iovec =
  4242. (struct sps_iovec *)iovec_vaddr;
  4243. pce_dev->ce_request_info[i].ce_sps.out_transfer.iovec_phys =
  4244. virt_to_phys(
  4245. pce_dev->ce_request_info[i].ce_sps.out_transfer.iovec);
  4246. iovec_vaddr += TOTAL_IOVEC_SPACE_PER_PIPE;
  4247. iovec_memsize -= TOTAL_IOVEC_SPACE_PER_PIPE;
  4248. if (pce_dev->support_cmd_dscr)
  4249. qce_setup_cmdlistptrs(pce_dev, i, &vaddr);
  4250. vaddr = (unsigned char *)ALIGN(((uintptr_t)vaddr),
  4251. pce_dev->ce_bam_info.ce_burst_size);
  4252. pce_dev->ce_request_info[i].ce_sps.result_dump =
  4253. (uintptr_t)vaddr;
  4254. pce_dev->ce_request_info[i].ce_sps.result_dump_phy =
  4255. GET_PHYS_ADDR((uintptr_t)vaddr);
  4256. pce_dev->ce_request_info[i].ce_sps.result =
  4257. (struct ce_result_dump_format *)vaddr;
  4258. vaddr += CRYPTO_RESULT_DUMP_SIZE;
  4259. pce_dev->ce_request_info[i].ce_sps.result_dump_null =
  4260. (uintptr_t)vaddr;
  4261. pce_dev->ce_request_info[i].ce_sps.result_dump_null_phy =
  4262. GET_PHYS_ADDR((uintptr_t)vaddr);
  4263. pce_dev->ce_request_info[i].ce_sps.result_null =
  4264. (struct ce_result_dump_format *)vaddr;
  4265. vaddr += CRYPTO_RESULT_DUMP_SIZE;
  4266. pce_dev->ce_request_info[i].ce_sps.ignore_buffer =
  4267. (uintptr_t)vaddr;
  4268. vaddr += pce_dev->ce_bam_info.ce_burst_size * 2;
  4269. }
	if ((vaddr - pce_dev->coh_vmem) > pce_dev->memsize ||
			iovec_memsize < 0)
		panic("qce50: Not enough coherent memory. Allocated %x, need %lx\n",
			pce_dev->memsize, (uintptr_t)vaddr -
			(uintptr_t)pce_dev->coh_vmem);
  4275. return 0;
  4276. }
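/*
 * Pre-compute the static configuration register values (crypto_cfg,
 * the encr_cfg and auth_cfg variants, and the counter masks) for all
 * supported algorithms, used later when building command lists and
 * programming requests.
 */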
  4277. static int qce_init_ce_cfg_val(struct qce_device *pce_dev)
  4278. {
  4279. uint32_t pipe_pair =
  4280. pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_NONE];
  4281. pce_dev->reg.crypto_cfg_be = qce_get_config_be(pce_dev, pipe_pair);
  4282. pce_dev->reg.crypto_cfg_le =
  4283. (pce_dev->reg.crypto_cfg_be | CRYPTO_LITTLE_ENDIAN_MASK);
  4284. /* Initialize encr_cfg register for AES alg */
  4285. pce_dev->reg.encr_cfg_aes_cbc_128 =
  4286. (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
  4287. (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
  4288. (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
  4289. pce_dev->reg.encr_cfg_aes_cbc_256 =
  4290. (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
  4291. (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
  4292. (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
  4293. pce_dev->reg.encr_cfg_aes_ctr_128 =
  4294. (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
  4295. (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
  4296. (CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
  4297. pce_dev->reg.encr_cfg_aes_ctr_256 =
  4298. (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
  4299. (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
  4300. (CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
  4301. pce_dev->reg.encr_cfg_aes_xts_128 =
  4302. (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
  4303. (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
  4304. (CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE);
  4305. pce_dev->reg.encr_cfg_aes_xts_256 =
  4306. (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
  4307. (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
  4308. (CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE);
  4309. pce_dev->reg.encr_cfg_aes_ecb_128 =
  4310. (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
  4311. (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
  4312. (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
  4313. pce_dev->reg.encr_cfg_aes_ecb_256 =
  4314. (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
  4315. (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
  4316. (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
  4317. pce_dev->reg.encr_cfg_aes_ccm_128 =
  4318. (CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
  4319. (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
  4320. (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE)|
  4321. (CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM);
  4322. pce_dev->reg.encr_cfg_aes_ccm_256 =
  4323. (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
  4324. (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
  4325. (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE) |
  4326. (CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM);
  4327. /* Initialize encr_cfg register for DES alg */
  4328. pce_dev->reg.encr_cfg_des_ecb =
  4329. (CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) |
  4330. (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
  4331. (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
  4332. pce_dev->reg.encr_cfg_des_cbc =
  4333. (CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) |
  4334. (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
  4335. (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
  4336. pce_dev->reg.encr_cfg_3des_ecb =
  4337. (CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) |
  4338. (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
  4339. (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
  4340. pce_dev->reg.encr_cfg_3des_cbc =
  4341. (CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) |
  4342. (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
  4343. (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
  4344. /* Initialize encr_cfg register for kasumi/snow3g alg */
  4345. pce_dev->reg.encr_cfg_kasumi =
  4346. (CRYPTO_ENCR_ALG_KASUMI << CRYPTO_ENCR_ALG);
  4347. pce_dev->reg.encr_cfg_snow3g =
  4348. (CRYPTO_ENCR_ALG_SNOW_3G << CRYPTO_ENCR_ALG);
  4349. /* Initialize auth_cfg register for CMAC alg */
  4350. pce_dev->reg.auth_cfg_cmac_128 =
  4351. (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
  4352. (CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE)|
  4353. (CRYPTO_AUTH_SIZE_ENUM_16_BYTES << CRYPTO_AUTH_SIZE) |
  4354. (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
  4355. (CRYPTO_AUTH_KEY_SZ_AES128 << CRYPTO_AUTH_KEY_SIZE);
  4356. pce_dev->reg.auth_cfg_cmac_256 =
  4357. (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
  4358. (CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE)|
  4359. (CRYPTO_AUTH_SIZE_ENUM_16_BYTES << CRYPTO_AUTH_SIZE) |
  4360. (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
  4361. (CRYPTO_AUTH_KEY_SZ_AES256 << CRYPTO_AUTH_KEY_SIZE);
  4362. /* Initialize auth_cfg register for HMAC alg */
  4363. pce_dev->reg.auth_cfg_hmac_sha1 =
  4364. (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
  4365. (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
  4366. (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
  4367. (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
  4368. pce_dev->reg.auth_cfg_hmac_sha256 =
  4369. (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
  4370. (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
  4371. (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
  4372. (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
  4373. /* Initialize auth_cfg register for SHA1/256 alg */
  4374. pce_dev->reg.auth_cfg_sha1 =
  4375. (CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE)|
  4376. (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
  4377. (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
  4378. (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
  4379. pce_dev->reg.auth_cfg_sha256 =
  4380. (CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE)|
  4381. (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
  4382. (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
  4383. (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
  4384. /* Initialize auth_cfg register for AEAD alg */
  4385. pce_dev->reg.auth_cfg_aead_sha1_hmac =
  4386. (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
  4387. (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
  4388. (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
  4389. (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST);
  4390. pce_dev->reg.auth_cfg_aead_sha256_hmac =
  4391. (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
  4392. (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
  4393. (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
  4394. (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST);
  4395. pce_dev->reg.auth_cfg_aes_ccm_128 =
  4396. (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
  4397. (CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE)|
  4398. (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
  4399. (CRYPTO_AUTH_KEY_SZ_AES128 << CRYPTO_AUTH_KEY_SIZE) |
  4400. ((MAX_NONCE/sizeof(uint32_t)) << CRYPTO_AUTH_NONCE_NUM_WORDS);
  4401. pce_dev->reg.auth_cfg_aes_ccm_128 &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
  4402. pce_dev->reg.auth_cfg_aes_ccm_256 =
  4403. (1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
  4404. (CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE)|
  4405. (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
  4406. (CRYPTO_AUTH_KEY_SZ_AES256 << CRYPTO_AUTH_KEY_SIZE) |
  4407. ((MAX_NONCE/sizeof(uint32_t)) << CRYPTO_AUTH_NONCE_NUM_WORDS);
  4408. pce_dev->reg.auth_cfg_aes_ccm_256 &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
  4409. /* Initialize auth_cfg register for kasumi/snow3g */
  4410. pce_dev->reg.auth_cfg_kasumi =
  4411. (CRYPTO_AUTH_ALG_KASUMI << CRYPTO_AUTH_ALG) |
  4412. BIT(CRYPTO_FIRST) | BIT(CRYPTO_LAST);
  4413. pce_dev->reg.auth_cfg_snow3g =
  4414. (CRYPTO_AUTH_ALG_SNOW3G << CRYPTO_AUTH_ALG) |
  4415. BIT(CRYPTO_FIRST) | BIT(CRYPTO_LAST);
  4416. /* Initialize IV counter mask values */
  4417. pce_dev->reg.encr_cntr_mask_3 = 0xFFFFFFFF;
  4418. pce_dev->reg.encr_cntr_mask_2 = 0xFFFFFFFF;
  4419. pce_dev->reg.encr_cntr_mask_1 = 0xFFFFFFFF;
  4420. pce_dev->reg.encr_cntr_mask_0 = 0xFFFFFFFF;
  4421. return 0;
  4422. }
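/*
 * Software get-around for CCM decryption: on targets that need it
 * (no_get_around set, no_ccm_mac_status_get_around clear), queue a
 * null-cipher command followed by one dummy burst on the input pipe.
 */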
  4423. static void _qce_ccm_get_around_input(struct qce_device *pce_dev,
  4424. struct ce_request_info *preq_info, enum qce_cipher_dir_enum dir)
  4425. {
  4426. struct qce_cmdlist_info *cmdlistinfo;
  4427. struct ce_sps_data *pce_sps_data;
  4428. pce_sps_data = &preq_info->ce_sps;
  4429. if ((dir == QCE_DECRYPT) && pce_dev->no_get_around &&
  4430. !(pce_dev->no_ccm_mac_status_get_around)) {
  4431. cmdlistinfo = &pce_sps_data->cmdlistptr.cipher_null;
  4432. _qce_sps_add_cmd(pce_dev, 0, cmdlistinfo,
  4433. &pce_sps_data->in_transfer);
  4434. _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
  4435. pce_dev->ce_bam_info.ce_burst_size,
  4436. &pce_sps_data->in_transfer);
  4437. _qce_set_flag(&pce_sps_data->in_transfer,
  4438. SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD);
  4439. }
  4440. }
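/*
 * Output-pipe counterpart of the CCM decryption get-around: queue the
 * ignore buffer and the null result dump on the output pipe.
 */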
  4441. static void _qce_ccm_get_around_output(struct qce_device *pce_dev,
  4442. struct ce_request_info *preq_info, enum qce_cipher_dir_enum dir)
  4443. {
  4444. struct ce_sps_data *pce_sps_data;
  4445. pce_sps_data = &preq_info->ce_sps;
  4446. if ((dir == QCE_DECRYPT) && pce_dev->no_get_around &&
  4447. !(pce_dev->no_ccm_mac_status_get_around)) {
  4448. _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
  4449. pce_dev->ce_bam_info.ce_burst_size,
  4450. &pce_sps_data->out_transfer);
  4451. _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump_null),
  4452. CRYPTO_RESULT_DUMP_SIZE, &pce_sps_data->out_transfer);
  4453. }
  4454. }
  4455. /* QCE_DUMMY_REQ */
  4456. static void qce_dummy_complete(void *cookie, unsigned char *digest,
  4457. unsigned char *authdata, int ret)
  4458. {
  4459. if (!cookie)
  4460. pr_err("invalid cookie\n");
  4461. }
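/*
 * Issue an internal dummy SHA request (qce_process_sha_req() with a
 * NULL sreq) using the reserved DUMMY_REQ_INDEX slot.
 */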
  4462. static int qce_dummy_req(struct qce_device *pce_dev)
  4463. {
  4464. int ret = 0;
  4465. if (atomic_xchg(
  4466. &pce_dev->ce_request_info[DUMMY_REQ_INDEX].in_use, true))
  4467. return -EBUSY;
  4468. ret = qce_process_sha_req(pce_dev, NULL);
  4469. pce_dev->qce_stats.no_of_dummy_reqs++;
  4470. return ret;
  4471. }
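/*
 * Claim ownership of the device and decide whether this request should
 * raise a completion interrupt immediately or be batched: in interrupt
 * mode the driver switches to bunch mode once MAX_BUNCH_MODE_REQ
 * requests are queued; in bunch mode interrupts are rate-limited by a
 * request-length based cadence.
 */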
  4472. static int select_mode(struct qce_device *pce_dev,
  4473. struct ce_request_info *preq_info)
  4474. {
  4475. struct ce_sps_data *pce_sps_data = &preq_info->ce_sps;
  4476. unsigned int no_of_queued_req;
  4477. unsigned int cadence;
  4478. if (!pce_dev->no_get_around) {
  4479. _qce_set_flag(&pce_sps_data->out_transfer, SPS_IOVEC_FLAG_INT);
  4480. return 0;
  4481. }
  4482. /*
  4483. * claim ownership of device
  4484. */
  4485. again:
  4486. if (cmpxchg(&pce_dev->owner, QCE_OWNER_NONE, QCE_OWNER_CLIENT)
  4487. != QCE_OWNER_NONE) {
  4488. ndelay(40);
  4489. goto again;
  4490. }
  4491. no_of_queued_req = atomic_inc_return(&pce_dev->no_of_queued_req);
  4492. if (pce_dev->mode == IN_INTERRUPT_MODE) {
  4493. if (no_of_queued_req >= MAX_BUNCH_MODE_REQ) {
  4494. pce_dev->mode = IN_BUNCH_MODE;
  4495. pr_debug("pcedev %d mode switch to BUNCH\n",
  4496. pce_dev->dev_no);
  4497. _qce_set_flag(&pce_sps_data->out_transfer,
  4498. SPS_IOVEC_FLAG_INT);
  4499. pce_dev->intr_cadence = 0;
  4500. atomic_set(&pce_dev->bunch_cmd_seq, 1);
  4501. atomic_set(&pce_dev->last_intr_seq, 1);
  4502. mod_timer(&(pce_dev->timer),
  4503. (jiffies + DELAY_IN_JIFFIES));
  4504. } else {
  4505. _qce_set_flag(&pce_sps_data->out_transfer,
  4506. SPS_IOVEC_FLAG_INT);
  4507. }
  4508. } else {
  4509. pce_dev->intr_cadence++;
  4510. cadence = (preq_info->req_len >> 7) + 1;
  4511. if (cadence > SET_INTR_AT_REQ)
  4512. cadence = SET_INTR_AT_REQ;
  4513. if (pce_dev->intr_cadence < cadence || ((pce_dev->intr_cadence
  4514. == cadence) && pce_dev->cadence_flag))
  4515. atomic_inc(&pce_dev->bunch_cmd_seq);
  4516. else {
  4517. _qce_set_flag(&pce_sps_data->out_transfer,
  4518. SPS_IOVEC_FLAG_INT);
  4519. pce_dev->intr_cadence = 0;
  4520. atomic_set(&pce_dev->bunch_cmd_seq, 0);
  4521. atomic_set(&pce_dev->last_intr_seq, 0);
  4522. pce_dev->cadence_flag = !pce_dev->cadence_flag;
  4523. }
  4524. }
  4525. return 0;
  4526. }
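/*
 * Submit an AES-CCM AEAD request: map the associated data, source and
 * destination scatterlists, program the engine (via command list or
 * direct register writes) and queue the BAM transfers, including the
 * CCM decrypt get-around descriptors.
 */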
  4527. static int _qce_aead_ccm_req(void *handle, struct qce_req *q_req)
  4528. {
  4529. int rc = 0;
  4530. struct qce_device *pce_dev = (struct qce_device *) handle;
  4531. struct aead_request *areq = (struct aead_request *) q_req->areq;
  4532. uint32_t authsize = q_req->authsize;
  4533. uint32_t totallen_in, out_len;
  4534. uint32_t hw_pad_out = 0;
  4535. int ce_burst_size;
  4536. struct qce_cmdlist_info *cmdlistinfo = NULL;
  4537. int req_info = -1;
  4538. struct ce_request_info *preq_info;
  4539. struct ce_sps_data *pce_sps_data;
  4540. req_info = qce_alloc_req_info(pce_dev);
  4541. if (req_info < 0)
  4542. return -EBUSY;
  4543. q_req->current_req_info = req_info;
  4544. preq_info = &pce_dev->ce_request_info[req_info];
  4545. pce_sps_data = &preq_info->ce_sps;
  4546. ce_burst_size = pce_dev->ce_bam_info.ce_burst_size;
  4547. totallen_in = areq->cryptlen + q_req->assoclen;
  4548. if (q_req->dir == QCE_ENCRYPT) {
  4549. q_req->cryptlen = areq->cryptlen;
  4550. out_len = areq->cryptlen + authsize;
  4551. hw_pad_out = ALIGN(authsize, ce_burst_size) - authsize;
  4552. } else {
  4553. q_req->cryptlen = areq->cryptlen - authsize;
  4554. out_len = q_req->cryptlen;
  4555. hw_pad_out = authsize;
  4556. }
	/*
	 * For crypto 5.0, which has a burst size alignment requirement
	 * for data descriptors, the agent above (qcrypto) prepares the
	 * src scatter list with memory starting with the associated
	 * data, followed by the data stream to be ciphered.
	 * The destination scatter list points to the same data area
	 * as the source.
	 */
  4566. if (pce_dev->ce_bam_info.minor_version == 0)
  4567. preq_info->src_nents = count_sg(areq->src, totallen_in);
  4568. else
  4569. preq_info->src_nents = count_sg(areq->src, areq->cryptlen +
  4570. areq->assoclen);
  4571. if (q_req->assoclen) {
  4572. preq_info->assoc_nents = count_sg(q_req->asg, q_req->assoclen);
  4573. /* formatted associated data input */
  4574. qce_dma_map_sg(pce_dev->pdev, q_req->asg,
  4575. preq_info->assoc_nents, DMA_TO_DEVICE);
  4576. preq_info->asg = q_req->asg;
  4577. } else {
  4578. preq_info->assoc_nents = 0;
  4579. preq_info->asg = NULL;
  4580. }
  4581. /* cipher input */
  4582. qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
  4583. (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
  4584. DMA_TO_DEVICE);
  4585. /* cipher + mac output for encryption */
  4586. if (areq->src != areq->dst) {
		/*
		 * The destination scatter list points to the same data
		 * area as the source. Note that the associated data is
		 * passed through at the beginning of the destination
		 * area.
		 */
  4593. preq_info->dst_nents = count_sg(areq->dst,
  4594. out_len + areq->assoclen);
  4595. qce_dma_map_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
  4596. DMA_FROM_DEVICE);
  4597. } else {
  4598. preq_info->dst_nents = preq_info->src_nents;
  4599. }
  4600. if (pce_dev->support_cmd_dscr) {
  4601. cmdlistinfo = _ce_get_cipher_cmdlistinfo(pce_dev, req_info,
  4602. q_req);
  4603. if (cmdlistinfo == NULL) {
  4604. pr_err("Unsupported cipher algorithm %d, mode %d\n",
  4605. q_req->alg, q_req->mode);
  4606. qce_free_req_info(pce_dev, req_info, false);
  4607. return -EINVAL;
  4608. }
  4609. /* set up crypto device */
  4610. rc = _ce_setup_cipher(pce_dev, q_req, totallen_in,
  4611. q_req->assoclen, cmdlistinfo);
  4612. } else {
  4613. /* set up crypto device */
  4614. rc = _ce_setup_cipher_direct(pce_dev, q_req, totallen_in,
  4615. q_req->assoclen);
  4616. }
  4617. if (rc < 0)
  4618. goto bad;
  4619. preq_info->mode = q_req->mode;
  4620. /* setup for callback, and issue command to bam */
  4621. preq_info->areq = q_req->areq;
  4622. preq_info->qce_cb = q_req->qce_cb;
  4623. preq_info->dir = q_req->dir;
  4624. /* setup xfer type for producer callback handling */
  4625. preq_info->xfer_type = QCE_XFER_AEAD;
  4626. preq_info->req_len = totallen_in;
  4627. _qce_sps_iovec_count_init(pce_dev, req_info);
  4628. if (pce_dev->support_cmd_dscr && cmdlistinfo) {
  4629. rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK,
  4630. cmdlistinfo, &pce_sps_data->in_transfer);
  4631. if (rc)
  4632. goto bad;
  4633. }
  4634. if (pce_dev->ce_bam_info.minor_version == 0) {
  4635. goto bad;
  4636. } else {
  4637. if (q_req->assoclen) {
  4638. rc = _qce_sps_add_sg_data(pce_dev, q_req->asg,
  4639. q_req->assoclen, &pce_sps_data->in_transfer);
  4640. if (rc)
  4641. goto bad;
  4642. }
  4643. rc = _qce_sps_add_sg_data_off(pce_dev, areq->src, areq->cryptlen,
  4644. areq->assoclen,
  4645. &pce_sps_data->in_transfer);
  4646. if (rc)
  4647. goto bad;
  4648. _qce_set_flag(&pce_sps_data->in_transfer,
  4649. SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
  4650. _qce_ccm_get_around_input(pce_dev, preq_info, q_req->dir);
  4651. if (pce_dev->no_get_around) {
  4652. rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
  4653. &pce_sps_data->cmdlistptr.unlock_all_pipes,
  4654. &pce_sps_data->in_transfer);
  4655. if (rc)
  4656. goto bad;
  4657. }
  4658. /* Pass through to ignore associated data*/
  4659. rc = _qce_sps_add_data(
  4660. GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
  4661. q_req->assoclen,
  4662. &pce_sps_data->out_transfer);
  4663. if (rc)
  4664. goto bad;
  4665. rc = _qce_sps_add_sg_data_off(pce_dev, areq->dst, out_len,
  4666. areq->assoclen,
  4667. &pce_sps_data->out_transfer);
  4668. if (rc)
  4669. goto bad;
  4670. /* Pass through to ignore hw_pad (padding of the MAC data) */
  4671. rc = _qce_sps_add_data(
  4672. GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
  4673. hw_pad_out, &pce_sps_data->out_transfer);
  4674. if (rc)
  4675. goto bad;
  4676. if (pce_dev->no_get_around ||
  4677. totallen_in <= SPS_MAX_PKT_SIZE) {
  4678. rc = _qce_sps_add_data(
  4679. GET_PHYS_ADDR(pce_sps_data->result_dump),
  4680. CRYPTO_RESULT_DUMP_SIZE,
  4681. &pce_sps_data->out_transfer);
  4682. if (rc)
  4683. goto bad;
  4684. pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
  4685. } else {
  4686. pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
  4687. }
  4688. _qce_ccm_get_around_output(pce_dev, preq_info, q_req->dir);
  4689. select_mode(pce_dev, preq_info);
  4690. rc = _qce_sps_transfer(pce_dev, req_info);
  4691. cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
  4692. }
  4693. if (rc)
  4694. goto bad;
  4695. return 0;
  4696. bad:
  4697. if (preq_info->assoc_nents) {
  4698. qce_dma_unmap_sg(pce_dev->pdev, q_req->asg,
  4699. preq_info->assoc_nents, DMA_TO_DEVICE);
  4700. }
  4701. if (preq_info->src_nents) {
  4702. qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
  4703. (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
  4704. DMA_TO_DEVICE);
  4705. }
  4706. if (areq->src != areq->dst) {
  4707. qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
  4708. DMA_FROM_DEVICE);
  4709. }
  4710. qce_free_req_info(pce_dev, req_info, false);
  4711. return rc;
  4712. }
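/*
 * Disconnect the consumer and producer BAM pipes of every active pipe
 * pair before suspend.
 */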
  4713. static int _qce_suspend(void *handle)
  4714. {
  4715. struct qce_device *pce_dev = (struct qce_device *)handle;
  4716. struct sps_pipe *sps_pipe_info;
  4717. int i = 0;
  4718. if (handle == NULL)
  4719. return -ENODEV;
  4720. for (i = 0; i < QCE_OFFLOAD_OPER_LAST; i++) {
  4721. if (i == QCE_OFFLOAD_NONE && !(pce_dev->kernel_pipes_support))
  4722. continue;
  4723. else if ((i > 0) && !(pce_dev->offload_pipes_support))
  4724. break;
  4725. if (!pce_dev->ce_bam_info.pipe_pair_index[i])
  4726. continue;
  4727. sps_pipe_info = pce_dev->ce_bam_info.consumer[i].pipe;
  4728. sps_disconnect(sps_pipe_info);
  4729. sps_pipe_info = pce_dev->ce_bam_info.producer[i].pipe;
  4730. sps_disconnect(sps_pipe_info);
  4731. }
  4732. return 0;
  4733. }
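/*
 * Reconnect the consumer and producer BAM pipes of every active pipe
 * pair, re-register the producer event callbacks and re-enable clock
 * gating on resume.
 */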
  4734. static int _qce_resume(void *handle)
  4735. {
  4736. struct qce_device *pce_dev = (struct qce_device *)handle;
  4737. struct sps_pipe *sps_pipe_info;
  4738. struct sps_connect *sps_connect_info;
  4739. int rc, i;
  4740. rc = -ENODEV;
  4741. if (handle == NULL)
  4742. return rc;
  4743. for (i = 0; i < QCE_OFFLOAD_OPER_LAST; i++) {
  4744. if (i == QCE_OFFLOAD_NONE && !(pce_dev->kernel_pipes_support))
  4745. continue;
  4746. else if ((i > 0) && !(pce_dev->offload_pipes_support))
  4747. break;
  4748. if (!pce_dev->ce_bam_info.pipe_pair_index[i])
  4749. continue;
  4750. sps_pipe_info = pce_dev->ce_bam_info.consumer[i].pipe;
  4751. sps_connect_info = &pce_dev->ce_bam_info.consumer[i].connect;
  4752. memset(sps_connect_info->desc.base, 0x00,
  4753. sps_connect_info->desc.size);
  4754. rc = sps_connect(sps_pipe_info, sps_connect_info);
  4755. if (rc) {
  4756. pr_err("sps_connect() fail pipe=0x%lx, rc = %d\n",
  4757. (uintptr_t)sps_pipe_info, rc);
  4758. return rc;
  4759. }
  4760. sps_pipe_info = pce_dev->ce_bam_info.producer[i].pipe;
  4761. sps_connect_info = &pce_dev->ce_bam_info.producer[i].connect;
  4762. memset(sps_connect_info->desc.base, 0x00,
  4763. sps_connect_info->desc.size);
  4764. rc = sps_connect(sps_pipe_info, sps_connect_info);
  4765. if (rc)
  4766. pr_err("sps_connect() fail pipe=0x%lx, rc = %d\n",
  4767. (uintptr_t)sps_pipe_info, rc);
  4768. rc = sps_register_event(sps_pipe_info,
  4769. &pce_dev->ce_bam_info.producer[i].event);
  4770. if (rc)
  4771. pr_err("Producer cb registration failed rc = %d\n",
  4772. rc);
  4773. }
  4774. qce_enable_clock_gating(pce_dev);
  4775. return rc;
  4776. }
  4777. struct qce_pm_table qce_pm_table = {_qce_suspend, _qce_resume};
  4778. EXPORT_SYMBOL(qce_pm_table);
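/*
 * Submit an AEAD request. CCM mode is routed to _qce_aead_ccm_req();
 * all other modes are programmed here and queued on the BAM pipes.
 */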
  4779. int qce_aead_req(void *handle, struct qce_req *q_req)
  4780. {
  4781. struct qce_device *pce_dev = (struct qce_device *)handle;
  4782. struct aead_request *areq;
  4783. uint32_t authsize;
  4784. struct crypto_aead *aead;
  4785. uint32_t ivsize;
  4786. uint32_t totallen;
  4787. int rc = 0;
  4788. struct qce_cmdlist_info *cmdlistinfo = NULL;
  4789. int req_info = -1;
  4790. struct ce_sps_data *pce_sps_data;
  4791. struct ce_request_info *preq_info;
  4792. if (q_req->mode == QCE_MODE_CCM)
  4793. return _qce_aead_ccm_req(handle, q_req);
  4794. req_info = qce_alloc_req_info(pce_dev);
  4795. if (req_info < 0)
  4796. return -EBUSY;
  4797. q_req->current_req_info = req_info;
  4798. preq_info = &pce_dev->ce_request_info[req_info];
  4799. pce_sps_data = &preq_info->ce_sps;
  4800. areq = (struct aead_request *) q_req->areq;
  4801. aead = crypto_aead_reqtfm(areq);
  4802. ivsize = crypto_aead_ivsize(aead);
  4803. q_req->ivsize = ivsize;
  4804. authsize = q_req->authsize;
  4805. if (q_req->dir == QCE_ENCRYPT)
  4806. q_req->cryptlen = areq->cryptlen;
  4807. else
  4808. q_req->cryptlen = areq->cryptlen - authsize;
  4809. if (q_req->cryptlen > UINT_MAX - areq->assoclen) {
  4810. pr_err("Integer overflow on total aead req length.\n");
  4811. return -EINVAL;
  4812. }
  4813. totallen = q_req->cryptlen + areq->assoclen;
  4814. if (pce_dev->support_cmd_dscr) {
  4815. cmdlistinfo = _ce_get_aead_cmdlistinfo(pce_dev,
  4816. req_info, q_req);
  4817. if (cmdlistinfo == NULL) {
  4818. pr_err("Unsupported aead ciphering algorithm %d, mode %d, ciphering key length %d, auth digest size %d\n",
  4819. q_req->alg, q_req->mode, q_req->encklen,
  4820. q_req->authsize);
  4821. qce_free_req_info(pce_dev, req_info, false);
  4822. return -EINVAL;
  4823. }
  4824. /* set up crypto device */
  4825. rc = _ce_setup_aead(pce_dev, q_req, totallen,
  4826. areq->assoclen, cmdlistinfo);
  4827. if (rc < 0) {
  4828. qce_free_req_info(pce_dev, req_info, false);
  4829. return -EINVAL;
  4830. }
  4831. }
	/*
	 * For crypto 5.0, which has a burst size alignment requirement
	 * for data descriptors, the agent above (qcrypto) prepares the
	 * src scatter list with memory starting with the associated
	 * data, followed by the IV, and then the data stream to be
	 * ciphered.
	 */
  4839. preq_info->src_nents = count_sg(areq->src, totallen);
  4840. /* cipher input */
  4841. qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
  4842. (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
  4843. DMA_TO_DEVICE);
  4844. /* cipher output for encryption */
  4845. if (areq->src != areq->dst) {
  4846. preq_info->dst_nents = count_sg(areq->dst, totallen);
  4847. qce_dma_map_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
  4848. DMA_FROM_DEVICE);
  4849. }
  4850. /* setup for callback, and issue command to bam */
  4851. preq_info->areq = q_req->areq;
  4852. preq_info->qce_cb = q_req->qce_cb;
  4853. preq_info->dir = q_req->dir;
  4854. preq_info->asg = NULL;
  4855. preq_info->offload_op = QCE_OFFLOAD_NONE;
  4856. /* setup xfer type for producer callback handling */
  4857. preq_info->xfer_type = QCE_XFER_AEAD;
  4858. preq_info->req_len = totallen;
  4859. _qce_sps_iovec_count_init(pce_dev, req_info);
  4860. if (pce_dev->support_cmd_dscr) {
  4861. rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK,
  4862. cmdlistinfo, &pce_sps_data->in_transfer);
  4863. if (rc)
  4864. goto bad;
  4865. } else {
  4866. rc = _ce_setup_aead_direct(pce_dev, q_req, totallen,
  4867. areq->assoclen);
  4868. if (rc)
  4869. goto bad;
  4870. }
  4871. preq_info->mode = q_req->mode;
  4872. if (pce_dev->ce_bam_info.minor_version == 0) {
  4873. rc = _qce_sps_add_sg_data(pce_dev, areq->src, totallen,
  4874. &pce_sps_data->in_transfer);
  4875. if (rc)
  4876. goto bad;
  4877. _qce_set_flag(&pce_sps_data->in_transfer,
  4878. SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
  4879. rc = _qce_sps_add_sg_data(pce_dev, areq->dst, totallen,
  4880. &pce_sps_data->out_transfer);
  4881. if (rc)
  4882. goto bad;
  4883. if (totallen > SPS_MAX_PKT_SIZE) {
  4884. _qce_set_flag(&pce_sps_data->out_transfer,
  4885. SPS_IOVEC_FLAG_INT);
  4886. pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
  4887. } else {
  4888. rc = _qce_sps_add_data(GET_PHYS_ADDR(
  4889. pce_sps_data->result_dump),
  4890. CRYPTO_RESULT_DUMP_SIZE,
  4891. &pce_sps_data->out_transfer);
  4892. if (rc)
  4893. goto bad;
  4894. _qce_set_flag(&pce_sps_data->out_transfer,
  4895. SPS_IOVEC_FLAG_INT);
  4896. pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
  4897. }
  4898. rc = _qce_sps_transfer(pce_dev, req_info);
  4899. } else {
  4900. rc = _qce_sps_add_sg_data(pce_dev, areq->src, totallen,
  4901. &pce_sps_data->in_transfer);
  4902. if (rc)
  4903. goto bad;
  4904. _qce_set_flag(&pce_sps_data->in_transfer,
  4905. SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
  4906. if (pce_dev->no_get_around) {
  4907. rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
  4908. &pce_sps_data->cmdlistptr.unlock_all_pipes,
  4909. &pce_sps_data->in_transfer);
  4910. if (rc)
  4911. goto bad;
  4912. }
  4913. rc = _qce_sps_add_sg_data(pce_dev, areq->dst, totallen,
  4914. &pce_sps_data->out_transfer);
  4915. if (rc)
  4916. goto bad;
  4917. if (pce_dev->no_get_around || totallen <= SPS_MAX_PKT_SIZE) {
  4918. rc = _qce_sps_add_data(
  4919. GET_PHYS_ADDR(pce_sps_data->result_dump),
  4920. CRYPTO_RESULT_DUMP_SIZE,
  4921. &pce_sps_data->out_transfer);
  4922. if (rc)
  4923. goto bad;
  4924. pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
  4925. } else {
  4926. pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
  4927. }
  4928. select_mode(pce_dev, preq_info);
  4929. rc = _qce_sps_transfer(pce_dev, req_info);
  4930. cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
  4931. }
  4932. if (rc)
  4933. goto bad;
  4934. return 0;
  4935. bad:
  4936. if (preq_info->src_nents)
  4937. qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
  4938. (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
  4939. DMA_TO_DEVICE);
  4940. if (areq->src != areq->dst)
  4941. qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
  4942. DMA_FROM_DEVICE);
  4943. qce_free_req_info(pce_dev, req_info, false);
  4944. return rc;
  4945. }
  4946. EXPORT_SYMBOL(qce_aead_req);
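/*
 * Submit a symmetric-key cipher (skcipher) request. Scatterlists are
 * DMA-mapped here only for non-offload operations.
 */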
  4947. int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
  4948. {
  4949. int rc = 0;
  4950. struct qce_device *pce_dev = (struct qce_device *) handle;
  4951. struct skcipher_request *areq = (struct skcipher_request *)
  4952. c_req->areq;
  4953. struct qce_cmdlist_info *cmdlistinfo = NULL;
  4954. int req_info = -1;
  4955. struct ce_sps_data *pce_sps_data;
  4956. struct ce_request_info *preq_info;
  4957. req_info = qce_alloc_req_info(pce_dev);
  4958. if (req_info < 0)
  4959. return -EBUSY;
  4960. c_req->current_req_info = req_info;
  4961. preq_info = &pce_dev->ce_request_info[req_info];
  4962. pce_sps_data = &preq_info->ce_sps;
  4963. preq_info->src_nents = 0;
  4964. preq_info->dst_nents = 0;
  4965. /* cipher input */
  4966. preq_info->src_nents = count_sg(areq->src, areq->cryptlen);
  4967. if (!is_offload_op(c_req->offload_op))
  4968. qce_dma_map_sg(pce_dev->pdev, areq->src,
  4969. preq_info->src_nents,
  4970. (areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
  4971. DMA_TO_DEVICE);
  4972. /* cipher output */
  4973. if (areq->src != areq->dst) {
  4974. preq_info->dst_nents = count_sg(areq->dst, areq->cryptlen);
  4975. if (!is_offload_op(c_req->offload_op))
  4976. qce_dma_map_sg(pce_dev->pdev, areq->dst,
  4977. preq_info->dst_nents, DMA_FROM_DEVICE);
  4978. } else {
  4979. preq_info->dst_nents = preq_info->src_nents;
  4980. }
  4981. preq_info->dir = c_req->dir;
  4982. if ((pce_dev->ce_bam_info.minor_version == 0) &&
  4983. (preq_info->dir == QCE_DECRYPT) &&
  4984. (c_req->mode == QCE_MODE_CBC)) {
  4985. memcpy(preq_info->dec_iv, (unsigned char *)
  4986. sg_virt(areq->src) + areq->src->length - 16,
  4987. NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE);
  4988. }
  4989. /* set up crypto device */
  4990. if (pce_dev->support_cmd_dscr) {
  4991. cmdlistinfo = _ce_get_cipher_cmdlistinfo(pce_dev,
  4992. req_info, c_req);
  4993. if (cmdlistinfo == NULL) {
  4994. pr_err("Unsupported cipher algorithm %d, mode %d\n",
  4995. c_req->alg, c_req->mode);
  4996. qce_free_req_info(pce_dev, req_info, false);
  4997. return -EINVAL;
  4998. }
  4999. rc = _ce_setup_cipher(pce_dev, c_req, areq->cryptlen, 0,
  5000. cmdlistinfo);
  5001. } else {
  5002. rc = _ce_setup_cipher_direct(pce_dev, c_req, areq->cryptlen, 0);
  5003. }
  5004. if (rc < 0)
  5005. goto bad;
  5006. preq_info->mode = c_req->mode;
  5007. preq_info->offload_op = c_req->offload_op;
  5008. /* setup for client callback, and issue command to BAM */
  5009. preq_info->areq = areq;
  5010. preq_info->qce_cb = c_req->qce_cb;
  5011. /* setup xfer type for producer callback handling */
  5012. preq_info->xfer_type = QCE_XFER_CIPHERING;
  5013. preq_info->req_len = areq->cryptlen;
  5014. _qce_sps_iovec_count_init(pce_dev, req_info);
  5015. if (pce_dev->support_cmd_dscr && cmdlistinfo) {
  5016. rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK,
  5017. cmdlistinfo, &pce_sps_data->in_transfer);
  5018. if (rc)
  5019. goto bad;
  5020. }
  5021. rc = _qce_sps_add_data(areq->src->dma_address, areq->cryptlen,
  5022. &pce_sps_data->in_transfer);
  5023. if (rc)
  5024. goto bad;
  5025. _qce_set_flag(&pce_sps_data->in_transfer,
  5026. SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
  5027. if (pce_dev->no_get_around) {
  5028. rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
  5029. &pce_sps_data->cmdlistptr.unlock_all_pipes,
  5030. &pce_sps_data->in_transfer);
  5031. if (rc)
  5032. goto bad;
  5033. }
  5034. rc = _qce_sps_add_data(areq->dst->dma_address, areq->cryptlen,
  5035. &pce_sps_data->out_transfer);
  5036. if (rc)
  5037. goto bad;
  5038. if (pce_dev->no_get_around || areq->cryptlen <= SPS_MAX_PKT_SIZE) {
  5039. pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
  5040. if (!is_offload_op(c_req->offload_op)) {
  5041. rc = _qce_sps_add_data(
  5042. GET_PHYS_ADDR(pce_sps_data->result_dump),
  5043. CRYPTO_RESULT_DUMP_SIZE,
  5044. &pce_sps_data->out_transfer);
  5045. if (rc)
  5046. goto bad;
  5047. }
  5048. } else {
  5049. pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
  5050. }
  5051. select_mode(pce_dev, preq_info);
  5052. rc = _qce_sps_transfer(pce_dev, req_info);
  5053. cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
  5054. if (rc)
  5055. goto bad;
  5056. return 0;
  5057. bad:
  5058. if (!is_offload_op(c_req->offload_op)) {
  5059. if (areq->src != areq->dst)
  5060. if (preq_info->dst_nents)
  5061. qce_dma_unmap_sg(pce_dev->pdev, areq->dst,
  5062. preq_info->dst_nents, DMA_FROM_DEVICE);
  5063. if (preq_info->src_nents)
  5064. qce_dma_unmap_sg(pce_dev->pdev, areq->src,
  5065. preq_info->src_nents,
  5066. (areq->src == areq->dst) ?
  5067. DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
  5068. }
  5069. qce_free_req_info(pce_dev, req_info, false);
  5070. return rc;
  5071. }
  5072. EXPORT_SYMBOL(qce_ablk_cipher_req);
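/*
 * Submit a hash/HMAC request. A NULL sreq selects the internal dummy
 * request in the DUMMY_REQ_INDEX slot. A dummy burst is fed in for
 * zero-length requests, since zero-length transfers do not work with
 * BAM-NDP.
 */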
int qce_process_sha_req(void *handle, struct qce_sha_req *sreq)
{
    struct qce_device *pce_dev = (struct qce_device *) handle;
    int rc;
    struct ahash_request *areq;
    struct qce_cmdlist_info *cmdlistinfo = NULL;
    int req_info = -1;
    struct ce_sps_data *pce_sps_data;
    struct ce_request_info *preq_info;
    bool is_dummy = false;

    if (!sreq) {
        sreq = &(pce_dev->dummyreq.sreq);
        req_info = DUMMY_REQ_INDEX;
        is_dummy = true;
    } else {
        req_info = qce_alloc_req_info(pce_dev);
        if (req_info < 0)
            return -EBUSY;
    }

    sreq->current_req_info = req_info;
    areq = (struct ahash_request *)sreq->areq;
    preq_info = &pce_dev->ce_request_info[req_info];
    pce_sps_data = &preq_info->ce_sps;

    preq_info->src_nents = count_sg(sreq->src, sreq->size);
    qce_dma_map_sg(pce_dev->pdev, sreq->src, preq_info->src_nents,
                DMA_TO_DEVICE);

    if (pce_dev->support_cmd_dscr) {
        cmdlistinfo = _ce_get_hash_cmdlistinfo(pce_dev, req_info, sreq);
        if (cmdlistinfo == NULL) {
            pr_err("Unsupported hash algorithm %d\n", sreq->alg);
            qce_free_req_info(pce_dev, req_info, false);
            return -EINVAL;
        }
        rc = _ce_setup_hash(pce_dev, sreq, cmdlistinfo);
    } else {
        rc = _ce_setup_hash_direct(pce_dev, sreq);
    }
    if (rc < 0)
        goto bad;

    preq_info->areq = areq;
    preq_info->qce_cb = sreq->qce_cb;
    preq_info->offload_op = QCE_OFFLOAD_NONE;

    /* setup xfer type for producer callback handling */
    preq_info->xfer_type = QCE_XFER_HASHING;
    preq_info->req_len = sreq->size;

    _qce_sps_iovec_count_init(pce_dev, req_info);

    if (pce_dev->support_cmd_dscr && cmdlistinfo) {
        rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK,
                cmdlistinfo, &pce_sps_data->in_transfer);
        if (rc)
            goto bad;
    }
    rc = _qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes,
                &pce_sps_data->in_transfer);
    if (rc)
        goto bad;

    /* always ensure there is input data. ZLT does not work for bam-ndp */
    if (!areq->nbytes) {
        rc = _qce_sps_add_data(
                GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
                pce_dev->ce_bam_info.ce_burst_size,
                &pce_sps_data->in_transfer);
        if (rc)
            goto bad;
    }
    _qce_set_flag(&pce_sps_data->in_transfer,
                SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
    if (pce_dev->no_get_around) {
        rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
                &pce_sps_data->cmdlistptr.unlock_all_pipes,
                &pce_sps_data->in_transfer);
        if (rc)
            goto bad;
    }

    rc = _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
                CRYPTO_RESULT_DUMP_SIZE,
                &pce_sps_data->out_transfer);
    if (rc)
        goto bad;

    if (is_dummy) {
        _qce_set_flag(&pce_sps_data->out_transfer, SPS_IOVEC_FLAG_INT);
        rc = _qce_sps_transfer(pce_dev, req_info);
    } else {
        select_mode(pce_dev, preq_info);
        rc = _qce_sps_transfer(pce_dev, req_info);
        cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
    }
    if (rc)
        goto bad;
    return 0;
bad:
    if (preq_info->src_nents) {
        qce_dma_unmap_sg(pce_dev->pdev, sreq->src,
                preq_info->src_nents, DMA_TO_DEVICE);
    }
    qce_free_req_info(pce_dev, req_info, false);
    return rc;
}
EXPORT_SYMBOL(qce_process_sha_req);
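/*
 * Queue a single-packet OTA F8 (Kasumi/SNOW 3G) ciphering request.
 * Key-stream-only mode (data_in == NULL) is rejected; in-place
 * ciphering reuses the source DMA mapping for the destination.
 */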
int qce_f8_req(void *handle, struct qce_f8_req *req,
            void *cookie, qce_comp_func_ptr_t qce_cb)
{
    struct qce_device *pce_dev = (struct qce_device *) handle;
    bool key_stream_mode;
    dma_addr_t dst;
    int rc;
    struct qce_cmdlist_info *cmdlistinfo;
    int req_info = -1;
    struct ce_request_info *preq_info;
    struct ce_sps_data *pce_sps_data;

    req_info = qce_alloc_req_info(pce_dev);
    if (req_info < 0)
        return -EBUSY;
    req->current_req_info = req_info;
    preq_info = &pce_dev->ce_request_info[req_info];
    pce_sps_data = &preq_info->ce_sps;

    switch (req->algorithm) {
    case QCE_OTA_ALGO_KASUMI:
        cmdlistinfo = &pce_sps_data->cmdlistptr.f8_kasumi;
        break;
    case QCE_OTA_ALGO_SNOW3G:
        cmdlistinfo = &pce_sps_data->cmdlistptr.f8_snow3g;
        break;
    default:
        qce_free_req_info(pce_dev, req_info, false);
        return -EINVAL;
    }

    key_stream_mode = (req->data_in == NULL);

    /* don't support key stream mode */
    if (key_stream_mode || (req->bearer >= QCE_OTA_MAX_BEARER)) {
        qce_free_req_info(pce_dev, req_info, false);
        return -EINVAL;
    }

    /* F8 cipher input */
    preq_info->phy_ota_src = dma_map_single(pce_dev->pdev,
                req->data_in, req->data_len,
                (req->data_in == req->data_out) ?
                DMA_BIDIRECTIONAL : DMA_TO_DEVICE);

    /* F8 cipher output */
    if (req->data_in != req->data_out) {
        dst = dma_map_single(pce_dev->pdev, req->data_out,
                req->data_len, DMA_FROM_DEVICE);
        preq_info->phy_ota_dst = dst;
    } else {
        /* in place ciphering */
        dst = preq_info->phy_ota_src;
        preq_info->phy_ota_dst = 0;
    }
    preq_info->ota_size = req->data_len;

    /* set up crypto device */
    if (pce_dev->support_cmd_dscr)
        rc = _ce_f8_setup(pce_dev, req, key_stream_mode, 1, 0,
                req->data_len, cmdlistinfo);
    else
        rc = _ce_f8_setup_direct(pce_dev, req, key_stream_mode, 1, 0,
                req->data_len);
    if (rc < 0)
        goto bad;

    /* setup for callback, and issue command to sps */
    preq_info->areq = cookie;
    preq_info->qce_cb = qce_cb;
    preq_info->offload_op = QCE_OFFLOAD_NONE;

    /* setup xfer type for producer callback handling */
    preq_info->xfer_type = QCE_XFER_F8;
    preq_info->req_len = req->data_len;

    _qce_sps_iovec_count_init(pce_dev, req_info);
    if (pce_dev->support_cmd_dscr) {
        rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK,
                cmdlistinfo, &pce_sps_data->in_transfer);
        if (rc)
            goto bad;
    }
    rc = _qce_sps_add_data((uint32_t)preq_info->phy_ota_src, req->data_len,
                &pce_sps_data->in_transfer);
    if (rc)
        goto bad;
    _qce_set_flag(&pce_sps_data->in_transfer,
                SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);

    rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
                &pce_sps_data->cmdlistptr.unlock_all_pipes,
                &pce_sps_data->in_transfer);
    if (rc)
        goto bad;

    rc = _qce_sps_add_data((uint32_t)dst, req->data_len,
                &pce_sps_data->out_transfer);
    if (rc)
        goto bad;
    rc = _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
                CRYPTO_RESULT_DUMP_SIZE,
                &pce_sps_data->out_transfer);
    if (rc)
        goto bad;

    select_mode(pce_dev, preq_info);
    rc = _qce_sps_transfer(pce_dev, req_info);
    cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
    if (rc)
        goto bad;
    return 0;
bad:
    if (preq_info->phy_ota_dst != 0)
        dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst,
                req->data_len, DMA_FROM_DEVICE);
    if (preq_info->phy_ota_src != 0)
        dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
                req->data_len,
                (req->data_in == req->data_out) ?
                DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
    qce_free_req_info(pce_dev, req_info, false);
    return rc;
}
EXPORT_SYMBOL(qce_f8_req);
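/*
 * Queue a multi-packet OTA F8 ciphering request: num_pkt packets of
 * data_len bytes are mapped as one contiguous buffer and ciphered
 * using the cipher_start/cipher_size window of each packet.
 */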
int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *mreq,
            void *cookie, qce_comp_func_ptr_t qce_cb)
{
    struct qce_device *pce_dev = (struct qce_device *) handle;
    uint16_t num_pkt = mreq->num_pkt;
    uint16_t cipher_start = mreq->cipher_start;
    uint16_t cipher_size = mreq->cipher_size;
    struct qce_f8_req *req = &mreq->qce_f8_req;
    uint32_t total;
    dma_addr_t dst = 0;
    int rc = 0;
    struct qce_cmdlist_info *cmdlistinfo;
    int req_info = -1;
    struct ce_request_info *preq_info;
    struct ce_sps_data *pce_sps_data;

    req_info = qce_alloc_req_info(pce_dev);
    if (req_info < 0)
        return -EBUSY;
    req->current_req_info = req_info;
    preq_info = &pce_dev->ce_request_info[req_info];
    pce_sps_data = &preq_info->ce_sps;

    switch (req->algorithm) {
    case QCE_OTA_ALGO_KASUMI:
        cmdlistinfo = &pce_sps_data->cmdlistptr.f8_kasumi;
        break;
    case QCE_OTA_ALGO_SNOW3G:
        cmdlistinfo = &pce_sps_data->cmdlistptr.f8_snow3g;
        break;
    default:
        qce_free_req_info(pce_dev, req_info, false);
        return -EINVAL;
    }

    total = num_pkt * req->data_len;

    /* F8 cipher input */
    preq_info->phy_ota_src = dma_map_single(pce_dev->pdev,
                req->data_in, total,
                (req->data_in == req->data_out) ?
                DMA_BIDIRECTIONAL : DMA_TO_DEVICE);

    /* F8 cipher output */
    if (req->data_in != req->data_out) {
        dst = dma_map_single(pce_dev->pdev, req->data_out, total,
                DMA_FROM_DEVICE);
        preq_info->phy_ota_dst = dst;
    } else {
        /* in place ciphering */
        dst = preq_info->phy_ota_src;
        preq_info->phy_ota_dst = 0;
    }

    preq_info->ota_size = total;

    /* set up crypto device */
    if (pce_dev->support_cmd_dscr)
        rc = _ce_f8_setup(pce_dev, req, false, num_pkt, cipher_start,
                cipher_size, cmdlistinfo);
    else
        rc = _ce_f8_setup_direct(pce_dev, req, false, num_pkt,
                cipher_start, cipher_size);
    if (rc)
        goto bad;

    /* setup for callback, and issue command to sps */
    preq_info->areq = cookie;
    preq_info->qce_cb = qce_cb;
    preq_info->offload_op = QCE_OFFLOAD_NONE;

    /* setup xfer type for producer callback handling */
    preq_info->xfer_type = QCE_XFER_F8;
    preq_info->req_len = total;

    _qce_sps_iovec_count_init(pce_dev, req_info);
    if (pce_dev->support_cmd_dscr) {
        rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK,
                cmdlistinfo, &pce_sps_data->in_transfer);
        if (rc)
            goto bad;
    }
    rc = _qce_sps_add_data((uint32_t)preq_info->phy_ota_src, total,
                &pce_sps_data->in_transfer);
    if (rc)
        goto bad;
    _qce_set_flag(&pce_sps_data->in_transfer,
                SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);

    rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
                &pce_sps_data->cmdlistptr.unlock_all_pipes,
                &pce_sps_data->in_transfer);
    if (rc)
        goto bad;

    rc = _qce_sps_add_data((uint32_t)dst, total,
                &pce_sps_data->out_transfer);
    if (rc)
        goto bad;
    rc = _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
                CRYPTO_RESULT_DUMP_SIZE,
                &pce_sps_data->out_transfer);
    if (rc)
        goto bad;

    select_mode(pce_dev, preq_info);
    rc = _qce_sps_transfer(pce_dev, req_info);
    cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);

    if (rc == 0)
        return 0;
bad:
    if (preq_info->phy_ota_dst)
        dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst, total,
                DMA_FROM_DEVICE);
    dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src, total,
                (req->data_in == req->data_out) ?
                DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
    qce_free_req_info(pce_dev, req_info, false);
    return rc;
}
EXPORT_SYMBOL(qce_f8_multi_pkt_req);
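/*
 * Queue an OTA F9 (Kasumi/SNOW 3G) integrity request over the caller's
 * message buffer; the computed MAC is collected from the result dump
 * descriptor in the completion path.
 */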
int qce_f9_req(void *handle, struct qce_f9_req *req, void *cookie,
            qce_comp_func_ptr_t qce_cb)
{
    struct qce_device *pce_dev = (struct qce_device *) handle;
    int rc;
    struct qce_cmdlist_info *cmdlistinfo;
    int req_info = -1;
    struct ce_sps_data *pce_sps_data;
    struct ce_request_info *preq_info;

    req_info = qce_alloc_req_info(pce_dev);
    if (req_info < 0)
        return -EBUSY;
    req->current_req_info = req_info;
    preq_info = &pce_dev->ce_request_info[req_info];
    pce_sps_data = &preq_info->ce_sps;

    switch (req->algorithm) {
    case QCE_OTA_ALGO_KASUMI:
        cmdlistinfo = &pce_sps_data->cmdlistptr.f9_kasumi;
        break;
    case QCE_OTA_ALGO_SNOW3G:
        cmdlistinfo = &pce_sps_data->cmdlistptr.f9_snow3g;
        break;
    default:
        qce_free_req_info(pce_dev, req_info, false);
        return -EINVAL;
    }

    preq_info->phy_ota_src = dma_map_single(pce_dev->pdev, req->message,
                req->msize, DMA_TO_DEVICE);

    preq_info->ota_size = req->msize;

    if (pce_dev->support_cmd_dscr)
        rc = _ce_f9_setup(pce_dev, req, cmdlistinfo);
    else
        rc = _ce_f9_setup_direct(pce_dev, req);
    if (rc < 0)
        goto bad;

    /* setup for callback, and issue command to sps */
    preq_info->areq = cookie;
    preq_info->qce_cb = qce_cb;
    preq_info->offload_op = QCE_OFFLOAD_NONE;

    /* setup xfer type for producer callback handling */
    preq_info->xfer_type = QCE_XFER_F9;
    preq_info->req_len = req->msize;

    _qce_sps_iovec_count_init(pce_dev, req_info);
    if (pce_dev->support_cmd_dscr) {
        rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK,
                cmdlistinfo, &pce_sps_data->in_transfer);
        if (rc)
            goto bad;
    }
    rc = _qce_sps_add_data((uint32_t)preq_info->phy_ota_src, req->msize,
                &pce_sps_data->in_transfer);
    if (rc)
        goto bad;
    _qce_set_flag(&pce_sps_data->in_transfer,
                SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);

    rc = _qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
                &pce_sps_data->cmdlistptr.unlock_all_pipes,
                &pce_sps_data->in_transfer);
    if (rc)
        goto bad;

    rc = _qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
                CRYPTO_RESULT_DUMP_SIZE,
                &pce_sps_data->out_transfer);
    if (rc)
        goto bad;

    select_mode(pce_dev, preq_info);
    rc = _qce_sps_transfer(pce_dev, req_info);
    cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
    if (rc)
        goto bad;
    return 0;
bad:
    dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
                req->msize, DMA_TO_DEVICE);
    qce_free_req_info(pce_dev, req_info, false);
    return rc;
}
EXPORT_SYMBOL(qce_f9_req);
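/*
 * Parse the crypto engine device-tree node: SW-fallback flags, clock and
 * bandwidth options, BAM pipe pairs for kernel and offload use cases,
 * and the crypto/BAM register resources and BAM IRQ.
 */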
static int __qce_get_device_tree_data(struct platform_device *pdev,
        struct qce_device *pce_dev)
{
    struct resource *resource;
    int rc = 0, i = 0;

    pce_dev->is_shared = of_property_read_bool((&pdev->dev)->of_node,
                "qcom,ce-hw-shared");
    pce_dev->support_hw_key = of_property_read_bool((&pdev->dev)->of_node,
                "qcom,ce-hw-key");

    pce_dev->use_sw_aes_cbc_ecb_ctr_algo =
                of_property_read_bool((&pdev->dev)->of_node,
                "qcom,use-sw-aes-cbc-ecb-ctr-algo");
    pce_dev->use_sw_aead_algo =
                of_property_read_bool((&pdev->dev)->of_node,
                "qcom,use-sw-aead-algo");
    pce_dev->use_sw_aes_xts_algo =
                of_property_read_bool((&pdev->dev)->of_node,
                "qcom,use-sw-aes-xts-algo");
    pce_dev->use_sw_ahash_algo =
                of_property_read_bool((&pdev->dev)->of_node,
                "qcom,use-sw-ahash-algo");
    pce_dev->use_sw_hmac_algo =
                of_property_read_bool((&pdev->dev)->of_node,
                "qcom,use-sw-hmac-algo");
    pce_dev->use_sw_aes_ccm_algo =
                of_property_read_bool((&pdev->dev)->of_node,
                "qcom,use-sw-aes-ccm-algo");
    pce_dev->support_clk_mgmt_sus_res = of_property_read_bool(
                (&pdev->dev)->of_node, "qcom,clk-mgmt-sus-res");
    pce_dev->support_only_core_src_clk = of_property_read_bool(
                (&pdev->dev)->of_node, "qcom,support-core-clk-only");
    pce_dev->request_bw_before_clk = of_property_read_bool(
                (&pdev->dev)->of_node, "qcom,request-bw-before-clk");

    for (i = 0; i < QCE_OFFLOAD_OPER_LAST; i++)
        pce_dev->ce_bam_info.pipe_pair_index[i] = 0;

    pce_dev->kernel_pipes_support = true;
    if (of_property_read_u32((&pdev->dev)->of_node,
            "qcom,bam-pipe-pair",
            &pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_NONE])) {
        pr_warn("Kernel pipes not supported.\n");
        //Unused pipe, just as failsafe.
        pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_NONE] = 2;
        pce_dev->kernel_pipes_support = false;
    }

    if (of_property_read_bool((&pdev->dev)->of_node,
            "qcom,offload-ops-support")) {
        pce_dev->offload_pipes_support = true;
        if (of_property_read_u32((&pdev->dev)->of_node,
            "qcom,bam-pipe-offload-cpb-hlos",
            &pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_CPB_HLOS])) {
            pr_err("Fail to get bam offload cpb-hlos pipe pair info.\n");
            return -EINVAL;
        }
        if (of_property_read_u32((&pdev->dev)->of_node,
            "qcom,bam-pipe-offload-hlos-hlos",
            &pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_HLOS_HLOS])) {
            pr_err("Fail to get bam offload hlos-hlos info.\n");
            return -EINVAL;
        }
        if (of_property_read_u32((&pdev->dev)->of_node,
            "qcom,bam-pipe-offload-hlos-hlos-1",
            &pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_HLOS_HLOS_1])) {
            pr_info("No bam offload hlos-hlos-1 info.\n");
        }
        if (of_property_read_u32((&pdev->dev)->of_node,
            "qcom,bam-pipe-offload-hlos-cpb",
            &pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_HLOS_CPB])) {
            pr_err("Fail to get bam offload hlos-cpb info\n");
            return -EINVAL;
        }
        if (of_property_read_u32((&pdev->dev)->of_node,
            "qcom,bam-pipe-offload-hlos-cpb-1",
            &pce_dev->ce_bam_info.pipe_pair_index[QCE_OFFLOAD_HLOS_CPB_1])) {
            pr_info("No bam offload hlos-cpb-1 info\n");
        }
    }

    if (of_property_read_u32((&pdev->dev)->of_node,
            "qcom,ce-device",
            &pce_dev->ce_bam_info.ce_device)) {
        pr_err("Fail to get CE device information.\n");
        return -EINVAL;
    }
    if (of_property_read_u32((&pdev->dev)->of_node,
            "qcom,ce-hw-instance",
            &pce_dev->ce_bam_info.ce_hw_instance)) {
        pr_err("Fail to get CE hw instance information.\n");
        return -EINVAL;
    }
    if (of_property_read_u32((&pdev->dev)->of_node,
            "qcom,bam-ee",
            &pce_dev->ce_bam_info.bam_ee)) {
        pr_info("BAM Apps EE is not defined, setting to default 1\n");
        pce_dev->ce_bam_info.bam_ee = 1;
    }
    if (of_property_read_u32((&pdev->dev)->of_node,
            "qcom,ce-opp-freq",
            &pce_dev->ce_opp_freq_hz)) {
        pr_info("CE operating frequency is not defined, setting to default 100MHZ\n");
        pce_dev->ce_opp_freq_hz = CE_CLK_100MHZ;
    }

    if (of_property_read_bool((&pdev->dev)->of_node, "qcom,smmu-s1-enable"))
        pce_dev->enable_s1_smmu = true;

    pce_dev->no_clock_support = of_property_read_bool((&pdev->dev)->of_node,
                "qcom,no-clock-support");

    for (i = 0; i < QCE_OFFLOAD_OPER_LAST; i++) {
        /* Source/destination pipes for all usecases */
        pce_dev->ce_bam_info.dest_pipe_index[i] =
                2 * pce_dev->ce_bam_info.pipe_pair_index[i];
        pce_dev->ce_bam_info.src_pipe_index[i] =
                pce_dev->ce_bam_info.dest_pipe_index[i] + 1;
    }

    resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                "crypto-base");
    if (resource) {
        pce_dev->phy_iobase = resource->start;
        pce_dev->iobase = ioremap(resource->start,
                resource_size(resource));
        if (!pce_dev->iobase) {
            pr_err("Can not map CRYPTO io memory\n");
            return -ENOMEM;
        }
    } else {
        pr_err("CRYPTO HW mem unavailable.\n");
        return -ENODEV;
    }

    resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                "crypto-bam-base");
    if (resource) {
        pce_dev->bam_mem = resource->start;
        pce_dev->bam_mem_size = resource_size(resource);
    } else {
        pr_err("CRYPTO BAM mem unavailable.\n");
        rc = -ENODEV;
        goto err_getting_bam_info;
    }
    pce_dev->ce_bam_info.bam_irq = platform_get_irq(pdev, 0);
    if (pce_dev->ce_bam_info.bam_irq < 0) {
        pr_err("CRYPTO BAM IRQ unavailable.\n");
        rc = -ENXIO;
        goto err_dev;
    }
    return rc;
err_dev:
    if (pce_dev->ce_bam_info.bam_iobase)
        iounmap(pce_dev->ce_bam_info.bam_iobase);

err_getting_bam_info:
    if (pce_dev->iobase)
        iounmap(pce_dev->iobase);

    return rc;
}
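/* Acquire the CE core source, core, interface and bus clocks as configured. */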
static int __qce_init_clk(struct qce_device *pce_dev)
{
    int rc = 0;

    if (pce_dev->no_clock_support) {
        pr_debug("No clock support defined in dts\n");
        return rc;
    }

    pce_dev->ce_core_src_clk = clk_get(pce_dev->pdev, "core_clk_src");
    if (!IS_ERR(pce_dev->ce_core_src_clk)) {
        if (pce_dev->request_bw_before_clk)
            goto skip_set_rate;

        rc = clk_set_rate(pce_dev->ce_core_src_clk,
                pce_dev->ce_opp_freq_hz);
        if (rc) {
            pr_err("Unable to set the core src clk @%uMhz.\n",
                pce_dev->ce_opp_freq_hz/CE_CLK_DIV);
            goto exit_put_core_src_clk;
        }
    } else {
        if (pce_dev->support_only_core_src_clk) {
            rc = PTR_ERR(pce_dev->ce_core_src_clk);
            pce_dev->ce_core_src_clk = NULL;
            pr_err("Unable to get CE core src clk\n");
            return rc;
        }
        pr_warn("Unable to get CE core src clk, set to NULL\n");
        pce_dev->ce_core_src_clk = NULL;
    }

skip_set_rate:
    if (pce_dev->support_only_core_src_clk) {
        pce_dev->ce_core_clk = NULL;
        pce_dev->ce_clk = NULL;
        pce_dev->ce_bus_clk = NULL;
    } else {
        pce_dev->ce_core_clk = clk_get(pce_dev->pdev, "core_clk");
        if (IS_ERR(pce_dev->ce_core_clk)) {
            rc = PTR_ERR(pce_dev->ce_core_clk);
            pr_err("Unable to get CE core clk\n");
            goto exit_put_core_src_clk;
        }
        pce_dev->ce_clk = clk_get(pce_dev->pdev, "iface_clk");
        if (IS_ERR(pce_dev->ce_clk)) {
            rc = PTR_ERR(pce_dev->ce_clk);
            pr_err("Unable to get CE interface clk\n");
            goto exit_put_core_clk;
        }
        pce_dev->ce_bus_clk = clk_get(pce_dev->pdev, "bus_clk");
        if (IS_ERR(pce_dev->ce_bus_clk)) {
            rc = PTR_ERR(pce_dev->ce_bus_clk);
            pr_err("Unable to get CE BUS interface clk\n");
            goto exit_put_iface_clk;
        }
    }
    return rc;

exit_put_iface_clk:
    if (pce_dev->ce_clk)
        clk_put(pce_dev->ce_clk);
exit_put_core_clk:
    if (pce_dev->ce_core_clk)
        clk_put(pce_dev->ce_core_clk);
exit_put_core_src_clk:
    if (pce_dev->ce_core_src_clk)
        clk_put(pce_dev->ce_core_src_clk);
    pr_err("Unable to init CE clks, rc = %d\n", rc);
    return rc;
}
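/* Release the clock handles taken in __qce_init_clk(). */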
static void __qce_deinit_clk(struct qce_device *pce_dev)
{
    if (pce_dev->no_clock_support) {
        pr_debug("No clock support defined in dts\n");
        return;
    }

    if (pce_dev->ce_bus_clk)
        clk_put(pce_dev->ce_bus_clk);
    if (pce_dev->ce_clk)
        clk_put(pce_dev->ce_clk);
    if (pce_dev->ce_core_clk)
        clk_put(pce_dev->ce_core_clk);
    if (pce_dev->ce_core_src_clk)
        clk_put(pce_dev->ce_core_src_clk);
}
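/* Prepare and enable all CE clocks, unwinding already-enabled clocks on failure. */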
int qce_enable_clk(void *handle)
{
    struct qce_device *pce_dev = (struct qce_device *)handle;
    int rc = 0;

    if (pce_dev->no_clock_support) {
        pr_debug("No clock support defined in dts\n");
        return rc;
    }

    if (pce_dev->ce_core_src_clk) {
        rc = clk_prepare_enable(pce_dev->ce_core_src_clk);
        if (rc) {
            pr_err("Unable to enable/prepare CE core src clk\n");
            return rc;
        }
    }

    if (pce_dev->support_only_core_src_clk)
        return rc;

    if (pce_dev->ce_core_clk) {
        rc = clk_prepare_enable(pce_dev->ce_core_clk);
        if (rc) {
            pr_err("Unable to enable/prepare CE core clk\n");
            goto exit_disable_core_src_clk;
        }
    }

    if (pce_dev->ce_clk) {
        rc = clk_prepare_enable(pce_dev->ce_clk);
        if (rc) {
            pr_err("Unable to enable/prepare CE iface clk\n");
            goto exit_disable_core_clk;
        }
    }

    if (pce_dev->ce_bus_clk) {
        rc = clk_prepare_enable(pce_dev->ce_bus_clk);
        if (rc) {
            pr_err("Unable to enable/prepare CE BUS clk\n");
            goto exit_disable_ce_clk;
        }
    }
    return rc;

exit_disable_ce_clk:
    if (pce_dev->ce_clk)
        clk_disable_unprepare(pce_dev->ce_clk);
exit_disable_core_clk:
    if (pce_dev->ce_core_clk)
        clk_disable_unprepare(pce_dev->ce_core_clk);
exit_disable_core_src_clk:
    if (pce_dev->ce_core_src_clk)
        clk_disable_unprepare(pce_dev->ce_core_src_clk);
    return rc;
}
EXPORT_SYMBOL(qce_enable_clk);
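/* Disable and unprepare all CE clocks enabled by qce_enable_clk(). */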
int qce_disable_clk(void *handle)
{
    struct qce_device *pce_dev = (struct qce_device *) handle;

    if (pce_dev->no_clock_support) {
        pr_debug("No clock support defined in dts\n");
        return 0;
    }

    if (pce_dev->ce_bus_clk)
        clk_disable_unprepare(pce_dev->ce_bus_clk);
    if (pce_dev->ce_clk)
        clk_disable_unprepare(pce_dev->ce_clk);
    if (pce_dev->ce_core_clk)
        clk_disable_unprepare(pce_dev->ce_core_clk);
    if (pce_dev->ce_core_src_clk)
        clk_disable_unprepare(pce_dev->ce_core_src_clk);

    return 0;
}
EXPORT_SYMBOL(qce_disable_clk);
/* dummy req setup */
static int setup_dummy_req(struct qce_device *pce_dev)
{
    char *input =
    "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopqopqrpqrs";
    int len = DUMMY_REQ_DATA_LEN;

    memcpy(pce_dev->dummyreq_in_buf, input, len);
    sg_init_one(&pce_dev->dummyreq.sg, pce_dev->dummyreq_in_buf, len);

    pce_dev->dummyreq.sreq.alg = QCE_HASH_SHA1;
    pce_dev->dummyreq.sreq.qce_cb = qce_dummy_complete;
    pce_dev->dummyreq.sreq.src = &pce_dev->dummyreq.sg;
    pce_dev->dummyreq.sreq.auth_data[0] = 0;
    pce_dev->dummyreq.sreq.auth_data[1] = 0;
    pce_dev->dummyreq.sreq.auth_data[2] = 0;
    pce_dev->dummyreq.sreq.auth_data[3] = 0;
    pce_dev->dummyreq.sreq.first_blk = true;
    pce_dev->dummyreq.sreq.last_blk = true;
    pce_dev->dummyreq.sreq.size = len;
    pce_dev->dummyreq.sreq.areq = &pce_dev->dummyreq.areq;
    pce_dev->dummyreq.sreq.flags = 0;
    pce_dev->dummyreq.sreq.authkey = NULL;

    pce_dev->dummyreq.areq.src = pce_dev->dummyreq.sreq.src;
    pce_dev->dummyreq.areq.nbytes = pce_dev->dummyreq.sreq.size;

    return 0;
}
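/* Set DMA segment limits used when the CE sits behind a stage-1 SMMU. */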
static int qce_smmu_init(struct qce_device *pce_dev)
{
    struct device *dev = pce_dev->pdev;

    if (!dev->dma_parms) {
        dev->dma_parms = devm_kzalloc(dev,
                sizeof(*dev->dma_parms), GFP_KERNEL);
        if (!dev->dma_parms)
            return -ENOMEM;
    }
    dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
    dma_set_seg_boundary(dev, (unsigned long)DMA_BIT_MASK(64));
    return 0;
}
/* crypto engine open function. */
void *qce_open(struct platform_device *pdev, int *rc)
{
    struct qce_device *pce_dev;
    int i;
    static int pcedev_no = 1;

    pce_dev = kzalloc(sizeof(struct qce_device), GFP_KERNEL);
    if (!pce_dev) {
        *rc = -ENOMEM;
        pr_err("Can not allocate memory: %d\n", *rc);
        return NULL;
    }
    pce_dev->pdev = &pdev->dev;

    mutex_lock(&qce_iomap_mutex);
    if (pdev->dev.of_node) {
        *rc = __qce_get_device_tree_data(pdev, pce_dev);
        if (*rc)
            goto err_pce_dev;
    } else {
        *rc = -EINVAL;
        pr_err("Device Node not found.\n");
        goto err_pce_dev;
    }

    if (pce_dev->enable_s1_smmu) {
        if (qce_smmu_init(pce_dev)) {
            *rc = -EIO;
            goto err_pce_dev;
        }
    }

    for (i = 0; i < MAX_QCE_ALLOC_BAM_REQ; i++)
        atomic_set(&pce_dev->ce_request_info[i].in_use, false);
    pce_dev->ce_request_index = 0;

    pce_dev->memsize = 10 * PAGE_SIZE * MAX_QCE_ALLOC_BAM_REQ;
    pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev,
            pce_dev->memsize, &pce_dev->coh_pmem, GFP_KERNEL);
    if (pce_dev->coh_vmem == NULL) {
        *rc = -ENOMEM;
        pr_err("Can not allocate coherent memory for sps data\n");
        goto err_iobase;
    }

    pce_dev->iovec_memsize = TOTAL_IOVEC_SPACE_PER_PIPE *
                MAX_QCE_ALLOC_BAM_REQ * 2;
    pce_dev->iovec_vmem = kzalloc(pce_dev->iovec_memsize, GFP_KERNEL);
    if (pce_dev->iovec_vmem == NULL) {
        *rc = -ENOMEM;
        goto err_mem;
    }

    pce_dev->dummyreq_in_buf = kzalloc(DUMMY_REQ_DATA_LEN, GFP_KERNEL);
    if (pce_dev->dummyreq_in_buf == NULL) {
        *rc = -ENOMEM;
        goto err_mem;
    }
    *rc = __qce_init_clk(pce_dev);
    if (*rc)
        goto err_mem;
    *rc = qce_enable_clk(pce_dev);
    if (*rc)
        goto err_enable_clk;

    if (_probe_ce_engine(pce_dev)) {
        *rc = -ENXIO;
        goto err;
    }
    *rc = 0;

    qce_init_ce_cfg_val(pce_dev);
    *rc = qce_sps_init(pce_dev);
    if (*rc)
        goto err;
    qce_setup_ce_sps_data(pce_dev);
    qce_disable_clk(pce_dev);
    setup_dummy_req(pce_dev);
    atomic_set(&pce_dev->no_of_queued_req, 0);
    pce_dev->mode = IN_INTERRUPT_MODE;
    timer_setup(&(pce_dev->timer), qce_multireq_timeout, 0);
    //pce_dev->timer.function = qce_multireq_timeout;
    //pce_dev->timer.data = (unsigned long)pce_dev;
    pce_dev->timer.expires = jiffies + DELAY_IN_JIFFIES;
    pce_dev->intr_cadence = 0;
    pce_dev->dev_no = pcedev_no;
    pcedev_no++;
    pce_dev->owner = QCE_OWNER_NONE;
    qce_enable_clock_gating(pce_dev);
    mutex_unlock(&qce_iomap_mutex);
    return pce_dev;

err:
    qce_disable_clk(pce_dev);

err_enable_clk:
    __qce_deinit_clk(pce_dev);

err_mem:
    kfree(pce_dev->dummyreq_in_buf);
    kfree(pce_dev->iovec_vmem);
    if (pce_dev->coh_vmem)
        dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
                pce_dev->coh_vmem, pce_dev->coh_pmem);
err_iobase:
    if (pce_dev->iobase)
        iounmap(pce_dev->iobase);
err_pce_dev:
    mutex_unlock(&qce_iomap_mutex);
    kfree(pce_dev);
    return NULL;
}
EXPORT_SYMBOL(qce_open);
/* crypto engine close function. */
int qce_close(void *handle)
{
    struct qce_device *pce_dev = (struct qce_device *) handle;

    if (handle == NULL)
        return -ENODEV;

    mutex_lock(&qce_iomap_mutex);
    qce_enable_clk(pce_dev);
    qce_sps_exit(pce_dev);

    if (pce_dev->iobase)
        iounmap(pce_dev->iobase);
    if (pce_dev->coh_vmem)
        dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
                pce_dev->coh_vmem, pce_dev->coh_pmem);
    kfree(pce_dev->dummyreq_in_buf);
    kfree(pce_dev->iovec_vmem);

    qce_disable_clk(pce_dev);
    __qce_deinit_clk(pce_dev);
    mutex_unlock(&qce_iomap_mutex);
    kfree(handle);

    return 0;
}
EXPORT_SYMBOL(qce_close);
#define OTA_SUPPORT_MASK (1 << CRYPTO_ENCR_SNOW3G_SEL |\
                1 << CRYPTO_ENCR_KASUMI_SEL |\
                1 << CRYPTO_AUTH_SNOW3G_SEL |\
                1 << CRYPTO_AUTH_KASUMI_SEL)
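/*
 * Report the capabilities of this crypto engine instance to the caller,
 * based on the probed hardware features and device-tree overrides.
 */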
int qce_hw_support(void *handle, struct ce_hw_support *ce_support)
{
    struct qce_device *pce_dev = (struct qce_device *)handle;

    if (ce_support == NULL)
        return -EINVAL;

    ce_support->sha1_hmac_20 = false;
    ce_support->sha1_hmac = false;
    ce_support->sha256_hmac = false;
    ce_support->sha_hmac = true;
    ce_support->cmac = true;
    ce_support->aes_key_192 = false;
    ce_support->aes_xts = true;
    if ((pce_dev->engines_avail & OTA_SUPPORT_MASK) == OTA_SUPPORT_MASK)
        ce_support->ota = true;
    else
        ce_support->ota = false;
    ce_support->bam = true;
    ce_support->is_shared = (pce_dev->is_shared == 1) ? true : false;
    ce_support->hw_key = pce_dev->support_hw_key;
    ce_support->aes_ccm = true;
    ce_support->clk_mgmt_sus_res = pce_dev->support_clk_mgmt_sus_res;
    ce_support->req_bw_before_clk = pce_dev->request_bw_before_clk;
    if (pce_dev->ce_bam_info.minor_version)
        ce_support->aligned_only = false;
    else
        ce_support->aligned_only = true;

    ce_support->use_sw_aes_cbc_ecb_ctr_algo =
                pce_dev->use_sw_aes_cbc_ecb_ctr_algo;
    ce_support->use_sw_aead_algo =
                pce_dev->use_sw_aead_algo;
    ce_support->use_sw_aes_xts_algo =
                pce_dev->use_sw_aes_xts_algo;
    ce_support->use_sw_ahash_algo =
                pce_dev->use_sw_ahash_algo;
    ce_support->use_sw_hmac_algo =
                pce_dev->use_sw_hmac_algo;
    ce_support->use_sw_aes_ccm_algo =
                pce_dev->use_sw_aes_ccm_algo;
    ce_support->ce_device = pce_dev->ce_bam_info.ce_device;
    ce_support->ce_hw_instance = pce_dev->ce_bam_info.ce_hw_instance;
    if (pce_dev->no_get_around)
        ce_support->max_request = MAX_QCE_BAM_REQ;
    else
        ce_support->max_request = 1;
    return 0;
}
EXPORT_SYMBOL(qce_hw_support);
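/* Log which request slots are in use and dump their descriptor FIFOs for debug. */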
void qce_dump_req(void *handle)
{
    int i;
    bool req_in_use;
    struct qce_device *pce_dev = (struct qce_device *)handle;

    for (i = 0; i < MAX_QCE_BAM_REQ; i++) {
        req_in_use = atomic_read(&pce_dev->ce_request_info[i].in_use);
        pr_info("%s: %d %d\n", __func__, i, req_in_use);
        if (req_in_use)
            _qce_dump_descr_fifos(pce_dev, i);
    }
}
EXPORT_SYMBOL(qce_dump_req);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Crypto Engine driver");