cs40l26.c

// SPDX-License-Identifier: GPL-2.0
//
// cs40l26.c -- CS40L26 Boosted Haptic Driver with Integrated DSP and
// Waveform Memory with Advanced Closed Loop Algorithms and LRA protection
//
// Copyright 2022 Cirrus Logic, Inc.
//
// Author: Fred Treven <[email protected]>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License version 2 as
// published by the Free Software Foundation.

#ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
#include <linux/vibrator/cs40l26.h>
#if IS_ENABLED(CONFIG_SEC_ABC)
#include <linux/sti/abc_common.h>
#endif
#else
#include <linux/mfd/cs40l26.h>
#endif

static const struct cs40l26_rom_regs cs40l26_rom_regs_a1_b0_b1 = {
	.pm_cur_state = 0x02800370,
	.pm_state_locks = 0x02800378,
	.pm_timeout_ticks = 0x02800350,
	.dsp_halo_state = 0x02800fa8,
	.event_map_table_event_data_packed = 0x02806FC4,
	.p_vibegen_rom = 0x02802154,
	.rom_pseq_end_of_script = 0x028003E8,
};

static const struct cs40l26_rom_regs cs40l26_rom_regs_b2 = { /* RC2 8.1.2 */
	.pm_cur_state = 0x02801F98,
	.pm_state_locks = 0x02801FA0,
	.pm_timeout_ticks = 0x02801F78,
	.dsp_halo_state = 0x02806AF8,
	.event_map_table_event_data_packed = 0x02806FB0,
	.p_vibegen_rom = 0x02802F50,
	.rom_pseq_end_of_script = 0x02802040,
};

#ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
static const char * const vibe_state_events[] = {
	[CS40L26_VIBE_STATE_EVENT_MBOX_PLAYBACK] = "MBOX_PLAYBACK",
	[CS40L26_VIBE_STATE_EVENT_MBOX_COMPLETE] = "MBOX_COMPLETE",
	[CS40L26_VIBE_STATE_EVENT_GPIO_TRIGGER] = "GPIO_TRIGGER",
	[CS40L26_VIBE_STATE_EVENT_GPIO_COMPLETE] = "GPIO_COMPLETE",
	[CS40L26_VIBE_STATE_EVENT_ASP_START] = "ASP_START",
	[CS40L26_VIBE_STATE_EVENT_ASP_STOP] = "ASP_STOP",
};

static const char * const vibe_state_strings[] = {
	[CS40L26_VIBE_STATE_STOPPED] = "VIBE_STATE_STOPPED",
	[CS40L26_VIBE_STATE_HAPTIC] = "VIBE_STATE_HAPTIC",
	[CS40L26_VIBE_STATE_ASP] = "VIBE_STATE_ASP",
};

static const char * const pm_state_strings[] = {
	[CS40L26_PM_STATE_HIBERNATE] = "HIBERNATE",
	[CS40L26_PM_STATE_WAKEUP] = "WAKEUP",
	[CS40L26_PM_STATE_PREVENT_HIBERNATE] = "PREVENT_HIBERNATE",
	[CS40L26_PM_STATE_ALLOW_HIBERNATE] = "ALLOW_HIBERNATE",
	[CS40L26_PM_STATE_SHUTDOWN] = "SHUTDOWN",
};
#endif

static inline bool section_complete(struct cs40l26_owt_section *s)
{
	return s->delay ? true : false;
}

static u32 gpio_map_get(struct device *dev, enum cs40l26_gpio_map gpio)
{
	const char *name = (gpio == CS40L26_GPIO_MAP_A_PRESS) ?
			"cirrus,press-index" : "cirrus,release-index";
	u32 bank_idx_pair[2];
	int error;

	error = device_property_read_u32_array(dev, name, bank_idx_pair, 2);
	if (error)
		return error;

	if (bank_idx_pair[0] == CS40L26_RAM_BANK_ID)
		return (bank_idx_pair[1] & CS40L26_BTN_INDEX_MASK) | (1 << CS40L26_BTN_BANK_SHIFT);
	else if (bank_idx_pair[0] == CS40L26_ROM_BANK_ID)
		return (bank_idx_pair[1] & CS40L26_BTN_INDEX_MASK);

	return CS40L26_EVENT_MAP_GPI_DISABLE;
}
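
/*
 * Low-level DSP register accessors: retry the regmap read/write up to
 * CS40L26_DSP_TIMEOUT_COUNT times, sleeping between attempts, in case the
 * DSP has not yet woken from hibernation.
 */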
static int cs40l26_dsp_read(struct cs40l26_private *cs40l26, u32 reg, u32 *val)
{
	struct regmap *regmap = cs40l26->regmap;
	struct device *dev = cs40l26->dev;
	u32 read_val;
	int i;

	for (i = 0; i < CS40L26_DSP_TIMEOUT_COUNT; i++) {
		if (regmap_read(regmap, reg, &read_val)) {
#ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
			dev_info(dev, "Failed to read 0x%X, attempt(s) = %d\n", reg, i + 1);
#else
			dev_dbg(dev, "Failed to read 0x%X, attempt(s) = %d\n", reg, i + 1);
#endif
		} else
			break;

		usleep_range(CS40L26_DSP_TIMEOUT_US_MIN, CS40L26_DSP_TIMEOUT_US_MAX);
	}

	if (i >= CS40L26_DSP_TIMEOUT_COUNT) {
		dev_err(dev, "Timed out attempting to read 0x%X\n", reg);
		return -ETIME;
	}

	*val = read_val;

	return 0;
}

static int cs40l26_dsp_write(struct cs40l26_private *cs40l26, u32 reg, u32 val)
{
	struct regmap *regmap = cs40l26->regmap;
	struct device *dev = cs40l26->dev;
	int i;

	for (i = 0; i < CS40L26_DSP_TIMEOUT_COUNT; i++) {
		if (regmap_write(regmap, reg, val)) {
#ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
			dev_info(dev, "Failed to write to 0x%X, attempt(s) = %d\n", reg, i + 1);
#else
			dev_dbg(dev, "Failed to write to 0x%X, attempt(s) = %d\n", reg, i + 1);
#endif
		} else
			break;

		usleep_range(CS40L26_DSP_TIMEOUT_US_MIN, CS40L26_DSP_TIMEOUT_US_MAX);
	}

	if (i >= CS40L26_DSP_TIMEOUT_COUNT) {
		dev_err(dev, "Timed out attempting to write to 0x%X\n", reg);
		return -ETIME;
	}

	return 0;
}
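
/*
 * Write a command to virtual mailbox 1, then poll until the DSP acknowledges
 * it by clearing the mailbox register back to zero.
 */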
int cs40l26_mailbox_write(struct cs40l26_private *cs40l26, u32 write_val)
{
	int i, error;
	u32 val;

	error = cs40l26_dsp_write(cs40l26, CS40L26_DSP_VIRTUAL1_MBOX_1, write_val);
	if (error)
		return error;

	for (i = 0; i < CS40L26_DSP_TIMEOUT_COUNT; i++) {
		error = cs40l26_dsp_read(cs40l26, CS40L26_DSP_VIRTUAL1_MBOX_1, &val);
		if (error)
			return error;

		if (val == 0x0)
			break;

		usleep_range(CS40L26_DSP_TIMEOUT_US_MIN, CS40L26_DSP_TIMEOUT_US_MAX);
	}

	if (i >= CS40L26_DSP_TIMEOUT_COUNT) {
		dev_err(cs40l26->dev, "Mailbox not acknowledged (0x%08X != 0x0)\n", val);
		return -ETIMEDOUT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cs40l26_mailbox_write);

int cs40l26_dsp_state_get(struct cs40l26_private *cs40l26, u8 *state)
{
	u32 reg, dsp_state;
	int error;

	if (cs40l26->fw_loaded) {
		error = cl_dsp_get_reg(cs40l26->dsp, "PM_CUR_STATE", CL_DSP_XM_UNPACKED_TYPE,
				CS40L26_PM_ALGO_ID, &reg);
		if (error) {
#ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
			dev_err(cs40l26->dev, "CANT READ PM_CUR_STATE %d\n", error);
#endif
			return error;
		}
	} else {
		reg = cs40l26->rom_regs->pm_cur_state;
	}

	error = cs40l26_dsp_read(cs40l26, reg, &dsp_state);
	if (error)
		return error;

	switch (dsp_state) {
	case CS40L26_DSP_STATE_HIBERNATE:
		/* intentionally fall through */
	case CS40L26_DSP_STATE_SHUTDOWN:
		/* intentionally fall through */
	case CS40L26_DSP_STATE_STANDBY:
		/* intentionally fall through */
	case CS40L26_DSP_STATE_ACTIVE:
		*state = CS40L26_DSP_STATE_MASK & dsp_state;
		break;
	default:
		dev_err(cs40l26->dev, "DSP state %u is invalid\n", dsp_state);
		error = -EINVAL;
	}

	return error;
}
EXPORT_SYMBOL_GPL(cs40l26_dsp_state_get);

int cs40l26_set_pll_loop(struct cs40l26_private *cs40l26, unsigned int pll_loop)
{
	int i;

	if (pll_loop != CS40L26_PLL_REFCLK_SET_OPEN_LOOP &&
			pll_loop != CS40L26_PLL_REFCLK_SET_CLOSED_LOOP) {
		dev_err(cs40l26->dev, "Invalid PLL Loop setting: %u\n", pll_loop);
		return -EINVAL;
	}

	/* Retry in case DSP is hibernating */
	for (i = 0; i < CS40L26_PLL_REFCLK_SET_ATTEMPTS; i++) {
		if (!regmap_update_bits(cs40l26->regmap, CS40L26_REFCLK_INPUT,
				CS40L26_PLL_REFCLK_LOOP_MASK,
				pll_loop << CS40L26_PLL_REFCLK_LOOP_SHIFT))
			break;
	}

	if (i == CS40L26_PLL_REFCLK_SET_ATTEMPTS) {
		dev_err(cs40l26->dev, "Failed to configure PLL\n");
		return -ETIMEDOUT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cs40l26_set_pll_loop);

int cs40l26_dbc_get(struct cs40l26_private *cs40l26, enum cs40l26_dbc_type dbc, unsigned int *val)
{
	struct device *dev = cs40l26->dev;
	unsigned int reg;
	int error;

	error = cs40l26_pm_enter(dev);
	if (error)
		return error;

	mutex_lock(&cs40l26->lock);

	error = cl_dsp_get_reg(cs40l26->dsp, cs40l26_dbc_params[dbc].name, CL_DSP_XM_UNPACKED_TYPE,
			CS40L26_EXT_ALGO_ID, &reg);
	if (error)
		goto err_pm;

	error = regmap_read(cs40l26->regmap, reg, val);
	if (error)
		dev_err(dev, "Failed to read Dynamic Boost Control value\n");

err_pm:
	mutex_unlock(&cs40l26->lock);
	cs40l26_pm_exit(dev);

	return error;
}
EXPORT_SYMBOL_GPL(cs40l26_dbc_get);

int cs40l26_dbc_set(struct cs40l26_private *cs40l26, enum cs40l26_dbc_type dbc, u32 val)
{
	struct device *dev = cs40l26->dev;
	u32 reg, write_val;
	int error;

	if (val > cs40l26_dbc_params[dbc].max)
		write_val = cs40l26_dbc_params[dbc].max;
	else if (val < cs40l26_dbc_params[dbc].min)
		write_val = cs40l26_dbc_params[dbc].min;
	else
		write_val = val;

	error = cl_dsp_get_reg(cs40l26->dsp, cs40l26_dbc_params[dbc].name,
			CL_DSP_XM_UNPACKED_TYPE, CS40L26_EXT_ALGO_ID, &reg);
	if (error)
		return error;

	error = regmap_write(cs40l26->regmap, reg, write_val);
	if (error)
		dev_err(dev, "Failed to write Dynamic Boost Control value\n");

	return error;
}
EXPORT_SYMBOL_GPL(cs40l26_dbc_set);
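
/*
 * PM timeout helpers: the standby and active timeouts live at fixed offsets
 * from the PM algorithm's PM_TIMER_TIMEOUT_TICKS control (or at ROM addresses
 * before firmware is loaded). Values are converted between milliseconds and
 * DSP ticks and clamped to the supported range.
 */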
int cs40l26_pm_timeout_ms_set(struct cs40l26_private *cs40l26, unsigned int dsp_state,
		u32 timeout_ms)
{
	u32 reg, timeout_ticks;
	unsigned int min;
	int error;

	if (cs40l26->fw_loaded) {
		error = cl_dsp_get_reg(cs40l26->dsp, "PM_TIMER_TIMEOUT_TICKS",
				CL_DSP_XM_UNPACKED_TYPE, CS40L26_PM_ALGO_ID, &reg);
		if (error)
			return error;
	} else {
		reg = cs40l26->rom_regs->pm_timeout_ticks;
	}

	if (dsp_state == CS40L26_DSP_STATE_STANDBY) {
		reg += CS40L26_PM_STDBY_TIMEOUT_OFFSET;
		min = CS40L26_PM_STDBY_TIMEOUT_MS_MIN;
	} else if (dsp_state == CS40L26_DSP_STATE_ACTIVE) {
		reg += CS40L26_PM_ACTIVE_TIMEOUT_OFFSET;
		min = CS40L26_PM_ACTIVE_TIMEOUT_MS_MIN;
	} else {
		dev_err(cs40l26->dev, "Invalid DSP state: %u\n", dsp_state);
		return -EINVAL;
	}

	if (timeout_ms > CS40L26_PM_TIMEOUT_MS_MAX)
		timeout_ticks = CS40L26_PM_TIMEOUT_MS_MAX * CS40L26_PM_TICKS_PER_MS;
	else if (timeout_ms < min)
		timeout_ticks = min * CS40L26_PM_TICKS_PER_MS;
	else
		timeout_ticks = timeout_ms * CS40L26_PM_TICKS_PER_MS;

	error = regmap_write(cs40l26->regmap, reg, timeout_ticks);
	if (error)
		dev_err(cs40l26->dev, "Failed to set PM timeout: %d\n", error);

	return error;
}
EXPORT_SYMBOL_GPL(cs40l26_pm_timeout_ms_set);

int cs40l26_pm_timeout_ms_get(struct cs40l26_private *cs40l26, unsigned int dsp_state,
		u32 *timeout_ms)
{
	u32 reg, timeout_ticks;
	int error;

	if (cs40l26->fw_loaded) {
		error = cl_dsp_get_reg(cs40l26->dsp, "PM_TIMER_TIMEOUT_TICKS",
				CL_DSP_XM_UNPACKED_TYPE, CS40L26_PM_ALGO_ID, &reg);
		if (error)
			return error;
	} else {
		reg = cs40l26->rom_regs->pm_timeout_ticks;
	}

	if (dsp_state == CS40L26_DSP_STATE_STANDBY) {
		reg += CS40L26_PM_STDBY_TIMEOUT_OFFSET;
	} else if (dsp_state == CS40L26_DSP_STATE_ACTIVE) {
		reg += CS40L26_PM_ACTIVE_TIMEOUT_OFFSET;
	} else {
		dev_err(cs40l26->dev, "Invalid DSP state: %u\n", dsp_state);
		return -EINVAL;
	}

	error = regmap_read(cs40l26->regmap, reg, &timeout_ticks);
	if (error) {
		dev_err(cs40l26->dev, "Failed to get PM timeout: %d\n", error);
		return error;
	}

	*timeout_ms = timeout_ticks / CS40L26_PM_TICKS_PER_MS;

	return 0;
}
EXPORT_SYMBOL_GPL(cs40l26_pm_timeout_ms_get);

static inline void cs40l26_pm_runtime_setup(struct cs40l26_private *cs40l26)
{
#ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
	dev_info(cs40l26->dev, "%s+\n", __func__);
#endif
	pm_runtime_mark_last_busy(cs40l26->dev);
	pm_runtime_use_autosuspend(cs40l26->dev);
	pm_runtime_set_autosuspend_delay(cs40l26->dev, CS40L26_AUTOSUSPEND_DELAY_MS);
	pm_runtime_enable(cs40l26->dev);
#ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
	dev_info(cs40l26->dev, "%s-\n", __func__);
#endif
}

static inline void cs40l26_pm_runtime_teardown(struct cs40l26_private *cs40l26)
{
#ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
	dev_info(cs40l26->dev, "%s+ power.disable_depth=%d\n",
			__func__, cs40l26->dev->power.disable_depth);
	if (cs40l26->dev->power.disable_depth > 0)
		return;
#endif
	pm_runtime_dont_use_autosuspend(cs40l26->dev);
	pm_runtime_disable(cs40l26->dev);
#ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
	dev_info(cs40l26->dev, "%s-\n", __func__);
#endif
}

static int cs40l26_check_pm_lock(struct cs40l26_private *cs40l26, bool *locked)
{
	unsigned int dsp_lock;
	int error;

	error = regmap_read(cs40l26->regmap, cs40l26->rom_regs->pm_state_locks +
			CS40L26_DSP_LOCK3_OFFSET, &dsp_lock);
	if (error)
		return error;

	if (dsp_lock & CS40L26_DSP_LOCK3_MASK)
		*locked = true;
	else
		*locked = false;

	return 0;
}

static void cs40l26_remove_asp_scaling(struct cs40l26_private *cs40l26)
{
	struct device *dev = cs40l26->dev;
	u16 gain;

	if (cs40l26->asp_scale_pct >= CS40L26_GAIN_FULL_SCALE || !cs40l26->scaling_applied)
		return;

	gain = cs40l26->gain_tmp;

	if (gain >= CS40L26_NUM_PCT_MAP_VALUES) {
		dev_err(dev, "Gain %u%% out of bounds\n", gain);
		return;
	}

	cs40l26->gain_pct = gain;
	cs40l26->scaling_applied = false;

	queue_work(cs40l26->vibe_workqueue, &cs40l26->set_gain_work);
}
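
/*
 * Request a power-management state change from the DSP via the mailbox.
 * PREVENT_HIBERNATE is retried until the DSP reports ACTIVE (or STANDBY with
 * the PM lock held); ALLOW_HIBERNATE additionally arms the hibernate timer so
 * the elapsed time can later be reported back to the firmware.
 */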
int cs40l26_pm_state_transition(struct cs40l26_private *cs40l26, enum cs40l26_pm_state state)
{
	struct device *dev = cs40l26->dev;
	u32 cmd, he_time_cmd, he_time_cmd_payload;
	u8 curr_state;
	bool dsp_lock;
	int error, i;

	cmd = (u32) CS40L26_DSP_MBOX_PM_CMD_BASE + state;

	switch (state) {
	case CS40L26_PM_STATE_WAKEUP:
		error = cs40l26_mailbox_write(cs40l26, cmd);
		if (error)
			return error;

		break;
	case CS40L26_PM_STATE_PREVENT_HIBERNATE:
		for (i = 0; i < CS40L26_DSP_STATE_ATTEMPTS; i++) {
			error = cs40l26_mailbox_write(cs40l26, cmd);
			if (error)
				return error;

			error = cs40l26_dsp_state_get(cs40l26, &curr_state);
			if (error)
				return error;

			if (curr_state == CS40L26_DSP_STATE_ACTIVE)
				break;

			if (curr_state == CS40L26_DSP_STATE_STANDBY) {
				error = cs40l26_check_pm_lock(cs40l26, &dsp_lock);
				if (error)
					return error;

				if (dsp_lock)
					break;
			}

			usleep_range(5000, 5100);
		}

		if (i == CS40L26_DSP_STATE_ATTEMPTS) {
			dev_err(cs40l26->dev, "DSP not starting\n");
			return -ETIMEDOUT;
		}

		if (cs40l26->allow_hibernate_sent) {
			/*
			 * send time elapsed since last ALLOW_HIBERNATE mailbox
			 * command to provide input to thermal model
			 */
			if (timer_pending(&cs40l26->hibernate_timer)) {
				he_time_cmd_payload = ktime_to_ms(ktime_sub(
						ktime_get_boottime(),
						cs40l26->allow_hibernate_ts));
				if (he_time_cmd_payload > CS40L26_DSP_MBOX_HE_PAYLOAD_MAX_MS)
					he_time_cmd_payload = CS40L26_DSP_MBOX_HE_PAYLOAD_OVERFLOW;
			} else {
				he_time_cmd_payload = CS40L26_DSP_MBOX_HE_PAYLOAD_OVERFLOW;
			}

			dev_dbg(dev, "HE_TIME payload, 0x%06X", he_time_cmd_payload);

			he_time_cmd = CS40L26_DSP_MBOX_CMD_HE_TIME_BASE | he_time_cmd_payload;

			error = cs40l26_dsp_write(cs40l26, CS40L26_DSP_VIRTUAL1_MBOX_1,
					he_time_cmd);
			if (error)
				return error;
		}

		break;
	case CS40L26_PM_STATE_ALLOW_HIBERNATE:
		cs40l26->wksrc_sts = 0x00;

		error = cs40l26_dsp_write(cs40l26, CS40L26_DSP_VIRTUAL1_MBOX_1, cmd);
		if (error)
			return error;

		cs40l26->allow_hibernate_sent = true;

		mod_timer(&cs40l26->hibernate_timer, jiffies +
				msecs_to_jiffies(CS40L26_DSP_MBOX_HE_PAYLOAD_MAX_MS));

		cs40l26->allow_hibernate_ts = ktime_get_boottime();

		break;
	case CS40L26_PM_STATE_SHUTDOWN:
		cs40l26->wksrc_sts = 0x00;

		error = cs40l26_mailbox_write(cs40l26, cmd);
		if (error)
			return error;

		break;
	default:
		dev_err(dev, "Invalid PM state: %u\n", state);
		return -EINVAL;
	}

	cs40l26->pm_state = state;

#ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
	if (state <= CS40L26_PM_STATE_SHUTDOWN)
		dev_info(dev, "%s PM state: %s\n", __func__, pm_state_strings[state]);
#endif

	return 0;
}

static int cs40l26_dsp_start(struct cs40l26_private *cs40l26)
{
	u8 dsp_state;
	int error;

#ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
	dev_info(cs40l26->dev, "%s\n", __func__);
#endif
	error = regmap_write(cs40l26->regmap, CS40L26_DSP1_CCM_CORE_CONTROL,
			CS40L26_DSP_CCM_CORE_RESET);
	if (error) {
		dev_err(cs40l26->dev, "Failed to reset DSP core\n");
		return error;
	}

	error = cs40l26_dsp_state_get(cs40l26, &dsp_state);
	if (error)
		return error;

	if (dsp_state != CS40L26_DSP_STATE_ACTIVE && dsp_state != CS40L26_DSP_STATE_STANDBY) {
		dev_err(cs40l26->dev, "Failed to wake DSP core\n");
		return -EINVAL;
	}

	return 0;
}

static int cs40l26_dsp_pre_config(struct cs40l26_private *cs40l26)
{
	u32 halo_state, timeout_ms;
	u8 dsp_state;
	int error, i;

#ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
	dev_info(cs40l26->dev, "%s\n", __func__);
#endif
	error = cs40l26_pm_state_transition(cs40l26, CS40L26_PM_STATE_PREVENT_HIBERNATE);
	if (error)
		return error;

	error = regmap_read(cs40l26->regmap, cs40l26->rom_regs->dsp_halo_state, &halo_state);
	if (error) {
		dev_err(cs40l26->dev, "Failed to get HALO state\n");
		return error;
	}

	if (halo_state != CS40L26_DSP_HALO_STATE_RUN) {
		dev_err(cs40l26->dev, "DSP not Ready: HALO_STATE: %08X\n", halo_state);
		return -EINVAL;
	}

	error = cs40l26_pm_timeout_ms_get(cs40l26, CS40L26_DSP_STATE_ACTIVE, &timeout_ms);
	if (error)
		return error;

	for (i = 0; i < CS40L26_DSP_SHUTDOWN_MAX_ATTEMPTS; i++) {
		error = cs40l26_dsp_state_get(cs40l26, &dsp_state);
		if (error) {
#ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
			dev_err(cs40l26->dev, "DSP state get fail\n");
#endif
			return error;
		}

		if (dsp_state != CS40L26_DSP_STATE_SHUTDOWN &&
				dsp_state != CS40L26_DSP_STATE_STANDBY)
			dev_warn(cs40l26->dev, "DSP core not safe to kill\n");
		else
			break;

		usleep_range(CS40L26_MS_TO_US(timeout_ms), CS40L26_MS_TO_US(timeout_ms) + 100);
	}

	if (i == CS40L26_DSP_SHUTDOWN_MAX_ATTEMPTS) {
		dev_err(cs40l26->dev, "DSP Core could not be shut down\n");
		return -EINVAL;
	}

	error = regmap_write(cs40l26->regmap, CS40L26_DSP1_CCM_CORE_CONTROL,
			CS40L26_DSP_CCM_CORE_KILL);
	if (error)
		dev_err(cs40l26->dev, "Failed to kill DSP core\n");

#ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
	dev_info(cs40l26->dev, "%s done ret %d\n", __func__, error);
#endif
	return error;
}
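
/*
 * Pop one response word from the firmware's circular mailbox queue.
 * Returns 0 and advances QUEUE_RD on success, 1 when the queue is empty,
 * or a negative error code on register access failure.
 */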
static int cs40l26_mbox_buffer_read(struct cs40l26_private *cs40l26, u32 *val)
{
	struct regmap *regmap = cs40l26->regmap;
	struct device *dev = cs40l26->dev;
	u32 base, last, len, mbox_response, read_ptr, reg, status, write_ptr;
	u32 buffer[CS40L26_DSP_MBOX_BUFFER_NUM_REGS];
	int error;

	error = cl_dsp_get_reg(cs40l26->dsp, "QUEUE_BASE", CL_DSP_XM_UNPACKED_TYPE,
			CS40L26_MAILBOX_ALGO_ID, &reg);
	if (error)
		return error;

	error = regmap_bulk_read(regmap, reg, buffer, CS40L26_DSP_MBOX_BUFFER_NUM_REGS);
	if (error) {
		dev_err(dev, "Failed to read buffer contents\n");
		return error;
	}

	base = buffer[0];
	len = buffer[1];
	write_ptr = buffer[2];
	read_ptr = buffer[3];
	last = base + ((len - 1) * CL_DSP_BYTES_PER_WORD);

	error = cl_dsp_get_reg(cs40l26->dsp, "STATUS", CL_DSP_XM_UNPACKED_TYPE,
			CS40L26_MAILBOX_ALGO_ID, &reg);
	if (error)
		return error;

	error = regmap_read(regmap, reg, &status);
	if (error) {
		dev_err(dev, "Failed to read mailbox status\n");
		return error;
	}

	if (status) {
		dev_err(dev, "Mailbox status error: 0x%X\n", status);
		return -ENOSPC;
	}

	if (read_ptr == write_ptr) {
#ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
		dev_info(dev, "Reached end of queue\n");
#else
		dev_dbg(dev, "Reached end of queue\n");
#endif
		return 1;
	}

	error = regmap_read(regmap, read_ptr, &mbox_response);
	if (error) {
		dev_err(dev, "Failed to read from mailbox buffer\n");
		return error;
	}

	if (read_ptr == last)
		read_ptr = base;
	else
		read_ptr += CL_DSP_BYTES_PER_WORD;

	error = cl_dsp_get_reg(cs40l26->dsp, "QUEUE_RD", CL_DSP_XM_UNPACKED_TYPE,
			CS40L26_MAILBOX_ALGO_ID, &reg);
	if (error)
		return error;

	error = regmap_write(regmap, reg, read_ptr);
	if (error) {
		dev_err(dev, "Failed to update read pointer\n");
		return error;
	}

	*val = mbox_response;

	return 0;
}
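
/*
 * Threaded IRQ handler: drain the mailbox queue and dispatch each response,
 * updating vibe state, completing calibration waiters, and reporting DSP
 * panics.
 */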
static irqreturn_t cs40l26_handle_mbox_buffer(int irq, void *data)
{
	struct cs40l26_private *cs40l26 = data;
	irqreturn_t irq_status = IRQ_HANDLED;
	struct device *dev = cs40l26->dev;
	u32 val = 0;
	int error;

	mutex_lock(&cs40l26->lock);

	while (!cs40l26_mbox_buffer_read(cs40l26, &val)) {
		if ((val & CS40L26_DSP_MBOX_CMD_INDEX_MASK) == CS40L26_DSP_MBOX_PANIC) {
			dev_alert(dev, "DSP PANIC! Error condition: 0x%06X\n",
					(u32) (val & CS40L26_DSP_MBOX_CMD_PAYLOAD_MASK));
			irq_status = IRQ_HANDLED;
			goto err_mutex;
		}

		if ((val & CS40L26_DSP_MBOX_CMD_INDEX_MASK) == CS40L26_DSP_MBOX_WATERMARK) {
			dev_dbg(dev, "Mailbox: WATERMARK\n");
#ifdef CONFIG_DEBUG_FS
			error = cl_dsp_logger_update(cs40l26->cl_dsp_db);
			if (error) {
				irq_status = IRQ_NONE;
				goto err_mutex;
			}
#endif
			continue;
		}

		switch (val) {
		case CS40L26_DSP_MBOX_COMPLETE_MBOX:
#ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
			dev_info(dev, "Mailbox: COMPLETE_MBOX\n");
#else
			dev_dbg(dev, "Mailbox: COMPLETE_MBOX\n");
#endif
			complete_all(&cs40l26->erase_cont);
			cs40l26_vibe_state_update(cs40l26, CS40L26_VIBE_STATE_EVENT_MBOX_COMPLETE);
			break;
		case CS40L26_DSP_MBOX_COMPLETE_GPIO:
#ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
			dev_info(dev, "Mailbox: COMPLETE_GPIO\n");
#else
			dev_dbg(dev, "Mailbox: COMPLETE_GPIO\n");
#endif
			cs40l26_vibe_state_update(cs40l26, CS40L26_VIBE_STATE_EVENT_GPIO_COMPLETE);
			break;
		case CS40L26_DSP_MBOX_COMPLETE_I2S:
#ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
			dev_info(dev, "Mailbox: COMPLETE_I2S\n");
#else
			dev_dbg(dev, "Mailbox: COMPLETE_I2S\n");
#endif
			/* ASP is interrupted */
			if (cs40l26->asp_enable)
				complete(&cs40l26->i2s_cont);
			break;
		case CS40L26_DSP_MBOX_TRIGGER_I2S:
#ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
			dev_info(dev, "Mailbox: TRIGGER_I2S\n");
#else
			dev_dbg(dev, "Mailbox: TRIGGER_I2S\n");
#endif
			complete(&cs40l26->i2s_cont);
			break;
		case CS40L26_DSP_MBOX_TRIGGER_CP:
			if (!cs40l26->vibe_state_reporting) {
				dev_err(dev, "vibe_state not supported\n");
				irq_status = IRQ_HANDLED;
				goto err_mutex;
			}

#ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
			dev_info(dev, "Mailbox: TRIGGER_CP\n");
#else
			dev_dbg(dev, "Mailbox: TRIGGER_CP\n");
#endif
			cs40l26_vibe_state_update(cs40l26, CS40L26_VIBE_STATE_EVENT_MBOX_PLAYBACK);
			break;
		case CS40L26_DSP_MBOX_TRIGGER_GPIO:
#ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
			dev_info(dev, "Mailbox: TRIGGER_GPIO\n");
#else
			dev_dbg(dev, "Mailbox: TRIGGER_GPIO\n");
#endif
			cs40l26_vibe_state_update(cs40l26, CS40L26_VIBE_STATE_EVENT_GPIO_TRIGGER);
			break;
		case CS40L26_DSP_MBOX_PM_AWAKE:
			cs40l26->wksrc_sts |= CS40L26_WKSRC_STS_EN;
#ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
			dev_info(dev, "Mailbox: AWAKE\n");
#else
			dev_dbg(dev, "Mailbox: AWAKE\n");
#endif
			break;
		case CS40L26_DSP_MBOX_F0_EST_START:
#ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
			dev_info(dev, "Mailbox: F0_EST_START\n");
#else
			dev_dbg(dev, "Mailbox: F0_EST_START\n");
#endif
			break;
		case CS40L26_DSP_MBOX_F0_EST_DONE:
#ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
			dev_info(dev, "Mailbox: F0_EST_DONE\n");
			cs40l26->sec_vib_ddata.trigger_calibration = 0;
#else
			dev_dbg(dev, "Mailbox: F0_EST_DONE\n");
#endif
			complete(&cs40l26->cal_f0_cont);
			break;
		case CS40L26_DSP_MBOX_REDC_EST_START:
#ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
			dev_info(dev, "Mailbox: REDC_EST_START\n");
#else
			dev_dbg(dev, "Mailbox: REDC_EST_START\n");
#endif
			break;
		case CS40L26_DSP_MBOX_REDC_EST_DONE:
#ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
			dev_info(dev, "Mailbox: REDC_EST_DONE\n");
#else
			dev_dbg(dev, "Mailbox: REDC_EST_DONE\n");
#endif
			complete(&cs40l26->cal_redc_cont);
			break;
		case CS40L26_DSP_MBOX_LS_CALIBRATION_START:
#ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
			dev_info(dev, "Mailbox: LS_CALIBRATION_START\n");
#else
			dev_dbg(dev, "Mailbox: LS_CALIBRATION_START\n");
#endif
			break;
		case CS40L26_DSP_MBOX_LS_CALIBRATION_DONE:
#ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
			dev_info(dev, "Mailbox: LS_CALIBRATION_DONE\n");
#else
			dev_dbg(dev, "Mailbox: LS_CALIBRATION_DONE\n");
#endif
			complete(&cs40l26->cal_ls_cont);
			break;
		case CS40L26_DSP_MBOX_LS_CALIBRATION_ERROR:
			dev_warn(dev, "Mailbox: LS_CALIBRATION_ERROR\n");
			complete(&cs40l26->cal_ls_cont);
			break;
		case CS40L26_DSP_MBOX_LE_EST_START:
#ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
			dev_info(dev, "Mailbox: LE_EST_START\n");
#else
			dev_dbg(dev, "Mailbox: LE_EST_START\n");
#endif
			break;
		case CS40L26_DSP_MBOX_LE_EST_DONE:
#ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
			dev_info(dev, "Mailbox: LE_EST_DONE\n");
#else
			dev_dbg(dev, "Mailbox: LE_EST_DONE\n");
#endif
			break;
		case CS40L26_DSP_MBOX_PEQ_CALCULATION_START:
#ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
			dev_info(dev, "Mailbox: PEQ_CALCULATION_START\n");
#else
			dev_dbg(dev, "Mailbox: PEQ_CALCULATION_START\n");
#endif
			break;
		case CS40L26_DSP_MBOX_PEQ_CALCULATION_DONE:
#ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
			dev_info(dev, "Mailbox: PEQ_CALCULATION_DONE\n");
#else
			dev_dbg(dev, "Mailbox: PEQ_CALCULATION_DONE\n");
#endif
			complete(&cs40l26->cal_dvl_peq_cont);
			break;
		case CS40L26_DSP_MBOX_SYS_ACK:
			dev_err(dev, "Mailbox: ACK\n");
			irq_status = IRQ_HANDLED;
			goto err_mutex;
		default:
			dev_err(dev, "MBOX buffer value (0x%X) is invalid\n", val);
			irq_status = IRQ_HANDLED;
			goto err_mutex;
		}
	}

err_mutex:
	mutex_unlock(&cs40l26->lock);

	return irq_status;
}
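
/*
 * Copy the measured F0 (Q9.14) to the DVL algorithm's LRA_NORM_F0 control,
 * normalizing it against the current global sample rate into Q1.23 format.
 */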
int cs40l26_copy_f0_est_to_dvl(struct cs40l26_private *cs40l26)
{
	u32 reg, f0_measured_q9_14, global_sample_rate, normalized_f0_q1_23;
	int error, sample_rate;

	/* Must be awake and under mutex lock */
	error = regmap_read(cs40l26->regmap, CS40L26_GLOBAL_SAMPLE_RATE, &global_sample_rate);
	if (error)
		return error;

	switch (global_sample_rate & CS40L26_GLOBAL_FS_MASK) {
	case CS40L26_GLOBAL_FS_48K:
		sample_rate = 48000;
		break;
	case CS40L26_GLOBAL_FS_96K:
		sample_rate = 96000;
		break;
	default:
		dev_warn(cs40l26->dev, "Invalid GLOBAL_FS, %08X", global_sample_rate);
		return -EINVAL;
	}

	error = cl_dsp_get_reg(cs40l26->dsp, "F0_EST", CL_DSP_XM_UNPACKED_TYPE,
			CS40L26_F0_EST_ALGO_ID, &reg);
	if (error)
		return error;

	error = regmap_read(cs40l26->regmap, reg, &f0_measured_q9_14);
	if (error)
		return error;

	error = cl_dsp_get_reg(cs40l26->dsp, "LRA_NORM_F0", CL_DSP_XM_UNPACKED_TYPE,
			CS40L26_DVL_ALGO_ID, &reg);
	if (error)
		return error;

	normalized_f0_q1_23 = (f0_measured_q9_14 << 9) / sample_rate;

	return regmap_write(cs40l26->regmap, reg, normalized_f0_q1_23);
}
EXPORT_SYMBOL_GPL(cs40l26_copy_f0_est_to_dvl);

int cs40l26_asp_start(struct cs40l26_private *cs40l26)
{
	int error;

#ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
	cs40l26->asp_scale_pct = sec_vib_inputff_get_ach_percent(&cs40l26->sec_vib_ddata);
	dev_info(cs40l26->dev, "%s calls stop playback. asp_scale_pct : %d\n",
			__func__, cs40l26->asp_scale_pct);
#endif
	if (cs40l26->asp_scale_pct < CS40L26_GAIN_FULL_SCALE)
		queue_work(cs40l26->vibe_workqueue, &cs40l26->set_gain_work);

	error = cs40l26_mailbox_write(cs40l26, CS40L26_STOP_PLAYBACK);
	if (error) {
		dev_err(cs40l26->dev, "Failed to stop playback before I2S start\n");
		return error;
	}

	reinit_completion(&cs40l26->i2s_cont);

	return cs40l26_mailbox_write(cs40l26, CS40L26_DSP_MBOX_CMD_START_I2S);
}
EXPORT_SYMBOL_GPL(cs40l26_asp_start);
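
/*
 * Track how many effects are currently in flight and derive the externally
 * visible vibe_state (HAPTIC, ASP or STOPPED). Must be called with
 * cs40l26->lock held; userspace is notified via the vibe_state sysfs node.
 */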
void cs40l26_vibe_state_update(struct cs40l26_private *cs40l26, enum cs40l26_vibe_state_event event)
{
	if (!mutex_is_locked(&cs40l26->lock)) {
		dev_err(cs40l26->dev, "%s must be called under mutex lock\n", __func__);
		return;
	}

#ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
	dev_info(cs40l26->dev, "%s: effects_in_flight = %d\n", __func__, cs40l26->effects_in_flight);
#else
	dev_dbg(cs40l26->dev, "effects_in_flight = %d\n", cs40l26->effects_in_flight);
#endif

	switch (event) {
	case CS40L26_VIBE_STATE_EVENT_MBOX_PLAYBACK:
	case CS40L26_VIBE_STATE_EVENT_GPIO_TRIGGER:
		cs40l26_remove_asp_scaling(cs40l26);
		cs40l26->effects_in_flight = cs40l26->effects_in_flight <= 0 ? 1 :
				cs40l26->effects_in_flight + 1;
		break;
	case CS40L26_VIBE_STATE_EVENT_MBOX_COMPLETE:
	case CS40L26_VIBE_STATE_EVENT_GPIO_COMPLETE:
		cs40l26->effects_in_flight = cs40l26->effects_in_flight <= 0 ? 0 :
				cs40l26->effects_in_flight - 1;
		if (cs40l26->effects_in_flight == 0 && cs40l26->asp_enable)
			if (cs40l26_asp_start(cs40l26)) {
#ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
				dev_err(cs40l26->dev, "%s error -\n", __func__);
#endif
				return;
			}
		break;
	case CS40L26_VIBE_STATE_EVENT_ASP_START:
		cs40l26->asp_enable = true;
		break;
	case CS40L26_VIBE_STATE_EVENT_ASP_STOP:
		cs40l26_remove_asp_scaling(cs40l26);
		cs40l26->asp_enable = false;
		break;
	default:
		dev_err(cs40l26->dev, "Invalid vibe state event: %d\n", event);
		break;
	}

	if (cs40l26->effects_in_flight)
		cs40l26->vibe_state = CS40L26_VIBE_STATE_HAPTIC;
	else if (cs40l26->asp_enable)
		cs40l26->vibe_state = CS40L26_VIBE_STATE_ASP;
	else
		cs40l26->vibe_state = CS40L26_VIBE_STATE_STOPPED;

	sysfs_notify(&cs40l26->dev->kobj, NULL, "vibe_state");

#ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
	if (event <= CS40L26_VIBE_STATE_EVENT_ASP_STOP)
		dev_info(cs40l26->dev, "%s vib_state(%s)->(%s) effects_in_flight(%d) asp_enable(%d)\n",
				__func__, vibe_state_events[event],
				vibe_state_strings[cs40l26->vibe_state],
				cs40l26->effects_in_flight, cs40l26->asp_enable);
#endif
}
EXPORT_SYMBOL_GPL(cs40l26_vibe_state_update);
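
/*
 * Release an actuator Safe Mode error by clearing, setting, then clearing the
 * corresponding bit in the ERROR_RELEASE register.
 */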
  890. static int cs40l26_error_release(struct cs40l26_private *cs40l26,
  891. unsigned int err_rls)
  892. {
  893. struct regmap *regmap = cs40l26->regmap;
  894. struct device *dev = cs40l26->dev;
  895. u32 err_sts, err_cfg;
  896. int error;
  897. error = regmap_read(regmap, CS40L26_ERROR_RELEASE, &err_sts);
  898. if (error) {
  899. dev_err(cs40l26->dev, "Failed to get error status\n");
  900. return error;
  901. }
  902. err_cfg = err_sts & ~BIT(err_rls);
  903. error = regmap_write(cs40l26->regmap, CS40L26_ERROR_RELEASE, err_cfg);
  904. if (error) {
  905. dev_err(dev, "Actuator Safe Mode release sequence failed\n");
  906. return error;
  907. }
  908. err_cfg |= BIT(err_rls);
  909. error = regmap_write(regmap, CS40L26_ERROR_RELEASE, err_cfg);
  910. if (error) {
  911. dev_err(dev, "Actuator Safe Mode release sequence failed\n");
  912. return error;
  913. }
  914. err_cfg &= ~BIT(err_rls);
error = regmap_write(regmap, CS40L26_ERROR_RELEASE, err_cfg);
  916. if (error)
  917. dev_err(dev, "Actuator Safe Mode release sequence failed\n");
  918. return error;
  919. }
  920. static int cs40l26_handle_pre_irq(void *irq_drv_data)
  921. {
  922. struct cs40l26_private *cs40l26 = irq_drv_data;
  923. unsigned int sts;
  924. int error;
  925. error = cs40l26_pm_enter(cs40l26->dev);
  926. if (error)
  927. return error;
  928. error = regmap_read(cs40l26->regmap, CS40L26_IRQ1_STATUS, &sts);
  929. if (error)
  930. goto err_pm;
  931. if (!(sts & CS40L26_IRQ_STATUS_MASK)) {
  932. dev_err(cs40l26->dev, "IRQ1 asserted with no pending interrupts\n");
  933. #if IS_ENABLED(CONFIG_CS40L26_SAMSUNG_FEATURE) && IS_ENABLED(CONFIG_SEC_ABC)
/* Reported to sec_abc as a fault if it occurs 25 times within 5 seconds */
  935. sec_abc_send_event("MODULE=vib@WARN=int_gnd_short");
  936. #endif
  937. }
  938. err_pm:
  939. cs40l26_pm_exit(cs40l26->dev);
  940. return error;
  941. }
  942. static irqreturn_t cs40l26_gpio_rise(int irq, void *data)
  943. {
  944. struct cs40l26_private *cs40l26 = data;
  945. mutex_lock(&cs40l26->lock);
  946. if (cs40l26->wksrc_sts & CS40L26_WKSRC_STS_EN) {
  947. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  948. dev_info(cs40l26->dev, "GPIO rising edge detected\n");
  949. #else
  950. dev_dbg(cs40l26->dev, "GPIO rising edge detected\n");
  951. #endif
  952. }
  953. cs40l26->wksrc_sts |= CS40L26_WKSRC_STS_EN;
  954. mutex_unlock(&cs40l26->lock);
  955. return IRQ_HANDLED;
  956. }
  957. static irqreturn_t cs40l26_gpio_fall(int irq, void *data)
  958. {
  959. struct cs40l26_private *cs40l26 = data;
  960. mutex_lock(&cs40l26->lock);
  961. if (cs40l26->wksrc_sts & CS40L26_WKSRC_STS_EN) {
  962. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  963. dev_info(cs40l26->dev, "GPIO falling edge detected\n");
  964. #else
  965. dev_dbg(cs40l26->dev, "GPIO falling edge detected\n");
  966. #endif
  967. }
  968. cs40l26->wksrc_sts |= CS40L26_WKSRC_STS_EN;
  969. mutex_unlock(&cs40l26->lock);
  970. return IRQ_HANDLED;
  971. }
  972. static irqreturn_t cs40l26_wakesource_any(int irq, void *data)
  973. {
  974. struct cs40l26_private *cs40l26 = data;
  975. irqreturn_t irq_return = IRQ_HANDLED;
  976. u32 reg, val;
  977. int error;
  978. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  979. dev_info(cs40l26->dev, "Wakesource detected (ANY)\n");
  980. #else
  981. dev_dbg(cs40l26->dev, "Wakesource detected (ANY)\n");
  982. #endif
  983. mutex_lock(&cs40l26->lock);
  984. error = regmap_read(cs40l26->regmap, CS40L26_PWRMGT_STS, &val);
  985. if (error) {
  986. dev_err(cs40l26->dev, "Failed to get Power Management Status\n");
  987. irq_return = IRQ_NONE;
  988. goto mutex_exit;
  989. }
  990. cs40l26->wksrc_sts = (u8) ((val & CS40L26_WKSRC_STS_MASK) >>
  991. CS40L26_WKSRC_STS_SHIFT);
  992. error = cl_dsp_get_reg(cs40l26->dsp, "LAST_WAKESRC_CTL",
  993. CL_DSP_XM_UNPACKED_TYPE, cs40l26->fw_id, &reg);
  994. if (error) {
  995. irq_return = IRQ_NONE;
  996. goto mutex_exit;
  997. }
  998. error = regmap_read(cs40l26->regmap, reg, &val);
  999. if (error) {
  1000. dev_err(cs40l26->dev, "Failed to read LAST_WAKESRC_CTL\n");
  1001. irq_return = IRQ_NONE;
  1002. goto mutex_exit;
  1003. }
  1004. cs40l26->last_wksrc_pol = (u8) (val & CS40L26_WKSRC_GPIO_POL_MASK);
  1005. mutex_exit:
  1006. mutex_unlock(&cs40l26->lock);
  1007. return irq_return;
  1008. }
  1009. static irqreturn_t cs40l26_wakesource_gpio(int irq, void *data)
  1010. {
  1011. struct cs40l26_private *cs40l26 = data;
  1012. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  1013. dev_info(cs40l26->dev, "GPIO event woke device from hibernate\n");
  1014. #else
  1015. dev_dbg(cs40l26->dev, "GPIO event woke device from hibernate\n");
  1016. #endif
  1017. mutex_lock(&cs40l26->lock);
  1018. if (cs40l26->wksrc_sts & cs40l26->last_wksrc_pol) {
  1019. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  1020. dev_info(cs40l26->dev, "GPIO falling edge detected\n");
  1021. #else
  1022. dev_dbg(cs40l26->dev, "GPIO falling edge detected\n");
  1023. #endif
  1024. cs40l26->wksrc_sts |= CS40L26_WKSRC_STS_EN;
  1025. } else {
  1026. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  1027. dev_info(cs40l26->dev, "GPIO rising edge detected\n");
  1028. #else
  1029. dev_dbg(cs40l26->dev, "GPIO rising edge detected\n");
  1030. #endif
  1031. }
  1032. mutex_unlock(&cs40l26->lock);
  1033. return IRQ_HANDLED;
  1034. }
  1035. static irqreturn_t cs40l26_wakesource_iic(int irq, void *data)
  1036. {
  1037. struct cs40l26_private *cs40l26 = data;
  1038. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  1039. dev_info(cs40l26->dev, "I2C event woke device from hibernate\n");
  1040. #else
  1041. dev_dbg(cs40l26->dev, "I2C event woke device from hibernate\n");
  1042. #endif
  1043. return IRQ_HANDLED;
  1044. }
  1045. static irqreturn_t cs40l26_bst_ovp_err(int irq, void *data)
  1046. {
  1047. struct cs40l26_private *cs40l26 = data;
  1048. dev_err(cs40l26->dev, "BST overvolt. error\n");
  1049. return IRQ_RETVAL(!cs40l26_error_release(cs40l26, CS40L26_BST_OVP_ERR_RLS));
  1050. }
  1051. static irqreturn_t cs40l26_bst_uv_err(int irq, void *data)
  1052. {
  1053. struct cs40l26_private *cs40l26 = data;
  1054. dev_err(cs40l26->dev, "BST undervolt. error\n");
  1055. return IRQ_RETVAL(!cs40l26_error_release(cs40l26, CS40L26_BST_UVP_ERR_RLS));
  1056. }
  1057. static irqreturn_t cs40l26_bst_short(int irq, void *data)
  1058. {
  1059. struct cs40l26_private *cs40l26 = data;
  1060. dev_err(cs40l26->dev, "LBST short detected\n");
  1061. return IRQ_RETVAL(!cs40l26_error_release(cs40l26, CS40L26_BST_SHORT_ERR_RLS));
  1062. }
  1063. static irqreturn_t cs40l26_ipk_flag(int irq, void *data)
  1064. {
  1065. struct cs40l26_private *cs40l26 = data;
  1066. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  1067. dev_info(cs40l26->dev, "Current is being limited by LBST inductor\n");
  1068. #else
  1069. dev_dbg(cs40l26->dev, "Current is being limited by LBST inductor\n");
  1070. #endif
  1071. return IRQ_HANDLED;
  1072. }
  1073. static irqreturn_t cs40l26_temp_err(int irq, void *data)
  1074. {
  1075. struct cs40l26_private *cs40l26 = data;
  1076. dev_err(cs40l26->dev, "Die overtemperature error\n");
  1077. return IRQ_RETVAL(!cs40l26_error_release(cs40l26, CS40L26_TEMP_ERR_RLS));
  1078. }
  1079. static irqreturn_t cs40l26_amp_short(int irq, void *data)
  1080. {
  1081. struct cs40l26_private *cs40l26 = data;
  1082. dev_err(cs40l26->dev, "AMP short detected\n");
  1083. return IRQ_RETVAL(!cs40l26_error_release(cs40l26, CS40L26_AMP_SHORT_ERR_RLS));
  1084. }
  1085. static irqreturn_t cs40l26_vpbr_flag(int irq, void *data)
  1086. {
  1087. struct cs40l26_private *cs40l26 = data;
  1088. dev_err(cs40l26->dev, "VP voltage has dropped below brownout threshold\n");
  1089. return IRQ_HANDLED;
  1090. }
  1091. static irqreturn_t cs40l26_vpbr_att_clr(int irq, void *data)
  1092. {
  1093. struct cs40l26_private *cs40l26 = data;
  1094. dev_warn(cs40l26->dev, "Cleared attenuation applied by VP brownout event\n");
  1095. return IRQ_HANDLED;
  1096. }
  1097. static irqreturn_t cs40l26_vbbr_flag(int irq, void *data)
  1098. {
  1099. struct cs40l26_private *cs40l26 = data;
  1100. dev_err(cs40l26->dev, "VBST voltage has dropped below brownout threshold\n");
  1101. return IRQ_HANDLED;
  1102. }
  1103. static irqreturn_t cs40l26_vbst_att_clr(int irq, void *data)
  1104. {
  1105. struct cs40l26_private *cs40l26 = data;
  1106. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  1107. dev_info(cs40l26->dev, "Cleared attenuation caused by VBST brownout\n");
  1108. #else
  1109. dev_dbg(cs40l26->dev, "Cleared attenuation caused by VBST brownout\n");
  1110. #endif
  1111. return IRQ_HANDLED;
  1112. }
  1113. static const struct cs40l26_irq cs40l26_irqs[] = {
  1114. CS40L26_IRQ(GPIO1_RISE, "GPIO1 rise", cs40l26_gpio_rise),
  1115. CS40L26_IRQ(GPIO1_FALL, "GPIO1 fall", cs40l26_gpio_fall),
  1116. CS40L26_IRQ(GPIO2_RISE, "GPIO2 rise", cs40l26_gpio_rise),
  1117. CS40L26_IRQ(GPIO2_FALL, "GPIO2 fall", cs40l26_gpio_fall),
  1118. CS40L26_IRQ(GPIO3_RISE, "GPIO3 rise", cs40l26_gpio_rise),
  1119. CS40L26_IRQ(GPIO3_FALL, "GPIO3 fall", cs40l26_gpio_fall),
  1120. CS40L26_IRQ(GPIO4_RISE, "GPIO4 rise", cs40l26_gpio_rise),
  1121. CS40L26_IRQ(GPIO4_FALL, "GPIO4 fall", cs40l26_gpio_fall),
  1122. CS40L26_IRQ(WKSRC_STS_ANY, "Wakesource any", cs40l26_wakesource_any),
  1123. CS40L26_IRQ(WKSRC_STS_GPIO1, "Wakesource GPIO1", cs40l26_wakesource_gpio),
  1124. CS40L26_IRQ(WKSRC_STS_GPIO2, "Wakesource GPIO2", cs40l26_wakesource_gpio),
  1125. CS40L26_IRQ(WKSRC_STS_GPIO3, "Wakesource GPIO3", cs40l26_wakesource_gpio),
  1126. CS40L26_IRQ(WKSRC_STS_GPIO4, "Wakesource GPIO4", cs40l26_wakesource_gpio),
  1127. CS40L26_IRQ(WKSRC_STS_I2C, "Wakesource I2C", cs40l26_wakesource_iic),
  1128. CS40L26_IRQ(BST_OVP_ERR, "Boost overvoltage error", cs40l26_bst_ovp_err),
  1129. CS40L26_IRQ(BST_DCM_UVP_ERR, "Boost undervoltage error", cs40l26_bst_uv_err),
  1130. CS40L26_IRQ(BST_SHORT_ERR, "Boost short", cs40l26_bst_short),
  1131. CS40L26_IRQ(BST_IPK_FLAG, "Current limited", cs40l26_ipk_flag),
  1132. CS40L26_IRQ(TEMP_ERR, "Die overtemperature error", cs40l26_temp_err),
  1133. CS40L26_IRQ(AMP_ERR, "Amp short", cs40l26_amp_short),
  1134. CS40L26_IRQ(VIRTUAL2_MBOX_WR, "Mailbox interrupt", cs40l26_handle_mbox_buffer),
  1135. CS40L26_IRQ(VPBR_FLAG, "VP brownout", cs40l26_vpbr_flag),
  1136. CS40L26_IRQ(VPBR_ATT_CLR, "VPBR attenuation cleared", cs40l26_vpbr_att_clr),
  1137. CS40L26_IRQ(VBBR_FLAG, "VBST brownout", cs40l26_vbbr_flag),
  1138. CS40L26_IRQ(VBBR_ATT_CLR, "VBST attenuation cleared", cs40l26_vbst_att_clr),
  1139. };
  1140. static const struct regmap_irq cs40l26_reg_irqs[] = {
  1141. CS40L26_REG_IRQ(IRQ1_EINT_1, GPIO1_RISE),
  1142. CS40L26_REG_IRQ(IRQ1_EINT_1, GPIO1_FALL),
  1143. CS40L26_REG_IRQ(IRQ1_EINT_1, GPIO2_RISE),
  1144. CS40L26_REG_IRQ(IRQ1_EINT_1, GPIO2_FALL),
  1145. CS40L26_REG_IRQ(IRQ1_EINT_1, GPIO3_RISE),
  1146. CS40L26_REG_IRQ(IRQ1_EINT_1, GPIO3_FALL),
  1147. CS40L26_REG_IRQ(IRQ1_EINT_1, GPIO4_RISE),
  1148. CS40L26_REG_IRQ(IRQ1_EINT_1, GPIO4_FALL),
  1149. CS40L26_REG_IRQ(IRQ1_EINT_1, WKSRC_STS_ANY),
  1150. CS40L26_REG_IRQ(IRQ1_EINT_1, WKSRC_STS_GPIO1),
  1151. CS40L26_REG_IRQ(IRQ1_EINT_1, WKSRC_STS_GPIO2),
  1152. CS40L26_REG_IRQ(IRQ1_EINT_1, WKSRC_STS_GPIO3),
  1153. CS40L26_REG_IRQ(IRQ1_EINT_1, WKSRC_STS_GPIO4),
  1154. CS40L26_REG_IRQ(IRQ1_EINT_1, WKSRC_STS_I2C),
  1155. CS40L26_REG_IRQ(IRQ1_EINT_1, BST_OVP_ERR),
  1156. CS40L26_REG_IRQ(IRQ1_EINT_1, BST_DCM_UVP_ERR),
  1157. CS40L26_REG_IRQ(IRQ1_EINT_1, BST_SHORT_ERR),
  1158. CS40L26_REG_IRQ(IRQ1_EINT_1, BST_IPK_FLAG),
  1159. CS40L26_REG_IRQ(IRQ1_EINT_1, TEMP_ERR),
  1160. CS40L26_REG_IRQ(IRQ1_EINT_1, AMP_ERR),
  1161. CS40L26_REG_IRQ(IRQ1_EINT_1, VIRTUAL2_MBOX_WR),
  1162. CS40L26_REG_IRQ(IRQ1_EINT_2, VPBR_FLAG),
  1163. CS40L26_REG_IRQ(IRQ1_EINT_2, VPBR_ATT_CLR),
  1164. CS40L26_REG_IRQ(IRQ1_EINT_2, VBBR_FLAG),
  1165. CS40L26_REG_IRQ(IRQ1_EINT_2, VBBR_ATT_CLR),
  1166. };
  1167. static struct regmap_irq_chip cs40l26_regmap_irq_chip = {
  1168. .name = "cs40l26 IRQ1 Controller",
  1169. .status_base = CS40L26_IRQ1_EINT_1,
  1170. .mask_base = CS40L26_IRQ1_MASK_1,
  1171. .ack_base = CS40L26_IRQ1_EINT_1,
  1172. .num_regs = 2,
  1173. .irqs = cs40l26_reg_irqs,
  1174. .num_irqs = ARRAY_SIZE(cs40l26_reg_irqs),
  1175. .handle_pre_irq = cs40l26_handle_pre_irq,
  1176. .runtime_pm = true,
  1177. };
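/*
 * Pack a register write into power-on write sequencer (PSEQ) op format.
 * Word 0 always carries the op code in its upper bits; the remaining address
 * and data bits are spread over two words (WRITE_ADDR8, WRITE_L16/H16) or
 * three words (WRITE_FULL) using the masks and shifts below. Addresses the
 * sequencer cannot reach are rejected for every op except WRITE_FULL.
 */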
  1178. static struct cs40l26_pseq_op *cs40l26_pseq_op_format(struct cs40l26_private *cs40l26,
  1179. u32 addr, u32 data, u8 op_code)
  1180. {
  1181. struct cs40l26_pseq_op *op;
  1182. if (op_code != CS40L26_PSEQ_OP_WRITE_FULL) {
  1183. if (addr & CS40L26_PSEQ_INVALID_ADDR) {
  1184. dev_err(cs40l26->dev, "Invalid PSEQ address: 0x%08X\n", addr);
  1185. return ERR_PTR(-EINVAL);
  1186. }
  1187. }
  1188. op = devm_kzalloc(cs40l26->dev, sizeof(struct cs40l26_pseq_op), GFP_KERNEL);
  1189. if (!op)
  1190. return ERR_PTR(-ENOMEM);
  1191. op->operation = op_code;
  1192. op->words[0] = op_code << CS40L26_PSEQ_OP_SHIFT;
  1193. switch (op_code) {
  1194. case CS40L26_PSEQ_OP_WRITE_FULL:
  1195. op->size = CS40L26_PSEQ_OP_WRITE_FULL_WORDS;
  1196. op->words[0] |= ((addr & CS40L26_PSEQ_WRITE_FULL_UPPER_ADDR_MASK) >>
  1197. CS40L26_PSEQ_WRITE_FULL_UPPER_ADDR_SHIFT);
  1198. op->words[1] = ((addr & CS40L26_PSEQ_WRITE_FULL_LOWER_ADDR_MASK) <<
  1199. CS40L26_PSEQ_WRITE_FULL_LOWER_ADDR_SHIFT);
  1200. op->words[1] |= ((data & CS40L26_PSEQ_WRITE_FULL_UPPER_DATA_MASK) >>
  1201. CS40L26_PSEQ_WRITE_FULL_UPPER_DATA_SHIFT);
  1202. op->words[2] = data & CS40L26_PSEQ_WRITE_FULL_LOWER_DATA_MASK;
  1203. break;
  1204. case CS40L26_PSEQ_OP_WRITE_L16:
  1205. case CS40L26_PSEQ_OP_WRITE_H16:
  1206. op->size = CS40L26_PSEQ_OP_WRITE_X16_WORDS;
  1207. op->words[0] |= ((addr & CS40L26_PSEQ_WRITE_X16_UPPER_ADDR_MASK) >>
  1208. CS40L26_PSEQ_WRITE_X16_UPPER_ADDR_SHIFT);
  1209. op->words[1] = ((addr & CS40L26_PSEQ_WRITE_X16_LOWER_ADDR_MASK) <<
  1210. CS40L26_PSEQ_WRITE_X16_LOWER_ADDR_SHIFT);
  1211. op->words[1] |= ((data & CS40L26_PSEQ_WRITE_X16_UPPER_DATA_MASK) >>
  1212. CS40L26_PSEQ_WRITE_X16_UPPER_DATA_SHIFT);
  1213. break;
  1214. case CS40L26_PSEQ_OP_WRITE_ADDR8:
  1215. op->size = CS40L26_PSEQ_OP_WRITE_ADDR8_WORDS;
  1216. op->words[0] |= ((addr & CS40L26_PSEQ_WRITE_ADDR8_ADDR_MASK) <<
  1217. CS40L26_PSEQ_WRITE_ADDR8_ADDR_SHIFT);
  1218. op->words[0] |= ((data & CS40L26_PSEQ_WRITE_ADDR8_UPPER_DATA_MASK) >>
  1219. CS40L26_PSEQ_WRITE_ADDR8_UPPER_DATA_SHIFT);
  1220. op->words[1] = data & CS40L26_PSEQ_WRITE_ADDR8_LOWER_DATA_MASK;
  1221. break;
  1222. default:
  1223. dev_err(cs40l26->dev, "Invalid PSEQ Op. Code 0x%02X\n", op_code);
  1224. return ERR_PTR(-EINVAL);
  1225. }
  1226. return op;
  1227. }
  1228. static int cs40l26_pseq_find_end(struct cs40l26_private *cs40l26, struct cs40l26_pseq_op **op_end)
  1229. {
  1230. u8 operation = 0;
  1231. struct cs40l26_pseq_op *op;
  1232. list_for_each_entry(op, &cs40l26->pseq_op_head, list) {
  1233. operation = op->operation;
  1234. if (operation == CS40L26_PSEQ_OP_END)
  1235. break;
  1236. }
  1237. if (operation != CS40L26_PSEQ_OP_END) {
  1238. dev_err(cs40l26->dev, "Failed to find PSEQ list terminator\n");
  1239. return -ENOENT;
  1240. }
  1241. *op_end = op;
  1242. return 0;
  1243. }
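/*
 * Add a write op to the power-on sequencer, or, when @update is set and a
 * matching op already exists, overwrite it in place. New ops are appended at
 * the current end-of-script offset and the OP_END terminator is pushed back
 * behind them; the in-memory op list mirrors what was written so it can be
 * searched on later updates.
 */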
  1244. int cs40l26_pseq_write(struct cs40l26_private *cs40l26, u32 addr,
  1245. u32 data, bool update, u8 op_code)
  1246. {
  1247. struct device *dev = cs40l26->dev;
  1248. bool is_new = true;
  1249. struct cs40l26_pseq_op *op, *op_new, *op_end;
  1250. int error;
  1251. op_new = cs40l26_pseq_op_format(cs40l26, addr, data, op_code);
  1252. if (IS_ERR_OR_NULL(op_new))
  1253. return op_new ? PTR_ERR(op_new) : -EINVAL;
  1254. if (update) {
  1255. list_for_each_entry(op, &cs40l26->pseq_op_head, list) {
  1256. if (op->words[0] == op_new->words[0] &&
  1257. (op->words[1] & CS40L26_PSEQ_OP_MASK) ==
  1258. (op_new->words[1] & CS40L26_PSEQ_OP_MASK)) {
  1259. if (op->size != op_new->size) {
  1260. dev_err(dev, "Failed to replace PSEQ op.\n");
  1261. error = -EINVAL;
  1262. goto op_new_free;
  1263. }
  1264. is_new = false;
  1265. break;
  1266. }
  1267. }
  1268. }
  1269. error = cs40l26_pseq_find_end(cs40l26, &op_end);
  1270. if (error)
  1271. goto op_new_free;
  1272. if (((CS40L26_PSEQ_MAX_WORDS * CL_DSP_BYTES_PER_WORD) - op_end->offset)
  1273. < (op_new->size * CL_DSP_BYTES_PER_WORD)) {
  1274. dev_err(dev, "Not enough space in pseq to add op\n");
  1275. error = -ENOMEM;
  1276. goto op_new_free;
  1277. }
  1278. if (is_new) {
  1279. op_new->offset = op_end->offset;
  1280. op_end->offset += (op_new->size * CL_DSP_BYTES_PER_WORD);
  1281. } else {
  1282. op_new->offset = op->offset;
  1283. }
  1284. error = regmap_bulk_write(cs40l26->regmap, cs40l26->pseq_base + op_new->offset,
  1285. op_new->words, op_new->size);
  1286. if (error) {
  1287. dev_err(dev, "Failed to write PSEQ op.\n");
  1288. goto op_new_free;
  1289. }
  1290. if (is_new) {
  1291. error = regmap_bulk_write(cs40l26->regmap, cs40l26->pseq_base + op_end->offset,
  1292. op_end->words, op_end->size);
  1293. if (error) {
  1294. dev_err(dev, "Failed to write PSEQ terminator\n");
  1295. goto op_new_free;
  1296. }
  1297. list_add(&op_new->list, &cs40l26->pseq_op_head);
  1298. cs40l26->pseq_num_ops++;
  1299. } else {
  1300. list_replace(&op->list, &op_new->list);
  1301. }
  1302. return 0;
  1303. op_new_free:
  1304. devm_kfree(dev, op_new);
  1305. return error;
  1306. }
  1307. EXPORT_SYMBOL_GPL(cs40l26_pseq_write);
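/*
 * Convenience wrapper: sequence several writes with the same op code.
 * Illustrative use only (values chosen for demonstration):
 *
 *	static const struct reg_sequence seq[] = {
 *		{ CS40L26_NGATE1_INPUT, CS40L26_DATA_SRC_DSP1TX4 },
 *	};
 *	error = cs40l26_pseq_multi_write(cs40l26, seq, ARRAY_SIZE(seq),
 *					 true, CS40L26_PSEQ_OP_WRITE_L16);
 */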
  1308. static int cs40l26_pseq_multi_write(struct cs40l26_private *cs40l26,
  1309. const struct reg_sequence *reg_seq, int num_regs, bool update, u8 op_code)
  1310. {
  1311. int error, i;
  1312. for (i = 0; i < num_regs; i++) {
  1313. error = cs40l26_pseq_write(cs40l26, reg_seq[i].reg, reg_seq[i].def,
  1314. update, op_code);
  1315. if (error)
  1316. return error;
  1317. }
  1318. return 0;
  1319. }
  1320. static int cs40l26_update_reg_defaults_via_pseq(struct cs40l26_private *cs40l26)
  1321. {
  1322. struct device *dev = cs40l26->dev;
  1323. int error;
  1324. error = cs40l26_pseq_write(cs40l26, CS40L26_NGATE1_INPUT, CS40L26_DATA_SRC_DSP1TX4, true,
  1325. CS40L26_PSEQ_OP_WRITE_L16);
  1326. if (error)
  1327. return error;
  1328. /* set SPK_DEFAULT_HIZ to 1 */
  1329. error = cs40l26_pseq_write(cs40l26, CS40L26_TST_DAC_MSM_CONFIG,
  1330. CS40L26_TST_DAC_MSM_CONFIG_DEFAULT_CHANGE_VALUE_H16,
  1331. true, CS40L26_PSEQ_OP_WRITE_H16);
  1332. if (error)
  1333. dev_err(dev, "Failed to sequence register default updates\n");
  1334. return error;
  1335. }
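/*
 * Parse the firmware's POWER_ON_SEQUENCE image: read the whole PSEQ memory
 * region, walk it op by op (sizing each op from its op code), build the
 * in-memory op list until the OP_END terminator is found, then apply the
 * driver's default register overrides through the sequencer.
 */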
  1336. static int cs40l26_pseq_init(struct cs40l26_private *cs40l26)
  1337. {
  1338. struct cs40l26_pseq_op *pseq_op;
  1339. int i, num_words, error;
  1340. u8 operation;
  1341. u32 *words;
  1342. INIT_LIST_HEAD(&cs40l26->pseq_op_head);
  1343. cs40l26->pseq_num_ops = 0;
  1344. words = kcalloc(CS40L26_PSEQ_MAX_WORDS, CL_DSP_BYTES_PER_WORD, GFP_KERNEL);
if (!words)
  1346. return -ENOMEM;
  1347. error = cl_dsp_get_reg(cs40l26->dsp, "POWER_ON_SEQUENCE", CL_DSP_XM_UNPACKED_TYPE,
  1348. CS40L26_PM_ALGO_ID, &cs40l26->pseq_base);
  1349. if (error)
  1350. goto err_free;
  1351. /* read pseq memory space */
  1352. error = regmap_raw_read(cs40l26->regmap, cs40l26->pseq_base, words,
  1353. CS40L26_PSEQ_MAX_WORDS * CL_DSP_BYTES_PER_WORD);
  1354. if (error)
  1355. goto err_free;
  1356. for (i = 0; i < CS40L26_PSEQ_MAX_WORDS; i++)
  1357. words[i] = be32_to_cpu(words[i]);
  1358. for (i = 0; i < CS40L26_PSEQ_MAX_WORDS; i += num_words) {
  1359. operation = (words[i] & CS40L26_PSEQ_OP_MASK) >> CS40L26_PSEQ_OP_SHIFT;
  1360. switch (operation) {
  1361. case CS40L26_PSEQ_OP_END:
  1362. num_words = CS40L26_PSEQ_OP_END_WORDS;
  1363. break;
  1364. case CS40L26_PSEQ_OP_WRITE_ADDR8:
  1365. num_words = CS40L26_PSEQ_OP_WRITE_ADDR8_WORDS;
  1366. break;
  1367. case CS40L26_PSEQ_OP_WRITE_H16:
  1368. case CS40L26_PSEQ_OP_WRITE_L16:
  1369. num_words = CS40L26_PSEQ_OP_WRITE_X16_WORDS;
  1370. break;
  1371. case CS40L26_PSEQ_OP_WRITE_FULL:
  1372. num_words = CS40L26_PSEQ_OP_WRITE_FULL_WORDS;
  1373. break;
  1374. default:
  1375. dev_err(cs40l26->dev, "Invalid OP code 0x%02X\n", operation);
  1376. error = -EINVAL;
  1377. goto err_free;
  1378. }
  1379. pseq_op = devm_kzalloc(cs40l26->dev, sizeof(struct cs40l26_pseq_op), GFP_KERNEL);
  1380. if (IS_ERR_OR_NULL(pseq_op)) {
  1381. error = -ENOMEM;
  1382. goto err_free;
  1383. }
  1384. memcpy(pseq_op->words, &words[i], num_words * CL_DSP_BYTES_PER_WORD);
  1385. pseq_op->size = num_words;
  1386. pseq_op->offset = i * CL_DSP_BYTES_PER_WORD;
  1387. pseq_op->operation = operation;
  1388. list_add(&pseq_op->list, &cs40l26->pseq_op_head);
  1389. cs40l26->pseq_num_ops++;
  1390. if (operation == CS40L26_PSEQ_OP_END)
  1391. break;
  1392. }
  1393. if (operation != CS40L26_PSEQ_OP_END) {
  1394. dev_err(cs40l26->dev, "PSEQ_END_OF_SCRIPT not found\n");
  1395. error = -ENOENT;
  1396. goto err_free;
  1397. }
  1398. error = cs40l26_update_reg_defaults_via_pseq(cs40l26);
  1399. err_free:
  1400. kfree(words);
  1401. return error;
  1402. }
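/*
 * Update an IRQ1 mask register: the affected status bits are acked first,
 * the new mask is written, and the change is mirrored into the power-on
 * sequencer (split into H16/L16 writes) so it survives hibernation.
 */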
  1403. static int cs40l26_irq_update_mask(struct cs40l26_private *cs40l26, u32 reg, u32 val, u32 bit_mask)
  1404. {
  1405. u32 eint_reg, cur_mask, new_mask;
  1406. int error;
  1407. if (reg == CS40L26_IRQ1_MASK_1) {
  1408. eint_reg = CS40L26_IRQ1_EINT_1;
  1409. } else if (reg == CS40L26_IRQ1_MASK_2) {
  1410. eint_reg = CS40L26_IRQ1_EINT_2;
  1411. } else {
  1412. dev_err(cs40l26->dev, "Invalid IRQ mask reg: 0x%08X\n", reg);
  1413. return -EINVAL;
  1414. }
  1415. error = regmap_read(cs40l26->regmap, reg, &cur_mask);
  1416. if (error) {
  1417. dev_err(cs40l26->dev, "Failed to get IRQ mask\n");
  1418. return error;
  1419. }
  1420. new_mask = (cur_mask & ~bit_mask) | val;
  1421. /* Clear interrupt prior to masking/unmasking */
  1422. error = regmap_write(cs40l26->regmap, eint_reg, bit_mask);
  1423. if (error) {
  1424. dev_err(cs40l26->dev, "Failed to clear IRQ\n");
  1425. return error;
  1426. }
  1427. error = regmap_write(cs40l26->regmap, reg, new_mask);
  1428. if (error) {
  1429. dev_err(cs40l26->dev, "Failed to update IRQ mask\n");
  1430. return error;
  1431. }
  1432. if (bit_mask & GENMASK(31, 16)) {
  1433. error = cs40l26_pseq_write(cs40l26, reg, (new_mask & GENMASK(31, 16)) >> 16,
  1434. true, CS40L26_PSEQ_OP_WRITE_H16);
  1435. if (error) {
dev_err(cs40l26->dev, "Failed to update IRQ mask H16\n");
  1437. return error;
  1438. }
  1439. }
  1440. if (bit_mask & GENMASK(15, 0)) {
  1441. error = cs40l26_pseq_write(cs40l26, reg, (new_mask & GENMASK(15, 0)),
  1442. true, CS40L26_PSEQ_OP_WRITE_L16);
  1443. if (error) {
dev_err(cs40l26->dev, "Failed to update IRQ mask L16\n");
  1445. return error;
  1446. }
  1447. }
  1448. return error;
  1449. }
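/*
 * Map an uploaded effect to a GPI trigger. The ff trigger button field
 * encodes the GPIO number and edge; a press (rising edge) maps to the base
 * event-map register and a release to base + 4. The written value packs the
 * trigger index, the RAM/ROM bank selector and the OWT flag. On sufficiently
 * new firmware the effect's replay length is also programmed as the GPI
 * timeout.
 */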
  1450. static int cs40l26_map_gpi_to_haptic(struct cs40l26_private *cs40l26, struct ff_effect *effect,
  1451. struct cs40l26_uploaded_effect *ueffect)
  1452. {
  1453. u8 gpio = (effect->trigger.button & CS40L26_BTN_NUM_MASK) >> CS40L26_BTN_NUM_SHIFT;
  1454. bool edge, ev_handler_bank_ram, owt, use_timeout;
  1455. unsigned int fw_rev;
  1456. u32 reg, write_val;
  1457. int error;
  1458. edge = (effect->trigger.button & CS40L26_BTN_EDGE_MASK) >> CS40L26_BTN_EDGE_SHIFT;
  1459. switch (ueffect->wvfrm_bank) {
  1460. case CS40L26_RAM_BANK_ID:
  1461. case CS40L26_BUZ_BANK_ID:
  1462. owt = false;
  1463. ev_handler_bank_ram = true;
  1464. break;
  1465. case CS40L26_ROM_BANK_ID:
  1466. owt = false;
  1467. ev_handler_bank_ram = false;
  1468. break;
  1469. case CS40L26_OWT_BANK_ID:
  1470. owt = true;
  1471. ev_handler_bank_ram = true;
  1472. break;
  1473. default:
  1474. dev_err(cs40l26->dev, "Effect bank %u not supported\n", ueffect->wvfrm_bank);
  1475. return -EINVAL;
  1476. }
  1477. if (gpio != CS40L26_GPIO1) {
  1478. dev_err(cs40l26->dev, "GPIO%u not supported on 0x%02X\n", gpio, cs40l26->revid);
  1479. return -EINVAL;
  1480. }
  1481. reg = cs40l26->event_map_base + (edge ? 0 : 4);
  1482. write_val = (ueffect->trigger_index & CS40L26_BTN_INDEX_MASK) |
  1483. (ev_handler_bank_ram << CS40L26_BTN_BANK_SHIFT) |
  1484. (owt << CS40L26_BTN_OWT_SHIFT);
  1485. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  1486. dev_info(cs40l26->dev, "%s\n", __func__);
  1487. #endif
  1488. error = regmap_write(cs40l26->regmap, reg, write_val);
  1489. if (error) {
  1490. dev_err(cs40l26->dev, "Failed to update event map\n");
  1491. return error;
  1492. }
  1493. error = cl_dsp_fw_rev_get(cs40l26->dsp, &fw_rev);
  1494. if (error)
  1495. return error;
  1496. use_timeout = (!cs40l26->calib_fw && fw_rev >= CS40L26_FW_GPI_TIMEOUT_MIN_REV) ||
  1497. (cs40l26->calib_fw && fw_rev >= CS40L26_FW_GPI_TIMEOUT_CALIB_MIN_REV);
  1498. if (use_timeout) {
  1499. error = cl_dsp_get_reg(cs40l26->dsp, "TIMEOUT_GPI_MS", CL_DSP_XM_UNPACKED_TYPE,
  1500. CS40L26_VIBEGEN_ALGO_ID, &reg);
  1501. if (error)
  1502. return error;
  1503. error = regmap_write(cs40l26->regmap, reg, effect->replay.length);
  1504. if (error)
  1505. dev_warn(cs40l26->dev, "Failed to set GPI timeout, continuing...\n");
  1506. }
  1507. if (edge)
  1508. ueffect->mapping = CS40L26_GPIO_MAP_A_PRESS;
  1509. else
  1510. ueffect->mapping = CS40L26_GPIO_MAP_A_RELEASE;
  1511. return error;
  1512. }
  1513. static struct cs40l26_uploaded_effect *cs40l26_uploaded_effect_find(struct cs40l26_private *cs40l26,
  1514. int id)
  1515. {
  1516. struct list_head *head = &cs40l26->effect_head;
  1517. int uid = -1;
  1518. struct cs40l26_uploaded_effect *ueffect;
  1519. if (list_empty(head)) {
  1520. dev_dbg(cs40l26->dev, "Effect list is empty\n");
  1521. return ERR_PTR(-ENODATA);
  1522. }
  1523. list_for_each_entry(ueffect, head, list) {
  1524. uid = ueffect->id;
  1525. if (uid == id)
  1526. break;
  1527. }
  1528. if (uid != id) {
  1529. dev_dbg(cs40l26->dev, "No such effect (ID = %d)\n", id);
  1530. return ERR_PTR(-EINVAL);
  1531. }
  1532. return ueffect;
  1533. }
  1534. static struct cs40l26_buzzgen_config cs40l26_buzzgen_configs[] = {
  1535. {
  1536. .duration_name = "BUZZ_EFFECTS2_BUZZ_DURATION",
  1537. .freq_name = "BUZZ_EFFECTS2_BUZZ_FREQ",
  1538. .level_name = "BUZZ_EFFECTS2_BUZZ_LEVEL",
  1539. .effect_id = -1
  1540. },
  1541. {
  1542. .duration_name = "BUZZ_EFFECTS3_BUZZ_DURATION",
  1543. .freq_name = "BUZZ_EFFECTS3_BUZZ_FREQ",
  1544. .level_name = "BUZZ_EFFECTS3_BUZZ_LEVEL",
  1545. .effect_id = -1
  1546. },
  1547. {
  1548. .duration_name = "BUZZ_EFFECTS4_BUZZ_DURATION",
  1549. .freq_name = "BUZZ_EFFECTS4_BUZZ_FREQ",
  1550. .level_name = "BUZZ_EFFECTS4_BUZZ_LEVEL",
  1551. .effect_id = -1
  1552. },
  1553. {
  1554. .duration_name = "BUZZ_EFFECTS5_BUZZ_DURATION",
  1555. .freq_name = "BUZZ_EFFECTS5_BUZZ_FREQ",
  1556. .level_name = "BUZZ_EFFECTS5_BUZZ_LEVEL",
  1557. .effect_id = -1
  1558. },
  1559. {
  1560. .duration_name = "BUZZ_EFFECTS6_BUZZ_DURATION",
  1561. .freq_name = "BUZZ_EFFECTS6_BUZZ_FREQ",
  1562. .level_name = "BUZZ_EFFECTS6_BUZZ_LEVEL",
  1563. .effect_id = -1
  1564. },
  1565. };
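/*
 * Pick a BUZZGEN slot for the given effect ID: reuse the slot already
 * assigned to that ID if there is one, otherwise fall back to the
 * lowest-numbered free slot (effect_id == -1); returns -1 if none is free.
 */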
  1566. static int cs40l26_buzzgen_find_slot(struct cs40l26_private *cs40l26, int id)
  1567. {
  1568. int i, slot = -1;
  1569. for (i = CS40L26_BUZZGEN_NUM_CONFIGS - 1; i >= 0; i--) {
  1570. if (cs40l26_buzzgen_configs[i].effect_id == id) {
  1571. slot = i;
  1572. break;
  1573. } else if (cs40l26_buzzgen_configs[i].effect_id == -1) {
  1574. slot = i;
  1575. }
  1576. }
  1577. return slot;
  1578. }
  1579. static int cs40l26_erase_buzzgen(struct cs40l26_private *cs40l26, int id)
  1580. {
  1581. int slot = cs40l26_buzzgen_find_slot(cs40l26, id);
  1582. if (slot == -1) {
  1583. dev_err(cs40l26->dev, "Failed to erase BUZZGEN config for id %d\n", id);
  1584. return -EINVAL;
  1585. }
  1586. cs40l26_buzzgen_configs[slot].effect_id = -1;
  1587. return 0;
  1588. }
  1589. static bool cs40l26_is_no_wait_ram_index(struct cs40l26_private *cs40l26,
  1590. u32 index)
  1591. {
  1592. int i;
  1593. for (i = 0; i < cs40l26->num_no_wait_ram_indices; i++) {
  1594. if (cs40l26->no_wait_ram_indices[i] == index)
  1595. return true;
  1596. }
  1597. return false;
  1598. }
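/*
 * Gain worker: while the device is streaming over ASP the requested gain is
 * scaled by asp_scale_pct (the pre-scale value is parked in gain_tmp for a
 * later restore); the resulting percentage indexes the Q21.2 attenuation
 * table written to SOURCE_ATTENUATION.
 */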
  1599. static void cs40l26_set_gain_worker(struct work_struct *work)
  1600. {
  1601. struct cs40l26_private *cs40l26 = container_of(work, struct cs40l26_private, set_gain_work);
  1602. int error;
  1603. u16 gain;
  1604. u32 reg;
  1605. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  1606. if (cs40l26->busy_state) {
  1607. pr_info("%s - f/w is busy\n", __func__);
  1608. return;
  1609. }
  1610. #endif
  1611. error = cs40l26_pm_enter(cs40l26->dev);
  1612. if (error)
  1613. return;
  1614. mutex_lock(&cs40l26->lock);
  1615. if (cs40l26->vibe_state == CS40L26_VIBE_STATE_ASP) {
  1616. gain = (cs40l26->asp_scale_pct * cs40l26->gain_pct) / CS40L26_GAIN_FULL_SCALE;
  1617. cs40l26->gain_tmp = cs40l26->gain_pct;
  1618. cs40l26->gain_pct = gain;
  1619. cs40l26->scaling_applied = true;
  1620. } else {
  1621. gain = cs40l26->gain_pct;
  1622. }
  1623. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  1624. dev_info(cs40l26->dev, "%s: gain = %u%%\n", __func__, gain);
  1625. #else
  1626. dev_dbg(cs40l26->dev, "%s: gain = %u%%\n", __func__, gain);
  1627. #endif
  1628. /* Write Q21.2 value to SOURCE_ATTENUATION */
  1629. error = cl_dsp_get_reg(cs40l26->dsp, "SOURCE_ATTENUATION",
  1630. CL_DSP_XM_UNPACKED_TYPE, CS40L26_EXT_ALGO_ID, &reg);
  1631. if (error) {
  1632. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  1633. dev_err(cs40l26->dev, "Failed to get Source Attenuation\n");
  1634. #endif
  1635. goto err_mutex;
  1636. }
  1637. error = regmap_write(cs40l26->regmap, reg, cs40l26_attn_q21_2_vals[gain]);
  1638. if (error)
  1639. dev_err(cs40l26->dev, "Failed to set attenuation\n");
  1640. err_mutex:
  1641. mutex_unlock(&cs40l26->lock);
  1642. cs40l26_pm_exit(cs40l26->dev);
  1643. }
  1644. static void cs40l26_vibe_start_worker(struct work_struct *work)
  1645. {
  1646. struct cs40l26_private *cs40l26 = container_of(work, struct cs40l26_private,
  1647. vibe_start_work);
  1648. struct device *dev = cs40l26->dev;
  1649. struct cs40l26_uploaded_effect *ueffect;
  1650. struct ff_effect *effect;
  1651. unsigned int reg;
  1652. u16 duration;
  1653. bool invert;
  1654. int error;
  1655. dev_dbg(dev, "%s\n", __func__);
  1656. error = cs40l26_pm_enter(dev);
  1657. if (error)
  1658. return;
  1659. mutex_lock(&cs40l26->lock);
  1660. effect = cs40l26->trigger_effect;
  1661. ueffect = cs40l26_uploaded_effect_find(cs40l26, effect->id);
  1662. if (IS_ERR_OR_NULL(ueffect)) {
  1663. dev_err(dev, "No such effect to play back\n");
  1664. goto err_mutex;
  1665. }
  1666. duration = effect->replay.length;
  1667. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  1668. dev_info(dev, "%s CS40L26_START_PLAYBACK duration = %dms\n",
  1669. __func__, duration);
  1670. #endif
  1671. error = cl_dsp_get_reg(cs40l26->dsp, "TIMEOUT_MS",
  1672. CL_DSP_XM_UNPACKED_TYPE, CS40L26_VIBEGEN_ALGO_ID, &reg);
  1673. if (error) {
  1674. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  1675. dev_err(dev, "%s cs40l26 read fail(%d)\n", __func__, error);
  1676. #endif
  1677. goto err_mutex;
  1678. }
  1679. error = regmap_write(cs40l26->regmap, reg, duration);
  1680. if (error) {
  1681. dev_err(dev, "Failed to set TIMEOUT_MS\n");
  1682. goto err_mutex;
  1683. }
  1684. error = cl_dsp_get_reg(cs40l26->dsp, "SOURCE_INVERT",
  1685. CL_DSP_XM_UNPACKED_TYPE, CS40L26_EXT_ALGO_ID, &reg);
  1686. if (error)
  1687. goto err_mutex;
  1688. switch (effect->direction) {
  1689. case 0x0000:
  1690. invert = false;
  1691. break;
  1692. case 0x8000:
  1693. invert = true;
  1694. break;
  1695. default:
  1696. dev_err(dev, "Invalid ff_effect direction: 0x%X\n", effect->direction);
  1697. goto err_mutex;
  1698. }
  1699. error = regmap_write(cs40l26->regmap, reg, invert);
  1700. if (error)
  1701. goto err_mutex;
  1702. switch (effect->u.periodic.waveform) {
  1703. case FF_CUSTOM:
  1704. case FF_SINE:
  1705. error = cs40l26_mailbox_write(cs40l26, ueffect->trigger_index);
  1706. if (error) {
  1707. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  1708. dev_err(dev, "%s: cs40l26 write fail(%d)\n", __func__, error);
  1709. #endif
  1710. goto err_mutex;
  1711. }
  1712. cs40l26->cur_index = ueffect->trigger_index;
  1713. break;
  1714. default:
  1715. dev_err(dev, "Invalid waveform type: 0x%X\n", effect->u.periodic.waveform);
  1716. goto err_mutex;
  1717. }
  1718. if (!cs40l26->vibe_state_reporting)
  1719. cs40l26_vibe_state_update(cs40l26, CS40L26_VIBE_STATE_EVENT_MBOX_PLAYBACK);
  1720. reinit_completion(&cs40l26->erase_cont);
  1721. err_mutex:
  1722. mutex_unlock(&cs40l26->lock);
  1723. cs40l26_pm_exit(dev);
  1724. }
  1725. static void cs40l26_vibe_stop_worker(struct work_struct *work)
  1726. {
  1727. struct cs40l26_private *cs40l26 = container_of(work, struct cs40l26_private,
  1728. vibe_stop_work);
  1729. bool skip_delay;
  1730. u32 delay_us;
  1731. int error;
  1732. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  1733. dev_info(cs40l26->dev, "%s vib_state(%s)\n",
  1734. __func__, vibe_state_strings[cs40l26->vibe_state]);
  1735. #else
  1736. dev_dbg(cs40l26->dev, "%s\n", __func__);
  1737. #endif
  1738. error = cs40l26_pm_enter(cs40l26->dev);
  1739. if (error)
  1740. return;
  1741. mutex_lock(&cs40l26->lock);
  1742. delay_us = cs40l26->delay_before_stop_playback_us;
  1743. skip_delay = cs40l26_is_no_wait_ram_index(cs40l26, cs40l26->cur_index);
  1744. if (delay_us && !skip_delay) {
  1745. mutex_unlock(&cs40l26->lock);
  1746. dev_info(cs40l26->dev, "Applying delay\n");
  1747. /* wait for SVC init phase to complete */
  1748. usleep_range(delay_us, delay_us + 100);
  1749. mutex_lock(&cs40l26->lock);
  1750. } else {
  1751. dev_info(cs40l26->dev, "Skipping delay\n");
  1752. }
  1753. if (skip_delay) {
  1754. dev_dbg(cs40l26->dev, "Stop command skipped\n");
  1755. goto mutex_exit;
  1756. }
  1757. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  1758. dev_info(cs40l26->dev, "%s CS40L26_STOP_PLAYBACK\n", __func__);
  1759. #endif
  1760. error = cs40l26_mailbox_write(cs40l26, CS40L26_STOP_PLAYBACK);
  1761. if (error)
  1762. dev_err(cs40l26->dev, "Failed to stop playback\n");
  1763. mutex_exit:
  1764. mutex_unlock(&cs40l26->lock);
  1765. cs40l26_pm_exit(cs40l26->dev);
  1766. }
/* Provided by Cirrus; Samsung relocated it here so samsung_hw_reset() can call it */
  1768. static int cs40l26_part_num_resolve(struct cs40l26_private *cs40l26)
  1769. {
  1770. struct regmap *regmap = cs40l26->regmap;
  1771. struct device *dev = cs40l26->dev;
  1772. u32 devid, revid, fullid;
  1773. int error;
  1774. error = regmap_read(regmap, CS40L26_DEVID, &devid);
  1775. if (error) {
  1776. dev_err(dev, "Failed to read device ID\n");
  1777. return error;
  1778. }
  1779. error = regmap_read(regmap, CS40L26_REVID, &revid);
  1780. if (error) {
  1781. dev_err(dev, "Failed to read revision ID\n");
  1782. return error;
  1783. }
  1784. devid &= CS40L26_DEVID_MASK;
  1785. revid &= CS40L26_REVID_MASK;
  1786. fullid = (devid << 8) | revid;
  1787. switch (fullid) {
  1788. case CS40L26_ID_L26A_A1:
  1789. case CS40L26_ID_L26B_A1:
  1790. case CS40L26_ID_L27A_A1:
  1791. case CS40L26_ID_L27B_A1:
  1792. case CS40L26_ID_L26A_B0:
  1793. case CS40L26_ID_L26B_B0:
  1794. case CS40L26_ID_L27A_B0:
  1795. case CS40L26_ID_L27B_B0:
  1796. case CS40L26_ID_L27A_B1:
  1797. cs40l26->rom_regs = &cs40l26_rom_regs_a1_b0_b1;
  1798. break;
  1799. case CS40L26_ID_L27A_B2:
  1800. cs40l26->rom_regs = &cs40l26_rom_regs_b2;
  1801. break;
  1802. default:
  1803. dev_err(dev, "Invalid ID: 0x%06X 0x%02X\n", devid, revid);
  1804. return -EINVAL;
  1805. }
  1806. cs40l26->devid = devid;
  1807. cs40l26->revid = revid;
  1808. dev_info(dev, "Cirrus Logic %s ID: 0x%06X, Revision: 0x%02X\n",
  1809. CS40L26_DEV_NAME, cs40l26->devid, cs40l26->revid);
  1810. return 0;
  1811. }
  1812. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
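/*
 * VMON pass window for the I2S loopback test: the expected reading is
 * asp_scale_pct percent of VMON_100_MV (never less than VMON_20_MV), and a
 * sample is accepted when it falls within +/- VMON_20_MV of that target.
 */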
  1813. __visible_for_testing bool samsung_is_valid_vmon(struct cs40l26_private *cs40l26, u32 vmon)
  1814. {
  1815. u32 num;
  1816. int pass_val;
  1817. pass_val = cs40l26->asp_scale_pct * VMON_100_MV / 100;
  1818. if (pass_val < VMON_20_MV)
  1819. pass_val = VMON_20_MV;
/* num = vmon / (2^23 - 1) * 12.3 V * 1000; expected within +/- 20 mV of pass_val (80 mV to 120 mV at full scale) */
  1821. if (vmon > pass_val - VMON_20_MV && vmon < pass_val + VMON_20_MV) {
  1822. num = vmon * 12300 / 8388607;
  1823. dev_info(cs40l26->dev, "%s, num : %umV\n", __func__, num);
  1824. return true;
} else {
dev_info(cs40l26->dev, "vmon is out of range\n");
}
return false;
  1828. }
  1829. static int samsung_get_i2s_test(struct input_dev *dev)
  1830. {
  1831. struct sec_vib_inputff_drvdata *ddata = input_get_drvdata(dev);
  1832. struct cs40l26_private *cs40l26 = ddata->private_data;
  1833. int error = 0;
  1834. u32 vmon;
  1835. error = cs40l26_pm_enter(cs40l26->dev);
  1836. if (error)
  1837. return error;
  1838. error = regmap_read(cs40l26->regmap, CS40L26_SPKMON_VMON_DEC_OUT_DATA,
  1839. &vmon);
  1840. if (error) {
  1841. dev_err(cs40l26->dev, "Failed to get VMON Data for I2S\n");
  1842. goto pm_err;
  1843. }
  1844. if (vmon & CS40L26_VMON_OVFL_FLAG_MASK) {
  1845. dev_err(cs40l26->dev, "I2S VMON overflow detected\n");
  1846. error = -EOVERFLOW;
  1847. goto pm_err;
  1848. }
  1849. vmon &= CS40L26_VMON_DEC_OUT_DATA_MASK;
  1850. if (samsung_is_valid_vmon(cs40l26, vmon))
  1851. error = 1;
  1852. dev_info(cs40l26->dev, "%s, vmon : %u, ret : %d done\n", __func__, vmon, error);
  1853. pm_err:
  1854. cs40l26_pm_exit(cs40l26->dev);
  1855. return error;
  1856. }
  1857. static int samsung_set_trigger_cal(struct input_dev *dev, u32 val)
  1858. {
  1859. struct sec_vib_inputff_drvdata *ddata = input_get_drvdata(dev);
  1860. struct cs40l26_private *cs40l26 = ddata->private_data;
  1861. u32 mailbox_command;
  1862. int error;
  1863. struct completion *completion;
  1864. if (val < 1 || val > 2) {
  1865. dev_err(cs40l26->dev, "%s: %u is out of range\n", __func__, val);
  1866. return -EINVAL;
  1867. }
  1868. if (!cs40l26->calib_fw) {
  1869. dev_err(cs40l26->dev, "Must use calibration firmware\n");
  1870. return -EPERM;
  1871. }
  1872. switch (val) {
  1873. case CS40L26_CALIBRATION_CONTROL_REQUEST_F0_AND_Q:
  1874. completion = &cs40l26->cal_f0_cont;
  1875. break;
  1876. case CS40L26_CALIBRATION_CONTROL_REQUEST_REDC:
  1877. completion = &cs40l26->cal_redc_cont;
  1878. break;
  1879. case CS40L26_CALIBRATION_CONTROL_REQUEST_DVL_PEQ:
  1880. completion = &cs40l26->cal_dvl_peq_cont;
  1881. break;
  1882. case CS40L26_CALIBRATION_CONTROL_REQUEST_LS_CALIBRATION:
  1883. completion = &cs40l26->cal_ls_cont;
  1884. break;
  1885. default:
  1886. return -EINVAL;
  1887. }
  1888. mailbox_command = ((CS40L26_DSP_MBOX_CMD_INDEX_CALIBRATION_CONTROL <<
  1889. CS40L26_DSP_MBOX_CMD_INDEX_SHIFT) & CS40L26_DSP_MBOX_CMD_INDEX_MASK) |
  1890. (val & CS40L26_DSP_MBOX_CMD_PAYLOAD_MASK);
  1891. error = cs40l26_pm_enter(cs40l26->dev);
  1892. if (error)
  1893. return error;
  1894. mutex_lock(&cs40l26->lock);
  1895. reinit_completion(completion);
  1896. error = cs40l26_mailbox_write(cs40l26, mailbox_command);
  1897. mutex_unlock(&cs40l26->lock);
  1898. if (error) {
  1899. dev_err(cs40l26->dev, "Failed to request calibration\n");
  1900. goto err_pm;
  1901. }
  1902. if (!wait_for_completion_timeout(
  1903. completion,
  1904. msecs_to_jiffies(CS40L26_CALIBRATION_TIMEOUT_MS))) {
  1905. error = -ETIME;
  1906. dev_err(cs40l26->dev, "Failed to complete cal req, %d, err: %d",
  1907. val, error);
  1908. goto err_pm;
  1909. }
  1910. mutex_lock(&cs40l26->lock);
  1911. if (val == CS40L26_CALIBRATION_CONTROL_REQUEST_F0_AND_Q)
  1912. error = cs40l26_copy_f0_est_to_dvl(cs40l26);
  1913. mutex_unlock(&cs40l26->lock);
  1914. err_pm:
  1915. cs40l26_pm_exit(cs40l26->dev);
  1916. return error;
  1917. }
  1918. static u32 samsung_get_f0_measured(struct input_dev *dev)
  1919. {
  1920. struct sec_vib_inputff_drvdata *ddata = input_get_drvdata(dev);
  1921. struct cs40l26_private *cs40l26 = ddata->private_data;
  1922. u32 reg, f0_measured;
  1923. int error;
  1924. error = cs40l26_pm_enter(cs40l26->dev);
  1925. if (error)
  1926. return error;
  1927. mutex_lock(&cs40l26->lock);
  1928. error = cl_dsp_get_reg(cs40l26->dsp, "F0_EST", CL_DSP_XM_UNPACKED_TYPE,
  1929. CS40L26_F0_EST_ALGO_ID, &reg);
  1930. if (error)
  1931. goto err_mutex;
  1932. error = regmap_read(cs40l26->regmap, reg, &f0_measured);
  1933. if (error)
  1934. goto err_mutex;
  1935. dev_info(cs40l26->dev, "%s: f0_measured : %u", __func__, f0_measured);
  1936. err_mutex:
  1937. mutex_unlock(&cs40l26->lock);
  1938. cs40l26_pm_exit(cs40l26->dev);
if (error) {
dev_err(cs40l26->dev, "%s: returning error %d\n", __func__, error);
return error;
}
return f0_measured;
  1944. }
  1945. static int samsung_get_f0_offset(struct input_dev *dev)
  1946. {
  1947. struct sec_vib_inputff_drvdata *ddata = input_get_drvdata(dev);
  1948. struct cs40l26_private *cs40l26 = ddata->private_data;
  1949. if (cs40l26->pdata.f0_offset) {
  1950. dev_info(cs40l26->dev, "%s: f0_offset : 0x%08X", __func__, cs40l26->pdata.f0_offset);
  1951. return cs40l26->pdata.f0_offset;
  1952. }
  1953. return 0;
  1954. }
  1955. static u32 samsung_set_f0_stored(struct input_dev *dev, u32 val)
  1956. {
  1957. struct sec_vib_inputff_drvdata *ddata = input_get_drvdata(dev);
  1958. struct cs40l26_private *cs40l26 = ddata->private_data;
  1959. u32 reg;
  1960. int error;
  1961. if (val < CS40L26_SAMSUNG_F0_MIN || val > CS40L26_SAMSUNG_F0_MAX) {
  1962. dev_err(cs40l26->dev, "%s: %u is out of range\n", __func__, val);
  1963. return -EINVAL;
  1964. }
  1965. error = cs40l26_pm_enter(cs40l26->dev);
  1966. if (error)
  1967. return error;
  1968. mutex_lock(&cs40l26->lock);
  1969. error = cl_dsp_get_reg(cs40l26->dsp, "F0_OTP_STORED", CL_DSP_XM_UNPACKED_TYPE,
  1970. CS40L26_VIBEGEN_ALGO_ID, &reg);
  1971. if (error)
  1972. goto err_mutex;
  1973. error = regmap_write(cs40l26->regmap, reg, val);
  1974. if (error)
  1975. goto err_mutex;
  1976. dev_info(cs40l26->dev, "%s: f0 val : %u", __func__, val);
  1977. err_mutex:
  1978. mutex_unlock(&cs40l26->lock);
  1979. cs40l26_pm_exit(cs40l26->dev);
  1980. if (error)
  1981. dev_err(cs40l26->dev, "%s is return error : %d\n", __func__, error);
  1982. return error;
  1983. }
  1984. static int samsung_get_le_est(struct input_dev *dev, u32 *le)
  1985. {
  1986. struct sec_vib_inputff_drvdata *ddata = input_get_drvdata(dev);
  1987. struct cs40l26_private *cs40l26 = ddata->private_data;
  1988. int error;
  1989. error = cs40l26_pm_enter(cs40l26->dev);
  1990. if (error)
  1991. return error;
  1992. mutex_lock(&cs40l26->lock);
  1993. error = cs40l26_svc_le_estimate(cs40l26, le);
  1994. if (error)
  1995. dev_err(cs40l26->dev, "%s is return error : %d\n", __func__, error);
  1996. mutex_unlock(&cs40l26->lock);
  1997. cs40l26_pm_exit(cs40l26->dev);
  1998. return error;
  1999. }
  2000. static unsigned int samsung_get_le_stored(struct input_dev *dev)
  2001. {
  2002. struct sec_vib_inputff_drvdata *ddata = input_get_drvdata(dev);
  2003. struct cs40l26_private *cs40l26 = ddata->private_data;
  2004. unsigned int le;
  2005. mutex_lock(&cs40l26->lock);
  2006. le = cs40l26->svc_le_est_stored;
  2007. mutex_unlock(&cs40l26->lock);
  2008. return le;
  2009. }
  2010. static int samsung_set_le_stored(struct input_dev *dev, u32 val)
  2011. {
  2012. struct sec_vib_inputff_drvdata *ddata = input_get_drvdata(dev);
  2013. struct cs40l26_private *cs40l26 = ddata->private_data;
  2014. mutex_lock(&cs40l26->lock);
  2015. cs40l26->svc_le_est_stored = val;
  2016. mutex_unlock(&cs40l26->lock);
  2017. return 0;
  2018. }
  2019. static const char *samsung_get_owt_lib_compat_version(struct input_dev *dev)
  2020. {
  2021. struct sec_vib_inputff_drvdata *ddata = input_get_drvdata(dev);
  2022. struct cs40l26_private *cs40l26 = ddata->private_data;
  2023. return cs40l26->pdata.owt_lib_compat_version;
  2024. }
  2025. static const char *samsung_get_ap_chipset(struct input_dev *dev)
  2026. {
  2027. struct sec_vib_inputff_drvdata *ddata = input_get_drvdata(dev);
  2028. struct cs40l26_private *cs40l26 = ddata->private_data;
  2029. return cs40l26->pdata.ap_chipset;
  2030. }
  2031. static int samsung_set_use_sep_index(struct input_dev *dev, bool use_sep_index)
  2032. {
  2033. struct sec_vib_inputff_drvdata *ddata = input_get_drvdata(dev);
  2034. struct cs40l26_private *cs40l26 = ddata->private_data;
  2035. dev_info(cs40l26->dev, "%s +\n", __func__);
  2036. mutex_lock(&cs40l26->lock);
  2037. cs40l26->use_sep_index = use_sep_index;
  2038. mutex_unlock(&cs40l26->lock);
  2039. dev_info(cs40l26->dev, "%s -\n", __func__);
  2040. return 0;
  2041. }
  2042. static int samsung_hw_reset(struct cs40l26_private *cs40l26)
  2043. {
  2044. int error;
  2045. dev_info(cs40l26->dev, "HW Reset\n");
  2046. gpiod_set_value_cansleep(cs40l26->reset_gpio, 1);
  2047. msleep(500);
  2048. gpiod_set_value_cansleep(cs40l26->reset_gpio, 0);
  2049. usleep_range(CS40L26_CONTROL_PORT_READY_DELAY,
  2050. CS40L26_CONTROL_PORT_READY_DELAY + 100);
  2051. error = cs40l26_part_num_resolve(cs40l26);
  2052. if (error) {
  2053. dev_err(cs40l26->dev, "Failed to part num resolve(%d)\n", error);
  2054. return error;
  2055. }
  2056. /* Set LRA to high-z to avoid fault conditions */
  2057. error = regmap_update_bits(cs40l26->regmap, CS40L26_TST_DAC_MSM_CONFIG,
  2058. CS40L26_SPK_DEFAULT_HIZ_MASK, 1 <<
  2059. CS40L26_SPK_DEFAULT_HIZ_SHIFT);
  2060. if (error) {
  2061. dev_err(cs40l26->dev, "Failed to update reg defaults(%d)\n", error);
  2062. return error;
  2063. }
  2064. return 0;
  2065. }
  2066. static int samsung_fw_load(struct input_dev *dev, unsigned int fw_id)
  2067. {
  2068. struct sec_vib_inputff_drvdata *ddata = input_get_drvdata(dev);
  2069. struct cs40l26_private *cs40l26 = ddata->private_data;
  2070. int error = 0;
  2071. cs40l26->busy_state = 1;
  2072. if (fw_id == 0)
  2073. error = cs40l26_fw_swap(cs40l26, CS40L26_FW_ID);
  2074. else if (fw_id == 1)
  2075. error = cs40l26_fw_swap(cs40l26, CS40L26_FW_CALIB_ID);
  2076. else
  2077. error = -EINVAL;
  2078. if (error) {
  2079. dev_err(cs40l26->dev, "%s: retry(%d), fail(%d)", __func__,
  2080. cs40l26->sec_vib_ddata.fw.retry, error);
  2081. cs40l26->fw_id = 0;
  2082. if (samsung_hw_reset(cs40l26))
  2083. dev_err(cs40l26->dev, "%s HW Reset Failed\n", __func__);
  2084. }
  2085. cs40l26->busy_state = 0;
  2086. dev_info(cs40l26->dev, "%s fw_id : %d done\n", __func__, fw_id);
  2087. return error;
  2088. }
  2089. static void samsung_recovery(struct cs40l26_private *cs40l26)
  2090. {
  2091. int error = 0, i = 0;
  2092. struct input_dev *dev = cs40l26->sec_vib_ddata.input;
  2093. if (cs40l26->vibe_workqueue) {
  2094. cancel_work_sync(&cs40l26->vibe_start_work);
  2095. cancel_work_sync(&cs40l26->vibe_stop_work);
  2096. cancel_work_sync(&cs40l26->set_gain_work);
  2097. cancel_work_sync(&cs40l26->erase_work);
  2098. }
  2099. disable_irq(cs40l26->irq);
  2100. cs40l26->busy_state = 1;
  2101. for (i = 0; i < 3; i++) {
  2102. pr_info("%s, try(%d)\n", __func__, i + 1);
  2103. samsung_hw_reset(cs40l26);
  2104. msleep(100);
  2105. cs40l26_pm_runtime_teardown(cs40l26);
  2106. cs40l26->fw_loaded = false;
  2107. cs40l26->fw_id = 0;
  2108. error = cs40l26_fw_swap(cs40l26, CS40L26_FW_ID);
  2109. if (!error) {
  2110. pr_info("%s, f/w load success!\n", __func__);
  2111. if (cs40l26->sec_vib_ddata.f0_stored) {
  2112. error = samsung_set_f0_stored(dev, cs40l26->sec_vib_ddata.f0_stored);
  2113. if (error)
  2114. pr_err("%s, samsung_set_f0_stored error : %d\n", __func__, error);
  2115. }
  2116. break;
  2117. }
  2118. msleep(100);
  2119. }
  2120. cs40l26->busy_state = 0;
  2121. enable_irq(cs40l26->irq);
  2122. dev_info(cs40l26->dev, "%s done\n", __func__);
  2123. }
  2124. #endif
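/*
 * On Samsung builds the incoming gain is divided by 100 to obtain a
 * percentage (any non-zero value below 100 is clamped to 1%) and then run
 * through sec_vib_inputff_tune_gain(); stock builds use the value as a
 * percentage directly.
 */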
  2125. static void cs40l26_set_gain(struct input_dev *dev, u16 gain)
  2126. {
  2127. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  2128. struct sec_vib_inputff_drvdata *ddata = input_get_drvdata(dev);
  2129. struct cs40l26_private *cs40l26 = ddata->private_data;
  2130. #else
  2131. struct cs40l26_private *cs40l26 = input_get_drvdata(dev);
  2132. #endif
  2133. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  2134. if (gain > 0 && gain < 100)
  2135. cs40l26->gain_pct = 1;
  2136. else
  2137. cs40l26->gain_pct = gain/100;
  2138. if (cs40l26->gain_pct > CS40L26_NUM_PCT_MAP_VALUES) {
  2139. dev_err(cs40l26->dev, "%s: gain_pct(%d) is over, just return!\n",
  2140. __func__, cs40l26->gain_pct);
  2141. return;
  2142. }
  2143. dev_info(cs40l26->dev, "Before %s:gain(%d), gain_pct(%d)\n", __func__,
  2144. gain, cs40l26->gain_pct);
  2145. cs40l26->gain_pct = sec_vib_inputff_tune_gain(&cs40l26->sec_vib_ddata, cs40l26->gain_pct);
  2146. dev_info(cs40l26->dev, "After: %s:gain(%d), gain_pct(%d)\n", __func__,
  2147. gain, cs40l26->gain_pct);
  2148. #else
  2149. if (gain >= CS40L26_NUM_PCT_MAP_VALUES) {
  2150. dev_err(cs40l26->dev, "Gain value %u %% out of bounds\n", gain);
  2151. return;
  2152. }
  2153. cs40l26->gain_pct = gain;
  2154. #endif
  2155. queue_work(cs40l26->vibe_workqueue, &cs40l26->set_gain_work);
  2156. }
  2157. static int cs40l26_playback_effect(struct input_dev *dev,
  2158. int effect_id, int val)
  2159. {
  2160. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  2161. struct sec_vib_inputff_drvdata *ddata = input_get_drvdata(dev);
  2162. struct cs40l26_private *cs40l26 = ddata->private_data;
  2163. #else
  2164. struct cs40l26_private *cs40l26 = input_get_drvdata(dev);
  2165. #endif
  2166. struct ff_effect *effect;
  2167. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  2168. if (cs40l26->busy_state) {
  2169. pr_info("%s - f/w is busy\n", __func__);
  2170. return -EINVAL;
  2171. }
  2172. dev_info(cs40l26->dev, "%s: effect ID = %d, val = %d\n", __func__, effect_id, val);
  2173. #else
  2174. dev_dbg(cs40l26->dev, "%s: effect ID = %d, val = %d\n", __func__, effect_id, val);
  2175. #endif
  2176. effect = &dev->ff->effects[effect_id];
  2177. if (!effect) {
  2178. dev_err(cs40l26->dev, "No such effect to playback\n");
  2179. return -EINVAL;
  2180. }
  2181. cs40l26->trigger_effect = effect;
  2182. if (val > 0)
  2183. queue_work(cs40l26->vibe_workqueue, &cs40l26->vibe_start_work);
  2184. else
  2185. queue_work(cs40l26->vibe_workqueue, &cs40l26->vibe_stop_work);
  2186. return 0;
  2187. }
  2188. int cs40l26_get_num_waves(struct cs40l26_private *cs40l26, u32 *num_waves)
  2189. {
  2190. u32 reg, nwaves, nowt;
  2191. int error;
  2192. error = cl_dsp_get_reg(cs40l26->dsp, "NUM_OF_WAVES", CL_DSP_XM_UNPACKED_TYPE,
  2193. CS40L26_VIBEGEN_ALGO_ID, &reg);
  2194. if (error)
  2195. return error;
  2196. error = cs40l26_dsp_read(cs40l26, reg, &nwaves);
  2197. if (error)
  2198. return error;
  2199. error = cl_dsp_get_reg(cs40l26->dsp, "OWT_NUM_OF_WAVES_XM",
  2200. CL_DSP_XM_UNPACKED_TYPE, CS40L26_VIBEGEN_ALGO_ID, &reg);
  2201. if (error)
  2202. return error;
  2203. error = cs40l26_dsp_read(cs40l26, reg, &nowt);
  2204. if (error)
  2205. return error;
  2206. *num_waves = nwaves + nowt;
  2207. return 0;
  2208. }
  2209. EXPORT_SYMBOL_GPL(cs40l26_get_num_waves);
  2210. static struct cl_dsp_owt_header *cs40l26_owt_header(struct cs40l26_private *cs40l26, u8 index,
  2211. u16 bank)
  2212. {
  2213. if (bank == CS40L26_RAM_BANK_ID && cs40l26->dsp->wt_desc &&
  2214. index < cs40l26->dsp->wt_desc->owt.nwaves)
  2215. return &cs40l26->dsp->wt_desc->owt.waves[index];
  2216. return ERR_PTR(-EINVAL);
  2217. }
  2218. static int cs40l26_owt_get_wlength(struct cs40l26_private *cs40l26, u8 index, u32 *wlen_whole,
  2219. u16 bank)
  2220. {
  2221. struct device *dev = cs40l26->dev;
  2222. struct cl_dsp_owt_header *entry;
  2223. struct cl_dsp_memchunk ch;
  2224. if (index == 0) {
  2225. *wlen_whole = 0;
  2226. return 0;
  2227. }
  2228. entry = cs40l26_owt_header(cs40l26, index, bank);
  2229. if (IS_ERR(entry))
  2230. return PTR_ERR(entry);
  2231. switch (entry->type) {
  2232. case WT_TYPE_V6_PCM_F0_REDC:
  2233. case WT_TYPE_V6_PCM_F0_REDC_VAR:
  2234. case WT_TYPE_V6_PWLE:
  2235. break;
  2236. default:
  2237. dev_err(dev, "Cannot size waveform type %u\n", entry->type);
  2238. return -EINVAL;
  2239. }
  2240. ch = cl_dsp_memchunk_create(entry->data, sizeof(u32));
/* The first 24 bits of each waveform give its length in samples at 8 kHz */
  2242. return cl_dsp_memchunk_read(cs40l26->dsp, &ch, 24, wlen_whole);
  2243. }
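/*
 * OWT type-10 composite sections are serialized as 8-bit amplitude, index,
 * repeat and flags fields followed by a 16-bit delay; sections carrying the
 * DURATION flag append a pad byte and a 16-bit duration.
 */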
  2244. static void cs40l26_owt_set_section_info(struct cs40l26_private *cs40l26,
  2245. struct cl_dsp_memchunk *ch, struct cs40l26_owt_section *sections, u8 nsections)
  2246. {
  2247. int i;
  2248. for (i = 0; i < nsections; i++) {
  2249. cl_dsp_memchunk_write(ch, 8, sections[i].amplitude);
  2250. cl_dsp_memchunk_write(ch, 8, sections[i].index);
  2251. cl_dsp_memchunk_write(ch, 8, sections[i].repeat);
  2252. cl_dsp_memchunk_write(ch, 8, sections[i].flags);
  2253. cl_dsp_memchunk_write(ch, 16, sections[i].delay);
  2254. if (sections[i].flags & CS40L26_WT_TYPE10_COMP_DURATION_FLAG) {
  2255. cl_dsp_memchunk_write(ch, 8, 0x00); /* Pad */
  2256. cl_dsp_memchunk_write(ch, 16, sections[i].duration);
  2257. }
  2258. }
  2259. }
  2260. static int cs40l26_owt_get_section_info(struct cs40l26_private *cs40l26, struct cl_dsp_memchunk *ch,
  2261. struct cs40l26_owt_section *sections, u8 nsections)
  2262. {
  2263. int error = 0, i;
  2264. for (i = 0; i < nsections; i++) {
  2265. error = cl_dsp_memchunk_read(cs40l26->dsp, ch, 8, &sections[i].amplitude);
  2266. if (error)
  2267. return error;
  2268. error = cl_dsp_memchunk_read(cs40l26->dsp, ch, 8, &sections[i].index);
  2269. if (error)
  2270. return error;
  2271. error = cl_dsp_memchunk_read(cs40l26->dsp, ch, 8, &sections[i].repeat);
  2272. if (error)
  2273. return error;
  2274. error = cl_dsp_memchunk_read(cs40l26->dsp, ch, 8, &sections[i].flags);
  2275. if (error)
  2276. return error;
  2277. error = cl_dsp_memchunk_read(cs40l26->dsp, ch, 16, &sections[i].delay);
  2278. if (error)
  2279. return error;
  2280. if (sections[i].flags & CS40L26_WT_TYPE10_COMP_DURATION_FLAG) {
  2281. /* Skip padding */
  2282. error = cl_dsp_memchunk_read(cs40l26->dsp, ch, 8, NULL);
  2283. if (error)
  2284. return error;
  2285. error = cl_dsp_memchunk_read(cs40l26->dsp, ch, 16, &sections[i].duration);
  2286. if (error)
  2287. return error;
  2288. }
  2289. if (sections[i].flags & CS40L26_WT_TYPE10_COMP_ROM_FLAG)
  2290. sections[i].wvfrm_bank = CS40L26_ROM_BANK_ID;
  2291. else
  2292. sections[i].wvfrm_bank = CS40L26_RAM_BANK_ID;
  2293. }
  2294. return error;
  2295. }
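/*
 * Estimate the composite waveform length: each section contributes its
 * referenced waveform length (capped at 2 * duration when the DURATION flag
 * is set, or at the maximum encodable length for indefinite waveforms) plus
 * 8 * delay. A repeat value of 0xFF opens a loop and a non-zero repeat
 * closes it, multiplying the accumulated loop length by repeat + 1. The
 * total is scaled by global_rep + 1 and tagged as a calculated length.
 */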
  2296. static int cs40l26_owt_calculate_wlength(struct cs40l26_private *cs40l26, u8 nsections,
  2297. u8 global_rep, u8 *data, u32 data_size_bytes, u32 *owt_wlen)
  2298. {
  2299. u32 total_len = 0, section_len = 0, loop_len = 0, wlen_whole = 0;
  2300. bool in_loop = false;
  2301. struct cs40l26_owt_section *sections;
  2302. struct cl_dsp_memchunk ch;
  2303. u32 dlen, wlen;
  2304. int error, i;
  2305. if (nsections < 1) {
  2306. dev_err(cs40l26->dev, "Not enough sections for composite\n");
  2307. return -EINVAL;
  2308. }
  2309. sections = kcalloc(nsections, sizeof(struct cs40l26_owt_section), GFP_KERNEL);
  2310. if (!sections)
  2311. return -ENOMEM;
  2312. ch = cl_dsp_memchunk_create((void *) data, data_size_bytes);
  2313. error = cs40l26_owt_get_section_info(cs40l26, &ch, sections, nsections);
  2314. if (error) {
  2315. dev_err(cs40l26->dev, "Failed to get section info\n");
  2316. goto err_free;
  2317. }
  2318. for (i = 0; i < nsections; i++) {
  2319. error = cs40l26_owt_get_wlength(cs40l26, sections[i].index, &wlen_whole,
  2320. sections[i].wvfrm_bank);
  2321. if (error) {
  2322. dev_err(cs40l26->dev, "Failed to get wlength for index %u: %d\n",
  2323. sections[i].index, error);
  2324. goto err_free;
  2325. }
  2326. if (wlen_whole & CS40L26_WT_TYPE10_WAVELEN_INDEF) {
  2327. if (!(sections[i].flags & CS40L26_WT_TYPE10_COMP_DURATION_FLAG)) {
  2328. dev_err(cs40l26->dev, "Indefinite entry needs duration\n");
  2329. error = -EINVAL;
  2330. goto err_free;
  2331. }
  2332. wlen = CS40L26_WT_TYPE10_WAVELEN_MAX;
  2333. } else {
  2334. /* Length is 22 LSBs, filter out flags */
  2335. wlen = wlen_whole & CS40L26_WT_TYPE10_WAVELEN_MAX;
  2336. }
  2337. dlen = 8 * sections[i].delay;
  2338. if (sections[i].flags & CS40L26_WT_TYPE10_COMP_DURATION_FLAG) {
  2339. if (wlen > (2 * sections[i].duration))
  2340. wlen = 2 * sections[i].duration;
  2341. }
  2342. section_len = wlen + dlen;
  2343. loop_len += section_len;
  2344. if (sections[i].repeat == 0xFF) {
  2345. in_loop = true;
  2346. } else if (sections[i].repeat) {
  2347. total_len += (loop_len * (sections[i].repeat + 1));
  2348. in_loop = false;
  2349. loop_len = 0;
  2350. } else if (!in_loop) {
  2351. total_len += section_len;
  2352. loop_len = 0;
  2353. }
  2354. }
  2355. *owt_wlen = (total_len * (global_rep + 1)) | CS40L26_WT_TYPE10_WAVELEN_CALCULATED;
  2356. err_free:
  2357. kfree(sections);
  2358. return error;
  2359. }
  2360. static int cs40l26_owt_upload(struct cs40l26_private *cs40l26, u8 *data, u32 data_size_bytes)
  2361. {
  2362. struct device *dev = cs40l26->dev;
  2363. struct cl_dsp *dsp = cs40l26->dsp;
  2364. unsigned int write_reg, reg, wt_offset, wt_size_words, wt_base;
  2365. int error;
  2366. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  2367. bool err = false;
  2368. #endif
  2369. error = cs40l26_pm_enter(dev);
  2370. if (error)
  2371. return error;
  2372. error = cl_dsp_get_reg(dsp, "OWT_NEXT_XM", CL_DSP_XM_UNPACKED_TYPE, CS40L26_VIBEGEN_ALGO_ID,
  2373. &reg);
  2374. if (error) {
  2375. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  2376. dev_err(dev, "Failed to get cl_dsp_get_reg(OWT_NEXT_XM)\n");
  2377. err = true;
  2378. #endif
  2379. goto err_pm;
  2380. }
  2381. error = regmap_read(cs40l26->regmap, reg, &wt_offset);
  2382. if (error) {
  2383. dev_err(dev, "Failed to get wavetable offset\n");
  2384. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  2385. err = true;
  2386. #endif
  2387. goto err_pm;
  2388. }
  2389. error = cl_dsp_get_reg(dsp, "OWT_SIZE_XM", CL_DSP_XM_UNPACKED_TYPE,
  2390. CS40L26_VIBEGEN_ALGO_ID, &reg);
  2391. if (error) {
  2392. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  2393. dev_err(dev, "Failed to get cl_dsp_get_reg(OWT_SIZE_XM)\n");
  2394. err = true;
  2395. #endif
  2396. goto err_pm;
  2397. }
  2398. error = regmap_read(cs40l26->regmap, reg, &wt_size_words);
  2399. if (error) {
  2400. dev_err(dev, "Failed to get available WT size\n");
  2401. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  2402. err = true;
  2403. #endif
  2404. goto err_pm;
  2405. }
  2406. if ((wt_size_words * CL_DSP_BYTES_PER_WORD) < data_size_bytes) {
  2407. dev_err(dev, "No space for OWT waveform\n");
  2408. error = -ENOSPC;
  2409. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  2410. err = true;
  2411. #endif
  2412. goto err_pm;
  2413. }
  2414. error = cl_dsp_get_reg(dsp, CS40L26_WT_NAME_XM, CL_DSP_XM_UNPACKED_TYPE,
  2415. CS40L26_VIBEGEN_ALGO_ID, &wt_base);
  2416. if (error) {
  2417. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  2418. dev_err(dev, "Failed to get cl_dsp_get_reg(CS40L26_WT_NAME_XM)\n");
  2419. err = true;
  2420. #endif
  2421. goto err_pm;
  2422. }
  2423. write_reg = wt_base + (wt_offset * 4);
  2424. error = cl_dsp_raw_write(cs40l26->dsp, write_reg, data, data_size_bytes, CL_DSP_MAX_WLEN);
  2425. if (error) {
  2426. dev_err(dev, "Failed to sync OWT\n");
  2427. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  2428. err = true;
  2429. #endif
  2430. goto err_pm;
  2431. }
  2432. error = cs40l26_mailbox_write(cs40l26, CS40L26_DSP_MBOX_CMD_OWT_PUSH);
  2433. if (error) {
  2434. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  2435. dev_err(dev, "Failed to set cs40l26_ack_write\n");
  2436. err = true;
  2437. #endif
  2438. goto err_pm;
  2439. }
  2440. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  2441. dev_info(dev, "Successfully wrote waveform (%u bytes) to 0x%08X\n",
  2442. data_size_bytes, write_reg);
  2443. #else
  2444. dev_dbg(dev, "Successfully wrote waveform (%u bytes) to 0x%08X\n", data_size_bytes,
  2445. write_reg);
  2446. #endif
  2447. err_pm:
  2448. cs40l26_pm_exit(dev);
  2449. return error;
  2450. }
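/*
 * Rebuild a nested composite waveform's (NCW) section headers for inlining:
 * scale each referenced section's amplitude by the outer section's amplitude
 * (treated as a percentage, rounded up) and, when the waveform lives in ROM,
 * set the ROM flag. Returns a newly allocated buffer the caller must free,
 * or an ERR_PTR on failure.
 */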
  2451. static u8 *cs40l26_ncw_refactor_data(struct cs40l26_private *cs40l26, u8 amp, u8 nsections,
  2452. void *in_data, u32 data_bytes, u16 bank)
  2453. {
  2454. struct cs40l26_owt_section *sections;
  2455. struct cl_dsp_memchunk in_ch, out_ch;
  2456. u16 amp_product;
  2457. u8 *out_data;
  2458. int i, error;
  2459. if (nsections <= 0) {
  2460. dev_err(cs40l26->dev, "Too few sections for NCW\n");
  2461. return ERR_PTR(-EINVAL);
  2462. }
  2463. sections = kcalloc(nsections, sizeof(struct cs40l26_owt_section), GFP_KERNEL);
  2464. if (!sections)
  2465. return ERR_PTR(-ENOMEM);
  2466. in_ch = cl_dsp_memchunk_create(in_data, data_bytes);
  2467. error = cs40l26_owt_get_section_info(cs40l26, &in_ch, sections, nsections);
  2468. if (error) {
  2469. dev_err(cs40l26->dev, "Failed to get section info\n");
  2470. goto sections_free;
  2471. }
  2472. for (i = 0; i < nsections; i++) {
  2473. if (sections[i].index != 0) {
  2474. amp_product = sections[i].amplitude * amp;
  2475. sections[i].amplitude = (u8) DIV_ROUND_UP(amp_product, 100);
  2476. }
  2477. if (bank == CS40L26_ROM_BANK_ID)
  2478. sections[i].flags |= CS40L26_WT_TYPE10_COMP_ROM_FLAG;
  2479. }
  2480. out_data = kzalloc(data_bytes, GFP_KERNEL);
  2481. if (!out_data) {
  2482. error = -ENOMEM;
  2483. goto sections_free;
  2484. }
  2485. out_ch = cl_dsp_memchunk_create((void *) out_data, data_bytes);
  2486. cs40l26_owt_set_section_info(cs40l26, &out_ch, sections, nsections);
  2487. sections_free:
  2488. kfree(sections);
  2489. return error ? ERR_PTR(error) : out_data;
  2490. }
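/*
 * Compute the buffer size needed for the refactored composite: delay-only
 * sections use the minimum section size, references to nested composites
 * contribute the nested payload size (plus a trailing delay section when
 * section_complete() indicates one is required), and ordinary sections use
 * the minimum or maximum section size depending on whether a duration is
 * present.
 */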
static int cs40l26_owt_comp_data_size(struct cs40l26_private *cs40l26,
		u8 nsections, struct cs40l26_owt_section *sections)
{
	int i, size = 0;
	struct cl_dsp_owt_header *header;

	for (i = 0; i < nsections; i++) {
		if (sections[i].index == 0) {
			size += CS40L26_WT_TYPE10_SECTION_BYTES_MIN;
			continue;
		}

		header = cs40l26_owt_header(cs40l26, sections[i].index, sections[i].wvfrm_bank);
		if (IS_ERR(header))
			return PTR_ERR(header);

		if (header->type == WT_TYPE_V6_COMPOSITE) {
			size += (header->size - 2) * 4;

			if (section_complete(&sections[i]))
				size += CS40L26_WT_TYPE10_SECTION_BYTES_MIN;
		} else {
			size += sections[i].duration ?
					CS40L26_WT_TYPE10_SECTION_BYTES_MAX :
					CS40L26_WT_TYPE10_SECTION_BYTES_MIN;
		}
	}

	return size;
}
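/*
 * Convert a user-space composite into the firmware's type-10 format: parse
 * the incoming section list, copy plain sections through, inline any nested
 * composites via cs40l26_ncw_refactor_data() (appending a delay section where
 * needed), then prepend a header with a freshly calculated wavelength and
 * upload the result with cs40l26_owt_upload().
 */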
  2516. static int cs40l26_composite_upload(struct cs40l26_private *cs40l26, s16 *in_data,
  2517. u32 in_data_nibbles)
  2518. {
  2519. int pos_byte = 0, in_pos_nib = 2, in_data_bytes = 2 * in_data_nibbles;
  2520. u8 nsections, global_rep, out_nsections = 0;
  2521. int out_data_bytes = 0, data_bytes = 0;
  2522. struct device *dev = cs40l26->dev;
  2523. u8 ncw_nsections, ncw_global_rep, *data, *ncw_data, *out_data;
  2524. u8 delay_section_data[CS40L26_WT_TYPE10_SECTION_BYTES_MIN];
  2525. struct cs40l26_owt_section *sections;
  2526. struct cl_dsp_memchunk ch, out_ch;
  2527. struct cl_dsp_owt_header *header;
  2528. u16 section_size_bytes;
  2529. u32 ncw_bytes, wlen;
  2530. int i, error;
  2531. ch = cl_dsp_memchunk_create((void *) in_data, in_data_bytes);
  2532. /* Skip padding */
  2533. error = cl_dsp_memchunk_read(cs40l26->dsp, &ch, 8, NULL);
  2534. if (error)
  2535. return error;
  2536. error = cl_dsp_memchunk_read(cs40l26->dsp, &ch, 8, &nsections);
  2537. if (error)
  2538. return error;
  2539. error = cl_dsp_memchunk_read(cs40l26->dsp, &ch, 8, &global_rep);
  2540. if (error)
  2541. return error;
  2542. sections = kcalloc(nsections, sizeof(struct cs40l26_owt_section),
  2543. GFP_KERNEL);
  2544. if (!sections)
  2545. return -ENOMEM;
  2546. error = cs40l26_owt_get_section_info(cs40l26, &ch, sections, nsections);
  2547. if (error) {
  2548. dev_err(cs40l26->dev, "Failed to get section info\n");
  2549. goto sections_err_free;
  2550. }
  2551. data_bytes = cs40l26_owt_comp_data_size(cs40l26, nsections, sections);
  2552. if (data_bytes <= 0) {
  2553. dev_err(dev, "Failed to get OWT Composite Data Size\n");
  2554. error = data_bytes;
  2555. goto sections_err_free;
  2556. }
  2557. data = kcalloc(data_bytes, sizeof(u8), GFP_KERNEL);
  2558. if (!data) {
  2559. error = -ENOMEM;
  2560. goto sections_err_free;
  2561. }
  2562. cl_dsp_memchunk_flush(&ch);
  2563. memset(&delay_section_data, 0, CS40L26_WT_TYPE10_SECTION_BYTES_MIN);
  2564. for (i = 0; i < nsections; i++) {
  2565. section_size_bytes = sections[i].duration ?
  2566. CS40L26_WT_TYPE10_SECTION_BYTES_MAX :
  2567. CS40L26_WT_TYPE10_SECTION_BYTES_MIN;
  2568. if (sections[i].index == 0) {
  2569. memcpy(data + pos_byte, in_data + in_pos_nib, section_size_bytes);
  2570. pos_byte += section_size_bytes;
  2571. in_pos_nib += section_size_bytes / 2;
  2572. out_nsections++;
  2573. continue;
  2574. }
  2575. if (sections[i].repeat != 0) {
  2576. dev_err(dev, "Inner repeats not allowed for NCWs\n");
  2577. error = -EPERM;
  2578. goto data_err_free;
  2579. }
  2580. header = cs40l26_owt_header(cs40l26, sections[i].index, sections[i].wvfrm_bank);
  2581. if (IS_ERR(header)) {
  2582. error = PTR_ERR(header);
  2583. goto data_err_free;
  2584. }
  2585. if (header->type == WT_TYPE_V6_COMPOSITE) {
  2586. ch = cl_dsp_memchunk_create(header->data, 8);
  2587. /* Skip Wlength */
  2588. error = cl_dsp_memchunk_read(cs40l26->dsp, &ch, 24, NULL);
  2589. if (error)
  2590. goto data_err_free;
  2591. /* Skip Padding */
  2592. error = cl_dsp_memchunk_read(cs40l26->dsp, &ch, 8, NULL);
  2593. if (error)
  2594. goto data_err_free;
  2595. error = cl_dsp_memchunk_read(cs40l26->dsp, &ch, 8, &ncw_nsections);
  2596. if (error)
  2597. goto data_err_free;
  2598. error = cl_dsp_memchunk_read(cs40l26->dsp, &ch, 8, &ncw_global_rep);
  2599. if (error)
  2600. goto data_err_free;
  2601. if (ncw_global_rep != 0) {
  2602. dev_err(dev,
  2603. "No NCW support for outer repeat\n");
  2604. error = -EPERM;
  2605. goto data_err_free;
  2606. }
  2607. cl_dsp_memchunk_flush(&ch);
  2608. ncw_bytes = (header->size - 2) * 4;
  2609. ncw_data = cs40l26_ncw_refactor_data(cs40l26, sections[i].amplitude,
  2610. ncw_nsections, header->data + 8,
  2611. ncw_bytes, sections[i].wvfrm_bank);
  2612. if (IS_ERR(ncw_data)) {
  2613. error = PTR_ERR(ncw_data);
  2614. goto data_err_free;
  2615. }
  2616. memcpy(data + pos_byte, ncw_data, ncw_bytes);
  2617. pos_byte += ncw_bytes;
  2618. out_nsections += ncw_nsections;
  2619. kfree(ncw_data);
  2620. if (section_complete(&sections[i])) {
  2621. ch = cl_dsp_memchunk_create((void *) delay_section_data,
  2622. CS40L26_WT_TYPE10_SECTION_BYTES_MIN);
  2623. cl_dsp_memchunk_write(&ch, 24, 0x000000);
  2624. cl_dsp_memchunk_write(&ch, 8, 0x00);
  2625. cl_dsp_memchunk_write(&ch, 16, sections[i].delay);
  2626. memcpy(data + pos_byte, delay_section_data,
  2627. CS40L26_WT_TYPE10_SECTION_BYTES_MIN);
  2628. cl_dsp_memchunk_flush(&ch);
  2629. pos_byte += CS40L26_WT_TYPE10_SECTION_BYTES_MIN;
  2630. out_nsections++;
  2631. }
  2632. } else {
  2633. memcpy(data + pos_byte, in_data + in_pos_nib, section_size_bytes);
  2634. pos_byte += section_size_bytes;
  2635. out_nsections++;
  2636. }
  2637. in_pos_nib += section_size_bytes / 2;
  2638. }
  2639. out_data_bytes = data_bytes + CS40L26_WT_HEADER_COMP_SIZE;
  2640. out_data = kcalloc(out_data_bytes, sizeof(u8), GFP_KERNEL);
  2641. if (!out_data) {
  2642. dev_err(dev, "Failed to allocate space for composite\n");
  2643. error = -ENOMEM;
  2644. goto data_err_free;
  2645. }
  2646. out_ch = cl_dsp_memchunk_create((void *) out_data, out_data_bytes);
  2647. cl_dsp_memchunk_write(&out_ch, 16, CS40L26_WT_HEADER_DEFAULT_FLAGS);
  2648. cl_dsp_memchunk_write(&out_ch, 8, WT_TYPE_V6_COMPOSITE);
  2649. cl_dsp_memchunk_write(&out_ch, 24, CS40L26_WT_HEADER_OFFSET);
  2650. cl_dsp_memchunk_write(&out_ch, 24, data_bytes / CL_DSP_BYTES_PER_WORD);
  2651. error = cs40l26_owt_calculate_wlength(cs40l26, out_nsections, global_rep, data, data_bytes,
  2652. &wlen);
  2653. if (error)
  2654. goto out_data_err_free;
  2655. cl_dsp_memchunk_write(&out_ch, 24, wlen);
  2656. cl_dsp_memchunk_write(&out_ch, 8, 0x00); /* Pad */
  2657. cl_dsp_memchunk_write(&out_ch, 8, out_nsections);
  2658. cl_dsp_memchunk_write(&out_ch, 8, global_rep);
  2659. memcpy(out_data + out_ch.bytes, data, data_bytes);
  2660. error = cs40l26_owt_upload(cs40l26, out_data, out_data_bytes);
  2661. out_data_err_free:
  2662. kfree(out_data);
  2663. data_err_free:
  2664. kfree(data);
  2665. sections_err_free:
  2666. kfree(sections);
  2667. return error;
  2668. }
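/*
 * Program a BUZZGEN slot for an FF_SINE effect: the replay length is scaled
 * for the firmware, the period (in ms) is converted to a frequency in Hz and
 * clamped to the supported range, and the magnitude is clamped to the valid
 * level range before the duration, frequency and level controls are written.
 */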
  2669. static int cs40l26_sine_upload(struct cs40l26_private *cs40l26, struct ff_effect *effect,
  2670. struct cs40l26_uploaded_effect *ueffect)
  2671. {
  2672. unsigned int duration, freq, level;
  2673. int error, slot;
  2674. u32 reg;
  2675. slot = cs40l26_buzzgen_find_slot(cs40l26, effect->id);
  2676. if (slot == -1) {
  2677. dev_err(cs40l26->dev, "No free BUZZGEN slot available\n");
  2678. return -ENOSPC;
  2679. }
  2680. cs40l26_buzzgen_configs[slot].effect_id = effect->id;
  2681. /*
  2682. * Divide duration by 4 to match firmware's expectation.
  2683. * Round up to avoid inadvertently setting a duration of 0.
  2684. */
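	/*
	 * For example, a 10 ms replay length yields DIV_ROUND_UP(10, 4) = 3,
	 * and a 1 ms length yields 1 rather than 0.
	 */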
  2685. duration = (unsigned int) DIV_ROUND_UP(effect->replay.length, 4);
  2686. if (effect->u.periodic.period < CS40L26_BUZZGEN_PER_MIN)
  2687. freq = 1000 / CS40L26_BUZZGEN_PER_MIN;
  2688. else if (effect->u.periodic.period > CS40L26_BUZZGEN_PER_MAX)
  2689. freq = 1000 / CS40L26_BUZZGEN_PER_MAX;
  2690. else
  2691. freq = 1000 / effect->u.periodic.period;
  2692. if (effect->u.periodic.magnitude < CS40L26_BUZZGEN_LEVEL_MIN)
  2693. level = CS40L26_BUZZGEN_LEVEL_MIN;
  2694. else if (effect->u.periodic.magnitude > CS40L26_BUZZGEN_LEVEL_MAX)
  2695. level = CS40L26_BUZZGEN_LEVEL_MAX;
  2696. else
  2697. level = effect->u.periodic.magnitude;
  2698. error = cl_dsp_get_reg(cs40l26->dsp, cs40l26_buzzgen_configs[slot].duration_name,
  2699. CL_DSP_XM_UNPACKED_TYPE, CS40L26_BUZZGEN_ALGO_ID, &reg);
  2700. if (error)
  2701. return error;
  2702. error = regmap_write(cs40l26->regmap, reg, duration);
  2703. if (error)
  2704. return error;
  2705. error = cl_dsp_get_reg(cs40l26->dsp, cs40l26_buzzgen_configs[slot].freq_name,
  2706. CL_DSP_XM_UNPACKED_TYPE, CS40L26_BUZZGEN_ALGO_ID, &reg);
  2707. if (error)
  2708. return error;
  2709. error = regmap_write(cs40l26->regmap, reg, freq);
  2710. if (error)
  2711. return error;
  2712. error = cl_dsp_get_reg(cs40l26->dsp, cs40l26_buzzgen_configs[slot].level_name,
  2713. CL_DSP_XM_UNPACKED_TYPE, CS40L26_BUZZGEN_ALGO_ID, &reg);
  2714. if (error)
  2715. return error;
  2716. error = regmap_write(cs40l26->regmap, reg, level);
  2717. if (error)
  2718. return error;
  2719. ueffect->id = effect->id;
  2720. ueffect->wvfrm_bank = CS40L26_BUZ_BANK_ID;
  2721. /*
  2722. * BUZZGEN 1 is reserved for OTP buzz; BUZZGEN 2 - BUZZGEN 6 are valid.
  2723. * Add an offset of 1 for this reason.
  2724. */
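	/*
	 * E.g. slot 0 resolves to CS40L26_BUZZGEN_INDEX_START + 1, the first
	 * generator not reserved for the OTP buzz.
	 */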
  2725. ueffect->trigger_index = CS40L26_BUZZGEN_INDEX_START + slot + 1;
  2726. return 0;
  2727. }
  2728. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
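/*
 * Translate a Samsung SEP effect index into the Cirrus wavetable index:
 * 0 and 100 map to 0, 119-124 are offset by 16, 126-127 by 15, and all
 * other indices by 9.
 */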
static int cs40l26_index_mapping(int sep_index)
{
	int cirrus_index = 0;

	switch (sep_index) {
	case 0:
	case 100:
		break;
	case 119 ... 124:
		cirrus_index = sep_index + 16;
		break;
	case 126 ... 127:
		cirrus_index = sep_index + 15;
		break;
	default:
		cirrus_index = sep_index + 9;
		break;
	}

	return cirrus_index;
}
#endif
  2749. static int cs40l26_custom_upload(struct cs40l26_private *cs40l26, struct ff_effect *effect,
  2750. struct cs40l26_uploaded_effect *ueffect)
  2751. {
  2752. struct device *dev = cs40l26->dev;
  2753. u8 *pwle_data = NULL;
  2754. int error, data_len, pwle_data_len, max_index_tmp;
  2755. u32 nwaves, min_index, max_index, trigger_index;
  2756. u16 index, bank;
  2757. data_len = effect->u.periodic.custom_len;
  2758. if (data_len > CS40L26_CUSTOM_DATA_SIZE) {
  2759. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  2760. dev_info(cs40l26->dev, "%s OWT effect\n", __func__);
  2761. #endif
		if (cs40l26->raw_custom_data[1] == CS40L26_WT_TYPE12_IDENTIFIER) {
			pwle_data_len = cs40l26->raw_custom_data_len * 2;
			pwle_data = kcalloc(pwle_data_len, sizeof(u8), GFP_KERNEL);
			if (!pwle_data) {
				dev_err(dev, "Failed to allocate space for PWLE\n");
				return -ENOMEM;
			}

			memcpy(pwle_data, cs40l26->raw_custom_data, pwle_data_len);

			error = cs40l26_owt_upload(cs40l26, pwle_data, pwle_data_len);
			/* The staging copy is no longer needed once uploaded;
			 * free it on both the success and error paths so it is
			 * not leaked.
			 */
			kfree(pwle_data);
			if (error)
				return error;
  2773. } else {
  2774. error = cs40l26_composite_upload(cs40l26, cs40l26->raw_custom_data,
  2775. data_len);
  2776. if (error) {
  2777. dev_err(dev, "Failed to refactor OWT\n");
  2778. return error;
  2779. }
  2780. }
  2781. bank = (u16) CS40L26_OWT_BANK_ID;
  2782. index = (u16) cs40l26->num_owt_effects;
  2783. } else {
  2784. bank = (u16) cs40l26->raw_custom_data[0];
  2785. index = (u16) (cs40l26->raw_custom_data[1] & CS40L26_MAX_INDEX_MASK);
  2786. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  2787. if (cs40l26->use_sep_index) {
  2788. dev_info(cs40l26->dev, "%s SEP index(%d)\n", __func__, index);
  2789. index = cs40l26_index_mapping(index);
  2790. }
  2791. dev_info(cs40l26->dev, "%s Index(%d) effect\n", __func__, index);
  2792. #endif
  2793. }
  2794. error = cs40l26_get_num_waves(cs40l26, &nwaves);
  2795. if (error) {
  2796. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  2797. dev_err(cs40l26->dev, "%s cs40l26_get_num_waves in FF_CUSTOM error(%d)\n",
  2798. __func__, error);
  2799. #endif
  2800. return error;
  2801. }
  2802. switch (bank) {
  2803. case CS40L26_RAM_BANK_ID:
  2804. if (nwaves - cs40l26->num_owt_effects == 0) {
  2805. dev_err(dev, "No waveforms in RAM bank\n");
  2806. return -EINVAL;
  2807. }
  2808. min_index = CS40L26_RAM_INDEX_START;
  2809. max_index_tmp = min_index + nwaves - cs40l26->num_owt_effects - 1;
  2810. if (max_index_tmp < 0) {
  2811. dev_err(dev, "Invalid RAM index %d\n", max_index_tmp);
  2812. return -EINVAL;
  2813. }
  2814. max_index = (u32) max_index_tmp;
  2815. break;
  2816. case CS40L26_ROM_BANK_ID:
  2817. min_index = CS40L26_ROM_INDEX_START;
  2818. max_index = CS40L26_ROM_INDEX_END;
  2819. break;
  2820. case CS40L26_OWT_BANK_ID:
  2821. min_index = CS40L26_OWT_INDEX_START;
  2822. max_index = CS40L26_OWT_INDEX_END;
  2823. break;
  2824. default:
  2825. dev_err(dev, "Bank ID (%u) invalid\n", bank);
  2826. return -EINVAL;
  2827. }
  2828. trigger_index = index + min_index;
  2829. if (trigger_index < min_index || trigger_index > max_index) {
  2830. dev_err(dev, "Index 0x%X out of bounds (0x%X - 0x%X)\n", trigger_index, min_index,
  2831. max_index);
  2832. return -EINVAL;
  2833. }
  2834. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  2835. dev_info(cs40l26->dev, "%s: ID = %d, trigger index = 0x%08X, duration = %dms\n",
  2836. __func__, effect->id, trigger_index, effect->replay.length);
  2837. #else
  2838. dev_dbg(dev, "ID = %d, trigger index = 0x%08X\n", effect->id, trigger_index);
  2839. #endif
  2840. if (bank == CS40L26_OWT_BANK_ID)
  2841. cs40l26->num_owt_effects++;
  2842. ueffect->id = effect->id;
  2843. ueffect->wvfrm_bank = bank;
  2844. ueffect->trigger_index = trigger_index;
  2845. return error;
  2846. }
  2847. static int cs40l26_uploaded_effect_add(struct cs40l26_private *cs40l26, struct ff_effect *effect)
  2848. {
  2849. struct device *dev = cs40l26->dev;
  2850. bool is_new = false;
  2851. struct cs40l26_uploaded_effect *ueffect;
  2852. int error;
  2853. ueffect = cs40l26_uploaded_effect_find(cs40l26, effect->id);
  2854. if (IS_ERR_OR_NULL(ueffect)) {
  2855. is_new = true;
  2856. ueffect = devm_kzalloc(dev, sizeof(*ueffect), GFP_KERNEL);
  2857. if (!ueffect)
  2858. return -ENOMEM;
  2859. }
  2860. if (effect->u.periodic.waveform == FF_CUSTOM) {
  2861. error = cs40l26_custom_upload(cs40l26, effect, ueffect);
  2862. } else if (effect->u.periodic.waveform == FF_SINE) {
  2863. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  2864. dev_info(cs40l26->dev, "%s FF_SINE\n", __func__);
  2865. #endif
  2866. error = cs40l26_sine_upload(cs40l26, effect, ueffect);
  2867. } else {
  2868. dev_err(dev, "Periodic waveform type 0x%X not supported\n",
  2869. effect->u.periodic.waveform);
  2870. error = -EINVAL;
  2871. }
  2872. if (error)
  2873. goto err_free;
  2874. if (effect->trigger.button) {
  2875. error = cs40l26_map_gpi_to_haptic(cs40l26, effect, ueffect);
  2876. if (error)
  2877. goto err_free;
  2878. } else {
  2879. ueffect->mapping = CS40L26_GPIO_MAP_INVALID;
  2880. }
  2881. if (is_new)
  2882. list_add(&ueffect->list, &cs40l26->effect_head);
  2883. return 0;
  2884. err_free:
  2885. if (is_new)
  2886. devm_kfree(dev, ueffect);
  2887. return error;
  2888. }
  2889. static void cs40l26_upload_worker(struct work_struct *work)
  2890. {
  2891. struct cs40l26_private *cs40l26 = container_of(work,
  2892. struct cs40l26_private, upload_work);
  2893. struct device *cdev = cs40l26->dev;
  2894. struct ff_effect *effect;
  2895. u32 nwaves;
  2896. int error;
  2897. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  2898. bool err = false;
  2899. if (cs40l26->busy_state) {
  2900. dev_err(cs40l26->dev, "%s - f/w is busy\n", __func__);
  2901. return;
  2902. }
  2903. #endif
  2904. error = cs40l26_pm_enter(cdev);
  2905. if (error)
  2906. return;
  2907. mutex_lock(&cs40l26->lock);
  2908. effect = &cs40l26->upload_effect;
  2909. if (effect->type != FF_PERIODIC) {
  2910. dev_err(cdev, "Effect type 0x%X not supported\n", effect->type);
  2911. error = -EINVAL;
  2912. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  2913. err = true;
  2914. #endif
  2915. goto out_mutex;
  2916. }
  2917. error = cs40l26_uploaded_effect_add(cs40l26, effect);
  2918. if (error) {
  2919. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  2920. err = true;
  2921. #endif
  2922. goto out_mutex;
  2923. }
  2924. error = cs40l26_get_num_waves(cs40l26, &nwaves);
  2925. if (error) {
  2926. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  2927. dev_err(cdev, "%s final cs40l26_get_num_waves error(%d)\n",
  2928. __func__, error);
  2929. err = true;
  2930. #endif
  2931. goto out_mutex;
  2932. }
  2933. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  2934. dev_info(cdev, "Total number of waveforms = %u\n", nwaves);
  2935. #else
  2936. dev_dbg(cdev, "Total number of waveforms = %u\n", nwaves);
  2937. #endif
  2938. out_mutex:
  2939. mutex_unlock(&cs40l26->lock);
  2940. cs40l26_pm_exit(cdev);
  2941. cs40l26->upload_ret = error;
  2942. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  2943. if (err)
  2944. samsung_recovery(cs40l26);
  2945. #endif
  2946. }
  2947. static int cs40l26_upload_effect(struct input_dev *dev,
  2948. struct ff_effect *effect, struct ff_effect *old)
  2949. {
  2950. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  2951. struct sec_vib_inputff_drvdata *ddata = input_get_drvdata(dev);
  2952. struct cs40l26_private *cs40l26 = ddata->private_data;
  2953. #else
  2954. struct cs40l26_private *cs40l26 = input_get_drvdata(dev);
  2955. #endif
  2956. int len = effect->u.periodic.custom_len;
  2957. int error;
  2958. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  2959. dev_info(cs40l26->dev, "%s: effect ID = %d len=%d\n", __func__, effect->id, len);
  2960. #else
  2961. dev_dbg(cs40l26->dev, "%s: effect ID = %d\n", __func__, effect->id);
  2962. #endif
  2963. memcpy(&cs40l26->upload_effect, effect, sizeof(struct ff_effect));
  2964. if (effect->u.periodic.waveform == FF_CUSTOM) {
  2965. cs40l26->raw_custom_data_len = len;
  2966. cs40l26->raw_custom_data = kcalloc(len, sizeof(s16),
  2967. GFP_KERNEL);
  2968. if (!cs40l26->raw_custom_data) {
  2969. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  2970. dev_err(cs40l26->dev, "%s:raw_custom_data is null\n", __func__);
  2971. #endif
  2972. error = -ENOMEM;
  2973. goto out_free;
  2974. }
  2975. if (copy_from_user(cs40l26->raw_custom_data, effect->u.periodic.custom_data,
  2976. sizeof(s16) * len)) {
  2977. dev_err(cs40l26->dev, "Failed to get user data\n");
  2978. error = -EFAULT;
  2979. goto out_free;
  2980. }
  2981. }
  2982. queue_work(cs40l26->vibe_workqueue, &cs40l26->upload_work);
  2983. /* Wait for upload to finish */
  2984. flush_work(&cs40l26->upload_work);
  2985. error = cs40l26->upload_ret;
  2986. out_free:
  2987. memset(&cs40l26->upload_effect, 0, sizeof(struct ff_effect));
  2988. kfree(cs40l26->raw_custom_data);
  2989. cs40l26->raw_custom_data = NULL;
  2990. return error;
  2991. }
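/*
 * Clear a GPI-to-haptic trigger: only the GPIO1 press/release (MAP_A) entries
 * are accepted, and the corresponding slot in the packed event map table is
 * overwritten with the GPI-disable value.
 */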
static int cs40l26_erase_gpi_mapping(struct cs40l26_private *cs40l26, enum cs40l26_gpio_map mapping)
{
	u32 reg, base, offset;
	int error;

	if (mapping != CS40L26_GPIO_MAP_A_PRESS && mapping != CS40L26_GPIO_MAP_A_RELEASE) {
		dev_err(cs40l26->dev, "Invalid GPI mapping %u\n", mapping);
		return -EINVAL;
	}

	base = cs40l26->rom_regs->event_map_table_event_data_packed;
	offset = mapping * CL_DSP_BYTES_PER_WORD;
	reg = base + offset;

	error = regmap_write(cs40l26->regmap, reg, CS40L26_EVENT_MAP_GPI_DISABLE);
	if (error) {
		dev_err(cs40l26->dev, "Failed to clear GPI mapping %u\n", mapping);
		return error;
	}

	return 0;
}
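/*
 * Delete an OWT waveform via the OWT_DELETE mailbox command, then shift down
 * the trigger indices of any OWT effects uploaded after it so they keep
 * pointing at the right wavetable slots. Samsung builds also verify that the
 * reported wave count actually dropped and retry the delete once if not.
 */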
  3011. static int cs40l26_erase_owt(struct cs40l26_private *cs40l26,
  3012. struct cs40l26_uploaded_effect *ueffect)
  3013. {
  3014. u32 cmd = CS40L26_DSP_MBOX_CMD_OWT_DELETE_BASE;
  3015. u32 index = ueffect->trigger_index;
  3016. struct cs40l26_uploaded_effect *ueffect_tmp;
  3017. int error;
  3018. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  3019. u32 old_nwaves = 0, nwaves = 0;
  3020. #endif
  3021. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  3022. dev_info(cs40l26->dev, "%s\n", __func__);
  3023. error = cs40l26_get_num_waves(cs40l26, &old_nwaves);
  3024. if (error)
  3025. dev_err(cs40l26->dev, "%s Failed to get old num waves: %d\n",
  3026. __func__, error);
  3027. #endif
  3028. cmd |= (index & 0xFF);
  3029. error = cs40l26_mailbox_write(cs40l26, cmd);
  3030. if (error)
  3031. return error;
  3032. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  3033. error = cs40l26_get_num_waves(cs40l26, &nwaves);
  3034. if (error)
  3035. dev_err(cs40l26->dev, "%s Failed to get new num waves: %d\n",
  3036. __func__, error);
  3037. if (old_nwaves-1 != nwaves) {
  3038. dev_err(cs40l26->dev, "%s unmatch nwaves old %d: new %d\n",
  3039. __func__, old_nwaves, nwaves);
  3040. cs40l26_mailbox_write(cs40l26, cmd);
  3041. error = cs40l26_get_num_waves(cs40l26, &nwaves);
  3042. if (error)
  3043. dev_err(cs40l26->dev, "%s Failed to get num waves: %d\n",
  3044. __func__, error);
  3045. dev_info(cs40l26->dev, "nwaves old %d: new %d\n",
  3046. old_nwaves, nwaves);
  3047. }
  3048. #endif
  3049. /* Update indices for OWT waveforms uploaded after erased effect */
  3050. list_for_each_entry(ueffect_tmp, &cs40l26->effect_head, list) {
  3051. if (ueffect_tmp->wvfrm_bank == CS40L26_OWT_BANK_ID &&
  3052. ueffect_tmp->trigger_index > index)
  3053. ueffect_tmp->trigger_index--;
  3054. }
  3055. cs40l26->num_owt_effects--;
  3056. return 0;
  3057. }
  3058. static void cs40l26_erase_worker(struct work_struct *work)
  3059. {
  3060. struct cs40l26_private *cs40l26 = container_of(work,
  3061. struct cs40l26_private, erase_work);
  3062. struct cs40l26_uploaded_effect *ueffect;
  3063. int effect_id, error;
  3064. u16 duration;
  3065. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  3066. if (cs40l26->busy_state) {
  3067. pr_info("%s - f/w is busy\n", __func__);
  3068. return;
  3069. }
  3070. #endif
  3071. error = cs40l26_pm_enter(cs40l26->dev);
  3072. if (error)
  3073. return;
  3074. mutex_lock(&cs40l26->lock);
  3075. effect_id = cs40l26->erase_effect->id;
  3076. ueffect = cs40l26_uploaded_effect_find(cs40l26, effect_id);
  3077. if (IS_ERR_OR_NULL(ueffect)) {
  3078. dev_err(cs40l26->dev, "No such effect to erase (%d)\n",
  3079. effect_id);
  3080. error = ueffect ? PTR_ERR(ueffect) : -EINVAL;
  3081. goto out_mutex;
  3082. }
  3083. duration = (cs40l26->erase_effect->replay.length == 0) ?
  3084. CS40L26_MAX_WAIT_VIBE_COMPLETE_MS :
  3085. cs40l26->erase_effect->replay.length + CS40L26_ERASE_BUFFER_MS;
  3086. /* Check for ongoing effect playback. */
  3087. if (cs40l26->vibe_state == CS40L26_VIBE_STATE_HAPTIC) {
  3088. /* Wait for effect to complete. */
  3089. mutex_unlock(&cs40l26->lock);
  3090. if (!wait_for_completion_timeout(&cs40l26->erase_cont,
  3091. msecs_to_jiffies(duration))) {
  3092. error = -ETIME;
  3093. dev_err(cs40l26->dev, "Failed to erase effect (%d)\n",
  3094. effect_id);
  3095. goto pm_err;
  3096. }
  3097. mutex_lock(&cs40l26->lock);
  3098. }
  3099. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  3100. dev_info(cs40l26->dev, "%s: effect ID = %d\n", __func__, effect_id);
  3101. #else
  3102. dev_dbg(cs40l26->dev, "%s: effect ID = %d\n", __func__, effect_id);
  3103. #endif
  3104. if (ueffect->wvfrm_bank == CS40L26_BUZ_BANK_ID) {
  3105. error = cs40l26_erase_buzzgen(cs40l26, ueffect->id);
  3106. if (error)
  3107. goto out_mutex;
  3108. }
  3109. if (ueffect->mapping != CS40L26_GPIO_MAP_INVALID) {
  3110. error = cs40l26_erase_gpi_mapping(cs40l26, ueffect->mapping);
  3111. if (error)
  3112. goto out_mutex;
  3113. ueffect->mapping = CS40L26_GPIO_MAP_INVALID;
  3114. }
  3115. if (ueffect->wvfrm_bank == CS40L26_OWT_BANK_ID)
  3116. error = cs40l26_erase_owt(cs40l26, ueffect);
  3117. if (error) {
  3118. dev_err(cs40l26->dev, "Failed to erase effect: %d", error);
  3119. goto out_mutex;
  3120. }
  3121. list_del(&ueffect->list);
  3122. devm_kfree(cs40l26->dev, ueffect);
  3123. out_mutex:
  3124. mutex_unlock(&cs40l26->lock);
  3125. pm_err:
  3126. cs40l26_pm_exit(cs40l26->dev);
  3127. cs40l26->erase_ret = error;
  3128. }
  3129. static int cs40l26_erase_effect(struct input_dev *dev, int effect_id)
  3130. {
  3131. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  3132. struct sec_vib_inputff_drvdata *ddata = input_get_drvdata(dev);
  3133. struct cs40l26_private *cs40l26 = ddata->private_data;
  3134. #else
  3135. struct cs40l26_private *cs40l26 = input_get_drvdata(dev);
  3136. #endif
  3137. struct ff_effect *effect;
  3138. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  3139. dev_info(cs40l26->dev, "%s: effect ID = %d\n", __func__, effect_id);
  3140. #else
  3141. dev_dbg(cs40l26->dev, "%s: effect ID = %d\n", __func__, effect_id);
  3142. #endif
  3143. effect = &dev->ff->effects[effect_id];
  3144. if (!effect) {
  3145. dev_err(cs40l26->dev, "No such effect to erase\n");
  3146. return -EINVAL;
  3147. }
  3148. cs40l26->erase_effect = effect;
  3149. queue_work(cs40l26->vibe_workqueue, &cs40l26->erase_work);
  3150. /* Wait for erase to finish */
  3151. flush_work(&cs40l26->erase_work);
  3152. return cs40l26->erase_ret;
  3153. }
  3154. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  3155. static const struct sec_vib_inputff_ops cs40l26_vib_ops = {
  3156. .upload = cs40l26_upload_effect,
  3157. .playback = cs40l26_playback_effect,
  3158. .set_gain = cs40l26_set_gain,
  3159. .erase = cs40l26_erase_effect,
  3160. .get_i2s_test = samsung_get_i2s_test,
  3161. .fw_load = samsung_fw_load,
  3162. .set_trigger_cal = samsung_set_trigger_cal,
  3163. .get_f0_measured = samsung_get_f0_measured,
  3164. .get_f0_offset = samsung_get_f0_offset,
  3165. .set_f0_stored = samsung_set_f0_stored,
  3166. .set_le_stored = samsung_set_le_stored,
  3167. .get_le_stored = samsung_get_le_stored,
  3168. .get_le_est = samsung_get_le_est,
  3169. .set_use_sep_index = samsung_set_use_sep_index,
  3170. .get_owt_lib_compat_version = samsung_get_owt_lib_compat_version,
  3171. .get_ap_chipset = samsung_get_ap_chipset,
  3172. };
  3173. static struct attribute_group *cs40l26_dev_attr_groups[] = {
  3174. NULL
  3175. };
  3176. static void samsung_input_data_init(struct cs40l26_private *cs40l26)
  3177. {
  3178. cs40l26->sec_vib_ddata.dev = cs40l26->dev;
  3179. cs40l26->sec_vib_ddata.vib_ops = &cs40l26_vib_ops;
  3180. cs40l26->sec_vib_ddata.vendor_dev_attr_groups = cs40l26_dev_attr_groups;
  3181. cs40l26->sec_vib_ddata.private_data = (void *)cs40l26;
  3182. cs40l26->sec_vib_ddata.devid = cs40l26->devid;
  3183. cs40l26->sec_vib_ddata.revid = cs40l26->revid;
  3184. cs40l26->sec_vib_ddata.ff_val = 0;
  3185. cs40l26->sec_vib_ddata.support_fw = 1;
  3186. cs40l26->sec_vib_ddata.ach_percent = cs40l26->asp_scale_pct;
  3187. cs40l26->sec_vib_ddata.f0_stored = 0;
  3188. cs40l26->sec_vib_ddata.is_f0_tracking = cs40l26->pdata.is_f0_tracking;
  3189. cs40l26->sec_vib_ddata.is_le_support = cs40l26->pdata.is_mv_support;
  3190. cs40l26->sec_vib_ddata.trigger_calibration = 0;
  3191. sec_vib_inputff_setbit(&cs40l26->sec_vib_ddata, FF_PERIODIC);
  3192. sec_vib_inputff_setbit(&cs40l26->sec_vib_ddata, FF_CUSTOM);
  3193. sec_vib_inputff_setbit(&cs40l26->sec_vib_ddata, FF_SINE);
  3194. sec_vib_inputff_setbit(&cs40l26->sec_vib_ddata, FF_GAIN);
  3195. }
  3196. #else
  3197. static int cs40l26_input_init(struct cs40l26_private *cs40l26)
  3198. {
  3199. struct device *dev = cs40l26->dev;
  3200. int error;
  3201. cs40l26->input = devm_input_allocate_device(dev);
  3202. if (!cs40l26->input)
  3203. return -ENOMEM;
  3204. cs40l26->input->name = "cs40l26_input";
  3205. cs40l26->input->id.product = cs40l26->devid;
  3206. cs40l26->input->id.version = cs40l26->revid;
  3207. input_set_drvdata(cs40l26->input, cs40l26);
  3208. input_set_capability(cs40l26->input, EV_FF, FF_PERIODIC);
  3209. input_set_capability(cs40l26->input, EV_FF, FF_CUSTOM);
  3210. input_set_capability(cs40l26->input, EV_FF, FF_SINE);
  3211. input_set_capability(cs40l26->input, EV_FF, FF_GAIN);
  3212. error = input_ff_create(cs40l26->input, FF_MAX_EFFECTS);
  3213. if (error) {
  3214. dev_err(dev, "Failed to create FF device: %d\n", error);
  3215. return error;
  3216. }
	/*
	 * input_ff_create() automatically sets FF_RUMBLE capabilities;
	 * we want to restrict this to only FF_PERIODIC.
	 */
	clear_bit(FF_RUMBLE, cs40l26->input->ffbit);
  3222. cs40l26->input->ff->upload = cs40l26_upload_effect;
  3223. cs40l26->input->ff->playback = cs40l26_playback_effect;
  3224. cs40l26->input->ff->set_gain = cs40l26_set_gain;
  3225. cs40l26->input->ff->erase = cs40l26_erase_effect;
  3226. error = input_register_device(cs40l26->input);
  3227. if (error) {
  3228. dev_err(dev, "Cannot register input device: %d\n", error);
  3229. return error;
  3230. }
  3231. error = sysfs_create_group(&cs40l26->input->dev.kobj,
  3232. &cs40l26_dev_attr_group);
  3233. if (error) {
  3234. dev_err(dev, "Failed to create sysfs group: %d\n", error);
  3235. return error;
  3236. }
  3237. error = sysfs_create_group(&cs40l26->input->dev.kobj,
  3238. &cs40l26_dev_attr_cal_group);
  3239. if (error) {
  3240. dev_err(dev, "Failed to create cal sysfs group: %d\n", error);
  3241. return error;
  3242. }
  3243. error = sysfs_create_group(&cs40l26->input->dev.kobj,
  3244. &cs40l26_dev_attr_dbc_group);
  3245. if (error) {
  3246. dev_err(dev, "Failed to create DBC sysfs group\n");
  3247. return error;
  3248. }
  3249. cs40l26->vibe_init_success = true;
  3250. return error;
  3251. }
  3252. #endif
  3253. static int cs40l26_wksrc_config(struct cs40l26_private *cs40l26)
  3254. {
  3255. u8 mask_wksrc;
  3256. u32 val, mask;
  3257. if (cs40l26->devid == CS40L26_DEVID_A ||
  3258. cs40l26->devid == CS40L26_DEVID_L27_A)
  3259. mask_wksrc = 1;
  3260. else
  3261. mask_wksrc = 0;
  3262. val = CS40L26_WKSRC_STS_SPI_MASK |
  3263. (mask_wksrc ? CS40L26_WKSRC_STS_GPIO2_MASK : 0) |
  3264. (mask_wksrc ? CS40L26_WKSRC_STS_GPIO3_MASK : 0) |
  3265. (mask_wksrc ? CS40L26_WKSRC_STS_GPIO4_MASK : 0);
  3266. mask = CS40L26_WKSRC_STS_ANY_MASK | CS40L26_WKSRC_STS_GPIO1_MASK |
  3267. CS40L26_WKSRC_STS_I2C_MASK | CS40L26_WKSRC_STS_SPI_MASK |
  3268. CS40L26_WKSRC_STS_GPIO2_MASK | CS40L26_WKSRC_STS_GPIO3_MASK |
  3269. CS40L26_WKSRC_STS_GPIO4_MASK;
  3270. return cs40l26_irq_update_mask(cs40l26, CS40L26_IRQ1_MASK_1, val, mask);
  3271. }
  3272. static int cs40l26_gpio_config(struct cs40l26_private *cs40l26)
  3273. {
  3274. u32 val, reg;
  3275. u8 mask_gpio;
  3276. int error;
  3277. if (cs40l26->devid == CS40L26_DEVID_A ||
  3278. cs40l26->devid == CS40L26_DEVID_L27_A)
  3279. mask_gpio = 1;
  3280. else
  3281. mask_gpio = 0;
  3282. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  3283. dev_info(cs40l26->dev, "%s\n", __func__);
  3284. #endif
  3285. error = cl_dsp_get_reg(cs40l26->dsp, "ENT_MAP_TABLE_EVENT_DATA_PACKED",
  3286. CL_DSP_XM_UNPACKED_TYPE, CS40L26_EVENT_HANDLER_ALGO_ID,
  3287. &cs40l26->event_map_base);
  3288. if (error)
  3289. return error;
  3290. if (mask_gpio)
  3291. val = (u32) GENMASK(CS40L26_GPIO4_FALL_IRQ,
  3292. CS40L26_GPIO2_RISE_IRQ);
  3293. else
  3294. val = 0;
  3295. reg = cs40l26->event_map_base + (CS40L26_GPIO_MAP_A_PRESS * CL_DSP_BYTES_PER_WORD);
  3296. error = regmap_write(cs40l26->regmap, reg, cs40l26->press_idx);
  3297. if (error) {
  3298. dev_err(cs40l26->dev, "Failed to map press GPI event\n");
  3299. return error;
  3300. }
  3301. reg = cs40l26->event_map_base + (CS40L26_GPIO_MAP_A_RELEASE * CL_DSP_BYTES_PER_WORD);
  3302. error = regmap_write(cs40l26->regmap, reg, cs40l26->release_idx);
  3303. if (error) {
  3304. dev_err(cs40l26->dev, "Failed to map release GPI event\n");
  3305. return error;
  3306. }
  3307. return cs40l26_irq_update_mask(cs40l26, CS40L26_IRQ1_MASK_1, val,
  3308. GENMASK(CS40L26_GPIO4_FALL_IRQ, CS40L26_GPIO1_RISE_IRQ));
  3309. }
static const struct cs40l26_brwnout_limits cs40l26_brwnout_params[] = {
	{
		.max = CS40L26_VBBR_THLD_UV_MAX,
		.min = CS40L26_VBBR_THLD_UV_MIN,
	},
	{
		.max = CS40L26_VPBR_THLD_UV_MAX,
		.min = CS40L26_VPBR_THLD_UV_MIN,
	},
	{
		.max = CS40L26_VXBR_MAX_ATT_MAX,
		.min = CS40L26_VXBR_MAX_ATT_MIN,
	},
	{
		.max = CS40L26_VXBR_ATK_STEP_MAX,
		.min = CS40L26_VXBR_ATK_STEP_MIN,
	},
	{
		.max = CS40L26_VXBR_ATK_RATE_MAX,
		.min = CS40L26_VXBR_ATK_RATE_MIN,
	},
	{
		.max = CS40L26_VXBR_WAIT_MAX,
		.min = CS40L26_VXBR_WAIT_MIN,
	},
	{
		.max = CS40L26_VXBR_REL_RATE_MAX,
		.min = CS40L26_VXBR_REL_RATE_MIN,
	},
};
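/*
 * Enable and configure brownout prevention: set the VBBR/VPBR enables in
 * BLOCK_ENABLES2, pack the threshold, maximum attenuation, attack step/rate,
 * wait and release rate fields into the VBBR/VPBR config registers, mirror
 * every write into the power-on write sequencer so it survives hibernation,
 * and unmask the associated brownout interrupts.
 */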
  3340. static int cs40l26_brwnout_prevention_init(struct cs40l26_private *cs40l26)
  3341. {
  3342. u32 enables, pseq_mask = 0, val, vbbr_config, vpbr_config;
  3343. struct device *dev = cs40l26->dev;
  3344. struct regmap *regmap = cs40l26->regmap;
  3345. int error;
  3346. error = regmap_read(regmap, CS40L26_BLOCK_ENABLES2, &enables);
  3347. if (error) {
  3348. dev_err(dev, "Failed to read block enables 2\n");
  3349. return error;
  3350. }
  3351. enables |= ((cs40l26->vbbr.enable << CS40L26_VBBR_EN_SHIFT) |
  3352. (cs40l26->vpbr.enable << CS40L26_VPBR_EN_SHIFT));
  3353. error = regmap_write(regmap, CS40L26_BLOCK_ENABLES2, enables);
  3354. if (error) {
  3355. dev_err(dev, "Failed to enable brownout prevention\n");
  3356. return error;
  3357. }
  3358. error = cs40l26_pseq_write(cs40l26, CS40L26_BLOCK_ENABLES2, enables, true,
  3359. CS40L26_PSEQ_OP_WRITE_FULL);
  3360. if (error) {
  3361. dev_err(dev, "Failed to sequence brownout prevention\n");
  3362. return error;
  3363. }
  3364. if (cs40l26->vbbr.enable) {
  3365. pseq_mask = CS40L26_VBBR_ATT_CLR_MASK | CS40L26_VBBR_FLAG_MASK;
  3366. vbbr_config = (cs40l26->vbbr.thld_uv / CS40L26_VBBR_THLD_UV_DIV) &
  3367. CS40L26_VBBR_THLD_MASK;
  3368. vbbr_config |= ((cs40l26->vbbr.max_att_db << CS40L26_VXBR_MAX_ATT_SHIFT) &
  3369. CS40L26_VXBR_MAX_ATT_MASK);
  3370. vbbr_config |= ((cs40l26->vbbr.atk_step << CS40L26_VXBR_ATK_STEP_SHIFT) &
  3371. CS40L26_VXBR_ATK_STEP_MASK);
  3372. vbbr_config |= ((cs40l26->vbbr.atk_rate << CS40L26_VXBR_ATK_RATE_SHIFT) &
  3373. CS40L26_VXBR_ATK_RATE_MASK);
  3374. vbbr_config |= ((cs40l26->vbbr.wait << CS40L26_VXBR_WAIT_SHIFT) &
  3375. CS40L26_VXBR_WAIT_MASK);
  3376. vbbr_config |= ((cs40l26->vbbr.rel_rate << CS40L26_VXBR_REL_RATE_SHIFT) &
  3377. CS40L26_VXBR_REL_RATE_MASK);
  3378. error = regmap_read(regmap, CS40L26_VBBR_CONFIG, &val);
  3379. if (error) {
  3380. dev_err(dev, "Failed to read VBBR_CONFIG\n");
  3381. return error;
  3382. }
  3383. vbbr_config |= (val & CS40L26_VXBR_DEFAULT_MASK);
  3384. error = regmap_write(regmap, CS40L26_VBBR_CONFIG, vbbr_config);
  3385. if (error) {
  3386. dev_err(dev, "Failed to write VBBR_CONFIG\n");
  3387. return error;
  3388. }
  3389. error = cs40l26_pseq_write(cs40l26, CS40L26_VBBR_CONFIG,
  3390. (vbbr_config & GENMASK(31, 16)) >> 16,
  3391. true, CS40L26_PSEQ_OP_WRITE_H16);
  3392. if (error)
  3393. return error;
  3394. error = cs40l26_pseq_write(cs40l26, CS40L26_VBBR_CONFIG,
  3395. (vbbr_config & GENMASK(15, 0)),
  3396. true, CS40L26_PSEQ_OP_WRITE_L16);
  3397. if (error)
  3398. return error;
  3399. }
  3400. if (cs40l26->vpbr.enable) {
  3401. pseq_mask |= CS40L26_VPBR_ATT_CLR_MASK | CS40L26_VPBR_FLAG_MASK;
  3402. vpbr_config = ((cs40l26->vpbr.thld_uv / CS40L26_VPBR_THLD_UV_DIV) - 51) &
  3403. CS40L26_VPBR_THLD_MASK;
  3404. vpbr_config |= ((cs40l26->vpbr.max_att_db << CS40L26_VXBR_MAX_ATT_SHIFT) &
  3405. CS40L26_VXBR_MAX_ATT_MASK);
  3406. vpbr_config |= ((cs40l26->vpbr.atk_step << CS40L26_VXBR_ATK_STEP_SHIFT) &
  3407. CS40L26_VXBR_ATK_STEP_MASK);
  3408. vpbr_config |= ((cs40l26->vpbr.atk_rate << CS40L26_VXBR_ATK_RATE_SHIFT) &
  3409. CS40L26_VXBR_ATK_RATE_MASK);
  3410. vpbr_config |= ((cs40l26->vpbr.wait << CS40L26_VXBR_WAIT_SHIFT) &
  3411. CS40L26_VXBR_WAIT_MASK);
  3412. vpbr_config |= ((cs40l26->vpbr.rel_rate << CS40L26_VXBR_REL_RATE_SHIFT) &
  3413. CS40L26_VXBR_REL_RATE_MASK);
  3414. error = regmap_read(regmap, CS40L26_VPBR_CONFIG, &val);
  3415. if (error) {
  3416. dev_err(dev, "Failed to read VPBR_CONFIG\n");
  3417. return error;
  3418. }
  3419. vpbr_config |= (val & CS40L26_VXBR_DEFAULT_MASK);
  3420. error = regmap_write(regmap, CS40L26_VPBR_CONFIG, vpbr_config);
  3421. if (error) {
  3422. dev_err(dev, "Failed to write VPBR_CONFIG\n");
  3423. return error;
  3424. }
  3425. error = cs40l26_pseq_write(cs40l26, CS40L26_VPBR_CONFIG,
  3426. (vpbr_config & GENMASK(31, 16)) >> 16,
  3427. true, CS40L26_PSEQ_OP_WRITE_H16);
  3428. if (error)
  3429. return error;
  3430. error = cs40l26_pseq_write(cs40l26, CS40L26_VPBR_CONFIG,
  3431. (vpbr_config & GENMASK(15, 0)),
  3432. true, CS40L26_PSEQ_OP_WRITE_L16);
  3433. if (error)
  3434. return error;
  3435. }
  3436. return cs40l26_irq_update_mask(cs40l26, CS40L26_IRQ1_MASK_2, 0, pseq_mask);
  3437. }
  3438. static int cs40l26_asp_config(struct cs40l26_private *cs40l26)
  3439. {
  3440. struct reg_sequence *dsp1rx_config =
  3441. kcalloc(2, sizeof(struct reg_sequence), GFP_KERNEL);
  3442. int error;
  3443. if (!dsp1rx_config) {
  3444. dev_err(cs40l26->dev, "Failed to allocate reg. sequence\n");
  3445. return -ENOMEM;
  3446. }
  3447. dsp1rx_config[0].reg = CS40L26_DSP1RX1_INPUT;
  3448. dsp1rx_config[0].def = CS40L26_DATA_SRC_ASPRX1;
  3449. dsp1rx_config[1].reg = CS40L26_DSP1RX5_INPUT;
  3450. dsp1rx_config[1].def = CS40L26_DATA_SRC_ASPRX2;
  3451. error = regmap_multi_reg_write(cs40l26->regmap, dsp1rx_config, 2);
  3452. if (error) {
  3453. dev_err(cs40l26->dev, "Failed to configure ASP\n");
  3454. goto err_free;
  3455. }
  3456. error = cs40l26_pseq_multi_write(cs40l26, dsp1rx_config, 2, true,
  3457. CS40L26_PSEQ_OP_WRITE_L16);
  3458. err_free:
  3459. kfree(dsp1rx_config);
  3460. return error;
  3461. }
  3462. static int cs40l26_bst_dcm_config(struct cs40l26_private *cs40l26)
  3463. {
  3464. int error = 0;
  3465. u32 val;
  3466. if (cs40l26->bst_dcm_en != CS40L26_BST_DCM_EN_DEFAULT) {
  3467. error = regmap_read(cs40l26->regmap, CS40L26_BST_DCM_CTL, &val);
  3468. if (error)
  3469. return error;
  3470. val &= ~CS40L26_BST_DCM_EN_MASK;
  3471. val |= cs40l26->bst_dcm_en << CS40L26_BST_DCM_EN_SHIFT;
  3472. error = regmap_write(cs40l26->regmap, CS40L26_BST_DCM_CTL, val);
  3473. if (error)
  3474. return error;
  3475. error = cs40l26_pseq_write(cs40l26, CS40L26_BST_DCM_CTL,
  3476. val, true, CS40L26_PSEQ_OP_WRITE_FULL);
  3477. }
  3478. return error;
  3479. }
  3480. static int cs40l26_zero_cross_config(struct cs40l26_private *cs40l26)
  3481. {
  3482. int error = 0;
  3483. u32 reg;
  3484. if (cs40l26->pwle_zero_cross) {
  3485. error = cl_dsp_get_reg(cs40l26->dsp, "PWLE_EXTEND_ZERO_CROSS",
  3486. CL_DSP_XM_UNPACKED_TYPE, CS40L26_VIBEGEN_ALGO_ID, &reg);
  3487. if (error)
  3488. return error;
  3489. error = regmap_write(cs40l26->regmap, reg, 1);
  3490. if (error)
  3491. dev_err(cs40l26->dev, "Failed to set PWLE_EXTEND_ZERO_CROSS\n");
  3492. }
  3493. return error;
  3494. }
  3495. static int cs40l26_calib_dt_config(struct cs40l26_private *cs40l26)
  3496. {
  3497. int error = 0;
  3498. u32 reg;
  3499. if (cs40l26->f0_default <= CS40L26_F0_EST_MAX &&
  3500. cs40l26->f0_default >= CS40L26_F0_EST_MIN) {
  3501. error = cl_dsp_get_reg(cs40l26->dsp, "F0_OTP_STORED",
  3502. CL_DSP_XM_UNPACKED_TYPE,
  3503. CS40L26_VIBEGEN_ALGO_ID, &reg);
  3504. if (error)
  3505. return error;
  3506. error = regmap_write(cs40l26->regmap, reg, cs40l26->f0_default);
  3507. if (error) {
  3508. dev_err(cs40l26->dev, "Failed to write default f0\n");
  3509. return error;
  3510. }
  3511. }
  3512. if (cs40l26->redc_default && cs40l26->redc_default <= CS40L26_UINT_24_BITS_MAX) {
  3513. error = cl_dsp_get_reg(cs40l26->dsp, "REDC_OTP_STORED", CL_DSP_XM_UNPACKED_TYPE,
  3514. CS40L26_VIBEGEN_ALGO_ID, &reg);
  3515. if (error)
  3516. return error;
  3517. error = regmap_write(cs40l26->regmap, reg, cs40l26->redc_default);
  3518. if (error) {
  3519. dev_err(cs40l26->dev, "Failed to write default ReDC\n");
  3520. return error;
  3521. }
  3522. }
  3523. if (cs40l26->revid < CS40L26_REVID_B2) {
  3524. if (cs40l26->q_default <= CS40L26_Q_EST_MAX) {
  3525. error = cl_dsp_get_reg(cs40l26->dsp, "Q_STORED", CL_DSP_XM_UNPACKED_TYPE,
  3526. CS40L26_VIBEGEN_ALGO_ID, &reg);
  3527. if (error)
  3528. return error;
  3529. error = regmap_write(cs40l26->regmap, reg, cs40l26->q_default);
  3530. if (error) {
  3531. dev_err(cs40l26->dev, "Failed to write default Q\n");
  3532. return error;
  3533. }
  3534. }
  3535. }
  3536. return error;
  3537. }
  3538. static int cs40l26_bst_ipk_config(struct cs40l26_private *cs40l26)
  3539. {
  3540. u32 bst_ipk;
  3541. int error;
  3542. if (cs40l26->bst_ipk < CS40L26_BST_IPK_UA_MIN || cs40l26->bst_ipk > CS40L26_BST_IPK_UA_MAX)
  3543. bst_ipk = CS40L26_BST_IPK_DEFAULT;
  3544. else
  3545. bst_ipk = (cs40l26->bst_ipk / CS40L26_BST_IPK_UA_STEP) - 16;
  3546. error = regmap_write(cs40l26->regmap, CS40L26_BST_IPK_CTL, bst_ipk);
  3547. if (error) {
  3548. dev_err(cs40l26->dev, "Failed to update BST peak current\n");
  3549. return error;
  3550. }
  3551. error = cs40l26_pseq_write(cs40l26, CS40L26_BST_IPK_CTL, bst_ipk, true,
  3552. CS40L26_PSEQ_OP_WRITE_L16);
  3553. if (error)
  3554. return error;
  3555. return cs40l26_irq_update_mask(cs40l26, CS40L26_IRQ1_MASK_1, 0,
  3556. CS40L26_BST_IPK_FLAG_MASK);
  3557. }
  3558. static int cs40l26_bst_ctl_config(struct cs40l26_private *cs40l26)
  3559. {
  3560. u32 bst_ctl;
  3561. int error;
  3562. if (cs40l26->bst_ctl < CS40L26_BST_UV_MIN || cs40l26->bst_ctl > CS40L26_BST_UV_MAX)
  3563. bst_ctl = CS40L26_BST_CTL_DEFAULT;
  3564. else
  3565. bst_ctl = (cs40l26->bst_ctl - CS40L26_BST_UV_MIN) / CS40L26_BST_UV_STEP;
  3566. error = regmap_write(cs40l26->regmap, CS40L26_VBST_CTL_1, bst_ctl);
  3567. if (error) {
  3568. dev_err(cs40l26->dev, "Failed to write VBST limit\n");
  3569. return error;
  3570. }
  3571. return cs40l26_pseq_write(cs40l26, CS40L26_VBST_CTL_1, bst_ctl, true,
  3572. CS40L26_PSEQ_OP_WRITE_L16);
  3573. }
  3574. static int cs40l26_noise_gate_config(struct cs40l26_private *cs40l26)
  3575. {
  3576. u32 ng_config;
  3577. int error;
  3578. if (cs40l26->ng_thld < CS40L26_NG_THRESHOLD_MIN ||
  3579. cs40l26->ng_thld > CS40L26_NG_THRESHOLD_MAX)
  3580. cs40l26->ng_thld = CS40L26_NG_THRESHOLD_DEFAULT;
  3581. if (cs40l26->ng_delay < CS40L26_NG_DELAY_MIN || cs40l26->ng_delay > CS40L26_NG_DELAY_MAX)
  3582. cs40l26->ng_delay = CS40L26_NG_DELAY_DEFAULT;
  3583. ng_config = FIELD_PREP(CS40L26_NG_THRESHOLD_MASK, cs40l26->ng_thld) |
  3584. FIELD_PREP(CS40L26_NG_DELAY_MASK, cs40l26->ng_delay) |
  3585. FIELD_PREP(CS40L26_NG_ENABLE_MASK, cs40l26->ng_enable);
  3586. error = regmap_write(cs40l26->regmap, CS40L26_NG_CONFIG, ng_config);
  3587. if (error)
  3588. return error;
  3589. return cs40l26_pseq_write(cs40l26, CS40L26_NG_CONFIG, ng_config, true,
  3590. CS40L26_PSEQ_OP_WRITE_FULL);
  3591. }
  3592. static int cs40l26_aux_noise_gate_config(struct cs40l26_private *cs40l26)
  3593. {
  3594. u32 aux_ng_config;
  3595. int error;
  3596. if (cs40l26->aux_ng_thld > CS40L26_AUX_NG_THLD_MAX)
  3597. cs40l26->aux_ng_thld = CS40L26_AUX_NG_THLD_DEFAULT;
  3598. if (cs40l26->aux_ng_delay > CS40L26_AUX_NG_HOLD_MAX)
  3599. cs40l26->aux_ng_delay = CS40L26_AUX_NG_HOLD_DEFAULT;
  3600. aux_ng_config = FIELD_PREP(CS40L26_AUX_NG_THLD_MASK, cs40l26->aux_ng_thld) |
  3601. FIELD_PREP(CS40L26_AUX_NG_HOLD_MASK, cs40l26->aux_ng_delay) |
  3602. FIELD_PREP(CS40L26_AUX_NG_EN_MASK, cs40l26->aux_ng_enable);
  3603. error = regmap_write(cs40l26->regmap, CS40L26_MIXER_NGATE_CH1_CFG, aux_ng_config);
  3604. if (error)
  3605. return error;
  3606. return cs40l26_pseq_write(cs40l26, CS40L26_MIXER_NGATE_CH1_CFG, aux_ng_config, true,
  3607. CS40L26_PSEQ_OP_WRITE_FULL);
  3608. }
  3609. static int cs40l26_clip_lvl_config(struct cs40l26_private *cs40l26)
  3610. {
  3611. u32 clip_lvl, digpwm_config;
  3612. int error;
  3613. error = regmap_write(cs40l26->regmap, CS40L26_TEST_KEY_CTRL, CS40L26_TEST_KEY_UNLOCK_CODE1);
  3614. if (error)
  3615. return error;
  3616. error = cs40l26_pseq_write(cs40l26, CS40L26_TEST_KEY_CTRL, CS40L26_TEST_KEY_UNLOCK_CODE1,
  3617. false, CS40L26_PSEQ_OP_WRITE_L16);
  3618. if (error)
  3619. return error;
  3620. error = regmap_write(cs40l26->regmap, CS40L26_TEST_KEY_CTRL, CS40L26_TEST_KEY_UNLOCK_CODE2);
  3621. if (error)
  3622. return error;
  3623. error = cs40l26_pseq_write(cs40l26, CS40L26_TEST_KEY_CTRL, CS40L26_TEST_KEY_UNLOCK_CODE2,
  3624. false, CS40L26_PSEQ_OP_WRITE_ADDR8);
  3625. if (error)
  3626. return error;
  3627. if (cs40l26->clip_lvl < CS40L26_CLIP_LVL_UV_MIN ||
  3628. cs40l26->clip_lvl > CS40L26_CLIP_LVL_UV_MAX)
  3629. clip_lvl = CS40L26_CLIP_LVL_DEFAULT;
  3630. else
  3631. clip_lvl = cs40l26->clip_lvl / CS40L26_CLIP_LVL_UV_STEP;
  3632. error = regmap_read(cs40l26->regmap, CS40L26_DIGPWM_CONFIG2, &digpwm_config);
  3633. if (error) {
  3634. dev_err(cs40l26->dev, "Failed to get DIGPWM config\n");
  3635. return error;
  3636. }
  3637. digpwm_config &= ~CS40L26_CLIP_LVL_MASK;
  3638. digpwm_config |= ((clip_lvl << CS40L26_CLIP_LVL_SHIFT) & CS40L26_CLIP_LVL_MASK);
  3639. error = regmap_write(cs40l26->regmap, CS40L26_DIGPWM_CONFIG2, digpwm_config);
  3640. if (error) {
  3641. dev_err(cs40l26->dev, "Failed to set DIGPWM config\n");
  3642. return error;
  3643. }
  3644. error = cs40l26_pseq_write(cs40l26, CS40L26_DIGPWM_CONFIG2, digpwm_config, true,
  3645. CS40L26_PSEQ_OP_WRITE_FULL);
  3646. if (error)
  3647. return error;
  3648. error = regmap_write(cs40l26->regmap, CS40L26_TEST_KEY_CTRL, CS40L26_TEST_KEY_LOCK_CODE);
  3649. if (error)
  3650. return error;
  3651. return cs40l26_pseq_write(cs40l26, CS40L26_TEST_KEY_CTRL, CS40L26_TEST_KEY_LOCK_CODE,
  3652. false, CS40L26_PSEQ_OP_WRITE_L16);
  3653. }
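/*
 * Errata check for a shorted boost inductor: temporarily force the boost
 * target to VP with fixed control, pulse GLOBAL_EN, and read ERROR_RELEASE
 * for the boost-short flag before restoring the original VBST settings.
 * Only safe while the DSP is not running.
 */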
  3654. static int cs40l26_lbst_short_test(struct cs40l26_private *cs40l26)
  3655. {
  3656. struct regmap *regmap = cs40l26->regmap;
  3657. struct device *dev = cs40l26->dev;
  3658. unsigned int err, vbst_ctl_1, vbst_ctl_2;
  3659. int error;
  3660. error = regmap_read(regmap, CS40L26_VBST_CTL_1, &vbst_ctl_1);
  3661. if (error) {
  3662. dev_err(dev, "Failed to read VBST_CTL_1\n");
  3663. return error;
  3664. }
  3665. error = regmap_read(regmap, CS40L26_VBST_CTL_2, &vbst_ctl_2);
  3666. if (error) {
  3667. dev_err(dev, "Failed to read VBST_CTL_2\n");
  3668. return error;
  3669. }
  3670. error = regmap_update_bits(regmap, CS40L26_VBST_CTL_1,
  3671. CS40L26_BST_CTL_MASK, CS40L26_BST_CTL_VP);
  3672. if (error) {
  3673. dev_err(dev, "Failed to set VBST_CTL_1\n");
  3674. return error;
  3675. }
  3676. error = regmap_update_bits(regmap, CS40L26_VBST_CTL_2,
  3677. CS40L26_BST_CTL_SEL_MASK, CS40L26_BST_CTL_SEL_FIXED);
  3678. if (error) {
  3679. dev_err(dev, "Failed to set VBST_CTL_2\n");
  3680. return error;
  3681. }
  3682. /* Set GLOBAL_EN; safe because DSP is guaranteed to be off here */
  3683. error = regmap_update_bits(regmap, CS40L26_GLOBAL_ENABLES,
  3684. CS40L26_GLOBAL_EN_MASK, 1);
  3685. if (error) {
  3686. dev_err(dev, "Failed to set GLOBAL_EN\n");
  3687. return error;
  3688. }
	/* Wait until the boost converter is guaranteed to be powered up */
	usleep_range(CS40L26_BST_TIME_MIN_US, CS40L26_BST_TIME_MAX_US);
  3691. error = regmap_read(regmap, CS40L26_ERROR_RELEASE, &err);
  3692. if (error) {
  3693. dev_err(dev, "Failed to get ERROR_RELEASE contents\n");
  3694. return error;
  3695. }
  3696. if (err & BIT(CS40L26_BST_SHORT_ERR_RLS)) {
  3697. dev_alert(dev, "FATAL: Boost shorted at startup\n");
  3698. return -ENOTRECOVERABLE;
  3699. }
  3700. /* Clear GLOBAL_EN; safe because DSP is guaranteed to be off here */
  3701. error = regmap_update_bits(regmap, CS40L26_GLOBAL_ENABLES,
  3702. CS40L26_GLOBAL_EN_MASK, 0);
  3703. if (error) {
  3704. dev_err(dev, "Failed to clear GLOBAL_EN\n");
  3705. return error;
  3706. }
  3707. error = regmap_write(regmap, CS40L26_VBST_CTL_1, vbst_ctl_1);
  3708. if (error) {
  3709. dev_err(dev, "Failed to set VBST_CTL_1\n");
  3710. return error;
  3711. }
  3712. error = regmap_write(regmap, CS40L26_VBST_CTL_2, vbst_ctl_2);
  3713. if (error)
  3714. dev_err(dev, "Failed to set VBST_CTL_2\n");
  3715. return error;
  3716. }
  3717. static int cs40l26_handle_errata(struct cs40l26_private *cs40l26)
  3718. {
  3719. int error, num_writes;
  3720. if (!cs40l26->expl_mode_enabled) {
  3721. error = cs40l26_lbst_short_test(cs40l26);
  3722. if (error)
  3723. return error;
  3724. num_writes = CS40L26_ERRATA_A1_NUM_WRITES;
  3725. } else {
  3726. num_writes = CS40L26_ERRATA_A1_EXPL_EN_NUM_WRITES;
  3727. }
  3728. return cs40l26_pseq_multi_write(cs40l26, cs40l26_a1_errata, num_writes,
  3729. false, CS40L26_PSEQ_OP_WRITE_FULL);
  3730. }
  3731. int cs40l26_dbc_enable(struct cs40l26_private *cs40l26, u32 enable)
  3732. {
  3733. unsigned int reg;
  3734. int error;
  3735. error = cl_dsp_get_reg(cs40l26->dsp, "FLAGS", CL_DSP_XM_UNPACKED_TYPE,
  3736. CS40L26_EXT_ALGO_ID, &reg);
  3737. if (error)
  3738. return error;
  3739. error = regmap_update_bits(cs40l26->regmap, reg, CS40L26_DBC_ENABLE_MASK,
  3740. enable << CS40L26_DBC_ENABLE_SHIFT);
  3741. if (error)
  3742. dev_err(cs40l26->dev, "Failed to %s DBC\n", enable ? "enable" : "disable");
  3743. return error;
  3744. }
  3745. EXPORT_SYMBOL_GPL(cs40l26_dbc_enable);
  3746. static int cs40l26_handle_dbc_defaults(struct cs40l26_private *cs40l26)
  3747. {
  3748. unsigned int i;
  3749. int error;
  3750. u32 val;
  3751. for (i = 0; i < CS40L26_DBC_NUM_CONTROLS; i++) {
  3752. val = cs40l26->dbc_defaults[i];
  3753. if (val != CS40L26_DBC_USE_DEFAULT) {
  3754. error = cs40l26_dbc_set(cs40l26, i, val);
  3755. if (error)
  3756. return error;
  3757. }
  3758. }
  3759. if (cs40l26->dbc_enable_default) {
  3760. error = cs40l26_dbc_enable(cs40l26, 1);
  3761. if (error)
  3762. return error;
  3763. }
  3764. return 0;
  3765. }
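/*
 * Discover the firmware logger sources: read the source COUNT, append an
 * excursion-protection source when the EP algorithm is present, then cache
 * each source's sign, size, type, ID and address fields for later readback.
 */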
  3766. static int cs40l26_logger_setup(struct cs40l26_private *cs40l26)
  3767. {
  3768. u32 exc_offset, exc_reg, exc_src, reg, src;
  3769. int error, i;
  3770. if (cs40l26->log_srcs != NULL) {
  3771. cs40l26->num_log_srcs = 0;
  3772. devm_kfree(cs40l26->dev, cs40l26->log_srcs);
  3773. }
  3774. error = cl_dsp_get_reg(cs40l26->dsp, "COUNT", CL_DSP_XM_UNPACKED_TYPE,
  3775. CS40L26_LOGGER_ALGO_ID, &reg);
  3776. if (error)
  3777. return error;
  3778. error = regmap_read(cs40l26->regmap, reg, &cs40l26->num_log_srcs);
  3779. if (error)
  3780. return error;
  3781. if (cl_dsp_algo_is_present(cs40l26->dsp, CS40L26_EP_ALGO_ID)) {
  3782. /* Add excursion logger source */
  3783. cs40l26->num_log_srcs++;
  3784. error = regmap_write(cs40l26->regmap, reg, cs40l26->num_log_srcs);
  3785. if (error)
  3786. return error;
  3787. error = cl_dsp_get_reg(cs40l26->dsp, "DBG_SRC_CFG", CL_DSP_XM_UNPACKED_TYPE,
  3788. CS40L26_EP_ALGO_ID, &reg);
  3789. if (error)
  3790. return error;
  3791. error = regmap_write(cs40l26->regmap, reg,
  3792. CS40L26_LOGGER_SRC_PROTECTION_OUT << 8);
  3793. if (error)
  3794. return error;
  3795. error = cl_dsp_get_reg(cs40l26->dsp, "DBG_ADDR", CL_DSP_XM_UNPACKED_TYPE,
  3796. CS40L26_EP_ALGO_ID, &exc_reg);
  3797. if (error)
  3798. return error;
  3799. exc_reg += CL_DSP_BYTES_PER_WORD;
  3800. exc_reg &= CS40L26_LOGGER_SRC_ADDR_MASK;
  3801. exc_reg /= CL_DSP_BYTES_PER_WORD;
  3802. exc_src = exc_reg | FIELD_PREP(CS40L26_LOGGER_SRC_ID_MASK,
  3803. CS40L26_LOGGER_SRC_ID_EP) | FIELD_PREP(CS40L26_LOGGER_SRC_TYPE_MASK,
  3804. CS40L26_LOGGER_SRC_TYPE_XM_TO_XM) | CS40L26_LOGGER_SRC_SIGN_MASK;
  3805. error = cl_dsp_get_reg(cs40l26->dsp, "SOURCE", CL_DSP_XM_UNPACKED_TYPE,
  3806. CS40L26_LOGGER_ALGO_ID, &reg);
  3807. if (error)
  3808. return error;
  3809. exc_offset = (cs40l26->num_log_srcs - 1) * CL_DSP_BYTES_PER_WORD;
  3810. error = regmap_write(cs40l26->regmap, reg + exc_offset, exc_src);
  3811. if (error)
  3812. return error;
  3813. }
  3814. cs40l26->log_srcs = devm_kcalloc(cs40l26->dev, cs40l26->num_log_srcs,
  3815. sizeof(struct cs40l26_log_src), GFP_KERNEL);
  3816. if (IS_ERR_OR_NULL(cs40l26->log_srcs))
  3817. return cs40l26->log_srcs ? PTR_ERR(cs40l26->log_srcs) : -ENOMEM;
  3818. error = cl_dsp_get_reg(cs40l26->dsp, "SOURCE", CL_DSP_XM_UNPACKED_TYPE,
  3819. CS40L26_LOGGER_ALGO_ID, &reg);
  3820. if (error)
  3821. goto err_free;
  3822. for (i = 0; i < cs40l26->num_log_srcs; i++) {
  3823. error = regmap_read(cs40l26->regmap, reg + (i * CL_DSP_BYTES_PER_WORD), &src);
  3824. if (error)
  3825. goto err_free;
  3826. cs40l26->log_srcs[i].sign = FIELD_GET(CS40L26_LOGGER_SRC_SIGN_MASK, src);
  3827. cs40l26->log_srcs[i].size = FIELD_GET(CS40L26_LOGGER_SRC_SIZE_MASK, src);
  3828. cs40l26->log_srcs[i].type = FIELD_GET(CS40L26_LOGGER_SRC_TYPE_MASK, src);
  3829. cs40l26->log_srcs[i].id = FIELD_GET(CS40L26_LOGGER_SRC_ID_MASK, src);
  3830. cs40l26->log_srcs[i].addr = FIELD_GET(CS40L26_LOGGER_SRC_ADDR_MASK, src);
  3831. }
  3832. return 0;
err_free:
devm_kfree(cs40l26->dev, cs40l26->log_srcs);
cs40l26->log_srcs = NULL;
return error;
  3836. }
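/*
 * Full DSP bring-up: initialize RAM (unless running ROM-only firmware), set up
 * the power-on write sequencer and errata, start the DSP and verify HALO_STATE,
 * unmask hardware error IRQs, then apply wake-source, GPIO, boost, clip-level,
 * DBC, zero-cross and noise-gate configuration before enabling runtime PM and
 * setting up the logger, ASP and compensation controls.
 */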
  3837. static int cs40l26_dsp_config(struct cs40l26_private *cs40l26)
  3838. {
  3839. struct regmap *regmap = cs40l26->regmap;
  3840. struct device *dev = cs40l26->dev;
  3841. unsigned int val;
  3842. u32 reg, nwaves, value;
  3843. int error;
  3844. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  3845. dev_info(dev, "%s\n", __func__);
  3846. #endif
  3847. if (!cs40l26->fw_rom_only) {
  3848. error = regmap_update_bits(regmap, CS40L26_PWRMGT_CTL,
  3849. CS40L26_MEM_RDY_MASK, 1 << CS40L26_MEM_RDY_SHIFT);
  3850. if (error) {
  3851. dev_err(dev, "Failed to set MEM_RDY to initialize RAM\n");
  3852. return error;
  3853. }
  3854. error = cl_dsp_get_reg(cs40l26->dsp, "CALL_RAM_INIT", CL_DSP_XM_UNPACKED_TYPE,
  3855. cs40l26->fw_id, &reg);
  3856. if (error) {
  3857. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  3858. dev_err(dev, "cl_dsp_get_reg CALL_RAM_INIT fail\n");
  3859. #endif
  3860. return error;
  3861. }
  3862. error = cs40l26_dsp_write(cs40l26, reg, 1);
  3863. if (error) {
  3864. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  3865. dev_err(dev, "cl_dsp_write fail\n");
  3866. #endif
  3867. return error;
  3868. }
  3869. }
  3870. cs40l26->fw_loaded = true;
  3871. #ifdef CONFIG_DEBUG_FS
  3872. cs40l26_debugfs_init(cs40l26);
  3873. #endif
  3874. error = cs40l26_pseq_init(cs40l26);
  3875. if (error) {
  3876. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  3877. dev_err(dev, "%s:Failed to pseq_init\n", __func__);
  3878. #endif
  3879. return error;
  3880. }
  3881. error = cs40l26_handle_errata(cs40l26);
  3882. if (error)
  3883. return error;
  3884. if (!cs40l26->fw_rom_only) {
  3885. error = cs40l26_dsp_start(cs40l26);
  3886. if (error) {
  3887. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  3888. dev_err(dev, "%s:Failed to dsp_start\n", __func__);
  3889. #endif
  3890. return error;
  3891. }
  3892. }
  3893. error = cs40l26_pm_state_transition(cs40l26, CS40L26_PM_STATE_PREVENT_HIBERNATE);
  3894. if (error)
  3895. return error;
  3896. /* ensure firmware running */
  3897. error = cl_dsp_get_reg(cs40l26->dsp, "HALO_STATE", CL_DSP_XM_UNPACKED_TYPE, cs40l26->fw_id,
  3898. &reg);
  3899. if (error)
  3900. return error;
  3901. error = regmap_read(regmap, reg, &val);
  3902. if (error) {
  3903. dev_err(dev, "Failed to read HALO_STATE\n");
  3904. return error;
  3905. }
  3906. if (val != CS40L26_DSP_HALO_STATE_RUN) {
  3907. dev_err(dev, "Firmware in unexpected state: 0x%X\n", val);
  3908. return -EINVAL;
  3909. }
  3910. error = cs40l26_irq_update_mask(cs40l26, CS40L26_IRQ1_MASK_1, 0,
  3911. CS40L26_AMP_ERR_MASK | CS40L26_TEMP_ERR_MASK |
  3912. CS40L26_BST_SHORT_ERR_MASK | CS40L26_BST_DCM_UVP_ERR_MASK |
  3913. CS40L26_BST_OVP_ERR_MASK | CS40L26_VIRTUAL2_MBOX_WR_MASK);
  3914. if (error) {
  3915. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  3916. dev_err(dev, "%s:Failed to irq_update_mask\n", __func__);
  3917. #endif
  3918. return error;
  3919. }
  3920. error = cs40l26_wksrc_config(cs40l26);
  3921. if (error) {
  3922. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  3923. dev_err(dev, "%s:Failed to wksrc_config\n", __func__);
  3924. #endif
  3925. return error;
  3926. }
  3927. error = cs40l26_gpio_config(cs40l26);
  3928. if (error) {
  3929. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  3930. dev_err(dev, "%s:Failed to gpio_config\n", __func__);
  3931. #endif
  3932. return error;
  3933. }
  3934. error = cs40l26_bst_dcm_config(cs40l26);
  3935. if (error) {
  3936. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  3937. dev_err(dev, "%s:Failed to bst_dcm_config\n", __func__);
  3938. #endif
  3939. return error;
  3940. }
  3941. error = cs40l26_bst_ipk_config(cs40l26);
  3942. if (error) {
  3943. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  3944. dev_err(dev, "%s:Failed to bst_ipk_config\n", __func__);
  3945. #endif
  3946. return error;
  3947. }
  3948. error = cs40l26_bst_ctl_config(cs40l26);
  3949. if (error) {
  3950. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  3951. dev_err(dev, "%s:Failed to bst_ctl_config\n", __func__);
  3952. #endif
  3953. return error;
  3954. }
  3955. error = cs40l26_clip_lvl_config(cs40l26);
  3956. if (error) {
  3957. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  3958. dev_err(dev, "%s:Failed to clip_lvl_config\n", __func__);
  3959. #endif
  3960. return error;
  3961. }
  3962. error = cs40l26_handle_dbc_defaults(cs40l26);
  3963. if (error)
  3964. return error;
  3965. error = cs40l26_zero_cross_config(cs40l26);
  3966. if (error)
  3967. return error;
  3968. error = cs40l26_noise_gate_config(cs40l26);
  3969. if (error)
  3970. return error;
  3971. error = cs40l26_aux_noise_gate_config(cs40l26);
  3972. if (error)
  3973. return error;
  3974. if (!cs40l26->vibe_init_success) {
  3975. error = cs40l26_calib_dt_config(cs40l26);
  3976. if (error)
  3977. return error;
  3978. }
  3979. error = cs40l26_brwnout_prevention_init(cs40l26);
  3980. if (error) {
  3981. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  3982. dev_err(dev, "%s:Failed to brownout_prevention_init\n", __func__);
  3983. #endif
  3984. return error;
  3985. }
  3986. cs40l26_pm_runtime_setup(cs40l26);
  3987. error = cs40l26_pm_state_transition(cs40l26, CS40L26_PM_STATE_ALLOW_HIBERNATE);
  3988. if (error)
  3989. return error;
  3990. error = cs40l26_pm_enter(dev);
  3991. if (error)
  3992. return error;
  3993. error = cl_dsp_get_reg(cs40l26->dsp, "TIMEOUT_MS",
  3994. CL_DSP_XM_UNPACKED_TYPE, CS40L26_VIBEGEN_ALGO_ID, &reg);
  3995. if (error) {
  3996. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  3997. dev_err(dev, "Failed to get TIMEOUT_MS\n");
  3998. #endif
  3999. goto pm_err;
  4000. }
  4001. error = regmap_write(regmap, reg, 0);
  4002. if (error) {
  4003. dev_err(dev, "Failed to set TIMEOUT_MS\n");
  4004. goto pm_err;
  4005. }
  4006. error = cs40l26_logger_setup(cs40l26);
  4007. if (error)
  4008. goto pm_err;
  4009. error = cs40l26_asp_config(cs40l26);
  4010. if (error) {
  4011. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  4012. dev_err(dev, "Failed to set asp config\n");
  4013. #endif
  4014. goto pm_err;
  4015. }
  4016. error = cs40l26_get_num_waves(cs40l26, &nwaves);
  4017. if (error) {
  4018. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  4019. dev_err(dev, "Failed to get num waves\n");
  4020. #endif
  4021. goto pm_err;
  4022. }
  4023. dev_info(dev, "%s loaded with %u RAM waveforms\n", CS40L26_DEV_NAME, nwaves);
  4024. cs40l26->num_owt_effects = 0;
  4025. value = (cs40l26->comp_enable_redc << CS40L26_COMP_EN_REDC_SHIFT) |
  4026. (cs40l26->comp_enable_f0 << CS40L26_COMP_EN_F0_SHIFT);
  4027. if (cs40l26->fw_id != CS40L26_FW_CALIB_ID) {
  4028. error = cl_dsp_get_reg(cs40l26->dsp, "COMPENSATION_ENABLE", CL_DSP_XM_UNPACKED_TYPE,
  4029. CS40L26_VIBEGEN_ALGO_ID, &reg);
  4030. if (error)
  4031. goto pm_err;
  4032. error = regmap_write(cs40l26->regmap, reg, value);
  4033. if (error)
  4034. dev_err(dev, "Failed to configure compensation\n");
  4035. }
  4036. pm_err:
  4037. cs40l26_pm_exit(dev);
  4038. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  4039. dev_info(dev, "%s done ret %d\n", __func__, error);
  4040. #endif
  4041. return error;
  4042. }
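/*
 * Apply a signed percentage adjustment to asp_scale_pct, clamping the result
 * to [0, CS40L26_GAIN_FULL_SCALE]. Adjustments larger than +/-100% are
 * rejected with a warning.
 */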
  4043. static void cs40l26_gain_adjust(struct cs40l26_private *cs40l26, s32 adjust)
  4044. {
  4045. u16 total, asp, change;
  4046. if (abs(adjust) > 100) {
  4047. dev_warn(cs40l26->dev, "Gain adjust %d invalid, not applied\n", adjust);
  4048. return;
  4049. }
  4050. asp = cs40l26->asp_scale_pct;
  4051. if (adjust < 0) {
  4052. change = (u16) ((adjust * -1) & 0xFFFF);
  4053. if (asp < change)
  4054. total = 0;
  4055. else
  4056. total = asp - change;
  4057. } else {
  4058. change = (u16) (adjust & 0xFFFF);
  4059. total = asp + change;
  4060. if (total > CS40L26_GAIN_FULL_SCALE)
  4061. total = CS40L26_GAIN_FULL_SCALE;
  4062. }
  4063. cs40l26->asp_scale_pct = total;
  4064. }
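/*
 * Trigger the LE_EST mailbox command and poll LE_EST_STATUS until a non-zero
 * Le estimate is returned or the attempt limit is reached. The result
 * (0 if no estimate was produced) is written to *le.
 */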
  4065. int cs40l26_svc_le_estimate(struct cs40l26_private *cs40l26, unsigned int *le)
  4066. {
  4067. struct device *dev = cs40l26->dev;
  4068. unsigned int reg, le_est = 0;
  4069. int error, i;
  4070. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  4071. dev_info(cs40l26->dev, "%s\n", __func__);
  4072. #endif
  4073. error = cs40l26_mailbox_write(cs40l26, CS40L26_DSP_MBOX_CMD_LE_EST);
  4074. if (error)
  4075. return error;
  4076. error = cl_dsp_get_reg(cs40l26->dsp, "LE_EST_STATUS", CL_DSP_YM_UNPACKED_TYPE,
  4077. CS40L26_SVC_ALGO_ID, &reg);
  4078. if (error)
  4079. return error;
  4080. for (i = 0; i < CS40L26_SVC_LE_MAX_ATTEMPTS; i++) {
  4081. usleep_range(CS40L26_SVC_LE_EST_TIME_US, CS40L26_SVC_LE_EST_TIME_US + 100);
  4082. error = regmap_read(cs40l26->regmap, reg, &le_est);
  4083. if (error) {
  4084. dev_err(dev, "Failed to get LE_EST_STATUS\n");
  4085. return error;
  4086. }
dev_info(dev, "Measured Le estimate = %u\n", le_est);
  4088. if (le_est)
  4089. break;
  4090. }
  4091. *le = le_est;
  4092. return 0;
  4093. }
  4094. EXPORT_SYMBOL_GPL(cs40l26_svc_le_estimate);
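/*
 * Map an Le estimate onto the device-tree provided ranges to select a tuning
 * file number and gain adjustment; fall back to the default tunings when the
 * estimate is zero or matches no range.
 */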
  4095. static void cs40l26_tuning_select_from_svc_le(struct cs40l26_private *cs40l26,
  4096. unsigned int le, u32 *tuning_num)
  4097. {
  4098. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  4099. int i = -1;
  4100. #else
  4101. int i;
  4102. #endif
  4103. if (le) {
  4104. for (i = 0; i < cs40l26->num_svc_le_vals; i++) {
  4105. if (le >= cs40l26->svc_le_vals[i]->min &&
  4106. le <= cs40l26->svc_le_vals[i]->max) {
  4107. *tuning_num = cs40l26->svc_le_vals[i]->n;
  4108. cs40l26_gain_adjust(cs40l26, cs40l26->svc_le_vals[i]->gain_adjust);
  4109. break;
  4110. }
  4111. }
  4112. }
  4113. if (!le || i == cs40l26->num_svc_le_vals)
  4114. dev_warn(cs40l26->dev, "Using default tunings\n");
  4115. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  4116. dev_info(cs40l26->dev, "%s, le_est: %u, le_cnt:%d, svc_le:%u, i=%d\n", __func__,
  4117. le, cs40l26->num_svc_le_vals, *tuning_num, i);
  4118. #endif
  4119. }
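/*
 * Build the list of coefficient file names to load for the given tuning
 * number: wavetable and SVC tunings (numbered variants when tuning != 0),
 * plus optional LF0T, DVL, A2H, EP or calibration files depending on the
 * loaded firmware and the algorithms it reports.
 */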
  4120. static char **cs40l26_get_tuning_names(struct cs40l26_private *cs40l26, int *actual_num_files,
  4121. u32 tuning)
  4122. {
  4123. int i, file_count = 0;
  4124. char **coeff_files;
  4125. coeff_files = kcalloc(CS40L26_MAX_TUNING_FILES, sizeof(char *), GFP_KERNEL);
  4126. if (!coeff_files)
  4127. return ERR_PTR(-ENOMEM);
  4128. for (i = 0; i < CS40L26_MAX_TUNING_FILES; i++) {
  4129. coeff_files[i] = kzalloc(CS40L26_TUNING_FILE_NAME_MAX_LEN, GFP_KERNEL);
  4130. if (!coeff_files[i])
  4131. goto err_free;
  4132. }
  4133. if (tuning) {
  4134. snprintf(coeff_files[file_count++], CS40L26_TUNING_FILE_NAME_MAX_LEN, "%s%d%s",
  4135. CS40L26_WT_FILE_PREFIX, tuning, CS40L26_TUNING_FILE_SUFFIX);
  4136. } else {
  4137. strscpy(coeff_files[file_count++], CS40L26_WT_FILE_NAME,
  4138. CS40L26_TUNING_FILE_NAME_MAX_LEN);
  4139. }
  4140. if (tuning) {
  4141. snprintf(coeff_files[file_count++], CS40L26_TUNING_FILE_NAME_MAX_LEN, "%s%d%s",
  4142. CS40L26_SVC_TUNING_FILE_PREFIX, tuning, CS40L26_TUNING_FILE_SUFFIX);
  4143. } else {
  4144. strscpy(coeff_files[file_count++], CS40L26_SVC_TUNING_FILE_NAME,
  4145. CS40L26_TUNING_FILE_NAME_MAX_LEN);
  4146. }
  4147. if (cl_dsp_algo_is_present(cs40l26->dsp, CS40L26_LF0T_ALGO_ID))
  4148. strscpy(coeff_files[file_count++], CS40L26_LF0T_FILE_NAME,
  4149. CS40L26_TUNING_FILE_NAME_MAX_LEN);
  4150. if (cl_dsp_algo_is_present(cs40l26->dsp, CS40L26_DVL_ALGO_ID))
  4151. strscpy(coeff_files[file_count++], CS40L26_DVL_FILE_NAME,
  4152. CS40L26_TUNING_FILE_NAME_MAX_LEN);
  4153. if (cs40l26->fw_id == CS40L26_FW_ID) {
  4154. if (cl_dsp_algo_is_present(cs40l26->dsp, CS40L26_A2H_ALGO_ID))
  4155. strscpy(coeff_files[file_count++],
  4156. CS40L26_A2H_TUNING_FILE_NAME,
  4157. CS40L26_TUNING_FILE_NAME_MAX_LEN);
  4158. if (cl_dsp_algo_is_present(cs40l26->dsp, CS40L26_EP_ALGO_ID))
  4159. strscpy(coeff_files[file_count++],
  4160. CS40L26_EP_TUNING_FILE_NAME,
  4161. CS40L26_TUNING_FILE_NAME_MAX_LEN);
  4162. } else {
  4163. strscpy(coeff_files[file_count++], CS40L26_CALIB_BIN_FILE_NAME,
  4164. CS40L26_TUNING_FILE_NAME_MAX_LEN);
  4165. }
  4166. *actual_num_files = file_count;
  4167. return coeff_files;
  4168. err_free:
  4169. for (; i >= 0; i--)
  4170. kfree(coeff_files[i]);
  4171. kfree(coeff_files);
  4172. *actual_num_files = 0;
  4173. return ERR_PTR(-ENOMEM);
  4174. }
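/*
 * Request and parse each coefficient file returned by
 * cs40l26_get_tuning_names(). Missing or unparsable files are logged and
 * skipped rather than treated as fatal.
 */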
  4175. static int cs40l26_coeff_load(struct cs40l26_private *cs40l26, u32 tuning)
  4176. {
  4177. struct device *dev = cs40l26->dev;
  4178. int i, error, num_files_to_load;
  4179. const struct firmware *coeff;
  4180. char **coeff_files;
  4181. coeff_files = cs40l26_get_tuning_names(cs40l26, &num_files_to_load, tuning);
  4182. if (IS_ERR(coeff_files))
  4183. return PTR_ERR(coeff_files);
  4184. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  4185. dev_info(dev, "%s\n", __func__);
  4186. #endif
  4187. for (i = 0; i < num_files_to_load; i++) {
  4188. error = request_firmware(&coeff, coeff_files[i], dev);
  4189. if (error) {
dev_warn(dev, "Failed to request %s, continuing...\n", coeff_files[i]);
  4191. continue;
  4192. }
  4193. error = cl_dsp_coeff_file_parse(cs40l26->dsp, coeff);
  4194. if (error)
  4195. dev_warn(dev, "Failed to load %s, %d. Continuing...\n", coeff_files[i],
  4196. error);
  4197. else
  4198. dev_info(dev, "%s Loaded Successfully\n", coeff_files[i]);
  4199. release_firmware(coeff);
  4200. }
for (i = 0; i < CS40L26_MAX_TUNING_FILES; i++)
kfree(coeff_files[i]);
kfree(coeff_files);
  4202. return 0;
  4203. }
  4204. static int cs40l26_change_fw_control_defaults(struct cs40l26_private *cs40l26)
  4205. {
  4206. int error;
  4207. error = cs40l26_pm_timeout_ms_set(cs40l26, CS40L26_DSP_STATE_STANDBY,
  4208. cs40l26->pm_stdby_timeout_ms);
  4209. if (error)
  4210. return error;
  4211. return cs40l26_pm_timeout_ms_set(cs40l26, CS40L26_DSP_STATE_ACTIVE,
  4212. cs40l26->pm_active_timeout_ms);
  4213. }
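/*
 * Read the firmware ID and revision, check them against the minimum supported
 * revision for the detected branch, and record the firmware ID along with
 * whether this firmware reports vibe state.
 */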
  4214. static int cs40l26_get_fw_params(struct cs40l26_private *cs40l26)
  4215. {
  4216. u32 id, min_rev, rev, branch;
  4217. int error, maj, min, patch;
  4218. error = cl_dsp_fw_rev_get(cs40l26->dsp, &rev);
  4219. if (error)
  4220. return error;
  4221. branch = CL_DSP_GET_MAJOR(rev);
  4222. maj = (int) branch;
  4223. min = (int) CL_DSP_GET_MINOR(rev);
  4224. patch = (int) CL_DSP_GET_PATCH(rev);
  4225. error = cl_dsp_fw_id_get(cs40l26->dsp, &id);
  4226. if (error)
  4227. return error;
  4228. switch (id) {
  4229. case CS40L26_FW_ID:
  4230. switch (branch) {
  4231. case CS40L26_FW_BRANCH:
  4232. min_rev = CS40L26_FW_MIN_REV;
  4233. cs40l26->vibe_state_reporting = true;
  4234. break;
  4235. case CS40L26_FW_MAINT_BRANCH:
  4236. min_rev = CS40L26_FW_MAINT_MIN_REV;
  4237. cs40l26->vibe_state_reporting = false;
  4238. break;
  4239. case CS40L26_FW_B2_BRANCH:
  4240. min_rev = CS40L26_FW_B2_MIN_REV;
  4241. cs40l26->vibe_state_reporting = true;
  4242. break;
  4243. default:
  4244. error = -EINVAL;
  4245. break;
  4246. }
  4247. break;
  4248. case CS40L26_FW_CALIB_ID:
  4249. if (branch == CS40L26_FW_CALIB_BRANCH) {
  4250. min_rev = CS40L26_FW_CALIB_MIN_REV;
  4251. cs40l26->vibe_state_reporting = true;
  4252. } else if (branch == CS40L26_FW_MAINT_CALIB_BRANCH) {
  4253. min_rev = CS40L26_FW_MAINT_CALIB_MIN_REV;
  4254. cs40l26->vibe_state_reporting = false;
  4255. } else {
  4256. error = -EINVAL;
  4257. }
  4258. break;
  4259. default:
  4260. dev_err(cs40l26->dev, "Invalid FW ID: 0x%06X\n", id);
  4261. return -EINVAL;
  4262. }
  4263. if (error) {
  4264. dev_err(cs40l26->dev, "Rev. Branch 0x%02X invalid\n", maj);
  4265. return error;
  4266. }
  4267. if (rev < min_rev) {
  4268. dev_err(cs40l26->dev, "Invalid firmware revision: %d.%d.%d\n",
  4269. maj, min, patch);
  4270. return -EINVAL;
  4271. }
  4272. cs40l26->fw_id = id;
  4273. dev_info(cs40l26->dev, "Firmware revision %d.%d.%d\n", maj, min, patch);
  4274. return 0;
  4275. }
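/*
 * Destroy any existing cl_dsp instance and create a fresh one along with its
 * wavetable description.
 */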
  4276. static int cs40l26_cl_dsp_reinit(struct cs40l26_private *cs40l26)
  4277. {
  4278. int error;
  4279. if (cs40l26->dsp) {
  4280. error = cl_dsp_destroy(cs40l26->dsp);
  4281. if (error) {
  4282. dev_err(cs40l26->dev, "Failed to destroy DSP struct\n");
  4283. return error;
  4284. }
  4285. cs40l26->dsp = NULL;
  4286. }
  4287. cs40l26->dsp = cl_dsp_create(cs40l26->dev, cs40l26->regmap);
  4288. if (IS_ERR(cs40l26->dsp))
  4289. return PTR_ERR(cs40l26->dsp);
  4290. return cl_dsp_wavetable_create(cs40l26->dsp, CS40L26_VIBEGEN_ALGO_ID,
  4291. CS40L26_WT_NAME_XM, CS40L26_WT_NAME_YM, CS40L26_WT_FILE_NAME);
  4292. }
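/*
 * Full firmware load path: re-create the DSP container, request the runtime or
 * calibration firmware image, optionally run an SVC Le estimate to pick the
 * tuning set, load the coefficient files, and finish with cs40l26_dsp_config().
 */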
  4293. static int cs40l26_fw_upload(struct cs40l26_private *cs40l26)
  4294. {
  4295. bool svc_le_required = cs40l26->num_svc_le_vals && !cs40l26->calib_fw;
  4296. struct device *dev = cs40l26->dev;
  4297. u32 rev, branch, tuning_num = 0;
  4298. unsigned int le = 0;
  4299. const struct firmware *fw;
  4300. int error;
  4301. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  4302. dev_info(cs40l26->dev, "%s\n", __func__);
  4303. #endif
  4304. cs40l26->fw_loaded = false;
  4305. error = cs40l26_cl_dsp_reinit(cs40l26);
  4306. if (error)
  4307. return error;
  4308. if (cs40l26->calib_fw)
  4309. error = request_firmware(&fw, CS40L26_FW_CALIB_NAME, dev);
  4310. else
  4311. error = request_firmware(&fw, CS40L26_FW_FILE_NAME, dev);
  4312. if (error) {
  4313. release_firmware(fw);
  4314. return error;
  4315. }
  4316. if (!cs40l26->fw_rom_only) {
  4317. error = cs40l26_dsp_pre_config(cs40l26);
  4318. if (error) {
  4319. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  4320. release_firmware(fw);
  4321. #endif
  4322. return error;
  4323. }
  4324. }
  4325. error = cl_dsp_firmware_parse(cs40l26->dsp, fw, !cs40l26->fw_rom_only);
  4326. release_firmware(fw);
  4327. if (error)
  4328. return error;
  4329. error = cs40l26_change_fw_control_defaults(cs40l26);
  4330. if (error)
  4331. return error;
  4332. error = cs40l26_get_fw_params(cs40l26);
  4333. if (error)
  4334. return error;
  4335. if (svc_le_required) {
  4336. error = cl_dsp_fw_rev_get(cs40l26->dsp, &rev);
  4337. if (error)
  4338. return error;
  4339. branch = CL_DSP_GET_MAJOR(rev);
  4340. switch (branch) {
  4341. case CS40L26_FW_MAINT_BRANCH:
  4342. error = cs40l26_dsp_config(cs40l26);
  4343. if (error)
  4344. return error;
  4345. error = cs40l26_pm_enter(dev);
  4346. if (error)
  4347. return error;
  4348. error = cs40l26_svc_le_estimate(cs40l26, &le);
  4349. if (error)
dev_warn(dev, "SVC Le estimate failed: %d\n", error);
  4351. cs40l26_pm_exit(dev);
  4352. cs40l26_pm_runtime_teardown(cs40l26);
  4353. error = cs40l26_dsp_pre_config(cs40l26);
  4354. if (error)
  4355. return error;
  4356. break;
  4357. case CS40L26_FW_BRANCH:
  4358. le = cs40l26->svc_le_est_stored;
  4359. break;
  4360. default:
dev_err(dev, "Invalid firmware branch: %d\n", branch);
  4362. return -EINVAL;
  4363. }
  4364. cs40l26_tuning_select_from_svc_le(cs40l26, le, &tuning_num);
  4365. }
  4366. error = cs40l26_coeff_load(cs40l26, tuning_num);
  4367. if (error)
  4368. return error;
  4369. return cs40l26_dsp_config(cs40l26);
  4370. }
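/*
 * Register the regmap IRQ chip and request a threaded handler for every entry
 * in cs40l26_irqs[].
 */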
  4371. static int cs40l26_request_irq(struct cs40l26_private *cs40l26)
  4372. {
  4373. int error, irq, i;
  4374. cs40l26_regmap_irq_chip.irq_drv_data = cs40l26;
  4375. error = devm_regmap_add_irq_chip(cs40l26->dev, cs40l26->regmap,
  4376. cs40l26->irq, IRQF_ONESHOT | IRQF_SHARED | IRQF_TRIGGER_LOW,
  4377. -1, &cs40l26_regmap_irq_chip, &cs40l26->irq_data);
  4378. if (error < 0) {
  4379. dev_err(cs40l26->dev, "Failed to request threaded IRQ: %d\n", error);
  4380. return error;
  4381. }
  4382. cs40l26_regmap_irq_chip.irq_drv_data = cs40l26;
  4383. for (i = 0; i < ARRAY_SIZE(cs40l26_irqs); i++) {
  4384. irq = regmap_irq_get_virq(cs40l26->irq_data, cs40l26_irqs[i].irq);
  4385. if (irq < 0) {
  4386. dev_err(cs40l26->dev, "Failed to get %s\n", cs40l26_irqs[i].name);
  4387. return irq;
  4388. }
  4389. error = devm_request_threaded_irq(cs40l26->dev, irq, NULL, cs40l26_irqs[i].handler,
  4390. IRQF_ONESHOT | IRQF_SHARED | IRQF_TRIGGER_LOW,
  4391. cs40l26_irqs[i].name, cs40l26);
  4392. if (error) {
  4393. dev_err(cs40l26->dev, "Failed to request IRQ %s: %d\n",
  4394. cs40l26_irqs[i].name, error);
  4395. return error;
  4396. }
  4397. }
  4398. return error;
  4399. }
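/*
 * Swap between runtime and calibration firmware: tear down runtime PM, restore
 * the ROM write-sequencer end-of-script marker, upload the requested firmware
 * and, if the initial load was deferred, request the IRQs now.
 */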
  4400. int cs40l26_fw_swap(struct cs40l26_private *cs40l26, const u32 id)
  4401. {
  4402. struct device *dev = cs40l26->dev;
  4403. bool re_enable = false;
  4404. int error;
  4405. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
dev_info(dev, "%s: fw_id: %d, id: %d\n", __func__, cs40l26->fw_id, id);
  4407. #endif
  4408. if (cs40l26->fw_loaded) {
  4409. disable_irq(cs40l26->irq);
  4410. cs40l26_pm_runtime_teardown(cs40l26);
  4411. re_enable = true;
  4412. }
  4413. switch (cs40l26->revid) {
  4414. case CS40L26_REVID_A1:
  4415. case CS40L26_REVID_B0:
  4416. case CS40L26_REVID_B1:
  4417. break;
  4418. default:
  4419. dev_err(dev, "pseq unrecognized revid: %d\n", cs40l26->revid);
  4420. return -EINVAL;
  4421. }
  4422. /* reset pseq END_OF_SCRIPT to location from ROM */
  4423. error = cs40l26_dsp_write(cs40l26, cs40l26->rom_regs->rom_pseq_end_of_script,
  4424. CS40L26_PSEQ_OP_END << CS40L26_PSEQ_OP_SHIFT);
  4425. if (error) {
  4426. dev_err(dev, "Failed to reset pseq END_OF_SCRIPT %d\n", error);
  4427. return error;
  4428. }
  4429. if (id == CS40L26_FW_CALIB_ID)
  4430. cs40l26->calib_fw = true;
  4431. else
  4432. cs40l26->calib_fw = false;
  4433. error = cs40l26_fw_upload(cs40l26);
  4434. if (error)
  4435. return error;
  4436. if (cs40l26->fw_defer && cs40l26->fw_loaded) {
  4437. error = cs40l26_request_irq(cs40l26);
  4438. if (error)
  4439. return error;
  4440. cs40l26->fw_defer = false;
  4441. }
  4442. if (re_enable)
  4443. enable_irq(cs40l26->irq);
  4444. return error;
  4445. }
  4446. EXPORT_SYMBOL_GPL(cs40l26_fw_swap);
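/*
 * Parse the SVC LE child nodes (names matching CS40L26_SVC_DT_PREFIX) into
 * svc_le_vals (min, max, gain adjust and index). Returns the number of valid
 * nodes, or a negative errno on allocation failure.
 */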
  4447. static int cs40l26_handle_svc_le_nodes(struct cs40l26_private *cs40l26)
  4448. {
  4449. int i, error, init_count, node_count = 0;
  4450. struct device *dev = cs40l26->dev;
  4451. unsigned int min, max, index;
  4452. struct fwnode_handle *child;
  4453. const char *node_name;
  4454. u32 gain_adjust_raw;
  4455. s32 gain_adjust;
  4456. init_count = device_get_child_node_count(dev);
  4457. if (!init_count)
  4458. return 0;
  4459. cs40l26->svc_le_vals = devm_kcalloc(dev, init_count, sizeof(struct cs40l26_svc_le *),
  4460. GFP_KERNEL);
  4461. if (!cs40l26->svc_le_vals)
  4462. return -ENOMEM;
  4463. device_for_each_child_node(dev, child) {
  4464. node_name = fwnode_get_name(child);
  4465. if (strncmp(node_name, CS40L26_SVC_DT_PREFIX, 6))
  4466. continue;
  4467. if (fwnode_property_read_u32(child, "cirrus,min", &min)) {
  4468. dev_err(dev, "No minimum value for SVC LE node\n");
  4469. continue;
  4470. }
  4471. if (fwnode_property_read_u32(child, "cirrus,max", &max)) {
  4472. dev_err(dev, "No maximum value for SVC LE node\n");
  4473. continue;
  4474. }
  4475. if (max <= min) {
  4476. dev_err(dev, "Max <= Min, SVC LE node malformed\n");
  4477. continue;
  4478. }
  4479. if (fwnode_property_read_u32(child, "cirrus,gain-adjust", &gain_adjust_raw))
  4480. gain_adjust = 0;
  4481. else
  4482. gain_adjust = (s32) gain_adjust_raw;
  4483. if (fwnode_property_read_u32(child, "cirrus,index", &index)) {
  4484. dev_err(dev, "No index specified for SVC LE node\n");
  4485. continue;
  4486. }
  4487. for (i = 0; i < node_count; i++) {
  4488. if (index == cs40l26->svc_le_vals[i]->n)
  4489. break;
  4490. }
  4491. if (i < node_count) {
  4492. dev_err(dev, "SVC LE nodes must have unique index\n");
  4493. return -EINVAL;
  4494. }
  4495. cs40l26->svc_le_vals[node_count] = devm_kzalloc(dev, sizeof(struct cs40l26_svc_le),
  4496. GFP_KERNEL);
  4497. if (!cs40l26->svc_le_vals[node_count]) {
  4498. error = -ENOMEM;
  4499. goto err;
  4500. }
  4501. cs40l26->svc_le_vals[node_count]->min = min;
  4502. cs40l26->svc_le_vals[node_count]->max = max;
  4503. cs40l26->svc_le_vals[node_count]->gain_adjust = gain_adjust;
  4504. cs40l26->svc_le_vals[node_count]->n = index;
  4505. node_count++;
  4506. }
  4507. if (node_count != init_count)
  4508. dev_warn(dev, "%d platform nodes unused for SVC LE\n", init_count - node_count);
  4509. return node_count;
  4510. err:
  4511. devm_kfree(dev, cs40l26->svc_le_vals);
  4512. return error;
  4513. }
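/*
 * Read the optional cirrus,no-wait-ram-indices property and store the indices
 * offset by CS40L26_RAM_INDEX_START.
 */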
  4514. static int cs40l26_no_wait_ram_indices_get(struct cs40l26_private *cs40l26)
  4515. {
  4516. int i, num, error;
  4517. num = device_property_count_u32(cs40l26->dev, "cirrus,no-wait-ram-indices");
  4518. if (num <= 0)
  4519. return 0;
  4520. cs40l26->no_wait_ram_indices = devm_kcalloc(cs40l26->dev, num, sizeof(u32), GFP_KERNEL);
  4521. if (!cs40l26->no_wait_ram_indices)
  4522. return -ENOMEM;
  4523. error = device_property_read_u32_array(cs40l26->dev, "cirrus,no-wait-ram-indices",
  4524. cs40l26->no_wait_ram_indices, num);
  4525. if (error)
  4526. goto err_free;
  4527. for (i = 0; i < num; i++)
  4528. cs40l26->no_wait_ram_indices[i] += CS40L26_RAM_INDEX_START;
  4529. cs40l26->num_no_wait_ram_indices = num;
  4530. return 0;
  4531. err_free:
  4532. devm_kfree(cs40l26->dev, cs40l26->no_wait_ram_indices);
  4533. cs40l26->num_no_wait_ram_indices = 0;
  4534. return error;
  4535. }
  4536. static void cs40l26_hibernate_timer_callback(struct timer_list *t)
  4537. {
  4538. struct cs40l26_private *cs40l26 = from_timer(cs40l26, t, hibernate_timer);
dev_dbg(cs40l26->dev, "Time since ALLOW_HIBERNATE exceeded HE_TIME max\n");
  4540. }
  4541. static inline bool cs40l26_brwnout_is_valid(enum cs40l26_brwnout_type type, u32 val)
  4542. {
  4543. if (type >= CS40L26_NUM_BRWNOUT_TYPES)
  4544. return false;
  4545. return (val <= cs40l26_brwnout_params[type].max) &&
  4546. (val >= cs40l26_brwnout_params[type].min);
  4547. }
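/*
 * Read the optional VBBR/VPBR brownout-prevention properties; any value that
 * is missing or out of range falls back to its default.
 */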
  4548. static void cs40l26_parse_brwnout_properties(struct cs40l26_private *cs40l26)
  4549. {
  4550. struct device *dev = cs40l26->dev;
  4551. int error;
  4552. if (device_property_present(dev, "cirrus,vbbr-enable")) {
  4553. cs40l26->vbbr.enable = true;
  4554. error = device_property_read_u32(dev, "cirrus,vbbr-thld-uv",
  4555. &cs40l26->vbbr.thld_uv);
  4556. if (error || !cs40l26_brwnout_is_valid(CS40L26_VBBR_THLD, cs40l26->vbbr.thld_uv))
  4557. cs40l26->vbbr.thld_uv = CS40L26_VBBR_THLD_UV_DEFAULT;
  4558. error = device_property_read_u32(dev, "cirrus,vbbr-max-att-db",
  4559. &cs40l26->vbbr.max_att_db);
  4560. if (error || !cs40l26_brwnout_is_valid(CS40L26_VXBR_MAX_ATT,
  4561. cs40l26->vbbr.max_att_db))
  4562. cs40l26->vbbr.max_att_db = CS40L26_VXBR_MAX_ATT_DEFAULT;
  4563. error = device_property_read_u32(dev, "cirrus,vbbr-atk-step",
  4564. &cs40l26->vbbr.atk_step);
  4565. if (error || !cs40l26_brwnout_is_valid(CS40L26_VXBR_ATK_STEP,
  4566. cs40l26->vbbr.atk_step))
  4567. cs40l26->vbbr.atk_step = CS40L26_VXBR_ATK_STEP_DEFAULT;
  4568. error = device_property_read_u32(dev, "cirrus,vbbr-atk-rate",
  4569. &cs40l26->vbbr.atk_rate);
  4570. if (error || !cs40l26_brwnout_is_valid(CS40L26_VXBR_ATK_RATE,
  4571. cs40l26->vbbr.atk_rate))
  4572. cs40l26->vbbr.atk_rate = CS40L26_VXBR_ATK_RATE_DEFAULT;
  4573. error = device_property_read_u32(dev, "cirrus,vbbr-wait", &cs40l26->vbbr.wait);
  4574. if (error || !cs40l26_brwnout_is_valid(CS40L26_VXBR_WAIT, cs40l26->vbbr.wait))
  4575. cs40l26->vbbr.wait = CS40L26_VXBR_WAIT_DEFAULT;
  4576. error = device_property_read_u32(dev, "cirrus,vbbr-rel-rate",
  4577. &cs40l26->vbbr.rel_rate);
  4578. if (error || !cs40l26_brwnout_is_valid(CS40L26_VXBR_REL_RATE,
  4579. cs40l26->vbbr.rel_rate))
  4580. cs40l26->vbbr.rel_rate = CS40L26_VXBR_REL_RATE_DEFAULT;
  4581. }
  4582. if (device_property_present(dev, "cirrus,vpbr-enable")) {
  4583. cs40l26->vpbr.enable = true;
  4584. error = device_property_read_u32(dev, "cirrus,vpbr-thld-uv",
  4585. &cs40l26->vpbr.thld_uv);
  4586. if (error || !cs40l26_brwnout_is_valid(CS40L26_VPBR_THLD, cs40l26->vpbr.thld_uv))
  4587. cs40l26->vpbr.thld_uv = CS40L26_VPBR_THLD_UV_DEFAULT;
  4588. error = device_property_read_u32(dev, "cirrus,vpbr-max-att-db",
  4589. &cs40l26->vpbr.max_att_db);
  4590. if (error || !cs40l26_brwnout_is_valid(CS40L26_VXBR_MAX_ATT,
  4591. cs40l26->vpbr.max_att_db))
  4592. cs40l26->vpbr.max_att_db = CS40L26_VXBR_MAX_ATT_DEFAULT;
  4593. error = device_property_read_u32(dev, "cirrus,vpbr-atk-step",
  4594. &cs40l26->vpbr.atk_step);
  4595. if (error || !cs40l26_brwnout_is_valid(CS40L26_VXBR_ATK_STEP,
  4596. cs40l26->vpbr.atk_step))
  4597. cs40l26->vpbr.atk_step = CS40L26_VXBR_ATK_STEP_DEFAULT;
  4598. error = device_property_read_u32(dev, "cirrus,vpbr-atk-rate",
  4599. &cs40l26->vpbr.atk_rate);
  4600. if (error || !cs40l26_brwnout_is_valid(CS40L26_VXBR_ATK_RATE,
  4601. cs40l26->vpbr.atk_rate))
  4602. cs40l26->vpbr.atk_rate = CS40L26_VXBR_ATK_RATE_DEFAULT;
  4603. error = device_property_read_u32(dev, "cirrus,vpbr-wait", &cs40l26->vpbr.wait);
  4604. if (error || !cs40l26_brwnout_is_valid(CS40L26_VXBR_WAIT, cs40l26->vpbr.wait))
  4605. cs40l26->vpbr.wait = CS40L26_VXBR_WAIT_DEFAULT;
  4606. error = device_property_read_u32(dev, "cirrus,vpbr-rel-rate",
  4607. &cs40l26->vpbr.rel_rate);
  4608. if (error || !cs40l26_brwnout_is_valid(CS40L26_VXBR_REL_RATE,
  4609. cs40l26->vpbr.rel_rate))
  4610. cs40l26->vpbr.rel_rate = CS40L26_VXBR_REL_RATE_DEFAULT;
  4611. }
  4612. }
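/*
 * Top-level device property parsing: firmware load options, brownout
 * prevention, boost, PM timeouts, SVC LE nodes, gain scaling, noise gate, DBC
 * defaults, GPIO mappings and (when enabled) Samsung-specific settings.
 */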
  4613. static int cs40l26_parse_properties(struct cs40l26_private *cs40l26)
  4614. {
  4615. struct device *dev = cs40l26->dev;
  4616. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  4617. struct device_node *np = dev->of_node;
  4618. #endif
  4619. int error;
  4620. cs40l26->fw_defer = device_property_present(dev, "cirrus,fw-defer");
  4621. cs40l26->fw_rom_only = device_property_present(dev, "cirrus,fw-rom-only");
  4622. cs40l26->calib_fw = device_property_present(dev, "cirrus,calib-fw");
  4623. cs40l26->expl_mode_enabled = !device_property_present(dev, "cirrus,bst-expl-mode-disable");
  4624. cs40l26_parse_brwnout_properties(cs40l26);
  4625. cs40l26->bst_dcm_en = device_property_present(dev, "cirrus,bst-dcm-en");
  4626. cs40l26->ng_enable = device_property_present(dev, "cirrus,ng-enable");
  4627. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  4628. cs40l26->irq_gpio = of_get_named_gpio(np, "irq-gpio", 0);
pr_info("%s: irq-gpio: %d\n", __func__, cs40l26->irq_gpio);
  4630. #endif
  4631. error = device_property_read_u32(dev, "cirrus,bst-ipk-microamp", &cs40l26->bst_ipk);
  4632. if (error)
  4633. cs40l26->bst_ipk = CS40L26_BST_IPK_UA_DEFAULT;
  4634. error = device_property_read_u32(dev, "cirrus,bst-ctl-microvolt", &cs40l26->bst_ctl);
  4635. if (error)
  4636. cs40l26->bst_ctl = CS40L26_BST_UV_MAX;
  4637. error = device_property_read_u32(dev, "cirrus,clip-lvl-microvolt", &cs40l26->clip_lvl);
  4638. if (error)
  4639. cs40l26->clip_lvl = CS40L26_CLIP_LVL_UV_MAX;
  4640. error = device_property_read_u32(dev, "cirrus,pm-stdby-timeout-ms",
  4641. &cs40l26->pm_stdby_timeout_ms);
  4642. if (error)
  4643. cs40l26->pm_stdby_timeout_ms = CS40L26_PM_STDBY_TIMEOUT_MS_MIN;
  4644. error = device_property_read_u32(dev, "cirrus,pm-active-timeout-ms",
  4645. &cs40l26->pm_active_timeout_ms);
  4646. if (error)
  4647. cs40l26->pm_active_timeout_ms = CS40L26_PM_ACTIVE_TIMEOUT_MS_DEFAULT;
  4648. error = cs40l26_handle_svc_le_nodes(cs40l26);
  4649. if (error < 0)
  4650. cs40l26->num_svc_le_vals = 0;
  4651. else
  4652. cs40l26->num_svc_le_vals = error;
  4653. #if IS_ENABLED(CONFIG_SEC_FACTORY)
  4654. dev_info(dev, "%s - asp_scale_pct is always 100 %%\n", __func__);
  4655. cs40l26->asp_scale_pct = CS40L26_GAIN_FULL_SCALE;
  4656. #else
  4657. error = device_property_read_u32(dev, "cirrus,asp-gain-scale-pct", &cs40l26->asp_scale_pct);
  4658. if (error)
  4659. cs40l26->asp_scale_pct = CS40L26_GAIN_FULL_SCALE;
  4660. #endif
  4661. cs40l26->gain_pct = CS40L26_GAIN_FULL_SCALE;
  4662. cs40l26->gain_tmp = CS40L26_GAIN_FULL_SCALE;
  4663. error = device_property_read_u32(dev, "cirrus,ng-thld", &cs40l26->ng_thld);
  4664. if (error)
  4665. cs40l26->ng_thld = CS40L26_NG_THRESHOLD_DEFAULT;
  4666. error = device_property_read_u32(dev, "cirrus,ng-delay", &cs40l26->ng_delay);
  4667. if (error)
  4668. cs40l26->ng_delay = CS40L26_NG_DELAY_DEFAULT;
  4669. cs40l26->aux_ng_enable = device_property_present(dev, "cirrus,aux-ng-enable");
  4670. error = device_property_read_u32(dev, "cirrus,aux-ng-thld", &cs40l26->aux_ng_thld);
  4671. if (error)
  4672. cs40l26->aux_ng_thld = CS40L26_AUX_NG_THLD_DEFAULT;
  4673. error = device_property_read_u32(dev, "cirrus,aux-ng-delay", &cs40l26->aux_ng_delay);
  4674. if (error)
  4675. cs40l26->aux_ng_delay = CS40L26_AUX_NG_HOLD_DEFAULT;
  4676. error = device_property_read_u32(dev, "cirrus,f0-default", &cs40l26->f0_default);
  4677. if (error && error != -EINVAL)
  4678. return error;
  4679. error = device_property_read_u32(dev, "cirrus,redc-default", &cs40l26->redc_default);
  4680. if (error && error != -EINVAL)
  4681. return error;
  4682. error = device_property_read_u32(dev, "cirrus,q-default", &cs40l26->q_default);
  4683. if (error && error != -EINVAL)
  4684. return error;
  4685. cs40l26->dbc_enable_default = device_property_present(dev, "cirrus,dbc-enable");
  4686. error = device_property_read_u32(dev, "cirrus,dbc-env-rel-coef",
  4687. &cs40l26->dbc_defaults[CS40L26_DBC_ENV_REL_COEF]);
  4688. if (error)
  4689. cs40l26->dbc_defaults[CS40L26_DBC_ENV_REL_COEF] = CS40L26_DBC_USE_DEFAULT;
  4690. error = device_property_read_u32(dev, "cirrus,dbc-fall-headroom",
  4691. &cs40l26->dbc_defaults[CS40L26_DBC_FALL_HEADROOM]);
  4692. if (error)
  4693. cs40l26->dbc_defaults[CS40L26_DBC_FALL_HEADROOM] = CS40L26_DBC_USE_DEFAULT;
  4694. error = device_property_read_u32(dev, "cirrus,dbc-rise-headroom",
  4695. &cs40l26->dbc_defaults[CS40L26_DBC_RISE_HEADROOM]);
  4696. if (error)
  4697. cs40l26->dbc_defaults[CS40L26_DBC_RISE_HEADROOM] = CS40L26_DBC_USE_DEFAULT;
  4698. error = device_property_read_u32(dev, "cirrus,dbc-tx-lvl-hold-off-ms",
  4699. &cs40l26->dbc_defaults[CS40L26_DBC_TX_LVL_HOLD_OFF_MS]);
  4700. if (error)
  4701. cs40l26->dbc_defaults[CS40L26_DBC_TX_LVL_HOLD_OFF_MS] = CS40L26_DBC_USE_DEFAULT;
  4702. error = device_property_read_u32(dev, "cirrus,dbc-tx-lvl-thresh-fs",
  4703. &cs40l26->dbc_defaults[CS40L26_DBC_TX_LVL_THRESH_FS]);
  4704. if (error)
  4705. cs40l26->dbc_defaults[CS40L26_DBC_TX_LVL_THRESH_FS] = CS40L26_DBC_USE_DEFAULT;
  4706. cs40l26->pwle_zero_cross = device_property_present(dev, "cirrus,pwle-zero-cross-en");
  4707. cs40l26->press_idx = gpio_map_get(dev, CS40L26_GPIO_MAP_A_PRESS);
  4708. cs40l26->release_idx = gpio_map_get(dev, CS40L26_GPIO_MAP_A_RELEASE);
  4709. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  4710. cs40l26->pdata.is_f0_tracking = device_property_present(dev, "samsung,f0-tracking");
  4711. cs40l26->pdata.is_mv_support = device_property_present(dev, "samsung,mv_support");
  4712. error = device_property_read_u32(dev, "samsung,f0-tracking-offset", &cs40l26->pdata.f0_offset);
  4713. if (error)
  4714. cs40l26->pdata.f0_offset = 0;
  4715. else
  4716. cs40l26->pdata.f0_offset *= CS40L26_SAMSUNG_F0_OFFSET;
  4717. dev_info(dev, "%s - f0 tracking is %s, f0 offset is 0x%x\n", __func__,
  4718. cs40l26->pdata.is_f0_tracking ? "set" : "unset", cs40l26->pdata.f0_offset);
  4719. error = device_property_read_string(dev, "samsung,owt-lib-compat-version",
  4720. (const char **) &cs40l26->pdata.owt_lib_compat_version);
  4721. if (error)
  4722. cs40l26->pdata.owt_lib_compat_version = "0.0.0";
  4723. dev_info(dev, "%s - owt lib compat version is %s\n", __func__, cs40l26->pdata.owt_lib_compat_version);
  4724. error = device_property_read_string(dev, "samsung,ap_chipset", (const char **) &cs40l26->pdata.ap_chipset);
  4725. if (error)
  4726. cs40l26->pdata.ap_chipset = "qcom";
  4727. dev_info(dev, "%s - AP Chipset is %s\n", __func__, cs40l26->pdata.ap_chipset);
  4728. #endif
  4729. return cs40l26_no_wait_ram_indices_get(cs40l26);
  4730. }
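/*
 * Driver bring-up: create the vibe workqueue and work items, acquire supplies
 * and the reset GPIO, release reset, force the PLL open-loop and clear GPI
 * mappings while firmware loads, set the LRA output to Hi-Z, then (unless
 * firmware load is deferred) upload firmware, request IRQs and register the
 * input and codec/MFD devices.
 */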
  4731. int cs40l26_probe(struct cs40l26_private *cs40l26)
  4732. {
  4733. struct device *dev = cs40l26->dev;
  4734. int error;
  4735. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  4736. dev_info(dev, "%s - start\n", __func__);
  4737. #endif
  4738. mutex_init(&cs40l26->lock);
  4739. cs40l26->vibe_workqueue = alloc_ordered_workqueue("vibe_workqueue", WQ_HIGHPRI);
  4740. if (!cs40l26->vibe_workqueue) {
  4741. error = -ENOMEM;
  4742. goto err;
  4743. }
  4744. INIT_WORK(&cs40l26->vibe_start_work, cs40l26_vibe_start_worker);
  4745. INIT_WORK(&cs40l26->vibe_stop_work, cs40l26_vibe_stop_worker);
  4746. INIT_WORK(&cs40l26->set_gain_work, cs40l26_set_gain_worker);
  4747. INIT_WORK(&cs40l26->upload_work, cs40l26_upload_worker);
  4748. INIT_WORK(&cs40l26->erase_work, cs40l26_erase_worker);
  4749. timer_setup(&cs40l26->hibernate_timer, cs40l26_hibernate_timer_callback, 0);
  4750. error = devm_regulator_bulk_get(dev, CS40L26_NUM_SUPPLIES, cs40l26_supplies);
  4751. if (error) {
  4752. dev_err(dev, "Failed to request core supplies: %d\n", error);
  4753. goto err;
  4754. }
  4755. error = cs40l26_parse_properties(cs40l26);
  4756. if (error)
  4757. goto err;
  4758. error = regulator_bulk_enable(CS40L26_NUM_SUPPLIES, cs40l26_supplies);
  4759. if (error) {
  4760. dev_err(dev, "Failed to enable core supplies\n");
  4761. goto err;
  4762. }
  4763. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  4764. cs40l26->delay_before_stop_playback_us = DELAY_BEFORE_STOP_PLAYBACK_US;
  4765. pr_info("%s delay_before_stop_playback_us: %d\n", __func__, DELAY_BEFORE_STOP_PLAYBACK_US);
  4766. #endif
  4767. cs40l26->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
  4768. if (IS_ERR(cs40l26->reset_gpio)) {
  4769. dev_err(dev, "Failed to get reset GPIO\n");
  4770. error = PTR_ERR(cs40l26->reset_gpio);
  4771. cs40l26->reset_gpio = NULL;
  4772. goto err;
  4773. }
  4774. usleep_range(CS40L26_MIN_RESET_PULSE_WIDTH, CS40L26_MIN_RESET_PULSE_WIDTH + 100);
  4775. gpiod_set_value_cansleep(cs40l26->reset_gpio, 0);
  4776. usleep_range(CS40L26_CONTROL_PORT_READY_DELAY, CS40L26_CONTROL_PORT_READY_DELAY + 100);
  4777. /*
  4778. * The DSP may lock up if a haptic effect is triggered via
  4779. * GPI event or control port and the PLL is set to closed-loop.
  4780. *
  4781. * Set PLL to open-loop and remove any default GPI mappings
  4782. * to prevent this while the driver is loading and configuring RAM
  4783. * firmware.
  4784. */
  4785. error = cs40l26_set_pll_loop(cs40l26, CS40L26_PLL_REFCLK_SET_OPEN_LOOP);
  4786. if (error)
  4787. goto err;
  4788. error = cs40l26_part_num_resolve(cs40l26);
  4789. if (error)
  4790. goto err;
  4791. error = cs40l26_erase_gpi_mapping(cs40l26, CS40L26_GPIO_MAP_A_PRESS);
  4792. if (error)
  4793. goto err;
  4794. error = cs40l26_erase_gpi_mapping(cs40l26, CS40L26_GPIO_MAP_A_RELEASE);
  4795. if (error)
  4796. goto err;
  4797. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  4798. cs40l26->irq = gpio_to_irq(cs40l26->irq_gpio);
  4799. pr_info("%s irq number: %d\n", __func__, cs40l26->irq);
  4800. #endif
  4801. /* Set LRA to high-z to avoid fault conditions */
  4802. error = regmap_update_bits(cs40l26->regmap, CS40L26_TST_DAC_MSM_CONFIG,
  4803. CS40L26_SPK_DEFAULT_HIZ_MASK, 1 << CS40L26_SPK_DEFAULT_HIZ_SHIFT);
  4804. if (error) {
  4805. dev_err(dev, "Failed to set LRA to HI-Z\n");
  4806. goto err;
  4807. }
  4808. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  4809. cs40l26->busy_state = 0;
  4810. #endif
  4811. init_completion(&cs40l26->i2s_cont);
  4812. init_completion(&cs40l26->erase_cont);
  4813. init_completion(&cs40l26->cal_f0_cont);
  4814. init_completion(&cs40l26->cal_redc_cont);
  4815. init_completion(&cs40l26->cal_dvl_peq_cont);
  4816. init_completion(&cs40l26->cal_ls_cont);
  4817. if (!cs40l26->fw_defer) {
  4818. error = cs40l26_fw_upload(cs40l26);
  4819. if (error)
  4820. goto err;
  4821. error = cs40l26_request_irq(cs40l26);
  4822. if (error)
  4823. goto err;
  4824. }
  4825. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  4826. samsung_input_data_init(cs40l26);
  4827. error = sec_vib_inputff_register(&cs40l26->sec_vib_ddata);
  4828. #else
  4829. error = cs40l26_input_init(cs40l26);
  4830. #endif
  4831. if (error)
  4832. goto err;
  4833. INIT_LIST_HEAD(&cs40l26->effect_head);
  4834. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  4835. cs40l26->input = cs40l26->sec_vib_ddata.input;
  4836. error = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, cs40l26_devs,
  4837. CS40L26_NUM_MFD_DEVS, NULL, 0, NULL);
  4838. #else
  4839. error = devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO, cs40l26_devs,
  4840. CS40L26_NUM_MFD_DEVS, NULL, 0, NULL);
  4841. #endif
  4842. if (error) {
  4843. dev_err(dev, "Failed to register codec component\n");
  4844. goto err;
  4845. }
  4846. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  4847. dev_info(dev, "%s - end\n", __func__);
  4848. #endif
  4849. return 0;
  4850. err:
  4851. cs40l26_remove(cs40l26);
  4852. return error;
  4853. }
  4854. EXPORT_SYMBOL_GPL(cs40l26_probe);
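/*
 * Tear down IRQs, runtime PM, the vibe workqueue and its work items, disable
 * the core supplies, assert reset, clean up debugfs and unregister the input
 * device and sysfs groups.
 */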
  4855. int cs40l26_remove(struct cs40l26_private *cs40l26)
  4856. {
  4857. struct regulator *vp_consumer = cs40l26_supplies[CS40L26_VP_SUPPLY].consumer;
  4858. struct regulator *va_consumer = cs40l26_supplies[CS40L26_VA_SUPPLY].consumer;
  4859. disable_irq(cs40l26->irq);
  4860. mutex_destroy(&cs40l26->lock);
  4861. cs40l26_pm_runtime_teardown(cs40l26);
  4862. if (cs40l26->vibe_workqueue) {
  4863. cancel_work_sync(&cs40l26->vibe_start_work);
  4864. cancel_work_sync(&cs40l26->vibe_stop_work);
  4865. cancel_work_sync(&cs40l26->set_gain_work);
  4866. cancel_work_sync(&cs40l26->upload_work);
  4867. cancel_work_sync(&cs40l26->erase_work);
  4868. destroy_workqueue(cs40l26->vibe_workqueue);
  4869. }
  4870. if (vp_consumer)
  4871. regulator_disable(vp_consumer);
  4872. if (va_consumer)
  4873. regulator_disable(va_consumer);
  4874. gpiod_set_value_cansleep(cs40l26->reset_gpio, 1);
  4875. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  4876. #ifdef CONFIG_DEBUG_FS
  4877. cs40l26_debugfs_cleanup(cs40l26);
  4878. #endif /* CONFIG_DEBUG_FS */
  4879. sec_vib_inputff_unregister(&cs40l26->sec_vib_ddata);
  4880. #else
  4881. if (cs40l26->vibe_init_success) {
  4882. sysfs_remove_group(&cs40l26->input->dev.kobj, &cs40l26_dev_attr_group);
  4883. sysfs_remove_group(&cs40l26->input->dev.kobj, &cs40l26_dev_attr_cal_group);
  4884. sysfs_remove_group(&cs40l26->input->dev.kobj, &cs40l26_dev_attr_dbc_group);
  4885. }
  4886. #ifdef CONFIG_DEBUG_FS
  4887. cs40l26_debugfs_cleanup(cs40l26);
  4888. #endif
  4889. if (cs40l26->input)
  4890. input_unregister_device(cs40l26->input);
  4891. #endif
  4892. return 0;
  4893. }
  4894. EXPORT_SYMBOL_GPL(cs40l26_remove);
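/*
 * Take a synchronous runtime-PM reference; on failure the resume error handler
 * marks the device active again and drops the reference.
 */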
  4895. int cs40l26_pm_enter(struct device *dev)
  4896. {
  4897. int error;
  4898. error = pm_runtime_get_sync(dev);
  4899. if (error < 0) {
  4900. cs40l26_resume_error_handle(dev, error);
  4901. return error;
  4902. }
  4903. return 0;
  4904. }
  4905. EXPORT_SYMBOL_GPL(cs40l26_pm_enter);
  4906. void cs40l26_pm_exit(struct device *dev)
  4907. {
  4908. pm_runtime_mark_last_busy(dev);
  4909. pm_runtime_put_autosuspend(dev);
  4910. }
  4911. EXPORT_SYMBOL_GPL(cs40l26_pm_exit);
  4912. int cs40l26_suspend(struct device *dev)
  4913. {
  4914. struct cs40l26_private *cs40l26 = dev_get_drvdata(dev);
  4915. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  4916. dev_info(cs40l26->dev, "%s: Enabling hibernation\n", __func__);
  4917. #else
  4918. dev_dbg(cs40l26->dev, "%s: Enabling hibernation\n", __func__);
  4919. #endif
  4920. return cs40l26_pm_state_transition(cs40l26, CS40L26_PM_STATE_ALLOW_HIBERNATE);
  4921. }
  4922. EXPORT_SYMBOL_GPL(cs40l26_suspend);
  4923. int cs40l26_sys_suspend(struct device *dev)
  4924. {
  4925. struct cs40l26_private *cs40l26 = dev_get_drvdata(dev);
  4926. struct i2c_client *i2c_client = to_i2c_client(dev);
  4927. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  4928. dev_info(cs40l26->dev, "System suspend, disabling IRQ\n");
  4929. #else
  4930. dev_dbg(cs40l26->dev, "System suspend, disabling IRQ\n");
  4931. #endif
  4932. disable_irq(i2c_client->irq);
  4933. return 0;
  4934. }
  4935. EXPORT_SYMBOL_GPL(cs40l26_sys_suspend);
  4936. int cs40l26_sys_suspend_noirq(struct device *dev)
  4937. {
  4938. struct cs40l26_private *cs40l26 = dev_get_drvdata(dev);
  4939. struct i2c_client *i2c_client = to_i2c_client(dev);
  4940. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  4941. dev_info(cs40l26->dev, "Late system suspend, re-enabling IRQ\n");
  4942. #else
  4943. dev_dbg(cs40l26->dev, "Late system suspend, re-enabling IRQ\n");
  4944. #endif
  4945. enable_irq(i2c_client->irq);
  4946. return 0;
  4947. }
  4948. EXPORT_SYMBOL_GPL(cs40l26_sys_suspend_noirq);
  4949. void cs40l26_resume_error_handle(struct device *dev, int error)
  4950. {
  4951. dev_alert(dev, "PM Runtime Resume failed: %d\n", error);
  4952. pm_runtime_set_active(dev);
  4953. cs40l26_pm_exit(dev);
  4954. }
  4955. EXPORT_SYMBOL_GPL(cs40l26_resume_error_handle);
  4956. int cs40l26_resume(struct device *dev)
  4957. {
  4958. struct cs40l26_private *cs40l26 = dev_get_drvdata(dev);
  4959. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  4960. dev_info(cs40l26->dev, "%s: Disabling hibernation\n", __func__);
  4961. #else
  4962. dev_dbg(cs40l26->dev, "%s: Disabling hibernation\n", __func__);
  4963. #endif
  4964. return cs40l26_pm_state_transition(cs40l26, CS40L26_PM_STATE_PREVENT_HIBERNATE);
  4965. }
  4966. EXPORT_SYMBOL_GPL(cs40l26_resume);
  4967. int cs40l26_sys_resume(struct device *dev)
  4968. {
  4969. struct cs40l26_private *cs40l26 = dev_get_drvdata(dev);
  4970. struct i2c_client *i2c_client = to_i2c_client(dev);
  4971. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  4972. dev_info(cs40l26->dev, "System resume, re-enabling IRQ\n");
  4973. #else
  4974. dev_dbg(cs40l26->dev, "System resume, re-enabling IRQ\n");
  4975. #endif
  4976. enable_irq(i2c_client->irq);
  4977. return 0;
  4978. }
  4979. EXPORT_SYMBOL_GPL(cs40l26_sys_resume);
  4980. int cs40l26_sys_resume_noirq(struct device *dev)
  4981. {
  4982. struct cs40l26_private *cs40l26 = dev_get_drvdata(dev);
  4983. struct i2c_client *i2c_client = to_i2c_client(dev);
  4984. #ifdef CONFIG_CS40L26_SAMSUNG_FEATURE
  4985. dev_info(cs40l26->dev, "Early system resume, disabling IRQ\n");
  4986. #else
  4987. dev_dbg(cs40l26->dev, "Early system resume, disabling IRQ\n");
  4988. #endif
  4989. disable_irq(i2c_client->irq);
  4990. return 0;
  4991. }
  4992. EXPORT_SYMBOL_GPL(cs40l26_sys_resume_noirq);
  4993. const struct dev_pm_ops cs40l26_pm_ops = {
  4994. SET_RUNTIME_PM_OPS(cs40l26_suspend, cs40l26_resume, NULL)
  4995. SET_SYSTEM_SLEEP_PM_OPS(cs40l26_sys_suspend, cs40l26_sys_resume)
  4996. SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(cs40l26_sys_suspend_noirq, cs40l26_sys_resume_noirq)
  4997. };
  4998. EXPORT_SYMBOL_GPL(cs40l26_pm_ops);
  4999. MODULE_DESCRIPTION("CS40L26 Boosted Mono Class D Amplifier for Haptics");
  5000. MODULE_AUTHOR("Fred Treven, Cirrus Logic Inc. <[email protected]>");
  5001. MODULE_LICENSE("GPL");