cam_soc_util.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include <linux/of.h>
  7. #include <linux/clk.h>
  8. #include <linux/slab.h>
  9. #include <linux/gpio.h>
  10. #include <linux/of_gpio.h>
  11. #include "cam_soc_util.h"
  12. #include "cam_debug_util.h"
  13. #include "cam_cx_ipeak.h"
  14. #include "cam_mem_mgr.h"
  15. #include "cam_presil_hw_access.h"
  16. #include "cam_compat.h"
  17. #if IS_ENABLED(CONFIG_QCOM_CRM)
  18. #include <soc/qcom/crm.h>
  19. #include <linux/clk/qcom.h>
  20. #endif
  21. #define CAM_TO_MASK(bitn) (1 << (int)(bitn))
  22. #define CAM_IS_BIT_SET(mask, bit) ((mask) & CAM_TO_MASK(bit))
  23. #define CAM_SET_BIT(mask, bit) ((mask) |= CAM_TO_MASK(bit))
  24. #define CAM_CLEAR_BIT(mask, bit) ((mask) &= ~CAM_TO_MASK(bit))
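/*
 * Example of the mask helpers above (illustrative values):
 *
 *   uint32_t mask = 0;
 *   CAM_SET_BIT(mask, 3);            // mask == 0x8
 *   if (CAM_IS_BIT_SET(mask, 3))
 *           CAM_CLEAR_BIT(mask, 3);  // mask back to 0
 */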
  25. #define CAM_SS_START_PRESIL 0x08c00000
  26. #define CAM_SS_START 0x0ac00000
  27. #define CAM_CLK_DIRNAME "clk"
  28. static uint skip_mmrm_set_rate;
  29. module_param(skip_mmrm_set_rate, uint, 0644);
  30. /**
  31. * struct cam_clk_wrapper_clk: This represents an entry corresponding to a
  32. * shared clock in the clk wrapper. Clients that share
  33. * the same clock register against this clk entry,
  34. * and their set-rate requests are consolidated before
  35. * being applied to the clk driver.
  36. *
  37. * @list: List pointer to point to next shared clk entry
  38. * @clk_id: Clk Id of this clock
  39. * @curr_clk_rate: Current clock rate set for this clock
  40. * @client_list: List of clients registered to this shared clock entry
  41. * @num_clients: Number of registered clients
  42. * @active_clients: Number of active clients
  44. * @mmrm_handle: MMRM Client handle for src clock
  45. * @soc_info: soc_info of the client with which the mmrm handle is created.
  46. * This is used as a unique identifier for the client and as mmrm
  47. * callback data. When the client corresponding to this soc_info is
  48. * unregistered, the mmrm handle must be unregistered as well.
  48. * @is_nrt_dev: Whether this clock corresponds to NRT device
  49. * @min_clk_rate: Minimum clk rate that this clock supports
  50. **/
  51. struct cam_clk_wrapper_clk {
  52. struct list_head list;
  53. uint32_t clk_id;
  54. int64_t curr_clk_rate;
  55. struct list_head client_list;
  56. uint32_t num_clients;
  57. uint32_t active_clients;
  58. void *mmrm_handle;
  59. struct cam_hw_soc_info *soc_info;
  60. bool is_nrt_dev;
  61. int64_t min_clk_rate;
  62. };
  63. /**
  64. * struct cam_clk_wrapper_client: This represents a client (device) that wants
  65. * to share the clock with some other client.
  66. *
  67. * @list: List pointer to point to next client that shares the
  68. * same clock
  69. * @soc_info: soc_info of client. This is used as unique identifier
  70. * for a client
  71. * @clk: Clk handle
  72. * @curr_clk_rate: Current clock rate set for this client
  73. **/
  74. struct cam_clk_wrapper_client {
  75. struct list_head list;
  76. struct cam_hw_soc_info *soc_info;
  77. struct clk *clk;
  78. int64_t curr_clk_rate;
  79. };
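/*
 * Editorial sketch (not part of the driver): devices that register the same
 * clk_id share a single cam_clk_wrapper_clk entry, and each device gets its
 * own cam_clk_wrapper_client node on that entry's client_list, e.g.:
 *
 *   wrapper_clk_list -> { clk_id = X, curr_clk_rate, client_list }
 *                          +-> { soc_info = devA, curr_clk_rate = rateA }
 *                          +-> { soc_info = devB, curr_clk_rate = rateB }
 *
 * The rate actually programmed for clk_id X is the maximum of the per-client
 * votes, consolidated in cam_soc_util_clk_wrapper_set_clk_rate() below.
 */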
  80. static char supported_clk_info[256];
  81. static DEFINE_MUTEX(wrapper_lock);
  82. static LIST_HEAD(wrapper_clk_list);
  83. #define CAM_IS_VALID_CESTA_IDX(idx) (((idx) >= 0) && ((idx) < CAM_CESTA_MAX_CLIENTS))
  84. #define CAM_CRM_DEV_IDENTIFIER "cam_crm"
  85. const struct device *cam_cesta_crm_dev;
  86. #if IS_ENABLED(CONFIG_QCOM_CRM) && IS_ENABLED(CONFIG_SPECTRA_USE_CLK_CRM_API)
  87. static int cam_soc_util_set_hw_client_rate_through_mmrm(
  88. void *mmrm_handle, long low_val, long high_val,
  89. uint32_t num_hw_blocks, int cesta_client_idx);
  90. #endif
  91. #if IS_ENABLED(CONFIG_QCOM_CRM)
  92. static inline const struct device *cam_wrapper_crm_get_device(
  93. const char *name)
  94. {
  95. if (debug_bypass_drivers & CAM_BYPASS_CESTA) {
  96. CAM_WARN(CAM_UTIL, "Bypass crm get device");
  97. return (const struct device *)BYPASS_VALUE;
  98. }
  99. return crm_get_device(name);
  100. }
  101. static inline int cam_wrapper_crm_write_pwr_states(const struct device *dev,
  102. u32 drv_id)
  103. {
  104. if (debug_bypass_drivers & CAM_BYPASS_CESTA) {
  105. CAM_WARN(CAM_UTIL, "Bypass crm write pwr states");
  106. return 0;
  107. }
  108. return crm_write_pwr_states(cam_cesta_crm_dev, drv_id);
  109. }
  110. #endif
  111. #if IS_ENABLED(CONFIG_QCOM_CRM) && IS_ENABLED(CONFIG_SPECTRA_USE_CLK_CRM_API)
  112. static inline int cam_wrapper_qcom_clk_crm_set_rate(struct clk *clk,
  113. enum crm_drv_type client_type, u32 client_idx,
  114. u32 pwr_st, unsigned long rate)
  115. {
  116. if (debug_bypass_drivers & CAM_BYPASS_CESTA) {
  117. CAM_WARN(CAM_UTIL, "Bypass qcom clk crm set rate");
  118. return 0;
  119. }
  120. return qcom_clk_crm_set_rate(clk, client_type, client_idx, pwr_st, rate);
  121. }
  122. #endif
  123. static inline int cam_wrapper_clk_set_rate(struct clk *clk, unsigned long rate)
  124. {
  125. if (debug_bypass_drivers & CAM_BYPASS_CLKS) {
  126. CAM_WARN(CAM_UTIL, "Bypass clk set rate");
  127. return 0;
  128. }
  129. return clk_set_rate(clk, rate);
  130. }
  131. static inline long cam_wrapper_clk_round_rate(struct clk *clk, unsigned long rate)
  132. {
  133. if (debug_bypass_drivers & CAM_BYPASS_CLKS) {
  134. CAM_WARN(CAM_UTIL, "Bypass clk round rate");
  135. return rate;
  136. }
  137. return clk_round_rate(clk, rate);
  138. }
  139. inline unsigned long cam_wrapper_clk_get_rate(struct clk *clk)
  140. {
  141. if (debug_bypass_drivers & CAM_BYPASS_CLKS) {
  142. CAM_WARN(CAM_UTIL, "Bypass clk get rate");
  143. return DEFAULT_CLK_VALUE;
  144. }
  145. return clk_get_rate(clk);
  146. }
  147. static inline struct clk *cam_wrapper_clk_get(struct device *dev, const char *id)
  148. {
  149. if (debug_bypass_drivers & CAM_BYPASS_CLKS) {
  150. CAM_WARN(CAM_UTIL, "Bypass clk get");
  151. return (struct clk *)BYPASS_VALUE;
  152. }
  153. return clk_get(dev, id);
  154. }
  155. static inline void cam_wrapper_clk_put(struct clk *clk)
  156. {
  157. if (debug_bypass_drivers & CAM_BYPASS_CLKS) {
  158. CAM_WARN(CAM_UTIL, "Bypass clk put");
  159. return;
  160. }
  161. clk_put(clk);
  162. }
  163. static inline struct clk *cam_wrapper_of_clk_get_from_provider(
  164. struct of_phandle_args *clkspec)
  165. {
  166. if (debug_bypass_drivers & CAM_BYPASS_CLKS) {
  167. CAM_WARN(CAM_UTIL, "Bypass of clk get from provider");
  168. return (struct clk *)BYPASS_VALUE;
  169. }
  170. return of_clk_get_from_provider(clkspec);
  171. }
  172. static inline int cam_wrapper_clk_prepare_enable(struct clk *clk)
  173. {
  174. if (debug_bypass_drivers & CAM_BYPASS_CLKS) {
  175. CAM_WARN(CAM_UTIL, "Bypass clk prepare enable");
  176. return 0;
  177. }
  178. return clk_prepare_enable(clk);
  179. }
  180. static inline void cam_wrapper_clk_disable_unprepare(struct clk *clk)
  181. {
  182. if (debug_bypass_drivers & CAM_BYPASS_CLKS) {
  183. CAM_WARN(CAM_UTIL, "Bypass clk disable unprepare");
  184. return;
  185. }
  186. clk_disable_unprepare(clk);
  187. }
  188. static inline struct regulator *cam_wrapper_regulator_get(struct device *dev,
  189. const char *id)
  190. {
  191. if (debug_bypass_drivers & CAM_BYPASS_RGLTR) {
  192. CAM_WARN(CAM_UTIL, "Bypass regulator get");
  193. return (struct regulator *)BYPASS_VALUE;
  194. }
  195. return regulator_get(dev, id);
  196. }
  197. static inline void cam_wrapper_regulator_put(struct regulator *regulator)
  198. {
  199. if (debug_bypass_drivers & CAM_BYPASS_RGLTR) {
  200. CAM_WARN(CAM_UTIL, "Bypass regulator put");
  201. return;
  202. }
  203. regulator_put(regulator);
  204. }
  205. static inline int cam_wrapper_regulator_disable(struct regulator *regulator)
  206. {
  207. if (debug_bypass_drivers & CAM_BYPASS_RGLTR) {
  208. CAM_WARN(CAM_UTIL, "Bypass regulator disable");
  209. return 0;
  210. }
  211. return regulator_disable(regulator);
  212. }
  213. static inline int cam_wrapper_regulator_enable(struct regulator *regulator)
  214. {
  215. if (debug_bypass_drivers & CAM_BYPASS_RGLTR) {
  216. CAM_WARN(CAM_UTIL, "Bypass regulator enable");
  217. return 0;
  218. }
  219. return regulator_enable(regulator);
  220. }
  221. static inline int cam_wrapper_regulator_set_voltage(
  222. struct regulator *regulator, int min_uV, int max_uV)
  223. {
  224. if (debug_bypass_drivers & CAM_BYPASS_RGLTR) {
  225. CAM_WARN(CAM_UTIL, "Bypass regulator set voltage");
  226. return 0;
  227. }
  228. return regulator_set_voltage(regulator, min_uV, max_uV);
  229. }
  230. static inline int cam_wrapper_regulator_count_voltages(
  231. struct regulator *regulator)
  232. {
  233. if (debug_bypass_drivers & CAM_BYPASS_RGLTR) {
  234. CAM_WARN(CAM_UTIL, "Bypass regulator count voltages");
  235. return 0;
  236. }
  237. return regulator_count_voltages(regulator);
  238. }
  239. inline int cam_wrapper_regulator_set_load(
  240. struct regulator *regulator, int uA_load)
  241. {
  242. if (debug_bypass_drivers & CAM_BYPASS_RGLTR) {
  243. CAM_WARN(CAM_UTIL, "Bypass regulator set load");
  244. return 0;
  245. }
  246. return regulator_set_load(regulator, uA_load);
  247. }
  248. inline int cam_wrapper_regulator_set_mode(
  249. struct regulator *regulator, unsigned int mode)
  250. {
  251. if (debug_bypass_drivers & CAM_BYPASS_RGLTR_MODE) {
  252. CAM_WARN(CAM_UTIL, "Bypass regulator set mode");
  253. return 0;
  254. }
  255. return regulator_set_mode(regulator, mode);
  256. }
  257. static inline int cam_wrapper_regulator_is_enabled(
  258. struct regulator *regulator)
  259. {
  260. if (debug_bypass_drivers & CAM_BYPASS_RGLTR) {
  261. CAM_WARN(CAM_UTIL, "Bypass regulator is enabled");
  262. return 0;
  263. }
  264. return regulator_is_enabled(regulator);
  265. }
  266. inline void cam_soc_util_set_bypass_drivers(
  267. uint32_t bypass_drivers)
  268. {
  269. debug_bypass_drivers = bypass_drivers;
  270. CAM_INFO(CAM_UTIL, "bypass drivers %d", debug_bypass_drivers);
  271. }
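/*
 * Illustrative use of the bypass mask (assuming the CAM_BYPASS_* values used
 * above are single-bit flags defined elsewhere): a debug path could bypass
 * all clk and regulator framework calls while leaving CESTA untouched with
 *
 *   cam_soc_util_set_bypass_drivers(CAM_BYPASS_CLKS | CAM_BYPASS_RGLTR);
 *
 * after which every matching cam_wrapper_*() helper above short-circuits
 * with a warning and a benign return value.
 */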
  272. #if IS_ENABLED(CONFIG_QCOM_CRM)
  273. inline int cam_soc_util_cesta_populate_crm_device(void)
  274. {
  275. cam_cesta_crm_dev = cam_wrapper_crm_get_device(CAM_CRM_DEV_IDENTIFIER);
  276. if (!cam_cesta_crm_dev) {
  277. CAM_ERR(CAM_UTIL, "Failed to get cesta crm dev for %s", CAM_CRM_DEV_IDENTIFIER);
  278. return -ENODEV;
  279. }
  280. return 0;
  281. }
  282. int cam_soc_util_cesta_channel_switch(uint32_t cesta_client_idx, const char *identifier)
  283. {
  284. int rc = 0;
  285. if (!cam_cesta_crm_dev) {
  286. CAM_ERR(CAM_UTIL, "camera cesta crm device is null");
  287. return -EINVAL;
  288. }
  289. if (!CAM_IS_VALID_CESTA_IDX(cesta_client_idx)) {
  290. CAM_ERR(CAM_UTIL, "Invalid client index for camera cesta idx: %d max: %d",
  291. cesta_client_idx, CAM_CESTA_MAX_CLIENTS);
  292. return -EINVAL;
  293. }
  294. CAM_DBG(CAM_PERF, "CESTA Channel switch : hw client idx %d identifier=%s",
  295. cesta_client_idx, identifier);
  296. rc = cam_wrapper_crm_write_pwr_states(cam_cesta_crm_dev, cesta_client_idx);
  297. if (rc) {
  298. CAM_ERR(CAM_UTIL,
  299. "Failed to trigger cesta channel switch cesta_client_idx: %u rc: %d",
  300. cesta_client_idx, rc);
  301. return rc;
  302. }
  303. return rc;
  304. }
  305. #else
  306. inline int cam_soc_util_cesta_populate_crm_device(void)
  307. {
  308. CAM_ERR(CAM_UTIL, "Not supported");
  309. return -EOPNOTSUPP;
  310. }
  311. inline int cam_soc_util_cesta_channel_switch(uint32_t cesta_client_idx, const char *identifier)
  312. {
  313. CAM_ERR(CAM_UTIL, "Not supported, cesta_client_idx=%d, identifier=%s",
  314. cesta_client_idx, identifier);
  315. return -EOPNOTSUPP;
  316. }
  317. #endif
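/*
 * Sketch of the expected call order (identifier string is hypothetical):
 * cam_soc_util_cesta_populate_crm_device() must succeed once so that
 * cam_cesta_crm_dev is valid before any hw client triggers a channel switch:
 *
 *   if (!cam_soc_util_cesta_populate_crm_device())
 *           cam_soc_util_cesta_channel_switch(0, "example_hw_client");
 */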
  318. #if IS_ENABLED(CONFIG_QCOM_CRM) && IS_ENABLED(CONFIG_SPECTRA_USE_CLK_CRM_API)
  319. static int cam_soc_util_set_cesta_clk_rate(struct cam_hw_soc_info *soc_info,
  320. uint32_t cesta_client_idx, unsigned long high_val, unsigned long low_val,
  321. unsigned long *applied_high_val, unsigned long *applied_low_val)
  322. {
  323. int32_t src_clk_idx;
  324. struct clk *clk = NULL;
  325. int rc = 0;
  326. if (!soc_info || (soc_info->src_clk_idx < 0) ||
  327. (soc_info->src_clk_idx >= CAM_SOC_MAX_CLK)) {
  328. CAM_ERR(CAM_UTIL, "Invalid src_clk_idx: %d",
  329. soc_info ? soc_info->src_clk_idx : -1);
  330. return -EINVAL;
  331. }
  332. if (!CAM_IS_VALID_CESTA_IDX(cesta_client_idx)) {
  333. CAM_ERR(CAM_UTIL, "Invalid client index for camera cesta idx: %d max: %d",
  334. cesta_client_idx, CAM_CESTA_MAX_CLIENTS);
  335. return -EINVAL;
  336. }
  337. /* Only source clocks are supported by this API to set HW client clock votes */
  338. src_clk_idx = soc_info->src_clk_idx;
  339. clk = soc_info->clk[src_clk_idx];
  340. if (!skip_mmrm_set_rate && soc_info->mmrm_handle) {
  341. CAM_DBG(CAM_UTIL, "cesta mmrm hw client: set %s, high-rate %lld low-rate %lld",
  342. soc_info->clk_name[src_clk_idx], high_val, low_val);
  343. rc = cam_soc_util_set_hw_client_rate_through_mmrm(
  344. soc_info->mmrm_handle, low_val, high_val, 1,
  345. cesta_client_idx);
  346. if (rc) {
  347. CAM_ERR(CAM_UTIL,
  348. "set_sw_client_rate through mmrm failed on %s clk_id %d low_val %llu high_val %llu client idx=%d",
  349. soc_info->clk_name[src_clk_idx], soc_info->clk_id[src_clk_idx],
  350. low_val, high_val, cesta_client_idx);
  351. return rc;
  352. }
  353. goto end;
  354. }
  355. CAM_DBG(CAM_UTIL, "%s Requested clk rate [high low]: [%llu %llu] cesta_client_idx: %d",
  356. soc_info->clk_name[src_clk_idx], high_val, low_val, cesta_client_idx);
  357. rc = cam_wrapper_qcom_clk_crm_set_rate(
  358. clk, CRM_HW_DRV, cesta_client_idx, CRM_PWR_STATE1, high_val);
  359. if (rc) {
  360. CAM_ERR(CAM_UTIL,
  361. "Failed in setting cesta high clk rate, client idx: %u pwr state: %u clk_val: %llu rc: %d",
  362. cesta_client_idx, CRM_PWR_STATE1, high_val, rc);
  363. return rc;
  364. }
  365. rc = cam_wrapper_qcom_clk_crm_set_rate(
  366. clk, CRM_HW_DRV, cesta_client_idx, CRM_PWR_STATE0, low_val);
  367. if (rc) {
  368. CAM_ERR(CAM_UTIL,
  369. "Failed in setting cesta low clk rate, client idx: %u pwr state: %u clk_val: %llu rc: %d",
  370. cesta_client_idx, CRM_PWR_STATE0, low_val, rc);
  371. return rc;
  372. }
  373. end:
  374. if (applied_high_val)
  375. *applied_high_val = high_val;
  376. if (applied_low_val)
  377. *applied_low_val = low_val;
  378. return rc;
  379. }
  380. #if IS_REACHABLE(CONFIG_MSM_MMRM)
  381. int cam_soc_util_set_hw_client_rate_through_mmrm(
  382. void *mmrm_handle, long low_val, long high_val,
  383. uint32_t num_hw_blocks, int cesta_client_idx)
  384. {
  385. int rc = 0;
  386. struct mmrm_client_data client_data;
  387. client_data.num_hw_blocks = num_hw_blocks;
  388. client_data.crm_drv_idx = cesta_client_idx;
  389. client_data.drv_type = MMRM_CRM_HW_DRV;
  390. client_data.pwr_st = CRM_PWR_STATE1;
  391. client_data.flags = 0;
  392. CAM_DBG(CAM_UTIL,
  393. "hw client mmrm=%pK, high_val %ld, low_val %ld, num_blocks=%d, pwr_state: %u, client_idx: %d",
  394. mmrm_handle, high_val, low_val, num_hw_blocks, CRM_PWR_STATE1, cesta_client_idx);
  395. rc = mmrm_client_set_value((struct mmrm_client *)mmrm_handle,
  396. &client_data, high_val);
  397. if (rc) {
  398. CAM_ERR(CAM_UTIL, "Set high rate failed rate %ld rc %d",
  399. high_val, rc);
  400. return rc;
  401. }
  402. /* We vote a second time for pwr_st = low */
  403. client_data.pwr_st = CRM_PWR_STATE0;
  404. rc = mmrm_client_set_value((struct mmrm_client *)mmrm_handle,
  405. &client_data, low_val);
  406. if (rc)
  407. CAM_ERR(CAM_UTIL, "Set low rate failed rate %ld rc %d", low_val, rc);
  408. return rc;
  409. }
  410. #else
  411. int cam_soc_util_set_hw_client_rate_through_mmrm(
  412. void *mmrm_handle, long low_val, long high_val,
  413. uint32_t num_hw_blocks, int cesta_client_idx)
  414. {
  415. return 0;
  416. }
  417. #endif
  418. #else
  419. static inline int cam_soc_util_set_cesta_clk_rate(struct cam_hw_soc_info *soc_info,
  420. uint32_t cesta_client_idx, unsigned long high_val, unsigned long low_val,
  421. unsigned long *applied_high_val, unsigned long *applied_low_val)
  422. {
  423. CAM_ERR(CAM_UTIL, "Not supported, dev=%s, cesta_client_idx=%d, high_val=%ld, low_val=%ld",
  424. soc_info->dev_name, cesta_client_idx, high_val, low_val);
  425. return -EOPNOTSUPP;
  426. }
  427. #endif
  428. #if IS_REACHABLE(CONFIG_MSM_MMRM)
  429. bool cam_is_mmrm_supported_on_current_chip(void)
  430. {
  431. bool is_supported;
  432. is_supported = mmrm_client_check_scaling_supported(MMRM_CLIENT_CLOCK,
  433. MMRM_CLIENT_DOMAIN_CAMERA);
  434. CAM_DBG(CAM_UTIL, "is mmrm supported: %s",
  435. CAM_BOOL_TO_YESNO(is_supported));
  436. return is_supported;
  437. }
  438. int cam_mmrm_notifier_callback(
  439. struct mmrm_client_notifier_data *notifier_data)
  440. {
  441. if (!notifier_data) {
  442. CAM_ERR(CAM_UTIL, "Invalid notifier data");
  443. return -EBADR;
  444. }
  445. if (notifier_data->cb_type == MMRM_CLIENT_RESOURCE_VALUE_CHANGE) {
  446. struct cam_hw_soc_info *soc_info = notifier_data->pvt_data;
  447. CAM_WARN(CAM_UTIL, "Dev %s Clk %s value change from %ld to %ld",
  448. soc_info->dev_name,
  449. (soc_info->src_clk_idx == -1) ? "No src clk" :
  450. soc_info->clk_name[soc_info->src_clk_idx],
  451. notifier_data->cb_data.val_chng.old_val,
  452. notifier_data->cb_data.val_chng.new_val);
  453. }
  454. return 0;
  455. }
  456. int cam_soc_util_register_mmrm_client(
  457. uint32_t clk_id, struct clk *clk, bool is_nrt_dev,
  458. struct cam_hw_soc_info *soc_info, const char *clk_name,
  459. void **mmrm_handle)
  460. {
  461. struct mmrm_client *mmrm_client;
  462. struct mmrm_client_desc desc = { };
  463. if (!mmrm_handle) {
  464. CAM_ERR(CAM_UTIL, "Invalid mmrm input");
  465. return -EINVAL;
  466. }
  467. *mmrm_handle = (void *)NULL;
  468. if (debug_bypass_drivers & CAM_BYPASS_CLKS) {
  469. CAM_WARN(CAM_UTIL, "Bypass register mmrm client");
  470. return 0;
  471. }
  472. if (!cam_is_mmrm_supported_on_current_chip())
  473. return 0;
  474. desc.client_type = MMRM_CLIENT_CLOCK;
  475. desc.client_info.desc.client_domain = MMRM_CLIENT_DOMAIN_CAMERA;
  476. desc.client_info.desc.client_id = clk_id;
  477. desc.client_info.desc.clk = clk;
  478. #if IS_ENABLED(CONFIG_QCOM_CRM) && IS_ENABLED(CONFIG_SPECTRA_USE_CLK_CRM_API)
  479. if (soc_info->is_clk_drv_en) {
  480. desc.client_info.desc.hw_drv_instances = CAM_CESTA_MAX_CLIENTS;
  481. desc.client_info.desc.num_pwr_states = CAM_NUM_PWR_STATES;
  482. } else {
  483. desc.client_info.desc.hw_drv_instances = 0;
  484. desc.client_info.desc.num_pwr_states = 0;
  485. }
  486. #endif
  487. snprintf((char *)desc.client_info.desc.name,
  488. sizeof(desc.client_info.desc.name), "%s_%s",
  489. soc_info->dev_name, clk_name);
  490. desc.priority = is_nrt_dev ?
  491. MMRM_CLIENT_PRIOR_LOW : MMRM_CLIENT_PRIOR_HIGH;
  492. desc.pvt_data = soc_info;
  493. desc.notifier_callback_fn = cam_mmrm_notifier_callback;
  494. mmrm_client = mmrm_client_register(&desc);
  495. if (!mmrm_client) {
  496. CAM_ERR(CAM_UTIL, "MMRM Register failed Dev %s clk %s id %d",
  497. soc_info->dev_name, clk_name, clk_id);
  498. return -EINVAL;
  499. }
  500. CAM_DBG(CAM_UTIL,
  501. "MMRM Register success Dev %s is_nrt_dev %d clk %s id %d handle=%pK",
  502. soc_info->dev_name, is_nrt_dev, clk_name, clk_id, mmrm_client);
  503. *mmrm_handle = (void *)mmrm_client;
  504. return 0;
  505. }
  506. int cam_soc_util_unregister_mmrm_client(
  507. void *mmrm_handle)
  508. {
  509. int rc = 0;
  510. CAM_DBG(CAM_UTIL, "MMRM UnRegister handle=%pK", mmrm_handle);
  511. if (mmrm_handle) {
  512. rc = mmrm_client_deregister((struct mmrm_client *)mmrm_handle);
  513. if (rc)
  514. CAM_ERR(CAM_UTIL,
  515. "Failed in deregister handle=%pK, rc %d",
  516. mmrm_handle, rc);
  517. }
  518. return rc;
  519. }
  520. static int cam_soc_util_set_sw_client_rate_through_mmrm(
  521. void *mmrm_handle, bool is_nrt_dev, long min_rate,
  522. long req_rate, uint32_t num_hw_blocks)
  523. {
  524. int rc = 0;
  525. struct mmrm_client_data client_data;
  526. struct mmrm_client_res_value val;
  527. client_data.num_hw_blocks = num_hw_blocks;
  528. client_data.flags = 0;
  529. #if IS_ENABLED(CONFIG_QCOM_CRM) && IS_ENABLED(CONFIG_SPECTRA_USE_CLK_CRM_API)
  530. client_data.drv_type = MMRM_CRM_SW_DRV;
  531. #endif
  532. CAM_DBG(CAM_UTIL,
  533. "sw client mmrm=%pK, nrt=%d, min_rate=%ld req_rate %ld, num_blocks=%d",
  534. mmrm_handle, is_nrt_dev, min_rate, req_rate, num_hw_blocks);
  535. if (is_nrt_dev) {
  536. val.min = min_rate;
  537. val.cur = req_rate;
  538. rc = mmrm_client_set_value_in_range(
  539. (struct mmrm_client *)mmrm_handle, &client_data, &val);
  540. } else {
  541. rc = mmrm_client_set_value(
  542. (struct mmrm_client *)mmrm_handle,
  543. &client_data, req_rate);
  544. }
  545. if (rc)
  546. CAM_ERR(CAM_UTIL, "Set rate failed rate %ld rc %d",
  547. req_rate, rc);
  548. return rc;
  549. }
  550. #else
  551. int cam_soc_util_register_mmrm_client(
  552. uint32_t clk_id, struct clk *clk, bool is_nrt_dev,
  553. struct cam_hw_soc_info *soc_info, const char *clk_name,
  554. void **mmrm_handle)
  555. {
  556. if (!mmrm_handle) {
  557. CAM_ERR(CAM_UTIL, "Invalid mmrm input");
  558. return -EINVAL;
  559. }
  560. *mmrm_handle = NULL;
  561. return 0;
  562. }
  563. int cam_soc_util_unregister_mmrm_client(
  564. void *mmrm_handle)
  565. {
  566. return 0;
  567. }
  568. static int cam_soc_util_set_sw_client_rate_through_mmrm(
  569. void *mmrm_handle, bool is_nrt_dev, long min_rate,
  570. long req_rate, uint32_t num_hw_blocks)
  571. {
  572. return 0;
  573. }
  574. #endif
  575. static int cam_soc_util_clk_wrapper_register_entry(
  576. uint32_t clk_id, struct clk *clk, bool is_src_clk,
  577. struct cam_hw_soc_info *soc_info, int64_t min_clk_rate,
  578. const char *clk_name)
  579. {
  580. struct cam_clk_wrapper_clk *wrapper_clk;
  581. struct cam_clk_wrapper_client *wrapper_client;
  582. bool clock_found = false;
  583. int rc = 0;
  584. mutex_lock(&wrapper_lock);
  585. list_for_each_entry(wrapper_clk, &wrapper_clk_list, list) {
  586. CAM_DBG(CAM_UTIL, "Clk list id %d num clients %d",
  587. wrapper_clk->clk_id, wrapper_clk->num_clients);
  588. if (wrapper_clk->clk_id == clk_id) {
  589. clock_found = true;
  590. list_for_each_entry(wrapper_client,
  591. &wrapper_clk->client_list, list) {
  592. CAM_DBG(CAM_UTIL,
  593. "Clk id %d entry client %s",
  594. wrapper_clk->clk_id,
  595. wrapper_client->soc_info->dev_name);
  596. if (wrapper_client->soc_info == soc_info) {
  597. CAM_ERR(CAM_UTIL,
  598. "Register with same soc info, clk id %d, client %s",
  599. clk_id, soc_info->dev_name);
  600. rc = -EINVAL;
  601. goto end;
  602. }
  603. }
  604. break;
  605. }
  606. }
  607. if (!clock_found) {
  608. CAM_DBG(CAM_UTIL, "Adding new entry for clk id %d", clk_id);
  609. wrapper_clk = kzalloc(sizeof(struct cam_clk_wrapper_clk),
  610. GFP_KERNEL);
  611. if (!wrapper_clk) {
  612. CAM_ERR(CAM_UTIL,
  613. "Failed in allocating new clk entry %d",
  614. clk_id);
  615. rc = -ENOMEM;
  616. goto end;
  617. }
  618. wrapper_clk->clk_id = clk_id;
  619. INIT_LIST_HEAD(&wrapper_clk->list);
  620. INIT_LIST_HEAD(&wrapper_clk->client_list);
  621. list_add_tail(&wrapper_clk->list, &wrapper_clk_list);
  622. }
  623. wrapper_client = kzalloc(sizeof(struct cam_clk_wrapper_client),
  624. GFP_KERNEL);
  625. if (!wrapper_client) {
  626. CAM_ERR(CAM_UTIL, "Failed in allocating new client entry %d",
  627. clk_id);
  628. rc = -ENOMEM;
  629. goto end;
  630. }
  631. wrapper_client->soc_info = soc_info;
  632. wrapper_client->clk = clk;
  633. if (is_src_clk && !wrapper_clk->mmrm_handle) {
  634. wrapper_clk->is_nrt_dev = soc_info->is_nrt_dev;
  635. wrapper_clk->min_clk_rate = min_clk_rate;
  636. wrapper_clk->soc_info = soc_info;
  637. rc = cam_soc_util_register_mmrm_client(clk_id, clk,
  638. wrapper_clk->is_nrt_dev, soc_info, clk_name,
  639. &wrapper_clk->mmrm_handle);
  640. if (rc) {
  641. CAM_ERR(CAM_UTIL,
  642. "Failed in register mmrm client Dev %s clk id %d",
  643. soc_info->dev_name, clk_id);
  644. kfree(wrapper_client);
  645. goto end;
  646. }
  647. }
  648. INIT_LIST_HEAD(&wrapper_client->list);
  649. list_add_tail(&wrapper_client->list, &wrapper_clk->client_list);
  650. wrapper_clk->num_clients++;
  651. CAM_DBG(CAM_UTIL,
  652. "Adding new client %s for clk[%s] id %d, num clients %d",
  653. soc_info->dev_name, clk_name, clk_id, wrapper_clk->num_clients);
  654. end:
  655. mutex_unlock(&wrapper_lock);
  656. return rc;
  657. }
  658. static int cam_soc_util_clk_wrapper_unregister_entry(
  659. uint32_t clk_id, struct cam_hw_soc_info *soc_info)
  660. {
  661. struct cam_clk_wrapper_clk *wrapper_clk;
  662. struct cam_clk_wrapper_client *wrapper_client;
  663. bool clock_found = false;
  664. bool client_found = false;
  665. int rc = 0;
  666. mutex_lock(&wrapper_lock);
  667. list_for_each_entry(wrapper_clk, &wrapper_clk_list, list) {
  668. CAM_DBG(CAM_UTIL, "Clk list id %d num clients %d",
  669. wrapper_clk->clk_id, wrapper_clk->num_clients);
  670. if (wrapper_clk->clk_id == clk_id) {
  671. clock_found = true;
  672. list_for_each_entry(wrapper_client,
  673. &wrapper_clk->client_list, list) {
  674. CAM_DBG(CAM_UTIL, "Clk id %d entry client %s",
  675. wrapper_clk->clk_id,
  676. wrapper_client->soc_info->dev_name);
  677. if (wrapper_client->soc_info == soc_info) {
  678. client_found = true;
  679. break;
  680. }
  681. }
  682. break;
  683. }
  684. }
  685. if (!clock_found) {
  686. CAM_ERR(CAM_UTIL, "Shared clk id %d entry not found", clk_id);
  687. rc = -EINVAL;
  688. goto end;
  689. }
  690. if (!client_found) {
  691. CAM_ERR(CAM_UTIL,
  692. "Client %pK for Shared clk id %d entry not found",
  693. soc_info, clk_id);
  694. rc = -EINVAL;
  695. goto end;
  696. }
  697. wrapper_clk->num_clients--;
  698. if (wrapper_clk->mmrm_handle && (wrapper_clk->soc_info == soc_info)) {
  699. cam_soc_util_unregister_mmrm_client(wrapper_clk->mmrm_handle);
  700. wrapper_clk->mmrm_handle = NULL;
  701. wrapper_clk->soc_info = NULL;
  702. }
  703. list_del_init(&wrapper_client->list);
  704. kfree(wrapper_client);
  705. CAM_DBG(CAM_UTIL, "Unregister client %s for clk id %d, num clients %d",
  706. soc_info->dev_name, clk_id, wrapper_clk->num_clients);
  707. if (!wrapper_clk->num_clients) {
  708. list_del_init(&wrapper_clk->list);
  709. kfree(wrapper_clk);
  710. }
  711. end:
  712. mutex_unlock(&wrapper_lock);
  713. return rc;
  714. }
  715. static int cam_soc_util_clk_wrapper_set_clk_rate(
  716. uint32_t clk_id, struct cam_hw_soc_info *soc_info,
  717. struct clk *clk, int64_t clk_rate)
  718. {
  719. struct cam_clk_wrapper_clk *wrapper_clk;
  720. struct cam_clk_wrapper_client *wrapper_client;
  721. bool clk_found = false;
  722. bool client_found = false;
  723. int rc = 0;
  724. int64_t final_clk_rate = 0;
  725. uint32_t active_clients = 0;
  726. if (!soc_info || !clk) {
  727. CAM_ERR(CAM_UTIL, "Invalid param soc_info %pK clk %pK",
  728. soc_info, clk);
  729. return -EINVAL;
  730. }
  731. mutex_lock(&wrapper_lock);
  732. list_for_each_entry(wrapper_clk, &wrapper_clk_list, list) {
  733. CAM_DBG(CAM_UTIL, "Clk list id %d num clients %d",
  734. wrapper_clk->clk_id, wrapper_clk->num_clients);
  735. if (wrapper_clk->clk_id == clk_id) {
  736. clk_found = true;
  737. break;
  738. }
  739. }
  740. if (!clk_found) {
  741. CAM_ERR(CAM_UTIL, "Clk entry not found id %d client %s",
  742. clk_id, soc_info->dev_name);
  743. rc = -EINVAL;
  744. goto end;
  745. }
  746. list_for_each_entry(wrapper_client, &wrapper_clk->client_list, list) {
  747. CAM_DBG(CAM_UTIL, "Clk id %d client %s, clk rate %lld",
  748. wrapper_clk->clk_id, wrapper_client->soc_info->dev_name,
  749. wrapper_client->curr_clk_rate);
  750. if (wrapper_client->soc_info == soc_info) {
  751. client_found = true;
  752. CAM_DBG(CAM_UTIL,
  753. "Clk enable clk id %d, client %s curr %ld new %ld",
  754. clk_id, wrapper_client->soc_info->dev_name,
  755. wrapper_client->curr_clk_rate, clk_rate);
  756. wrapper_client->curr_clk_rate = clk_rate;
  757. }
  758. if (wrapper_client->curr_clk_rate > 0)
  759. active_clients++;
  760. if (final_clk_rate < wrapper_client->curr_clk_rate)
  761. final_clk_rate = wrapper_client->curr_clk_rate;
  762. }
  763. if (!client_found) {
  764. CAM_ERR(CAM_UTIL,
  765. "Wrapper clk enable without client entry clk id %d client %s",
  766. clk_id, soc_info->dev_name);
  767. rc = -EINVAL;
  768. goto end;
  769. }
  770. CAM_DBG(CAM_UTIL,
  771. "Clk id %d, client %s, clients rate %ld, curr %ld final %ld",
  772. wrapper_clk->clk_id, soc_info->dev_name, clk_rate,
  773. wrapper_clk->curr_clk_rate, final_clk_rate);
  774. if ((final_clk_rate != wrapper_clk->curr_clk_rate) ||
  775. (active_clients != wrapper_clk->active_clients)) {
  776. bool set_rate_finish = false;
  777. if (!skip_mmrm_set_rate && wrapper_clk->mmrm_handle) {
  778. rc = cam_soc_util_set_sw_client_rate_through_mmrm(
  779. wrapper_clk->mmrm_handle,
  780. wrapper_clk->is_nrt_dev,
  781. wrapper_clk->min_clk_rate,
  782. final_clk_rate, active_clients);
  783. if (rc) {
  784. CAM_ERR(CAM_UTIL,
  785. "set_sw_client_rate through mmrm failed clk_id %d, rate=%ld",
  786. wrapper_clk->clk_id, final_clk_rate);
  787. goto end;
  788. }
  789. set_rate_finish = true;
  790. }
  791. if (!set_rate_finish && final_clk_rate &&
  792. (final_clk_rate != wrapper_clk->curr_clk_rate)) {
  793. rc = cam_wrapper_clk_set_rate(clk, final_clk_rate);
  794. if (rc) {
  795. CAM_ERR(CAM_UTIL, "set_rate failed on clk %d",
  796. wrapper_clk->clk_id);
  797. goto end;
  798. }
  799. }
  800. wrapper_clk->curr_clk_rate = final_clk_rate;
  801. wrapper_clk->active_clients = active_clients;
  802. }
  803. end:
  804. mutex_unlock(&wrapper_lock);
  805. return rc;
  806. }
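/*
 * Worked example of the consolidation above (rates and client names are
 * illustrative): if client A last voted 400000000 and client B now calls
 *
 *   cam_soc_util_clk_wrapper_set_clk_rate(clk_id, soc_info_b, clk_b, 600000000);
 *
 * final_clk_rate becomes 600000000 with two active clients; if B later votes
 * 0, the loop recomputes final_clk_rate as 400000000 and the shared clock is
 * lowered. The clk framework (or MMRM, when a handle exists and
 * skip_mmrm_set_rate is unset) is only called when the consolidated rate or
 * the active-client count actually changes.
 */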
  807. int cam_soc_util_get_clk_level(struct cam_hw_soc_info *soc_info,
  808. int64_t clk_rate, int clk_idx, int32_t *clk_lvl)
  809. {
  810. int i;
  811. long clk_rate_round;
  812. if (!soc_info || (clk_idx < 0) || (clk_idx >= CAM_SOC_MAX_CLK)) {
  813. CAM_ERR(CAM_UTIL, "Invalid src_clk_idx: %d", clk_idx);
  814. *clk_lvl = -1;
  815. return -EINVAL;
  816. }
  817. clk_rate_round = cam_wrapper_clk_round_rate(
  818. soc_info->clk[clk_idx], clk_rate);
  819. if (clk_rate_round < 0) {
  820. CAM_ERR(CAM_UTIL, "round failed rc = %ld",
  821. clk_rate_round);
  822. *clk_lvl = -1;
  823. return -EINVAL;
  824. }
  825. if (debug_bypass_drivers & CAM_BYPASS_CLKS) {
  826. CAM_WARN(CAM_UTIL, "Bypass get clk level");
  827. *clk_lvl = CAM_NOMINAL_VOTE;
  828. return 0;
  829. }
  830. for (i = 0; i < CAM_MAX_VOTE; i++) {
  831. if ((soc_info->clk_level_valid[i]) &&
  832. (soc_info->clk_rate[i][clk_idx] >=
  833. clk_rate_round)) {
  834. CAM_DBG(CAM_UTIL,
  835. "soc = %d round rate = %ld actual = %lld",
  836. soc_info->clk_rate[i][clk_idx],
  837. clk_rate_round, clk_rate);
  838. *clk_lvl = i;
  839. return 0;
  840. }
  841. }
  842. CAM_WARN(CAM_UTIL, "Invalid clock rate %ld", clk_rate_round);
  843. *clk_lvl = -1;
  844. return -EINVAL;
  845. }
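/*
 * Illustrative mapping (table rates hypothetical): with
 * clk_rate[CAM_SVS_VOTE][idx] = 400000000 and
 * clk_rate[CAM_NOMINAL_VOTE][idx] = 600000000, a request such as
 *
 *   int32_t lvl;
 *   cam_soc_util_get_clk_level(soc_info, 480000000, idx, &lvl);
 *
 * first rounds the rate through the clk driver and then picks the lowest
 * valid level whose table entry is >= the rounded value; assuming the driver
 * rounds 480 MHz above the SVS entry, lvl comes back as CAM_NOMINAL_VOTE.
 */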
  846. const char *cam_soc_util_get_string_from_level(enum cam_vote_level level)
  847. {
  848. switch (level) {
  849. case CAM_SUSPEND_VOTE:
  850. return "";
  851. case CAM_MINSVS_VOTE:
  852. return "MINSVS[1]";
  853. case CAM_LOWSVS_D1_VOTE:
  854. return "LOWSVSD1[2]";
  855. case CAM_LOWSVS_VOTE:
  856. return "LOWSVS[3]";
  857. case CAM_SVS_VOTE:
  858. return "SVS[4]";
  859. case CAM_SVSL1_VOTE:
  860. return "SVSL1[5]";
  861. case CAM_NOMINAL_VOTE:
  862. return "NOM[6]";
  863. case CAM_NOMINALL1_VOTE:
  864. return "NOML1[7]";
  865. case CAM_TURBO_VOTE:
  866. return "TURBO[8]";
  867. default:
  868. return "";
  869. }
  870. }
  871. /**
  872. * cam_soc_util_get_supported_clk_levels()
  873. *
  874. * @brief: Returns the string of all the supported clk levels for
  875. * the given device
  876. *
  877. * @soc_info: Device soc information
  878. *
  879. * @return: String containing all supported clk levels
  880. */
  881. static const char *cam_soc_util_get_supported_clk_levels(
  882. struct cam_hw_soc_info *soc_info)
  883. {
  884. int i = 0;
  885. scnprintf(supported_clk_info, sizeof(supported_clk_info), "Supported levels: ");
  886. for (i = 0; i < CAM_MAX_VOTE; i++) {
  887. if (soc_info->clk_level_valid[i] == true) {
  888. strlcat(supported_clk_info,
  889. cam_soc_util_get_string_from_level(i),
  890. sizeof(supported_clk_info));
  891. strlcat(supported_clk_info, " ",
  892. sizeof(supported_clk_info));
  893. }
  894. }
  895. strlcat(supported_clk_info, "\n", sizeof(supported_clk_info));
  896. return supported_clk_info;
  897. }
  898. static int cam_soc_util_clk_lvl_options_open(struct inode *inode,
  899. struct file *file)
  900. {
  901. file->private_data = inode->i_private;
  902. return 0;
  903. }
  904. static ssize_t cam_soc_util_clk_lvl_options_read(struct file *file,
  905. char __user *clk_info, size_t size, loff_t *ppos)
  906. {
  907. struct cam_hw_soc_info *soc_info =
  908. (struct cam_hw_soc_info *)file->private_data;
  909. const char *display_string =
  910. cam_soc_util_get_supported_clk_levels(soc_info);
  911. return simple_read_from_buffer(clk_info, size, ppos, display_string,
  912. strlen(display_string));
  913. }
  914. static const struct file_operations cam_soc_util_clk_lvl_options = {
  915. .open = cam_soc_util_clk_lvl_options_open,
  916. .read = cam_soc_util_clk_lvl_options_read,
  917. };
  918. static int cam_soc_util_set_clk_lvl_override(void *data, u64 val)
  919. {
  920. struct cam_hw_soc_info *soc_info = (struct cam_hw_soc_info *)data;
  921. if ((val <= CAM_SUSPEND_VOTE) || (val >= CAM_MAX_VOTE)) {
  922. CAM_WARN(CAM_UTIL, "Invalid clk lvl override %llu", val);
  923. return 0;
  924. }
  925. if (soc_info->clk_level_valid[val])
  926. soc_info->clk_level_override_high = val;
  927. else
  928. soc_info->clk_level_override_high = 0;
  929. return 0;
  930. }
  931. static int cam_soc_util_get_clk_lvl_override(void *data, u64 *val)
  932. {
  933. struct cam_hw_soc_info *soc_info = (struct cam_hw_soc_info *)data;
  934. *val = soc_info->clk_level_override_high;
  935. return 0;
  936. }
  937. static int cam_soc_util_set_clk_lvl_override_low(void *data, u64 val)
  938. {
  939. struct cam_hw_soc_info *soc_info = (struct cam_hw_soc_info *)data;
  940. if ((val <= CAM_SUSPEND_VOTE) || (val >= CAM_MAX_VOTE)) {
  941. CAM_WARN(CAM_UTIL, "Invalid clk lvl override %llu", val);
  942. return 0;
  943. }
  944. if (soc_info->clk_level_valid[val])
  945. soc_info->clk_level_override_low = val;
  946. else
  947. soc_info->clk_level_override_low = 0;
  948. return 0;
  949. }
  950. static int cam_soc_util_get_clk_lvl_override_low(void *data, u64 *val)
  951. {
  952. struct cam_hw_soc_info *soc_info = (struct cam_hw_soc_info *)data;
  953. *val = soc_info->clk_level_override_low;
  954. return 0;
  955. }
  956. DEFINE_SIMPLE_ATTRIBUTE(cam_soc_util_clk_lvl_control,
  957. cam_soc_util_get_clk_lvl_override, cam_soc_util_set_clk_lvl_override, "%08llu");
  958. DEFINE_SIMPLE_ATTRIBUTE(cam_soc_util_clk_lvl_control_low,
  959. cam_soc_util_get_clk_lvl_override_low, cam_soc_util_set_clk_lvl_override_low, "%08llu");
  960. /**
  961. * cam_soc_util_create_clk_lvl_debugfs()
  962. *
  963. * @brief: Creates debugfs files to view/control device clk rates
  964. *
  965. * @soc_info: Device soc information
  966. *
  967. * @return: Success or failure
  968. */
  969. static int cam_soc_util_create_clk_lvl_debugfs(struct cam_hw_soc_info *soc_info)
  970. {
  971. int rc = 0;
  972. struct dentry *clkdirptr = NULL;
  973. if (!cam_debugfs_available())
  974. return 0;
  975. if (soc_info->dentry) {
  976. CAM_DBG(CAM_UTIL, "Debugfs entry for %s already exists",
  977. soc_info->dev_name);
  978. goto end;
  979. }
  980. rc = cam_debugfs_lookup_subdir(CAM_CLK_DIRNAME, &clkdirptr);
  981. if (rc) {
  982. rc = cam_debugfs_create_subdir(CAM_CLK_DIRNAME, &clkdirptr);
  983. if (rc) {
  984. CAM_ERR(CAM_UTIL, "DebugFS could not create clk directory!");
  985. rc = -ENOENT;
  986. goto end;
  987. }
  988. }
  989. soc_info->dentry = debugfs_create_dir(soc_info->dev_name, clkdirptr);
  990. if (IS_ERR_OR_NULL(soc_info->dentry)) {
  991. CAM_ERR(CAM_UTIL, "DebugFS could not create directory for dev:%s!",
  992. soc_info->dev_name);
  993. rc = -ENOENT;
  994. goto end;
  995. }
  996. /* Expose clk level view/control files under this device's debugfs dir */
  997. debugfs_create_file("clk_lvl_options", 0444,
  998. soc_info->dentry, soc_info, &cam_soc_util_clk_lvl_options);
  999. debugfs_create_file("clk_lvl_control", 0644,
  1000. soc_info->dentry, soc_info, &cam_soc_util_clk_lvl_control);
  1001. debugfs_create_file("clk_lvl_control_low", 0644,
  1002. soc_info->dentry, soc_info, &cam_soc_util_clk_lvl_control_low);
  1003. end:
  1004. return rc;
  1005. }
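/*
 * Usage sketch for the debugfs nodes created above (the debugfs root for the
 * camera subdirectories depends on how cam_debugfs is set up on the target):
 *
 *   cat  <cam-debugfs-root>/clk/<dev_name>/clk_lvl_options
 *   echo 6 > <cam-debugfs-root>/clk/<dev_name>/clk_lvl_control
 *
 * Reading clk_lvl_options lists the valid levels; writing a level index
 * (6 == NOM per cam_soc_util_get_string_from_level()) stores it in
 * clk_level_override_high, and clk_lvl_control_low does the same for
 * clk_level_override_low. Writing an in-range index whose clk_level_valid[]
 * entry is false clears the override back to 0.
 */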
  1006. int cam_soc_util_get_level_from_string(const char *string,
  1007. enum cam_vote_level *level)
  1008. {
  1009. if (!level)
  1010. return -EINVAL;
  1011. if (!strcmp(string, "suspend")) {
  1012. *level = CAM_SUSPEND_VOTE;
  1013. } else if (!strcmp(string, "minsvs")) {
  1014. *level = CAM_MINSVS_VOTE;
  1015. } else if (!strcmp(string, "lowsvsd1")) {
  1016. *level = CAM_LOWSVS_D1_VOTE;
  1017. } else if (!strcmp(string, "lowsvs")) {
  1018. *level = CAM_LOWSVS_VOTE;
  1019. } else if (!strcmp(string, "svs")) {
  1020. *level = CAM_SVS_VOTE;
  1021. } else if (!strcmp(string, "svs_l1")) {
  1022. *level = CAM_SVSL1_VOTE;
  1023. } else if (!strcmp(string, "nominal")) {
  1024. *level = CAM_NOMINAL_VOTE;
  1025. } else if (!strcmp(string, "nominal_l1")) {
  1026. *level = CAM_NOMINALL1_VOTE;
  1027. } else if (!strcmp(string, "turbo")) {
  1028. *level = CAM_TURBO_VOTE;
  1029. } else {
  1030. CAM_ERR(CAM_UTIL, "Invalid string %s", string);
  1031. return -EINVAL;
  1032. }
  1033. return 0;
  1034. }
  1035. /**
  1036. * cam_soc_util_get_clk_level_to_apply()
  1037. *
  1038. * @brief: Get the clock level to apply. If the requested level
  1039. * is not valid, bump the level to next available valid
  1040. * level. If no higher level found, return failure.
  1041. *
  1042. * @soc_info: Device soc struct to be populated
  1043. * @req_level: Requested level
  1044. * @apply_level: Level to apply
  1045. *
  1046. * @return: success or failure
  1047. */
  1048. static int cam_soc_util_get_clk_level_to_apply(
  1049. struct cam_hw_soc_info *soc_info, enum cam_vote_level req_level,
  1050. enum cam_vote_level *apply_level)
  1051. {
  1052. if (req_level >= CAM_MAX_VOTE) {
  1053. CAM_ERR(CAM_UTIL, "Invalid clock level parameter %d",
  1054. req_level);
  1055. return -EINVAL;
  1056. }
  1057. if (soc_info->clk_level_valid[req_level] == true) {
  1058. *apply_level = req_level;
  1059. } else {
  1060. int i;
  1061. for (i = (req_level + 1); i < CAM_MAX_VOTE; i++)
  1062. if (soc_info->clk_level_valid[i] == true) {
  1063. *apply_level = i;
  1064. break;
  1065. }
  1066. if (i == CAM_MAX_VOTE) {
  1067. CAM_ERR(CAM_UTIL,
  1068. "No valid clock level found to apply, req=%d",
  1069. req_level);
  1070. return -EINVAL;
  1071. }
  1072. }
  1073. CAM_DBG(CAM_UTIL, "Req level %s, Applying %s",
  1074. cam_soc_util_get_string_from_level(req_level),
  1075. cam_soc_util_get_string_from_level(*apply_level));
  1076. return 0;
  1077. }
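/*
 * Illustration of the bump-up behaviour (validity flags hypothetical): if
 * only SVS and NOM are marked valid for a device and the caller requests
 * LOWSVS,
 *
 *   enum cam_vote_level apply;
 *   cam_soc_util_get_clk_level_to_apply(soc_info, CAM_LOWSVS_VOTE, &apply);
 *
 * returns 0 with apply == CAM_SVS_VOTE; -EINVAL is returned only when no
 * level at or above the request is valid.
 */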
  1078. int cam_soc_util_irq_enable(struct cam_hw_soc_info *soc_info)
  1079. {
  1080. int i, rc = 0;
  1081. if (!soc_info) {
  1082. CAM_ERR(CAM_UTIL, "Invalid arguments");
  1083. return -EINVAL;
  1084. }
  1085. for (i = 0; i < soc_info->irq_count; i++) {
  1086. if (soc_info->irq_num[i] < 0) {
  1087. CAM_ERR(CAM_UTIL, "No IRQ line available for irq: %s dev: %s",
  1088. soc_info->irq_name[i], soc_info->dev_name);
  1089. rc = -ENODEV;
  1090. goto disable_irq;
  1091. }
  1092. enable_irq(soc_info->irq_num[i]);
  1093. }
  1094. return rc;
  1095. disable_irq:
  1096. for (i = i - 1; i >= 0; i--)
  1097. disable_irq(soc_info->irq_num[i]);
  1098. return rc;
  1099. }
  1100. int cam_soc_util_irq_disable(struct cam_hw_soc_info *soc_info)
  1101. {
  1102. int i, rc = 0;
  1103. if (!soc_info) {
  1104. CAM_ERR(CAM_UTIL, "Invalid arguments");
  1105. return -EINVAL;
  1106. }
  1107. for (i = 0; i < soc_info->irq_count; i++) {
  1108. if (soc_info->irq_num[i] < 0) {
  1109. CAM_ERR(CAM_UTIL, "No IRQ line available for irq: %s dev: %s",
  1110. soc_info->irq_name[i], soc_info->dev_name);
  1111. rc = -ENODEV;
  1112. continue;
  1113. }
  1114. disable_irq(soc_info->irq_num[i]);
  1115. }
  1116. return rc;
  1117. }
  1118. long cam_soc_util_get_clk_round_rate(struct cam_hw_soc_info *soc_info,
  1119. uint32_t clk_index, unsigned long clk_rate)
  1120. {
  1121. if (!soc_info || (clk_index >= soc_info->num_clk) || (clk_rate == 0)) {
  1122. CAM_ERR(CAM_UTIL, "Invalid input params %pK, %d %lu",
  1123. soc_info, clk_index, clk_rate);
  1124. return clk_rate;
  1125. }
  1126. return cam_wrapper_clk_round_rate(soc_info->clk[clk_index], clk_rate);
  1127. }
  1128. /**
  1129. * cam_soc_util_set_clk_rate()
  1130. *
  1131. * @brief: Sets the given rate for the clk requested for
  1132. *
  1133. * @clk: Clock structure information for which rate is to be set
  1134. * @clk_name: Name of the clock for which rate is being set
  1135. * @clk_rate: Clock rate to be set
  1136. * @shared_clk: Whether this is a shared clk
  1137. * @is_src_clk: Whether this is source clk
  1138. * @clk_id: Clock ID
  1139. * @applied_clk_rate: Final clock rate set to the clk
  1140. *
  1141. * @return: Success or failure
  1142. */
  1143. static int cam_soc_util_set_clk_rate(struct cam_hw_soc_info *soc_info,
  1144. struct clk *clk, const char *clk_name,
  1145. int64_t clk_rate, bool shared_clk, bool is_src_clk, uint32_t clk_id,
  1146. unsigned long *applied_clk_rate)
  1147. {
  1148. int rc = 0;
  1149. long clk_rate_round = -1;
  1150. bool set_rate = false;
  1151. if (!clk_name) {
  1152. CAM_ERR(CAM_UTIL, "Invalid input clk %pK clk_name %pK",
  1153. clk, clk_name);
  1154. return -EINVAL;
  1155. }
  1156. CAM_DBG(CAM_UTIL, "set %s, rate %lld", clk_name, clk_rate);
  1157. if (!clk)
  1158. return 0;
  1159. if (clk_rate > 0) {
  1160. clk_rate_round = cam_wrapper_clk_round_rate(clk, clk_rate);
  1161. CAM_DBG(CAM_UTIL, "new_rate %ld", clk_rate_round);
  1162. if (clk_rate_round < 0) {
  1163. CAM_ERR(CAM_UTIL, "round failed for clock %s rc = %ld",
  1164. clk_name, clk_rate_round);
  1165. return clk_rate_round;
  1166. }
  1167. set_rate = true;
  1168. } else if (clk_rate == INIT_RATE) {
  1169. clk_rate_round = cam_wrapper_clk_get_rate(clk);
  1170. CAM_DBG(CAM_UTIL, "init new_rate %ld", clk_rate_round);
  1171. if (clk_rate_round == 0) {
  1172. clk_rate_round = cam_wrapper_clk_round_rate(clk, 0);
  1173. if (clk_rate_round <= 0) {
  1174. CAM_ERR(CAM_UTIL, "round rate failed on %s",
  1175. clk_name);
  1176. return clk_rate_round;
  1177. }
  1178. }
  1179. set_rate = true;
  1180. }
  1181. if (set_rate) {
  1182. if (shared_clk) {
  1183. CAM_DBG(CAM_UTIL,
  1184. "Dev %s clk %s id %d Set Shared clk %ld",
  1185. soc_info->dev_name, clk_name, clk_id,
  1186. clk_rate_round);
  1187. cam_soc_util_clk_wrapper_set_clk_rate(
  1188. clk_id, soc_info, clk, clk_rate_round);
  1189. } else {
  1190. bool set_rate_finish = false;
  1191. CAM_DBG(CAM_UTIL,
  1192. "Dev %s clk %s clk_id %d src_idx %d src_clk_id %d",
  1193. soc_info->dev_name, clk_name, clk_id,
  1194. soc_info->src_clk_idx,
  1195. (soc_info->src_clk_idx == -1) ? -1 :
  1196. soc_info->clk_id[soc_info->src_clk_idx]);
  1197. if (is_src_clk && soc_info->mmrm_handle &&
  1198. !skip_mmrm_set_rate) {
  1199. uint32_t idx = soc_info->src_clk_idx;
  1200. uint32_t min_level = soc_info->lowest_clk_level;
  1201. rc = cam_soc_util_set_sw_client_rate_through_mmrm(
  1202. soc_info->mmrm_handle,
  1203. soc_info->is_nrt_dev,
  1204. soc_info->clk_rate[min_level][idx],
  1205. clk_rate_round, 1);
  1206. if (rc) {
  1207. CAM_ERR(CAM_UTIL,
  1208. "set_sw_client_rate through mmrm failed on %s clk_id %d, rate=%ld",
  1209. clk_name, clk_id, clk_rate_round);
  1210. return rc;
  1211. }
  1212. set_rate_finish = true;
  1213. }
  1214. if (!set_rate_finish) {
  1215. rc = cam_wrapper_clk_set_rate(clk, clk_rate_round);
  1216. if (rc) {
  1217. CAM_ERR(CAM_UTIL, "set_rate failed on %s", clk_name);
  1218. return rc;
  1219. }
  1220. }
  1221. }
  1222. }
  1223. if (applied_clk_rate)
  1224. *applied_clk_rate = clk_rate_round;
  1225. return rc;
  1226. }
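/*
 * Usage sketch (illustrative, not called from here): a positive rate is first
 * rounded through cam_wrapper_clk_round_rate() and then applied, while the
 * special INIT_RATE marker keeps the clock at its current (or lowest rounded)
 * rate. A hypothetical caller could do:
 *
 *   unsigned long applied = 0;
 *   rc = cam_soc_util_set_clk_rate(soc_info, soc_info->clk[i],
 *       soc_info->clk_name[i], 19200000, false, false,
 *       soc_info->clk_id[i], &applied);
 *
 * where 19200000 Hz is an arbitrary example rate.
 */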
  1227. int cam_soc_util_set_src_clk_rate(struct cam_hw_soc_info *soc_info, int cesta_client_idx,
  1228. unsigned long clk_rate_high, unsigned long clk_rate_low)
  1229. {
  1230. int rc = 0;
  1231. int i = 0;
  1232. int32_t src_clk_idx;
  1233. int32_t scl_clk_idx;
  1234. struct clk *clk = NULL;
  1235. int32_t apply_level;
  1236. uint32_t clk_level_override_high = 0, clk_level_override_low = 0;
  1237. if (!soc_info || (soc_info->src_clk_idx < 0) ||
  1238. (soc_info->src_clk_idx >= CAM_SOC_MAX_CLK)) {
  1239. CAM_ERR(CAM_UTIL, "Invalid src_clk_idx: %d",
  1240. soc_info ? soc_info->src_clk_idx : -1);
  1241. return -EINVAL;
  1242. }
  1243. src_clk_idx = soc_info->src_clk_idx;
  1244. clk_level_override_high = soc_info->clk_level_override_high;
  1245. clk_level_override_low = soc_info->clk_level_override_low;
  1246. if (clk_level_override_high && clk_rate_high)
  1247. clk_rate_high = soc_info->clk_rate[clk_level_override_high][src_clk_idx];
  1248. if (clk_level_override_low && clk_rate_low)
  1249. clk_rate_low = soc_info->clk_rate[clk_level_override_low][src_clk_idx];
  1250. clk = soc_info->clk[src_clk_idx];
  1251. rc = cam_soc_util_get_clk_level(soc_info, clk_rate_high, src_clk_idx,
  1252. &apply_level);
  1253. if (rc || (apply_level < 0) || (apply_level >= CAM_MAX_VOTE)) {
  1254. CAM_ERR(CAM_UTIL,
  1255. "set %s, rate %lld dev_name = %s apply level = %d",
  1256. soc_info->clk_name[src_clk_idx], clk_rate_high,
  1257. soc_info->dev_name, apply_level);
  1258. return -EINVAL;
  1259. }
  1260. CAM_DBG(CAM_UTIL,
  1261. "set %s, cesta_client_idx: %d rate [%ld %ld] dev_name = %s apply level = %d",
  1262. soc_info->clk_name[src_clk_idx], cesta_client_idx, clk_rate_high, clk_rate_low,
  1263. soc_info->dev_name, apply_level);
  1264. if ((soc_info->cam_cx_ipeak_enable) && (clk_rate_high > 0)) {
  1265. cam_cx_ipeak_update_vote_cx_ipeak(soc_info,
  1266. apply_level);
  1267. }
  1268. if (soc_info->is_clk_drv_en && CAM_IS_VALID_CESTA_IDX(cesta_client_idx)) {
  1269. rc = cam_soc_util_set_cesta_clk_rate(soc_info, cesta_client_idx, clk_rate_high,
  1270. clk_rate_low,
  1271. &soc_info->applied_src_clk_rates.hw_client[cesta_client_idx].high,
  1272. &soc_info->applied_src_clk_rates.hw_client[cesta_client_idx].low);
  1273. if (rc) {
  1274. CAM_ERR(CAM_UTIL,
  1275. "Failed in setting cesta clk rates[high low]:[%ld %ld] client_idx:%d rc:%d",
  1276. clk_rate_high, clk_rate_low, cesta_client_idx, rc);
  1277. return rc;
  1278. }
  1279. goto end;
  1280. }
  1281. rc = cam_soc_util_set_clk_rate(soc_info, clk,
  1282. soc_info->clk_name[src_clk_idx], clk_rate_high,
  1283. CAM_IS_BIT_SET(soc_info->shared_clk_mask, src_clk_idx),
  1284. true, soc_info->clk_id[src_clk_idx],
  1285. &soc_info->applied_src_clk_rates.sw_client);
  1286. if (rc) {
  1287. CAM_ERR(CAM_UTIL,
  1288. "SET_RATE Failed: src clk: %s, rate %lld, dev_name = %s rc: %d",
  1289. soc_info->clk_name[src_clk_idx], clk_rate_high,
  1290. soc_info->dev_name, rc);
  1291. return rc;
  1292. }
  1293. /* set clk rate for scalable clk if available */
  1294. for (i = 0; i < soc_info->scl_clk_count; i++) {
  1295. scl_clk_idx = soc_info->scl_clk_idx[i];
  1296. if (scl_clk_idx < 0) {
  1297. CAM_DBG(CAM_UTIL, "Scl clk index invalid");
  1298. continue;
  1299. }
  1300. clk = soc_info->clk[scl_clk_idx];
  1301. rc = cam_soc_util_set_clk_rate(soc_info, clk,
  1302. soc_info->clk_name[scl_clk_idx],
  1303. soc_info->clk_rate[apply_level][scl_clk_idx],
  1304. CAM_IS_BIT_SET(soc_info->shared_clk_mask, scl_clk_idx),
  1305. false, soc_info->clk_id[scl_clk_idx],
  1306. NULL);
  1307. if (rc) {
  1308. CAM_WARN(CAM_UTIL,
  1309. "SET_RATE Failed: scl clk: %s, rate %d dev_name = %s, rc: %d",
  1310. soc_info->clk_name[scl_clk_idx],
  1311. soc_info->clk_rate[apply_level][scl_clk_idx],
  1312. soc_info->dev_name, rc);
  1313. }
  1314. }
  1315. end:
  1316. return 0;
  1317. }
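/*
 * Usage sketch (illustrative): a hardware block whose soc_info has a valid
 * src_clk_idx could request, say, a 480 MHz high / 200 MHz low vote via
 *
 *   rc = cam_soc_util_set_src_clk_rate(soc_info, cesta_client_idx,
 *       480000000, 200000000);
 *
 * The low rate is only consumed on the CESTA/clk-drv path; on the SW client
 * path only clk_rate_high is applied and scalable clocks follow at the
 * corresponding level. The example rates are hypothetical.
 */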
  1318. int cam_soc_util_put_optional_clk(struct cam_hw_soc_info *soc_info,
  1319. int32_t clk_indx)
  1320. {
  1321. if (clk_indx < 0) {
  1322. CAM_ERR(CAM_UTIL, "Invalid params clk %d", clk_indx);
  1323. return -EINVAL;
  1324. }
  1325. if (CAM_IS_BIT_SET(soc_info->optional_shared_clk_mask, clk_indx))
  1326. cam_soc_util_clk_wrapper_unregister_entry(
  1327. soc_info->optional_clk_id[clk_indx], soc_info);
  1328. cam_wrapper_clk_put(soc_info->optional_clk[clk_indx]);
  1329. soc_info->optional_clk[clk_indx] = NULL;
  1330. return 0;
  1331. }
  1332. static struct clk *cam_soc_util_option_clk_get(struct device_node *np,
  1333. int index, uint32_t *clk_id)
  1334. {
  1335. struct of_phandle_args clkspec;
  1336. struct clk *clk;
  1337. int rc;
  1338. if (index < 0)
  1339. return ERR_PTR(-EINVAL);
  1340. rc = of_parse_phandle_with_args(np, "clocks-option", "#clock-cells",
  1341. index, &clkspec);
  1342. if (rc)
  1343. return ERR_PTR(rc);
  1344. clk = cam_wrapper_of_clk_get_from_provider(&clkspec);
  1345. *clk_id = clkspec.args[0];
  1346. of_node_put(clkspec.np);
  1347. return clk;
  1348. }
  1349. int cam_soc_util_get_option_clk_by_name(struct cam_hw_soc_info *soc_info,
  1350. const char *clk_name, int32_t *clk_index)
  1351. {
  1352. int index = 0;
  1353. int rc = 0;
  1354. struct device_node *of_node = NULL;
  1355. uint32_t shared_clk_val;
  1356. if (!soc_info || !clk_name || !clk_index) {
  1357. CAM_ERR(CAM_UTIL,
  1358. "Invalid params soc_info %pK clk_name %s clk_index %pK",
  1359. soc_info, clk_name, clk_index);
  1360. return -EINVAL;
  1361. }
  1362. of_node = soc_info->dev->of_node;
  1363. index = of_property_match_string(of_node, "clock-names-option",
  1364. clk_name);
  1365. if (index < 0) {
  1366. CAM_DBG(CAM_UTIL, "No clk data for %s", clk_name);
  1367. *clk_index = -1;
  1368. return -EINVAL;
  1369. }
  1370. if (index >= CAM_SOC_MAX_OPT_CLK) {
  1371. CAM_ERR(CAM_UTIL, "Insufficient optional clk entries %d %d",
  1372. index, CAM_SOC_MAX_OPT_CLK);
  1373. return -EINVAL;
  1374. }
  1375. of_property_read_string_index(of_node, "clock-names-option",
  1376. index, &(soc_info->optional_clk_name[index]));
  1377. soc_info->optional_clk[index] = cam_soc_util_option_clk_get(of_node,
  1378. index, &soc_info->optional_clk_id[index]);
  1379. if (IS_ERR(soc_info->optional_clk[index])) {
  1380. CAM_ERR(CAM_UTIL, "No clk named %s found. Dev %s", clk_name,
  1381. soc_info->dev_name);
  1382. *clk_index = -1;
  1383. return -EFAULT;
  1384. }
  1385. *clk_index = index;
  1386. rc = of_property_read_u32_index(of_node, "clock-rates-option",
  1387. index, &soc_info->optional_clk_rate[index]);
  1388. if (rc) {
  1389. CAM_ERR(CAM_UTIL,
  1390. "Error reading clock-rates clk_name %s index %d",
  1391. clk_name, index);
  1392. goto error;
  1393. }
  1394. /*
1395. * Option clocks are assumed to be available only to a single device here.
  1396. * Hence use INIT_RATE instead of NO_SET_RATE.
  1397. */
  1398. soc_info->optional_clk_rate[index] =
  1399. (soc_info->optional_clk_rate[index] == 0) ?
  1400. (int32_t)INIT_RATE : soc_info->optional_clk_rate[index];
  1401. CAM_DBG(CAM_UTIL, "clk_name %s index %d clk_rate %d",
  1402. clk_name, *clk_index, soc_info->optional_clk_rate[index]);
  1403. rc = of_property_read_u32_index(of_node, "shared-clks-option",
  1404. index, &shared_clk_val);
  1405. if (rc) {
  1406. CAM_DBG(CAM_UTIL, "Not shared clk %s index %d",
  1407. clk_name, index);
  1408. } else if (shared_clk_val > 1) {
  1409. CAM_WARN(CAM_UTIL, "Invalid shared clk val %d", shared_clk_val);
  1410. } else {
  1411. CAM_DBG(CAM_UTIL,
  1412. "Dev %s shared clk %s index %d, clk id %d, shared_clk_val %d",
  1413. soc_info->dev_name, clk_name, index,
  1414. soc_info->optional_clk_id[index], shared_clk_val);
  1415. if (shared_clk_val) {
  1416. CAM_SET_BIT(soc_info->optional_shared_clk_mask, index);
  1417. /* Create a wrapper entry if this is a shared clock */
  1418. CAM_DBG(CAM_UTIL,
  1419. "Dev %s, clk %s, id %d register wrapper entry for shared clk",
  1420. soc_info->dev_name,
  1421. soc_info->optional_clk_name[index],
  1422. soc_info->optional_clk_id[index]);
  1423. rc = cam_soc_util_clk_wrapper_register_entry(
  1424. soc_info->optional_clk_id[index],
  1425. soc_info->optional_clk[index], false,
  1426. soc_info,
  1427. soc_info->optional_clk_rate[index],
  1428. soc_info->optional_clk_name[index]);
  1429. if (rc) {
  1430. CAM_ERR(CAM_UTIL,
  1431. "Failed in registering shared clk Dev %s id %d",
  1432. soc_info->dev_name,
  1433. soc_info->optional_clk_id[index]);
  1434. goto error;
  1435. }
  1436. }
  1437. }
  1438. return 0;
  1439. error:
  1440. cam_wrapper_clk_put(soc_info->optional_clk[index]);
  1441. soc_info->optional_clk_rate[index] = 0;
  1442. soc_info->optional_clk[index] = NULL;
  1443. *clk_index = -1;
  1444. return rc;
  1445. }
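/*
 * Illustrative DT sketch for the optional-clock properties parsed above
 * (clock names, provider and values are hypothetical):
 *
 *   clock-names-option = "cam_example_clk";
 *   clocks-option = <&clock_camcc CAM_CC_EXAMPLE_CLK>;
 *   clock-rates-option = <19200000>;
 *   shared-clks-option = <1>;
 *
 * A rate of 0 in clock-rates-option is treated as INIT_RATE, and a
 * shared-clks-option value of 1 registers the clock with the shared-clk
 * wrapper.
 */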
  1446. int cam_soc_util_clk_enable(struct cam_hw_soc_info *soc_info, int cesta_client_idx,
  1447. bool optional_clk, int32_t clk_idx, int32_t apply_level)
  1448. {
  1449. int rc = 0;
  1450. struct clk *clk;
  1451. const char *clk_name;
  1452. unsigned long clk_rate;
  1453. uint32_t shared_clk_mask;
  1454. uint32_t clk_id;
  1455. bool is_src_clk = false;
  1456. if (!soc_info || (clk_idx < 0) || (apply_level >= CAM_MAX_VOTE)) {
  1457. CAM_ERR(CAM_UTIL, "Invalid param %d %d", clk_idx, apply_level);
  1458. return -EINVAL;
  1459. }
  1460. if (optional_clk) {
  1461. clk = soc_info->optional_clk[clk_idx];
  1462. clk_name = soc_info->optional_clk_name[clk_idx];
  1463. clk_rate = (apply_level == -1) ?
  1464. 0 : soc_info->optional_clk_rate[clk_idx];
  1465. shared_clk_mask = soc_info->optional_shared_clk_mask;
  1466. clk_id = soc_info->optional_clk_id[clk_idx];
  1467. } else {
  1468. clk = soc_info->clk[clk_idx];
  1469. clk_name = soc_info->clk_name[clk_idx];
  1470. clk_rate = (apply_level == -1) ?
  1471. 0 : soc_info->clk_rate[apply_level][clk_idx];
  1472. shared_clk_mask = soc_info->shared_clk_mask;
  1473. clk_id = soc_info->clk_id[clk_idx];
  1474. if (clk_idx == soc_info->src_clk_idx)
  1475. is_src_clk = true;
  1476. }
  1477. if (!clk)
  1478. return 0;
  1479. if (is_src_clk && soc_info->is_clk_drv_en && CAM_IS_VALID_CESTA_IDX(cesta_client_idx)) {
  1480. rc = cam_soc_util_set_cesta_clk_rate(soc_info, cesta_client_idx, clk_rate, clk_rate,
  1481. &soc_info->applied_src_clk_rates.hw_client[cesta_client_idx].high,
  1482. &soc_info->applied_src_clk_rates.hw_client[cesta_client_idx].low);
  1483. if (rc) {
  1484. CAM_ERR(CAM_UTIL,
  1485. "[%s] Failed in setting cesta clk rates[high low]:[%ld %ld] client_idx:%d rc:%d",
  1486. soc_info->dev_name, clk_rate, clk_rate, cesta_client_idx, rc);
  1487. return rc;
  1488. }
  1489. rc = cam_soc_util_cesta_channel_switch(cesta_client_idx, soc_info->dev_name);
  1490. if (rc) {
  1491. CAM_ERR(CAM_UTIL,
  1492. "[%s] Failed to apply power states for cesta client:%d rc:%d",
  1493. soc_info->dev_name, cesta_client_idx, rc);
  1494. return rc;
  1495. }
  1496. } else {
  1497. rc = cam_soc_util_set_clk_rate(soc_info, clk, clk_name, clk_rate,
  1498. CAM_IS_BIT_SET(shared_clk_mask, clk_idx), is_src_clk, clk_id,
  1499. &soc_info->applied_src_clk_rates.sw_client);
  1500. if (rc) {
  1501. CAM_ERR(CAM_UTIL, "[%s] Failed in setting clk rate %ld rc:%d",
  1502. soc_info->dev_name, clk_rate, rc);
  1503. return rc;
  1504. }
  1505. }
  1506. CAM_DBG(CAM_UTIL, "[%s] : clk enable %s", soc_info->dev_name, clk_name);
  1507. rc = cam_wrapper_clk_prepare_enable(clk);
  1508. if (rc) {
  1509. CAM_ERR(CAM_UTIL, "enable failed for %s: rc(%d)", clk_name, rc);
  1510. return rc;
  1511. }
  1512. return rc;
  1513. }
  1514. int cam_soc_util_clk_disable(struct cam_hw_soc_info *soc_info, int cesta_client_idx,
  1515. bool optional_clk, int32_t clk_idx)
  1516. {
  1517. int rc = 0;
  1518. struct clk *clk;
  1519. const char *clk_name;
  1520. uint32_t shared_clk_mask;
  1521. uint32_t clk_id;
  1522. if (!soc_info || (clk_idx < 0)) {
  1523. CAM_ERR(CAM_UTIL, "Invalid param %d", clk_idx);
  1524. return -EINVAL;
  1525. }
  1526. if (optional_clk) {
  1527. clk = soc_info->optional_clk[clk_idx];
  1528. clk_name = soc_info->optional_clk_name[clk_idx];
  1529. shared_clk_mask = soc_info->optional_shared_clk_mask;
  1530. clk_id = soc_info->optional_clk_id[clk_idx];
  1531. } else {
  1532. clk = soc_info->clk[clk_idx];
  1533. clk_name = soc_info->clk_name[clk_idx];
  1534. shared_clk_mask = soc_info->shared_clk_mask;
  1535. clk_id = soc_info->clk_id[clk_idx];
  1536. }
  1537. CAM_DBG(CAM_UTIL, "disable %s", clk_name);
  1538. if (!clk)
  1539. return 0;
  1540. cam_wrapper_clk_disable_unprepare(clk);
  1541. if ((clk_idx == soc_info->src_clk_idx) && soc_info->is_clk_drv_en &&
  1542. CAM_IS_VALID_CESTA_IDX(cesta_client_idx)) {
  1543. rc = cam_soc_util_set_cesta_clk_rate(soc_info, cesta_client_idx, 0, 0,
  1544. &soc_info->applied_src_clk_rates.hw_client[cesta_client_idx].high,
  1545. &soc_info->applied_src_clk_rates.hw_client[cesta_client_idx].low);
  1546. if (rc) {
  1547. CAM_ERR(CAM_UTIL,
  1548. "Failed in setting cesta clk rates[high low]:[0 0] client_idx:%d rc:%d",
  1549. cesta_client_idx, rc);
  1550. return rc;
  1551. }
  1552. rc = cam_soc_util_cesta_channel_switch(cesta_client_idx, soc_info->dev_name);
  1553. if (rc) {
1554. CAM_ERR(CAM_UTIL,
  1555. "Failed to apply power states for cesta_client_idx:%d rc:%d",
  1556. cesta_client_idx, rc);
  1557. return rc;
  1558. }
  1559. } else {
  1560. if (CAM_IS_BIT_SET(shared_clk_mask, clk_idx)) {
  1561. CAM_DBG(CAM_UTIL,
  1562. "Dev %s clk %s Disabling Shared clk, set 0 rate",
  1563. soc_info->dev_name, clk_name);
  1564. cam_soc_util_clk_wrapper_set_clk_rate(clk_id, soc_info, clk, 0);
  1565. } else if (soc_info->mmrm_handle && (!skip_mmrm_set_rate) &&
  1566. (soc_info->src_clk_idx == clk_idx)) {
  1567. CAM_DBG(CAM_UTIL, "Dev %s Disabling %s clk, set 0 rate",
  1568. soc_info->dev_name, clk_name);
  1569. cam_soc_util_set_sw_client_rate_through_mmrm(
  1570. soc_info->mmrm_handle,
  1571. soc_info->is_nrt_dev,
  1572. 0, 0, 1);
  1573. }
  1574. }
  1575. return 0;
  1576. }
  1577. /**
  1578. * cam_soc_util_clk_enable_default()
  1579. *
  1580. * @brief: This function enables the default clocks present
  1581. * in soc_info
  1582. *
  1583. * @soc_info: Device soc struct to be populated
  1584. * @cesta_client_idx: CESTA Client idx for hw client based src clocks
  1585. * @clk_level: Clk level to apply while enabling
  1586. *
  1587. * @return: success or failure
  1588. */
  1589. int cam_soc_util_clk_enable_default(struct cam_hw_soc_info *soc_info,
  1590. int cesta_client_idx, enum cam_vote_level clk_level)
  1591. {
  1592. int i, rc = 0;
  1593. enum cam_vote_level apply_level;
  1594. if ((soc_info->num_clk == 0) ||
  1595. (soc_info->num_clk >= CAM_SOC_MAX_CLK)) {
  1596. CAM_ERR(CAM_UTIL, "Invalid number of clock %d",
  1597. soc_info->num_clk);
  1598. return -EINVAL;
  1599. }
  1600. rc = cam_soc_util_get_clk_level_to_apply(soc_info, clk_level,
  1601. &apply_level);
  1602. if (rc) {
  1603. CAM_ERR(CAM_UTIL, "[%s] : failed to get level clk_level=%d, rc=%d",
  1604. soc_info->dev_name, clk_level, rc);
  1605. return rc;
  1606. }
  1607. if (soc_info->cam_cx_ipeak_enable)
  1608. cam_cx_ipeak_update_vote_cx_ipeak(soc_info, apply_level);
  1609. CAM_DBG(CAM_UTIL, "Dev[%s] : cesta client %d, request level %s, apply level %s",
  1610. soc_info->dev_name, cesta_client_idx,
  1611. cam_soc_util_get_string_from_level(clk_level),
  1612. cam_soc_util_get_string_from_level(apply_level));
  1613. memset(&soc_info->applied_src_clk_rates, 0, sizeof(struct cam_soc_util_clk_rates));
  1614. for (i = 0; i < soc_info->num_clk; i++) {
  1615. rc = cam_soc_util_clk_enable(soc_info, cesta_client_idx, false, i, apply_level);
  1616. if (rc) {
  1617. CAM_ERR(CAM_UTIL,
  1618. "[%s] : failed to enable clk apply_level=%d, rc=%d, cesta_client_idx=%d",
  1619. soc_info->dev_name, apply_level, rc, cesta_client_idx);
  1620. goto clk_disable;
  1621. }
  1622. if (soc_info->cam_cx_ipeak_enable)
  1623. CAM_DBG(CAM_UTIL,
  1624. "dev name = %s clk name = %s idx = %d apply_level = %d clc idx = %d",
  1625. soc_info->dev_name, soc_info->clk_name[i], i, apply_level, i);
  1626. }
  1627. return rc;
  1628. clk_disable:
  1629. if (soc_info->cam_cx_ipeak_enable)
  1630. cam_cx_ipeak_update_vote_cx_ipeak(soc_info, 0);
  1631. for (i--; i >= 0; i--) {
  1632. cam_soc_util_clk_disable(soc_info, cesta_client_idx, false, i);
  1633. }
  1634. return rc;
  1635. }
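/*
 * Usage sketch (illustrative): a driver typically pairs this call with
 * cam_soc_util_clk_disable_default() around the powered-on window, e.g.
 *
 *   rc = cam_soc_util_clk_enable_default(soc_info, cesta_client_idx,
 *       CAM_SVS_VOTE);
 *   ...
 *   cam_soc_util_clk_disable_default(soc_info, cesta_client_idx);
 *
 * where CAM_SVS_VOTE stands in for whatever level the caller needs; an
 * unavailable level is bumped by cam_soc_util_get_clk_level_to_apply().
 */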
  1636. /**
  1637. * cam_soc_util_clk_disable_default()
  1638. *
  1639. * @brief: This function disables the default clocks present
  1640. * in soc_info
  1641. *
  1642. * @soc_info: device soc struct to be populated
  1643. * @cesta_client_idx: CESTA Client idx for hw client based src clocks
  1644. *
1645. * @return: void
  1646. */
  1647. void cam_soc_util_clk_disable_default(struct cam_hw_soc_info *soc_info,
  1648. int cesta_client_idx)
  1649. {
  1650. int i;
  1651. if (soc_info->num_clk == 0)
  1652. return;
  1653. if (soc_info->cam_cx_ipeak_enable)
  1654. cam_cx_ipeak_unvote_cx_ipeak(soc_info);
  1655. for (i = soc_info->num_clk - 1; i >= 0; i--)
  1656. cam_soc_util_clk_disable(soc_info, cesta_client_idx, false, i);
  1657. }
  1658. /**
  1659. * cam_soc_util_get_dt_clk_info()
  1660. *
  1661. * @brief: Parse the DT and populate the Clock properties
  1662. *
  1663. * @soc_info: device soc struct to be populated
  1665. *
  1666. * @return: success or failure
  1667. */
  1668. static int cam_soc_util_get_dt_clk_info(struct cam_hw_soc_info *soc_info)
  1669. {
  1670. struct device_node *of_node = NULL;
  1671. int count;
  1672. int num_clk_rates, num_clk_levels;
  1673. int i, j, rc;
  1674. int32_t num_clk_level_strings;
  1675. const char *src_clk_str = NULL;
  1676. const char *scl_clk_str = NULL;
  1677. const char *clk_control_debugfs = NULL;
  1678. const char *clk_cntl_lvl_string = NULL;
  1679. enum cam_vote_level level;
  1680. int shared_clk_cnt;
  1681. struct of_phandle_args clk_args = {0};
  1682. if (!soc_info || !soc_info->dev)
  1683. return -EINVAL;
  1684. of_node = soc_info->dev->of_node;
  1685. if (!of_property_read_bool(of_node, "use-shared-clk")) {
  1686. CAM_DBG(CAM_UTIL, "No shared clk parameter defined");
  1687. soc_info->use_shared_clk = false;
  1688. } else {
  1689. soc_info->use_shared_clk = true;
  1690. }
  1691. count = of_property_count_strings(of_node, "clock-names");
  1692. CAM_DBG(CAM_UTIL, "E: dev_name = %s count = %d",
  1693. soc_info->dev_name, count);
  1694. if (count > CAM_SOC_MAX_CLK) {
  1695. CAM_ERR(CAM_UTIL, "invalid count of clocks, count=%d", count);
  1696. rc = -EINVAL;
  1697. return rc;
  1698. }
  1699. if (count <= 0) {
  1700. CAM_DBG(CAM_UTIL, "No clock-names found");
  1701. count = 0;
  1702. soc_info->num_clk = count;
  1703. return 0;
  1704. }
  1705. soc_info->num_clk = count;
  1706. for (i = 0; i < count; i++) {
  1707. rc = of_property_read_string_index(of_node, "clock-names",
  1708. i, &(soc_info->clk_name[i]));
  1709. CAM_DBG(CAM_UTIL, "clock-names[%d] = %s",
  1710. i, soc_info->clk_name[i]);
  1711. if (rc) {
  1712. CAM_ERR(CAM_UTIL,
  1713. "i= %d count= %d reading clock-names failed",
  1714. i, count);
  1715. return rc;
  1716. }
  1717. }
  1718. num_clk_rates = of_property_count_u32_elems(of_node, "clock-rates");
  1719. if (num_clk_rates <= 0) {
  1720. CAM_ERR(CAM_UTIL, "reading clock-rates count failed");
  1721. return -EINVAL;
  1722. }
  1723. if ((num_clk_rates % soc_info->num_clk) != 0) {
  1724. CAM_ERR(CAM_UTIL,
  1725. "mismatch clk/rates, No of clocks=%d, No of rates=%d",
  1726. soc_info->num_clk, num_clk_rates);
  1727. return -EINVAL;
  1728. }
  1729. num_clk_levels = (num_clk_rates / soc_info->num_clk);
  1730. num_clk_level_strings = of_property_count_strings(of_node,
  1731. "clock-cntl-level");
  1732. if (num_clk_level_strings != num_clk_levels) {
  1733. CAM_ERR(CAM_UTIL,
  1734. "Mismatch No of levels=%d, No of level string=%d",
  1735. num_clk_levels, num_clk_level_strings);
  1736. return -EINVAL;
  1737. }
  1738. soc_info->lowest_clk_level = CAM_TURBO_VOTE;
  1739. for (i = 0; i < num_clk_levels; i++) {
  1740. rc = of_property_read_string_index(of_node,
  1741. "clock-cntl-level", i, &clk_cntl_lvl_string);
  1742. if (rc) {
  1743. CAM_ERR(CAM_UTIL,
  1744. "Error reading clock-cntl-level, rc=%d", rc);
  1745. return rc;
  1746. }
  1747. rc = cam_soc_util_get_level_from_string(clk_cntl_lvl_string,
  1748. &level);
  1749. if (rc)
  1750. return rc;
  1751. CAM_DBG(CAM_UTIL,
  1752. "[%d] : %s %d", i, clk_cntl_lvl_string, level);
  1753. soc_info->clk_level_valid[level] = true;
  1754. for (j = 0; j < soc_info->num_clk; j++) {
  1755. rc = of_property_read_u32_index(of_node, "clock-rates",
  1756. ((i * soc_info->num_clk) + j),
  1757. &soc_info->clk_rate[level][j]);
  1758. if (rc) {
  1759. CAM_ERR(CAM_UTIL,
  1760. "Error reading clock-rates, rc=%d",
  1761. rc);
  1762. return rc;
  1763. }
  1764. soc_info->clk_rate[level][j] =
  1765. (soc_info->clk_rate[level][j] == 0) ?
  1766. (int32_t)NO_SET_RATE :
  1767. soc_info->clk_rate[level][j];
  1768. CAM_DBG(CAM_UTIL, "soc_info->clk_rate[%d][%d] = %d",
  1769. level, j,
  1770. soc_info->clk_rate[level][j]);
  1771. }
  1772. if ((level > CAM_MINSVS_VOTE) &&
  1773. (level < soc_info->lowest_clk_level))
  1774. soc_info->lowest_clk_level = level;
  1775. }
  1776. soc_info->src_clk_idx = -1;
  1777. rc = of_property_read_string_index(of_node, "src-clock-name", 0,
  1778. &src_clk_str);
  1779. if (rc || !src_clk_str) {
  1780. CAM_DBG(CAM_UTIL, "No src_clk_str found");
  1781. rc = 0;
  1782. goto end;
  1783. }
  1784. for (i = 0; i < soc_info->num_clk; i++) {
  1785. if (strcmp(soc_info->clk_name[i], src_clk_str) == 0) {
  1786. soc_info->src_clk_idx = i;
  1787. CAM_DBG(CAM_UTIL, "src clock = %s, index = %d",
  1788. src_clk_str, i);
  1789. }
  1790. rc = of_parse_phandle_with_args(of_node, "clocks",
  1791. "#clock-cells", i, &clk_args);
  1792. if (rc) {
1793. CAM_ERR(CAM_UTIL,
1794. "failed to get clock info rc=%d", rc);
  1795. rc = -EINVAL;
  1796. goto end;
  1797. }
  1798. soc_info->clk_id[i] = clk_args.args[0];
  1799. of_node_put(clk_args.np);
  1800. CAM_DBG(CAM_UTIL, "Dev %s clk %s id %d",
  1801. soc_info->dev_name, soc_info->clk_name[i],
  1802. soc_info->clk_id[i]);
  1803. }
  1804. CAM_DBG(CAM_UTIL, "Dev %s src_clk_idx %d, lowest_clk_level %d",
  1805. soc_info->dev_name, soc_info->src_clk_idx,
  1806. soc_info->lowest_clk_level);
  1807. soc_info->shared_clk_mask = 0;
  1808. shared_clk_cnt = of_property_count_u32_elems(of_node, "shared-clks");
  1809. if (shared_clk_cnt <= 0) {
  1810. CAM_DBG(CAM_UTIL, "Dev %s, no shared clks", soc_info->dev_name);
  1811. } else if (shared_clk_cnt != count) {
  1812. CAM_ERR(CAM_UTIL, "Dev %s, incorrect shared clock count %d %d",
  1813. soc_info->dev_name, shared_clk_cnt, count);
  1814. rc = -EINVAL;
  1815. goto end;
  1816. } else {
  1817. uint32_t shared_clk_val;
  1818. for (i = 0; i < shared_clk_cnt; i++) {
  1819. rc = of_property_read_u32_index(of_node,
  1820. "shared-clks", i, &shared_clk_val);
  1821. if (rc || (shared_clk_val > 1)) {
  1822. CAM_ERR(CAM_UTIL,
  1823. "Incorrect shared clk info at %d, val=%d, count=%d",
  1824. i, shared_clk_val, shared_clk_cnt);
  1825. rc = -EINVAL;
  1826. goto end;
  1827. }
  1828. if (shared_clk_val)
  1829. CAM_SET_BIT(soc_info->shared_clk_mask, i);
  1830. }
  1831. CAM_DBG(CAM_UTIL, "Dev %s shared clk mask 0x%x",
  1832. soc_info->dev_name, soc_info->shared_clk_mask);
  1833. }
  1834. /* scalable clk info parsing */
  1835. soc_info->scl_clk_count = 0;
  1836. soc_info->scl_clk_count = of_property_count_strings(of_node,
  1837. "scl-clk-names");
  1838. if ((soc_info->scl_clk_count <= 0) ||
  1839. (soc_info->scl_clk_count > CAM_SOC_MAX_CLK)) {
  1840. if (soc_info->scl_clk_count == -EINVAL) {
  1841. CAM_DBG(CAM_UTIL, "scl_clk_name prop not avialable");
  1842. } else if ((soc_info->scl_clk_count == -ENODATA) ||
  1843. (soc_info->scl_clk_count > CAM_SOC_MAX_CLK)) {
  1844. CAM_ERR(CAM_UTIL, "Invalid scl_clk_count: %d",
  1845. soc_info->scl_clk_count);
  1846. return -EINVAL;
  1847. }
  1848. CAM_DBG(CAM_UTIL, "Invalid scl_clk count: %d",
  1849. soc_info->scl_clk_count);
  1850. soc_info->scl_clk_count = -1;
  1851. } else {
  1852. CAM_DBG(CAM_UTIL, "No of scalable clocks: %d",
  1853. soc_info->scl_clk_count);
  1854. for (i = 0; i < soc_info->scl_clk_count; i++) {
  1855. rc = of_property_read_string_index(of_node,
  1856. "scl-clk-names", i,
  1857. (const char **)&scl_clk_str);
  1858. if (rc || !scl_clk_str) {
  1859. CAM_WARN(CAM_UTIL, "scl_clk_str is NULL");
  1860. soc_info->scl_clk_idx[i] = -1;
  1861. continue;
  1862. }
  1863. for (j = 0; j < soc_info->num_clk; j++) {
  1864. if (strnstr(scl_clk_str, soc_info->clk_name[j],
  1865. strlen(scl_clk_str))) {
  1866. soc_info->scl_clk_idx[i] = j;
  1867. CAM_DBG(CAM_UTIL,
  1868. "scl clock = %s, index = %d",
  1869. scl_clk_str, j);
  1870. break;
  1871. }
  1872. }
  1873. }
  1874. }
  1875. rc = of_property_read_string_index(of_node,
  1876. "clock-control-debugfs", 0, &clk_control_debugfs);
  1877. if (rc || !clk_control_debugfs) {
  1878. CAM_DBG(CAM_UTIL, "No clock_control_debugfs property found");
  1879. rc = 0;
  1880. goto end;
  1881. }
  1882. if (strcmp("true", clk_control_debugfs) == 0)
  1883. soc_info->clk_control_enable = true;
  1884. CAM_DBG(CAM_UTIL, "X: dev_name = %s count = %d",
  1885. soc_info->dev_name, count);
  1886. end:
  1887. return rc;
  1888. }
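/*
 * Illustrative DT sketch for the clock properties parsed above (clock names
 * and rate values are hypothetical):
 *
 *   clock-names = "cam_ahb_clk", "cam_core_clk";
 *   clock-cntl-level = "lowsvs", "svs", "turbo";
 *   clock-rates =
 *       <19200000 200000000>,
 *       <19200000 300000000>,
 *       <19200000 480000000>;
 *   src-clock-name = "cam_core_clk";
 *   shared-clks = <0 0>;
 *
 * Each clock-rates row corresponds to one clock-cntl-level entry, and a rate
 * of 0 is treated as NO_SET_RATE.
 */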
  1889. int cam_soc_util_set_clk_rate_level(struct cam_hw_soc_info *soc_info,
  1890. int cesta_client_idx, enum cam_vote_level clk_level_high,
  1891. enum cam_vote_level clk_level_low, bool do_not_set_src_clk)
  1892. {
  1893. int i, rc = 0;
  1894. enum cam_vote_level apply_level_high;
  1895. enum cam_vote_level apply_level_low = soc_info->lowest_clk_level;
  1896. unsigned long applied_clk_rate;
  1897. if ((soc_info->num_clk == 0) ||
  1898. (soc_info->num_clk >= CAM_SOC_MAX_CLK)) {
  1899. CAM_ERR(CAM_UTIL, "Invalid number of clock %d", soc_info->num_clk);
  1900. return -EINVAL;
  1901. }
  1902. rc = cam_soc_util_get_clk_level_to_apply(soc_info, clk_level_high,
  1903. &apply_level_high);
  1904. if (rc) {
  1905. CAM_ERR(CAM_UTIL, "[%s] : failed to get level clk_level_high=%d, rc=%d",
  1906. soc_info->dev_name, clk_level_high, rc);
  1907. return rc;
  1908. }
  1909. if (soc_info->is_clk_drv_en && CAM_IS_VALID_CESTA_IDX(cesta_client_idx)) {
  1910. rc = cam_soc_util_get_clk_level_to_apply(soc_info, clk_level_low,
  1911. &apply_level_low);
  1912. if (rc) {
  1913. CAM_ERR(CAM_UTIL, "[%s] : failed to get level clk_level_low=%d, rc=%d",
  1914. soc_info->dev_name, clk_level_low, rc);
  1915. return rc;
  1916. }
  1917. }
  1918. if (soc_info->cam_cx_ipeak_enable)
  1919. cam_cx_ipeak_update_vote_cx_ipeak(soc_info, apply_level_high);
  1920. for (i = 0; i < soc_info->num_clk; i++) {
  1921. if (do_not_set_src_clk && (i == soc_info->src_clk_idx)) {
  1922. CAM_DBG(CAM_UTIL, "Skipping set rate for src clk %s",
  1923. soc_info->clk_name[i]);
  1924. continue;
  1925. }
  1926. if (soc_info->is_clk_drv_en && CAM_IS_VALID_CESTA_IDX(cesta_client_idx) &&
  1927. (i == soc_info->src_clk_idx)) {
  1928. rc = cam_soc_util_set_cesta_clk_rate(soc_info, cesta_client_idx,
  1929. soc_info->clk_rate[apply_level_high][i],
  1930. soc_info->clk_rate[apply_level_low][i],
  1931. &soc_info->applied_src_clk_rates.hw_client[cesta_client_idx].high,
  1932. &soc_info->applied_src_clk_rates.hw_client[cesta_client_idx].low);
  1933. if (rc) {
  1934. CAM_ERR(CAM_UTIL,
  1935. "Failed to set the req clk level[high low]: [%s %s] cesta_client_idx: %d",
  1936. cam_soc_util_get_string_from_level(apply_level_high),
  1937. cam_soc_util_get_string_from_level(apply_level_low),
  1938. cesta_client_idx);
  1939. break;
  1940. }
  1941. continue;
  1942. }
  1943. CAM_DBG(CAM_UTIL, "Set rate for clk %s rate %d", soc_info->clk_name[i],
  1944. soc_info->clk_rate[apply_level_high][i]);
  1945. rc = cam_soc_util_set_clk_rate(soc_info, soc_info->clk[i],
  1946. soc_info->clk_name[i],
  1947. soc_info->clk_rate[apply_level_high][i],
  1948. CAM_IS_BIT_SET(soc_info->shared_clk_mask, i),
  1949. (i == soc_info->src_clk_idx) ? true : false,
  1950. soc_info->clk_id[i],
  1951. &applied_clk_rate);
  1952. if (rc < 0) {
  1953. CAM_DBG(CAM_UTIL,
  1954. "dev name = %s clk_name = %s idx = %d apply_level = %s",
  1955. soc_info->dev_name, soc_info->clk_name[i],
  1956. i, cam_soc_util_get_string_from_level(apply_level_high));
  1957. if (soc_info->cam_cx_ipeak_enable)
  1958. cam_cx_ipeak_update_vote_cx_ipeak(soc_info, 0);
  1959. break;
  1960. }
  1961. if (i == soc_info->src_clk_idx)
  1962. soc_info->applied_src_clk_rates.sw_client = applied_clk_rate;
  1963. }
  1964. return rc;
1965. }
  1966. static int cam_soc_util_get_dt_gpio_req_tbl(struct device_node *of_node,
  1967. struct cam_soc_gpio_data *gconf, uint16_t *gpio_array,
  1968. uint16_t gpio_array_size)
  1969. {
  1970. int32_t rc = 0, i = 0;
  1971. uint32_t count = 0;
  1972. uint32_t *val_array = NULL;
  1973. if (!of_get_property(of_node, "gpio-req-tbl-num", &count))
  1974. return 0;
  1975. count /= sizeof(uint32_t);
  1976. if (!count) {
  1977. CAM_ERR(CAM_UTIL, "gpio-req-tbl-num 0");
  1978. return 0;
  1979. }
  1980. val_array = kcalloc(count, sizeof(uint32_t), GFP_KERNEL);
  1981. if (!val_array)
  1982. return -ENOMEM;
  1983. gconf->cam_gpio_req_tbl = kcalloc(count, sizeof(struct gpio),
  1984. GFP_KERNEL);
  1985. if (!gconf->cam_gpio_req_tbl) {
  1986. rc = -ENOMEM;
  1987. goto free_val_array;
  1988. }
  1989. gconf->cam_gpio_req_tbl_size = count;
  1990. rc = of_property_read_u32_array(of_node, "gpio-req-tbl-num",
  1991. val_array, count);
  1992. if (rc) {
  1993. CAM_ERR(CAM_UTIL, "failed in reading gpio-req-tbl-num, rc = %d",
  1994. rc);
  1995. goto free_gpio_req_tbl;
  1996. }
  1997. for (i = 0; i < count; i++) {
  1998. if (val_array[i] >= gpio_array_size) {
  1999. CAM_ERR(CAM_UTIL, "gpio req tbl index %d invalid",
  2000. val_array[i]);
2001. rc = -EINVAL; goto free_gpio_req_tbl;
  2002. }
  2003. gconf->cam_gpio_req_tbl[i].gpio = gpio_array[val_array[i]];
  2004. CAM_DBG(CAM_UTIL, "cam_gpio_req_tbl[%d].gpio = %d", i,
  2005. gconf->cam_gpio_req_tbl[i].gpio);
  2006. }
  2007. rc = of_property_read_u32_array(of_node, "gpio-req-tbl-flags",
  2008. val_array, count);
  2009. if (rc) {
  2010. CAM_ERR(CAM_UTIL, "Failed in gpio-req-tbl-flags, rc %d", rc);
  2011. goto free_gpio_req_tbl;
  2012. }
  2013. for (i = 0; i < count; i++) {
  2014. gconf->cam_gpio_req_tbl[i].flags = val_array[i];
  2015. CAM_DBG(CAM_UTIL, "cam_gpio_req_tbl[%d].flags = %ld", i,
  2016. gconf->cam_gpio_req_tbl[i].flags);
  2017. }
  2018. for (i = 0; i < count; i++) {
  2019. rc = of_property_read_string_index(of_node,
  2020. "gpio-req-tbl-label", i,
  2021. &gconf->cam_gpio_req_tbl[i].label);
  2022. if (rc) {
  2023. CAM_ERR(CAM_UTIL, "Failed rc %d", rc);
  2024. goto free_gpio_req_tbl;
  2025. }
  2026. CAM_DBG(CAM_UTIL, "cam_gpio_req_tbl[%d].label = %s", i,
  2027. gconf->cam_gpio_req_tbl[i].label);
  2028. }
  2029. kfree(val_array);
  2030. return rc;
  2031. free_gpio_req_tbl:
  2032. kfree(gconf->cam_gpio_req_tbl);
  2033. free_val_array:
  2034. kfree(val_array);
  2035. gconf->cam_gpio_req_tbl_size = 0;
  2036. return rc;
  2037. }
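/*
 * Illustrative DT sketch for the gpio request table parsed above (indices,
 * flags and labels are hypothetical):
 *
 *   gpio-req-tbl-num = <0 1>;
 *   gpio-req-tbl-flags = <1 0>;
 *   gpio-req-tbl-label = "CAM_RESET", "CAM_VANA";
 *
 * gpio-req-tbl-num holds indices into the node's gpios list, not raw GPIO
 * numbers; each index must be smaller than the number of gpios entries.
 */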
  2038. static int cam_soc_util_get_gpio_info(struct cam_hw_soc_info *soc_info)
  2039. {
  2040. int32_t rc = 0, i = 0;
  2041. uint16_t *gpio_array = NULL;
  2042. int16_t gpio_array_size = 0;
  2043. struct cam_soc_gpio_data *gconf = NULL;
  2044. struct device_node *of_node = NULL;
  2045. if (!soc_info || !soc_info->dev)
  2046. return -EINVAL;
  2047. of_node = soc_info->dev->of_node;
  2048. /* Validate input parameters */
  2049. if (!of_node) {
  2050. CAM_ERR(CAM_UTIL, "Invalid param of_node");
  2051. return -EINVAL;
  2052. }
  2053. gpio_array_size = of_gpio_count(of_node);
  2054. if (gpio_array_size <= 0)
  2055. return 0;
  2056. CAM_DBG(CAM_UTIL, "gpio count %d", gpio_array_size);
  2057. gpio_array = kcalloc(gpio_array_size, sizeof(uint16_t), GFP_KERNEL);
  2058. if (!gpio_array) {
  2059. rc = -ENOMEM;
  2060. goto err;
  2061. }
  2062. for (i = 0; i < gpio_array_size; i++) {
  2063. gpio_array[i] = of_get_gpio(of_node, i);
  2064. CAM_DBG(CAM_UTIL, "gpio_array[%d] = %d", i, gpio_array[i]);
  2065. }
  2066. gconf = kzalloc(sizeof(*gconf), GFP_KERNEL);
  2067. if (!gconf) {
  2068. rc = -ENOMEM;
  2069. goto free_gpio_array;
  2070. }
  2071. rc = cam_soc_util_get_dt_gpio_req_tbl(of_node, gconf, gpio_array,
  2072. gpio_array_size);
  2073. if (rc) {
  2074. CAM_ERR(CAM_UTIL, "failed in msm_camera_get_dt_gpio_req_tbl");
  2075. goto free_gpio_conf;
  2076. }
  2077. gconf->cam_gpio_common_tbl = kcalloc(gpio_array_size,
  2078. sizeof(struct gpio), GFP_KERNEL);
  2079. if (!gconf->cam_gpio_common_tbl) {
  2080. rc = -ENOMEM;
  2081. goto free_gpio_conf;
  2082. }
  2083. for (i = 0; i < gpio_array_size; i++)
  2084. gconf->cam_gpio_common_tbl[i].gpio = gpio_array[i];
  2085. gconf->cam_gpio_common_tbl_size = gpio_array_size;
  2086. soc_info->gpio_data = gconf;
  2087. kfree(gpio_array);
  2088. return rc;
  2089. free_gpio_conf:
  2090. kfree(gconf);
  2091. free_gpio_array:
  2092. kfree(gpio_array);
  2093. err:
  2094. soc_info->gpio_data = NULL;
  2095. return rc;
  2096. }
  2097. static int cam_soc_util_request_gpio_table(
  2098. struct cam_hw_soc_info *soc_info, bool gpio_en)
  2099. {
  2100. int rc = 0, i = 0;
  2101. uint8_t size = 0;
  2102. struct cam_soc_gpio_data *gpio_conf =
  2103. soc_info->gpio_data;
  2104. struct gpio *gpio_tbl = NULL;
  2105. if (!gpio_conf) {
  2106. CAM_DBG(CAM_UTIL, "No GPIO entry");
  2107. return 0;
  2108. }
  2109. if (gpio_conf->cam_gpio_common_tbl_size <= 0) {
  2110. CAM_ERR(CAM_UTIL, "GPIO table size is invalid");
  2111. return -EINVAL;
  2112. }
  2113. size = gpio_conf->cam_gpio_req_tbl_size;
  2114. gpio_tbl = gpio_conf->cam_gpio_req_tbl;
  2115. if (!gpio_tbl || !size) {
  2116. CAM_ERR(CAM_UTIL, "Invalid gpio_tbl %pK / size %d",
  2117. gpio_tbl, size);
  2118. return -EINVAL;
  2119. }
  2120. for (i = 0; i < size; i++) {
  2121. CAM_DBG(CAM_UTIL, "i=%d, gpio=%d dir=%ld", i,
  2122. gpio_tbl[i].gpio, gpio_tbl[i].flags);
  2123. }
  2124. if (gpio_en) {
  2125. for (i = 0; i < size; i++) {
  2126. rc = gpio_request_one(gpio_tbl[i].gpio,
  2127. gpio_tbl[i].flags, gpio_tbl[i].label);
  2128. if (rc) {
  2129. /*
2130. * If a GPIO request fails, continue to
2131. * request the remaining gpios and output an error
2132. * message for driver bringup debug
  2133. */
  2134. CAM_ERR(CAM_UTIL, "gpio %d:%s request fails",
  2135. gpio_tbl[i].gpio, gpio_tbl[i].label);
  2136. }
  2137. }
  2138. } else {
  2139. gpio_free_array(gpio_tbl, size);
  2140. }
  2141. return rc;
  2142. }
  2143. static int cam_soc_util_get_dt_regulator_info
  2144. (struct cam_hw_soc_info *soc_info)
  2145. {
  2146. int rc = 0, count = 0, i = 0;
  2147. struct device_node *of_node = NULL;
  2148. if (!soc_info || !soc_info->dev) {
  2149. CAM_ERR(CAM_UTIL, "Invalid parameters");
  2150. return -EINVAL;
  2151. }
  2152. of_node = soc_info->dev->of_node;
  2153. soc_info->num_rgltr = 0;
  2154. count = of_property_count_strings(of_node, "regulator-names");
  2155. if (count != -EINVAL) {
  2156. if (count <= 0) {
  2157. CAM_ERR(CAM_UTIL, "no regulators found");
  2158. return -EINVAL;
  2159. }
  2160. soc_info->num_rgltr = count;
  2161. } else {
  2162. CAM_DBG(CAM_UTIL, "No regulators node found");
  2163. return 0;
  2164. }
  2165. if (soc_info->num_rgltr > CAM_SOC_MAX_REGULATOR) {
  2166. CAM_ERR(CAM_UTIL, "Invalid regulator count:%d",
  2167. soc_info->num_rgltr);
  2168. return -EINVAL;
  2169. }
  2170. for (i = 0; i < soc_info->num_rgltr; i++) {
  2171. rc = of_property_read_string_index(of_node,
  2172. "regulator-names", i, &soc_info->rgltr_name[i]);
  2173. CAM_DBG(CAM_UTIL, "rgltr_name[%d] = %s",
  2174. i, soc_info->rgltr_name[i]);
  2175. if (rc) {
  2176. CAM_ERR(CAM_UTIL, "no regulator resource at cnt=%d", i);
  2177. return -ENODEV;
  2178. }
  2179. }
  2180. if (!of_property_read_bool(of_node, "rgltr-cntrl-support")) {
  2181. CAM_DBG(CAM_UTIL, "No regulator control parameter defined");
  2182. soc_info->rgltr_ctrl_support = false;
  2183. return 0;
  2184. }
  2185. soc_info->rgltr_ctrl_support = true;
  2186. rc = of_property_read_u32_array(of_node, "rgltr-min-voltage",
  2187. soc_info->rgltr_min_volt, soc_info->num_rgltr);
  2188. if (rc) {
  2189. CAM_ERR(CAM_UTIL, "No minimum volatage value found, rc=%d", rc);
  2190. return -EINVAL;
  2191. }
  2192. rc = of_property_read_u32_array(of_node, "rgltr-max-voltage",
  2193. soc_info->rgltr_max_volt, soc_info->num_rgltr);
  2194. if (rc) {
  2195. CAM_ERR(CAM_UTIL, "No maximum volatage value found, rc=%d", rc);
  2196. return -EINVAL;
  2197. }
  2198. rc = of_property_read_u32_array(of_node, "rgltr-load-current",
  2199. soc_info->rgltr_op_mode, soc_info->num_rgltr);
  2200. if (rc) {
  2201. CAM_ERR(CAM_UTIL, "No Load curent found rc=%d", rc);
  2202. return -EINVAL;
  2203. }
  2204. return rc;
  2205. }
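/*
 * Illustrative DT sketch for the regulator properties parsed above (names
 * and values are hypothetical):
 *
 *   regulator-names = "camss-vdd";
 *   rgltr-cntrl-support;
 *   rgltr-min-voltage = <1200000>;
 *   rgltr-max-voltage = <1200000>;
 *   rgltr-load-current = <100000>;
 *
 * Without rgltr-cntrl-support only enable/disable is performed and the
 * voltage/load arrays are not required.
 */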
  2206. #ifdef CONFIG_CAM_PRESIL
  2207. static uint32_t next_dummy_irq_line_num = 0x000f;
  2208. struct resource dummy_irq_line[512];
  2209. #endif
  2210. int cam_soc_util_get_dt_properties(struct cam_hw_soc_info *soc_info)
  2211. {
  2212. struct device_node *of_node = NULL;
  2213. int count = 0, i = 0, rc = 0;
  2214. if (!soc_info || !soc_info->dev)
  2215. return -EINVAL;
  2216. of_node = soc_info->dev->of_node;
  2217. rc = of_property_read_u32(of_node, "cell-index", &soc_info->index);
  2218. if (rc) {
  2219. CAM_ERR(CAM_UTIL, "device %s failed to read cell-index",
  2220. soc_info->dev_name);
  2221. return rc;
  2222. }
  2223. count = of_property_count_strings(of_node, "reg-names");
  2224. if (count <= 0) {
  2225. CAM_DBG(CAM_UTIL, "no reg-names found for: %s",
  2226. soc_info->dev_name);
  2227. count = 0;
  2228. }
  2229. soc_info->num_mem_block = count;
  2230. for (i = 0; i < soc_info->num_mem_block; i++) {
  2231. rc = of_property_read_string_index(of_node, "reg-names", i,
  2232. &soc_info->mem_block_name[i]);
  2233. if (rc) {
  2234. CAM_ERR(CAM_UTIL, "failed to read reg-names at %d", i);
  2235. return rc;
  2236. }
  2237. soc_info->mem_block[i] =
  2238. platform_get_resource_byname(soc_info->pdev,
  2239. IORESOURCE_MEM, soc_info->mem_block_name[i]);
  2240. if (!soc_info->mem_block[i]) {
  2241. CAM_ERR(CAM_UTIL, "no mem resource by name %s",
  2242. soc_info->mem_block_name[i]);
  2243. rc = -ENODEV;
  2244. return rc;
  2245. }
  2246. }
  2247. rc = of_property_read_string(of_node, "label", &soc_info->label_name);
  2248. if (rc)
  2249. CAM_DBG(CAM_UTIL, "Label is not available in the node: %d", rc);
  2250. if (soc_info->num_mem_block > 0) {
  2251. rc = of_property_read_u32_array(of_node, "reg-cam-base",
  2252. soc_info->mem_block_cam_base, soc_info->num_mem_block);
  2253. if (rc) {
  2254. CAM_ERR(CAM_UTIL, "Error reading register offsets");
  2255. return rc;
  2256. }
  2257. }
  2258. count = of_property_count_strings(of_node, "interrupt-names");
  2259. if (count <= 0) {
  2260. CAM_DBG(CAM_UTIL, "No interrupt line present for: %s", soc_info->dev_name);
  2261. soc_info->irq_count = 0;
  2262. } else {
  2263. if (count > CAM_SOC_MAX_IRQ_LINES_PER_DEV) {
  2264. CAM_ERR(CAM_UTIL,
  2265. "Number of interrupt: %d exceeds maximum allowable interrupts: %d",
  2266. count, CAM_SOC_MAX_IRQ_LINES_PER_DEV);
  2267. return -EINVAL;
  2268. }
  2269. soc_info->irq_count = count;
  2270. for (i = 0; i < soc_info->irq_count; i++) {
  2271. rc = of_property_read_string_index(of_node, "interrupt-names",
  2272. i, &soc_info->irq_name[i]);
  2273. if (rc) {
  2274. CAM_ERR(CAM_UTIL, "failed to read interrupt name at %d", i);
  2275. return rc;
  2276. }
  2277. }
  2278. rc = cam_compat_util_get_irq(soc_info);
  2279. if (rc < 0) {
  2280. CAM_ERR(CAM_UTIL, "get irq resource failed: %d for: %s",
  2281. rc, soc_info->dev_name);
  2282. #ifndef CONFIG_CAM_PRESIL
  2283. return rc;
  2284. #else
2285. /* Pre-sil: assign dummy IRQ lines for new devices not present on older targets */
  2286. for (i = 0; i < soc_info->irq_count; i++) {
  2287. soc_info->irq_line[i] =
  2288. &dummy_irq_line[next_dummy_irq_line_num++];
  2289. CAM_DBG(CAM_PRESIL,
  2290. "interrupt line for dev %s irq name %s number %d",
  2291. soc_info->dev_name, soc_info->irq_name[i],
  2292. soc_info->irq_line[i]->start);
  2293. }
  2294. #endif
  2295. }
  2296. }
  2297. rc = of_property_read_string_index(of_node, "compatible", 0,
  2298. (const char **)&soc_info->compatible);
  2299. if (rc)
  2300. CAM_DBG(CAM_UTIL, "No compatible string present for: %s",
  2301. soc_info->dev_name);
  2302. soc_info->is_nrt_dev = false;
  2303. if (of_property_read_bool(of_node, "nrt-device"))
  2304. soc_info->is_nrt_dev = true;
  2305. CAM_DBG(CAM_UTIL, "Dev %s, nrt_dev %d",
  2306. soc_info->dev_name, soc_info->is_nrt_dev);
  2307. rc = cam_soc_util_get_dt_regulator_info(soc_info);
  2308. if (rc)
  2309. return rc;
  2310. rc = cam_soc_util_get_dt_clk_info(soc_info);
  2311. if (rc)
  2312. return rc;
  2313. rc = cam_soc_util_get_gpio_info(soc_info);
  2314. if (rc)
  2315. return rc;
  2316. if (of_find_property(of_node, "qcom,cam-cx-ipeak", NULL))
  2317. rc = cam_cx_ipeak_register_cx_ipeak(soc_info);
  2318. return rc;
  2319. }
  2320. /**
  2321. * cam_soc_util_get_regulator()
  2322. *
2323. * @brief: Get the named regulator resource
  2324. *
  2325. * @dev: Device associated with regulator
  2326. * @reg: Return pointer to be filled with regulator on success
  2327. * @rgltr_name: Name of regulator to get
  2328. *
  2329. * @return: 0 for Success, negative value for failure
  2330. */
  2331. static int cam_soc_util_get_regulator(struct device *dev,
  2332. struct regulator **reg, const char *rgltr_name)
  2333. {
  2334. int rc = 0;
  2335. *reg = cam_wrapper_regulator_get(dev, rgltr_name);
  2336. if (IS_ERR_OR_NULL(*reg)) {
  2337. rc = PTR_ERR(*reg);
  2338. rc = rc ? rc : -EINVAL;
  2339. CAM_ERR(CAM_UTIL, "Regulator %s get failed %d", rgltr_name, rc);
  2340. *reg = NULL;
  2341. }
  2342. return rc;
  2343. }
  2344. int cam_soc_util_regulator_disable(struct regulator *rgltr,
  2345. const char *rgltr_name, uint32_t rgltr_min_volt,
  2346. uint32_t rgltr_max_volt, uint32_t rgltr_op_mode,
  2347. uint32_t rgltr_delay_ms)
  2348. {
  2349. int32_t rc = 0;
  2350. if (!rgltr) {
  2351. CAM_ERR(CAM_UTIL, "Invalid NULL parameter");
  2352. return -EINVAL;
  2353. }
  2354. rc = cam_wrapper_regulator_disable(rgltr);
  2355. if (rc) {
  2356. CAM_ERR(CAM_UTIL, "%s regulator disable failed", rgltr_name);
  2357. return rc;
  2358. }
  2359. if (rgltr_delay_ms > 20)
  2360. msleep(rgltr_delay_ms);
  2361. else if (rgltr_delay_ms)
  2362. usleep_range(rgltr_delay_ms * 1000,
  2363. (rgltr_delay_ms * 1000) + 1000);
  2364. if (cam_wrapper_regulator_count_voltages(rgltr) > 0) {
  2365. cam_wrapper_regulator_set_load(rgltr, 0);
  2366. cam_wrapper_regulator_set_voltage(rgltr, 0, rgltr_max_volt);
  2367. }
  2368. return rc;
  2369. }
  2370. int cam_soc_util_regulator_enable(struct regulator *rgltr,
  2371. const char *rgltr_name,
  2372. uint32_t rgltr_min_volt, uint32_t rgltr_max_volt,
  2373. uint32_t rgltr_op_mode, uint32_t rgltr_delay)
  2374. {
  2375. int32_t rc = 0;
  2376. if (!rgltr) {
  2377. CAM_ERR(CAM_UTIL, "Invalid NULL parameter");
  2378. return -EINVAL;
  2379. }
  2380. if (cam_wrapper_regulator_count_voltages(rgltr) > 0) {
  2381. CAM_DBG(CAM_UTIL, "[%s] voltage min=%d, max=%d",
  2382. rgltr_name, rgltr_min_volt, rgltr_max_volt);
  2383. rc = cam_wrapper_regulator_set_voltage(
  2384. rgltr, rgltr_min_volt, rgltr_max_volt);
  2385. if (rc) {
  2386. CAM_ERR(CAM_UTIL, "%s set voltage failed", rgltr_name);
  2387. return rc;
  2388. }
  2389. rc = cam_wrapper_regulator_set_load(rgltr, rgltr_op_mode);
  2390. if (rc) {
  2391. CAM_ERR(CAM_UTIL, "%s set optimum mode failed",
  2392. rgltr_name);
  2393. return rc;
  2394. }
  2395. }
  2396. rc = cam_wrapper_regulator_enable(rgltr);
  2397. if (rc) {
  2398. CAM_ERR(CAM_UTIL, "%s regulator_enable failed", rgltr_name);
  2399. return rc;
  2400. }
  2401. if (rgltr_delay > 20)
  2402. msleep(rgltr_delay);
  2403. else if (rgltr_delay)
  2404. usleep_range(rgltr_delay * 1000,
  2405. (rgltr_delay * 1000) + 1000);
  2406. return rc;
  2407. }
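/*
 * Usage sketch (illustrative): the enable/disable helpers are normally used
 * with the per-regulator values parsed from DT, e.g.
 *
 *   rc = cam_soc_util_regulator_enable(soc_info->rgltr[i],
 *       soc_info->rgltr_name[i], soc_info->rgltr_min_volt[i],
 *       soc_info->rgltr_max_volt[i], soc_info->rgltr_op_mode[i],
 *       soc_info->rgltr_delay[i]);
 *
 * A delay above 20 ms uses msleep(); shorter non-zero delays use
 * usleep_range().
 */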
  2408. int cam_soc_util_select_pinctrl_state(struct cam_hw_soc_info *soc_info,
  2409. int pctrl_idx, bool active)
  2410. {
  2411. int rc = 0;
  2412. struct cam_soc_pinctrl_info *pctrl_info = &soc_info->pinctrl_info;
  2413. if (pctrl_idx >= CAM_SOC_MAX_PINCTRL_MAP) {
  2414. CAM_ERR(CAM_UTIL, "Invalid Map idx: %d max supported: %d",
  2415. pctrl_idx, CAM_SOC_MAX_PINCTRL_MAP);
  2416. return -EINVAL;
  2417. }
  2418. if (pctrl_info->pctrl_state[pctrl_idx].gpio_state_active &&
  2419. active &&
  2420. !pctrl_info->pctrl_state[pctrl_idx].is_active) {
  2421. rc = pinctrl_select_state(pctrl_info->pinctrl,
  2422. pctrl_info->pctrl_state[pctrl_idx].gpio_state_active);
  2423. if (rc)
  2424. CAM_ERR(CAM_UTIL,
  2425. "Pinctrl active state transition failed: rc: %d",
  2426. rc);
  2427. else {
  2428. pctrl_info->pctrl_state[pctrl_idx].is_active = true;
  2429. CAM_DBG(CAM_UTIL, "Pctrl_idx: %d is in active state",
  2430. pctrl_idx);
  2431. }
  2432. }
  2433. if (pctrl_info->pctrl_state[pctrl_idx].gpio_state_suspend &&
  2434. !active &&
  2435. pctrl_info->pctrl_state[pctrl_idx].is_active) {
  2436. rc = pinctrl_select_state(pctrl_info->pinctrl,
  2437. pctrl_info->pctrl_state[pctrl_idx].gpio_state_suspend);
  2438. if (rc)
  2439. CAM_ERR(CAM_UTIL,
  2440. "Pinctrl suspend state transition failed: rc: %d",
  2441. rc);
  2442. else {
  2443. pctrl_info->pctrl_state[pctrl_idx].is_active = false;
  2444. CAM_DBG(CAM_UTIL, "Pctrl_idx: %d is in suspend state",
  2445. pctrl_idx);
  2446. }
  2447. }
  2448. return rc;
  2449. }
  2450. static int cam_soc_util_request_pinctrl(
  2451. struct cam_hw_soc_info *soc_info)
  2452. {
  2453. struct cam_soc_pinctrl_info *device_pctrl = &soc_info->pinctrl_info;
  2454. struct device *dev = soc_info->dev;
  2455. struct device_node *of_node = dev->of_node;
  2456. uint32_t i = 0;
  2457. int rc = 0;
  2458. const char *name;
  2459. uint32_t idx;
  2460. char pctrl_active[50];
  2461. char pctrl_suspend[50];
  2462. int32_t num_of_map_idx = 0;
  2463. int32_t num_of_string = 0;
  2464. device_pctrl->pinctrl = devm_pinctrl_get(dev);
  2465. if (IS_ERR_OR_NULL(device_pctrl->pinctrl)) {
  2466. CAM_DBG(CAM_UTIL, "Pinctrl not available");
  2467. device_pctrl->pinctrl = NULL;
  2468. return 0;
  2469. }
  2470. num_of_map_idx = of_property_count_u32_elems(
  2471. of_node, "pctrl-idx-mapping");
  2472. if (num_of_map_idx <= 0) {
  2473. CAM_ERR(CAM_UTIL,
  2474. "Reading pctrl-idx-mapping failed");
  2475. return -EINVAL;
  2476. }
  2477. num_of_string = of_property_count_strings(
  2478. of_node, "pctrl-map-names");
  2479. if (num_of_string <= 0) {
  2480. CAM_ERR(CAM_UTIL, "no pinctrl-mapping found for: %s",
  2481. soc_info->dev_name);
  2482. device_pctrl->pinctrl = NULL;
  2483. return -EINVAL;
  2484. }
  2485. if (num_of_map_idx != num_of_string) {
  2486. CAM_ERR(CAM_UTIL,
  2487. "Incorrect inputs mapping-idx count: %d mapping-names: %d",
  2488. num_of_map_idx, num_of_string);
  2489. device_pctrl->pinctrl = NULL;
  2490. return -EINVAL;
  2491. }
  2492. if (num_of_map_idx > CAM_SOC_MAX_PINCTRL_MAP) {
  2493. CAM_ERR(CAM_UTIL, "Invalid mapping %u max supported: %d",
  2494. num_of_map_idx, CAM_SOC_MAX_PINCTRL_MAP);
  2495. return -EINVAL;
  2496. }
  2497. for (i = 0; i < num_of_map_idx; i++) {
  2498. of_property_read_u32_index(of_node,
  2499. "pctrl-idx-mapping", i, &idx);
  2500. if (idx >= CAM_SOC_MAX_PINCTRL_MAP) {
  2501. CAM_ERR(CAM_UTIL, "Invalid Index: %d max supported: %d",
  2502. idx, CAM_SOC_MAX_PINCTRL_MAP);
  2503. return -EINVAL;
  2504. }
  2505. rc = of_property_read_string_index(
  2506. of_node, "pctrl-map-names", i, &name);
  2507. if (rc) {
  2508. CAM_ERR(CAM_UTIL,
  2509. "failed to read pinctrl-mapping at %d", i);
  2510. return rc;
  2511. }
  2512. snprintf(pctrl_active, sizeof(pctrl_active),
  2513. "%s%s", name, "_active");
  2514. CAM_DBG(CAM_UTIL, "pctrl_active at index: %d name: %s",
  2515. i, pctrl_active);
  2516. snprintf(pctrl_suspend, sizeof(pctrl_suspend),
  2517. "%s%s", name, "_suspend");
  2518. CAM_DBG(CAM_UTIL, "pctrl_suspend at index: %d name: %s",
  2519. i, pctrl_suspend);
  2520. device_pctrl->pctrl_state[idx].gpio_state_active =
  2521. pinctrl_lookup_state(device_pctrl->pinctrl,
  2522. pctrl_active);
  2523. if (IS_ERR_OR_NULL(
  2524. device_pctrl->pctrl_state[idx].gpio_state_active)) {
  2525. CAM_ERR(CAM_UTIL,
  2526. "Failed to get the active state pinctrl handle");
  2527. device_pctrl->pctrl_state[idx].gpio_state_active =
  2528. NULL;
  2529. return -EINVAL;
  2530. }
  2531. device_pctrl->pctrl_state[idx].gpio_state_suspend =
  2532. pinctrl_lookup_state(device_pctrl->pinctrl,
  2533. pctrl_suspend);
  2534. if (IS_ERR_OR_NULL(
  2535. device_pctrl->pctrl_state[idx].gpio_state_suspend)) {
  2536. CAM_ERR(CAM_UTIL,
  2537. "Failed to get the active state pinctrl handle");
  2538. device_pctrl->pctrl_state[idx].gpio_state_suspend = NULL;
  2539. return -EINVAL;
  2540. }
  2541. }
  2542. return 0;
  2543. }
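/*
 * Illustrative DT sketch for the pinctrl mapping parsed above (names are
 * hypothetical): pctrl-idx-mapping = <0>; pctrl-map-names = "cam_sensor";
 * would look up the pinctrl states "cam_sensor_active" and
 * "cam_sensor_suspend", which can then be selected through
 * cam_soc_util_select_pinctrl_state(soc_info, 0, true/false).
 */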
  2544. static void cam_soc_util_release_pinctrl(struct cam_hw_soc_info *soc_info)
  2545. {
  2546. if (soc_info->pinctrl_info.pinctrl)
  2547. devm_pinctrl_put(soc_info->pinctrl_info.pinctrl);
  2548. }
  2549. static void cam_soc_util_regulator_disable_default(
  2550. struct cam_hw_soc_info *soc_info)
  2551. {
  2552. int j = 0;
  2553. uint32_t num_rgltr = soc_info->num_rgltr;
  2554. for (j = num_rgltr-1; j >= 0; j--) {
  2555. if (soc_info->rgltr_ctrl_support == true) {
  2556. cam_soc_util_regulator_disable(soc_info->rgltr[j],
  2557. soc_info->rgltr_name[j],
  2558. soc_info->rgltr_min_volt[j],
  2559. soc_info->rgltr_max_volt[j],
  2560. soc_info->rgltr_op_mode[j],
  2561. soc_info->rgltr_delay[j]);
  2562. } else {
  2563. if (soc_info->rgltr[j])
  2564. cam_wrapper_regulator_disable(soc_info->rgltr[j]);
  2565. }
  2566. }
  2567. }
  2568. static int cam_soc_util_regulator_enable_default(
  2569. struct cam_hw_soc_info *soc_info)
  2570. {
  2571. int j = 0, rc = 0;
  2572. uint32_t num_rgltr = soc_info->num_rgltr;
  2573. if (num_rgltr > CAM_SOC_MAX_REGULATOR) {
  2574. CAM_ERR(CAM_UTIL,
  2575. "%s has invalid regulator number %d",
  2576. soc_info->dev_name, num_rgltr);
  2577. return -EINVAL;
  2578. }
  2579. for (j = 0; j < num_rgltr; j++) {
  2580. CAM_DBG(CAM_UTIL, "[%s] : start regulator %s enable, rgltr_ctrl_support %d",
  2581. soc_info->dev_name, soc_info->rgltr_name[j], soc_info->rgltr_ctrl_support);
  2582. if (soc_info->rgltr_ctrl_support == true) {
  2583. rc = cam_soc_util_regulator_enable(soc_info->rgltr[j],
  2584. soc_info->rgltr_name[j],
  2585. soc_info->rgltr_min_volt[j],
  2586. soc_info->rgltr_max_volt[j],
  2587. soc_info->rgltr_op_mode[j],
  2588. soc_info->rgltr_delay[j]);
  2589. } else {
  2590. if (soc_info->rgltr[j])
  2591. rc = cam_wrapper_regulator_enable(soc_info->rgltr[j]);
  2592. }
  2593. if (rc) {
  2594. CAM_ERR(CAM_UTIL, "%s enable failed",
  2595. soc_info->rgltr_name[j]);
  2596. goto disable_rgltr;
  2597. }
  2598. }
  2599. return rc;
  2600. disable_rgltr:
  2601. for (j--; j >= 0; j--) {
  2602. if (soc_info->rgltr_ctrl_support == true) {
  2603. cam_soc_util_regulator_disable(soc_info->rgltr[j],
  2604. soc_info->rgltr_name[j],
  2605. soc_info->rgltr_min_volt[j],
  2606. soc_info->rgltr_max_volt[j],
  2607. soc_info->rgltr_op_mode[j],
  2608. soc_info->rgltr_delay[j]);
  2609. } else {
  2610. if (soc_info->rgltr[j])
  2611. cam_wrapper_regulator_disable(soc_info->rgltr[j]);
  2612. }
  2613. }
  2614. return rc;
  2615. }
  2616. static bool cam_soc_util_is_presil_address_space(unsigned long mem_block_start)
  2617. {
  2618. if(mem_block_start >= CAM_SS_START_PRESIL && mem_block_start < CAM_SS_START)
  2619. return true;
  2620. return false;
  2621. }
#ifndef CONFIG_CAM_PRESIL
void __iomem *cam_soc_util_get_mem_base(
	unsigned long mem_block_start,
	unsigned long mem_block_size,
	const char *mem_block_name,
	uint32_t reserve_mem)
{
	void __iomem *mem_base;

	if (reserve_mem) {
		if (!request_mem_region(mem_block_start,
			mem_block_size,
			mem_block_name)) {
			CAM_ERR(CAM_UTIL,
				"Error Mem region request Failed:%s",
				mem_block_name);
			return NULL;
		}
	}

	mem_base = ioremap(mem_block_start, mem_block_size);
	if (!mem_base)
		CAM_ERR(CAM_UTIL, "get mem base failed");

	return mem_base;
}
int cam_soc_util_request_irq(struct device *dev,
	unsigned int irq_line_start,
	irq_handler_t handler,
	unsigned long irqflags,
	const char *irq_name,
	void *irq_data,
	unsigned long mem_block_start)
{
	int rc;

	rc = devm_request_irq(dev,
		irq_line_start,
		handler,
		irqflags,
		irq_name,
		irq_data);
	if (rc) {
		CAM_ERR(CAM_UTIL, "irq request fail rc %d", rc);
		return -EBUSY;
	}

	disable_irq(irq_line_start);

	return rc;
}
#else
void __iomem *cam_soc_util_get_mem_base(
	unsigned long mem_block_start,
	unsigned long mem_block_size,
	const char *mem_block_name,
	uint32_t reserve_mem)
{
	void __iomem *mem_base;

	if (cam_soc_util_is_presil_address_space(mem_block_start)) {
		mem_base = (void __iomem *)mem_block_start;
	} else {
		if (reserve_mem) {
			if (!request_mem_region(mem_block_start,
				mem_block_size,
				mem_block_name)) {
				CAM_ERR(CAM_UTIL,
					"Error Mem region request Failed:%s",
					mem_block_name);
				return NULL;
			}
		}

		mem_base = ioremap(mem_block_start, mem_block_size);
	}

	if (!mem_base)
		CAM_ERR(CAM_UTIL, "get mem base failed");

	return mem_base;
}

int cam_soc_util_request_irq(struct device *dev,
	unsigned int irq_line_start,
	irq_handler_t handler,
	unsigned long irqflags,
	const char *irq_name,
	void *irq_data,
	unsigned long mem_block_start)
{
	int rc;

	if (cam_soc_util_is_presil_address_space(mem_block_start)) {
		rc = devm_request_irq(dev,
			irq_line_start,
			handler,
			irqflags,
			irq_name,
			irq_data);
		if (rc) {
			CAM_ERR(CAM_UTIL, "presil irq request fail");
			return -EBUSY;
		}

		disable_irq(irq_line_start);

		rc = !(cam_presil_subscribe_device_irq(irq_line_start,
			handler, irq_data, irq_name));
		CAM_DBG(CAM_PRESIL, "Subscribe presil IRQ: rc=%d NUM=%d Name=%s handler=%pK",
			rc, irq_line_start, irq_name, handler);
		if (rc) {
			CAM_ERR(CAM_UTIL, "presil irq request fail");
			return -EBUSY;
		}
	} else {
		rc = devm_request_irq(dev,
			irq_line_start,
			handler,
			irqflags,
			irq_name,
			irq_data);
		if (rc) {
			CAM_ERR(CAM_UTIL, "irq request fail");
			return -EBUSY;
		}

		disable_irq(irq_line_start);
		CAM_INFO(CAM_UTIL, "Subscribe for non-presil IRQ success");
	}

	CAM_INFO(CAM_UTIL, "returning IRQ for mem_block_start 0x%lx rc %d",
		mem_block_start, rc);

	return rc;
}
#endif
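/*
 * Usage sketch (illustrative only; the register base, size, names and IRQ
 * handler below are hypothetical, not from this file):
 *
 *	void __iomem *base;
 *
 *	base = cam_soc_util_get_mem_base(0x0ac00000, 0x1000, "cam_blk", 0);
 *	if (!base)
 *		return -ENOMEM;
 *	rc = cam_soc_util_request_irq(dev, irq_num, cam_blk_irq_handler,
 *		IRQF_TRIGGER_RISING, "cam_blk", irq_priv, 0x0ac00000);
 *
 * The IRQ line is left disabled on return and is expected to be enabled
 * later through cam_soc_util_enable_platform_resource().
 */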
  2744. int cam_soc_util_request_platform_resource(
  2745. struct cam_hw_soc_info *soc_info,
  2746. irq_handler_t handler, void **irq_data)
  2747. {
  2748. int i = 0, rc = 0;
  2749. if (!soc_info || !soc_info->dev) {
  2750. CAM_ERR(CAM_UTIL, "Invalid parameters");
  2751. return -EINVAL;
  2752. }
  2753. if (unlikely(soc_info->irq_count > CAM_SOC_MAX_IRQ_LINES_PER_DEV)) {
  2754. CAM_ERR(CAM_UTIL, "Invalid irq count: %u Max IRQ per device: %d",
  2755. soc_info->irq_count, CAM_SOC_MAX_IRQ_LINES_PER_DEV);
  2756. return -EINVAL;
  2757. }
  2758. for (i = 0; i < soc_info->num_mem_block; i++) {
  2759. soc_info->reg_map[i].mem_base = cam_soc_util_get_mem_base(
  2760. soc_info->mem_block[i]->start,
  2761. resource_size(soc_info->mem_block[i]),
  2762. soc_info->mem_block_name[i],
  2763. soc_info->reserve_mem);
  2764. if (!soc_info->reg_map[i].mem_base) {
  2765. CAM_ERR(CAM_UTIL, "i= %d base NULL", i);
  2766. rc = -ENOMEM;
  2767. goto unmap_base;
  2768. }
  2769. soc_info->reg_map[i].mem_cam_base =
  2770. soc_info->mem_block_cam_base[i];
  2771. soc_info->reg_map[i].size =
  2772. resource_size(soc_info->mem_block[i]);
  2773. soc_info->num_reg_map++;
  2774. }
  2775. for (i = 0; i < soc_info->num_rgltr; i++) {
		if (soc_info->rgltr_name[i] == NULL) {
			CAM_ERR(CAM_UTIL, "can't find regulator name");
			rc = -EINVAL;
			goto put_regulator;
		}
  2780. rc = cam_soc_util_get_regulator(soc_info->dev,
  2781. &soc_info->rgltr[i],
  2782. soc_info->rgltr_name[i]);
  2783. if (rc)
  2784. goto put_regulator;
  2785. }
  2786. for (i = 0; i < soc_info->irq_count; i++) {
  2787. rc = cam_soc_util_request_irq(soc_info->dev, soc_info->irq_num[i],
  2788. handler, IRQF_TRIGGER_RISING, soc_info->irq_name[i],
  2789. irq_data[i], soc_info->mem_block[0]->start);
  2790. if (rc) {
  2791. CAM_ERR(CAM_UTIL, "irq request fail for irq name: %s dev: %s",
  2792. soc_info->irq_name[i], soc_info->dev_name);
  2793. rc = -EBUSY;
  2794. goto put_irq;
  2795. }
  2796. soc_info->irq_data[i] = irq_data[i];
  2797. }
  2798. /* Get Clock */
  2799. for (i = 0; i < soc_info->num_clk; i++) {
  2800. soc_info->clk[i] = cam_wrapper_clk_get(soc_info->dev,
  2801. soc_info->clk_name[i]);
  2802. if (IS_ERR(soc_info->clk[i])) {
  2803. CAM_ERR(CAM_UTIL, "get failed for %s",
  2804. soc_info->clk_name[i]);
  2805. rc = -ENOENT;
  2806. goto put_clk;
  2807. } else if (!soc_info->clk[i]) {
  2808. CAM_DBG(CAM_UTIL, "%s handle is NULL skip get",
  2809. soc_info->clk_name[i]);
  2810. continue;
  2811. }
  2812. /* Create a wrapper entry if this is a shared clock */
  2813. if (CAM_IS_BIT_SET(soc_info->shared_clk_mask, i)) {
  2814. uint32_t min_level = soc_info->lowest_clk_level;
  2815. CAM_DBG(CAM_UTIL,
  2816. "Dev %s, clk %s, id %d register wrapper entry for shared clk",
  2817. soc_info->dev_name, soc_info->clk_name[i],
  2818. soc_info->clk_id[i]);
  2819. rc = cam_soc_util_clk_wrapper_register_entry(
  2820. soc_info->clk_id[i], soc_info->clk[i],
  2821. (i == soc_info->src_clk_idx) ? true : false,
  2822. soc_info, soc_info->clk_rate[min_level][i],
  2823. soc_info->clk_name[i]);
  2824. if (rc) {
  2825. CAM_ERR(CAM_UTIL,
  2826. "Failed in registering shared clk Dev %s id %d",
  2827. soc_info->dev_name,
  2828. soc_info->clk_id[i]);
  2829. cam_wrapper_clk_put(soc_info->clk[i]);
  2830. soc_info->clk[i] = NULL;
  2831. goto put_clk;
  2832. }
  2833. } else if (i == soc_info->src_clk_idx) {
  2834. rc = cam_soc_util_register_mmrm_client(
  2835. soc_info->clk_id[i], soc_info->clk[i],
  2836. soc_info->is_nrt_dev,
  2837. soc_info, soc_info->clk_name[i],
  2838. &soc_info->mmrm_handle);
  2839. if (rc) {
  2840. CAM_ERR(CAM_UTIL,
  2841. "Failed in register mmrm client Dev %s clk id %d",
  2842. soc_info->dev_name,
  2843. soc_info->clk_id[i]);
  2844. cam_wrapper_clk_put(soc_info->clk[i]);
  2845. soc_info->clk[i] = NULL;
  2846. goto put_clk;
  2847. }
  2848. }
  2849. }
  2850. rc = cam_soc_util_request_pinctrl(soc_info);
  2851. if (rc) {
  2852. CAM_ERR(CAM_UTIL, "Failed in requesting Pinctrl, rc: %d", rc);
  2853. goto put_clk;
  2854. }
  2855. rc = cam_soc_util_request_gpio_table(soc_info, true);
  2856. if (rc) {
  2857. CAM_ERR(CAM_UTIL, "Failed in request gpio table, rc=%d", rc);
  2858. goto put_clk;
  2859. }
  2860. if (soc_info->clk_control_enable)
  2861. cam_soc_util_create_clk_lvl_debugfs(soc_info);
  2862. return rc;
  2863. put_clk:
  2864. if (soc_info->mmrm_handle) {
  2865. cam_soc_util_unregister_mmrm_client(soc_info->mmrm_handle);
  2866. soc_info->mmrm_handle = NULL;
  2867. }
  2868. for (i = i - 1; i >= 0; i--) {
  2869. if (soc_info->clk[i]) {
  2870. if (CAM_IS_BIT_SET(soc_info->shared_clk_mask, i))
  2871. cam_soc_util_clk_wrapper_unregister_entry(
  2872. soc_info->clk_id[i], soc_info);
  2873. cam_wrapper_clk_put(soc_info->clk[i]);
  2874. soc_info->clk[i] = NULL;
  2875. }
  2876. }
  2877. put_irq:
  2878. if (i == -1)
  2879. i = soc_info->irq_count;
  2880. for (i = i - 1; i >= 0; i--) {
  2881. if (soc_info->irq_num[i] > 0)
  2882. disable_irq(soc_info->irq_num[i]);
  2883. }
  2884. put_regulator:
  2885. if (i == -1)
  2886. i = soc_info->num_rgltr;
  2887. for (i = i - 1; i >= 0; i--) {
  2888. if (soc_info->rgltr[i]) {
  2889. cam_wrapper_regulator_disable(soc_info->rgltr[i]);
  2890. cam_wrapper_regulator_put(soc_info->rgltr[i]);
  2891. soc_info->rgltr[i] = NULL;
  2892. }
  2893. }
  2894. unmap_base:
  2895. if (i == -1)
  2896. i = soc_info->num_reg_map;
  2897. for (i = i - 1; i >= 0; i--) {
  2898. if (soc_info->reserve_mem)
  2899. release_mem_region(soc_info->mem_block[i]->start,
  2900. resource_size(soc_info->mem_block[i]));
  2901. iounmap(soc_info->reg_map[i].mem_base);
  2902. soc_info->reg_map[i].mem_base = NULL;
  2903. soc_info->reg_map[i].size = 0;
  2904. }
  2905. return rc;
  2906. }
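/*
 * Probe-time usage sketch (illustrative only; the handler name,
 * irq_data_ptrs array, cesta_client_idx and vote level are placeholders
 * chosen by the calling driver):
 *
 *	rc = cam_soc_util_request_platform_resource(soc_info,
 *		cam_blk_irq_handler, irq_data_ptrs);
 *	if (rc)
 *		return rc;
 *	rc = cam_soc_util_enable_platform_resource(soc_info,
 *		cesta_client_idx, true, CAM_SVS_VOTE, true);
 *
 * The request call only maps register blocks and acquires clock, regulator,
 * pinctrl and GPIO handles; nothing is powered or clocked until the enable
 * call.
 */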
  2907. int cam_soc_util_release_platform_resource(struct cam_hw_soc_info *soc_info)
  2908. {
  2909. int i;
  2910. bool b_ret = false;
  2911. if (!soc_info || !soc_info->dev) {
  2912. CAM_ERR(CAM_UTIL, "Invalid parameter");
  2913. return -EINVAL;
  2914. }
  2915. if (soc_info->mmrm_handle) {
  2916. cam_soc_util_unregister_mmrm_client(soc_info->mmrm_handle);
  2917. soc_info->mmrm_handle = NULL;
  2918. }
  2919. for (i = soc_info->num_clk - 1; i >= 0; i--) {
  2920. if (CAM_IS_BIT_SET(soc_info->shared_clk_mask, i))
  2921. cam_soc_util_clk_wrapper_unregister_entry(
  2922. soc_info->clk_id[i], soc_info);
  2923. if (!soc_info->clk[i]) {
  2924. CAM_DBG(CAM_UTIL, "%s handle is NULL skip put",
  2925. soc_info->clk_name[i]);
  2926. continue;
  2927. }
  2928. cam_wrapper_clk_put(soc_info->clk[i]);
  2929. soc_info->clk[i] = NULL;
  2930. }
  2931. for (i = soc_info->num_rgltr - 1; i >= 0; i--) {
  2932. if (soc_info->rgltr[i]) {
  2933. cam_wrapper_regulator_put(soc_info->rgltr[i]);
  2934. soc_info->rgltr[i] = NULL;
  2935. }
  2936. }
  2937. for (i = soc_info->num_reg_map - 1; i >= 0; i--) {
  2938. iounmap(soc_info->reg_map[i].mem_base);
  2939. soc_info->reg_map[i].mem_base = NULL;
  2940. soc_info->reg_map[i].size = 0;
  2941. }
	for (i = soc_info->irq_count - 1; i >= 0; i--) {
  2943. if (soc_info->irq_num[i] > 0) {
  2944. if (cam_presil_mode_enabled()) {
  2945. if (cam_soc_util_is_presil_address_space(
  2946. soc_info->mem_block[0]->start)) {
  2947. b_ret = cam_presil_unsubscribe_device_irq(
  2948. soc_info->irq_line[i]->start);
  2949. CAM_DBG(CAM_PRESIL,
  2950. "UnSubscribe IRQ: Ret=%d NUM=%d Name=%s",
  2951. b_ret, soc_info->irq_line[i]->start,
  2952. soc_info->irq_name[i]);
  2953. }
  2954. }
  2955. disable_irq(soc_info->irq_num[i]);
  2956. }
  2957. }
  2958. cam_soc_util_release_pinctrl(soc_info);
  2959. /* release for gpio */
  2960. cam_soc_util_request_gpio_table(soc_info, false);
  2961. soc_info->dentry = NULL;
  2962. return 0;
  2963. }
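/*
 * Enable order below is regulators, then clocks, then IRQ lines; on failure
 * the steps already taken are undone in reverse order so the device is left
 * fully powered down.
 */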
  2964. int cam_soc_util_enable_platform_resource(struct cam_hw_soc_info *soc_info,
  2965. int cesta_client_idx, bool enable_clocks, enum cam_vote_level clk_level,
  2966. bool irq_enable)
  2967. {
  2968. int rc = 0, i;
  2969. if (!soc_info)
  2970. return -EINVAL;
  2971. rc = cam_soc_util_regulator_enable_default(soc_info);
  2972. if (rc) {
  2973. CAM_ERR(CAM_UTIL, "Regulators enable failed");
  2974. return rc;
  2975. }
  2976. if (enable_clocks) {
  2977. rc = cam_soc_util_clk_enable_default(soc_info, cesta_client_idx, clk_level);
  2978. if (rc)
  2979. goto disable_regulator;
  2980. }
  2981. if (irq_enable) {
  2982. for (i = 0; i < soc_info->irq_count; i++) {
  2983. if (soc_info->irq_num[i] < 0) {
  2984. CAM_ERR(CAM_UTIL, "No IRQ line available for irq: %s dev: %s",
  2985. soc_info->irq_name[i], soc_info->dev_name);
  2986. rc = -ENODEV;
  2987. goto disable_irq;
  2988. }
  2989. enable_irq(soc_info->irq_num[i]);
  2990. }
  2991. }
  2992. return rc;
  2993. disable_irq:
  2994. if (irq_enable) {
  2995. for (i = i - 1; i >= 0; i--)
  2996. disable_irq(soc_info->irq_num[i]);
  2997. }
  2998. if (enable_clocks)
  2999. cam_soc_util_clk_disable_default(soc_info, cesta_client_idx);
  3000. disable_regulator:
  3001. cam_soc_util_regulator_disable_default(soc_info);
  3002. return rc;
  3003. }
int cam_soc_util_disable_platform_resource(struct cam_hw_soc_info *soc_info,
	int cesta_client_idx, bool disable_clocks, bool disable_irq)
{
	int rc = 0;

	if (!soc_info)
		return -EINVAL;

	if (disable_irq)
		rc |= cam_soc_util_irq_disable(soc_info);

	if (disable_clocks)
		cam_soc_util_clk_disable_default(soc_info, cesta_client_idx);

	cam_soc_util_regulator_disable_default(soc_info);

	return rc;
}
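/*
 * Teardown sketch (illustrative only), mirroring the probe-time sequence:
 *
 *	cam_soc_util_disable_platform_resource(soc_info, cesta_client_idx,
 *		true, true);
 *	cam_soc_util_release_platform_resource(soc_info);
 *
 * disable drops IRQs, clocks and regulators; release then puts the clock,
 * regulator, pinctrl and GPIO handles and unmaps the register blocks.
 */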
int cam_soc_util_reg_dump(struct cam_hw_soc_info *soc_info,
	uint32_t base_index, uint32_t offset, int size)
{
	void __iomem *base_addr = NULL;

	CAM_DBG(CAM_UTIL, "base_idx %u size=%d", base_index, size);

	if (!soc_info || base_index >= soc_info->num_reg_map ||
		size <= 0 || (offset + size) >=
		CAM_SOC_GET_REG_MAP_SIZE(soc_info, base_index))
		return -EINVAL;

	base_addr = CAM_SOC_GET_REG_MAP_START(soc_info, base_index);

	/*
	 * All error checking already done above,
	 * hence ignoring the return value below.
	 */
	cam_io_dump(base_addr, offset, size);

	return 0;
}
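/*
 * Usage sketch (illustrative only; the base index, offset and size are
 * hypothetical and must stay within the mapped register block):
 *
 *	rc = cam_soc_util_reg_dump(soc_info, 0, 0x100, 0x40);
 *
 * dumps a small register window from the first mapped block of the device
 * into the kernel log via cam_io_dump().
 */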
  3034. static int cam_soc_util_dump_cont_reg_range(
  3035. struct cam_hw_soc_info *soc_info,
  3036. struct cam_reg_range_read_desc *reg_read, uint32_t base_idx,
  3037. struct cam_reg_dump_out_buffer *dump_out_buf, uintptr_t cmd_buf_end)
  3038. {
  3039. int i = 0, rc = 0, val = 0;
  3040. uint32_t write_idx = 0;
  3041. if (!soc_info || !dump_out_buf || !reg_read || !cmd_buf_end) {
  3042. CAM_ERR(CAM_UTIL,
  3043. "Invalid input args soc_info: %pK, dump_out_buffer: %pK reg_read: %pK cmd_buf_end: %pK",
  3044. soc_info, dump_out_buf, reg_read, cmd_buf_end);
  3045. rc = -EINVAL;
  3046. goto end;
  3047. }
  3048. if ((reg_read->num_values) && ((reg_read->num_values > U32_MAX / 2) ||
  3049. (sizeof(uint32_t) > ((U32_MAX -
  3050. sizeof(struct cam_reg_dump_out_buffer) -
  3051. dump_out_buf->bytes_written) /
  3052. (reg_read->num_values * 2))))) {
  3053. CAM_ERR(CAM_UTIL,
  3054. "Integer Overflow bytes_written: [%u] num_values: [%u]",
  3055. dump_out_buf->bytes_written, reg_read->num_values);
  3056. rc = -EOVERFLOW;
  3057. goto end;
  3058. }
  3059. if ((cmd_buf_end - (uintptr_t)dump_out_buf) <=
  3060. (uintptr_t)(sizeof(struct cam_reg_dump_out_buffer)
  3061. - sizeof(uint32_t) + dump_out_buf->bytes_written +
  3062. (reg_read->num_values * 2 * sizeof(uint32_t)))) {
  3063. CAM_ERR(CAM_UTIL,
  3064. "Insufficient space in out buffer num_values: [%d] cmd_buf_end: %pK dump_out_buf: %pK",
  3065. reg_read->num_values, cmd_buf_end,
  3066. (uintptr_t)dump_out_buf);
  3067. rc = -EINVAL;
  3068. goto end;
  3069. }
  3070. write_idx = dump_out_buf->bytes_written / sizeof(uint32_t);
  3071. for (i = 0; i < reg_read->num_values; i++) {
  3072. val = cam_soc_util_r(soc_info, base_idx,
  3073. (reg_read->offset + (i * sizeof(uint32_t))));
  3074. if (!val)
  3075. CAM_WARN(CAM_UTIL, "Possibly fails to read");
  3076. dump_out_buf->dump_data[write_idx++] = reg_read->offset +
  3077. (i * sizeof(uint32_t));
  3078. dump_out_buf->dump_data[write_idx++] = val;
  3079. dump_out_buf->bytes_written += (2 * sizeof(uint32_t));
  3080. }
  3081. end:
  3082. return rc;
  3083. }
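/*
 * DMI dumps below follow a pre-write / read / post-write sequence: each
 * pre_read_config (offset, value) pair is written first, dmi_data_read.offset
 * is then read num_values times, and finally the post_read_config pairs are
 * written to restore the block. Every pre-write and every read is recorded
 * in the out buffer as an (offset, value) word pair.
 */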
  3084. static int cam_soc_util_dump_dmi_reg_range(
  3085. struct cam_hw_soc_info *soc_info,
  3086. struct cam_dmi_read_desc *dmi_read, uint32_t base_idx,
  3087. struct cam_reg_dump_out_buffer *dump_out_buf, uintptr_t cmd_buf_end)
  3088. {
  3089. int i = 0, rc = 0, val = 0;
  3090. uint32_t write_idx = 0;
  3091. if (!soc_info || !dump_out_buf || !dmi_read || !cmd_buf_end) {
  3092. CAM_ERR(CAM_UTIL,
  3093. "Invalid input args soc_info: %pK, dump_out_buffer: %pK",
  3094. soc_info, dump_out_buf);
  3095. rc = -EINVAL;
  3096. goto end;
  3097. }
  3098. if (dmi_read->num_pre_writes > CAM_REG_DUMP_DMI_CONFIG_MAX ||
  3099. dmi_read->num_post_writes > CAM_REG_DUMP_DMI_CONFIG_MAX) {
  3100. CAM_ERR(CAM_UTIL,
  3101. "Invalid number of requested writes, pre: %d post: %d",
  3102. dmi_read->num_pre_writes, dmi_read->num_post_writes);
  3103. rc = -EINVAL;
  3104. goto end;
  3105. }
  3106. if ((dmi_read->num_pre_writes + dmi_read->dmi_data_read.num_values)
  3107. && ((dmi_read->num_pre_writes > U32_MAX / 2) ||
  3108. (dmi_read->dmi_data_read.num_values > U32_MAX / 2) ||
  3109. ((dmi_read->num_pre_writes * 2) > U32_MAX -
  3110. (dmi_read->dmi_data_read.num_values * 2)) ||
  3111. (sizeof(uint32_t) > ((U32_MAX -
  3112. sizeof(struct cam_reg_dump_out_buffer) -
  3113. dump_out_buf->bytes_written) / ((dmi_read->num_pre_writes +
  3114. dmi_read->dmi_data_read.num_values) * 2))))) {
  3115. CAM_ERR(CAM_UTIL,
  3116. "Integer Overflow bytes_written: [%u] num_pre_writes: [%u] num_values: [%u]",
  3117. dump_out_buf->bytes_written, dmi_read->num_pre_writes,
  3118. dmi_read->dmi_data_read.num_values);
  3119. rc = -EOVERFLOW;
  3120. goto end;
  3121. }
  3122. if ((cmd_buf_end - (uintptr_t)dump_out_buf) <=
  3123. (uintptr_t)(
  3124. sizeof(struct cam_reg_dump_out_buffer) - sizeof(uint32_t) +
  3125. (dump_out_buf->bytes_written +
  3126. (dmi_read->num_pre_writes * 2 * sizeof(uint32_t)) +
  3127. (dmi_read->dmi_data_read.num_values * 2 *
  3128. sizeof(uint32_t))))) {
  3129. CAM_ERR(CAM_UTIL,
  3130. "Insufficient space in out buffer num_read_val: [%d] num_write_val: [%d] cmd_buf_end: %pK dump_out_buf: %pK",
  3131. dmi_read->dmi_data_read.num_values,
  3132. dmi_read->num_pre_writes, cmd_buf_end,
  3133. (uintptr_t)dump_out_buf);
  3134. rc = -EINVAL;
  3135. goto end;
  3136. }
  3137. write_idx = dump_out_buf->bytes_written / sizeof(uint32_t);
  3138. for (i = 0; i < dmi_read->num_pre_writes; i++) {
  3139. rc = cam_soc_util_w_mb(soc_info, base_idx,
  3140. dmi_read->pre_read_config[i].offset,
  3141. dmi_read->pre_read_config[i].value);
  3142. if (rc) {
  3143. CAM_ERR(CAM_UTIL, "Fails to write for pre_read_config");
  3144. goto end;
  3145. }
  3146. dump_out_buf->dump_data[write_idx++] =
  3147. dmi_read->pre_read_config[i].offset;
  3148. dump_out_buf->dump_data[write_idx++] =
  3149. dmi_read->pre_read_config[i].value;
  3150. dump_out_buf->bytes_written += (2 * sizeof(uint32_t));
  3151. }
  3152. for (i = 0; i < dmi_read->dmi_data_read.num_values; i++) {
  3153. val = cam_soc_util_r_mb(soc_info, base_idx,
  3154. dmi_read->dmi_data_read.offset);
  3155. if (!val)
  3156. CAM_WARN(CAM_UTIL, "Possibly fails to read for dmi_data_read");
  3157. dump_out_buf->dump_data[write_idx++] =
  3158. dmi_read->dmi_data_read.offset;
  3159. dump_out_buf->dump_data[write_idx++] = val;
  3160. dump_out_buf->bytes_written += (2 * sizeof(uint32_t));
  3161. }
  3162. for (i = 0; i < dmi_read->num_post_writes; i++) {
  3163. rc = cam_soc_util_w_mb(soc_info, base_idx,
  3164. dmi_read->post_read_config[i].offset,
  3165. dmi_read->post_read_config[i].value);
  3166. if (rc) {
  3167. CAM_ERR(CAM_UTIL, "Fails to write for post_read_config");
  3168. goto end;
  3169. }
  3170. }
  3171. end:
  3172. return rc;
  3173. }
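/*
 * The user-buffer variants below emit one cam_hw_soc_dump_header (tagged
 * "DMI_DUMP:" or "<dev>_REG:") followed by the soc_info index and then
 * (offset, value) word pairs; dump_args->offset is advanced past the header
 * and payload so successive ranges append to the same user buffer.
 */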
  3174. static int cam_soc_util_dump_dmi_reg_range_user_buf(
  3175. struct cam_hw_soc_info *soc_info,
  3176. struct cam_dmi_read_desc *dmi_read, uint32_t base_idx,
  3177. struct cam_hw_soc_dump_args *dump_args)
  3178. {
  3179. int i;
  3180. int rc;
  3181. int val = 0;
  3182. size_t buf_len = 0;
  3183. uint8_t *dst;
  3184. size_t remain_len;
  3185. uint32_t min_len;
  3186. uint32_t *waddr, *start;
  3187. uintptr_t cpu_addr;
  3188. struct cam_hw_soc_dump_header *hdr;
  3189. if (!soc_info || !dump_args || !dmi_read) {
  3190. CAM_ERR(CAM_UTIL,
  3191. "Invalid input args soc_info: %pK, dump_args: %pK",
  3192. soc_info, dump_args);
  3193. return -EINVAL;
  3194. }
  3195. if (dmi_read->num_pre_writes > CAM_REG_DUMP_DMI_CONFIG_MAX ||
  3196. dmi_read->num_post_writes > CAM_REG_DUMP_DMI_CONFIG_MAX) {
  3197. CAM_ERR(CAM_UTIL,
  3198. "Invalid number of requested writes, pre: %d post: %d",
  3199. dmi_read->num_pre_writes, dmi_read->num_post_writes);
  3200. return -EINVAL;
  3201. }
  3202. rc = cam_mem_get_cpu_buf(dump_args->buf_handle, &cpu_addr, &buf_len);
  3203. if (rc) {
  3204. CAM_ERR(CAM_UTIL, "Invalid handle %u rc %d",
  3205. dump_args->buf_handle, rc);
  3206. return rc;
  3207. }
  3208. if (buf_len <= dump_args->offset) {
  3209. CAM_WARN(CAM_UTIL, "Dump offset overshoot offset %zu len %zu",
  3210. dump_args->offset, buf_len);
  3211. rc = -ENOSPC;
  3212. goto end;
  3213. }
  3214. remain_len = buf_len - dump_args->offset;
  3215. min_len = (dmi_read->num_pre_writes * 2 * sizeof(uint32_t)) +
  3216. (dmi_read->dmi_data_read.num_values * 2 * sizeof(uint32_t)) +
  3217. sizeof(uint32_t);
  3218. if (remain_len < min_len) {
  3219. CAM_WARN(CAM_UTIL,
  3220. "Dump Buffer exhaust read %d write %d remain %zu min %u",
  3221. dmi_read->dmi_data_read.num_values,
  3222. dmi_read->num_pre_writes, remain_len,
  3223. min_len);
  3224. rc = -ENOSPC;
  3225. goto end;
  3226. }
  3227. dst = (uint8_t *)cpu_addr + dump_args->offset;
  3228. hdr = (struct cam_hw_soc_dump_header *)dst;
  3229. memset(hdr, 0, sizeof(struct cam_hw_soc_dump_header));
  3230. scnprintf(hdr->tag, CAM_SOC_HW_DUMP_TAG_MAX_LEN,
  3231. "DMI_DUMP:");
  3232. waddr = (uint32_t *)(dst + sizeof(struct cam_hw_soc_dump_header));
  3233. start = waddr;
  3234. hdr->word_size = sizeof(uint32_t);
  3235. *waddr = soc_info->index;
  3236. waddr++;
  3237. for (i = 0; i < dmi_read->num_pre_writes; i++) {
  3238. rc = cam_soc_util_w_mb(soc_info, base_idx,
  3239. dmi_read->pre_read_config[i].offset,
  3240. dmi_read->pre_read_config[i].value);
  3241. if (rc) {
  3242. CAM_ERR(CAM_UTIL, "Fails to write for pre_read_config");
  3243. goto end;
  3244. }
  3245. *waddr++ = dmi_read->pre_read_config[i].offset;
  3246. *waddr++ = dmi_read->pre_read_config[i].value;
  3247. }
  3248. for (i = 0; i < dmi_read->dmi_data_read.num_values; i++) {
  3249. val = cam_soc_util_r_mb(soc_info, base_idx,
  3250. dmi_read->dmi_data_read.offset);
  3251. if (!val)
  3252. CAM_WARN(CAM_UTIL, "Possibly fails to read for dmi_data_read");
  3253. *waddr++ = dmi_read->dmi_data_read.offset;
  3254. *waddr++ = val;
  3255. }
  3256. for (i = 0; i < dmi_read->num_post_writes; i++) {
  3257. rc = cam_soc_util_w_mb(soc_info, base_idx,
  3258. dmi_read->post_read_config[i].offset,
  3259. dmi_read->post_read_config[i].value);
  3260. if (rc) {
  3261. CAM_ERR(CAM_UTIL, "Fails to write for post_read_config");
  3262. goto end;
  3263. }
  3264. }
  3265. hdr->size = (waddr - start) * hdr->word_size;
  3266. dump_args->offset += hdr->size +
  3267. sizeof(struct cam_hw_soc_dump_header);
  3268. end:
  3269. cam_mem_put_cpu_buf(dump_args->buf_handle);
  3270. return rc;
  3271. }
  3272. static int cam_soc_util_dump_cont_reg_range_user_buf(
  3273. struct cam_hw_soc_info *soc_info,
  3274. struct cam_reg_range_read_desc *reg_read,
  3275. uint32_t base_idx,
  3276. struct cam_hw_soc_dump_args *dump_args)
  3277. {
  3278. int i;
  3279. int rc = 0, val = 0;
  3280. size_t buf_len;
  3281. uint8_t *dst;
  3282. size_t remain_len;
  3283. uint32_t min_len;
  3284. uint32_t *waddr, *start;
  3285. uintptr_t cpu_addr;
  3286. struct cam_hw_soc_dump_header *hdr;
  3287. if (!soc_info || !dump_args || !reg_read) {
  3288. CAM_ERR(CAM_UTIL,
  3289. "Invalid input args soc_info: %pK, dump_out_buffer: %pK reg_read: %pK",
  3290. soc_info, dump_args, reg_read);
  3291. return -EINVAL;
  3292. }
  3293. rc = cam_mem_get_cpu_buf(dump_args->buf_handle, &cpu_addr, &buf_len);
  3294. if (rc) {
  3295. CAM_ERR(CAM_UTIL, "Invalid handle %u rc %d",
  3296. dump_args->buf_handle, rc);
  3297. return rc;
  3298. }
  3299. if (buf_len <= dump_args->offset) {
  3300. CAM_WARN(CAM_UTIL, "Dump offset overshoot %zu %zu",
  3301. dump_args->offset, buf_len);
  3302. rc = -ENOSPC;
  3303. goto end;
  3304. }
  3305. remain_len = buf_len - dump_args->offset;
  3306. min_len = (reg_read->num_values * 2 * sizeof(uint32_t)) +
  3307. sizeof(struct cam_hw_soc_dump_header) + sizeof(uint32_t);
  3308. if (remain_len < min_len) {
  3309. CAM_WARN(CAM_UTIL,
  3310. "Dump Buffer exhaust read_values %d remain %zu min %u",
  3311. reg_read->num_values,
  3312. remain_len,
  3313. min_len);
  3314. rc = -ENOSPC;
  3315. goto end;
  3316. }
  3317. dst = (uint8_t *)cpu_addr + dump_args->offset;
  3318. hdr = (struct cam_hw_soc_dump_header *)dst;
  3319. memset(hdr, 0, sizeof(struct cam_hw_soc_dump_header));
  3320. scnprintf(hdr->tag, CAM_SOC_HW_DUMP_TAG_MAX_LEN, "%s_REG:",
  3321. soc_info->dev_name);
  3322. waddr = (uint32_t *)(dst + sizeof(struct cam_hw_soc_dump_header));
  3323. start = waddr;
  3324. hdr->word_size = sizeof(uint32_t);
  3325. *waddr = soc_info->index;
  3326. waddr++;
  3327. for (i = 0; i < reg_read->num_values; i++) {
  3328. val = cam_soc_util_r(soc_info, base_idx,
  3329. (reg_read->offset + (i * sizeof(uint32_t))));
  3330. if (!val)
  3331. CAM_WARN(CAM_UTIL, "Possibly fails to read");
  3332. waddr[0] = reg_read->offset + (i * sizeof(uint32_t));
  3333. waddr[1] = val;
  3334. waddr += 2;
  3335. }
  3336. hdr->size = (waddr - start) * hdr->word_size;
  3337. dump_args->offset += hdr->size +
  3338. sizeof(struct cam_hw_soc_dump_header);
  3339. end:
  3340. cam_mem_put_cpu_buf(dump_args->buf_handle);
  3341. return rc;
  3342. }
  3343. static int cam_soc_util_user_reg_dump(
  3344. struct cam_reg_dump_desc *reg_dump_desc,
  3345. struct cam_hw_soc_dump_args *dump_args,
  3346. struct cam_hw_soc_info *soc_info,
  3347. uint32_t reg_base_idx)
  3348. {
  3349. int rc = 0;
  3350. int i;
  3351. struct cam_reg_read_info *reg_read_info = NULL;
  3352. if (!dump_args || !reg_dump_desc || !soc_info) {
  3353. CAM_ERR(CAM_UTIL,
  3354. "Invalid input parameters %pK %pK %pK",
  3355. dump_args, reg_dump_desc, soc_info);
  3356. return -EINVAL;
  3357. }
  3358. for (i = 0; i < reg_dump_desc->num_read_range; i++) {
  3359. reg_read_info = &reg_dump_desc->read_range[i];
  3360. if (reg_read_info->type ==
  3361. CAM_REG_DUMP_READ_TYPE_CONT_RANGE) {
  3362. rc = cam_soc_util_dump_cont_reg_range_user_buf(
  3363. soc_info,
  3364. &reg_read_info->reg_read,
  3365. reg_base_idx,
  3366. dump_args);
  3367. } else if (reg_read_info->type ==
  3368. CAM_REG_DUMP_READ_TYPE_DMI) {
  3369. rc = cam_soc_util_dump_dmi_reg_range_user_buf(
  3370. soc_info,
  3371. &reg_read_info->dmi_read,
  3372. reg_base_idx,
  3373. dump_args);
  3374. } else {
  3375. CAM_ERR(CAM_UTIL,
  3376. "Invalid Reg dump read type: %d",
  3377. reg_read_info->type);
  3378. rc = -EINVAL;
  3379. goto end;
  3380. }
  3381. if (rc) {
  3382. CAM_ERR(CAM_UTIL,
  3383. "Reg range read failed rc: %d reg_base_idx: %d",
  3384. rc, reg_base_idx);
  3385. goto end;
  3386. }
  3387. }
  3388. end:
  3389. return rc;
  3390. }
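/*
 * Layout of the reg dump command buffer parsed below: a
 * cam_reg_dump_input_info header carrying num_dump_sets offsets, each offset
 * pointing at a cam_reg_dump_desc that holds a register base type, an array
 * of cam_reg_read_info read ranges (continuous or DMI), and the offset of
 * the cam_reg_dump_out_buffer inside the same command buffer where results
 * are written for the non user-triggered case.
 */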
  3391. int cam_soc_util_reg_dump_to_cmd_buf(void *ctx,
  3392. struct cam_cmd_buf_desc *cmd_desc, uint64_t req_id,
  3393. cam_soc_util_regspace_data_cb reg_data_cb,
  3394. struct cam_hw_soc_dump_args *soc_dump_args,
  3395. bool user_triggered_dump)
  3396. {
  3397. int rc = 0, i, j;
  3398. uintptr_t cpu_addr = 0;
  3399. uintptr_t cmd_buf_start = 0;
  3400. uintptr_t cmd_in_data_end = 0;
  3401. uintptr_t cmd_buf_end = 0;
  3402. uint32_t reg_base_type = 0;
  3403. size_t buf_size = 0, remain_len = 0;
  3404. struct cam_reg_dump_input_info *reg_input_info = NULL;
  3405. struct cam_reg_dump_desc *reg_dump_desc = NULL;
  3406. struct cam_reg_dump_out_buffer *dump_out_buf = NULL;
  3407. struct cam_reg_read_info *reg_read_info = NULL;
  3408. struct cam_hw_soc_info *soc_info;
  3409. uint32_t reg_base_idx = 0;
  3410. if (!ctx || !cmd_desc || !reg_data_cb) {
  3411. CAM_ERR(CAM_UTIL, "Invalid args to reg dump [%pK] [%pK]",
  3412. cmd_desc, reg_data_cb);
  3413. return -EINVAL;
  3414. }
  3415. if (!cmd_desc->length || !cmd_desc->size) {
  3416. CAM_ERR(CAM_UTIL, "Invalid cmd buf size %d %d",
  3417. cmd_desc->length, cmd_desc->size);
  3418. return -EINVAL;
  3419. }
  3420. rc = cam_mem_get_cpu_buf(cmd_desc->mem_handle, &cpu_addr, &buf_size);
  3421. if (rc || !cpu_addr || (buf_size == 0)) {
  3422. CAM_ERR(CAM_UTIL, "Failed in Get cpu addr, rc=%d, cpu_addr=%pK",
  3423. rc, (void *)cpu_addr);
  3424. if (rc)
  3425. return rc;
  3426. goto end;
  3427. }
  3428. CAM_DBG(CAM_UTIL, "Get cpu buf success req_id: %llu buf_size: %zu",
  3429. req_id, buf_size);
  3430. if ((buf_size < sizeof(uint32_t)) ||
  3431. ((size_t)cmd_desc->offset > (buf_size - sizeof(uint32_t)))) {
  3432. CAM_ERR(CAM_UTIL, "Invalid offset for cmd buf: %zu",
  3433. (size_t)cmd_desc->offset);
  3434. rc = -EINVAL;
  3435. goto end;
  3436. }
  3437. remain_len = buf_size - (size_t)cmd_desc->offset;
  3438. if ((remain_len < (size_t)cmd_desc->size) || (cmd_desc->size <
  3439. cmd_desc->length)) {
  3440. CAM_ERR(CAM_UTIL,
  3441. "Invalid params for cmd buf len: %zu size: %zu remain_len: %zu",
			(size_t)cmd_desc->length, (size_t)cmd_desc->size,
  3443. remain_len);
  3444. rc = -EINVAL;
  3445. goto end;
  3446. }
  3447. cmd_buf_start = cpu_addr + (uintptr_t)cmd_desc->offset;
  3448. cmd_in_data_end = cmd_buf_start + (uintptr_t)cmd_desc->length;
  3449. cmd_buf_end = cmd_buf_start + (uintptr_t)cmd_desc->size;
  3450. if ((cmd_buf_end <= cmd_buf_start) ||
  3451. (cmd_in_data_end <= cmd_buf_start)) {
  3452. CAM_ERR(CAM_UTIL,
  3453. "Invalid length or size for cmd buf: [%zu] [%zu]",
  3454. (size_t)cmd_desc->length, (size_t)cmd_desc->size);
  3455. rc = -EINVAL;
  3456. goto end;
  3457. }
  3458. CAM_DBG(CAM_UTIL,
  3459. "Buffer params start [%pK] input_end [%pK] buf_end [%pK]",
  3460. cmd_buf_start, cmd_in_data_end, cmd_buf_end);
  3461. reg_input_info = (struct cam_reg_dump_input_info *) cmd_buf_start;
  3462. if ((reg_input_info->num_dump_sets > 1) && (sizeof(uint32_t) >
  3463. ((U32_MAX - sizeof(struct cam_reg_dump_input_info)) /
  3464. (reg_input_info->num_dump_sets - 1)))) {
  3465. CAM_ERR(CAM_UTIL,
  3466. "Integer Overflow req_id: [%llu] num_dump_sets: [%u]",
  3467. req_id, reg_input_info->num_dump_sets);
  3468. rc = -EOVERFLOW;
  3469. goto end;
  3470. }
  3471. if ((!reg_input_info->num_dump_sets) ||
  3472. ((cmd_in_data_end - cmd_buf_start) <= (uintptr_t)
  3473. (sizeof(struct cam_reg_dump_input_info) +
  3474. ((reg_input_info->num_dump_sets - 1) * sizeof(uint32_t))))) {
  3475. CAM_ERR(CAM_UTIL,
  3476. "Invalid number of dump sets, req_id: [%llu] num_dump_sets: [%u]",
  3477. req_id, reg_input_info->num_dump_sets);
  3478. rc = -EINVAL;
  3479. goto end;
  3480. }
  3481. CAM_DBG(CAM_UTIL,
  3482. "reg_input_info req_id: %llu ctx %pK num_dump_sets: %d",
  3483. req_id, ctx, reg_input_info->num_dump_sets);
  3484. for (i = 0; i < reg_input_info->num_dump_sets; i++) {
  3485. if ((cmd_in_data_end - cmd_buf_start) <= (uintptr_t)
  3486. reg_input_info->dump_set_offsets[i]) {
  3487. CAM_ERR(CAM_UTIL,
  3488. "Invalid dump set offset: [%pK], cmd_buf_start: [%pK] cmd_in_data_end: [%pK]",
  3489. (uintptr_t)reg_input_info->dump_set_offsets[i],
  3490. cmd_buf_start, cmd_in_data_end);
  3491. rc = -EINVAL;
  3492. goto end;
  3493. }
  3494. reg_dump_desc = (struct cam_reg_dump_desc *)
  3495. (cmd_buf_start +
  3496. (uintptr_t)reg_input_info->dump_set_offsets[i]);
  3497. if ((reg_dump_desc->num_read_range > 1) &&
  3498. (sizeof(struct cam_reg_read_info) > ((U32_MAX -
  3499. sizeof(struct cam_reg_dump_desc)) /
  3500. (reg_dump_desc->num_read_range - 1)))) {
  3501. CAM_ERR(CAM_UTIL,
  3502. "Integer Overflow req_id: [%llu] num_read_range: [%u]",
  3503. req_id, reg_dump_desc->num_read_range);
  3504. rc = -EOVERFLOW;
  3505. goto end;
  3506. }
  3507. if ((!reg_dump_desc->num_read_range) ||
  3508. ((cmd_in_data_end - (uintptr_t)reg_dump_desc) <=
  3509. (uintptr_t)(sizeof(struct cam_reg_dump_desc) +
  3510. ((reg_dump_desc->num_read_range - 1) *
  3511. sizeof(struct cam_reg_read_info))))) {
  3512. CAM_ERR(CAM_UTIL,
  3513. "Invalid number of read ranges, req_id: [%llu] num_read_range: [%d]",
  3514. req_id, reg_dump_desc->num_read_range);
  3515. rc = -EINVAL;
  3516. goto end;
  3517. }
  3518. if ((cmd_buf_end - cmd_buf_start) <= (uintptr_t)
  3519. (reg_dump_desc->dump_buffer_offset +
  3520. sizeof(struct cam_reg_dump_out_buffer))) {
  3521. CAM_ERR(CAM_UTIL,
  3522. "Invalid out buffer offset: [%pK], cmd_buf_start: [%pK] cmd_buf_end: [%pK]",
  3523. (uintptr_t)reg_dump_desc->dump_buffer_offset,
  3524. cmd_buf_start, cmd_buf_end);
  3525. rc = -EINVAL;
  3526. goto end;
  3527. }
  3528. reg_base_type = reg_dump_desc->reg_base_type;
  3529. if (reg_base_type == 0 || reg_base_type >
  3530. CAM_REG_DUMP_BASE_TYPE_SFE_RIGHT) {
  3531. CAM_ERR(CAM_UTIL,
  3532. "Invalid Reg dump base type: %d",
  3533. reg_base_type);
  3534. rc = -EINVAL;
  3535. goto end;
  3536. }
  3537. rc = reg_data_cb(reg_base_type, ctx, &soc_info, &reg_base_idx);
  3538. if (rc || !soc_info) {
  3539. CAM_ERR(CAM_UTIL,
  3540. "Reg space data callback failed rc: %d soc_info: [%pK]",
  3541. rc, soc_info);
  3542. rc = -EINVAL;
  3543. goto end;
  3544. }
		if (reg_base_idx >= soc_info->num_reg_map) {
			CAM_ERR(CAM_UTIL,
				"Invalid reg base idx: %d num reg map: %d",
				reg_base_idx, soc_info->num_reg_map);
			rc = -EINVAL;
			goto end;
		}
  3552. CAM_DBG(CAM_UTIL,
  3553. "Reg data callback success req_id: %llu base_type: %d base_idx: %d num_read_range: %d",
  3554. req_id, reg_base_type, reg_base_idx,
  3555. reg_dump_desc->num_read_range);
		/*
		 * If the dump request is triggered by user space, the buffer
		 * will be different from the buffer received in the init
		 * packet. In that case, dump the data to the user provided
		 * buffer and exit.
		 */
  3561. if (user_triggered_dump) {
  3562. rc = cam_soc_util_user_reg_dump(reg_dump_desc,
  3563. soc_dump_args, soc_info, reg_base_idx);
  3564. CAM_INFO(CAM_UTIL,
  3565. "%s reg_base_idx %d dumped offset %u",
  3566. soc_info->dev_name, reg_base_idx,
  3567. soc_dump_args->offset);
  3568. goto end;
  3569. }
		/*
		 * The code below runs when data is dumped to the out buffer
		 * received in the init packet.
		 */
  3573. dump_out_buf = (struct cam_reg_dump_out_buffer *)
  3574. (cmd_buf_start +
  3575. (uintptr_t)reg_dump_desc->dump_buffer_offset);
  3576. dump_out_buf->req_id = req_id;
  3577. dump_out_buf->bytes_written = 0;
  3578. for (j = 0; j < reg_dump_desc->num_read_range; j++) {
  3579. CAM_DBG(CAM_UTIL,
  3580. "Number of bytes written to cmd buffer: %u req_id: %llu",
  3581. dump_out_buf->bytes_written, req_id);
  3582. reg_read_info = &reg_dump_desc->read_range[j];
  3583. if (reg_read_info->type ==
  3584. CAM_REG_DUMP_READ_TYPE_CONT_RANGE) {
  3585. rc = cam_soc_util_dump_cont_reg_range(soc_info,
  3586. &reg_read_info->reg_read, reg_base_idx,
  3587. dump_out_buf, cmd_buf_end);
  3588. } else if (reg_read_info->type ==
  3589. CAM_REG_DUMP_READ_TYPE_DMI) {
  3590. rc = cam_soc_util_dump_dmi_reg_range(soc_info,
  3591. &reg_read_info->dmi_read, reg_base_idx,
  3592. dump_out_buf, cmd_buf_end);
  3593. } else {
  3594. CAM_ERR(CAM_UTIL,
  3595. "Invalid Reg dump read type: %d",
  3596. reg_read_info->type);
  3597. rc = -EINVAL;
  3598. goto end;
  3599. }
  3600. if (rc) {
  3601. CAM_ERR(CAM_UTIL,
  3602. "Reg range read failed rc: %d reg_base_idx: %d dump_out_buf: %pK",
  3603. rc, reg_base_idx, dump_out_buf);
  3604. goto end;
  3605. }
  3606. }
  3607. }
  3608. end:
  3609. cam_mem_put_cpu_buf(cmd_desc->mem_handle);
  3610. return rc;
  3611. }
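/*
 * Sketch of a reg space data callback (illustrative only; the context type,
 * names and base mapping are hypothetical, and the exact typedef for
 * cam_soc_util_regspace_data_cb lives in the header). The callback turns a
 * reg base type from the dump descriptor into the soc_info and reg map index
 * to read from, matching the argument order used at the call site above:
 *
 *	static int cam_blk_reg_data_cb(uint32_t reg_base_type, void *ctx,
 *		struct cam_hw_soc_info **soc_info_ptr, uint32_t *reg_base_idx)
 *	{
 *		struct cam_blk_ctx *blk_ctx = ctx;
 *
 *		*soc_info_ptr = &blk_ctx->soc_info;
 *		*reg_base_idx = 0;
 *		return 0;
 *	}
 */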
/**
 * cam_soc_util_print_clk_freq()
 *
 * @brief:      This function gets the clk rates for each clk from clk
 *              driver and prints in log
 *
 * @soc_info:   Device soc struct to be populated
 *
 * @return:     Success or failure
 */
int cam_soc_util_print_clk_freq(struct cam_hw_soc_info *soc_info)
{
	int i;
	unsigned long clk_rate = 0;

	if (!soc_info) {
		CAM_ERR(CAM_UTIL, "Invalid soc info");
		return -EINVAL;
	}

	if ((soc_info->num_clk == 0) ||
		(soc_info->num_clk >= CAM_SOC_MAX_CLK)) {
		CAM_ERR(CAM_UTIL, "[%s] Invalid number of clock %d",
			soc_info->dev_name, soc_info->num_clk);
		return -EINVAL;
	}

	for (i = 0; i < soc_info->num_clk; i++) {
		clk_rate = cam_wrapper_clk_get_rate(soc_info->clk[i]);
		CAM_INFO(CAM_UTIL,
			"[%s] idx = %d clk name = %s clk_rate=%lu",
			soc_info->dev_name, i, soc_info->clk_name[i],
			clk_rate);
	}

	return 0;
}
inline unsigned long cam_soc_util_get_applied_src_clk(
	struct cam_hw_soc_info *soc_info, bool is_max)
{
	unsigned long clk_rate;

	/*
	 * For CRMC type, e.g. IFE, CSID, CPHY:
	 *     final clk = max(hw_client_0, hw_client_1, hw_client_2, sw_client)
	 * For CRMB type, e.g. CAMNOC AXI:
	 *     final clk = max(hw_client_0 + hw_client_1 + hw_client_2, sw_client)
	 */
	if (is_max) {
		clk_rate = max(soc_info->applied_src_clk_rates.hw_client[0].high,
			soc_info->applied_src_clk_rates.hw_client[1].high);
		clk_rate = max(clk_rate, soc_info->applied_src_clk_rates.hw_client[2].high);
		clk_rate = max(clk_rate, soc_info->applied_src_clk_rates.sw_client);
	} else {
		clk_rate = max((soc_info->applied_src_clk_rates.hw_client[0].high +
			soc_info->applied_src_clk_rates.hw_client[1].high +
			soc_info->applied_src_clk_rates.hw_client[2].high),
			soc_info->applied_src_clk_rates.sw_client);
	}

	return clk_rate;
}
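/*
 * Worked example (hypothetical rates): with hw_client highs of 400, 480 and
 * 0 MHz and a sw_client vote of 456 MHz, the CRMC (is_max) result is
 * max(400, 480, 0, 456) = 480 MHz, while the CRMB result is
 * max(400 + 480 + 0, 456) = 880 MHz.
 */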
int cam_soc_util_regulators_enabled(struct cam_hw_soc_info *soc_info)
{
	int j = 0, rc = 0;
	int enabled_cnt = 0;

	for (j = 0; j < soc_info->num_rgltr; j++) {
		if (soc_info->rgltr[j]) {
			rc = cam_wrapper_regulator_is_enabled(soc_info->rgltr[j]);
			if (rc < 0) {
				CAM_ERR(CAM_UTIL, "%s regulator_is_enabled failed",
					soc_info->rgltr_name[j]);
			} else if (rc > 0) {
				CAM_DBG(CAM_UTIL, "%s regulator enabled",
					soc_info->rgltr_name[j]);
				enabled_cnt++;
			} else {
				CAM_DBG(CAM_UTIL, "%s regulator is disabled",
					soc_info->rgltr_name[j]);
			}
		}
	}

	return enabled_cnt;
}
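/*
 * Note: cam_soc_util_regulators_enabled() returns a count, not a boolean; a
 * caller that wants to know whether every rail is up can compare the return
 * value against soc_info->num_rgltr, e.g.:
 *
 *	if (cam_soc_util_regulators_enabled(soc_info) == soc_info->num_rgltr)
 *		all_rails_on = true;
 */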