// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013, 2016-2018, 2020-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/rational.h>
#include <linux/regmap.h>
#include <linux/math64.h>
#include <linux/minmax.h>
#include <linux/slab.h>

#include <asm/div64.h>

#include <soc/qcom/crm.h>
#include <soc/qcom/tcs.h>

#include "clk-rcg.h"
#include "common.h"
#include "clk-debug.h"

#define CMD_REG 0x0
#define CMD_UPDATE BIT(0)
#define CMD_ROOT_EN BIT(1)
#define CMD_DIRTY_CFG BIT(4)
#define CMD_DIRTY_N BIT(5)
#define CMD_DIRTY_M BIT(6)
#define CMD_DIRTY_D BIT(7)
#define CMD_ROOT_OFF BIT(31)

#define CFG_REG 0x4
#define CFG_SRC_DIV_SHIFT 0
#define CFG_SRC_SEL_SHIFT 8
#define CFG_SRC_SEL_MASK (0x7 << CFG_SRC_SEL_SHIFT)
#define CFG_MODE_SHIFT 12
#define CFG_MODE_MASK (0x3 << CFG_MODE_SHIFT)
#define CFG_MODE_DUAL_EDGE (0x2 << CFG_MODE_SHIFT)
#define CFG_HW_CLK_CTRL_MASK BIT(20)

#define M_REG 0x8
#define N_REG 0xc
#define D_REG 0x10

#define RCG_CFG_OFFSET(rcg) ((rcg)->cmd_rcgr + (rcg)->cfg_off + CFG_REG)
#define RCG_M_OFFSET(rcg) ((rcg)->cmd_rcgr + (rcg)->cfg_off + M_REG)
#define RCG_N_OFFSET(rcg) ((rcg)->cmd_rcgr + (rcg)->cfg_off + N_REG)
#define RCG_D_OFFSET(rcg) ((rcg)->cmd_rcgr + (rcg)->cfg_off + D_REG)

/* Dynamic Frequency Scaling */
#define MAX_PERF_LEVEL 8
#define SE_CMD_DFSR_OFFSET 0x14
#define SE_CMD_DFS_EN BIT(0)
#define SE_PERF_DFSR(level) (0x1c + 0x4 * (level))
#define SE_PERF_M_DFSR(level) (0x5c + 0x4 * (level))
#define SE_PERF_N_DFSR(level) (0x9c + 0x4 * (level))

/* Cesta configuration */
#define MAX_VCD_PER_CRM 9
#define MAX_PERF_LEVEL_PER_VCD 8
#define MAX_PERF_OL_PER_VCD 4
#define MAX_CRM_SW_DRV_STATE 3
#define CLK_RCG_CRMC_CFG_RCGR_OFFSET 0x110
#define CLK_RCG_CRMC_PERF_LEVEL_PLL_L_VAL_LUT_OFFSET 0x138
#define CLK_RCG_CRMC_CURR_PERF_OL_OFFSET 0x0c
#define CLK_RCG_CRMC_CURR_PERF_OL(rcg_index) \
	(CLK_RCG_CRMC_CURR_PERF_OL_OFFSET + ((rcg_index) * 0x200))
#define CLK_RCG_CRMC_CFG_RCGR(rcg_index, level) \
	(CLK_RCG_CRMC_CFG_RCGR_OFFSET + ((rcg_index) * 0x200) + (0x4 * (level)))
#define CLK_RCG_CRMC_PERF_LEVEL_PLL_L_VAL_LUT(rcg_index, level) \
	(CLK_RCG_CRMC_PERF_LEVEL_PLL_L_VAL_LUT_OFFSET + ((rcg_index) * 0x200) + (0x4 * (level)))
#define CLK_RCG_CURR_PERF_OL_MASK 0x07
#define PLL_L_VAL_MASK GENMASK(7, 0)
#define PLL_ALPHA_VAL_MASK GENMASK(31, 16)
#define PLL_ALPHA_VAL_SHIFT 16

enum freq_policy {
	FLOOR,
	CEIL,
};

static struct freq_tbl cxo_f = {
	.freq = 19200000,
	.src = 0,
	.pre_div = 1,
	.m = 0,
	.n = 0,
};

static int clk_rcg2_is_enabled(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cmd;
	int ret;

	ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
	if (ret)
		return ret;

	return (cmd & CMD_ROOT_OFF) == 0;
}

static u8 __clk_rcg2_get_parent(struct clk_hw *hw, u32 cfg)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int num_parents = clk_hw_get_num_parents(hw);
	int i;

	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++)
		if (cfg == rcg->parent_map[i].cfg)
			return i;

	pr_debug("%s: Clock %s has invalid parent, using default.\n",
		 __func__, clk_hw_get_name(hw));
	return 0;
}

static u8 clk_rcg2_get_parent(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg;
	int ret;

	ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
	if (ret) {
		pr_debug("%s: Unable to read CFG register for %s\n",
			 __func__, clk_hw_get_name(hw));
		return 0;
	}

	return __clk_rcg2_get_parent(hw, cfg);
}

static int get_update_timeout(const struct clk_rcg2 *rcg)
{
	int timeout = 0;

	/*
	 * The time it takes an RCG to update is roughly three clock cycles
	 * at each of the old and new clock rates.
	 */
	if (rcg->current_freq)
		timeout += 3 * (1000000 / rcg->current_freq);
	if (rcg->configured_freq)
		timeout += 3 * (1000000 / rcg->configured_freq);

	return max(timeout, 500);
}
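
/*
 * Worked example (illustrative numbers, not from hardware docs): with
 * current_freq = 100 kHz and configured_freq = 200 kHz, the sums above
 * give 3 * (1000000 / 100000) + 3 * (1000000 / 200000) = 30 + 15 = 45 us,
 * so the 500 us floor from max() applies. For MHz-range rates the integer
 * division truncates to zero, and the 500 us floor is what actually bounds
 * the poll loop in update_config() below.
 */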
static int update_config(struct clk_rcg2 *rcg)
{
	int timeout, count, ret;
	u32 cmd;
	struct clk_hw *hw = &rcg->clkr.hw;

	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				 CMD_UPDATE, CMD_UPDATE);
	if (ret)
		return ret;

	timeout = get_update_timeout(rcg);

	/* Wait for update to take effect */
	for (count = timeout; count > 0; count--) {
		ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
		if (ret)
			return ret;
		if (!(cmd & CMD_UPDATE))
			return 0;
		udelay(1);
	}

	WARN_CLK(hw, 1, "rcg didn't update its configuration after %d us.", timeout);
	return -EBUSY;
}

static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;
	u32 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;

	ret = regmap_update_bits(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg),
				 CFG_SRC_SEL_MASK, cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}

static int clk_rcg2_set_force_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret = 0, count = 500;

	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				 CMD_ROOT_EN, CMD_ROOT_EN);
	if (ret)
		return ret;

	for (; count > 0; count--) {
		if (clk_rcg2_is_enabled(hw))
			return ret;
		/* Delay for 1 usec and retry polling the status bit */
		udelay(1);
	}

	WARN_CLK(hw, 1, "rcg didn't turn on.");
	return ret;
}

static int clk_rcg2_clear_force_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				  CMD_ROOT_EN, 0);
}

static bool clk_rcg2_is_force_enabled(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 val = 0;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &val);

	return val & CMD_ROOT_EN;
}

static int prepare_enable_rcg_srcs(struct clk *curr, struct clk *new)
{
	int rc = 0;

	rc = clk_prepare(curr);
	if (rc)
		return rc;

	rc = clk_prepare(new);
	if (rc)
		goto err_new_src_prepare;

	rc = clk_enable(curr);
	if (rc)
		goto err_curr_src_enable;

	rc = clk_enable(new);
	if (rc)
		goto err_new_src_enable;

	return rc;

err_new_src_enable:
	clk_disable(curr);
err_curr_src_enable:
	clk_unprepare(new);
err_new_src_prepare:
	clk_unprepare(curr);

	return rc;
}

static void disable_unprepare_rcg_srcs(struct clk *curr, struct clk *new)
{
	clk_disable(new);
	clk_disable(curr);

	clk_unprepare(new);
	clk_unprepare(curr);
}

/*
 * Calculate m/n:d rate
 *
 *          parent_rate     m
 *   rate = ----------- x  ---
 *            hid_div       n
 */
static unsigned long
calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
{
	if (hid_div)
		rate = mult_frac(rate, 2, hid_div + 1);

	if (mode)
		rate = mult_frac(rate, m, n);

	return rate;
}
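
/*
 * Worked example (illustrative): the CFG divider field stores 2*div - 1
 * (see the (2 * div) - 1 encoding in clk_gfx3d_set_rate_and_parent()), so
 * a field value of 5 selects divide-by-3. With parent_rate = 600 MHz,
 * hid_div = 5 and an M/N of 1/2 (mode != 0), calc_rate() returns
 * 600 MHz * 2 / (5 + 1) * 1 / 2 = 100 MHz.
 */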
static unsigned long
__clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate, u32 cfg)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f_curr;
	u32 src, hid_div, m = 0, n = 0, mode = 0, mask;
	unsigned long rrate = 0;

	src = cfg;
	src &= CFG_SRC_SEL_MASK;
	src >>= CFG_SRC_SEL_SHIFT;

	if (rcg->enable_safe_config && (!clk_hw_is_prepared(hw) ||
	    !clk_hw_is_enabled(hw)) && !src) {
		if (!rcg->current_freq)
			rcg->current_freq = cxo_f.freq;
		return rcg->current_freq;
	}

	if (rcg->mnd_width) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
		m &= mask;
		regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &n);
		n = ~n;
		n &= mask;
		n += m;
		mode = cfg & CFG_MODE_MASK;
		mode >>= CFG_MODE_SHIFT;
	}

	if (rcg->enable_safe_config && !src) {
		f_curr = qcom_find_freq(rcg->freq_tbl, rcg->current_freq);
		if (!f_curr)
			return -EINVAL;
		hid_div = f_curr->pre_div;
	} else {
		mask = BIT(rcg->hid_width) - 1;
		hid_div = cfg >> CFG_SRC_DIV_SHIFT;
		hid_div &= mask;
	}

	rrate = calc_rate(parent_rate, m, n, mode, hid_div);

	/*
	 * Check to cover the case when the RCG has been initialized to a
	 * non-CXO frequency before the clock driver has taken control of it.
	 */
	if (rcg->enable_safe_config && !rcg->current_freq)
		rcg->current_freq = rrate;

	return rrate;
}

static unsigned long
clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg;

	regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);

	return __clk_rcg2_recalc_rate(hw, parent_rate, cfg);
}

static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
				    struct clk_rate_request *req,
				    enum freq_policy policy)
{
	unsigned long clk_flags, rate = req->rate;
	struct clk_rate_request parent_req = { };
	struct clk_hw *p;
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int index, ret = 0;

	switch (policy) {
	case FLOOR:
		f = qcom_find_freq_floor(f, rate);
		break;
	case CEIL:
		f = qcom_find_freq(f, rate);
		break;
	default:
		return -EINVAL;
	}

	if (!f)
		return -EINVAL;

	index = qcom_find_src_index(hw, rcg->parent_map, f->src);
	if (index < 0)
		return index;

	clk_flags = clk_hw_get_flags(hw);
	p = clk_hw_get_parent_by_index(hw, index);
	if (!p)
		return -EINVAL;

	if (clk_flags & CLK_SET_RATE_PARENT) {
		rate = f->freq;
		if (f->pre_div) {
			if (!rate)
				rate = req->rate;
			rate /= 2;
			rate *= f->pre_div + 1;
		}
		if (f->n) {
			u64 tmp = rate;

			tmp = tmp * f->n;
			do_div(tmp, f->m);
			rate = tmp;
		}
	} else {
		rate = clk_hw_get_rate(p);
	}

	req->best_parent_hw = p;
	req->best_parent_rate = clk_hw_round_rate(p, rate);
	req->rate = f->freq;

	if (f->src_freq != FIXED_FREQ_SRC) {
		rate = parent_req.rate = f->src_freq;
		parent_req.best_parent_hw = p;
		ret = __clk_determine_rate(p, &parent_req);
		if (ret)
			return ret;

		ret = clk_set_rate(p->clk, parent_req.rate);
		if (ret) {
			pr_err("Failed set rate(%lu) on parent for non-fixed source\n",
			       parent_req.rate);
			return ret;
		}
	}

	return 0;
}

static int clk_rcg2_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, CEIL);
}

static int clk_rcg2_determine_floor_rate(struct clk_hw *hw,
					 struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
}

static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f,
				u32 *_cfg)
{
	u32 cfg, mask, d_val, not2d_val, n_minus_m;
	struct clk_hw *hw = &rcg->clkr.hw;
	int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);

	if (index < 0)
		return index;

	if (rcg->mnd_width && f->n) {
		mask = BIT(rcg->mnd_width) - 1;
		ret = regmap_update_bits(rcg->clkr.regmap,
					 RCG_M_OFFSET(rcg), mask, f->m);
		if (ret)
			return ret;

		ret = regmap_update_bits(rcg->clkr.regmap,
					 RCG_N_OFFSET(rcg), mask, ~(f->n - f->m));
		if (ret)
			return ret;

		/* Calculate 2d value */
		d_val = f->n;

		n_minus_m = f->n - f->m;
		n_minus_m *= 2;

		d_val = clamp_t(u32, d_val, f->m, n_minus_m);
		not2d_val = ~d_val & mask;

		ret = regmap_update_bits(rcg->clkr.regmap,
					 RCG_D_OFFSET(rcg), mask, not2d_val);
		if (ret)
			return ret;
	}

	mask = BIT(rcg->hid_width) - 1;
	mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
	cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
	cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
	if (rcg->mnd_width && f->n && (f->m != f->n))
		cfg |= CFG_MODE_DUAL_EDGE;
	if (rcg->flags & HW_CLK_CTRL_MODE)
		cfg |= CFG_HW_CLK_CTRL_MASK;

	*_cfg &= ~mask;
	*_cfg |= cfg;

	return 0;
}
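
/*
 * Worked example (illustrative): for an MND RCG with mnd_width = 8 and
 * f->m = 1, f->n = 2, the code above programs M = 1,
 * N = ~(n - m) & 0xff = ~1 & 0xff = 0xfe, and
 * 2D = clamp(n, m, 2 * (n - m)) = 2, so the D register gets
 * ~2 & 0xff = 0xfd. Since m != n, the CFG mode field is set to dual-edge.
 */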
static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
	u32 cfg;
	int ret;

	ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
	if (ret)
		return ret;

	ret = __clk_rcg2_configure(rcg, f, &cfg);
	if (ret)
		return ret;

	ret = regmap_write(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), cfg);
	if (ret)
		return ret;

	rcg->configured_freq = f->freq;

	return update_config(rcg);
}

static void clk_rcg2_list_registers(struct seq_file *f, struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	static struct clk_register_data *data;
	int i, val;
	static struct clk_register_data data1[] = {
		{"CMD_RCGR", 0x0},
		{"CFG_RCGR", 0x4},
		{ },
	};
	static struct clk_register_data data2[] = {
		{"CMD_RCGR", 0x0},
		{"CFG_RCGR", 0x4},
		{"M_VAL", 0x8},
		{"N_VAL", 0xC},
		{"D_VAL", 0x10},
		{ },
	};
	static struct clk_register_data data3[] = {
		{"CMD_RCGR", 0x0},
		{"CFG_RCGR", 0x4},
		{"M_VAL", 0x8},
		{"N_VAL", 0xC},
		{"D_VAL", 0x10},
		{"CMD_DFSR", 0x14},
		{ },
	};

	if (rcg->flags & DFS_SUPPORT)
		data = data3;
	else if (rcg->mnd_width)
		data = data2;
	else
		data = data1;

	for (i = 0; data[i].name != NULL; i++) {
		regmap_read(rcg->clkr.regmap, (rcg->cmd_rcgr +
			    data[i].offset), &val);
		clock_debug_output(f, "%20s: 0x%.8x\n", data[i].name, val);
	}
}

/* Return the nth supported frequency for a given clock. */
static long clk_rcg2_list_rate(struct clk_hw *hw, unsigned int n,
			       unsigned long fmax)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	size_t freq_tbl_size = 0;

	if (!f)
		return -ENXIO;

	for (; f->freq; f++)
		freq_tbl_size++;

	if (n > freq_tbl_size - 1)
		return -EINVAL;

	return (rcg->freq_tbl + n)->freq;
}

static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
			       enum freq_policy policy)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f, *f_curr;
	int ret, curr_src_index, new_src_index;
	struct clk_hw *curr_src = NULL, *new_src = NULL;
	bool force_enabled = false;

	switch (policy) {
	case FLOOR:
		f = qcom_find_freq_floor(rcg->freq_tbl, rate);
		break;
	case CEIL:
		f = qcom_find_freq(rcg->freq_tbl, rate);
		break;
	default:
		return -EINVAL;
	}

	if (!f)
		return -EINVAL;

	/*
	 * Return if the RCG is currently disabled. This configuration update
	 * will happen as part of the RCG enable sequence.
	 */
	if (rcg->enable_safe_config && !clk_hw_is_prepared(hw)) {
		rcg->current_freq = rate;
		return 0;
	}

	if (rcg->flags & FORCE_ENABLE_RCG) {
		rcg->current_freq = DIV_ROUND_CLOSEST_ULL(
				clk_get_rate(hw->clk), 1000) * 1000;
		if (rcg->current_freq == cxo_f.freq)
			curr_src_index = 0;
		else {
			f_curr = qcom_find_freq(rcg->freq_tbl,
						rcg->current_freq);
			if (!f_curr)
				return -EINVAL;
			curr_src_index = qcom_find_src_index(hw,
						rcg->parent_map, f_curr->src);
		}

		new_src_index = qcom_find_src_index(hw, rcg->parent_map,
						    f->src);

		curr_src = clk_hw_get_parent_by_index(hw, curr_src_index);
		if (!curr_src)
			return -EINVAL;
		new_src = clk_hw_get_parent_by_index(hw, new_src_index);
		if (!new_src)
			return -EINVAL;

		/* The RCG could currently be disabled. Enable its parents. */
		ret = prepare_enable_rcg_srcs(curr_src->clk, new_src->clk);
		if (ret)
			return ret;
		force_enabled = clk_rcg2_is_force_enabled(hw);
		if (!force_enabled)
			clk_rcg2_set_force_enable(hw);
	}

	ret = clk_rcg2_configure(rcg, f);
	if (ret)
		return ret;

	if (rcg->flags & FORCE_ENABLE_RCG) {
		if (!force_enabled)
			clk_rcg2_clear_force_enable(hw);
		disable_unprepare_rcg_srcs(curr_src->clk, new_src->clk);
	}

	/* Update current frequency with the requested frequency. */
	rcg->current_freq = rate;
	return ret;
}

static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	return __clk_rcg2_set_rate(hw, rate, CEIL);
}

static int clk_rcg2_set_floor_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long parent_rate)
{
	return __clk_rcg2_set_rate(hw, rate, FLOOR);
}

static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_set_rate(hw, rate, CEIL);
}

static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_set_rate(hw, rate, FLOOR);
}

static int clk_rcg2_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 notn_m, n, m, d, not2d, mask;

	if (!rcg->mnd_width) {
		/* 50 % duty-cycle for non-MND RCGs */
		duty->num = 1;
		duty->den = 2;
		return 0;
	}

	regmap_read(rcg->clkr.regmap, RCG_D_OFFSET(rcg), &not2d);
	regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
	regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m);

	if (!not2d && !m && !notn_m) {
		/* 50 % duty-cycle always */
		duty->num = 1;
		duty->den = 2;
		return 0;
	}

	mask = BIT(rcg->mnd_width) - 1;

	d = ~(not2d) & mask;
	d = DIV_ROUND_CLOSEST(d, 2);

	n = (~(notn_m) + m) & mask;

	duty->num = d;
	duty->den = n;

	return 0;
}

static int clk_rcg2_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 notn_m, n, m, d, not2d, mask, duty_per, cfg;
	int ret;

	/* Duty-cycle cannot be modified for non-MND RCGs */
	if (!rcg->mnd_width)
		return -EINVAL;

	mask = BIT(rcg->mnd_width) - 1;

	regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m);
	regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
	regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);

	/* Duty-cycle cannot be modified if MND divider is in bypass mode. */
	if (!(cfg & CFG_MODE_MASK))
		return -EINVAL;

	n = (~(notn_m) + m) & mask;

	duty_per = (duty->num * 100) / duty->den;

	/* Calculate 2d value */
	d = DIV_ROUND_CLOSEST(n * duty_per * 2, 100);

	/*
	 * Check the bit width of 2d. If it is too big, reduce the duty cycle;
	 * also make sure it is never zero.
	 */
	d = clamp_val(d, 1, mask);

	if ((d / 2) > (n - m))
		d = (n - m) * 2;
	else if ((d / 2) < (m / 2))
		d = m;

	not2d = ~d & mask;

	ret = regmap_update_bits(rcg->clkr.regmap, RCG_D_OFFSET(rcg), mask,
				 not2d);
	if (ret)
		return ret;

	return update_config(rcg);
}
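
/*
 * Worked example (illustrative register values): with mnd_width = 8,
 * n = 4, m = 1 and a requested duty cycle of 25% (num/den = 1/4),
 * duty_per = 25 and 2d = DIV_ROUND_CLOSEST(4 * 25 * 2, 100) = 2, which
 * survives the clamp (1 <= 2 <= 0xff, 2/2 <= n - m), so the D register
 * is written with ~2 & 0xff = 0xfd.
 */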
static int clk_rcg2_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	unsigned long rate;
	const struct freq_tbl *f;
	int ret;

	if (rcg->flags & FORCE_ENABLE_RCG)
		clk_rcg2_set_force_enable(hw);

	if (!rcg->enable_safe_config)
		return 0;

	/*
	 * Switch from CXO to the stashed mux selection. Force enable and
	 * disable the RCG while configuring it to safeguard against any update
	 * signal coming from the downstream clock. The current parent has
	 * already been prepared and enabled at this point, and the CXO source
	 * is always on while APPS is online. Therefore, the RCG can safely be
	 * switched.
	 */
	rate = rcg->current_freq;
	f = qcom_find_freq(rcg->freq_tbl, rate);
	if (!f)
		return -EINVAL;

	/*
	 * If CXO is not listed as a supported frequency in the frequency
	 * table, the above API would return the lowest supported frequency
	 * instead. This will lead to incorrect configuration of the RCG.
	 * Check if the RCG rate is CXO and configure it accordingly.
	 */
	if (rate == cxo_f.freq)
		f = &cxo_f;

	if (!(rcg->flags & FORCE_ENABLE_RCG))
		clk_rcg2_set_force_enable(hw);

	ret = clk_rcg2_configure(rcg, f);

	if (!(rcg->flags & FORCE_ENABLE_RCG))
		clk_rcg2_clear_force_enable(hw);

	return ret;
}

static void clk_rcg2_disable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	if (!rcg->enable_safe_config) {
		if (rcg->flags & FORCE_ENABLE_RCG)
			clk_rcg2_clear_force_enable(hw);
		return;
	}

	/*
	 * Park the RCG at a safe configuration - sourced off the CXO. This is
	 * needed for 2 reasons: In the case of RCGs sourcing PSCBCs, due to a
	 * default HW behavior, the RCG will turn on when its corresponding
	 * GDSC is enabled. We might also have cases when the RCG might be left
	 * enabled without the overlying SW knowing about it. This results from
	 * hard-to-track cases of downstream clocks being left enabled. In both
	 * these cases, scaling the RCG will fail since it's enabled but with
	 * its sources cut off.
	 *
	 * Save mux select and switch to CXO. Force enable and disable the RCG
	 * while configuring it to safeguard against any update signal coming
	 * from the downstream clock. The current parent is still prepared and
	 * enabled at this point, and the CXO source is always on while APPS is
	 * online. Therefore, the RCG can safely be switched.
	 */
	clk_rcg2_set_force_enable(hw);

	ret = clk_rcg2_configure(rcg, &cxo_f);
	if (ret)
		pr_err("%s: CXO configuration failed\n", clk_hw_get_name(hw));

	clk_rcg2_clear_force_enable(hw);
}

static struct clk_regmap_ops clk_rcg2_regmap_ops = {
	.list_rate = clk_rcg2_list_rate,
	.list_registers = clk_rcg2_list_registers,
};

static int clk_rcg2_init(struct clk_hw *hw)
{
	struct clk_regmap *rclk = to_clk_regmap(hw);

	if (!rclk->ops)
		rclk->ops = &clk_rcg2_regmap_ops;

	return 0;
}

const struct clk_ops clk_rcg2_ops = {
	.prepare = clk_prepare_regmap,
	.unprepare = clk_unprepare_regmap,
	.pre_rate_change = clk_pre_change_regmap,
	.post_rate_change = clk_post_change_regmap,
	.is_enabled = clk_rcg2_is_enabled,
	.enable = clk_rcg2_enable,
	.disable = clk_rcg2_disable,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_set_rate,
	.set_rate_and_parent = clk_rcg2_set_rate_and_parent,
	.get_duty_cycle = clk_rcg2_get_duty_cycle,
	.set_duty_cycle = clk_rcg2_set_duty_cycle,
	.init = clk_rcg2_init,
	.debug_init = clk_common_debug_init,
};
EXPORT_SYMBOL_GPL(clk_rcg2_ops);

const struct clk_ops clk_rcg2_floor_ops = {
	.prepare = clk_prepare_regmap,
	.unprepare = clk_unprepare_regmap,
	.pre_rate_change = clk_pre_change_regmap,
	.post_rate_change = clk_post_change_regmap,
	.is_enabled = clk_rcg2_is_enabled,
	.enable = clk_rcg2_enable,
	.disable = clk_rcg2_disable,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_floor_rate,
	.set_rate = clk_rcg2_set_floor_rate,
	.set_rate_and_parent = clk_rcg2_set_floor_rate_and_parent,
	.get_duty_cycle = clk_rcg2_get_duty_cycle,
	.set_duty_cycle = clk_rcg2_set_duty_cycle,
	.init = clk_rcg2_init,
	.debug_init = clk_common_debug_init,
};
EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);

const struct clk_ops clk_rcg2_mux_closest_ops = {
	.determine_rate = __clk_mux_determine_rate_closest,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_mux_closest_ops);

struct frac_entry {
	int num;
	int den;
};

static const struct frac_entry frac_table_675m[] = {	/* link rate of 270M */
	{ 52, 295 },	/* 119 M */
	{ 11, 57 },	/* 130.25 M */
	{ 63, 307 },	/* 138.50 M */
	{ 11, 50 },	/* 148.50 M */
	{ 47, 206 },	/* 154 M */
	{ 31, 100 },	/* 205.25 M */
	{ 107, 269 },	/* 268.50 M */
	{ },
};

static struct frac_entry frac_table_810m[] = {	/* Link rate of 162M */
	{ 31, 211 },	/* 119 M */
	{ 32, 199 },	/* 130.25 M */
	{ 63, 307 },	/* 138.50 M */
	{ 11, 60 },	/* 148.50 M */
	{ 50, 263 },	/* 154 M */
	{ 31, 120 },	/* 205.25 M */
	{ 119, 359 },	/* 268.50 M */
	{ },
};
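
/*
 * Illustrative check of the tables above: each M/N entry scales the fixed
 * source rate down to a pixel rate. With an 810 MHz source, the { 11, 60 }
 * entry gives 810 MHz * 11 / 60 = 148.50 MHz; the 675 MHz table reaches
 * the same pixel rate via { 11, 50 }: 675 MHz * 11 / 50 = 148.50 MHz.
 */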
static int clk_edp_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = *rcg->freq_tbl;
	const struct frac_entry *frac;
	int delta = 100000;
	s64 src_rate = parent_rate;
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;

	if (src_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		request = rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((src_rate < (request - delta)) ||
		    (src_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
			    &hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}

	return -EINVAL;
}

static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Parent index is set statically in frequency table */
	return clk_edp_pixel_set_rate(hw, rate, parent_rate);
}

static int clk_edp_pixel_determine_rate(struct clk_hw *hw,
					struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	const struct frac_entry *frac;
	int delta = 100000;
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;
	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);

	/* Force the correct parent */
	req->best_parent_hw = clk_hw_get_parent_by_index(hw, index);
	if (!req->best_parent_hw)
		return -EINVAL;

	req->best_parent_rate = clk_hw_get_rate(req->best_parent_hw);

	if (req->best_parent_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		request = req->rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((req->best_parent_rate < (request - delta)) ||
		    (req->best_parent_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
			    &hid_div);
		hid_div >>= CFG_SRC_DIV_SHIFT;
		hid_div &= mask;

		req->rate = calc_rate(req->best_parent_rate,
				      frac->num, frac->den,
				      !!frac->den, hid_div);
		return 0;
	}

	return -EINVAL;
}

const struct clk_ops clk_edp_pixel_ops = {
	.prepare = clk_prepare_regmap,
	.unprepare = clk_unprepare_regmap,
	.pre_rate_change = clk_pre_change_regmap,
	.post_rate_change = clk_post_change_regmap,
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_edp_pixel_set_rate,
	.set_rate_and_parent = clk_edp_pixel_set_rate_and_parent,
	.determine_rate = clk_edp_pixel_determine_rate,
	.init = clk_rcg2_init,
	.debug_init = clk_common_debug_init,
};
EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);

static int clk_byte_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
	unsigned long parent_rate, div;
	u32 mask = BIT(rcg->hid_width) - 1;
	struct clk_hw *p;

	if (req->rate == 0)
		return -EINVAL;

	req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index);
	if (!p)
		return -EINVAL;

	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, req->rate);

	div = DIV_ROUND_UP((2 * parent_rate), req->rate) - 1;
	div = min_t(u32, div, mask);

	req->rate = calc_rate(parent_rate, 0, 0, 0, div);

	return 0;
}
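
/*
 * Worked example (illustrative): for a requested byte rate of 200 MHz from
 * a 600 MHz parent, div = DIV_ROUND_UP(2 * 600M, 200M) - 1 = 5, i.e. the
 * divide-by-3 encoding, and calc_rate(600M, 0, 0, 0, 5) returns
 * 600 MHz * 2 / 6 = 200 MHz.
 */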
static int clk_byte_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = *rcg->freq_tbl;
	unsigned long div;
	u32 mask = BIT(rcg->hid_width) - 1;

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	f.pre_div = div;

	return clk_rcg2_configure(rcg, &f);
}

static int clk_byte_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Parent index is set statically in frequency table */
	return clk_byte_set_rate(hw, rate, parent_rate);
}

const struct clk_ops clk_byte_ops = {
	.prepare = clk_prepare_regmap,
	.unprepare = clk_unprepare_regmap,
	.pre_rate_change = clk_pre_change_regmap,
	.post_rate_change = clk_post_change_regmap,
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte_set_rate,
	.set_rate_and_parent = clk_byte_set_rate_and_parent,
	.determine_rate = clk_byte_determine_rate,
	.init = clk_rcg2_init,
	.debug_init = clk_common_debug_init,
};
EXPORT_SYMBOL_GPL(clk_byte_ops);

static int clk_byte2_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	unsigned long parent_rate, div;
	u32 mask = BIT(rcg->hid_width) - 1;
	struct clk_hw *p;
	unsigned long rate = req->rate;

	p = req->best_parent_hw;
	if (!p || rate == 0)
		return -EINVAL;

	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, rate);

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	req->rate = calc_rate(parent_rate, 0, 0, 0, div);

	return 0;
}

static int clk_byte2_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	unsigned long div;
	int i, num_parents = clk_hw_get_num_parents(hw);
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 cfg;

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	f.pre_div = div;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++) {
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			return clk_rcg2_configure(rcg, &f);
		}
	}

	return -EINVAL;
}

static int clk_byte2_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Read the hardware to determine parent during set_rate */
	return clk_byte2_set_rate(hw, rate, parent_rate);
}

const struct clk_ops clk_byte2_ops = {
	.prepare = clk_prepare_regmap,
	.unprepare = clk_unprepare_regmap,
	.pre_rate_change = clk_pre_change_regmap,
	.post_rate_change = clk_post_change_regmap,
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte2_set_rate,
	.set_rate_and_parent = clk_byte2_set_rate_and_parent,
	.determine_rate = clk_byte2_determine_rate,
	.init = clk_rcg2_init,
	.debug_init = clk_common_debug_init,
};
EXPORT_SYMBOL_GPL(clk_byte2_ops);

static const struct frac_entry frac_table_pixel[] = {
	{ 3, 8 },
	{ 2, 9 },
	{ 4, 9 },
	{ 1, 1 },
	{ 2, 3 },
	{ }
};

static int clk_pixel_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	unsigned long request, src_rate;
	int delta = 100000;
	const struct frac_entry *frac = frac_table_pixel;

	if (!req->best_parent_hw)
		return -EINVAL;

	for (; frac->num; frac++) {
		request = (req->rate * frac->den) / frac->num;

		src_rate = clk_hw_round_rate(req->best_parent_hw, request);
		if ((src_rate < (request - delta)) ||
		    (src_rate > (request + delta)))
			continue;

		req->best_parent_rate = src_rate;
		req->rate = (src_rate * frac->num) / frac->den;
		return 0;
	}

	return -EINVAL;
}

static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	const struct frac_entry *frac = frac_table_pixel;
	unsigned long request;
	int delta = 100000;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div, cfg;
	int i, num_parents = clk_hw_get_num_parents(hw);

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++)
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			break;
		}

	for (; frac->num; frac++) {
		request = (rate * frac->den) / frac->num;

		if ((parent_rate < (request - delta)) ||
		    (parent_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
			    &hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}

	return -EINVAL;
}

static int clk_pixel_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
					 unsigned long parent_rate, u8 index)
{
	return clk_pixel_set_rate(hw, rate, parent_rate);
}

const struct clk_ops clk_pixel_ops = {
	.prepare = clk_prepare_regmap,
	.unprepare = clk_unprepare_regmap,
	.pre_rate_change = clk_pre_change_regmap,
	.post_rate_change = clk_post_change_regmap,
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_pixel_set_rate,
	.set_rate_and_parent = clk_pixel_set_rate_and_parent,
	.determine_rate = clk_pixel_determine_rate,
	.init = clk_rcg2_init,
	.debug_init = clk_common_debug_init,
};
EXPORT_SYMBOL_GPL(clk_pixel_ops);

static int clk_gfx3d_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rate_request parent_req = { .min_rate = 0, .max_rate = ULONG_MAX };
	struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw);
	struct clk_hw *xo, *p0, *p1, *p2;
	unsigned long p0_rate;
	u8 mux_div = cgfx->div;
	int ret;

	p0 = cgfx->hws[0];
	p1 = cgfx->hws[1];
	p2 = cgfx->hws[2];

	/*
	 * This function ping-pongs the RCG between PLLs: if we don't have at
	 * least one fixed PLL and two variable ones, it's not going to work
	 * correctly.
	 */
	if (WARN_ON(!p0 || !p1 || !p2))
		return -EINVAL;

	xo = clk_hw_get_parent_by_index(hw, 0);
	if (!xo)
		return -EINVAL;

	if (req->rate == clk_hw_get_rate(xo)) {
		req->best_parent_hw = xo;
		return 0;
	}

	if (mux_div == 0)
		mux_div = 1;

	parent_req.rate = req->rate * mux_div;

	/* This has to be a fixed rate PLL */
	p0_rate = clk_hw_get_rate(p0);

	if (parent_req.rate == p0_rate) {
		req->rate = req->best_parent_rate = p0_rate;
		req->best_parent_hw = p0;
		return 0;
	}

	if (req->best_parent_hw == p0) {
		/* Are we going back to a previously used rate? */
		if (clk_hw_get_rate(p2) == parent_req.rate)
			req->best_parent_hw = p2;
		else
			req->best_parent_hw = p1;
	} else if (req->best_parent_hw == p2) {
		req->best_parent_hw = p1;
	} else {
		req->best_parent_hw = p2;
	}

	clk_hw_get_rate_range(req->best_parent_hw,
			      &parent_req.min_rate, &parent_req.max_rate);

	if (req->min_rate > parent_req.min_rate)
		parent_req.min_rate = req->min_rate;

	if (req->max_rate < parent_req.max_rate)
		parent_req.max_rate = req->max_rate;

	ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
	if (ret)
		return ret;

	req->rate = req->best_parent_rate = parent_req.rate;
	req->rate /= mux_div;

	return 0;
}
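
/*
 * Illustrative walk-through of the ping-pong above: p0 is the fixed-rate
 * PLL, p1 and p2 the variable ones. When the requested rate cannot be
 * served by XO or p0, the code retargets whichever variable PLL is not
 * currently in use (p2 if running on p1, and vice versa), negotiates the
 * new rate on it via __clk_determine_rate(), and only then switches the
 * mux, so a variable PLL is never reprogrammed while driving the GPU.
 */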
static int clk_gfx3d_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
					 unsigned long parent_rate, u8 index)
{
	struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw);
	struct clk_rcg2 *rcg = &cgfx->rcg;
	u32 cfg;
	int ret;

	cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
	/* On some targets, the GFX3D RCG may need to divide PLL frequency */
	if (cgfx->div > 1)
		cfg |= ((2 * cgfx->div) - 1) << CFG_SRC_DIV_SHIFT;

	ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}

static int clk_gfx3d_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	/*
	 * We should never get here; clk_gfx3d_determine_rate() should always
	 * make us use a different parent than what we're currently using, so
	 * clk_gfx3d_set_rate_and_parent() should always be called.
	 */
	return 0;
}

const struct clk_ops clk_gfx3d_ops = {
	.prepare = clk_prepare_regmap,
	.unprepare = clk_unprepare_regmap,
	.pre_rate_change = clk_pre_change_regmap,
	.post_rate_change = clk_post_change_regmap,
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_gfx3d_set_rate,
	.set_rate_and_parent = clk_gfx3d_set_rate_and_parent,
	.determine_rate = clk_gfx3d_determine_rate,
	.init = clk_rcg2_init,
	.debug_init = clk_common_debug_init,
};
EXPORT_SYMBOL_GPL(clk_gfx3d_ops);

static int
clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, const struct freq_tbl *f)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	ret = clk_rcg2_set_force_enable(hw);
	if (ret)
		return ret;

	ret = clk_rcg2_configure(rcg, f);
	if (ret)
		return ret;

	return clk_rcg2_clear_force_enable(hw);
}

static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
				    unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f;

	f = qcom_find_freq(rcg->freq_tbl, rate);
	if (!f)
		return -EINVAL;

	/*
	 * In case the clock is disabled, update the M, N and D registers,
	 * cache the CFG value in parked_cfg and don't hit the update bit of
	 * the CMD register.
	 */
	if (!clk_hw_is_enabled(hw))
		return __clk_rcg2_configure(rcg, f, &rcg->parked_cfg);

	return clk_rcg2_shared_force_enable_clear(hw, f);
}

static int clk_rcg2_shared_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return clk_rcg2_shared_set_rate(hw, rate, parent_rate);
}

static int clk_rcg2_shared_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	/*
	 * Set the update bit because the required configuration has already
	 * been written in clk_rcg2_shared_set_rate()
	 */
	ret = clk_rcg2_set_force_enable(hw);
	if (ret)
		return ret;

	/* Write back the stored configuration corresponding to current rate */
	ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, rcg->parked_cfg);
	if (ret)
		return ret;

	ret = update_config(rcg);
	if (ret)
		return ret;

	return clk_rcg2_clear_force_enable(hw);
}

static void clk_rcg2_shared_disable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	/*
	 * Store the current configuration, as switching to the safe source
	 * would clear the SRC and DIV fields of the CFG register.
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &rcg->parked_cfg);

	/*
	 * Park the RCG at a safe configuration - sourced off of safe source.
	 * Force enable and disable the RCG while configuring it to safeguard
	 * against any update signal coming from the downstream clock.
	 * The current parent is still prepared and enabled at this point, and
	 * the safe source is always on while the application processor
	 * subsystem is online. Therefore, the RCG can safely switch its
	 * parent.
	 */
	clk_rcg2_set_force_enable(hw);

	regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
		     rcg->safe_src_index << CFG_SRC_SEL_SHIFT);

	update_config(rcg);

	clk_rcg2_clear_force_enable(hw);
}
  1256. static u8 clk_rcg2_shared_get_parent(struct clk_hw *hw)
  1257. {
  1258. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  1259. /* If the shared rcg is parked use the cached cfg instead */
  1260. if (!clk_hw_is_enabled(hw))
  1261. return __clk_rcg2_get_parent(hw, rcg->parked_cfg);
  1262. return clk_rcg2_get_parent(hw);
  1263. }
  1264. static int clk_rcg2_shared_set_parent(struct clk_hw *hw, u8 index)
  1265. {
  1266. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  1267. /* If the shared rcg is parked only update the cached cfg */
  1268. if (!clk_hw_is_enabled(hw)) {
  1269. rcg->parked_cfg &= ~CFG_SRC_SEL_MASK;
  1270. rcg->parked_cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
  1271. return 0;
  1272. }
  1273. return clk_rcg2_set_parent(hw, index);
  1274. }
  1275. static unsigned long
  1276. clk_rcg2_shared_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
  1277. {
  1278. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  1279. /* If the shared rcg is parked use the cached cfg instead */
  1280. if (!clk_hw_is_enabled(hw))
  1281. return __clk_rcg2_recalc_rate(hw, parent_rate, rcg->parked_cfg);
  1282. return clk_rcg2_recalc_rate(hw, parent_rate);
  1283. }
  1284. const struct clk_ops clk_rcg2_shared_ops = {
  1285. .prepare = clk_prepare_regmap,
  1286. .unprepare = clk_unprepare_regmap,
  1287. .pre_rate_change = clk_pre_change_regmap,
  1288. .post_rate_change = clk_post_change_regmap,
  1289. .enable = clk_rcg2_shared_enable,
  1290. .disable = clk_rcg2_shared_disable,
  1291. .get_parent = clk_rcg2_shared_get_parent,
  1292. .set_parent = clk_rcg2_shared_set_parent,
  1293. .recalc_rate = clk_rcg2_shared_recalc_rate,
  1294. .determine_rate = clk_rcg2_determine_rate,
  1295. .set_rate = clk_rcg2_shared_set_rate,
  1296. .set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
  1297. .init = clk_rcg2_init,
  1298. .debug_init = clk_common_debug_init,
  1299. };
  1300. EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);
  1301. /* Common APIs to be used for DFS based RCGR */
  1302. static int clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l,
  1303. struct freq_tbl *f)
  1304. {
  1305. struct clk_rcg2 *rcg = to_clk_rcg2(hw);
  1306. struct clk_hw *p;
  1307. unsigned long prate = 0;
  1308. u32 val, mask, cfg, mode, src;
  1309. int i, num_parents;
  1310. regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(l), &cfg);
  1311. mask = BIT(rcg->hid_width) - 1;
  1312. f->pre_div = 1;
  1313. if (cfg & mask)
  1314. f->pre_div = cfg & mask;
  1315. src = cfg & CFG_SRC_SEL_MASK;
  1316. src >>= CFG_SRC_SEL_SHIFT;
  1317. num_parents = clk_hw_get_num_parents(hw);
  1318. for (i = 0; i < num_parents; i++) {
  1319. if (src == rcg->parent_map[i].cfg) {
  1320. f->src = rcg->parent_map[i].src;
  1321. p = clk_hw_get_parent_by_index(&rcg->clkr.hw, i);
  1322. if (!p)
  1323. return -EINVAL;
  1324. prate = clk_hw_get_rate(p);
  1325. }
  1326. }
  1327. mode = cfg & CFG_MODE_MASK;
  1328. mode >>= CFG_MODE_SHIFT;
  1329. if (mode) {
  1330. mask = BIT(rcg->mnd_width) - 1;
  1331. regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_M_DFSR(l),
  1332. &val);
  1333. val &= mask;
  1334. f->m = val;
  1335. regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_N_DFSR(l),
  1336. &val);
  1337. val = ~val;
  1338. val &= mask;
  1339. val += f->m;
  1340. f->n = val;
  1341. }
  1342. f->freq = calc_rate(prate, f->m, f->n, mode, f->pre_div);
  1343. return 0;
  1344. }
static int clk_rcg2_dfs_populate_freq_table(struct clk_rcg2 *rcg)
{
	struct freq_tbl *freq_tbl;
	int i, ret;

	/* Allocate space for 1 extra since table is NULL terminated */
	freq_tbl = kcalloc(MAX_PERF_LEVEL + 1, sizeof(*freq_tbl), GFP_KERNEL);
	if (!freq_tbl)
		return -ENOMEM;
	rcg->freq_tbl = freq_tbl;

	for (i = 0; i < MAX_PERF_LEVEL; i++) {
		ret = clk_rcg2_dfs_populate_freq(&rcg->clkr.hw, i,
						 freq_tbl + i);
		if (ret)
			return ret;
	}

	return ret;
}

static int clk_rcg2_dfs_determine_rate(struct clk_hw *hw,
				       struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	if (!rcg->freq_tbl) {
		ret = clk_rcg2_dfs_populate_freq_table(rcg);
		if (ret) {
			pr_err("Failed to update DFS tables for %s\n",
			       clk_hw_get_name(hw));
			return ret;
		}
	}

	return clk_rcg2_determine_rate(hw, req);
}

static unsigned long
clk_rcg2_dfs_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 level, mask, cfg, m = 0, n = 0, mode, pre_div;

	regmap_read(rcg->clkr.regmap,
		    rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &level);
	level &= GENMASK(4, 1);
	level >>= 1;

	if (rcg->freq_tbl)
		return rcg->freq_tbl[level].freq;

	/*
	 * Assume that parent_rate is actually the rate of the selected
	 * parent, because we can't do any better at figuring it out when
	 * the table hasn't been populated yet. We only populate the table
	 * in determine_rate because we can't guarantee the parents
	 * will be registered with the framework until then.
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(level),
		    &cfg);

	mask = BIT(rcg->hid_width) - 1;
	pre_div = 1;
	if (cfg & mask)
		pre_div = cfg & mask;

	mode = cfg & CFG_MODE_MASK;
	mode >>= CFG_MODE_SHIFT;
	if (mode) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap,
			    rcg->cmd_rcgr + SE_PERF_M_DFSR(level), &m);
		m &= mask;

		regmap_read(rcg->clkr.regmap,
			    rcg->cmd_rcgr + SE_PERF_N_DFSR(level), &n);
		n = ~n;
		n &= mask;
		n += m;
	}

	return calc_rate(parent_rate, m, n, mode, pre_div);
}

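/*
 * Example of the level decode above: SE_CMD_DFSR bits [4:1] hold the
 * current perf level, so a register value of 0b10110 yields level
 * 0b1011 = 11, and the rate comes from freq_tbl[11] once the table has
 * been populated.
 */
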
static const struct clk_ops clk_rcg2_dfs_ops = {
	.prepare = clk_prepare_regmap,
	.unprepare = clk_unprepare_regmap,
	.pre_rate_change = clk_pre_change_regmap,
	.post_rate_change = clk_post_change_regmap,
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.determine_rate = clk_rcg2_dfs_determine_rate,
	.recalc_rate = clk_rcg2_dfs_recalc_rate,
	.init = clk_rcg2_init,
	.debug_init = clk_common_debug_init,
};

/* Common APIs to be used for CESTA based RCGR */
static int clk_rcg2_crmc_populate_freq(struct clk_hw *hw, unsigned int l,
				       struct freq_tbl *f)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct clk_crm *crm = rcg->clkr.crm;
	struct clk_hw *p;
	unsigned long prate = 0;
	u32 mask, rcgr_cfg, src, pll_lval, lval, alpha_val, num_parents, i;

	if (!crm->regmap_crmc) {
		pr_err("%s crmc regmap error\n", __func__);
		return -ENODEV;
	}

	regmap_read(crm->regmap_crmc,
		    CLK_RCG_CRMC_CFG_RCGR(rcg->clkr.crm_vcd, l), &rcgr_cfg);
	regmap_read(crm->regmap_crmc,
		    CLK_RCG_CRMC_PERF_LEVEL_PLL_L_VAL_LUT(rcg->clkr.crm_vcd, l),
		    &pll_lval);

	mask = BIT(rcg->hid_width) - 1;
	f->pre_div = 1;
	if (rcgr_cfg & mask)
		f->pre_div = rcgr_cfg & mask;

	src = rcgr_cfg & CFG_SRC_SEL_MASK;
	src >>= CFG_SRC_SEL_SHIFT;

	lval = pll_lval & PLL_L_VAL_MASK;
	alpha_val = (pll_lval & PLL_ALPHA_VAL_MASK) >> PLL_ALPHA_VAL_SHIFT;

	num_parents = clk_hw_get_num_parents(hw);
	for (i = 0; i < num_parents; i++) {
		if (src == rcg->parent_map[i].cfg) {
			f->src = rcg->parent_map[i].src;
			p = clk_hw_get_parent_by_index(&rcg->clkr.hw, i);
			if (!p)
				return -EINVAL;

			if (!lval) {
				prate = clk_hw_get_rate(p);
			} else if (clk_is_regmap_clk(p)) {
				struct clk_regmap *rclk = to_clk_regmap(p);

				if (rclk->ops && rclk->ops->calc_pll)
					prate = rclk->ops->calc_pll(p, lval,
								    alpha_val);
			}
			break;
		}
	}

	if (!prate) {
		pr_err("%s error clk=%s\n", __func__, qcom_clk_hw_get_name(hw));
		return -EINVAL;
	}

	f->freq = calc_rate(prate, 0, 0, 0, f->pre_div);
	return 0;
}

int clk_rcg2_crmc_populate_freq_table(struct clk_rcg2 *rcg)
{
	struct freq_tbl *freq_tbl, *curr_freq_tbl;
	u32 prev_freq = 0;
	int i, ret;

	/* Allocate space for 1 extra since table is NULL terminated */
	freq_tbl = kcalloc(MAX_PERF_LEVEL_PER_VCD + 1, sizeof(*freq_tbl),
			   GFP_KERNEL);
	if (!freq_tbl)
		return -ENOMEM;
	rcg->freq_tbl = freq_tbl;

	/* Skip the first LUT entry; it is used to disable the RCG. */
	for (i = 0; i < MAX_PERF_LEVEL_PER_VCD; i++) {
		ret = clk_rcg2_crmc_populate_freq(&rcg->clkr.hw, i + 1,
						  freq_tbl + i);
		if (ret)
			return ret;

		curr_freq_tbl = freq_tbl + i;

		/*
		 * A repeated or decreasing frequency marks the end of
		 * the LUT.
		 */
		if (prev_freq >= curr_freq_tbl->freq) {
			curr_freq_tbl->freq = 0;
			break;
		}

		prev_freq = curr_freq_tbl->freq;
	}

	return ret;
}

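/*
 * Illustrative LUT scan (frequencies are made up): if perf levels 1..4
 * read back as { 19.2 MHz, 200 MHz, 403 MHz, 403 MHz }, the repeated
 * 403 MHz entry is zeroed to terminate the table, leaving three usable
 * perf levels.
 */
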
static int clk_rcg2_crmc_determine_rate(struct clk_hw *hw,
					struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct clk_crm *crm = rcg->clkr.crm;
	int ret;

	ret = clk_runtime_get_regmap(&rcg->clkr);
	if (ret)
		return ret;

	ret = qcom_clk_crm_init(rcg->clkr.dev, crm);
	if (ret) {
		pr_err("%s Failed to initialize CRM ret=%d clk=%s\n",
		       __func__, ret, qcom_clk_hw_get_name(hw));
		goto err;
	}

	if (!rcg->freq_populated) {
		ret = clk_rcg2_crmc_populate_freq_table(rcg);
		if (ret) {
			pr_err("%s Failed to populate crmc tables for %s\n",
			       __func__, qcom_clk_hw_get_name(hw));
			goto err;
		}
		rcg->freq_populated = true;
	}

	ret = clk_rcg2_determine_rate(hw, req);
err:
	clk_runtime_put_regmap(&rcg->clkr);

	return ret;
}

static int clk_rcg2_vote_perf_level(struct clk_hw *hw, unsigned long rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct clk_crm *crm = rcg->clkr.crm;
	struct crm_cmd cmd;
	int perf_index;
	int ret, i;

	if (!rcg->freq_tbl || !crm->initialized) {
		pr_err("%s rcg=%s rate=%ld\n", __func__,
		       qcom_clk_hw_get_name(hw), rate);
		return -EINVAL;
	}

	perf_index = qcom_find_crm_freq_index(rcg->freq_tbl, rate);
	if (perf_index < 0 || perf_index >= MAX_PERF_LEVEL_PER_VCD) {
		pr_err("%s rcg name %s perf_index=%d\n", __func__,
		       qcom_clk_hw_get_name(hw), perf_index);
		return -EINVAL;
	}

	cmd.data = perf_index;
	cmd.resource_idx = rcg->clkr.crm_vcd;
	cmd.wait = 1;

	for (i = 0; i < MAX_CRM_SW_DRV_STATE; i++) {
		cmd.pwr_state.sw = i;
		ret = crm_write_perf_ol(crm->dev, CRM_SW_DRV, 0, &cmd);
		if (ret)
			pr_err("%s err write_perf_ol rcg name %s ret=%d\n",
			       __func__, qcom_clk_hw_get_name(hw), ret);
	}

	return ret;
}

static int clk_rcg2_crmc_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	rcg->current_freq = rate;

	if (!clk_hw_is_prepared(hw))
		return 0;

	return clk_rcg2_vote_perf_level(hw, rate);
}

/**
 * clk_rcg2_crmc_prepare() - cesta rcg/vcd prepare callback for cesta-managed clks
 * @hw: clk to operate on
 *
 * Vote for the clock by sending the perf level required by its current
 * rate, if that vote hasn't been sent before. The voted level and the
 * level required by the current rate can get out of sync in error
 * paths, or at initial clock registration when the clock starts out as
 * an orphan and is reparented later.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int clk_rcg2_crmc_prepare(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	if (!rcg->current_freq)
		rcg->current_freq = cxo_f.freq;

	return clk_rcg2_vote_perf_level(hw, rcg->current_freq);
}

/**
 * clk_rcg2_crmc_unprepare() - cesta rcg/vcd unprepare callback for cesta-managed clks
 * @hw: clk to operate on
 *
 * Unprepare the clock by removing the outstanding perf level vote.
 */
void clk_rcg2_crmc_unprepare(struct clk_hw *hw)
{
	int ret;

	/*
	 * Cesta parks the RCG at a safe configuration, i.e. the CP0
	 * perf level.
	 */
	ret = clk_rcg2_vote_perf_level(hw, 0);
	if (ret)
		pr_err("%s rcg name=%s ret=%d\n", __func__,
		       qcom_clk_hw_get_name(hw), ret);
}

unsigned long clk_rcg2_crmc_hw_set_rate(struct clk_hw *hw,
					enum crm_drv_type client_type,
					u32 client_idx, u32 pwr_st,
					unsigned long rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct clk_crm *crm = rcg->clkr.crm;
	struct crm_cmd cmd;
	int ret, perf_index;

	perf_index = qcom_find_crm_freq_index(rcg->freq_tbl, rate);
	if (perf_index < 0 || perf_index >= MAX_PERF_LEVEL_PER_VCD) {
		pr_err("%s rcg name %s perf_index=%d\n", __func__,
		       qcom_clk_hw_get_name(hw), perf_index);
		return -EINVAL;
	}

	cmd.resource_idx = rcg->clkr.crm_vcd;
	cmd.data = perf_index;
	cmd.wait = 1;
	cmd.pwr_state.hw = pwr_st;

	ret = crm_write_perf_ol(crm->dev, client_type, client_idx, &cmd);
	if (ret)
		pr_err("%s err write_perf_ol rcg name %s ret=%d\n",
		       __func__, qcom_clk_hw_get_name(hw), ret);

	return ret;
}

static long clk_rcg2_crmc_list_rate(struct clk_hw *hw, unsigned int n,
				    unsigned long fmax)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct clk_crm *crm = rcg->clkr.crm;
	struct clk_rate_request req = {0};
	int ret;

	/* Populate the frequency table first if CRM isn't initialized yet */
	if (crm->name && !crm->initialized) {
		ret = clk_rcg2_crmc_determine_rate(hw, &req);
		if (ret)
			return ret;
	}

	return clk_rcg2_list_rate(hw, n, fmax);
}

static struct clk_regmap_ops clk_rcg2_crmc_regmap_ops = {
	.set_crm_rate = clk_rcg2_crmc_hw_set_rate,
	.list_rate = clk_rcg2_crmc_list_rate,
};

static int clk_rcg2_crmc_init(struct clk_hw *hw)
{
	struct clk_regmap *rclk = to_clk_regmap(hw);

	if (!rclk->ops)
		rclk->ops = &clk_rcg2_crmc_regmap_ops;

	return 0;
}

static unsigned long
clk_rcg2_crmc_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	/*
	 * CRM-controlled clocks have multiple SW and HW voters. We need to
	 * return the Linux SW vote instead of the current HW rate. The HW
	 * rate is a result of aggregating across all clients. If we return
	 * the aggregated rate, then subsequent clk_set_rate() calls can
	 * short-circuit before calling our set_rate() callback, even if we
	 * haven't sent a vote for that new rate on behalf of our SW client
	 * yet. Failing to do so can result in the clock frequency dropping
	 * below the rate expected by the framework and consumer.
	 */
	return rcg->current_freq;
}

const struct clk_ops clk_rcg2_crmc_ops = {
	.prepare = clk_rcg2_crmc_prepare,
	.unprepare = clk_rcg2_crmc_unprepare,
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_rate = clk_rcg2_crmc_set_rate,
	.determine_rate = clk_rcg2_crmc_determine_rate,
	.recalc_rate = clk_rcg2_crmc_recalc_rate,
	.init = clk_rcg2_crmc_init,
	.debug_init = clk_common_debug_init,
};
EXPORT_SYMBOL(clk_rcg2_crmc_ops);

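/*
 * Illustrative sketch only (all names are hypothetical, and the CRM
 * handle is normally wired up by the clock-controller probe): a
 * CESTA-managed RCG leaves rate selection to the CRM hardware, so it
 * mainly needs its VCD index and clk_rcg2_crmc_ops; its frequency table
 * is read back from the CRMC LUT on the first determine_rate call.
 */
#if 0
static struct clk_rcg2 example_crmc_clk_src = {
	.cmd_rcgr = 0x2000,
	.hid_width = 5,
	.parent_map = example_parent_map_0,
	.clkr = {
		.crm_vcd = 2,	/* VCD this RCG is mapped to */
		.hw.init = &(const struct clk_init_data){
			.name = "example_crmc_clk_src",
			.parent_data = example_parent_data_0,
			.num_parents = ARRAY_SIZE(example_parent_data_0),
			.ops = &clk_rcg2_crmc_ops,
		},
	},
};
#endif
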
static int clk_rcg2_vote_bw(struct clk_hw *hw, unsigned long rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct clk_crm *crm = rcg->clkr.crm;
	struct crm_cmd cmd = {0};
	int ret, i;

	if (rate)
		rate /= 1000000;

	cmd.resource_idx = 0;
	cmd.wait = 1;
	cmd.data = BCM_TCS_CMD(1, 1, 0, rate);

	for (i = 0; i < MAX_CRM_SW_DRV_STATE; i++) {
		cmd.pwr_state.sw = i;
		ret = crm_write_bw_vote(crm->dev, CRM_SW_DRV, 0, &cmd);
		if (ret)
			pr_err("%s err crm_write_bw_vote rcg name %s ret=%d\n",
			       __func__, qcom_clk_hw_get_name(hw), ret);
	}

	return ret;
}

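/*
 * Note on the packing above and in clk_rcg2_crmb_hw_set_bw() below:
 * rate is first scaled down by 10^6, and, assuming the usual
 * BCM_TCS_CMD(commit, valid, vote_x, vote_y) layout where vote_x is the
 * AB (average) field and vote_y the IB (instantaneous) field, the
 * SW-client path above votes IB while the HW-client path votes AB.
 */
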
/**
 * clk_rcg2_crmb_prepare() - cesta rcg/vcd prepare callback for cesta-managed clks
 * @hw: clk to operate on
 *
 * Vote for the clock by sending the bandwidth vote required by its
 * current rate, if that vote hasn't been sent before. The voted level
 * and the level required by the current rate can get out of sync in
 * error paths, or at initial clock registration when the clock starts
 * out as an orphan and is reparented later.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int clk_rcg2_crmb_prepare(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct clk_crm *crm = rcg->clkr.crm;

	if (!rcg->freq_tbl || !crm->initialized)
		return 0;

	return clk_rcg2_vote_bw(hw, rcg->current_freq);
}

/**
 * clk_rcg2_crmb_unprepare() - cesta rcg/vcd unprepare callback for cesta-managed clks
 * @hw: clk to operate on
 *
 * Unprepare the clock by removing the outstanding bandwidth vote.
 */
void clk_rcg2_crmb_unprepare(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct clk_crm *crm = rcg->clkr.crm;
	int ret;

	if (!rcg->freq_tbl || !crm->initialized)
		return;

	/*
	 * Cesta parks the RCG at a safe configuration, i.e. the CP0
	 * perf level.
	 */
	ret = clk_rcg2_vote_bw(hw, 0);
	if (ret)
		pr_err("%s clk_rcg2_vote_bw rcg name %s ret=%d\n",
		       __func__, qcom_clk_hw_get_name(hw), ret);
}

static int clk_rcg2_crmb_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct clk_crm *crm = rcg->clkr.crm;
	int ret;

	rcg->current_freq = rate;

	if (!clk_hw_is_prepared(hw))
		return 0;

	if (rcg->freq_tbl && crm->initialized) {
		ret = clk_rcg2_vote_bw(hw, rate);
		if (ret)
			pr_err("%s clk_rcg2_vote_bw rcg name %s ret=%d\n",
			       __func__, qcom_clk_hw_get_name(hw), ret);
		return ret;
	}

	return -EINVAL;
}

unsigned long clk_rcg2_crmb_hw_set_bw(struct clk_hw *hw,
				      enum crm_drv_type client_type,
				      u32 client_idx, u32 pwr_st,
				      unsigned long rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct clk_crm *crm = rcg->clkr.crm;
	struct crm_cmd cmd = {0};
	int ret;

	if (rate)
		rate /= 1000000;

	cmd.resource_idx = 0;
	cmd.pwr_state.hw = pwr_st;
	cmd.wait = 1;

	/* Write AB for HW clients */
	cmd.data = BCM_TCS_CMD(1, 1, rate, 0);

	ret = crm_write_bw_vote(crm->dev, client_type, client_idx, &cmd);
	if (ret)
		pr_err("%s err crm_write_bw_vote rcg name %s cmd.data=0x%x ret=%d\n",
		       __func__, qcom_clk_hw_get_name(hw), cmd.data, ret);

	return ret;
}

static struct clk_regmap_ops clk_rcg2_crmb_regmap_ops = {
	.set_crm_rate = clk_rcg2_crmb_hw_set_bw,
	.list_rate = clk_rcg2_list_rate,
};

static int clk_rcg2_crmb_init(struct clk_hw *hw)
{
	struct clk_regmap *rclk = to_clk_regmap(hw);

	if (!rclk->ops)
		rclk->ops = &clk_rcg2_crmb_regmap_ops;

	return 0;
}

const struct clk_ops clk_rcg2_crmb_ops = {
	.prepare = clk_rcg2_crmb_prepare,
	.unprepare = clk_rcg2_crmb_unprepare,
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_rate = clk_rcg2_crmb_set_rate,
	.determine_rate = clk_rcg2_crmc_determine_rate,
	.recalc_rate = clk_rcg2_crmc_recalc_rate,
	.init = clk_rcg2_crmb_init,
	.debug_init = clk_common_debug_init,
};
EXPORT_SYMBOL(clk_rcg2_crmb_ops);

static int clk_rcg2_enable_dfs(const struct clk_rcg_dfs_data *data,
			       struct regmap *regmap)
{
	struct clk_rcg2 *rcg = data->rcg;
	struct clk_init_data *init = data->init;
	u32 val;
	int ret;

	rcg->flags |= DFS_SUPPORT;

	ret = regmap_read(regmap, rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &val);
	if (ret)
		return -EINVAL;

	if (!(val & SE_CMD_DFS_EN))
		return 0;

	/*
	 * Rate changes with consumer writing a register in
	 * their own I/O region
	 */
	init->flags |= CLK_GET_RATE_NOCACHE;
	init->ops = &clk_rcg2_dfs_ops;

	rcg->freq_tbl = NULL;

	return 0;
}

int qcom_cc_register_rcg_dfs(struct regmap *regmap,
			     const struct clk_rcg_dfs_data *rcgs, size_t len)
{
	int i, ret;

	for (i = 0; i < len; i++) {
		ret = clk_rcg2_enable_dfs(&rcgs[i], regmap);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_cc_register_rcg_dfs);

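/*
 * Illustrative registration flow (the clock names are hypothetical,
 * and this assumes the DEFINE_RCG_DFS() helper from clk-rcg.h): a clock
 * controller lists its DFS-capable RCGs and calls
 * qcom_cc_register_rcg_dfs() from probe before registering the clocks,
 * so that RCGs with SE_CMD_DFS_EN set are switched to clk_rcg2_dfs_ops.
 */
#if 0
static const struct clk_rcg_dfs_data example_dfs_clocks[] = {
	DEFINE_RCG_DFS(example_qup_s0_clk_src),
	DEFINE_RCG_DFS(example_qup_s1_clk_src),
};

static int example_cc_register_dfs(struct regmap *regmap)
{
	return qcom_cc_register_rcg_dfs(regmap, example_dfs_clocks,
					ARRAY_SIZE(example_dfs_clocks));
}
#endif
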
static int clk_rcg2_dp_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div, cfg;
	int i, num_parents = clk_hw_get_num_parents(hw);
	unsigned long num, den;

	rational_best_approximation(parent_rate, rate,
				    GENMASK(rcg->mnd_width - 1, 0),
				    GENMASK(rcg->mnd_width - 1, 0), &den, &num);

	if (!num || !den)
		return -EINVAL;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	hid_div = cfg;
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++) {
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			break;
		}
	}

	f.pre_div = hid_div;
	f.pre_div >>= CFG_SRC_DIV_SHIFT;
	f.pre_div &= mask;

	if (num != den) {
		f.m = num;
		f.n = den;
	} else {
		f.m = 0;
		f.n = 0;
	}

	return clk_rcg2_configure(rcg, &f);
}

static int clk_rcg2_dp_set_rate_and_parent(struct clk_hw *hw,
					   unsigned long rate,
					   unsigned long parent_rate, u8 index)
{
	return clk_rcg2_dp_set_rate(hw, rate, parent_rate);
}

static int clk_rcg2_dp_determine_rate(struct clk_hw *hw,
				      struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	unsigned long num, den;
	u64 tmp;

	/* Parent rate is a fixed phy link rate */
	rational_best_approximation(req->best_parent_rate, req->rate,
				    GENMASK(rcg->mnd_width - 1, 0),
				    GENMASK(rcg->mnd_width - 1, 0), &den, &num);

	if (!num || !den)
		return -EINVAL;

	tmp = req->best_parent_rate * num;
	do_div(tmp, den);
	req->rate = tmp;

	return 0;
}

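/*
 * Worked example for the M/N derivation above: with an 810 MHz DP link
 * clock and a requested 297 MHz pixel clock,
 * rational_best_approximation(810000000, 297000000, ...) returns
 * den = 30 and num = 11 (810/297 reduces to 30/11), so
 * req->rate = 810000000 * 11 / 30 = 297000000 exactly, and
 * clk_rcg2_dp_set_rate() programs M/N = 11/30.
 */
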
const struct clk_ops clk_dp_ops = {
	.prepare = clk_prepare_regmap,
	.unprepare = clk_unprepare_regmap,
	.pre_rate_change = clk_pre_change_regmap,
	.post_rate_change = clk_post_change_regmap,
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_rcg2_dp_set_rate,
	.set_rate_and_parent = clk_rcg2_dp_set_rate_and_parent,
	.determine_rate = clk_rcg2_dp_determine_rate,
	.init = clk_rcg2_init,
	.debug_init = clk_common_debug_init,
};
EXPORT_SYMBOL_GPL(clk_dp_ops);