clk-rcg.c 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2013, 2019, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/kernel.h>
  6. #include <linux/bitops.h>
  7. #include <linux/err.h>
  8. #include <linux/export.h>
  9. #include <linux/clk-provider.h>
  10. #include <linux/regmap.h>
  11. #include <asm/div64.h>
  12. #include "clk-rcg.h"
  13. #include "common.h"
  14. static u32 ns_to_src(struct src_sel *s, u32 ns)
  15. {
  16. ns >>= s->src_sel_shift;
  17. ns &= SRC_SEL_MASK;
  18. return ns;
  19. }
  20. static u32 src_to_ns(struct src_sel *s, u8 src, u32 ns)
  21. {
  22. u32 mask;
  23. mask = SRC_SEL_MASK;
  24. mask <<= s->src_sel_shift;
  25. ns &= ~mask;
  26. ns |= src << s->src_sel_shift;
  27. return ns;
  28. }
  29. static u8 clk_rcg_get_parent(struct clk_hw *hw)
  30. {
  31. struct clk_rcg *rcg = to_clk_rcg(hw);
  32. int num_parents = clk_hw_get_num_parents(hw);
  33. u32 ns;
  34. int i, ret;
  35. ret = regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
  36. if (ret)
  37. goto err;
  38. ns = ns_to_src(&rcg->s, ns);
  39. for (i = 0; i < num_parents; i++)
  40. if (ns == rcg->s.parent_map[i].cfg)
  41. return i;
  42. err:
  43. pr_debug("%s: Clock %s has invalid parent, using default.\n",
  44. __func__, clk_hw_get_name(hw));
  45. return 0;
  46. }
  47. static int reg_to_bank(struct clk_dyn_rcg *rcg, u32 bank)
  48. {
  49. bank &= BIT(rcg->mux_sel_bit);
  50. return !!bank;
  51. }
  52. static u8 clk_dyn_rcg_get_parent(struct clk_hw *hw)
  53. {
  54. struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
  55. int num_parents = clk_hw_get_num_parents(hw);
  56. u32 ns, reg;
  57. int bank;
  58. int i, ret;
  59. struct src_sel *s;
  60. ret = regmap_read(rcg->clkr.regmap, rcg->bank_reg, &reg);
  61. if (ret)
  62. goto err;
  63. bank = reg_to_bank(rcg, reg);
  64. s = &rcg->s[bank];
  65. ret = regmap_read(rcg->clkr.regmap, rcg->ns_reg[bank], &ns);
  66. if (ret)
  67. goto err;
  68. ns = ns_to_src(s, ns);
  69. for (i = 0; i < num_parents; i++)
  70. if (ns == s->parent_map[i].cfg)
  71. return i;
  72. err:
  73. pr_debug("%s: Clock %s has invalid parent, using default.\n",
  74. __func__, clk_hw_get_name(hw));
  75. return 0;
  76. }
  77. static int clk_rcg_set_parent(struct clk_hw *hw, u8 index)
  78. {
  79. struct clk_rcg *rcg = to_clk_rcg(hw);
  80. u32 ns;
  81. regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
  82. ns = src_to_ns(&rcg->s, rcg->s.parent_map[index].cfg, ns);
  83. regmap_write(rcg->clkr.regmap, rcg->ns_reg, ns);
  84. return 0;
  85. }
  86. static u32 md_to_m(struct mn *mn, u32 md)
  87. {
  88. md >>= mn->m_val_shift;
  89. md &= BIT(mn->width) - 1;
  90. return md;
  91. }
  92. static u32 ns_to_pre_div(struct pre_div *p, u32 ns)
  93. {
  94. ns >>= p->pre_div_shift;
  95. ns &= BIT(p->pre_div_width) - 1;
  96. return ns;
  97. }
  98. static u32 pre_div_to_ns(struct pre_div *p, u8 pre_div, u32 ns)
  99. {
  100. u32 mask;
  101. mask = BIT(p->pre_div_width) - 1;
  102. mask <<= p->pre_div_shift;
  103. ns &= ~mask;
  104. ns |= pre_div << p->pre_div_shift;
  105. return ns;
  106. }
  107. static u32 mn_to_md(struct mn *mn, u32 m, u32 n, u32 md)
  108. {
  109. u32 mask, mask_w;
  110. mask_w = BIT(mn->width) - 1;
  111. mask = (mask_w << mn->m_val_shift) | mask_w;
  112. md &= ~mask;
  113. if (n) {
  114. m <<= mn->m_val_shift;
  115. md |= m;
  116. md |= ~n & mask_w;
  117. }
  118. return md;
  119. }
  120. static u32 ns_m_to_n(struct mn *mn, u32 ns, u32 m)
  121. {
  122. ns = ~ns >> mn->n_val_shift;
  123. ns &= BIT(mn->width) - 1;
  124. return ns + m;
  125. }
  126. static u32 reg_to_mnctr_mode(struct mn *mn, u32 val)
  127. {
  128. val >>= mn->mnctr_mode_shift;
  129. val &= MNCTR_MODE_MASK;
  130. return val;
  131. }
  132. static u32 mn_to_ns(struct mn *mn, u32 m, u32 n, u32 ns)
  133. {
  134. u32 mask;
  135. mask = BIT(mn->width) - 1;
  136. mask <<= mn->n_val_shift;
  137. ns &= ~mask;
  138. if (n) {
  139. n = n - m;
  140. n = ~n;
  141. n &= BIT(mn->width) - 1;
  142. n <<= mn->n_val_shift;
  143. ns |= n;
  144. }
  145. return ns;
  146. }
  147. static u32 mn_to_reg(struct mn *mn, u32 m, u32 n, u32 val)
  148. {
  149. u32 mask;
  150. mask = MNCTR_MODE_MASK << mn->mnctr_mode_shift;
  151. mask |= BIT(mn->mnctr_en_bit);
  152. val &= ~mask;
  153. if (n) {
  154. val |= BIT(mn->mnctr_en_bit);
  155. val |= MNCTR_MODE_DUAL << mn->mnctr_mode_shift;
  156. }
  157. return val;
  158. }
/*
 * Program frequency-table entry @f into one register bank of a dual-banked
 * RCG.  If the clock is running, the *inactive* bank is programmed and the
 * mux is flipped over to it at the end, so the running clock never sees a
 * half-programmed configuration.  Returns 0 or a regmap error code.
 */
static int configure_bank(struct clk_dyn_rcg *rcg, const struct freq_tbl *f)
{
	u32 ns, md, reg;
	int bank, new_bank, ret, index;
	struct mn *mn;
	struct pre_div *p;
	struct src_sel *s;
	bool enabled;
	u32 md_reg, ns_reg;
	bool banked_mn = !!rcg->mn[1].width;		/* per-bank M/N counter? */
	bool banked_p = !!rcg->p[1].pre_div_width;	/* per-bank pre-divider? */
	struct clk_hw *hw = &rcg->clkr.hw;

	enabled = __clk_is_enabled(hw->clk);

	ret = regmap_read(rcg->clkr.regmap, rcg->bank_reg, &reg);
	if (ret)
		return ret;
	bank = reg_to_bank(rcg, reg);
	/* Reprogram the idle bank while enabled; in place when disabled. */
	new_bank = enabled ? !bank : bank;

	ns_reg = rcg->ns_reg[new_bank];
	ret = regmap_read(rcg->clkr.regmap, ns_reg, &ns);
	if (ret)
		return ret;

	if (banked_mn) {
		mn = &rcg->mn[new_bank];
		md_reg = rcg->md_reg[new_bank];

		/* Hold the M/N counter in reset while it is reprogrammed. */
		ns |= BIT(mn->mnctr_reset_bit);
		ret = regmap_write(rcg->clkr.regmap, ns_reg, ns);
		if (ret)
			return ret;

		ret = regmap_read(rcg->clkr.regmap, md_reg, &md);
		if (ret)
			return ret;
		md = mn_to_md(mn, f->m, f->n, md);
		ret = regmap_write(rcg->clkr.regmap, md_reg, md);
		if (ret)
			return ret;
		ns = mn_to_ns(mn, f->m, f->n, ns);
		ret = regmap_write(rcg->clkr.regmap, ns_reg, ns);
		if (ret)
			return ret;

		/* Two NS registers means mode control is in NS register */
		if (rcg->ns_reg[0] != rcg->ns_reg[1]) {
			ns = mn_to_reg(mn, f->m, f->n, ns);
			ret = regmap_write(rcg->clkr.regmap, ns_reg, ns);
			if (ret)
				return ret;
		} else {
			/* Shared NS register: mode/enable live in bank_reg. */
			reg = mn_to_reg(mn, f->m, f->n, reg);
			ret = regmap_write(rcg->clkr.regmap, rcg->bank_reg,
					   reg);
			if (ret)
				return ret;
		}

		/* Take the M/N counter back out of reset. */
		ns &= ~BIT(mn->mnctr_reset_bit);
		ret = regmap_write(rcg->clkr.regmap, ns_reg, ns);
		if (ret)
			return ret;
	}

	if (banked_p) {
		p = &rcg->p[new_bank];
		/* The hardware field encodes divider - 1. */
		ns = pre_div_to_ns(p, f->pre_div - 1, ns);
	}

	s = &rcg->s[new_bank];
	index = qcom_find_src_index(hw, s->parent_map, f->src);
	if (index < 0)
		return index;
	ns = src_to_ns(s, s->parent_map[index].cfg, ns);
	ret = regmap_write(rcg->clkr.regmap, ns_reg, ns);
	if (ret)
		return ret;

	if (enabled) {
		/* Flip the mux to the freshly programmed bank. */
		ret = regmap_read(rcg->clkr.regmap, rcg->bank_reg, &reg);
		if (ret)
			return ret;
		reg ^= BIT(rcg->mux_sel_bit);
		ret = regmap_write(rcg->clkr.regmap, rcg->bank_reg, reg);
		if (ret)
			return ret;
	}

	return 0;
}
  240. static int clk_dyn_rcg_set_parent(struct clk_hw *hw, u8 index)
  241. {
  242. struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
  243. u32 ns, md, reg;
  244. int bank;
  245. struct freq_tbl f = { 0 };
  246. bool banked_mn = !!rcg->mn[1].width;
  247. bool banked_p = !!rcg->p[1].pre_div_width;
  248. regmap_read(rcg->clkr.regmap, rcg->bank_reg, &reg);
  249. bank = reg_to_bank(rcg, reg);
  250. regmap_read(rcg->clkr.regmap, rcg->ns_reg[bank], &ns);
  251. if (banked_mn) {
  252. regmap_read(rcg->clkr.regmap, rcg->md_reg[bank], &md);
  253. f.m = md_to_m(&rcg->mn[bank], md);
  254. f.n = ns_m_to_n(&rcg->mn[bank], ns, f.m);
  255. }
  256. if (banked_p)
  257. f.pre_div = ns_to_pre_div(&rcg->p[bank], ns) + 1;
  258. f.src = qcom_find_src_index(hw, rcg->s[bank].parent_map, index);
  259. return configure_bank(rcg, &f);
  260. }
/*
 * Calculate m/n:d rate
 *
 *          parent_rate     m
 *   rate = ----------- x  ---
 *            pre_div       n
 *
 * @pre_div is the raw register field (divider - 1); zero means no
 * pre-division.  @mode nonzero means the M/N counter is active, in which
 * case the rate is scaled by m/n using 64-bit intermediate math.
 */
static unsigned long
calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 pre_div)
{
	if (pre_div)
		rate /= pre_div + 1;

	if (mode) {
		u64 tmp = rate;
		tmp *= m;
		do_div(tmp, n);
		rate = tmp;
	}

	return rate;
}
/*
 * Compute the current output rate of a basic RCG from its registers:
 * read the pre-divider from NS and, when an M/N counter is present, the
 * M, N and mode fields, then feed everything into calc_rate().
 */
static unsigned long
clk_rcg_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg *rcg = to_clk_rcg(hw);
	u32 pre_div, m = 0, n = 0, ns, md, mode = 0;
	struct mn *mn = &rcg->mn;

	regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
	pre_div = ns_to_pre_div(&rcg->p, ns);

	if (rcg->mn.width) {
		regmap_read(rcg->clkr.regmap, rcg->md_reg, &md);
		m = md_to_m(mn, md);
		n = ns_m_to_n(mn, ns, m);
		/* MN counter mode is in hw.enable_reg sometimes */
		if (rcg->clkr.enable_reg != rcg->ns_reg)
			regmap_read(rcg->clkr.regmap, rcg->clkr.enable_reg, &mode);
		else
			mode = ns;
		mode = reg_to_mnctr_mode(mn, mode);
	}

	return calc_rate(parent_rate, m, n, mode, pre_div);
}
/*
 * Compute the current output rate of a dual-banked RCG: pick the active
 * bank, then decode that bank's M/N and pre-divider fields and hand them
 * to calc_rate().
 */
static unsigned long
clk_dyn_rcg_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
	u32 m, n, pre_div, ns, md, mode, reg;
	int bank;
	struct mn *mn;
	bool banked_p = !!rcg->p[1].pre_div_width;
	bool banked_mn = !!rcg->mn[1].width;

	regmap_read(rcg->clkr.regmap, rcg->bank_reg, &reg);
	bank = reg_to_bank(rcg, reg);
	regmap_read(rcg->clkr.regmap, rcg->ns_reg[bank], &ns);
	m = n = pre_div = mode = 0;

	if (banked_mn) {
		mn = &rcg->mn[bank];
		regmap_read(rcg->clkr.regmap, rcg->md_reg[bank], &md);
		m = md_to_m(mn, md);
		n = ns_m_to_n(mn, ns, m);
		/* Two NS registers means mode control is in NS register */
		if (rcg->ns_reg[0] != rcg->ns_reg[1])
			reg = ns;	/* otherwise mode lives in bank_reg */
		mode = reg_to_mnctr_mode(mn, reg);
	}

	if (banked_p)
		pre_div = ns_to_pre_div(&rcg->p[bank], ns);

	return calc_rate(parent_rate, m, n, mode, pre_div);
}
/*
 * Shared determine_rate helper: look up @req->rate in frequency table @f,
 * resolve the table entry's source to a parent clk, and fill in the
 * request.  With CLK_SET_RATE_PARENT the required parent rate is derived
 * by backing the pre-divider and M/N ratio out of the target rate;
 * otherwise the parent's current rate is reported unchanged.
 */
static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
		struct clk_rate_request *req,
		const struct parent_map *parent_map)
{
	unsigned long clk_flags, rate = req->rate;
	struct clk_hw *p;
	int index;

	f = qcom_find_freq(f, rate);
	if (!f)
		return -EINVAL;

	index = qcom_find_src_index(hw, parent_map, f->src);
	if (index < 0)
		return index;

	clk_flags = clk_hw_get_flags(hw);
	p = clk_hw_get_parent_by_index(hw, index);
	if (!p)
		return -EINVAL;

	if (clk_flags & CLK_SET_RATE_PARENT) {
		/* parent must run at rate * pre_div * n / m */
		rate = rate * f->pre_div;
		if (f->n) {
			u64 tmp = rate;
			tmp = tmp * f->n;
			do_div(tmp, f->m);
			rate = tmp;
		}
	} else {
		rate = clk_hw_get_rate(p);
	}
	req->best_parent_hw = p;
	req->best_parent_rate = rate;
	req->rate = f->freq;

	return 0;
}
/* determine_rate for a basic RCG: delegate to the shared table helper. */
static int clk_rcg_determine_rate(struct clk_hw *hw,
				  struct clk_rate_request *req)
{
	struct clk_rcg *rcg = to_clk_rcg(hw);

	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req,
					rcg->s.parent_map);
}
  369. static int clk_dyn_rcg_determine_rate(struct clk_hw *hw,
  370. struct clk_rate_request *req)
  371. {
  372. struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
  373. u32 reg;
  374. int bank;
  375. struct src_sel *s;
  376. regmap_read(rcg->clkr.regmap, rcg->bank_reg, &reg);
  377. bank = reg_to_bank(rcg, reg);
  378. s = &rcg->s[bank];
  379. return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, s->parent_map);
  380. }
  381. static int clk_rcg_bypass_determine_rate(struct clk_hw *hw,
  382. struct clk_rate_request *req)
  383. {
  384. struct clk_rcg *rcg = to_clk_rcg(hw);
  385. const struct freq_tbl *f = rcg->freq_tbl;
  386. struct clk_hw *p;
  387. int index = qcom_find_src_index(hw, rcg->s.parent_map, f->src);
  388. req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index);
  389. if (!p)
  390. return -EINVAL;
  391. req->best_parent_rate = clk_hw_round_rate(p, req->rate);
  392. req->rate = req->best_parent_rate;
  393. return 0;
  394. }
/*
 * Program the M/N counter, mode and pre-divider described by @f into a
 * basic RCG.  The source select is left untouched.  The M/N counter is
 * held in reset across the update; the reset bit lives either in the
 * clock-control (enable) register or in NS, per @rcg->mn.reset_in_cc.
 */
static int __clk_rcg_set_rate(struct clk_rcg *rcg, const struct freq_tbl *f)
{
	u32 ns, md, ctl;
	struct mn *mn = &rcg->mn;
	u32 mask = 0;
	unsigned int reset_reg;

	if (rcg->mn.reset_in_cc)
		reset_reg = rcg->clkr.enable_reg;
	else
		reset_reg = rcg->ns_reg;

	if (rcg->mn.width) {
		/* Assert M/N counter reset while reprogramming. */
		mask = BIT(mn->mnctr_reset_bit);
		regmap_update_bits(rcg->clkr.regmap, reset_reg, mask, mask);

		regmap_read(rcg->clkr.regmap, rcg->md_reg, &md);
		md = mn_to_md(mn, f->m, f->n, md);
		regmap_write(rcg->clkr.regmap, rcg->md_reg, md);

		regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
		/* MN counter mode is in hw.enable_reg sometimes */
		if (rcg->clkr.enable_reg != rcg->ns_reg) {
			regmap_read(rcg->clkr.regmap, rcg->clkr.enable_reg, &ctl);
			ctl = mn_to_reg(mn, f->m, f->n, ctl);
			regmap_write(rcg->clkr.regmap, rcg->clkr.enable_reg, ctl);
		} else {
			ns = mn_to_reg(mn, f->m, f->n, ns);
		}
		ns = mn_to_ns(mn, f->m, f->n, ns);
	} else {
		regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
	}

	/* The hardware pre-divider field encodes divider - 1. */
	ns = pre_div_to_ns(&rcg->p, f->pre_div - 1, ns);
	regmap_write(rcg->clkr.regmap, rcg->ns_reg, ns);

	/* Release M/N reset (no-op when mask stayed 0). */
	regmap_update_bits(rcg->clkr.regmap, reset_reg, mask, 0);

	return 0;
}
  429. static int clk_rcg_set_rate(struct clk_hw *hw, unsigned long rate,
  430. unsigned long parent_rate)
  431. {
  432. struct clk_rcg *rcg = to_clk_rcg(hw);
  433. const struct freq_tbl *f;
  434. f = qcom_find_freq(rcg->freq_tbl, rate);
  435. if (!f)
  436. return -EINVAL;
  437. return __clk_rcg_set_rate(rcg, f);
  438. }
  439. static int clk_rcg_set_floor_rate(struct clk_hw *hw, unsigned long rate,
  440. unsigned long parent_rate)
  441. {
  442. struct clk_rcg *rcg = to_clk_rcg(hw);
  443. const struct freq_tbl *f;
  444. f = qcom_find_freq_floor(rcg->freq_tbl, rate);
  445. if (!f)
  446. return -EINVAL;
  447. return __clk_rcg_set_rate(rcg, f);
  448. }
/*
 * set_rate for a bypass RCG: always (re)program the single fixed entry
 * of the frequency table; the actual rate comes from the parent.
 */
static int clk_rcg_bypass_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct clk_rcg *rcg = to_clk_rcg(hw);

	return __clk_rcg_set_rate(rcg, rcg->freq_tbl);
}
  455. static int clk_rcg_bypass2_determine_rate(struct clk_hw *hw,
  456. struct clk_rate_request *req)
  457. {
  458. struct clk_hw *p;
  459. p = req->best_parent_hw;
  460. req->best_parent_rate = clk_hw_round_rate(p, req->rate);
  461. req->rate = req->best_parent_rate;
  462. return 0;
  463. }
  464. static int clk_rcg_bypass2_set_rate(struct clk_hw *hw, unsigned long rate,
  465. unsigned long parent_rate)
  466. {
  467. struct clk_rcg *rcg = to_clk_rcg(hw);
  468. struct freq_tbl f = { 0 };
  469. u32 ns, src;
  470. int i, ret, num_parents = clk_hw_get_num_parents(hw);
  471. ret = regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
  472. if (ret)
  473. return ret;
  474. src = ns_to_src(&rcg->s, ns);
  475. f.pre_div = ns_to_pre_div(&rcg->p, ns) + 1;
  476. for (i = 0; i < num_parents; i++) {
  477. if (src == rcg->s.parent_map[i].cfg) {
  478. f.src = rcg->s.parent_map[i].src;
  479. return __clk_rcg_set_rate(rcg, &f);
  480. }
  481. }
  482. return -EINVAL;
  483. }
/*
 * set_rate_and_parent for bypass2: the parent index is ignored because
 * clk_rcg_bypass2_set_rate() re-reads the source from hardware anyway.
 */
static int clk_rcg_bypass2_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Read the hardware to determine parent during set_rate */
	return clk_rcg_bypass2_set_rate(hw, rate, parent_rate);
}
/* A rational ratio num/den applied by the M/N counter. */
struct frac_entry {
	int num;	/* numerator (M) */
	int den;	/* denominator (N) */
};

/* Ratios tried by the pixel clock ops; zero-filled entry terminates. */
static const struct frac_entry pixel_table[] = {
	{ 1, 2 },
	{ 1, 3 },
	{ 3, 16 },
	{ }
};
/*
 * determine_rate for the pixel clock: for each candidate M/N ratio, ask
 * the parent whether it can supply rate * den / num within +/- 100 kHz,
 * and accept the first ratio that fits.
 */
static int clk_rcg_pixel_determine_rate(struct clk_hw *hw,
		struct clk_rate_request *req)
{
	int delta = 100000;	/* tolerated parent-rate mismatch in Hz */
	const struct frac_entry *frac = pixel_table;
	unsigned long request, src_rate;

	for (; frac->num; frac++) {
		/* Parent rate needed so that out = parent * num / den. */
		request = (req->rate * frac->den) / frac->num;

		src_rate = clk_hw_round_rate(req->best_parent_hw, request);

		if ((src_rate < (request - delta)) ||
			(src_rate > (request + delta)))
			continue;

		req->best_parent_rate = src_rate;
		req->rate = (src_rate * frac->num) / frac->den;
		return 0;
	}

	return -EINVAL;
}
/*
 * set_rate for the pixel clock: keep the currently selected source,
 * bypass the pre-divider, and pick M/N from pixel_table such that
 * parent_rate * m / n lands within +/- 100 kHz of the requested rate.
 */
static int clk_rcg_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct clk_rcg *rcg = to_clk_rcg(hw);
	int delta = 100000;	/* tolerated mismatch in Hz */
	const struct frac_entry *frac = pixel_table;
	unsigned long request;
	struct freq_tbl f = { 0 };
	u32 ns, src;
	int i, ret, num_parents = clk_hw_get_num_parents(hw);

	ret = regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
	if (ret)
		return ret;

	src = ns_to_src(&rcg->s, ns);

	/* NOTE(review): if no parent_map entry matches, f.src stays 0. */
	for (i = 0; i < num_parents; i++) {
		if (src == rcg->s.parent_map[i].cfg) {
			f.src = rcg->s.parent_map[i].src;
			break;
		}
	}

	/* bypass the pre divider */
	f.pre_div = 1;

	/* let us find appropriate m/n values for this */
	for (; frac->num; frac++) {
		request = (rate * frac->den) / frac->num;

		if ((parent_rate < (request - delta)) ||
			(parent_rate > (request + delta)))
			continue;

		f.m = frac->num;
		f.n = frac->den;

		return __clk_rcg_set_rate(rcg, &f);
	}

	return -EINVAL;
}
/*
 * set_rate_and_parent for the pixel clock: the index is ignored because
 * clk_rcg_pixel_set_rate() keeps the source currently in hardware.
 */
static int clk_rcg_pixel_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return clk_rcg_pixel_set_rate(hw, rate, parent_rate);
}
  557. static int clk_rcg_esc_determine_rate(struct clk_hw *hw,
  558. struct clk_rate_request *req)
  559. {
  560. struct clk_rcg *rcg = to_clk_rcg(hw);
  561. int pre_div_max = BIT(rcg->p.pre_div_width);
  562. int div;
  563. unsigned long src_rate;
  564. if (req->rate == 0)
  565. return -EINVAL;
  566. src_rate = clk_hw_get_rate(req->best_parent_hw);
  567. div = src_rate / req->rate;
  568. if (div >= 1 && div <= pre_div_max) {
  569. req->best_parent_rate = src_rate;
  570. req->rate = src_rate / div;
  571. return 0;
  572. }
  573. return -EINVAL;
  574. }
/*
 * set_rate for the esc clock: keep the currently selected source and
 * program an integer pre-divider of parent_rate / rate, provided it
 * fits in the pre-divider field.
 */
static int clk_rcg_esc_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct clk_rcg *rcg = to_clk_rcg(hw);
	struct freq_tbl f = { 0 };
	int pre_div_max = BIT(rcg->p.pre_div_width);
	int div;
	u32 ns;
	int i, ret, num_parents = clk_hw_get_num_parents(hw);

	if (rate == 0)
		return -EINVAL;

	ret = regmap_read(rcg->clkr.regmap, rcg->ns_reg, &ns);
	if (ret)
		return ret;

	ns = ns_to_src(&rcg->s, ns);

	/* NOTE(review): if no parent_map entry matches, f.src stays 0. */
	for (i = 0; i < num_parents; i++) {
		if (ns == rcg->s.parent_map[i].cfg) {
			f.src = rcg->s.parent_map[i].src;
			break;
		}
	}

	div = parent_rate / rate;

	if (div >= 1 && div <= pre_div_max) {
		f.pre_div = div;
		return __clk_rcg_set_rate(rcg, &f);
	}

	return -EINVAL;
}
/*
 * set_rate_and_parent for the esc clock: the index is ignored because
 * clk_rcg_esc_set_rate() keeps the source currently in hardware.
 */
static int clk_rcg_esc_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return clk_rcg_esc_set_rate(hw, rate, parent_rate);
}
/*
 * This type of clock has a glitch-free mux that switches between the output of
 * the M/N counter and an always on clock source (XO). When clk_set_rate() is
 * called we need to make sure that we don't switch to the M/N counter if it
 * isn't clocking because the mux will get stuck and the clock will stop
 * outputting a clock. This can happen if the framework isn't aware that this
 * clock is on and so clk_set_rate() doesn't turn on the new parent. To fix
 * this we switch the mux in the enable/disable ops and reprogram the M/N
 * counter in the set_rate op. We also make sure to switch away from the M/N
 * counter in set_rate if software thinks the clock is off.
 */
static int clk_rcg_lcc_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct clk_rcg *rcg = to_clk_rcg(hw);
	const struct freq_tbl *f;
	int ret;
	u32 gfm = BIT(10);	/* glitch-free mux select: set = M/N, clear = XO */

	f = qcom_find_freq(rcg->freq_tbl, rate);
	if (!f)
		return -EINVAL;

	/* Switch to XO to avoid glitches */
	regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, 0);
	ret = __clk_rcg_set_rate(rcg, f);
	/* Switch back to M/N if it's clocking */
	if (__clk_is_enabled(hw->clk))
		regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, gfm);

	return ret;
}
  637. static int clk_rcg_lcc_enable(struct clk_hw *hw)
  638. {
  639. struct clk_rcg *rcg = to_clk_rcg(hw);
  640. u32 gfm = BIT(10);
  641. /* Use M/N */
  642. return regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, gfm);
  643. }
  644. static void clk_rcg_lcc_disable(struct clk_hw *hw)
  645. {
  646. struct clk_rcg *rcg = to_clk_rcg(hw);
  647. u32 gfm = BIT(10);
  648. /* Use XO */
  649. regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, 0);
  650. }
  651. static int __clk_dyn_rcg_set_rate(struct clk_hw *hw, unsigned long rate)
  652. {
  653. struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
  654. const struct freq_tbl *f;
  655. f = qcom_find_freq(rcg->freq_tbl, rate);
  656. if (!f)
  657. return -EINVAL;
  658. return configure_bank(rcg, f);
  659. }
/* set_rate for a dual-banked RCG; parent_rate is not used. */
static int clk_dyn_rcg_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	return __clk_dyn_rcg_set_rate(hw, rate);
}
/*
 * set_rate_and_parent for a dual-banked RCG: the frequency-table entry
 * selected by __clk_dyn_rcg_set_rate() already carries its source, so
 * @index is not needed here.
 */
static int clk_dyn_rcg_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_dyn_rcg_set_rate(hw, rate);
}
/* Basic RCG: table-driven rate selection, nearest match. */
const struct clk_ops clk_rcg_ops = {
	.enable = clk_enable_regmap,
	.disable = clk_disable_regmap,
	.get_parent = clk_rcg_get_parent,
	.set_parent = clk_rcg_set_parent,
	.recalc_rate = clk_rcg_recalc_rate,
	.determine_rate = clk_rcg_determine_rate,
	.set_rate = clk_rcg_set_rate,
};
EXPORT_SYMBOL_GPL(clk_rcg_ops);
/* Like clk_rcg_ops but set_rate rounds down (qcom_find_freq_floor). */
const struct clk_ops clk_rcg_floor_ops = {
	.enable = clk_enable_regmap,
	.disable = clk_disable_regmap,
	.get_parent = clk_rcg_get_parent,
	.set_parent = clk_rcg_set_parent,
	.recalc_rate = clk_rcg_recalc_rate,
	.determine_rate = clk_rcg_determine_rate,
	.set_rate = clk_rcg_set_floor_rate,
};
EXPORT_SYMBOL_GPL(clk_rcg_floor_ops);
/* Bypass RCG: rate follows the parent of the single freq_tbl entry. */
const struct clk_ops clk_rcg_bypass_ops = {
	.enable = clk_enable_regmap,
	.disable = clk_disable_regmap,
	.get_parent = clk_rcg_get_parent,
	.set_parent = clk_rcg_set_parent,
	.recalc_rate = clk_rcg_recalc_rate,
	.determine_rate = clk_rcg_bypass_determine_rate,
	.set_rate = clk_rcg_bypass_set_rate,
};
EXPORT_SYMBOL_GPL(clk_rcg_bypass_ops);
/* Bypass2 RCG: rate follows the current parent; source read from HW. */
const struct clk_ops clk_rcg_bypass2_ops = {
	.enable = clk_enable_regmap,
	.disable = clk_disable_regmap,
	.get_parent = clk_rcg_get_parent,
	.set_parent = clk_rcg_set_parent,
	.recalc_rate = clk_rcg_recalc_rate,
	.determine_rate = clk_rcg_bypass2_determine_rate,
	.set_rate = clk_rcg_bypass2_set_rate,
	.set_rate_and_parent = clk_rcg_bypass2_set_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg_bypass2_ops);
/* Pixel RCG: rates synthesized with the fractional M/N pixel_table. */
const struct clk_ops clk_rcg_pixel_ops = {
	.enable = clk_enable_regmap,
	.disable = clk_disable_regmap,
	.get_parent = clk_rcg_get_parent,
	.set_parent = clk_rcg_set_parent,
	.recalc_rate = clk_rcg_recalc_rate,
	.determine_rate = clk_rcg_pixel_determine_rate,
	.set_rate = clk_rcg_pixel_set_rate,
	.set_rate_and_parent = clk_rcg_pixel_set_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg_pixel_ops);
/* Esc RCG: integer pre-divider derived from the parent rate. */
const struct clk_ops clk_rcg_esc_ops = {
	.enable = clk_enable_regmap,
	.disable = clk_disable_regmap,
	.get_parent = clk_rcg_get_parent,
	.set_parent = clk_rcg_set_parent,
	.recalc_rate = clk_rcg_recalc_rate,
	.determine_rate = clk_rcg_esc_determine_rate,
	.set_rate = clk_rcg_esc_set_rate,
	.set_rate_and_parent = clk_rcg_esc_set_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg_esc_ops);
/* LCC RCG: glitch-free mux parked on XO while M/N is reprogrammed. */
const struct clk_ops clk_rcg_lcc_ops = {
	.enable = clk_rcg_lcc_enable,
	.disable = clk_rcg_lcc_disable,
	.get_parent = clk_rcg_get_parent,
	.set_parent = clk_rcg_set_parent,
	.recalc_rate = clk_rcg_recalc_rate,
	.determine_rate = clk_rcg_determine_rate,
	.set_rate = clk_rcg_lcc_set_rate,
};
EXPORT_SYMBOL_GPL(clk_rcg_lcc_ops);
/* Dual-banked RCG: the idle bank is programmed, then the mux flipped. */
const struct clk_ops clk_dyn_rcg_ops = {
	.enable = clk_enable_regmap,
	.is_enabled = clk_is_enabled_regmap,
	.disable = clk_disable_regmap,
	.get_parent = clk_dyn_rcg_get_parent,
	.set_parent = clk_dyn_rcg_set_parent,
	.recalc_rate = clk_dyn_rcg_recalc_rate,
	.determine_rate = clk_dyn_rcg_determine_rate,
	.set_rate = clk_dyn_rcg_set_rate,
	.set_rate_and_parent = clk_dyn_rcg_set_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_dyn_rcg_ops);