// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Ingenic SoC CGU driver
 *
 * Copyright (c) 2013-2015 Imagination Technologies
 * Author: Paul Burton <[email protected]>
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/math64.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/time.h>

#include "cgu.h"

#define MHZ (1000 * 1000)
  23. static inline const struct ingenic_cgu_clk_info *
  24. to_clk_info(struct ingenic_clk *clk)
  25. {
  26. return &clk->cgu->clock_info[clk->idx];
  27. }
  28. /**
  29. * ingenic_cgu_gate_get() - get the value of clock gate register bit
  30. * @cgu: reference to the CGU whose registers should be read
  31. * @info: info struct describing the gate bit
  32. *
  33. * Retrieves the state of the clock gate bit described by info. The
  34. * caller must hold cgu->lock.
  35. *
  36. * Return: true if the gate bit is set, else false.
  37. */
  38. static inline bool
  39. ingenic_cgu_gate_get(struct ingenic_cgu *cgu,
  40. const struct ingenic_cgu_gate_info *info)
  41. {
  42. return !!(readl(cgu->base + info->reg) & BIT(info->bit))
  43. ^ info->clear_to_gate;
  44. }
  45. /**
  46. * ingenic_cgu_gate_set() - set the value of clock gate register bit
  47. * @cgu: reference to the CGU whose registers should be modified
  48. * @info: info struct describing the gate bit
  49. * @val: non-zero to gate a clock, otherwise zero
  50. *
  51. * Sets the given gate bit in order to gate or ungate a clock.
  52. *
  53. * The caller must hold cgu->lock.
  54. */
  55. static inline void
  56. ingenic_cgu_gate_set(struct ingenic_cgu *cgu,
  57. const struct ingenic_cgu_gate_info *info, bool val)
  58. {
  59. u32 clkgr = readl(cgu->base + info->reg);
  60. if (val ^ info->clear_to_gate)
  61. clkgr |= BIT(info->bit);
  62. else
  63. clkgr &= ~BIT(info->bit);
  64. writel(clkgr, cgu->base + info->reg);
  65. }
/*
 * PLL operations
 */

/*
 * Recalculate a PLL's output rate from its M/N/OD register fields:
 * rate = parent * M * rate_multiplier / (N * OD). Returns the parent
 * rate unchanged when the PLL is bypassed.
 */
static unsigned long
ingenic_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_pll_info *pll_info;
	unsigned m, n, od_enc, od;
	bool bypass;
	u32 ctl;

	BUG_ON(clk_info->type != CGU_CLK_PLL);
	pll_info = &clk_info->pll;

	ctl = readl(cgu->base + pll_info->reg);

	/* Extract the multiplier (M) and divider (N) fields, then apply
	 * the per-SoC offsets to recover the actual values. */
	m = (ctl >> pll_info->m_shift) & GENMASK(pll_info->m_bits - 1, 0);
	m += pll_info->m_offset;
	n = (ctl >> pll_info->n_shift) & GENMASK(pll_info->n_bits - 1, 0);
	n += pll_info->n_offset;

	/* OD is stored as an encoded value; decoded via od_encoding below */
	od_enc = ctl >> pll_info->od_shift;
	od_enc &= GENMASK(pll_info->od_bits - 1, 0);

	/* A bypassed PLL passes its input straight through */
	if (pll_info->bypass_bit >= 0) {
		ctl = readl(cgu->base + pll_info->bypass_reg);

		bypass = !!(ctl & BIT(pll_info->bypass_bit));

		if (bypass)
			return parent_rate;
	}

	/* Map the encoded OD back to its table index; the divider is index+1 */
	for (od = 0; od < pll_info->od_max; od++) {
		if (pll_info->od_encoding[od] == od_enc)
			break;
	}

	/* An unmatched encoding means a bogus register value or table */
	BUG_ON(od == pll_info->od_max);
	od++;

	return div_u64((u64)parent_rate * m * pll_info->rate_multiplier,
		n * od);
}
/*
 * Default calculation of the M/N/OD divider triple for a requested PLL
 * rate; used when the SoC does not supply its own calc_m_n_od callback.
 * OD is fixed at 1 here; N is chosen to bring the reference into the
 * PLL's input range and M is derived from the requested ratio.
 */
static void
ingenic_pll_calc_m_n_od(const struct ingenic_cgu_pll_info *pll_info,
			unsigned long rate, unsigned long parent_rate,
			unsigned int *pm, unsigned int *pn, unsigned int *pod)
{
	unsigned int m, n, od = 1;

	/*
	 * The frequency after the input divider must be between 10 and 50 MHz.
	 * The highest divider yields the best resolution.
	 */
	n = parent_rate / (10 * MHZ);
	/*
	 * NOTE(review): clamping to 1 << n_bits (not (1 << n_bits) - 1 +
	 * n_offset) looks like it could exceed the encodable field when
	 * n_offset is 0 — confirm against the SoC's register layout.
	 */
	n = min_t(unsigned int, n, 1 << pll_info->n_bits);
	n = max_t(unsigned int, n, pll_info->n_offset);

	/* MHz-based arithmetic avoids overflowing 32 bits for typical rates */
	m = (rate / MHZ) * od * n / (parent_rate / MHZ);
	m = min_t(unsigned int, m, 1 << pll_info->m_bits);
	m = max_t(unsigned int, m, pll_info->m_offset);

	*pm = m;
	*pn = n;
	*pod = od;
}
  123. static unsigned long
  124. ingenic_pll_calc(const struct ingenic_cgu_clk_info *clk_info,
  125. unsigned long rate, unsigned long parent_rate,
  126. unsigned int *pm, unsigned int *pn, unsigned int *pod)
  127. {
  128. const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
  129. unsigned int m, n, od;
  130. if (pll_info->calc_m_n_od)
  131. (*pll_info->calc_m_n_od)(pll_info, rate, parent_rate, &m, &n, &od);
  132. else
  133. ingenic_pll_calc_m_n_od(pll_info, rate, parent_rate, &m, &n, &od);
  134. if (pm)
  135. *pm = m;
  136. if (pn)
  137. *pn = n;
  138. if (pod)
  139. *pod = od;
  140. return div_u64((u64)parent_rate * m * pll_info->rate_multiplier,
  141. n * od);
  142. }
  143. static long
  144. ingenic_pll_round_rate(struct clk_hw *hw, unsigned long req_rate,
  145. unsigned long *prate)
  146. {
  147. struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
  148. const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
  149. return ingenic_pll_calc(clk_info, req_rate, *prate, NULL, NULL, NULL);
  150. }
/*
 * Poll the PLL's stable bit until it is set, giving up after 100ms.
 * Returns 0 on success or -ETIMEDOUT (from readl_poll_timeout) if the
 * PLL never reports stable.
 */
static inline int ingenic_pll_check_stable(struct ingenic_cgu *cgu,
					   const struct ingenic_cgu_pll_info *pll_info)
{
	u32 ctl;

	return readl_poll_timeout(cgu->base + pll_info->reg, ctl,
				  ctl & BIT(pll_info->stable_bit),
				  0, 100 * USEC_PER_MSEC);
}
/*
 * clk_ops set_rate: reprogram the PLL's M/N/OD fields for the requested
 * rate. If the exact rate cannot be achieved the nearest achievable
 * rate is programmed and a message is logged. When the PLL is already
 * enabled, waits for it to re-lock before returning.
 */
static int
ingenic_pll_set_rate(struct clk_hw *hw, unsigned long req_rate,
		     unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
	unsigned long rate, flags;
	unsigned int m, n, od;
	int ret = 0;
	u32 ctl;

	rate = ingenic_pll_calc(clk_info, req_rate, parent_rate,
			       &m, &n, &od);
	if (rate != req_rate)
		pr_info("ingenic-cgu: request '%s' rate %luHz, actual %luHz\n",
			clk_info->name, req_rate, rate);

	spin_lock_irqsave(&cgu->lock, flags);
	ctl = readl(cgu->base + pll_info->reg);

	/* The register stores M and N with their per-SoC offsets removed */
	ctl &= ~(GENMASK(pll_info->m_bits - 1, 0) << pll_info->m_shift);
	ctl |= (m - pll_info->m_offset) << pll_info->m_shift;

	ctl &= ~(GENMASK(pll_info->n_bits - 1, 0) << pll_info->n_shift);
	ctl |= (n - pll_info->n_offset) << pll_info->n_shift;

	/* OD is written via its encoding table, indexed by divider - 1 */
	ctl &= ~(GENMASK(pll_info->od_bits - 1, 0) << pll_info->od_shift);
	ctl |= pll_info->od_encoding[od - 1] << pll_info->od_shift;

	writel(ctl, cgu->base + pll_info->reg);

	/* If the PLL is enabled, verify that it's stable */
	if (ctl & BIT(pll_info->enable_bit))
		ret = ingenic_pll_check_stable(cgu, pll_info);

	spin_unlock_irqrestore(&cgu->lock, flags);

	return ret;
}
/*
 * clk_ops enable: take the PLL out of bypass (where supported), set its
 * enable bit, and wait for it to report stable. Returns 0 on success or
 * a negative error if the PLL fails to stabilise within the timeout.
 */
static int ingenic_pll_enable(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
	unsigned long flags;
	int ret;
	u32 ctl;

	spin_lock_irqsave(&cgu->lock, flags);

	/* Clear the bypass bit first so the PLL output is actually used */
	if (pll_info->bypass_bit >= 0) {
		ctl = readl(cgu->base + pll_info->bypass_reg);

		ctl &= ~BIT(pll_info->bypass_bit);

		writel(ctl, cgu->base + pll_info->bypass_reg);
	}

	ctl = readl(cgu->base + pll_info->reg);

	ctl |= BIT(pll_info->enable_bit);

	writel(ctl, cgu->base + pll_info->reg);

	ret = ingenic_pll_check_stable(cgu, pll_info);

	spin_unlock_irqrestore(&cgu->lock, flags);

	return ret;
}
  213. static void ingenic_pll_disable(struct clk_hw *hw)
  214. {
  215. struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
  216. struct ingenic_cgu *cgu = ingenic_clk->cgu;
  217. const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
  218. const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
  219. unsigned long flags;
  220. u32 ctl;
  221. spin_lock_irqsave(&cgu->lock, flags);
  222. ctl = readl(cgu->base + pll_info->reg);
  223. ctl &= ~BIT(pll_info->enable_bit);
  224. writel(ctl, cgu->base + pll_info->reg);
  225. spin_unlock_irqrestore(&cgu->lock, flags);
  226. }
  227. static int ingenic_pll_is_enabled(struct clk_hw *hw)
  228. {
  229. struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
  230. struct ingenic_cgu *cgu = ingenic_clk->cgu;
  231. const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
  232. const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
  233. u32 ctl;
  234. ctl = readl(cgu->base + pll_info->reg);
  235. return !!(ctl & BIT(pll_info->enable_bit));
  236. }
/* Common clock framework operations for CGU_CLK_PLL clocks */
static const struct clk_ops ingenic_pll_ops = {
	.recalc_rate = ingenic_pll_recalc_rate,
	.round_rate = ingenic_pll_round_rate,
	.set_rate = ingenic_pll_set_rate,

	.enable = ingenic_pll_enable,
	.disable = ingenic_pll_disable,
	.is_enabled = ingenic_pll_is_enabled,
};
/*
 * Operations for all non-PLL clocks
 */

/*
 * clk_ops get_parent: read the mux field and translate the hardware
 * mux index into the framework parent index (which does not count the
 * -1 holes in the parents array). Non-mux clocks always report parent
 * index 0.
 */
static u8 ingenic_clk_get_parent(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	u32 reg;
	u8 i, hw_idx, idx = 0;

	if (clk_info->type & CGU_CLK_MUX) {
		reg = readl(cgu->base + clk_info->mux.reg);

		hw_idx = (reg >> clk_info->mux.shift) &
			 GENMASK(clk_info->mux.bits - 1, 0);

		/*
		 * Convert the hardware index to the parent index by skipping
		 * over any -1's in the parents array.
		 */
		for (i = 0; i < hw_idx; i++) {
			if (clk_info->parents[i] != -1)
				idx++;
		}
	}

	return idx;
}
/*
 * clk_ops set_parent: translate the framework parent index back into the
 * hardware mux index (accounting for -1 holes in the parents array) and
 * program the mux field. For non-mux clocks only index 0 is accepted.
 */
static int ingenic_clk_set_parent(struct clk_hw *hw, u8 idx)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	unsigned long flags;
	u8 curr_idx, hw_idx, num_poss;
	u32 reg, mask;

	if (clk_info->type & CGU_CLK_MUX) {
		/*
		 * Convert the parent index to the hardware index by adding
		 * 1 for any -1 in the parents array preceding the given
		 * index. That is, we want the index of idx'th entry in
		 * clk_info->parents which does not equal -1.
		 */
		hw_idx = curr_idx = 0;
		num_poss = 1 << clk_info->mux.bits;
		for (; hw_idx < num_poss; hw_idx++) {
			if (clk_info->parents[hw_idx] == -1)
				continue;
			if (curr_idx == idx)
				break;
			curr_idx++;
		}

		/* idx should always be a valid parent */
		BUG_ON(curr_idx != idx);

		mask = GENMASK(clk_info->mux.bits - 1, 0);
		mask <<= clk_info->mux.shift;

		spin_lock_irqsave(&cgu->lock, flags);

		/* write the register */
		reg = readl(cgu->base + clk_info->mux.reg);
		reg &= ~mask;
		reg |= hw_idx << clk_info->mux.shift;
		writel(reg, cgu->base + clk_info->mux.reg);

		spin_unlock_irqrestore(&cgu->lock, flags);
		return 0;
	}

	return idx ? -EINVAL : 0;
}
/*
 * clk_ops recalc_rate: derive the clock's rate from its parent. For
 * divider clocks the divide value is read from hardware (unless the
 * current parent is in the bypass mask); fixed dividers apply their
 * constant divide. Other clocks simply pass the parent rate through.
 */
static unsigned long
ingenic_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	unsigned long rate = parent_rate;
	u32 div_reg, div;
	u8 parent;

	if (clk_info->type & CGU_CLK_DIV) {
		parent = ingenic_clk_get_parent(hw);

		if (!(clk_info->div.bypass_mask & BIT(parent))) {
			div_reg = readl(cgu->base + clk_info->div.reg);
			div = (div_reg >> clk_info->div.shift) &
			      GENMASK(clk_info->div.bits - 1, 0);

			/* Either a lookup table or a linear (field+1)*div encoding */
			if (clk_info->div.div_table)
				div = clk_info->div.div_table[div];
			else
				div = (div + 1) * clk_info->div.div;

			rate /= div;
		}
	} else if (clk_info->type & CGU_CLK_FIXDIV) {
		rate /= clk_info->fixdiv.div;
	}

	return rate;
}
  335. static unsigned int
  336. ingenic_clk_calc_hw_div(const struct ingenic_cgu_clk_info *clk_info,
  337. unsigned int div)
  338. {
  339. unsigned int i, best_i = 0, best = (unsigned int)-1;
  340. for (i = 0; i < (1 << clk_info->div.bits)
  341. && clk_info->div.div_table[i]; i++) {
  342. if (clk_info->div.div_table[i] >= div &&
  343. clk_info->div.div_table[i] < best) {
  344. best = clk_info->div.div_table[i];
  345. best_i = i;
  346. if (div == best)
  347. break;
  348. }
  349. }
  350. return best_i;
  351. }
/*
 * Calculate the divide value a divider clock would use to get as close
 * as possible to @req_rate from @parent_rate, honouring the hardware's
 * constraints (div table, field width, and the pre-divide step).
 * Returns 1 when the current parent bypasses the divider.
 */
static unsigned
ingenic_clk_calc_div(struct clk_hw *hw,
		     const struct ingenic_cgu_clk_info *clk_info,
		     unsigned long parent_rate, unsigned long req_rate)
{
	unsigned int div, hw_div;
	u8 parent;

	parent = ingenic_clk_get_parent(hw);
	if (clk_info->div.bypass_mask & BIT(parent))
		return 1;

	/* calculate the divide */
	div = DIV_ROUND_UP(parent_rate, req_rate);

	if (clk_info->div.div_table) {
		hw_div = ingenic_clk_calc_hw_div(clk_info, div);

		return clk_info->div.div_table[hw_div];
	}

	/* Impose hardware constraints */
	div = clamp_t(unsigned int, div, clk_info->div.div,
		      clk_info->div.div << clk_info->div.bits);

	/*
	 * If the divider value itself must be divided before being written to
	 * the divider register, we must ensure we don't have any bits set that
	 * would be lost as a result of doing so.
	 */
	div = DIV_ROUND_UP(div, clk_info->div.div);
	div *= clk_info->div.div;

	return div;
}
  380. static long
  381. ingenic_clk_round_rate(struct clk_hw *hw, unsigned long req_rate,
  382. unsigned long *parent_rate)
  383. {
  384. struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
  385. const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
  386. unsigned int div = 1;
  387. if (clk_info->type & CGU_CLK_DIV)
  388. div = ingenic_clk_calc_div(hw, clk_info, *parent_rate, req_rate);
  389. else if (clk_info->type & CGU_CLK_FIXDIV)
  390. div = clk_info->fixdiv.div;
  391. else if (clk_hw_can_set_rate_parent(hw))
  392. *parent_rate = req_rate;
  393. return DIV_ROUND_UP(*parent_rate, div);
  394. }
/*
 * Poll the divider's busy bit until it clears, giving up after 100ms.
 * Returns 0 on success or -ETIMEDOUT (from readl_poll_timeout) if the
 * divider change never completes.
 */
static inline int ingenic_clk_check_stable(struct ingenic_cgu *cgu,
					   const struct ingenic_cgu_clk_info *clk_info)
{
	u32 reg;

	return readl_poll_timeout(cgu->base + clk_info->div.reg, reg,
				  !(reg & BIT(clk_info->div.busy_bit)),
				  0, 100 * USEC_PER_MSEC);
}
/*
 * clk_ops set_rate: program the divider field for the requested rate.
 * Fails with -EINVAL if the clock has no divider or if the requested
 * rate is not exactly achievable (the framework is expected to have
 * called round_rate first). Optional stop/change-enable/busy bits are
 * handled when the SoC defines them (!= -1).
 */
static int
ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate,
		     unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	unsigned long rate, flags;
	unsigned int hw_div, div;
	u32 reg, mask;
	int ret = 0;

	if (clk_info->type & CGU_CLK_DIV) {
		div = ingenic_clk_calc_div(hw, clk_info, parent_rate, req_rate);
		rate = DIV_ROUND_UP(parent_rate, div);

		if (rate != req_rate)
			return -EINVAL;

		/* Translate the divide into the register field encoding */
		if (clk_info->div.div_table)
			hw_div = ingenic_clk_calc_hw_div(clk_info, div);
		else
			hw_div = ((div / clk_info->div.div) - 1);

		spin_lock_irqsave(&cgu->lock, flags);
		reg = readl(cgu->base + clk_info->div.reg);

		/* update the divide */
		mask = GENMASK(clk_info->div.bits - 1, 0);
		reg &= ~(mask << clk_info->div.shift);
		reg |= hw_div << clk_info->div.shift;

		/* clear the stop bit */
		if (clk_info->div.stop_bit != -1)
			reg &= ~BIT(clk_info->div.stop_bit);

		/* set the change enable bit */
		if (clk_info->div.ce_bit != -1)
			reg |= BIT(clk_info->div.ce_bit);

		/* update the hardware */
		writel(reg, cgu->base + clk_info->div.reg);

		/* wait for the change to take effect */
		if (clk_info->div.busy_bit != -1)
			ret = ingenic_clk_check_stable(cgu, clk_info);

		spin_unlock_irqrestore(&cgu->lock, flags);
		return ret;
	}

	return -EINVAL;
}
  445. static int ingenic_clk_enable(struct clk_hw *hw)
  446. {
  447. struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
  448. const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
  449. struct ingenic_cgu *cgu = ingenic_clk->cgu;
  450. unsigned long flags;
  451. if (clk_info->type & CGU_CLK_GATE) {
  452. /* ungate the clock */
  453. spin_lock_irqsave(&cgu->lock, flags);
  454. ingenic_cgu_gate_set(cgu, &clk_info->gate, false);
  455. spin_unlock_irqrestore(&cgu->lock, flags);
  456. if (clk_info->gate.delay_us)
  457. udelay(clk_info->gate.delay_us);
  458. }
  459. return 0;
  460. }
  461. static void ingenic_clk_disable(struct clk_hw *hw)
  462. {
  463. struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
  464. const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
  465. struct ingenic_cgu *cgu = ingenic_clk->cgu;
  466. unsigned long flags;
  467. if (clk_info->type & CGU_CLK_GATE) {
  468. /* gate the clock */
  469. spin_lock_irqsave(&cgu->lock, flags);
  470. ingenic_cgu_gate_set(cgu, &clk_info->gate, true);
  471. spin_unlock_irqrestore(&cgu->lock, flags);
  472. }
  473. }
  474. static int ingenic_clk_is_enabled(struct clk_hw *hw)
  475. {
  476. struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
  477. const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
  478. struct ingenic_cgu *cgu = ingenic_clk->cgu;
  479. int enabled = 1;
  480. if (clk_info->type & CGU_CLK_GATE)
  481. enabled = !ingenic_cgu_gate_get(cgu, &clk_info->gate);
  482. return enabled;
  483. }
/* Common clock framework operations for all non-PLL, non-custom clocks */
static const struct clk_ops ingenic_clk_ops = {
	.get_parent = ingenic_clk_get_parent,
	.set_parent = ingenic_clk_set_parent,

	.recalc_rate = ingenic_clk_recalc_rate,
	.round_rate = ingenic_clk_round_rate,
	.set_rate = ingenic_clk_set_rate,

	.enable = ingenic_clk_enable,
	.disable = ingenic_clk_disable,
	.is_enabled = ingenic_clk_is_enabled,
};
/*
 * Setup functions.
 */

/*
 * Register clock number @idx from the CGU's clock_info table with the
 * common clock framework. External clocks (CGU_CLK_EXT) are looked up
 * from the device tree; all other types get an ingenic_clk allocated
 * and registered with the appropriate clk_ops. On success the struct
 * clk is stored in cgu->clocks.clks[idx]; on failure a negative errno
 * is returned and the allocation is freed.
 */
static int ingenic_register_clock(struct ingenic_cgu *cgu, unsigned idx)
{
	const struct ingenic_cgu_clk_info *clk_info = &cgu->clock_info[idx];
	struct clk_init_data clk_init;
	struct ingenic_clk *ingenic_clk = NULL;
	struct clk *clk, *parent;
	const char *parent_names[4];
	unsigned caps, i, num_possible;
	int err = -EINVAL;

	BUILD_BUG_ON(ARRAY_SIZE(clk_info->parents) > ARRAY_SIZE(parent_names));

	if (clk_info->type == CGU_CLK_EXT) {
		/* External clocks come from the DT, not from CGU registers */
		clk = of_clk_get_by_name(cgu->np, clk_info->name);
		if (IS_ERR(clk)) {
			pr_err("%s: no external clock '%s' provided\n",
			       __func__, clk_info->name);
			err = -ENODEV;
			goto out;
		}
		err = clk_register_clkdev(clk, clk_info->name, NULL);
		if (err) {
			clk_put(clk);
			goto out;
		}
		cgu->clocks.clks[idx] = clk;
		return 0;
	}

	if (!clk_info->type) {
		pr_err("%s: no clock type specified for '%s'\n", __func__,
		       clk_info->name);
		goto out;
	}

	ingenic_clk = kzalloc(sizeof(*ingenic_clk), GFP_KERNEL);
	if (!ingenic_clk) {
		err = -ENOMEM;
		goto out;
	}

	ingenic_clk->hw.init = &clk_init;
	ingenic_clk->cgu = cgu;
	ingenic_clk->idx = idx;

	clk_init.name = clk_info->name;
	clk_init.flags = clk_info->flags;
	clk_init.parent_names = parent_names;

	/* caps is progressively cleared as each capability is handled;
	 * anything left at the end indicates an unsupported combination */
	caps = clk_info->type;

	if (caps & CGU_CLK_DIV) {
		caps &= ~CGU_CLK_DIV;
	} else if (!(caps & CGU_CLK_CUSTOM)) {
		/* pass rate changes to the parent clock */
		clk_init.flags |= CLK_SET_RATE_PARENT;
	}

	if (caps & (CGU_CLK_MUX | CGU_CLK_CUSTOM)) {
		clk_init.num_parents = 0;

		if (caps & CGU_CLK_MUX)
			num_possible = 1 << clk_info->mux.bits;
		else
			num_possible = ARRAY_SIZE(clk_info->parents);

		/* collect the names of all valid (non -1) parents */
		for (i = 0; i < num_possible; i++) {
			if (clk_info->parents[i] == -1)
				continue;

			parent = cgu->clocks.clks[clk_info->parents[i]];
			parent_names[clk_init.num_parents] =
				__clk_get_name(parent);
			clk_init.num_parents++;
		}

		BUG_ON(!clk_init.num_parents);
		BUG_ON(clk_init.num_parents > ARRAY_SIZE(parent_names));
	} else {
		BUG_ON(clk_info->parents[0] == -1);
		clk_init.num_parents = 1;
		parent = cgu->clocks.clks[clk_info->parents[0]];
		parent_names[0] = __clk_get_name(parent);
	}

	if (caps & CGU_CLK_CUSTOM) {
		clk_init.ops = clk_info->custom.clk_ops;

		caps &= ~CGU_CLK_CUSTOM;

		/* custom ops replace all generic handling */
		if (caps) {
			pr_err("%s: custom clock may not be combined with type 0x%x\n",
			       __func__, caps);
			goto out;
		}
	} else if (caps & CGU_CLK_PLL) {
		clk_init.ops = &ingenic_pll_ops;

		caps &= ~CGU_CLK_PLL;

		/* a PLL cannot double as a mux/div/gate clock */
		if (caps) {
			pr_err("%s: PLL may not be combined with type 0x%x\n",
			       __func__, caps);
			goto out;
		}
	} else {
		clk_init.ops = &ingenic_clk_ops;
	}

	/* nothing to do for gates or fixed dividers */
	caps &= ~(CGU_CLK_GATE | CGU_CLK_FIXDIV);

	if (caps & CGU_CLK_MUX) {
		if (!(caps & CGU_CLK_MUX_GLITCHFREE))
			clk_init.flags |= CLK_SET_PARENT_GATE;

		caps &= ~(CGU_CLK_MUX | CGU_CLK_MUX_GLITCHFREE);
	}

	if (caps) {
		pr_err("%s: unknown clock type 0x%x\n", __func__, caps);
		goto out;
	}

	clk = clk_register(NULL, &ingenic_clk->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to register clock '%s'\n", __func__,
		       clk_info->name);
		err = PTR_ERR(clk);
		goto out;
	}

	err = clk_register_clkdev(clk, clk_info->name, NULL);
	if (err)
		goto out;

	cgu->clocks.clks[idx] = clk;
out:
	if (err)
		kfree(ingenic_clk);
	return err;
}
  614. struct ingenic_cgu *
  615. ingenic_cgu_new(const struct ingenic_cgu_clk_info *clock_info,
  616. unsigned num_clocks, struct device_node *np)
  617. {
  618. struct ingenic_cgu *cgu;
  619. cgu = kzalloc(sizeof(*cgu), GFP_KERNEL);
  620. if (!cgu)
  621. goto err_out;
  622. cgu->base = of_iomap(np, 0);
  623. if (!cgu->base) {
  624. pr_err("%s: failed to map CGU registers\n", __func__);
  625. goto err_out_free;
  626. }
  627. cgu->np = np;
  628. cgu->clock_info = clock_info;
  629. cgu->clocks.clk_num = num_clocks;
  630. spin_lock_init(&cgu->lock);
  631. return cgu;
  632. err_out_free:
  633. kfree(cgu);
  634. err_out:
  635. return NULL;
  636. }
/*
 * Register every clock described by the CGU's clock_info table and
 * expose them via an of_clk provider. On any failure all clocks
 * registered so far are released (clk_put for external clocks,
 * clk_unregister for the rest) and a negative errno is returned.
 */
int ingenic_cgu_register_clocks(struct ingenic_cgu *cgu)
{
	unsigned i;
	int err;

	cgu->clocks.clks = kcalloc(cgu->clocks.clk_num, sizeof(struct clk *),
				   GFP_KERNEL);
	if (!cgu->clocks.clks) {
		err = -ENOMEM;
		goto err_out;
	}

	for (i = 0; i < cgu->clocks.clk_num; i++) {
		err = ingenic_register_clock(cgu, i);
		if (err)
			goto err_out_unregister;
	}

	err = of_clk_add_provider(cgu->np, of_clk_src_onecell_get,
				  &cgu->clocks);
	if (err)
		goto err_out_unregister;

	return 0;

err_out_unregister:
	/* entries never registered are still NULL (kcalloc) and skipped */
	for (i = 0; i < cgu->clocks.clk_num; i++) {
		if (!cgu->clocks.clks[i])
			continue;
		if (cgu->clock_info[i].type & CGU_CLK_EXT)
			clk_put(cgu->clocks.clks[i]);
		else
			clk_unregister(cgu->clocks.clks[i]);
	}
	kfree(cgu->clocks.clks);
err_out:
	return err;
}