// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 MaxLinear, Inc.
 * Copyright (C) 2020 Intel Corporation.
 * Zhu Yixin <[email protected]>
 * Rahul Tanwar <[email protected]>
 */
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/of.h>

#include "clk-cgu.h"

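/*
 * Each HW-controlled gate is backed by a register triplet: a status
 * register at offset +0x0, an enable register at +0x4 and a disable
 * register at +0x8 relative to the gate's base register.
 */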
#define GATE_HW_REG_STAT(reg)   ((reg) + 0x0)
#define GATE_HW_REG_EN(reg)     ((reg) + 0x4)
#define GATE_HW_REG_DIS(reg)    ((reg) + 0x8)
#define MAX_DDIV_REG    8
#define MAX_DIVIDER_VAL 64

#define to_lgm_clk_mux(_hw) container_of(_hw, struct lgm_clk_mux, hw)
#define to_lgm_clk_divider(_hw) container_of(_hw, struct lgm_clk_divider, hw)
#define to_lgm_clk_gate(_hw) container_of(_hw, struct lgm_clk_gate, hw)
#define to_lgm_clk_ddiv(_hw) container_of(_hw, struct lgm_clk_ddiv, hw)

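/*
 * Note: for fixed-rate entries the branch descriptor reuses its
 * mux_flags field to carry the fixed rate that is handed to
 * clk_hw_register_fixed_rate().
 */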
static struct clk_hw *lgm_clk_register_fixed(struct lgm_clk_provider *ctx,
                                             const struct lgm_clk_branch *list)
{
        if (list->div_flags & CLOCK_FLAG_VAL_INIT)
                lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift,
                                list->div_width, list->div_val);

        return clk_hw_register_fixed_rate(NULL, list->name,
                                          list->parent_data[0].name,
                                          list->flags, list->mux_flags);
}

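/*
 * MUX_CLK_SW muxes have no hardware backing: the selected parent index
 * is cached in mux->reg instead of being read from or written to the
 * CGU registers.
 */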
static u8 lgm_clk_mux_get_parent(struct clk_hw *hw)
{
        struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
        u32 val;

        if (mux->flags & MUX_CLK_SW)
                val = mux->reg;
        else
                val = lgm_get_clk_val(mux->membase, mux->reg, mux->shift,
                                      mux->width);

        return clk_mux_val_to_index(hw, NULL, mux->flags, val);
}

static int lgm_clk_mux_set_parent(struct clk_hw *hw, u8 index)
{
        struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);
        u32 val;

        val = clk_mux_index_to_val(NULL, mux->flags, index);
        if (mux->flags & MUX_CLK_SW)
                mux->reg = val;
        else
                lgm_set_clk_val(mux->membase, mux->reg, mux->shift,
                                mux->width, val);

        return 0;
}

static int lgm_clk_mux_determine_rate(struct clk_hw *hw,
                                      struct clk_rate_request *req)
{
        struct lgm_clk_mux *mux = to_lgm_clk_mux(hw);

        return clk_mux_determine_rate_flags(hw, req, mux->flags);
}

static const struct clk_ops lgm_clk_mux_ops = {
        .get_parent = lgm_clk_mux_get_parent,
        .set_parent = lgm_clk_mux_set_parent,
        .determine_rate = lgm_clk_mux_determine_rate,
};

static struct clk_hw *
lgm_clk_register_mux(struct lgm_clk_provider *ctx,
                     const struct lgm_clk_branch *list)
{
        unsigned long cflags = list->mux_flags;
        struct device *dev = ctx->dev;
        u8 shift = list->mux_shift;
        u8 width = list->mux_width;
        struct clk_init_data init = {};
        struct lgm_clk_mux *mux;
        u32 reg = list->mux_off;
        struct clk_hw *hw;
        int ret;

        mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
        if (!mux)
                return ERR_PTR(-ENOMEM);

        init.name = list->name;
        init.ops = &lgm_clk_mux_ops;
        init.flags = list->flags;
        init.parent_data = list->parent_data;
        init.num_parents = list->num_parents;

        mux->membase = ctx->membase;
        mux->reg = reg;
        mux->shift = shift;
        mux->width = width;
        mux->flags = cflags;
        mux->hw.init = &init;

        hw = &mux->hw;
        ret = devm_clk_hw_register(dev, hw);
        if (ret)
                return ERR_PTR(ret);

        if (cflags & CLOCK_FLAG_VAL_INIT)
                lgm_set_clk_val(mux->membase, reg, shift, width, list->mux_val);

        return hw;
}

static unsigned long
lgm_clk_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
        struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
        unsigned int val;

        val = lgm_get_clk_val(divider->membase, divider->reg,
                              divider->shift, divider->width);

        return divider_recalc_rate(hw, parent_rate, val, divider->table,
                                   divider->flags, divider->width);
}

static long
lgm_clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
                           unsigned long *prate)
{
        struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);

        return divider_round_rate(hw, rate, prate, divider->table,
                                  divider->width, divider->flags);
}

static int
lgm_clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
                         unsigned long prate)
{
        struct lgm_clk_divider *divider = to_lgm_clk_divider(hw);
        int value;

        value = divider_get_val(rate, prate, divider->table,
                                divider->width, divider->flags);
        if (value < 0)
                return value;

        lgm_set_clk_val(divider->membase, divider->reg,
                        divider->shift, divider->width, value);

        return 0;
}

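/*
 * Dividers may carry an optional gate bitfield (shift_gate/width_gate).
 * Dividers whose flags are DIV_CLK_NO_MASK have no such field, so
 * enabling and disabling them is a no-op.
 */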
static int lgm_clk_divider_enable_disable(struct clk_hw *hw, int enable)
{
        struct lgm_clk_divider *div = to_lgm_clk_divider(hw);

        if (div->flags != DIV_CLK_NO_MASK)
                lgm_set_clk_val(div->membase, div->reg, div->shift_gate,
                                div->width_gate, enable);

        return 0;
}

static int lgm_clk_divider_enable(struct clk_hw *hw)
{
        return lgm_clk_divider_enable_disable(hw, 1);
}

static void lgm_clk_divider_disable(struct clk_hw *hw)
{
        lgm_clk_divider_enable_disable(hw, 0);
}

static const struct clk_ops lgm_clk_divider_ops = {
        .recalc_rate = lgm_clk_divider_recalc_rate,
        .round_rate = lgm_clk_divider_round_rate,
        .set_rate = lgm_clk_divider_set_rate,
        .enable = lgm_clk_divider_enable,
        .disable = lgm_clk_divider_disable,
};

static struct clk_hw *
lgm_clk_register_divider(struct lgm_clk_provider *ctx,
                         const struct lgm_clk_branch *list)
{
        unsigned long cflags = list->div_flags;
        struct device *dev = ctx->dev;
        struct lgm_clk_divider *div;
        struct clk_init_data init = {};
        u8 shift = list->div_shift;
        u8 width = list->div_width;
        u8 shift_gate = list->div_shift_gate;
        u8 width_gate = list->div_width_gate;
        u32 reg = list->div_off;
        struct clk_hw *hw;
        int ret;

        div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
        if (!div)
                return ERR_PTR(-ENOMEM);

        init.name = list->name;
        init.ops = &lgm_clk_divider_ops;
        init.flags = list->flags;
        init.parent_data = list->parent_data;
        init.num_parents = 1;

        div->membase = ctx->membase;
        div->reg = reg;
        div->shift = shift;
        div->width = width;
        div->shift_gate = shift_gate;
        div->width_gate = width_gate;
        div->flags = cflags;
        div->table = list->div_table;
        div->hw.init = &init;

        hw = &div->hw;
        ret = devm_clk_hw_register(dev, hw);
        if (ret)
                return ERR_PTR(ret);

        if (cflags & CLOCK_FLAG_VAL_INIT)
                lgm_set_clk_val(div->membase, reg, shift, width, list->div_val);

        return hw;
}

static struct clk_hw *
lgm_clk_register_fixed_factor(struct lgm_clk_provider *ctx,
                              const struct lgm_clk_branch *list)
{
        struct clk_hw *hw;

        hw = clk_hw_register_fixed_factor(ctx->dev, list->name,
                                          list->parent_data[0].name,
                                          list->flags, list->mult, list->div);
        if (IS_ERR(hw))
                return ERR_CAST(hw);

        if (list->div_flags & CLOCK_FLAG_VAL_INIT)
                lgm_set_clk_val(ctx->membase, list->div_off, list->div_shift,
                                list->div_width, list->div_val);

        return hw;
}

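/*
 * Gates are controlled through separate enable and disable registers:
 * writing 1 to the gate's bit in the EN register enables it, writing 1
 * to the same bit in the DIS register disables it, and the current
 * state is read back from the STAT register.
 */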
static int lgm_clk_gate_enable(struct clk_hw *hw)
{
        struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
        unsigned int reg;

        reg = GATE_HW_REG_EN(gate->reg);
        lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1);

        return 0;
}

static void lgm_clk_gate_disable(struct clk_hw *hw)
{
        struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
        unsigned int reg;

        reg = GATE_HW_REG_DIS(gate->reg);
        lgm_set_clk_val(gate->membase, reg, gate->shift, 1, 1);
}

static int lgm_clk_gate_is_enabled(struct clk_hw *hw)
{
        struct lgm_clk_gate *gate = to_lgm_clk_gate(hw);
        unsigned int reg, ret;

        reg = GATE_HW_REG_STAT(gate->reg);
        ret = lgm_get_clk_val(gate->membase, reg, gate->shift, 1);

        return ret;
}

static const struct clk_ops lgm_clk_gate_ops = {
        .enable = lgm_clk_gate_enable,
        .disable = lgm_clk_gate_disable,
        .is_enabled = lgm_clk_gate_is_enabled,
};

static struct clk_hw *
lgm_clk_register_gate(struct lgm_clk_provider *ctx,
                      const struct lgm_clk_branch *list)
{
        unsigned long cflags = list->gate_flags;
        const char *pname = list->parent_data[0].name;
        struct device *dev = ctx->dev;
        u8 shift = list->gate_shift;
        struct clk_init_data init = {};
        struct lgm_clk_gate *gate;
        u32 reg = list->gate_off;
        struct clk_hw *hw;
        int ret;

        gate = devm_kzalloc(dev, sizeof(*gate), GFP_KERNEL);
        if (!gate)
                return ERR_PTR(-ENOMEM);

        init.name = list->name;
        init.ops = &lgm_clk_gate_ops;
        init.flags = list->flags;
        init.parent_names = pname ? &pname : NULL;
        init.num_parents = pname ? 1 : 0;

        gate->membase = ctx->membase;
        gate->reg = reg;
        gate->shift = shift;
        gate->flags = cflags;
        gate->hw.init = &init;

        hw = &gate->hw;
        ret = devm_clk_hw_register(dev, hw);
        if (ret)
                return ERR_PTR(ret);

        if (cflags & CLOCK_FLAG_VAL_INIT)
                lgm_set_clk_val(gate->membase, reg, shift, 1, list->gate_val);

        return hw;
}

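/*
 * Walk a branch descriptor table and register one clk per entry,
 * dispatching on the descriptor type. Each registered clk_hw is stored
 * in the provider's clk_data at the descriptor's id.
 */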
int lgm_clk_register_branches(struct lgm_clk_provider *ctx,
                              const struct lgm_clk_branch *list,
                              unsigned int nr_clk)
{
        struct clk_hw *hw;
        unsigned int idx;

        for (idx = 0; idx < nr_clk; idx++, list++) {
                switch (list->type) {
                case CLK_TYPE_FIXED:
                        hw = lgm_clk_register_fixed(ctx, list);
                        break;
                case CLK_TYPE_MUX:
                        hw = lgm_clk_register_mux(ctx, list);
                        break;
                case CLK_TYPE_DIVIDER:
                        hw = lgm_clk_register_divider(ctx, list);
                        break;
                case CLK_TYPE_FIXED_FACTOR:
                        hw = lgm_clk_register_fixed_factor(ctx, list);
                        break;
                case CLK_TYPE_GATE:
                        if (list->gate_flags & GATE_CLK_HW) {
                                hw = lgm_clk_register_gate(ctx, list);
                        } else {
                                /*
                                 * GATE clks can be controlled either from the
                                 * CGU clk driver (i.e. this driver) or
                                 * directly from the power management
                                 * driver/daemon, depending on the power
                                 * policy/profile requirements of the end
                                 * product. To leave a gate clk out of this
                                 * driver's control, provide NULL at this
                                 * index of the gate clk provider.
                                 */
                                hw = NULL;
                        }
                        break;
                default:
                        dev_err(ctx->dev, "invalid clk type\n");
                        return -EINVAL;
                }

                if (IS_ERR(hw)) {
                        dev_err(ctx->dev,
                                "register clk: %s, type: %u failed!\n",
                                list->name, list->type);
                        return -EIO;
                }
                ctx->clk_data.hws[list->id] = hw;
        }

        return 0;
}

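/*
 * Dual-divider rate: parent_rate / (div0 + 1) / (div1 + 1). When the
 * extended divider bit (shift2) is set, the result is additionally
 * scaled by mult/div, i.e. 2/5 as programmed at registration time.
 */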
static unsigned long
lgm_clk_ddiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
        struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
        unsigned int div0, div1, exdiv;
        u64 prate;

        div0 = lgm_get_clk_val(ddiv->membase, ddiv->reg,
                               ddiv->shift0, ddiv->width0) + 1;
        div1 = lgm_get_clk_val(ddiv->membase, ddiv->reg,
                               ddiv->shift1, ddiv->width1) + 1;
        exdiv = lgm_get_clk_val(ddiv->membase, ddiv->reg,
                                ddiv->shift2, ddiv->width2);

        prate = (u64)parent_rate;
        do_div(prate, div0);
        do_div(prate, div1);

        if (exdiv) {
                do_div(prate, ddiv->div);
                prate *= ddiv->mult;
        }

        return prate;
}

static int lgm_clk_ddiv_enable(struct clk_hw *hw)
{
        struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);

        lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate,
                        ddiv->width_gate, 1);

        return 0;
}

static void lgm_clk_ddiv_disable(struct clk_hw *hw)
{
        struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);

        lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift_gate,
                        ddiv->width_gate, 0);
}

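/*
 * Split an overall divider value into the two register fields: find a
 * factor pair (*ddiv1 * *ddiv2 == div) in which both factors fit
 * within MAX_DDIV_REG. Values above MAX_DIVIDER_VAL are clamped first;
 * a divider that cannot be factored this way is rejected with -EINVAL.
 */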
static int
lgm_clk_get_ddiv_val(u32 div, u32 *ddiv1, u32 *ddiv2)
{
        u32 idx, temp;

        *ddiv1 = 1;
        *ddiv2 = 1;

        if (div > MAX_DIVIDER_VAL)
                div = MAX_DIVIDER_VAL;

        if (div > 1) {
                for (idx = 2; idx <= MAX_DDIV_REG; idx++) {
                        temp = DIV_ROUND_UP_ULL((u64)div, idx);
                        if (div % idx == 0 && temp <= MAX_DDIV_REG)
                                break;
                }

                if (idx > MAX_DDIV_REG)
                        return -EINVAL;

                *ddiv1 = temp;
                *ddiv2 = idx;
        }

        return 0;
}

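/*
 * When the 2.5 predivider is active, the register fields only need to
 * provide div * 2 / 5 of the total division factor; recalc_rate
 * applies the remaining factor of 2.5.
 */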
static int
lgm_clk_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
                      unsigned long prate)
{
        struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
        u32 div, ddiv1, ddiv2;

        div = DIV_ROUND_CLOSEST_ULL((u64)prate, rate);

        if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
                div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
                div = div * 2;
        }

        if (div <= 0)
                return -EINVAL;

        if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2))
                return -EINVAL;

        lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift0, ddiv->width0,
                        ddiv1 - 1);

        lgm_set_clk_val(ddiv->membase, ddiv->reg, ddiv->shift1, ddiv->width1,
                        ddiv2 - 1);

        return 0;
}

static long
lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
                        unsigned long *prate)
{
        struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw);
        u32 div, ddiv1, ddiv2;
        u64 rate64;

        div = DIV_ROUND_CLOSEST_ULL((u64)*prate, rate);

        /* if predivide bit is enabled, modify div by factor of 2.5 */
        if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
                div = div * 2;
                div = DIV_ROUND_CLOSEST_ULL((u64)div, 5);
        }

        if (div <= 0)
                return *prate;

        if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2) != 0)
                if (lgm_clk_get_ddiv_val(div + 1, &ddiv1, &ddiv2) != 0)
                        return -EINVAL;

        rate64 = *prate;
        do_div(rate64, ddiv1);
        do_div(rate64, ddiv2);

        /* if predivide bit is enabled, modify rounded rate by factor of 2.5 */
        if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) {
                rate64 = rate64 * 2;
                rate64 = DIV_ROUND_CLOSEST_ULL(rate64, 5);
        }

        return rate64;
}

static const struct clk_ops lgm_clk_ddiv_ops = {
        .recalc_rate = lgm_clk_ddiv_recalc_rate,
        .enable = lgm_clk_ddiv_enable,
        .disable = lgm_clk_ddiv_disable,
        .set_rate = lgm_clk_ddiv_set_rate,
        .round_rate = lgm_clk_ddiv_round_rate,
};

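/*
 * Register a table of dual-divider clks. The extended divider stage is
 * fixed to mult/div = 2/5 for all entries.
 */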
int lgm_clk_register_ddiv(struct lgm_clk_provider *ctx,
                          const struct lgm_clk_ddiv_data *list,
                          unsigned int nr_clk)
{
        struct device *dev = ctx->dev;
        struct clk_hw *hw;
        unsigned int idx;
        int ret;

        for (idx = 0; idx < nr_clk; idx++, list++) {
                struct clk_init_data init = {};
                struct lgm_clk_ddiv *ddiv;

                ddiv = devm_kzalloc(dev, sizeof(*ddiv), GFP_KERNEL);
                if (!ddiv)
                        return -ENOMEM;

                init.name = list->name;
                init.ops = &lgm_clk_ddiv_ops;
                init.flags = list->flags;
                init.parent_data = list->parent_data;
                init.num_parents = 1;

                ddiv->membase = ctx->membase;
                ddiv->reg = list->reg;
                ddiv->shift0 = list->shift0;
                ddiv->width0 = list->width0;
                ddiv->shift1 = list->shift1;
                ddiv->width1 = list->width1;
                ddiv->shift_gate = list->shift_gate;
                ddiv->width_gate = list->width_gate;
                ddiv->shift2 = list->ex_shift;
                ddiv->width2 = list->ex_width;
                ddiv->flags = list->div_flags;
                ddiv->mult = 2;
                ddiv->div = 5;
                ddiv->hw.init = &init;

                hw = &ddiv->hw;
                ret = devm_clk_hw_register(dev, hw);
                if (ret) {
                        dev_err(dev, "register clk: %s failed!\n", list->name);
                        return ret;
                }
                ctx->clk_data.hws[list->id] = hw;
        }

        return 0;
}