ccu-div.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
 *
 * Authors:
 *   Serge Semin <[email protected]>
 *   Dmitry Dunaev <[email protected]>
 *
 * Baikal-T1 CCU Dividers interface driver
 */

#define pr_fmt(fmt) "bt1-ccu-div: " fmt

#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/slab.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/spinlock.h>
#include <linux/regmap.h>
#include <linux/delay.h>
#include <linux/time64.h>
#include <linux/debugfs.h>

#include "ccu-div.h"

#define CCU_DIV_CTL			0x00
#define CCU_DIV_CTL_EN			BIT(0)
#define CCU_DIV_CTL_RST			BIT(1)
#define CCU_DIV_CTL_SET_CLKDIV		BIT(2)
#define CCU_DIV_CTL_CLKDIV_FLD		4
#define CCU_DIV_CTL_CLKDIV_MASK(_width) \
	GENMASK((_width) + CCU_DIV_CTL_CLKDIV_FLD - 1, CCU_DIV_CTL_CLKDIV_FLD)
#define CCU_DIV_CTL_LOCK_SHIFTED	BIT(27)
#define CCU_DIV_CTL_GATE_REF_BUF	BIT(28)
#define CCU_DIV_CTL_LOCK_NORMAL		BIT(31)

#define CCU_DIV_LOCK_CHECK_RETRIES	50

#define CCU_DIV_CLKDIV_MIN		0
#define CCU_DIV_CLKDIV_MAX(_mask) \
	((_mask) >> CCU_DIV_CTL_CLKDIV_FLD)
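
/*
 * Example (derived from the macros above): for a divider with a 4-bit
 * CLKDIV field, CCU_DIV_CTL_CLKDIV_MASK(4) = GENMASK(7, 4) = 0xf0, so
 * the field occupies CTL register bits [7:4] and CCU_DIV_CLKDIV_MAX()
 * evaluates to 0xf0 >> 4 = 15.
 */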

/*
 * Use the next two methods until generic field setter and getter
 * helpers with non-constant mask support are available.
 */
static inline u32 ccu_div_get(u32 mask, u32 val)
{
	return (val & mask) >> CCU_DIV_CTL_CLKDIV_FLD;
}

static inline u32 ccu_div_prep(u32 mask, u32 val)
{
	return (val << CCU_DIV_CTL_CLKDIV_FLD) & mask;
}
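
/*
 * The delay computed below amounts to four periods of the divided
 * output clock, i.e. 4 * div / ref_clk seconds expressed in ns. For
 * instance, with a 25 MHz reference clock and a divider of 10 that is
 * 4 * 10 * 1e9 / 25e6 = 1600 ns.
 */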
static inline unsigned long ccu_div_lock_delay_ns(unsigned long ref_clk,
						  unsigned long div)
{
	u64 ns = 4ULL * (div ?: 1) * NSEC_PER_SEC;

	do_div(ns, ref_clk);

	return ns;
}

static inline unsigned long ccu_div_calc_freq(unsigned long ref_clk,
					      unsigned long div)
{
	return ref_clk / (div ?: 1);
}
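
/*
 * Latch a new divider value: strobe the SET_CLKDIV bit, then poll the
 * LOCK flag until the divider reports it has settled. The worst-case
 * wait is CCU_DIV_LOCK_CHECK_RETRIES polls of one lock delay each.
 */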
static int ccu_div_var_update_clkdiv(struct ccu_div *div,
				     unsigned long parent_rate,
				     unsigned long divider)
{
	unsigned long nd;
	u32 val = 0;
	u32 lock;
	int count;

	nd = ccu_div_lock_delay_ns(parent_rate, divider);

	if (div->features & CCU_DIV_LOCK_SHIFTED)
		lock = CCU_DIV_CTL_LOCK_SHIFTED;
	else
		lock = CCU_DIV_CTL_LOCK_NORMAL;

	regmap_update_bits(div->sys_regs, div->reg_ctl,
			   CCU_DIV_CTL_SET_CLKDIV, CCU_DIV_CTL_SET_CLKDIV);

	/*
	 * Until an nsec-version of readl_poll_timeout() is available
	 * we have to implement the polling loop by hand.
	 */
	count = CCU_DIV_LOCK_CHECK_RETRIES;
	do {
		ndelay(nd);
		regmap_read(div->sys_regs, div->reg_ctl, &val);
		if (val & lock)
			return 0;
	} while (--count);

	return -ETIMEDOUT;
}

static int ccu_div_var_enable(struct clk_hw *hw)
{
	struct clk_hw *parent_hw = clk_hw_get_parent(hw);
	struct ccu_div *div = to_ccu_div(hw);
	unsigned long flags;
	u32 val = 0;
	int ret;

	if (!parent_hw) {
		pr_err("Can't enable '%s' with no parent\n", clk_hw_get_name(hw));
		return -EINVAL;
	}

	regmap_read(div->sys_regs, div->reg_ctl, &val);
	if (val & CCU_DIV_CTL_EN)
		return 0;

	spin_lock_irqsave(&div->lock, flags);
	ret = ccu_div_var_update_clkdiv(div, clk_hw_get_rate(parent_hw),
					ccu_div_get(div->mask, val));
	if (!ret)
		regmap_update_bits(div->sys_regs, div->reg_ctl,
				   CCU_DIV_CTL_EN, CCU_DIV_CTL_EN);
	spin_unlock_irqrestore(&div->lock, flags);
	if (ret)
		pr_err("Divider '%s' lock timed out\n", clk_hw_get_name(hw));

	return ret;
}

static int ccu_div_gate_enable(struct clk_hw *hw)
{
	struct ccu_div *div = to_ccu_div(hw);
	unsigned long flags;

	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl,
			   CCU_DIV_CTL_EN, CCU_DIV_CTL_EN);
	spin_unlock_irqrestore(&div->lock, flags);

	return 0;
}

static void ccu_div_gate_disable(struct clk_hw *hw)
{
	struct ccu_div *div = to_ccu_div(hw);
	unsigned long flags;

	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl, CCU_DIV_CTL_EN, 0);
	spin_unlock_irqrestore(&div->lock, flags);
}

static int ccu_div_gate_is_enabled(struct clk_hw *hw)
{
	struct ccu_div *div = to_ccu_div(hw);
	u32 val = 0;

	regmap_read(div->sys_regs, div->reg_ctl, &val);

	return !!(val & CCU_DIV_CTL_EN);
}
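
/*
 * Note the reference buffer gate is active-low: clearing
 * CCU_DIV_CTL_GATE_REF_BUF enables the buffer while setting it gates
 * the buffer off, hence the inverted test in ccu_div_buf_is_enabled().
 */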
static int ccu_div_buf_enable(struct clk_hw *hw)
{
	struct ccu_div *div = to_ccu_div(hw);
	unsigned long flags;

	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl,
			   CCU_DIV_CTL_GATE_REF_BUF, 0);
	spin_unlock_irqrestore(&div->lock, flags);

	return 0;
}

static void ccu_div_buf_disable(struct clk_hw *hw)
{
	struct ccu_div *div = to_ccu_div(hw);
	unsigned long flags;

	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl,
			   CCU_DIV_CTL_GATE_REF_BUF, CCU_DIV_CTL_GATE_REF_BUF);
	spin_unlock_irqrestore(&div->lock, flags);
}

static int ccu_div_buf_is_enabled(struct clk_hw *hw)
{
	struct ccu_div *div = to_ccu_div(hw);
	u32 val = 0;

	regmap_read(div->sys_regs, div->reg_ctl, &val);

	return !(val & CCU_DIV_CTL_GATE_REF_BUF);
}

static unsigned long ccu_div_var_recalc_rate(struct clk_hw *hw,
					     unsigned long parent_rate)
{
	struct ccu_div *div = to_ccu_div(hw);
	unsigned long divider;
	u32 val = 0;

	regmap_read(div->sys_regs, div->reg_ctl, &val);
	divider = ccu_div_get(div->mask, val);

	return ccu_div_calc_freq(parent_rate, divider);
}
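
/*
 * The divider is found by integer division of the rates and then
 * clamped to the CLKDIV field range. E.g. for a 600 MHz parent and a
 * 250 MHz target the divider is 600 / 250 = 2, which gives an actual
 * output rate of 300 MHz.
 */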
static inline unsigned long ccu_div_var_calc_divider(unsigned long rate,
						     unsigned long parent_rate,
						     unsigned int mask)
{
	unsigned long divider;

	divider = parent_rate / rate;

	return clamp_t(unsigned long, divider, CCU_DIV_CLKDIV_MIN,
		       CCU_DIV_CLKDIV_MAX(mask));
}

static long ccu_div_var_round_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long *parent_rate)
{
	struct ccu_div *div = to_ccu_div(hw);
	unsigned long divider;

	divider = ccu_div_var_calc_divider(rate, *parent_rate, div->mask);

	return ccu_div_calc_freq(*parent_rate, divider);
}

/*
 * This method is used for the clock divider blocks which support
 * on-the-fly rate changes. Since they lack the EN bit functionality,
 * they can't be gated before the rate adjustment.
 */
static int ccu_div_var_set_rate_slow(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct ccu_div *div = to_ccu_div(hw);
	unsigned long flags, divider;
	u32 val;
	int ret;

	divider = ccu_div_var_calc_divider(rate, parent_rate, div->mask);
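
	/*
	 * A CLKDIV field value of zero apparently means "no division"
	 * (see the "div ?: 1" fallbacks above). The CCU_DIV_SKIP_*
	 * features presumably flag dividers which can't produce the
	 * small ratios directly, so those are remapped to the nearest
	 * supported value: 1 (or 1..2) to the bypass value 0, and 3 up
	 * to 4.
	 */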
	if (divider == 1 && div->features & CCU_DIV_SKIP_ONE) {
		divider = 0;
	} else if (div->features & CCU_DIV_SKIP_ONE_TO_THREE) {
		if (divider == 1 || divider == 2)
			divider = 0;
		else if (divider == 3)
			divider = 4;
	}

	val = ccu_div_prep(div->mask, divider);

	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl, div->mask, val);
	ret = ccu_div_var_update_clkdiv(div, parent_rate, divider);
	spin_unlock_irqrestore(&div->lock, flags);
	if (ret)
		pr_err("Divider '%s' lock timed out\n", clk_hw_get_name(hw));

	return ret;
}

/*
 * This method is used for the clock divider blocks which don't support
 * on-the-fly rate changes, so the divider is updated with the clock
 * gated. With CLK_SET_RATE_GATE the framework guarantees that, and
 * ccu_div_var_enable() re-latches the new divider when the clock is
 * re-enabled.
 */
static int ccu_div_var_set_rate_fast(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct ccu_div *div = to_ccu_div(hw);
	unsigned long flags, divider;
	u32 val;

	divider = ccu_div_var_calc_divider(rate, parent_rate, div->mask);
	val = ccu_div_prep(div->mask, divider);

	/*
	 * Also disable the clock divider block if it was enabled by default
	 * or by the bootloader.
	 */
	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl,
			   div->mask | CCU_DIV_CTL_EN, val);
	spin_unlock_irqrestore(&div->lock, flags);

	return 0;
}

static unsigned long ccu_div_fixed_recalc_rate(struct clk_hw *hw,
					       unsigned long parent_rate)
{
	struct ccu_div *div = to_ccu_div(hw);

	return ccu_div_calc_freq(parent_rate, div->divider);
}

static long ccu_div_fixed_round_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long *parent_rate)
{
	struct ccu_div *div = to_ccu_div(hw);

	return ccu_div_calc_freq(*parent_rate, div->divider);
}

static int ccu_div_fixed_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	return 0;
}

#ifdef CONFIG_DEBUG_FS

struct ccu_div_dbgfs_bit {
	struct ccu_div *div;
	const char *name;
	u32 mask;
};

#define CCU_DIV_DBGFS_BIT_ATTR(_name, _mask)	\
	{					\
		.name = _name,			\
		.mask = _mask			\
	}

static const struct ccu_div_dbgfs_bit ccu_div_bits[] = {
	CCU_DIV_DBGFS_BIT_ATTR("div_en", CCU_DIV_CTL_EN),
	CCU_DIV_DBGFS_BIT_ATTR("div_rst", CCU_DIV_CTL_RST),
	CCU_DIV_DBGFS_BIT_ATTR("div_bypass", CCU_DIV_CTL_SET_CLKDIV),
	CCU_DIV_DBGFS_BIT_ATTR("div_buf", CCU_DIV_CTL_GATE_REF_BUF),
	CCU_DIV_DBGFS_BIT_ATTR("div_lock", CCU_DIV_CTL_LOCK_NORMAL)
};

#define CCU_DIV_DBGFS_BIT_NUM	ARRAY_SIZE(ccu_div_bits)

/*
 * It can be dangerous to change the divider settings behind the clock
 * framework's back, therefore we don't provide any Kconfig-based
 * compile-time option to enable this feature.
 */
#undef CCU_DIV_ALLOW_WRITE_DEBUGFS
#ifdef CCU_DIV_ALLOW_WRITE_DEBUGFS

static int ccu_div_dbgfs_bit_set(void *priv, u64 val)
{
	const struct ccu_div_dbgfs_bit *bit = priv;
	struct ccu_div *div = bit->div;
	unsigned long flags;

	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl,
			   bit->mask, val ? bit->mask : 0);
	spin_unlock_irqrestore(&div->lock, flags);

	return 0;
}

static int ccu_div_dbgfs_var_clkdiv_set(void *priv, u64 val)
{
	struct ccu_div *div = priv;
	unsigned long flags;
	u32 data;

	val = clamp_t(u64, val, CCU_DIV_CLKDIV_MIN,
		      CCU_DIV_CLKDIV_MAX(div->mask));
	data = ccu_div_prep(div->mask, val);

	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl, div->mask, data);
	spin_unlock_irqrestore(&div->lock, flags);

	return 0;
}

#define ccu_div_dbgfs_mode		0644

#else /* !CCU_DIV_ALLOW_WRITE_DEBUGFS */

#define ccu_div_dbgfs_bit_set		NULL
#define ccu_div_dbgfs_var_clkdiv_set	NULL
#define ccu_div_dbgfs_mode		0444

#endif /* !CCU_DIV_ALLOW_WRITE_DEBUGFS */

static int ccu_div_dbgfs_bit_get(void *priv, u64 *val)
{
	const struct ccu_div_dbgfs_bit *bit = priv;
	struct ccu_div *div = bit->div;
	u32 data = 0;

	regmap_read(div->sys_regs, div->reg_ctl, &data);
	*val = !!(data & bit->mask);

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_bit_fops,
	ccu_div_dbgfs_bit_get, ccu_div_dbgfs_bit_set, "%llu\n");

static int ccu_div_dbgfs_var_clkdiv_get(void *priv, u64 *val)
{
	struct ccu_div *div = priv;
	u32 data = 0;

	regmap_read(div->sys_regs, div->reg_ctl, &data);
	*val = ccu_div_get(div->mask, data);

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_var_clkdiv_fops,
	ccu_div_dbgfs_var_clkdiv_get, ccu_div_dbgfs_var_clkdiv_set, "%llu\n");

static int ccu_div_dbgfs_fixed_clkdiv_get(void *priv, u64 *val)
{
	struct ccu_div *div = priv;

	*val = div->divider;

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_fixed_clkdiv_fops,
	ccu_div_dbgfs_fixed_clkdiv_get, NULL, "%llu\n");

static void ccu_div_var_debug_init(struct clk_hw *hw, struct dentry *dentry)
{
	struct ccu_div *div = to_ccu_div(hw);
	struct ccu_div_dbgfs_bit *bits;
	int didx, bidx, num = 2;
	const char *name;

	num += !!(div->flags & CLK_SET_RATE_GATE) +
	       !!(div->features & CCU_DIV_RESET_DOMAIN);

	bits = kcalloc(num, sizeof(*bits), GFP_KERNEL);
	if (!bits)
		return;

	for (didx = 0, bidx = 0; bidx < CCU_DIV_DBGFS_BIT_NUM; ++bidx) {
		name = ccu_div_bits[bidx].name;

		if (!(div->flags & CLK_SET_RATE_GATE) &&
		    !strcmp("div_en", name)) {
			continue;
		}

		if (!(div->features & CCU_DIV_RESET_DOMAIN) &&
		    !strcmp("div_rst", name)) {
			continue;
		}

		if (!strcmp("div_buf", name))
			continue;

		bits[didx] = ccu_div_bits[bidx];
		bits[didx].div = div;

		if (div->features & CCU_DIV_LOCK_SHIFTED &&
		    !strcmp("div_lock", name)) {
			bits[didx].mask = CCU_DIV_CTL_LOCK_SHIFTED;
		}

		debugfs_create_file_unsafe(bits[didx].name, ccu_div_dbgfs_mode,
					   dentry, &bits[didx],
					   &ccu_div_dbgfs_bit_fops);
		++didx;
	}

	debugfs_create_file_unsafe("div_clkdiv", ccu_div_dbgfs_mode, dentry,
				   div, &ccu_div_dbgfs_var_clkdiv_fops);
}

static void ccu_div_gate_debug_init(struct clk_hw *hw, struct dentry *dentry)
{
	struct ccu_div *div = to_ccu_div(hw);
	struct ccu_div_dbgfs_bit *bit;

	bit = kmalloc(sizeof(*bit), GFP_KERNEL);
	if (!bit)
		return;

	*bit = ccu_div_bits[0];
	bit->div = div;
	debugfs_create_file_unsafe(bit->name, ccu_div_dbgfs_mode, dentry, bit,
				   &ccu_div_dbgfs_bit_fops);

	debugfs_create_file_unsafe("div_clkdiv", 0400, dentry, div,
				   &ccu_div_dbgfs_fixed_clkdiv_fops);
}

static void ccu_div_buf_debug_init(struct clk_hw *hw, struct dentry *dentry)
{
	struct ccu_div *div = to_ccu_div(hw);
	struct ccu_div_dbgfs_bit *bit;

	bit = kmalloc(sizeof(*bit), GFP_KERNEL);
	if (!bit)
		return;

	*bit = ccu_div_bits[3];
	bit->div = div;
	debugfs_create_file_unsafe(bit->name, ccu_div_dbgfs_mode, dentry, bit,
				   &ccu_div_dbgfs_bit_fops);
}

static void ccu_div_fixed_debug_init(struct clk_hw *hw, struct dentry *dentry)
{
	struct ccu_div *div = to_ccu_div(hw);

	debugfs_create_file_unsafe("div_clkdiv", 0400, dentry, div,
				   &ccu_div_dbgfs_fixed_clkdiv_fops);
}

#else /* !CONFIG_DEBUG_FS */

#define ccu_div_var_debug_init NULL
#define ccu_div_gate_debug_init NULL
#define ccu_div_buf_debug_init NULL
#define ccu_div_fixed_debug_init NULL

#endif /* !CONFIG_DEBUG_FS */
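
/*
 * Each divider type gets its own clk_ops set (see ccu_div_hw_register()
 * below): CCU_DIV_VAR uses the "gate_to_set" ops when CLK_SET_RATE_GATE
 * is requested and the "nogate" ops otherwise, while CCU_DIV_GATE,
 * CCU_DIV_BUF and CCU_DIV_FIXED map straight to their namesake ops.
 */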
static const struct clk_ops ccu_div_var_gate_to_set_ops = {
	.enable = ccu_div_var_enable,
	.disable = ccu_div_gate_disable,
	.is_enabled = ccu_div_gate_is_enabled,
	.recalc_rate = ccu_div_var_recalc_rate,
	.round_rate = ccu_div_var_round_rate,
	.set_rate = ccu_div_var_set_rate_fast,
	.debug_init = ccu_div_var_debug_init
};

static const struct clk_ops ccu_div_var_nogate_ops = {
	.recalc_rate = ccu_div_var_recalc_rate,
	.round_rate = ccu_div_var_round_rate,
	.set_rate = ccu_div_var_set_rate_slow,
	.debug_init = ccu_div_var_debug_init
};

static const struct clk_ops ccu_div_gate_ops = {
	.enable = ccu_div_gate_enable,
	.disable = ccu_div_gate_disable,
	.is_enabled = ccu_div_gate_is_enabled,
	.recalc_rate = ccu_div_fixed_recalc_rate,
	.round_rate = ccu_div_fixed_round_rate,
	.set_rate = ccu_div_fixed_set_rate,
	.debug_init = ccu_div_gate_debug_init
};

static const struct clk_ops ccu_div_buf_ops = {
	.enable = ccu_div_buf_enable,
	.disable = ccu_div_buf_disable,
	.is_enabled = ccu_div_buf_is_enabled,
	.debug_init = ccu_div_buf_debug_init
};

static const struct clk_ops ccu_div_fixed_ops = {
	.recalc_rate = ccu_div_fixed_recalc_rate,
	.round_rate = ccu_div_fixed_round_rate,
	.set_rate = ccu_div_fixed_set_rate,
	.debug_init = ccu_div_fixed_debug_init
};
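
/*
 * A minimal usage sketch for the registration helper below. The field
 * names come from how ccu_div_hw_register() consumes its init data;
 * all of the values are hypothetical:
 *
 *	struct ccu_div_init_data init = {
 *		.id = 1,			// hypothetical clock ID
 *		.name = "sys_clk_div",		// hypothetical clock name
 *		.parent_name = "sys_pll",	// hypothetical parent
 *		.np = np,
 *		.sys_regs = sys_regs,
 *		.base = 0x040,			// hypothetical CTL offset
 *		.type = CCU_DIV_VAR,
 *		.width = 4,
 *		.flags = CLK_SET_RATE_GATE,
 *	};
 *	struct ccu_div *div = ccu_div_hw_register(&init);
 *
 *	if (IS_ERR(div))
 *		// handle the registration error
 */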
struct ccu_div *ccu_div_hw_register(const struct ccu_div_init_data *div_init)
{
	struct clk_parent_data parent_data = { };
	struct clk_init_data hw_init = { };
	struct ccu_div *div;
	int ret;

	if (!div_init)
		return ERR_PTR(-EINVAL);

	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	/*
	 * Note: since the Baikal-T1 System Controller registers are
	 * MMIO-backed, we don't check the regmap IO operations return
	 * status, because it must be zero anyway.
	 */
	div->hw.init = &hw_init;
	div->id = div_init->id;
	div->reg_ctl = div_init->base + CCU_DIV_CTL;
	div->sys_regs = div_init->sys_regs;
	div->flags = div_init->flags;
	div->features = div_init->features;
	spin_lock_init(&div->lock);

	hw_init.name = div_init->name;
	hw_init.flags = div_init->flags;

	if (div_init->type == CCU_DIV_VAR) {
		if (hw_init.flags & CLK_SET_RATE_GATE)
			hw_init.ops = &ccu_div_var_gate_to_set_ops;
		else
			hw_init.ops = &ccu_div_var_nogate_ops;
		div->mask = CCU_DIV_CTL_CLKDIV_MASK(div_init->width);
	} else if (div_init->type == CCU_DIV_GATE) {
		hw_init.ops = &ccu_div_gate_ops;
		div->divider = div_init->divider;
	} else if (div_init->type == CCU_DIV_BUF) {
		hw_init.ops = &ccu_div_buf_ops;
	} else if (div_init->type == CCU_DIV_FIXED) {
		hw_init.ops = &ccu_div_fixed_ops;
		div->divider = div_init->divider;
	} else {
		ret = -EINVAL;
		goto err_free_div;
	}

	if (!div_init->parent_name) {
		ret = -EINVAL;
		goto err_free_div;
	}
	parent_data.fw_name = div_init->parent_name;
	parent_data.name = div_init->parent_name;
	hw_init.parent_data = &parent_data;
	hw_init.num_parents = 1;

	ret = of_clk_hw_register(div_init->np, &div->hw);
	if (ret)
		goto err_free_div;

	return div;

err_free_div:
	kfree(div);

	return ERR_PTR(ret);
}

void ccu_div_hw_unregister(struct ccu_div *div)
{
	clk_hw_unregister(&div->hw);

	kfree(div);
}