/*
 * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
 *
 * Copyright (C) 2010 Magnus Damm
 * Copyright (C) 2010 - 2012 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/sh_clk.h>

#define CPG_CKSTP_BIT	BIT(8)

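/*
 * Register accessors. CPG enable/control registers are 8, 16 or 32 bits
 * wide depending on the block; the CLK_ENABLE_REG_{8,16}BIT flags select
 * the access size, with 32-bit access as the default.
 */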
static unsigned int sh_clk_read(struct clk *clk)
{
	if (clk->flags & CLK_ENABLE_REG_8BIT)
		return ioread8(clk->mapped_reg);
	else if (clk->flags & CLK_ENABLE_REG_16BIT)
		return ioread16(clk->mapped_reg);

	return ioread32(clk->mapped_reg);
}

static void sh_clk_write(int value, struct clk *clk)
{
	if (clk->flags & CLK_ENABLE_REG_8BIT)
		iowrite8(value, clk->mapped_reg);
	else if (clk->flags & CLK_ENABLE_REG_16BIT)
		iowrite16(value, clk->mapped_reg);
	else
		iowrite32(value, clk->mapped_reg);
}

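/*
 * MSTP ("module stop") gate clocks: a set bit stops the module clock,
 * so enable clears the bit and disable sets it. When a status register
 * (MSTPSR on some SoCs) is provided, enable busy-waits (up to 1000
 * iterations) for the corresponding status bit to clear before
 * declaring the module running.
 */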
static int sh_clk_mstp_enable(struct clk *clk)
{
	sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);

	if (clk->status_reg) {
		unsigned int (*read)(const void __iomem *addr);
		int i;
		void __iomem *mapped_status = (phys_addr_t)clk->status_reg -
			(phys_addr_t)clk->enable_reg + clk->mapped_reg;

		if (clk->flags & CLK_ENABLE_REG_8BIT)
			read = ioread8;
		else if (clk->flags & CLK_ENABLE_REG_16BIT)
			read = ioread16;
		else
			read = ioread32;

		for (i = 1000;
		     (read(mapped_status) & (1 << clk->enable_bit)) && i;
		     i--)
			cpu_relax();

		if (!i) {
			pr_err("cpg: failed to enable %p[%d]\n",
			       clk->enable_reg, clk->enable_bit);
			return -ETIMEDOUT;
		}
	}

	return 0;
}

static void sh_clk_mstp_disable(struct clk *clk)
{
	sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
}

static struct sh_clk_ops sh_clk_mstp_clk_ops = {
	.enable = sh_clk_mstp_enable,
	.disable = sh_clk_mstp_disable,
	.recalc = followparent_recalc,
};

int __init sh_clk_mstp_register(struct clk *clks, int nr)
{
	struct clk *clkp;
	int ret = 0;
	int k;

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;
		clkp->ops = &sh_clk_mstp_clk_ops;
		ret |= clk_register(clkp);
	}

	return ret;
}

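/*
 * A minimal usage sketch with hypothetical register/bit values,
 * assuming the SH_CLK_MSTP32() initializer from <linux/sh_clk.h>:
 *
 *	static struct clk mstp_clks[] = {
 *		[0] = SH_CLK_MSTP32(&parent_clk, MSTPCR0, 4, 0),
 *	};
 *
 *	sh_clk_mstp_register(mstp_clks, ARRAY_SIZE(mstp_clks));
 */
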
/*
 * Div/mult table lookup helpers
 */
static inline struct clk_div_table *clk_to_div_table(struct clk *clk)
{
	return clk->priv;
}

static inline struct clk_div_mult_table *clk_to_div_mult_table(struct clk *clk)
{
	return clk_to_div_table(clk)->div_mult_table;
}

/*
 * Common div ops
 */
static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_table_round(clk, clk->freq_table, rate);
}

static unsigned long sh_clk_div_recalc(struct clk *clk)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	unsigned int idx;

	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, clk->arch_flags ? &clk->arch_flags : NULL);

	idx = (sh_clk_read(clk) >> clk->enable_bit) & clk->div_mask;

	return clk->freq_table[idx].frequency;
}

static int sh_clk_div_set_rate(struct clk *clk, unsigned long rate)
{
	struct clk_div_table *dt = clk_to_div_table(clk);
	unsigned long value;
	int idx;

	idx = clk_rate_table_find(clk, clk->freq_table, rate);
	if (idx < 0)
		return idx;

	value = sh_clk_read(clk);
	value &= ~(clk->div_mask << clk->enable_bit);
	value |= (idx << clk->enable_bit);
	sh_clk_write(value, clk);

	/* XXX: Should use a post-change notifier */
	if (dt->kick)
		dt->kick(clk);

	return 0;
}

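/*
 * div6 clocks must have their divisor field reprogrammed before the
 * CKSTP bit is cleared: sh_clk_div_disable() below may have forced the
 * field to all ones, so restore the cached rate first.
 */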
static int sh_clk_div_enable(struct clk *clk)
{
	if (clk->div_mask == SH_CLK_DIV6_MSK) {
		int ret = sh_clk_div_set_rate(clk, clk->rate);
		if (ret < 0)
			return ret;
	}

	sh_clk_write(sh_clk_read(clk) & ~CPG_CKSTP_BIT, clk);
	return 0;
}

static void sh_clk_div_disable(struct clk *clk)
{
	unsigned int val;

	val = sh_clk_read(clk);
	val |= CPG_CKSTP_BIT;

	/*
	 * div6 clocks require the divisor field to be non-zero or the
	 * above CKSTP toggle silently fails. Ensure that the divisor
	 * array is reset to its initial state on disable.
	 */
	if (clk->flags & CLK_MASK_DIV_ON_DISABLE)
		val |= clk->div_mask;

	sh_clk_write(val, clk);
}

static struct sh_clk_ops sh_clk_div_clk_ops = {
	.recalc = sh_clk_div_recalc,
	.set_rate = sh_clk_div_set_rate,
	.round_rate = sh_clk_div_round_rate,
};

static struct sh_clk_ops sh_clk_div_enable_clk_ops = {
	.recalc = sh_clk_div_recalc,
	.set_rate = sh_clk_div_set_rate,
	.round_rate = sh_clk_div_round_rate,
	.enable = sh_clk_div_enable,
	.disable = sh_clk_div_disable,
};

static int __init sh_clk_init_parent(struct clk *clk)
{
	u32 val;

	if (clk->parent)
		return 0;

	if (!clk->parent_table || !clk->parent_num)
		return 0;

	if (!clk->src_width) {
		pr_err("sh_clk_init_parent: cannot select parent clock\n");
		return -EINVAL;
	}

	val = (sh_clk_read(clk) >> clk->src_shift);
	val &= (1 << clk->src_width) - 1;

	if (val >= clk->parent_num) {
		pr_err("sh_clk_init_parent: parent index out of range\n");
		return -EINVAL;
	}

	clk_reparent(clk, clk->parent_table[val]);
	if (!clk->parent) {
		pr_err("sh_clk_init_parent: unable to set parent\n");
		return -EINVAL;
	}

	return 0;
}

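/*
 * sh_clk_div_register_ops() carves one cpufreq frequency table per
 * clock out of a single kcalloc() allocation; the extra entry in each
 * table holds the CPUFREQ_TABLE_END terminator.
 */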
static int __init sh_clk_div_register_ops(struct clk *clks, int nr,
			struct clk_div_table *table, struct sh_clk_ops *ops)
{
	struct clk *clkp;
	void *freq_table;
	int nr_divs = table->div_mult_table->nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int ret = 0;
	int k;

	freq_table_size *= (nr_divs + 1);

	freq_table = kcalloc(nr, freq_table_size, GFP_KERNEL);
	if (!freq_table) {
		pr_err("%s: unable to alloc memory\n", __func__);
		return -ENOMEM;
	}

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = ops;
		clkp->priv = table;
		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

		ret = clk_register(clkp);
		if (ret == 0)
			ret = sh_clk_init_parent(clkp);
	}

	return ret;
}

/*
 * div6 support
 */
static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};

static struct clk_div_mult_table div6_div_mult_table = {
	.divisors = sh_clk_div6_divisors,
	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};

static struct clk_div_table sh_clk_div6_table = {
	.div_mult_table = &div6_div_mult_table,
};

static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	u32 value;
	int ret, i;

	if (!clk->parent_table || !clk->parent_num)
		return -EINVAL;

	/* Search the parent */
	for (i = 0; i < clk->parent_num; i++)
		if (clk->parent_table[i] == parent)
			break;

	if (i == clk->parent_num)
		return -ENODEV;

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	value = sh_clk_read(clk) &
		~(((1 << clk->src_width) - 1) << clk->src_shift);

	sh_clk_write(value | (i << clk->src_shift), clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, NULL);

	return 0;
}

static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
	.recalc = sh_clk_div_recalc,
	.round_rate = sh_clk_div_round_rate,
	.set_rate = sh_clk_div_set_rate,
	.enable = sh_clk_div_enable,
	.disable = sh_clk_div_disable,
	.set_parent = sh_clk_div6_set_parent,
};

int __init sh_clk_div6_register(struct clk *clks, int nr)
{
	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
				       &sh_clk_div_enable_clk_ops);
}

int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
{
	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
				       &sh_clk_div6_reparent_clk_ops);
}

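/*
 * A minimal usage sketch with a hypothetical (SoC-specific) register,
 * assuming the SH_CLK_DIV6() initializer from <linux/sh_clk.h>:
 *
 *	static struct clk div6_clks[] = {
 *		[0] = SH_CLK_DIV6(&pll_clk, VCLKCR, 0),
 *	};
 *
 *	sh_clk_div6_register(div6_clks, ARRAY_SIZE(div6_clks));
 */
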
/*
 * div4 support
 */
static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	u32 value;
	int ret;

	/* we really need a better way to determine parent index, but for
	 * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
	 * no CLK_ENABLE_ON_INIT means external clock...
	 */
	if (parent->flags & CLK_ENABLE_ON_INIT)
		value = sh_clk_read(clk) & ~(1 << 7);
	else
		value = sh_clk_read(clk) | (1 << 7);

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	sh_clk_write(value, clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	return 0;
}

static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
	.recalc = sh_clk_div_recalc,
	.set_rate = sh_clk_div_set_rate,
	.round_rate = sh_clk_div_round_rate,
	.enable = sh_clk_div_enable,
	.disable = sh_clk_div_disable,
	.set_parent = sh_clk_div4_set_parent,
};

int __init sh_clk_div4_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table, &sh_clk_div_clk_ops);
}

int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
				       struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table,
				       &sh_clk_div_enable_clk_ops);
}

int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
					 struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table,
				       &sh_clk_div4_reparent_clk_ops);
}

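/*
 * A minimal usage sketch with hypothetical register/bitmap values,
 * assuming the SH_CLK_DIV4() initializer from <linux/sh_clk.h> and an
 * SoC-specific div/mult table:
 *
 *	static struct clk_div4_table div4_table = {
 *		.div_mult_table = &div4_div_mult_table,
 *	};
 *
 *	static struct clk div4_clks[] = {
 *		[0] = SH_CLK_DIV4(&pll_clk, FRQCR, 20, 0x0dbf, 0),
 *	};
 *
 *	sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks), &div4_table);
 */
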
/*
 * FSI-DIV: audio (FSI) clock divider. The divisor lives in bits
 * [31:16] of the control register; a divisor value below 2 passes
 * the parent clock through undivided.
 */
static unsigned long fsidiv_recalc(struct clk *clk)
{
	u32 value;

	value = __raw_readl(clk->mapping->base);

	value >>= 16;
	if (value < 2)
		return clk->parent->rate;

	return clk->parent->rate / value;
}

static long fsidiv_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_div_range_round(clk, 1, 0xffff, rate);
}

static void fsidiv_disable(struct clk *clk)
{
	__raw_writel(0, clk->mapping->base);
}

static int fsidiv_enable(struct clk *clk)
{
	u32 value;

	value = __raw_readl(clk->mapping->base) >> 16;
	if (value < 2)
		return 0;

	__raw_writel((value << 16) | 0x3, clk->mapping->base);

	return 0;
}

static int fsidiv_set_rate(struct clk *clk, unsigned long rate)
{
	int idx;

	idx = (clk->parent->rate / rate) & 0xffff;
	if (idx < 2)
		__raw_writel(0, clk->mapping->base);
	else
		__raw_writel(idx << 16, clk->mapping->base);

	return 0;
}

static struct sh_clk_ops fsidiv_clk_ops = {
	.recalc = fsidiv_recalc,
	.round_rate = fsidiv_round_rate,
	.set_rate = fsidiv_set_rate,
	.enable = fsidiv_enable,
	.disable = fsidiv_disable,
};

int __init sh_clk_fsidiv_register(struct clk *clks, int nr)
{
	struct clk_mapping *map;
	int i;

	for (i = 0; i < nr; i++) {
		map = kzalloc(sizeof(struct clk_mapping), GFP_KERNEL);
		if (!map) {
			pr_err("%s: unable to alloc memory\n", __func__);
			return -ENOMEM;
		}

		/* clks[i].enable_reg came from SH_CLK_FSIDIV() */
		map->phys = (phys_addr_t)clks[i].enable_reg;
		map->len = 8;

		clks[i].enable_reg = 0; /* remove .enable_reg */
		clks[i].ops = &fsidiv_clk_ops;
		clks[i].mapping = map;

		clk_register(&clks[i]);
	}

	return 0;
}
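
/*
 * A minimal usage sketch with a hypothetical (SoC-specific) register,
 * assuming the SH_CLK_FSIDIV() initializer from <linux/sh_clk.h>:
 *
 *	static struct clk fsidivs[] = {
 *		[0] = SH_CLK_FSIDIV(FSIDIVA, &fsia_clk),
 *	};
 *
 *	sh_clk_fsidiv_register(fsidivs, ARRAY_SIZE(fsidivs));
 */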