clk-highbank.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2011-2012 Calxeda, Inc.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

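/*
 * Layout of the PLL control word: DIVF is the VCO feedback divider and
 * DIVQ the output (post) divider; DIVR and RANGE are defined but not
 * used by this driver. The remaining bits are lock status and
 * bypass/reset/enable controls.
 */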
#define HB_PLL_LOCK_500         0x20000000
#define HB_PLL_LOCK             0x10000000
#define HB_PLL_DIVF_SHIFT       20
#define HB_PLL_DIVF_MASK        0x0ff00000
#define HB_PLL_DIVQ_SHIFT       16
#define HB_PLL_DIVQ_MASK        0x00070000
#define HB_PLL_DIVR_SHIFT       8
#define HB_PLL_DIVR_MASK        0x00001f00
#define HB_PLL_RANGE_SHIFT      4
#define HB_PLL_RANGE_MASK       0x00000070
#define HB_PLL_BYPASS           0x00000008
#define HB_PLL_RESET            0x00000004
#define HB_PLL_EXT_BYPASS       0x00000002
#define HB_PLL_EXT_ENA          0x00000001

#define HB_PLL_VCO_MIN_FREQ     2133000000
#define HB_PLL_MAX_FREQ         HB_PLL_VCO_MIN_FREQ
#define HB_PLL_MIN_FREQ         (HB_PLL_VCO_MIN_FREQ / 64)

#define HB_A9_BCLK_DIV_MASK     0x00000006
#define HB_A9_BCLK_DIV_SHIFT    1
#define HB_A9_PCLK_DIV          0x00000001

struct hb_clk {
        struct clk_hw   hw;
        void __iomem    *reg;
        char *parent_name;
};

#define to_hb_clk(p) container_of(p, struct hb_clk, hw)

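/*
 * Take the PLL out of reset and busy-wait until both lock indications
 * (HB_PLL_LOCK and HB_PLL_LOCK_500) are set.
 */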
static int clk_pll_prepare(struct clk_hw *hwclk)
{
        struct hb_clk *hbclk = to_hb_clk(hwclk);
        u32 reg;

        reg = readl(hbclk->reg);
        reg &= ~HB_PLL_RESET;
        writel(reg, hbclk->reg);

        while ((readl(hbclk->reg) & HB_PLL_LOCK) == 0)
                ;
        while ((readl(hbclk->reg) & HB_PLL_LOCK_500) == 0)
                ;

        return 0;
}

static void clk_pll_unprepare(struct clk_hw *hwclk)
{
        struct hb_clk *hbclk = to_hb_clk(hwclk);
        u32 reg;

        reg = readl(hbclk->reg);
        reg |= HB_PLL_RESET;
        writel(reg, hbclk->reg);
}

static int clk_pll_enable(struct clk_hw *hwclk)
{
        struct hb_clk *hbclk = to_hb_clk(hwclk);
        u32 reg;

        reg = readl(hbclk->reg);
        reg |= HB_PLL_EXT_ENA;
        writel(reg, hbclk->reg);

        return 0;
}

static void clk_pll_disable(struct clk_hw *hwclk)
{
        struct hb_clk *hbclk = to_hb_clk(hwclk);
        u32 reg;

        reg = readl(hbclk->reg);
        reg &= ~HB_PLL_EXT_ENA;
        writel(reg, hbclk->reg);
}

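/*
 * rate = parent_rate * (DIVF + 1) / 2^DIVQ; with external bypass set,
 * the PLL passes the parent rate straight through.
 */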
static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
                                         unsigned long parent_rate)
{
        struct hb_clk *hbclk = to_hb_clk(hwclk);
        unsigned long divf, divq, vco_freq, reg;

        reg = readl(hbclk->reg);
        if (reg & HB_PLL_EXT_BYPASS)
                return parent_rate;

        divf = (reg & HB_PLL_DIVF_MASK) >> HB_PLL_DIVF_SHIFT;
        divq = (reg & HB_PLL_DIVQ_MASK) >> HB_PLL_DIVQ_SHIFT;
        vco_freq = parent_rate * (divf + 1);

        return vco_freq / (1 << divq);
}

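/*
 * Clamp the requested rate to the supported range, pick the smallest
 * DIVQ (1..6) that keeps the VCO at or above its minimum frequency,
 * then round DIVF to the nearest feedback multiple of the reference.
 */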
static void clk_pll_calc(unsigned long rate, unsigned long ref_freq,
                         u32 *pdivq, u32 *pdivf)
{
        u32 divq, divf;
        unsigned long vco_freq;

        if (rate < HB_PLL_MIN_FREQ)
                rate = HB_PLL_MIN_FREQ;
        if (rate > HB_PLL_MAX_FREQ)
                rate = HB_PLL_MAX_FREQ;

        for (divq = 1; divq <= 6; divq++) {
                if ((rate * (1 << divq)) >= HB_PLL_VCO_MIN_FREQ)
                        break;
        }

        vco_freq = rate * (1 << divq);
        divf = (vco_freq + (ref_freq / 2)) / ref_freq;
        divf--;

        *pdivq = divq;
        *pdivf = divf;
}

static long clk_pll_round_rate(struct clk_hw *hwclk, unsigned long rate,
                               unsigned long *parent_rate)
{
        u32 divq, divf;
        unsigned long ref_freq = *parent_rate;

        clk_pll_calc(rate, ref_freq, &divq, &divf);

        return (ref_freq * (divf + 1)) / (1 << divq);
}

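/*
 * A DIVF change forces a re-lock: the PLL is put into external bypass
 * and reset with the new dividers, and bypass is dropped only after
 * both lock bits are set again. A DIVQ-only change is done under
 * bypass without resetting the PLL.
 */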
static int clk_pll_set_rate(struct clk_hw *hwclk, unsigned long rate,
                            unsigned long parent_rate)
{
        struct hb_clk *hbclk = to_hb_clk(hwclk);
        u32 divq, divf;
        u32 reg;

        clk_pll_calc(rate, parent_rate, &divq, &divf);

        reg = readl(hbclk->reg);
        if (divf != ((reg & HB_PLL_DIVF_MASK) >> HB_PLL_DIVF_SHIFT)) {
                /* Need to re-lock PLL, so put it into bypass mode */
                reg |= HB_PLL_EXT_BYPASS;
                writel(reg | HB_PLL_EXT_BYPASS, hbclk->reg);

                writel(reg | HB_PLL_RESET, hbclk->reg);
                reg &= ~(HB_PLL_DIVF_MASK | HB_PLL_DIVQ_MASK);
                reg |= (divf << HB_PLL_DIVF_SHIFT) | (divq << HB_PLL_DIVQ_SHIFT);
                writel(reg | HB_PLL_RESET, hbclk->reg);
                writel(reg, hbclk->reg);

                while ((readl(hbclk->reg) & HB_PLL_LOCK) == 0)
                        ;
                while ((readl(hbclk->reg) & HB_PLL_LOCK_500) == 0)
                        ;
                reg |= HB_PLL_EXT_ENA;
                reg &= ~HB_PLL_EXT_BYPASS;
        } else {
                writel(reg | HB_PLL_EXT_BYPASS, hbclk->reg);
                reg &= ~HB_PLL_DIVQ_MASK;
                reg |= divq << HB_PLL_DIVQ_SHIFT;
                writel(reg | HB_PLL_EXT_BYPASS, hbclk->reg);
        }
        writel(reg, hbclk->reg);

        return 0;
}

static const struct clk_ops clk_pll_ops = {
        .prepare = clk_pll_prepare,
        .unprepare = clk_pll_unprepare,
        .enable = clk_pll_enable,
        .disable = clk_pll_disable,
        .recalc_rate = clk_pll_recalc_rate,
        .round_rate = clk_pll_round_rate,
        .set_rate = clk_pll_set_rate,
};

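/* A9 peripheral clock: the parent divided by 4 or 8, per the PCLK divider bit. */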
static unsigned long clk_cpu_periphclk_recalc_rate(struct clk_hw *hwclk,
                                                   unsigned long parent_rate)
{
        struct hb_clk *hbclk = to_hb_clk(hwclk);
        u32 div = (readl(hbclk->reg) & HB_A9_PCLK_DIV) ? 8 : 4;

        return parent_rate / div;
}

static const struct clk_ops a9periphclk_ops = {
        .recalc_rate = clk_cpu_periphclk_recalc_rate,
};

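/* A9 bus clock: the parent divided by (2-bit divider field + 2), i.e. 2..5. */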
static unsigned long clk_cpu_a9bclk_recalc_rate(struct clk_hw *hwclk,
                                                unsigned long parent_rate)
{
        struct hb_clk *hbclk = to_hb_clk(hwclk);
        u32 div = (readl(hbclk->reg) & HB_A9_BCLK_DIV_MASK) >> HB_A9_BCLK_DIV_SHIFT;

        return parent_rate / (div + 2);
}

static const struct clk_ops a9bclk_ops = {
        .recalc_rate = clk_cpu_a9bclk_recalc_rate,
};

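/*
 * Peripheral clocks (e.g. eMMC) decode the low 5 register bits as an
 * even divisor: divisor = 2 * (field + 1).
 */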
static unsigned long clk_periclk_recalc_rate(struct clk_hw *hwclk,
                                             unsigned long parent_rate)
{
        struct hb_clk *hbclk = to_hb_clk(hwclk);
        u32 div;

        div = readl(hbclk->reg) & 0x1f;
        div++;
        div *= 2;

        return parent_rate / div;
}

static long clk_periclk_round_rate(struct clk_hw *hwclk, unsigned long rate,
                                   unsigned long *parent_rate)
{
        u32 div;

        div = *parent_rate / rate;
        div++;
        div &= ~0x1;

        return *parent_rate / div;
}

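/* Only even divisors can be programmed; odd ratios are rejected. */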
static int clk_periclk_set_rate(struct clk_hw *hwclk, unsigned long rate,
                                unsigned long parent_rate)
{
        struct hb_clk *hbclk = to_hb_clk(hwclk);
        u32 div;

        div = parent_rate / rate;
        if (div & 0x1)
                return -EINVAL;

        writel(div >> 1, hbclk->reg);

        return 0;
}

static const struct clk_ops periclk_ops = {
        .recalc_rate = clk_periclk_recalc_rate,
        .round_rate = clk_periclk_round_rate,
        .set_rate = clk_periclk_set_rate,
};

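/*
 * Common registration path: read the node's "reg" property as an
 * offset into the "calxeda,hb-sregs" block, map it, and register a
 * single-output clk_hw with the given ops and flags, named after
 * "clock-output-names" when present.
 */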
static void __init hb_clk_init(struct device_node *node, const struct clk_ops *ops, unsigned long clkflags)
{
        u32 reg;
        struct hb_clk *hb_clk;
        const char *clk_name = node->name;
        const char *parent_name;
        struct clk_init_data init;
        struct device_node *srnp;
        int rc;

        rc = of_property_read_u32(node, "reg", &reg);
        if (WARN_ON(rc))
                return;

        hb_clk = kzalloc(sizeof(*hb_clk), GFP_KERNEL);
        if (WARN_ON(!hb_clk))
                return;

        /* Map system registers */
        srnp = of_find_compatible_node(NULL, NULL, "calxeda,hb-sregs");
        hb_clk->reg = of_iomap(srnp, 0);
        of_node_put(srnp);
        BUG_ON(!hb_clk->reg);
        hb_clk->reg += reg;

        of_property_read_string(node, "clock-output-names", &clk_name);

        init.name = clk_name;
        init.ops = ops;
        init.flags = clkflags;
        parent_name = of_clk_get_parent_name(node, 0);
        init.parent_names = &parent_name;
        init.num_parents = 1;

        hb_clk->hw.init = &init;

        rc = clk_hw_register(NULL, &hb_clk->hw);
        if (WARN_ON(rc)) {
                kfree(hb_clk);
                return;
        }
        of_clk_add_hw_provider(node, of_clk_hw_simple_get, &hb_clk->hw);
}

static void __init hb_pll_init(struct device_node *node)
{
        hb_clk_init(node, &clk_pll_ops, 0);
}
CLK_OF_DECLARE(hb_pll, "calxeda,hb-pll-clock", hb_pll_init);

static void __init hb_a9periph_init(struct device_node *node)
{
        hb_clk_init(node, &a9periphclk_ops, 0);
}
CLK_OF_DECLARE(hb_a9periph, "calxeda,hb-a9periph-clock", hb_a9periph_init);

static void __init hb_a9bus_init(struct device_node *node)
{
        hb_clk_init(node, &a9bclk_ops, CLK_IS_CRITICAL);
}
CLK_OF_DECLARE(hb_a9bus, "calxeda,hb-a9bus-clock", hb_a9bus_init);

static void __init hb_emmc_init(struct device_node *node)
{
        hb_clk_init(node, &periclk_ops, 0);
}
CLK_OF_DECLARE(hb_emmc, "calxeda,hb-emmc-clock", hb_emmc_init);
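
/*
 * Illustrative device-tree fragment for one of these clocks. The
 * labels, oscillator rate, and 0x108 offset below are examples, not
 * taken from this file; only the compatible string, "reg", "clocks",
 * and the single-output provider shape come from the code above.
 *
 *      osc: oscillator {
 *              #clock-cells = <0>;
 *              compatible = "fixed-clock";
 *              clock-frequency = <33333000>;
 *      };
 *
 *      ddrpll: ddrpll@108 {
 *              #clock-cells = <0>;
 *              compatible = "calxeda,hb-pll-clock";
 *              clocks = <&osc>;
 *              reg = <0x108>;
 *      };
 */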