clk-gate.c 5.7 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 2010-2011 Canonical Ltd <[email protected]>
  4. * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <[email protected]>
  5. *
  6. * Gated clock implementation
  7. */
  8. #include <linux/clk-provider.h>
  9. #include <linux/device.h>
  10. #include <linux/module.h>
  11. #include <linux/slab.h>
  12. #include <linux/io.h>
  13. #include <linux/err.h>
  14. #include <linux/string.h>
/**
 * DOC: basic gateable clock which can gate and ungate its output
 *
 * Traits of this clock:
 * prepare - clk_(un)prepare only ensures parent is (un)prepared
 * enable - clk_enable and clk_disable are functional & control gating
 * rate - inherits rate from parent. No clk_set_rate support
 * parent - fixed parent. No clk_set_parent support
 */
  24. static inline u32 clk_gate_readl(struct clk_gate *gate)
  25. {
  26. if (gate->flags & CLK_GATE_BIG_ENDIAN)
  27. return ioread32be(gate->reg);
  28. return readl(gate->reg);
  29. }
  30. static inline void clk_gate_writel(struct clk_gate *gate, u32 val)
  31. {
  32. if (gate->flags & CLK_GATE_BIG_ENDIAN)
  33. iowrite32be(val, gate->reg);
  34. else
  35. writel(val, gate->reg);
  36. }
  37. /*
  38. * It works on following logic:
  39. *
  40. * For enabling clock, enable = 1
  41. * set2dis = 1 -> clear bit -> set = 0
  42. * set2dis = 0 -> set bit -> set = 1
  43. *
  44. * For disabling clock, enable = 0
  45. * set2dis = 1 -> set bit -> set = 1
  46. * set2dis = 0 -> clear bit -> set = 0
  47. *
  48. * So, result is always: enable xor set2dis.
  49. */
  50. static void clk_gate_endisable(struct clk_hw *hw, int enable)
  51. {
  52. struct clk_gate *gate = to_clk_gate(hw);
  53. int set = gate->flags & CLK_GATE_SET_TO_DISABLE ? 1 : 0;
  54. unsigned long flags;
  55. u32 reg;
  56. set ^= enable;
  57. if (gate->lock)
  58. spin_lock_irqsave(gate->lock, flags);
  59. else
  60. __acquire(gate->lock);
  61. if (gate->flags & CLK_GATE_HIWORD_MASK) {
  62. reg = BIT(gate->bit_idx + 16);
  63. if (set)
  64. reg |= BIT(gate->bit_idx);
  65. } else {
  66. reg = clk_gate_readl(gate);
  67. if (set)
  68. reg |= BIT(gate->bit_idx);
  69. else
  70. reg &= ~BIT(gate->bit_idx);
  71. }
  72. clk_gate_writel(gate, reg);
  73. if (gate->lock)
  74. spin_unlock_irqrestore(gate->lock, flags);
  75. else
  76. __release(gate->lock);
  77. }
  78. static int clk_gate_enable(struct clk_hw *hw)
  79. {
  80. clk_gate_endisable(hw, 1);
  81. return 0;
  82. }
  83. static void clk_gate_disable(struct clk_hw *hw)
  84. {
  85. clk_gate_endisable(hw, 0);
  86. }
  87. int clk_gate_is_enabled(struct clk_hw *hw)
  88. {
  89. u32 reg;
  90. struct clk_gate *gate = to_clk_gate(hw);
  91. reg = clk_gate_readl(gate);
  92. /* if a set bit disables this clk, flip it before masking */
  93. if (gate->flags & CLK_GATE_SET_TO_DISABLE)
  94. reg ^= BIT(gate->bit_idx);
  95. reg &= BIT(gate->bit_idx);
  96. return reg ? 1 : 0;
  97. }
  98. EXPORT_SYMBOL_GPL(clk_gate_is_enabled);
/*
 * Gate clocks only implement gating; rate and parent handling are
 * inherited from the parent clock (no .set_rate / .set_parent ops).
 */
const struct clk_ops clk_gate_ops = {
	.enable = clk_gate_enable,
	.disable = clk_gate_disable,
	.is_enabled = clk_gate_is_enabled,
};
EXPORT_SYMBOL_GPL(clk_gate_ops);
  105. struct clk_hw *__clk_hw_register_gate(struct device *dev,
  106. struct device_node *np, const char *name,
  107. const char *parent_name, const struct clk_hw *parent_hw,
  108. const struct clk_parent_data *parent_data,
  109. unsigned long flags,
  110. void __iomem *reg, u8 bit_idx,
  111. u8 clk_gate_flags, spinlock_t *lock)
  112. {
  113. struct clk_gate *gate;
  114. struct clk_hw *hw;
  115. struct clk_init_data init = {};
  116. int ret = -EINVAL;
  117. if (clk_gate_flags & CLK_GATE_HIWORD_MASK) {
  118. if (bit_idx > 15) {
  119. pr_err("gate bit exceeds LOWORD field\n");
  120. return ERR_PTR(-EINVAL);
  121. }
  122. }
  123. /* allocate the gate */
  124. gate = kzalloc(sizeof(*gate), GFP_KERNEL);
  125. if (!gate)
  126. return ERR_PTR(-ENOMEM);
  127. init.name = name;
  128. init.ops = &clk_gate_ops;
  129. init.flags = flags;
  130. init.parent_names = parent_name ? &parent_name : NULL;
  131. init.parent_hws = parent_hw ? &parent_hw : NULL;
  132. init.parent_data = parent_data;
  133. if (parent_name || parent_hw || parent_data)
  134. init.num_parents = 1;
  135. else
  136. init.num_parents = 0;
  137. /* struct clk_gate assignments */
  138. gate->reg = reg;
  139. gate->bit_idx = bit_idx;
  140. gate->flags = clk_gate_flags;
  141. gate->lock = lock;
  142. gate->hw.init = &init;
  143. hw = &gate->hw;
  144. if (dev || !np)
  145. ret = clk_hw_register(dev, hw);
  146. else if (np)
  147. ret = of_clk_hw_register(np, hw);
  148. if (ret) {
  149. kfree(gate);
  150. hw = ERR_PTR(ret);
  151. }
  152. return hw;
  153. }
  154. EXPORT_SYMBOL_GPL(__clk_hw_register_gate);
  155. struct clk *clk_register_gate(struct device *dev, const char *name,
  156. const char *parent_name, unsigned long flags,
  157. void __iomem *reg, u8 bit_idx,
  158. u8 clk_gate_flags, spinlock_t *lock)
  159. {
  160. struct clk_hw *hw;
  161. hw = clk_hw_register_gate(dev, name, parent_name, flags, reg,
  162. bit_idx, clk_gate_flags, lock);
  163. if (IS_ERR(hw))
  164. return ERR_CAST(hw);
  165. return hw->clk;
  166. }
  167. EXPORT_SYMBOL_GPL(clk_register_gate);
  168. void clk_unregister_gate(struct clk *clk)
  169. {
  170. struct clk_gate *gate;
  171. struct clk_hw *hw;
  172. hw = __clk_get_hw(clk);
  173. if (!hw)
  174. return;
  175. gate = to_clk_gate(hw);
  176. clk_unregister(clk);
  177. kfree(gate);
  178. }
  179. EXPORT_SYMBOL_GPL(clk_unregister_gate);
  180. void clk_hw_unregister_gate(struct clk_hw *hw)
  181. {
  182. struct clk_gate *gate;
  183. gate = to_clk_gate(hw);
  184. clk_hw_unregister(hw);
  185. kfree(gate);
  186. }
  187. EXPORT_SYMBOL_GPL(clk_hw_unregister_gate);
  188. static void devm_clk_hw_release_gate(struct device *dev, void *res)
  189. {
  190. clk_hw_unregister_gate(*(struct clk_hw **)res);
  191. }
  192. struct clk_hw *__devm_clk_hw_register_gate(struct device *dev,
  193. struct device_node *np, const char *name,
  194. const char *parent_name, const struct clk_hw *parent_hw,
  195. const struct clk_parent_data *parent_data,
  196. unsigned long flags,
  197. void __iomem *reg, u8 bit_idx,
  198. u8 clk_gate_flags, spinlock_t *lock)
  199. {
  200. struct clk_hw **ptr, *hw;
  201. ptr = devres_alloc(devm_clk_hw_release_gate, sizeof(*ptr), GFP_KERNEL);
  202. if (!ptr)
  203. return ERR_PTR(-ENOMEM);
  204. hw = __clk_hw_register_gate(dev, np, name, parent_name, parent_hw,
  205. parent_data, flags, reg, bit_idx,
  206. clk_gate_flags, lock);
  207. if (!IS_ERR(hw)) {
  208. *ptr = hw;
  209. devres_add(dev, ptr);
  210. } else {
  211. devres_free(ptr);
  212. }
  213. return hw;
  214. }
  215. EXPORT_SYMBOL_GPL(__devm_clk_hw_register_gate);