  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Copyright 2021 NXP
  4. *
  5. * Peng Fan <[email protected]>
  6. */
  7. #include <linux/clk-provider.h>
  8. #include <linux/errno.h>
  9. #include <linux/export.h>
  10. #include <linux/io.h>
  11. #include <linux/iopoll.h>
  12. #include <linux/slab.h>
  13. #include "clk.h"
/* Maximum time to poll for a clock slice to leave the busy state, in us. */
#define TIMEOUT_US 500U

/* Bit-field layout of a CCM clock-root control register. */
#define CCM_DIV_SHIFT 0
#define CCM_DIV_WIDTH 8
#define CCM_MUX_SHIFT 8
#define CCM_MUX_MASK 3
#define CCM_OFF_SHIFT 24
#define CCM_BUSY_SHIFT 28

/* Register offsets relative to the clock-root base address. */
#define STAT_OFFSET 0x4
#define AUTHEN_OFFSET 0x30

/* AUTHEN register: non-secure access bit and per-domain white-list bits. */
#define TZ_NS_SHIFT 9
#define TZ_NS_MASK BIT(9)

#define WHITE_LIST_SHIFT 16
  26. static int imx93_clk_composite_wait_ready(struct clk_hw *hw, void __iomem *reg)
  27. {
  28. int ret;
  29. u32 val;
  30. ret = readl_poll_timeout_atomic(reg + STAT_OFFSET, val, !(val & BIT(CCM_BUSY_SHIFT)),
  31. 0, TIMEOUT_US);
  32. if (ret)
  33. pr_err("Slice[%s] busy timeout\n", clk_hw_get_name(hw));
  34. return ret;
  35. }
  36. static void imx93_clk_composite_gate_endisable(struct clk_hw *hw, int enable)
  37. {
  38. struct clk_gate *gate = to_clk_gate(hw);
  39. unsigned long flags;
  40. u32 reg;
  41. if (gate->lock)
  42. spin_lock_irqsave(gate->lock, flags);
  43. reg = readl(gate->reg);
  44. if (enable)
  45. reg &= ~BIT(gate->bit_idx);
  46. else
  47. reg |= BIT(gate->bit_idx);
  48. writel(reg, gate->reg);
  49. imx93_clk_composite_wait_ready(hw, gate->reg);
  50. if (gate->lock)
  51. spin_unlock_irqrestore(gate->lock, flags);
  52. }
  53. static int imx93_clk_composite_gate_enable(struct clk_hw *hw)
  54. {
  55. imx93_clk_composite_gate_endisable(hw, 1);
  56. return 0;
  57. }
  58. static void imx93_clk_composite_gate_disable(struct clk_hw *hw)
  59. {
  60. imx93_clk_composite_gate_endisable(hw, 0);
  61. }
/*
 * Gate ops: standard clk_gate register layout for is_enabled, but the
 * enable/disable paths add a busy-wait on the slice after each write.
 */
static const struct clk_ops imx93_clk_composite_gate_ops = {
.enable = imx93_clk_composite_gate_enable,
.disable = imx93_clk_composite_gate_disable,
.is_enabled = clk_gate_is_enabled,
};
  67. static unsigned long
  68. imx93_clk_composite_divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
  69. {
  70. return clk_divider_ops.recalc_rate(hw, parent_rate);
  71. }
  72. static long
  73. imx93_clk_composite_divider_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate)
  74. {
  75. return clk_divider_ops.round_rate(hw, rate, prate);
  76. }
  77. static int
  78. imx93_clk_composite_divider_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
  79. {
  80. return clk_divider_ops.determine_rate(hw, req);
  81. }
  82. static int imx93_clk_composite_divider_set_rate(struct clk_hw *hw, unsigned long rate,
  83. unsigned long parent_rate)
  84. {
  85. struct clk_divider *divider = to_clk_divider(hw);
  86. int value;
  87. unsigned long flags = 0;
  88. u32 val;
  89. int ret;
  90. value = divider_get_val(rate, parent_rate, divider->table, divider->width, divider->flags);
  91. if (value < 0)
  92. return value;
  93. if (divider->lock)
  94. spin_lock_irqsave(divider->lock, flags);
  95. val = readl(divider->reg);
  96. val &= ~(clk_div_mask(divider->width) << divider->shift);
  97. val |= (u32)value << divider->shift;
  98. writel(val, divider->reg);
  99. ret = imx93_clk_composite_wait_ready(hw, divider->reg);
  100. if (divider->lock)
  101. spin_unlock_irqrestore(divider->lock, flags);
  102. return ret;
  103. }
/*
 * Divider ops: read paths delegate to the generic clk_divider ops;
 * set_rate adds a busy-wait on the slice after programming the field.
 */
static const struct clk_ops imx93_clk_composite_divider_ops = {
.recalc_rate = imx93_clk_composite_divider_recalc_rate,
.round_rate = imx93_clk_composite_divider_round_rate,
.determine_rate = imx93_clk_composite_divider_determine_rate,
.set_rate = imx93_clk_composite_divider_set_rate,
};
  110. static u8 imx93_clk_composite_mux_get_parent(struct clk_hw *hw)
  111. {
  112. return clk_mux_ops.get_parent(hw);
  113. }
  114. static int imx93_clk_composite_mux_set_parent(struct clk_hw *hw, u8 index)
  115. {
  116. struct clk_mux *mux = to_clk_mux(hw);
  117. u32 val = clk_mux_index_to_val(mux->table, mux->flags, index);
  118. unsigned long flags = 0;
  119. u32 reg;
  120. int ret;
  121. if (mux->lock)
  122. spin_lock_irqsave(mux->lock, flags);
  123. reg = readl(mux->reg);
  124. reg &= ~(mux->mask << mux->shift);
  125. val = val << mux->shift;
  126. reg |= val;
  127. writel(reg, mux->reg);
  128. ret = imx93_clk_composite_wait_ready(hw, mux->reg);
  129. if (mux->lock)
  130. spin_unlock_irqrestore(mux->lock, flags);
  131. return ret;
  132. }
  133. static int
  134. imx93_clk_composite_mux_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
  135. {
  136. return clk_mux_ops.determine_rate(hw, req);
  137. }
/*
 * Mux ops: read paths delegate to the generic clk_mux ops;
 * set_parent adds a busy-wait on the slice after programming the field.
 */
static const struct clk_ops imx93_clk_composite_mux_ops = {
.get_parent = imx93_clk_composite_mux_get_parent,
.set_parent = imx93_clk_composite_mux_set_parent,
.determine_rate = imx93_clk_composite_mux_determine_rate,
};
  143. struct clk_hw *imx93_clk_composite_flags(const char *name, const char * const *parent_names,
  144. int num_parents, void __iomem *reg, u32 domain_id,
  145. unsigned long flags)
  146. {
  147. struct clk_hw *hw = ERR_PTR(-ENOMEM), *mux_hw;
  148. struct clk_hw *div_hw, *gate_hw;
  149. struct clk_divider *div = NULL;
  150. struct clk_gate *gate = NULL;
  151. struct clk_mux *mux = NULL;
  152. bool clk_ro = false;
  153. u32 authen;
  154. mux = kzalloc(sizeof(*mux), GFP_KERNEL);
  155. if (!mux)
  156. goto fail;
  157. mux_hw = &mux->hw;
  158. mux->reg = reg;
  159. mux->shift = CCM_MUX_SHIFT;
  160. mux->mask = CCM_MUX_MASK;
  161. mux->lock = &imx_ccm_lock;
  162. div = kzalloc(sizeof(*div), GFP_KERNEL);
  163. if (!div)
  164. goto fail;
  165. div_hw = &div->hw;
  166. div->reg = reg;
  167. div->shift = CCM_DIV_SHIFT;
  168. div->width = CCM_DIV_WIDTH;
  169. div->lock = &imx_ccm_lock;
  170. div->flags = CLK_DIVIDER_ROUND_CLOSEST;
  171. authen = readl(reg + AUTHEN_OFFSET);
  172. if (!(authen & TZ_NS_MASK) || !(authen & BIT(WHITE_LIST_SHIFT + domain_id)))
  173. clk_ro = true;
  174. if (clk_ro) {
  175. hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
  176. mux_hw, &clk_mux_ro_ops, div_hw,
  177. &clk_divider_ro_ops, NULL, NULL, flags);
  178. } else {
  179. gate = kzalloc(sizeof(*gate), GFP_KERNEL);
  180. if (!gate)
  181. goto fail;
  182. gate_hw = &gate->hw;
  183. gate->reg = reg;
  184. gate->bit_idx = CCM_OFF_SHIFT;
  185. gate->lock = &imx_ccm_lock;
  186. gate->flags = CLK_GATE_SET_TO_DISABLE;
  187. hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
  188. mux_hw, &imx93_clk_composite_mux_ops, div_hw,
  189. &imx93_clk_composite_divider_ops, gate_hw,
  190. &imx93_clk_composite_gate_ops,
  191. flags | CLK_SET_RATE_NO_REPARENT);
  192. }
  193. if (IS_ERR(hw))
  194. goto fail;
  195. return hw;
  196. fail:
  197. kfree(gate);
  198. kfree(div);
  199. kfree(mux);
  200. return ERR_CAST(hw);
  201. }
  202. EXPORT_SYMBOL_GPL(imx93_clk_composite_flags);