// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2016 Maxime Ripard
 * Maxime Ripard <[email protected]>
 */

#include <linux/clk-provider.h>
#include <linux/io.h>

#include "ccu_gate.h"
#include "ccu_mult.h"
/*
 * Scratch descriptor for multiplier selection: ccu_mult_find_best()
 * clamps its candidate to [min, max] and stores the result in @mult.
 */
struct _ccu_mult {
	unsigned long	mult, min, max;
};
  13. static void ccu_mult_find_best(unsigned long parent, unsigned long rate,
  14. struct _ccu_mult *mult)
  15. {
  16. int _mult;
  17. _mult = rate / parent;
  18. if (_mult < mult->min)
  19. _mult = mult->min;
  20. if (_mult > mult->max)
  21. _mult = mult->max;
  22. mult->mult = _mult;
  23. }
  24. static unsigned long ccu_mult_round_rate(struct ccu_mux_internal *mux,
  25. struct clk_hw *parent,
  26. unsigned long *parent_rate,
  27. unsigned long rate,
  28. void *data)
  29. {
  30. struct ccu_mult *cm = data;
  31. struct _ccu_mult _cm;
  32. _cm.min = cm->mult.min;
  33. if (cm->mult.max)
  34. _cm.max = cm->mult.max;
  35. else
  36. _cm.max = (1 << cm->mult.width) + cm->mult.offset - 1;
  37. ccu_mult_find_best(*parent_rate, rate, &_cm);
  38. return *parent_rate * _cm.mult;
  39. }
  40. static void ccu_mult_disable(struct clk_hw *hw)
  41. {
  42. struct ccu_mult *cm = hw_to_ccu_mult(hw);
  43. return ccu_gate_helper_disable(&cm->common, cm->enable);
  44. }
  45. static int ccu_mult_enable(struct clk_hw *hw)
  46. {
  47. struct ccu_mult *cm = hw_to_ccu_mult(hw);
  48. return ccu_gate_helper_enable(&cm->common, cm->enable);
  49. }
  50. static int ccu_mult_is_enabled(struct clk_hw *hw)
  51. {
  52. struct ccu_mult *cm = hw_to_ccu_mult(hw);
  53. return ccu_gate_helper_is_enabled(&cm->common, cm->enable);
  54. }
  55. static unsigned long ccu_mult_recalc_rate(struct clk_hw *hw,
  56. unsigned long parent_rate)
  57. {
  58. struct ccu_mult *cm = hw_to_ccu_mult(hw);
  59. unsigned long val;
  60. u32 reg;
  61. if (ccu_frac_helper_is_enabled(&cm->common, &cm->frac))
  62. return ccu_frac_helper_read_rate(&cm->common, &cm->frac);
  63. reg = readl(cm->common.base + cm->common.reg);
  64. val = reg >> cm->mult.shift;
  65. val &= (1 << cm->mult.width) - 1;
  66. parent_rate = ccu_mux_helper_apply_prediv(&cm->common, &cm->mux, -1,
  67. parent_rate);
  68. return parent_rate * (val + cm->mult.offset);
  69. }
  70. static int ccu_mult_determine_rate(struct clk_hw *hw,
  71. struct clk_rate_request *req)
  72. {
  73. struct ccu_mult *cm = hw_to_ccu_mult(hw);
  74. return ccu_mux_helper_determine_rate(&cm->common, &cm->mux,
  75. req, ccu_mult_round_rate, cm);
  76. }
  77. static int ccu_mult_set_rate(struct clk_hw *hw, unsigned long rate,
  78. unsigned long parent_rate)
  79. {
  80. struct ccu_mult *cm = hw_to_ccu_mult(hw);
  81. struct _ccu_mult _cm;
  82. unsigned long flags;
  83. u32 reg;
  84. if (ccu_frac_helper_has_rate(&cm->common, &cm->frac, rate)) {
  85. ccu_frac_helper_enable(&cm->common, &cm->frac);
  86. return ccu_frac_helper_set_rate(&cm->common, &cm->frac,
  87. rate, cm->lock);
  88. } else {
  89. ccu_frac_helper_disable(&cm->common, &cm->frac);
  90. }
  91. parent_rate = ccu_mux_helper_apply_prediv(&cm->common, &cm->mux, -1,
  92. parent_rate);
  93. _cm.min = cm->mult.min;
  94. if (cm->mult.max)
  95. _cm.max = cm->mult.max;
  96. else
  97. _cm.max = (1 << cm->mult.width) + cm->mult.offset - 1;
  98. ccu_mult_find_best(parent_rate, rate, &_cm);
  99. spin_lock_irqsave(cm->common.lock, flags);
  100. reg = readl(cm->common.base + cm->common.reg);
  101. reg &= ~GENMASK(cm->mult.width + cm->mult.shift - 1, cm->mult.shift);
  102. reg |= ((_cm.mult - cm->mult.offset) << cm->mult.shift);
  103. writel(reg, cm->common.base + cm->common.reg);
  104. spin_unlock_irqrestore(cm->common.lock, flags);
  105. ccu_helper_wait_for_lock(&cm->common, cm->lock);
  106. return 0;
  107. }
  108. static u8 ccu_mult_get_parent(struct clk_hw *hw)
  109. {
  110. struct ccu_mult *cm = hw_to_ccu_mult(hw);
  111. return ccu_mux_helper_get_parent(&cm->common, &cm->mux);
  112. }
  113. static int ccu_mult_set_parent(struct clk_hw *hw, u8 index)
  114. {
  115. struct ccu_mult *cm = hw_to_ccu_mult(hw);
  116. return ccu_mux_helper_set_parent(&cm->common, &cm->mux, index);
  117. }
  118. const struct clk_ops ccu_mult_ops = {
  119. .disable = ccu_mult_disable,
  120. .enable = ccu_mult_enable,
  121. .is_enabled = ccu_mult_is_enabled,
  122. .get_parent = ccu_mult_get_parent,
  123. .set_parent = ccu_mult_set_parent,
  124. .determine_rate = ccu_mult_determine_rate,
  125. .recalc_rate = ccu_mult_recalc_rate,
  126. .set_rate = ccu_mult_set_rate,
  127. };
  128. EXPORT_SYMBOL_NS_GPL(ccu_mult_ops, SUNXI_CCU);