sde_hw_uidle.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
 *
 */

#include "sde_hwio.h"
#include "sde_hw_catalog.h"
#include "sde_hw_top.h"
#include "sde_dbg.h"
#include "sde_kms.h"
#define UIDLE_CTL 0x0
#define UIDLE_STATUS 0x4
#define UIDLE_FAL10_VETO_OVERRIDE 0x8
#define UIDLE_QACTIVE_HF_OVERRIDE 0xc

#define UIDLE_WD_TIMER_CTL 0x10
#define UIDLE_WD_TIMER_CTL2 0x14
#define UIDLE_WD_TIMER_LOAD_VALUE 0x18

#define UIDLE_DANGER_STATUS_0 0x20
#define UIDLE_DANGER_STATUS_1 0x24
#define UIDLE_SAFE_STATUS_0 0x30
#define UIDLE_SAFE_STATUS_1 0x34
#define UIDLE_IDLE_STATUS_0 0x38
#define UIDLE_IDLE_STATUS_1 0x3c
#define UIDLE_FAL_STATUS_0 0x40
#define UIDLE_FAL_STATUS_1 0x44

#define UIDLE_GATE_CNTR_CTL 0x50
#define UIDLE_FAL1_GATE_CNTR 0x54
#define UIDLE_FAL10_GATE_CNTR 0x58
#define UIDLE_FAL_WAIT_GATE_CNTR 0x5c
#define UIDLE_FAL1_NUM_TRANSITIONS_CNTR 0x60
#define UIDLE_FAL10_NUM_TRANSITIONS_CNTR 0x64
#define UIDLE_MIN_GATE_CNTR 0x68
#define UIDLE_MAX_GATE_CNTR 0x6c
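
/*
 * _top_offset - map the uidle block described in the catalog into @b and
 * return the matching catalog entry; on a bad mapping the catalog's uidle
 * revision is cleared so the feature stays disabled.
 */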
static const struct sde_uidle_cfg *_top_offset(enum sde_uidle uidle,
		struct sde_mdss_cfg *m, void __iomem *addr,
		unsigned long len, struct sde_hw_blk_reg_map *b)
{
	/* Make sure the register offset range fits within the mapped memory */
	if ((uidle == m->uidle_cfg.id) &&
			(m->uidle_cfg.base + m->uidle_cfg.len) < len) {
		b->base_off = addr;
		b->blk_off = m->uidle_cfg.base;
		b->length = m->uidle_cfg.len;
		b->hw_rev = m->hw_rev;
		b->log_mask = SDE_DBG_MASK_UIDLE;

		SDE_DEBUG("base:0x%p blk_off:0x%x length:%d hw_rev:0x%x\n",
				b->base_off, b->blk_off, b->length, b->hw_rev);
		return &m->uidle_cfg;
	}

	SDE_ERROR("wrong uidle mapping params, will disable UIDLE!\n");
	SDE_ERROR("base_off:0x%pK id:%d base:0x%x len:%d mmio_len:%ld\n",
			addr, m->uidle_cfg.id, m->uidle_cfg.base,
			m->uidle_cfg.len, len);
	m->uidle_cfg.uidle_rev = 0;

	return ERR_PTR(-EINVAL);
}
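
/*
 * sde_hw_uidle_get_status - snapshot the danger/safe/idle/fal status
 * registers into @status and derive the fal10 enable flag from bit 2 of
 * UIDLE_STATUS.
 */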
void sde_hw_uidle_get_status(struct sde_hw_uidle *uidle,
		struct sde_uidle_status *status)
{
	struct sde_hw_blk_reg_map *c = &uidle->hw;

	status->uidle_danger_status_0 =
		SDE_REG_READ(c, UIDLE_DANGER_STATUS_0);
	status->uidle_danger_status_1 =
		SDE_REG_READ(c, UIDLE_DANGER_STATUS_1);
	status->uidle_safe_status_0 =
		SDE_REG_READ(c, UIDLE_SAFE_STATUS_0);
	status->uidle_safe_status_1 =
		SDE_REG_READ(c, UIDLE_SAFE_STATUS_1);
	status->uidle_idle_status_0 =
		SDE_REG_READ(c, UIDLE_IDLE_STATUS_0);
	status->uidle_idle_status_1 =
		SDE_REG_READ(c, UIDLE_IDLE_STATUS_1);
	status->uidle_fal_status_0 =
		SDE_REG_READ(c, UIDLE_FAL_STATUS_0);
	status->uidle_fal_status_1 =
		SDE_REG_READ(c, UIDLE_FAL_STATUS_1);
	status->uidle_status =
		SDE_REG_READ(c, UIDLE_STATUS);
	status->uidle_en_fal10 =
		(status->uidle_status & BIT(2)) ? 1 : 0;
}
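
/*
 * sde_hw_uidle_get_cntr - read the fal1/fal10 gate and transition counters
 * into @cntr, then pulse the clear bit in UIDLE_GATE_CNTR_CTL so the
 * counters restart from zero.
 */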
void sde_hw_uidle_get_cntr(struct sde_hw_uidle *uidle,
		struct sde_uidle_cntr *cntr)
{
	struct sde_hw_blk_reg_map *c = &uidle->hw;
	u32 reg_val;

	cntr->fal1_gate_cntr =
		SDE_REG_READ(c, UIDLE_FAL1_GATE_CNTR);
	cntr->fal10_gate_cntr =
		SDE_REG_READ(c, UIDLE_FAL10_GATE_CNTR);
	cntr->fal_wait_gate_cntr =
		SDE_REG_READ(c, UIDLE_FAL_WAIT_GATE_CNTR);
	cntr->fal1_num_transitions_cntr =
		SDE_REG_READ(c, UIDLE_FAL1_NUM_TRANSITIONS_CNTR);
	cntr->fal10_num_transitions_cntr =
		SDE_REG_READ(c, UIDLE_FAL10_NUM_TRANSITIONS_CNTR);
	cntr->min_gate_cntr =
		SDE_REG_READ(c, UIDLE_MIN_GATE_CNTR);
	cntr->max_gate_cntr =
		SDE_REG_READ(c, UIDLE_MAX_GATE_CNTR);

	/* clear counters after read */
	reg_val = SDE_REG_READ(c, UIDLE_GATE_CNTR_CTL);
	reg_val = reg_val | BIT(31);
	SDE_REG_WRITE(c, UIDLE_GATE_CNTR_CTL, reg_val);
	reg_val = (reg_val & ~BIT(31));
	SDE_REG_WRITE(c, UIDLE_GATE_CNTR_CTL, reg_val);
}
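
/*
 * sde_hw_uidle_setup_cntr - set or clear the counter-enable bit (BIT(8)) in
 * UIDLE_GATE_CNTR_CTL.
 */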
void sde_hw_uidle_setup_cntr(struct sde_hw_uidle *uidle, bool enable)
{
	struct sde_hw_blk_reg_map *c = &uidle->hw;
	u32 reg_val;

	reg_val = SDE_REG_READ(c, UIDLE_GATE_CNTR_CTL);
	reg_val = (reg_val & ~BIT(8)) | (enable ? BIT(8) : 0);
	SDE_REG_WRITE(c, UIDLE_GATE_CNTR_CTL, reg_val);
}
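
/*
 * sde_hw_uidle_setup_wd_timer - program the uidle watchdog timer clear,
 * enable, granularity and heart-beat controls along with the load value.
 */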
void sde_hw_uidle_setup_wd_timer(struct sde_hw_uidle *uidle,
		struct sde_uidle_wd_cfg *cfg)
{
	struct sde_hw_blk_reg_map *c = &uidle->hw;
	u32 val_ctl, val_ctl2, val_ld;

	val_ctl = SDE_REG_READ(c, UIDLE_WD_TIMER_CTL);
	val_ctl2 = SDE_REG_READ(c, UIDLE_WD_TIMER_CTL2);
	val_ld = SDE_REG_READ(c, UIDLE_WD_TIMER_LOAD_VALUE);

	val_ctl = (val_ctl & ~BIT(0)) | (cfg->clear ? BIT(0) : 0);

	val_ctl2 = (val_ctl2 & ~BIT(0)) | (cfg->enable ? BIT(0) : 0);
	val_ctl2 = (val_ctl2 & ~GENMASK(4, 1)) |
		((cfg->granularity & 0xF) << 1);
	val_ctl2 = (val_ctl2 & ~BIT(8)) | (cfg->heart_beat ? BIT(8) : 0);

	val_ld = cfg->load_value;

	SDE_REG_WRITE(c, UIDLE_WD_TIMER_CTL, val_ctl);
	SDE_REG_WRITE(c, UIDLE_WD_TIMER_CTL2, val_ctl2);
	SDE_REG_WRITE(c, UIDLE_WD_TIMER_LOAD_VALUE, val_ld);
}
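
/*
 * sde_hw_uidle_setup_ctl - program UIDLE_CTL from @cfg: global enable,
 * FAL1-only mode and the fal10 danger/exit thresholds. When uidle is
 * disabled, the fal10 veto override is asserted as well.
 */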
void sde_hw_uidle_setup_ctl(struct sde_hw_uidle *uidle,
		struct sde_uidle_ctl_cfg *cfg)
{
	struct sde_hw_blk_reg_map *c = &uidle->hw;
	bool enable = false;
	u32 reg_val, fal10_veto_regval = 0;

	reg_val = SDE_REG_READ(c, UIDLE_CTL);
	enable = (cfg->uidle_state > UIDLE_STATE_DISABLE &&
			cfg->uidle_state < UIDLE_STATE_ENABLE_MAX);
	reg_val = (reg_val & ~BIT(31)) | (enable ? BIT(31) : 0);
	reg_val = (reg_val & ~BIT(30)) | (cfg->uidle_state
			== UIDLE_STATE_FAL1_ONLY ? BIT(30) : 0);
	reg_val = (reg_val & ~FAL10_DANGER_MSK) |
		((cfg->fal10_danger << FAL10_DANGER_SHFT) &
		FAL10_DANGER_MSK);
	reg_val = (reg_val & ~FAL10_EXIT_DANGER_MSK) |
		((cfg->fal10_exit_danger << FAL10_EXIT_DANGER_SHFT) &
		FAL10_EXIT_DANGER_MSK);
	reg_val = (reg_val & ~FAL10_EXIT_CNT_MSK) |
		((cfg->fal10_exit_cnt << FAL10_EXIT_CNT_SHFT) &
		FAL10_EXIT_CNT_MSK);

	SDE_REG_WRITE(c, UIDLE_CTL, reg_val);

	if (!enable)
		fal10_veto_regval |= (BIT(31) | BIT(0));
	SDE_REG_WRITE(c, UIDLE_FAL10_VETO_OVERRIDE, fal10_veto_regval);
}
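
/*
 * sde_hw_uilde_active_override - drive the QACTIVE_HF override register,
 * asserting BIT(0) and BIT(31) when the override is enabled.
 */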
static void sde_hw_uilde_active_override(struct sde_hw_uidle *uidle,
		bool enable)
{
	struct sde_hw_blk_reg_map *c = &uidle->hw;
	u32 reg_val = 0;

	if (enable)
		reg_val = BIT(0) | BIT(31);

	SDE_REG_WRITE(c, UIDLE_QACTIVE_HF_OVERRIDE, reg_val);
}
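
/*
 * sde_hw_uidle_fal10_override - assert or release the fal10 veto override
 * and order the register write with a write memory barrier.
 */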
static void sde_hw_uidle_fal10_override(struct sde_hw_uidle *uidle,
		bool enable)
{
	struct sde_hw_blk_reg_map *c = &uidle->hw;
	u32 reg_val = 0;

	if (enable)
		reg_val = BIT(0) | BIT(31);

	SDE_REG_WRITE(c, UIDLE_FAL10_VETO_OVERRIDE, reg_val);

	/* make sure the veto override write is committed before continuing */
	wmb();
}
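
/*
 * _setup_uidle_ops - populate the uidle ops table; the QACTIVE override
 * hook is only exposed when the catalog advertises
 * SDE_UIDLE_QACTIVE_OVERRIDE.
 */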
static inline void _setup_uidle_ops(struct sde_hw_uidle_ops *ops,
		unsigned long cap)
{
	ops->set_uidle_ctl = sde_hw_uidle_setup_ctl;
	ops->setup_wd_timer = sde_hw_uidle_setup_wd_timer;
	ops->uidle_setup_cntr = sde_hw_uidle_setup_cntr;
	ops->uidle_get_cntr = sde_hw_uidle_get_cntr;
	ops->uidle_get_status = sde_hw_uidle_get_status;
	if (cap & BIT(SDE_UIDLE_QACTIVE_OVERRIDE))
		ops->active_override_enable = sde_hw_uilde_active_override;
	ops->uidle_fal10_override = sde_hw_uidle_fal10_override;
}
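
/*
 * sde_hw_uidle_init - allocate the uidle hw block for @idx, map its
 * registers via the catalog, hook up the ops and register the range with
 * the sde debug facility.
 */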
struct sde_hw_uidle *sde_hw_uidle_init(enum sde_uidle idx,
		void __iomem *addr, unsigned long len,
		struct sde_mdss_cfg *m)
{
	struct sde_hw_uidle *c;
	const struct sde_uidle_cfg *cfg;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	cfg = _top_offset(idx, m, addr, len, &c->hw);
	if (IS_ERR_OR_NULL(cfg)) {
		kfree(c);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * Assign ops
	 */
	c->idx = idx;
	c->cap = cfg;
	_setup_uidle_ops(&c->ops, c->cap->features);

	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, "uidle", c->hw.blk_off,
			c->hw.blk_off + c->hw.length, 0);

	return c;
}