/* sde_hw_uidle.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  4. * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
  5. *
  6. */
  7. #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
  8. #include "sde_hwio.h"
  9. #include "sde_hw_catalog.h"
  10. #include "sde_hw_top.h"
  11. #include "sde_dbg.h"
  12. #include "sde_kms.h"
  13. #define UIDLE_CTL 0x0
  14. #define UIDLE_STATUS 0x4
  15. #define UIDLE_FAL10_VETO_OVERRIDE 0x8
  16. #define UIDLE_QACTIVE_HF_OVERRIDE 0xc
  17. #define UIDLE_WD_TIMER_CTL 0x10
  18. #define UIDLE_WD_TIMER_CTL2 0x14
  19. #define UIDLE_WD_TIMER_LOAD_VALUE 0x18
  20. #define UIDLE_DANGER_STATUS_0 0x20
  21. #define UIDLE_DANGER_STATUS_1 0x24
  22. #define UIDLE_SAFE_STATUS_0 0x30
  23. #define UIDLE_SAFE_STATUS_1 0x34
  24. #define UIDLE_IDLE_STATUS_0 0x38
  25. #define UIDLE_IDLE_STATUS_1 0x3c
  26. #define UIDLE_FAL_STATUS_0 0x40
  27. #define UIDLE_FAL_STATUS_1 0x44
  28. #define UIDLE_GATE_CNTR_CTL 0x50
  29. #define UIDLE_FAL1_GATE_CNTR 0x54
  30. #define UIDLE_FAL10_GATE_CNTR 0x58
  31. #define UIDLE_FAL_WAIT_GATE_CNTR 0x5c
  32. #define UIDLE_FAL1_NUM_TRANSITIONS_CNTR 0x60
  33. #define UIDLE_FAL10_NUM_TRANSITIONS_CNTR 0x64
  34. #define UIDLE_MIN_GATE_CNTR 0x68
  35. #define UIDLE_MAX_GATE_CNTR 0x6c
  36. static const struct sde_uidle_cfg *_top_offset(enum sde_uidle uidle,
  37. struct sde_mdss_cfg *m, void __iomem *addr,
  38. unsigned long len, struct sde_hw_blk_reg_map *b)
  39. {
  40. /* Make sure length of regs offsets is within the mapped memory */
  41. if ((uidle == m->uidle_cfg.id) &&
  42. (m->uidle_cfg.base + m->uidle_cfg.len) < len) {
  43. b->base_off = addr;
  44. b->blk_off = m->uidle_cfg.base;
  45. b->length = m->uidle_cfg.len;
  46. b->hw_rev = m->hw_rev;
  47. b->log_mask = SDE_DBG_MASK_UIDLE;
  48. SDE_DEBUG("base:0x%p blk_off:0x%x length:%d hw_rev:0x%x\n",
  49. b->base_off, b->blk_off, b->length, b->hw_rev);
  50. return &m->uidle_cfg;
  51. }
  52. SDE_ERROR("wrong uidle mapping params, will disable UIDLE!\n");
  53. SDE_ERROR("base_off:0x%pK id:%d base:0x%x len:%d mmio_len:%ld\n",
  54. addr, m->uidle_cfg.id, m->uidle_cfg.base,
  55. m->uidle_cfg.len, len);
  56. m->uidle_cfg.uidle_rev = 0;
  57. return ERR_PTR(-EINVAL);
  58. }
  59. void sde_hw_uidle_get_status(struct sde_hw_uidle *uidle,
  60. struct sde_uidle_status *status)
  61. {
  62. struct sde_hw_blk_reg_map *c = &uidle->hw;
  63. status->uidle_danger_status_0 =
  64. SDE_REG_READ(c, UIDLE_DANGER_STATUS_0);
  65. status->uidle_danger_status_1 =
  66. SDE_REG_READ(c, UIDLE_DANGER_STATUS_1);
  67. status->uidle_safe_status_0 =
  68. SDE_REG_READ(c, UIDLE_SAFE_STATUS_0);
  69. status->uidle_safe_status_1 =
  70. SDE_REG_READ(c, UIDLE_SAFE_STATUS_1);
  71. status->uidle_idle_status_0 =
  72. SDE_REG_READ(c, UIDLE_IDLE_STATUS_0);
  73. status->uidle_idle_status_1 =
  74. SDE_REG_READ(c, UIDLE_IDLE_STATUS_1);
  75. status->uidle_fal_status_0 =
  76. SDE_REG_READ(c, UIDLE_FAL_STATUS_0);
  77. status->uidle_fal_status_1 =
  78. SDE_REG_READ(c, UIDLE_FAL_STATUS_1);
  79. status->uidle_status =
  80. SDE_REG_READ(c, UIDLE_STATUS);
  81. status->uidle_en_fal10 =
  82. (status->uidle_status & BIT(2)) ? 1 : 0;
  83. }
  84. void sde_hw_uidle_get_cntr(struct sde_hw_uidle *uidle,
  85. struct sde_uidle_cntr *cntr)
  86. {
  87. struct sde_hw_blk_reg_map *c = &uidle->hw;
  88. u32 reg_val;
  89. cntr->fal1_gate_cntr =
  90. SDE_REG_READ(c, UIDLE_FAL1_GATE_CNTR);
  91. cntr->fal10_gate_cntr =
  92. SDE_REG_READ(c, UIDLE_FAL10_GATE_CNTR);
  93. cntr->fal_wait_gate_cntr =
  94. SDE_REG_READ(c, UIDLE_FAL_WAIT_GATE_CNTR);
  95. cntr->fal1_num_transitions_cntr =
  96. SDE_REG_READ(c, UIDLE_FAL1_NUM_TRANSITIONS_CNTR);
  97. cntr->fal10_num_transitions_cntr =
  98. SDE_REG_READ(c, UIDLE_FAL10_NUM_TRANSITIONS_CNTR);
  99. cntr->min_gate_cntr =
  100. SDE_REG_READ(c, UIDLE_MIN_GATE_CNTR);
  101. cntr->max_gate_cntr =
  102. SDE_REG_READ(c, UIDLE_MAX_GATE_CNTR);
  103. /* clear counters after read */
  104. reg_val = SDE_REG_READ(c, UIDLE_GATE_CNTR_CTL);
  105. reg_val = reg_val | BIT(31);
  106. SDE_REG_WRITE(c, UIDLE_GATE_CNTR_CTL, reg_val);
  107. reg_val = (reg_val & ~BIT(31));
  108. SDE_REG_WRITE(c, UIDLE_GATE_CNTR_CTL, reg_val);
  109. }
  110. void sde_hw_uidle_setup_cntr(struct sde_hw_uidle *uidle, bool enable)
  111. {
  112. struct sde_hw_blk_reg_map *c = &uidle->hw;
  113. u32 reg_val;
  114. reg_val = SDE_REG_READ(c, UIDLE_GATE_CNTR_CTL);
  115. reg_val = (reg_val & ~BIT(8)) | (enable ? BIT(8) : 0);
  116. SDE_REG_WRITE(c, UIDLE_GATE_CNTR_CTL, reg_val);
  117. }
  118. void sde_hw_uidle_setup_wd_timer(struct sde_hw_uidle *uidle,
  119. struct sde_uidle_wd_cfg *cfg)
  120. {
  121. struct sde_hw_blk_reg_map *c = &uidle->hw;
  122. u32 val_ctl, val_ctl2, val_ld;
  123. val_ctl = SDE_REG_READ(c, UIDLE_WD_TIMER_CTL);
  124. val_ctl2 = SDE_REG_READ(c, UIDLE_WD_TIMER_CTL2);
  125. val_ld = SDE_REG_READ(c, UIDLE_WD_TIMER_LOAD_VALUE);
  126. val_ctl = (val_ctl & ~BIT(0)) | (cfg->clear ? BIT(0) : 0);
  127. val_ctl2 = (val_ctl2 & ~BIT(0)) | (cfg->enable ? BIT(0) : 0);
  128. val_ctl2 = (val_ctl2 & ~GENMASK(4, 1)) |
  129. ((cfg->granularity & 0xF) << 1);
  130. val_ctl2 = (val_ctl2 & ~BIT(8)) | (cfg->heart_beat ? BIT(8) : 0);
  131. val_ld = cfg->load_value;
  132. SDE_REG_WRITE(c, UIDLE_WD_TIMER_CTL, val_ctl);
  133. SDE_REG_WRITE(c, UIDLE_WD_TIMER_CTL2, val_ctl2);
  134. SDE_REG_WRITE(c, UIDLE_WD_TIMER_LOAD_VALUE, val_ld);
  135. }
/*
 * sde_hw_uidle_setup_ctl - program the top-level uidle control register
 * @uidle: uidle hw instance
 * @cfg:   requested uidle state plus fal10 danger/exit thresholds
 *
 * Updates UIDLE_CTL via read-modify-write, then writes the fal10 veto
 * override register: cleared when uidle is enabled, asserted (bits 31
 * and 0) when uidle is disabled.
 */
void sde_hw_uidle_setup_ctl(struct sde_hw_uidle *uidle,
		struct sde_uidle_ctl_cfg *cfg)
{
	struct sde_hw_blk_reg_map *c = &uidle->hw;
	bool enable = false;
	u32 reg_val, fal10_veto_regval = 0;

	reg_val = SDE_REG_READ(c, UIDLE_CTL);

	/* any state strictly between DISABLE and ENABLE_MAX enables uidle */
	enable = (cfg->uidle_state > UIDLE_STATE_DISABLE &&
			cfg->uidle_state < UIDLE_STATE_ENABLE_MAX);

	/* bit 31: global uidle enable */
	reg_val = (reg_val & ~BIT(31)) | (enable ? BIT(31) : 0);
	/* bit 30: restrict operation to fal1 only */
	reg_val = (reg_val & ~BIT(30)) | (cfg->uidle_state
			== UIDLE_STATE_FAL1_ONLY ? BIT(30) : 0);
	/*
	 * fal10 danger / exit-danger / exit-count fields; the MSK/SHFT
	 * macros are defined in a header outside this file — presumably
	 * sde_hw_top.h, verify there for the exact bit positions.
	 */
	reg_val = (reg_val & ~FAL10_DANGER_MSK) |
		((cfg->fal10_danger << FAL10_DANGER_SHFT) &
		FAL10_DANGER_MSK);
	reg_val = (reg_val & ~FAL10_EXIT_DANGER_MSK) |
		((cfg->fal10_exit_danger << FAL10_EXIT_DANGER_SHFT) &
		FAL10_EXIT_DANGER_MSK);
	reg_val = (reg_val & ~FAL10_EXIT_CNT_MSK) |
		((cfg->fal10_exit_cnt << FAL10_EXIT_CNT_SHFT) &
		FAL10_EXIT_CNT_MSK);

	SDE_REG_WRITE(c, UIDLE_CTL, reg_val);

	/* while disabled, force the fal10 veto override on */
	if (!enable)
		fal10_veto_regval |= (BIT(31) | BIT(0));
	SDE_REG_WRITE(c, UIDLE_FAL10_VETO_OVERRIDE, fal10_veto_regval);
}
  162. static void sde_hw_uilde_active_override(struct sde_hw_uidle *uidle,
  163. bool enable)
  164. {
  165. struct sde_hw_blk_reg_map *c = &uidle->hw;
  166. u32 reg_val = 0;
  167. if (enable)
  168. reg_val = BIT(0) | BIT(31);
  169. SDE_REG_WRITE(c, UIDLE_QACTIVE_HF_OVERRIDE, reg_val);
  170. }
/*
 * sde_hw_uidle_fal10_override - force or release the fal10 veto
 * @uidle:  uidle hw instance
 * @enable: true to assert the veto override (bits 31 and 0), false to
 *          clear it
 */
static void sde_hw_uidle_fal10_override(struct sde_hw_uidle *uidle,
		bool enable)
{
	struct sde_hw_blk_reg_map *c = &uidle->hw;
	u32 reg_val = 0;

	if (enable)
		reg_val = BIT(0) | BIT(31);
	SDE_REG_WRITE(c, UIDLE_FAL10_VETO_OVERRIDE, reg_val);
	/* make sure the veto write is ordered before any subsequent I/O */
	wmb();
}
  181. static inline void _setup_uidle_ops(struct sde_hw_uidle_ops *ops,
  182. unsigned long cap)
  183. {
  184. ops->set_uidle_ctl = sde_hw_uidle_setup_ctl;
  185. ops->setup_wd_timer = sde_hw_uidle_setup_wd_timer;
  186. ops->uidle_setup_cntr = sde_hw_uidle_setup_cntr;
  187. ops->uidle_get_cntr = sde_hw_uidle_get_cntr;
  188. ops->uidle_get_status = sde_hw_uidle_get_status;
  189. if (cap & BIT(SDE_UIDLE_QACTIVE_OVERRIDE))
  190. ops->active_override_enable = sde_hw_uilde_active_override;
  191. ops->uidle_fal10_override = sde_hw_uidle_fal10_override;
  192. }
  193. struct sde_hw_uidle *sde_hw_uidle_init(enum sde_uidle idx,
  194. void __iomem *addr, unsigned long len,
  195. struct sde_mdss_cfg *m)
  196. {
  197. struct sde_hw_uidle *c;
  198. const struct sde_uidle_cfg *cfg;
  199. c = kzalloc(sizeof(*c), GFP_KERNEL);
  200. if (!c)
  201. return ERR_PTR(-ENOMEM);
  202. cfg = _top_offset(idx, m, addr, len, &c->hw);
  203. if (IS_ERR_OR_NULL(cfg)) {
  204. kfree(c);
  205. return ERR_PTR(-EINVAL);
  206. }
  207. /*
  208. * Assign ops
  209. */
  210. c->idx = idx;
  211. c->cap = cfg;
  212. _setup_uidle_ops(&c->ops, c->cap->features);
  213. sde_dbg_reg_register_dump_range(SDE_DBG_NAME, "uidle", c->hw.blk_off,
  214. c->hw.blk_off + c->hw.length, 0);
  215. return c;
  216. }