sde_hw_vbif.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
 */

#include "sde_hwio.h"
#include "sde_hw_catalog.h"
#include "sde_hw_vbif.h"
#include "sde_dbg.h"

#define VBIF_VERSION 0x0000
#define VBIF_CLK_FORCE_CTRL0 0x0008
#define VBIF_CLK_FORCE_CTRL1 0x000C
#define VBIF_QOS_REMAP_00 0x0020
#define VBIF_QOS_REMAP_01 0x0024
#define VBIF_QOS_REMAP_10 0x0028
#define VBIF_QOS_REMAP_11 0x002C
#define VBIF_WRITE_GATHER_EN 0x00AC
#define VBIF_IN_RD_LIM_CONF0 0x00B0
#define VBIF_IN_RD_LIM_CONF1 0x00B4
#define VBIF_IN_RD_LIM_CONF2 0x00B8
#define VBIF_IN_WR_LIM_CONF0 0x00C0
#define VBIF_IN_WR_LIM_CONF1 0x00C4
#define VBIF_IN_WR_LIM_CONF2 0x00C8
#define VBIF_OUT_RD_LIM_CONF0 0x00D0
#define VBIF_OUT_WR_LIM_CONF0 0x00D4
#define VBIF_OUT_AXI_AMEMTYPE_CONF0 0x0160
#define VBIF_OUT_AXI_AMEMTYPE_CONF1 0x0164
#define VBIF_OUT_AXI_ASHARED 0x0170
#define VBIF_OUT_AXI_AINNERSHARED 0x0174
#define VBIF_XIN_PND_ERR 0x0190
#define VBIF_XIN_SRC_ERR 0x0194
#define VBIF_XIN_CLR_ERR 0x019C
#define VBIF_XIN_HALT_CTRL0 0x0200
#define VBIF_XIN_HALT_CTRL1 0x0204
#define VBIF_XINL_QOS_RP_REMAP_000 0x0550
#define VBIF_XINL_QOS_LVL_REMAP_000 0x0590

static void sde_hw_clear_errors(struct sde_hw_vbif *vbif,
                u32 *pnd_errors, u32 *src_errors)
{
        struct sde_hw_blk_reg_map *c;
        u32 pnd, src;

        if (!vbif)
                return;

        c = &vbif->hw;
        pnd = SDE_REG_READ(c, VBIF_XIN_PND_ERR);
        src = SDE_REG_READ(c, VBIF_XIN_SRC_ERR);

        if (pnd_errors)
                *pnd_errors = pnd;
        if (src_errors)
                *src_errors = src;

        SDE_REG_WRITE(c, VBIF_XIN_CLR_ERR, pnd | src);
}
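
/*
 * Worked example (illustrative only): PND_ERR and SRC_ERR appear to hold
 * one status bit per xin client, so a caller interested in a single xin
 * could test the returned masks along these lines:
 *
 *        u32 pnd, src;
 *
 *        vbif->ops.clear_errors(vbif, &pnd, &src);
 *        if ((pnd | src) & BIT(7))
 *                pr_err("vbif error on xin 7\n");
 */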

static void sde_hw_set_mem_type(struct sde_hw_vbif *vbif,
                u32 xin_id, u32 value)
{
        struct sde_hw_blk_reg_map *c;
        u32 reg_off;
        u32 bit_off;
        u32 reg_val;

        /*
         * Bit fields are spaced 4 bits apart, 8 fields per 32-bit register,
         * so 16 bit fields maximum across the two registers. Note that only
         * the low 3 bits of each field are masked and written below.
         */
        if (!vbif || xin_id >= MAX_XIN_COUNT)
                return;

        c = &vbif->hw;

        /* enable cacheable */
        if (xin_id >= 8) {
                xin_id -= 8;
                reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF1;
        } else {
                reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF0;
        }
        bit_off = (xin_id & 0x7) * 4;
        reg_val = SDE_REG_READ(c, reg_off);
        reg_val &= ~(0x7 << bit_off);
        reg_val |= (value & 0x7) << bit_off;
        SDE_REG_WRITE(c, reg_off, reg_val);
}
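
/*
 * Worked example of the field arithmetic above: for xin_id = 10, the id
 * folds to 2 and VBIF_OUT_AXI_AMEMTYPE_CONF1 is selected; bit_off =
 * (2 & 0x7) * 4 = 8, so the memory type value lands in bits [10:8] of
 * that register (the 0x7 mask keeps the low 3 bits of the 4-bit field).
 */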

static void sde_hw_set_mem_type_v1(struct sde_hw_vbif *vbif,
                u32 xin_id, u32 value)
{
        struct sde_hw_blk_reg_map *c;
        u32 reg_val;

        if (!vbif || xin_id >= MAX_XIN_COUNT)
                return;

        sde_hw_set_mem_type(vbif, xin_id, value);

        c = &vbif->hw;

        /* disable outer shareable for this xin */
        reg_val = SDE_REG_READ(c, VBIF_OUT_AXI_ASHARED);
        reg_val &= ~BIT(xin_id);
        SDE_REG_WRITE(c, VBIF_OUT_AXI_ASHARED, reg_val);

        /* disable inner shareable for this xin */
        reg_val = SDE_REG_READ(c, VBIF_OUT_AXI_AINNERSHARED);
        reg_val &= ~BIT(xin_id);
        SDE_REG_WRITE(c, VBIF_OUT_AXI_AINNERSHARED, reg_val);
}

static void sde_hw_set_limit_conf(struct sde_hw_vbif *vbif,
                u32 xin_id, bool rd, u32 limit)
{
        struct sde_hw_blk_reg_map *c = &vbif->hw;
        u32 reg_val;
        u32 reg_off;
        u32 bit_off;

        if (rd)
                reg_off = VBIF_IN_RD_LIM_CONF0;
        else
                reg_off = VBIF_IN_WR_LIM_CONF0;

        /* four 8-bit limit fields per register, one byte lane per xin */
        reg_off += (xin_id / 4) * 4;
        bit_off = (xin_id % 4) * 8;
        reg_val = SDE_REG_READ(c, reg_off);
        reg_val &= ~(0xFF << bit_off);
        reg_val |= (limit & 0xFF) << bit_off;
        SDE_REG_WRITE(c, reg_off, reg_val);
}
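
/*
 * Worked example: for xin_id = 5 with rd = true, reg_off becomes
 * VBIF_IN_RD_LIM_CONF0 + (5 / 4) * 4, i.e. VBIF_IN_RD_LIM_CONF1, and
 * bit_off = (5 % 4) * 8 = 8, so the 8-bit limit occupies byte lane
 * [15:8] of that register.
 */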

static u32 sde_hw_get_limit_conf(struct sde_hw_vbif *vbif,
                u32 xin_id, bool rd)
{
        struct sde_hw_blk_reg_map *c = &vbif->hw;
        u32 reg_val;
        u32 reg_off;
        u32 bit_off;
        u32 limit;

        if (rd)
                reg_off = VBIF_IN_RD_LIM_CONF0;
        else
                reg_off = VBIF_IN_WR_LIM_CONF0;

        reg_off += (xin_id / 4) * 4;
        bit_off = (xin_id % 4) * 8;
        reg_val = SDE_REG_READ(c, reg_off);
        limit = (reg_val >> bit_off) & 0xFF;

        return limit;
}

static void sde_hw_set_halt_ctrl(struct sde_hw_vbif *vbif,
                u32 xin_id, bool enable)
{
        struct sde_hw_blk_reg_map *c = &vbif->hw;
        u32 reg_val;

        reg_val = SDE_REG_READ(c, VBIF_XIN_HALT_CTRL0);

        if (enable)
                reg_val |= BIT(xin_id);
        else
                reg_val &= ~BIT(xin_id);

        SDE_REG_WRITE(c, VBIF_XIN_HALT_CTRL0, reg_val);
}

static bool sde_hw_get_halt_ctrl(struct sde_hw_vbif *vbif,
                u32 xin_id)
{
        struct sde_hw_blk_reg_map *c = &vbif->hw;
        u32 reg_val;

        /* halt requests go to HALT_CTRL0; HALT_CTRL1 reports the ack status */
        reg_val = SDE_REG_READ(c, VBIF_XIN_HALT_CTRL1);

        return !!(reg_val & BIT(xin_id));
}
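
/*
 * A caller would typically pair these two ops as a halt handshake:
 * request the halt, poll HALT_CTRL1 for the ack, then release. A rough
 * sketch (the retry count and delay are placeholders, not values from
 * this file):
 *
 *        int i;
 *
 *        vbif->ops.set_halt_ctrl(vbif, xin_id, true);
 *        for (i = 0; i < 100; i++) {
 *                if (vbif->ops.get_halt_ctrl(vbif, xin_id))
 *                        break;
 *                udelay(10);
 *        }
 *        vbif->ops.set_halt_ctrl(vbif, xin_id, false);
 */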

static void sde_hw_set_qos_remap(struct sde_hw_vbif *vbif,
                u32 xin_id, u32 level, u32 remap_level)
{
        struct sde_hw_blk_reg_map *c;
        u32 reg_val, reg_val_lvl, mask, reg_high, reg_shift;

        if (!vbif)
                return;

        c = &vbif->hw;

        /* per level, one register pair for xins 0-7 and one for xins 8-15 */
        reg_high = ((xin_id & 0x8) >> 3) * 4 + (level * 8);
        reg_shift = (xin_id & 0x7) * 4;

        reg_val = SDE_REG_READ(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high);
        reg_val_lvl = SDE_REG_READ(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high);

        mask = 0x7 << reg_shift;

        reg_val &= ~mask;
        reg_val |= (remap_level << reg_shift) & mask;

        reg_val_lvl &= ~mask;
        reg_val_lvl |= (remap_level << reg_shift) & mask;

        SDE_REG_WRITE(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high, reg_val);
        SDE_REG_WRITE(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high, reg_val_lvl);
}
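
/*
 * Worked example of the offset math: for xin_id = 9 and level = 1,
 * reg_high = ((9 & 0x8) >> 3) * 4 + (1 * 8) = 12, so the writes land at
 * VBIF_XINL_QOS_RP_REMAP_000 + 0xC and VBIF_XINL_QOS_LVL_REMAP_000 + 0xC,
 * and reg_shift = (9 & 0x7) * 4 = 4 places the 3-bit remap value in
 * bits [6:4] of each register.
 */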

static void sde_hw_set_write_gather_en(struct sde_hw_vbif *vbif, u32 xin_id)
{
        struct sde_hw_blk_reg_map *c;
        u32 reg_val;

        if (!vbif || xin_id >= MAX_XIN_COUNT)
                return;

        c = &vbif->hw;

        reg_val = SDE_REG_READ(c, VBIF_WRITE_GATHER_EN);
        reg_val |= BIT(xin_id);
        SDE_REG_WRITE(c, VBIF_WRITE_GATHER_EN, reg_val);
}

static void _setup_vbif_ops(const struct sde_mdss_cfg *m,
                struct sde_hw_vbif_ops *ops, unsigned long cap)
{
        ops->set_limit_conf = sde_hw_set_limit_conf;
        ops->get_limit_conf = sde_hw_get_limit_conf;
        ops->set_halt_ctrl = sde_hw_set_halt_ctrl;
        ops->get_halt_ctrl = sde_hw_get_halt_ctrl;
        if (test_bit(SDE_VBIF_QOS_REMAP, &cap))
                ops->set_qos_remap = sde_hw_set_qos_remap;
        if (IS_SM8150_TARGET(m->hwversion) || IS_SM6150_TARGET(m->hwversion) ||
                        IS_SDMMAGPIE_TARGET(m->hwversion) ||
                        IS_SDMTRINKET_TARGET(m->hwversion) ||
                        IS_BENGAL_TARGET(m->hwversion))
                ops->set_mem_type = sde_hw_set_mem_type_v1;
        else
                ops->set_mem_type = sde_hw_set_mem_type;
        ops->clear_errors = sde_hw_clear_errors;
        ops->set_write_gather_en = sde_hw_set_write_gather_en;
}

static const struct sde_vbif_cfg *_top_offset(enum sde_vbif vbif,
                const struct sde_mdss_cfg *m,
                void __iomem *addr,
                struct sde_hw_blk_reg_map *b)
{
        int i;

        for (i = 0; i < m->vbif_count; i++) {
                if (vbif == m->vbif[i].id) {
                        b->base_off = addr;
                        b->blk_off = m->vbif[i].base;
                        b->length = m->vbif[i].len;
                        b->hwversion = m->hwversion;
                        b->log_mask = SDE_DBG_MASK_VBIF;
                        return &m->vbif[i];
                }
        }

        return ERR_PTR(-EINVAL);
}

struct sde_hw_vbif *sde_hw_vbif_init(enum sde_vbif idx,
                void __iomem *addr,
                const struct sde_mdss_cfg *m)
{
        struct sde_hw_vbif *c;
        const struct sde_vbif_cfg *cfg;

        c = kzalloc(sizeof(*c), GFP_KERNEL);
        if (!c)
                return ERR_PTR(-ENOMEM);

        cfg = _top_offset(idx, m, addr, &c->hw);
        if (IS_ERR_OR_NULL(cfg)) {
                kfree(c);
                return ERR_PTR(-EINVAL);
        }

        /* Assign ops */
        c->idx = idx;
        c->cap = cfg;
        _setup_vbif_ops(m, &c->ops, c->cap->features);

        /* no need to register sub-range in sde dbg, dump entire vbif io base */

        mutex_init(&c->mutex);

        return c;
}

void sde_hw_vbif_destroy(struct sde_hw_vbif *vbif)
{
        if (vbif)
                mutex_destroy(&vbif->mutex);
        kfree(vbif);
}
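
/*
 * Minimal usage sketch for the exported init/destroy pair, assuming a
 * caller that already holds the mapped MDSS I/O base and parsed catalog;
 * "mdss_base", "catalog", "xin_id" and the QoS values below are
 * placeholders, and VBIF_RT is assumed from the catalog's enum sde_vbif:
 *
 *        struct sde_hw_vbif *vbif;
 *
 *        vbif = sde_hw_vbif_init(VBIF_RT, mdss_base, catalog);
 *        if (IS_ERR_OR_NULL(vbif))
 *                return PTR_ERR(vbif);
 *
 *        mutex_lock(&vbif->mutex);
 *        if (vbif->ops.set_qos_remap)
 *                vbif->ops.set_qos_remap(vbif, xin_id, 3, 6);
 *        vbif->ops.set_write_gather_en(vbif, xin_id);
 *        mutex_unlock(&vbif->mutex);
 *
 *        sde_hw_vbif_destroy(vbif);
 */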