// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
 */

#include <linux/iopoll.h>

#include "sde_hwio.h"
#include "sde_hw_catalog.h"
#include "sde_hw_vbif.h"
#include "sde_dbg.h"

#define VBIF_VERSION			0x0000
#define VBIF_CLKON			0x0004
#define VBIF_CLK_FORCE_CTRL0		0x0008
#define VBIF_CLK_FORCE_CTRL1		0x000C
#define VBIF_QOS_REMAP_00		0x0020
#define VBIF_QOS_REMAP_01		0x0024
#define VBIF_QOS_REMAP_10		0x0028
#define VBIF_QOS_REMAP_11		0x002C
#define VBIF_WRITE_GATHER_EN		0x00AC
#define VBIF_IN_RD_LIM_CONF0		0x00B0
#define VBIF_IN_RD_LIM_CONF1		0x00B4
#define VBIF_IN_RD_LIM_CONF2		0x00B8
#define VBIF_IN_WR_LIM_CONF0		0x00C0
#define VBIF_IN_WR_LIM_CONF1		0x00C4
#define VBIF_IN_WR_LIM_CONF2		0x00C8
#define VBIF_OUT_RD_LIM_CONF0		0x00D0
#define VBIF_OUT_WR_LIM_CONF0		0x00D4
#define VBIF_OUT_AXI_AMEMTYPE_CONF0	0x0160
#define VBIF_OUT_AXI_AMEMTYPE_CONF1	0x0164
#define VBIF_OUT_AXI_ASHARED		0x0170
#define VBIF_OUT_AXI_AINNERSHARED	0x0174
#define VBIF_XIN_PND_ERR		0x0190
#define VBIF_XIN_SRC_ERR		0x0194
#define VBIF_XIN_CLR_ERR		0x019C
#define VBIF_XIN_HALT_CTRL0		0x0200
#define VBIF_XIN_HALT_CTRL1		0x0204
#define VBIF_AXI_HALT_CTRL0		0x0208
#define VBIF_AXI_HALT_CTRL1		0x020C
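
/**
 * sde_hw_clear_errors - read and clear any pending/source transaction errors
 * @vbif: pointer to hardware vbif context
 * @pnd_errors: if non-NULL, returns the pending error status
 * @src_errors: if non-NULL, returns the source error status
 */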
static void sde_hw_clear_errors(struct sde_hw_vbif *vbif,
		u32 *pnd_errors, u32 *src_errors)
{
	struct sde_hw_blk_reg_map *c;
	u32 pnd, src;

	if (!vbif)
		return;

	c = &vbif->hw;
	pnd = SDE_REG_READ(c, VBIF_XIN_PND_ERR);
	src = SDE_REG_READ(c, VBIF_XIN_SRC_ERR);

	if (pnd_errors)
		*pnd_errors = pnd;
	if (src_errors)
		*src_errors = src;

	SDE_REG_WRITE(c, VBIF_XIN_CLR_ERR, pnd | src);
}
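
/**
 * sde_hw_set_mem_type - program the AXI memory type for the given client
 * @vbif: pointer to hardware vbif context
 * @xin_id: client interface identifier
 * @value: memory type value; only the lower 3 bits of each 4-bit field
 *	are programmed
 */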
static void sde_hw_set_mem_type(struct sde_hw_vbif *vbif,
		u32 xin_id, u32 value)
{
	struct sde_hw_blk_reg_map *c;
	u32 reg_off;
	u32 bit_off;
	u32 reg_val;

	/*
	 * Assume 4 bits per bit field, 8 fields per 32-bit register, so
	 * 16 bit fields maximum across two registers
	 */
	if (!vbif || xin_id >= MAX_XIN_COUNT)
		return;

	c = &vbif->hw;

	/* enable cacheable */
	if (xin_id >= 8) {
		xin_id -= 8;
		reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF1;
	} else {
		reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF0;
	}

	bit_off = (xin_id & 0x7) * 4;
	reg_val = SDE_REG_READ(c, reg_off);
	reg_val &= ~(0x7 << bit_off);
	reg_val |= (value & 0x7) << bit_off;
	SDE_REG_WRITE(c, reg_off, reg_val);
}
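
/**
 * sde_hw_set_mem_type_v1 - program the memory type, then clear the outer
 *	and inner shareable attributes for the given client
 * @vbif: pointer to hardware vbif context
 * @xin_id: client interface identifier
 * @value: memory type value
 */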
static void sde_hw_set_mem_type_v1(struct sde_hw_vbif *vbif,
		u32 xin_id, u32 value)
{
	struct sde_hw_blk_reg_map *c;
	u32 reg_val;

	if (!vbif || xin_id >= MAX_XIN_COUNT)
		return;

	sde_hw_set_mem_type(vbif, xin_id, value);

	c = &vbif->hw;

	/* disable outer shareable */
	reg_val = SDE_REG_READ(c, VBIF_OUT_AXI_ASHARED);
	reg_val &= ~BIT(xin_id);
	SDE_REG_WRITE(c, VBIF_OUT_AXI_ASHARED, reg_val);

	/* disable inner shareable */
	reg_val = SDE_REG_READ(c, VBIF_OUT_AXI_AINNERSHARED);
	reg_val &= ~BIT(xin_id);
	SDE_REG_WRITE(c, VBIF_OUT_AXI_AINNERSHARED, reg_val);
}
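
/**
 * sde_hw_set_limit_conf - set the outstanding transaction limit of a client
 * @vbif: pointer to hardware vbif context
 * @xin_id: client interface identifier
 * @rd: true to program the read limit, false for the write limit
 * @limit: outstanding transaction limit; one 8-bit field per client,
 *	four clients per register
 */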
static void sde_hw_set_limit_conf(struct sde_hw_vbif *vbif,
		u32 xin_id, bool rd, u32 limit)
{
	struct sde_hw_blk_reg_map *c = &vbif->hw;
	u32 reg_val;
	u32 reg_off;
	u32 bit_off;

	if (rd)
		reg_off = VBIF_IN_RD_LIM_CONF0;
	else
		reg_off = VBIF_IN_WR_LIM_CONF0;

	reg_off += (xin_id / 4) * 4;
	bit_off = (xin_id % 4) * 8;
	reg_val = SDE_REG_READ(c, reg_off);
	reg_val &= ~(0xFF << bit_off);
	reg_val |= (limit & 0xFF) << bit_off;
	SDE_REG_WRITE(c, reg_off, reg_val);
}
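
/**
 * sde_hw_get_limit_conf - get the current outstanding transaction limit
 * @vbif: pointer to hardware vbif context
 * @xin_id: client interface identifier
 * @rd: true to read the read limit, false for the write limit
 * Return: the programmed 8-bit limit value
 */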
static u32 sde_hw_get_limit_conf(struct sde_hw_vbif *vbif,
		u32 xin_id, bool rd)
{
	struct sde_hw_blk_reg_map *c = &vbif->hw;
	u32 reg_val;
	u32 reg_off;
	u32 bit_off;
	u32 limit;

	if (rd)
		reg_off = VBIF_IN_RD_LIM_CONF0;
	else
		reg_off = VBIF_IN_WR_LIM_CONF0;

	reg_off += (xin_id / 4) * 4;
	bit_off = (xin_id % 4) * 8;
	reg_val = SDE_REG_READ(c, reg_off);
	limit = (reg_val >> bit_off) & 0xFF;

	return limit;
}
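
/**
 * sde_hw_set_xin_halt - request or release the halt of a single client
 * @vbif: pointer to hardware vbif context
 * @xin_id: client interface identifier
 * @enable: true to halt the client, false to resume it
 */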
static void sde_hw_set_xin_halt(struct sde_hw_vbif *vbif,
		u32 xin_id, bool enable)
{
	struct sde_hw_blk_reg_map *c = &vbif->hw;
	u32 reg_val;

	reg_val = SDE_REG_READ(c, VBIF_XIN_HALT_CTRL0);

	if (enable)
		reg_val |= BIT(xin_id);
	else
		reg_val &= ~BIT(xin_id);

	SDE_REG_WRITE(c, VBIF_XIN_HALT_CTRL0, reg_val);
	wmb(); /* make sure that xin client halted */
}
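
/**
 * sde_hw_get_xin_halt_status - check whether a client halt has been
 *	acknowledged; the acknowledge bits live in the upper 16 bits
 *	of VBIF_XIN_HALT_CTRL1
 * @vbif: pointer to hardware vbif context
 * @xin_id: client interface identifier
 * Return: true if the halt is acknowledged
 */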
static bool sde_hw_get_xin_halt_status(struct sde_hw_vbif *vbif,
		u32 xin_id)
{
	struct sde_hw_blk_reg_map *c = &vbif->hw;
	u32 reg_val;

	reg_val = SDE_REG_READ(c, VBIF_XIN_HALT_CTRL1);

	return ((reg_val >> 16) & BIT(xin_id)) ? true : false;
}
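
/**
 * sde_hw_set_axi_halt - force the vbif clock on and request an AXI halt
 * @vbif: pointer to hardware vbif context
 */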
static void sde_hw_set_axi_halt(struct sde_hw_vbif *vbif)
{
	struct sde_hw_blk_reg_map *c = &vbif->hw;

	SDE_REG_WRITE(c, VBIF_CLKON, BIT(0));
	SDE_REG_WRITE(c, VBIF_AXI_HALT_CTRL0, BIT(0));
	wmb(); /* make sure that axi transactions are halted */
}
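
/**
 * sde_hw_get_axi_halt_status - poll for the AXI halt acknowledgment
 * @vbif: pointer to hardware vbif context
 * Return: 0 once the halt is acknowledged, -ETIMEDOUT on timeout
 */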
static int sde_hw_get_axi_halt_status(struct sde_hw_vbif *vbif)
{
	struct sde_hw_blk_reg_map *c = &vbif->hw;
	int ctrl = 0;

	return read_poll_timeout(sde_reg_read, ctrl, (ctrl & BIT(0)),
			100, 4000, false, c, VBIF_AXI_HALT_CTRL1);
}
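
/**
 * sde_hw_set_qos_remap - set the QoS priority and level remap of a client
 * @vbif: pointer to hardware vbif context
 * @xin_id: client interface identifier
 * @level: priority level being remapped
 * @rp_remap: remapped priority value
 * @lvl_remap: remapped level value
 *
 * Each priority level has a pair of remap registers: the low register
 * covers clients 0-7 and the high register clients 8-15, with a 4-bit
 * field per client.
 */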
static void sde_hw_set_qos_remap(struct sde_hw_vbif *vbif,
		u32 xin_id, u32 level, u32 rp_remap, u32 lvl_remap)
{
	struct sde_hw_blk_reg_map *c;
	u32 reg_val, reg_val_lvl, mask, reg_high, reg_shift;

	if (!vbif)
		return;

	c = &vbif->hw;

	reg_high = ((xin_id & 0x8) >> 3) * 4 + (level * 8);
	reg_shift = (xin_id & 0x7) * 4;

	reg_val = SDE_REG_READ(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high);
	reg_val_lvl = SDE_REG_READ(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high);

	mask = 0x7 << reg_shift;

	reg_val &= ~mask;
	reg_val |= (rp_remap << reg_shift) & mask;

	reg_val_lvl &= ~mask;
	reg_val_lvl |= (lvl_remap << reg_shift) & mask;

	SDE_REG_WRITE(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high, reg_val);
	SDE_REG_WRITE(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high, reg_val_lvl);
}
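
/**
 * sde_hw_set_write_gather_en - enable write gathering for the given client
 * @vbif: pointer to hardware vbif context
 * @xin_id: client interface identifier
 */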
static void sde_hw_set_write_gather_en(struct sde_hw_vbif *vbif, u32 xin_id)
{
	struct sde_hw_blk_reg_map *c;
	u32 reg_val;

	if (!vbif || xin_id >= MAX_XIN_COUNT)
		return;

	c = &vbif->hw;
	reg_val = SDE_REG_READ(c, VBIF_WRITE_GATHER_EN);
	reg_val |= BIT(xin_id);
	SDE_REG_WRITE(c, VBIF_WRITE_GATHER_EN, reg_val);
}
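
/**
 * _setup_vbif_ops - populate the vbif function table based on feature bits
 * @m: pointer to MDSS catalog data
 * @ops: function table to populate
 * @cap: capability feature bits from the catalog
 */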
static void _setup_vbif_ops(const struct sde_mdss_cfg *m,
		struct sde_hw_vbif_ops *ops, unsigned long cap)
{
	ops->set_limit_conf = sde_hw_set_limit_conf;
	ops->get_limit_conf = sde_hw_get_limit_conf;
	ops->set_axi_halt = sde_hw_set_axi_halt;
	ops->get_axi_halt_status = sde_hw_get_axi_halt_status;
	ops->set_xin_halt = sde_hw_set_xin_halt;
	ops->get_xin_halt_status = sde_hw_get_xin_halt_status;
	if (test_bit(SDE_VBIF_QOS_REMAP, &cap))
		ops->set_qos_remap = sde_hw_set_qos_remap;
	if (test_bit(SDE_VBIF_DISABLE_SHAREABLE, &cap))
		ops->set_mem_type = sde_hw_set_mem_type_v1;
	else
		ops->set_mem_type = sde_hw_set_mem_type;
	ops->clear_errors = sde_hw_clear_errors;
	ops->set_write_gather_en = sde_hw_set_write_gather_en;
}
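
/**
 * _top_offset - look up the catalog entry for the given vbif instance and
 *	fill in its register block mapping
 * @vbif: vbif instance identifier
 * @m: pointer to MDSS catalog data
 * @addr: mapped register I/O base address
 * @b: register block mapping to fill in
 * Return: catalog entry on success, error pointer if the id is not found
 */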
static const struct sde_vbif_cfg *_top_offset(enum sde_vbif vbif,
		const struct sde_mdss_cfg *m,
		void __iomem *addr,
		struct sde_hw_blk_reg_map *b)
{
	int i;

	for (i = 0; i < m->vbif_count; i++) {
		if (vbif == m->vbif[i].id) {
			b->base_off = addr;
			b->blk_off = m->vbif[i].base;
			b->length = m->vbif[i].len;
			b->hw_rev = m->hw_rev;
			b->log_mask = SDE_DBG_MASK_VBIF;
			return &m->vbif[i];
		}
	}

	return ERR_PTR(-EINVAL);
}
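
/**
 * sde_hw_vbif_init - initialize the vbif driver context for an instance
 * @idx: vbif instance identifier
 * @addr: mapped register I/O base address
 * @m: pointer to MDSS catalog data
 * Return: allocated context on success, error pointer on failure; release
 *	with sde_hw_vbif_destroy()
 */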
struct sde_hw_vbif *sde_hw_vbif_init(enum sde_vbif idx,
		void __iomem *addr,
		const struct sde_mdss_cfg *m)
{
	struct sde_hw_vbif *c;
	const struct sde_vbif_cfg *cfg;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	cfg = _top_offset(idx, m, addr, &c->hw);
	if (IS_ERR_OR_NULL(cfg)) {
		kfree(c);
		return ERR_PTR(-EINVAL);
	}

	/* assign ops */
	c->idx = idx;
	c->cap = cfg;
	_setup_vbif_ops(m, &c->ops, c->cap->features);

	/* no need to register sub-range in sde dbg, dump entire vbif io base */

	mutex_init(&c->mutex);

	return c;
}
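
/**
 * sde_hw_vbif_destroy - destroy a context allocated by sde_hw_vbif_init
 * @vbif: pointer to hardware vbif context, may be NULL
 */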
void sde_hw_vbif_destroy(struct sde_hw_vbif *vbif)
{
	if (vbif)
		mutex_destroy(&vbif->mutex);
	kfree(vbif);
}