// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
 */

#include <linux/iopoll.h>

#include "sde_hwio.h"
#include "sde_hw_catalog.h"
#include "sde_hw_vbif.h"
#include "sde_dbg.h"

#define VBIF_VERSION			0x0000
#define VBIF_CLKON			0x0004
#define VBIF_CLK_FORCE_CTRL0		0x0008
#define VBIF_CLK_FORCE_CTRL1		0x000C
#define VBIF_QOS_REMAP_00		0x0020
#define VBIF_QOS_REMAP_01		0x0024
#define VBIF_QOS_REMAP_10		0x0028
#define VBIF_QOS_REMAP_11		0x002C
#define VBIF_WRITE_GATHER_EN		0x00AC
#define VBIF_IN_RD_LIM_CONF0		0x00B0
#define VBIF_IN_RD_LIM_CONF1		0x00B4
#define VBIF_IN_RD_LIM_CONF2		0x00B8
#define VBIF_IN_WR_LIM_CONF0		0x00C0
#define VBIF_IN_WR_LIM_CONF1		0x00C4
#define VBIF_IN_WR_LIM_CONF2		0x00C8
#define VBIF_OUT_RD_LIM_CONF0		0x00D0
#define VBIF_OUT_WR_LIM_CONF0		0x00D4
#define VBIF_OUT_AXI_AMEMTYPE_CONF0	0x0160
#define VBIF_OUT_AXI_AMEMTYPE_CONF1	0x0164
#define VBIF_OUT_AXI_ASHARED		0x0170
#define VBIF_OUT_AXI_AINNERSHARED	0x0174
#define VBIF_XIN_PND_ERR		0x0190
#define VBIF_XIN_SRC_ERR		0x0194
#define VBIF_XIN_CLR_ERR		0x019C
#define VBIF_XIN_HALT_CTRL0		0x0200
#define VBIF_XIN_HALT_CTRL1		0x0204
#define VBIF_AXI_HALT_CTRL0		0x0208
#define VBIF_AXI_HALT_CTRL1		0x020C
#define VBIF_XINL_QOS_RP_REMAP_000	0x0550
#define VBIF_XINL_QOS_LVL_REMAP_000	0x0590
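
/**
 * sde_hw_clear_errors - latch and clear the vbif error status
 * @vbif: pointer to hardware vbif driver
 * @pnd_errors: if non-NULL, receives the pending error status
 * @src_errors: if non-NULL, receives the source error status
 *
 * Reads both error status registers, then writes the combined bits to
 * the clear register so the errors are acknowledged.
 */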
static void sde_hw_clear_errors(struct sde_hw_vbif *vbif,
		u32 *pnd_errors, u32 *src_errors)
{
	struct sde_hw_blk_reg_map *c;
	u32 pnd, src;

	if (!vbif)
		return;

	c = &vbif->hw;
	pnd = SDE_REG_READ(c, VBIF_XIN_PND_ERR);
	src = SDE_REG_READ(c, VBIF_XIN_SRC_ERR);

	if (pnd_errors)
		*pnd_errors = pnd;
	if (src_errors)
		*src_errors = src;

	SDE_REG_WRITE(c, VBIF_XIN_CLR_ERR, pnd | src);
}
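
/**
 * sde_hw_set_mem_type - program the AXI memory type for an xin client
 * @vbif: pointer to hardware vbif driver
 * @xin_id: client interface identifier
 * @value: memory type value to program (only the low 3 bits are used)
 *
 * Each AMEMTYPE register packs eight 4-bit fields, so clients 0-7 are
 * programmed through CONF0 and clients 8-15 through CONF1.
 */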
static void sde_hw_set_mem_type(struct sde_hw_vbif *vbif,
		u32 xin_id, u32 value)
{
	struct sde_hw_blk_reg_map *c;
	u32 reg_off;
	u32 bit_off;
	u32 reg_val;

	/*
	 * Assume 4 bits per bit field, 8 fields per 32-bit register, so
	 * 16 bit fields maximum across two registers.
	 */
	if (!vbif || xin_id >= MAX_XIN_COUNT)
		return;

	c = &vbif->hw;

	/* enable cacheable */
	if (xin_id >= 8) {
		xin_id -= 8;
		reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF1;
	} else {
		reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF0;
	}
	bit_off = (xin_id & 0x7) * 4;
	reg_val = SDE_REG_READ(c, reg_off);
	reg_val &= ~(0x7 << bit_off);
	reg_val |= (value & 0x7) << bit_off;
	SDE_REG_WRITE(c, reg_off, reg_val);
}
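
/**
 * sde_hw_set_mem_type_v1 - program memory type and disable shareability
 * @vbif: pointer to hardware vbif driver
 * @xin_id: client interface identifier
 * @value: memory type value to program for the client
 *
 * In addition to programming the memory type, clear the client's outer-
 * and inner-shareable attribute bits.
 */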
static void sde_hw_set_mem_type_v1(struct sde_hw_vbif *vbif,
		u32 xin_id, u32 value)
{
	struct sde_hw_blk_reg_map *c;
	u32 reg_val;

	if (!vbif || xin_id >= MAX_XIN_COUNT)
		return;

	sde_hw_set_mem_type(vbif, xin_id, value);

	c = &vbif->hw;

	/* disable outer shareable */
	reg_val = SDE_REG_READ(c, VBIF_OUT_AXI_ASHARED);
	reg_val &= ~BIT(xin_id);
	SDE_REG_WRITE(c, VBIF_OUT_AXI_ASHARED, reg_val);

	/* disable inner shareable */
	reg_val = SDE_REG_READ(c, VBIF_OUT_AXI_AINNERSHARED);
	reg_val &= ~BIT(xin_id);
	SDE_REG_WRITE(c, VBIF_OUT_AXI_AINNERSHARED, reg_val);
}
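
/**
 * sde_hw_set_limit_conf - set a client's outstanding-transaction limit
 * @vbif: pointer to hardware vbif driver
 * @xin_id: client interface identifier
 * @rd: true to program the read limit, false for the write limit
 * @limit: outstanding transaction limit
 *
 * Four 8-bit limit fields are packed into each LIM_CONF register.
 */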
static void sde_hw_set_limit_conf(struct sde_hw_vbif *vbif,
		u32 xin_id, bool rd, u32 limit)
{
	struct sde_hw_blk_reg_map *c = &vbif->hw;
	u32 reg_val;
	u32 reg_off;
	u32 bit_off;

	if (rd)
		reg_off = VBIF_IN_RD_LIM_CONF0;
	else
		reg_off = VBIF_IN_WR_LIM_CONF0;

	reg_off += (xin_id / 4) * 4;
	bit_off = (xin_id % 4) * 8;
	reg_val = SDE_REG_READ(c, reg_off);
	reg_val &= ~(0xFF << bit_off);
	reg_val |= (limit & 0xFF) << bit_off;
	SDE_REG_WRITE(c, reg_off, reg_val);
}
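
/**
 * sde_hw_get_limit_conf - read back a client's outstanding-transaction limit
 * @vbif: pointer to hardware vbif driver
 * @xin_id: client interface identifier
 * @rd: true to read the read limit, false for the write limit
 *
 * Return: the 8-bit limit currently programmed for the client
 */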
static u32 sde_hw_get_limit_conf(struct sde_hw_vbif *vbif,
		u32 xin_id, bool rd)
{
	struct sde_hw_blk_reg_map *c = &vbif->hw;
	u32 reg_val;
	u32 reg_off;
	u32 bit_off;
	u32 limit;

	if (rd)
		reg_off = VBIF_IN_RD_LIM_CONF0;
	else
		reg_off = VBIF_IN_WR_LIM_CONF0;

	reg_off += (xin_id / 4) * 4;
	bit_off = (xin_id % 4) * 8;
	reg_val = SDE_REG_READ(c, reg_off);
	limit = (reg_val >> bit_off) & 0xFF;

	return limit;
}
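
/**
 * sde_hw_set_xin_halt - request or release a halt on one xin client
 * @vbif: pointer to hardware vbif driver
 * @xin_id: client interface identifier
 * @enable: true to request the halt, false to release it
 *
 * A typical halt sequence (sketch only; the real caller, locking and
 * timeout handling live above this layer):
 *
 *	vbif->ops.set_xin_halt(vbif, xin_id, true);
 *	while (!vbif->ops.get_xin_halt_status(vbif, xin_id))
 *		; // poll with a timeout
 *	// ... reprogram the client ...
 *	vbif->ops.set_xin_halt(vbif, xin_id, false);
 */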
static void sde_hw_set_xin_halt(struct sde_hw_vbif *vbif,
		u32 xin_id, bool enable)
{
	struct sde_hw_blk_reg_map *c = &vbif->hw;
	u32 reg_val;

	reg_val = SDE_REG_READ(c, VBIF_XIN_HALT_CTRL0);

	if (enable)
		reg_val |= BIT(xin_id);
	else
		reg_val &= ~BIT(xin_id);

	SDE_REG_WRITE(c, VBIF_XIN_HALT_CTRL0, reg_val);
	wmb(); /* make sure that the xin client is halted */
}
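
/**
 * sde_hw_get_xin_halt_status - check whether a client halt is acknowledged
 * @vbif: pointer to hardware vbif driver
 * @xin_id: client interface identifier
 *
 * Return: true if the halt acknowledge bit for the client is set in
 * VBIF_XIN_HALT_CTRL1
 */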
static bool sde_hw_get_xin_halt_status(struct sde_hw_vbif *vbif,
		u32 xin_id)
{
	struct sde_hw_blk_reg_map *c = &vbif->hw;
	u32 reg_val;

	reg_val = SDE_REG_READ(c, VBIF_XIN_HALT_CTRL1);

	return ((reg_val >> 16) & BIT(xin_id)) ? true : false;
}
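
/**
 * sde_hw_set_axi_halt - force the vbif clock on and request an AXI halt
 * @vbif: pointer to hardware vbif driver
 */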
static void sde_hw_set_axi_halt(struct sde_hw_vbif *vbif)
{
	struct sde_hw_blk_reg_map *c = &vbif->hw;

	SDE_REG_WRITE(c, VBIF_CLKON, BIT(0));
	SDE_REG_WRITE(c, VBIF_AXI_HALT_CTRL0, BIT(0));
	wmb(); /* make sure that axi transactions are halted */
}
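
/**
 * sde_hw_get_axi_halt_status - poll for the AXI halt acknowledge
 * @vbif: pointer to hardware vbif driver
 *
 * Polls VBIF_AXI_HALT_CTRL1 every 100 us for up to 4 ms; typically
 * called right after sde_hw_set_axi_halt().
 *
 * Return: zero once the halt is acknowledged, -ETIMEDOUT otherwise
 */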
static int sde_hw_get_axi_halt_status(struct sde_hw_vbif *vbif)
{
	struct sde_hw_blk_reg_map *c = &vbif->hw;
	int ctrl = 0;

	return readl_poll_timeout(c->base_off + c->blk_off +
			VBIF_AXI_HALT_CTRL1, ctrl, ctrl & BIT(0), 100, 4000);
}
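
/**
 * sde_hw_set_qos_remap - remap a client's QoS priority level
 * @vbif: pointer to hardware vbif driver
 * @xin_id: client interface identifier
 * @level: incoming QoS level being remapped
 * @remap_level: QoS level to remap it to
 *
 * The register offset is derived from the level and from whether the
 * client sits in the lower or upper half of the xin id space; both the
 * RP and LVL remap registers are updated for the client's 4-bit field.
 */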
static void sde_hw_set_qos_remap(struct sde_hw_vbif *vbif,
		u32 xin_id, u32 level, u32 remap_level)
{
	struct sde_hw_blk_reg_map *c;
	u32 reg_val, reg_val_lvl, mask, reg_high, reg_shift;

	if (!vbif)
		return;

	c = &vbif->hw;

	reg_high = ((xin_id & 0x8) >> 3) * 4 + (level * 8);
	reg_shift = (xin_id & 0x7) * 4;

	reg_val = SDE_REG_READ(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high);
	reg_val_lvl = SDE_REG_READ(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high);

	mask = 0x7 << reg_shift;

	reg_val &= ~mask;
	reg_val |= (remap_level << reg_shift) & mask;

	reg_val_lvl &= ~mask;
	reg_val_lvl |= (remap_level << reg_shift) & mask;

	SDE_REG_WRITE(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high, reg_val);
	SDE_REG_WRITE(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high, reg_val_lvl);
}
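
/**
 * sde_hw_set_write_gather_en - enable write gathering for an xin client
 * @vbif: pointer to hardware vbif driver
 * @xin_id: client interface identifier
 */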
static void sde_hw_set_write_gather_en(struct sde_hw_vbif *vbif, u32 xin_id)
{
	struct sde_hw_blk_reg_map *c;
	u32 reg_val;

	if (!vbif || xin_id >= MAX_XIN_COUNT)
		return;

	c = &vbif->hw;

	reg_val = SDE_REG_READ(c, VBIF_WRITE_GATHER_EN);
	reg_val |= BIT(xin_id);
	SDE_REG_WRITE(c, VBIF_WRITE_GATHER_EN, reg_val);
}
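
/**
 * _setup_vbif_ops - populate the vbif function table from hardware features
 * @m: pointer to the MDSS catalog
 * @ops: function table to populate
 * @cap: feature capability bitmask from the vbif catalog entry
 */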
static void _setup_vbif_ops(const struct sde_mdss_cfg *m,
		struct sde_hw_vbif_ops *ops, unsigned long cap)
{
	ops->set_limit_conf = sde_hw_set_limit_conf;
	ops->get_limit_conf = sde_hw_get_limit_conf;
	ops->set_axi_halt = sde_hw_set_axi_halt;
	ops->get_axi_halt_status = sde_hw_get_axi_halt_status;
	ops->set_xin_halt = sde_hw_set_xin_halt;
	ops->get_xin_halt_status = sde_hw_get_xin_halt_status;
	if (test_bit(SDE_VBIF_QOS_REMAP, &cap))
		ops->set_qos_remap = sde_hw_set_qos_remap;
	if (test_bit(SDE_VBIF_DISABLE_SHAREABLE, &cap))
		ops->set_mem_type = sde_hw_set_mem_type_v1;
	else
		ops->set_mem_type = sde_hw_set_mem_type;
	ops->clear_errors = sde_hw_clear_errors;
	ops->set_write_gather_en = sde_hw_set_write_gather_en;
}
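
/**
 * _top_offset - look up a vbif's catalog entry and map its register block
 * @vbif: vbif identifier to look up
 * @m: pointer to the MDSS catalog
 * @addr: mapped register I/O base address
 * @b: register map block to fill in
 *
 * Return: the matching catalog entry, or ERR_PTR(-EINVAL) if not found
 */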
static const struct sde_vbif_cfg *_top_offset(enum sde_vbif vbif,
		const struct sde_mdss_cfg *m,
		void __iomem *addr,
		struct sde_hw_blk_reg_map *b)
{
	int i;

	for (i = 0; i < m->vbif_count; i++) {
		if (vbif == m->vbif[i].id) {
			b->base_off = addr;
			b->blk_off = m->vbif[i].base;
			b->length = m->vbif[i].len;
			b->hwversion = m->hwversion;
			b->log_mask = SDE_DBG_MASK_VBIF;
			return &m->vbif[i];
		}
	}

	return ERR_PTR(-EINVAL);
}
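
/**
 * sde_hw_vbif_init - allocate and initialize a vbif driver context
 * @idx: vbif identifier from the catalog
 * @addr: mapped register I/O base address
 * @m: pointer to the MDSS catalog
 *
 * Return: an allocated sde_hw_vbif on success, an ERR_PTR on failure
 *
 * A minimal usage sketch (hypothetical caller; "mmio" and "catalog"
 * stand in for the mapped register base and parsed catalog of the
 * actual driver):
 *
 *	struct sde_hw_vbif *vbif;
 *
 *	vbif = sde_hw_vbif_init(VBIF_RT, mmio, catalog);
 *	if (IS_ERR_OR_NULL(vbif))
 *		return PTR_ERR(vbif);
 *	vbif->ops.set_write_gather_en(vbif, xin_id);
 *	// ...
 *	sde_hw_vbif_destroy(vbif);
 */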
struct sde_hw_vbif *sde_hw_vbif_init(enum sde_vbif idx,
		void __iomem *addr,
		const struct sde_mdss_cfg *m)
{
	struct sde_hw_vbif *c;
	const struct sde_vbif_cfg *cfg;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	cfg = _top_offset(idx, m, addr, &c->hw);
	if (IS_ERR_OR_NULL(cfg)) {
		kfree(c);
		return ERR_PTR(-EINVAL);
	}

	/* assign ops */
	c->idx = idx;
	c->cap = cfg;
	_setup_vbif_ops(m, &c->ops, c->cap->features);

	/* no need to register sub-range in sde dbg, dump entire vbif io base */

	mutex_init(&c->mutex);

	return c;
}
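
/**
 * sde_hw_vbif_destroy - destroy a vbif driver context
 * @vbif: pointer to hardware vbif driver, may be NULL
 */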
void sde_hw_vbif_destroy(struct sde_hw_vbif *vbif)
{
	if (vbif)
		mutex_destroy(&vbif->mutex);
	kfree(vbif);
}