  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright 2019 NXP.
  4. */
  5. #include <linux/delay.h>
  6. #include <linux/dma-mapping.h>
  7. #include <linux/interrupt.h>
  8. #include <linux/platform_device.h>
  9. #include <linux/slab.h>
  10. #include "dcss-dev.h"
  11. #define DCSS_CTXLD_CONTROL_STATUS 0x0
  12. #define CTXLD_ENABLE BIT(0)
  13. #define ARB_SEL BIT(1)
  14. #define RD_ERR_EN BIT(2)
  15. #define DB_COMP_EN BIT(3)
  16. #define SB_HP_COMP_EN BIT(4)
  17. #define SB_LP_COMP_EN BIT(5)
  18. #define DB_PEND_SB_REC_EN BIT(6)
  19. #define SB_PEND_DISP_ACTIVE_EN BIT(7)
  20. #define AHB_ERR_EN BIT(8)
  21. #define RD_ERR BIT(16)
  22. #define DB_COMP BIT(17)
  23. #define SB_HP_COMP BIT(18)
  24. #define SB_LP_COMP BIT(19)
  25. #define DB_PEND_SB_REC BIT(20)
  26. #define SB_PEND_DISP_ACTIVE BIT(21)
  27. #define AHB_ERR BIT(22)
  28. #define DCSS_CTXLD_DB_BASE_ADDR 0x10
  29. #define DCSS_CTXLD_DB_COUNT 0x14
  30. #define DCSS_CTXLD_SB_BASE_ADDR 0x18
  31. #define DCSS_CTXLD_SB_COUNT 0x1C
  32. #define SB_HP_COUNT_POS 0
  33. #define SB_HP_COUNT_MASK 0xffff
  34. #define SB_LP_COUNT_POS 16
  35. #define SB_LP_COUNT_MASK 0xffff0000
  36. #define DCSS_AHB_ERR_ADDR 0x20
  37. #define CTXLD_IRQ_COMPLETION (DB_COMP | SB_HP_COMP | SB_LP_COMP)
  38. #define CTXLD_IRQ_ERROR (RD_ERR | DB_PEND_SB_REC | AHB_ERR)
  39. /* The following sizes are in context loader entries, 8 bytes each. */
  40. #define CTXLD_DB_CTX_ENTRIES 1024 /* max 65536 */
  41. #define CTXLD_SB_LP_CTX_ENTRIES 10240 /* max 65536 */
  42. #define CTXLD_SB_HP_CTX_ENTRIES 20000 /* max 65536 */
  43. #define CTXLD_SB_CTX_ENTRIES (CTXLD_SB_LP_CTX_ENTRIES + \
  44. CTXLD_SB_HP_CTX_ENTRIES)
  45. /* Sizes, in entries, of the DB, SB_HP and SB_LP context regions. */
  46. static u16 dcss_ctxld_ctx_size[3] = {
  47. CTXLD_DB_CTX_ENTRIES,
  48. CTXLD_SB_HP_CTX_ENTRIES,
  49. CTXLD_SB_LP_CTX_ENTRIES
  50. };
/* this represents an entry in the context loader map */
struct dcss_ctxld_item {
	u32 val;	/* value to be written */
	u32 ofs;	/* register offset to write it to */
};
  56. #define CTX_ITEM_SIZE sizeof(struct dcss_ctxld_item)
struct dcss_ctxld {
	struct device *dev;
	void __iomem *ctxld_reg;	/* mapped CTXLD register block */
	int irq;
	bool irq_en;	/* tracks enable_irq()/disable_irq() state */

	/* double-buffered context regions; index is the context set (0/1) */
	struct dcss_ctxld_item *db[2];
	struct dcss_ctxld_item *sb_hp[2];
	struct dcss_ctxld_item *sb_lp[2];	/* points into the sb_hp buffer */

	dma_addr_t db_paddr[2];
	dma_addr_t sb_paddr[2];

	u16 ctx_size[2][3]; /* holds the sizes of DB, SB_HP and SB_LP ctx */
	u8 current_ctx;	/* context set currently accepting register writes */

	bool in_use;	/* a context load is in flight in the hardware */
	bool armed;	/* a commit was requested; next kick starts the load */

	spinlock_t lock; /* protects concurent access to private data */
};
/*
 * CTXLD interrupt handler.
 *
 * On completion (completion bit set, CTXLD_ENABLE deasserted, load in
 * flight): mark the loader free and notify the core via disable_callback,
 * if installed. On error: log the state of the context that was being
 * loaded. In both cases the raised status bits are cleared before return.
 */
static irqreturn_t dcss_ctxld_irq_handler(int irq, void *data)
{
	struct dcss_ctxld *ctxld = data;
	struct dcss_dev *dcss = dcss_drv_dev_to_dcss(ctxld->dev);
	u32 irq_status;

	irq_status = dcss_readl(ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);

	if (irq_status & CTXLD_IRQ_COMPLETION &&
	    !(irq_status & CTXLD_ENABLE) && ctxld->in_use) {
		ctxld->in_use = false;

		if (dcss && dcss->disable_callback)
			dcss->disable_callback(dcss);
	} else if (irq_status & CTXLD_IRQ_ERROR) {
		/*
		 * Except for throwing an error message and clearing the status
		 * register, there's not much we can do here.
		 */
		dev_err(ctxld->dev, "ctxld: error encountered: %08x\n",
			irq_status);
		/*
		 * current_ctx is toggled right after a commit, so the set
		 * that was being loaded is current_ctx ^ 1.
		 */
		dev_err(ctxld->dev, "ctxld: db=%d, sb_hp=%d, sb_lp=%d\n",
			ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_DB],
			ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_SB_HP],
			ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_SB_LP]);
	}

	/* acknowledge whatever completion/error bits were raised */
	dcss_clr(irq_status & (CTXLD_IRQ_ERROR | CTXLD_IRQ_COMPLETION),
		 ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);

	return IRQ_HANDLED;
}
  100. static int dcss_ctxld_irq_config(struct dcss_ctxld *ctxld,
  101. struct platform_device *pdev)
  102. {
  103. int ret;
  104. ctxld->irq = platform_get_irq_byname(pdev, "ctxld");
  105. if (ctxld->irq < 0)
  106. return ctxld->irq;
  107. ret = request_irq(ctxld->irq, dcss_ctxld_irq_handler,
  108. 0, "dcss_ctxld", ctxld);
  109. if (ret) {
  110. dev_err(ctxld->dev, "ctxld: irq request failed.\n");
  111. return ret;
  112. }
  113. ctxld->irq_en = true;
  114. return 0;
  115. }
  116. static void dcss_ctxld_hw_cfg(struct dcss_ctxld *ctxld)
  117. {
  118. dcss_writel(RD_ERR_EN | SB_HP_COMP_EN |
  119. DB_PEND_SB_REC_EN | AHB_ERR_EN | RD_ERR | AHB_ERR,
  120. ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);
  121. }
  122. static void dcss_ctxld_free_ctx(struct dcss_ctxld *ctxld)
  123. {
  124. struct dcss_ctxld_item *ctx;
  125. int i;
  126. for (i = 0; i < 2; i++) {
  127. if (ctxld->db[i]) {
  128. dma_free_coherent(ctxld->dev,
  129. CTXLD_DB_CTX_ENTRIES * sizeof(*ctx),
  130. ctxld->db[i], ctxld->db_paddr[i]);
  131. ctxld->db[i] = NULL;
  132. ctxld->db_paddr[i] = 0;
  133. }
  134. if (ctxld->sb_hp[i]) {
  135. dma_free_coherent(ctxld->dev,
  136. CTXLD_SB_CTX_ENTRIES * sizeof(*ctx),
  137. ctxld->sb_hp[i], ctxld->sb_paddr[i]);
  138. ctxld->sb_hp[i] = NULL;
  139. ctxld->sb_paddr[i] = 0;
  140. }
  141. }
  142. }
  143. static int dcss_ctxld_alloc_ctx(struct dcss_ctxld *ctxld)
  144. {
  145. struct dcss_ctxld_item *ctx;
  146. int i;
  147. for (i = 0; i < 2; i++) {
  148. ctx = dma_alloc_coherent(ctxld->dev,
  149. CTXLD_DB_CTX_ENTRIES * sizeof(*ctx),
  150. &ctxld->db_paddr[i], GFP_KERNEL);
  151. if (!ctx)
  152. return -ENOMEM;
  153. ctxld->db[i] = ctx;
  154. ctx = dma_alloc_coherent(ctxld->dev,
  155. CTXLD_SB_CTX_ENTRIES * sizeof(*ctx),
  156. &ctxld->sb_paddr[i], GFP_KERNEL);
  157. if (!ctx)
  158. return -ENOMEM;
  159. ctxld->sb_hp[i] = ctx;
  160. ctxld->sb_lp[i] = ctx + CTXLD_SB_HP_CTX_ENTRIES;
  161. }
  162. return 0;
  163. }
  164. int dcss_ctxld_init(struct dcss_dev *dcss, unsigned long ctxld_base)
  165. {
  166. struct dcss_ctxld *ctxld;
  167. int ret;
  168. ctxld = kzalloc(sizeof(*ctxld), GFP_KERNEL);
  169. if (!ctxld)
  170. return -ENOMEM;
  171. dcss->ctxld = ctxld;
  172. ctxld->dev = dcss->dev;
  173. spin_lock_init(&ctxld->lock);
  174. ret = dcss_ctxld_alloc_ctx(ctxld);
  175. if (ret) {
  176. dev_err(dcss->dev, "ctxld: cannot allocate context memory.\n");
  177. goto err;
  178. }
  179. ctxld->ctxld_reg = ioremap(ctxld_base, SZ_4K);
  180. if (!ctxld->ctxld_reg) {
  181. dev_err(dcss->dev, "ctxld: unable to remap ctxld base\n");
  182. ret = -ENOMEM;
  183. goto err;
  184. }
  185. ret = dcss_ctxld_irq_config(ctxld, to_platform_device(dcss->dev));
  186. if (ret)
  187. goto err_irq;
  188. dcss_ctxld_hw_cfg(ctxld);
  189. return 0;
  190. err_irq:
  191. iounmap(ctxld->ctxld_reg);
  192. err:
  193. dcss_ctxld_free_ctx(ctxld);
  194. kfree(ctxld);
  195. return ret;
  196. }
/*
 * Tear down the context loader: release the IRQ first so the handler can
 * no longer run, then unmap the registers, free the DMA context buffers
 * and the ctxld object itself. Counterpart of dcss_ctxld_init().
 */
void dcss_ctxld_exit(struct dcss_ctxld *ctxld)
{
	free_irq(ctxld->irq, ctxld);

	if (ctxld->ctxld_reg)
		iounmap(ctxld->ctxld_reg);

	dcss_ctxld_free_ctx(ctxld);
	kfree(ctxld);
}
/*
 * Commit the currently accumulated context to the hardware and start a
 * load. Caller must hold ctxld->lock (see dcss_ctxld_kick()).
 *
 * After starting the load, current_ctx is flipped to the alternate set
 * (and that set's sizes zeroed) so new register writes accumulate there
 * while the hardware consumes this one.
 */
static int dcss_ctxld_enable_locked(struct dcss_ctxld *ctxld)
{
	int curr_ctx = ctxld->current_ctx;
	u32 db_base, sb_base, sb_count;
	u32 sb_hp_cnt, sb_lp_cnt, db_cnt;
	struct dcss_dev *dcss = dcss_drv_dev_to_dcss(ctxld->dev);

	if (!dcss)
		return 0;

	/* let DPR and scaler push their control settings first */
	dcss_dpr_write_sysctrl(dcss->dpr);

	dcss_scaler_write_sclctrl(dcss->scaler);

	sb_hp_cnt = ctxld->ctx_size[curr_ctx][CTX_SB_HP];
	sb_lp_cnt = ctxld->ctx_size[curr_ctx][CTX_SB_LP];
	db_cnt = ctxld->ctx_size[curr_ctx][CTX_DB];

	/* make sure SB_LP context area comes after SB_HP */
	if (sb_lp_cnt &&
	    ctxld->sb_lp[curr_ctx] != ctxld->sb_hp[curr_ctx] + sb_hp_cnt) {
		struct dcss_ctxld_item *sb_lp_adjusted;

		sb_lp_adjusted = ctxld->sb_hp[curr_ctx] + sb_hp_cnt;

		memcpy(sb_lp_adjusted, ctxld->sb_lp[curr_ctx],
		       sb_lp_cnt * CTX_ITEM_SIZE);
	}

	/* program a zero base address when the region is empty */
	db_base = db_cnt ? ctxld->db_paddr[curr_ctx] : 0;

	dcss_writel(db_base, ctxld->ctxld_reg + DCSS_CTXLD_DB_BASE_ADDR);
	dcss_writel(db_cnt, ctxld->ctxld_reg + DCSS_CTXLD_DB_COUNT);

	/*
	 * When SB_HP is empty, the SB_LP count goes into the HP count
	 * field instead — presumably a hardware requirement; TODO confirm
	 * against the CTXLD reference manual.
	 */
	if (sb_hp_cnt)
		sb_count = ((sb_hp_cnt << SB_HP_COUNT_POS) & SB_HP_COUNT_MASK) |
			   ((sb_lp_cnt << SB_LP_COUNT_POS) & SB_LP_COUNT_MASK);
	else
		sb_count = (sb_lp_cnt << SB_HP_COUNT_POS) & SB_HP_COUNT_MASK;

	sb_base = sb_count ? ctxld->sb_paddr[curr_ctx] : 0;

	dcss_writel(sb_base, ctxld->ctxld_reg + DCSS_CTXLD_SB_BASE_ADDR);
	dcss_writel(sb_count, ctxld->ctxld_reg + DCSS_CTXLD_SB_COUNT);

	/* enable the context loader */
	dcss_set(CTXLD_ENABLE, ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);

	ctxld->in_use = true;

	/*
	 * Toggle the current context to the alternate one so that any updates
	 * in the modules' settings take place there.
	 */
	ctxld->current_ctx ^= 1;
	ctxld->ctx_size[ctxld->current_ctx][CTX_DB] = 0;
	ctxld->ctx_size[ctxld->current_ctx][CTX_SB_HP] = 0;
	ctxld->ctx_size[ctxld->current_ctx][CTX_SB_LP] = 0;

	return 0;
}
/*
 * Arm the context loader: mark the accumulated context for commit. The
 * actual hardware load is deferred until dcss_ctxld_kick() runs.
 *
 * Always returns 0.
 */
int dcss_ctxld_enable(struct dcss_ctxld *ctxld)
{
	spin_lock_irq(&ctxld->lock);
	ctxld->armed = true;
	spin_unlock_irq(&ctxld->lock);

	return 0;
}
  257. void dcss_ctxld_kick(struct dcss_ctxld *ctxld)
  258. {
  259. unsigned long flags;
  260. spin_lock_irqsave(&ctxld->lock, flags);
  261. if (ctxld->armed && !ctxld->in_use) {
  262. ctxld->armed = false;
  263. dcss_ctxld_enable_locked(ctxld);
  264. }
  265. spin_unlock_irqrestore(&ctxld->lock, flags);
  266. }
  267. void dcss_ctxld_write_irqsafe(struct dcss_ctxld *ctxld, u32 ctx_id, u32 val,
  268. u32 reg_ofs)
  269. {
  270. int curr_ctx = ctxld->current_ctx;
  271. struct dcss_ctxld_item *ctx[] = {
  272. [CTX_DB] = ctxld->db[curr_ctx],
  273. [CTX_SB_HP] = ctxld->sb_hp[curr_ctx],
  274. [CTX_SB_LP] = ctxld->sb_lp[curr_ctx]
  275. };
  276. int item_idx = ctxld->ctx_size[curr_ctx][ctx_id];
  277. if (item_idx + 1 > dcss_ctxld_ctx_size[ctx_id]) {
  278. WARN_ON(1);
  279. return;
  280. }
  281. ctx[ctx_id][item_idx].val = val;
  282. ctx[ctx_id][item_idx].ofs = reg_ofs;
  283. ctxld->ctx_size[curr_ctx][ctx_id] += 1;
  284. }
/* Locked wrapper around dcss_ctxld_write_irqsafe(). */
void dcss_ctxld_write(struct dcss_ctxld *ctxld, u32 ctx_id,
		      u32 val, u32 reg_ofs)
{
	spin_lock_irq(&ctxld->lock);
	dcss_ctxld_write_irqsafe(ctxld, ctx_id, val, reg_ofs);
	spin_unlock_irq(&ctxld->lock);
}
  292. bool dcss_ctxld_is_flushed(struct dcss_ctxld *ctxld)
  293. {
  294. return ctxld->ctx_size[ctxld->current_ctx][CTX_DB] == 0 &&
  295. ctxld->ctx_size[ctxld->current_ctx][CTX_SB_HP] == 0 &&
  296. ctxld->ctx_size[ctxld->current_ctx][CTX_SB_LP] == 0;
  297. }
  298. int dcss_ctxld_resume(struct dcss_ctxld *ctxld)
  299. {
  300. dcss_ctxld_hw_cfg(ctxld);
  301. if (!ctxld->irq_en) {
  302. enable_irq(ctxld->irq);
  303. ctxld->irq_en = true;
  304. }
  305. return 0;
  306. }
  307. int dcss_ctxld_suspend(struct dcss_ctxld *ctxld)
  308. {
  309. int ret = 0;
  310. unsigned long timeout = jiffies + msecs_to_jiffies(500);
  311. if (!dcss_ctxld_is_flushed(ctxld)) {
  312. dcss_ctxld_kick(ctxld);
  313. while (!time_after(jiffies, timeout) && ctxld->in_use)
  314. msleep(20);
  315. if (time_after(jiffies, timeout))
  316. return -ETIMEDOUT;
  317. }
  318. spin_lock_irq(&ctxld->lock);
  319. if (ctxld->irq_en) {
  320. disable_irq_nosync(ctxld->irq);
  321. ctxld->irq_en = false;
  322. }
  323. /* reset context region and sizes */
  324. ctxld->current_ctx = 0;
  325. ctxld->ctx_size[0][CTX_DB] = 0;
  326. ctxld->ctx_size[0][CTX_SB_HP] = 0;
  327. ctxld->ctx_size[0][CTX_SB_LP] = 0;
  328. spin_unlock_irq(&ctxld->lock);
  329. return ret;
  330. }
/* Lockdep helper: assert that the caller holds ctxld->lock. */
void dcss_ctxld_assert_locked(struct dcss_ctxld *ctxld)
{
	lockdep_assert_held(&ctxld->lock);
}