sde_hw_reg_dma_v1.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  4. * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
  5. */
  6. #include <linux/iopoll.h>
  7. #include "sde_hw_mdss.h"
  8. #include "sde_hw_ctl.h"
  9. #include "sde_hw_reg_dma_v1.h"
  10. #include "msm_drv.h"
  11. #include "msm_mmu.h"
  12. #include "sde_dbg.h"
  13. #include "sde_vbif.h"
  14. #define GUARD_BYTES (BIT(8) - 1)
  15. #define ALIGNED_OFFSET (U32_MAX & ~(GUARD_BYTES))
  16. #define ADDR_ALIGN BIT(8)
  17. #define MAX_RELATIVE_OFF (BIT(21) - 1)
  18. #define ABSOLUTE_RANGE BIT(27)
  19. #define DECODE_SEL_OP (BIT(HW_BLK_SELECT))
  20. #define REG_WRITE_OP ((BIT(REG_SINGLE_WRITE)) | (BIT(REG_BLK_WRITE_SINGLE)) | \
  21. (BIT(REG_BLK_WRITE_INC)) | (BIT(REG_BLK_WRITE_MULTIPLE)) | \
  22. (BIT(REG_SINGLE_MODIFY)) | (BIT(REG_BLK_LUT_WRITE)))
  23. #define REG_DMA_OPS (DECODE_SEL_OP | REG_WRITE_OP)
  24. #define IS_OP_ALLOWED(op, buf_op) (BIT(op) & buf_op)
  25. #define SET_UP_REG_DMA_REG(hw, reg_dma, i) \
  26. do { \
  27. if ((reg_dma)->caps->reg_dma_blks[(i)].valid == false) \
  28. break; \
  29. (hw).base_off = (reg_dma)->addr; \
  30. (hw).blk_off = (reg_dma)->caps->reg_dma_blks[(i)].base; \
  31. (hw).hw_rev = (reg_dma)->caps->version; \
  32. (hw).log_mask = SDE_DBG_MASK_REGDMA; \
  33. } while (0)
  34. #define SIZE_DWORD(x) ((x) / (sizeof(u32)))
  35. #define NOT_WORD_ALIGNED(x) ((x) & 0x3)
  36. #define GRP_VIG_HW_BLK_SELECT (VIG0 | VIG1 | VIG2 | VIG3)
  37. #define GRP_DMA_HW_BLK_SELECT (DMA0 | DMA1 | DMA2 | DMA3 | DMA4 | DMA5)
  38. #define GRP_DSPP_HW_BLK_SELECT (DSPP0 | DSPP1 | DSPP2 | DSPP3)
  39. #define GRP_LTM_HW_BLK_SELECT (LTM0 | LTM1 | LTM2 | LTM3)
  40. #define GRP_MDSS_HW_BLK_SELECT (MDSS)
  41. #define BUFFER_SPACE_LEFT(cfg) ((cfg)->dma_buf->buffer_size - \
  42. (cfg)->dma_buf->index)
  43. #define REL_ADDR_OPCODE (BIT(27))
  44. #define NO_OP_OPCODE (0)
  45. #define SINGLE_REG_WRITE_OPCODE (BIT(28))
  46. #define SINGLE_REG_MODIFY_OPCODE (BIT(29))
  47. #define HW_INDEX_REG_WRITE_OPCODE (BIT(28) | BIT(29))
  48. #define AUTO_INC_REG_WRITE_OPCODE (BIT(30))
  49. #define BLK_REG_WRITE_OPCODE (BIT(30) | BIT(28))
  50. #define LUTBUS_WRITE_OPCODE (BIT(30) | BIT(29))
  51. #define WRAP_MIN_SIZE 2
  52. #define WRAP_MAX_SIZE (BIT(4) - 1)
  53. #define MAX_DWORDS_SZ (BIT(14) - 1)
  54. #define REG_DMA_HEADERS_BUFFER_SZ (sizeof(u32) * 128)
  55. #define LUTBUS_TABLE_SEL_MASK 0x10000
  56. #define LUTBUS_BLOCK_SEL_MASK 0xffff
  57. #define LUTBUS_TRANS_SZ_MASK 0xff0000
  58. #define LUTBUS_LUT_SIZE_MASK 0x3fff
  59. #define PMU_CLK_CTRL 0x1F0
  60. static uint32_t reg_dma_register_count;
  61. static uint32_t reg_dma_decode_sel;
  62. static uint32_t reg_dma_opmode_offset;
  63. static uint32_t reg_dma_ctl0_queue0_cmd0_offset;
  64. static uint32_t reg_dma_ctl0_queue1_cmd0_offset;
  65. static uint32_t reg_dma_intr_status_offset;
  66. static uint32_t reg_dma_intr_4_status_offset;
  67. static uint32_t reg_dma_intr_clear_offset;
  68. static uint32_t reg_dma_ctl_trigger_offset;
  69. static uint32_t reg_dma_ctl0_reset_offset;
  70. static uint32_t reg_dma_error_clear_mask;
  71. static uint32_t reg_dma_ctl_queue_off[CTL_MAX];
  72. static uint32_t reg_dma_ctl_queue1_off[CTL_MAX];
  73. typedef int (*reg_dma_internal_ops) (struct sde_reg_dma_setup_ops_cfg *cfg);
  74. static struct sde_hw_reg_dma *reg_dma;
  75. static u32 ops_mem_size[REG_DMA_SETUP_OPS_MAX] = {
  76. [REG_BLK_WRITE_SINGLE] = sizeof(u32) * 2,
  77. [REG_BLK_WRITE_INC] = sizeof(u32) * 2,
  78. [REG_BLK_WRITE_MULTIPLE] = sizeof(u32) * 2,
  79. [HW_BLK_SELECT] = sizeof(u32) * 2,
  80. [REG_SINGLE_WRITE] = sizeof(u32) * 2,
  81. [REG_SINGLE_MODIFY] = sizeof(u32) * 3,
  82. [REG_BLK_LUT_WRITE] = sizeof(u32) * 2,
  83. };
  84. static u32 queue_sel[DMA_CTL_QUEUE_MAX] = {
  85. [DMA_CTL_QUEUE0] = BIT(0),
  86. [DMA_CTL_QUEUE1] = BIT(4),
  87. };
  88. static u32 dspp_read_sel[DSPP_HIST_MAX] = {
  89. [DSPP0_HIST] = 0,
  90. [DSPP1_HIST] = 1,
  91. [DSPP2_HIST] = 2,
  92. [DSPP3_HIST] = 3,
  93. };
  94. static u32 v1_supported[REG_DMA_FEATURES_MAX] = {
  95. [GAMUT] = GRP_VIG_HW_BLK_SELECT | GRP_DSPP_HW_BLK_SELECT,
  96. [VLUT] = GRP_DSPP_HW_BLK_SELECT,
  97. [GC] = GRP_DSPP_HW_BLK_SELECT,
  98. [IGC] = DSPP_IGC | GRP_DSPP_HW_BLK_SELECT,
  99. [PCC] = GRP_DSPP_HW_BLK_SELECT,
  100. };
  101. static u32 ctl_trigger_done_mask[CTL_MAX][DMA_CTL_QUEUE_MAX] = {
  102. [CTL_0][0] = BIT(16),
  103. [CTL_0][1] = BIT(21),
  104. [CTL_1][0] = BIT(17),
  105. [CTL_1][1] = BIT(22),
  106. [CTL_2][0] = BIT(18),
  107. [CTL_2][1] = BIT(23),
  108. [CTL_3][0] = BIT(19),
  109. [CTL_3][1] = BIT(24),
  110. [CTL_4][0] = BIT(25),
  111. [CTL_4][1] = BIT(27),
  112. [CTL_5][0] = BIT(26),
  113. [CTL_5][1] = BIT(28),
  114. };
  115. static int validate_dma_cfg(struct sde_reg_dma_setup_ops_cfg *cfg);
  116. static int validate_write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg);
  117. static int validate_write_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
  118. static int validate_blk_lut_write(struct sde_reg_dma_setup_ops_cfg *cfg);
  119. static int validate_write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
  120. static int validate_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg);
  121. static int write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg);
  122. static int write_single_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
  123. static int write_multi_reg_index(struct sde_reg_dma_setup_ops_cfg *cfg);
  124. static int write_multi_reg_inc(struct sde_reg_dma_setup_ops_cfg *cfg);
  125. static int write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
  126. static int write_single_modify(struct sde_reg_dma_setup_ops_cfg *cfg);
  127. static int write_block_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
  128. static int write_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg);
  129. static int reset_reg_dma_buffer_v1(struct sde_reg_dma_buffer *lut_buf);
  130. static int check_support_v1(enum sde_reg_dma_features feature,
  131. enum sde_reg_dma_blk blk, bool *is_supported);
  132. static int setup_payload_v1(struct sde_reg_dma_setup_ops_cfg *cfg);
  133. static int kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg);
  134. static int reset_v1(struct sde_hw_ctl *ctl);
  135. static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
  136. enum sde_reg_dma_last_cmd_mode mode);
  137. static struct sde_reg_dma_buffer *alloc_reg_dma_buf_v1(u32 size);
  138. static int dealloc_reg_dma_v1(struct sde_reg_dma_buffer *lut_buf);
  139. static void dump_regs_v1(void);
  140. static int last_cmd_sb_v2(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
  141. enum sde_reg_dma_last_cmd_mode mode);
  142. static reg_dma_internal_ops write_dma_op_params[REG_DMA_SETUP_OPS_MAX] = {
  143. [HW_BLK_SELECT] = write_decode_sel,
  144. [REG_SINGLE_WRITE] = write_single_reg,
  145. [REG_BLK_WRITE_SINGLE] = write_multi_reg_inc,
  146. [REG_BLK_WRITE_INC] = write_multi_reg_index,
  147. [REG_BLK_WRITE_MULTIPLE] = write_multi_lut_reg,
  148. [REG_SINGLE_MODIFY] = write_single_modify,
  149. [REG_BLK_LUT_WRITE] = write_block_lut_reg,
  150. };
  151. static reg_dma_internal_ops validate_dma_op_params[REG_DMA_SETUP_OPS_MAX] = {
  152. [HW_BLK_SELECT] = validate_write_decode_sel,
  153. [REG_SINGLE_WRITE] = validate_write_reg,
  154. [REG_BLK_WRITE_SINGLE] = validate_write_reg,
  155. [REG_BLK_WRITE_INC] = validate_write_reg,
  156. [REG_BLK_WRITE_MULTIPLE] = validate_write_multi_lut_reg,
  157. [REG_SINGLE_MODIFY] = validate_write_reg,
  158. [REG_BLK_LUT_WRITE] = validate_blk_lut_write,
  159. };
  160. static struct sde_reg_dma_buffer *last_cmd_buf_db[CTL_MAX];
  161. static struct sde_reg_dma_buffer *last_cmd_buf_sb[CTL_MAX];
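/* Convert a bitmask of enum sde_reg_dma_blk blocks into the decode select
 * value written into the LUTDMA command buffer (one select bit per block).
 */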
  162. static void get_decode_sel(unsigned long blk, u32 *decode_sel)
  163. {
  164. int i = 0;
  165. *decode_sel = 0;
  166. for_each_set_bit(i, &blk, REG_DMA_BLK_MAX) {
  167. switch (BIT(i)) {
  168. case VIG0:
  169. *decode_sel |= BIT(0);
  170. break;
  171. case VIG1:
  172. *decode_sel |= BIT(1);
  173. break;
  174. case VIG2:
  175. *decode_sel |= BIT(2);
  176. break;
  177. case VIG3:
  178. *decode_sel |= BIT(3);
  179. break;
  180. case DMA0:
  181. *decode_sel |= BIT(5);
  182. break;
  183. case DMA1:
  184. *decode_sel |= BIT(6);
  185. break;
  186. case DMA2:
  187. *decode_sel |= BIT(7);
  188. break;
  189. case DMA3:
  190. *decode_sel |= BIT(8);
  191. break;
  192. case DMA4:
  193. *decode_sel |= BIT(9);
  194. break;
  195. case DMA5:
  196. *decode_sel |= BIT(10);
  197. break;
  198. case DSPP0:
  199. *decode_sel |= BIT(17);
  200. break;
  201. case DSPP1:
  202. *decode_sel |= BIT(18);
  203. break;
  204. case DSPP2:
  205. *decode_sel |= BIT(19);
  206. break;
  207. case DSPP3:
  208. *decode_sel |= BIT(20);
  209. break;
  210. case SSPP_IGC:
  211. *decode_sel |= BIT(4);
  212. break;
  213. case DSPP_IGC:
  214. *decode_sel |= BIT(21);
  215. break;
  216. case LTM0:
  217. *decode_sel |= BIT(22);
  218. break;
  219. case LTM1:
  220. *decode_sel |= BIT(23);
  221. break;
  222. case LTM2:
  223. *decode_sel |= BIT(24);
  224. break;
  225. case LTM3:
  226. *decode_sel |= BIT(25);
  227. break;
  228. case MDSS:
  229. *decode_sel |= BIT(31);
  230. break;
  231. default:
  232. DRM_ERROR("block not supported %zx\n", (size_t)BIT(i));
  233. break;
  234. }
  235. }
  236. }
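/* Copy the payload into the DMA buffer at the current index and update the
 * bookkeeping: index, completed/allowed ops, and the absolute write count
 * used for MDSS (absolute range) writes.
 */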
  237. static int write_multi_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
  238. {
  239. u8 *loc = NULL;
  240. loc = (u8 *)cfg->dma_buf->vaddr + cfg->dma_buf->index;
  241. memcpy(loc, cfg->data, cfg->data_size);
  242. cfg->dma_buf->index += cfg->data_size;
  243. cfg->dma_buf->next_op_allowed = REG_WRITE_OP | DECODE_SEL_OP;
  244. cfg->dma_buf->ops_completed |= REG_WRITE_OP;
  245. if (cfg->blk == MDSS)
  246. cfg->dma_buf->abs_write_cnt += SIZE_DWORD(cfg->data_size);
  247. return 0;
  248. }
  249. int write_multi_reg_index(struct sde_reg_dma_setup_ops_cfg *cfg)
  250. {
  251. u32 *loc = NULL;
  252. loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
  253. cfg->dma_buf->index);
  254. loc[0] = HW_INDEX_REG_WRITE_OPCODE;
  255. loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
  256. if (cfg->blk == MDSS)
  257. loc[0] |= ABSOLUTE_RANGE;
  258. loc[1] = SIZE_DWORD(cfg->data_size);
  259. cfg->dma_buf->index += ops_mem_size[cfg->ops];
  260. return write_multi_reg(cfg);
  261. }
  262. int write_multi_reg_inc(struct sde_reg_dma_setup_ops_cfg *cfg)
  263. {
  264. u32 *loc = NULL;
  265. loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
  266. cfg->dma_buf->index);
  267. loc[0] = AUTO_INC_REG_WRITE_OPCODE;
  268. if (cfg->blk == MDSS)
  269. loc[0] |= ABSOLUTE_RANGE;
  270. loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
  271. loc[1] = SIZE_DWORD(cfg->data_size);
  272. cfg->dma_buf->index += ops_mem_size[cfg->ops];
  273. return write_multi_reg(cfg);
  274. }
  275. static int write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
  276. {
  277. u32 *loc = NULL;
  278. loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
  279. cfg->dma_buf->index);
  280. loc[0] = BLK_REG_WRITE_OPCODE;
  281. loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
  282. if (cfg->blk == MDSS)
  283. loc[0] |= ABSOLUTE_RANGE;
  284. loc[1] = (cfg->inc) ? 0 : BIT(31);
  285. loc[1] |= (cfg->wrap_size & WRAP_MAX_SIZE) << 16;
  286. loc[1] |= ((SIZE_DWORD(cfg->data_size)) & MAX_DWORDS_SZ);
  287. cfg->dma_buf->next_op_allowed = REG_WRITE_OP;
  288. cfg->dma_buf->index += ops_mem_size[cfg->ops];
  289. return write_multi_reg(cfg);
  290. }
  291. static int write_single_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
  292. {
  293. u32 *loc = NULL;
  294. loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
  295. cfg->dma_buf->index);
  296. loc[0] = SINGLE_REG_WRITE_OPCODE;
  297. loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
  298. if (cfg->blk == MDSS) {
  299. loc[0] |= ABSOLUTE_RANGE;
  300. cfg->dma_buf->abs_write_cnt++;
  301. }
  302. loc[1] = *cfg->data;
  303. cfg->dma_buf->index += ops_mem_size[cfg->ops];
  304. cfg->dma_buf->ops_completed |= REG_WRITE_OP;
  305. cfg->dma_buf->next_op_allowed = REG_WRITE_OP | DECODE_SEL_OP;
  306. return 0;
  307. }
  308. static int write_single_modify(struct sde_reg_dma_setup_ops_cfg *cfg)
  309. {
  310. u32 *loc = NULL;
  311. loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
  312. cfg->dma_buf->index);
  313. loc[0] = SINGLE_REG_MODIFY_OPCODE;
  314. loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
  315. if (cfg->blk == MDSS)
  316. loc[0] |= ABSOLUTE_RANGE;
  317. loc[1] = cfg->mask;
  318. loc[2] = *cfg->data;
  319. cfg->dma_buf->index += ops_mem_size[cfg->ops];
  320. cfg->dma_buf->ops_completed |= REG_WRITE_OP;
  321. cfg->dma_buf->next_op_allowed = REG_WRITE_OP | DECODE_SEL_OP;
  322. return 0;
  323. }
  324. static int write_block_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
  325. {
  326. u32 *loc = NULL;
  327. int rc = -EINVAL;
  328. loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
  329. cfg->dma_buf->index);
  330. loc[0] = LUTBUS_WRITE_OPCODE;
  331. loc[0] |= (cfg->table_sel << 16) & LUTBUS_TABLE_SEL_MASK;
  332. loc[0] |= (cfg->block_sel & LUTBUS_BLOCK_SEL_MASK);
  333. loc[1] = (cfg->trans_size << 16) & LUTBUS_TRANS_SZ_MASK;
  334. loc[1] |= (cfg->lut_size & LUTBUS_LUT_SIZE_MASK);
  335. cfg->dma_buf->index += ops_mem_size[cfg->ops];
  336. rc = write_multi_reg(cfg);
  337. if (rc)
  338. return rc;
  339. /* adding 3 NO OPs as SW workaround for REG_BLK_LUT_WRITE
  340. * HW limitation that requires the residual data plus the
  341. * following opcode to exceed 4 DWORDs length.
  342. */
  343. loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
  344. cfg->dma_buf->index);
  345. loc[0] = NO_OP_OPCODE;
  346. loc[1] = NO_OP_OPCODE;
  347. loc[2] = NO_OP_OPCODE;
  348. cfg->dma_buf->index += sizeof(u32) * 3;
  349. return 0;
  350. }
  351. static int write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg)
  352. {
  353. u32 *loc = NULL;
  354. loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
  355. cfg->dma_buf->index);
  356. loc[0] = reg_dma_decode_sel;
  357. get_decode_sel(cfg->blk, &loc[1]);
  358. cfg->dma_buf->index += ops_mem_size[cfg->ops];
  359. cfg->dma_buf->ops_completed |= DECODE_SEL_OP;
  360. cfg->dma_buf->next_op_allowed = REG_WRITE_OP;
  361. return 0;
  362. }
  363. static int validate_write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
  364. {
  365. int rc;
  366. rc = validate_write_reg(cfg);
  367. if (rc)
  368. return rc;
  369. if (cfg->wrap_size < WRAP_MIN_SIZE || cfg->wrap_size > WRAP_MAX_SIZE) {
  370. DRM_ERROR("invalid wrap sz %d min %d max %zd\n",
  371. cfg->wrap_size, WRAP_MIN_SIZE, (size_t)WRAP_MAX_SIZE);
  372. rc = -EINVAL;
  373. }
  374. return rc;
  375. }
  376. static int validate_blk_lut_write(struct sde_reg_dma_setup_ops_cfg *cfg)
  377. {
  378. int rc;
  379. rc = validate_write_reg(cfg);
  380. if (rc)
  381. return rc;
  382. if (cfg->table_sel >= LUTBUS_TABLE_SELECT_MAX ||
  383. cfg->block_sel >= LUTBUS_BLOCK_MAX ||
  384. (cfg->trans_size != LUTBUS_IGC_TRANS_SIZE &&
  385. cfg->trans_size != LUTBUS_GAMUT_TRANS_SIZE &&
  386. cfg->trans_size != LUTBUS_SIXZONE_TRANS_SIZE)) {
  387. DRM_ERROR("invalid table_sel %d block_sel %d trans_size %d\n",
  388. cfg->table_sel, cfg->block_sel,
  389. cfg->trans_size);
  390. rc = -EINVAL;
  391. }
  392. return rc;
  393. }
  394. static int validate_write_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
  395. {
  396. u32 remain_len, write_len;
  397. remain_len = BUFFER_SPACE_LEFT(cfg);
  398. write_len = ops_mem_size[cfg->ops] + cfg->data_size;
  399. if (remain_len < write_len) {
  400. DRM_ERROR("buffer is full sz %d needs %d bytes\n",
  401. remain_len, write_len);
  402. return -EINVAL;
  403. }
  404. if (!cfg->data) {
  405. DRM_ERROR("invalid data %pK size %d exp sz %d\n", cfg->data,
  406. cfg->data_size, write_len);
  407. return -EINVAL;
  408. }
  409. if ((SIZE_DWORD(cfg->data_size)) > MAX_DWORDS_SZ ||
  410. NOT_WORD_ALIGNED(cfg->data_size)) {
  411. DRM_ERROR("Invalid data size %d max %zd align %x\n",
  412. cfg->data_size, (size_t)MAX_DWORDS_SZ,
  413. NOT_WORD_ALIGNED(cfg->data_size));
  414. return -EINVAL;
  415. }
  416. if (cfg->blk_offset > MAX_RELATIVE_OFF ||
  417. NOT_WORD_ALIGNED(cfg->blk_offset)) {
  418. DRM_ERROR("invalid offset %d max %zd align %x\n",
  419. cfg->blk_offset, (size_t)MAX_RELATIVE_OFF,
  420. NOT_WORD_ALIGNED(cfg->blk_offset));
  421. return -EINVAL;
  422. }
  423. return 0;
  424. }
  425. static int validate_write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg)
  426. {
  427. u32 remain_len;
  428. bool vig_blk, dma_blk, dspp_blk, mdss_blk;
  429. remain_len = BUFFER_SPACE_LEFT(cfg);
  430. if (remain_len < ops_mem_size[HW_BLK_SELECT]) {
  431. DRM_ERROR("buffer is full needs %d bytes\n",
  432. ops_mem_size[HW_BLK_SELECT]);
  433. return -EINVAL;
  434. }
  435. if (!cfg->blk) {
  436. DRM_ERROR("blk set as 0\n");
  437. return -EINVAL;
  438. }
  439. vig_blk = (cfg->blk & GRP_VIG_HW_BLK_SELECT) ? true : false;
  440. dma_blk = (cfg->blk & GRP_DMA_HW_BLK_SELECT) ? true : false;
  441. dspp_blk = (cfg->blk & GRP_DSPP_HW_BLK_SELECT) ? true : false;
  442. mdss_blk = (cfg->blk & MDSS) ? true : false;
  443. if ((vig_blk && dspp_blk) || (dma_blk && dspp_blk) ||
  444. (vig_blk && dma_blk) ||
  445. (mdss_blk && (vig_blk | dma_blk | dspp_blk))) {
  446. DRM_ERROR("invalid blk combination %x\n", cfg->blk);
  447. return -EINVAL;
  448. }
  449. return 0;
  450. }
  451. static int validate_dma_cfg(struct sde_reg_dma_setup_ops_cfg *cfg)
  452. {
  453. int rc = 0;
  454. bool supported;
  455. if (!cfg || cfg->ops >= REG_DMA_SETUP_OPS_MAX || !cfg->dma_buf) {
  456. DRM_ERROR("invalid param cfg %pK ops %d dma_buf %pK\n",
  457. cfg, ((cfg) ? cfg->ops : REG_DMA_SETUP_OPS_MAX),
  458. ((cfg) ? cfg->dma_buf : NULL));
  459. return -EINVAL;
  460. }
  461. rc = check_support_v1(cfg->feature, cfg->blk, &supported);
  462. if (rc || !supported) {
  463. DRM_ERROR("check support failed rc %d supported %d\n",
  464. rc, supported);
  465. rc = -EINVAL;
  466. return rc;
  467. }
  468. if (cfg->dma_buf->index >= cfg->dma_buf->buffer_size ||
  469. NOT_WORD_ALIGNED(cfg->dma_buf->index)) {
  470. DRM_ERROR("Buf Overflow index %d max size %d align %x\n",
  471. cfg->dma_buf->index, cfg->dma_buf->buffer_size,
  472. NOT_WORD_ALIGNED(cfg->dma_buf->index));
  473. return -EINVAL;
  474. }
  475. if (cfg->dma_buf->iova & GUARD_BYTES || !cfg->dma_buf->vaddr) {
  476. DRM_ERROR("iova not aligned to %zx iova %llx kva %pK",
  477. (size_t)ADDR_ALIGN, cfg->dma_buf->iova,
  478. cfg->dma_buf->vaddr);
  479. return -EINVAL;
  480. }
  481. if (!IS_OP_ALLOWED(cfg->ops, cfg->dma_buf->next_op_allowed)) {
  482. DRM_ERROR("invalid op %x allowed %x\n", cfg->ops,
  483. cfg->dma_buf->next_op_allowed);
  484. return -EINVAL;
  485. }
  486. if (!validate_dma_op_params[cfg->ops] ||
  487. !write_dma_op_params[cfg->ops]) {
  488. DRM_ERROR("invalid op %d validate %pK write %pK\n", cfg->ops,
  489. validate_dma_op_params[cfg->ops],
  490. write_dma_op_params[cfg->ops]);
  491. return -EINVAL;
  492. }
  493. return rc;
  494. }
  495. static int validate_kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)
  496. {
  497. if (!cfg || !cfg->ctl || !cfg->dma_buf ||
  498. cfg->dma_type >= REG_DMA_TYPE_MAX) {
  499. DRM_ERROR("invalid cfg %pK ctl %pK dma_buf %pK dma type %d\n",
  500. cfg, ((!cfg) ? NULL : cfg->ctl),
  501. ((!cfg) ? NULL : cfg->dma_buf),
  502. ((!cfg) ? 0 : cfg->dma_type));
  503. return -EINVAL;
  504. }
  505. if (reg_dma->caps->reg_dma_blks[cfg->dma_type].valid == false) {
  506. DRM_DEBUG("REG dma type %d is not supported\n", cfg->dma_type);
  507. return -EOPNOTSUPP;
  508. }
  509. if (cfg->ctl->idx < CTL_0 || cfg->ctl->idx >= CTL_MAX) {
  510. DRM_ERROR("invalid ctl idx %d\n", cfg->ctl->idx);
  511. return -EINVAL;
  512. }
  513. if (cfg->op >= REG_DMA_OP_MAX) {
  514. DRM_ERROR("invalid op %d\n", cfg->op);
  515. return -EINVAL;
  516. }
  517. if ((cfg->op == REG_DMA_WRITE) &&
  518. (!(cfg->dma_buf->ops_completed & DECODE_SEL_OP) ||
  519. !(cfg->dma_buf->ops_completed & REG_WRITE_OP))) {
  520. DRM_ERROR("incomplete write ops %x\n",
  521. cfg->dma_buf->ops_completed);
  522. return -EINVAL;
  523. }
  524. if (cfg->op == REG_DMA_READ && cfg->block_select >= DSPP_HIST_MAX) {
  525. DRM_ERROR("invalid block for read %d\n", cfg->block_select);
  526. return -EINVAL;
  527. }
  528. /* Only immediate triggers are supported for now, hence the hardcoding */
  529. cfg->trigger_mode = (cfg->op == REG_DMA_READ) ? (READ_TRIGGER) :
  530. (WRITE_TRIGGER);
  531. if (cfg->dma_buf->iova & GUARD_BYTES) {
  532. DRM_ERROR("Address is not aligned to %zx iova %llx",
  533. (size_t)ADDR_ALIGN, cfg->dma_buf->iova);
  534. return -EINVAL;
  535. }
  536. if (cfg->queue_select >= DMA_CTL_QUEUE_MAX) {
  537. DRM_ERROR("invalid queue selected %d\n", cfg->queue_select);
  538. return -EINVAL;
  539. }
  540. if (SIZE_DWORD(cfg->dma_buf->index) > MAX_DWORDS_SZ ||
  541. !cfg->dma_buf->index) {
  542. DRM_ERROR("invalid dword size %zd max %zd\n",
  543. (size_t)SIZE_DWORD(cfg->dma_buf->index),
  544. (size_t)MAX_DWORDS_SZ);
  545. return -EINVAL;
  546. }
  547. if (cfg->dma_type == REG_DMA_TYPE_SB &&
  548. (cfg->queue_select != DMA_CTL_QUEUE1 ||
  549. cfg->op == REG_DMA_READ)) {
  550. DRM_ERROR("invalid queue selected %d or op %d for SB LUTDMA\n",
  551. cfg->queue_select, cfg->op);
  552. return -EINVAL;
  553. }
  554. if ((cfg->dma_buf->abs_write_cnt % 2) != 0) {
  555. /* Touch up buffer to avoid HW issues with odd number of abs writes */
  556. u32 reg = 0;
  557. struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
  558. dma_write_cfg.dma_buf = cfg->dma_buf;
  559. dma_write_cfg.blk = MDSS;
  560. dma_write_cfg.feature = REG_DMA_FEATURES_MAX;
  561. dma_write_cfg.ops = HW_BLK_SELECT;
  562. if (validate_write_decode_sel(&dma_write_cfg) || write_decode_sel(&dma_write_cfg)) {
  563. DRM_ERROR("Failed setting MDSS decode select for LUTDMA touch up\n");
  564. return -EINVAL;
  565. }
  566. /* Perform dummy write on LUTDMA RO version reg */
  567. dma_write_cfg.ops = REG_SINGLE_WRITE;
  568. dma_write_cfg.blk_offset = reg_dma->caps->base_off +
  569. reg_dma->caps->reg_dma_blks[cfg->dma_type].base;
  570. dma_write_cfg.data = &reg;
  571. dma_write_cfg.data_size = sizeof(uint32_t);
  572. if (validate_write_reg(&dma_write_cfg) || write_single_reg(&dma_write_cfg)) {
  573. DRM_ERROR("Failed to add touch up write to LUTDMA buffer\n");
  574. return -EINVAL;
  575. }
  576. }
  577. return 0;
  578. }
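/* Program the LUTDMA block for a kickoff: sync the GEM buffer, build the
 * command word (op type, last-command flag, payload length in dwords),
 * clear any stale error status, write the buffer iova and command word to
 * the CTL queue registers, and issue the SW trigger for the DB path on the
 * last command.
 */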
  579. static int write_kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)
  580. {
  581. u32 cmd1, mask = 0, val = 0;
  582. struct sde_hw_blk_reg_map hw;
  583. memset(&hw, 0, sizeof(hw));
  584. msm_gem_sync(cfg->dma_buf->buf);
  585. cmd1 = (cfg->op == REG_DMA_READ) ?
  586. (dspp_read_sel[cfg->block_select] << 30) : 0;
  587. cmd1 |= (cfg->last_command) ? BIT(24) : 0;
  588. cmd1 |= (cfg->op == REG_DMA_READ) ? (2 << 22) : 0;
  589. cmd1 |= (cfg->op == REG_DMA_WRITE) ? (BIT(22)) : 0;
  590. cmd1 |= (SIZE_DWORD(cfg->dma_buf->index) & MAX_DWORDS_SZ);
  591. if (cfg->dma_type == REG_DMA_TYPE_DB)
  592. SET_UP_REG_DMA_REG(hw, reg_dma, REG_DMA_TYPE_DB);
  593. else if (cfg->dma_type == REG_DMA_TYPE_SB)
  594. SET_UP_REG_DMA_REG(hw, reg_dma, REG_DMA_TYPE_SB);
  595. if (hw.hw_rev == 0) {
  596. DRM_ERROR("DMA type %d is unsupported\n", cfg->dma_type);
  597. return -EOPNOTSUPP;
  598. }
  599. SDE_REG_WRITE(&hw, reg_dma_opmode_offset, BIT(0));
  600. val = SDE_REG_READ(&hw, reg_dma_intr_4_status_offset);
  601. if (val) {
  602. DRM_DEBUG("LUT dma status %x\n", val);
  603. mask = reg_dma_error_clear_mask;
  604. SDE_REG_WRITE(&hw, reg_dma_intr_clear_offset + sizeof(u32) * 4,
  605. mask);
  606. SDE_EVT32(val);
  607. }
  608. if (cfg->dma_type == REG_DMA_TYPE_DB) {
  609. SDE_REG_WRITE(&hw, reg_dma_ctl_queue_off[cfg->ctl->idx],
  610. cfg->dma_buf->iova);
  611. SDE_REG_WRITE(&hw, reg_dma_ctl_queue_off[cfg->ctl->idx] + 0x4,
  612. cmd1);
  613. } else if (cfg->dma_type == REG_DMA_TYPE_SB) {
  614. SDE_REG_WRITE(&hw, reg_dma_ctl_queue1_off[cfg->ctl->idx],
  615. cfg->dma_buf->iova);
  616. SDE_REG_WRITE(&hw, reg_dma_ctl_queue1_off[cfg->ctl->idx] + 0x4,
  617. cmd1);
  618. }
  619. if (cfg->last_command) {
  620. mask = ctl_trigger_done_mask[cfg->ctl->idx][cfg->queue_select];
  621. SDE_REG_WRITE(&hw, reg_dma_intr_clear_offset, mask);
  622. /* DB LUTDMA uses a SW trigger, while SB LUTDMA uses the DSPP_SB
  623. * flush as its trigger event.
  624. */
  625. if (cfg->dma_type == REG_DMA_TYPE_DB) {
  626. SDE_REG_WRITE(&cfg->ctl->hw, reg_dma_ctl_trigger_offset,
  627. queue_sel[cfg->queue_select]);
  628. }
  629. }
  630. SDE_EVT32(cfg->feature, cfg->dma_type,
  631. ((uint64_t)cfg->dma_buf) >> 32,
  632. ((uint64_t)cfg->dma_buf) & 0xFFFFFFFF,
  633. (cfg->dma_buf->iova) >> 32,
  634. (cfg->dma_buf->iova) & 0xFFFFFFFF,
  635. cfg->op,
  636. cfg->queue_select, cfg->ctl->idx,
  637. SIZE_DWORD(cfg->dma_buf->index));
  638. return 0;
  639. }
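/* Force the LUTDMA clock on or off through PMU_CLK_CTRL (bits 0 and 16).
 * Returns true if the force bits were previously clear.
 */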
  640. static bool setup_clk_force_ctrl(struct sde_hw_blk_reg_map *hw,
  641. enum sde_clk_ctrl_type clk_ctrl, bool enable)
  642. {
  643. u32 reg_val, new_val;
  644. if (!hw)
  645. return false;
  646. if (!SDE_CLK_CTRL_LUTDMA_VALID(clk_ctrl))
  647. return false;
  648. reg_val = SDE_REG_READ(hw, PMU_CLK_CTRL);
  649. if (enable)
  650. new_val = reg_val | (BIT(0) | BIT(16));
  651. else
  652. new_val = reg_val & ~(BIT(0) | BIT(16));
  653. SDE_REG_WRITE(hw, PMU_CLK_CTRL, new_val);
  654. wmb(); /* ensure write finished before progressing */
  655. return !(reg_val & (BIT(0) | BIT(16)));
  656. }
  657. int init_v1(struct sde_hw_reg_dma *cfg)
  658. {
  659. int i = 0, rc = 0;
  660. if (!cfg)
  661. return -EINVAL;
  662. reg_dma = cfg;
  663. for (i = CTL_0; i < CTL_MAX; i++) {
  664. if (!last_cmd_buf_db[i]) {
  665. last_cmd_buf_db[i] =
  666. alloc_reg_dma_buf_v1(REG_DMA_HEADERS_BUFFER_SZ);
  667. if (IS_ERR_OR_NULL(last_cmd_buf_db[i])) {
  668. /*
  669. * This will allow reg dma to fall back to
  670. * AHB domain
  671. */
  672. pr_info("Failed to allocate reg dma, ret:%lu\n",
  673. PTR_ERR(last_cmd_buf_db[i]));
  674. return 0;
  675. }
  676. }
  677. if (!last_cmd_buf_sb[i]) {
  678. last_cmd_buf_sb[i] =
  679. alloc_reg_dma_buf_v1(REG_DMA_HEADERS_BUFFER_SZ);
  680. if (IS_ERR_OR_NULL(last_cmd_buf_sb[i])) {
  681. /*
  682. * This will allow reg dma to fall back to
  683. * AHB domain
  684. */
  685. pr_info("Failed to allocate reg dma, ret:%lu\n",
  686. PTR_ERR(last_cmd_buf_sb[i]));
  687. return 0;
  688. }
  689. }
  690. }
  691. if (rc) {
  692. for (i = 0; i < CTL_MAX; i++) {
  693. if (!last_cmd_buf_db[i])
  694. continue;
  695. dealloc_reg_dma_v1(last_cmd_buf_db[i]);
  696. last_cmd_buf_db[i] = NULL;
  697. }
  698. for (i = 0; i < CTL_MAX; i++) {
  699. if (!last_cmd_buf_sb[i])
  700. continue;
  701. dealloc_reg_dma_v1(last_cmd_buf_sb[i]);
  702. last_cmd_buf_sb[i] = NULL;
  703. }
  704. return rc;
  705. }
  706. reg_dma->ops.check_support = check_support_v1;
  707. reg_dma->ops.setup_payload = setup_payload_v1;
  708. reg_dma->ops.kick_off = kick_off_v1;
  709. reg_dma->ops.reset = reset_v1;
  710. reg_dma->ops.alloc_reg_dma_buf = alloc_reg_dma_buf_v1;
  711. reg_dma->ops.dealloc_reg_dma = dealloc_reg_dma_v1;
  712. reg_dma->ops.reset_reg_dma_buf = reset_reg_dma_buffer_v1;
  713. reg_dma->ops.last_command = last_cmd_v1;
  714. reg_dma->ops.dump_regs = dump_regs_v1;
  715. reg_dma_register_count = 60;
  716. reg_dma_decode_sel = 0x180ac060;
  717. reg_dma_opmode_offset = 0x4;
  718. reg_dma_ctl0_queue0_cmd0_offset = 0x14;
  719. reg_dma_intr_status_offset = 0x90;
  720. reg_dma_intr_4_status_offset = 0xa0;
  721. reg_dma_intr_clear_offset = 0xb0;
  722. reg_dma_ctl_trigger_offset = 0xd4;
  723. reg_dma_ctl0_reset_offset = 0xe4;
  724. reg_dma_error_clear_mask = BIT(0) | BIT(1) | BIT(2) | BIT(16);
  725. reg_dma_ctl_queue_off[CTL_0] = reg_dma_ctl0_queue0_cmd0_offset;
  726. for (i = CTL_1; i < ARRAY_SIZE(reg_dma_ctl_queue_off); i++)
  727. reg_dma_ctl_queue_off[i] = reg_dma_ctl_queue_off[i - 1] +
  728. (sizeof(u32) * 4);
  729. return 0;
  730. }
  731. int init_v11(struct sde_hw_reg_dma *cfg)
  732. {
  733. int ret = 0, i = 0;
  734. ret = init_v1(cfg);
  735. if (ret) {
  736. DRM_ERROR("failed to initialize v1: ret %d\n", ret);
  737. return -EINVAL;
  738. }
  739. /* initialize register offsets and v1_supported based on version */
  740. reg_dma_register_count = 133;
  741. reg_dma_decode_sel = 0x180ac114;
  742. reg_dma_opmode_offset = 0x4;
  743. reg_dma_ctl0_queue0_cmd0_offset = 0x14;
  744. reg_dma_intr_status_offset = 0x160;
  745. reg_dma_intr_4_status_offset = 0x170;
  746. reg_dma_intr_clear_offset = 0x1a0;
  747. reg_dma_ctl_trigger_offset = 0xd4;
  748. reg_dma_ctl0_reset_offset = 0x200;
  749. reg_dma_error_clear_mask = BIT(0) | BIT(1) | BIT(2) | BIT(16) |
  750. BIT(17) | BIT(18);
  751. reg_dma_ctl_queue_off[CTL_0] = reg_dma_ctl0_queue0_cmd0_offset;
  752. for (i = CTL_1; i < ARRAY_SIZE(reg_dma_ctl_queue_off); i++)
  753. reg_dma_ctl_queue_off[i] = reg_dma_ctl_queue_off[i - 1] +
  754. (sizeof(u32) * 4);
  755. v1_supported[IGC] = DSPP_IGC | GRP_DSPP_HW_BLK_SELECT |
  756. GRP_VIG_HW_BLK_SELECT | GRP_DMA_HW_BLK_SELECT;
  757. v1_supported[GC] = GRP_DMA_HW_BLK_SELECT | GRP_DSPP_HW_BLK_SELECT;
  758. v1_supported[HSIC] = GRP_DSPP_HW_BLK_SELECT;
  759. v1_supported[SIX_ZONE] = GRP_DSPP_HW_BLK_SELECT;
  760. v1_supported[MEMC_SKIN] = GRP_DSPP_HW_BLK_SELECT;
  761. v1_supported[MEMC_SKY] = GRP_DSPP_HW_BLK_SELECT;
  762. v1_supported[MEMC_FOLIAGE] = GRP_DSPP_HW_BLK_SELECT;
  763. v1_supported[MEMC_PROT] = GRP_DSPP_HW_BLK_SELECT;
  764. v1_supported[QSEED] = GRP_VIG_HW_BLK_SELECT;
  765. return 0;
  766. }
  767. int init_v12(struct sde_hw_reg_dma *cfg)
  768. {
  769. int ret = 0;
  770. ret = init_v11(cfg);
  771. if (ret) {
  772. DRM_ERROR("failed to initialize v11: ret %d\n", ret);
  773. return ret;
  774. }
  775. v1_supported[LTM_INIT] = GRP_LTM_HW_BLK_SELECT;
  776. v1_supported[LTM_ROI] = GRP_LTM_HW_BLK_SELECT;
  777. v1_supported[LTM_VLUT] = GRP_LTM_HW_BLK_SELECT;
  778. v1_supported[RC_DATA] = (GRP_DSPP_HW_BLK_SELECT |
  779. GRP_MDSS_HW_BLK_SELECT);
  780. v1_supported[SPR_INIT] = (GRP_DSPP_HW_BLK_SELECT |
  781. GRP_MDSS_HW_BLK_SELECT);
  782. v1_supported[SPR_PU_CFG] = (GRP_DSPP_HW_BLK_SELECT |
  783. GRP_MDSS_HW_BLK_SELECT);
  784. v1_supported[DEMURA_CFG] = MDSS | DSPP0 | DSPP1;
  785. return 0;
  786. }
  787. static int init_reg_dma_vbif(struct sde_hw_reg_dma *cfg)
  788. {
  789. int ret = 0;
  790. struct sde_hw_blk_reg_map *hw;
  791. struct sde_vbif_clk_client clk_client;
  792. struct msm_drm_private *priv = cfg->drm_dev->dev_private;
  793. struct msm_kms *kms = priv->kms;
  794. struct sde_kms *sde_kms = to_sde_kms(kms);
  795. if (cfg->caps->clk_ctrl != SDE_CLK_CTRL_LUTDMA) {
  796. SDE_ERROR("invalid lutdma clk ctrl type %d\n", cfg->caps->clk_ctrl);
  797. return -EINVAL;
  798. }
  799. hw = kzalloc(sizeof(*hw), GFP_KERNEL);
  800. if (!hw) {
  801. SDE_ERROR("failed to create hw block\n");
  802. return -ENOMEM;
  803. }
  804. hw->base_off = cfg->addr;
  805. hw->blk_off = cfg->caps->reg_dma_blks[REG_DMA_TYPE_DB].base;
  806. clk_client.hw = hw;
  807. clk_client.clk_ctrl = cfg->caps->clk_ctrl;
  808. clk_client.ops.setup_clk_force_ctrl = setup_clk_force_ctrl;
  809. ret = sde_vbif_clk_register(sde_kms, &clk_client);
  810. if (ret) {
  811. SDE_ERROR("failed to register vbif client %d\n", cfg->caps->clk_ctrl);
  812. kfree(hw);
  813. }
  814. return ret;
  815. }
  816. int init_v2(struct sde_hw_reg_dma *cfg)
  817. {
  818. int ret = 0, i = 0;
  819. ret = init_v12(cfg);
  820. if (ret) {
  821. DRM_ERROR("failed to initialize v12: ret %d\n", ret);
  822. return ret;
  823. }
  824. /* initialize register offsets based on version delta */
  825. reg_dma_register_count = 0x91;
  826. reg_dma_ctl0_queue1_cmd0_offset = 0x1c;
  827. reg_dma_error_clear_mask |= BIT(19);
  828. reg_dma_ctl_queue1_off[CTL_0] = reg_dma_ctl0_queue1_cmd0_offset;
  829. for (i = CTL_1; i < ARRAY_SIZE(reg_dma_ctl_queue_off); i++)
  830. reg_dma_ctl_queue1_off[i] = reg_dma_ctl_queue1_off[i - 1] +
  831. (sizeof(u32) * 4);
  832. v1_supported[IGC] = GRP_DSPP_HW_BLK_SELECT | GRP_VIG_HW_BLK_SELECT |
  833. GRP_DMA_HW_BLK_SELECT;
  834. if (cfg->caps->reg_dma_blks[REG_DMA_TYPE_SB].valid == true)
  835. reg_dma->ops.last_command_sb = last_cmd_sb_v2;
  836. if (cfg->caps->split_vbif_supported)
  837. ret = init_reg_dma_vbif(cfg);
  838. return ret;
  839. }
  840. static int check_support_v1(enum sde_reg_dma_features feature,
  841. enum sde_reg_dma_blk blk,
  842. bool *is_supported)
  843. {
  844. int ret = 0;
  845. if (!is_supported)
  846. return -EINVAL;
  847. if (feature >= REG_DMA_FEATURES_MAX
  848. || blk >= BIT_ULL(REG_DMA_BLK_MAX)) {
  849. *is_supported = false;
  850. return ret;
  851. }
  852. *is_supported = (blk & v1_supported[feature]) ? true : false;
  853. return ret;
  854. }
  855. static int setup_payload_v1(struct sde_reg_dma_setup_ops_cfg *cfg)
  856. {
  857. int rc = 0;
  858. rc = validate_dma_cfg(cfg);
  859. if (!rc)
  860. rc = validate_dma_op_params[cfg->ops](cfg);
  861. if (!rc)
  862. rc = write_dma_op_params[cfg->ops](cfg);
  863. return rc;
  864. }
  865. static int kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)
  866. {
  867. int rc = 0;
  868. rc = validate_kick_off_v1(cfg);
  869. if (rc)
  870. return rc;
  871. rc = write_kick_off_v1(cfg);
  872. return rc;
  873. }
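/* Reset every valid LUTDMA block for the given CTL index and poll briefly
 * for the reset bit to clear.
 */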
  874. int reset_v1(struct sde_hw_ctl *ctl)
  875. {
  876. struct sde_hw_blk_reg_map hw;
  877. u32 index, val, i = 0, k = 0;
  878. if (!ctl || ctl->idx > CTL_MAX) {
  879. DRM_ERROR("invalid ctl %pK ctl idx %d\n",
  880. ctl, ((ctl) ? ctl->idx : 0));
  881. return -EINVAL;
  882. }
  883. index = ctl->idx - CTL_0;
  884. for (k = 0; k < REG_DMA_TYPE_MAX; k++) {
  885. memset(&hw, 0, sizeof(hw));
  886. SET_UP_REG_DMA_REG(hw, reg_dma, k);
  887. if (hw.hw_rev == 0)
  888. continue;
  889. SDE_REG_WRITE(&hw, reg_dma_opmode_offset, BIT(0));
  890. SDE_REG_WRITE(&hw, (reg_dma_ctl0_reset_offset +
  891. index * sizeof(u32)), BIT(0));
  892. i = 0;
  893. do {
  894. udelay(1000);
  895. i++;
  896. val = SDE_REG_READ(&hw,
  897. (reg_dma_ctl0_reset_offset +
  898. index * sizeof(u32)));
  899. } while (i < 2 && val);
  900. }
  901. return 0;
  902. }
  903. static void sde_reg_dma_aspace_cb_locked(void *cb_data, bool is_detach)
  904. {
  905. struct sde_reg_dma_buffer *dma_buf = NULL;
  906. struct msm_gem_address_space *aspace = NULL;
  907. u32 iova_aligned, offset;
  908. int rc;
  909. if (!cb_data) {
  910. DRM_ERROR("aspace cb called with invalid dma_buf\n");
  911. return;
  912. }
  913. dma_buf = (struct sde_reg_dma_buffer *)cb_data;
  914. aspace = dma_buf->aspace;
  915. if (is_detach) {
  916. /* invalidate the stored iova */
  917. dma_buf->iova = 0;
  918. /* return the virtual address mapping */
  919. msm_gem_put_vaddr(dma_buf->buf);
  920. msm_gem_vunmap(dma_buf->buf, OBJ_LOCK_NORMAL);
  921. } else {
  922. rc = msm_gem_get_iova(dma_buf->buf, aspace,
  923. &dma_buf->iova);
  924. if (rc) {
  925. DRM_ERROR("failed to get the iova rc %d\n", rc);
  926. return;
  927. }
  928. dma_buf->vaddr = msm_gem_get_vaddr(dma_buf->buf);
  929. if (IS_ERR_OR_NULL(dma_buf->vaddr)) {
  930. DRM_ERROR("failed to get va rc %d\n", rc);
  931. return;
  932. }
  933. iova_aligned = (dma_buf->iova + GUARD_BYTES) & ALIGNED_OFFSET;
  934. offset = iova_aligned - dma_buf->iova;
  935. dma_buf->iova = dma_buf->iova + offset;
  936. dma_buf->vaddr = (void *)(((u8 *)dma_buf->vaddr) + offset);
  937. dma_buf->next_op_allowed = DECODE_SEL_OP;
  938. }
  939. }
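/* Allocate a GEM-backed DMA buffer, over-allocating by GUARD_BYTES so both
 * the iova and the kernel mapping can be rounded up to the ADDR_ALIGN
 * (256-byte) boundary, and register an address-space callback so the
 * mapping is refreshed on SMMU attach/detach.
 */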
  940. static struct sde_reg_dma_buffer *alloc_reg_dma_buf_v1(u32 size)
  941. {
  942. struct sde_reg_dma_buffer *dma_buf = NULL;
  943. u32 iova_aligned, offset;
  944. u32 rsize = size + GUARD_BYTES;
  945. struct msm_gem_address_space *aspace = NULL;
  946. int rc = 0;
  947. if (!size || SIZE_DWORD(size) > MAX_DWORDS_SZ) {
  948. DRM_ERROR("invalid buffer size %lu, max %lu\n",
  949. SIZE_DWORD(size), MAX_DWORDS_SZ);
  950. return ERR_PTR(-EINVAL);
  951. }
  952. dma_buf = kzalloc(sizeof(*dma_buf), GFP_KERNEL);
  953. if (!dma_buf)
  954. return ERR_PTR(-ENOMEM);
  955. dma_buf->buf = msm_gem_new(reg_dma->drm_dev,
  956. rsize, MSM_BO_UNCACHED);
  957. if (IS_ERR_OR_NULL(dma_buf->buf)) {
  958. rc = -EINVAL;
  959. goto fail;
  960. }
  961. aspace = msm_gem_smmu_address_space_get(reg_dma->drm_dev,
  962. MSM_SMMU_DOMAIN_UNSECURE);
  963. if (PTR_ERR(aspace) == -ENODEV) {
  964. aspace = NULL;
  965. DRM_DEBUG("IOMMU not present, relying on VRAM\n");
  966. } else if (IS_ERR_OR_NULL(aspace)) {
  967. rc = PTR_ERR(aspace);
  968. aspace = NULL;
  969. DRM_ERROR("failed to get aspace %d", rc);
  970. goto free_gem;
  971. } else if (aspace) {
  972. /* register to aspace */
  973. rc = msm_gem_address_space_register_cb(aspace,
  974. sde_reg_dma_aspace_cb_locked,
  975. (void *)dma_buf);
  976. if (rc) {
  977. DRM_ERROR("failed to register callback %d", rc);
  978. goto free_gem;
  979. }
  980. }
  981. dma_buf->aspace = aspace;
  982. rc = msm_gem_get_iova(dma_buf->buf, aspace, &dma_buf->iova);
  983. if (rc) {
  984. DRM_ERROR("failed to get the iova rc %d\n", rc);
  985. goto free_aspace_cb;
  986. }
  987. dma_buf->vaddr = msm_gem_get_vaddr(dma_buf->buf);
  988. if (IS_ERR_OR_NULL(dma_buf->vaddr)) {
  989. DRM_ERROR("failed to get va rc %d\n", rc);
  990. rc = -EINVAL;
  991. goto put_iova;
  992. }
  993. dma_buf->buffer_size = size;
  994. iova_aligned = (dma_buf->iova + GUARD_BYTES) & ALIGNED_OFFSET;
  995. offset = iova_aligned - dma_buf->iova;
  996. dma_buf->iova = dma_buf->iova + offset;
  997. dma_buf->vaddr = (void *)(((u8 *)dma_buf->vaddr) + offset);
  998. dma_buf->next_op_allowed = DECODE_SEL_OP;
  999. return dma_buf;
  1000. put_iova:
  1001. msm_gem_put_iova(dma_buf->buf, aspace);
  1002. free_aspace_cb:
  1003. msm_gem_address_space_unregister_cb(aspace,
  1004. sde_reg_dma_aspace_cb_locked, dma_buf);
  1005. free_gem:
  1006. mutex_lock(&reg_dma->drm_dev->struct_mutex);
  1007. msm_gem_free_object(dma_buf->buf);
  1008. mutex_unlock(&reg_dma->drm_dev->struct_mutex);
  1009. fail:
  1010. kfree(dma_buf);
  1011. return ERR_PTR(rc);
  1012. }
  1013. static int dealloc_reg_dma_v1(struct sde_reg_dma_buffer *dma_buf)
  1014. {
  1015. if (!dma_buf) {
  1016. DRM_ERROR("invalid param reg_buf %pK\n", dma_buf);
  1017. return -EINVAL;
  1018. }
  1019. if (dma_buf->buf) {
  1020. msm_gem_put_iova(dma_buf->buf, 0);
  1021. msm_gem_address_space_unregister_cb(dma_buf->aspace,
  1022. sde_reg_dma_aspace_cb_locked, dma_buf);
  1023. mutex_lock(&reg_dma->drm_dev->struct_mutex);
  1024. msm_gem_free_object(dma_buf->buf);
  1025. mutex_unlock(&reg_dma->drm_dev->struct_mutex);
  1026. }
  1027. kfree(dma_buf);
  1028. return 0;
  1029. }
  1030. static int reset_reg_dma_buffer_v1(struct sde_reg_dma_buffer *lut_buf)
  1031. {
  1032. if (!lut_buf)
  1033. return -EINVAL;
  1034. lut_buf->index = 0;
  1035. lut_buf->ops_completed = 0;
  1036. lut_buf->next_op_allowed = DECODE_SEL_OP;
  1037. lut_buf->abs_write_cnt = 0;
  1038. return 0;
  1039. }
  1040. static int validate_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg)
  1041. {
  1042. u32 remain_len, write_len;
  1043. remain_len = BUFFER_SPACE_LEFT(cfg);
  1044. write_len = sizeof(u32);
  1045. if (remain_len < write_len) {
  1046. DRM_ERROR("buffer is full sz %d needs %d bytes\n",
  1047. remain_len, write_len);
  1048. return -EINVAL;
  1049. }
  1050. return 0;
  1051. }
  1052. static int write_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg)
  1053. {
  1054. u32 *loc = NULL;
  1055. loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
  1056. cfg->dma_buf->index);
  1057. loc[0] = reg_dma_decode_sel;
  1058. loc[1] = 0;
  1059. cfg->dma_buf->index = sizeof(u32) * 2;
  1060. cfg->dma_buf->ops_completed = REG_WRITE_OP | DECODE_SEL_OP;
  1061. cfg->dma_buf->next_op_allowed = REG_WRITE_OP;
  1062. return 0;
  1063. }
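/* Queue a minimal "last command" buffer (decode select only) on the DB path
 * and, when REG_DMA_WAIT4_COMP is requested, poll the interrupt status for
 * the trigger-done bit of this CTL/queue pair.
 */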
  1064. static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
  1065. enum sde_reg_dma_last_cmd_mode mode)
  1066. {
  1067. struct sde_reg_dma_setup_ops_cfg cfg;
  1068. struct sde_reg_dma_kickoff_cfg kick_off;
  1069. struct sde_hw_blk_reg_map hw;
  1070. u32 val;
  1071. int rc;
  1072. if (!ctl || ctl->idx >= CTL_MAX || q >= DMA_CTL_QUEUE_MAX) {
  1073. DRM_ERROR("ctl %pK q %d index %d\n", ctl, q,
  1074. ((ctl) ? ctl->idx : -1));
  1075. return -EINVAL;
  1076. }
  1077. if (!last_cmd_buf_db[ctl->idx] || !last_cmd_buf_db[ctl->idx]->iova) {
  1078. DRM_ERROR("invalid last cmd buf for idx %d\n", ctl->idx);
  1079. return -EINVAL;
  1080. }
  1081. cfg.dma_buf = last_cmd_buf_db[ctl->idx];
  1082. reset_reg_dma_buffer_v1(last_cmd_buf_db[ctl->idx]);
  1083. if (validate_last_cmd(&cfg)) {
  1084. DRM_ERROR("validate buf failed\n");
  1085. return -EINVAL;
  1086. }
  1087. if (write_last_cmd(&cfg)) {
  1088. DRM_ERROR("write buf failed\n");
  1089. return -EINVAL;
  1090. }
  1091. kick_off.ctl = ctl;
  1092. kick_off.queue_select = q;
  1093. kick_off.trigger_mode = WRITE_IMMEDIATE;
  1094. kick_off.last_command = 1;
  1095. kick_off.op = REG_DMA_WRITE;
  1096. kick_off.dma_type = REG_DMA_TYPE_DB;
  1097. kick_off.dma_buf = last_cmd_buf_db[ctl->idx];
  1098. kick_off.feature = REG_DMA_FEATURES_MAX;
  1099. rc = kick_off_v1(&kick_off);
  1100. if (rc) {
  1101. DRM_ERROR("kick off last cmd failed\n");
  1102. return rc;
  1103. }
  1104. // Lack of block support will be caught by kick_off
  1105. memset(&hw, 0, sizeof(hw));
  1106. SET_UP_REG_DMA_REG(hw, reg_dma, kick_off.dma_type);
  1107. SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY, mode, ctl->idx, kick_off.queue_select,
  1108. kick_off.dma_type, kick_off.op);
  1109. if (mode == REG_DMA_WAIT4_COMP) {
  1110. rc = read_poll_timeout(sde_reg_read, val,
  1111. (val & ctl_trigger_done_mask[ctl->idx][q]), 10, false, 20000,
  1112. &hw, reg_dma_intr_status_offset);
  1113. if (rc)
  1114. DRM_ERROR("poll wait failed %d val %x mask %x\n",
  1115. rc, val, ctl_trigger_done_mask[ctl->idx][q]);
  1116. SDE_EVT32(SDE_EVTLOG_FUNC_EXIT, mode);
  1117. }
  1118. return rc;
  1119. }
  1120. void deinit_v1(void)
  1121. {
  1122. int i = 0;
  1123. for (i = CTL_0; i < CTL_MAX; i++) {
  1124. if (last_cmd_buf_db[i])
  1125. dealloc_reg_dma_v1(last_cmd_buf_db[i]);
  1126. last_cmd_buf_db[i] = NULL;
  1127. if (last_cmd_buf_sb[i])
  1128. dealloc_reg_dma_v1(last_cmd_buf_sb[i]);
  1129. last_cmd_buf_sb[i] = NULL;
  1130. }
  1131. }
  1132. static void dump_regs_v1(void)
  1133. {
  1134. uint32_t i = 0, k = 0;
  1135. u32 val;
  1136. struct sde_hw_blk_reg_map hw;
  1137. for (k = 0; k < REG_DMA_TYPE_MAX; k++) {
  1138. memset(&hw, 0, sizeof(hw));
  1139. SET_UP_REG_DMA_REG(hw, reg_dma, k);
  1140. if (hw.hw_rev == 0)
  1141. continue;
  1142. for (i = 0; i < reg_dma_register_count; i++) {
  1143. val = SDE_REG_READ(&hw, i * sizeof(u32));
  1144. DRM_ERROR("offset %x val %x\n", (u32)(i * sizeof(u32)),
  1145. val);
  1146. }
  1147. }
  1148. }
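/* SB variant of the last command: always submitted on DMA_CTL_QUEUE1 and
 * triggered by the DSPP_SB flush rather than a SW trigger.
 */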
  1149. static int last_cmd_sb_v2(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
  1150. enum sde_reg_dma_last_cmd_mode mode)
  1151. {
  1152. struct sde_reg_dma_setup_ops_cfg cfg;
  1153. struct sde_reg_dma_kickoff_cfg kick_off;
  1154. int rc = 0;
  1155. if (!ctl || ctl->idx >= CTL_MAX || q >= DMA_CTL_QUEUE_MAX) {
  1156. DRM_ERROR("ctl %pK q %d index %d\n", ctl, q,
  1157. ((ctl) ? ctl->idx : -1));
  1158. return -EINVAL;
  1159. }
  1160. if (!last_cmd_buf_sb[ctl->idx] || !last_cmd_buf_sb[ctl->idx]->iova) {
  1161. DRM_ERROR("invalid last cmd buf for idx %d\n", ctl->idx);
  1162. return -EINVAL;
  1163. }
  1164. cfg.dma_buf = last_cmd_buf_sb[ctl->idx];
  1165. reset_reg_dma_buffer_v1(last_cmd_buf_sb[ctl->idx]);
  1166. if (validate_last_cmd(&cfg)) {
  1167. DRM_ERROR("validate buf failed\n");
  1168. return -EINVAL;
  1169. }
  1170. if (write_last_cmd(&cfg)) {
  1171. DRM_ERROR("write buf failed\n");
  1172. return -EINVAL;
  1173. }
  1174. kick_off.ctl = ctl;
  1175. kick_off.trigger_mode = WRITE_IMMEDIATE;
  1176. kick_off.last_command = 1;
  1177. kick_off.op = REG_DMA_WRITE;
  1178. kick_off.dma_type = REG_DMA_TYPE_SB;
  1179. kick_off.queue_select = DMA_CTL_QUEUE1;
  1180. kick_off.dma_buf = last_cmd_buf_sb[ctl->idx];
  1181. kick_off.feature = REG_DMA_FEATURES_MAX;
  1182. rc = kick_off_v1(&kick_off);
  1183. if (rc)
  1184. DRM_ERROR("kick off last cmd failed\n");
  1185. SDE_EVT32(ctl->idx, kick_off.queue_select, kick_off.dma_type,
  1186. kick_off.op);
  1187. return rc;
  1188. }