sde_hw_reg_dma_v1.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
 */
#include <linux/iopoll.h>
#include "sde_hw_mdss.h"
#include "sde_hw_ctl.h"
#include "sde_hw_reg_dma_v1.h"
#include "msm_drv.h"
#include "msm_mmu.h"
#include "sde_dbg.h"

#define GUARD_BYTES (BIT(8) - 1)
#define ALIGNED_OFFSET (U32_MAX & ~(GUARD_BYTES))
#define ADDR_ALIGN BIT(8)
#define MAX_RELATIVE_OFF (BIT(20) - 1)
#define ABSOLUTE_RANGE BIT(27)

#define DECODE_SEL_OP (BIT(HW_BLK_SELECT))
#define REG_WRITE_OP ((BIT(REG_SINGLE_WRITE)) | (BIT(REG_BLK_WRITE_SINGLE)) | \
	(BIT(REG_BLK_WRITE_INC)) | (BIT(REG_BLK_WRITE_MULTIPLE)) | \
	(BIT(REG_SINGLE_MODIFY)) | (BIT(REG_BLK_LUT_WRITE)))

#define REG_DMA_OPS (DECODE_SEL_OP | REG_WRITE_OP)
#define IS_OP_ALLOWED(op, buf_op) (BIT(op) & buf_op)

#define SET_UP_REG_DMA_REG(hw, reg_dma, i) \
	do { \
		if ((reg_dma)->caps->reg_dma_blks[(i)].valid == false) \
			break; \
		(hw).base_off = (reg_dma)->addr; \
		(hw).blk_off = (reg_dma)->caps->reg_dma_blks[(i)].base; \
		(hw).hwversion = (reg_dma)->caps->version; \
		(hw).log_mask = SDE_DBG_MASK_REGDMA; \
	} while (0)

#define SIZE_DWORD(x) ((x) / (sizeof(u32)))
#define NOT_WORD_ALIGNED(x) ((x) & 0x3)

#define GRP_VIG_HW_BLK_SELECT (VIG0 | VIG1 | VIG2 | VIG3)
#define GRP_DMA_HW_BLK_SELECT (DMA0 | DMA1 | DMA2 | DMA3)
#define GRP_DSPP_HW_BLK_SELECT (DSPP0 | DSPP1 | DSPP2 | DSPP3)
#define GRP_LTM_HW_BLK_SELECT (LTM0 | LTM1)
#define GRP_MDSS_HW_BLK_SELECT (MDSS)
#define BUFFER_SPACE_LEFT(cfg) ((cfg)->dma_buf->buffer_size - \
		(cfg)->dma_buf->index)

#define REL_ADDR_OPCODE (BIT(27))
#define NO_OP_OPCODE (0)
#define SINGLE_REG_WRITE_OPCODE (BIT(28))
#define SINGLE_REG_MODIFY_OPCODE (BIT(29))
#define HW_INDEX_REG_WRITE_OPCODE (BIT(28) | BIT(29))
#define AUTO_INC_REG_WRITE_OPCODE (BIT(30))
#define BLK_REG_WRITE_OPCODE (BIT(30) | BIT(28))
#define LUTBUS_WRITE_OPCODE (BIT(30) | BIT(29))

#define WRAP_MIN_SIZE 2
#define WRAP_MAX_SIZE (BIT(4) - 1)
#define MAX_DWORDS_SZ (BIT(14) - 1)
#define REG_DMA_HEADERS_BUFFER_SZ (sizeof(u32) * 128)

#define LUTBUS_TABLE_SEL_MASK 0x10000
#define LUTBUS_BLOCK_SEL_MASK 0xffff
#define LUTBUS_TRANS_SZ_MASK 0xff0000
#define LUTBUS_LUT_SIZE_MASK 0x3fff
static uint32_t reg_dma_register_count;
static uint32_t reg_dma_decode_sel;
static uint32_t reg_dma_opmode_offset;
static uint32_t reg_dma_ctl0_queue0_cmd0_offset;
static uint32_t reg_dma_ctl0_queue1_cmd0_offset;
static uint32_t reg_dma_intr_status_offset;
static uint32_t reg_dma_intr_4_status_offset;
static uint32_t reg_dma_intr_clear_offset;
static uint32_t reg_dma_ctl_trigger_offset;
static uint32_t reg_dma_ctl0_reset_offset;
static uint32_t reg_dma_error_clear_mask;
static uint32_t reg_dma_ctl_queue_off[CTL_MAX];
static uint32_t reg_dma_ctl_queue1_off[CTL_MAX];

typedef int (*reg_dma_internal_ops) (struct sde_reg_dma_setup_ops_cfg *cfg);

static struct sde_hw_reg_dma *reg_dma;

static u32 ops_mem_size[REG_DMA_SETUP_OPS_MAX] = {
	[REG_BLK_WRITE_SINGLE] = sizeof(u32) * 2,
	[REG_BLK_WRITE_INC] = sizeof(u32) * 2,
	[REG_BLK_WRITE_MULTIPLE] = sizeof(u32) * 2,
	[HW_BLK_SELECT] = sizeof(u32) * 2,
	[REG_SINGLE_WRITE] = sizeof(u32) * 2,
	[REG_SINGLE_MODIFY] = sizeof(u32) * 3,
	[REG_BLK_LUT_WRITE] = sizeof(u32) * 2,
};

static u32 queue_sel[DMA_CTL_QUEUE_MAX] = {
	[DMA_CTL_QUEUE0] = BIT(0),
	[DMA_CTL_QUEUE1] = BIT(4),
};

static u32 dspp_read_sel[DSPP_HIST_MAX] = {
	[DSPP0_HIST] = 0,
	[DSPP1_HIST] = 1,
	[DSPP2_HIST] = 2,
	[DSPP3_HIST] = 3,
};

static u32 v1_supported[REG_DMA_FEATURES_MAX] = {
	[GAMUT] = GRP_VIG_HW_BLK_SELECT | GRP_DSPP_HW_BLK_SELECT,
	[VLUT] = GRP_DSPP_HW_BLK_SELECT,
	[GC] = GRP_DSPP_HW_BLK_SELECT,
	[IGC] = DSPP_IGC | GRP_DSPP_HW_BLK_SELECT,
	[PCC] = GRP_DSPP_HW_BLK_SELECT,
};

static u32 ctl_trigger_done_mask[CTL_MAX][DMA_CTL_QUEUE_MAX] = {
	[CTL_0][0] = BIT(16),
	[CTL_0][1] = BIT(21),
	[CTL_1][0] = BIT(17),
	[CTL_1][1] = BIT(22),
	[CTL_2][0] = BIT(18),
	[CTL_2][1] = BIT(23),
	[CTL_3][0] = BIT(19),
	[CTL_3][1] = BIT(24),
	[CTL_4][0] = BIT(25),
	[CTL_4][1] = BIT(27),
	[CTL_5][0] = BIT(26),
	[CTL_5][1] = BIT(28),
};
static int validate_dma_cfg(struct sde_reg_dma_setup_ops_cfg *cfg);
static int validate_write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg);
static int validate_write_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
static int validate_blk_lut_write(struct sde_reg_dma_setup_ops_cfg *cfg);
static int validate_write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
static int validate_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_single_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_multi_reg_index(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_multi_reg_inc(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_single_modify(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_block_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg);
static int reset_reg_dma_buffer_v1(struct sde_reg_dma_buffer *lut_buf);
static int check_support_v1(enum sde_reg_dma_features feature,
		enum sde_reg_dma_blk blk, bool *is_supported);
static int setup_payload_v1(struct sde_reg_dma_setup_ops_cfg *cfg);
static int kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg);
static int reset_v1(struct sde_hw_ctl *ctl);
static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
		enum sde_reg_dma_last_cmd_mode mode);
static struct sde_reg_dma_buffer *alloc_reg_dma_buf_v1(u32 size);
static int dealloc_reg_dma_v1(struct sde_reg_dma_buffer *lut_buf);
static void dump_regs_v1(void);
static int last_cmd_sb_v2(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
		enum sde_reg_dma_last_cmd_mode mode);

static reg_dma_internal_ops write_dma_op_params[REG_DMA_SETUP_OPS_MAX] = {
	[HW_BLK_SELECT] = write_decode_sel,
	[REG_SINGLE_WRITE] = write_single_reg,
	[REG_BLK_WRITE_SINGLE] = write_multi_reg_inc,
	[REG_BLK_WRITE_INC] = write_multi_reg_index,
	[REG_BLK_WRITE_MULTIPLE] = write_multi_lut_reg,
	[REG_SINGLE_MODIFY] = write_single_modify,
	[REG_BLK_LUT_WRITE] = write_block_lut_reg,
};

static reg_dma_internal_ops validate_dma_op_params[REG_DMA_SETUP_OPS_MAX] = {
	[HW_BLK_SELECT] = validate_write_decode_sel,
	[REG_SINGLE_WRITE] = validate_write_reg,
	[REG_BLK_WRITE_SINGLE] = validate_write_reg,
	[REG_BLK_WRITE_INC] = validate_write_reg,
	[REG_BLK_WRITE_MULTIPLE] = validate_write_multi_lut_reg,
	[REG_SINGLE_MODIFY] = validate_write_reg,
	[REG_BLK_LUT_WRITE] = validate_blk_lut_write,
};

static struct sde_reg_dma_buffer *last_cmd_buf_db[CTL_MAX];
static struct sde_reg_dma_buffer *last_cmd_buf_sb[CTL_MAX];
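
/*
 * get_decode_sel - translate a bitmask of sde_reg_dma_blk values into the
 * hardware decode-select word consumed by the LUTDMA engine; unknown block
 * bits are reported and skipped.
 */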
static void get_decode_sel(unsigned long blk, u32 *decode_sel)
{
	int i = 0;

	*decode_sel = 0;
	for_each_set_bit(i, &blk, REG_DMA_BLK_MAX) {
		switch (BIT(i)) {
		case VIG0:
			*decode_sel |= BIT(0);
			break;
		case VIG1:
			*decode_sel |= BIT(1);
			break;
		case VIG2:
			*decode_sel |= BIT(2);
			break;
		case VIG3:
			*decode_sel |= BIT(3);
			break;
		case DMA0:
			*decode_sel |= BIT(5);
			break;
		case DMA1:
			*decode_sel |= BIT(6);
			break;
		case DMA2:
			*decode_sel |= BIT(7);
			break;
		case DMA3:
			*decode_sel |= BIT(8);
			break;
		case DSPP0:
			*decode_sel |= BIT(17);
			break;
		case DSPP1:
			*decode_sel |= BIT(18);
			break;
		case DSPP2:
			*decode_sel |= BIT(19);
			break;
		case DSPP3:
			*decode_sel |= BIT(20);
			break;
		case SSPP_IGC:
			*decode_sel |= BIT(4);
			break;
		case DSPP_IGC:
			*decode_sel |= BIT(21);
			break;
		case LTM0:
			*decode_sel |= BIT(22);
			break;
		case LTM1:
			*decode_sel |= BIT(23);
			break;
		case MDSS:
			*decode_sel |= BIT(31);
			break;
		default:
			DRM_ERROR("block not supported %zx\n", (size_t)BIT(i));
			break;
		}
	}
}
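
/*
 * write_multi_reg - copy the raw payload pointed to by cfg->data into the
 * DMA buffer at the current index and advance the bookkeeping so a register
 * write or decode select may follow.
 */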
static int write_multi_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u8 *loc = NULL;

	loc = (u8 *)cfg->dma_buf->vaddr + cfg->dma_buf->index;
	memcpy(loc, cfg->data, cfg->data_size);
	cfg->dma_buf->index += cfg->data_size;
	cfg->dma_buf->next_op_allowed = REG_WRITE_OP | DECODE_SEL_OP;
	cfg->dma_buf->ops_completed |= REG_WRITE_OP;

	return 0;
}

int write_multi_reg_index(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 *loc = NULL;

	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = HW_INDEX_REG_WRITE_OPCODE;
	loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
	if (cfg->blk == MDSS)
		loc[0] |= ABSOLUTE_RANGE;
	loc[1] = SIZE_DWORD(cfg->data_size);
	cfg->dma_buf->index += ops_mem_size[cfg->ops];

	return write_multi_reg(cfg);
}

int write_multi_reg_inc(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 *loc = NULL;

	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = AUTO_INC_REG_WRITE_OPCODE;
	if (cfg->blk == MDSS)
		loc[0] |= ABSOLUTE_RANGE;
	loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
	loc[1] = SIZE_DWORD(cfg->data_size);
	cfg->dma_buf->index += ops_mem_size[cfg->ops];

	return write_multi_reg(cfg);
}

static int write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 *loc = NULL;

	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = BLK_REG_WRITE_OPCODE;
	loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
	if (cfg->blk == MDSS)
		loc[0] |= ABSOLUTE_RANGE;
	loc[1] = (cfg->inc) ? 0 : BIT(31);
	loc[1] |= (cfg->wrap_size & WRAP_MAX_SIZE) << 16;
	loc[1] |= ((SIZE_DWORD(cfg->data_size)) & MAX_DWORDS_SZ);
	cfg->dma_buf->next_op_allowed = REG_WRITE_OP;
	cfg->dma_buf->index += ops_mem_size[cfg->ops];

	return write_multi_reg(cfg);
}

static int write_single_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 *loc = NULL;

	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = SINGLE_REG_WRITE_OPCODE;
	loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
	if (cfg->blk == MDSS)
		loc[0] |= ABSOLUTE_RANGE;
	loc[1] = *cfg->data;
	cfg->dma_buf->index += ops_mem_size[cfg->ops];
	cfg->dma_buf->ops_completed |= REG_WRITE_OP;
	cfg->dma_buf->next_op_allowed = REG_WRITE_OP | DECODE_SEL_OP;

	return 0;
}

static int write_single_modify(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 *loc = NULL;

	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = SINGLE_REG_MODIFY_OPCODE;
	loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
	if (cfg->blk == MDSS)
		loc[0] |= ABSOLUTE_RANGE;
	loc[1] = cfg->mask;
	loc[2] = *cfg->data;
	cfg->dma_buf->index += ops_mem_size[cfg->ops];
	cfg->dma_buf->ops_completed |= REG_WRITE_OP;
	cfg->dma_buf->next_op_allowed = REG_WRITE_OP | DECODE_SEL_OP;

	return 0;
}

static int write_block_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 *loc = NULL;
	int rc = -EINVAL;

	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = LUTBUS_WRITE_OPCODE;
	loc[0] |= (cfg->table_sel << 16) & LUTBUS_TABLE_SEL_MASK;
	loc[0] |= (cfg->block_sel & LUTBUS_BLOCK_SEL_MASK);
	loc[1] = (cfg->trans_size << 16) & LUTBUS_TRANS_SZ_MASK;
	loc[1] |= (cfg->lut_size & LUTBUS_LUT_SIZE_MASK);
	cfg->dma_buf->index += ops_mem_size[cfg->ops];

	rc = write_multi_reg(cfg);
	if (rc)
		return rc;

	/* adding 3 NO OPs as SW workaround for REG_BLK_LUT_WRITE
	 * HW limitation that requires the residual data plus the
	 * following opcode to exceed 4 DWORDs length.
	 */
	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = NO_OP_OPCODE;
	loc[1] = NO_OP_OPCODE;
	loc[2] = NO_OP_OPCODE;
	cfg->dma_buf->index += sizeof(u32) * 3;

	return 0;
}

static int write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 *loc = NULL;

	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = reg_dma_decode_sel;
	get_decode_sel(cfg->blk, &loc[1]);
	cfg->dma_buf->index += ops_mem_size[cfg->ops];
	cfg->dma_buf->ops_completed |= DECODE_SEL_OP;
	cfg->dma_buf->next_op_allowed = REG_WRITE_OP;

	return 0;
}
static int validate_write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	int rc;

	rc = validate_write_reg(cfg);
	if (rc)
		return rc;

	if (cfg->wrap_size < WRAP_MIN_SIZE || cfg->wrap_size > WRAP_MAX_SIZE) {
		DRM_ERROR("invalid wrap sz %d min %d max %zd\n",
			cfg->wrap_size, WRAP_MIN_SIZE, (size_t)WRAP_MAX_SIZE);
		rc = -EINVAL;
	}

	return rc;
}

static int validate_blk_lut_write(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	int rc;

	rc = validate_write_reg(cfg);
	if (rc)
		return rc;

	if (cfg->table_sel >= LUTBUS_TABLE_SELECT_MAX ||
			cfg->block_sel >= LUTBUS_BLOCK_MAX ||
			(cfg->trans_size != LUTBUS_IGC_TRANS_SIZE &&
			cfg->trans_size != LUTBUS_GAMUT_TRANS_SIZE)) {
		DRM_ERROR("invalid table_sel %d block_sel %d trans_size %d\n",
				cfg->table_sel, cfg->block_sel,
				cfg->trans_size);
		rc = -EINVAL;
	}

	return rc;
}

static int validate_write_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 remain_len, write_len;

	remain_len = BUFFER_SPACE_LEFT(cfg);
	write_len = ops_mem_size[cfg->ops] + cfg->data_size;
	if (remain_len < write_len) {
		DRM_ERROR("buffer is full sz %d needs %d bytes\n",
				remain_len, write_len);
		return -EINVAL;
	}

	if (!cfg->data) {
		DRM_ERROR("invalid data %pK size %d exp sz %d\n", cfg->data,
				cfg->data_size, write_len);
		return -EINVAL;
	}

	if ((SIZE_DWORD(cfg->data_size)) > MAX_DWORDS_SZ ||
			NOT_WORD_ALIGNED(cfg->data_size)) {
		DRM_ERROR("Invalid data size %d max %zd align %x\n",
				cfg->data_size, (size_t)MAX_DWORDS_SZ,
				NOT_WORD_ALIGNED(cfg->data_size));
		return -EINVAL;
	}

	if (cfg->blk_offset > MAX_RELATIVE_OFF ||
			NOT_WORD_ALIGNED(cfg->blk_offset)) {
		DRM_ERROR("invalid offset %d max %zd align %x\n",
				cfg->blk_offset, (size_t)MAX_RELATIVE_OFF,
				NOT_WORD_ALIGNED(cfg->blk_offset));
		return -EINVAL;
	}

	return 0;
}

static int validate_write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 remain_len;
	bool vig_blk, dma_blk, dspp_blk, mdss_blk;

	remain_len = BUFFER_SPACE_LEFT(cfg);
	if (remain_len < ops_mem_size[HW_BLK_SELECT]) {
		DRM_ERROR("buffer is full needs %d bytes\n",
				ops_mem_size[HW_BLK_SELECT]);
		return -EINVAL;
	}

	if (!cfg->blk) {
		DRM_ERROR("blk set as 0\n");
		return -EINVAL;
	}

	vig_blk = (cfg->blk & GRP_VIG_HW_BLK_SELECT) ? true : false;
	dma_blk = (cfg->blk & GRP_DMA_HW_BLK_SELECT) ? true : false;
	dspp_blk = (cfg->blk & GRP_DSPP_HW_BLK_SELECT) ? true : false;
	mdss_blk = (cfg->blk & MDSS) ? true : false;

	if ((vig_blk && dspp_blk) || (dma_blk && dspp_blk) ||
			(vig_blk && dma_blk) ||
			(mdss_blk && (vig_blk | dma_blk | dspp_blk))) {
		DRM_ERROR("invalid blk combination %x\n", cfg->blk);
		return -EINVAL;
	}

	return 0;
}

static int validate_dma_cfg(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	int rc = 0;
	bool supported;

	if (!cfg || cfg->ops >= REG_DMA_SETUP_OPS_MAX || !cfg->dma_buf) {
		DRM_ERROR("invalid param cfg %pK ops %d dma_buf %pK\n",
				cfg, ((cfg) ? cfg->ops : REG_DMA_SETUP_OPS_MAX),
				((cfg) ? cfg->dma_buf : NULL));
		return -EINVAL;
	}

	rc = check_support_v1(cfg->feature, cfg->blk, &supported);
	if (rc || !supported) {
		DRM_ERROR("check support failed rc %d supported %d\n",
				rc, supported);
		rc = -EINVAL;
		return rc;
	}

	if (cfg->dma_buf->index >= cfg->dma_buf->buffer_size ||
			NOT_WORD_ALIGNED(cfg->dma_buf->index)) {
		DRM_ERROR("Buf Overflow index %d max size %d align %x\n",
				cfg->dma_buf->index, cfg->dma_buf->buffer_size,
				NOT_WORD_ALIGNED(cfg->dma_buf->index));
		return -EINVAL;
	}

	if (cfg->dma_buf->iova & GUARD_BYTES || !cfg->dma_buf->vaddr) {
		DRM_ERROR("iova not aligned to %zx iova %llx kva %pK",
				(size_t)ADDR_ALIGN, cfg->dma_buf->iova,
				cfg->dma_buf->vaddr);
		return -EINVAL;
	}

	if (!IS_OP_ALLOWED(cfg->ops, cfg->dma_buf->next_op_allowed)) {
		DRM_ERROR("invalid op %x allowed %x\n", cfg->ops,
				cfg->dma_buf->next_op_allowed);
		return -EINVAL;
	}

	if (!validate_dma_op_params[cfg->ops] ||
			!write_dma_op_params[cfg->ops]) {
		DRM_ERROR("invalid op %d validate %pK write %pK\n", cfg->ops,
				validate_dma_op_params[cfg->ops],
				write_dma_op_params[cfg->ops]);
		return -EINVAL;
	}

	return rc;
}
static int validate_kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)
{
	if (!cfg || !cfg->ctl || !cfg->dma_buf ||
			cfg->dma_type >= REG_DMA_TYPE_MAX) {
		DRM_ERROR("invalid cfg %pK ctl %pK dma_buf %pK dma type %d\n",
				cfg, ((!cfg) ? NULL : cfg->ctl),
				((!cfg) ? NULL : cfg->dma_buf),
				((!cfg) ? 0 : cfg->dma_type));
		return -EINVAL;
	}

	if (reg_dma->caps->reg_dma_blks[cfg->dma_type].valid == false) {
		DRM_DEBUG("REG dma type %d is not supported\n", cfg->dma_type);
		return -EOPNOTSUPP;
	}

	if (cfg->ctl->idx < CTL_0 || cfg->ctl->idx >= CTL_MAX) {
		DRM_ERROR("invalid ctl idx %d\n", cfg->ctl->idx);
		return -EINVAL;
	}

	if (cfg->op >= REG_DMA_OP_MAX) {
		DRM_ERROR("invalid op %d\n", cfg->op);
		return -EINVAL;
	}

	if ((cfg->op == REG_DMA_WRITE) &&
			(!(cfg->dma_buf->ops_completed & DECODE_SEL_OP) ||
			!(cfg->dma_buf->ops_completed & REG_WRITE_OP))) {
		DRM_ERROR("incomplete write ops %x\n",
				cfg->dma_buf->ops_completed);
		return -EINVAL;
	}

	if (cfg->op == REG_DMA_READ && cfg->block_select >= DSPP_HIST_MAX) {
		DRM_ERROR("invalid block for read %d\n", cfg->block_select);
		return -EINVAL;
	}

	/* Only immediate triggers are supported now hence hardcode */
	cfg->trigger_mode = (cfg->op == REG_DMA_READ) ? (READ_TRIGGER) :
				(WRITE_TRIGGER);

	if (cfg->dma_buf->iova & GUARD_BYTES) {
		DRM_ERROR("Address is not aligned to %zx iova %llx",
				(size_t)ADDR_ALIGN, cfg->dma_buf->iova);
		return -EINVAL;
	}

	if (cfg->queue_select >= DMA_CTL_QUEUE_MAX) {
		DRM_ERROR("invalid queue selected %d\n", cfg->queue_select);
		return -EINVAL;
	}

	if (SIZE_DWORD(cfg->dma_buf->index) > MAX_DWORDS_SZ ||
			!cfg->dma_buf->index) {
		DRM_ERROR("invalid dword size %zd max %zd\n",
				(size_t)SIZE_DWORD(cfg->dma_buf->index),
				(size_t)MAX_DWORDS_SZ);
		return -EINVAL;
	}

	if (cfg->dma_type == REG_DMA_TYPE_SB &&
			(cfg->queue_select != DMA_CTL_QUEUE1 ||
			cfg->op == REG_DMA_READ)) {
		DRM_ERROR("invalid queue selected %d or op %d for SB LUTDMA\n",
				cfg->queue_select, cfg->op);
		return -EINVAL;
	}

	return 0;
}
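
/*
 * write_kick_off_v1 - build the queue command word (transfer size, op type,
 * last-command flag and read block select), log and clear any pending error
 * status, program the buffer iova and command into the CTL queue registers,
 * and issue the SW trigger for DB LUTDMA on the last command.
 */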
static int write_kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)
{
	u32 cmd1, mask = 0, val = 0;
	struct sde_hw_blk_reg_map hw;

	memset(&hw, 0, sizeof(hw));
	msm_gem_sync(cfg->dma_buf->buf);
	cmd1 = (cfg->op == REG_DMA_READ) ?
		(dspp_read_sel[cfg->block_select] << 30) : 0;
	cmd1 |= (cfg->last_command) ? BIT(24) : 0;
	cmd1 |= (cfg->op == REG_DMA_READ) ? (2 << 22) : 0;
	cmd1 |= (cfg->op == REG_DMA_WRITE) ? (BIT(22)) : 0;
	cmd1 |= (SIZE_DWORD(cfg->dma_buf->index) & MAX_DWORDS_SZ);

	if (cfg->dma_type == REG_DMA_TYPE_DB)
		SET_UP_REG_DMA_REG(hw, reg_dma, REG_DMA_TYPE_DB);
	else if (cfg->dma_type == REG_DMA_TYPE_SB)
		SET_UP_REG_DMA_REG(hw, reg_dma, REG_DMA_TYPE_SB);

	if (hw.hwversion == 0) {
		DRM_ERROR("DMA type %d is unsupported\n", cfg->dma_type);
		return -EOPNOTSUPP;
	}

	SDE_REG_WRITE(&hw, reg_dma_opmode_offset, BIT(0));
	val = SDE_REG_READ(&hw, reg_dma_intr_4_status_offset);
	if (val) {
		DRM_DEBUG("LUT dma status %x\n", val);
		mask = reg_dma_error_clear_mask;
		SDE_REG_WRITE(&hw, reg_dma_intr_clear_offset + sizeof(u32) * 4,
				mask);
		SDE_EVT32(val);
	}

	if (cfg->dma_type == REG_DMA_TYPE_DB) {
		SDE_REG_WRITE(&hw, reg_dma_ctl_queue_off[cfg->ctl->idx],
				cfg->dma_buf->iova);
		SDE_REG_WRITE(&hw, reg_dma_ctl_queue_off[cfg->ctl->idx] + 0x4,
				cmd1);
	} else if (cfg->dma_type == REG_DMA_TYPE_SB) {
		SDE_REG_WRITE(&hw, reg_dma_ctl_queue1_off[cfg->ctl->idx],
				cfg->dma_buf->iova);
		SDE_REG_WRITE(&hw, reg_dma_ctl_queue1_off[cfg->ctl->idx] + 0x4,
				cmd1);
	}

	if (cfg->last_command) {
		mask = ctl_trigger_done_mask[cfg->ctl->idx][cfg->queue_select];
		SDE_REG_WRITE(&hw, reg_dma_intr_clear_offset, mask);
		/* DB LUTDMA uses a SW trigger while SB LUTDMA uses the DSPP_SB
		 * flush as its trigger event.
		 */
		if (cfg->dma_type == REG_DMA_TYPE_DB) {
			SDE_REG_WRITE(&cfg->ctl->hw, reg_dma_ctl_trigger_offset,
					queue_sel[cfg->queue_select]);
		}
	}

	return 0;
}
int init_v1(struct sde_hw_reg_dma *cfg)
{
	int i = 0, rc = 0;

	if (!cfg)
		return -EINVAL;

	reg_dma = cfg;
	for (i = CTL_0; i < CTL_MAX; i++) {
		if (!last_cmd_buf_db[i]) {
			last_cmd_buf_db[i] =
				alloc_reg_dma_buf_v1(REG_DMA_HEADERS_BUFFER_SZ);
			if (IS_ERR_OR_NULL(last_cmd_buf_db[i])) {
				/*
				 * This will allow reg dma to fall back to
				 * AHB domain
				 */
				pr_info("Failed to allocate reg dma, ret:%lu\n",
						PTR_ERR(last_cmd_buf_db[i]));
				return 0;
			}
		}
		if (!last_cmd_buf_sb[i]) {
			last_cmd_buf_sb[i] =
				alloc_reg_dma_buf_v1(REG_DMA_HEADERS_BUFFER_SZ);
			if (IS_ERR_OR_NULL(last_cmd_buf_sb[i])) {
				/*
				 * This will allow reg dma to fall back to
				 * AHB domain
				 */
				pr_info("Failed to allocate reg dma, ret:%lu\n",
						PTR_ERR(last_cmd_buf_sb[i]));
				return 0;
			}
		}
	}

	if (rc) {
		for (i = 0; i < CTL_MAX; i++) {
			if (!last_cmd_buf_db[i])
				continue;
			dealloc_reg_dma_v1(last_cmd_buf_db[i]);
			last_cmd_buf_db[i] = NULL;
		}
		for (i = 0; i < CTL_MAX; i++) {
			if (!last_cmd_buf_sb[i])
				continue;
			dealloc_reg_dma_v1(last_cmd_buf_sb[i]);
			last_cmd_buf_sb[i] = NULL;
		}
		return rc;
	}

	reg_dma->ops.check_support = check_support_v1;
	reg_dma->ops.setup_payload = setup_payload_v1;
	reg_dma->ops.kick_off = kick_off_v1;
	reg_dma->ops.reset = reset_v1;
	reg_dma->ops.alloc_reg_dma_buf = alloc_reg_dma_buf_v1;
	reg_dma->ops.dealloc_reg_dma = dealloc_reg_dma_v1;
	reg_dma->ops.reset_reg_dma_buf = reset_reg_dma_buffer_v1;
	reg_dma->ops.last_command = last_cmd_v1;
	reg_dma->ops.dump_regs = dump_regs_v1;

	reg_dma_register_count = 60;
	reg_dma_decode_sel = 0x180ac060;
	reg_dma_opmode_offset = 0x4;
	reg_dma_ctl0_queue0_cmd0_offset = 0x14;
	reg_dma_intr_status_offset = 0x90;
	reg_dma_intr_4_status_offset = 0xa0;
	reg_dma_intr_clear_offset = 0xb0;
	reg_dma_ctl_trigger_offset = 0xd4;
	reg_dma_ctl0_reset_offset = 0xe4;
	reg_dma_error_clear_mask = BIT(0) | BIT(1) | BIT(2) | BIT(16);

	reg_dma_ctl_queue_off[CTL_0] = reg_dma_ctl0_queue0_cmd0_offset;
	for (i = CTL_1; i < ARRAY_SIZE(reg_dma_ctl_queue_off); i++)
		reg_dma_ctl_queue_off[i] = reg_dma_ctl_queue_off[i - 1] +
			(sizeof(u32) * 4);

	return 0;
}
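
/*
 * init_v11/init_v12/init_v2 layer on top of init_v1: each chains to the
 * previous version and then overrides the register offsets, error clear mask
 * and v1_supported feature-to-block map for the newer LUTDMA hardware.
 */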
int init_v11(struct sde_hw_reg_dma *cfg)
{
	int ret = 0, i = 0;

	ret = init_v1(cfg);
	if (ret) {
		DRM_ERROR("failed to initialize v1: ret %d\n", ret);
		return -EINVAL;
	}

	/* initialize register offsets and v1_supported based on version */
	reg_dma_register_count = 133;
	reg_dma_decode_sel = 0x180ac114;
	reg_dma_opmode_offset = 0x4;
	reg_dma_ctl0_queue0_cmd0_offset = 0x14;
	reg_dma_intr_status_offset = 0x160;
	reg_dma_intr_4_status_offset = 0x170;
	reg_dma_intr_clear_offset = 0x1a0;
	reg_dma_ctl_trigger_offset = 0xd4;
	reg_dma_ctl0_reset_offset = 0x200;
	reg_dma_error_clear_mask = BIT(0) | BIT(1) | BIT(2) | BIT(16) |
		BIT(17) | BIT(18);

	reg_dma_ctl_queue_off[CTL_0] = reg_dma_ctl0_queue0_cmd0_offset;
	for (i = CTL_1; i < ARRAY_SIZE(reg_dma_ctl_queue_off); i++)
		reg_dma_ctl_queue_off[i] = reg_dma_ctl_queue_off[i - 1] +
			(sizeof(u32) * 4);

	v1_supported[IGC] = DSPP_IGC | GRP_DSPP_HW_BLK_SELECT |
				GRP_VIG_HW_BLK_SELECT | GRP_DMA_HW_BLK_SELECT;
	v1_supported[GC] = GRP_DMA_HW_BLK_SELECT | GRP_DSPP_HW_BLK_SELECT;
	v1_supported[HSIC] = GRP_DSPP_HW_BLK_SELECT;
	v1_supported[SIX_ZONE] = GRP_DSPP_HW_BLK_SELECT;
	v1_supported[MEMC_SKIN] = GRP_DSPP_HW_BLK_SELECT;
	v1_supported[MEMC_SKY] = GRP_DSPP_HW_BLK_SELECT;
	v1_supported[MEMC_FOLIAGE] = GRP_DSPP_HW_BLK_SELECT;
	v1_supported[MEMC_PROT] = GRP_DSPP_HW_BLK_SELECT;
	v1_supported[QSEED] = GRP_VIG_HW_BLK_SELECT;

	return 0;
}

int init_v12(struct sde_hw_reg_dma *cfg)
{
	int ret = 0;

	ret = init_v11(cfg);
	if (ret) {
		DRM_ERROR("failed to initialize v11: ret %d\n", ret);
		return ret;
	}

	v1_supported[LTM_INIT] = GRP_LTM_HW_BLK_SELECT;
	v1_supported[LTM_ROI] = GRP_LTM_HW_BLK_SELECT;
	v1_supported[LTM_VLUT] = GRP_LTM_HW_BLK_SELECT;
	v1_supported[RC_DATA] = (GRP_DSPP_HW_BLK_SELECT |
			GRP_MDSS_HW_BLK_SELECT);
	v1_supported[SPR_INIT] = (GRP_DSPP_HW_BLK_SELECT |
			GRP_MDSS_HW_BLK_SELECT);
	v1_supported[DEMURA_CFG] = MDSS | DSPP0 | DSPP1;

	return 0;
}
int init_v2(struct sde_hw_reg_dma *cfg)
{
	int ret = 0, i = 0;

	ret = init_v12(cfg);
	if (ret) {
		DRM_ERROR("failed to initialize v12: ret %d\n", ret);
		return ret;
	}

	/* initialize register offsets based on version delta */
	reg_dma_register_count = 0x91;
	reg_dma_ctl0_queue1_cmd0_offset = 0x1c;
	reg_dma_error_clear_mask |= BIT(19);

	reg_dma_ctl_queue1_off[CTL_0] = reg_dma_ctl0_queue1_cmd0_offset;
	for (i = CTL_1; i < ARRAY_SIZE(reg_dma_ctl_queue1_off); i++)
		reg_dma_ctl_queue1_off[i] = reg_dma_ctl_queue1_off[i - 1] +
			(sizeof(u32) * 4);

	v1_supported[IGC] = GRP_DSPP_HW_BLK_SELECT | GRP_VIG_HW_BLK_SELECT |
				GRP_DMA_HW_BLK_SELECT;

	if (cfg->caps->reg_dma_blks[REG_DMA_TYPE_SB].valid == true)
		reg_dma->ops.last_command_sb = last_cmd_sb_v2;

	return 0;
}
static int check_support_v1(enum sde_reg_dma_features feature,
		enum sde_reg_dma_blk blk,
		bool *is_supported)
{
	int ret = 0;

	if (!is_supported)
		return -EINVAL;

	if (feature >= REG_DMA_FEATURES_MAX || blk >= BIT(REG_DMA_BLK_MAX)) {
		*is_supported = false;
		return ret;
	}

	*is_supported = (blk & v1_supported[feature]) ? true : false;

	return ret;
}

static int setup_payload_v1(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	int rc = 0;

	rc = validate_dma_cfg(cfg);
	if (!rc)
		rc = validate_dma_op_params[cfg->ops](cfg);
	if (!rc)
		rc = write_dma_op_params[cfg->ops](cfg);

	return rc;
}

static int kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)
{
	int rc = 0;

	rc = validate_kick_off_v1(cfg);
	if (rc)
		return rc;

	rc = write_kick_off_v1(cfg);

	return rc;
}
int reset_v1(struct sde_hw_ctl *ctl)
{
	struct sde_hw_blk_reg_map hw;
	u32 index, val, i = 0, k = 0;

	if (!ctl || ctl->idx >= CTL_MAX) {
		DRM_ERROR("invalid ctl %pK ctl idx %d\n",
				ctl, ((ctl) ? ctl->idx : 0));
		return -EINVAL;
	}

	index = ctl->idx - CTL_0;
	for (k = 0; k < REG_DMA_TYPE_MAX; k++) {
		memset(&hw, 0, sizeof(hw));
		SET_UP_REG_DMA_REG(hw, reg_dma, k);
		if (hw.hwversion == 0)
			continue;

		SDE_REG_WRITE(&hw, reg_dma_opmode_offset, BIT(0));
		SDE_REG_WRITE(&hw, (reg_dma_ctl0_reset_offset +
				index * sizeof(u32)), BIT(0));

		i = 0;
		do {
			udelay(1000);
			i++;
			val = SDE_REG_READ(&hw,
					(reg_dma_ctl0_reset_offset +
					index * sizeof(u32)));
		} while (i < 2 && val);
	}

	return 0;
}
static void sde_reg_dma_aspace_cb_locked(void *cb_data, bool is_detach)
{
	struct sde_reg_dma_buffer *dma_buf = NULL;
	struct msm_gem_address_space *aspace = NULL;
	u32 iova_aligned, offset;
	int rc;

	if (!cb_data) {
		DRM_ERROR("aspace cb called with invalid dma_buf\n");
		return;
	}

	dma_buf = (struct sde_reg_dma_buffer *)cb_data;
	aspace = dma_buf->aspace;

	if (is_detach) {
		/* invalidate the stored iova */
		dma_buf->iova = 0;

		/* return the virtual address mapping */
		msm_gem_put_vaddr(dma_buf->buf);
		msm_gem_vunmap(dma_buf->buf, OBJ_LOCK_NORMAL);
	} else {
		rc = msm_gem_get_iova(dma_buf->buf, aspace,
				&dma_buf->iova);
		if (rc) {
			DRM_ERROR("failed to get the iova rc %d\n", rc);
			return;
		}

		dma_buf->vaddr = msm_gem_get_vaddr(dma_buf->buf);
		if (IS_ERR_OR_NULL(dma_buf->vaddr)) {
			DRM_ERROR("failed to get va rc %d\n", rc);
			return;
		}

		iova_aligned = (dma_buf->iova + GUARD_BYTES) & ALIGNED_OFFSET;
		offset = iova_aligned - dma_buf->iova;
		dma_buf->iova = dma_buf->iova + offset;
		dma_buf->vaddr = (void *)(((u8 *)dma_buf->vaddr) + offset);
		dma_buf->next_op_allowed = DECODE_SEL_OP;
	}
}
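
/*
 * alloc_reg_dma_buf_v1 - allocate an uncached GEM buffer with GUARD_BYTES of
 * slack, map it into the unsecure SMMU domain, and round the iova/vaddr up
 * to the next 256-byte (ADDR_ALIGN) boundary required by the LUTDMA engine.
 */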
static struct sde_reg_dma_buffer *alloc_reg_dma_buf_v1(u32 size)
{
	struct sde_reg_dma_buffer *dma_buf = NULL;
	u32 iova_aligned, offset;
	u32 rsize = size + GUARD_BYTES;
	struct msm_gem_address_space *aspace = NULL;
	int rc = 0;

	if (!size || SIZE_DWORD(size) > MAX_DWORDS_SZ) {
		DRM_ERROR("invalid buffer size %d, max %d\n",
				SIZE_DWORD(size), MAX_DWORDS_SZ);
		return ERR_PTR(-EINVAL);
	}

	dma_buf = kzalloc(sizeof(*dma_buf), GFP_KERNEL);
	if (!dma_buf)
		return ERR_PTR(-ENOMEM);

	dma_buf->buf = msm_gem_new(reg_dma->drm_dev,
				rsize, MSM_BO_UNCACHED);
	if (IS_ERR_OR_NULL(dma_buf->buf)) {
		rc = -EINVAL;
		goto fail;
	}

	aspace = msm_gem_smmu_address_space_get(reg_dma->drm_dev,
			MSM_SMMU_DOMAIN_UNSECURE);
	if (!aspace) {
		DRM_ERROR("failed to get aspace\n");
		rc = -EINVAL;
		goto free_gem;
	}

	/* register to aspace */
	rc = msm_gem_address_space_register_cb(aspace,
			sde_reg_dma_aspace_cb_locked,
			(void *)dma_buf);
	if (rc) {
		DRM_ERROR("failed to register callback %d", rc);
		goto free_gem;
	}

	dma_buf->aspace = aspace;
	rc = msm_gem_get_iova(dma_buf->buf, aspace, &dma_buf->iova);
	if (rc) {
		DRM_ERROR("failed to get the iova rc %d\n", rc);
		goto free_aspace_cb;
	}

	dma_buf->vaddr = msm_gem_get_vaddr(dma_buf->buf);
	if (IS_ERR_OR_NULL(dma_buf->vaddr)) {
		DRM_ERROR("failed to get va rc %d\n", rc);
		rc = -EINVAL;
		goto put_iova;
	}

	dma_buf->buffer_size = size;
	iova_aligned = (dma_buf->iova + GUARD_BYTES) & ALIGNED_OFFSET;
	offset = iova_aligned - dma_buf->iova;
	dma_buf->iova = dma_buf->iova + offset;
	dma_buf->vaddr = (void *)(((u8 *)dma_buf->vaddr) + offset);
	dma_buf->next_op_allowed = DECODE_SEL_OP;

	return dma_buf;

put_iova:
	msm_gem_put_iova(dma_buf->buf, aspace);
free_aspace_cb:
	msm_gem_address_space_unregister_cb(aspace,
			sde_reg_dma_aspace_cb_locked, dma_buf);
free_gem:
	mutex_lock(&reg_dma->drm_dev->struct_mutex);
	msm_gem_free_object(dma_buf->buf);
	mutex_unlock(&reg_dma->drm_dev->struct_mutex);
fail:
	kfree(dma_buf);
	return ERR_PTR(rc);
}

static int dealloc_reg_dma_v1(struct sde_reg_dma_buffer *dma_buf)
{
	if (!dma_buf) {
		DRM_ERROR("invalid param reg_buf %pK\n", dma_buf);
		return -EINVAL;
	}

	if (dma_buf->buf) {
		msm_gem_put_iova(dma_buf->buf, 0);
		msm_gem_address_space_unregister_cb(dma_buf->aspace,
				sde_reg_dma_aspace_cb_locked, dma_buf);
		mutex_lock(&reg_dma->drm_dev->struct_mutex);
		msm_gem_free_object(dma_buf->buf);
		mutex_unlock(&reg_dma->drm_dev->struct_mutex);
	}

	kfree(dma_buf);
	return 0;
}

static int reset_reg_dma_buffer_v1(struct sde_reg_dma_buffer *lut_buf)
{
	if (!lut_buf)
		return -EINVAL;

	lut_buf->index = 0;
	lut_buf->ops_completed = 0;
	lut_buf->next_op_allowed = DECODE_SEL_OP;
	return 0;
}
static int validate_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 remain_len, write_len;

	remain_len = BUFFER_SPACE_LEFT(cfg);
	write_len = sizeof(u32);
	if (remain_len < write_len) {
		DRM_ERROR("buffer is full sz %d needs %d bytes\n",
				remain_len, write_len);
		return -EINVAL;
	}
	return 0;
}

static int write_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 *loc = NULL;

	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = reg_dma_decode_sel;
	loc[1] = 0;
	cfg->dma_buf->index = sizeof(u32) * 2;
	cfg->dma_buf->ops_completed = REG_WRITE_OP | DECODE_SEL_OP;
	cfg->dma_buf->next_op_allowed = REG_WRITE_OP;

	return 0;
}
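
/*
 * last_cmd_v1 - queue a two-dword "last command" buffer (a decode select
 * with an empty block mask) on the DB LUTDMA queue to mark the end of the
 * programming sequence and, in REG_DMA_WAIT4_COMP mode, poll the interrupt
 * status register until the done bit for this CTL/queue is set.
 */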
static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
		enum sde_reg_dma_last_cmd_mode mode)
{
	struct sde_reg_dma_setup_ops_cfg cfg;
	struct sde_reg_dma_kickoff_cfg kick_off;
	struct sde_hw_blk_reg_map hw;
	u32 val;
	int rc;

	if (!ctl || ctl->idx >= CTL_MAX || q >= DMA_CTL_QUEUE_MAX) {
		DRM_ERROR("ctl %pK q %d index %d\n", ctl, q,
				((ctl) ? ctl->idx : -1));
		return -EINVAL;
	}

	if (!last_cmd_buf_db[ctl->idx] || !last_cmd_buf_db[ctl->idx]->iova) {
		DRM_ERROR("invalid last cmd buf for idx %d\n", ctl->idx);
		return -EINVAL;
	}

	cfg.dma_buf = last_cmd_buf_db[ctl->idx];
	reset_reg_dma_buffer_v1(last_cmd_buf_db[ctl->idx]);
	if (validate_last_cmd(&cfg)) {
		DRM_ERROR("validate buf failed\n");
		return -EINVAL;
	}

	if (write_last_cmd(&cfg)) {
		DRM_ERROR("write buf failed\n");
		return -EINVAL;
	}

	kick_off.ctl = ctl;
	kick_off.queue_select = q;
	kick_off.trigger_mode = WRITE_IMMEDIATE;
	kick_off.last_command = 1;
	kick_off.op = REG_DMA_WRITE;
	kick_off.dma_type = REG_DMA_TYPE_DB;
	kick_off.dma_buf = last_cmd_buf_db[ctl->idx];
	rc = kick_off_v1(&kick_off);
	if (rc) {
		DRM_ERROR("kick off last cmd failed\n");
		return rc;
	}

	/* lack of block support will be caught by kick_off */
	memset(&hw, 0, sizeof(hw));
	SET_UP_REG_DMA_REG(hw, reg_dma, kick_off.dma_type);

	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY, mode);
	if (mode == REG_DMA_WAIT4_COMP) {
		rc = readl_poll_timeout(hw.base_off + hw.blk_off +
				reg_dma_intr_status_offset, val,
				(val & ctl_trigger_done_mask[ctl->idx][q]),
				10, 20000);
		if (rc)
			DRM_ERROR("poll wait failed %d val %x mask %x\n",
				rc, val, ctl_trigger_done_mask[ctl->idx][q]);
		SDE_EVT32(SDE_EVTLOG_FUNC_EXIT, mode);
	}

	return rc;
}
void deinit_v1(void)
{
	int i = 0;

	for (i = CTL_0; i < CTL_MAX; i++) {
		if (last_cmd_buf_db[i])
			dealloc_reg_dma_v1(last_cmd_buf_db[i]);
		last_cmd_buf_db[i] = NULL;
		if (last_cmd_buf_sb[i])
			dealloc_reg_dma_v1(last_cmd_buf_sb[i]);
		last_cmd_buf_sb[i] = NULL;
	}
}

static void dump_regs_v1(void)
{
	uint32_t i = 0, k = 0;
	u32 val;
	struct sde_hw_blk_reg_map hw;

	for (k = 0; k < REG_DMA_TYPE_MAX; k++) {
		memset(&hw, 0, sizeof(hw));
		SET_UP_REG_DMA_REG(hw, reg_dma, k);
		if (hw.hwversion == 0)
			continue;

		for (i = 0; i < reg_dma_register_count; i++) {
			val = SDE_REG_READ(&hw, i * sizeof(u32));
			DRM_ERROR("offset %x val %x\n", (u32)(i * sizeof(u32)),
					val);
		}
	}
}
static int last_cmd_sb_v2(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
		enum sde_reg_dma_last_cmd_mode mode)
{
	struct sde_reg_dma_setup_ops_cfg cfg;
	struct sde_reg_dma_kickoff_cfg kick_off;
	int rc = 0;

	if (!ctl || ctl->idx >= CTL_MAX || q >= DMA_CTL_QUEUE_MAX) {
		DRM_ERROR("ctl %pK q %d index %d\n", ctl, q,
				((ctl) ? ctl->idx : -1));
		return -EINVAL;
	}

	if (!last_cmd_buf_sb[ctl->idx] || !last_cmd_buf_sb[ctl->idx]->iova) {
		DRM_ERROR("invalid last cmd buf for idx %d\n", ctl->idx);
		return -EINVAL;
	}

	cfg.dma_buf = last_cmd_buf_sb[ctl->idx];
	reset_reg_dma_buffer_v1(last_cmd_buf_sb[ctl->idx]);
	if (validate_last_cmd(&cfg)) {
		DRM_ERROR("validate buf failed\n");
		return -EINVAL;
	}

	if (write_last_cmd(&cfg)) {
		DRM_ERROR("write buf failed\n");
		return -EINVAL;
	}

	kick_off.ctl = ctl;
	kick_off.trigger_mode = WRITE_IMMEDIATE;
	kick_off.last_command = 1;
	kick_off.op = REG_DMA_WRITE;
	kick_off.dma_type = REG_DMA_TYPE_SB;
	/* SB LUTDMA only supports queue 1, regardless of the requested q */
	kick_off.queue_select = DMA_CTL_QUEUE1;
	kick_off.dma_buf = last_cmd_buf_sb[ctl->idx];
	rc = kick_off_v1(&kick_off);
	if (rc)
		DRM_ERROR("kick off last cmd failed\n");

	return rc;
}