sde_hw_reg_dma_v1.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/iopoll.h>
  6. #include "sde_hw_mdss.h"
  7. #include "sde_hw_ctl.h"
  8. #include "sde_hw_reg_dma_v1.h"
  9. #include "msm_drv.h"
  10. #include "msm_mmu.h"
  11. #include "sde_dbg.h"
  12. #define GUARD_BYTES (BIT(8) - 1)
  13. #define ALIGNED_OFFSET (U32_MAX & ~(GUARD_BYTES))
  14. #define ADDR_ALIGN BIT(8)
  15. #define MAX_RELATIVE_OFF (BIT(21) - 1)
  16. #define ABSOLUTE_RANGE BIT(27)
  17. #define DECODE_SEL_OP (BIT(HW_BLK_SELECT))
  18. #define REG_WRITE_OP ((BIT(REG_SINGLE_WRITE)) | (BIT(REG_BLK_WRITE_SINGLE)) | \
  19. (BIT(REG_BLK_WRITE_INC)) | (BIT(REG_BLK_WRITE_MULTIPLE)) | \
  20. (BIT(REG_SINGLE_MODIFY)) | (BIT(REG_BLK_LUT_WRITE)))
  21. #define REG_DMA_OPS (DECODE_SEL_OP | REG_WRITE_OP)
  22. #define IS_OP_ALLOWED(op, buf_op) (BIT(op) & buf_op)
  23. #define SET_UP_REG_DMA_REG(hw, reg_dma, i) \
  24. do { \
  25. if ((reg_dma)->caps->reg_dma_blks[(i)].valid == false) \
  26. break; \
  27. (hw).base_off = (reg_dma)->addr; \
  28. (hw).blk_off = (reg_dma)->caps->reg_dma_blks[(i)].base; \
  29. (hw).hwversion = (reg_dma)->caps->version; \
  30. (hw).log_mask = SDE_DBG_MASK_REGDMA; \
  31. } while (0)
  32. #define SIZE_DWORD(x) ((x) / (sizeof(u32)))
  33. #define NOT_WORD_ALIGNED(x) ((x) & 0x3)
  34. #define GRP_VIG_HW_BLK_SELECT (VIG0 | VIG1 | VIG2 | VIG3)
  35. #define GRP_DMA_HW_BLK_SELECT (DMA0 | DMA1 | DMA2 | DMA3)
  36. #define GRP_DSPP_HW_BLK_SELECT (DSPP0 | DSPP1 | DSPP2 | DSPP3)
  37. #define GRP_LTM_HW_BLK_SELECT (LTM0 | LTM1)
  38. #define GRP_MDSS_HW_BLK_SELECT (MDSS)
  39. #define BUFFER_SPACE_LEFT(cfg) ((cfg)->dma_buf->buffer_size - \
  40. (cfg)->dma_buf->index)
  41. #define REL_ADDR_OPCODE (BIT(27))
  42. #define NO_OP_OPCODE (0)
  43. #define SINGLE_REG_WRITE_OPCODE (BIT(28))
  44. #define SINGLE_REG_MODIFY_OPCODE (BIT(29))
  45. #define HW_INDEX_REG_WRITE_OPCODE (BIT(28) | BIT(29))
  46. #define AUTO_INC_REG_WRITE_OPCODE (BIT(30))
  47. #define BLK_REG_WRITE_OPCODE (BIT(30) | BIT(28))
  48. #define LUTBUS_WRITE_OPCODE (BIT(30) | BIT(29))
  49. #define WRAP_MIN_SIZE 2
  50. #define WRAP_MAX_SIZE (BIT(4) - 1)
  51. #define MAX_DWORDS_SZ (BIT(14) - 1)
  52. #define REG_DMA_HEADERS_BUFFER_SZ (sizeof(u32) * 128)
  53. #define LUTBUS_TABLE_SEL_MASK 0x10000
  54. #define LUTBUS_BLOCK_SEL_MASK 0xffff
  55. #define LUTBUS_TRANS_SZ_MASK 0xff0000
  56. #define LUTBUS_LUT_SIZE_MASK 0x3fff
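/*
 * Opcode layout, as inferred from the writer helpers further below
 * (write_single_reg(), write_multi_reg_inc(), etc.): bits 30:28 of the
 * first command dword select the operation (single write, single modify,
 * HW-index write, auto-increment write, block write, LUT-bus write),
 * bit 27 (ABSOLUTE_RANGE) marks an absolute/MDSS-range target, and for
 * relative targets the low 21 bits (MAX_RELATIVE_OFF) carry the offset
 * within the currently selected block.
 *
 * Worked example (sketch): a REG_SINGLE_WRITE to relative offset 0x1c
 * encodes its first dword as
 *
 *     SINGLE_REG_WRITE_OPCODE | (0x1c & MAX_RELATIVE_OFF) == 0x1000001c
 *
 * with the value to be written carried in the following dword.
 */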
  57. static uint32_t reg_dma_register_count;
  58. static uint32_t reg_dma_decode_sel;
  59. static uint32_t reg_dma_opmode_offset;
  60. static uint32_t reg_dma_ctl0_queue0_cmd0_offset;
  61. static uint32_t reg_dma_ctl0_queue1_cmd0_offset;
  62. static uint32_t reg_dma_intr_status_offset;
  63. static uint32_t reg_dma_intr_4_status_offset;
  64. static uint32_t reg_dma_intr_clear_offset;
  65. static uint32_t reg_dma_ctl_trigger_offset;
  66. static uint32_t reg_dma_ctl0_reset_offset;
  67. static uint32_t reg_dma_error_clear_mask;
  68. static uint32_t reg_dma_ctl_queue_off[CTL_MAX];
  69. static uint32_t reg_dma_ctl_queue1_off[CTL_MAX];
  70. typedef int (*reg_dma_internal_ops) (struct sde_reg_dma_setup_ops_cfg *cfg);
  71. static struct sde_hw_reg_dma *reg_dma;
  72. static u32 ops_mem_size[REG_DMA_SETUP_OPS_MAX] = {
  73. [REG_BLK_WRITE_SINGLE] = sizeof(u32) * 2,
  74. [REG_BLK_WRITE_INC] = sizeof(u32) * 2,
  75. [REG_BLK_WRITE_MULTIPLE] = sizeof(u32) * 2,
  76. [HW_BLK_SELECT] = sizeof(u32) * 2,
  77. [REG_SINGLE_WRITE] = sizeof(u32) * 2,
  78. [REG_SINGLE_MODIFY] = sizeof(u32) * 3,
  79. [REG_BLK_LUT_WRITE] = sizeof(u32) * 2,
  80. };
  81. static u32 queue_sel[DMA_CTL_QUEUE_MAX] = {
  82. [DMA_CTL_QUEUE0] = BIT(0),
  83. [DMA_CTL_QUEUE1] = BIT(4),
  84. };
  85. static u32 dspp_read_sel[DSPP_HIST_MAX] = {
  86. [DSPP0_HIST] = 0,
  87. [DSPP1_HIST] = 1,
  88. [DSPP2_HIST] = 2,
  89. [DSPP3_HIST] = 3,
  90. };
  91. static u32 v1_supported[REG_DMA_FEATURES_MAX] = {
  92. [GAMUT] = GRP_VIG_HW_BLK_SELECT | GRP_DSPP_HW_BLK_SELECT,
  93. [VLUT] = GRP_DSPP_HW_BLK_SELECT,
  94. [GC] = GRP_DSPP_HW_BLK_SELECT,
  95. [IGC] = DSPP_IGC | GRP_DSPP_HW_BLK_SELECT,
  96. [PCC] = GRP_DSPP_HW_BLK_SELECT,
  97. };
  98. static u32 ctl_trigger_done_mask[CTL_MAX][DMA_CTL_QUEUE_MAX] = {
  99. [CTL_0][0] = BIT(16),
  100. [CTL_0][1] = BIT(21),
  101. [CTL_1][0] = BIT(17),
  102. [CTL_1][1] = BIT(22),
  103. [CTL_2][0] = BIT(18),
  104. [CTL_2][1] = BIT(23),
  105. [CTL_3][0] = BIT(19),
  106. [CTL_3][1] = BIT(24),
  107. [CTL_4][0] = BIT(25),
  108. [CTL_4][1] = BIT(27),
  109. [CTL_5][0] = BIT(26),
  110. [CTL_5][1] = BIT(28),
  111. };
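/*
 * The lookup tables above drive the op writers: ops_mem_size[] is the
 * header size in bytes each setup op consumes before its payload (e.g.
 * REG_SINGLE_MODIFY needs three dwords -- opcode, mask, value -- as
 * written by write_single_modify()); queue_sel[] maps a DMA CTL queue
 * to its SW-trigger bit; dspp_read_sel[] picks the DSPP histogram
 * source for read operations; v1_supported[] is a per-feature bitmask
 * of blocks that may be targeted and is widened by init_v11()/
 * init_v12()/init_v2(); ctl_trigger_done_mask[][] holds the transfer
 * "done" interrupt-status bit for each CTL/queue pair, used both for
 * clearing and for polling in last_cmd_v1().
 */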
  112. static int validate_dma_cfg(struct sde_reg_dma_setup_ops_cfg *cfg);
  113. static int validate_write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg);
  114. static int validate_write_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
  115. static int validate_blk_lut_write(struct sde_reg_dma_setup_ops_cfg *cfg);
  116. static int validate_write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
  117. static int validate_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg);
  118. static int write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg);
  119. static int write_single_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
  120. static int write_multi_reg_index(struct sde_reg_dma_setup_ops_cfg *cfg);
  121. static int write_multi_reg_inc(struct sde_reg_dma_setup_ops_cfg *cfg);
  122. static int write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
  123. static int write_single_modify(struct sde_reg_dma_setup_ops_cfg *cfg);
  124. static int write_block_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
  125. static int write_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg);
  126. static int reset_reg_dma_buffer_v1(struct sde_reg_dma_buffer *lut_buf);
  127. static int check_support_v1(enum sde_reg_dma_features feature,
  128. enum sde_reg_dma_blk blk, bool *is_supported);
  129. static int setup_payload_v1(struct sde_reg_dma_setup_ops_cfg *cfg);
  130. static int kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg);
  131. static int reset_v1(struct sde_hw_ctl *ctl);
  132. static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
  133. enum sde_reg_dma_last_cmd_mode mode);
  134. static struct sde_reg_dma_buffer *alloc_reg_dma_buf_v1(u32 size);
  135. static int dealloc_reg_dma_v1(struct sde_reg_dma_buffer *lut_buf);
  136. static void dump_regs_v1(void);
  137. static int last_cmd_sb_v2(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
  138. enum sde_reg_dma_last_cmd_mode mode);
  139. static reg_dma_internal_ops write_dma_op_params[REG_DMA_SETUP_OPS_MAX] = {
  140. [HW_BLK_SELECT] = write_decode_sel,
  141. [REG_SINGLE_WRITE] = write_single_reg,
  142. [REG_BLK_WRITE_SINGLE] = write_multi_reg_inc,
  143. [REG_BLK_WRITE_INC] = write_multi_reg_index,
  144. [REG_BLK_WRITE_MULTIPLE] = write_multi_lut_reg,
  145. [REG_SINGLE_MODIFY] = write_single_modify,
  146. [REG_BLK_LUT_WRITE] = write_block_lut_reg,
  147. };
  148. static reg_dma_internal_ops validate_dma_op_params[REG_DMA_SETUP_OPS_MAX] = {
  149. [HW_BLK_SELECT] = validate_write_decode_sel,
  150. [REG_SINGLE_WRITE] = validate_write_reg,
  151. [REG_BLK_WRITE_SINGLE] = validate_write_reg,
  152. [REG_BLK_WRITE_INC] = validate_write_reg,
  153. [REG_BLK_WRITE_MULTIPLE] = validate_write_multi_lut_reg,
  154. [REG_SINGLE_MODIFY] = validate_write_reg,
  155. [REG_BLK_LUT_WRITE] = validate_blk_lut_write,
  156. };
  157. static struct sde_reg_dma_buffer *last_cmd_buf_db[CTL_MAX];
  158. static struct sde_reg_dma_buffer *last_cmd_buf_sb[CTL_MAX];
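/*
 * validate_dma_op_params[] and write_dma_op_params[] form a
 * validate-then-write dispatch keyed by the setup op: setup_payload_v1()
 * runs validate_dma_cfg(), then the per-op validator, then the per-op
 * writer. A freshly reset buffer only allows a decode select first
 * (next_op_allowed is DECODE_SEL_OP after reset_reg_dma_buffer_v1()).
 *
 * Illustrative caller sequence (a sketch only -- callers normally go
 * through reg_dma->ops.setup_payload, and pcc_off, payload and
 * payload_size are hypothetical names):
 *
 *     struct sde_reg_dma_setup_ops_cfg cfg = { 0 };
 *     int rc;
 *
 *     cfg.dma_buf = buf;              // from alloc_reg_dma_buf_v1()
 *     cfg.blk = DSPP0;
 *     cfg.feature = PCC;
 *     cfg.ops = HW_BLK_SELECT;
 *     rc = setup_payload_v1(&cfg);    // emits the decode-select op
 *
 *     cfg.ops = REG_BLK_WRITE_SINGLE;
 *     cfg.blk_offset = pcc_off;       // 32-bit aligned block offset
 *     cfg.data = payload;
 *     cfg.data_size = payload_size;   // bytes, 32-bit aligned
 *     rc = setup_payload_v1(&cfg);    // opcode header + payload copy
 */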
  159. static void get_decode_sel(unsigned long blk, u32 *decode_sel)
  160. {
  161. int i = 0;
  162. *decode_sel = 0;
  163. for_each_set_bit(i, &blk, REG_DMA_BLK_MAX) {
  164. switch (BIT(i)) {
  165. case VIG0:
  166. *decode_sel |= BIT(0);
  167. break;
  168. case VIG1:
  169. *decode_sel |= BIT(1);
  170. break;
  171. case VIG2:
  172. *decode_sel |= BIT(2);
  173. break;
  174. case VIG3:
  175. *decode_sel |= BIT(3);
  176. break;
  177. case DMA0:
  178. *decode_sel |= BIT(5);
  179. break;
  180. case DMA1:
  181. *decode_sel |= BIT(6);
  182. break;
  183. case DMA2:
  184. *decode_sel |= BIT(7);
  185. break;
  186. case DMA3:
  187. *decode_sel |= BIT(8);
  188. break;
  189. case DSPP0:
  190. *decode_sel |= BIT(17);
  191. break;
  192. case DSPP1:
  193. *decode_sel |= BIT(18);
  194. break;
  195. case DSPP2:
  196. *decode_sel |= BIT(19);
  197. break;
  198. case DSPP3:
  199. *decode_sel |= BIT(20);
  200. break;
  201. case SSPP_IGC:
  202. *decode_sel |= BIT(4);
  203. break;
  204. case DSPP_IGC:
  205. *decode_sel |= BIT(21);
  206. break;
  207. case LTM0:
  208. *decode_sel |= BIT(22);
  209. break;
  210. case LTM1:
  211. *decode_sel |= BIT(23);
  212. break;
  213. case MDSS:
  214. *decode_sel |= BIT(31);
  215. break;
  216. default:
  217. DRM_ERROR("block not supported %zx\n", (size_t)BIT(i));
  218. break;
  219. }
  220. }
  221. }
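/*
 * get_decode_sel() translates the driver's block mask into the HW
 * decode-select bitmap: VIG0..VIG3 map to bits 0..3, SSPP_IGC to bit 4,
 * DMA0..DMA3 to bits 5..8, DSPP0..DSPP3 to bits 17..20, DSPP_IGC to
 * bit 21, LTM0/LTM1 to bits 22/23 and MDSS to bit 31. Since cfg->blk is
 * a mask, several blocks can be OR'd so one payload is broadcast to all
 * of them, e.g. DSPP0 | DSPP1 yields a decode select of BIT(17) | BIT(18).
 */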
  222. static int write_multi_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
  223. {
  224. u8 *loc = NULL;
  225. loc = (u8 *)cfg->dma_buf->vaddr + cfg->dma_buf->index;
  226. memcpy(loc, cfg->data, cfg->data_size);
  227. cfg->dma_buf->index += cfg->data_size;
  228. cfg->dma_buf->next_op_allowed = REG_WRITE_OP | DECODE_SEL_OP;
  229. cfg->dma_buf->ops_completed |= REG_WRITE_OP;
  230. if (cfg->blk == MDSS)
  231. cfg->dma_buf->abs_write_cnt += SIZE_DWORD(cfg->data_size);
  232. return 0;
  233. }
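/*
 * write_multi_reg() is the common tail for the block-write variants
 * below: the caller first writes its opcode header and advances
 * dma_buf->index, then this helper memcpy()s the payload right behind
 * the header and advances the index again. For MDSS (absolute-range)
 * targets it also accumulates abs_write_cnt so that
 * validate_kick_off_v1() can pad an odd absolute-write count before
 * kickoff.
 */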
  234. int write_multi_reg_index(struct sde_reg_dma_setup_ops_cfg *cfg)
  235. {
  236. u32 *loc = NULL;
  237. loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
  238. cfg->dma_buf->index);
  239. loc[0] = HW_INDEX_REG_WRITE_OPCODE;
  240. loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
  241. if (cfg->blk == MDSS)
  242. loc[0] |= ABSOLUTE_RANGE;
  243. loc[1] = SIZE_DWORD(cfg->data_size);
  244. cfg->dma_buf->index += ops_mem_size[cfg->ops];
  245. return write_multi_reg(cfg);
  246. }
  247. int write_multi_reg_inc(struct sde_reg_dma_setup_ops_cfg *cfg)
  248. {
  249. u32 *loc = NULL;
  250. loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
  251. cfg->dma_buf->index);
  252. loc[0] = AUTO_INC_REG_WRITE_OPCODE;
  253. if (cfg->blk == MDSS)
  254. loc[0] |= ABSOLUTE_RANGE;
  255. loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
  256. loc[1] = SIZE_DWORD(cfg->data_size);
  257. cfg->dma_buf->index += ops_mem_size[cfg->ops];
  258. return write_multi_reg(cfg);
  259. }
  260. static int write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
  261. {
  262. u32 *loc = NULL;
  263. loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
  264. cfg->dma_buf->index);
  265. loc[0] = BLK_REG_WRITE_OPCODE;
  266. loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
  267. if (cfg->blk == MDSS)
  268. loc[0] |= ABSOLUTE_RANGE;
  269. loc[1] = (cfg->inc) ? 0 : BIT(31);
  270. loc[1] |= (cfg->wrap_size & WRAP_MAX_SIZE) << 16;
  271. loc[1] |= ((SIZE_DWORD(cfg->data_size)) & MAX_DWORDS_SZ);
  272. cfg->dma_buf->next_op_allowed = REG_WRITE_OP;
  273. cfg->dma_buf->index += ops_mem_size[cfg->ops];
  274. return write_multi_reg(cfg);
  275. }
  276. static int write_single_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
  277. {
  278. u32 *loc = NULL;
  279. loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
  280. cfg->dma_buf->index);
  281. loc[0] = SINGLE_REG_WRITE_OPCODE;
  282. loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
  283. if (cfg->blk == MDSS) {
  284. loc[0] |= ABSOLUTE_RANGE;
  285. cfg->dma_buf->abs_write_cnt++;
  286. }
  287. loc[1] = *cfg->data;
  288. cfg->dma_buf->index += ops_mem_size[cfg->ops];
  289. cfg->dma_buf->ops_completed |= REG_WRITE_OP;
  290. cfg->dma_buf->next_op_allowed = REG_WRITE_OP | DECODE_SEL_OP;
  291. return 0;
  292. }
  293. static int write_single_modify(struct sde_reg_dma_setup_ops_cfg *cfg)
  294. {
  295. u32 *loc = NULL;
  296. loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
  297. cfg->dma_buf->index);
  298. loc[0] = SINGLE_REG_MODIFY_OPCODE;
  299. loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
  300. if (cfg->blk == MDSS)
  301. loc[0] |= ABSOLUTE_RANGE;
  302. loc[1] = cfg->mask;
  303. loc[2] = *cfg->data;
  304. cfg->dma_buf->index += ops_mem_size[cfg->ops];
  305. cfg->dma_buf->ops_completed |= REG_WRITE_OP;
  306. cfg->dma_buf->next_op_allowed = REG_WRITE_OP | DECODE_SEL_OP;
  307. return 0;
  308. }
  309. static int write_block_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
  310. {
  311. u32 *loc = NULL;
  312. int rc = -EINVAL;
  313. loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
  314. cfg->dma_buf->index);
  315. loc[0] = LUTBUS_WRITE_OPCODE;
  316. loc[0] |= (cfg->table_sel << 16) & LUTBUS_TABLE_SEL_MASK;
  317. loc[0] |= (cfg->block_sel & LUTBUS_BLOCK_SEL_MASK);
  318. loc[1] = (cfg->trans_size << 16) & LUTBUS_TRANS_SZ_MASK;
  319. loc[1] |= (cfg->lut_size & LUTBUS_LUT_SIZE_MASK);
  320. cfg->dma_buf->index += ops_mem_size[cfg->ops];
  321. rc = write_multi_reg(cfg);
  322. if (rc)
  323. return rc;
  324. /* adding 3 NO OPs as SW workaround for REG_BLK_LUT_WRITE
  325. * HW limitation that requires the residual data plus the
  326. * following opcode to exceed 4 DWORDs length.
  327. */
  328. loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
  329. cfg->dma_buf->index);
  330. loc[0] = NO_OP_OPCODE;
  331. loc[1] = NO_OP_OPCODE;
  332. loc[2] = NO_OP_OPCODE;
  333. cfg->dma_buf->index += sizeof(u32) * 3;
  334. return 0;
  335. }
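/*
 * The LUT-bus op packs table_sel and block_sel into the first dword and
 * trans_size/lut_size into the second, followed by the LUT payload; the
 * three NO_OP dwords appended afterwards are the SW workaround described
 * in the comment above (the residual data plus the following opcode must
 * exceed four dwords).
 */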
  336. static int write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg)
  337. {
  338. u32 *loc = NULL;
  339. loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
  340. cfg->dma_buf->index);
  341. loc[0] = reg_dma_decode_sel;
  342. get_decode_sel(cfg->blk, &loc[1]);
  343. cfg->dma_buf->index += ops_mem_size[cfg->ops];
  344. cfg->dma_buf->ops_completed |= DECODE_SEL_OP;
  345. cfg->dma_buf->next_op_allowed = REG_WRITE_OP;
  346. return 0;
  347. }
  348. static int validate_write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
  349. {
  350. int rc;
  351. rc = validate_write_reg(cfg);
  352. if (rc)
  353. return rc;
  354. if (cfg->wrap_size < WRAP_MIN_SIZE || cfg->wrap_size > WRAP_MAX_SIZE) {
  355. DRM_ERROR("invalid wrap sz %d min %d max %zd\n",
  356. cfg->wrap_size, WRAP_MIN_SIZE, (size_t)WRAP_MAX_SIZE);
  357. rc = -EINVAL;
  358. }
  359. return rc;
  360. }
  361. static int validate_blk_lut_write(struct sde_reg_dma_setup_ops_cfg *cfg)
  362. {
  363. int rc;
  364. rc = validate_write_reg(cfg);
  365. if (rc)
  366. return rc;
  367. if (cfg->table_sel >= LUTBUS_TABLE_SELECT_MAX ||
  368. cfg->block_sel >= LUTBUS_BLOCK_MAX ||
  369. (cfg->trans_size != LUTBUS_IGC_TRANS_SIZE &&
  370. cfg->trans_size != LUTBUS_GAMUT_TRANS_SIZE)) {
  371. DRM_ERROR("invalid table_sel %d block_sel %d trans_size %d\n",
  372. cfg->table_sel, cfg->block_sel,
  373. cfg->trans_size);
  374. rc = -EINVAL;
  375. }
  376. return rc;
  377. }
  378. static int validate_write_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
  379. {
  380. u32 remain_len, write_len;
  381. remain_len = BUFFER_SPACE_LEFT(cfg);
  382. write_len = ops_mem_size[cfg->ops] + cfg->data_size;
  383. if (remain_len < write_len) {
  384. DRM_ERROR("buffer is full sz %d needs %d bytes\n",
  385. remain_len, write_len);
  386. return -EINVAL;
  387. }
  388. if (!cfg->data) {
  389. DRM_ERROR("invalid data %pK size %d exp sz %d\n", cfg->data,
  390. cfg->data_size, write_len);
  391. return -EINVAL;
  392. }
  393. if ((SIZE_DWORD(cfg->data_size)) > MAX_DWORDS_SZ ||
  394. NOT_WORD_ALIGNED(cfg->data_size)) {
  395. DRM_ERROR("Invalid data size %d max %zd align %x\n",
  396. cfg->data_size, (size_t)MAX_DWORDS_SZ,
  397. NOT_WORD_ALIGNED(cfg->data_size));
  398. return -EINVAL;
  399. }
  400. if (cfg->blk_offset > MAX_RELATIVE_OFF ||
  401. NOT_WORD_ALIGNED(cfg->blk_offset)) {
  402. DRM_ERROR("invalid offset %d max %zd align %x\n",
  403. cfg->blk_offset, (size_t)MAX_RELATIVE_OFF,
  404. NOT_WORD_ALIGNED(cfg->blk_offset));
  405. return -EINVAL;
  406. }
  407. return 0;
  408. }
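/*
 * The limits checked above amount to roughly 64 KB of payload per op
 * (MAX_DWORDS_SZ = BIT(14) - 1 = 16383 dwords) and a 2 MB relative
 * window per block (MAX_RELATIVE_OFF = BIT(21) - 1); both the payload
 * size and the block offset must also be 32-bit aligned
 * (NOT_WORD_ALIGNED).
 */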
  409. static int validate_write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg)
  410. {
  411. u32 remain_len;
  412. bool vig_blk, dma_blk, dspp_blk, mdss_blk;
  413. remain_len = BUFFER_SPACE_LEFT(cfg);
  414. if (remain_len < ops_mem_size[HW_BLK_SELECT]) {
  415. DRM_ERROR("buffer is full needs %d bytes\n",
  416. ops_mem_size[HW_BLK_SELECT]);
  417. return -EINVAL;
  418. }
  419. if (!cfg->blk) {
  420. DRM_ERROR("blk set as 0\n");
  421. return -EINVAL;
  422. }
  423. vig_blk = (cfg->blk & GRP_VIG_HW_BLK_SELECT) ? true : false;
  424. dma_blk = (cfg->blk & GRP_DMA_HW_BLK_SELECT) ? true : false;
  425. dspp_blk = (cfg->blk & GRP_DSPP_HW_BLK_SELECT) ? true : false;
  426. mdss_blk = (cfg->blk & MDSS) ? true : false;
  427. if ((vig_blk && dspp_blk) || (dma_blk && dspp_blk) ||
  428. (vig_blk && dma_blk) ||
  429. (mdss_blk && (vig_blk | dma_blk | dspp_blk))) {
  430. DRM_ERROR("invalid blk combination %x\n", cfg->blk);
  431. return -EINVAL;
  432. }
  433. return 0;
  434. }
  435. static int validate_dma_cfg(struct sde_reg_dma_setup_ops_cfg *cfg)
  436. {
  437. int rc = 0;
  438. bool supported;
  439. if (!cfg || cfg->ops >= REG_DMA_SETUP_OPS_MAX || !cfg->dma_buf) {
  440. DRM_ERROR("invalid param cfg %pK ops %d dma_buf %pK\n",
  441. cfg, ((cfg) ? cfg->ops : REG_DMA_SETUP_OPS_MAX),
  442. ((cfg) ? cfg->dma_buf : NULL));
  443. return -EINVAL;
  444. }
  445. rc = check_support_v1(cfg->feature, cfg->blk, &supported);
  446. if (rc || !supported) {
  447. DRM_ERROR("check support failed rc %d supported %d\n",
  448. rc, supported);
  449. rc = -EINVAL;
  450. return rc;
  451. }
  452. if (cfg->dma_buf->index >= cfg->dma_buf->buffer_size ||
  453. NOT_WORD_ALIGNED(cfg->dma_buf->index)) {
  454. DRM_ERROR("Buf Overflow index %d max size %d align %x\n",
  455. cfg->dma_buf->index, cfg->dma_buf->buffer_size,
  456. NOT_WORD_ALIGNED(cfg->dma_buf->index));
  457. return -EINVAL;
  458. }
  459. if (cfg->dma_buf->iova & GUARD_BYTES || !cfg->dma_buf->vaddr) {
  460. DRM_ERROR("iova not aligned to %zx iova %llx kva %pK",
  461. (size_t)ADDR_ALIGN, cfg->dma_buf->iova,
  462. cfg->dma_buf->vaddr);
  463. return -EINVAL;
  464. }
  465. if (!IS_OP_ALLOWED(cfg->ops, cfg->dma_buf->next_op_allowed)) {
  466. DRM_ERROR("invalid op %x allowed %x\n", cfg->ops,
  467. cfg->dma_buf->next_op_allowed);
  468. return -EINVAL;
  469. }
  470. if (!validate_dma_op_params[cfg->ops] ||
  471. !write_dma_op_params[cfg->ops]) {
  472. DRM_ERROR("invalid op %d validate %pK write %pK\n", cfg->ops,
  473. validate_dma_op_params[cfg->ops],
  474. write_dma_op_params[cfg->ops]);
  475. return -EINVAL;
  476. }
  477. return rc;
  478. }
  479. static int validate_kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)
  480. {
  481. if (!cfg || !cfg->ctl || !cfg->dma_buf ||
  482. cfg->dma_type >= REG_DMA_TYPE_MAX) {
  483. DRM_ERROR("invalid cfg %pK ctl %pK dma_buf %pK dma type %d\n",
  484. cfg, ((!cfg) ? NULL : cfg->ctl),
  485. ((!cfg) ? NULL : cfg->dma_buf),
  486. ((!cfg) ? 0 : cfg->dma_type));
  487. return -EINVAL;
  488. }
  489. if (reg_dma->caps->reg_dma_blks[cfg->dma_type].valid == false) {
  490. DRM_DEBUG("REG dma type %d is not supported\n", cfg->dma_type);
  491. return -EOPNOTSUPP;
  492. }
  493. if (cfg->ctl->idx < CTL_0 || cfg->ctl->idx >= CTL_MAX) {
  494. DRM_ERROR("invalid ctl idx %d\n", cfg->ctl->idx);
  495. return -EINVAL;
  496. }
  497. if (cfg->op >= REG_DMA_OP_MAX) {
  498. DRM_ERROR("invalid op %d\n", cfg->op);
  499. return -EINVAL;
  500. }
  501. if ((cfg->op == REG_DMA_WRITE) &&
  502. (!(cfg->dma_buf->ops_completed & DECODE_SEL_OP) ||
  503. !(cfg->dma_buf->ops_completed & REG_WRITE_OP))) {
  504. DRM_ERROR("incomplete write ops %x\n",
  505. cfg->dma_buf->ops_completed);
  506. return -EINVAL;
  507. }
  508. if (cfg->op == REG_DMA_READ && cfg->block_select >= DSPP_HIST_MAX) {
  509. DRM_ERROR("invalid block for read %d\n", cfg->block_select);
  510. return -EINVAL;
  511. }
  512. /* Only immediate triggers are supported now hence hardcode */
  513. cfg->trigger_mode = (cfg->op == REG_DMA_READ) ? (READ_TRIGGER) :
  514. (WRITE_TRIGGER);
  515. if (cfg->dma_buf->iova & GUARD_BYTES) {
  516. DRM_ERROR("Address is not aligned to %zx iova %llx",
  517. (size_t)ADDR_ALIGN, cfg->dma_buf->iova);
  518. return -EINVAL;
  519. }
  520. if (cfg->queue_select >= DMA_CTL_QUEUE_MAX) {
  521. DRM_ERROR("invalid queue selected %d\n", cfg->queue_select);
  522. return -EINVAL;
  523. }
  524. if (SIZE_DWORD(cfg->dma_buf->index) > MAX_DWORDS_SZ ||
  525. !cfg->dma_buf->index) {
  526. DRM_ERROR("invalid dword size %zd max %zd\n",
  527. (size_t)SIZE_DWORD(cfg->dma_buf->index),
  528. (size_t)MAX_DWORDS_SZ);
  529. return -EINVAL;
  530. }
  531. if (cfg->dma_type == REG_DMA_TYPE_SB &&
  532. (cfg->queue_select != DMA_CTL_QUEUE1 ||
  533. cfg->op == REG_DMA_READ)) {
  534. DRM_ERROR("invalid queue selected %d or op %d for SB LUTDMA\n",
  535. cfg->queue_select, cfg->op);
  536. return -EINVAL;
  537. }
  538. if ((cfg->dma_buf->abs_write_cnt % 2) != 0) {
  539. /* Touch up buffer to avoid HW issues with odd number of abs writes */
  540. u32 reg = 0;
  541. struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
  542. dma_write_cfg.dma_buf = cfg->dma_buf;
  543. dma_write_cfg.blk = MDSS;
  544. dma_write_cfg.feature = REG_DMA_FEATURES_MAX;
  545. dma_write_cfg.ops = HW_BLK_SELECT;
  546. if (validate_write_decode_sel(&dma_write_cfg) || write_decode_sel(&dma_write_cfg)) {
  547. DRM_ERROR("Failed setting MDSS decode select for LUTDMA touch up\n");
  548. return -EINVAL;
  549. }
  550. /* Perform dummy write on LUTDMA RO version reg */
  551. dma_write_cfg.ops = REG_SINGLE_WRITE;
  552. dma_write_cfg.blk_offset = reg_dma->caps->base_off +
  553. reg_dma->caps->reg_dma_blks[cfg->dma_type].base;
  554. dma_write_cfg.data = &reg;
  555. dma_write_cfg.data_size = sizeof(uint32_t);
  556. if (validate_write_reg(&dma_write_cfg) || write_single_reg(&dma_write_cfg)) {
  557. DRM_ERROR("Failed to add touch up write to LUTDMA buffer\n");
  558. return -EINVAL;
  559. }
  560. }
  561. return 0;
  562. }
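/*
 * The final block above handles a HW quirk: an odd number of absolute
 * (MDSS-range) writes in one buffer can trigger HW issues, so the
 * buffer is "touched up" by appending an MDSS decode select plus one
 * dummy write to the LUTDMA block's read-only version register,
 * reusing the same validate/write helpers that setup_payload_v1()
 * dispatches to.
 */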
  563. static int write_kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)
  564. {
  565. u32 cmd1, mask = 0, val = 0;
  566. struct sde_hw_blk_reg_map hw;
  567. memset(&hw, 0, sizeof(hw));
  568. msm_gem_sync(cfg->dma_buf->buf);
  569. cmd1 = (cfg->op == REG_DMA_READ) ?
  570. (dspp_read_sel[cfg->block_select] << 30) : 0;
  571. cmd1 |= (cfg->last_command) ? BIT(24) : 0;
  572. cmd1 |= (cfg->op == REG_DMA_READ) ? (2 << 22) : 0;
  573. cmd1 |= (cfg->op == REG_DMA_WRITE) ? (BIT(22)) : 0;
  574. cmd1 |= (SIZE_DWORD(cfg->dma_buf->index) & MAX_DWORDS_SZ);
  575. if (cfg->dma_type == REG_DMA_TYPE_DB)
  576. SET_UP_REG_DMA_REG(hw, reg_dma, REG_DMA_TYPE_DB);
  577. else if (cfg->dma_type == REG_DMA_TYPE_SB)
  578. SET_UP_REG_DMA_REG(hw, reg_dma, REG_DMA_TYPE_SB);
  579. if (hw.hwversion == 0) {
  580. DRM_ERROR("DMA type %d is unsupported\n", cfg->dma_type);
  581. return -EOPNOTSUPP;
  582. }
  583. SDE_REG_WRITE(&hw, reg_dma_opmode_offset, BIT(0));
  584. val = SDE_REG_READ(&hw, reg_dma_intr_4_status_offset);
  585. if (val) {
  586. DRM_DEBUG("LUT dma status %x\n", val);
  587. mask = reg_dma_error_clear_mask;
  588. SDE_REG_WRITE(&hw, reg_dma_intr_clear_offset + sizeof(u32) * 4,
  589. mask);
  590. SDE_EVT32(val);
  591. }
  592. if (cfg->dma_type == REG_DMA_TYPE_DB) {
  593. SDE_REG_WRITE(&hw, reg_dma_ctl_queue_off[cfg->ctl->idx],
  594. cfg->dma_buf->iova);
  595. SDE_REG_WRITE(&hw, reg_dma_ctl_queue_off[cfg->ctl->idx] + 0x4,
  596. cmd1);
  597. } else if (cfg->dma_type == REG_DMA_TYPE_SB) {
  598. SDE_REG_WRITE(&hw, reg_dma_ctl_queue1_off[cfg->ctl->idx],
  599. cfg->dma_buf->iova);
  600. SDE_REG_WRITE(&hw, reg_dma_ctl_queue1_off[cfg->ctl->idx] + 0x4,
  601. cmd1);
  602. }
  603. if (cfg->last_command) {
  604. mask = ctl_trigger_done_mask[cfg->ctl->idx][cfg->queue_select];
  605. SDE_REG_WRITE(&hw, reg_dma_intr_clear_offset, mask);
  606. /* DB LUTDMA use SW trigger while SB LUTDMA uses DSPP_SB
  607. * flush as its trigger event.
  608. */
  609. if (cfg->dma_type == REG_DMA_TYPE_DB) {
  610. SDE_REG_WRITE(&cfg->ctl->hw, reg_dma_ctl_trigger_offset,
  611. queue_sel[cfg->queue_select]);
  612. }
  613. }
  614. SDE_EVT32(cfg->feature, cfg->dma_type,
  615. ((uint64_t)cfg->dma_buf) >> 32,
  616. ((uint64_t)cfg->dma_buf) & 0xFFFFFFFF,
  617. (cfg->dma_buf->iova) >> 32,
  618. (cfg->dma_buf->iova) & 0xFFFFFFFF,
  619. cfg->op,
  620. cfg->queue_select, cfg->ctl->idx,
  621. SIZE_DWORD(cfg->dma_buf->index));
  622. return 0;
  623. }
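/*
 * In write_kick_off_v1(), cmd1 packs the transfer descriptor: bits
 * 31:30 carry the DSPP read select (reads only), bit 24 flags the last
 * command, bits 23:22 encode the op (1 = write, 2 = read) and bits
 * 13:0 the payload length in dwords. The buffer iova goes into the DB
 * queue0 (or SB queue1) CMD0 register for that CTL, and cmd1 into
 * CMD0 + 0x4. For example, a last-command DB write of a 256-byte
 * buffer yields cmd1 = BIT(24) | BIT(22) | 64 = 0x01400040. For a last
 * command, the done bit is cleared and DB transfers are SW-triggered
 * through the CTL block's trigger register, while SB transfers wait
 * for the DSPP_SB flush (see the inline comment above).
 */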
  624. int init_v1(struct sde_hw_reg_dma *cfg)
  625. {
  626. int i = 0, rc = 0;
  627. if (!cfg)
  628. return -EINVAL;
  629. reg_dma = cfg;
  630. for (i = CTL_0; i < CTL_MAX; i++) {
  631. if (!last_cmd_buf_db[i]) {
  632. last_cmd_buf_db[i] =
  633. alloc_reg_dma_buf_v1(REG_DMA_HEADERS_BUFFER_SZ);
  634. if (IS_ERR_OR_NULL(last_cmd_buf_db[i])) {
  635. /*
  636. * This will allow reg dma to fall back to
  637. * AHB domain
  638. */
  639. pr_info("Failed to allocate reg dma, ret:%lu\n",
  640. PTR_ERR(last_cmd_buf_db[i]));
  641. return 0;
  642. }
  643. }
  644. if (!last_cmd_buf_sb[i]) {
  645. last_cmd_buf_sb[i] =
  646. alloc_reg_dma_buf_v1(REG_DMA_HEADERS_BUFFER_SZ);
  647. if (IS_ERR_OR_NULL(last_cmd_buf_sb[i])) {
  648. /*
  649. * This will allow reg dma to fall back to
  650. * AHB domain
  651. */
  652. pr_info("Failed to allocate reg dma, ret:%lu\n",
  653. PTR_ERR(last_cmd_buf_sb[i]));
  654. return 0;
  655. }
  656. }
  657. }
  658. if (rc) {
  659. for (i = 0; i < CTL_MAX; i++) {
  660. if (!last_cmd_buf_db[i])
  661. continue;
  662. dealloc_reg_dma_v1(last_cmd_buf_db[i]);
  663. last_cmd_buf_db[i] = NULL;
  664. }
  665. for (i = 0; i < CTL_MAX; i++) {
  666. if (!last_cmd_buf_sb[i])
  667. continue;
  668. dealloc_reg_dma_v1(last_cmd_buf_sb[i]);
  669. last_cmd_buf_sb[i] = NULL;
  670. }
  671. return rc;
  672. }
  673. reg_dma->ops.check_support = check_support_v1;
  674. reg_dma->ops.setup_payload = setup_payload_v1;
  675. reg_dma->ops.kick_off = kick_off_v1;
  676. reg_dma->ops.reset = reset_v1;
  677. reg_dma->ops.alloc_reg_dma_buf = alloc_reg_dma_buf_v1;
  678. reg_dma->ops.dealloc_reg_dma = dealloc_reg_dma_v1;
  679. reg_dma->ops.reset_reg_dma_buf = reset_reg_dma_buffer_v1;
  680. reg_dma->ops.last_command = last_cmd_v1;
  681. reg_dma->ops.dump_regs = dump_regs_v1;
  682. reg_dma_register_count = 60;
  683. reg_dma_decode_sel = 0x180ac060;
  684. reg_dma_opmode_offset = 0x4;
  685. reg_dma_ctl0_queue0_cmd0_offset = 0x14;
  686. reg_dma_intr_status_offset = 0x90;
  687. reg_dma_intr_4_status_offset = 0xa0;
  688. reg_dma_intr_clear_offset = 0xb0;
  689. reg_dma_ctl_trigger_offset = 0xd4;
  690. reg_dma_ctl0_reset_offset = 0xe4;
  691. reg_dma_error_clear_mask = BIT(0) | BIT(1) | BIT(2) | BIT(16);
  692. reg_dma_ctl_queue_off[CTL_0] = reg_dma_ctl0_queue0_cmd0_offset;
  693. for (i = CTL_1; i < ARRAY_SIZE(reg_dma_ctl_queue_off); i++)
  694. reg_dma_ctl_queue_off[i] = reg_dma_ctl_queue_off[i - 1] +
  695. (sizeof(u32) * 4);
  696. return 0;
  697. }
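/*
 * init_v1() wires the exported ops vector, allocates the per-CTL
 * "last command" buffers for both the DB and SB paths (treating an
 * allocation failure as non-fatal so the driver can fall back to AHB
 * programming, per the inline comments), and programs the v1 register
 * map. The per-CTL queue0 CMD0 offsets start at 0x14 for CTL_0 with a
 * stride of four dwords: 0x14, 0x24, 0x34, ...
 */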
  698. int init_v11(struct sde_hw_reg_dma *cfg)
  699. {
  700. int ret = 0, i = 0;
  701. ret = init_v1(cfg);
  702. if (ret) {
  703. DRM_ERROR("failed to initialize v1: ret %d\n", ret);
  704. return -EINVAL;
  705. }
  706. /* initialize register offsets and v1_supported based on version */
  707. reg_dma_register_count = 133;
  708. reg_dma_decode_sel = 0x180ac114;
  709. reg_dma_opmode_offset = 0x4;
  710. reg_dma_ctl0_queue0_cmd0_offset = 0x14;
  711. reg_dma_intr_status_offset = 0x160;
  712. reg_dma_intr_4_status_offset = 0x170;
  713. reg_dma_intr_clear_offset = 0x1a0;
  714. reg_dma_ctl_trigger_offset = 0xd4;
  715. reg_dma_ctl0_reset_offset = 0x200;
  716. reg_dma_error_clear_mask = BIT(0) | BIT(1) | BIT(2) | BIT(16) |
  717. BIT(17) | BIT(18);
  718. reg_dma_ctl_queue_off[CTL_0] = reg_dma_ctl0_queue0_cmd0_offset;
  719. for (i = CTL_1; i < ARRAY_SIZE(reg_dma_ctl_queue_off); i++)
  720. reg_dma_ctl_queue_off[i] = reg_dma_ctl_queue_off[i - 1] +
  721. (sizeof(u32) * 4);
  722. v1_supported[IGC] = DSPP_IGC | GRP_DSPP_HW_BLK_SELECT |
  723. GRP_VIG_HW_BLK_SELECT | GRP_DMA_HW_BLK_SELECT;
  724. v1_supported[GC] = GRP_DMA_HW_BLK_SELECT | GRP_DSPP_HW_BLK_SELECT;
  725. v1_supported[HSIC] = GRP_DSPP_HW_BLK_SELECT;
  726. v1_supported[SIX_ZONE] = GRP_DSPP_HW_BLK_SELECT;
  727. v1_supported[MEMC_SKIN] = GRP_DSPP_HW_BLK_SELECT;
  728. v1_supported[MEMC_SKY] = GRP_DSPP_HW_BLK_SELECT;
  729. v1_supported[MEMC_FOLIAGE] = GRP_DSPP_HW_BLK_SELECT;
  730. v1_supported[MEMC_PROT] = GRP_DSPP_HW_BLK_SELECT;
  731. v1_supported[QSEED] = GRP_VIG_HW_BLK_SELECT;
  732. return 0;
  733. }
  734. int init_v12(struct sde_hw_reg_dma *cfg)
  735. {
  736. int ret = 0;
  737. ret = init_v11(cfg);
  738. if (ret) {
  739. DRM_ERROR("failed to initialize v11: ret %d\n", ret);
  740. return ret;
  741. }
  742. v1_supported[LTM_INIT] = GRP_LTM_HW_BLK_SELECT;
  743. v1_supported[LTM_ROI] = GRP_LTM_HW_BLK_SELECT;
  744. v1_supported[LTM_VLUT] = GRP_LTM_HW_BLK_SELECT;
  745. v1_supported[RC_DATA] = (GRP_DSPP_HW_BLK_SELECT |
  746. GRP_MDSS_HW_BLK_SELECT);
  747. v1_supported[SPR_INIT] = (GRP_DSPP_HW_BLK_SELECT |
  748. GRP_MDSS_HW_BLK_SELECT);
  749. v1_supported[SPR_PU_CFG] = (GRP_DSPP_HW_BLK_SELECT |
  750. GRP_MDSS_HW_BLK_SELECT);
  751. v1_supported[DEMURA_CFG] = MDSS | DSPP0 | DSPP1;
  752. return 0;
  753. }
  754. int init_v2(struct sde_hw_reg_dma *cfg)
  755. {
  756. int ret = 0, i = 0;
  757. ret = init_v12(cfg);
  758. if (ret) {
  759. DRM_ERROR("failed to initialize v12: ret %d\n", ret);
  760. return ret;
  761. }
  762. /* initialize register offsets based on version delta */
  763. reg_dma_register_count = 0x91;
  764. reg_dma_ctl0_queue1_cmd0_offset = 0x1c;
  765. reg_dma_error_clear_mask |= BIT(19);
  766. reg_dma_ctl_queue1_off[CTL_0] = reg_dma_ctl0_queue1_cmd0_offset;
  767. for (i = CTL_1; i < ARRAY_SIZE(reg_dma_ctl_queue_off); i++)
  768. reg_dma_ctl_queue1_off[i] = reg_dma_ctl_queue1_off[i - 1] +
  769. (sizeof(u32) * 4);
  770. v1_supported[IGC] = GRP_DSPP_HW_BLK_SELECT | GRP_VIG_HW_BLK_SELECT |
  771. GRP_DMA_HW_BLK_SELECT;
  772. if (cfg->caps->reg_dma_blks[REG_DMA_TYPE_SB].valid == true)
  773. reg_dma->ops.last_command_sb = last_cmd_sb_v2;
  774. return 0;
  775. }
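/*
 * The init functions chain downwards -- init_v2() calls init_v12(),
 * which calls init_v11(), which calls init_v1() -- with each level
 * overriding register offsets and widening v1_supported[] for the
 * features that generation adds (IGC/GC/QSEED and the memory-color
 * features in v11; LTM, RC, SPR and DEMURA in v12). init_v2() also
 * lays out the queue1 CMD0 offsets (0x1c with a 0x10 stride) and hooks
 * last_command_sb only when the SB LUTDMA block is reported valid.
 */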
  776. static int check_support_v1(enum sde_reg_dma_features feature,
  777. enum sde_reg_dma_blk blk,
  778. bool *is_supported)
  779. {
  780. int ret = 0;
  781. if (!is_supported)
  782. return -EINVAL;
  783. if (feature >= REG_DMA_FEATURES_MAX
  784. || blk >= BIT_ULL(REG_DMA_BLK_MAX)) {
  785. *is_supported = false;
  786. return ret;
  787. }
  788. *is_supported = (blk & v1_supported[feature]) ? true : false;
  789. return ret;
  790. }
  791. static int setup_payload_v1(struct sde_reg_dma_setup_ops_cfg *cfg)
  792. {
  793. int rc = 0;
  794. rc = validate_dma_cfg(cfg);
  795. if (!rc)
  796. rc = validate_dma_op_params[cfg->ops](cfg);
  797. if (!rc)
  798. rc = write_dma_op_params[cfg->ops](cfg);
  799. return rc;
  800. }
  801. static int kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)
  802. {
  803. int rc = 0;
  804. rc = validate_kick_off_v1(cfg);
  805. if (rc)
  806. return rc;
  807. rc = write_kick_off_v1(cfg);
  808. return rc;
  809. }
  810. int reset_v1(struct sde_hw_ctl *ctl)
  811. {
  812. struct sde_hw_blk_reg_map hw;
  813. u32 index, val, i = 0, k = 0;
  814. if (!ctl || ctl->idx > CTL_MAX) {
  815. DRM_ERROR("invalid ctl %pK ctl idx %d\n",
  816. ctl, ((ctl) ? ctl->idx : 0));
  817. return -EINVAL;
  818. }
  819. index = ctl->idx - CTL_0;
  820. for (k = 0; k < REG_DMA_TYPE_MAX; k++) {
  821. memset(&hw, 0, sizeof(hw));
  822. SET_UP_REG_DMA_REG(hw, reg_dma, k);
  823. if (hw.hwversion == 0)
  824. continue;
  825. SDE_REG_WRITE(&hw, reg_dma_opmode_offset, BIT(0));
  826. SDE_REG_WRITE(&hw, (reg_dma_ctl0_reset_offset +
  827. index * sizeof(u32)), BIT(0));
  828. i = 0;
  829. do {
  830. udelay(1000);
  831. i++;
  832. val = SDE_REG_READ(&hw,
  833. (reg_dma_ctl0_reset_offset +
  834. index * sizeof(u32)));
  835. } while (i < 2 && val);
  836. }
  837. return 0;
  838. }
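/*
 * reset_v1() walks each valid LUTDMA block (DB and SB), sets the
 * op-mode enable bit, writes BIT(0) to the per-CTL reset register and
 * polls it for up to ~2 ms (two 1 ms delays) waiting for the HW to
 * clear it.
 */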
  839. static void sde_reg_dma_aspace_cb_locked(void *cb_data, bool is_detach)
  840. {
  841. struct sde_reg_dma_buffer *dma_buf = NULL;
  842. struct msm_gem_address_space *aspace = NULL;
  843. u32 iova_aligned, offset;
  844. int rc;
  845. if (!cb_data) {
  846. DRM_ERROR("aspace cb called with invalid dma_buf\n");
  847. return;
  848. }
  849. dma_buf = (struct sde_reg_dma_buffer *)cb_data;
  850. aspace = dma_buf->aspace;
  851. if (is_detach) {
  852. /* invalidate the stored iova */
  853. dma_buf->iova = 0;
  854. /* return the virtual address mapping */
  855. msm_gem_put_vaddr(dma_buf->buf);
  856. msm_gem_vunmap(dma_buf->buf, OBJ_LOCK_NORMAL);
  857. } else {
  858. rc = msm_gem_get_iova(dma_buf->buf, aspace,
  859. &dma_buf->iova);
  860. if (rc) {
  861. DRM_ERROR("failed to get the iova rc %d\n", rc);
  862. return;
  863. }
  864. dma_buf->vaddr = msm_gem_get_vaddr(dma_buf->buf);
  865. if (IS_ERR_OR_NULL(dma_buf->vaddr)) {
  866. DRM_ERROR("failed to get va rc %d\n", rc);
  867. return;
  868. }
  869. iova_aligned = (dma_buf->iova + GUARD_BYTES) & ALIGNED_OFFSET;
  870. offset = iova_aligned - dma_buf->iova;
  871. dma_buf->iova = dma_buf->iova + offset;
  872. dma_buf->vaddr = (void *)(((u8 *)dma_buf->vaddr) + offset);
  873. dma_buf->next_op_allowed = DECODE_SEL_OP;
  874. }
  875. }
  876. static struct sde_reg_dma_buffer *alloc_reg_dma_buf_v1(u32 size)
  877. {
  878. struct sde_reg_dma_buffer *dma_buf = NULL;
  879. u32 iova_aligned, offset;
  880. u32 rsize = size + GUARD_BYTES;
  881. struct msm_gem_address_space *aspace = NULL;
  882. int rc = 0;
  883. if (!size || SIZE_DWORD(size) > MAX_DWORDS_SZ) {
  884. DRM_ERROR("invalid buffer size %lu, max %lu\n",
  885. SIZE_DWORD(size), MAX_DWORDS_SZ);
  886. return ERR_PTR(-EINVAL);
  887. }
  888. dma_buf = kzalloc(sizeof(*dma_buf), GFP_KERNEL);
  889. if (!dma_buf)
  890. return ERR_PTR(-ENOMEM);
  891. dma_buf->buf = msm_gem_new(reg_dma->drm_dev,
  892. rsize, MSM_BO_UNCACHED);
  893. if (IS_ERR_OR_NULL(dma_buf->buf)) {
  894. rc = -EINVAL;
  895. goto fail;
  896. }
  897. aspace = msm_gem_smmu_address_space_get(reg_dma->drm_dev,
  898. MSM_SMMU_DOMAIN_UNSECURE);
  899. if (PTR_ERR(aspace) == -ENODEV) {
  900. aspace = NULL;
  901. DRM_DEBUG("IOMMU not present, relying on VRAM\n");
  902. } else if (IS_ERR_OR_NULL(aspace)) {
  903. rc = PTR_ERR(aspace);
  904. aspace = NULL;
  905. DRM_ERROR("failed to get aspace %d", rc);
  906. goto free_gem;
  907. } else if (aspace) {
  908. /* register to aspace */
  909. rc = msm_gem_address_space_register_cb(aspace,
  910. sde_reg_dma_aspace_cb_locked,
  911. (void *)dma_buf);
  912. if (rc) {
  913. DRM_ERROR("failed to register callback %d", rc);
  914. goto free_gem;
  915. }
  916. }
  917. dma_buf->aspace = aspace;
  918. rc = msm_gem_get_iova(dma_buf->buf, aspace, &dma_buf->iova);
  919. if (rc) {
  920. DRM_ERROR("failed to get the iova rc %d\n", rc);
  921. goto free_aspace_cb;
  922. }
  923. dma_buf->vaddr = msm_gem_get_vaddr(dma_buf->buf);
  924. if (IS_ERR_OR_NULL(dma_buf->vaddr)) {
  925. DRM_ERROR("failed to get va rc %d\n", rc);
  926. rc = -EINVAL;
  927. goto put_iova;
  928. }
  929. dma_buf->buffer_size = size;
  930. iova_aligned = (dma_buf->iova + GUARD_BYTES) & ALIGNED_OFFSET;
  931. offset = iova_aligned - dma_buf->iova;
  932. dma_buf->iova = dma_buf->iova + offset;
  933. dma_buf->vaddr = (void *)(((u8 *)dma_buf->vaddr) + offset);
  934. dma_buf->next_op_allowed = DECODE_SEL_OP;
  935. return dma_buf;
  936. put_iova:
  937. msm_gem_put_iova(dma_buf->buf, aspace);
  938. free_aspace_cb:
  939. msm_gem_address_space_unregister_cb(aspace,
  940. sde_reg_dma_aspace_cb_locked, dma_buf);
  941. free_gem:
  942. mutex_lock(&reg_dma->drm_dev->struct_mutex);
  943. msm_gem_free_object(dma_buf->buf);
  944. mutex_unlock(&reg_dma->drm_dev->struct_mutex);
  945. fail:
  946. kfree(dma_buf);
  947. return ERR_PTR(rc);
  948. }
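/*
 * alloc_reg_dma_buf_v1() over-allocates by GUARD_BYTES (255) and then
 * rounds both the iova and the kernel va up to the next 256-byte
 * boundary (ADDR_ALIGN), since the engine requires a 256-byte aligned
 * base address. Worked example: an iova of 0x10000081 becomes
 * (0x10000081 + 0xff) & ~0xff = 0x10000100, i.e. an offset of 0x7f
 * applied to both iova and vaddr. The aspace callback above re-derives
 * the same alignment whenever the SMMU mapping is re-attached.
 */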
  949. static int dealloc_reg_dma_v1(struct sde_reg_dma_buffer *dma_buf)
  950. {
  951. if (!dma_buf) {
  952. DRM_ERROR("invalid param reg_buf %pK\n", dma_buf);
  953. return -EINVAL;
  954. }
  955. if (dma_buf->buf) {
  956. msm_gem_put_iova(dma_buf->buf, 0);
  957. msm_gem_address_space_unregister_cb(dma_buf->aspace,
  958. sde_reg_dma_aspace_cb_locked, dma_buf);
  959. mutex_lock(&reg_dma->drm_dev->struct_mutex);
  960. msm_gem_free_object(dma_buf->buf);
  961. mutex_unlock(&reg_dma->drm_dev->struct_mutex);
  962. }
  963. kfree(dma_buf);
  964. return 0;
  965. }
  966. static int reset_reg_dma_buffer_v1(struct sde_reg_dma_buffer *lut_buf)
  967. {
  968. if (!lut_buf)
  969. return -EINVAL;
  970. lut_buf->index = 0;
  971. lut_buf->ops_completed = 0;
  972. lut_buf->next_op_allowed = DECODE_SEL_OP;
  973. lut_buf->abs_write_cnt = 0;
  974. return 0;
  975. }
  976. static int validate_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg)
  977. {
  978. u32 remain_len, write_len;
  979. remain_len = BUFFER_SPACE_LEFT(cfg);
  980. write_len = sizeof(u32);
  981. if (remain_len < write_len) {
  982. DRM_ERROR("buffer is full sz %d needs %d bytes\n",
  983. remain_len, write_len);
  984. return -EINVAL;
  985. }
  986. return 0;
  987. }
  988. static int write_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg)
  989. {
  990. u32 *loc = NULL;
  991. loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
  992. cfg->dma_buf->index);
  993. loc[0] = reg_dma_decode_sel;
  994. loc[1] = 0;
  995. cfg->dma_buf->index = sizeof(u32) * 2;
  996. cfg->dma_buf->ops_completed = REG_WRITE_OP | DECODE_SEL_OP;
  997. cfg->dma_buf->next_op_allowed = REG_WRITE_OP;
  998. return 0;
  999. }
  1000. static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
  1001. enum sde_reg_dma_last_cmd_mode mode)
  1002. {
  1003. struct sde_reg_dma_setup_ops_cfg cfg;
  1004. struct sde_reg_dma_kickoff_cfg kick_off;
  1005. struct sde_hw_blk_reg_map hw;
  1006. u32 val;
  1007. int rc;
  1008. if (!ctl || ctl->idx >= CTL_MAX || q >= DMA_CTL_QUEUE_MAX) {
  1009. DRM_ERROR("ctl %pK q %d index %d\n", ctl, q,
  1010. ((ctl) ? ctl->idx : -1));
  1011. return -EINVAL;
  1012. }
  1013. if (!last_cmd_buf_db[ctl->idx] || !last_cmd_buf_db[ctl->idx]->iova) {
  1014. DRM_ERROR("invalid last cmd buf for idx %d\n", ctl->idx);
  1015. return -EINVAL;
  1016. }
  1017. cfg.dma_buf = last_cmd_buf_db[ctl->idx];
  1018. reset_reg_dma_buffer_v1(last_cmd_buf_db[ctl->idx]);
  1019. if (validate_last_cmd(&cfg)) {
  1020. DRM_ERROR("validate buf failed\n");
  1021. return -EINVAL;
  1022. }
  1023. if (write_last_cmd(&cfg)) {
  1024. DRM_ERROR("write buf failed\n");
  1025. return -EINVAL;
  1026. }
  1027. kick_off.ctl = ctl;
  1028. kick_off.queue_select = q;
  1029. kick_off.trigger_mode = WRITE_IMMEDIATE;
  1030. kick_off.last_command = 1;
  1031. kick_off.op = REG_DMA_WRITE;
  1032. kick_off.dma_type = REG_DMA_TYPE_DB;
  1033. kick_off.dma_buf = last_cmd_buf_db[ctl->idx];
  1034. kick_off.feature = REG_DMA_FEATURES_MAX;
  1035. rc = kick_off_v1(&kick_off);
  1036. if (rc) {
  1037. DRM_ERROR("kick off last cmd failed\n");
  1038. return rc;
  1039. }
  1040. //Lack of block support will be caught by kick_off
  1041. memset(&hw, 0, sizeof(hw));
  1042. SET_UP_REG_DMA_REG(hw, reg_dma, kick_off.dma_type);
  1043. SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY, mode, ctl->idx, kick_off.queue_select,
  1044. kick_off.dma_type, kick_off.op);
  1045. if (mode == REG_DMA_WAIT4_COMP) {
  1046. rc = readl_poll_timeout(hw.base_off + hw.blk_off +
  1047. reg_dma_intr_status_offset, val,
  1048. (val & ctl_trigger_done_mask[ctl->idx][q]),
  1049. 10, 20000);
  1050. if (rc)
  1051. DRM_ERROR("poll wait failed %d val %x mask %x\n",
  1052. rc, val, ctl_trigger_done_mask[ctl->idx][q]);
  1053. SDE_EVT32(SDE_EVTLOG_FUNC_EXIT, mode);
  1054. }
  1055. return rc;
  1056. }
  1057. void deinit_v1(void)
  1058. {
  1059. int i = 0;
  1060. for (i = CTL_0; i < CTL_MAX; i++) {
  1061. if (last_cmd_buf_db[i])
  1062. dealloc_reg_dma_v1(last_cmd_buf_db[i]);
  1063. last_cmd_buf_db[i] = NULL;
  1064. if (last_cmd_buf_sb[i])
  1065. dealloc_reg_dma_v1(last_cmd_buf_sb[i]);
  1066. last_cmd_buf_sb[i] = NULL;
  1067. }
  1068. }
  1069. static void dump_regs_v1(void)
  1070. {
  1071. uint32_t i = 0, k = 0;
  1072. u32 val;
  1073. struct sde_hw_blk_reg_map hw;
  1074. for (k = 0; k < REG_DMA_TYPE_MAX; k++) {
  1075. memset(&hw, 0, sizeof(hw));
  1076. SET_UP_REG_DMA_REG(hw, reg_dma, k);
  1077. if (hw.hwversion == 0)
  1078. continue;
  1079. for (i = 0; i < reg_dma_register_count; i++) {
  1080. val = SDE_REG_READ(&hw, i * sizeof(u32));
  1081. DRM_ERROR("offset %x val %x\n", (u32)(i * sizeof(u32)),
  1082. val);
  1083. }
  1084. }
  1085. }
  1086. static int last_cmd_sb_v2(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
  1087. enum sde_reg_dma_last_cmd_mode mode)
  1088. {
  1089. struct sde_reg_dma_setup_ops_cfg cfg;
  1090. struct sde_reg_dma_kickoff_cfg kick_off;
  1091. int rc = 0;
  1092. if (!ctl || ctl->idx >= CTL_MAX || q >= DMA_CTL_QUEUE_MAX) {
  1093. DRM_ERROR("ctl %pK q %d index %d\n", ctl, q,
  1094. ((ctl) ? ctl->idx : -1));
  1095. return -EINVAL;
  1096. }
  1097. if (!last_cmd_buf_sb[ctl->idx] || !last_cmd_buf_sb[ctl->idx]->iova) {
  1098. DRM_ERROR("invalid last cmd buf for idx %d\n", ctl->idx);
  1099. return -EINVAL;
  1100. }
  1101. cfg.dma_buf = last_cmd_buf_sb[ctl->idx];
  1102. reset_reg_dma_buffer_v1(last_cmd_buf_sb[ctl->idx]);
  1103. if (validate_last_cmd(&cfg)) {
  1104. DRM_ERROR("validate buf failed\n");
  1105. return -EINVAL;
  1106. }
  1107. if (write_last_cmd(&cfg)) {
  1108. DRM_ERROR("write buf failed\n");
  1109. return -EINVAL;
  1110. }
  1111. kick_off.ctl = ctl;
  1112. kick_off.trigger_mode = WRITE_IMMEDIATE;
  1113. kick_off.last_command = 1;
  1114. kick_off.op = REG_DMA_WRITE;
  1115. kick_off.dma_type = REG_DMA_TYPE_SB;
  1116. kick_off.queue_select = DMA_CTL_QUEUE1;
  1117. kick_off.dma_buf = last_cmd_buf_sb[ctl->idx];
  1118. kick_off.feature = REG_DMA_FEATURES_MAX;
  1119. rc = kick_off_v1(&kick_off);
  1120. if (rc)
  1121. DRM_ERROR("kick off last cmd failed\n");
  1122. SDE_EVT32(ctl->idx, kick_off.queue_select, kick_off.dma_type,
  1123. kick_off.op);
  1124. return rc;
  1125. }
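/*
 * last_cmd_v1() (DB path) and last_cmd_sb_v2() (SB path) both write a
 * bare decode-select as the closing op and kick it off with
 * last_command set. The DB variant can additionally poll
 * reg_dma_intr_status_offset for the CTL/queue done bit (10 us
 * interval, 20 ms timeout) when REG_DMA_WAIT4_COMP is requested; the
 * SB variant always uses DMA_CTL_QUEUE1 and does not poll, since SB
 * transfers are triggered by the DSPP_SB flush rather than a SW
 * trigger (see write_kick_off_v1()).
 */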