sde_hw_reg_dma_v1.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 */
#include <linux/iopoll.h>
#include "sde_hw_mdss.h"
#include "sde_hw_ctl.h"
#include "sde_hw_reg_dma_v1.h"
#include "msm_drv.h"
#include "msm_mmu.h"
#include "sde_dbg.h"
#include "sde_vbif.h"

#define GUARD_BYTES (BIT(8) - 1)
#define ALIGNED_OFFSET (U32_MAX & ~(GUARD_BYTES))
#define ADDR_ALIGN BIT(8)
#define MAX_RELATIVE_OFF (BIT(21) - 1)
#define ABSOLUTE_RANGE BIT(27)

#define DECODE_SEL_OP (BIT(HW_BLK_SELECT))
#define REG_WRITE_OP ((BIT(REG_SINGLE_WRITE)) | (BIT(REG_BLK_WRITE_SINGLE)) | \
		(BIT(REG_BLK_WRITE_INC)) | (BIT(REG_BLK_WRITE_MULTIPLE)) | \
		(BIT(REG_SINGLE_MODIFY)) | (BIT(REG_BLK_LUT_WRITE)))
#define REG_DMA_OPS (DECODE_SEL_OP | REG_WRITE_OP)
#define IS_OP_ALLOWED(op, buf_op) (BIT(op) & buf_op)

#define SET_UP_REG_DMA_REG(hw, reg_dma, i) \
	do { \
		if ((reg_dma)->caps->reg_dma_blks[(i)].valid == false) \
			break; \
		(hw).base_off = (reg_dma)->addr; \
		(hw).blk_off = (reg_dma)->caps->reg_dma_blks[(i)].base; \
		(hw).hw_rev = (reg_dma)->caps->version; \
		(hw).log_mask = SDE_DBG_MASK_REGDMA; \
	} while (0)

#define SIZE_DWORD(x) ((x) / (sizeof(u32)))
#define NOT_WORD_ALIGNED(x) ((x) & 0x3)

#define GRP_VIG_HW_BLK_SELECT (VIG0 | VIG1 | VIG2 | VIG3)
#define GRP_DMA_HW_BLK_SELECT (DMA0 | DMA1 | DMA2 | DMA3 | DMA4 | DMA5)
#define GRP_DSPP_HW_BLK_SELECT (DSPP0 | DSPP1 | DSPP2 | DSPP3)
#define GRP_LTM_HW_BLK_SELECT (LTM0 | LTM1 | LTM2 | LTM3)
#define GRP_MDSS_HW_BLK_SELECT (MDSS)
#define BUFFER_SPACE_LEFT(cfg) ((cfg)->dma_buf->buffer_size - \
		(cfg)->dma_buf->index)

#define REL_ADDR_OPCODE (BIT(27))
#define NO_OP_OPCODE (0)
#define SINGLE_REG_WRITE_OPCODE (BIT(28))
#define SINGLE_REG_MODIFY_OPCODE (BIT(29))
#define HW_INDEX_REG_WRITE_OPCODE (BIT(28) | BIT(29))
#define AUTO_INC_REG_WRITE_OPCODE (BIT(30))
#define BLK_REG_WRITE_OPCODE (BIT(30) | BIT(28))
#define LUTBUS_WRITE_OPCODE (BIT(30) | BIT(29))

#define WRAP_MIN_SIZE 2
#define WRAP_MAX_SIZE (BIT(4) - 1)
#define MAX_DWORDS_SZ (BIT(14) - 1)
#define REG_DMA_HEADERS_BUFFER_SZ (sizeof(u32) * 128)

#define LUTBUS_TABLE_SEL_MASK 0x10000
#define LUTBUS_BLOCK_SEL_MASK 0xffff
#define LUTBUS_TRANS_SZ_MASK 0xff0000
#define LUTBUS_LUT_SIZE_MASK 0x3fff

#define PMU_CLK_CTRL 0x1F0

static uint32_t reg_dma_register_count;
static uint32_t reg_dma_decode_sel;
static uint32_t reg_dma_opmode_offset;
static uint32_t reg_dma_ctl0_queue0_cmd0_offset;
static uint32_t reg_dma_ctl0_queue1_cmd0_offset;
static uint32_t reg_dma_intr_0_status_offset[CTL_MAX][DMA_CTL_QUEUE_MAX];
static uint32_t reg_dma_intr_0_clear_offset[CTL_MAX][DMA_CTL_QUEUE_MAX];
static uint32_t reg_dma_intr_4_status_offset;
static uint32_t reg_dma_intr_4_clear_offset;
static uint32_t reg_dma_ctl_trigger_offset;
static uint32_t reg_dma_ctl0_reset_offset[CTL_MAX][DMA_CTL_QUEUE_MAX];
static uint32_t reg_dma_error_clear_mask;
static uint32_t reg_dma_ctl_queue_off[CTL_MAX];
static uint32_t reg_dma_ctl_queue1_off[CTL_MAX];

typedef int (*reg_dma_internal_ops) (struct sde_reg_dma_setup_ops_cfg *cfg);

static struct sde_hw_reg_dma *reg_dma;

static u32 ops_mem_size[REG_DMA_SETUP_OPS_MAX] = {
	[REG_BLK_WRITE_SINGLE] = sizeof(u32) * 2,
	[REG_BLK_WRITE_INC] = sizeof(u32) * 2,
	[REG_BLK_WRITE_MULTIPLE] = sizeof(u32) * 2,
	[HW_BLK_SELECT] = sizeof(u32) * 2,
	[REG_SINGLE_WRITE] = sizeof(u32) * 2,
	[REG_SINGLE_MODIFY] = sizeof(u32) * 3,
	[REG_BLK_LUT_WRITE] = sizeof(u32) * 2,
};

static u32 queue_sel[DMA_CTL_QUEUE_MAX] = {
	[DMA_CTL_QUEUE0] = BIT(0),
	[DMA_CTL_QUEUE1] = BIT(4),
};

static u32 dspp_read_sel[DSPP_HIST_MAX] = {
	[DSPP0_HIST] = 0,
	[DSPP1_HIST] = 1,
	[DSPP2_HIST] = 2,
	[DSPP3_HIST] = 3,
};

static u32 v1_supported[REG_DMA_FEATURES_MAX] = {
	[GAMUT] = GRP_VIG_HW_BLK_SELECT | GRP_DSPP_HW_BLK_SELECT,
	[VLUT] = GRP_DSPP_HW_BLK_SELECT,
	[GC] = GRP_DSPP_HW_BLK_SELECT,
	[IGC] = DSPP_IGC | GRP_DSPP_HW_BLK_SELECT,
	[PCC] = GRP_DSPP_HW_BLK_SELECT,
};

static u32 ctl_trigger_done_mask[CTL_MAX][DMA_CTL_QUEUE_MAX] = {
	[CTL_0][0] = BIT(16),
	[CTL_0][1] = BIT(21),
	[CTL_1][0] = BIT(17),
	[CTL_1][1] = BIT(22),
	[CTL_2][0] = BIT(18),
	[CTL_2][1] = BIT(23),
	[CTL_3][0] = BIT(19),
	[CTL_3][1] = BIT(24),
	[CTL_4][0] = BIT(25),
	[CTL_4][1] = BIT(27),
	[CTL_5][0] = BIT(26),
	[CTL_5][1] = BIT(28),
};

static int validate_dma_cfg(struct sde_reg_dma_setup_ops_cfg *cfg);
static int validate_write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg);
static int validate_write_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
static int validate_blk_lut_write(struct sde_reg_dma_setup_ops_cfg *cfg);
static int validate_write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
static int validate_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_single_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_multi_reg_index(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_multi_reg_inc(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_single_modify(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_block_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg);
static int reset_reg_dma_buffer_v1(struct sde_reg_dma_buffer *lut_buf);
static int check_support_v1(enum sde_reg_dma_features feature,
		enum sde_reg_dma_blk blk, bool *is_supported);
static int setup_payload_v1(struct sde_reg_dma_setup_ops_cfg *cfg);
static int kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg);
static int reset_v1(struct sde_hw_ctl *ctl);
static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
		enum sde_reg_dma_last_cmd_mode mode);
static struct sde_reg_dma_buffer *alloc_reg_dma_buf_v1(u32 size);
static int dealloc_reg_dma_v1(struct sde_reg_dma_buffer *lut_buf);
static void dump_regs_v1(void);
static int last_cmd_sb_v2(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
		enum sde_reg_dma_last_cmd_mode mode);

static reg_dma_internal_ops write_dma_op_params[REG_DMA_SETUP_OPS_MAX] = {
	[HW_BLK_SELECT] = write_decode_sel,
	[REG_SINGLE_WRITE] = write_single_reg,
	[REG_BLK_WRITE_SINGLE] = write_multi_reg_inc,
	[REG_BLK_WRITE_INC] = write_multi_reg_index,
	[REG_BLK_WRITE_MULTIPLE] = write_multi_lut_reg,
	[REG_SINGLE_MODIFY] = write_single_modify,
	[REG_BLK_LUT_WRITE] = write_block_lut_reg,
};

static reg_dma_internal_ops validate_dma_op_params[REG_DMA_SETUP_OPS_MAX] = {
	[HW_BLK_SELECT] = validate_write_decode_sel,
	[REG_SINGLE_WRITE] = validate_write_reg,
	[REG_BLK_WRITE_SINGLE] = validate_write_reg,
	[REG_BLK_WRITE_INC] = validate_write_reg,
	[REG_BLK_WRITE_MULTIPLE] = validate_write_multi_lut_reg,
	[REG_SINGLE_MODIFY] = validate_write_reg,
	[REG_BLK_LUT_WRITE] = validate_blk_lut_write,
};

static struct sde_reg_dma_buffer *last_cmd_buf_db[CTL_MAX];
static struct sde_reg_dma_buffer *last_cmd_buf_sb[CTL_MAX];
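
/*
 * get_decode_sel() - translate a bitmask of sde_reg_dma_blk values into the
 * hardware block-select bits carried in the payload word that follows the
 * decode-select opcode.
 */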
static void get_decode_sel(unsigned long blk, u32 *decode_sel)
{
	int i = 0;

	*decode_sel = 0;
	for_each_set_bit(i, &blk, REG_DMA_BLK_MAX) {
		switch (BIT(i)) {
		case VIG0:
			*decode_sel |= BIT(0);
			break;
		case VIG1:
			*decode_sel |= BIT(1);
			break;
		case VIG2:
			*decode_sel |= BIT(2);
			break;
		case VIG3:
			*decode_sel |= BIT(3);
			break;
		case DMA0:
			*decode_sel |= BIT(5);
			break;
		case DMA1:
			*decode_sel |= BIT(6);
			break;
		case DMA2:
			*decode_sel |= BIT(7);
			break;
		case DMA3:
			*decode_sel |= BIT(8);
			break;
		case DMA4:
			*decode_sel |= BIT(9);
			break;
		case DMA5:
			*decode_sel |= BIT(10);
			break;
		case DSPP0:
			*decode_sel |= BIT(17);
			break;
		case DSPP1:
			*decode_sel |= BIT(18);
			break;
		case DSPP2:
			*decode_sel |= BIT(19);
			break;
		case DSPP3:
			*decode_sel |= BIT(20);
			break;
		case SSPP_IGC:
			*decode_sel |= BIT(4);
			break;
		case DSPP_IGC:
			*decode_sel |= BIT(21);
			break;
		case LTM0:
			*decode_sel |= BIT(22);
			break;
		case LTM1:
			*decode_sel |= BIT(23);
			break;
		case LTM2:
			*decode_sel |= BIT(24);
			break;
		case LTM3:
			*decode_sel |= BIT(25);
			break;
		case MDSS:
			*decode_sel |= BIT(31);
			break;
		default:
			DRM_ERROR("block not supported %zx\n", (size_t)BIT(i));
			break;
		}
	}
}
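
/*
 * write_multi_reg() - copy the payload that follows an already written block
 * opcode into the DMA buffer, then advance the buffer index and op-tracking
 * state. Absolute (MDSS) writes are counted so the kickoff path can pad the
 * buffer to an even number of absolute writes.
 */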
static int write_multi_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u8 *loc = NULL;

	loc = (u8 *)cfg->dma_buf->vaddr + cfg->dma_buf->index;
	memcpy(loc, cfg->data, cfg->data_size);
	cfg->dma_buf->index += cfg->data_size;
	cfg->dma_buf->next_op_allowed = REG_WRITE_OP | DECODE_SEL_OP;
	cfg->dma_buf->ops_completed |= REG_WRITE_OP;
	if (cfg->blk == MDSS)
		cfg->dma_buf->abs_write_cnt += SIZE_DWORD(cfg->data_size);

	return 0;
}

int write_multi_reg_index(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 *loc = NULL;

	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = HW_INDEX_REG_WRITE_OPCODE;
	loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
	if (cfg->blk == MDSS)
		loc[0] |= ABSOLUTE_RANGE;
	loc[1] = SIZE_DWORD(cfg->data_size);
	cfg->dma_buf->index += ops_mem_size[cfg->ops];

	return write_multi_reg(cfg);
}

int write_multi_reg_inc(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 *loc = NULL;

	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = AUTO_INC_REG_WRITE_OPCODE;
	if (cfg->blk == MDSS)
		loc[0] |= ABSOLUTE_RANGE;
	loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
	loc[1] = SIZE_DWORD(cfg->data_size);
	cfg->dma_buf->index += ops_mem_size[cfg->ops];

	return write_multi_reg(cfg);
}

static int write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 *loc = NULL;

	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = BLK_REG_WRITE_OPCODE;
	loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
	if (cfg->blk == MDSS)
		loc[0] |= ABSOLUTE_RANGE;
	loc[1] = (cfg->inc) ? 0 : BIT(31);
	loc[1] |= (cfg->wrap_size & WRAP_MAX_SIZE) << 16;
	loc[1] |= ((SIZE_DWORD(cfg->data_size)) & MAX_DWORDS_SZ);
	cfg->dma_buf->next_op_allowed = REG_WRITE_OP;
	cfg->dma_buf->index += ops_mem_size[cfg->ops];

	return write_multi_reg(cfg);
}

static int write_single_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 *loc = NULL;

	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = SINGLE_REG_WRITE_OPCODE;
	loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
	if (cfg->blk == MDSS) {
		loc[0] |= ABSOLUTE_RANGE;
		cfg->dma_buf->abs_write_cnt++;
	}
	loc[1] = *cfg->data;
	cfg->dma_buf->index += ops_mem_size[cfg->ops];
	cfg->dma_buf->ops_completed |= REG_WRITE_OP;
	cfg->dma_buf->next_op_allowed = REG_WRITE_OP | DECODE_SEL_OP;

	return 0;
}

static int write_single_modify(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 *loc = NULL;

	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = SINGLE_REG_MODIFY_OPCODE;
	loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
	if (cfg->blk == MDSS)
		loc[0] |= ABSOLUTE_RANGE;
	loc[1] = cfg->mask;
	loc[2] = *cfg->data;
	cfg->dma_buf->index += ops_mem_size[cfg->ops];
	cfg->dma_buf->ops_completed |= REG_WRITE_OP;
	cfg->dma_buf->next_op_allowed = REG_WRITE_OP | DECODE_SEL_OP;

	return 0;
}

static int write_block_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 *loc = NULL;
	int rc = -EINVAL;

	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = LUTBUS_WRITE_OPCODE;
	loc[0] |= (cfg->table_sel << 16) & LUTBUS_TABLE_SEL_MASK;
	loc[0] |= (cfg->block_sel & LUTBUS_BLOCK_SEL_MASK);
	loc[1] = (cfg->trans_size << 16) & LUTBUS_TRANS_SZ_MASK;
	loc[1] |= (cfg->lut_size & LUTBUS_LUT_SIZE_MASK);
	cfg->dma_buf->index += ops_mem_size[cfg->ops];

	rc = write_multi_reg(cfg);
	if (rc)
		return rc;

	/* adding 3 NO OPs as SW workaround for REG_BLK_LUT_WRITE
	 * HW limitation that requires the residual data plus the
	 * following opcode to exceed 4 DWORDs length.
	 */
	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = NO_OP_OPCODE;
	loc[1] = NO_OP_OPCODE;
	loc[2] = NO_OP_OPCODE;
	cfg->dma_buf->index += sizeof(u32) * 3;

	return 0;
}
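
/*
 * write_decode_sel() - emit a decode-select opcode that routes the writes
 * which follow it to the hardware blocks named in cfg->blk.
 */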
static int write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 *loc = NULL;

	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = reg_dma_decode_sel;
	get_decode_sel(cfg->blk, &loc[1]);
	cfg->dma_buf->index += ops_mem_size[cfg->ops];
	cfg->dma_buf->ops_completed |= DECODE_SEL_OP;
	cfg->dma_buf->next_op_allowed = REG_WRITE_OP;

	return 0;
}

static int validate_write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	int rc;

	rc = validate_write_reg(cfg);
	if (rc)
		return rc;

	if (cfg->wrap_size < WRAP_MIN_SIZE || cfg->wrap_size > WRAP_MAX_SIZE) {
		DRM_ERROR("invalid wrap sz %d min %d max %zd\n",
				cfg->wrap_size, WRAP_MIN_SIZE, (size_t)WRAP_MAX_SIZE);
		rc = -EINVAL;
	}

	return rc;
}

static int validate_blk_lut_write(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	int rc;

	rc = validate_write_reg(cfg);
	if (rc)
		return rc;

	if (cfg->table_sel >= LUTBUS_TABLE_SELECT_MAX ||
			cfg->block_sel >= LUTBUS_BLOCK_MAX ||
			(cfg->trans_size != LUTBUS_IGC_TRANS_SIZE &&
			cfg->trans_size != LUTBUS_GAMUT_TRANS_SIZE &&
			cfg->trans_size != LUTBUS_SIXZONE_TRANS_SIZE)) {
		DRM_ERROR("invalid table_sel %d block_sel %d trans_size %d\n",
				cfg->table_sel, cfg->block_sel,
				cfg->trans_size);
		rc = -EINVAL;
	}

	return rc;
}
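
/*
 * validate_write_reg() - common checks for every register-write op: enough
 * room left in the DMA buffer, a non-NULL payload, and word-aligned data
 * size and block offset within the ranges the opcode encoding allows.
 */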
static int validate_write_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 remain_len, write_len;

	remain_len = BUFFER_SPACE_LEFT(cfg);
	write_len = ops_mem_size[cfg->ops] + cfg->data_size;
	if (remain_len < write_len) {
		DRM_ERROR("buffer is full sz %d needs %d bytes\n",
				remain_len, write_len);
		return -EINVAL;
	}

	if (!cfg->data) {
		DRM_ERROR("invalid data %pK size %d exp sz %d\n", cfg->data,
				cfg->data_size, write_len);
		return -EINVAL;
	}

	if ((SIZE_DWORD(cfg->data_size)) > MAX_DWORDS_SZ ||
			NOT_WORD_ALIGNED(cfg->data_size)) {
		DRM_ERROR("Invalid data size %d max %zd align %x\n",
				cfg->data_size, (size_t)MAX_DWORDS_SZ,
				NOT_WORD_ALIGNED(cfg->data_size));
		return -EINVAL;
	}

	if (cfg->blk_offset > MAX_RELATIVE_OFF ||
			NOT_WORD_ALIGNED(cfg->blk_offset)) {
		DRM_ERROR("invalid offset %d max %zd align %x\n",
				cfg->blk_offset, (size_t)MAX_RELATIVE_OFF,
				NOT_WORD_ALIGNED(cfg->blk_offset));
		return -EINVAL;
	}

	return 0;
}

static int validate_write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 remain_len;
	bool vig_blk, dma_blk, dspp_blk, mdss_blk;

	remain_len = BUFFER_SPACE_LEFT(cfg);
	if (remain_len < ops_mem_size[HW_BLK_SELECT]) {
		DRM_ERROR("buffer is full needs %d bytes\n",
				ops_mem_size[HW_BLK_SELECT]);
		return -EINVAL;
	}

	if (!cfg->blk) {
		DRM_ERROR("blk set as 0\n");
		return -EINVAL;
	}

	vig_blk = (cfg->blk & GRP_VIG_HW_BLK_SELECT) ? true : false;
	dma_blk = (cfg->blk & GRP_DMA_HW_BLK_SELECT) ? true : false;
	dspp_blk = (cfg->blk & GRP_DSPP_HW_BLK_SELECT) ? true : false;
	mdss_blk = (cfg->blk & MDSS) ? true : false;

	if ((vig_blk && dspp_blk) || (dma_blk && dspp_blk) ||
			(vig_blk && dma_blk) ||
			(mdss_blk && (vig_blk | dma_blk | dspp_blk))) {
		DRM_ERROR("invalid blk combination %x\n", cfg->blk);
		return -EINVAL;
	}

	return 0;
}
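
/*
 * validate_dma_cfg() - sanity-check a setup_payload request before anything
 * is written: the feature/block pair must be supported, the buffer must have
 * room and an aligned iova, and the requested op must be legal after the op
 * queued last.
 */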
static int validate_dma_cfg(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	int rc = 0;
	bool supported;

	if (!cfg || cfg->ops >= REG_DMA_SETUP_OPS_MAX || !cfg->dma_buf) {
		DRM_ERROR("invalid param cfg %pK ops %d dma_buf %pK\n",
				cfg, ((cfg) ? cfg->ops : REG_DMA_SETUP_OPS_MAX),
				((cfg) ? cfg->dma_buf : NULL));
		return -EINVAL;
	}

	rc = check_support_v1(cfg->feature, cfg->blk, &supported);
	if (rc || !supported) {
		DRM_ERROR("check support failed rc %d supported %d\n",
				rc, supported);
		rc = -EINVAL;
		return rc;
	}

	if (cfg->dma_buf->index >= cfg->dma_buf->buffer_size ||
			NOT_WORD_ALIGNED(cfg->dma_buf->index)) {
		DRM_ERROR("Buf Overflow index %d max size %d align %x\n",
				cfg->dma_buf->index, cfg->dma_buf->buffer_size,
				NOT_WORD_ALIGNED(cfg->dma_buf->index));
		return -EINVAL;
	}

	if (cfg->dma_buf->iova & GUARD_BYTES || !cfg->dma_buf->vaddr) {
		DRM_ERROR("iova not aligned to %zx iova %llx kva %pK",
				(size_t)ADDR_ALIGN, cfg->dma_buf->iova,
				cfg->dma_buf->vaddr);
		return -EINVAL;
	}

	if (!IS_OP_ALLOWED(cfg->ops, cfg->dma_buf->next_op_allowed)) {
		DRM_ERROR("invalid op %x allowed %x\n", cfg->ops,
				cfg->dma_buf->next_op_allowed);
		return -EINVAL;
	}

	if (!validate_dma_op_params[cfg->ops] ||
			!write_dma_op_params[cfg->ops]) {
		DRM_ERROR("invalid op %d validate %pK write %pK\n", cfg->ops,
				validate_dma_op_params[cfg->ops],
				write_dma_op_params[cfg->ops]);
		return -EINVAL;
	}

	return rc;
}
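
/*
 * validate_kick_off_v1() - vet a kickoff request (ctl index, op, queue and
 * dma type combination, buffer size) and, when the buffer holds an odd
 * number of absolute-address writes, pad it with a harmless MDSS write so
 * the count becomes even before it is handed to the hardware.
 */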
static int validate_kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)
{
	if (!cfg || !cfg->ctl || !cfg->dma_buf ||
			cfg->dma_type >= REG_DMA_TYPE_MAX) {
		DRM_ERROR("invalid cfg %pK ctl %pK dma_buf %pK dma type %d\n",
				cfg, ((!cfg) ? NULL : cfg->ctl),
				((!cfg) ? NULL : cfg->dma_buf),
				((!cfg) ? 0 : cfg->dma_type));
		return -EINVAL;
	}

	if (reg_dma->caps->reg_dma_blks[cfg->dma_type].valid == false) {
		DRM_DEBUG("REG dma type %d is not supported\n", cfg->dma_type);
		return -EOPNOTSUPP;
	}

	if (cfg->ctl->idx < CTL_0 || cfg->ctl->idx >= CTL_MAX) {
		DRM_ERROR("invalid ctl idx %d\n", cfg->ctl->idx);
		return -EINVAL;
	}

	if (cfg->op >= REG_DMA_OP_MAX) {
		DRM_ERROR("invalid op %d\n", cfg->op);
		return -EINVAL;
	}

	if ((cfg->op == REG_DMA_WRITE) &&
			(!(cfg->dma_buf->ops_completed & DECODE_SEL_OP) ||
			!(cfg->dma_buf->ops_completed & REG_WRITE_OP))) {
		DRM_ERROR("incomplete write ops %x\n",
				cfg->dma_buf->ops_completed);
		return -EINVAL;
	}

	if (cfg->op == REG_DMA_READ && cfg->block_select >= DSPP_HIST_MAX) {
		DRM_ERROR("invalid block for read %d\n", cfg->block_select);
		return -EINVAL;
	}

	/* Only immediate triggers are supported now hence hardcode */
	cfg->trigger_mode = (cfg->op == REG_DMA_READ) ? (READ_TRIGGER) :
			(WRITE_TRIGGER);

	if (cfg->dma_buf->iova & GUARD_BYTES) {
		DRM_ERROR("Address is not aligned to %zx iova %llx",
				(size_t)ADDR_ALIGN, cfg->dma_buf->iova);
		return -EINVAL;
	}

	if (cfg->queue_select >= DMA_CTL_QUEUE_MAX) {
		DRM_ERROR("invalid queue selected %d\n", cfg->queue_select);
		return -EINVAL;
	}

	if (SIZE_DWORD(cfg->dma_buf->index) > MAX_DWORDS_SZ ||
			!cfg->dma_buf->index) {
		DRM_ERROR("invalid dword size %zd max %zd\n",
				(size_t)SIZE_DWORD(cfg->dma_buf->index),
				(size_t)MAX_DWORDS_SZ);
		return -EINVAL;
	}

	if (cfg->dma_type == REG_DMA_TYPE_SB &&
			(cfg->queue_select != DMA_CTL_QUEUE1 ||
			cfg->op == REG_DMA_READ)) {
		DRM_ERROR("invalid queue selected %d or op %d for SB LUTDMA\n",
				cfg->queue_select, cfg->op);
		return -EINVAL;
	}

	if ((cfg->dma_buf->abs_write_cnt % 2) != 0) {
		/* Touch up buffer to avoid HW issues with odd number of abs writes */
		u32 reg = 0;
		struct sde_reg_dma_setup_ops_cfg dma_write_cfg;

		dma_write_cfg.dma_buf = cfg->dma_buf;
		dma_write_cfg.blk = MDSS;
		dma_write_cfg.feature = REG_DMA_FEATURES_MAX;
		dma_write_cfg.ops = HW_BLK_SELECT;

		if (validate_write_decode_sel(&dma_write_cfg) || write_decode_sel(&dma_write_cfg)) {
			DRM_ERROR("Failed setting MDSS decode select for LUTDMA touch up\n");
			return -EINVAL;
		}

		/* Perform dummy write on LUTDMA RO version reg */
		dma_write_cfg.ops = REG_SINGLE_WRITE;
		dma_write_cfg.blk_offset = reg_dma->caps->base_off +
				reg_dma->caps->reg_dma_blks[cfg->dma_type].base;
		dma_write_cfg.data = &reg;
		dma_write_cfg.data_size = sizeof(uint32_t);

		if (validate_write_reg(&dma_write_cfg) || write_single_reg(&dma_write_cfg)) {
			DRM_ERROR("Failed to add touch up write to LUTDMA buffer\n");
			return -EINVAL;
		}
	}

	return 0;
}
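
/*
 * write_kick_off_v1() - program the LUTDMA block with the buffer iova and
 * the command word (op type, last-command flag, length in dwords), clear any
 * stale error status, and for the DB path issue the SW trigger; the SB path
 * is triggered later by the DSPP_SB flush.
 */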
static int write_kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)
{
	u32 cmd1, mask = 0, val = 0;
	struct sde_hw_blk_reg_map hw;

	memset(&hw, 0, sizeof(hw));
	msm_gem_sync(cfg->dma_buf->buf);

	cmd1 = (cfg->op == REG_DMA_READ) ?
			(dspp_read_sel[cfg->block_select] << 30) : 0;
	cmd1 |= (cfg->last_command) ? BIT(24) : 0;
	cmd1 |= (cfg->op == REG_DMA_READ) ? (2 << 22) : 0;
	cmd1 |= (cfg->op == REG_DMA_WRITE) ? (BIT(22)) : 0;
	cmd1 |= (SIZE_DWORD(cfg->dma_buf->index) & MAX_DWORDS_SZ);

	if (cfg->dma_type == REG_DMA_TYPE_DB)
		SET_UP_REG_DMA_REG(hw, reg_dma, REG_DMA_TYPE_DB);
	else if (cfg->dma_type == REG_DMA_TYPE_SB)
		SET_UP_REG_DMA_REG(hw, reg_dma, REG_DMA_TYPE_SB);

	if (hw.hw_rev == 0) {
		DRM_ERROR("DMA type %d is unsupported\n", cfg->dma_type);
		return -EOPNOTSUPP;
	}

	SDE_REG_WRITE(&hw, reg_dma_opmode_offset, BIT(0));
	val = SDE_REG_READ(&hw, reg_dma_intr_4_status_offset);
	if (val) {
		DRM_DEBUG("LUT dma status %x\n", val);
		mask = reg_dma_error_clear_mask;
		SDE_REG_WRITE(&hw, reg_dma_intr_4_clear_offset, mask);
		SDE_EVT32(val);
	}

	if (cfg->last_command) {
		/* ensure all packets are queued in packet queue before
		 * queuing last command descriptor (last command)
		 */
		wmb();
	}

	if (cfg->dma_type == REG_DMA_TYPE_DB) {
		SDE_REG_WRITE(&hw, reg_dma_ctl_queue_off[cfg->ctl->idx],
				cfg->dma_buf->iova);
		SDE_REG_WRITE(&hw, reg_dma_ctl_queue_off[cfg->ctl->idx] + 0x4,
				cmd1);
	} else if (cfg->dma_type == REG_DMA_TYPE_SB) {
		SDE_REG_WRITE(&hw, reg_dma_ctl_queue1_off[cfg->ctl->idx],
				cfg->dma_buf->iova);
		SDE_REG_WRITE(&hw, reg_dma_ctl_queue1_off[cfg->ctl->idx] + 0x4,
				cmd1);
	}

	if (cfg->last_command) {
		/* ensure last command is queued before lut dma trigger */
		wmb();

		mask = ctl_trigger_done_mask[cfg->ctl->idx][cfg->queue_select];
		SDE_REG_WRITE(&hw, reg_dma_intr_0_clear_offset[cfg->ctl->idx][cfg->queue_select],
				mask);

		/* DB LUTDMA use SW trigger while SB LUTDMA uses DSPP_SB
		 * flush as its trigger event.
		 */
		if (cfg->dma_type == REG_DMA_TYPE_DB) {
			SDE_REG_WRITE(&cfg->ctl->hw, reg_dma_ctl_trigger_offset,
					queue_sel[cfg->queue_select]);
		}
	}

	SDE_EVT32(cfg->feature, cfg->dma_type,
			((uint64_t)cfg->dma_buf) >> 32,
			((uint64_t)cfg->dma_buf) & 0xFFFFFFFF,
			(cfg->dma_buf->iova) >> 32,
			(cfg->dma_buf->iova) & 0xFFFFFFFF,
			cfg->op,
			cfg->queue_select, cfg->ctl->idx,
			SIZE_DWORD(cfg->dma_buf->index));

	return 0;
}
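
/*
 * setup_clk_force_ctrl() - force the LUTDMA clock on or off through the
 * PMU_CLK_CTRL force bits; the return value reports whether the force bits
 * were clear before the write.
 */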
static bool setup_clk_force_ctrl(struct sde_hw_blk_reg_map *hw,
		enum sde_clk_ctrl_type clk_ctrl, bool enable)
{
	u32 reg_val, new_val;

	if (!hw)
		return false;

	if (!SDE_CLK_CTRL_LUTDMA_VALID(clk_ctrl))
		return false;

	reg_val = SDE_REG_READ(hw, PMU_CLK_CTRL);

	if (enable)
		new_val = reg_val | (BIT(0) | BIT(16));
	else
		new_val = reg_val & ~(BIT(0) | BIT(16));

	SDE_REG_WRITE(hw, PMU_CLK_CTRL, new_val);
	wmb(); /* ensure write finished before progressing */

	return !(reg_val & (BIT(0) | BIT(16)));
}
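
/*
 * init_v1() - base initialization shared by all LUTDMA revisions: allocate
 * the per-CTL "last command" buffers, install the v1 ops, and program the
 * v1 register offsets. Allocation failure is not fatal; returning 0 with no
 * buffers lets the caller fall back to AHB writes.
 */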
int init_v1(struct sde_hw_reg_dma *cfg)
{
	int i = 0, rc = 0;

	if (!cfg)
		return -EINVAL;

	reg_dma = cfg;
	for (i = CTL_0; i < CTL_MAX; i++) {
		if (!last_cmd_buf_db[i]) {
			last_cmd_buf_db[i] =
				alloc_reg_dma_buf_v1(REG_DMA_HEADERS_BUFFER_SZ);
			if (IS_ERR_OR_NULL(last_cmd_buf_db[i])) {
				/*
				 * This will allow reg dma to fall back to
				 * AHB domain
				 */
				pr_info("Failed to allocate reg dma, ret:%lu\n",
						PTR_ERR(last_cmd_buf_db[i]));
				return 0;
			}
		}
		if (!last_cmd_buf_sb[i]) {
			last_cmd_buf_sb[i] =
				alloc_reg_dma_buf_v1(REG_DMA_HEADERS_BUFFER_SZ);
			if (IS_ERR_OR_NULL(last_cmd_buf_sb[i])) {
				/*
				 * This will allow reg dma to fall back to
				 * AHB domain
				 */
				pr_info("Failed to allocate reg dma, ret:%lu\n",
						PTR_ERR(last_cmd_buf_sb[i]));
				return 0;
			}
		}
	}

	if (rc) {
		for (i = 0; i < CTL_MAX; i++) {
			if (!last_cmd_buf_db[i])
				continue;
			dealloc_reg_dma_v1(last_cmd_buf_db[i]);
			last_cmd_buf_db[i] = NULL;
		}
		for (i = 0; i < CTL_MAX; i++) {
			if (!last_cmd_buf_sb[i])
				continue;
			dealloc_reg_dma_v1(last_cmd_buf_sb[i]);
			last_cmd_buf_sb[i] = NULL;
		}
		return rc;
	}

	reg_dma->ops.check_support = check_support_v1;
	reg_dma->ops.setup_payload = setup_payload_v1;
	reg_dma->ops.kick_off = kick_off_v1;
	reg_dma->ops.reset = reset_v1;
	reg_dma->ops.alloc_reg_dma_buf = alloc_reg_dma_buf_v1;
	reg_dma->ops.dealloc_reg_dma = dealloc_reg_dma_v1;
	reg_dma->ops.reset_reg_dma_buf = reset_reg_dma_buffer_v1;
	reg_dma->ops.last_command = last_cmd_v1;
	reg_dma->ops.dump_regs = dump_regs_v1;

	reg_dma_register_count = 60;
	reg_dma_decode_sel = 0x180ac060;
	reg_dma_opmode_offset = 0x4;
	reg_dma_ctl0_queue0_cmd0_offset = 0x14;
	reg_dma_intr_4_status_offset = 0xa0;
	reg_dma_ctl_trigger_offset = 0xd4;
	reg_dma_error_clear_mask = BIT(0) | BIT(1) | BIT(2) | BIT(16);
	reg_dma_intr_4_clear_offset = 0xc0;
	for (i = 0; i < CTL_MAX; i++) {
		reg_dma_intr_0_status_offset[i][DMA_CTL_QUEUE0] = 0x90;
		reg_dma_intr_0_status_offset[i][DMA_CTL_QUEUE1] = 0x90;
		reg_dma_intr_0_clear_offset[i][DMA_CTL_QUEUE0] = 0xb0;
		reg_dma_intr_0_clear_offset[i][DMA_CTL_QUEUE1] = 0xb0;
		reg_dma_ctl0_reset_offset[i][DMA_CTL_QUEUE0] = 0xe4 + i * 4;
		reg_dma_ctl0_reset_offset[i][DMA_CTL_QUEUE1] = 0xe4 + i * 4;
	}

	reg_dma_ctl_queue_off[CTL_0] = reg_dma_ctl0_queue0_cmd0_offset;
	for (i = CTL_1; i < ARRAY_SIZE(reg_dma_ctl_queue_off); i++)
		reg_dma_ctl_queue_off[i] = reg_dma_ctl_queue_off[i - 1] +
			(sizeof(u32) * 4);

	return 0;
}

int init_v11(struct sde_hw_reg_dma *cfg)
{
	int ret = 0, i = 0;

	ret = init_v1(cfg);
	if (ret) {
		DRM_ERROR("failed to initialize v1: ret %d\n", ret);
		return -EINVAL;
	}

	/* initialize register offsets and v1_supported based on version */
	reg_dma_register_count = 133;
	reg_dma_decode_sel = 0x180ac114;
	reg_dma_opmode_offset = 0x4;
	reg_dma_ctl0_queue0_cmd0_offset = 0x14;
	reg_dma_intr_4_status_offset = 0x170;
	reg_dma_ctl_trigger_offset = 0xd4;
	reg_dma_intr_4_clear_offset = 0x1b0;
	reg_dma_error_clear_mask = BIT(0) | BIT(1) | BIT(2) | BIT(16) |
			BIT(17) | BIT(18);

	reg_dma_ctl_queue_off[CTL_0] = reg_dma_ctl0_queue0_cmd0_offset;
	for (i = CTL_1; i < ARRAY_SIZE(reg_dma_ctl_queue_off); i++)
		reg_dma_ctl_queue_off[i] = reg_dma_ctl_queue_off[i - 1] +
			(sizeof(u32) * 4);

	for (i = 0; i < CTL_MAX; i++) {
		reg_dma_intr_0_status_offset[i][DMA_CTL_QUEUE0] = 0x160;
		reg_dma_intr_0_status_offset[i][DMA_CTL_QUEUE1] = 0x160;
		reg_dma_intr_0_clear_offset[i][DMA_CTL_QUEUE0] = 0x1a0;
		reg_dma_intr_0_clear_offset[i][DMA_CTL_QUEUE1] = 0x1a0;
		reg_dma_ctl0_reset_offset[i][DMA_CTL_QUEUE0] = 0x200 + i * 4;
		reg_dma_ctl0_reset_offset[i][DMA_CTL_QUEUE1] = 0x200 + i * 4;
	}

	v1_supported[IGC] = DSPP_IGC | GRP_DSPP_HW_BLK_SELECT |
			GRP_VIG_HW_BLK_SELECT | GRP_DMA_HW_BLK_SELECT;
	v1_supported[GC] = GRP_DMA_HW_BLK_SELECT | GRP_DSPP_HW_BLK_SELECT;
	v1_supported[HSIC] = GRP_DSPP_HW_BLK_SELECT;
	v1_supported[SIX_ZONE] = GRP_DSPP_HW_BLK_SELECT;
	v1_supported[MEMC_SKIN] = GRP_DSPP_HW_BLK_SELECT;
	v1_supported[MEMC_SKY] = GRP_DSPP_HW_BLK_SELECT;
	v1_supported[MEMC_FOLIAGE] = GRP_DSPP_HW_BLK_SELECT;
	v1_supported[MEMC_PROT] = GRP_DSPP_HW_BLK_SELECT;
	v1_supported[QSEED] = GRP_VIG_HW_BLK_SELECT;

	return 0;
}

int init_v12(struct sde_hw_reg_dma *cfg)
{
	int ret = 0;

	ret = init_v11(cfg);
	if (ret) {
		DRM_ERROR("failed to initialize v11: ret %d\n", ret);
		return ret;
	}

	v1_supported[LTM_INIT] = GRP_LTM_HW_BLK_SELECT;
	v1_supported[LTM_ROI] = GRP_LTM_HW_BLK_SELECT;
	v1_supported[LTM_VLUT] = GRP_LTM_HW_BLK_SELECT;
	v1_supported[RC_MASK_CFG] = (GRP_DSPP_HW_BLK_SELECT |
			GRP_MDSS_HW_BLK_SELECT);
	v1_supported[RC_PU_CFG] = (GRP_DSPP_HW_BLK_SELECT |
			GRP_MDSS_HW_BLK_SELECT);
	v1_supported[SPR_INIT] = (GRP_DSPP_HW_BLK_SELECT |
			GRP_MDSS_HW_BLK_SELECT);
	v1_supported[SPR_UDC] = (GRP_DSPP_HW_BLK_SELECT |
			GRP_MDSS_HW_BLK_SELECT);
	v1_supported[SPR_PU_CFG] = (GRP_DSPP_HW_BLK_SELECT |
			GRP_MDSS_HW_BLK_SELECT);
	v1_supported[DEMURA_CFG] = MDSS | DSPP0 | DSPP1;
	v1_supported[DEMURA_CFG0_PARAM2] = MDSS | DSPP0 | DSPP1;

	return 0;
}

static int init_reg_dma_vbif(struct sde_hw_reg_dma *cfg)
{
	int ret = 0;
	struct sde_hw_blk_reg_map *hw;
	struct sde_vbif_clk_client clk_client;
	struct msm_drm_private *priv = cfg->drm_dev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct sde_kms *sde_kms = to_sde_kms(kms);

	if (cfg->caps->clk_ctrl != SDE_CLK_CTRL_LUTDMA) {
		SDE_ERROR("invalid lutdma clk ctrl type %d\n", cfg->caps->clk_ctrl);
		return -EINVAL;
	}

	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw) {
		SDE_ERROR("failed to create hw block\n");
		return -ENOMEM;
	}

	hw->base_off = cfg->addr;
	hw->blk_off = cfg->caps->reg_dma_blks[REG_DMA_TYPE_DB].base;

	clk_client.hw = hw;
	clk_client.clk_ctrl = cfg->caps->clk_ctrl;
	clk_client.ops.setup_clk_force_ctrl = setup_clk_force_ctrl;

	ret = sde_vbif_clk_register(sde_kms, &clk_client);
	if (ret) {
		SDE_ERROR("failed to register vbif client %d\n", cfg->caps->clk_ctrl);
		kfree(hw);
	}

	return ret;
}

#define BASE_REG_SIZE 0x400

int init_v2(struct sde_hw_reg_dma *cfg)
{
	int ret = 0, i = 0;

	ret = init_v12(cfg);
	if (ret) {
		DRM_ERROR("failed to initialize v12: ret %d\n", ret);
		return ret;
	}

	/* initialize register offsets based on version delta */
	reg_dma_register_count = 0x91;
	reg_dma_ctl0_queue1_cmd0_offset = 0x1c;
	reg_dma_error_clear_mask |= BIT(19);

	reg_dma_ctl_queue1_off[CTL_0] = reg_dma_ctl0_queue1_cmd0_offset;
	for (i = CTL_1; i < ARRAY_SIZE(reg_dma_ctl_queue_off); i++)
		reg_dma_ctl_queue1_off[i] = reg_dma_ctl_queue1_off[i - 1] +
			(sizeof(u32) * 4);

	v1_supported[IGC] = GRP_DSPP_HW_BLK_SELECT | GRP_VIG_HW_BLK_SELECT |
			GRP_DMA_HW_BLK_SELECT;

	if (cfg->caps->reg_dma_blks[REG_DMA_TYPE_SB].valid == true) {
		char name[20];
		uint32_t base = cfg->caps->reg_dma_blks[REG_DMA_TYPE_SB].base;

		snprintf(name, sizeof(name), "REG_DMA_SB");
		sde_dbg_reg_register_dump_range(LUTDMA_DBG_NAME, name, base,
				base + BASE_REG_SIZE, cfg->caps->xin_id);

		reg_dma->ops.last_command_sb = last_cmd_sb_v2;
	}

	if (cfg->caps->reg_dma_blks[REG_DMA_TYPE_DB].valid == true) {
		char name[20];
		uint32_t base = cfg->caps->reg_dma_blks[REG_DMA_TYPE_DB].base;

		snprintf(name, sizeof(name), "REG_DMA_DB");
		sde_dbg_reg_register_dump_range(LUTDMA_DBG_NAME, name, base,
				base + BASE_REG_SIZE, cfg->caps->xin_id);
	}

	if (cfg->caps->split_vbif_supported)
		ret = init_reg_dma_vbif(cfg);

	return ret;
}

#define CTL_REG_SIZE 0x80

int init_v3(struct sde_hw_reg_dma *cfg)
{
	char name[20];
	int ret = 0, i;

	ret = init_v2(cfg);
	if (ret) {
		DRM_ERROR("failed to initialize v2: ret %d\n", ret);
		return ret;
	}

	reg_dma_register_count = 0x7000;
	reg_dma_decode_sel = 0x18180114;
	reg_dma_ctl0_queue0_cmd0_offset = 0x1000;
	reg_dma_ctl0_queue1_cmd0_offset = 0x1000;
	for (i = CTL_0; i < ARRAY_SIZE(reg_dma_ctl_queue_off); i++) {
		reg_dma_ctl_queue_off[i] = reg_dma_ctl0_queue0_cmd0_offset * i;
		reg_dma_ctl_queue1_off[i] = reg_dma_ctl0_queue1_cmd0_offset * i + 8;
	}

	/* Register DBG DUMP RANGES - CTL paths are 0x80 in size */
	if (cfg->caps->reg_dma_blks[REG_DMA_TYPE_DB].valid) {
		for (i = CTL_0; i < ARRAY_SIZE(reg_dma_ctl_queue_off); i++) {
			u32 base = cfg->caps->reg_dma_blks[REG_DMA_TYPE_DB].base +
					reg_dma_ctl_queue_off[i];

			snprintf(name, sizeof(name), "REG_DMA_DB_CTL%d", i);
			sde_dbg_reg_register_dump_range(LUTDMA_DBG_NAME, name, base,
					base + CTL_REG_SIZE, cfg->caps->xin_id);
		}
	}

	if (cfg->caps->reg_dma_blks[REG_DMA_TYPE_SB].valid) {
		for (i = CTL_0; i < ARRAY_SIZE(reg_dma_ctl_queue_off); i++) {
			u32 base = cfg->caps->reg_dma_blks[REG_DMA_TYPE_SB].base +
					reg_dma_ctl_queue_off[i];

			snprintf(name, sizeof(name), "REG_DMA_SB_CTL%d", i);
			sde_dbg_reg_register_dump_range(LUTDMA_DBG_NAME, name, base,
					base + CTL_REG_SIZE, cfg->caps->xin_id);
		}
	}

	for (i = CTL_0; i < CTL_MAX; i++) {
		ctl_trigger_done_mask[i][DMA_CTL_QUEUE0] = BIT(3);
		ctl_trigger_done_mask[i][DMA_CTL_QUEUE1] = BIT(4);
		reg_dma_intr_0_status_offset[i][DMA_CTL_QUEUE0] = 4096 * i + 0x44;
		reg_dma_intr_0_status_offset[i][DMA_CTL_QUEUE1] = 4096 * i + 0x44;
		reg_dma_intr_0_clear_offset[i][DMA_CTL_QUEUE0] =
			reg_dma_intr_0_status_offset[i][DMA_CTL_QUEUE0] + 4;
		reg_dma_intr_0_clear_offset[i][DMA_CTL_QUEUE1] =
			reg_dma_intr_0_status_offset[i][DMA_CTL_QUEUE1] + 4;
		reg_dma_ctl0_reset_offset[i][DMA_CTL_QUEUE0] = 4096 * i + 0x54;
		reg_dma_ctl0_reset_offset[i][DMA_CTL_QUEUE1] = 4096 * i + 0x54;
	}

	v1_supported[DEMURA_CFG] = v1_supported[DEMURA_CFG] | DSPP2 | DSPP3;
	v1_supported[DEMURA_CFG0_PARAM2] = v1_supported[DEMURA_CFG0_PARAM2] | DSPP2 | DSPP3;

	return 0;
}

static int check_support_v1(enum sde_reg_dma_features feature,
		enum sde_reg_dma_blk blk,
		bool *is_supported)
{
	int ret = 0;

	if (!is_supported)
		return -EINVAL;

	if (feature >= REG_DMA_FEATURES_MAX
			|| blk >= BIT_ULL(REG_DMA_BLK_MAX)) {
		*is_supported = false;
		return ret;
	}

	*is_supported = (blk & v1_supported[feature]) ? true : false;

	return ret;
}
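
/*
 * setup_payload_v1() / kick_off_v1() - entry points installed in reg_dma->ops.
 * A typical sequence is: allocate a buffer with alloc_reg_dma_buf_v1(), reset
 * it with reset_reg_dma_buffer_v1(), queue a HW_BLK_SELECT op followed by one
 * or more register-write ops through setup_payload_v1(), then submit with
 * kick_off_v1() and close out with last_command().
 */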
static int setup_payload_v1(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	int rc = 0;

	rc = validate_dma_cfg(cfg);

	if (!rc)
		rc = validate_dma_op_params[cfg->ops](cfg);

	if (!rc)
		rc = write_dma_op_params[cfg->ops](cfg);

	return rc;
}

static int kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)
{
	int rc = 0;

	rc = validate_kick_off_v1(cfg);
	if (rc)
		return rc;

	rc = write_kick_off_v1(cfg);

	return rc;
}
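
/*
 * reset_v1() - soft-reset the LUTDMA queues of every DMA type for the given
 * CTL and briefly poll for the reset bit to clear.
 */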
int reset_v1(struct sde_hw_ctl *ctl)
{
	struct sde_hw_blk_reg_map hw;
	u32 val, i = 0, k = 0;

	if (!ctl || ctl->idx >= CTL_MAX) {
		DRM_ERROR("invalid ctl %pK ctl idx %d\n",
				ctl, ((ctl) ? ctl->idx : 0));
		return -EINVAL;
	}

	for (k = 0; k < REG_DMA_TYPE_MAX; k++) {
		memset(&hw, 0, sizeof(hw));
		SET_UP_REG_DMA_REG(hw, reg_dma, k);
		if (hw.hw_rev == 0)
			continue;

		SDE_REG_WRITE(&hw, reg_dma_opmode_offset, BIT(0));
		SDE_REG_WRITE(&hw, reg_dma_ctl0_reset_offset[ctl->idx][k], BIT(0));

		i = 0;
		do {
			udelay(1000);
			i++;
			val = SDE_REG_READ(&hw, reg_dma_ctl0_reset_offset[ctl->idx][k]);
		} while (i < 2 && val);
	}

	return 0;
}
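
/*
 * sde_reg_dma_aspace_cb_locked() - SMMU attach/detach callback: on detach it
 * drops the cached iova and vaddr, on attach it remaps the buffer and
 * re-applies the guard-byte alignment adjustment.
 */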
static void sde_reg_dma_aspace_cb_locked(void *cb_data, bool is_detach)
{
	struct sde_reg_dma_buffer *dma_buf = NULL;
	struct msm_gem_address_space *aspace = NULL;
	u32 iova_aligned, offset;
	int rc;

	if (!cb_data) {
		DRM_ERROR("aspace cb called with invalid dma_buf\n");
		return;
	}

	dma_buf = (struct sde_reg_dma_buffer *)cb_data;
	aspace = dma_buf->aspace;

	if (is_detach) {
		/* invalidate the stored iova */
		dma_buf->iova = 0;

		/* return the virtual address mapping */
		msm_gem_put_vaddr(dma_buf->buf);
		msm_gem_vunmap(dma_buf->buf, OBJ_LOCK_NORMAL);
	} else {
		rc = msm_gem_get_iova(dma_buf->buf, aspace,
				&dma_buf->iova);
		if (rc) {
			DRM_ERROR("failed to get the iova rc %d\n", rc);
			return;
		}

		dma_buf->vaddr = msm_gem_get_vaddr(dma_buf->buf);
		if (IS_ERR_OR_NULL(dma_buf->vaddr)) {
			DRM_ERROR("failed to get va rc %d\n", rc);
			return;
		}

		iova_aligned = (dma_buf->iova + GUARD_BYTES) & ALIGNED_OFFSET;
		offset = iova_aligned - dma_buf->iova;
		dma_buf->iova = dma_buf->iova + offset;
		dma_buf->vaddr = (void *)(((u8 *)dma_buf->vaddr) + offset);
		dma_buf->next_op_allowed = DECODE_SEL_OP;
	}
}
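
/*
 * alloc_reg_dma_buf_v1() - allocate an uncached GEM buffer (padded by
 * GUARD_BYTES so the usable region can be 256-byte aligned), map its iova
 * and kernel vaddr, and register the SMMU attach/detach callback.
 */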
static struct sde_reg_dma_buffer *alloc_reg_dma_buf_v1(u32 size)
{
	struct sde_reg_dma_buffer *dma_buf = NULL;
	u32 iova_aligned, offset;
	u32 rsize = size + GUARD_BYTES;
	struct msm_gem_address_space *aspace = NULL;
	int rc = 0;

	if (!size || SIZE_DWORD(size) > MAX_DWORDS_SZ) {
		DRM_ERROR("invalid buffer size %lu, max %lu\n",
				SIZE_DWORD(size), MAX_DWORDS_SZ);
		return ERR_PTR(-EINVAL);
	}

	dma_buf = kzalloc(sizeof(*dma_buf), GFP_KERNEL);
	if (!dma_buf)
		return ERR_PTR(-ENOMEM);

	dma_buf->buf = msm_gem_new(reg_dma->drm_dev,
			rsize, MSM_BO_UNCACHED);
	if (IS_ERR_OR_NULL(dma_buf->buf)) {
		rc = -EINVAL;
		goto fail;
	}

	aspace = msm_gem_smmu_address_space_get(reg_dma->drm_dev,
			MSM_SMMU_DOMAIN_UNSECURE);
	if (PTR_ERR(aspace) == -ENODEV) {
		aspace = NULL;
		DRM_DEBUG("IOMMU not present, relying on VRAM\n");
	} else if (IS_ERR_OR_NULL(aspace)) {
		rc = PTR_ERR(aspace);
		aspace = NULL;
		DRM_ERROR("failed to get aspace %d", rc);
		goto free_gem;
	} else if (aspace) {
		/* register to aspace */
		rc = msm_gem_address_space_register_cb(aspace,
				sde_reg_dma_aspace_cb_locked,
				(void *)dma_buf);
		if (rc) {
			DRM_ERROR("failed to register callback %d", rc);
			goto free_gem;
		}
	}

	dma_buf->aspace = aspace;
	rc = msm_gem_get_iova(dma_buf->buf, aspace, &dma_buf->iova);
	if (rc) {
		DRM_ERROR("failed to get the iova rc %d\n", rc);
		goto free_aspace_cb;
	}

	dma_buf->vaddr = msm_gem_get_vaddr(dma_buf->buf);
	if (IS_ERR_OR_NULL(dma_buf->vaddr)) {
		DRM_ERROR("failed to get va rc %d\n", rc);
		rc = -EINVAL;
		goto put_iova;
	}

	dma_buf->buffer_size = size;
	iova_aligned = (dma_buf->iova + GUARD_BYTES) & ALIGNED_OFFSET;
	offset = iova_aligned - dma_buf->iova;
	dma_buf->iova = dma_buf->iova + offset;
	dma_buf->vaddr = (void *)(((u8 *)dma_buf->vaddr) + offset);
	dma_buf->next_op_allowed = DECODE_SEL_OP;

	return dma_buf;

put_iova:
	msm_gem_put_iova(dma_buf->buf, aspace);
free_aspace_cb:
	msm_gem_address_space_unregister_cb(aspace,
			sde_reg_dma_aspace_cb_locked, dma_buf);
free_gem:
	mutex_lock(&reg_dma->drm_dev->struct_mutex);
	msm_gem_free_object(dma_buf->buf);
	mutex_unlock(&reg_dma->drm_dev->struct_mutex);
fail:
	kfree(dma_buf);
	return ERR_PTR(rc);
}

static int dealloc_reg_dma_v1(struct sde_reg_dma_buffer *dma_buf)
{
	if (!dma_buf) {
		DRM_ERROR("invalid param reg_buf %pK\n", dma_buf);
		return -EINVAL;
	}

	if (dma_buf->buf) {
		msm_gem_put_iova(dma_buf->buf, 0);
		msm_gem_address_space_unregister_cb(dma_buf->aspace,
				sde_reg_dma_aspace_cb_locked, dma_buf);
		mutex_lock(&reg_dma->drm_dev->struct_mutex);
		msm_gem_free_object(dma_buf->buf);
		mutex_unlock(&reg_dma->drm_dev->struct_mutex);
	}

	kfree(dma_buf);

	return 0;
}

static int reset_reg_dma_buffer_v1(struct sde_reg_dma_buffer *lut_buf)
{
	if (!lut_buf)
		return -EINVAL;

	lut_buf->index = 0;
	lut_buf->ops_completed = 0;
	lut_buf->next_op_allowed = DECODE_SEL_OP;
	lut_buf->abs_write_cnt = 0;

	return 0;
}

static int validate_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 remain_len, write_len;

	remain_len = BUFFER_SPACE_LEFT(cfg);
	write_len = sizeof(u32);
	if (remain_len < write_len) {
		DRM_ERROR("buffer is full sz %d needs %d bytes\n",
				remain_len, write_len);
		return -EINVAL;
	}

	return 0;
}

static int write_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 *loc = NULL;

	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = reg_dma_decode_sel;
	loc[1] = 0;
	cfg->dma_buf->index = sizeof(u32) * 2;
	cfg->dma_buf->ops_completed = REG_WRITE_OP | DECODE_SEL_OP;
	cfg->dma_buf->next_op_allowed = REG_WRITE_OP;

	return 0;
}
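
/*
 * last_cmd_v1() - queue the "last command" descriptor on the DB LUTDMA path
 * and, in REG_DMA_WAIT4_COMP mode, poll the interrupt status register until
 * the trigger-done bit for this CTL/queue is set.
 */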
static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
		enum sde_reg_dma_last_cmd_mode mode)
{
	struct sde_reg_dma_setup_ops_cfg cfg;
	struct sde_reg_dma_kickoff_cfg kick_off;
	struct sde_hw_blk_reg_map hw;
	u32 val;
	int rc;

	if (!ctl || ctl->idx >= CTL_MAX || q >= DMA_CTL_QUEUE_MAX) {
		DRM_ERROR("ctl %pK q %d index %d\n", ctl, q,
				((ctl) ? ctl->idx : -1));
		return -EINVAL;
	}

	if (!last_cmd_buf_db[ctl->idx] || !last_cmd_buf_db[ctl->idx]->iova) {
		DRM_ERROR("invalid last cmd buf for idx %d\n", ctl->idx);
		return -EINVAL;
	}

	cfg.dma_buf = last_cmd_buf_db[ctl->idx];
	reset_reg_dma_buffer_v1(last_cmd_buf_db[ctl->idx]);
	if (validate_last_cmd(&cfg)) {
		DRM_ERROR("validate buf failed\n");
		return -EINVAL;
	}

	if (write_last_cmd(&cfg)) {
		DRM_ERROR("write buf failed\n");
		return -EINVAL;
	}

	kick_off.ctl = ctl;
	kick_off.queue_select = q;
	kick_off.trigger_mode = WRITE_IMMEDIATE;
	kick_off.last_command = 1;
	kick_off.op = REG_DMA_WRITE;
	kick_off.dma_type = REG_DMA_TYPE_DB;
	kick_off.dma_buf = last_cmd_buf_db[ctl->idx];
	kick_off.feature = REG_DMA_FEATURES_MAX;
	rc = kick_off_v1(&kick_off);
	if (rc) {
		DRM_ERROR("kick off last cmd failed\n");
		return rc;
	}

	//Lack of block support will be caught by kick_off
	memset(&hw, 0, sizeof(hw));
	SET_UP_REG_DMA_REG(hw, reg_dma, kick_off.dma_type);

	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY, mode, ctl->idx, kick_off.queue_select,
			kick_off.dma_type, kick_off.op);
	if (mode == REG_DMA_WAIT4_COMP) {
		rc = read_poll_timeout(sde_reg_read, val,
				(val & ctl_trigger_done_mask[ctl->idx][q]), 10, 20000, false,
				&hw, reg_dma_intr_0_status_offset[ctl->idx][q]);
		if (rc)
			DRM_ERROR("poll wait failed %d val %x mask %x\n",
					rc, val, ctl_trigger_done_mask[ctl->idx][q]);
		SDE_EVT32(SDE_EVTLOG_FUNC_EXIT, mode);
	}

	return rc;
}

void deinit_v1(void)
{
	int i = 0;

	for (i = CTL_0; i < CTL_MAX; i++) {
		if (last_cmd_buf_db[i])
			dealloc_reg_dma_v1(last_cmd_buf_db[i]);
		last_cmd_buf_db[i] = NULL;
		if (last_cmd_buf_sb[i])
			dealloc_reg_dma_v1(last_cmd_buf_sb[i]);
		last_cmd_buf_sb[i] = NULL;
	}
}

static void dump_regs_v1(void)
{
	uint32_t i = 0, k = 0;
	u32 val;
	struct sde_hw_blk_reg_map hw;

	for (k = 0; k < REG_DMA_TYPE_MAX; k++) {
		memset(&hw, 0, sizeof(hw));
		SET_UP_REG_DMA_REG(hw, reg_dma, k);
		if (hw.hw_rev == 0)
			continue;

		for (i = 0; i < reg_dma_register_count; i++) {
			val = SDE_REG_READ(&hw, i * sizeof(u32));
			DRM_ERROR("offset %x val %x\n", (u32)(i * sizeof(u32)),
					val);
		}
	}
}
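
/*
 * last_cmd_sb_v2() - SB LUTDMA variant of the last-command path; it always
 * uses DMA_CTL_QUEUE1 and relies on the DSPP_SB flush as the trigger, so no
 * completion polling is done here.
 */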
static int last_cmd_sb_v2(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
		enum sde_reg_dma_last_cmd_mode mode)
{
	struct sde_reg_dma_setup_ops_cfg cfg;
	struct sde_reg_dma_kickoff_cfg kick_off;
	int rc = 0;

	if (!ctl || ctl->idx >= CTL_MAX || q >= DMA_CTL_QUEUE_MAX) {
		DRM_ERROR("ctl %pK q %d index %d\n", ctl, q,
				((ctl) ? ctl->idx : -1));
		return -EINVAL;
	}

	if (!last_cmd_buf_sb[ctl->idx] || !last_cmd_buf_sb[ctl->idx]->iova) {
		DRM_ERROR("invalid last cmd buf for idx %d\n", ctl->idx);
		return -EINVAL;
	}

	cfg.dma_buf = last_cmd_buf_sb[ctl->idx];
	reset_reg_dma_buffer_v1(last_cmd_buf_sb[ctl->idx]);
	if (validate_last_cmd(&cfg)) {
		DRM_ERROR("validate buf failed\n");
		return -EINVAL;
	}

	if (write_last_cmd(&cfg)) {
		DRM_ERROR("write buf failed\n");
		return -EINVAL;
	}

	kick_off.ctl = ctl;
	kick_off.trigger_mode = WRITE_IMMEDIATE;
	kick_off.last_command = 1;
	kick_off.op = REG_DMA_WRITE;
	kick_off.dma_type = REG_DMA_TYPE_SB;
	kick_off.queue_select = DMA_CTL_QUEUE1;
	kick_off.dma_buf = last_cmd_buf_sb[ctl->idx];
	kick_off.feature = REG_DMA_FEATURES_MAX;
	rc = kick_off_v1(&kick_off);
	if (rc)
		DRM_ERROR("kick off last cmd failed\n");

	SDE_EVT32(ctl->idx, kick_off.queue_select, kick_off.dma_type,
			kick_off.op);

	return rc;
}