sde_hw_reg_dma_v1.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 */
#include <linux/iopoll.h>
#include "sde_hw_mdss.h"
#include "sde_hw_ctl.h"
#include "sde_hw_reg_dma_v1.h"
#include "msm_drv.h"
#include "msm_mmu.h"
#include "sde_dbg.h"

#define GUARD_BYTES (BIT(8) - 1)
#define ALIGNED_OFFSET (U32_MAX & ~(GUARD_BYTES))
#define ADDR_ALIGN BIT(8)
#define MAX_RELATIVE_OFF (BIT(21) - 1)
#define ABSOLUTE_RANGE BIT(27)
#define DECODE_SEL_OP (BIT(HW_BLK_SELECT))
#define REG_WRITE_OP ((BIT(REG_SINGLE_WRITE)) | (BIT(REG_BLK_WRITE_SINGLE)) | \
	(BIT(REG_BLK_WRITE_INC)) | (BIT(REG_BLK_WRITE_MULTIPLE)) | \
	(BIT(REG_SINGLE_MODIFY)) | (BIT(REG_BLK_LUT_WRITE)))
#define REG_DMA_OPS (DECODE_SEL_OP | REG_WRITE_OP)
#define IS_OP_ALLOWED(op, buf_op) (BIT(op) & buf_op)

#define SET_UP_REG_DMA_REG(hw, reg_dma, i) \
	do { \
		if ((reg_dma)->caps->reg_dma_blks[(i)].valid == false) \
			break; \
		(hw).base_off = (reg_dma)->addr; \
		(hw).blk_off = (reg_dma)->caps->reg_dma_blks[(i)].base; \
		(hw).hw_rev = (reg_dma)->caps->version; \
		(hw).log_mask = SDE_DBG_MASK_REGDMA; \
	} while (0)

#define SIZE_DWORD(x) ((x) / (sizeof(u32)))
#define NOT_WORD_ALIGNED(x) ((x) & 0x3)

#define GRP_VIG_HW_BLK_SELECT (VIG0 | VIG1 | VIG2 | VIG3)
#define GRP_DMA_HW_BLK_SELECT (DMA0 | DMA1 | DMA2 | DMA3 | DMA4 | DMA5)
#define GRP_DSPP_HW_BLK_SELECT (DSPP0 | DSPP1 | DSPP2 | DSPP3)
#define GRP_LTM_HW_BLK_SELECT (LTM0 | LTM1 | LTM2 | LTM3)
#define GRP_MDSS_HW_BLK_SELECT (MDSS)
#define BUFFER_SPACE_LEFT(cfg) ((cfg)->dma_buf->buffer_size - \
		(cfg)->dma_buf->index)
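
/*
 * Command word encodings used by the write_* helpers below: bits [30:28]
 * of the first DWORD select the operation type, and ABSOLUTE_RANGE
 * (BIT(27)) is OR'ed in when the target block is MDSS.
 */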
#define REL_ADDR_OPCODE (BIT(27))
#define NO_OP_OPCODE (0)
#define SINGLE_REG_WRITE_OPCODE (BIT(28))
#define SINGLE_REG_MODIFY_OPCODE (BIT(29))
#define HW_INDEX_REG_WRITE_OPCODE (BIT(28) | BIT(29))
#define AUTO_INC_REG_WRITE_OPCODE (BIT(30))
#define BLK_REG_WRITE_OPCODE (BIT(30) | BIT(28))
#define LUTBUS_WRITE_OPCODE (BIT(30) | BIT(29))

#define WRAP_MIN_SIZE 2
#define WRAP_MAX_SIZE (BIT(4) - 1)
#define MAX_DWORDS_SZ (BIT(14) - 1)
#define REG_DMA_HEADERS_BUFFER_SZ (sizeof(u32) * 128)

#define LUTBUS_TABLE_SEL_MASK 0x10000
#define LUTBUS_BLOCK_SEL_MASK 0xffff
#define LUTBUS_TRANS_SZ_MASK 0xff0000
#define LUTBUS_LUT_SIZE_MASK 0x3fff

static uint32_t reg_dma_register_count;
static uint32_t reg_dma_decode_sel;
static uint32_t reg_dma_opmode_offset;
static uint32_t reg_dma_ctl0_queue0_cmd0_offset;
static uint32_t reg_dma_ctl0_queue1_cmd0_offset;
static uint32_t reg_dma_intr_status_offset;
static uint32_t reg_dma_intr_4_status_offset;
static uint32_t reg_dma_intr_clear_offset;
static uint32_t reg_dma_ctl_trigger_offset;
static uint32_t reg_dma_ctl0_reset_offset;
static uint32_t reg_dma_error_clear_mask;
static uint32_t reg_dma_ctl_queue_off[CTL_MAX];
static uint32_t reg_dma_ctl_queue1_off[CTL_MAX];

typedef int (*reg_dma_internal_ops) (struct sde_reg_dma_setup_ops_cfg *cfg);

static struct sde_hw_reg_dma *reg_dma;

static u32 ops_mem_size[REG_DMA_SETUP_OPS_MAX] = {
	[REG_BLK_WRITE_SINGLE] = sizeof(u32) * 2,
	[REG_BLK_WRITE_INC] = sizeof(u32) * 2,
	[REG_BLK_WRITE_MULTIPLE] = sizeof(u32) * 2,
	[HW_BLK_SELECT] = sizeof(u32) * 2,
	[REG_SINGLE_WRITE] = sizeof(u32) * 2,
	[REG_SINGLE_MODIFY] = sizeof(u32) * 3,
	[REG_BLK_LUT_WRITE] = sizeof(u32) * 2,
};

static u32 queue_sel[DMA_CTL_QUEUE_MAX] = {
	[DMA_CTL_QUEUE0] = BIT(0),
	[DMA_CTL_QUEUE1] = BIT(4),
};

static u32 dspp_read_sel[DSPP_HIST_MAX] = {
	[DSPP0_HIST] = 0,
	[DSPP1_HIST] = 1,
	[DSPP2_HIST] = 2,
	[DSPP3_HIST] = 3,
};

static u32 v1_supported[REG_DMA_FEATURES_MAX] = {
	[GAMUT] = GRP_VIG_HW_BLK_SELECT | GRP_DSPP_HW_BLK_SELECT,
	[VLUT] = GRP_DSPP_HW_BLK_SELECT,
	[GC] = GRP_DSPP_HW_BLK_SELECT,
	[IGC] = DSPP_IGC | GRP_DSPP_HW_BLK_SELECT,
	[PCC] = GRP_DSPP_HW_BLK_SELECT,
};

static u32 ctl_trigger_done_mask[CTL_MAX][DMA_CTL_QUEUE_MAX] = {
	[CTL_0][0] = BIT(16),
	[CTL_0][1] = BIT(21),
	[CTL_1][0] = BIT(17),
	[CTL_1][1] = BIT(22),
	[CTL_2][0] = BIT(18),
	[CTL_2][1] = BIT(23),
	[CTL_3][0] = BIT(19),
	[CTL_3][1] = BIT(24),
	[CTL_4][0] = BIT(25),
	[CTL_4][1] = BIT(27),
	[CTL_5][0] = BIT(26),
	[CTL_5][1] = BIT(28),
};

static int validate_dma_cfg(struct sde_reg_dma_setup_ops_cfg *cfg);
static int validate_write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg);
static int validate_write_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
static int validate_blk_lut_write(struct sde_reg_dma_setup_ops_cfg *cfg);
static int validate_write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
static int validate_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_single_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_multi_reg_index(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_multi_reg_inc(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_single_modify(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_block_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg);
static int reset_reg_dma_buffer_v1(struct sde_reg_dma_buffer *lut_buf);
static int check_support_v1(enum sde_reg_dma_features feature,
		enum sde_reg_dma_blk blk, bool *is_supported);
static int setup_payload_v1(struct sde_reg_dma_setup_ops_cfg *cfg);
static int kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg);
static int reset_v1(struct sde_hw_ctl *ctl);
static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
		enum sde_reg_dma_last_cmd_mode mode);
static struct sde_reg_dma_buffer *alloc_reg_dma_buf_v1(u32 size);
static int dealloc_reg_dma_v1(struct sde_reg_dma_buffer *lut_buf);
static void dump_regs_v1(void);
static int last_cmd_sb_v2(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
		enum sde_reg_dma_last_cmd_mode mode);

static reg_dma_internal_ops write_dma_op_params[REG_DMA_SETUP_OPS_MAX] = {
	[HW_BLK_SELECT] = write_decode_sel,
	[REG_SINGLE_WRITE] = write_single_reg,
	[REG_BLK_WRITE_SINGLE] = write_multi_reg_inc,
	[REG_BLK_WRITE_INC] = write_multi_reg_index,
	[REG_BLK_WRITE_MULTIPLE] = write_multi_lut_reg,
	[REG_SINGLE_MODIFY] = write_single_modify,
	[REG_BLK_LUT_WRITE] = write_block_lut_reg,
};

static reg_dma_internal_ops validate_dma_op_params[REG_DMA_SETUP_OPS_MAX] = {
	[HW_BLK_SELECT] = validate_write_decode_sel,
	[REG_SINGLE_WRITE] = validate_write_reg,
	[REG_BLK_WRITE_SINGLE] = validate_write_reg,
	[REG_BLK_WRITE_INC] = validate_write_reg,
	[REG_BLK_WRITE_MULTIPLE] = validate_write_multi_lut_reg,
	[REG_SINGLE_MODIFY] = validate_write_reg,
	[REG_BLK_LUT_WRITE] = validate_blk_lut_write,
};

static struct sde_reg_dma_buffer *last_cmd_buf_db[CTL_MAX];
static struct sde_reg_dma_buffer *last_cmd_buf_sb[CTL_MAX];
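
/*
 * get_decode_sel - translate a bitmask of sde_reg_dma_blk values into the
 * hardware decode-select bitmask written as the payload of a DECODE_SEL op.
 */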
static void get_decode_sel(unsigned long blk, u32 *decode_sel)
{
	int i = 0;

	*decode_sel = 0;
	for_each_set_bit(i, &blk, REG_DMA_BLK_MAX) {
		switch (BIT(i)) {
		case VIG0:
			*decode_sel |= BIT(0);
			break;
		case VIG1:
			*decode_sel |= BIT(1);
			break;
		case VIG2:
			*decode_sel |= BIT(2);
			break;
		case VIG3:
			*decode_sel |= BIT(3);
			break;
		case DMA0:
			*decode_sel |= BIT(5);
			break;
		case DMA1:
			*decode_sel |= BIT(6);
			break;
		case DMA2:
			*decode_sel |= BIT(7);
			break;
		case DMA3:
			*decode_sel |= BIT(8);
			break;
		case DMA4:
			*decode_sel |= BIT(9);
			break;
		case DMA5:
			*decode_sel |= BIT(10);
			break;
		case DSPP0:
			*decode_sel |= BIT(17);
			break;
		case DSPP1:
			*decode_sel |= BIT(18);
			break;
		case DSPP2:
			*decode_sel |= BIT(19);
			break;
		case DSPP3:
			*decode_sel |= BIT(20);
			break;
		case SSPP_IGC:
			*decode_sel |= BIT(4);
			break;
		case DSPP_IGC:
			*decode_sel |= BIT(21);
			break;
		case LTM0:
			*decode_sel |= BIT(22);
			break;
		case LTM1:
			*decode_sel |= BIT(23);
			break;
		case LTM2:
			*decode_sel |= BIT(24);
			break;
		case LTM3:
			*decode_sel |= BIT(25);
			break;
		case MDSS:
			*decode_sel |= BIT(31);
			break;
		default:
			DRM_ERROR("block not supported %zx\n", (size_t)BIT(i));
			break;
		}
	}
}
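
/*
 * write_multi_reg - copy the payload that follows an already written opcode
 * header into the DMA buffer and advance the buffer index. MDSS (absolute
 * range) payloads are also counted in abs_write_cnt so the kickoff path can
 * pad an odd number of absolute writes.
 */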
static int write_multi_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u8 *loc = NULL;

	loc = (u8 *)cfg->dma_buf->vaddr + cfg->dma_buf->index;
	memcpy(loc, cfg->data, cfg->data_size);
	cfg->dma_buf->index += cfg->data_size;
	cfg->dma_buf->next_op_allowed = REG_WRITE_OP | DECODE_SEL_OP;
	cfg->dma_buf->ops_completed |= REG_WRITE_OP;
	if (cfg->blk == MDSS)
		cfg->dma_buf->abs_write_cnt += SIZE_DWORD(cfg->data_size);

	return 0;
}

int write_multi_reg_index(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 *loc = NULL;

	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = HW_INDEX_REG_WRITE_OPCODE;
	loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
	if (cfg->blk == MDSS)
		loc[0] |= ABSOLUTE_RANGE;
	loc[1] = SIZE_DWORD(cfg->data_size);
	cfg->dma_buf->index += ops_mem_size[cfg->ops];

	return write_multi_reg(cfg);
}

int write_multi_reg_inc(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 *loc = NULL;

	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = AUTO_INC_REG_WRITE_OPCODE;
	if (cfg->blk == MDSS)
		loc[0] |= ABSOLUTE_RANGE;
	loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
	loc[1] = SIZE_DWORD(cfg->data_size);
	cfg->dma_buf->index += ops_mem_size[cfg->ops];

	return write_multi_reg(cfg);
}

static int write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 *loc = NULL;

	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = BLK_REG_WRITE_OPCODE;
	loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
	if (cfg->blk == MDSS)
		loc[0] |= ABSOLUTE_RANGE;
	loc[1] = (cfg->inc) ? 0 : BIT(31);
	loc[1] |= (cfg->wrap_size & WRAP_MAX_SIZE) << 16;
	loc[1] |= ((SIZE_DWORD(cfg->data_size)) & MAX_DWORDS_SZ);
	cfg->dma_buf->next_op_allowed = REG_WRITE_OP;
	cfg->dma_buf->index += ops_mem_size[cfg->ops];

	return write_multi_reg(cfg);
}

static int write_single_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 *loc = NULL;

	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = SINGLE_REG_WRITE_OPCODE;
	loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
	if (cfg->blk == MDSS) {
		loc[0] |= ABSOLUTE_RANGE;
		cfg->dma_buf->abs_write_cnt++;
	}
	loc[1] = *cfg->data;
	cfg->dma_buf->index += ops_mem_size[cfg->ops];
	cfg->dma_buf->ops_completed |= REG_WRITE_OP;
	cfg->dma_buf->next_op_allowed = REG_WRITE_OP | DECODE_SEL_OP;

	return 0;
}

static int write_single_modify(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 *loc = NULL;

	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = SINGLE_REG_MODIFY_OPCODE;
	loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
	if (cfg->blk == MDSS)
		loc[0] |= ABSOLUTE_RANGE;
	loc[1] = cfg->mask;
	loc[2] = *cfg->data;
	cfg->dma_buf->index += ops_mem_size[cfg->ops];
	cfg->dma_buf->ops_completed |= REG_WRITE_OP;
	cfg->dma_buf->next_op_allowed = REG_WRITE_OP | DECODE_SEL_OP;

	return 0;
}
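
/*
 * write_block_lut_reg - emit a LUTBUS write op: DWORD0 carries the table
 * and block selects, DWORD1 the transaction and LUT sizes, followed by the
 * LUT payload copied via write_multi_reg().
 */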
static int write_block_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 *loc = NULL;
	int rc = -EINVAL;

	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = LUTBUS_WRITE_OPCODE;
	loc[0] |= (cfg->table_sel << 16) & LUTBUS_TABLE_SEL_MASK;
	loc[0] |= (cfg->block_sel & LUTBUS_BLOCK_SEL_MASK);
	loc[1] = (cfg->trans_size << 16) & LUTBUS_TRANS_SZ_MASK;
	loc[1] |= (cfg->lut_size & LUTBUS_LUT_SIZE_MASK);
	cfg->dma_buf->index += ops_mem_size[cfg->ops];

	rc = write_multi_reg(cfg);
	if (rc)
		return rc;

	/* adding 3 NO OPs as SW workaround for REG_BLK_LUT_WRITE
	 * HW limitation that requires the residual data plus the
	 * following opcode to exceed 4 DWORDs length.
	 */
	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = NO_OP_OPCODE;
	loc[1] = NO_OP_OPCODE;
	loc[2] = NO_OP_OPCODE;
	cfg->dma_buf->index += sizeof(u32) * 3;

	return 0;
}

static int write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 *loc = NULL;

	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = reg_dma_decode_sel;
	get_decode_sel(cfg->blk, &loc[1]);
	cfg->dma_buf->index += ops_mem_size[cfg->ops];
	cfg->dma_buf->ops_completed |= DECODE_SEL_OP;
	cfg->dma_buf->next_op_allowed = REG_WRITE_OP;

	return 0;
}

static int validate_write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	int rc;

	rc = validate_write_reg(cfg);
	if (rc)
		return rc;

	if (cfg->wrap_size < WRAP_MIN_SIZE || cfg->wrap_size > WRAP_MAX_SIZE) {
		DRM_ERROR("invalid wrap sz %d min %d max %zd\n",
			cfg->wrap_size, WRAP_MIN_SIZE, (size_t)WRAP_MAX_SIZE);
		rc = -EINVAL;
	}

	return rc;
}

static int validate_blk_lut_write(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	int rc;

	rc = validate_write_reg(cfg);
	if (rc)
		return rc;

	if (cfg->table_sel >= LUTBUS_TABLE_SELECT_MAX ||
			cfg->block_sel >= LUTBUS_BLOCK_MAX ||
			(cfg->trans_size != LUTBUS_IGC_TRANS_SIZE &&
			cfg->trans_size != LUTBUS_GAMUT_TRANS_SIZE &&
			cfg->trans_size != LUTBUS_SIXZONE_TRANS_SIZE)) {
		DRM_ERROR("invalid table_sel %d block_sel %d trans_size %d\n",
				cfg->table_sel, cfg->block_sel,
				cfg->trans_size);
		rc = -EINVAL;
	}

	return rc;
}
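
/*
 * validate_write_reg - common checks for the register write ops: the buffer
 * must have room for the opcode plus payload, the payload pointer must be
 * valid, and both the payload size and block offset must be word aligned
 * and within the hardware limits.
 */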
static int validate_write_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 remain_len, write_len;

	remain_len = BUFFER_SPACE_LEFT(cfg);
	write_len = ops_mem_size[cfg->ops] + cfg->data_size;
	if (remain_len < write_len) {
		DRM_ERROR("buffer is full sz %d needs %d bytes\n",
				remain_len, write_len);
		return -EINVAL;
	}

	if (!cfg->data) {
		DRM_ERROR("invalid data %pK size %d exp sz %d\n", cfg->data,
				cfg->data_size, write_len);
		return -EINVAL;
	}

	if ((SIZE_DWORD(cfg->data_size)) > MAX_DWORDS_SZ ||
			NOT_WORD_ALIGNED(cfg->data_size)) {
		DRM_ERROR("Invalid data size %d max %zd align %x\n",
				cfg->data_size, (size_t)MAX_DWORDS_SZ,
				NOT_WORD_ALIGNED(cfg->data_size));
		return -EINVAL;
	}

	if (cfg->blk_offset > MAX_RELATIVE_OFF ||
			NOT_WORD_ALIGNED(cfg->blk_offset)) {
		DRM_ERROR("invalid offset %d max %zd align %x\n",
				cfg->blk_offset, (size_t)MAX_RELATIVE_OFF,
				NOT_WORD_ALIGNED(cfg->blk_offset));
		return -EINVAL;
	}

	return 0;
}

static int validate_write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 remain_len;
	bool vig_blk, dma_blk, dspp_blk, mdss_blk;

	remain_len = BUFFER_SPACE_LEFT(cfg);
	if (remain_len < ops_mem_size[HW_BLK_SELECT]) {
		DRM_ERROR("buffer is full needs %d bytes\n",
				ops_mem_size[HW_BLK_SELECT]);
		return -EINVAL;
	}

	if (!cfg->blk) {
		DRM_ERROR("blk set as 0\n");
		return -EINVAL;
	}

	vig_blk = (cfg->blk & GRP_VIG_HW_BLK_SELECT) ? true : false;
	dma_blk = (cfg->blk & GRP_DMA_HW_BLK_SELECT) ? true : false;
	dspp_blk = (cfg->blk & GRP_DSPP_HW_BLK_SELECT) ? true : false;
	mdss_blk = (cfg->blk & MDSS) ? true : false;

	if ((vig_blk && dspp_blk) || (dma_blk && dspp_blk) ||
			(vig_blk && dma_blk) ||
			(mdss_blk && (vig_blk | dma_blk | dspp_blk))) {
		DRM_ERROR("invalid blk combination %x\n", cfg->blk);
		return -EINVAL;
	}

	return 0;
}

static int validate_dma_cfg(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	int rc = 0;
	bool supported;

	if (!cfg || cfg->ops >= REG_DMA_SETUP_OPS_MAX || !cfg->dma_buf) {
		DRM_ERROR("invalid param cfg %pK ops %d dma_buf %pK\n",
				cfg, ((cfg) ? cfg->ops : REG_DMA_SETUP_OPS_MAX),
				((cfg) ? cfg->dma_buf : NULL));
		return -EINVAL;
	}

	rc = check_support_v1(cfg->feature, cfg->blk, &supported);
	if (rc || !supported) {
		DRM_ERROR("check support failed rc %d supported %d\n",
				rc, supported);
		rc = -EINVAL;
		return rc;
	}

	if (cfg->dma_buf->index >= cfg->dma_buf->buffer_size ||
			NOT_WORD_ALIGNED(cfg->dma_buf->index)) {
		DRM_ERROR("Buf Overflow index %d max size %d align %x\n",
				cfg->dma_buf->index, cfg->dma_buf->buffer_size,
				NOT_WORD_ALIGNED(cfg->dma_buf->index));
		return -EINVAL;
	}

	if (cfg->dma_buf->iova & GUARD_BYTES || !cfg->dma_buf->vaddr) {
		DRM_ERROR("iova not aligned to %zx iova %llx kva %pK",
				(size_t)ADDR_ALIGN, cfg->dma_buf->iova,
				cfg->dma_buf->vaddr);
		return -EINVAL;
	}

	if (!IS_OP_ALLOWED(cfg->ops, cfg->dma_buf->next_op_allowed)) {
		DRM_ERROR("invalid op %x allowed %x\n", cfg->ops,
				cfg->dma_buf->next_op_allowed);
		return -EINVAL;
	}

	if (!validate_dma_op_params[cfg->ops] ||
			!write_dma_op_params[cfg->ops]) {
		DRM_ERROR("invalid op %d validate %pK write %pK\n", cfg->ops,
				validate_dma_op_params[cfg->ops],
				write_dma_op_params[cfg->ops]);
		return -EINVAL;
	}

	return rc;
}
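
/*
 * validate_kick_off_v1 - sanity check a kickoff request (ctl index, op,
 * queue, buffer alignment and size). When the buffer contains an odd
 * number of absolute (MDSS range) writes, an extra decode select plus a
 * dummy write to the LUTDMA version register is appended so the count
 * becomes even, avoiding a HW issue with odd counts.
 */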
static int validate_kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)
{
	if (!cfg || !cfg->ctl || !cfg->dma_buf ||
			cfg->dma_type >= REG_DMA_TYPE_MAX) {
		DRM_ERROR("invalid cfg %pK ctl %pK dma_buf %pK dma type %d\n",
				cfg, ((!cfg) ? NULL : cfg->ctl),
				((!cfg) ? NULL : cfg->dma_buf),
				((!cfg) ? 0 : cfg->dma_type));
		return -EINVAL;
	}

	if (reg_dma->caps->reg_dma_blks[cfg->dma_type].valid == false) {
		DRM_DEBUG("REG dma type %d is not supported\n", cfg->dma_type);
		return -EOPNOTSUPP;
	}

	if (cfg->ctl->idx < CTL_0 || cfg->ctl->idx >= CTL_MAX) {
		DRM_ERROR("invalid ctl idx %d\n", cfg->ctl->idx);
		return -EINVAL;
	}

	if (cfg->op >= REG_DMA_OP_MAX) {
		DRM_ERROR("invalid op %d\n", cfg->op);
		return -EINVAL;
	}

	if ((cfg->op == REG_DMA_WRITE) &&
			(!(cfg->dma_buf->ops_completed & DECODE_SEL_OP) ||
			!(cfg->dma_buf->ops_completed & REG_WRITE_OP))) {
		DRM_ERROR("incomplete write ops %x\n",
				cfg->dma_buf->ops_completed);
		return -EINVAL;
	}

	if (cfg->op == REG_DMA_READ && cfg->block_select >= DSPP_HIST_MAX) {
		DRM_ERROR("invalid block for read %d\n", cfg->block_select);
		return -EINVAL;
	}

	/* Only immediate triggers are supported now hence hardcode */
	cfg->trigger_mode = (cfg->op == REG_DMA_READ) ? (READ_TRIGGER) :
				(WRITE_TRIGGER);

	if (cfg->dma_buf->iova & GUARD_BYTES) {
		DRM_ERROR("Address is not aligned to %zx iova %llx",
				(size_t)ADDR_ALIGN, cfg->dma_buf->iova);
		return -EINVAL;
	}

	if (cfg->queue_select >= DMA_CTL_QUEUE_MAX) {
		DRM_ERROR("invalid queue selected %d\n", cfg->queue_select);
		return -EINVAL;
	}

	if (SIZE_DWORD(cfg->dma_buf->index) > MAX_DWORDS_SZ ||
			!cfg->dma_buf->index) {
		DRM_ERROR("invalid dword size %zd max %zd\n",
				(size_t)SIZE_DWORD(cfg->dma_buf->index),
				(size_t)MAX_DWORDS_SZ);
		return -EINVAL;
	}

	if (cfg->dma_type == REG_DMA_TYPE_SB &&
			(cfg->queue_select != DMA_CTL_QUEUE1 ||
			cfg->op == REG_DMA_READ)) {
		DRM_ERROR("invalid queue selected %d or op %d for SB LUTDMA\n",
				cfg->queue_select, cfg->op);
		return -EINVAL;
	}

	if ((cfg->dma_buf->abs_write_cnt % 2) != 0) {
		/* Touch up buffer to avoid HW issues with odd number of abs writes */
		u32 reg = 0;
		struct sde_reg_dma_setup_ops_cfg dma_write_cfg;

		dma_write_cfg.dma_buf = cfg->dma_buf;
		dma_write_cfg.blk = MDSS;
		dma_write_cfg.feature = REG_DMA_FEATURES_MAX;
		dma_write_cfg.ops = HW_BLK_SELECT;
		if (validate_write_decode_sel(&dma_write_cfg) || write_decode_sel(&dma_write_cfg)) {
			DRM_ERROR("Failed setting MDSS decode select for LUTDMA touch up\n");
			return -EINVAL;
		}

		/* Perform dummy write on LUTDMA RO version reg */
		dma_write_cfg.ops = REG_SINGLE_WRITE;
		dma_write_cfg.blk_offset = reg_dma->caps->base_off +
				reg_dma->caps->reg_dma_blks[cfg->dma_type].base;
		dma_write_cfg.data = &reg;
		dma_write_cfg.data_size = sizeof(uint32_t);

		if (validate_write_reg(&dma_write_cfg) || write_single_reg(&dma_write_cfg)) {
			DRM_ERROR("Failed to add touch up write to LUTDMA buffer\n");
			return -EINVAL;
		}
	}

	return 0;
}
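
/*
 * write_kick_off_v1 - program the LUT DMA hardware for the prepared buffer:
 * build the command word (op, last-command flag, size in DWORDs), clear any
 * pending error status, write the buffer iova and command word to the
 * selected CTL queue registers, and for the last command on a DB LUTDMA
 * issue the SW trigger.
 */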
static int write_kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)
{
	u32 cmd1, mask = 0, val = 0;
	struct sde_hw_blk_reg_map hw;

	memset(&hw, 0, sizeof(hw));
	msm_gem_sync(cfg->dma_buf->buf);
	cmd1 = (cfg->op == REG_DMA_READ) ?
		(dspp_read_sel[cfg->block_select] << 30) : 0;
	cmd1 |= (cfg->last_command) ? BIT(24) : 0;
	cmd1 |= (cfg->op == REG_DMA_READ) ? (2 << 22) : 0;
	cmd1 |= (cfg->op == REG_DMA_WRITE) ? (BIT(22)) : 0;
	cmd1 |= (SIZE_DWORD(cfg->dma_buf->index) & MAX_DWORDS_SZ);

	if (cfg->dma_type == REG_DMA_TYPE_DB)
		SET_UP_REG_DMA_REG(hw, reg_dma, REG_DMA_TYPE_DB);
	else if (cfg->dma_type == REG_DMA_TYPE_SB)
		SET_UP_REG_DMA_REG(hw, reg_dma, REG_DMA_TYPE_SB);

	if (hw.hw_rev == 0) {
		DRM_ERROR("DMA type %d is unsupported\n", cfg->dma_type);
		return -EOPNOTSUPP;
	}

	SDE_REG_WRITE(&hw, reg_dma_opmode_offset, BIT(0));
	val = SDE_REG_READ(&hw, reg_dma_intr_4_status_offset);
	if (val) {
		DRM_DEBUG("LUT dma status %x\n", val);
		mask = reg_dma_error_clear_mask;
		SDE_REG_WRITE(&hw, reg_dma_intr_clear_offset + sizeof(u32) * 4,
				mask);
		SDE_EVT32(val);
	}

	if (cfg->dma_type == REG_DMA_TYPE_DB) {
		SDE_REG_WRITE(&hw, reg_dma_ctl_queue_off[cfg->ctl->idx],
				cfg->dma_buf->iova);
		SDE_REG_WRITE(&hw, reg_dma_ctl_queue_off[cfg->ctl->idx] + 0x4,
				cmd1);
	} else if (cfg->dma_type == REG_DMA_TYPE_SB) {
		SDE_REG_WRITE(&hw, reg_dma_ctl_queue1_off[cfg->ctl->idx],
				cfg->dma_buf->iova);
		SDE_REG_WRITE(&hw, reg_dma_ctl_queue1_off[cfg->ctl->idx] + 0x4,
				cmd1);
	}

	if (cfg->last_command) {
		mask = ctl_trigger_done_mask[cfg->ctl->idx][cfg->queue_select];
		SDE_REG_WRITE(&hw, reg_dma_intr_clear_offset, mask);
		/* DB LUTDMA use SW trigger while SB LUTDMA uses DSPP_SB
		 * flush as its trigger event.
		 */
		if (cfg->dma_type == REG_DMA_TYPE_DB) {
			SDE_REG_WRITE(&cfg->ctl->hw, reg_dma_ctl_trigger_offset,
					queue_sel[cfg->queue_select]);
		}
	}

	SDE_EVT32(cfg->feature, cfg->dma_type,
			((uint64_t)cfg->dma_buf) >> 32,
			((uint64_t)cfg->dma_buf) & 0xFFFFFFFF,
			(cfg->dma_buf->iova) >> 32,
			(cfg->dma_buf->iova) & 0xFFFFFFFF,
			cfg->op,
			cfg->queue_select, cfg->ctl->idx,
			SIZE_DWORD(cfg->dma_buf->index));

	return 0;
}
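
/*
 * init_v1 - install the v1 function table and register offsets and
 * pre-allocate the per-CTL "last command" buffers. Allocation failure is
 * not fatal: returning 0 with no buffers lets reg dma fall back to AHB
 * programming.
 */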
int init_v1(struct sde_hw_reg_dma *cfg)
{
	int i = 0, rc = 0;

	if (!cfg)
		return -EINVAL;

	reg_dma = cfg;
	for (i = CTL_0; i < CTL_MAX; i++) {
		if (!last_cmd_buf_db[i]) {
			last_cmd_buf_db[i] =
				alloc_reg_dma_buf_v1(REG_DMA_HEADERS_BUFFER_SZ);
			if (IS_ERR_OR_NULL(last_cmd_buf_db[i])) {
				/*
				 * This will allow reg dma to fall back to
				 * AHB domain
				 */
				pr_info("Failed to allocate reg dma, ret:%lu\n",
						PTR_ERR(last_cmd_buf_db[i]));
				return 0;
			}
		}
		if (!last_cmd_buf_sb[i]) {
			last_cmd_buf_sb[i] =
				alloc_reg_dma_buf_v1(REG_DMA_HEADERS_BUFFER_SZ);
			if (IS_ERR_OR_NULL(last_cmd_buf_sb[i])) {
				/*
				 * This will allow reg dma to fall back to
				 * AHB domain
				 */
				pr_info("Failed to allocate reg dma, ret:%lu\n",
						PTR_ERR(last_cmd_buf_sb[i]));
				return 0;
			}
		}
	}
	if (rc) {
		for (i = 0; i < CTL_MAX; i++) {
			if (!last_cmd_buf_db[i])
				continue;
			dealloc_reg_dma_v1(last_cmd_buf_db[i]);
			last_cmd_buf_db[i] = NULL;
		}
		for (i = 0; i < CTL_MAX; i++) {
			if (!last_cmd_buf_sb[i])
				continue;
			dealloc_reg_dma_v1(last_cmd_buf_sb[i]);
			last_cmd_buf_sb[i] = NULL;
		}
		return rc;
	}

	reg_dma->ops.check_support = check_support_v1;
	reg_dma->ops.setup_payload = setup_payload_v1;
	reg_dma->ops.kick_off = kick_off_v1;
	reg_dma->ops.reset = reset_v1;
	reg_dma->ops.alloc_reg_dma_buf = alloc_reg_dma_buf_v1;
	reg_dma->ops.dealloc_reg_dma = dealloc_reg_dma_v1;
	reg_dma->ops.reset_reg_dma_buf = reset_reg_dma_buffer_v1;
	reg_dma->ops.last_command = last_cmd_v1;
	reg_dma->ops.dump_regs = dump_regs_v1;

	reg_dma_register_count = 60;
	reg_dma_decode_sel = 0x180ac060;
	reg_dma_opmode_offset = 0x4;
	reg_dma_ctl0_queue0_cmd0_offset = 0x14;
	reg_dma_intr_status_offset = 0x90;
	reg_dma_intr_4_status_offset = 0xa0;
	reg_dma_intr_clear_offset = 0xb0;
	reg_dma_ctl_trigger_offset = 0xd4;
	reg_dma_ctl0_reset_offset = 0xe4;
	reg_dma_error_clear_mask = BIT(0) | BIT(1) | BIT(2) | BIT(16);

	reg_dma_ctl_queue_off[CTL_0] = reg_dma_ctl0_queue0_cmd0_offset;
	for (i = CTL_1; i < ARRAY_SIZE(reg_dma_ctl_queue_off); i++)
		reg_dma_ctl_queue_off[i] = reg_dma_ctl_queue_off[i - 1] +
			(sizeof(u32) * 4);

	return 0;
}

int init_v11(struct sde_hw_reg_dma *cfg)
{
	int ret = 0, i = 0;

	ret = init_v1(cfg);
	if (ret) {
		DRM_ERROR("failed to initialize v1: ret %d\n", ret);
		return -EINVAL;
	}

	/* initialize register offsets and v1_supported based on version */
	reg_dma_register_count = 133;
	reg_dma_decode_sel = 0x180ac114;
	reg_dma_opmode_offset = 0x4;
	reg_dma_ctl0_queue0_cmd0_offset = 0x14;
	reg_dma_intr_status_offset = 0x160;
	reg_dma_intr_4_status_offset = 0x170;
	reg_dma_intr_clear_offset = 0x1a0;
	reg_dma_ctl_trigger_offset = 0xd4;
	reg_dma_ctl0_reset_offset = 0x200;
	reg_dma_error_clear_mask = BIT(0) | BIT(1) | BIT(2) | BIT(16) |
		BIT(17) | BIT(18);

	reg_dma_ctl_queue_off[CTL_0] = reg_dma_ctl0_queue0_cmd0_offset;
	for (i = CTL_1; i < ARRAY_SIZE(reg_dma_ctl_queue_off); i++)
		reg_dma_ctl_queue_off[i] = reg_dma_ctl_queue_off[i - 1] +
			(sizeof(u32) * 4);

	v1_supported[IGC] = DSPP_IGC | GRP_DSPP_HW_BLK_SELECT |
			GRP_VIG_HW_BLK_SELECT | GRP_DMA_HW_BLK_SELECT;
	v1_supported[GC] = GRP_DMA_HW_BLK_SELECT | GRP_DSPP_HW_BLK_SELECT;
	v1_supported[HSIC] = GRP_DSPP_HW_BLK_SELECT;
	v1_supported[SIX_ZONE] = GRP_DSPP_HW_BLK_SELECT;
	v1_supported[MEMC_SKIN] = GRP_DSPP_HW_BLK_SELECT;
	v1_supported[MEMC_SKY] = GRP_DSPP_HW_BLK_SELECT;
	v1_supported[MEMC_FOLIAGE] = GRP_DSPP_HW_BLK_SELECT;
	v1_supported[MEMC_PROT] = GRP_DSPP_HW_BLK_SELECT;
	v1_supported[QSEED] = GRP_VIG_HW_BLK_SELECT;

	return 0;
}

int init_v12(struct sde_hw_reg_dma *cfg)
{
	int ret = 0;

	ret = init_v11(cfg);
	if (ret) {
		DRM_ERROR("failed to initialize v11: ret %d\n", ret);
		return ret;
	}

	v1_supported[LTM_INIT] = GRP_LTM_HW_BLK_SELECT;
	v1_supported[LTM_ROI] = GRP_LTM_HW_BLK_SELECT;
	v1_supported[LTM_VLUT] = GRP_LTM_HW_BLK_SELECT;
	v1_supported[RC_DATA] = (GRP_DSPP_HW_BLK_SELECT |
			GRP_MDSS_HW_BLK_SELECT);
	v1_supported[SPR_INIT] = (GRP_DSPP_HW_BLK_SELECT |
			GRP_MDSS_HW_BLK_SELECT);
	v1_supported[SPR_PU_CFG] = (GRP_DSPP_HW_BLK_SELECT |
			GRP_MDSS_HW_BLK_SELECT);
	v1_supported[DEMURA_CFG] = MDSS | DSPP0 | DSPP1;

	return 0;
}

int init_v2(struct sde_hw_reg_dma *cfg)
{
	int ret = 0, i = 0;

	ret = init_v12(cfg);
	if (ret) {
		DRM_ERROR("failed to initialize v12: ret %d\n", ret);
		return ret;
	}

	/* initialize register offsets based on version delta */
	reg_dma_register_count = 0x91;
	reg_dma_ctl0_queue1_cmd0_offset = 0x1c;
	reg_dma_error_clear_mask |= BIT(19);

	reg_dma_ctl_queue1_off[CTL_0] = reg_dma_ctl0_queue1_cmd0_offset;
	for (i = CTL_1; i < ARRAY_SIZE(reg_dma_ctl_queue_off); i++)
		reg_dma_ctl_queue1_off[i] = reg_dma_ctl_queue1_off[i - 1] +
			(sizeof(u32) * 4);

	v1_supported[IGC] = GRP_DSPP_HW_BLK_SELECT | GRP_VIG_HW_BLK_SELECT |
			GRP_DMA_HW_BLK_SELECT;

	if (cfg->caps->reg_dma_blks[REG_DMA_TYPE_SB].valid == true)
		reg_dma->ops.last_command_sb = last_cmd_sb_v2;

	return 0;
}

static int check_support_v1(enum sde_reg_dma_features feature,
		enum sde_reg_dma_blk blk,
		bool *is_supported)
{
	int ret = 0;

	if (!is_supported)
		return -EINVAL;

	if (feature >= REG_DMA_FEATURES_MAX
			|| blk >= BIT_ULL(REG_DMA_BLK_MAX)) {
		*is_supported = false;
		return ret;
	}

	*is_supported = (blk & v1_supported[feature]) ? true : false;
	return ret;
}

static int setup_payload_v1(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	int rc = 0;

	rc = validate_dma_cfg(cfg);

	if (!rc)
		rc = validate_dma_op_params[cfg->ops](cfg);

	if (!rc)
		rc = write_dma_op_params[cfg->ops](cfg);

	return rc;
}

static int kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)
{
	int rc = 0;

	rc = validate_kick_off_v1(cfg);
	if (rc)
		return rc;

	rc = write_kick_off_v1(cfg);

	return rc;
}
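
/*
 * reset_v1 - soft reset every valid LUT DMA block for the given CTL and
 * poll (up to two 1 ms delays) for the reset bit to clear.
 */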
int reset_v1(struct sde_hw_ctl *ctl)
{
	struct sde_hw_blk_reg_map hw;
	u32 index, val, i = 0, k = 0;

	if (!ctl || ctl->idx > CTL_MAX) {
		DRM_ERROR("invalid ctl %pK ctl idx %d\n",
			ctl, ((ctl) ? ctl->idx : 0));
		return -EINVAL;
	}

	index = ctl->idx - CTL_0;
	for (k = 0; k < REG_DMA_TYPE_MAX; k++) {
		memset(&hw, 0, sizeof(hw));
		SET_UP_REG_DMA_REG(hw, reg_dma, k);
		if (hw.hw_rev == 0)
			continue;

		SDE_REG_WRITE(&hw, reg_dma_opmode_offset, BIT(0));
		SDE_REG_WRITE(&hw, (reg_dma_ctl0_reset_offset +
				index * sizeof(u32)), BIT(0));

		i = 0;
		do {
			udelay(1000);
			i++;
			val = SDE_REG_READ(&hw,
					(reg_dma_ctl0_reset_offset +
					index * sizeof(u32)));
		} while (i < 2 && val);
	}

	return 0;
}
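
/*
 * sde_reg_dma_aspace_cb_locked - SMMU attach/detach callback for a DMA
 * buffer: on detach drop the stored iova and kernel mapping, on re-attach
 * re-map both and re-apply the guard-byte alignment offset.
 */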
static void sde_reg_dma_aspace_cb_locked(void *cb_data, bool is_detach)
{
	struct sde_reg_dma_buffer *dma_buf = NULL;
	struct msm_gem_address_space *aspace = NULL;
	u32 iova_aligned, offset;
	int rc;

	if (!cb_data) {
		DRM_ERROR("aspace cb called with invalid dma_buf\n");
		return;
	}

	dma_buf = (struct sde_reg_dma_buffer *)cb_data;
	aspace = dma_buf->aspace;

	if (is_detach) {
		/* invalidate the stored iova */
		dma_buf->iova = 0;

		/* return the virtual address mapping */
		msm_gem_put_vaddr(dma_buf->buf);
		msm_gem_vunmap(dma_buf->buf, OBJ_LOCK_NORMAL);
	} else {
		rc = msm_gem_get_iova(dma_buf->buf, aspace,
				&dma_buf->iova);
		if (rc) {
			DRM_ERROR("failed to get the iova rc %d\n", rc);
			return;
		}

		dma_buf->vaddr = msm_gem_get_vaddr(dma_buf->buf);
		if (IS_ERR_OR_NULL(dma_buf->vaddr)) {
			DRM_ERROR("failed to get va rc %d\n", rc);
			return;
		}

		iova_aligned = (dma_buf->iova + GUARD_BYTES) & ALIGNED_OFFSET;
		offset = iova_aligned - dma_buf->iova;
		dma_buf->iova = dma_buf->iova + offset;
		dma_buf->vaddr = (void *)(((u8 *)dma_buf->vaddr) + offset);
		dma_buf->next_op_allowed = DECODE_SEL_OP;
	}
}
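
/*
 * alloc_reg_dma_buf_v1 - allocate an uncached GEM buffer of the requested
 * size plus GUARD_BYTES, map its iova and kernel vaddr, and round both up
 * to the 256-byte boundary required by the LUT DMA engine. The usable
 * buffer_size reported to callers is the requested size.
 */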
static struct sde_reg_dma_buffer *alloc_reg_dma_buf_v1(u32 size)
{
	struct sde_reg_dma_buffer *dma_buf = NULL;
	u32 iova_aligned, offset;
	u32 rsize = size + GUARD_BYTES;
	struct msm_gem_address_space *aspace = NULL;
	int rc = 0;

	if (!size || SIZE_DWORD(size) > MAX_DWORDS_SZ) {
		DRM_ERROR("invalid buffer size %lu, max %lu\n",
			SIZE_DWORD(size), MAX_DWORDS_SZ);
		return ERR_PTR(-EINVAL);
	}

	dma_buf = kzalloc(sizeof(*dma_buf), GFP_KERNEL);
	if (!dma_buf)
		return ERR_PTR(-ENOMEM);

	dma_buf->buf = msm_gem_new(reg_dma->drm_dev,
			rsize, MSM_BO_UNCACHED);
	if (IS_ERR_OR_NULL(dma_buf->buf)) {
		rc = -EINVAL;
		goto fail;
	}

	aspace = msm_gem_smmu_address_space_get(reg_dma->drm_dev,
			MSM_SMMU_DOMAIN_UNSECURE);
	if (PTR_ERR(aspace) == -ENODEV) {
		aspace = NULL;
		DRM_DEBUG("IOMMU not present, relying on VRAM\n");
	} else if (IS_ERR_OR_NULL(aspace)) {
		rc = PTR_ERR(aspace);
		aspace = NULL;
		DRM_ERROR("failed to get aspace %d", rc);
		goto free_gem;
	} else if (aspace) {
		/* register to aspace */
		rc = msm_gem_address_space_register_cb(aspace,
				sde_reg_dma_aspace_cb_locked,
				(void *)dma_buf);
		if (rc) {
			DRM_ERROR("failed to register callback %d", rc);
			goto free_gem;
		}
	}

	dma_buf->aspace = aspace;
	rc = msm_gem_get_iova(dma_buf->buf, aspace, &dma_buf->iova);
	if (rc) {
		DRM_ERROR("failed to get the iova rc %d\n", rc);
		goto free_aspace_cb;
	}

	dma_buf->vaddr = msm_gem_get_vaddr(dma_buf->buf);
	if (IS_ERR_OR_NULL(dma_buf->vaddr)) {
		DRM_ERROR("failed to get va rc %d\n", rc);
		rc = -EINVAL;
		goto put_iova;
	}

	dma_buf->buffer_size = size;
	iova_aligned = (dma_buf->iova + GUARD_BYTES) & ALIGNED_OFFSET;
	offset = iova_aligned - dma_buf->iova;
	dma_buf->iova = dma_buf->iova + offset;
	dma_buf->vaddr = (void *)(((u8 *)dma_buf->vaddr) + offset);
	dma_buf->next_op_allowed = DECODE_SEL_OP;

	return dma_buf;

put_iova:
	msm_gem_put_iova(dma_buf->buf, aspace);
free_aspace_cb:
	msm_gem_address_space_unregister_cb(aspace,
			sde_reg_dma_aspace_cb_locked, dma_buf);
free_gem:
	mutex_lock(&reg_dma->drm_dev->struct_mutex);
	msm_gem_free_object(dma_buf->buf);
	mutex_unlock(&reg_dma->drm_dev->struct_mutex);
fail:
	kfree(dma_buf);
	return ERR_PTR(rc);
}

static int dealloc_reg_dma_v1(struct sde_reg_dma_buffer *dma_buf)
{
	if (!dma_buf) {
		DRM_ERROR("invalid param reg_buf %pK\n", dma_buf);
		return -EINVAL;
	}

	if (dma_buf->buf) {
		msm_gem_put_iova(dma_buf->buf, 0);
		msm_gem_address_space_unregister_cb(dma_buf->aspace,
				sde_reg_dma_aspace_cb_locked, dma_buf);
		mutex_lock(&reg_dma->drm_dev->struct_mutex);
		msm_gem_free_object(dma_buf->buf);
		mutex_unlock(&reg_dma->drm_dev->struct_mutex);
	}

	kfree(dma_buf);
	return 0;
}

static int reset_reg_dma_buffer_v1(struct sde_reg_dma_buffer *lut_buf)
{
	if (!lut_buf)
		return -EINVAL;

	lut_buf->index = 0;
	lut_buf->ops_completed = 0;
	lut_buf->next_op_allowed = DECODE_SEL_OP;
	lut_buf->abs_write_cnt = 0;
	return 0;
}

static int validate_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 remain_len, write_len;

	remain_len = BUFFER_SPACE_LEFT(cfg);
	write_len = sizeof(u32);
	if (remain_len < write_len) {
		DRM_ERROR("buffer is full sz %d needs %d bytes\n",
				remain_len, write_len);
		return -EINVAL;
	}
	return 0;
}

static int write_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 *loc = NULL;

	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = reg_dma_decode_sel;
	loc[1] = 0;
	cfg->dma_buf->index = sizeof(u32) * 2;
	cfg->dma_buf->ops_completed = REG_WRITE_OP | DECODE_SEL_OP;
	cfg->dma_buf->next_op_allowed = REG_WRITE_OP;

	return 0;
}
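
/*
 * last_cmd_v1 - queue the pre-allocated "last command" buffer on the DB
 * LUTDMA and, when REG_DMA_WAIT4_COMP is requested, poll the interrupt
 * status register until the trigger-done bit for this CTL/queue is set.
 */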
static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
		enum sde_reg_dma_last_cmd_mode mode)
{
	struct sde_reg_dma_setup_ops_cfg cfg;
	struct sde_reg_dma_kickoff_cfg kick_off;
	struct sde_hw_blk_reg_map hw;
	u32 val;
	int rc;

	if (!ctl || ctl->idx >= CTL_MAX || q >= DMA_CTL_QUEUE_MAX) {
		DRM_ERROR("ctl %pK q %d index %d\n", ctl, q,
				((ctl) ? ctl->idx : -1));
		return -EINVAL;
	}

	if (!last_cmd_buf_db[ctl->idx] || !last_cmd_buf_db[ctl->idx]->iova) {
		DRM_ERROR("invalid last cmd buf for idx %d\n", ctl->idx);
		return -EINVAL;
	}

	cfg.dma_buf = last_cmd_buf_db[ctl->idx];
	reset_reg_dma_buffer_v1(last_cmd_buf_db[ctl->idx]);
	if (validate_last_cmd(&cfg)) {
		DRM_ERROR("validate buf failed\n");
		return -EINVAL;
	}

	if (write_last_cmd(&cfg)) {
		DRM_ERROR("write buf failed\n");
		return -EINVAL;
	}

	kick_off.ctl = ctl;
	kick_off.queue_select = q;
	kick_off.trigger_mode = WRITE_IMMEDIATE;
	kick_off.last_command = 1;
	kick_off.op = REG_DMA_WRITE;
	kick_off.dma_type = REG_DMA_TYPE_DB;
	kick_off.dma_buf = last_cmd_buf_db[ctl->idx];
	kick_off.feature = REG_DMA_FEATURES_MAX;
	rc = kick_off_v1(&kick_off);
	if (rc) {
		DRM_ERROR("kick off last cmd failed\n");
		return rc;
	}

	//Lack of block support will be caught by kick_off
	memset(&hw, 0, sizeof(hw));
	SET_UP_REG_DMA_REG(hw, reg_dma, kick_off.dma_type);

	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY, mode, ctl->idx, kick_off.queue_select,
			kick_off.dma_type, kick_off.op);
	if (mode == REG_DMA_WAIT4_COMP) {
		rc = readl_poll_timeout(hw.base_off + hw.blk_off +
				reg_dma_intr_status_offset, val,
				(val & ctl_trigger_done_mask[ctl->idx][q]),
				10, 20000);
		if (rc)
			DRM_ERROR("poll wait failed %d val %x mask %x\n",
					rc, val,
					ctl_trigger_done_mask[ctl->idx][q]);
		SDE_EVT32(SDE_EVTLOG_FUNC_EXIT, mode);
	}

	return rc;
}

void deinit_v1(void)
{
	int i = 0;

	for (i = CTL_0; i < CTL_MAX; i++) {
		if (last_cmd_buf_db[i])
			dealloc_reg_dma_v1(last_cmd_buf_db[i]);
		last_cmd_buf_db[i] = NULL;
		if (last_cmd_buf_sb[i])
			dealloc_reg_dma_v1(last_cmd_buf_sb[i]);
		last_cmd_buf_sb[i] = NULL;
	}
}

static void dump_regs_v1(void)
{
	uint32_t i = 0, k = 0;
	u32 val;
	struct sde_hw_blk_reg_map hw;

	for (k = 0; k < REG_DMA_TYPE_MAX; k++) {
		memset(&hw, 0, sizeof(hw));
		SET_UP_REG_DMA_REG(hw, reg_dma, k);
		if (hw.hw_rev == 0)
			continue;

		for (i = 0; i < reg_dma_register_count; i++) {
			val = SDE_REG_READ(&hw, i * sizeof(u32));
			DRM_ERROR("offset %x val %x\n", (u32)(i * sizeof(u32)),
					val);
		}
	}
}

static int last_cmd_sb_v2(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
		enum sde_reg_dma_last_cmd_mode mode)
{
	struct sde_reg_dma_setup_ops_cfg cfg;
	struct sde_reg_dma_kickoff_cfg kick_off;
	int rc = 0;

	if (!ctl || ctl->idx >= CTL_MAX || q >= DMA_CTL_QUEUE_MAX) {
		DRM_ERROR("ctl %pK q %d index %d\n", ctl, q,
				((ctl) ? ctl->idx : -1));
		return -EINVAL;
	}

	if (!last_cmd_buf_sb[ctl->idx] || !last_cmd_buf_sb[ctl->idx]->iova) {
		DRM_ERROR("invalid last cmd buf for idx %d\n", ctl->idx);
		return -EINVAL;
	}

	cfg.dma_buf = last_cmd_buf_sb[ctl->idx];
	reset_reg_dma_buffer_v1(last_cmd_buf_sb[ctl->idx]);
	if (validate_last_cmd(&cfg)) {
		DRM_ERROR("validate buf failed\n");
		return -EINVAL;
	}

	if (write_last_cmd(&cfg)) {
		DRM_ERROR("write buf failed\n");
		return -EINVAL;
	}

	kick_off.ctl = ctl;
	kick_off.trigger_mode = WRITE_IMMEDIATE;
	kick_off.last_command = 1;
	kick_off.op = REG_DMA_WRITE;
	kick_off.dma_type = REG_DMA_TYPE_SB;
	kick_off.queue_select = DMA_CTL_QUEUE1;
	kick_off.dma_buf = last_cmd_buf_sb[ctl->idx];
	kick_off.feature = REG_DMA_FEATURES_MAX;
	rc = kick_off_v1(&kick_off);
	if (rc)
		DRM_ERROR("kick off last cmd failed\n");

	SDE_EVT32(ctl->idx, kick_off.queue_select, kick_off.dma_type,
			kick_off.op);

	return rc;
}