sde_hw_reg_dma_v1.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
 */
#include <linux/iopoll.h>
#include "sde_hw_mdss.h"
#include "sde_hw_ctl.h"
#include "sde_hw_reg_dma_v1.h"
#include "msm_drv.h"
#include "msm_mmu.h"
#include "sde_dbg.h"

#define GUARD_BYTES (BIT(8) - 1)
#define ALIGNED_OFFSET (U32_MAX & ~(GUARD_BYTES))
#define ADDR_ALIGN BIT(8)
#define MAX_RELATIVE_OFF (BIT(20) - 1)
#define ABSOLUTE_RANGE BIT(27)

#define DECODE_SEL_OP (BIT(HW_BLK_SELECT))
#define REG_WRITE_OP ((BIT(REG_SINGLE_WRITE)) | (BIT(REG_BLK_WRITE_SINGLE)) | \
	(BIT(REG_BLK_WRITE_INC)) | (BIT(REG_BLK_WRITE_MULTIPLE)) | \
	(BIT(REG_SINGLE_MODIFY)))

#define REG_DMA_OPS (DECODE_SEL_OP | REG_WRITE_OP)
#define IS_OP_ALLOWED(op, buf_op) (BIT(op) & buf_op)

#define SET_UP_REG_DMA_REG(hw, reg_dma) \
	do { \
		(hw).base_off = (reg_dma)->addr; \
		(hw).blk_off = (reg_dma)->caps->base; \
		(hw).hwversion = (reg_dma)->caps->version; \
		(hw).log_mask = SDE_DBG_MASK_REGDMA; \
	} while (0)

#define SIZE_DWORD(x) ((x) / (sizeof(u32)))
#define NOT_WORD_ALIGNED(x) ((x) & 0x3)

#define GRP_VIG_HW_BLK_SELECT (VIG0 | VIG1 | VIG2 | VIG3)
#define GRP_DMA_HW_BLK_SELECT (DMA0 | DMA1 | DMA2 | DMA3)
#define GRP_DSPP_HW_BLK_SELECT (DSPP0 | DSPP1 | DSPP2 | DSPP3)
#define GRP_LTM_HW_BLK_SELECT (LTM0 | LTM1)
#define GRP_MDSS_HW_BLK_SELECT (MDSS)
#define BUFFER_SPACE_LEFT(cfg) ((cfg)->dma_buf->buffer_size - \
		(cfg)->dma_buf->index)

#define REL_ADDR_OPCODE (BIT(27))
#define SINGLE_REG_WRITE_OPCODE (BIT(28))
#define SINGLE_REG_MODIFY_OPCODE (BIT(29))
#define HW_INDEX_REG_WRITE_OPCODE (BIT(28) | BIT(29))
#define AUTO_INC_REG_WRITE_OPCODE (BIT(30))
#define BLK_REG_WRITE_OPCODE (BIT(30) | BIT(28))

#define WRAP_MIN_SIZE 2
#define WRAP_MAX_SIZE (BIT(4) - 1)
#define MAX_DWORDS_SZ (BIT(14) - 1)
#define REG_DMA_HEADERS_BUFFER_SZ (sizeof(u32) * 128)

static uint32_t reg_dma_register_count;
static uint32_t reg_dma_decode_sel;
static uint32_t reg_dma_opmode_offset;
static uint32_t reg_dma_ctl0_queue0_cmd0_offset;
static uint32_t reg_dma_intr_status_offset;
static uint32_t reg_dma_intr_4_status_offset;
static uint32_t reg_dma_intr_clear_offset;
static uint32_t reg_dma_ctl_trigger_offset;
static uint32_t reg_dma_ctl0_reset_offset;
static uint32_t reg_dma_error_clear_mask;

typedef int (*reg_dma_internal_ops) (struct sde_reg_dma_setup_ops_cfg *cfg);

static struct sde_hw_reg_dma *reg_dma;

static u32 ops_mem_size[REG_DMA_SETUP_OPS_MAX] = {
	[REG_BLK_WRITE_SINGLE] = sizeof(u32) * 2,
	[REG_BLK_WRITE_INC] = sizeof(u32) * 2,
	[REG_BLK_WRITE_MULTIPLE] = sizeof(u32) * 2,
	[HW_BLK_SELECT] = sizeof(u32) * 2,
	[REG_SINGLE_WRITE] = sizeof(u32) * 2,
	[REG_SINGLE_MODIFY] = sizeof(u32) * 3,
};

static u32 queue_sel[DMA_CTL_QUEUE_MAX] = {
	[DMA_CTL_QUEUE0] = BIT(0),
	[DMA_CTL_QUEUE1] = BIT(4),
};

static u32 reg_dma_ctl_queue_off[CTL_MAX];

static u32 dspp_read_sel[DSPP_HIST_MAX] = {
	[DSPP0_HIST] = 0,
	[DSPP1_HIST] = 1,
	[DSPP2_HIST] = 2,
	[DSPP3_HIST] = 3,
};

static u32 v1_supported[REG_DMA_FEATURES_MAX] = {
	[GAMUT] = GRP_VIG_HW_BLK_SELECT | GRP_DSPP_HW_BLK_SELECT,
	[VLUT] = GRP_DSPP_HW_BLK_SELECT,
	[GC] = GRP_DSPP_HW_BLK_SELECT,
	[IGC] = DSPP_IGC | GRP_DSPP_HW_BLK_SELECT,
	[PCC] = GRP_DSPP_HW_BLK_SELECT,
};

static u32 ctl_trigger_done_mask[CTL_MAX][DMA_CTL_QUEUE_MAX] = {
	[CTL_0][0] = BIT(16),
	[CTL_0][1] = BIT(21),
	[CTL_1][0] = BIT(17),
	[CTL_1][1] = BIT(22),
	[CTL_2][0] = BIT(18),
	[CTL_2][1] = BIT(23),
	[CTL_3][0] = BIT(19),
	[CTL_3][1] = BIT(24),
	[CTL_4][0] = BIT(25),
	[CTL_4][1] = BIT(27),
	[CTL_5][0] = BIT(26),
	[CTL_5][1] = BIT(28),
};

static int validate_dma_cfg(struct sde_reg_dma_setup_ops_cfg *cfg);
static int validate_write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg);
static int validate_write_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
static int validate_write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
static int validate_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_single_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_multi_reg_index(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_multi_reg_inc(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_single_modify(struct sde_reg_dma_setup_ops_cfg *cfg);
static int write_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg);
static int reset_reg_dma_buffer_v1(struct sde_reg_dma_buffer *lut_buf);
static int check_support_v1(enum sde_reg_dma_features feature,
		enum sde_reg_dma_blk blk, bool *is_supported);
static int setup_payload_v1(struct sde_reg_dma_setup_ops_cfg *cfg);
static int kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg);
static int reset_v1(struct sde_hw_ctl *ctl);
static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
		enum sde_reg_dma_last_cmd_mode mode);
static struct sde_reg_dma_buffer *alloc_reg_dma_buf_v1(u32 size);
static int dealloc_reg_dma_v1(struct sde_reg_dma_buffer *lut_buf);
static void dump_regs_v1(void);

static reg_dma_internal_ops write_dma_op_params[REG_DMA_SETUP_OPS_MAX] = {
	[HW_BLK_SELECT] = write_decode_sel,
	[REG_SINGLE_WRITE] = write_single_reg,
	[REG_BLK_WRITE_SINGLE] = write_multi_reg_inc,
	[REG_BLK_WRITE_INC] = write_multi_reg_index,
	[REG_BLK_WRITE_MULTIPLE] = write_multi_lut_reg,
	[REG_SINGLE_MODIFY] = write_single_modify,
};

static reg_dma_internal_ops validate_dma_op_params[REG_DMA_SETUP_OPS_MAX] = {
	[HW_BLK_SELECT] = validate_write_decode_sel,
	[REG_SINGLE_WRITE] = validate_write_reg,
	[REG_BLK_WRITE_SINGLE] = validate_write_reg,
	[REG_BLK_WRITE_INC] = validate_write_reg,
	[REG_BLK_WRITE_MULTIPLE] = validate_write_multi_lut_reg,
	[REG_SINGLE_MODIFY] = validate_write_reg,
};

static struct sde_reg_dma_buffer *last_cmd_buf[CTL_MAX];
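
/* Translate a bitmask of sde_reg_dma_blk values into the HW decode-select bitfield */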
static void get_decode_sel(unsigned long blk, u32 *decode_sel)
{
	int i = 0;

	*decode_sel = 0;
	for_each_set_bit(i, &blk, REG_DMA_BLK_MAX) {
		switch (BIT(i)) {
		case VIG0:
			*decode_sel |= BIT(0);
			break;
		case VIG1:
			*decode_sel |= BIT(1);
			break;
		case VIG2:
			*decode_sel |= BIT(2);
			break;
		case VIG3:
			*decode_sel |= BIT(3);
			break;
		case DMA0:
			*decode_sel |= BIT(5);
			break;
		case DMA1:
			*decode_sel |= BIT(6);
			break;
		case DMA2:
			*decode_sel |= BIT(7);
			break;
		case DMA3:
			*decode_sel |= BIT(8);
			break;
		case DSPP0:
			*decode_sel |= BIT(17);
			break;
		case DSPP1:
			*decode_sel |= BIT(18);
			break;
		case DSPP2:
			*decode_sel |= BIT(19);
			break;
		case DSPP3:
			*decode_sel |= BIT(20);
			break;
		case SSPP_IGC:
			*decode_sel |= BIT(4);
			break;
		case DSPP_IGC:
			*decode_sel |= BIT(21);
			break;
		case LTM0:
			*decode_sel |= BIT(22);
			break;
		case LTM1:
			*decode_sel |= BIT(23);
			break;
		case MDSS:
			*decode_sel |= BIT(31);
			break;
		default:
			DRM_ERROR("block not supported %zx\n", (size_t)BIT(i));
			break;
		}
	}
}
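
/* Copy the payload into the DMA buffer and advance the write index */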
static int write_multi_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u8 *loc = NULL;

	loc = (u8 *)cfg->dma_buf->vaddr + cfg->dma_buf->index;
	memcpy(loc, cfg->data, cfg->data_size);
	cfg->dma_buf->index += cfg->data_size;
	cfg->dma_buf->next_op_allowed = REG_WRITE_OP | DECODE_SEL_OP;
	cfg->dma_buf->ops_completed |= REG_WRITE_OP;

	return 0;
}

int write_multi_reg_index(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 *loc = NULL;

	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = HW_INDEX_REG_WRITE_OPCODE;
	loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
	if (cfg->blk == MDSS)
		loc[0] |= ABSOLUTE_RANGE;
	loc[1] = SIZE_DWORD(cfg->data_size);
	cfg->dma_buf->index += ops_mem_size[cfg->ops];

	return write_multi_reg(cfg);
}

int write_multi_reg_inc(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 *loc = NULL;

	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = AUTO_INC_REG_WRITE_OPCODE;
	if (cfg->blk == MDSS)
		loc[0] |= ABSOLUTE_RANGE;
	loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
	loc[1] = SIZE_DWORD(cfg->data_size);
	cfg->dma_buf->index += ops_mem_size[cfg->ops];

	return write_multi_reg(cfg);
}
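
/* Emit a block-write opcode with wrap size for REG_BLK_WRITE_MULTIPLE ops */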
static int write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 *loc = NULL;

	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = BLK_REG_WRITE_OPCODE;
	loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
	if (cfg->blk == MDSS)
		loc[0] |= ABSOLUTE_RANGE;
	loc[1] = (cfg->inc) ? 0 : BIT(31);
	loc[1] |= (cfg->wrap_size & WRAP_MAX_SIZE) << 16;
	loc[1] |= ((SIZE_DWORD(cfg->data_size)) & MAX_DWORDS_SZ);
	cfg->dma_buf->next_op_allowed = REG_WRITE_OP;
	cfg->dma_buf->index += ops_mem_size[cfg->ops];

	return write_multi_reg(cfg);
}

static int write_single_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 *loc = NULL;

	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = SINGLE_REG_WRITE_OPCODE;
	loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
	if (cfg->blk == MDSS)
		loc[0] |= ABSOLUTE_RANGE;
	loc[1] = *cfg->data;
	cfg->dma_buf->index += ops_mem_size[cfg->ops];
	cfg->dma_buf->ops_completed |= REG_WRITE_OP;
	cfg->dma_buf->next_op_allowed = REG_WRITE_OP | DECODE_SEL_OP;

	return 0;
}

static int write_single_modify(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 *loc = NULL;

	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = SINGLE_REG_MODIFY_OPCODE;
	loc[0] |= (cfg->blk_offset & MAX_RELATIVE_OFF);
	if (cfg->blk == MDSS)
		loc[0] |= ABSOLUTE_RANGE;
	loc[1] = cfg->mask;
	loc[2] = *cfg->data;
	cfg->dma_buf->index += ops_mem_size[cfg->ops];
	cfg->dma_buf->ops_completed |= REG_WRITE_OP;
	cfg->dma_buf->next_op_allowed = REG_WRITE_OP | DECODE_SEL_OP;

	return 0;
}
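
/* Emit the decode-select command that picks which HW blocks receive the following writes */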
static int write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 *loc = NULL;

	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = reg_dma_decode_sel;
	get_decode_sel(cfg->blk, &loc[1]);
	cfg->dma_buf->index += ops_mem_size[cfg->ops];
	cfg->dma_buf->ops_completed |= DECODE_SEL_OP;
	cfg->dma_buf->next_op_allowed = REG_WRITE_OP;

	return 0;
}

static int validate_write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	int rc;

	rc = validate_write_reg(cfg);
	if (rc)
		return rc;

	if (cfg->wrap_size < WRAP_MIN_SIZE || cfg->wrap_size > WRAP_MAX_SIZE) {
		DRM_ERROR("invalid wrap sz %d min %d max %zd\n",
			cfg->wrap_size, WRAP_MIN_SIZE, (size_t)WRAP_MAX_SIZE);
		rc = -EINVAL;
	}

	return rc;
}
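
/* Check buffer space, payload pointer, size limits and word alignment for a write op */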
static int validate_write_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 remain_len, write_len;

	remain_len = BUFFER_SPACE_LEFT(cfg);
	write_len = ops_mem_size[cfg->ops] + cfg->data_size;
	if (remain_len < write_len) {
		DRM_ERROR("buffer is full sz %d needs %d bytes\n",
				remain_len, write_len);
		return -EINVAL;
	}

	if (!cfg->data) {
		DRM_ERROR("invalid data %pK size %d exp sz %d\n", cfg->data,
				cfg->data_size, write_len);
		return -EINVAL;
	}

	if ((SIZE_DWORD(cfg->data_size)) > MAX_DWORDS_SZ ||
	    NOT_WORD_ALIGNED(cfg->data_size)) {
		DRM_ERROR("Invalid data size %d max %zd align %x\n",
				cfg->data_size, (size_t)MAX_DWORDS_SZ,
				NOT_WORD_ALIGNED(cfg->data_size));
		return -EINVAL;
	}

	if (cfg->blk_offset > MAX_RELATIVE_OFF ||
	    NOT_WORD_ALIGNED(cfg->blk_offset)) {
		DRM_ERROR("invalid offset %d max %zd align %x\n",
				cfg->blk_offset, (size_t)MAX_RELATIVE_OFF,
				NOT_WORD_ALIGNED(cfg->blk_offset));
		return -EINVAL;
	}

	return 0;
}

static int validate_write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 remain_len;
	bool vig_blk, dma_blk, dspp_blk, mdss_blk;

	remain_len = BUFFER_SPACE_LEFT(cfg);
	if (remain_len < ops_mem_size[HW_BLK_SELECT]) {
		DRM_ERROR("buffer is full needs %d bytes\n",
				ops_mem_size[HW_BLK_SELECT]);
		return -EINVAL;
	}

	if (!cfg->blk) {
		DRM_ERROR("blk set as 0\n");
		return -EINVAL;
	}

	vig_blk = (cfg->blk & GRP_VIG_HW_BLK_SELECT) ? true : false;
	dma_blk = (cfg->blk & GRP_DMA_HW_BLK_SELECT) ? true : false;
	dspp_blk = (cfg->blk & GRP_DSPP_HW_BLK_SELECT) ? true : false;
	mdss_blk = (cfg->blk & MDSS) ? true : false;

	if ((vig_blk && dspp_blk) || (dma_blk && dspp_blk) ||
			(vig_blk && dma_blk) ||
			(mdss_blk && (vig_blk | dma_blk | dspp_blk))) {
		DRM_ERROR("invalid blk combination %x\n", cfg->blk);
		return -EINVAL;
	}

	return 0;
}
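
/* Common validation run before any payload op: feature support, buffer state and op ordering */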
static int validate_dma_cfg(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	int rc = 0;
	bool supported;

	if (!cfg || cfg->ops >= REG_DMA_SETUP_OPS_MAX || !cfg->dma_buf) {
		DRM_ERROR("invalid param cfg %pK ops %d dma_buf %pK\n",
				cfg, ((cfg) ? cfg->ops : REG_DMA_SETUP_OPS_MAX),
				((cfg) ? cfg->dma_buf : NULL));
		return -EINVAL;
	}

	rc = check_support_v1(cfg->feature, cfg->blk, &supported);
	if (rc || !supported) {
		DRM_ERROR("check support failed rc %d supported %d\n",
				rc, supported);
		rc = -EINVAL;
		return rc;
	}

	if (cfg->dma_buf->index >= cfg->dma_buf->buffer_size ||
	    NOT_WORD_ALIGNED(cfg->dma_buf->index)) {
		DRM_ERROR("Buf Overflow index %d max size %d align %x\n",
				cfg->dma_buf->index, cfg->dma_buf->buffer_size,
				NOT_WORD_ALIGNED(cfg->dma_buf->index));
		return -EINVAL;
	}

	if (cfg->dma_buf->iova & GUARD_BYTES || !cfg->dma_buf->vaddr) {
		DRM_ERROR("iova not aligned to %zx iova %llx kva %pK",
				(size_t)ADDR_ALIGN, cfg->dma_buf->iova,
				cfg->dma_buf->vaddr);
		return -EINVAL;
	}

	if (!IS_OP_ALLOWED(cfg->ops, cfg->dma_buf->next_op_allowed)) {
		DRM_ERROR("invalid op %x allowed %x\n", cfg->ops,
				cfg->dma_buf->next_op_allowed);
		return -EINVAL;
	}

	if (!validate_dma_op_params[cfg->ops] ||
	    !write_dma_op_params[cfg->ops]) {
		DRM_ERROR("invalid op %d validate %pK write %pK\n", cfg->ops,
				validate_dma_op_params[cfg->ops],
				write_dma_op_params[cfg->ops]);
		return -EINVAL;
	}

	return rc;
}

static int validate_kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)
{
	if (!cfg || !cfg->ctl || !cfg->dma_buf) {
		DRM_ERROR("invalid cfg %pK ctl %pK dma_buf %pK\n",
				cfg, ((!cfg) ? NULL : cfg->ctl),
				((!cfg) ? NULL : cfg->dma_buf));
		return -EINVAL;
	}

	if (cfg->ctl->idx < CTL_0 || cfg->ctl->idx >= CTL_MAX) {
		DRM_ERROR("invalid ctl idx %d\n", cfg->ctl->idx);
		return -EINVAL;
	}

	if (cfg->op >= REG_DMA_OP_MAX) {
		DRM_ERROR("invalid op %d\n", cfg->op);
		return -EINVAL;
	}

	if ((cfg->op == REG_DMA_WRITE) &&
	    (!(cfg->dma_buf->ops_completed & DECODE_SEL_OP) ||
	     !(cfg->dma_buf->ops_completed & REG_WRITE_OP))) {
		DRM_ERROR("incomplete write ops %x\n",
				cfg->dma_buf->ops_completed);
		return -EINVAL;
	}

	if (cfg->op == REG_DMA_READ && cfg->block_select >= DSPP_HIST_MAX) {
		DRM_ERROR("invalid block for read %d\n", cfg->block_select);
		return -EINVAL;
	}

	/* Only immediate triggers are supported now hence hardcode */
	cfg->trigger_mode = (cfg->op == REG_DMA_READ) ? (READ_TRIGGER) :
				(WRITE_TRIGGER);

	if (cfg->dma_buf->iova & GUARD_BYTES) {
		DRM_ERROR("Address is not aligned to %zx iova %llx",
				(size_t)ADDR_ALIGN, cfg->dma_buf->iova);
		return -EINVAL;
	}

	if (cfg->queue_select >= DMA_CTL_QUEUE_MAX) {
		DRM_ERROR("invalid queue selected %d\n", cfg->queue_select);
		return -EINVAL;
	}

	if (SIZE_DWORD(cfg->dma_buf->index) > MAX_DWORDS_SZ ||
	    !cfg->dma_buf->index) {
		DRM_ERROR("invalid dword size %zd max %zd\n",
				(size_t)SIZE_DWORD(cfg->dma_buf->index),
				(size_t)MAX_DWORDS_SZ);
		return -EINVAL;
	}

	return 0;
}
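
/* Program the LUT DMA queue registers with the buffer iova and command word, then trigger */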
static int write_kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)
{
	u32 cmd1, mask = 0, val = 0;
	struct sde_hw_blk_reg_map hw;

	memset(&hw, 0, sizeof(hw));
	msm_gem_sync(cfg->dma_buf->buf);
	cmd1 = (cfg->op == REG_DMA_READ) ?
		(dspp_read_sel[cfg->block_select] << 30) : 0;
	cmd1 |= (cfg->last_command) ? BIT(24) : 0;
	cmd1 |= (cfg->op == REG_DMA_READ) ? (2 << 22) : 0;
	cmd1 |= (cfg->op == REG_DMA_WRITE) ? (BIT(22)) : 0;
	cmd1 |= (SIZE_DWORD(cfg->dma_buf->index) & MAX_DWORDS_SZ);

	SET_UP_REG_DMA_REG(hw, reg_dma);
	SDE_REG_WRITE(&hw, reg_dma_opmode_offset, BIT(0));
	val = SDE_REG_READ(&hw, reg_dma_intr_4_status_offset);
	if (val) {
		DRM_DEBUG("LUT dma status %x\n", val);
		mask = reg_dma_error_clear_mask;
		SDE_REG_WRITE(&hw, reg_dma_intr_clear_offset + sizeof(u32) * 4,
				mask);
		SDE_EVT32(val);
	}

	SDE_REG_WRITE(&hw, reg_dma_ctl_queue_off[cfg->ctl->idx],
			cfg->dma_buf->iova);
	SDE_REG_WRITE(&hw, reg_dma_ctl_queue_off[cfg->ctl->idx] + 0x4,
			cmd1);
	if (cfg->last_command) {
		mask = ctl_trigger_done_mask[cfg->ctl->idx][cfg->queue_select];
		SDE_REG_WRITE(&hw, reg_dma_intr_clear_offset, mask);
		SDE_REG_WRITE(&cfg->ctl->hw, reg_dma_ctl_trigger_offset,
				queue_sel[cfg->queue_select]);
	}

	return 0;
}
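
/* Set up the v1 ops table, register offsets and per-CTL last-command buffers */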
int init_v1(struct sde_hw_reg_dma *cfg)
{
	int i = 0, rc = 0;

	if (!cfg)
		return -EINVAL;

	reg_dma = cfg;
	for (i = CTL_0; i < CTL_MAX; i++) {
		if (!last_cmd_buf[i]) {
			last_cmd_buf[i] =
			    alloc_reg_dma_buf_v1(REG_DMA_HEADERS_BUFFER_SZ);
			if (IS_ERR_OR_NULL(last_cmd_buf[i])) {
				/*
				 * This will allow reg dma to fall back to
				 * AHB domain
				 */
				pr_info("Failed to allocate reg dma, ret:%lu\n",
						PTR_ERR(last_cmd_buf[i]));
				return 0;
			}
		}
	}
	if (rc) {
		for (i = 0; i < CTL_MAX; i++) {
			if (!last_cmd_buf[i])
				continue;
			dealloc_reg_dma_v1(last_cmd_buf[i]);
			last_cmd_buf[i] = NULL;
		}
		return rc;
	}

	reg_dma->ops.check_support = check_support_v1;
	reg_dma->ops.setup_payload = setup_payload_v1;
	reg_dma->ops.kick_off = kick_off_v1;
	reg_dma->ops.reset = reset_v1;
	reg_dma->ops.alloc_reg_dma_buf = alloc_reg_dma_buf_v1;
	reg_dma->ops.dealloc_reg_dma = dealloc_reg_dma_v1;
	reg_dma->ops.reset_reg_dma_buf = reset_reg_dma_buffer_v1;
	reg_dma->ops.last_command = last_cmd_v1;
	reg_dma->ops.dump_regs = dump_regs_v1;

	reg_dma_register_count = 60;
	reg_dma_decode_sel = 0x180ac060;
	reg_dma_opmode_offset = 0x4;
	reg_dma_ctl0_queue0_cmd0_offset = 0x14;
	reg_dma_intr_status_offset = 0x90;
	reg_dma_intr_4_status_offset = 0xa0;
	reg_dma_intr_clear_offset = 0xb0;
	reg_dma_ctl_trigger_offset = 0xd4;
	reg_dma_ctl0_reset_offset = 0xe4;
	reg_dma_error_clear_mask = BIT(0) | BIT(1) | BIT(2) | BIT(16);

	reg_dma_ctl_queue_off[CTL_0] = reg_dma_ctl0_queue0_cmd0_offset;
	for (i = CTL_1; i < ARRAY_SIZE(reg_dma_ctl_queue_off); i++)
		reg_dma_ctl_queue_off[i] = reg_dma_ctl_queue_off[i - 1] +
			(sizeof(u32) * 4);

	return 0;
}

int init_v11(struct sde_hw_reg_dma *cfg)
{
	int ret = 0, i = 0;

	ret = init_v1(cfg);
	if (ret) {
		DRM_ERROR("failed to initialize v1: ret %d\n", ret);
		return -EINVAL;
	}

	/* initialize register offsets and v1_supported based on version */
	reg_dma_register_count = 133;
	reg_dma_decode_sel = 0x180ac114;
	reg_dma_opmode_offset = 0x4;
	reg_dma_ctl0_queue0_cmd0_offset = 0x14;
	reg_dma_intr_status_offset = 0x160;
	reg_dma_intr_4_status_offset = 0x170;
	reg_dma_intr_clear_offset = 0x1a0;
	reg_dma_ctl_trigger_offset = 0xd4;
	reg_dma_ctl0_reset_offset = 0x200;
	reg_dma_error_clear_mask = BIT(0) | BIT(1) | BIT(2) | BIT(16) |
		BIT(17) | BIT(18);

	reg_dma_ctl_queue_off[CTL_0] = reg_dma_ctl0_queue0_cmd0_offset;
	for (i = CTL_1; i < ARRAY_SIZE(reg_dma_ctl_queue_off); i++)
		reg_dma_ctl_queue_off[i] = reg_dma_ctl_queue_off[i - 1] +
			(sizeof(u32) * 4);

	v1_supported[IGC] = DSPP_IGC | GRP_DSPP_HW_BLK_SELECT |
				GRP_VIG_HW_BLK_SELECT | GRP_DMA_HW_BLK_SELECT;
	v1_supported[GC] = GRP_DMA_HW_BLK_SELECT | GRP_DSPP_HW_BLK_SELECT;
	v1_supported[HSIC] = GRP_DSPP_HW_BLK_SELECT;
	v1_supported[SIX_ZONE] = GRP_DSPP_HW_BLK_SELECT;
	v1_supported[MEMC_SKIN] = GRP_DSPP_HW_BLK_SELECT;
	v1_supported[MEMC_SKY] = GRP_DSPP_HW_BLK_SELECT;
	v1_supported[MEMC_FOLIAGE] = GRP_DSPP_HW_BLK_SELECT;
	v1_supported[MEMC_PROT] = GRP_DSPP_HW_BLK_SELECT;
	v1_supported[QSEED] = GRP_VIG_HW_BLK_SELECT;

	return 0;
}

int init_v12(struct sde_hw_reg_dma *cfg)
{
	int ret = 0;

	ret = init_v11(cfg);
	if (ret) {
		DRM_ERROR("failed to initialize v11: ret %d\n", ret);
		return ret;
	}

	v1_supported[LTM_INIT] = GRP_LTM_HW_BLK_SELECT;
	v1_supported[LTM_ROI] = GRP_LTM_HW_BLK_SELECT;
	v1_supported[LTM_VLUT] = GRP_LTM_HW_BLK_SELECT;

	return 0;
}

static int check_support_v1(enum sde_reg_dma_features feature,
		enum sde_reg_dma_blk blk,
		bool *is_supported)
{
	int ret = 0;

	if (!is_supported)
		return -EINVAL;

	if (feature >= REG_DMA_FEATURES_MAX || blk >= BIT(REG_DMA_BLK_MAX)) {
		*is_supported = false;
		return ret;
	}

	*is_supported = (blk & v1_supported[feature]) ? true : false;

	return ret;
}

static int setup_payload_v1(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	int rc = 0;

	rc = validate_dma_cfg(cfg);

	if (!rc)
		rc = validate_dma_op_params[cfg->ops](cfg);

	if (!rc)
		rc = write_dma_op_params[cfg->ops](cfg);

	return rc;
}

static int kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)
{
	int rc = 0;

	rc = validate_kick_off_v1(cfg);
	if (rc)
		return rc;

	rc = write_kick_off_v1(cfg);

	return rc;
}
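
/* Issue a CTL reset request and poll briefly for the reset bit to clear */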
int reset_v1(struct sde_hw_ctl *ctl)
{
	struct sde_hw_blk_reg_map hw;
	u32 index, val, i = 0;

	if (!ctl || ctl->idx >= CTL_MAX) {
		DRM_ERROR("invalid ctl %pK ctl idx %d\n",
				ctl, ((ctl) ? ctl->idx : 0));
		return -EINVAL;
	}

	memset(&hw, 0, sizeof(hw));
	index = ctl->idx - CTL_0;
	SET_UP_REG_DMA_REG(hw, reg_dma);
	SDE_REG_WRITE(&hw, reg_dma_opmode_offset, BIT(0));
	SDE_REG_WRITE(&hw, (reg_dma_ctl0_reset_offset + index * sizeof(u32)),
			BIT(0));

	i = 0;
	do {
		udelay(1000);
		i++;
		val = SDE_REG_READ(&hw,
			(reg_dma_ctl0_reset_offset + index * sizeof(u32)));
	} while (i < 2 && val);

	return 0;
}
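
/* Address space callback: drop mappings on detach, re-acquire and re-align them on attach */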
static void sde_reg_dma_aspace_cb_locked(void *cb_data, bool is_detach)
{
	struct sde_reg_dma_buffer *dma_buf = NULL;
	struct msm_gem_address_space *aspace = NULL;
	u32 iova_aligned, offset;
	int rc;

	if (!cb_data) {
		DRM_ERROR("aspace cb called with invalid dma_buf\n");
		return;
	}

	dma_buf = (struct sde_reg_dma_buffer *)cb_data;
	aspace = dma_buf->aspace;

	if (is_detach) {
		/* invalidate the stored iova */
		dma_buf->iova = 0;

		/* return the virtual address mapping */
		msm_gem_put_vaddr(dma_buf->buf);
		msm_gem_vunmap(dma_buf->buf, OBJ_LOCK_NORMAL);
	} else {
		rc = msm_gem_get_iova(dma_buf->buf, aspace,
				&dma_buf->iova);
		if (rc) {
			DRM_ERROR("failed to get the iova rc %d\n", rc);
			return;
		}

		dma_buf->vaddr = msm_gem_get_vaddr(dma_buf->buf);
		if (IS_ERR_OR_NULL(dma_buf->vaddr)) {
			DRM_ERROR("failed to get va rc %d\n", rc);
			return;
		}

		iova_aligned = (dma_buf->iova + GUARD_BYTES) & ALIGNED_OFFSET;
		offset = iova_aligned - dma_buf->iova;
		dma_buf->iova = dma_buf->iova + offset;
		dma_buf->vaddr = (void *)(((u8 *)dma_buf->vaddr) + offset);
		dma_buf->next_op_allowed = DECODE_SEL_OP;
	}
}
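
/* Allocate an uncached GEM buffer, map it, and align the iova/vaddr up to the 256-byte boundary */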
static struct sde_reg_dma_buffer *alloc_reg_dma_buf_v1(u32 size)
{
	struct sde_reg_dma_buffer *dma_buf = NULL;
	u32 iova_aligned, offset;
	u32 rsize = size + GUARD_BYTES;
	struct msm_gem_address_space *aspace = NULL;
	int rc = 0;

	if (!size || SIZE_DWORD(size) > MAX_DWORDS_SZ) {
		DRM_ERROR("invalid buffer size %d, max %d\n",
				SIZE_DWORD(size), MAX_DWORDS_SZ);
		return ERR_PTR(-EINVAL);
	}

	dma_buf = kzalloc(sizeof(*dma_buf), GFP_KERNEL);
	if (!dma_buf)
		return ERR_PTR(-ENOMEM);

	dma_buf->buf = msm_gem_new(reg_dma->drm_dev,
			rsize, MSM_BO_UNCACHED);
	if (IS_ERR_OR_NULL(dma_buf->buf)) {
		rc = -EINVAL;
		goto fail;
	}

	aspace = msm_gem_smmu_address_space_get(reg_dma->drm_dev,
			MSM_SMMU_DOMAIN_UNSECURE);
	if (!aspace) {
		DRM_ERROR("failed to get aspace\n");
		rc = -EINVAL;
		goto free_gem;
	}

	/* register to aspace */
	rc = msm_gem_address_space_register_cb(aspace,
			sde_reg_dma_aspace_cb_locked,
			(void *)dma_buf);
	if (rc) {
		DRM_ERROR("failed to register callback %d", rc);
		goto free_gem;
	}

	dma_buf->aspace = aspace;
	rc = msm_gem_get_iova(dma_buf->buf, aspace, &dma_buf->iova);
	if (rc) {
		DRM_ERROR("failed to get the iova rc %d\n", rc);
		goto free_aspace_cb;
	}

	dma_buf->vaddr = msm_gem_get_vaddr(dma_buf->buf);
	if (IS_ERR_OR_NULL(dma_buf->vaddr)) {
		DRM_ERROR("failed to get va rc %d\n", rc);
		rc = -EINVAL;
		goto put_iova;
	}

	dma_buf->buffer_size = size;
	iova_aligned = (dma_buf->iova + GUARD_BYTES) & ALIGNED_OFFSET;
	offset = iova_aligned - dma_buf->iova;
	dma_buf->iova = dma_buf->iova + offset;
	dma_buf->vaddr = (void *)(((u8 *)dma_buf->vaddr) + offset);
	dma_buf->next_op_allowed = DECODE_SEL_OP;

	return dma_buf;

put_iova:
	msm_gem_put_iova(dma_buf->buf, aspace);
free_aspace_cb:
	msm_gem_address_space_unregister_cb(aspace,
			sde_reg_dma_aspace_cb_locked, dma_buf);
free_gem:
	mutex_lock(&reg_dma->drm_dev->struct_mutex);
	msm_gem_free_object(dma_buf->buf);
	mutex_unlock(&reg_dma->drm_dev->struct_mutex);
fail:
	kfree(dma_buf);
	return ERR_PTR(rc);
}

static int dealloc_reg_dma_v1(struct sde_reg_dma_buffer *dma_buf)
{
	if (!dma_buf) {
		DRM_ERROR("invalid param reg_buf %pK\n", dma_buf);
		return -EINVAL;
	}

	if (dma_buf->buf) {
		msm_gem_put_iova(dma_buf->buf, 0);
		msm_gem_address_space_unregister_cb(dma_buf->aspace,
				sde_reg_dma_aspace_cb_locked, dma_buf);
		mutex_lock(&reg_dma->drm_dev->struct_mutex);
		msm_gem_free_object(dma_buf->buf);
		mutex_unlock(&reg_dma->drm_dev->struct_mutex);
	}

	kfree(dma_buf);
	return 0;
}

static int reset_reg_dma_buffer_v1(struct sde_reg_dma_buffer *lut_buf)
{
	if (!lut_buf)
		return -EINVAL;

	lut_buf->index = 0;
	lut_buf->ops_completed = 0;
	lut_buf->next_op_allowed = DECODE_SEL_OP;
	return 0;
}

static int validate_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 remain_len, write_len;

	remain_len = BUFFER_SPACE_LEFT(cfg);
	write_len = sizeof(u32);
	if (remain_len < write_len) {
		DRM_ERROR("buffer is full sz %d needs %d bytes\n",
				remain_len, write_len);
		return -EINVAL;
	}
	return 0;
}

static int write_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg)
{
	u32 *loc = NULL;

	loc = (u32 *)((u8 *)cfg->dma_buf->vaddr +
			cfg->dma_buf->index);
	loc[0] = reg_dma_decode_sel;
	loc[1] = 0;
	cfg->dma_buf->index = sizeof(u32) * 2;
	cfg->dma_buf->ops_completed = REG_WRITE_OP | DECODE_SEL_OP;
	cfg->dma_buf->next_op_allowed = REG_WRITE_OP;

	return 0;
}
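
/* Queue the last-command marker for a CTL and optionally wait for the trigger-done interrupt */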
static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
		enum sde_reg_dma_last_cmd_mode mode)
{
	struct sde_reg_dma_setup_ops_cfg cfg;
	struct sde_reg_dma_kickoff_cfg kick_off;
	struct sde_hw_blk_reg_map hw;
	u32 val;
	int rc;

	if (!ctl || ctl->idx >= CTL_MAX || q >= DMA_CTL_QUEUE_MAX) {
		DRM_ERROR("ctl %pK q %d index %d\n", ctl, q,
				((ctl) ? ctl->idx : -1));
		return -EINVAL;
	}

	if (!last_cmd_buf[ctl->idx] || !last_cmd_buf[ctl->idx]->iova) {
		DRM_DEBUG("invalid last cmd buf for idx %d\n", ctl->idx);
		return 0;
	}

	cfg.dma_buf = last_cmd_buf[ctl->idx];
	reset_reg_dma_buffer_v1(last_cmd_buf[ctl->idx]);
	if (validate_last_cmd(&cfg)) {
		DRM_ERROR("validate buf failed\n");
		return -EINVAL;
	}

	if (write_last_cmd(&cfg)) {
		DRM_ERROR("write buf failed\n");
		return -EINVAL;
	}

	kick_off.ctl = ctl;
	kick_off.queue_select = q;
	kick_off.trigger_mode = WRITE_IMMEDIATE;
	kick_off.last_command = 1;
	kick_off.op = REG_DMA_WRITE;
	kick_off.dma_buf = last_cmd_buf[ctl->idx];
	if (kick_off_v1(&kick_off)) {
		DRM_ERROR("kick off last cmd failed\n");
		return -EINVAL;
	}

	memset(&hw, 0, sizeof(hw));
	SET_UP_REG_DMA_REG(hw, reg_dma);

	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY, mode);
	if (mode == REG_DMA_WAIT4_COMP) {
		rc = readl_poll_timeout(hw.base_off + hw.blk_off +
				reg_dma_intr_status_offset, val,
				(val & ctl_trigger_done_mask[ctl->idx][q]),
				10, 20000);
		if (rc)
			DRM_ERROR("poll wait failed %d val %x mask %x\n",
				rc, val, ctl_trigger_done_mask[ctl->idx][q]);
		SDE_EVT32(SDE_EVTLOG_FUNC_EXIT, mode);
	}

	return 0;
}

void deinit_v1(void)
{
	int i = 0;

	for (i = CTL_0; i < CTL_MAX; i++) {
		if (last_cmd_buf[i])
			dealloc_reg_dma_v1(last_cmd_buf[i]);
		last_cmd_buf[i] = NULL;
	}
}

static void dump_regs_v1(void)
{
	uint32_t i = 0;
	u32 val;
	struct sde_hw_blk_reg_map hw;

	memset(&hw, 0, sizeof(hw));
	SET_UP_REG_DMA_REG(hw, reg_dma);

	for (i = 0; i < reg_dma_register_count; i++) {
		val = SDE_REG_READ(&hw, i * sizeof(u32));
		DRM_ERROR("offset %x val %x\n", (u32)(i * sizeof(u32)), val);
	}
}