Merge "disp: msm: sde: Add event logs for lutdma kickoff programming"

qctecmdr committed 4 years ago
commit 4d3912297a
4 changed files with 229 additions and 40 deletions:

  1. msm/sde/sde_hw_reg_dma_v1.c  (+9, -2)
  2. msm/sde/sde_hw_reg_dma_v1_color_proc.c  (+52, -38)
  3. msm/sde/sde_reg_dma.h  (+2, -0)
  4. msm/sde_dbg.c  (+166, -0)

msm/sde/sde_hw_reg_dma_v1.c  (+9, -2)

@@ -623,6 +623,7 @@ static int validate_kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)
 				cfg->queue_select, cfg->op);
 		return -EINVAL;
 	}
+
 	return 0;
 }
 
@@ -684,6 +685,8 @@ static int write_kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)
 		}
 	}
 
+	SDE_EVT32(cfg->feature, cfg->dma_type, cfg->dma_buf, cfg->op,
+			cfg->queue_select, cfg->ctl->idx);
 	return 0;
 }
 
@@ -1171,6 +1174,7 @@ static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
 	kick_off.op = REG_DMA_WRITE;
 	kick_off.dma_type = REG_DMA_TYPE_DB;
 	kick_off.dma_buf = last_cmd_buf_db[ctl->idx];
+	kick_off.feature = REG_DMA_FEATURES_MAX;
 	rc = kick_off_v1(&kick_off);
 	if (rc) {
 		DRM_ERROR("kick off last cmd failed\n");
@@ -1181,7 +1185,8 @@ static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
 	memset(&hw, 0, sizeof(hw));
 	SET_UP_REG_DMA_REG(hw, reg_dma, kick_off.dma_type);
 
-	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY, mode);
+	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY, mode, ctl->idx, kick_off.queue_select,
+			kick_off.dma_type, kick_off.op);
 	if (mode == REG_DMA_WAIT4_COMP) {
 		rc = readl_poll_timeout(hw.base_off + hw.blk_off +
 			reg_dma_intr_status_offset, val,
@@ -1262,16 +1267,18 @@ static int last_cmd_sb_v2(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
 	}
 
 	kick_off.ctl = ctl;
-	kick_off.queue_select = q;
 	kick_off.trigger_mode = WRITE_IMMEDIATE;
 	kick_off.last_command = 1;
 	kick_off.op = REG_DMA_WRITE;
 	kick_off.dma_type = REG_DMA_TYPE_SB;
 	kick_off.queue_select = DMA_CTL_QUEUE1;
 	kick_off.dma_buf = last_cmd_buf_sb[ctl->idx];
+	kick_off.feature = REG_DMA_FEATURES_MAX;
 	rc = kick_off_v1(&kick_off);
 	if (rc)
 		DRM_ERROR("kick off last cmd failed\n");
 
+	SDE_EVT32(ctl->idx, kick_off.queue_select, kick_off.dma_type,
+			kick_off.op);
 	return rc;
 }
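
For readers correlating event-log dumps with this change: the entry added in write_kick_off_v1() records the kickoff tuple in the fixed argument order below. This is an annotated restatement of the hunk above, not additional patch content; the value ranges in the comments are inferred from the enums used elsewhere in this diff.

	SDE_EVT32(cfg->feature,       /* feature owning the buffer, or REG_DMA_FEATURES_MAX */
		  cfg->dma_type,      /* REG_DMA_TYPE_DB or REG_DMA_TYPE_SB */
		  cfg->dma_buf,       /* payload buffer handle */
		  cfg->op,            /* REG_DMA_READ or REG_DMA_WRITE */
		  cfg->queue_select,  /* DMA_CTL_QUEUE0 or DMA_CTL_QUEUE1 */
		  cfg->ctl->idx);     /* CTL block index */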

msm/sde/sde_hw_reg_dma_v1_color_proc.c  (+52, -38)

@@ -11,6 +11,7 @@
 #include "sde_hw_sspp.h"
 #include "sde_hwio.h"
 #include "sde_hw_lm.h"
+#include "sde_dbg.h"
 
 /* Reserve space of 128 words for LUT dma payload set-up */
 #define REG_DMA_HEADERS_BUFFER_SZ (sizeof(u32) * 128)
@@ -236,7 +237,7 @@ static u32 ltm_mapping[LTM_MAX] = {
 	} while (0)
 
 #define REG_DMA_SETUP_KICKOFF(cfg, hw_ctl, feature_dma_buf, ops, ctl_q, \
-		mode) \
+		mode, reg_dma_feature) \
 	do { \
 		memset(&cfg, 0, sizeof(cfg)); \
 		(cfg).ctl = hw_ctl; \
@@ -245,6 +246,7 @@ static u32 ltm_mapping[LTM_MAX] = {
 		(cfg).dma_type = REG_DMA_TYPE_DB; \
 		(cfg).queue_select = ctl_q; \
 		(cfg).trigger_mode = mode; \
+		(cfg).feature = reg_dma_feature; \
 	} while (0)
 
 static int reg_dma_buf_init(struct sde_reg_dma_buffer **buf, u32 sz);
@@ -563,7 +565,7 @@ void reg_dmav1_setup_dspp_vlutv18(struct sde_hw_dspp *ctx, void *cfg)
 	}
 
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[VLUT][ctx->idx],
-	    REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+	    REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE, VLUT);
 	rc = dma_ops->kick_off(&kick_off);
 	if (rc) {
 		DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -684,7 +686,7 @@ static void dspp_3d_gamutv4_off(struct sde_hw_dspp *ctx, void *cfg)
 	}
 
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[GAMUT][ctx->idx],
-			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE, GAMUT);
 	rc = dma_ops->kick_off(&kick_off);
 	if (rc)
 		DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -802,7 +804,7 @@ static void reg_dmav1_setup_dspp_3d_gamutv4_common(struct sde_hw_dspp *ctx,
 	}
 
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[GAMUT][ctx->idx],
-			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE, GAMUT);
 	rc = dma_ops->kick_off(&kick_off);
 	if (rc)
 		DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -960,7 +962,7 @@ void reg_dmav1_setup_dspp_gcv18(struct sde_hw_dspp *ctx, void *cfg)
 	}
 
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[GC][ctx->idx],
-			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE, GC);
 	rc = dma_ops->kick_off(&kick_off);
 	if (rc) {
 		DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -1010,7 +1012,7 @@ static void _dspp_igcv31_off(struct sde_hw_dspp *ctx, void *cfg)
 	}
 
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[IGC][ctx->idx],
-			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE, IGC);
 	rc = dma_ops->kick_off(&kick_off);
 	if (rc)
 		DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -1137,7 +1139,7 @@ void reg_dmav1_setup_dspp_igcv31(struct sde_hw_dspp *ctx, void *cfg)
 	}
 
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[IGC][ctx->idx],
-			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE, IGC);
 	rc = dma_ops->kick_off(&kick_off);
 	if (rc)
 		DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -1214,7 +1216,7 @@ int reg_dmav1_setup_rc_datav1(struct sde_hw_dspp *ctx, void *cfg)
 	/* defer trigger to kickoff phase */
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
 		dspp_buf[RC_DATA][ctx->idx], REG_DMA_WRITE,
-		DMA_CTL_QUEUE0, WRITE_TRIGGER);
+		DMA_CTL_QUEUE0, WRITE_TRIGGER, RC_DATA);
 	rc = dma_ops->kick_off(&kick_off);
 	if (rc) {
 		DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -1268,7 +1270,7 @@ static void _dspp_pcc_common_off(struct sde_hw_dspp *ctx, void *cfg)
 	}
 
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[PCC][ctx->idx],
-			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE, PCC);
 	rc = dma_ops->kick_off(&kick_off);
 	if (rc)
 		DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -1389,7 +1391,7 @@ void reg_dmav1_setup_dspp_pcc_common(struct sde_hw_dspp *ctx, void *cfg)
 	}
 
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[PCC][ctx->idx],
-			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE, PCC);
 	rc = dma_ops->kick_off(&kick_off);
 	if (rc)
 		DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -1573,7 +1575,7 @@ void reg_dmav1_setup_dspp_pa_hsicv17(struct sde_hw_dspp *ctx, void *cfg)
 	}
 
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[HSIC][ctx->idx],
-			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE, HSIC);
 	rc = dma_ops->kick_off(&kick_off);
 	if (rc)
 		DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -1728,7 +1730,7 @@ void reg_dmav1_setup_dspp_sixzonev17(struct sde_hw_dspp *ctx, void *cfg)
 	}
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
 		dspp_buf[SIX_ZONE][ctx->idx],
-		REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+		REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE, SIX_ZONE);
 	rc = dma_ops->kick_off(&kick_off);
 	if (rc)
 		DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -1896,7 +1898,7 @@ static void __setup_dspp_memcol(struct sde_hw_dspp *ctx,
 
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
 		dspp_buf[type][ctx->idx],
-		REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+		REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE, type);
 	rc = dma_ops->kick_off(&kick_off);
 	if (rc)
 		DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -2091,7 +2093,7 @@ void reg_dmav1_setup_dspp_memcol_protv17(struct sde_hw_dspp *ctx, void *cfg)
 
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
 			dspp_buf[MEMC_PROT][ctx->idx], REG_DMA_WRITE,
-			DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+			DMA_CTL_QUEUE0, WRITE_IMMEDIATE, MEMC_PROT);
 	rc = dma_ops->kick_off(&kick_off);
 	if (rc)
 		DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -2213,7 +2215,7 @@ static void vig_gamutv5_off(struct sde_hw_pipe *ctx, void *cfg)
 
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
 			sspp_buf[idx][GAMUT][ctx->idx], REG_DMA_WRITE,
-			DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+			DMA_CTL_QUEUE0, WRITE_IMMEDIATE, GAMUT);
 	rc = dma_ops->kick_off(&kick_off);
 	if (rc)
 		DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -2325,7 +2327,7 @@ void reg_dmav1_setup_vig_gamutv5(struct sde_hw_pipe *ctx, void *cfg)
 
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
 			sspp_buf[idx][GAMUT][ctx->idx], REG_DMA_WRITE,
-			DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+			DMA_CTL_QUEUE0, WRITE_IMMEDIATE, GAMUT);
 	rc = dma_ops->kick_off(&kick_off);
 	if (rc)
 		DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -2370,7 +2372,7 @@ static void vig_igcv5_off(struct sde_hw_pipe *ctx, void *cfg)
 
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
 			sspp_buf[idx][IGC][ctx->idx], REG_DMA_WRITE,
-			DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+			DMA_CTL_QUEUE0, WRITE_IMMEDIATE, IGC);
 	rc = dma_ops->kick_off(&kick_off);
 	if (rc)
 		DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -2504,7 +2506,7 @@ void reg_dmav1_setup_vig_igcv5(struct sde_hw_pipe *ctx, void *cfg)
 
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
 			sspp_buf[idx][IGC][ctx->idx], REG_DMA_WRITE,
-			DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+			DMA_CTL_QUEUE0, WRITE_IMMEDIATE, IGC);
 	rc = dma_ops->kick_off(&kick_off);
 	if (rc)
 		DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -2564,7 +2566,7 @@ void reg_dmav1_setup_vig_igcv6(struct sde_hw_pipe *ctx, void *cfg)
 
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
 			sspp_buf[idx][IGC][ctx->idx], REG_DMA_WRITE,
-			DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+			DMA_CTL_QUEUE0, WRITE_IMMEDIATE, IGC);
 	rc = dma_ops->kick_off(&kick_off);
 	if (rc)
 		DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -2610,7 +2612,7 @@ static void dma_igcv5_off(struct sde_hw_pipe *ctx, void *cfg,
 
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
 			sspp_buf[idx][IGC][ctx->idx], REG_DMA_WRITE,
-			DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+			DMA_CTL_QUEUE0, WRITE_IMMEDIATE, IGC);
 	rc = dma_ops->kick_off(&kick_off);
 	if (rc)
 		DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -2715,7 +2717,7 @@ void reg_dmav1_setup_dma_igcv5(struct sde_hw_pipe *ctx, void *cfg,
 
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
 			sspp_buf[idx][IGC][ctx->idx], REG_DMA_WRITE,
-			DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+			DMA_CTL_QUEUE0, WRITE_IMMEDIATE, IGC);
 	rc = dma_ops->kick_off(&kick_off);
 	if (rc)
 		DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -2763,7 +2765,7 @@ static void dma_gcv5_off(struct sde_hw_pipe *ctx, void *cfg,
 
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
 			sspp_buf[idx][GC][ctx->idx], REG_DMA_WRITE,
-			DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+			DMA_CTL_QUEUE0, WRITE_IMMEDIATE, GC);
 	rc = dma_ops->kick_off(&kick_off);
 	if (rc)
 		DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -2842,7 +2844,7 @@ void reg_dmav1_setup_dma_gcv5(struct sde_hw_pipe *ctx, void *cfg,
 
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
 			sspp_buf[idx][GC][ctx->idx], REG_DMA_WRITE,
-			DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+			DMA_CTL_QUEUE0, WRITE_IMMEDIATE, GC);
 	rc = dma_ops->kick_off(&kick_off);
 	if (rc)
 		DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -3230,7 +3232,7 @@ end:
 
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg.ctl,
 			sspp_buf[idx][QSEED][ctx->idx], REG_DMA_WRITE,
-			DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+			DMA_CTL_QUEUE0, WRITE_IMMEDIATE, QSEED);
 	rc = dma_ops->kick_off(&kick_off);
 	if (rc)
 		DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -3416,7 +3418,8 @@ static void ltm_initv1_disable(struct sde_hw_dspp *ctx, void *cfg,
 	}
 
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, ltm_buf[LTM_INIT][idx],
-				REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+				REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE,
+				LTM_INIT);
 	rc = dma_ops->kick_off(&kick_off);
 	if (rc) {
 		DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -3555,7 +3558,8 @@ void reg_dmav1_setup_ltm_initv1(struct sde_hw_dspp *ctx, void *cfg)
 		}
 	}
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, ltm_buf[LTM_INIT][idx],
-				REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+				REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE,
+				LTM_INIT);
 	rc = dma_ops->kick_off(&kick_off);
 	if (rc) {
 		DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -3608,7 +3612,8 @@ static void ltm_roiv1_disable(struct sde_hw_dspp *ctx, void *cfg,
 	}
 
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, ltm_buf[LTM_ROI][idx],
-				REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+				REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE,
+				LTM_ROI);
 	rc = dma_ops->kick_off(&kick_off);
 	if (rc) {
 		DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -3729,7 +3734,8 @@ void reg_dmav1_setup_ltm_roiv1(struct sde_hw_dspp *ctx, void *cfg)
 	}
 
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, ltm_buf[LTM_ROI][idx],
-				REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+				REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE,
+				LTM_ROI);
 	rc = dma_ops->kick_off(&kick_off);
 	if (rc) {
 		DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -3782,7 +3788,8 @@ static void ltm_vlutv1_disable(struct sde_hw_dspp *ctx, void *cfg,
 	}
 
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, ltm_buf[LTM_VLUT][idx],
-				REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+				REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE,
+				LTM_VLUT);
 	rc = dma_ops->kick_off(&kick_off);
 	if (rc) {
 		DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -3909,7 +3916,8 @@ void reg_dmav1_setup_ltm_vlutv1(struct sde_hw_dspp *ctx, void *cfg)
 	}
 
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, ltm_buf[LTM_VLUT][idx],
-				REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+				REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE,
+				LTM_VLUT);
 	rc = dma_ops->kick_off(&kick_off);
 	if (rc) {
 		DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -3973,7 +3981,8 @@ static void _perform_sbdma_kickoff(struct sde_hw_dspp *ctx,
 
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
 			dspp_buf[feature][ctx->idx],
-			REG_DMA_WRITE, DMA_CTL_QUEUE1, WRITE_IMMEDIATE);
+			REG_DMA_WRITE, DMA_CTL_QUEUE1, WRITE_IMMEDIATE,
+			feature);
 	kick_off.dma_type = REG_DMA_TYPE_SB;
 	rc = dma_ops->kick_off(&kick_off);
 	if (!rc) {
@@ -3986,7 +3995,7 @@ static void _perform_sbdma_kickoff(struct sde_hw_dspp *ctx,
 
 		REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
 				dspp_buf[feature][ctx->idx], REG_DMA_WRITE,
-				DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+				DMA_CTL_QUEUE0, WRITE_IMMEDIATE, feature);
 		rc = dma_ops->kick_off(&kick_off);
 		if (rc)
 			DRM_ERROR("failed dbdma kick off ret %d\n", rc);
@@ -4471,7 +4480,7 @@ void reg_dmav2_setup_vig_gamutv61(struct sde_hw_pipe *ctx, void *cfg)
 
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
 			sspp_buf[idx][GAMUT][ctx->idx], REG_DMA_WRITE,
-			DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+			DMA_CTL_QUEUE0, WRITE_IMMEDIATE, GAMUT);
 	rc = dma_ops->kick_off(&kick_off);
 	if (rc)
 		DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -4591,7 +4600,8 @@ void reg_dmav1_disable_spr(struct sde_hw_dspp *ctx, void *cfg)
 
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
 			dspp_buf[SPR_INIT][ctx->idx],
-			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE,
+			SPR_INIT);
 	rc = dma_ops->kick_off(&kick_off);
 	if (rc) {
 		DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -4730,7 +4740,8 @@ void reg_dmav1_setup_spr_init_cfgv1(struct sde_hw_dspp *ctx, void *cfg)
 
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
 			dspp_buf[SPR_INIT][ctx->idx],
-			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE,
+			SPR_INIT);
 	rc = dma_ops->kick_off(&kick_off);
 	if (rc) {
 		DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -4796,7 +4807,8 @@ void reg_dmav1_setup_spr_pu_cfgv1(struct sde_hw_dspp *ctx, void *cfg)
 
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
 			dspp_buf[SPR_PU_CFG][ctx->idx],
-			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE,
+			SPR_PU_CFG);
 	rc = dma_ops->kick_off(&kick_off);
 	if (rc) {
 		DRM_ERROR("failed to kick off ret %d\n", rc);
@@ -4837,7 +4849,8 @@ static void reg_dma_demura_off(struct sde_hw_dspp *ctx,
 
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
 			dspp_buf[DEMURA_CFG][ctx->idx],
-			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE,
+			DEMURA_CFG);
 
 	rc = dma_ops->kick_off(&kick_off);
 	if (rc)
@@ -5342,7 +5355,8 @@ void reg_dmav1_setup_demurav1(struct sde_hw_dspp *ctx, void *cfx)
 
 	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
 			dspp_buf[DEMURA_CFG][ctx->idx],
-			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE,
+			DEMURA_CFG);
 
 	DRM_DEBUG("enable demura buffer size %d\n",
 				dspp_buf[DEMURA_CFG][ctx->idx]->index);
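
All of the REG_DMA_SETUP_KICKOFF() conversions in this file follow the same shape: the existing arguments are unchanged and the owning feature enum is appended so the macro can populate the new cfg.feature member. A condensed sketch of the resulting call, with GAMUT and its dspp buffer standing in for whichever block a given setup routine programs:

	/* illustrative sketch of the updated macro shape, not a new callsite */
	struct sde_reg_dma_kickoff_cfg kick_off;

	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
			dspp_buf[GAMUT][ctx->idx],	/* payload buffer */
			REG_DMA_WRITE,			/* op */
			DMA_CTL_QUEUE0,			/* ctl queue */
			WRITE_IMMEDIATE,		/* trigger mode */
			GAMUT);				/* new: feature tag */
	rc = dma_ops->kick_off(&kick_off);
	if (rc)
		DRM_ERROR("failed to kick off ret %d\n", rc);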

msm/sde/sde_reg_dma.h  (+2, -0)

@@ -293,6 +293,7 @@ struct sde_reg_dma_setup_ops_cfg {
  * @trigger_mode: reg dma ops trigger mode
  * @queue_select: queue on which reg dma buffer will be submitted
  * @dma_type: DB or SB LUT DMA block selection
+ * @feature: feature the provided kickoff buffer belongs to
  * @last_command: last command for this vsync
  */
 struct sde_reg_dma_kickoff_cfg {
@@ -303,6 +304,7 @@ struct sde_reg_dma_kickoff_cfg {
 	enum sde_reg_dma_trigger_mode trigger_mode;
 	enum sde_reg_dma_queue queue_select;
 	enum sde_reg_dma_type dma_type;
+	enum sde_reg_dma_features feature;
 	u32 last_command;
 };
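
The new member reuses enum sde_reg_dma_features, the same enum that already indexes the per-feature payload buffers in the color-proc file, with REG_DMA_FEATURES_MAX acting as the "no color feature" sentinel for internal last-command kickoffs. A minimal sketch of that pairing (names mirror the hunks above):

	/* sketch: one enum both selects the buffer and tags the kickoff */
	enum sde_reg_dma_features feature = GAMUT;

	cfg.dma_buf = dspp_buf[feature][ctx->idx];
	cfg.feature = feature;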
 

msm/sde_dbg.c  (+166, -0)

@@ -26,6 +26,7 @@
 #define DEFAULT_DBGBUS_SDE	SDE_DBG_DUMP_IN_MEM
 #define DEFAULT_DBGBUS_VBIFRT	SDE_DBG_DUMP_IN_MEM
 #define DEFAULT_DBGBUS_DSI	SDE_DBG_DUMP_IN_MEM
+#define DEFAULT_DBGBUS_LUTDMA	SDE_DBG_DUMP_IN_MEM
 #define DEFAULT_BASE_REG_CNT	DEFAULT_MDSS_HW_BLOCK_SIZE
 #define GROUP_BYTES		4
 #define ROW_BYTES		16
@@ -38,6 +39,11 @@
 #define DBGBUS_NAME_SDE		"sde"
 #define DBGBUS_NAME_VBIF_RT	"vbif_rt"
 #define DBGBUS_NAME_DSI		"dsi"
+#define DBGBUS_NAME_LUTDMA	"reg_dma"
+
+/* offsets from LUTDMA top address for the debug buses */
+#define DBGBUS_LUTDMA_0	0x1E8
+#define DBGBUS_LUTDMA_1	0x5E8
 
 /* offsets from sde top address for the debug buses */
 #define DBGBUS_SSPP0	0x188
@@ -164,6 +170,12 @@ struct dsi_debug_bus_entry {
 	u32 sel;
 };
 
+struct lutdma_debug_bus_entry {
+	u32 wr_addr;
+	bool read_engine;
+	u32 indicies;
+};
+
 struct sde_dbg_dsi_ctrl_list_entry {
 	const char *name;
 	void __iomem *base;
@@ -197,6 +209,11 @@ struct sde_dbg_dsi_debug_bus {
 	struct dsi_debug_bus_entry *entries;
 };
 
+struct sde_dbg_lutdma_debug_bus {
+	struct sde_dbg_debug_bus_common cmn;
+	struct lutdma_debug_bus_entry *entries;
+};
+
 /**
  * struct sde_dbg_regbuf - wraps buffer and tracking params for register dumps
  * @buf: pointer to allocated memory for storing register dumps in hw recovery
@@ -228,6 +245,7 @@ struct sde_dbg_regbuf {
  * @enable_reg_dump: whether to dump registers into memory, kernel log, or both
  * @dbgbus_sde: debug bus structure for the sde
  * @dbgbus_vbif_rt: debug bus structure for the realtime vbif
+ * @dbgbus_lutdma: debug bus structure for the lutdma hw
  * @dump_all: dump all entries in register dump
  * @dump_secure: dump entries excluding few as it is in secure-session
  * @dsi_dbg_bus: dump dsi debug bus register
@@ -252,6 +270,7 @@ static struct sde_dbg_base {
 	struct sde_dbg_sde_debug_bus dbgbus_sde;
 	struct sde_dbg_vbif_debug_bus dbgbus_vbif_rt;
 	struct sde_dbg_dsi_debug_bus dbgbus_dsi;
+	struct sde_dbg_lutdma_debug_bus dbgbus_lutdma;
 	bool dump_all;
 	bool dump_secure;
 	u32 debugfs_ctrl;
@@ -4291,6 +4310,13 @@ static struct dsi_debug_bus_entry dsi_dbg_bus_kona[] = {
 	{0, 0x3c}, {0, 0x3d}, {0, 0x3e}, {0, 0x3f},
 };
 
+static struct lutdma_debug_bus_entry dbg_bus_lutdma_lahaina[] = {
+	{ DBGBUS_LUTDMA_0, false, 1024 },
+	{ DBGBUS_LUTDMA_0, true, 1024 },
+	{ DBGBUS_LUTDMA_1, false, 1024 },
+	{ DBGBUS_LUTDMA_1, true, 1024 },
+};
+
 /**
  * _sde_power_check - check if power needs to enabled
  * @dump_mode: to check if power need to be enabled
@@ -4960,6 +4986,103 @@ static void _sde_dbg_dump_dsi_dbg_bus(struct sde_dbg_dsi_debug_bus *bus)
 			bus->cmn.name);
 }
 
+static void _sde_dbg_dump_lutdma_dbg_bus(struct sde_dbg_lutdma_debug_bus *bus)
+{
+	void __iomem *mem_base = NULL;
+	struct sde_dbg_reg_base *reg_base;
+	struct lutdma_debug_bus_entry *entries;
+	bool dump_in_log, dump_in_mem;
+	u32 **dump_mem = NULL;
+	u32 *dump_addr = NULL;
+	u32 i, j, entry_count, addr, count, val, engine_bit, dump_mem_size = 0;
+	int rc;
+
+	if (!bus || !bus->cmn.entries_size)
+		return;
+
+	list_for_each_entry(reg_base, &sde_dbg_base.reg_base_list,
+			reg_base_head) {
+		if (strlen(reg_base->name) &&
+			!strcmp(reg_base->name, bus->cmn.name))
+			mem_base = reg_base->base;
+	}
+
+	if (!mem_base) {
+		pr_err("unable to find mem_base for %s\n", bus->cmn.name);
+		return;
+	}
+
+	entries = bus->entries;
+	entry_count = bus->cmn.entries_size;
+
+	dump_in_log = (bus->cmn.enable_mask & SDE_DBG_DUMP_IN_LOG);
+	dump_in_mem = (bus->cmn.enable_mask & SDE_DBG_DUMP_IN_MEM);
+	dump_mem = &bus->cmn.dumped_content;
+
+	if (!dump_in_log && !dump_in_mem)
+		return;
+
+	rc = pm_runtime_get_sync(sde_dbg_base.dev);
+	if (rc < 0) {
+		pr_err("failed to enable power %d\n", rc);
+		return;
+	}
+
+	dev_info(sde_dbg_base.dev, "======== start %s dump =========\n",
+			bus->cmn.name);
+
+	if (dump_in_mem) {
+		if (*dump_mem == NULL) {
+			for (i = 0; i < entry_count; i++)
+				dump_mem_size += (entries[i].indicies *
+					sizeof(u32));
+
+			//Ensure enough chunks for debugfs dumping
+			dump_mem_size += dump_mem_size % (DUMP_CLMN_COUNT * 4);
+			*dump_mem = devm_kzalloc(sde_dbg_base.dev,
+					dump_mem_size, GFP_KERNEL);
+			bus->cmn.content_size = dump_mem_size / sizeof(u32);
+		}
+
+		if (*dump_mem) {
+			dump_addr = *dump_mem;
+			dev_info(sde_dbg_base.dev,
+				"%s: start_addr:0x%pK len:0x%x\n",
+				__func__, dump_addr, dump_mem_size);
+		} else {
+			dump_in_mem = false;
+			pr_err("dump_mem: allocation fails\n");
+		}
+	}
+
+	for (i = 0; i < entry_count; i++) {
+		addr = entries[i].wr_addr;
+		count = entries[i].indicies;
+		engine_bit  = entries[i].read_engine ? BIT(14) : 0;
+
+		for (j = 0 ; j < count; j++) {
+			val = (BIT(0) | engine_bit | (j << 1)) & 0xFFFF;
+			writel_relaxed(val, mem_base + addr);
+			wmb(); /* Ensure dbgbus setup occurs before read */
+			val = readl_relaxed(mem_base + addr + 0x4);
+
+			if (dump_in_log)
+				dev_info(sde_dbg_base.dev,
+					"lutdma_waddr=0x%x index=0x%x val=0x%x\n",
+					addr, j, val);
+
+			if (dump_in_mem)
+				dump_addr[i * count +  j] = val;
+		}
+
+		//Disable debug bus when done
+		writel_relaxed(0, mem_base + addr);
+	}
+
+	dev_info(sde_dbg_base.dev, "======== end %s dump =========\n",
+			bus->cmn.name);
+}
+
 /**
  * _sde_dump_array - dump array of register bases
  * @blk_arr: array of register base pointers
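
The per-sample access pattern inside the loop above reads more easily as a standalone helper. This is an illustrative sketch only (the helper name is invented here); the offsets and bit positions are taken directly from the added function: bit 0 enables the bus, bit 14 selects the read engine, the sample index is shifted up by one bit, the select word is masked to 16 bits, the captured value is read back at wr_addr + 0x4, and the bus is disabled afterwards by writing 0.

	/* illustrative helper, not part of the patch */
	static u32 lutdma_dbgbus_read_sample(void __iomem *mem_base,
			u32 wr_addr, bool read_engine, u32 index)
	{
		u32 sel = (BIT(0) | (read_engine ? BIT(14) : 0) |
				(index << 1)) & 0xFFFF;

		writel_relaxed(sel, mem_base + wr_addr);
		wmb(); /* ensure the test-point select lands before sampling */
		return readl_relaxed(mem_base + wr_addr + 0x4);
	}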
@@ -5004,6 +5127,9 @@ static void _sde_dump_array(struct sde_dbg_reg_base *blk_arr[],
 	if (dump_all || dump_dbgbus_dsi)
 		_sde_dbg_dump_dsi_dbg_bus(&sde_dbg_base.dbgbus_dsi);
 
+	if (dump_all || dump_dbgbus_sde)
+		_sde_dbg_dump_lutdma_dbg_bus(&sde_dbg_base.dbgbus_lutdma);
+
 	if (do_panic && sde_dbg_base.panic_on_err)
 		panic(name);
 
@@ -5651,6 +5777,29 @@ static const struct file_operations sde_recovery_dsi_dbgbus_fops = {
 	.read = sde_recovery_dbgbus_dump_read,
 };
 
+
+static int sde_recovery_lutdma_dbgbus_dump_open(struct inode *inode,
+		struct file *file)
+{
+	if (!inode || !file)
+		return -EINVAL;
+
+	/* non-seekable */
+	file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+	file->private_data =  (void *)&sde_dbg_base.dbgbus_lutdma.cmn;
+
+	mutex_lock(&sde_dbg_base.mutex);
+	sde_dbg_base.dbgbus_lutdma.cmn.content_idx = 0;
+	mutex_unlock(&sde_dbg_base.mutex);
+
+	return 0;
+}
+
+static const struct file_operations sde_recovery_lutdma_dbgbus_fops = {
+	.open = sde_recovery_lutdma_dbgbus_dump_open,
+	.read = sde_recovery_dbgbus_dump_read,
+};
+
 /**
  * sde_dbg_reg_base_release - release allocated reg dump file private data
  * @inode: debugfs inode
@@ -6075,6 +6224,16 @@ int sde_dbg_debugfs_register(struct device *dev)
 				&dbg->dbgbus_dsi.cmn.enable_mask);
 	}
 
+	if (dbg->dbgbus_lutdma.entries) {
+		debugfs_create_file("recovery_lutdma_dbgbus", 0400,
+				debugfs_root, NULL,
+				&sde_recovery_lutdma_dbgbus_fops);
+		snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
+				dbg->dbgbus_lutdma.cmn.name);
+		debugfs_create_u32(debug_name, 0600, debugfs_root,
+				&dbg->dbgbus_lutdma.cmn.enable_mask);
+	}
+
 	list_for_each_entry(blk_base, &dbg->reg_base_list, reg_base_head) {
 		snprintf(debug_name, sizeof(debug_name), "%s_off",
 				blk_base->name);
@@ -6120,6 +6279,13 @@ void sde_dbg_init_dbg_buses(u32 hwversion)
 				ARRAY_SIZE(dsi_dbg_bus_kona);
 		dbg->dbgbus_dsi.cmn.name = DBGBUS_NAME_DSI;
 		dbg->dbgbus_dsi.cmn.enable_mask = DEFAULT_DBGBUS_DSI;
+
+		dbg->dbgbus_lutdma.entries = dbg_bus_lutdma_lahaina;
+		dbg->dbgbus_lutdma.cmn.name = DBGBUS_NAME_LUTDMA;
+		dbg->dbgbus_lutdma.cmn.entries_size =
+				ARRAY_SIZE(dbg_bus_lutdma_lahaina);
+		dbg->dbgbus_lutdma.cmn.enable_mask = DEFAULT_DBGBUS_LUTDMA;
+		dbg->dbgbus_lutdma.cmn.include_in_deferred_work = true;
 	} else if (SDE_HW_REV_MAJOR(hwversion) == 0x6) {
 		dbg->dbgbus_sde.entries = dbg_bus_sde_kona;
 		dbg->dbgbus_sde.cmn.entries_size =