
drm: msm: sde: Attempt DB DMA fallback if SB DMA not supported

The current implementation of DSPP GAMUT + IGC programming relies on
SB DMA. This leaves GAMUT + IGC unprogrammable on derivative Lahaina
chipsets that do not have SB DMA support.

Update the SB DMA handling to check whether SB DMA is present, and
fall back to the DB DMA module where possible.

Change-Id: I89d07e38459ab59b96c69558178b8e97062ed93d
Signed-off-by: Christopher Braga <[email protected]>
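
For context, the detect-and-fall-back flow this patch introduces can be
modeled standalone. The sketch below uses simplified, illustrative types
(dma_blk_valid, kick_off() and kickoff_with_fallback() are stand-ins, not
the driver's real symbols); it mirrors validate_kick_off_v1() reporting
-EOPNOTSUPP for an absent DMA engine and _perform_sbdma_kickoff() retrying
the same buffer on DB DMA:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum dma_type { DMA_TYPE_DB, DMA_TYPE_SB, DMA_TYPE_MAX };

/* Per-target capability table; a derivative chipset without SB DMA
 * leaves DMA_TYPE_SB false. */
static bool dma_blk_valid[DMA_TYPE_MAX] = {
	[DMA_TYPE_DB] = true,
	[DMA_TYPE_SB] = false,
};

/* Modeled on validate_kick_off_v1(): an absent engine is reported as
 * -EOPNOTSUPP so the caller can tell it apart from a malformed request. */
static int kick_off(enum dma_type type)
{
	if (type >= DMA_TYPE_MAX)
		return -EINVAL;
	if (!dma_blk_valid[type])
		return -EOPNOTSUPP;
	printf("kicked off on %s DMA\n", type == DMA_TYPE_SB ? "SB" : "DB");
	return 0;
}

/* Modeled on _perform_sbdma_kickoff(): try SB DMA first, quietly fall
 * back to DB DMA when the target does not support it. */
static int kickoff_with_fallback(void)
{
	int rc = kick_off(DMA_TYPE_SB);

	if (rc == -EOPNOTSUPP)
		rc = kick_off(DMA_TYPE_DB);
	if (rc)
		fprintf(stderr, "kick off failed: %d\n", rc);
	return rc;
}

int main(void)
{
	return kickoff_with_fallback();
}

Returning -EOPNOTSUPP rather than -EINVAL is the key design point: it
lets the caller distinguish "unsupported on this target" from a genuinely
malformed request, and retry instead of bailing out.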
Christopher Braga committed 5 years ago
commit fe087fde3c
4 changed files with 82 additions and 74 deletions
  1. msm/sde/sde_color_processing.c (+9 -9)
  2. msm/sde/sde_hw_dspp.h (+3 -1)
  3. msm/sde/sde_hw_reg_dma_v1.c (+15 -7)
  4. msm/sde/sde_hw_reg_dma_v1_color_proc.c (+55 -57)

+ 9 - 9
msm/sde/sde_color_processing.c

@@ -1378,8 +1378,8 @@ static const int dspp_feature_to_sub_blk_tbl[SDE_CP_CRTC_MAX_FEATURES] = {
 
 void sde_cp_dspp_flush_helper(struct sde_crtc *sde_crtc, u32 feature)
 {
-	u32 i, sub_blk, num_mixers, dspp_sb;
-	enum sde_dspp dspp;
+	u32 i, sub_blk, num_mixers;
+	struct sde_hw_dspp *dspp;
 	struct sde_hw_ctl *ctl;
 
 	if (!sde_crtc || feature >= SDE_CP_CRTC_MAX_FEATURES) {
@@ -1390,18 +1390,16 @@ void sde_cp_dspp_flush_helper(struct sde_crtc *sde_crtc, u32 feature)
 
 	num_mixers = sde_crtc->num_mixers;
 	sub_blk = dspp_feature_to_sub_blk_tbl[feature];
-	dspp_sb = dspp_feature_to_sub_blk_tbl[SDE_CP_CRTC_DSPP_SB];
 
 	for (i = 0; i < num_mixers; i++) {
 		ctl = sde_crtc->mixers[i].hw_ctl;
-		dspp = sde_crtc->mixers[i].hw_dspp->idx;
+		dspp = sde_crtc->mixers[i].hw_dspp;
 		if (ctl && ctl->ops.update_bitmask_dspp_subblk) {
+			if (feature == SDE_CP_CRTC_DSPP_SB &&
+					!dspp->sb_dma_in_use)
+				continue;
 			ctl->ops.update_bitmask_dspp_subblk(
-					ctl, dspp, sub_blk, true);
-			if (feature == SDE_CP_CRTC_DSPP_IGC ||
-					feature == SDE_CP_CRTC_DSPP_GAMUT)
-				ctl->ops.update_bitmask_dspp_subblk(
-						ctl, dspp, dspp_sb, true);
+					ctl, dspp->idx, sub_blk, true);
 		}
 	}
 }
@@ -1715,6 +1713,8 @@ void sde_cp_crtc_apply_properties(struct drm_crtc *crtc)
 			set_lm_flush = true;
 	}
 
+	sde_cp_dspp_flush_helper(sde_crtc, SDE_CP_CRTC_DSPP_SB);
+
 	if (!list_empty(&sde_crtc->ad_active)) {
 		sde_cp_ad_set_prop(sde_crtc, AD_IPC_RESET);
 		set_dspp_flush = true;
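
The sb_dma_in_use gate above can be seen in isolation in the following
minimal sketch (struct dspp is a stand-in for struct sde_hw_dspp, and the
printf takes the place of update_bitmask_dspp_subblk()): the SB sub-block
flush is requested once per commit, but skipped for any DSPP whose
feature buffers were written over DB DMA instead.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for struct sde_hw_dspp, reduced to the field this patch adds. */
struct dspp {
	int idx;
	bool sb_dma_in_use;
};

/* Modeled on sde_cp_dspp_flush_helper(): skip the SB sub-block flush for
 * DSPPs that never went through SB DMA. */
static void flush_sb_subblk(struct dspp *dspps, int num_mixers)
{
	int i;

	for (i = 0; i < num_mixers; i++) {
		if (!dspps[i].sb_dma_in_use)
			continue; /* programmed via the DB DMA fallback */
		printf("flushing SB sub-block for DSPP%d\n", dspps[i].idx);
	}
}

int main(void)
{
	struct dspp dspps[2] = { { 0, true }, { 1, false } };

	flush_sb_subblk(dspps, 2); /* only DSPP0 is flushed */
	return 0;
}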

+ 3 - 1
msm/sde/sde_hw_dspp.h

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _SDE_HW_DSPP_H
@@ -218,6 +218,7 @@ struct sde_hw_dspp_ops {
  * @hw_top: Block hardware top details
  * @idx: DSPP index
  * @cap: Pointer to layer_cfg
+ * @sb_dma_in_use: hint indicating if sb dma is being used for this dspp
  * @ops: Pointer to operations possible for this DSPP
  */
 struct sde_hw_dspp {
@@ -230,6 +231,7 @@ struct sde_hw_dspp {
 	/* dspp */
 	enum sde_dspp idx;
 	const struct sde_dspp_cfg *cap;
+	bool sb_dma_in_use;
 
 	/* Ops */
 	struct sde_hw_dspp_ops ops;

+ 15 - 7
msm/sde/sde_hw_reg_dma_v1.c

@@ -539,13 +539,20 @@ static int validate_dma_cfg(struct sde_reg_dma_setup_ops_cfg *cfg)
 static int validate_kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)
 {
 
-	if (!cfg || !cfg->ctl || !cfg->dma_buf) {
-		DRM_ERROR("invalid cfg %pK ctl %pK dma_buf %pK\n",
-			cfg, ((!cfg) ? NULL : cfg->ctl),
-			((!cfg) ? NULL : cfg->dma_buf));
+	if (!cfg || !cfg->ctl || !cfg->dma_buf ||
+			cfg->dma_type >= REG_DMA_TYPE_MAX) {
+		DRM_ERROR("invalid cfg %pK ctl %pK dma_buf %pK dma type %d\n",
+				cfg, ((!cfg) ? NULL : cfg->ctl),
+				((!cfg) ? NULL : cfg->dma_buf),
+				((!cfg) ? 0 : cfg->dma_type));
 		return -EINVAL;
 	}
 
+	if (!reg_dma->caps->reg_dma_blks[cfg->dma_type].valid) {
+		DRM_DEBUG("REG dma type %d is not supported\n", cfg->dma_type);
+		return -EOPNOTSUPP;
+	}
+
 	if (cfg->ctl->idx < CTL_0 && cfg->ctl->idx >= CTL_MAX) {
 		DRM_ERROR("invalid ctl idx %d\n", cfg->ctl->idx);
 		return -EINVAL;
@@ -622,8 +629,8 @@ static int write_kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)
 		SET_UP_REG_DMA_REG(hw, reg_dma, REG_DMA_TYPE_SB);
 
 	if (hw.hwversion == 0) {
-		DRM_ERROR("invalid dma type %d\n", cfg->dma_type);
-		return -EINVAL;
+		DRM_ERROR("DMA type %d is unsupported\n", cfg->dma_type);
+		return -EOPNOTSUPP;
 	}
 
 	SDE_REG_WRITE(&hw, reg_dma_opmode_offset, BIT(0));
@@ -825,7 +832,8 @@ int init_v2(struct sde_hw_reg_dma *cfg)
 
 	v1_supported[IGC] = GRP_DSPP_HW_BLK_SELECT | GRP_VIG_HW_BLK_SELECT |
 			GRP_DMA_HW_BLK_SELECT;
-	reg_dma->ops.last_command_sb = last_cmd_sb_v2;
+	if (cfg->caps->reg_dma_blks[REG_DMA_TYPE_SB].valid)
+		reg_dma->ops.last_command_sb = last_cmd_sb_v2;
 
 	return 0;
 }
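
One consequence of the guarded registration in init_v2() above is that
ops.last_command_sb stays NULL on targets without SB DMA, so the op
pointer itself can serve as a capability check. A minimal sketch with
illustrative types (struct reg_dma_ops here is not the driver's real
definition):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct reg_dma_ops {
	int (*last_command_sb)(void); /* NULL when SB DMA is absent */
};

static int last_cmd_sb_v2(void)
{
	return 0;
}

/* Mirrors init_v2(): hook up the SB op only when the block exists. */
static void init_ops(struct reg_dma_ops *ops, bool sb_blk_valid)
{
	ops->last_command_sb = NULL;
	if (sb_blk_valid)
		ops->last_command_sb = last_cmd_sb_v2;
}

int main(void)
{
	struct reg_dma_ops ops;

	init_ops(&ops, false); /* e.g. a derivative target without SB DMA */
	if (!ops.last_command_sb)
		printf("SB DMA not available on this target\n");
	return 0;
}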

+ 55 - 57
msm/sde/sde_hw_reg_dma_v1_color_proc.c

@@ -3813,9 +3813,59 @@ int reg_dmav2_init_dspp_op_v4(int feature, enum sde_dspp idx)
 	return rc;
 }
 
-static void _dspp_igcv32_off(struct sde_hw_dspp *ctx, void *cfg)
+
+/* Attempt to submit a feature buffer to SB DMA.
+ * Note that if SB DMA is not supported, this function
+ * will quietly attempt to fall back to DB DMA.
+ */
+static void _perform_sbdma_kickoff(struct sde_hw_dspp *ctx,
+		struct sde_hw_cp_cfg *hw_cfg,
+		struct sde_hw_reg_dma_ops *dma_ops,
+		u32 blk, enum sde_reg_dma_features feature)
 {
+	int rc, i;
 	struct sde_reg_dma_kickoff_cfg kick_off;
+
+	if ((feature != GAMUT && feature != IGC) ||
+			!(blk & (DSPP0 | DSPP1 | DSPP2 | DSPP3))) {
+		DRM_ERROR("SB DMA invalid for feature / block - %d/%d\n",
+				feature, blk);
+		return;
+	}
+
+	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
+			dspp_buf[feature][ctx->idx],
+			REG_DMA_WRITE, DMA_CTL_QUEUE1, WRITE_IMMEDIATE);
+	kick_off.dma_type = REG_DMA_TYPE_SB;
+	rc = dma_ops->kick_off(&kick_off);
+	if (!rc) {
+		rc = dma_ops->last_command_sb(hw_cfg->ctl, DMA_CTL_QUEUE1,
+				REG_DMA_NOWAIT);
+		if (rc) {
+			DRM_ERROR("failed to call last_command_sb ret %d\n",
+					rc);
+		} else {
+			for (i = 0; i < hw_cfg->num_of_mixers; ++i) {
+				if (blk & dspp_mapping[hw_cfg->dspp[i]->idx])
+					hw_cfg->dspp[i]->sb_dma_in_use = true;
+			}
+		}
+	} else if (rc == -EOPNOTSUPP) {
+		DRM_DEBUG("Falling back to dbdma\n", rc);
+
+		REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
+				dspp_buf[feature][ctx->idx], REG_DMA_WRITE,
+				DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+		rc = dma_ops->kick_off(&kick_off);
+		if (rc)
+			DRM_ERROR("failed dbdma kick off ret %d\n", rc);
+	} else {
+		DRM_ERROR("failed sbdma kick off ret %d\n", rc);
+	}
+}
+
+static void _dspp_igcv32_off(struct sde_hw_dspp *ctx, void *cfg)
+{
 	struct sde_hw_cp_cfg *hw_cfg = cfg;
 	struct sde_hw_reg_dma_ops *dma_ops;
 	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
@@ -3852,26 +3902,13 @@ static void _dspp_igcv32_off(struct sde_hw_dspp *ctx, void *cfg)
 		return;
 	}
 
-	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[IGC][ctx->idx],
-			REG_DMA_WRITE, DMA_CTL_QUEUE1, WRITE_IMMEDIATE);
-	kick_off.dma_type = REG_DMA_TYPE_SB;
-	rc = dma_ops->kick_off(&kick_off);
-	if (rc) {
-		DRM_ERROR("failed to kick off ret %d\n", rc);
-		return;
-	}
-
-	rc = dma_ops->last_command_sb(hw_cfg->ctl, DMA_CTL_QUEUE1,
-			REG_DMA_NOWAIT);
-	if (rc)
-		DRM_ERROR("failed to call last_command_sb ret %d\n", rc);
+	_perform_sbdma_kickoff(ctx, hw_cfg, dma_ops, blk, IGC);
 }
 
 void reg_dmav2_setup_dspp_igcv32(struct sde_hw_dspp *ctx, void *cfg)
 {
 	struct drm_msm_igc_lut *lut_cfg;
 	struct sde_hw_reg_dma_ops *dma_ops;
-	struct sde_reg_dma_kickoff_cfg kick_off;
 	struct sde_hw_cp_cfg *hw_cfg = cfg;
 	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
 	int rc = 0, i = 0, j = 0;
@@ -3972,19 +4009,7 @@ void reg_dmav2_setup_dspp_igcv32(struct sde_hw_dspp *ctx, void *cfg)
 		goto exit;
 	}
 
-	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[IGC][ctx->idx],
-			REG_DMA_WRITE, DMA_CTL_QUEUE1, WRITE_IMMEDIATE);
-	kick_off.dma_type = REG_DMA_TYPE_SB;
-	rc = dma_ops->kick_off(&kick_off);
-	if (rc) {
-		DRM_ERROR("failed to kick off ret %d\n", rc);
-		goto exit;
-	}
-
-	rc = dma_ops->last_command_sb(hw_cfg->ctl, DMA_CTL_QUEUE1,
-			REG_DMA_NOWAIT);
-	if (rc)
-		DRM_ERROR("failed to call last_command_sb ret %d\n", rc);
+	_perform_sbdma_kickoff(ctx, hw_cfg, dma_ops, blk, IGC);
 
 exit:
 	kfree(data);
@@ -3995,7 +4020,6 @@ static void dspp_3d_gamutv43_off(struct sde_hw_dspp *ctx, void *cfg)
 	struct sde_hw_cp_cfg *hw_cfg = cfg;
 	struct sde_hw_reg_dma_ops *dma_ops;
 	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
-	struct sde_reg_dma_kickoff_cfg kick_off;
 	int rc;
 	u32 op_mode = 0, num_of_mixers, blk = 0;
 
@@ -4029,19 +4053,7 @@ static void dspp_3d_gamutv43_off(struct sde_hw_dspp *ctx, void *cfg)
 		return;
 	}
 
-	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl, dspp_buf[GAMUT][ctx->idx],
-			REG_DMA_WRITE, DMA_CTL_QUEUE1, WRITE_IMMEDIATE);
-	kick_off.dma_type = REG_DMA_TYPE_SB;
-	rc = dma_ops->kick_off(&kick_off);
-	if (rc) {
-		DRM_ERROR("failed to kick off ret %d\n", rc);
-		return;
-	}
-
-	rc = dma_ops->last_command_sb(hw_cfg->ctl, DMA_CTL_QUEUE1,
-			REG_DMA_NOWAIT);
-	if (rc)
-		DRM_ERROR("failed to call last_command_sb ret %d\n", rc);
+	_perform_sbdma_kickoff(ctx, hw_cfg, dma_ops, blk, GAMUT);
 }
 
 
@@ -4050,7 +4062,6 @@ void reg_dmav2_setup_dspp_3d_gamutv43(struct sde_hw_dspp *ctx, void *cfg)
 	struct sde_hw_cp_cfg *hw_cfg = cfg;
 	struct sde_hw_reg_dma_ops *dma_ops;
 	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
-	struct sde_reg_dma_kickoff_cfg kick_off;
 	struct drm_msm_3d_gamut *payload;
 	int rc;
 	u32 num_of_mixers, blk = 0, i, j, k = 0, len;
@@ -4178,20 +4189,7 @@ void reg_dmav2_setup_dspp_3d_gamutv43(struct sde_hw_dspp *ctx, void *cfg)
 		goto exit;
 	}
 
-	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
-			dspp_buf[GAMUT][ctx->idx], REG_DMA_WRITE,
-			DMA_CTL_QUEUE1, WRITE_IMMEDIATE);
-	kick_off.dma_type = REG_DMA_TYPE_SB;
-	rc = dma_ops->kick_off(&kick_off);
-	if (rc) {
-		DRM_ERROR("failed to kick off ret %d\n", rc);
-		goto exit;
-	}
-
-	rc = dma_ops->last_command_sb(hw_cfg->ctl, DMA_CTL_QUEUE1,
-			REG_DMA_NOWAIT);
-	if (rc)
-		DRM_ERROR("failed to call last_command_sb ret %d\n", rc);
+	_perform_sbdma_kickoff(ctx, hw_cfg, dma_ops, blk, GAMUT);
 
 exit:
 	kfree(data);
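
Finally, note the guard at the top of _perform_sbdma_kickoff(): only
GAMUT and IGC on DSPP0-DSPP3 may take the SB DMA path. The block test is
a one-hot bitmask check; a standalone sketch follows (the bit values are
illustrative, in the style of the dspp_mapping table, not the driver's
actual encoding):

#include <stdio.h>

/* Illustrative one-hot DSPP block IDs. */
enum { DSPP0 = 1 << 0, DSPP1 = 1 << 1, DSPP2 = 1 << 2, DSPP3 = 1 << 3 };

int main(void)
{
	unsigned int blk = DSPP1 | DSPP3; /* buffer broadcast to two DSPPs */

	/* Same shape as the helper's guard: reject any block mask with no
	 * SB-DMA-capable DSPP bit set. */
	if (!(blk & (DSPP0 | DSPP1 | DSPP2 | DSPP3)))
		printf("SB DMA invalid for block 0x%x\n", blk);
	else
		printf("block mask 0x%x may use SB DMA\n", blk);
	return 0;
}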