disp: msm: sde: Add support for SB LUTDMA

A new LUTDMA HW instance has been added to support programming of
SB features via LUTDMA. This change adds corresponding support for
the new SB LUTDMA, including catalog parsing, reg_dma init/deinit/ops
updates, and support for the new REG_BLK_LUT_WRITE opcode.

Change-Id: I0fed7a6e93cd96fe9fe562d2470a8789b161d1bc
Signed-off-by: Ping Li <[email protected]>
Signed-off-by: Christopher Braga <[email protected]>
Christopher Braga, 5 years ago · commit 5e28b86e3c

+ 26 - 3
msm/sde/sde_hw_catalog.c

@@ -434,6 +434,7 @@ enum {
 
 enum {
 	REG_DMA_OFF,
+	REG_DMA_ID,
 	REG_DMA_VERSION,
 	REG_DMA_TRIGGER_OFF,
 	REG_DMA_BROADCAST_DISABLED,
@@ -781,7 +782,9 @@ static struct sde_prop_type uidle_prop[] = {
 
 static struct sde_prop_type reg_dma_prop[REG_DMA_PROP_MAX] = {
 	[REG_DMA_OFF] =  {REG_DMA_OFF, "qcom,sde-reg-dma-off", false,
-		PROP_TYPE_U32},
+		PROP_TYPE_U32_ARRAY},
+	[REG_DMA_ID] =  {REG_DMA_ID, "qcom,sde-reg-dma-id", false,
+		PROP_TYPE_U32_ARRAY},
 	[REG_DMA_VERSION] = {REG_DMA_VERSION, "qcom,sde-reg-dma-version",
 		false, PROP_TYPE_U32},
 	[REG_DMA_TRIGGER_OFF] = {REG_DMA_TRIGGER_OFF,
@@ -3702,6 +3705,8 @@ static int sde_parse_reg_dma_dt(struct device_node *np,
 	struct sde_prop_value *prop_value = NULL;
 	u32 off_count;
 	bool prop_exists[REG_DMA_PROP_MAX];
+	bool dma_type_exists[REG_DMA_TYPE_MAX];
+	enum sde_reg_dma_type dma_type;
 
 	prop_value = kcalloc(REG_DMA_PROP_MAX,
 			sizeof(struct sde_prop_value), GFP_KERNEL);
@@ -3720,8 +3725,26 @@ static int sde_parse_reg_dma_dt(struct device_node *np,
 	if (rc)
 		goto end;
 
-	sde_cfg->reg_dma_count = off_count;
-	sde_cfg->dma_cfg.base = PROP_VALUE_ACCESS(prop_value, REG_DMA_OFF, 0);
+	sde_cfg->reg_dma_count = 0;
+	memset(&dma_type_exists, 0, sizeof(dma_type_exists));
+	for (i = 0; i < off_count; i++) {
+		dma_type = PROP_VALUE_ACCESS(prop_value, REG_DMA_ID, i);
+		if (dma_type >= REG_DMA_TYPE_MAX) {
+			SDE_ERROR("Invalid DMA type %d\n", dma_type);
+			goto end;
+		} else if (dma_type_exists[dma_type]) {
+			SDE_ERROR("DMA type ID %d exists more than once\n",
+					dma_type);
+			goto end;
+		}
+
+		dma_type_exists[dma_type] = true;
+		sde_cfg->dma_cfg.reg_dma_blks[dma_type].base =
+				PROP_VALUE_ACCESS(prop_value, REG_DMA_OFF, i);
+		sde_cfg->dma_cfg.reg_dma_blks[dma_type].valid = true;
+		sde_cfg->reg_dma_count++;
+	}
+
 	sde_cfg->dma_cfg.version = PROP_VALUE_ACCESS(prop_value,
 						REG_DMA_VERSION, 0);
 	sde_cfg->dma_cfg.trigger_sel_off = PROP_VALUE_ACCESS(prop_value,

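The two properties above are parsed as parallel arrays: qcom,sde-reg-dma-off now carries one base offset per LUTDMA instance, and qcom,sde-reg-dma-id names the block type at the same index. A minimal standalone C sketch of that pairing and the duplicate-ID rejection; the offsets are made up for illustration:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

enum reg_dma_type { TYPE_DB, TYPE_SB, TYPE_MAX };

int main(void)
{
	/* hypothetical DT values: off[i] is the base for instance id[i] */
	const unsigned int off[] = { 0x73000, 0x73200 };
	const unsigned int id[] = { TYPE_DB, TYPE_SB };
	unsigned int base[TYPE_MAX] = { 0 };
	bool seen[TYPE_MAX];
	size_t i;

	memset(seen, 0, sizeof(seen));
	for (i = 0; i < sizeof(id) / sizeof(id[0]); i++) {
		if (id[i] >= TYPE_MAX || seen[id[i]]) {
			fprintf(stderr, "bad or duplicate id %u\n", id[i]);
			return 1;
		}
		seen[id[i]] = true;
		base[id[i]] = off[i];
	}
	printf("DB base 0x%x, SB base 0x%x\n", base[TYPE_DB], base[TYPE_SB]);
	return 0;
}
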
+ 29 - 6
msm/sde/sde_hw_catalog.h

@@ -1186,12 +1186,35 @@ struct sde_vbif_cfg {
 	u32 memtype_count;
 	u32 memtype[MAX_XIN_COUNT];
 };
+
 /**
- * struct sde_reg_dma_cfg - information of lut dma blocks
- * @id                 enum identifying this block
- * @base               register offset of this block
- * @features           bit mask identifying sub-blocks/features
- * @version            version of lutdma hw block
+ * enum sde_reg_dma_type - defines reg dma block type
+ * @REG_DMA_TYPE_DB: DB LUT DMA block
+ * @REG_DMA_TYPE_SB: SB LUT DMA block
+ * @REG_DMA_TYPE_MAX: invalid selection
+ */
+enum sde_reg_dma_type {
+	REG_DMA_TYPE_DB,
+	REG_DMA_TYPE_SB,
+	REG_DMA_TYPE_MAX,
+};
+
+/**
+ * struct sde_reg_dma_blk_info - definition of lut dma block.
+ * @valid              bool indicating if the definition is valid.
+ * @base               register offset of this block.
+ * @features           bit mask identifying sub-blocks/features.
+ */
+struct sde_reg_dma_blk_info {
+	bool valid;
+	u32 base;
+	u32 features;
+};
+
+/**
+ * struct sde_reg_dma_cfg - overall config struct of lut dma blocks.
+ * @reg_dma_blks       Reg DMA blk info for each possible block type
+ * @version            version of lutdma hw blocks
  * @trigger_sel_off    offset to trigger select registers of lutdma
  * @broadcast_disabled flag indicating if broadcast usage should be avoided
  * @xin_id             VBIF xin client-id for LUTDMA
@@ -1199,7 +1222,7 @@ struct sde_vbif_cfg {
  * @clk_ctrl           VBIF xin client clk-ctrl
  */
 struct sde_reg_dma_cfg {
-	SDE_HW_BLK_INFO;
+	struct sde_reg_dma_blk_info reg_dma_blks[REG_DMA_TYPE_MAX];
 	u32 version;
 	u32 trigger_sel_off;
 	u32 broadcast_disabled;

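Because the catalog now publishes a valid flag per instance instead of a single base, consumers can test for the presence of a given block before programming it. A hypothetical helper (not part of this change) built on the new types:

/* Hypothetical convenience wrapper; the driver open-codes the same
 * check inside the updated SET_UP_REG_DMA_REG macro. */
static inline bool sde_reg_dma_blk_present(const struct sde_reg_dma_cfg *cfg,
		enum sde_reg_dma_type type)
{
	return cfg && type < REG_DMA_TYPE_MAX &&
			cfg->reg_dma_blks[type].valid;
}
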
+ 246 - 52
msm/sde/sde_hw_reg_dma_v1.c

@@ -20,15 +20,17 @@
 #define DECODE_SEL_OP (BIT(HW_BLK_SELECT))
 #define REG_WRITE_OP ((BIT(REG_SINGLE_WRITE)) | (BIT(REG_BLK_WRITE_SINGLE)) | \
 	(BIT(REG_BLK_WRITE_INC)) | (BIT(REG_BLK_WRITE_MULTIPLE)) | \
-	(BIT(REG_SINGLE_MODIFY)))
+	(BIT(REG_SINGLE_MODIFY)) | (BIT(REG_BLK_LUT_WRITE)))
 
 #define REG_DMA_OPS (DECODE_SEL_OP | REG_WRITE_OP)
 #define IS_OP_ALLOWED(op, buf_op) (BIT(op) & buf_op)
 
-#define SET_UP_REG_DMA_REG(hw, reg_dma) \
+#define SET_UP_REG_DMA_REG(hw, reg_dma, i) \
 	do { \
+		if ((reg_dma)->caps->reg_dma_blks[(i)].valid == false) \
+			break; \
 		(hw).base_off = (reg_dma)->addr; \
-		(hw).blk_off = (reg_dma)->caps->base; \
+		(hw).blk_off = (reg_dma)->caps->reg_dma_blks[(i)].base; \
 		(hw).hwversion = (reg_dma)->caps->version; \
 		(hw).log_mask = SDE_DBG_MASK_REGDMA; \
 } while (0)
@@ -51,22 +53,31 @@
 #define HW_INDEX_REG_WRITE_OPCODE (BIT(28) | BIT(29))
 #define AUTO_INC_REG_WRITE_OPCODE (BIT(30))
 #define BLK_REG_WRITE_OPCODE (BIT(30) | BIT(28))
+#define LUTBUS_WRITE_OPCODE (BIT(30) | BIT(29))
 
 #define WRAP_MIN_SIZE 2
 #define WRAP_MAX_SIZE (BIT(4) - 1)
 #define MAX_DWORDS_SZ (BIT(14) - 1)
 #define REG_DMA_HEADERS_BUFFER_SZ (sizeof(u32) * 128)
 
+#define LUTBUS_TABLE_SEL_MASK 0x10000
+#define LUTBUS_BLOCK_SEL_MASK 0xffff
+#define LUTBUS_TRANS_SZ_MASK 0xff0000
+#define LUTBUS_LUT_SIZE_MASK 0x3fff
+
 static uint32_t reg_dma_register_count;
 static uint32_t reg_dma_decode_sel;
 static uint32_t reg_dma_opmode_offset;
 static uint32_t reg_dma_ctl0_queue0_cmd0_offset;
+static uint32_t reg_dma_ctl0_queue1_cmd0_offset;
 static uint32_t reg_dma_intr_status_offset;
 static uint32_t reg_dma_intr_4_status_offset;
 static uint32_t reg_dma_intr_clear_offset;
 static uint32_t reg_dma_ctl_trigger_offset;
 static uint32_t reg_dma_ctl0_reset_offset;
 static uint32_t reg_dma_error_clear_mask;
+static uint32_t reg_dma_ctl_queue_off[CTL_MAX];
+static uint32_t reg_dma_ctl_queue1_off[CTL_MAX];
 
 typedef int (*reg_dma_internal_ops) (struct sde_reg_dma_setup_ops_cfg *cfg);
 
@@ -78,6 +89,7 @@ static u32 ops_mem_size[REG_DMA_SETUP_OPS_MAX] = {
 	[HW_BLK_SELECT] = sizeof(u32) * 2,
 	[REG_SINGLE_WRITE] = sizeof(u32) * 2,
 	[REG_SINGLE_MODIFY] = sizeof(u32) * 3,
+	[REG_BLK_LUT_WRITE] = sizeof(u32) * 2,
 };
 
 static u32 queue_sel[DMA_CTL_QUEUE_MAX] = {
@@ -85,7 +97,6 @@ static u32 queue_sel[DMA_CTL_QUEUE_MAX] = {
 	[DMA_CTL_QUEUE1] = BIT(4),
 };
 
-static u32 reg_dma_ctl_queue_off[CTL_MAX];
 static u32 dspp_read_sel[DSPP_HIST_MAX] = {
 	[DSPP0_HIST] = 0,
 	[DSPP1_HIST] = 1,
@@ -119,6 +130,7 @@ static u32 ctl_trigger_done_mask[CTL_MAX][DMA_CTL_QUEUE_MAX] = {
 static int validate_dma_cfg(struct sde_reg_dma_setup_ops_cfg *cfg);
 static int validate_write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg);
 static int validate_write_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
+static int validate_blk_lut_write(struct sde_reg_dma_setup_ops_cfg *cfg);
 static int validate_write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
 static int validate_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg);
 static int write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg);
@@ -127,6 +139,7 @@ static int write_multi_reg_index(struct sde_reg_dma_setup_ops_cfg *cfg);
 static int write_multi_reg_inc(struct sde_reg_dma_setup_ops_cfg *cfg);
 static int write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
 static int write_single_modify(struct sde_reg_dma_setup_ops_cfg *cfg);
+static int write_block_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg);
 static int write_last_cmd(struct sde_reg_dma_setup_ops_cfg *cfg);
 static int reset_reg_dma_buffer_v1(struct sde_reg_dma_buffer *lut_buf);
 static int check_support_v1(enum sde_reg_dma_features feature,
@@ -139,6 +152,8 @@ static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
 static struct sde_reg_dma_buffer *alloc_reg_dma_buf_v1(u32 size);
 static int dealloc_reg_dma_v1(struct sde_reg_dma_buffer *lut_buf);
 static void dump_regs_v1(void);
+static int last_cmd_sb_v2(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
+		enum sde_reg_dma_last_cmd_mode mode);
 
 static reg_dma_internal_ops write_dma_op_params[REG_DMA_SETUP_OPS_MAX] = {
 	[HW_BLK_SELECT] = write_decode_sel,
@@ -147,6 +162,7 @@ static reg_dma_internal_ops write_dma_op_params[REG_DMA_SETUP_OPS_MAX] = {
 	[REG_BLK_WRITE_INC] = write_multi_reg_index,
 	[REG_BLK_WRITE_MULTIPLE] = write_multi_lut_reg,
 	[REG_SINGLE_MODIFY] = write_single_modify,
+	[REG_BLK_LUT_WRITE] = write_block_lut_reg,
 };
 
 static reg_dma_internal_ops validate_dma_op_params[REG_DMA_SETUP_OPS_MAX] = {
@@ -156,9 +172,11 @@ static reg_dma_internal_ops validate_dma_op_params[REG_DMA_SETUP_OPS_MAX] = {
 	[REG_BLK_WRITE_INC] = validate_write_reg,
 	[REG_BLK_WRITE_MULTIPLE] = validate_write_multi_lut_reg,
 	[REG_SINGLE_MODIFY] = validate_write_reg,
+	[REG_BLK_LUT_WRITE] = validate_blk_lut_write,
 };
 
-static struct sde_reg_dma_buffer *last_cmd_buf[CTL_MAX];
+static struct sde_reg_dma_buffer *last_cmd_buf_db[CTL_MAX];
+static struct sde_reg_dma_buffer *last_cmd_buf_sb[CTL_MAX];
 
 static void get_decode_sel(unsigned long blk, u32 *decode_sel)
 {
@@ -331,6 +349,22 @@ static int write_single_modify(struct sde_reg_dma_setup_ops_cfg *cfg)
 	return 0;
 }
 
+static int write_block_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
+{
+	u32 *loc = NULL;
+
+	loc =  (u32 *)((u8 *)cfg->dma_buf->vaddr +
+			cfg->dma_buf->index);
+	loc[0] = LUTBUS_WRITE_OPCODE;
+	loc[0] |= (cfg->table_sel << 16) & LUTBUS_TABLE_SEL_MASK;
+	loc[0] |= (cfg->block_sel & LUTBUS_BLOCK_SEL_MASK);
+	loc[1] = (cfg->trans_size << 16) & LUTBUS_TRANS_SZ_MASK;
+	loc[1] |= (cfg->lut_size & LUTBUS_LUT_SIZE_MASK);
+	cfg->dma_buf->index += ops_mem_size[cfg->ops];
+
+	return write_multi_reg(cfg);
+}
+
 static int write_decode_sel(struct sde_reg_dma_setup_ops_cfg *cfg)
 {
 	u32 *loc = NULL;
@@ -363,6 +397,27 @@ static int validate_write_multi_lut_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
 	return rc;
 }
 
+static int validate_blk_lut_write(struct sde_reg_dma_setup_ops_cfg *cfg)
+{
+	int rc;
+
+	rc = validate_write_reg(cfg);
+	if (rc)
+		return rc;
+
+	if (cfg->table_sel >= LUTBUS_TABLE_SELECT_MAX ||
+			cfg->block_sel >= LUTBUS_BLOCK_MAX ||
+			(cfg->trans_size != LUTBUS_IGC_TRANS_SIZE &&
+			cfg->trans_size != LUTBUS_GAMUT_TRANS_SIZE)) {
+		DRM_ERROR("invalid table_sel %d block_sel %d trans_size %d\n",
+				cfg->table_sel, cfg->block_sel,
+				cfg->trans_size);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
 static int validate_write_reg(struct sde_reg_dma_setup_ops_cfg *cfg)
 {
 	u32 remain_len, write_len;
@@ -536,6 +591,14 @@ static int validate_kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)
 				(size_t)MAX_DWORDS_SZ);
 		return -EINVAL;
 	}
+
+	if (cfg->dma_type == REG_DMA_TYPE_SB &&
+			(cfg->queue_select != DMA_CTL_QUEUE1 ||
+			cfg->op == REG_DMA_READ)) {
+		DRM_ERROR("invalid queue selected %d or op %d for SB LUTDMA\n",
+				cfg->queue_select, cfg->op);
+		return -EINVAL;
+	}
 	return 0;
 }
 
@@ -553,7 +616,16 @@ static int write_kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)
 	cmd1 |= (cfg->op == REG_DMA_WRITE) ? (BIT(22)) : 0;
 	cmd1 |= (SIZE_DWORD(cfg->dma_buf->index) & MAX_DWORDS_SZ);
 
-	SET_UP_REG_DMA_REG(hw, reg_dma);
+	if (cfg->dma_type == REG_DMA_TYPE_DB)
+		SET_UP_REG_DMA_REG(hw, reg_dma, REG_DMA_TYPE_DB);
+	else if (cfg->dma_type == REG_DMA_TYPE_SB)
+		SET_UP_REG_DMA_REG(hw, reg_dma, REG_DMA_TYPE_SB);
+
+	if (hw.hwversion == 0) {
+		DRM_ERROR("invalid dma type %d\n", cfg->dma_type);
+		return -EINVAL;
+	}
+
 	SDE_REG_WRITE(&hw, reg_dma_opmode_offset, BIT(0));
 	val = SDE_REG_READ(&hw, reg_dma_intr_4_status_offset);
 	if (val) {
@@ -564,15 +636,28 @@ static int write_kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)
 		SDE_EVT32(val);
 	}
 
-	SDE_REG_WRITE(&hw, reg_dma_ctl_queue_off[cfg->ctl->idx],
-			cfg->dma_buf->iova);
-	SDE_REG_WRITE(&hw, reg_dma_ctl_queue_off[cfg->ctl->idx] + 0x4,
-			cmd1);
+	if (cfg->dma_type == REG_DMA_TYPE_DB) {
+		SDE_REG_WRITE(&hw, reg_dma_ctl_queue_off[cfg->ctl->idx],
+				cfg->dma_buf->iova);
+		SDE_REG_WRITE(&hw, reg_dma_ctl_queue_off[cfg->ctl->idx] + 0x4,
+				cmd1);
+	} else if (cfg->dma_type == REG_DMA_TYPE_SB) {
+		SDE_REG_WRITE(&hw, reg_dma_ctl_queue1_off[cfg->ctl->idx],
+				cfg->dma_buf->iova);
+		SDE_REG_WRITE(&hw, reg_dma_ctl_queue1_off[cfg->ctl->idx] + 0x4,
+				cmd1);
+	}
+
 	if (cfg->last_command) {
 		mask = ctl_trigger_done_mask[cfg->ctl->idx][cfg->queue_select];
 		SDE_REG_WRITE(&hw, reg_dma_intr_clear_offset, mask);
-		SDE_REG_WRITE(&cfg->ctl->hw, reg_dma_ctl_trigger_offset,
-			queue_sel[cfg->queue_select]);
+		/* DB LUTDMA uses a SW trigger, while SB LUTDMA uses the
+		 * DSPP_SB flush as its trigger event.
+		 */
+		if (cfg->dma_type == REG_DMA_TYPE_DB) {
+			SDE_REG_WRITE(&cfg->ctl->hw, reg_dma_ctl_trigger_offset,
+					queue_sel[cfg->queue_select]);
+		}
 	}
 
 	return 0;
@@ -587,26 +672,45 @@ int init_v1(struct sde_hw_reg_dma *cfg)
 
 	reg_dma = cfg;
 	for (i = CTL_0; i < CTL_MAX; i++) {
-		if (!last_cmd_buf[i]) {
-			last_cmd_buf[i] =
+		if (!last_cmd_buf_db[i]) {
+			last_cmd_buf_db[i] =
 			    alloc_reg_dma_buf_v1(REG_DMA_HEADERS_BUFFER_SZ);
-			if (IS_ERR_OR_NULL(last_cmd_buf[i])) {
+			if (IS_ERR_OR_NULL(last_cmd_buf_db[i])) {
 				/*
 				 * This will allow reg dma to fall back to
 				 * AHB domain
 				 */
 				pr_info("Failed to allocate reg dma, ret:%lu\n",
-						PTR_ERR(last_cmd_buf[i]));
+						PTR_ERR(last_cmd_buf_db[i]));
+				return 0;
+			}
+		}
+		if (!last_cmd_buf_sb[i]) {
+			last_cmd_buf_sb[i] =
+			    alloc_reg_dma_buf_v1(REG_DMA_HEADERS_BUFFER_SZ);
+			if (IS_ERR_OR_NULL(last_cmd_buf_sb[i])) {
+				/*
+				 * This will allow reg dma to fall back to
+				 * AHB domain
+				 */
+				pr_info("Failed to allocate reg dma, ret:%lu\n",
+						PTR_ERR(last_cmd_buf_sb[i]));
 				return 0;
 			}
 		}
 	}
 	if (rc) {
 		for (i = 0; i < CTL_MAX; i++) {
-			if (!last_cmd_buf[i])
+			if (!last_cmd_buf_db[i])
 				continue;
-			dealloc_reg_dma_v1(last_cmd_buf[i]);
-			last_cmd_buf[i] = NULL;
+			dealloc_reg_dma_v1(last_cmd_buf_db[i]);
+			last_cmd_buf_db[i] = NULL;
+		}
+		for (i = 0; i < CTL_MAX; i++) {
+			if (!last_cmd_buf_sb[i])
+				continue;
+			dealloc_reg_dma_v1(last_cmd_buf_sb[i]);
+			last_cmd_buf_sb[i] = NULL;
 		}
 		return rc;
 	}
@@ -699,6 +803,33 @@ int init_v12(struct sde_hw_reg_dma *cfg)
 	return 0;
 }
 
+int init_v2(struct sde_hw_reg_dma *cfg)
+{
+	int ret = 0, i = 0;
+
+	ret = init_v12(cfg);
+	if (ret) {
+		DRM_ERROR("failed to initialize v12: ret %d\n", ret);
+		return ret;
+	}
+
+	/* initialize register offsets based on version delta */
+	reg_dma_register_count = 0x91;
+	reg_dma_ctl0_queue1_cmd0_offset = 0x1c;
+	reg_dma_error_clear_mask |= BIT(19);
+
+	reg_dma_ctl_queue1_off[CTL_0] = reg_dma_ctl0_queue1_cmd0_offset;
+	for (i = CTL_1; i < ARRAY_SIZE(reg_dma_ctl_queue_off); i++)
+		reg_dma_ctl_queue1_off[i] = reg_dma_ctl_queue1_off[i - 1] +
+				(sizeof(u32) * 4);
+
+	v1_supported[IGC] = GRP_DSPP_HW_BLK_SELECT | GRP_VIG_HW_BLK_SELECT |
+			GRP_DMA_HW_BLK_SELECT;
+	reg_dma->ops.last_command_sb = last_cmd_sb_v2;
+
+	return 0;
+}
+
 static int check_support_v1(enum sde_reg_dma_features feature,
 		     enum sde_reg_dma_blk blk,
 		     bool *is_supported)
@@ -748,7 +879,7 @@ static int kick_off_v1(struct sde_reg_dma_kickoff_cfg *cfg)
 int reset_v1(struct sde_hw_ctl *ctl)
 {
 	struct sde_hw_blk_reg_map hw;
-	u32 index, val, i = 0;
+	u32 index, val, i = 0, k = 0;
 
 	if (!ctl || ctl->idx > CTL_MAX) {
 		DRM_ERROR("invalid ctl %pK ctl idx %d\n",
@@ -756,20 +887,26 @@ int reset_v1(struct sde_hw_ctl *ctl)
 		return -EINVAL;
 	}
 
-	memset(&hw, 0, sizeof(hw));
 	index = ctl->idx - CTL_0;
-	SET_UP_REG_DMA_REG(hw, reg_dma);
-	SDE_REG_WRITE(&hw, reg_dma_opmode_offset, BIT(0));
-	SDE_REG_WRITE(&hw, (reg_dma_ctl0_reset_offset + index * sizeof(u32)),
-			BIT(0));
-
-	i = 0;
-	do {
-		udelay(1000);
-		i++;
-		val = SDE_REG_READ(&hw,
-			(reg_dma_ctl0_reset_offset + index * sizeof(u32)));
-	} while (i < 2 && val);
+	for (k = 0; k < REG_DMA_TYPE_MAX; k++) {
+		memset(&hw, 0, sizeof(hw));
+		SET_UP_REG_DMA_REG(hw, reg_dma, k);
+		if (hw.hwversion == 0)
+			continue;
+
+		SDE_REG_WRITE(&hw, reg_dma_opmode_offset, BIT(0));
+		SDE_REG_WRITE(&hw, (reg_dma_ctl0_reset_offset +
+				index * sizeof(u32)), BIT(0));
+
+		i = 0;
+		do {
+			udelay(1000);
+			i++;
+			val = SDE_REG_READ(&hw,
+					(reg_dma_ctl0_reset_offset +
+					index * sizeof(u32)));
+		} while (i < 2 && val);
+	}
 
 	return 0;
 }
@@ -973,13 +1110,13 @@ static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
 		return -EINVAL;
 	}
 
-	if (!last_cmd_buf[ctl->idx] || !last_cmd_buf[ctl->idx]->iova) {
-		DRM_DEBUG("invalid last cmd buf for idx %d\n", ctl->idx);
-		return 0;
+	if (!last_cmd_buf_db[ctl->idx] || !last_cmd_buf_db[ctl->idx]->iova) {
+		DRM_ERROR("invalid last cmd buf for idx %d\n", ctl->idx);
+		return -EINVAL;
 	}
 
-	cfg.dma_buf = last_cmd_buf[ctl->idx];
-	reset_reg_dma_buffer_v1(last_cmd_buf[ctl->idx]);
+	cfg.dma_buf = last_cmd_buf_db[ctl->idx];
+	reset_reg_dma_buffer_v1(last_cmd_buf_db[ctl->idx]);
 	if (validate_last_cmd(&cfg)) {
 		DRM_ERROR("validate buf failed\n");
 		return -EINVAL;
@@ -995,14 +1132,17 @@ static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
 	kick_off.trigger_mode = WRITE_IMMEDIATE;
 	kick_off.last_command = 1;
 	kick_off.op = REG_DMA_WRITE;
-	kick_off.dma_buf = last_cmd_buf[ctl->idx];
-	if (kick_off_v1(&kick_off)) {
+	kick_off.dma_type = REG_DMA_TYPE_DB;
+	kick_off.dma_buf = last_cmd_buf_db[ctl->idx];
+	rc = kick_off_v1(&kick_off);
+	if (rc) {
 		DRM_ERROR("kick off last cmd failed\n");
-		return -EINVAL;
+		return rc;
 	}
 
+	/* Lack of block support will be caught by kick_off */
 	memset(&hw, 0, sizeof(hw));
-	SET_UP_REG_DMA_REG(hw, reg_dma);
+	SET_UP_REG_DMA_REG(hw, reg_dma, kick_off.dma_type);
 
 	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY, mode);
 	if (mode == REG_DMA_WAIT4_COMP) {
@@ -1016,7 +1156,7 @@ static int last_cmd_v1(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
 		SDE_EVT32(SDE_EVTLOG_FUNC_EXIT, mode);
 	}
 
-	return 0;
+	return rc;
 }
 
 void deinit_v1(void)
@@ -1024,23 +1164,77 @@ void deinit_v1(void)
 	int i = 0;
 
 	for (i = CTL_0; i < CTL_MAX; i++) {
-		if (last_cmd_buf[i])
-			dealloc_reg_dma_v1(last_cmd_buf[i]);
-		last_cmd_buf[i] = NULL;
+		if (last_cmd_buf_db[i])
+			dealloc_reg_dma_v1(last_cmd_buf_db[i]);
+		last_cmd_buf_db[i] = NULL;
+		if (last_cmd_buf_sb[i])
+			dealloc_reg_dma_v1(last_cmd_buf_sb[i]);
+		last_cmd_buf_sb[i] = NULL;
 	}
 }
 
 static void dump_regs_v1(void)
 {
-	uint32_t i = 0;
+	uint32_t i = 0, k = 0;
 	u32 val;
 	struct sde_hw_blk_reg_map hw;
 
-	memset(&hw, 0, sizeof(hw));
-	SET_UP_REG_DMA_REG(hw, reg_dma);
+	for (k = 0; k < REG_DMA_TYPE_MAX; k++) {
+		memset(&hw, 0, sizeof(hw));
+		SET_UP_REG_DMA_REG(hw, reg_dma, k);
+		if (hw.hwversion == 0)
+			continue;
 
-	for (i = 0; i < reg_dma_register_count; i++) {
-		val = SDE_REG_READ(&hw, i * sizeof(u32));
-		DRM_ERROR("offset %x val %x\n", (u32)(i * sizeof(u32)), val);
+		for (i = 0; i < reg_dma_register_count; i++) {
+			val = SDE_REG_READ(&hw, i * sizeof(u32));
+			DRM_ERROR("offset %x val %x\n", (u32)(i * sizeof(u32)),
+					val);
+		}
 	}
+}
+
+static int last_cmd_sb_v2(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
+		enum sde_reg_dma_last_cmd_mode mode)
+{
+	struct sde_reg_dma_setup_ops_cfg cfg;
+	struct sde_reg_dma_kickoff_cfg kick_off;
+	int rc = 0;
+
+	if (!ctl || ctl->idx >= CTL_MAX || q >= DMA_CTL_QUEUE_MAX) {
+		DRM_ERROR("ctl %pK q %d index %d\n", ctl, q,
+				((ctl) ? ctl->idx : -1));
+		return -EINVAL;
+	}
+
+	if (!last_cmd_buf_sb[ctl->idx] || !last_cmd_buf_sb[ctl->idx]->iova) {
+		DRM_ERROR("invalid last cmd buf for idx %d\n", ctl->idx);
+		return -EINVAL;
+	}
+
+	cfg.dma_buf = last_cmd_buf_sb[ctl->idx];
+	reset_reg_dma_buffer_v1(last_cmd_buf_sb[ctl->idx]);
+	if (validate_last_cmd(&cfg)) {
+		DRM_ERROR("validate buf failed\n");
+		return -EINVAL;
+	}
+
+	if (write_last_cmd(&cfg)) {
+		DRM_ERROR("write buf failed\n");
+		return -EINVAL;
+	}
+
+	kick_off.ctl = ctl;
+	kick_off.queue_select = q;
+	kick_off.trigger_mode = WRITE_IMMEDIATE;
+	kick_off.last_command = 1;
+	kick_off.op = REG_DMA_WRITE;
+	kick_off.dma_type = REG_DMA_TYPE_SB;
+	kick_off.queue_select = DMA_CTL_QUEUE1;
+	kick_off.dma_buf = last_cmd_buf_sb[ctl->idx];
+	rc = kick_off_v1(&kick_off);
+	if (rc)
+		DRM_ERROR("kick off last cmd failed\n");
+
+	return rc;
 }

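A REG_BLK_LUT_WRITE payload starts with the two header dwords packed by write_block_lut_reg() above, followed by the LUT data handled by write_multi_reg(). A standalone sketch of the header encoding, reusing the masks from this file and the Gamut constants from sde_reg_dma.h; the lut_size value is made up:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))
#define LUTBUS_WRITE_OPCODE (BIT(30) | BIT(29))
#define LUTBUS_TABLE_SEL_MASK 0x10000
#define LUTBUS_BLOCK_SEL_MASK 0xffff
#define LUTBUS_TRANS_SZ_MASK 0xff0000
#define LUTBUS_LUT_SIZE_MASK 0x3fff

int main(void)
{
	uint32_t table_sel = 1;		/* < LUTBUS_TABLE_SELECT_MAX (2) */
	uint32_t block_sel = 1;		/* LUTBUS_BLOCK_GAMUT */
	uint32_t trans_size = 6;	/* LUTBUS_GAMUT_TRANS_SIZE */
	uint32_t lut_size = 0x12c;	/* hypothetical */
	uint32_t hdr0, hdr1;

	hdr0 = LUTBUS_WRITE_OPCODE;
	hdr0 |= (table_sel << 16) & LUTBUS_TABLE_SEL_MASK;
	hdr0 |= block_sel & LUTBUS_BLOCK_SEL_MASK;
	hdr1 = (trans_size << 16) & LUTBUS_TRANS_SZ_MASK;
	hdr1 |= lut_size & LUTBUS_LUT_SIZE_MASK;

	printf("hdr0 0x%08" PRIx32 " hdr1 0x%08" PRIx32 "\n", hdr0, hdr1);
	return 0;
}
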
+ 7 - 1
msm/sde/sde_hw_reg_dma_v1.h

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
  */
 #ifndef _SDE_HW_REG_DMA_V1_H
 #define _SDE_HW_REG_DMA_V1_H
@@ -25,6 +25,12 @@ int init_v11(struct sde_hw_reg_dma *reg_dma);
  */
 int init_v12(struct sde_hw_reg_dma *reg_dma);
 
+/**
+ * init_v2() - initialize the reg dma v2 driver by installing v2 ops
+ * @reg_dma - reg_dma hw info structure exposing capabilities.
+ */
+int init_v2(struct sde_hw_reg_dma *reg_dma);
+
 /**
  * deinit_v1() - free up any resources allocated during the v1 reg dma init
  */

+ 2 - 1
msm/sde/sde_hw_reg_dma_v1_color_proc.c

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
  */
 #include <drm/msm_drm_pp.h>
 #include "sde_reg_dma.h"
@@ -224,6 +224,7 @@ static u32 ltm_mapping[LTM_MAX] = {
 		(cfg).ctl = hw_ctl; \
 		(cfg).dma_buf = feature_dma_buf; \
 		(cfg).op = ops; \
+		(cfg).dma_type = REG_DMA_TYPE_DB; \
 		(cfg).queue_select = ctl_q; \
 		(cfg).trigger_mode = mode; \
 	} while (0)

+ 33 - 21
msm/sde/sde_reg_dma.c

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
@@ -11,6 +11,7 @@
 #define REG_DMA_VER_1_0 0x00010000
 #define REG_DMA_VER_1_1 0x00010001
 #define REG_DMA_VER_1_2 0x00010002
+#define REG_DMA_VER_2_0 0x00020000
 
 static int default_check_support(enum sde_reg_dma_features feature,
 		     enum sde_reg_dma_blk blk,
@@ -67,21 +68,34 @@ static int default_last_command(struct sde_hw_ctl *ctl,
 	return 0;
 }
 
+static int default_last_command_sb(struct sde_hw_ctl *ctl,
+		enum sde_reg_dma_queue q, enum sde_reg_dma_last_cmd_mode mode)
+{
+	return 0;
+}
+
 static void default_dump_reg(void)
 {
 }
 
-static struct sde_hw_reg_dma reg_dma = {
-	.ops = {default_check_support, default_setup_payload,
+static void set_default_dma_ops(struct sde_hw_reg_dma *reg_dma)
+{
+	static const struct sde_hw_reg_dma_ops ops = {
+		default_check_support, default_setup_payload,
 		default_kick_off, default_reset, default_alloc_reg_dma_buf,
 		default_dealloc_reg_dma, default_buf_reset_reg_dma,
-		default_last_command, default_dump_reg},
-};
+		default_last_command, default_last_command_sb,
+		default_dump_reg};
+	memcpy(&reg_dma->ops, &ops, sizeof(ops));
+}
+
+static struct sde_hw_reg_dma reg_dma;
 
 int sde_reg_dma_init(void __iomem *addr, struct sde_mdss_cfg *m,
 		struct drm_device *dev)
 {
 	int rc = 0;
+	set_default_dma_ops(&reg_dma);
 
 	if (!addr || !m || !dev) {
 		DRM_DEBUG("invalid addr %pK catalog %pK dev %pK\n", addr, m,
@@ -89,13 +103,14 @@ int sde_reg_dma_init(void __iomem *addr, struct sde_mdss_cfg *m,
 		return 0;
 	}
 
-	reg_dma.drm_dev = dev;
-	reg_dma.caps = &m->dma_cfg;
-	reg_dma.addr = addr;
-
 	if (!m->reg_dma_count)
 		return 0;
 
+	reg_dma.reg_dma_count = m->reg_dma_count;
+	reg_dma.drm_dev = dev;
+	reg_dma.addr = addr;
+	reg_dma.caps = &m->dma_cfg;
+
 	switch (reg_dma.caps->version) {
 	case REG_DMA_VER_1_0:
 		rc = init_v1(&reg_dma);
@@ -110,13 +125,18 @@ int sde_reg_dma_init(void __iomem *addr, struct sde_mdss_cfg *m,
 	case REG_DMA_VER_1_2:
 		rc = init_v12(&reg_dma);
 		if (rc)
-			DRM_DEBUG("init v11 dma ops failed\n");
+			DRM_DEBUG("init v12 dma ops failed\n");
+		break;
+	case REG_DMA_VER_2_0:
+		rc = init_v2(&reg_dma);
+		if (rc)
+			DRM_DEBUG("init v2 dma ops failed\n");
 		break;
 	default:
 		break;
 	}
 
-	return 0;
+	return rc;
 }
 
 struct sde_hw_reg_dma_ops *sde_reg_dma_get_ops(void)
@@ -126,27 +146,19 @@ struct sde_hw_reg_dma_ops *sde_reg_dma_get_ops(void)
 
 void sde_reg_dma_deinit(void)
 {
-	struct sde_hw_reg_dma op = {
-	.ops = {default_check_support, default_setup_payload,
-		default_kick_off, default_reset, default_alloc_reg_dma_buf,
-		default_dealloc_reg_dma, default_buf_reset_reg_dma,
-		default_last_command, default_dump_reg},
-	};
-
 	if (!reg_dma.drm_dev || !reg_dma.caps)
 		return;
 
 	switch (reg_dma.caps->version) {
 	case REG_DMA_VER_1_0:
-		deinit_v1();
-		break;
 	case REG_DMA_VER_1_1:
 	case REG_DMA_VER_1_2:
+	case REG_DMA_VER_2_0:
 		deinit_v1();
 		break;
 	default:
 		break;
 	}
 	memset(&reg_dma, 0, sizeof(reg_dma));
-	memcpy(&reg_dma.ops, &op.ops, sizeof(op.ops));
+	set_default_dma_ops(&reg_dma);
 }

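The init/deinit rework above hinges on a common pattern: keep the ops table populated with callable no-op defaults so callers never dereference a NULL pointer, and restore those defaults on teardown. A standalone sketch of that pattern (names are illustrative, not driver symbols):

#include <stdio.h>

struct dma_ops {
	int (*last_command_sb)(int q);
};

/* harmless default, mirrors default_last_command_sb() above */
static int noop_last_command_sb(int q)
{
	return 0;
}

static void set_default_ops(struct dma_ops *ops)
{
	ops->last_command_sb = noop_last_command_sb;
}

int main(void)
{
	struct dma_ops ops;

	set_default_ops(&ops);	/* both init and deinit land here */
	printf("rc %d\n", ops.last_command_sb(1));
	return 0;
}
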
+ 39 - 3
msm/sde/sde_reg_dma.h

@@ -90,6 +90,22 @@ enum sde_reg_dma_queue {
 	DMA_CTL_QUEUE_MAX,
 };
 
+#define LUTBUS_TABLE_SELECT_MAX 2
+#define LUTBUS_IGC_TRANS_SIZE 3
+#define LUTBUS_GAMUT_TRANS_SIZE 6
+
+/**
+ * enum sde_reg_dma_lutbus_block - block select values for lutbus op
+ * @LUTBUS_BLOCK_IGC: select IGC block
+ * @LUTBUS_BLOCK_GAMUT: select GAMUT block
+ * @LUTBUS_BLOCK_MAX: invalid selection
+ */
+enum sde_reg_dma_lutbus_block {
+	LUTBUS_BLOCK_IGC = 0,
+	LUTBUS_BLOCK_GAMUT,
+	LUTBUS_BLOCK_MAX,
+};
+
 /**
  * enum sde_reg_dma_trigger_mode - defines reg dma ops trigger mode
  * @WRITE_IMMEDIATE: trigger write op immediately
@@ -117,8 +133,11 @@ enum sde_reg_dma_trigger_mode {
  *                        register
  * @REG_BLK_WRITE_MULTIPLE: op for writing hw index based registers at
  *                         non-consecutive location
- * @REG_SINGLE_MODIFY: op for modifying single register value
- *                    with bitmask at the address provided
+ * @REG_SINGLE_MODIFY: op for modifying single register value with bitmask at
+ *                    the address provided (Reg = (Reg & Mask) | Data);
+ *                    the broadcast feature is not supported with this opcode.
+ * @REG_BLK_LUT_WRITE: op for faster block LUT writes; currently only
+ *                     DSPP/SSPP Gamut and DSPP IGC are supported.
  * @REG_DMA_SETUP_OPS_MAX: invalid operation
  */
 enum sde_reg_dma_setup_ops {
@@ -128,6 +147,7 @@ enum sde_reg_dma_setup_ops {
 	REG_BLK_WRITE_INC,
 	REG_BLK_WRITE_MULTIPLE,
 	REG_SINGLE_MODIFY,
+	REG_BLK_LUT_WRITE,
 	REG_DMA_SETUP_OPS_MAX,
 };
 
@@ -232,7 +252,12 @@ struct sde_reg_dma_buffer {
  *                                performed
  * @data: pointer to payload which has to be written into reg dma buffer for
  *        selected op.
+ * @mask: mask value for REG_SINGLE_MODIFY op
  * @data_size: size of payload in data
+ * @table_sel: table select value for REG_BLK_LUT_WRITE opcode
+ * @block_sel: block select value for REG_BLK_LUT_WRITE opcode
+ * @trans_size: transfer size for REG_BLK_LUT_WRITE opcode
+ * @lut_size: LUT size, expressed in units of the transfer size
  */
 struct sde_reg_dma_setup_ops_cfg {
 	enum sde_reg_dma_setup_ops ops;
@@ -245,6 +270,10 @@ struct sde_reg_dma_setup_ops_cfg {
 	u32 *data;
 	u32 mask;
 	u32 data_size;
+	u32 table_sel;
+	u32 block_sel;
+	u32 trans_size;
+	u32 lut_size;
 };
 
 /**
@@ -254,6 +283,7 @@ struct sde_reg_dma_setup_ops_cfg {
  * @block_select: histogram read select
  * @trigger_mode: reg dma ops trigger mode
  * @queue_select: queue on which reg dma buffer will be submitted
+ * @dma_type: DB or SB LUT DMA block selection
  * @last_command: last command for this vsync
  */
 struct sde_reg_dma_kickoff_cfg {
@@ -263,6 +293,7 @@ struct sde_reg_dma_kickoff_cfg {
 	enum sde_reg_dma_read_sel block_select;
 	enum sde_reg_dma_trigger_mode trigger_mode;
 	enum sde_reg_dma_queue queue_select;
+	enum sde_reg_dma_type dma_type;
 	u32 last_command;
 };
 
@@ -280,6 +311,7 @@ struct sde_reg_dma_kickoff_cfg {
  * @dealloc_reg_dma: de-allocate reg dma buffer
  * @reset_reg_dma_buf: reset the buffer to init state
  * @last_command: notify control that last command is queued
+ * @last_command_sb: notify control that last command for SB LUTDMA is queued
  * @dump_regs: dump reg dma registers
  */
 struct sde_hw_reg_dma_ops {
@@ -294,18 +326,22 @@ struct sde_hw_reg_dma_ops {
 	int (*reset_reg_dma_buf)(struct sde_reg_dma_buffer *buf);
 	int (*last_command)(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
 			enum sde_reg_dma_last_cmd_mode mode);
+	int (*last_command_sb)(struct sde_hw_ctl *ctl, enum sde_reg_dma_queue q,
+			enum sde_reg_dma_last_cmd_mode mode);
 	void (*dump_regs)(void);
 };
 
 /**
  * struct sde_hw_reg_dma - structure to hold reg dma hw info
  * @drm_dev: drm driver dev handle
- * @caps: reg dma hw caps on the platform
+ * @reg_dma_count: number of LUTDMA hw instances
+ * @caps: LUTDMA hw caps on the platform
  * @ops: reg dma ops supported on the platform
  * @addr: reg dma hw block base address
  */
 struct sde_hw_reg_dma {
 	struct drm_device *drm_dev;
+	u32 reg_dma_count;
 	const struct sde_reg_dma_cfg *caps;
 	struct sde_hw_reg_dma_ops ops;
 	void __iomem *addr;
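Putting the interface together: a hedged sketch of how a color-processing client might program an SB LUT, assuming the usual setup_payload/kick_off op names and that the buffer, payload, and block/feature selection are prepared elsewhere. SB submissions must use DMA_CTL_QUEUE1 and REG_DMA_WRITE, as enforced by validate_kick_off_v1(); decode-sel programming and error paths are omitted:

/* Illustrative only; not taken from the driver. */
static int example_sb_gamut_write(struct sde_hw_ctl *ctl,
		struct sde_reg_dma_buffer *buf, u32 *payload, u32 len)
{
	struct sde_hw_reg_dma_ops *ops = sde_reg_dma_get_ops();
	struct sde_reg_dma_setup_ops_cfg cfg = { 0 };
	struct sde_reg_dma_kickoff_cfg kick = { 0 };
	int rc;

	cfg.ops = REG_BLK_LUT_WRITE;
	cfg.dma_buf = buf;
	cfg.data = payload;
	cfg.data_size = len;
	cfg.table_sel = 0;			/* hypothetical table */
	cfg.block_sel = LUTBUS_BLOCK_GAMUT;
	cfg.trans_size = LUTBUS_GAMUT_TRANS_SIZE;
	cfg.lut_size = len / sizeof(u32);
	rc = ops->setup_payload(&cfg);
	if (rc)
		return rc;

	kick.ctl = ctl;
	kick.op = REG_DMA_WRITE;
	kick.dma_type = REG_DMA_TYPE_SB;
	kick.queue_select = DMA_CTL_QUEUE1;	/* required for SB */
	kick.trigger_mode = WRITE_IMMEDIATE;
	kick.dma_buf = buf;
	rc = ops->kick_off(&kick);
	if (rc)
		return rc;

	/* SB is triggered by the DSPP_SB flush, not a SW trigger */
	return ops->last_command_sb(ctl, DMA_CTL_QUEUE1, REG_DMA_NOWAIT);
}

REG_DMA_NOWAIT is assumed here to be one of the sde_reg_dma_last_cmd_mode values; substitute whichever mode the caller actually needs.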