Merge "disp: msm: add support for no blend planes"

qctecmdr committed 5 years ago, commit 76d89c1e24

+ 56 - 0
include/uapi/display/drm/msm_drm_pp.h

@@ -593,6 +593,62 @@ struct drm_msm_spr_init_cfg {
 	int cfg17[SPR_INIT_PARAM_SIZE_4];
 };
 
+#define FEATURE_DEM
+#define CFG0_PARAM_LEN 8
+#define CFG1_PARAM_LEN 8
+#define CFG1_PARAM0_LEN 153
+#define CFG0_PARAM2_LEN 256
+#define CFG5_PARAM01_LEN 4
+#define CFG3_PARAM01_LEN 4
+
+struct drm_msm_dem_cfg {
+	__u64 flags;
+	__u32 pentile;
+	__u32 cfg0_en;
+	__u32 cfg0_param0_len;
+	__u32 cfg0_param0[CFG0_PARAM_LEN];
+	__u32 cfg0_param1_len;
+	__u32 cfg0_param1[CFG0_PARAM_LEN];
+	__u32 cfg0_param2_len;
+	__u64 cfg0_param2_c0[CFG0_PARAM2_LEN];
+	__u64 cfg0_param2_c1[CFG0_PARAM2_LEN];
+	__u64 cfg0_param2_c2[CFG0_PARAM2_LEN];
+	__u32 cfg0_param3_len;
+	__u32 cfg0_param3_c0[CFG0_PARAM_LEN];
+	__u32 cfg0_param3_c1[CFG0_PARAM_LEN];
+	__u32 cfg0_param3_c2[CFG0_PARAM_LEN];
+	__u32 cfg0_param4_len;
+	__u32 cfg0_param4[CFG0_PARAM_LEN];
+
+	__u32 cfg1_en;
+	__u32 cfg1_high_idx;
+	__u32 cfg1_low_idx;
+	__u32 cfg01_param0_len;
+	__u32 cfg01_param0[CFG1_PARAM_LEN];
+	__u32 cfg1_param0_len;
+	__u32 cfg1_param0_c0[CFG1_PARAM0_LEN];
+	__u32 cfg1_param0_c1[CFG1_PARAM0_LEN];
+	__u32 cfg1_param0_c2[CFG1_PARAM0_LEN];
+
+	__u32 cfg2_en;
+	__u32 cfg3_en;
+	__u32 cfg3_param0_len;
+	__u32 cfg3_param0_a[CFG3_PARAM01_LEN];
+	__u32 cfg3_param0_b[CFG3_PARAM01_LEN];
+	__u32 cfg3_ab_adj;
+	__u32 cfg4_en;
+	__u32 cfg5_en;
+	__u32 cfg5_param0_len;
+	__u32 cfg5_param0[CFG5_PARAM01_LEN];
+	__u32 cfg5_param1_len;
+	__u32 cfg5_param1[CFG5_PARAM01_LEN];
+
+	__u32 c0_depth;
+	__u32 c1_depth;
+	__u32 c2_depth;
+	__u32 src_id;
+};
+
 /**
  * struct drm_msm_ad4_manual_str_cfg - ad4 manual strength config set
  * by user-space client.

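The new drm_msm_dem_cfg struct above travels to the kernel as a blob attached to the DEMURA_INIT_V1 CRTC property installed further down in sde_color_processing.c. A minimal userspace sketch, assuming fd is a DRM master fd, prop_id was resolved from the CRTC's property list, and the <display/drm/msm_drm_pp.h> install path; demura_init_v1_set and the partially filled tables are illustrative only:

    #include <string.h>
    #include <stdint.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>
    #include <display/drm/msm_drm_pp.h>

    int demura_init_v1_set(int fd, uint32_t crtc_id, uint32_t prop_id)
    {
    	struct drm_msm_dem_cfg cfg;
    	uint32_t blob_id = 0;
    	int ret;

    	memset(&cfg, 0, sizeof(cfg));
    	cfg.cfg0_en = 1;
    	cfg.cfg0_param0_len = CFG0_PARAM_LEN;
    	/* ...fill the remaining cfg*_param tables from calibration data... */

    	ret = drmModeCreatePropertyBlob(fd, &cfg, sizeof(cfg), &blob_id);
    	if (ret)
    		return ret;

    	/* legacy set-property path; drmModeAtomicAddProperty() works too */
    	return drmModeObjectSetProperty(fd, crtc_id, DRM_MODE_OBJECT_CRTC,
    					prop_id, blob_id);
    }

The kernel side compares the blob length against sizeof(struct drm_msm_dem_cfg) in reg_dmav1_setup_demurav1(), so the full struct is expected even when only cfg0 is enabled.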
+ 3 - 1
include/uapi/display/drm/sde_drm.h

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
 /*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _SDE_DRM_H_
@@ -59,12 +59,14 @@ extern "C" {
  *                                  less than 0xff, apply modulation as well.
  * @SDE_DRM_BLEND_OP_MAX:           Used to track maximum blend operation
  *                                  possible by mdp.
+ * @SDE_DRM_BLEND_OP_SKIP:          Skip staging the layer in the layer mixer.
  */
 #define SDE_DRM_BLEND_OP_NOT_DEFINED    0
 #define SDE_DRM_BLEND_OP_OPAQUE         1
 #define SDE_DRM_BLEND_OP_PREMULTIPLIED  2
 #define SDE_DRM_BLEND_OP_COVERAGE       3
 #define SDE_DRM_BLEND_OP_MAX            4
+#define SDE_DRM_BLEND_OP_SKIP           5
 
 /**
  * Bit masks for "src_config" property

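SDE_DRM_BLEND_OP_SKIP marks a plane the CRTC should fetch but never stage in a layer mixer; the demura correction buffer is the intended user (see sde_crtc.c below). A hedged sketch of selecting it from userspace, assuming blend_op_prop_id was resolved from the plane's "blend_op" enum property and the <display/drm/sde_drm.h> install path:

    #include <stdint.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>
    #include <display/drm/sde_drm.h>

    int plane_skip_blending(int fd, uint32_t plane_id, uint32_t blend_op_prop_id)
    {
    	drmModeAtomicReqPtr req = drmModeAtomicAlloc();
    	int ret;

    	if (!req)
    		return -1;
    	/* advertised in the plane's enum list as "skip_blending" */
    	drmModeAtomicAddProperty(req, plane_id, blend_op_prop_id,
    				 SDE_DRM_BLEND_OP_SKIP);
    	ret = drmModeAtomicCommit(fd, req, 0, NULL);
    	drmModeAtomicFree(req);
    	return ret;
    }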
+ 3 - 1
msm/dsi/dsi_ctrl.h

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _DSI_CTRL_H_
@@ -195,6 +195,7 @@ struct dsi_ctrl_interrupts {
  * @clk_cb:		 Callback for DSI clock control.
  * @irq_info:            Interrupt information.
  * @recovery_cb:         Recovery call back to SDE.
+ * @panel_id_cb:         Callback for reporting panel id.
  * @clk_info:            Clock information.
 * @clk_freq:            DSI Link clock frequency information.
  * @pwr_info:            Power information.
@@ -247,6 +248,7 @@ struct dsi_ctrl {
 
 	struct dsi_ctrl_interrupts irq_info;
 	struct dsi_event_cb_info recovery_cb;
+	struct dsi_event_cb_info panel_id_cb;
 
 	/* Clock and power states */
 	struct dsi_ctrl_clk_info clk_info;

+ 35 - 1
msm/dsi/dsi_display.c

@@ -2044,6 +2044,12 @@ void dsi_display_enable_event(struct drm_connector *connector,
 							*event_info;
 		}
 		break;
+	case SDE_CONN_EVENT_PANEL_ID:
+		if (event_info)
+			display_for_each_ctrl(i, display)
+				display->ctrl[i].ctrl->panel_id_cb
+				    = *event_info;
+		break;
 	default:
 		/* nothing to do */
 		DSI_DEBUG("[%s] unhandled event %d\n", display->name, event_idx);
@@ -2151,6 +2157,7 @@ static void dsi_display_parse_cmdline_topology(struct dsi_display *display,
 	char *sw_te = NULL;
 	unsigned long cmdline_topology = NO_OVERRIDE;
 	unsigned long cmdline_timing = NO_OVERRIDE;
+	unsigned long panel_id = NO_OVERRIDE;
 
 	if (display_type >= MAX_DSI_ACTIVE_DISPLAY) {
 		DSI_ERR("display_type=%d not supported\n", display_type);
@@ -2166,6 +2173,17 @@ static void dsi_display_parse_cmdline_topology(struct dsi_display *display,
 	if (sw_te)
 		display->sw_te_using_wd = true;
 
+	str = strnstr(boot_str, ":panelid", strlen(boot_str));
+	if (str) {
+		if (kstrtol(str + strlen(":panelid"), INT_BASE_10,
+				(unsigned long *)&panel_id)) {
+			DSI_INFO("panel id not found: %s\n", boot_str);
+		} else {
+			DSI_INFO("panel id found: %lx\n", panel_id);
+			display->panel_id = panel_id;
+		}
+	}
+
 	str = strnstr(boot_str, ":config", strlen(boot_str));
 	if (!str)
 		goto end;
@@ -5193,6 +5211,8 @@ int dsi_display_dev_probe(struct platform_device *pdev)
 		rc =  -EINVAL;
 		goto end;
 	}
+	/* initialize panel id to UINT64_MAX */
+	display->panel_id = ~0x0;
 
 	display->display_type = of_get_property(pdev->dev.of_node,
 				"label", NULL);
@@ -7314,6 +7334,18 @@ int dsi_display_pre_commit(void *display,
 	return rc;
 }
 
+static void dsi_display_panel_id_notification(struct dsi_display *display)
+{
+	if (display->panel_id != ~0x0 &&
+		display->ctrl[0].ctrl->panel_id_cb.event_cb) {
+		display->ctrl[0].ctrl->panel_id_cb.event_cb(
+			display->ctrl[0].ctrl->panel_id_cb.event_usr_ptr,
+			display->ctrl[0].ctrl->panel_id_cb.event_idx,
+			0, ((display->panel_id & 0xffffffff00000000) >> 31),
+			(display->panel_id & 0xffffffff), 0, 0);
+	}
+}
+
 int dsi_display_enable(struct dsi_display *display)
 {
 	int rc = 0;
@@ -7346,6 +7378,8 @@ int dsi_display_enable(struct dsi_display *display)
 
 		display->panel->panel_initialized = true;
 		DSI_DEBUG("cont splash enabled, display enable not required\n");
+		dsi_display_panel_id_notification(display);
+
 		return 0;
 	}
 
@@ -7369,7 +7403,7 @@ int dsi_display_enable(struct dsi_display *display)
 			goto error;
 		}
 	}
-
+	dsi_display_panel_id_notification(display);
 	/* Block sending pps command if modeset is due to fps difference */
 	if ((mode->priv_info->dsc_enabled ||
 			mode->priv_info->vdc_enabled) &&

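The parser accepts an optional ":panelid<n>" token in the display boot string. kstrtol() is called with INT_BASE_10, so the value is decimal on the command line even though it is logged as hex, and since kstrtol() rejects trailing characters the token effectively has to come last. An illustrative boot parameter (the display name is made up):

    msm_drm.dsi_display0=qcom,mdss_dsi_panel_video:config0:timing0:panelid1234

If the token is absent or fails to parse, display->panel_id keeps the ~0x0 "no panel id" default set in dsi_display_dev_probe(), and dsi_display_panel_id_notification() stays silent.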
+ 3 - 0
msm/dsi/dsi_display.h

@@ -275,6 +275,9 @@ struct dsi_display {
 	u32 clk_gating_config;
 	bool queue_cmd_waits;
 	struct workqueue_struct *dma_cmd_workq;
+
+	/* panel id of the display */
+	u64 panel_id;
 };
 
 int dsi_display_dev_probe(struct platform_device *pdev);

+ 1 - 0
msm/msm_drv.h

@@ -184,6 +184,7 @@ enum msm_mdp_conn_property {
 	CONNECTOR_PROP_EXT_HDR_INFO,
 	CONNECTOR_PROP_PP_DITHER,
 	CONNECTOR_PROP_HDR_METADATA,
+	CONNECTOR_PROP_DEMURA_PANEL_ID,
 
 	/* # of blob properties */
 	CONNECTOR_PROP_BLOBCOUNT,

+ 47 - 1
msm/sde/sde_color_processing.c

@@ -72,13 +72,14 @@ static void dspp_hist_install_property(struct drm_crtc *crtc);
 
 static void dspp_dither_install_property(struct drm_crtc *crtc);
 
+static void dspp_demura_install_property(struct drm_crtc *crtc);
+
 typedef void (*dspp_prop_install_func_t)(struct drm_crtc *crtc);
 
 static dspp_prop_install_func_t dspp_prop_install_func[SDE_DSPP_MAX];
 
 static void sde_cp_update_list(struct sde_cp_node *prop_node,
 		struct sde_crtc *crtc, bool dirty_list);
-
 static int sde_cp_ad_validate_prop(struct sde_cp_node *prop_node,
 		struct sde_crtc *crtc);
 
@@ -118,6 +119,7 @@ do { \
 	func[SDE_DSPP_HIST] = dspp_hist_install_property; \
 	func[SDE_DSPP_DITHER] = dspp_dither_install_property; \
 	func[SDE_DSPP_RC] = dspp_rc_install_property; \
+	func[SDE_DSPP_DEMURA] = dspp_demura_install_property; \
 } while (0)
 
 typedef void (*lm_prop_install_func_t)(struct drm_crtc *crtc);
@@ -168,6 +170,8 @@ enum sde_cp_crtc_features {
 	SDE_CP_CRTC_DSPP_SB,
 	SDE_CP_CRTC_DSPP_RC_MASK,
 	SDE_CP_CRTC_DSPP_SPR_INIT,
+	SDE_CP_CRTC_DSPP_DEMURA_INIT,
+	SDE_CP_CRTC_DSPP_DEMURA_BACKLIGHT,
 	SDE_CP_CRTC_DSPP_MAX,
 	/* DSPP features end */
 
@@ -826,6 +830,20 @@ static int set_spr_init_feature(struct sde_hw_dspp *hw_dspp,
 	return ret;
 }
 
+static int set_demura_feature(struct sde_hw_dspp *hw_dspp,
+				   struct sde_hw_cp_cfg *hw_cfg,
+				   struct sde_crtc *hw_crtc)
+{
+	int ret = 0;
+
+	if (!hw_dspp || !hw_dspp->ops.setup_demura_cfg)
+		ret = -EINVAL;
+	else
+		hw_dspp->ops.setup_demura_cfg(hw_dspp, hw_cfg);
+
+	return ret;
+}
+
 feature_wrapper check_crtc_feature_wrappers[SDE_CP_CRTC_MAX_FEATURES];
 #define setup_check_crtc_feature_wrappers(wrappers) \
 do { \
@@ -876,6 +894,7 @@ do { \
 	wrappers[SDE_CP_CRTC_DSPP_LTM_HIST_CTL] = set_ltm_hist_crtl_feature; \
 	wrappers[SDE_CP_CRTC_DSPP_RC_MASK] = set_rc_mask_feature; \
 	wrappers[SDE_CP_CRTC_DSPP_SPR_INIT] = set_spr_init_feature; \
+	wrappers[SDE_CP_CRTC_DSPP_DEMURA_INIT] = set_demura_feature; \
 } while (0)
 
 feature_wrapper set_crtc_pu_feature_wrappers[SDE_CP_CRTC_MAX_PU_FEATURES];
@@ -1538,6 +1557,8 @@ static const int dspp_feature_to_sub_blk_tbl[SDE_CP_CRTC_MAX_FEATURES] = {
 	[SDE_CP_CRTC_DSPP_SB] = SDE_DSPP_SB,
 	[SDE_CP_CRTC_DSPP_SPR_INIT] = SDE_DSPP_SPR,
 	[SDE_CP_CRTC_DSPP_RC_MASK] = SDE_DSPP_RC,
+	[SDE_CP_CRTC_DSPP_DEMURA_INIT] = SDE_DSPP_DEMURA,
+	[SDE_CP_CRTC_DSPP_DEMURA_BACKLIGHT] = SDE_DSPP_DEMURA,
 	[SDE_CP_CRTC_DSPP_MAX] = SDE_DSPP_MAX,
 	[SDE_CP_CRTC_LM_GC] = SDE_DSPP_MAX,
 };
@@ -2759,6 +2780,31 @@ static void dspp_dither_install_property(struct drm_crtc *crtc)
 	}
 }
 
+static void dspp_demura_install_property(struct drm_crtc *crtc)
+{
+	struct sde_kms *kms = NULL;
+	struct sde_mdss_cfg *catalog = NULL;
+	u32 version;
+
+	kms = get_kms(crtc);
+	catalog = kms->catalog;
+
+	version = catalog->dspp[0].sblk->demura.version >> 16;
+	switch (version) {
+	case 1:
+		sde_cp_crtc_install_blob_property(crtc, "DEMURA_INIT_V1",
+			SDE_CP_CRTC_DSPP_DEMURA_INIT,
+			sizeof(struct drm_msm_dem_cfg));
+		sde_cp_crtc_install_range_property(crtc, "DEMURA_BACKLIGHT",
+				SDE_CP_CRTC_DSPP_DEMURA_BACKLIGHT,
+				0, 1024, 0);
+		break;
+	default:
+		DRM_ERROR("version %d not supported\n", version);
+		break;
+	}
+}
+
 static void sde_cp_update_list(struct sde_cp_node *prop_node,
 		struct sde_crtc *crtc, bool dirty_list)
 {

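DEMURA_INIT_V1 is a blob property sized to struct drm_msm_dem_cfg (see the UAPI hunk above), while DEMURA_BACKLIGHT is a plain range property clamped to 0..1024. Extending the earlier atomic sketch, with bl_prop_id assumed to be the id of "DEMURA_BACKLIGHT":

    /* track the panel backlight so the demura block can compensate */
    drmModeAtomicAddProperty(req, crtc_id, bl_prop_id, 512 /* of 1024 */);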
+ 52 - 0
msm/sde/sde_connector.c

@@ -378,6 +378,37 @@ int sde_connector_state_get_mode_info(struct drm_connector_state *conn_state,
 	return 0;
 }
 
+static int sde_connector_handle_panel_id(uint32_t event_idx,
+			uint32_t instance_idx, void *usr,
+			uint32_t data0, uint32_t data1,
+			uint32_t data2, uint32_t data3)
+{
+	struct sde_connector *c_conn = usr;
+	int i;
+	u64 panel_id;
+	u8 arr[8], shift;
+	u64 mask = 0xff;
+
+	if (!c_conn)
+		return -EINVAL;
+
+	panel_id = (((u64)data0) << 31) | data1;
+	if (panel_id == ~0x0)
+		return 0;
+
+	for (i = 0; i < 8; i++) {
+		shift = 8 * i;
+		arr[7 - i] = (u8)((panel_id & (mask << shift)) >> shift);
+	}
+	/* update the panel id */
+	msm_property_set_blob(&c_conn->property_info,
+		  &c_conn->blob_panel_id, arr, sizeof(arr),
+		  CONNECTOR_PROP_DEMURA_PANEL_ID);
+	sde_connector_register_event(&c_conn->base,
+			SDE_CONN_EVENT_PANEL_ID, NULL, c_conn);
+	return 0;
+}
+
 static int sde_connector_handle_disp_recovery(uint32_t event_idx,
 			uint32_t instance_idx, void *usr,
 			uint32_t data0, uint32_t data1,
@@ -2397,6 +2428,7 @@ static int _sde_connector_install_properties(struct drm_device *dev,
 	struct dsi_display *dsi_display;
 	int rc;
 	struct drm_connector *connector;
+	u64 panel_id = ~0x0;
 
 	msm_property_install_blob(&c_conn->property_info, "capabilities",
 			DRM_MODE_PROP_IMMUTABLE, CONNECTOR_PROP_SDE_INFO);
@@ -2484,6 +2516,17 @@ static int _sde_connector_install_properties(struct drm_device *dev,
 				e_frame_trigger_mode,
 				ARRAY_SIZE(e_frame_trigger_mode),
 				CONNECTOR_PROP_CMD_FRAME_TRIGGER_MODE);
+
+		if (sde_kms->catalog->has_demura) {
+			msm_property_install_blob(&c_conn->property_info,
+				"DEMURA_PANEL_ID", DRM_MODE_PROP_IMMUTABLE,
+				CONNECTOR_PROP_DEMURA_PANEL_ID);
+			msm_property_set_blob(&c_conn->property_info,
+			      &c_conn->blob_panel_id,
+			      &panel_id,
+			      sizeof(panel_id),
+			      CONNECTOR_PROP_DEMURA_PANEL_ID);
+		}
 	}
 
 	msm_property_install_range(&c_conn->property_info, "bl_scale",
@@ -2652,6 +2695,15 @@ struct drm_connector *sde_connector_init(struct drm_device *dev,
 	if (rc)
 		goto error_cleanup_fence;
 
+	if (connector_type == DRM_MODE_CONNECTOR_DSI &&
+			sde_kms->catalog->has_demura) {
+		rc = sde_connector_register_event(&c_conn->base,
+			SDE_CONN_EVENT_PANEL_ID,
+			sde_connector_handle_panel_id, c_conn);
+		if (rc)
+			SDE_ERROR("register panel id event err %d\n", rc);
+	}
+
 	rc = msm_property_install_get_status(&c_conn->property_info);
 	if (rc) {
 		SDE_ERROR("failed to create one or more properties\n");

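sde_connector_handle_panel_id() stores the 64-bit id into arr[] most-significant byte first, so the 8-byte DEMURA_PANEL_ID blob reads back big-endian; it then re-registers the event with a NULL callback, making the notification effectively one-shot. A sketch of recovering the id in userspace, assuming blob_id came from the connector's property list:

    #include <stdint.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    uint64_t read_panel_id(int fd, uint32_t blob_id)
    {
    	drmModePropertyBlobPtr blob = drmModeGetPropertyBlob(fd, blob_id);
    	uint64_t panel_id = ~0ULL;	/* kernel default: no panel id */
    	int i;

    	if (blob && blob->length == 8) {
    		const uint8_t *b = blob->data;

    		panel_id = 0;
    		for (i = 0; i < 8; i++)	/* arr[0] holds the MSB */
    			panel_id = (panel_id << 8) | b[i];
    	}
    	drmModeFreePropertyBlob(blob);
    	return panel_id;
    }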
+ 4 - 1
msm/sde/sde_connector.h

@@ -358,6 +358,7 @@ enum sde_connector_events {
 	SDE_CONN_EVENT_CMD_DONE, /* command mode frame done */
 	SDE_CONN_EVENT_VID_FIFO_OVERFLOW, /* dsi fifo overflow error */
 	SDE_CONN_EVENT_CMD_FIFO_UNDERFLOW, /* dsi fifo underflow error */
+	SDE_CONN_EVENT_PANEL_ID, /* reports panel id read from DDIC */
 	SDE_CONN_EVENT_COUNT,
 };
 
@@ -406,6 +407,7 @@ struct sde_connector_dyn_hdr_metadata {
  * @blob_ext_hdr: Pointer to blob structure for 'ext_hdr_properties' property
  * @blob_dither: Pointer to blob structure for default dither config
  * @blob_mode_info: Pointer to blob structure for mode info
+ * @blob_panel_id: Pointer to blob structure for the panel id property
  * @fb_kmap: true if kernel mapping of framebuffer is requested
  * @event_table: Array of registered events
  * @event_lock: Lock object for event_table
@@ -429,7 +431,7 @@ struct sde_connector_dyn_hdr_metadata {
  * @qsync_mode: Cached Qsync mode, 0=disabled, 1=continuous mode
  * @qsync_updated: Qsync settings were updated
  * @colorspace_updated: Colorspace property was updated
- * last_cmd_tx_sts: status of the last command transfer
+ * @last_cmd_tx_sts: status of the last command transfer
  * @hdr_capable: external hdr support present
  * @core_clk_rate: MDP core clk rate used for dynamic HDR packet calculation
  */
@@ -461,6 +463,7 @@ struct sde_connector {
 	struct drm_property_blob *blob_ext_hdr;
 	struct drm_property_blob *blob_dither;
 	struct drm_property_blob *blob_mode_info;
+	struct drm_property_blob *blob_panel_id;
 
 	bool fb_kmap;
 	struct sde_connector_evt event_table[SDE_CONN_EVENT_COUNT];

+ 76 - 33
msm/sde/sde_crtc.c

@@ -93,6 +93,8 @@ static struct sde_crtc_custom_events custom_events[] = {
 #define MAX_FRAME_COUNT			1000
 #define MILI_TO_MICRO			1000
 
+#define SKIP_STAGING_PIPE_ZPOS		255
+
 static inline struct sde_kms *_sde_crtc_get_kms(struct drm_crtc *crtc)
 {
 	struct msm_drm_private *priv;
@@ -491,6 +493,11 @@ static void _sde_crtc_setup_blend_cfg(struct sde_crtc_mixer *mixer,
 			}
 		}
 		break;
+
+	case SDE_DRM_BLEND_OP_SKIP:
+		SDE_ERROR("skip the blending for plane\n");
+		return;
+
 	default:
 		/* do nothing */
 		break;
@@ -1326,6 +1333,24 @@ static void _sde_crtc_set_src_split_order(struct drm_crtc *crtc,
 			cur_pstate->sde_pstate->pipe_order_flags);
 	}
 }
+
+static void __sde_crtc_assign_active_cfg(struct sde_crtc *sdecrtc,
+				struct drm_plane *plane)
+{
+	u8 found = 0;
+	int i;
+
+	for (i = 0; i < SDE_STAGE_MAX && !found; i++) {
+		if (sdecrtc->active_cfg.stage[i][0] == SSPP_NONE)
+			found = 1;
+	}
+	if (!found) {
+		SDE_ERROR("All active configs are allocated\n");
+		return;
+	}
+	sdecrtc->active_cfg.stage[i][0] = sde_plane_pipe(plane);
+}
+
 static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
 		struct drm_crtc_state *old_state, struct sde_crtc *sde_crtc,
 		struct sde_crtc_mixer *mixer)
@@ -1345,6 +1370,7 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
 	int zpos_cnt[SDE_STAGE_MAX + 1] = { 0 };
 	int i, cnt = 0;
 	bool bg_alpha_enable = false;
+	u32 blend_type;
 
 	if (!sde_crtc || !crtc->state || !mixer) {
 		SDE_ERROR("invalid sde_crtc or mixer\n");
@@ -1388,38 +1414,50 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
 			goto end;
 		}
 
-		if (pstate->stage == SDE_STAGE_BASE && format->alpha_enable)
-			bg_alpha_enable = true;
-
-		SDE_EVT32(DRMID(crtc), DRMID(plane),
-				state->fb ? state->fb->base.id : -1,
-				state->src_x >> 16, state->src_y >> 16,
-				state->src_w >> 16, state->src_h >> 16,
-				state->crtc_x, state->crtc_y,
-				state->crtc_w, state->crtc_h,
-				pstate->rotation);
-
-		stage_idx = zpos_cnt[pstate->stage]++;
-		stage_cfg->stage[pstate->stage][stage_idx] =
-					sde_plane_pipe(plane);
-		stage_cfg->multirect_index[pstate->stage][stage_idx] =
-					pstate->multirect_index;
-
-		SDE_EVT32(DRMID(crtc), DRMID(plane), stage_idx,
-			sde_plane_pipe(plane) - SSPP_VIG0, pstate->stage,
-			pstate->multirect_index, pstate->multirect_mode,
-			format->base.pixel_format, fb ? fb->modifier : 0);
-
-		/* blend config update */
-		for (lm_idx = 0; lm_idx < sde_crtc->num_mixers; lm_idx++) {
-			_sde_crtc_setup_blend_cfg(mixer + lm_idx, pstate,
-								format);
+		blend_type = sde_plane_get_property(pstate,
+					PLANE_PROP_BLEND_OP);
 
-			if (bg_alpha_enable && !format->alpha_enable)
-				mixer[lm_idx].mixer_op_mode = 0;
-			else
-				mixer[lm_idx].mixer_op_mode |=
-						1 << pstate->stage;
+		if (blend_type == SDE_DRM_BLEND_OP_SKIP) {
+			__sde_crtc_assign_active_cfg(sde_crtc, plane);
+		} else {
+			if (pstate->stage == SDE_STAGE_BASE &&
+					format->alpha_enable)
+				bg_alpha_enable = true;
+
+			SDE_EVT32(DRMID(crtc), DRMID(plane),
+					state->fb ? state->fb->base.id : -1,
+					state->src_x >> 16, state->src_y >> 16,
+					state->src_w >> 16, state->src_h >> 16,
+					state->crtc_x, state->crtc_y,
+					state->crtc_w, state->crtc_h,
+					pstate->rotation);
+
+			stage_idx = zpos_cnt[pstate->stage]++;
+			stage_cfg->stage[pstate->stage][stage_idx] =
+						sde_plane_pipe(plane);
+			stage_cfg->multirect_index[pstate->stage][stage_idx] =
+						pstate->multirect_index;
+
+			SDE_EVT32(DRMID(crtc), DRMID(plane), stage_idx,
+				sde_plane_pipe(plane) - SSPP_VIG0,
+				pstate->stage,
+				pstate->multirect_index,
+				pstate->multirect_mode,
+				format->base.pixel_format,
+				fb ? fb->modifier : 0);
+
+			/* blend config update */
+			for (lm_idx = 0; lm_idx < sde_crtc->num_mixers;
+							lm_idx++) {
+				_sde_crtc_setup_blend_cfg(mixer + lm_idx,
+						pstate, format);
+
+				if (bg_alpha_enable && !format->alpha_enable)
+					mixer[lm_idx].mixer_op_mode = 0;
+				else
+					mixer[lm_idx].mixer_op_mode |=
+							1 << pstate->stage;
+			}
 		}
 
 		if (cnt >= SDE_PSTATES_MAX)
@@ -1427,7 +1465,11 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
 
 		pstates[cnt].sde_pstate = pstate;
 		pstates[cnt].drm_pstate = state;
-		pstates[cnt].stage = sde_plane_get_property(
+
+		if (blend_type == SDE_DRM_BLEND_OP_SKIP)
+			pstates[cnt].stage = SKIP_STAGING_PIPE_ZPOS;
+		else
+			pstates[cnt].stage = sde_plane_get_property(
 				pstates[cnt].sde_pstate, PLANE_PROP_ZPOS);
 		pstates[cnt].pipe_id = sde_plane_pipe(plane);
 
@@ -1562,6 +1604,7 @@ static void _sde_crtc_blend_setup(struct drm_crtc *crtc,
 
 	/* initialize stage cfg */
 	memset(&sde_crtc->stage_cfg, 0, sizeof(struct sde_hw_stage_cfg));
+	memset(&sde_crtc->active_cfg, 0, sizeof(sde_crtc->active_cfg));
 
 	if (add_planes)
 		_sde_crtc_blend_setup_mixer(crtc, old_state, sde_crtc, mixer);
@@ -1593,7 +1636,7 @@ static void _sde_crtc_blend_setup(struct drm_crtc *crtc,
 			cfg.pending_flush_mask);
 
 		ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
-			&sde_crtc->stage_cfg);
+			&sde_crtc->stage_cfg, &sde_crtc->active_cfg);
 	}
 
 	_sde_crtc_program_lm_output_roi(crtc);

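Taken together: a plane whose blend_op is SDE_DRM_BLEND_OP_SKIP is diverted into sde_crtc->active_cfg by __sde_crtc_assign_active_cfg() instead of the mixer's stage_cfg, and its pstate is parked at SKIP_STAGING_PIPE_ZPOS (255) so it sorts after every staged plane. The pipe is never blended, but setup_blendstage() (sde_hw_ctl.c below) still folds it into the active fetch pipes so the CTL keeps fetching its data.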
+ 2 - 0
msm/sde/sde_crtc.h

@@ -207,6 +207,7 @@ struct sde_crtc_misr_info {
  * @property_defaults : Array of default values for generic property support
  * @output_fence  : output release fence context
  * @stage_cfg     : H/w mixer stage configuration
+ * @active_cfg    : H/w pipes that are active but should not be staged
  * @debugfs_root  : Parent of debugfs node
  * @priv_handle   : Pointer to external private handle, if present
  * @vblank_cb_count : count of vblank callback since last reset
@@ -276,6 +277,7 @@ struct sde_crtc {
 	struct sde_fence_context *output_fence;
 
 	struct sde_hw_stage_cfg stage_cfg;
+	struct sde_hw_stage_cfg active_cfg;
 	struct dentry *debugfs_root;
 	void *priv_handle;
 

+ 1 - 1
msm/sde/sde_encoder.c

@@ -4197,7 +4197,7 @@ int sde_encoder_helper_reset_mixers(struct sde_encoder_phys *phys_enc,
 		/* only enable border color on LM */
 		if (phys_enc->hw_ctl->ops.setup_blendstage)
 			phys_enc->hw_ctl->ops.setup_blendstage(
-					phys_enc->hw_ctl, hw_lm->idx, NULL);
+				phys_enc->hw_ctl, hw_lm->idx, NULL, NULL);
 	}
 
 	if (!lm_valid) {

+ 71 - 1
msm/sde/sde_hw_catalog.c

@@ -407,6 +407,13 @@ enum {
 	SPR_PROP_MAX,
 };
 
+enum {
+	DEMURA_OFF,
+	DEMURA_LEN,
+	DEMURA_VERSION,
+	DEMURA_PROP_MAX,
+};
+
 enum {
 	MIXER_OFF,
 	MIXER_LEN,
@@ -872,6 +879,15 @@ static struct sde_prop_type limit_usecase_prop[] = {
 			PROP_TYPE_BIT_OFFSET_ARRAY},
 };
 
+static struct sde_prop_type demura_prop[] = {
+	[DEMURA_OFF] = {DEMURA_OFF, "qcom,sde-dspp-demura-off", false,
+			PROP_TYPE_U32_ARRAY},
+	[DEMURA_LEN] = {DEMURA_LEN, "qcom,sde-dspp-demura-size", false,
+			PROP_TYPE_U32},
+	[DEMURA_VERSION] = {DEMURA_VERSION, "qcom,sde-dspp-demura-version",
+			false, PROP_TYPE_U32},
+};
+
 /*************************************************************
  * static API list
  *************************************************************/
@@ -2529,6 +2545,46 @@ end:
 	return rc;
 }
 
+static int _sde_dspp_demura_parse_dt(struct device_node *np,
+		struct sde_mdss_cfg *sde_cfg)
+{
+	int off_count, i;
+	struct sde_dt_props *props;
+	struct sde_dspp_cfg *dspp;
+	struct sde_dspp_sub_blks *sblk;
+
+	props = sde_get_dt_props(np, DEMURA_PROP_MAX, demura_prop,
+			ARRAY_SIZE(demura_prop), &off_count);
+	if (IS_ERR(props))
+		return PTR_ERR(props);
+
+	sde_cfg->demura_count = off_count;
+	if (off_count > sde_cfg->dspp_count) {
+		SDE_ERROR("limiting %d demura blocks to %d DSPP instances\n",
+				off_count, sde_cfg->dspp_count);
+		sde_cfg->demura_count = sde_cfg->dspp_count;
+	}
+
+	for (i = 0; i < sde_cfg->dspp_count; i++) {
+		dspp = &sde_cfg->dspp[i];
+		sblk = sde_cfg->dspp[i].sblk;
+
+		sblk->demura.id = SDE_DSPP_DEMURA;
+		if (props->exists[DEMURA_OFF] && i < off_count) {
+			sblk->demura.base = PROP_VALUE_ACCESS(props->values,
+					DEMURA_OFF, i);
+			sblk->demura.len = PROP_VALUE_ACCESS(props->values,
+					DEMURA_LEN, 0);
+			sblk->demura.version = PROP_VALUE_ACCESS(props->values,
+					DEMURA_VERSION, 0);
+			set_bit(SDE_DSPP_DEMURA, &dspp->features);
+		}
+	}
+
+	sde_put_dt_props(props);
+	return 0;
+}
+
 static int _sde_dspp_spr_parse_dt(struct device_node *np,
 		struct sde_mdss_cfg *sde_cfg)
 {
@@ -2753,6 +2809,10 @@ static int sde_dspp_parse_dt(struct device_node *np,
 	if (rc)
 		goto end;
 
+	rc = _sde_dspp_demura_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
 	rc = _sde_rc_parse_dt(np, sde_cfg);
 end:
 	return rc;
@@ -4510,7 +4570,7 @@ static void _sde_hw_setup_uidle(struct sde_uidle_cfg *uidle_cfg)
 
 static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
 {
-	int rc = 0;
+	int rc = 0, i;
 
 	if (!sde_cfg)
 		return -EINVAL;
@@ -4519,6 +4579,11 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
 	sde_cfg->has_mixer_combined_alpha = true;
 	sde_cfg->mdss_hw_block_size = DEFAULT_MDSS_HW_BLOCK_SIZE;
 
+	for (i = 0; i < SSPP_MAX; i++) {
+		sde_cfg->demura_supported[i][0] = ~0x0;
+		sde_cfg->demura_supported[i][1] = ~0x0;
+	}
+
 	/* target specific settings */
 	if (IS_MSM8996_TARGET(hw_rev)) {
 		sde_cfg->perf.min_prefill_lines = 21;
@@ -4684,6 +4749,11 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
 		sde_cfg->has_sui_blendstage = true;
 		sde_cfg->vbif_disable_inner_outer_shareable = true;
 	} else if (IS_LAHAINA_TARGET(hw_rev)) {
+		sde_cfg->has_demura = true;
+		sde_cfg->demura_supported[SSPP_DMA1][0] = 0;
+		sde_cfg->demura_supported[SSPP_DMA1][1] = 1;
+		sde_cfg->demura_supported[SSPP_DMA3][0] = 0;
+		sde_cfg->demura_supported[SSPP_DMA3][1] = 1;
 		sde_cfg->has_cwb_support = true;
 		sde_cfg->has_wb_ubwc = true;
 		sde_cfg->has_qsync = true;

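The three new properties give one demura register offset per DSPP plus a shared length and version. A hypothetical devicetree fragment (offsets and sizes are illustrative only; 0x00010000 is what the version >> 16 check in dspp_demura_install_property() recognizes as V1):

    qcom,sde-dspp-demura-off = <0x1000 0x1000>;
    qcom,sde-dspp-demura-size = <0x100>;
    qcom,sde-dspp-demura-version = <0x00010000>;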
+ 6 - 0
msm/sde/sde_hw_catalog.h

@@ -770,6 +770,7 @@ struct sde_dspp_sub_blks {
 	struct sde_pp_blk spr;
 	struct sde_pp_blk vlut;
 	struct sde_dspp_rc rc;
+	struct sde_pp_blk demura;
 };
 
 struct sde_pingpong_sub_blks {
@@ -1418,6 +1419,7 @@ struct sde_limit_cfg {
  * @inline_disable_const_clr     Disable constant color during inline rotate
  * @dither_luma_mode_support   Enables dither luma mode
  * @has_base_layer     Supports staging layer as base layer
+ * @demura_supported   Demura pipe support flag (~0x0 indicates not supported)
  * @sc_cfg: system cache configuration
  * @uidle_cfg		Settings for uidle feature
  * @sui_misr_supported  indicate if secure-ui-misr is supported
@@ -1436,6 +1438,7 @@ struct sde_limit_cfg {
  * @inline_rot_formats	formats supported by the inline rotator feature
  * @irq_offset_list     list of sde_intr_irq_offsets to initialize irq table
  * @rc_count	number of rounded corner hardware instances
+ * @demura_count number of demura hardware instances
  */
 struct sde_mdss_cfg {
 	u32 hwversion;
@@ -1480,6 +1483,8 @@ struct sde_mdss_cfg {
 	bool inline_disable_const_clr;
 	bool dither_luma_mode_support;
 	bool has_base_layer;
+	bool has_demura;
+	u32 demura_supported[SSPP_MAX][2];
 
 	struct sde_sc_cfg sc_cfg;
 
@@ -1551,6 +1556,7 @@ struct sde_mdss_cfg {
 	u32 ltm_count;
 	u32 rc_count;
 	u32 spr_count;
+	u32 demura_count;
 
 	u32 merge_3d_count;
 	struct sde_merge_3d_cfg merge_3d[MAX_BLOCKS];

+ 17 - 1
msm/sde/sde_hw_color_proc_v4.c

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
  */
 #include <drm/msm_drm_pp.h>
 #include "sde_hw_color_proc_common_v4.h"
@@ -392,3 +392,19 @@ void sde_ltm_read_intr_status(struct sde_hw_dspp *ctx, u32 *status)
 	clear |= BIT(1) | BIT(2);
 	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->ltm.base + 0x58, clear);
 }
+
+void sde_demura_backlight_cfg(struct sde_hw_dspp *dspp, u64 val)
+{
+	u32 demura_base;
+	u32 backlight;
+
+	if (!dspp) {
+		DRM_ERROR("invalid parameter ctx %pK", dspp);
+		return;
+	}
+	demura_base = dspp->cap->sblk->demura.base;
+	backlight = (val & REG_MASK(11));
+	backlight |= ((val & REG_MASK_SHIFT(11, 32)) >> 16);
+	SDE_REG_WRITE(&dspp->hw, dspp->cap->sblk->demura.base + 0x8,
+			backlight);
+}

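The u64 val packs two 11-bit backlight components: bits [10:0] land in register bits [10:0], and bits [42:32] land in bits [26:16]. A worked example using the REG_MASK/REG_MASK_SHIFT macros from sde_hw_util.h:

    u64 val = (1024ULL << 32) | 1024;	/* both components at full scale */
    u32 backlight = (val & REG_MASK(11)) |
    		((val & REG_MASK_SHIFT(11, 32)) >> 16);
    /* backlight == 0x04000400, the same value as the (1024 << 16) | 1024
     * default written by __reg_dmav1_setup_demurav1_en() below */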
+ 8 - 1
msm/sde/sde_hw_color_proc_v4.h

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
  */
 #ifndef _SDE_HW_COLOR_PROC_V4_H_
 #define _SDE_HW_COLOR_PROC_V4_H_
@@ -70,4 +70,11 @@ void sde_setup_dspp_ltm_hist_bufferv1(struct sde_hw_dspp *ctx, u64 addr);
  */
 void sde_ltm_read_intr_status(struct sde_hw_dspp *dspp, u32 *status);
 
+/**
+ * sde_demura_backlight_cfg - API to set backlight for demura
+ * @dspp: pointer to dspp object
+ * @val: value of backlight
+ */
+void sde_demura_backlight_cfg(struct sde_hw_dspp *dspp, u64 val);
+
 #endif /* _SDE_HW_COLOR_PROC_V4_H_ */

+ 14 - 1
msm/sde/sde_hw_ctl.c

@@ -806,7 +806,8 @@ static void sde_hw_ctl_clear_all_blendstages(struct sde_hw_ctl *ctx)
 }
 
 static void sde_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx,
-	enum sde_lm lm, struct sde_hw_stage_cfg *stage_cfg)
+	enum sde_lm lm, struct sde_hw_stage_cfg *stage_cfg,
+	struct sde_hw_stage_cfg *active_cfg)
 {
 	struct sde_hw_blk_reg_map *c;
 	u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
@@ -939,6 +940,18 @@ static void sde_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx,
 		}
 	}
 
+	for (i = 0; i <= stages && active_cfg; i++) {
+		enum sde_sspp pipe = active_cfg->stage[i][0];
+
+		if (pipe == SSPP_NONE)
+			break;
+		if (fetch_tbl[pipe] != CTL_INVALID_BIT) {
+			active_fetch_pipes |= BIT(fetch_tbl[pipe]);
+			SDE_DEBUG("fetch pipe %d active pipes %x\n",
+				pipe, active_fetch_pipes);
+		}
+	}
+
 exit:
 	if ((!mixercfg && !mixercfg_ext && !mixercfg_ext2 && !mixercfg_ext3) ||
 			(stage_cfg && !stage_cfg->stage[0][0]))

+ 3 - 1
msm/sde/sde_hw_ctl.h

@@ -426,9 +426,11 @@ struct sde_hw_ctl_ops {
 	 * @ctx       : ctl path ctx pointer
 	 * @lm        : layer mixer enumeration
 	 * @cfg       : blend stage configuration
+	 * @active_cfg: stage configuration for active no-blend pipes
 	 */
 	void (*setup_blendstage)(struct sde_hw_ctl *ctx,
-		enum sde_lm lm, struct sde_hw_stage_cfg *cfg);
+		enum sde_lm lm, struct sde_hw_stage_cfg *cfg,
+		struct sde_hw_stage_cfg *active_cfg);
 
 	/**
 	 * Get all the sspp staged on a layer mixer

+ 27 - 0
msm/sde/sde_hw_dspp.c

@@ -284,6 +284,22 @@ static void dspp_spr(struct sde_hw_dspp *c)
 	}
 }
 
+static void dspp_demura(struct sde_hw_dspp *c)
+{
+	int ret;
+
+	if (c->cap->sblk->demura.version == SDE_COLOR_PROCESS_VER(0x1, 0x0)) {
+		ret = reg_dmav1_init_dspp_op_v4(SDE_DSPP_DEMURA, c->idx);
+		c->ops.setup_demura_cfg = NULL;
+		c->ops.setup_demura_backlight_cfg = NULL;
+		if (!ret) {
+			c->ops.setup_demura_cfg = reg_dmav1_setup_demurav1;
+			c->ops.setup_demura_backlight_cfg =
+				sde_demura_backlight_cfg;
+		}
+	}
+}
+
 static void (*dspp_blocks[SDE_DSPP_MAX])(struct sde_hw_dspp *c);
 
 static void _init_dspp_ops(void)
@@ -302,6 +318,7 @@ static void _init_dspp_ops(void)
 	dspp_blocks[SDE_DSPP_LTM] = dspp_ltm;
 	dspp_blocks[SDE_DSPP_RC] = dspp_rc;
 	dspp_blocks[SDE_DSPP_SPR] = dspp_spr;
+	dspp_blocks[SDE_DSPP_DEMURA] = dspp_demura;
 }
 
 static void _setup_dspp_ops(struct sde_hw_dspp *c, unsigned long features)
@@ -390,6 +407,16 @@ struct sde_hw_dspp *sde_hw_dspp_init(enum sde_dspp idx,
 				c->hw.blk_off + cfg->sblk->spr.base +
 				cfg->sblk->spr.len, c->hw.xin_id);
 	}
+
+	if ((cfg->sblk->demura.id == SDE_DSPP_DEMURA) &&
+			cfg->sblk->demura.base) {
+		snprintf(buf, ARRAY_SIZE(buf), "%s_%d", "demura",
+				c->idx - DSPP_0);
+		sde_dbg_reg_register_dump_range(SDE_DBG_NAME, buf,
+				c->hw.blk_off + cfg->sblk->demura.base,
+				c->hw.blk_off + cfg->sblk->demura.base +
+				cfg->sblk->demura.len, c->hw.xin_id);
+	}
 	return c;
 
 blk_init_error:

+ 14 - 0
msm/sde/sde_hw_dspp.h

@@ -256,6 +256,20 @@ struct sde_hw_dspp_ops {
 	 * @cfg: Pointer to configuration
 	 */
 	void (*setup_spr_init_config)(struct sde_hw_dspp *ctx, void *cfg);
+
+	/**
+	 * setup_demura_cfg - function to program demura cfg
+	 * @ctx: Pointer to dspp context
+	 * @cfg: Pointer to configuration.
+	 */
+	void (*setup_demura_cfg)(struct sde_hw_dspp *ctx, void *cfg);
+
+	/**
+	 * setup_demura_backlight_cfg - function to program demura backlight
+	 * @ctx: Pointer to dspp context
+	 * @val: Value of backlight.
+	 */
+	void (*setup_demura_backlight_cfg)(struct sde_hw_dspp *ctx, u64 val);
 };
 
 /**

+ 1 - 0
msm/sde/sde_hw_reg_dma_v1.c

@@ -810,6 +810,7 @@ int init_v12(struct sde_hw_reg_dma *cfg)
 			GRP_MDSS_HW_BLK_SELECT);
 	v1_supported[SPR_INIT] = (GRP_DSPP_HW_BLK_SELECT |
 			GRP_MDSS_HW_BLK_SELECT);
+	v1_supported[DEMURA_CFG] = MDSS | DSPP0 | DSPP1;
 
 	return 0;
 }

+ 553 - 0
msm/sde/sde_hw_reg_dma_v1_color_proc.c

@@ -80,6 +80,8 @@
 		REG_DMA_HEADERS_BUFFER_SZ)
 #define SPR_INIT_MEM_SIZE ((sizeof(struct drm_msm_spr_init_cfg)) + \
 		REG_DMA_HEADERS_BUFFER_SZ)
+#define DEMURA_MEM_SIZE ((sizeof(struct drm_msm_dem_cfg)) + \
+		REG_DMA_HEADERS_BUFFER_SZ)
 
 #define REG_MASK(n) ((BIT(n)) - 1)
 #define REG_MASK_SHIFT(n, shift) ((REG_MASK(n)) << (shift))
@@ -142,6 +144,7 @@ static u32 feature_map[SDE_DSPP_MAX] = {
 	[SDE_DSPP_HIST] = REG_DMA_FEATURES_MAX,
 	[SDE_DSPP_AD] = REG_DMA_FEATURES_MAX,
 	[SDE_DSPP_RC] = RC_DATA,
+	[SDE_DSPP_DEMURA] = DEMURA_CFG,
 };
 
 static u32 sspp_feature_map[SDE_SSPP_MAX] = {
@@ -170,6 +173,7 @@ static u32 feature_reg_dma_sz[SDE_DSPP_MAX] = {
 	[SDE_DSPP_MEMCOLOR] = MEMCOLOR_MEM_SIZE,
 	[SDE_DSPP_RC] = RC_MEM_SIZE,
 	[SDE_DSPP_SPR] = SPR_INIT_MEM_SIZE,
+	[SDE_DSPP_DEMURA] = DEMURA_MEM_SIZE,
 };
 
 static u32 sspp_feature_reg_dma_sz[SDE_SSPP_MAX] = {
@@ -4693,3 +4697,552 @@ void reg_dmav1_setup_spr_init_cfgv1(struct sde_hw_dspp *ctx, void *cfg)
 		return;
 	}
 }
+
+static void reg_dma_demura_off(struct sde_hw_dspp *ctx,
+		struct sde_hw_cp_cfg *hw_cfg)
+{
+	struct sde_hw_reg_dma_ops *dma_ops;
+	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
+	struct sde_reg_dma_kickoff_cfg kick_off;
+	u32 demura_base = ctx->cap->sblk->demura.base;
+	u32 op_mode = 0;
+	int rc;
+
+	dma_ops = sde_reg_dma_get_ops();
+	dma_ops->reset_reg_dma_buf(dspp_buf[DEMURA_CFG][ctx->idx]);
+
+	REG_DMA_INIT_OPS(dma_write_cfg, MDSS, DEMURA_CFG,
+			dspp_buf[DEMURA_CFG][ctx->idx]);
+
+	REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0, 0);
+	rc = dma_ops->setup_payload(&dma_write_cfg);
+	if (rc) {
+		DRM_ERROR("write decode select failed ret %d\n", rc);
+		return;
+	}
+
+	REG_DMA_SETUP_OPS(dma_write_cfg, ctx->hw.blk_off + demura_base + 0x4,
+		&op_mode, sizeof(op_mode), REG_SINGLE_WRITE, 0, 0, 0);
+	rc = dma_ops->setup_payload(&dma_write_cfg);
+	if (rc) {
+		DRM_ERROR("off(0x4): REG_SINGLE_WRITE failed ret %d\n", rc);
+		return;
+	}
+
+	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
+			dspp_buf[DEMURA_CFG][ctx->idx],
+			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+
+	rc = dma_ops->kick_off(&kick_off);
+	if (rc)
+		DRM_ERROR("failed to kick off ret %d\n", rc);
+}
+
+static int __reg_dmav1_setup_demurav1_cfg0_c_params(
+		struct sde_reg_dma_setup_ops_cfg *dma_write_cfg,
+		struct drm_msm_dem_cfg *dcfg,
+		struct sde_hw_reg_dma_ops *dma_ops,
+		u32 *temp, u32 temp_sz, u32 comp_index,
+		u32 demura_base)
+{
+	u32 i, len;
+	u64 *p;
+	int rc;
+
+	if (temp_sz < ARRAY_SIZE(dcfg->cfg0_param2_c0) * 8 || comp_index > 2) {
+		DRM_ERROR("exp sz %zd act sz %d comp index %d\n",
+			ARRAY_SIZE(dcfg->cfg0_param2_c0) * 8,
+			temp_sz, comp_index);
+		return -EINVAL;
+	}
+
+	memset(temp, 0x0, ARRAY_SIZE(dcfg->cfg0_param2_c0) * 8);
+	if (comp_index == 0) {
+		len = 1 << dcfg->c0_depth;
+		p = dcfg->cfg0_param2_c0;
+	} else if (comp_index == 1) {
+		len = 1 << dcfg->c1_depth;
+		p = dcfg->cfg0_param2_c1;
+	} else {
+		len = 1 << dcfg->c2_depth;
+		p = dcfg->cfg0_param2_c2;
+	}
+
+	if (!len || len > 256) {
+		DRM_ERROR("invalid len %d Max 256\n", len);
+		return -EINVAL;
+	}
+
+	i = ((comp_index & 0x3) << 28) | BIT(31);
+	REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0x68,
+		&i, sizeof(i), REG_SINGLE_WRITE, 0, 0, 0);
+	rc = dma_ops->setup_payload(dma_write_cfg);
+	if (rc) {
+		DRM_ERROR("0x68: REG_SINGLE_WRITE failed ret %d\n", rc);
+		return rc;
+	}
+
+	for (i = 0; i < len; i++) {
+		temp[i * 2] = p[i] & REG_MASK(32);
+		temp[i * 2 + 1] = (p[i] & REG_MASK_SHIFT(10, 32)) >> 32;
+		DRM_DEBUG_DRIVER("0x6c: index %d value %x\n",
+				i * 2, temp[i * 2]);
+		DRM_DEBUG_DRIVER("0x6c: index %d value %x\n",
+				i * 2 + 1, temp[i * 2 + 1]);
+	}
+	REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0x6c,
+		temp, sizeof(u64) * len, REG_BLK_WRITE_INC, 0, 0, 0);
+	rc = dma_ops->setup_payload(dma_write_cfg);
+	if (rc) {
+		DRM_ERROR("0x6c: REG_BLK_WRITE_INC failed ret %d\n", rc);
+		return rc;
+	}
+	return rc;
+}
+
+static int __reg_dmav1_setup_demurav1_cfg0(struct sde_hw_dspp *ctx,
+		struct drm_msm_dem_cfg *dcfg,
+		struct sde_reg_dma_setup_ops_cfg *dma_write_cfg,
+		struct sde_hw_reg_dma_ops *dma_ops,
+		struct sde_hw_cp_cfg *hw_cfg)
+{
+	u32 *temp = NULL, i, *p = NULL, shift, width;
+	int rc;
+	u32 demura_base = ctx->cap->sblk->demura.base + ctx->hw.blk_off;
+
+	if (!dcfg->cfg0_en) {
+		DRM_DEBUG_DRIVER("dcfg->cfg0_en is disabled\n");
+		return 0;
+	}
+
+	temp = kzalloc(sizeof(struct drm_msm_dem_cfg), GFP_KERNEL);
+	if (!temp)
+		return -ENOMEM;
+
+	for (i = 0; i < ARRAY_SIZE(dcfg->cfg01_param0); i += 2) {
+		temp[i >> 1] = (dcfg->cfg01_param0[i] & REG_MASK(12)) |
+			((dcfg->cfg01_param0[i + 1] & REG_MASK(12)) << 16);
+		DRM_DEBUG_DRIVER("0x1c: index %d value %x\n", i >> 1,
+				temp[i >> 1]);
+	}
+	REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0x1c,
+		temp, sizeof(u32) * 4, REG_BLK_WRITE_SINGLE, 0, 0, 0);
+	rc = dma_ops->setup_payload(dma_write_cfg);
+	if (rc) {
+		DRM_ERROR("0x1c: write err %d len %zd buffer index %d\n",
+			rc, sizeof(u32) * 4, dma_write_cfg->dma_buf->index);
+		goto quit;
+	}
+
+	memset(temp, 0, ARRAY_SIZE(dcfg->cfg0_param1));
+	for (i = 0; i < ARRAY_SIZE(dcfg->cfg0_param1); i++) {
+		p = (i < 4) ? &temp[0] : &temp[1];
+		shift = (8 * i) % 32;
+		*p |= (((dcfg->cfg0_param1[i] & 0x3f) & REG_MASK(6)) << shift);
+		DRM_DEBUG_DRIVER("0xc: index %d value %x val %x shift %d\n",
+			i, *p, (dcfg->cfg0_param1[i] & 0x3f), shift);
+	}
+	REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0xc,
+		temp, sizeof(u32) * 2, REG_BLK_WRITE_SINGLE, 0, 0, 0);
+	rc = dma_ops->setup_payload(dma_write_cfg);
+	if (rc) {
+		DRM_ERROR("0xc: write err %d len %zd buffer index %d\n",
+		    rc, sizeof(u32) * 2, dma_write_cfg->dma_buf->index);
+		goto quit;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(dcfg->cfg0_param0); i++)
+		DRM_DEBUG_DRIVER("0x2c: index %d value %x\n",
+			i, dcfg->cfg0_param0[i]);
+	REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0x2c,
+		dcfg->cfg0_param0, ARRAY_SIZE(dcfg->cfg0_param0) * sizeof(u32),
+		REG_BLK_WRITE_SINGLE, 0, 0, 0);
+	rc = dma_ops->setup_payload(dma_write_cfg);
+	if (rc) {
+		DRM_ERROR("0x2c: write err %d len %zd buf idx %d\n",
+			rc, ARRAY_SIZE(dcfg->cfg0_param0) * sizeof(u32),
+			dma_write_cfg->dma_buf->index);
+		goto quit;
+	}
+
+	shift = 0;
+	memset(temp, 0, ARRAY_SIZE(dcfg->cfg0_param3_c0) * 3 * sizeof(u32));
+	for (i = 0; i < ARRAY_SIZE(dcfg->cfg0_param3_c0); i += 4) {
+		temp[shift] = (dcfg->cfg0_param3_c0[i] & REG_MASK(8)) |
+			(dcfg->cfg0_param3_c0[i + 1] & REG_MASK(8) << 8) |
+			(dcfg->cfg0_param3_c0[i + 2] & REG_MASK(8) << 16) |
+			(dcfg->cfg0_param3_c0[i + 3] & REG_MASK(8) << 23);
+		temp[shift + 2] = (dcfg->cfg0_param3_c1[i] & REG_MASK(8)) |
+			(dcfg->cfg0_param3_c1[i + 1] & REG_MASK(8) << 8) |
+			(dcfg->cfg0_param3_c1[i + 2] & REG_MASK(8) << 16) |
+			(dcfg->cfg0_param3_c1[i + 3] & REG_MASK(8) << 23);
+		temp[shift + 4] = (dcfg->cfg0_param3_c2[i] & REG_MASK(8)) |
+			(dcfg->cfg0_param3_c2[i + 1] & REG_MASK(8) << 8) |
+			(dcfg->cfg0_param3_c2[i + 2] & REG_MASK(8) << 16) |
+			(dcfg->cfg0_param3_c2[i + 3] & REG_MASK(8) << 23);
+		DRM_DEBUG_DRIVER("0xb0: index %d value %x\n",
+				shift,  temp[shift]);
+		DRM_DEBUG_DRIVER("0xb0: index %d value %x\n",
+				shift + 2,  temp[shift + 2]);
+		DRM_DEBUG_DRIVER("0xb0: index %d value %x\n",
+				shift + 4,  temp[shift + 4]);
+		shift++;
+	}
+	REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0xb0,
+		temp, ARRAY_SIZE(dcfg->cfg0_param3_c0) * 3 * sizeof(u32),
+		REG_BLK_WRITE_SINGLE, 0, 0, 0);
+	rc = dma_ops->setup_payload(dma_write_cfg);
+	if (rc) {
+		DRM_ERROR("0xb0: write err %d len %zd buffer index %d\n", rc,
+			ARRAY_SIZE(dcfg->cfg0_param3_c0) * 3 * sizeof(u32),
+			dma_write_cfg->dma_buf->index);
+		goto quit;
+	}
+
+	for (i = 0; i < 3; i++) {
+		rc = __reg_dmav1_setup_demurav1_cfg0_c_params(dma_write_cfg,
+				dcfg, dma_ops, temp,
+				sizeof(struct drm_msm_dem_cfg), i,
+				demura_base);
+		if (rc)
+			goto quit;
+	}
+
+	width = hw_cfg->displayh >> 1;
+	DRM_DEBUG_DRIVER("0x80: value %x\n", width);
+	REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0x80,
+		&width, sizeof(width), REG_SINGLE_WRITE, 0, 0, 0);
+	rc = dma_ops->setup_payload(dma_write_cfg);
+	if (rc) {
+		DRM_ERROR("0x80: REG_SINGLE_WRITE err %d len %zd buf idx %d\n",
+			rc, sizeof(width), dma_write_cfg->dma_buf->index);
+		goto quit;
+	}
+
+	i = 0x400;
+	REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0xe0,
+		&i, sizeof(i), REG_SINGLE_WRITE, 0, 0, 0);
+	rc = dma_ops->setup_payload(dma_write_cfg);
+	if (rc) {
+		DRM_ERROR("0xe0: REG_SINGLE_WRITE err %d len %zd buf idx %d\n",
+			rc, sizeof(i), dma_write_cfg->dma_buf->index);
+		goto quit;
+	}
+
+	memset(temp, 0, sizeof(u32) * 2);
+	for (i = 0; i < ARRAY_SIZE(dcfg->cfg0_param4); i++)
+		DRM_DEBUG_DRIVER("hfc gain is %d\n", dcfg->cfg0_param4[i]);
+	temp[0] = (dcfg->cfg0_param4[0] & REG_MASK(5)) |
+			((dcfg->cfg0_param4[1] & REG_MASK(5)) << 8) |
+			  ((dcfg->cfg0_param4[2] & REG_MASK(5)) << 16) |
+			  ((dcfg->cfg0_param4[3] & REG_MASK(5)) << 24);
+	temp[1] = (dcfg->cfg0_param4[4] & REG_MASK(5)) |
+			((dcfg->cfg0_param4[5] & REG_MASK(5)) << 8) |
+			  ((dcfg->cfg0_param4[6] & REG_MASK(5)) << 16) |
+			  ((dcfg->cfg0_param4[7] & REG_MASK(5)) << 24);
+	DRM_DEBUG_DRIVER("0x4c: value is temp[0] %x temp[1] %x\n",
+				temp[0], temp[1]);
+	REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0x4c,
+		temp, sizeof(u32) * 2, REG_BLK_WRITE_SINGLE, 0, 0, 0);
+	rc = dma_ops->setup_payload(dma_write_cfg);
+	if (rc) {
+		DRM_ERROR("0x4c: REG_BLK_WRITE_SINGLE %d len %zd buf idx %d\n",
+			rc, sizeof(u32) * 2, dma_write_cfg->dma_buf->index);
+		goto quit;
+	}
+quit:
+	kfree(temp);
+	return rc;
+}
+
+static int __reg_dmav1_setup_demurav1_cfg1(struct sde_hw_dspp *ctx,
+		struct drm_msm_dem_cfg *dcfg,
+		struct sde_reg_dma_setup_ops_cfg *dma_write_cfg,
+		struct sde_hw_reg_dma_ops *dma_ops,
+		struct sde_hw_cp_cfg *hw_cfg)
+{
+	u32 temp[2], i, shift, *cfg1_data = NULL, len = 0;
+	int rc;
+	u32 width = 0;
+	u32 demura_base = ctx->cap->sblk->demura.base + ctx->hw.blk_off;
+
+	if (!dcfg->cfg1_en) {
+		DRM_DEBUG_DRIVER("dcfg->cfg1_en is disabled\n");
+		return 0;
+	}
+	len = ARRAY_SIZE(dcfg->cfg1_param0_c0);
+	cfg1_data = kcalloc(len, sizeof(u32), GFP_KERNEL);
+	if (!cfg1_data)
+		return -ENOMEM;
+
+	DRM_DEBUG_DRIVER("dcfg->cfg1_high_idx %d dcfg->cfg1_low_idx %d\n",
+				dcfg->cfg1_high_idx, dcfg->cfg1_low_idx);
+	if (dcfg->cfg1_high_idx >= ARRAY_SIZE(dcfg->cfg01_param0))
+		dcfg->cfg1_high_idx = ARRAY_SIZE(dcfg->cfg01_param0) - 1;
+
+	if (dcfg->cfg1_low_idx >= ARRAY_SIZE(dcfg->cfg01_param0))
+		dcfg->cfg1_low_idx = ARRAY_SIZE(dcfg->cfg01_param0) - 1;
+
+	temp[0] = dcfg->cfg01_param0[dcfg->cfg1_high_idx];
+	temp[1] = dcfg->cfg01_param0[dcfg->cfg1_low_idx];
+	if (temp[0] > temp[1])
+		shift = temp[0] - temp[1];
+	else
+		shift = 1;
+	i = (1 << 22) / shift;
+	DRM_DEBUG_DRIVER("0x14: value %x\n", i);
+
+	REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0x14,
+		&i, sizeof(i), REG_SINGLE_WRITE, 0, 0, 0);
+	rc = dma_ops->setup_payload(dma_write_cfg);
+	if (rc) {
+		DRM_ERROR("0x14: REG_SINGLE_WRITE failed ret %d\n", rc);
+		goto quit;
+	}
+
+	width = hw_cfg->displayh;
+	DRM_DEBUG_DRIVER("width for LFC calculation is %d\n", width);
+	if (hw_cfg->displayh < hw_cfg->displayv) {
+		temp[0] = (8 * (1 << 21)) / width;
+		temp[1] = (16 * (1 << 21)) / hw_cfg->displayv;
+	} else {
+		temp[0] = (16 * (1 << 21)) / width;
+		temp[1] = (8 * (1 << 21)) / hw_cfg->displayv;
+	}
+	temp[0] = (dcfg->pentile) ? ((temp[0]) | BIT(31)) : temp[0];
+
+	REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0x54,
+		&temp[0], sizeof(temp[0]), REG_SINGLE_WRITE, 0, 0, 0);
+	DRM_DEBUG_DRIVER("0x54 value %x\n", temp[0]);
+	rc = dma_ops->setup_payload(dma_write_cfg);
+	if (rc) {
+		DRM_ERROR("0x54: REG_SINGLE_WRITE ret %d\n", rc);
+		goto quit;
+	}
+	REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0x5c,
+		&temp[1], sizeof(temp[1]), REG_SINGLE_WRITE, 0, 0, 0);
+	DRM_DEBUG_DRIVER("0x5c value %x\n", temp[1]);
+	rc = dma_ops->setup_payload(dma_write_cfg);
+	if (rc) {
+		DRM_ERROR("0x5c: REG_SINGLE_WRITE ret %d\n", rc);
+		goto quit;
+	}
+
+	cfg1_data[0] = (dcfg->cfg1_param0_c0[0] & REG_MASK(10)) |
+		((dcfg->cfg1_param0_c1[0] & REG_MASK(10)) << 10) |
+		((dcfg->cfg1_param0_c2[0] & REG_MASK(10)) << 20) | BIT(31);
+	DRM_DEBUG_DRIVER("0x64: value %x\n", cfg1_data[0]);
+	for (i = 1; i < len; i++) {
+		cfg1_data[i] = (dcfg->cfg1_param0_c0[i] & REG_MASK(10)) |
+			((dcfg->cfg1_param0_c1[i] & REG_MASK(10)) << 10) |
+			((dcfg->cfg1_param0_c2[i] & REG_MASK(10)) << 20);
+			DRM_DEBUG_DRIVER("0x64 index %d value %x\n", i,
+					cfg1_data[i]);
+	}
+	REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0x64,
+		cfg1_data, len * sizeof(u32), REG_BLK_WRITE_INC, 0,
+		0, 0);
+	rc = dma_ops->setup_payload(dma_write_cfg);
+	if (rc) {
+		DRM_ERROR("lut write failed ret %d\n", rc);
+		goto quit;
+	}
+
+quit:
+	kfree(cfg1_data);
+	return rc;
+}
+
+static int __reg_dmav1_setup_demurav1_cfg3(struct sde_hw_dspp *ctx,
+		struct drm_msm_dem_cfg *dcfg,
+		struct sde_reg_dma_setup_ops_cfg *dma_write_cfg,
+		struct sde_hw_reg_dma_ops *dma_ops)
+{
+	u32 temp[CFG3_PARAM01_LEN + 1];
+	int rc, i;
+	u32 demura_base = ctx->cap->sblk->demura.base + ctx->hw.blk_off;
+
+	if (!dcfg->cfg3_en) {
+		DRM_DEBUG_DRIVER("dcfg->cfg3_en is disabled\n");
+		return 0;
+	}
+
+	temp[0] = dcfg->cfg3_param0_a[0] & REG_MASK(10);
+	temp[0] = (dcfg->cfg3_param0_a[1] & REG_MASK(10)) << 16;
+	temp[1] = dcfg->cfg3_param0_a[2] & REG_MASK(10);
+	temp[1] = (dcfg->cfg3_param0_a[3] & REG_MASK(10)) << 16;
+	temp[2] = dcfg->cfg3_param0_b[0] & REG_MASK(11);
+	temp[2] = (dcfg->cfg3_param0_b[1] & REG_MASK(11)) << 16;
+	temp[3] = dcfg->cfg3_param0_b[2] & REG_MASK(11);
+	temp[3] = (dcfg->cfg3_param0_b[3] & REG_MASK(11)) << 16;
+	temp[4] = (dcfg->cfg3_ab_adj) & REG_MASK(11);
+	for (i = 0; i < ARRAY_SIZE(temp); i++)
+		DRM_DEBUG_DRIVER("0xd0: index %i value %x\n", i, temp[i]);
+
+	REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0xd0,
+		temp, ARRAY_SIZE(temp) * sizeof(u32), REG_BLK_WRITE_SINGLE, 0,
+		0, 0);
+	rc = dma_ops->setup_payload(dma_write_cfg);
+	if (rc)
+		DRM_ERROR("0xd0: REG_BLK_WRITE_SINGLE failed ret %d\n", rc);
+
+	return rc;
+}
+
+static int __reg_dmav1_setup_demurav1_cfg5(struct sde_hw_dspp *ctx,
+		struct drm_msm_dem_cfg *dcfg,
+		struct sde_reg_dma_setup_ops_cfg *dma_write_cfg,
+		struct sde_hw_reg_dma_ops *dma_ops)
+{
+	u32 temp[CFG5_PARAM01_LEN];
+	int rc, i;
+	u32 demura_base = ctx->cap->sblk->demura.base + ctx->hw.blk_off;
+
+	if (!dcfg->cfg5_en) {
+		DRM_DEBUG_DRIVER("dcfg->cfg5_en is disabled\n");
+		return 0;
+	}
+
+	temp[0] = dcfg->cfg5_param0[0] & REG_MASK(10);
+	temp[0] |= (dcfg->cfg5_param0[1] & REG_MASK(10)) << 16;
+	temp[1] = dcfg->cfg5_param0[2] & REG_MASK(10);
+	temp[1] |= (dcfg->cfg5_param0[3] & REG_MASK(10)) << 16;
+	temp[2] = dcfg->cfg5_param1[0] & REG_MASK(11);
+	temp[2] |= (dcfg->cfg5_param1[1] & REG_MASK(11)) << 16;
+	temp[3] = dcfg->cfg5_param1[2] & REG_MASK(11);
+	temp[3] |= (dcfg->cfg5_param1[3] & REG_MASK(11)) << 16;
+	for (i = 0; i < ARRAY_SIZE(temp); i++)
+		DRM_DEBUG_DRIVER("0xa0: index %i value %x\n", i, temp[i]);
+
+	REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0xa0,
+		temp, ARRAY_SIZE(temp) * sizeof(u32), REG_BLK_WRITE_SINGLE, 0,
+		0, 0);
+	rc = dma_ops->setup_payload(dma_write_cfg);
+	if (rc)
+		DRM_ERROR("0xa0: REG_BLK_WRITE_SINGLE failed ret %d\n", rc);
+
+	return rc;
+}
+
+
+static int __reg_dmav1_setup_demurav1_en(struct sde_hw_dspp *ctx,
+		struct drm_msm_dem_cfg *dcfg,
+		struct sde_reg_dma_setup_ops_cfg *dma_write_cfg,
+		struct sde_hw_reg_dma_ops *dma_ops)
+{
+	u32 en = 0, backl;
+	int rc;
+	u32 demura_base = ctx->cap->sblk->demura.base + ctx->hw.blk_off;
+
+	backl = (1024 << 16) | 1024;
+	REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0x8,
+		&backl, sizeof(backl), REG_SINGLE_WRITE, 0, 0, 0);
+	rc = dma_ops->setup_payload(dma_write_cfg);
+	if (rc) {
+		DRM_ERROR("0x8: REG_SINGLE_WRITE failed ret %d\n", rc);
+		return rc;
+	}
+
+	en = (dcfg->src_id) ? BIT(31) : 0;
+	en |= (dcfg->cfg1_high_idx & REG_MASK(3)) << 24;
+	en |= (dcfg->cfg1_low_idx & REG_MASK(3)) << 20;
+	en |= (dcfg->c2_depth & REG_MASK(4)) << 16;
+	en |= (dcfg->c1_depth & REG_MASK(4)) << 12;
+	en |= (dcfg->c0_depth & REG_MASK(4)) << 8;
+	en |= (dcfg->cfg3_en) ? BIT(5) : 0;
+	en |= (dcfg->cfg4_en) ? BIT(4) : 0;
+	en |= (dcfg->cfg2_en) ? BIT(3) : 0;
+	en |= (dcfg->cfg0_en) ? BIT(2) : 0;
+	en |= (dcfg->cfg1_en) ? BIT(1) : 0;
+	DRM_DEBUG_DRIVER("demura en %x\n", en);
+
+	REG_DMA_SETUP_OPS(*dma_write_cfg, demura_base + 0x4,
+		&en, sizeof(en), REG_SINGLE_WRITE, 0, 0, 0);
+	rc = dma_ops->setup_payload(dma_write_cfg);
+	if (rc)
+		DRM_ERROR("0x4: REG_SINGLE_WRITE failed ret %d\n", rc);
+
+	return rc;
+}
+
+void reg_dmav1_setup_demurav1(struct sde_hw_dspp *ctx, void *cfx)
+{
+	struct drm_msm_dem_cfg *dcfg;
+	struct sde_hw_cp_cfg *hw_cfg = cfx;
+	int rc = 0;
+	struct sde_hw_reg_dma_ops *dma_ops;
+	struct sde_reg_dma_setup_ops_cfg dma_write_cfg;
+	struct sde_reg_dma_kickoff_cfg kick_off;
+
+	rc = reg_dma_dspp_check(ctx, cfx, DEMURA_CFG);
+	if (rc)
+		return;
+
+	if (!hw_cfg->payload) {
+		reg_dma_demura_off(ctx, hw_cfg);
+		return;
+	}
+
+	if (hw_cfg->len != sizeof(struct drm_msm_dem_cfg)) {
+		DRM_ERROR("invalid sz of payload len %d exp %zd\n",
+				hw_cfg->len, sizeof(struct drm_msm_dem_cfg));
+	}
+	dcfg = hw_cfg->payload;
+	dma_ops = sde_reg_dma_get_ops();
+	dma_ops->reset_reg_dma_buf(dspp_buf[DEMURA_CFG][ctx->idx]);
+
+	REG_DMA_INIT_OPS(dma_write_cfg, MDSS, DEMURA_CFG,
+			dspp_buf[DEMURA_CFG][ctx->idx]);
+
+	REG_DMA_SETUP_OPS(dma_write_cfg, 0, NULL, 0, HW_BLK_SELECT, 0, 0, 0);
+	rc = dma_ops->setup_payload(&dma_write_cfg);
+	if (rc) {
+		DRM_ERROR("write decode select failed ret %d\n", rc);
+		return;
+	}
+	rc = __reg_dmav1_setup_demurav1_cfg0(ctx, dcfg, &dma_write_cfg,
+			dma_ops, hw_cfg);
+	if (rc) {
+		DRM_ERROR("failed setup_demurav1_cfg0 rc %d", rc);
+		return;
+	}
+	rc = __reg_dmav1_setup_demurav1_cfg1(ctx, dcfg, &dma_write_cfg,
+			dma_ops, hw_cfg);
+	if (rc) {
+		DRM_ERROR("failed setup_demurav1_cfg1 rc %d", rc);
+		return;
+	}
+
+	rc = __reg_dmav1_setup_demurav1_cfg3(ctx, dcfg, &dma_write_cfg,
+		dma_ops);
+	if (rc) {
+		DRM_ERROR("failed setup_demurav1_cfg3 rc %d", rc);
+		return;
+	}
+
+	rc = __reg_dmav1_setup_demurav1_cfg5(ctx, dcfg, &dma_write_cfg,
+		dma_ops);
+	if (rc) {
+		DRM_ERROR("failed setup_demurav1_cfg5 rc %d", rc);
+		return;
+	}
+
+	rc = __reg_dmav1_setup_demurav1_en(ctx, dcfg, &dma_write_cfg,
+			dma_ops);
+	if (rc) {
+		DRM_ERROR("failed setup_demurav1_en rc %d", rc);
+		return;
+	}
+
+	REG_DMA_SETUP_KICKOFF(kick_off, hw_cfg->ctl,
+			dspp_buf[DEMURA_CFG][ctx->idx],
+			REG_DMA_WRITE, DMA_CTL_QUEUE0, WRITE_IMMEDIATE);
+
+	DRM_DEBUG("enable demura buffer size %d\n",
+				dspp_buf[DEMURA_CFG][ctx->idx]->index);
+
+	rc = dma_ops->kick_off(&kick_off);
+	if (rc)
+		DRM_ERROR("failed to kick off ret %d\n", rc);
+
+}

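The V1 programming sequence mirrors the other reg-DMA color features: reset the DEMURA_CFG buffer, write the HW block decode select, program whichever of the cfg0/cfg1/cfg3/cfg5 sub-blocks the payload enables, write the enable word at offset 0x4, then kick off on DMA_CTL_QUEUE0 with WRITE_IMMEDIATE. A NULL payload takes the reg_dma_demura_off() path instead, which clears the op-mode register through the same queue.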
+ 7 - 0
msm/sde/sde_hw_reg_dma_v1_color_proc.h

@@ -295,4 +295,11 @@ void reg_dmav2_setup_vig_gamutv61(struct sde_hw_pipe *ctx, void *cfg);
  */
 void reg_dmav1_setup_spr_init_cfgv1(struct sde_hw_dspp *ctx, void *cfg);
 
+/**
+ * reg_dmav1_setup_demurav1() - function to set up the demurav1 configuration.
+ * @ctx: dspp ctx info
+ * @cfg: pointer to struct sde_hw_cp_cfg
+ */
+void reg_dmav1_setup_demurav1(struct sde_hw_dspp *ctx, void *cfg);
+
 #endif /* _SDE_HW_REG_DMA_V1_COLOR_PROC_H */

+ 2 - 1
msm/sde/sde_hw_util.h

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _SDE_HW_UTIL_H
@@ -12,6 +12,7 @@
 #include "sde_hw_catalog.h"
 
 #define REG_MASK(n)                     ((BIT(n)) - 1)
+#define REG_MASK_SHIFT(n, shift) ((REG_MASK(n)) << (shift))
 #define LP_DDR4_TYPE			0x7
 
 struct sde_format_extended;

+ 10 - 1
msm/sde/sde_plane.c

@@ -3408,7 +3408,8 @@ static void _sde_plane_install_properties(struct drm_plane *plane,
 		{SDE_DRM_BLEND_OP_NOT_DEFINED,    "not_defined"},
 		{SDE_DRM_BLEND_OP_OPAQUE,         "opaque"},
 		{SDE_DRM_BLEND_OP_PREMULTIPLIED,  "premultiplied"},
-		{SDE_DRM_BLEND_OP_COVERAGE,       "coverage"}
+		{SDE_DRM_BLEND_OP_COVERAGE,       "coverage"},
+		{SDE_DRM_BLEND_OP_SKIP,           "skip_blending"},
 	};
 	static const struct drm_prop_enum_list e_src_config[] = {
 		{SDE_DRM_DEINTERLACE, "deinterlace"}
@@ -3431,6 +3432,7 @@ static void _sde_plane_install_properties(struct drm_plane *plane,
 	int zpos_max = 255;
 	int zpos_def = 0;
 	char feature_name[256];
+	uint32_t index;
 
 	if (!plane || !psde) {
 		SDE_ERROR("invalid plane\n");
@@ -3549,6 +3551,13 @@ static void _sde_plane_install_properties(struct drm_plane *plane,
 			psde->pipe_sblk->max_per_pipe_bw * 1000LL);
 	sde_kms_info_add_keyint(info, "max_per_pipe_bw_high",
 			psde->pipe_sblk->max_per_pipe_bw_high * 1000LL);
+	index = (master_plane_id == 0) ? 0 : 1;
+	if (catalog->has_demura &&
+	    catalog->demura_supported[psde->pipe][index] != ~0x0) {
+		sde_kms_info_add_keyint(info, "demura_block", index);
+		sde_kms_info_add_keyint(info, "demura_pipe_id",
+				psde->pipe - SSPP_DMA0);
+	}
 
 	if ((is_master &&
 		(psde->features & BIT(SDE_SSPP_INVERSE_PMA))) ||

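These keyints let userspace pair planes with demura blocks: index is 0 for a physical plane and 1 for its virtual twin (master_plane_id != 0), matching the two columns of demura_supported[][], and demura_pipe_id is the pipe number relative to SSPP_DMA0. Only pipes whose entries were overridden from the ~0x0 default in _sde_hardware_pre_caps() (DMA1 and DMA3 on Lahaina) advertise the capability.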
+ 2 - 0
msm/sde/sde_reg_dma.h

@@ -59,6 +59,7 @@ enum sde_reg_dma_read_sel {
  * @LTM_ROI: LTM ROI
  * @LTM_VLUT: LTM VLUT
  * @RC_DATA: Rounded corner data
+ * @DEMURA_CFG: Demura feature
  * @REG_DMA_FEATURES_MAX: invalid selection
  */
 enum sde_reg_dma_features {
@@ -79,6 +80,7 @@ enum sde_reg_dma_features {
 	LTM_ROI,
 	LTM_VLUT,
 	RC_DATA,
+	DEMURA_CFG,
 	REG_DMA_FEATURES_MAX,
 };