
Merge "disp: msm: dsi: add api to control dsi active status"

qctecmdr 5 years ago
parent
commit
f2b1cba0d5

+ 13 - 1
msm/dsi/dsi_display.c

@@ -3784,6 +3784,8 @@ static int dsi_display_res_init(struct dsi_display *display)
 		goto error_ctrl_put;
 	}
 
+	display->is_active = true;
+
 	return 0;
 error_ctrl_put:
 	for (i = i - 1; i >= 0; i--) {
@@ -5447,6 +5449,16 @@ int dsi_display_get_active_displays(void **display_array, u32 max_display_count)
 	return count;
 }
 
+void dsi_display_set_active_state(struct dsi_display *display, bool is_active)
+{
+	if (!display)
+		return;
+
+	mutex_lock(&display->display_lock);
+	display->is_active = is_active;
+	mutex_unlock(&display->display_lock);
+}
+
 int dsi_display_drm_bridge_init(struct dsi_display *display,
 		struct drm_encoder *enc)
 {
@@ -5986,7 +5998,7 @@ int dsi_display_get_info(struct drm_connector *connector,
 	for (i = 0; i < info->num_of_h_tiles; i++)
 		info->h_tile_instance[i] = display->ctrl[i].ctrl->cell_index;
 
-	info->is_connected = true;
+	info->is_connected = display->is_active;
 
 	if (!strcmp(display->display_type, "primary"))
 		info->display_type = SDE_CONNECTOR_PRIMARY;
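
The new dsi_display_set_active_state() helper is the only place where is_active changes after dsi_display_res_init() sets it, and dsi_display_get_info() now reports it through info->is_connected. This change contains no caller for the helper, so the snippet below is only a hedged sketch of how an external path might use it, reusing the dsi_display_get_active_displays() API visible above; the caller itself is hypothetical.

	/* Hypothetical caller: mark the first active display as disconnected,
	 * so a later dsi_display_get_info() reports is_connected = false.
	 */
	void *displays[MAX_DSI_DISPLAYS];
	int count = dsi_display_get_active_displays(displays, MAX_DSI_DISPLAYS);

	if (count > 0)
		dsi_display_set_active_state(displays[0], false);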

+ 2 - 0
msm/dsi/dsi_display.h

@@ -187,6 +187,7 @@ struct dsi_display_ext_bridge {
  * @queue_cmd_waits   Indicates if wait for dma commands done has to be queued.
  * @dma_cmd_workq:	Pointer to the workqueue of DMA command transfer done
  *				wait sequence.
+ * @is_active:        status of the display
  */
 struct dsi_display {
 	struct platform_device *pdev;
@@ -279,6 +280,7 @@ struct dsi_display {
 
 	/* panel id of the display */
 	u64 panel_id;
+	bool is_active;
 };
 
 int dsi_display_dev_probe(struct platform_device *pdev);

+ 0 - 6
msm/msm_gem.c

@@ -1259,12 +1259,6 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 	int ret;
 	unsigned long flags = 0;
 
-	/* if we don't have IOMMU, don't bother pretending we can import: */
-	if (!iommu_present(&platform_bus_type)) {
-		dev_err(dev->dev, "cannot import without IOMMU\n");
-		return ERR_PTR(-EINVAL);
-	}
-
 	size = PAGE_ALIGN(dmabuf->size);
 
 	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj,

+ 8 - 0
msm/sde/sde_crtc.c

@@ -1739,6 +1739,8 @@ int sde_crtc_find_plane_fb_modes(struct drm_crtc *crtc,
 		case SDE_DRM_FB_SEC_DIR_TRANS:
 			(*fb_sec_dir)++;
 			break;
+		case SDE_DRM_FB_NON_SEC_DIR_TRANS:
+			break;
 		default:
 			SDE_ERROR("Error: Plane[%d], fb_trans_mode:%d",
 					DRMID(plane), mode);
@@ -1786,6 +1788,8 @@ int sde_crtc_state_find_plane_fb_modes(struct drm_crtc_state *state,
 		case SDE_DRM_FB_SEC_DIR_TRANS:
 			(*fb_sec_dir)++;
 			break;
+		case SDE_DRM_FB_NON_SEC_DIR_TRANS:
+			break;
 		default:
 			SDE_ERROR("Error: Plane[%d], fb_trans_mode:%d",
 					DRMID(plane), mode);
@@ -1940,6 +1944,10 @@ int sde_crtc_get_secure_transition_ops(struct drm_crtc *crtc,
 				old_valid_fb, post_commit, &ops);
 		break;
 
+	case SDE_DRM_FB_NON_SEC_DIR_TRANS:
+		ops = 0;
+		break;
+
 	default:
 		SDE_ERROR("crtc%d: invalid plane fb_mode %d\n",
 				DRMID(crtc), translation_mode);

+ 54 - 4
msm/sde/sde_hw_catalog.c

@@ -161,6 +161,11 @@
 /*************************************************************
  *  DTSI PROPERTY INDEX
  *************************************************************/
+enum {
+	SDE_HW_VERSION,
+	SDE_HW_PROP_MAX,
+};
+
 enum {
 	HW_OFF,
 	HW_LEN,
@@ -524,6 +529,10 @@ struct sde_dt_props {
 /*************************************************************
  * dts property list
  *************************************************************/
+static struct sde_prop_type sde_hw_prop[] = {
+	{SDE_HW_VERSION, "qcom,sde-hw-version", false, PROP_TYPE_U32},
+};
+
 static struct sde_prop_type sde_prop[] = {
 	{SDE_OFF, "qcom,sde-off", true, PROP_TYPE_U32},
 	{SDE_LEN, "qcom,sde-len", false, PROP_TYPE_U32},
@@ -4791,10 +4800,48 @@ void sde_hw_catalog_deinit(struct sde_mdss_cfg *sde_cfg)
 	kfree(sde_cfg);
 }
 
+static int sde_hw_ver_parse_dt(struct drm_device *dev, struct device_node *np,
+			struct sde_mdss_cfg *cfg)
+{
+	int rc, len, prop_count[SDE_HW_PROP_MAX];
+	struct sde_prop_value *prop_value = NULL;
+	bool prop_exists[SDE_HW_PROP_MAX];
+
+	if (!cfg) {
+		SDE_ERROR("invalid argument\n");
+		return -EINVAL;
+	}
+
+	prop_value = kzalloc(SDE_HW_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!prop_value)
+		return -ENOMEM;
+
+	rc = _validate_dt_entry(np, sde_hw_prop, ARRAY_SIZE(sde_hw_prop),
+			prop_count, &len);
+	if (rc)
+		goto end;
+
+	rc = _read_dt_entry(np, sde_hw_prop, ARRAY_SIZE(sde_hw_prop),
+			prop_count, prop_exists, prop_value);
+	if (rc)
+		goto end;
+
+	if (prop_exists[SDE_HW_VERSION])
+		cfg->hwversion = PROP_VALUE_ACCESS(prop_value,
+					SDE_HW_VERSION, 0);
+	else
+		cfg->hwversion = sde_kms_get_hw_version(dev);
+
+end:
+	kfree(prop_value);
+	return rc;
+}
+
 /*************************************************************
  * hardware catalog init
  *************************************************************/
-struct sde_mdss_cfg *sde_hw_catalog_init(struct drm_device *dev, u32 hw_rev)
+struct sde_mdss_cfg *sde_hw_catalog_init(struct drm_device *dev)
 {
 	int rc;
 	struct sde_mdss_cfg *sde_cfg;
@@ -4807,10 +4854,13 @@ struct sde_mdss_cfg *sde_hw_catalog_init(struct drm_device *dev, u32 hw_rev)
 	if (!sde_cfg)
 		return ERR_PTR(-ENOMEM);
 
-	sde_cfg->hwversion = hw_rev;
 	INIT_LIST_HEAD(&sde_cfg->irq_offset_list);
 
-	rc = _sde_hardware_pre_caps(sde_cfg, hw_rev);
+	rc = sde_hw_ver_parse_dt(dev, np, sde_cfg);
+	if (rc)
+		goto end;
+
+	rc = _sde_hardware_pre_caps(sde_cfg, sde_cfg->hwversion);
 	if (rc)
 		goto end;
 
@@ -4906,7 +4956,7 @@ struct sde_mdss_cfg *sde_hw_catalog_init(struct drm_device *dev, u32 hw_rev)
 	if (rc)
 		goto end;
 
-	rc = _sde_hardware_post_caps(sde_cfg, hw_rev);
+	rc = _sde_hardware_post_caps(sde_cfg, sde_cfg->hwversion);
 	if (rc)
 		goto end;
 

+ 1 - 2
msm/sde/sde_hw_catalog.h

@@ -1602,11 +1602,10 @@ void sde_hw_mixer_set_preference(struct sde_mdss_cfg *sde_cfg, u32 num_lm,
  * sde_hw_catalog_init - sde hardware catalog init API parses dtsi property
  * and stores all parsed offset, hardware capabilities in config structure.
  * @dev:          drm device node.
- * @hw_rev:       caller needs provide the hardware revision before parsing.
  *
  * Return: parsed sde config structure
  */
-struct sde_mdss_cfg *sde_hw_catalog_init(struct drm_device *dev, u32 hw_rev);
+struct sde_mdss_cfg *sde_hw_catalog_init(struct drm_device *dev);
 
 /**
  * sde_hw_catalog_deinit - sde hardware catalog cleanup

+ 7 - 0
msm/sde/sde_hw_mdss.h

@@ -636,15 +636,22 @@ struct sde_splash_display {
 	u8 pipe_cnt;
 };
 
+enum sde_handoff_type {
+	SDE_SPLASH_HANDOFF,
+	SDE_VM_HANDOFF,
+};
+
 /**
  * struct sde_splash_data - Struct contains details of continuous splash
  *	for all the displays connected by probe time
+ * @type:                Indicates the type of handoff
  * @num_splash_regions:  Indicates number of splash memory regions from dtsi
  * @num_splash_displays: Indicates count of active displays in continuous splash
  * @splash_mem:          Array of all struct sde_splash_mem listed from dtsi
  * @splash_display:      Array of all struct sde_splash_display
  */
 struct sde_splash_data {
+	enum sde_handoff_type  type;
 	u32 num_splash_regions;
 	u32 num_splash_displays;
 	struct sde_splash_mem splash_mem[MAX_DSI_DISPLAYS];

+ 44 - 5
msm/sde/sde_hw_top.c

@@ -62,8 +62,19 @@
 
 #define DCE_SEL                           0x450
 
-#define ROT_SID_RD			  0x20
-#define ROT_SID_WR			  0x24
+#define MDP_SID_VIG0			  0x0
+#define MDP_SID_VIG1			  0x4
+#define MDP_SID_VIG2			  0x8
+#define MDP_SID_VIG3			  0xC
+#define MDP_SID_DMA0			  0x10
+#define MDP_SID_DMA1			  0x14
+#define MDP_SID_DMA2			  0x18
+#define MDP_SID_DMA3			  0x1C
+#define MDP_SID_ROT_RD			  0x20
+#define MDP_SID_ROT_WR			  0x24
+#define MDP_SID_WB2			  0x28
+#define MDP_SID_XIN7			  0x2C
+
 #define ROT_SID_ID_VAL			  0x1c
 
 static void sde_hw_setup_split_pipe(struct sde_hw_mdp *mdp,
@@ -470,10 +481,38 @@ struct sde_hw_sid *sde_hw_sid_init(void __iomem *addr,
 	return c;
 }
 
-void sde_hw_sid_rotator_set(struct sde_hw_sid *sid)
+void sde_hw_set_rotator_sid(struct sde_hw_sid *sid)
 {
-	SDE_REG_WRITE(&sid->hw, ROT_SID_RD, ROT_SID_ID_VAL);
-	SDE_REG_WRITE(&sid->hw, ROT_SID_WR, ROT_SID_ID_VAL);
+	if (!sid)
+		return;
+
+	SDE_REG_WRITE(&sid->hw, MDP_SID_ROT_RD, ROT_SID_ID_VAL);
+	SDE_REG_WRITE(&sid->hw, MDP_SID_ROT_WR, ROT_SID_ID_VAL);
+}
+
+void sde_hw_set_sspp_sid(struct sde_hw_sid *sid, u32 pipe, u32 vm)
+{
+	u32 offset = 0;
+
+	if (!sid)
+		return;
+
+	if ((pipe >= SSPP_VIG0) && (pipe <= SSPP_VIG3))
+		offset = MDP_SID_VIG0 + ((pipe - SSPP_VIG0) * 4);
+	else if ((pipe >= SSPP_DMA0) && (pipe <= SSPP_DMA3))
+		offset = MDP_SID_DMA0 + ((pipe - SSPP_DMA0) * 4);
+	else
+		return;
+
+	SDE_REG_WRITE(&sid->hw, offset, vm << 2);
+}
+
+void sde_hw_set_lutdma_sid(struct sde_hw_sid *sid, u32 vm)
+{
+	if (!sid)
+		return;
+
+	SDE_REG_WRITE(&sid->hw, MDP_SID_XIN7, vm << 2);
 }
 
 static void sde_hw_program_cwb_ppb_ctrl(struct sde_hw_mdp *mdp,
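
The offsets above lay out one 32-bit SID register per client, and sde_hw_set_sspp_sid() derives the register from the pipe enum while shifting the VM id left by two bits. A short worked example of that arithmetic, assuming the SSPP_VIG0..SSPP_VIG3 and SSPP_DMA0..SSPP_DMA3 enum values are consecutive (which the subtraction in the function already relies on):

	/* Illustrative only: what sde_hw_set_sspp_sid() ends up writing. */
	sde_hw_set_sspp_sid(sid, SSPP_VIG2, 1);
	/* offset = MDP_SID_VIG0 + (SSPP_VIG2 - SSPP_VIG0) * 4 = 0x8 (MDP_SID_VIG2),
	 * value  = 1 << 2 = 0x4
	 */
	sde_hw_set_sspp_sid(sid, SSPP_DMA1, 3);
	/* offset = MDP_SID_DMA0 + (SSPP_DMA1 - SSPP_DMA0) * 4 = 0x14 (MDP_SID_DMA1),
	 * value  = 3 << 2 = 0xc
	 */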

+ 18 - 3
msm/sde/sde_hw_top.h

@@ -243,7 +243,7 @@ struct sde_hw_sid {
 };
 
 /**
- * sde_hw_sid_rotator_set - initialize the sid blk reg map
+ * sde_hw_sid_init - initialize the sid blk reg map
  * @addr: Mapped register io address
  * @sid_len: Length of block
  * @m: Pointer to mdss catalog data
@@ -252,10 +252,25 @@ struct sde_hw_sid *sde_hw_sid_init(void __iomem *addr,
 		u32 sid_len, const struct sde_mdss_cfg *m);
 
 /**
- * sde_hw_sid_rotator_set - set sid values for rotator
+ * sde_hw_set_rotator_sid - set sid values for rotator
  * sid: sde_hw_sid passed from kms
  */
-void sde_hw_sid_rotator_set(struct sde_hw_sid *sid);
+void sde_hw_set_rotator_sid(struct sde_hw_sid *sid);
+
+/**
+ * sde_hw_set_sspp_sid - set sid values for the pipes
+ * sid: sde_hw_sid passed from kms
+ * pipe: sspp id
+ * vm: vm id to set for SIDs
+ */
+void sde_hw_set_sspp_sid(struct sde_hw_sid *sid, u32 pipe, u32 vm);
+
+/**
+ * sde_hw_set_lutdma_sid - set sid values for LUTDMA
+ * sid: sde_hw_sid passed from kms
+ * vm: vm id to set for SIDs
+ */
+void sde_hw_set_lutdma_sid(struct sde_hw_sid *sid, u32 vm);
 
 /**
  * to_sde_hw_mdp - convert base object sde_hw_base to container

+ 43 - 24
msm/sde/sde_kms.c

@@ -837,7 +837,7 @@ static int _sde_kms_unmap_all_splash_regions(struct sde_kms *sde_kms)
 	int i = 0;
 	int ret = 0;
 
-	if (!sde_kms)
+	if (!sde_kms || !sde_kms->splash_data.num_splash_regions)
 		return -EINVAL;
 
 	for (i = 0; i < sde_kms->splash_data.num_splash_displays; i++) {
@@ -936,14 +936,15 @@ static void sde_kms_commit(struct msm_kms *kms,
 	SDE_ATRACE_END("sde_kms_commit");
 }
 
-static void _sde_kms_free_splash_region(struct sde_kms *sde_kms,
+static void _sde_kms_free_splash_display_data(struct sde_kms *sde_kms,
 		struct sde_splash_display *splash_display)
 {
 	if (!sde_kms || !splash_display ||
 			!sde_kms->splash_data.num_splash_displays)
 		return;
 
-	_sde_kms_splash_mem_put(sde_kms, splash_display->splash);
+	if (sde_kms->splash_data.num_splash_regions)
+		_sde_kms_splash_mem_put(sde_kms, splash_display->splash);
 	sde_kms->splash_data.num_splash_displays--;
 	SDE_DEBUG("cont_splash handoff done, remaining:%d\n",
 				sde_kms->splash_data.num_splash_displays);
@@ -981,7 +982,7 @@ static void _sde_kms_release_splash_resource(struct sde_kms *sde_kms,
 	if (splash_display->cont_splash_enabled) {
 		sde_encoder_update_caps_for_cont_splash(splash_display->encoder,
 				splash_display, false);
-		_sde_kms_free_splash_region(sde_kms, splash_display);
+		_sde_kms_free_splash_display_data(sde_kms, splash_display);
 	}
 
 	/* remove the votes if all displays are done with splash */
@@ -2147,11 +2148,31 @@ _sde_kms_get_address_space(struct msm_kms *kms,
 static struct device *_sde_kms_get_address_space_device(struct msm_kms *kms,
 		unsigned int domain)
 {
-	struct msm_gem_address_space *aspace =
-		_sde_kms_get_address_space(kms, domain);
+	struct sde_kms *sde_kms;
+	struct device *dev;
+	struct msm_gem_address_space *aspace;
+
+	if (!kms) {
+		SDE_ERROR("invalid kms\n");
+		return  NULL;
+	}
+
+	sde_kms = to_sde_kms(kms);
+	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
+		SDE_ERROR("invalid params\n");
+		return NULL;
+	}
 
-	return (aspace && aspace->domain_attached) ?
-			msm_gem_get_aspace_device(aspace) : NULL;
+	/* return default device, when IOMMU is not present */
+	if (!iommu_present(&platform_bus_type)) {
+		dev = sde_kms->dev->dev;
+	} else {
+		aspace = _sde_kms_get_address_space(kms, domain);
+		dev =  (aspace && aspace->domain_attached) ?
+				msm_gem_get_aspace_device(aspace) : NULL;
+	}
+
+	return dev;
 }
 
 static void _sde_kms_post_open(struct msm_kms *kms, struct drm_file *file)
@@ -2269,7 +2290,8 @@ static int sde_kms_cont_splash_config(struct msm_kms *kms)
 		return -EINVAL;
 	}
 
-	if (!sde_kms->splash_data.num_splash_regions ||
+	if (((sde_kms->splash_data.type == SDE_SPLASH_HANDOFF)
+		&& (!sde_kms->splash_data.num_splash_regions)) ||
 			!sde_kms->splash_data.num_splash_displays) {
 		DRM_INFO("cont_splash feature not enabled\n");
 		return rc;
@@ -2839,12 +2861,6 @@ static const struct msm_kms_funcs kms_funcs = {
 	.get_mixer_count = sde_kms_get_mixer_count,
 };
 
-/* the caller api needs to turn on clock before calling it */
-static inline void _sde_kms_core_hw_rev_init(struct sde_kms *sde_kms)
-{
-	sde_kms->core_rev = readl_relaxed(sde_kms->mmio + 0x0);
-}
-
 static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms)
 {
 	int i;
@@ -2934,7 +2950,7 @@ static void sde_kms_init_shared_hw(struct sde_kms *sde_kms)
 						sde_kms->catalog);
 
 	if (sde_kms->sid)
-		sde_hw_sid_rotator_set(sde_kms->hw_sid);
+		sde_hw_set_rotator_sid(sde_kms->hw_sid);
 }
 
 static void _sde_kms_set_lutdma_vbif_remap(struct sde_kms *sde_kms)
@@ -3115,7 +3131,8 @@ static int sde_kms_pd_disable(struct generic_pm_domain *genpd)
 	return 0;
 }
 
-static int _sde_kms_get_splash_data(struct sde_splash_data *data)
+static int _sde_kms_get_splash_data(struct sde_kms *sde_kms,
+			struct sde_splash_data *data)
 {
 	int i = 0;
 	int ret = 0;
@@ -3198,6 +3215,8 @@ static int _sde_kms_get_splash_data(struct sde_splash_data *data)
 				splash_display->splash->splash_buf_size);
 	}
 
+	sde_kms->splash_data.type = SDE_SPLASH_HANDOFF;
+
 	return ret;
 }
 
@@ -3325,11 +3344,7 @@ static int _sde_kms_hw_init_blocks(struct sde_kms *sde_kms,
 	struct sde_rm *rm = NULL;
 	int i, rc = -EINVAL;
 
-	_sde_kms_core_hw_rev_init(sde_kms);
-
-	pr_info("sde hardware revision:0x%x\n", sde_kms->core_rev);
-
-	sde_kms->catalog = sde_hw_catalog_init(dev, sde_kms->core_rev);
+	sde_kms->catalog = sde_hw_catalog_init(dev);
 	if (IS_ERR_OR_NULL(sde_kms->catalog)) {
 		rc = PTR_ERR(sde_kms->catalog);
 		if (!sde_kms->catalog)
@@ -3338,6 +3353,9 @@ static int _sde_kms_hw_init_blocks(struct sde_kms *sde_kms,
 		sde_kms->catalog = NULL;
 		goto power_error;
 	}
+	sde_kms->core_rev = sde_kms->catalog->hwversion;
+
+	pr_info("sde hardware revision:0x%x\n", sde_kms->core_rev);
 
 	/* initialize power domain if defined */
 	rc = _sde_kms_hw_init_power_helper(dev, sde_kms);
@@ -3400,7 +3418,8 @@ static int _sde_kms_hw_init_blocks(struct sde_kms *sde_kms,
 			 * cont-splash disabled case
 			 */
 			if (!display->cont_splash_enabled || ret)
-				_sde_kms_free_splash_region(sde_kms, display);
+				_sde_kms_free_splash_display_data(
+						sde_kms, display);
 		}
 	}
 
@@ -3515,7 +3534,7 @@ static int sde_kms_hw_init(struct msm_kms *kms)
 	if (rc)
 		goto error;
 
-	rc = _sde_kms_get_splash_data(&sde_kms->splash_data);
+	rc = _sde_kms_get_splash_data(sde_kms, &sde_kms->splash_data);
 	if (rc)
 		SDE_DEBUG("sde splash data fetch failed: %d\n", rc);
 

+ 17 - 0
msm/sde/sde_kms.h

@@ -323,6 +323,23 @@ struct vsync_info {
  */
 bool sde_is_custom_client(void);
 
+/**
+ * sde_kms_get_hw_version - get the hw revision - client is expected to
+ *    enable the power resources before making this call
+ * @dev: Pointer to drm device
+ */
+static inline u32 sde_kms_get_hw_version(struct drm_device *dev)
+{
+	struct sde_kms *sde_kms;
+
+	if (!ddev_to_msm_kms(dev))
+		return 0;
+
+	sde_kms = to_sde_kms(ddev_to_msm_kms(dev));
+
+	return readl_relaxed(sde_kms->mmio + 0x0);
+}
+
 /**
  * sde_kms_power_resource_is_enabled - whether or not power resource is enabled
  * @dev: Pointer to drm device

+ 24 - 0
msm/sde/sde_plane.c

@@ -233,6 +233,29 @@ void sde_plane_setup_src_split_order(struct drm_plane *plane,
 					rect_mode, enable);
 }
 
+void sde_plane_set_sid(struct drm_plane *plane, u32 vm)
+{
+	struct sde_plane *psde;
+	struct sde_kms *sde_kms;
+	struct msm_drm_private *priv;
+
+	if (!plane || !plane->dev) {
+		SDE_ERROR("invalid plane\n");
+		return;
+	}
+
+	priv = plane->dev->dev_private;
+	if (!priv || !priv->kms) {
+		SDE_ERROR("invalid KMS reference\n");
+		return;
+	}
+
+	sde_kms = to_sde_kms(priv->kms);
+
+	psde = to_sde_plane(plane);
+	sde_hw_set_sspp_sid(sde_kms->hw_sid, psde->pipe, vm);
+}
+
 /**
  * _sde_plane_set_qos_lut - set danger, safe and creq LUT of the given plane
  * @plane:		Pointer to drm plane
@@ -678,6 +701,7 @@ static int _sde_plane_get_aspace(
 		if (!aspace)
 			return -EINVAL;
 		break;
+	case SDE_DRM_FB_NON_SEC_DIR_TRANS:
 	case SDE_DRM_FB_SEC_DIR_TRANS:
 		*aspace = NULL;
 		break;
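
sde_plane_set_sid() resolves the plane's SSPP id and forwards it, together with the VM id, to sde_hw_set_sspp_sid() introduced above; its callers are not part of this change. The snippet below is therefore only a hypothetical sketch of how a VM-handoff path might retarget every plane attached to a CRTC, using the standard drm_atomic_crtc_state_for_each_plane() iterator; the surrounding crtc and vm variables are assumed.

	/* Hypothetical: point every plane of this CRTC at the given VM. */
	struct drm_plane *plane;

	drm_atomic_crtc_state_for_each_plane(plane, crtc->state)
		sde_plane_set_sid(plane, vm);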

+ 7 - 0
msm/sde/sde_plane.h

@@ -327,6 +327,13 @@ void sde_plane_clear_ubwc_error(struct drm_plane *plane);
 void sde_plane_setup_src_split_order(struct drm_plane *plane,
 		enum sde_sspp_multirect_index rect_mode, bool enable);
 
+/*
+ * sde_plane_set_sid - set VM SID for the plane
+ * @plane: Pointer to DRM plane object
+ * @vm: VM id
+ */
+void sde_plane_set_sid(struct drm_plane *plane, u32 vm);
+
 /* sde_plane_is_cache_required - indicates if the system cache is
  *	required for the plane.
  * @plane: Pointer to DRM plane object