
Merge "disp: rotator: remove setting secure domain attribute at probe"

qctecmdr 5 years ago
parent
commit
b994f0f191

+ 3 - 2
msm/sde/sde_core_perf.c

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
@@ -1041,7 +1041,8 @@ static ssize_t _sde_core_perf_mode_write(struct file *file,
 			SDE_ERROR("failed to set %s clock rate %llu\n",
 					perf->clk_name,
 					perf->max_core_clk_rate);
-		DRM_INFO("minimum performance mode\n");
+		else
+			DRM_INFO("minimum performance mode\n");
 	} else if (perf_mode == SDE_PERF_MODE_NORMAL) {
 		/* reset the perf tune params to 0 */
 		perf->perf_tune.min_core_clk = 0;

+ 32 - 6
msm/sde/sde_crtc.c

@@ -4171,7 +4171,8 @@ end:
 static int _sde_crtc_check_secure_blend_config(struct drm_crtc *crtc,
 	struct drm_crtc_state *state, struct plane_state pstates[],
 	struct sde_crtc_state *cstate, struct sde_kms *sde_kms,
-	int cnt, int secure, int fb_ns, int fb_sec, int fb_sec_dir)
+	int cnt, int secure, int fb_ns, int fb_sec, int fb_sec_dir,
+	bool conn_secure, bool is_wb)
 {
 	struct drm_plane *plane;
 	int i;
@@ -4252,6 +4253,19 @@ static int _sde_crtc_check_secure_blend_config(struct drm_crtc *crtc,
 		}
 	}
 
+	/*
+	 * If any input buffers are secure,
+	 * the output buffer must also be secure.
+	 */
+	if (is_wb && fb_sec && !conn_secure) {
+		SDE_ERROR(
+			"crtc%d: input fb sec %d, output fb secure %d\n",
+			DRMID(crtc),
+			(fb_sec) ? 1 : 0,
+			(conn_secure) ? 1 : 0);
+		return -EINVAL;
+	}
+
 	return 0;
 }
 
@@ -4318,7 +4332,7 @@ static int _sde_crtc_check_secure_state_smmu_translation(struct drm_crtc *crtc,
 
 static int _sde_crtc_check_secure_state(struct drm_crtc *crtc,
 		struct drm_crtc_state *state, struct plane_state pstates[],
-		int cnt)
+		int cnt, bool conn_secure, bool is_wb)
 {
 	struct sde_crtc_state *cstate;
 	struct sde_kms *sde_kms;
@@ -4347,7 +4361,8 @@ static int _sde_crtc_check_secure_state(struct drm_crtc *crtc,
 		return rc;
 
 	rc = _sde_crtc_check_secure_blend_config(crtc, state, pstates, cstate,
-			sde_kms, cnt, secure, fb_ns, fb_sec, fb_sec_dir);
+			sde_kms, cnt, secure, fb_ns, fb_sec, fb_sec_dir,
+			conn_secure, is_wb);
 	if (rc)
 		return rc;
 
@@ -4538,7 +4553,8 @@ static int _sde_crtc_check_zpos(struct drm_crtc_state *state,
 static int _sde_crtc_atomic_check_pstates(struct drm_crtc *crtc,
 		struct drm_crtc_state *state,
 		struct plane_state *pstates,
-		struct sde_multirect_plane_states *multirect_plane)
+		struct sde_multirect_plane_states *multirect_plane,
+		bool conn_secure, bool is_wb)
 {
 	struct sde_crtc *sde_crtc;
 	struct sde_crtc_state *cstate;
@@ -4569,7 +4585,8 @@ static int _sde_crtc_atomic_check_pstates(struct drm_crtc *crtc,
 	if (rc)
 		return rc;
 
-	rc = _sde_crtc_check_secure_state(crtc, state, pstates, cnt);
+	rc = _sde_crtc_check_secure_state(crtc, state, pstates, cnt,
+			conn_secure, is_wb);
 	if (rc)
 		return rc;
 
@@ -4595,6 +4612,7 @@ static int sde_crtc_atomic_check(struct drm_crtc *crtc,
 	struct sde_crtc_state *cstate;
 	struct drm_display_mode *mode;
 	int rc = 0;
+	bool conn_secure = false, is_wb = false;
 	struct sde_multirect_plane_states *multirect_plane = NULL;
 	struct drm_connector *conn;
 	struct drm_connector_list_iter conn_iter;
@@ -4648,6 +4666,14 @@ static int sde_crtc_atomic_check(struct drm_crtc *crtc,
 		if (conn->state && conn->state->crtc == crtc &&
 				cstate->num_connectors < MAX_CONNECTORS) {
 			cstate->connectors[cstate->num_connectors++] = conn;
+
+			if (conn->connector_type ==
+					DRM_MODE_CONNECTOR_VIRTUAL)
+				is_wb = true;
+			if (sde_connector_get_property(conn->state,
+					CONNECTOR_PROP_FB_TRANSLATION_MODE) ==
+					SDE_DRM_FB_SEC)
+				conn_secure = true;
 		}
 	drm_connector_list_iter_end(&conn_iter);
 
@@ -4655,7 +4681,7 @@ static int sde_crtc_atomic_check(struct drm_crtc *crtc,
 	_sde_crtc_setup_lm_bounds(crtc, state);
 
 	rc = _sde_crtc_atomic_check_pstates(crtc, state, pstates,
-			multirect_plane);
+			multirect_plane, conn_secure, is_wb);
 	if (rc) {
 		SDE_ERROR("crtc%d failed pstate check %d\n", crtc->base.id, rc);
 		goto end;

+ 23 - 0
msm/sde/sde_encoder_phys_cmd.c

@@ -1450,6 +1450,7 @@ static int _sde_encoder_phys_cmd_wait_for_wr_ptr(
 	int ret;
 	bool frame_pending = true;
 	struct sde_hw_ctl *ctl;
+	unsigned long lock_flags;
 
 	if (!phys_enc || !phys_enc->hw_ctl) {
 		SDE_ERROR("invalid argument(s)\n");
@@ -1474,6 +1475,28 @@ static int _sde_encoder_phys_cmd_wait_for_wr_ptr(
 			frame_pending = ctl->ops.get_start_state(ctl);
 
 		ret = frame_pending ? ret : 0;
+
+		/*
+		 * There can be few cases of ESD where CTL_START is cleared but
+		 * wr_ptr irq doesn't come. Signaling retire fence in these
+		 * cases to avoid freeze and dangling pending_retire_fence_cnt
+		 */
+		if (!ret) {
+			SDE_EVT32(DRMID(phys_enc->parent),
+				SDE_EVTLOG_FUNC_CASE1);
+
+			if (sde_encoder_phys_cmd_is_master(phys_enc) &&
+				atomic_add_unless(
+				&phys_enc->pending_retire_fence_cnt, -1, 0)) {
+				spin_lock_irqsave(phys_enc->enc_spinlock,
+					lock_flags);
+				phys_enc->parent_ops.handle_frame_done(
+				 phys_enc->parent, phys_enc,
+				 SDE_ENCODER_FRAME_EVENT_SIGNAL_RETIRE_FENCE);
+				spin_unlock_irqrestore(phys_enc->enc_spinlock,
+					lock_flags);
+			}
+		}
 	}
 
 	cmd_enc->wr_ptr_wait_success = (ret == 0) ? true : false;
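
Note (not part of the commit): the hunk above signals the retire fence at most once by decrementing pending_retire_fence_cnt only while it is still non-zero (atomic_add_unless), under the encoder spinlock. A minimal, self-contained C11 sketch of that "decrement only if positive" guard follows; the names and the userspace atomics are illustrative stand-ins, not the driver's API.

/* take_pending_fence(): returns true only for the single caller that
 * successfully decrements the counter from a non-zero value. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int pending_retire_fence_cnt = 1;

static bool take_pending_fence(void)
{
	int cur = atomic_load(&pending_retire_fence_cnt);

	while (cur > 0) {
		if (atomic_compare_exchange_weak(&pending_retire_fence_cnt,
						 &cur, cur - 1))
			return true;	/* we own the (single) fence signal */
	}
	return false;			/* already zero: someone else signaled */
}

int main(void)
{
	/* e.g. timeout path and irq path racing: only one of them wins */
	printf("first:  %d\n", take_pending_fence());
	printf("second: %d\n", take_pending_fence());
	return 0;
}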

+ 4 - 3
msm/sde/sde_encoder_phys_vid.c

@@ -756,9 +756,10 @@ static void sde_encoder_phys_vid_enable(struct sde_encoder_phys *phys_enc)
 	vid_enc = to_sde_encoder_phys_vid(phys_enc);
 	intf = phys_enc->hw_intf;
 	ctl = phys_enc->hw_ctl;
-	if (!phys_enc->hw_intf || !phys_enc->hw_ctl) {
-		SDE_ERROR("invalid hw_intf %d hw_ctl %d\n",
-				!phys_enc->hw_intf, !phys_enc->hw_ctl);
+	if (!phys_enc->hw_intf || !phys_enc->hw_ctl || !phys_enc->hw_pp) {
+		SDE_ERROR("invalid hw_intf %d hw_ctl %d hw_pp %d\n",
+				!phys_enc->hw_intf, !phys_enc->hw_ctl,
+				!phys_enc->hw_pp);
 		return;
 	}
 	if (!ctl->ops.update_bitmask) {

+ 1 - 1
msm/sde/sde_encoder_phys_wb.c

@@ -1039,7 +1039,7 @@ static void _sde_encoder_phys_wb_frame_done_helper(void *arg, bool frame_error)
 				phys_enc, event);
 	}
 
-	if (phys_enc->parent_ops.handle_vblank_virt)
+	if (!phys_enc->in_clone_mode && phys_enc->parent_ops.handle_vblank_virt)
 		phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
 				phys_enc);
 

+ 2 - 1
msm/sde/sde_hw_catalog.c

@@ -1870,7 +1870,8 @@ static int sde_ctl_parse_dt(struct device_node *np,
 				ctl_prop[HW_DISP].prop_name, i, &disp_pref);
 		if (disp_pref && !strcmp(disp_pref, "primary"))
 			set_bit(SDE_CTL_PRIMARY_PREF, &ctl->features);
-		if (i < MAX_SPLIT_DISPLAY_CTL)
+		if ((i < MAX_SPLIT_DISPLAY_CTL) &&
+			!(IS_SDE_CTL_REV_100(sde_cfg->ctl_rev)))
 			set_bit(SDE_CTL_SPLIT_DISPLAY, &ctl->features);
 		if (i < MAX_PP_SPLIT_DISPLAY_CTL)
 			set_bit(SDE_CTL_PINGPONG_SPLIT, &ctl->features);

+ 43 - 0
msm/sde/sde_plane.c

@@ -2498,6 +2498,44 @@ static int _sde_atomic_check_excl_rect(struct sde_plane *psde,
 	return ret;
 }
 
+
+static int _sde_plane_validate_shared_crtc(struct sde_plane *psde,
+				struct drm_plane_state *state)
+{
+	struct sde_kms *sde_kms;
+	struct sde_splash_display *splash_display;
+	int i, j;
+
+	sde_kms = _sde_plane_get_kms(&psde->base);
+
+	if (!sde_kms || !state->crtc)
+		return 0;
+
+	for (i = 0; i < MAX_DSI_DISPLAYS; i++) {
+		splash_display = &sde_kms->splash_data.splash_display[i];
+
+		if (splash_display && splash_display->cont_splash_enabled &&
+			splash_display->encoder &&
+			state->crtc != splash_display->encoder->crtc) {
+
+			for (j = 0; j < MAX_DATA_PATH_PER_DSIPLAY; j++) {
+
+				if (splash_display->pipes[j].sspp ==
+						psde->pipe) {
+					SDE_ERROR_PLANE(psde,
+					"pipe:%d used in cont-splash on crtc:%d\n",
+					psde->pipe,
+					splash_display->encoder->crtc->base.id);
+					return -EINVAL;
+				}
+			}
+		}
+	}
+
+	return 0;
+
+}
+
 static int sde_plane_sspp_atomic_check(struct drm_plane *plane,
 		struct drm_plane_state *state)
 {
@@ -2598,6 +2636,11 @@ static int sde_plane_sspp_atomic_check(struct drm_plane *plane,
 	ret = _sde_atomic_check_excl_rect(psde, pstate,
 		&src, fmt, ret);
 
+	if (ret)
+		return ret;
+
+	ret = _sde_plane_validate_shared_crtc(psde, state);
+
 	if (ret)
 		return ret;
 

+ 13 - 1
msm/sde_power_handle.c

@@ -48,8 +48,10 @@ static void sde_power_event_trigger_locked(struct sde_power_handle *phandle,
 	struct sde_power_event *event;
 
 	list_for_each_entry(event, &phandle->event_list, list) {
-		if (event->event_type & event_type)
+		if (event->event_type & event_type) {
 			event->cb_fnc(event_type, event->usr);
+			phandle->last_event_handled = event_type;
+		}
 	}
 }
 
@@ -768,6 +770,15 @@ int sde_power_clk_set_rate(struct sde_power_handle *phandle, char *clock_name,
 		pr_err("invalid input power handle\n");
 		return -EINVAL;
 	}
+
+	mutex_lock(&phandle->phandle_lock);
+	if (phandle->last_event_handled & SDE_POWER_EVENT_POST_DISABLE) {
+		pr_debug("invalid power state %u\n",
+				phandle->last_event_handled);
+		mutex_unlock(&phandle->phandle_lock);
+		return -EINVAL;
+	}
+
 	mp = &phandle->mp;
 
 	for (i = 0; i < mp->num_clk; i++) {
@@ -781,6 +792,7 @@ int sde_power_clk_set_rate(struct sde_power_handle *phandle, char *clock_name,
 			break;
 		}
 	}
+	mutex_unlock(&phandle->phandle_lock);
 
 	return rc;
 }
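
Note (not part of the commit): the sde_power_handle.c change remembers the last power event dispatched and makes sde_power_clk_set_rate() fail once SDE_POWER_EVENT_POST_DISABLE has fired, all under phandle_lock. A standalone C sketch of that mutex-protected state guard is below; the event values and function names are invented for illustration only.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define EVENT_POST_ENABLE	0x1	/* illustrative values, not the driver's */
#define EVENT_POST_DISABLE	0x2

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int last_event_handled;

static void trigger_event(unsigned int event)
{
	pthread_mutex_lock(&lock);
	last_event_handled = event;	/* remember what fired last */
	pthread_mutex_unlock(&lock);
}

static int clk_set_rate(unsigned long rate)
{
	int rc = 0;

	pthread_mutex_lock(&lock);
	if (last_event_handled & EVENT_POST_DISABLE)
		rc = -EINVAL;		/* power already off: refuse */
	else
		printf("set rate %lu\n", rate);	/* would program the clock */
	pthread_mutex_unlock(&lock);
	return rc;
}

int main(void)
{
	trigger_event(EVENT_POST_ENABLE);
	printf("rc=%d\n", clk_set_rate(200000000));
	trigger_event(EVENT_POST_DISABLE);
	printf("rc=%d\n", clk_set_rate(200000000));	/* rejected */
	return 0;
}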

+ 2 - 1
msm/sde_power_handle.h

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _SDE_POWER_HANDLE_H_
@@ -154,6 +154,7 @@ struct sde_power_handle {
 	struct sde_power_data_bus_handle data_bus_handle
 		[SDE_POWER_HANDLE_DBUS_ID_MAX];
 	struct list_head event_list;
+	u32 last_event_handled;
 	struct sde_rsc_client *rsc_client;
 	bool rsc_client_init;
 };

+ 249 - 230
rotator/sde_rotator_r3.c

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt)	"%s:%d: " fmt, __func__, __LINE__
@@ -569,6 +569,33 @@ static u32 __sde_hw_rotator_get_timestamp(struct sde_hw_rotator *rot, u32 q_id)
 	return ts & SDE_REGDMA_SWTS_MASK;
 }
 
+/**
+ * sde_hw_rotator_disable_irq - Disable hw rotator interrupt with ref. count
+ *				Also, clear rotator/regdma irq enable masks.
+ * @rot: Pointer to hw rotator
+ */
+static void sde_hw_rotator_disable_irq(struct sde_hw_rotator *rot)
+{
+	SDEROT_DBG("irq_num:%d enabled:%d\n", rot->irq_num,
+		atomic_read(&rot->irq_enabled));
+
+	if (!atomic_read(&rot->irq_enabled)) {
+		SDEROT_ERR("irq %d is already disabled\n", rot->irq_num);
+		return;
+	}
+
+	if (!atomic_dec_return(&rot->irq_enabled)) {
+		if (rot->mode == ROT_REGDMA_OFF)
+			SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_EN, 0);
+		else
+			SDE_ROTREG_WRITE(rot->mdss_base,
+				REGDMA_CSR_REGDMA_INT_EN, 0);
+		/* disable irq after last pending irq is handled, if any */
+		synchronize_irq(rot->irq_num);
+		disable_irq_nosync(rot->irq_num);
+	}
+}
+
 /**
  * sde_hw_rotator_elapsed_swts - Find difference of 2 software timestamps
  * @ts_curr: current software timestamp
@@ -582,6 +609,174 @@ static int sde_hw_rotator_elapsed_swts(u32 ts_curr, u32 ts_prev)
 	return sign_extend32(diff, (SDE_REGDMA_SWTS_SHIFT - 1));
 }
 
+/*
+ * sde_hw_rotator_rotirq_handler - non-regdma interrupt handler
+ * @irq: Interrupt number
+ * @ptr: Pointer to private handle provided during registration
+ *
+ * This function services rotator interrupt and wakes up waiting client
+ * with pending rotation requests already submitted to h/w.
+ */
+static irqreturn_t sde_hw_rotator_rotirq_handler(int irq, void *ptr)
+{
+	struct sde_hw_rotator *rot = ptr;
+	struct sde_hw_rotator_context *ctx;
+	irqreturn_t ret = IRQ_NONE;
+	u32 isr;
+
+	isr = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_INTR_STATUS);
+
+	SDEROT_DBG("intr_status = %8.8x\n", isr);
+
+	if (isr & ROT_DONE_MASK) {
+		sde_hw_rotator_disable_irq(rot);
+		SDEROT_DBG("Notify rotator complete\n");
+
+		/* Normal rotator only 1 session, no need to lookup */
+		ctx = rot->rotCtx[0][0];
+		WARN_ON(ctx == NULL);
+		complete_all(&ctx->rot_comp);
+
+		spin_lock(&rot->rotisr_lock);
+		SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
+				ROT_DONE_CLEAR);
+		spin_unlock(&rot->rotisr_lock);
+		ret = IRQ_HANDLED;
+	}
+
+	return ret;
+}
+
+/*
+ * sde_hw_rotator_regdmairq_handler - regdma interrupt handler
+ * @irq: Interrupt number
+ * @ptr: Pointer to private handle provided during registration
+ *
+ * This function services rotator interrupt, decoding the source of
+ * events (high/low priority queue), and wakes up all waiting clients
+ * with pending rotation requests already submitted to h/w.
+ */
+static irqreturn_t sde_hw_rotator_regdmairq_handler(int irq, void *ptr)
+{
+	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+	struct sde_hw_rotator *rot = ptr;
+	struct sde_hw_rotator_context *ctx, *tmp;
+	irqreturn_t ret = IRQ_NONE;
+	u32 isr, isr_tmp;
+	u32 ts;
+	u32 q_id;
+
+	isr = SDE_ROTREG_READ(rot->mdss_base, REGDMA_CSR_REGDMA_INT_STATUS);
+	/* acknowledge interrupt before reading latest timestamp */
+	SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR, isr);
+
+	SDEROT_DBG("intr_status = %8.8x\n", isr);
+
+	/* Any REGDMA status, including error and watchdog timer, should
+	 * trigger and wake up waiting thread
+	 */
+	if (isr & (REGDMA_INT_HIGH_MASK | REGDMA_INT_LOW_MASK)) {
+		spin_lock(&rot->rotisr_lock);
+
+		/*
+		 * Obtain rotator context based on timestamp from regdma
+		 * and low/high interrupt status
+		 */
+		if (isr & REGDMA_INT_HIGH_MASK) {
+			q_id = ROT_QUEUE_HIGH_PRIORITY;
+		} else if (isr & REGDMA_INT_LOW_MASK) {
+			q_id = ROT_QUEUE_LOW_PRIORITY;
+		} else {
+			SDEROT_ERR("unknown ISR status: isr=0x%X\n", isr);
+			goto done_isr_handle;
+		}
+
+		ts = __sde_hw_rotator_get_timestamp(rot, q_id);
+
+		/*
+		 * Timestamp packet is not available in sbuf mode.
+		 * Simulate timestamp update in the handler instead.
+		 */
+		if (test_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map) ||
+				list_empty(&rot->sbuf_ctx[q_id]))
+			goto skip_sbuf;
+
+		ctx = NULL;
+		isr_tmp = isr;
+		list_for_each_entry(tmp, &rot->sbuf_ctx[q_id], list) {
+			u32 mask;
+
+			mask = tmp->timestamp & 0x1 ? REGDMA_INT_1_MASK :
+				REGDMA_INT_0_MASK;
+			if (isr_tmp & mask) {
+				isr_tmp &= ~mask;
+				ctx = tmp;
+				ts = ctx->timestamp;
+				rot->ops.update_ts(rot, ctx->q_id, ts);
+				SDEROT_DBG("update swts:0x%X\n", ts);
+			}
+			SDEROT_EVTLOG(isr, tmp->timestamp);
+		}
+		if (ctx == NULL)
+			SDEROT_ERR("invalid swts ctx\n");
+skip_sbuf:
+		ctx = rot->rotCtx[q_id][ts & SDE_HW_ROT_REGDMA_SEG_MASK];
+
+		/*
+		 * Wake up all waiting context from the current and previous
+		 * SW Timestamp.
+		 */
+		while (ctx &&
+			sde_hw_rotator_elapsed_swts(ctx->timestamp, ts) >= 0) {
+			ctx->last_regdma_isr_status = isr;
+			ctx->last_regdma_timestamp  = ts;
+			SDEROT_DBG(
+				"regdma complete: ctx:%pK, ts:%X\n", ctx, ts);
+			wake_up_all(&ctx->regdma_waitq);
+
+			ts  = (ts - 1) & SDE_REGDMA_SWTS_MASK;
+			ctx = rot->rotCtx[q_id]
+				[ts & SDE_HW_ROT_REGDMA_SEG_MASK];
+		};
+
+done_isr_handle:
+		spin_unlock(&rot->rotisr_lock);
+		ret = IRQ_HANDLED;
+	} else if (isr & REGDMA_INT_ERR_MASK) {
+		/*
+		 * For REGDMA Err, we save the isr info and wake up
+		 * all waiting contexts
+		 */
+		int i, j;
+
+		SDEROT_ERR(
+			"regdma err isr:%X, wake up all waiting contexts\n",
+			isr);
+
+		spin_lock(&rot->rotisr_lock);
+
+		for (i = 0; i < ROT_QUEUE_MAX; i++) {
+			for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX; j++) {
+				ctx = rot->rotCtx[i][j];
+				if (ctx && ctx->last_regdma_isr_status == 0) {
+					ts = __sde_hw_rotator_get_timestamp(
+							rot, i);
+					ctx->last_regdma_isr_status = isr;
+					ctx->last_regdma_timestamp  = ts;
+					wake_up_all(&ctx->regdma_waitq);
+					SDEROT_DBG("Wake rotctx[%d][%d]:%pK\n",
+							i, j, ctx);
+				}
+			}
+		}
+
+		spin_unlock(&rot->rotisr_lock);
+		ret = IRQ_HANDLED;
+	}
+
+	return ret;
+}
+
 /**
  * sde_hw_rotator_pending_hwts - Check if the given context is still pending
  * @rot: Pointer to hw rotator
@@ -695,54 +890,76 @@ static void sde_hw_rotator_update_swts(struct sde_hw_rotator *rot,
 	SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG, swts);
 }
 
-/**
- * sde_hw_rotator_enable_irq - Enable hw rotator interrupt with ref. count
- *				Also, clear rotator/regdma irq status.
- * @rot: Pointer to hw rotator
+/*
+ * sde_hw_rotator_irq_setup - setup rotator irq
+ * @mgr: Pointer to rotator manager
+ * return: none
  */
-static void sde_hw_rotator_enable_irq(struct sde_hw_rotator *rot)
+static int sde_hw_rotator_irq_setup(struct sde_hw_rotator *rot)
 {
-	SDEROT_DBG("irq_num:%d enabled:%d\n", rot->irq_num,
-		atomic_read(&rot->irq_enabled));
+	int rc = 0;
 
-	if (!atomic_read(&rot->irq_enabled)) {
+	/* return early if irq is already setup */
+	if (rot->irq_num >= 0)
+		return 0;
+
+	rot->irq_num = platform_get_irq(rot->pdev, 0);
+	if (rot->irq_num < 0) {
+		rc = rot->irq_num;
+		SDEROT_ERR("fail to get rot irq, fallback to poll %d\n", rc);
+	} else {
 		if (rot->mode == ROT_REGDMA_OFF)
-			SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
-				ROT_DONE_MASK);
+			rc = devm_request_threaded_irq(&rot->pdev->dev,
+					rot->irq_num,
+					sde_hw_rotator_rotirq_handler,
+					NULL, 0, "sde_rotator_r3", rot);
 		else
-			SDE_ROTREG_WRITE(rot->mdss_base,
-				REGDMA_CSR_REGDMA_INT_CLEAR, REGDMA_INT_MASK);
-
-		enable_irq(rot->irq_num);
+			rc = devm_request_threaded_irq(&rot->pdev->dev,
+					rot->irq_num,
+					sde_hw_rotator_regdmairq_handler,
+					NULL, 0, "sde_rotator_r3", rot);
+		if (rc) {
+			SDEROT_ERR("fail to request irq r:%d\n", rc);
+			rot->irq_num = -1;
+		} else {
+			disable_irq(rot->irq_num);
+		}
 	}
-	atomic_inc(&rot->irq_enabled);
+
+	return rc;
 }
 
 /**
- * sde_hw_rotator_disable_irq - Disable hw rotator interrupt with ref. count
- *				Also, clear rotator/regdma irq enable masks.
+ * sde_hw_rotator_enable_irq - Enable hw rotator interrupt with ref. count
+ *				Also, clear rotator/regdma irq status.
  * @rot: Pointer to hw rotator
  */
-static void sde_hw_rotator_disable_irq(struct sde_hw_rotator *rot)
+static int sde_hw_rotator_enable_irq(struct sde_hw_rotator *rot)
 {
+	int ret = 0;
 	SDEROT_DBG("irq_num:%d enabled:%d\n", rot->irq_num,
 		atomic_read(&rot->irq_enabled));
 
-	if (!atomic_read(&rot->irq_enabled)) {
-		SDEROT_ERR("irq %d is already disabled\n", rot->irq_num);
-		return;
+	ret = sde_hw_rotator_irq_setup(rot);
+	if (ret < 0) {
+		SDEROT_ERR("Rotator irq setup failed %d\n", ret);
+		return ret;
 	}
 
-	if (!atomic_dec_return(&rot->irq_enabled)) {
+	if (!atomic_read(&rot->irq_enabled)) {
+
 		if (rot->mode == ROT_REGDMA_OFF)
-			SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_EN, 0);
+			SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
+				ROT_DONE_MASK);
 		else
 			SDE_ROTREG_WRITE(rot->mdss_base,
-				REGDMA_CSR_REGDMA_INT_EN, 0);
-		/* disable irq after last pending irq is handled, if any */
-		synchronize_irq(rot->irq_num);
-		disable_irq_nosync(rot->irq_num);
+				REGDMA_CSR_REGDMA_INT_CLEAR, REGDMA_INT_MASK);
+
+		enable_irq(rot->irq_num);
 	}
+	atomic_inc(&rot->irq_enabled);
+
+	return ret;
 }
 
 static int sde_hw_rotator_halt_vbif_xin_client(void)
@@ -1814,11 +2031,10 @@ static u32 sde_hw_rotator_start_no_regdma(struct sde_hw_rotator_context *ctx,
 	mem_rdptr = sde_hw_rotator_get_regdma_segment_base(ctx);
 	wrptr = sde_hw_rotator_get_regdma_segment(ctx);
 
-	if (rot->irq_num >= 0) {
+	if (!sde_hw_rotator_enable_irq(rot)) {
 		SDE_REGDMA_WRITE(wrptr, ROTTOP_INTR_EN, 1);
 		SDE_REGDMA_WRITE(wrptr, ROTTOP_INTR_CLEAR, 1);
 		reinit_completion(&ctx->rot_comp);
-		sde_hw_rotator_enable_irq(rot);
 	}
 
 	SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, ctx->start_ctrl);
@@ -2526,8 +2742,7 @@ static struct sde_rot_hw_resource *sde_hw_rotator_alloc_ext(
 			sde_hw_rotator_swts_create(resinfo->rot);
 	}
 
-	if (resinfo->rot->irq_num >= 0)
-		sde_hw_rotator_enable_irq(resinfo->rot);
+	sde_hw_rotator_enable_irq(resinfo->rot);
 
 	SDEROT_DBG("New rotator resource:%pK, priority:%d\n",
 			resinfo, wb_id);
@@ -2555,8 +2770,7 @@ static void sde_hw_rotator_free_ext(struct sde_rot_mgr *mgr,
 		resinfo, hw->wb_id, atomic_read(&hw->num_active),
 		hw->pending_count);
 
-	if (resinfo->rot->irq_num >= 0)
-		sde_hw_rotator_disable_irq(resinfo->rot);
+	sde_hw_rotator_disable_irq(resinfo->rot);
 
 	devm_kfree(&mgr->pdev->dev, resinfo);
 }
@@ -3303,176 +3517,6 @@ static int sde_rotator_hw_rev_init(struct sde_hw_rotator *rot)
 	return 0;
 }
 
-/*
- * sde_hw_rotator_rotirq_handler - non-regdma interrupt handler
- * @irq: Interrupt number
- * @ptr: Pointer to private handle provided during registration
- *
- * This function services rotator interrupt and wakes up waiting client
- * with pending rotation requests already submitted to h/w.
- */
-static irqreturn_t sde_hw_rotator_rotirq_handler(int irq, void *ptr)
-{
-	struct sde_hw_rotator *rot = ptr;
-	struct sde_hw_rotator_context *ctx;
-	irqreturn_t ret = IRQ_NONE;
-	u32 isr;
-
-	isr = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_INTR_STATUS);
-
-	SDEROT_DBG("intr_status = %8.8x\n", isr);
-
-	if (isr & ROT_DONE_MASK) {
-		if (rot->irq_num >= 0)
-			sde_hw_rotator_disable_irq(rot);
-		SDEROT_DBG("Notify rotator complete\n");
-
-		/* Normal rotator only 1 session, no need to lookup */
-		ctx = rot->rotCtx[0][0];
-		WARN_ON(ctx == NULL);
-		complete_all(&ctx->rot_comp);
-
-		spin_lock(&rot->rotisr_lock);
-		SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
-				ROT_DONE_CLEAR);
-		spin_unlock(&rot->rotisr_lock);
-		ret = IRQ_HANDLED;
-	}
-
-	return ret;
-}
-
-/*
- * sde_hw_rotator_regdmairq_handler - regdma interrupt handler
- * @irq: Interrupt number
- * @ptr: Pointer to private handle provided during registration
- *
- * This function services rotator interrupt, decoding the source of
- * events (high/low priority queue), and wakes up all waiting clients
- * with pending rotation requests already submitted to h/w.
- */
-static irqreturn_t sde_hw_rotator_regdmairq_handler(int irq, void *ptr)
-{
-	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
-	struct sde_hw_rotator *rot = ptr;
-	struct sde_hw_rotator_context *ctx, *tmp;
-	irqreturn_t ret = IRQ_NONE;
-	u32 isr, isr_tmp;
-	u32 ts;
-	u32 q_id;
-
-	isr = SDE_ROTREG_READ(rot->mdss_base, REGDMA_CSR_REGDMA_INT_STATUS);
-	/* acknowledge interrupt before reading latest timestamp */
-	SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR, isr);
-
-	SDEROT_DBG("intr_status = %8.8x\n", isr);
-
-	/* Any REGDMA status, including error and watchdog timer, should
-	 * trigger and wake up waiting thread
-	 */
-	if (isr & (REGDMA_INT_HIGH_MASK | REGDMA_INT_LOW_MASK)) {
-		spin_lock(&rot->rotisr_lock);
-
-		/*
-		 * Obtain rotator context based on timestamp from regdma
-		 * and low/high interrupt status
-		 */
-		if (isr & REGDMA_INT_HIGH_MASK) {
-			q_id = ROT_QUEUE_HIGH_PRIORITY;
-		} else if (isr & REGDMA_INT_LOW_MASK) {
-			q_id = ROT_QUEUE_LOW_PRIORITY;
-		} else {
-			SDEROT_ERR("unknown ISR status: isr=0x%X\n", isr);
-			goto done_isr_handle;
-		}
-
-		ts = __sde_hw_rotator_get_timestamp(rot, q_id);
-
-		/*
-		 * Timestamp packet is not available in sbuf mode.
-		 * Simulate timestamp update in the handler instead.
-		 */
-		if (test_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map) ||
-				list_empty(&rot->sbuf_ctx[q_id]))
-			goto skip_sbuf;
-
-		ctx = NULL;
-		isr_tmp = isr;
-		list_for_each_entry(tmp, &rot->sbuf_ctx[q_id], list) {
-			u32 mask;
-
-			mask = tmp->timestamp & 0x1 ? REGDMA_INT_1_MASK :
-				REGDMA_INT_0_MASK;
-			if (isr_tmp & mask) {
-				isr_tmp &= ~mask;
-				ctx = tmp;
-				ts = ctx->timestamp;
-				rot->ops.update_ts(rot, ctx->q_id, ts);
-				SDEROT_DBG("update swts:0x%X\n", ts);
-			}
-			SDEROT_EVTLOG(isr, tmp->timestamp);
-		}
-		if (ctx == NULL)
-			SDEROT_ERR("invalid swts ctx\n");
-skip_sbuf:
-		ctx = rot->rotCtx[q_id][ts & SDE_HW_ROT_REGDMA_SEG_MASK];
-
-		/*
-		 * Wake up all waiting context from the current and previous
-		 * SW Timestamp.
-		 */
-		while (ctx &&
-			sde_hw_rotator_elapsed_swts(ctx->timestamp, ts) >= 0) {
-			ctx->last_regdma_isr_status = isr;
-			ctx->last_regdma_timestamp  = ts;
-			SDEROT_DBG(
-				"regdma complete: ctx:%pK, ts:%X\n", ctx, ts);
-			wake_up_all(&ctx->regdma_waitq);
-
-			ts  = (ts - 1) & SDE_REGDMA_SWTS_MASK;
-			ctx = rot->rotCtx[q_id]
-				[ts & SDE_HW_ROT_REGDMA_SEG_MASK];
-		}
-
-done_isr_handle:
-		spin_unlock(&rot->rotisr_lock);
-		ret = IRQ_HANDLED;
-	} else if (isr & REGDMA_INT_ERR_MASK) {
-		/*
-		 * For REGDMA Err, we save the isr info and wake up
-		 * all waiting contexts
-		 */
-		int i, j;
-
-		SDEROT_ERR(
-			"regdma err isr:%X, wake up all waiting contexts\n",
-			isr);
-
-		spin_lock(&rot->rotisr_lock);
-
-		for (i = 0; i < ROT_QUEUE_MAX; i++) {
-			for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX; j++) {
-				ctx = rot->rotCtx[i][j];
-				if (ctx && ctx->last_regdma_isr_status == 0) {
-					ts = __sde_hw_rotator_get_timestamp(
-							rot, i);
-					ctx->last_regdma_isr_status = isr;
-					ctx->last_regdma_timestamp  = ts;
-					wake_up_all(&ctx->regdma_waitq);
-					SDEROT_DBG(
-						"Wakeup rotctx[%d][%d]:%pK\n",
-						i, j, ctx);
-				}
-			}
-		}
-
-		spin_unlock(&rot->rotisr_lock);
-		ret = IRQ_HANDLED;
-	}
-
-	return ret;
-}
-
 /*
  * sde_hw_rotator_validate_entry - validate rotation entry
  * @mgr: Pointer to rotator manager
@@ -3985,30 +4029,7 @@ int sde_rotator_r3_init(struct sde_rot_mgr *mgr)
 	if (ret)
 		goto error_parse_dt;
 
-	rot->irq_num = platform_get_irq(mgr->pdev, 0);
-	if (rot->irq_num == -EPROBE_DEFER) {
-		SDEROT_INFO("irq master master not ready, defer probe\n");
-		return -EPROBE_DEFER;
-	} else if (rot->irq_num < 0) {
-		SDEROT_ERR("fail to get rotator irq, fallback to polling\n");
-	} else {
-		if (rot->mode == ROT_REGDMA_OFF)
-			ret = devm_request_threaded_irq(&mgr->pdev->dev,
-					rot->irq_num,
-					sde_hw_rotator_rotirq_handler,
-					NULL, 0, "sde_rotator_r3", rot);
-		else
-			ret = devm_request_threaded_irq(&mgr->pdev->dev,
-					rot->irq_num,
-					sde_hw_rotator_regdmairq_handler,
-					NULL, 0, "sde_rotator_r3", rot);
-		if (ret) {
-			SDEROT_ERR("fail to request irq r:%d\n", ret);
-			rot->irq_num = -1;
-		} else {
-			disable_irq(rot->irq_num);
-		}
-	}
+	rot->irq_num = -EINVAL;
 	atomic_set(&rot->irq_enabled, 0);
 
 	ret = sde_rotator_hw_rev_init(rot);
@@ -4056,8 +4077,6 @@ int sde_rotator_r3_init(struct sde_rot_mgr *mgr)
 	mdata->sde_rot_hw = rot;
 	return 0;
 error_hw_rev_init:
-	if (rot->irq_num >= 0)
-		devm_free_irq(&mgr->pdev->dev, rot->irq_num, mdata);
 	devm_kfree(&mgr->pdev->dev, mgr->hw_data);
 error_parse_dt:
 	return ret;
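
Note (not part of the commit): the rotator change above stops requesting the IRQ in sde_rotator_r3_init() and instead sets it up lazily from the first enable call, with a reference count balancing nested enable/disable. A minimal standalone C model of that flow is sketched below; irq_setup()/irq_enable()/irq_disable() and the printed messages are placeholders, not the rotator's functions.

#include <stdio.h>

static int irq_num = -1;	/* invalid until first use */
static int irq_enabled;		/* reference count */

static int irq_setup(void)
{
	if (irq_num >= 0)
		return 0;	/* already requested, nothing to do */
	irq_num = 42;		/* stands in for platform_get_irq() +
				 * devm_request_threaded_irq() succeeding */
	printf("irq %d requested (kept disabled)\n", irq_num);
	return 0;
}

static int irq_enable(void)
{
	int rc = irq_setup();	/* lazy setup on first enable */

	if (rc < 0)
		return rc;
	if (irq_enabled++ == 0)
		printf("irq %d enabled\n", irq_num);
	return 0;
}

static void irq_disable(void)
{
	if (irq_enabled == 0)
		return;		/* already disabled */
	if (--irq_enabled == 0)
		printf("irq %d disabled\n", irq_num);
}

int main(void)
{
	irq_enable();		/* first caller triggers the lazy setup */
	irq_enable();		/* nested enable only bumps the count */
	irq_disable();
	irq_disable();		/* last disable actually turns it off */
	return 0;
}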

+ 1 - 14
rotator/sde_rotator_smmu.c

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
  */
 
 #define pr_fmt(fmt)	"%s: " fmt, __func__
@@ -601,17 +601,6 @@ int sde_smmu_probe(struct platform_device *pdev)
 		return -EINVAL;
 	}
 
-	if (smmu_domain.domain == SDE_IOMMU_DOMAIN_ROT_SECURE) {
-		int secure_vmid = VMID_CP_PIXEL;
-
-		rc = iommu_domain_set_attr(sde_smmu->rot_domain,
-			DOMAIN_ATTR_SECURE_VMID, &secure_vmid);
-		if (rc) {
-			SDEROT_ERR("couldn't set secure pixel vmid\n");
-			goto release_mapping;
-		}
-	}
-
 	if (!dev->dma_parms)
 		dev->dma_parms = devm_kzalloc(dev,
 				sizeof(*dev->dma_parms), GFP_KERNEL);
@@ -629,8 +618,6 @@ int sde_smmu_probe(struct platform_device *pdev)
 			smmu_domain.domain);
 	return 0;
 
-release_mapping:
-	sde_smmu->rot_domain = NULL;
 bus_client_destroy:
 	sde_reg_bus_vote_client_destroy(sde_smmu->reg_bus_clt);
 	sde_smmu->reg_bus_clt = NULL;