
Merge "msm: ipa4: flow control changes for rmnet pipe"

qctecmdr 4 years ago
parent
commit
c67509b1c4
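
This merge adds a new exported GSI helper, gsi_enable_flow_control_ee(), which issues the generic EE command GSI_GEN_EE_CMD_ENABLE_FLOW_CHANNEL to move a producer channel into the FLOW_CONTROL state, and wires it into the IPA producer start paths (xDCI, MHI, and the tethered producer start/stop helper) when the endpoint-delay workaround is active. A minimal caller sketch, mirroring the call sites added in ipa_client.c and ipa_mhi.c below; the ep pointer and the IPADBG macro stand in for whatever context and logging the caller already has:

	int code = 0;
	int res;

	/* EE 0 is the AP execution environment, as in the call sites below */
	res = gsi_enable_flow_control_ee(ep->gsi_chan_hdl, 0, &code);
	if (res == GSI_STATUS_SUCCESS)
		IPADBG("flow control enabled on gsi ch %lu, code %d\n",
			ep->gsi_chan_hdl, code);
	else
		IPADBG("failed to enable flow control on gsi ch %lu, code %d\n",
			ep->gsi_chan_hdl, code);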

+ 92 - 1
drivers/platform/msm/gsi/gsi.c

@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/of.h>
@@ -4414,6 +4414,97 @@ free_lock:
 }
 EXPORT_SYMBOL(gsi_alloc_channel_ee);
 
+int gsi_enable_flow_control_ee(unsigned int chan_idx, unsigned int ee,
+								int *code)
+{
+	enum gsi_generic_ee_cmd_opcode op = GSI_GEN_EE_CMD_ENABLE_FLOW_CHANNEL;
+	uint32_t val;
+	enum gsi_chan_state curr_state = GSI_CHAN_STATE_NOT_ALLOCATED;
+	int res;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (chan_idx >= gsi_ctx->max_ch || !code) {
+		GSIERR("bad params chan_idx=%d\n", chan_idx);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	mutex_lock(&gsi_ctx->mlock);
+	reinit_completion(&gsi_ctx->gen_ee_cmd_compl);
+
+	/* invalidate the response */
+	gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
+	gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code = 0;
+	gsi_writel(gsi_ctx->scratch.word0.val, gsi_ctx->base +
+			GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
+
+	gsi_ctx->gen_ee_cmd_dbg.flow_ctrl_channel++;
+	val = (((op << GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_SHFT) &
+		GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK) |
+		((chan_idx << GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_SHFT) &
+			GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_BMSK) |
+		((ee << GSI_EE_n_GSI_EE_GENERIC_CMD_EE_SHFT) &
+			GSI_EE_n_GSI_EE_GENERIC_CMD_EE_BMSK));
+	gsi_writel(val, gsi_ctx->base +
+		GSI_EE_n_GSI_EE_GENERIC_CMD_OFFS(gsi_ctx->per.ee));
+
+	res = wait_for_completion_timeout(&gsi_ctx->gen_ee_cmd_compl,
+		msecs_to_jiffies(GSI_CMD_TIMEOUT));
+	if (res == 0) {
+		GSIERR("chan_idx=%u ee=%u timed out\n", chan_idx, ee);
+		res = -GSI_STATUS_TIMED_OUT;
+		goto free_lock;
+	}
+
+	gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
+	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
+		GSI_GEN_EE_CMD_RETURN_CODE_CHANNEL_NOT_RUNNING) {
+		GSIDBG("chan_idx=%u ee=%u not in correct state\n",
+							chan_idx, ee);
+		*code = GSI_GEN_EE_CMD_RETURN_CODE_CHANNEL_NOT_RUNNING;
+		res = -GSI_STATUS_RES_ALLOC_FAILURE;
+		goto free_lock;
+	} else if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
+			GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_TYPE ||
+			gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code ==
+			GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_INDEX){
+		GSIERR("chan_idx=%u ee=%u not in correct state\n",
+				chan_idx, ee);
+		GSI_ASSERT();
+	}
+	if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code == 0) {
+		GSIERR("No response received\n");
+		res = -GSI_STATUS_ERROR;
+		goto free_lock;
+	}
+
+	/* Read the current channel state */
+	val = gsi_readl(gsi_ctx->base +
+				GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(chan_idx, ee));
+	curr_state = (val &
+				GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK) >>
+				GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT;
+	if (curr_state == GSI_CHAN_STATE_FLOW_CONTROL) {
+		GSIDBG("ch %u state updated to %u\n", chan_idx, curr_state);
+		res = GSI_STATUS_SUCCESS;
+	} else {
+		GSIERR("ch %u state updated to %u incorrect state\n",
+						chan_idx, curr_state);
+		res = -GSI_STATUS_ERROR;
+	}
+	*code = gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code;
+free_lock:
+	mutex_unlock(&gsi_ctx->mlock);
+
+	return res;
+}
+EXPORT_SYMBOL(gsi_enable_flow_control_ee);
+
 int gsi_map_virtual_ch_to_per_ep(u32 ee, u32 chan_num, u32 per_ep_index)
 {
 	if (!gsi_ctx) {

+ 4 - 1
drivers/platform/msm/gsi/gsi.h

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
  */
 
 #ifndef GSI_H
@@ -198,6 +198,7 @@ struct ch_debug_stats {
 
 struct gsi_generic_ee_cmd_debug_stats {
 	unsigned long halt_channel;
+	unsigned long flow_ctrl_channel;
 };
 
 struct gsi_coal_chan_info {
@@ -334,6 +335,8 @@ enum gsi_evt_ch_cmd_opcode {
 enum gsi_generic_ee_cmd_opcode {
 	GSI_GEN_EE_CMD_HALT_CHANNEL = 0x1,
 	GSI_GEN_EE_CMD_ALLOC_CHANNEL = 0x2,
+	GSI_GEN_EE_CMD_ENABLE_FLOW_CHANNEL = 0x3,
+	GSI_GEN_EE_CMD_DISABLE_FLOW_CHANNEL = 0x4,
 };
 
 enum gsi_generic_ee_cmd_return_code {

+ 37 - 3
drivers/platform/msm/ipa/ipa_v3/ipa_client.c

@@ -587,6 +587,15 @@ int ipa3_request_gsi_channel(struct ipa_request_gsi_channel_params *params,
 		IPADBG("ep configuration successful\n");
 	} else {
 		IPADBG("Skipping endpoint configuration.\n");
+		if (IPA_CLIENT_IS_PROD(ipa3_ctx->ep[ipa_ep_idx].client) &&
+			ipa3_ctx->ep[ipa_ep_idx].client == IPA_CLIENT_USB_PROD
+			&& !ipa3_is_mhip_offload_enabled()) {
+			if (ipa3_cfg_ep_seq(ipa_ep_idx,
+						&params->ipa_ep_cfg.seq)) {
+				IPAERR("fail to configure USB pipe seq\n");
+				goto ipa_cfg_ep_fail;
+			}
+		}
 	}
 
 	out_params->clnt_hdl = ipa_ep_idx;
@@ -825,6 +834,7 @@ int ipa3_xdci_start(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid)
 	enum gsi_status gsi_res;
 	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
 	u32 holb_max_cnt = ipa3_ctx->uc_ctx.holb_monitor.max_cnt_usb;
+	int code = 0;
 
 	IPADBG("entry\n");
 	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes  ||
@@ -877,6 +887,19 @@ int ipa3_xdci_start(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid)
 					ep->gsi_chan_hdl);
 	}
 
+	if (IPA_CLIENT_IS_PROD(ep->client) && ep->skip_ep_cfg &&
+			ipa3_ctx->ipa_endp_delay_wa &&
+			!ipa3_is_mhip_offload_enabled()) {
+		gsi_res = gsi_enable_flow_control_ee(ep->gsi_chan_hdl, 0,
+									&code);
+		if (gsi_res == GSI_STATUS_SUCCESS) {
+			IPADBG("flow control success gsi ch %d with code %d\n",
+					ep->gsi_chan_hdl, code);
+		} else {
+			IPADBG("failed to flow control gsi ch %d code %d\n",
+					ep->gsi_chan_hdl, code);
+		}
+	}
 	ipa3_start_gsi_debug_monitor(clnt_hdl);
 	if (!ep->keep_ipa_awake)
 		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
@@ -1289,6 +1312,7 @@ int ipa3_start_stop_client_prod_gsi_chnl(enum ipa_client_type client,
 	int result = 0;
 	int pipe_idx;
 	struct ipa3_ep_context *ep;
+	int code = 0;
 
 	if (IPA_CLIENT_IS_CONS(client)) {
 		IPAERR("client (%d) not PROD\n", client);
@@ -1304,10 +1328,20 @@ int ipa3_start_stop_client_prod_gsi_chnl(enum ipa_client_type client,
 
 	client_lock_unlock_cb(client, true);
 	ep = &ipa3_ctx->ep[pipe_idx];
-	if (ep->valid && ep->skip_ep_cfg && ipa3_get_teth_port_status(client)) {
-		if (start_chnl)
+	if (ep->valid && ep->skip_ep_cfg && ipa3_get_teth_port_status(client)
+		&& !ipa3_is_mhip_offload_enabled()) {
+		if (start_chnl) {
 			result = ipa3_start_gsi_channel(pipe_idx);
-		else
+			result = gsi_enable_flow_control_ee(ep->gsi_chan_hdl,
+								0, &code);
+			if (result == GSI_STATUS_SUCCESS) {
+				IPADBG("flow control success ch %d code %d\n",
+						ep->gsi_chan_hdl, code);
+			} else {
+				IPADBG("failed to flow control ch %d code %d\n",
+						ep->gsi_chan_hdl, code);
+			}
+		} else
 			result = ipa3_stop_gsi_channel(pipe_idx);
 	}
 	client_lock_unlock_cb(client, false);

+ 2 - 0
drivers/platform/msm/ipa/ipa_v3/ipa_i.h

@@ -2466,6 +2466,8 @@ int ipa3_clear_endpoint_delay(u32 clnt_hdl);
  */
 int ipa3_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg);
 
+int ipa3_cfg_ep_seq(u32 clnt_hdl, const struct ipa_ep_cfg_seq *seq_cfg);
+
 int ipa3_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ipa_ep_cfg);
 
 int ipa3_cfg_ep_conn_track(u32 clnt_hdl,
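
The ipa3_cfg_ep_seq() prototype added above backs the new skip_ep_cfg branches in ipa_client.c and ipa_mhi.c: when full endpoint configuration is skipped for a USB or MHI producer pipe and MHIP offload is disabled, only the endpoint sequencer is programmed. A condensed sketch of that pattern, with ep, ipa_ep_idx and ep_cfg standing in for the caller's existing context, the fail-path goto reduced to a log, and the real call sites additionally checking for the specific USB_PROD/MHI_PROD client:

	if (IPA_CLIENT_IS_PROD(ep->client) && ep->skip_ep_cfg &&
		!ipa3_is_mhip_offload_enabled()) {
		if (ipa3_cfg_ep_seq(ipa_ep_idx, &ep_cfg->seq))
			IPAERR("failed to configure pipe seq\n");
	}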

+ 55 - 31
drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c

@@ -182,7 +182,8 @@ static int ipa3_mhi_get_ch_poll_cfg(enum ipa_client_type client,
 }
 
 static int ipa_mhi_start_gsi_channel(enum ipa_client_type client,
-	int ipa_ep_idx, struct start_gsi_channel *params)
+	int ipa_ep_idx, struct start_gsi_channel *params,
+	struct ipa_ep_cfg *ipa_ep_cfg)
 {
 	int res = 0;
 	struct gsi_evt_ring_props ev_props;
@@ -193,6 +194,7 @@ static int ipa_mhi_start_gsi_channel(enum ipa_client_type client,
 	const struct ipa_gsi_ep_config *ep_cfg;
 	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
 	bool burst_mode_enabled = false;
+	int code = 0;
 
 	IPA_MHI_FUNC_ENTRY();
 
@@ -343,6 +345,37 @@ static int ipa_mhi_start_gsi_channel(enum ipa_client_type client,
 
 	*params->mhi = ch_scratch.mhi;
 
+	res = ipa3_enable_data_path(ipa_ep_idx);
+	if (res) {
+		IPA_MHI_ERR("enable data path failed res=%d clnt=%d.\n", res,
+			ipa_ep_idx);
+		goto fail_ep_cfg;
+	}
+
+	if (!ep->skip_ep_cfg) {
+		if (ipa3_cfg_ep(ipa_ep_idx, ipa_ep_cfg)) {
+			IPAERR("fail to configure EP.\n");
+			goto fail_ep_cfg;
+		}
+		if (ipa3_cfg_ep_status(ipa_ep_idx, &ep->status)) {
+			IPAERR("fail to configure status of EP.\n");
+			goto fail_ep_cfg;
+		}
+		IPA_MHI_DBG("ep configuration successful\n");
+	} else {
+		IPA_MHI_DBG("skipping ep configuration\n");
+		if (IPA_CLIENT_IS_PROD(ipa3_ctx->ep[ipa_ep_idx].client) &&
+			ipa3_ctx->ep[ipa_ep_idx].client == IPA_CLIENT_MHI_PROD
+				&& !ipa3_is_mhip_offload_enabled()) {
+			if (ipa3_cfg_ep_seq(ipa_ep_idx,
+						&ipa_ep_cfg->seq)) {
+				IPA_MHI_ERR("fail to configure MHI pipe seq\n");
+				goto fail_ep_cfg;
+			}
+		}
+
+	}
+
 	if (IPA_CLIENT_IS_PROD(ep->client) && ep->skip_ep_cfg) {
 		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
 		ep_cfg_ctrl.ipa_ep_delay = true;
@@ -357,6 +390,9 @@ static int ipa_mhi_start_gsi_channel(enum ipa_client_type client,
 		ep->ep_delay_set = false;
 	}
 
+	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(client))
+		ipa3_install_dflt_flt_rules(ipa_ep_idx);
+
 	IPA_MHI_DBG("Starting channel\n");
 	res = gsi_start_channel(ep->gsi_chan_hdl);
 	if (res) {
@@ -364,9 +400,24 @@ static int ipa_mhi_start_gsi_channel(enum ipa_client_type client,
 		goto fail_ch_start;
 	}
 
+	if (IPA_CLIENT_IS_PROD(ep->client) && ep->skip_ep_cfg &&
+			ipa3_ctx->ipa_endp_delay_wa &&
+			!ipa3_is_mhip_offload_enabled()) {
+		res = gsi_enable_flow_control_ee(ep->gsi_chan_hdl, 0, &code);
+		if (res == GSI_STATUS_SUCCESS) {
+			IPA_MHI_DBG("flow ctrl success gsi ch %d code %d\n",
+					ep->gsi_chan_hdl, code);
+		} else {
+			IPA_MHI_DBG("failed to flow ctrl gsi ch %d code %d\n",
+					ep->gsi_chan_hdl, code);
+		}
+	}
+
 	IPA_MHI_FUNC_EXIT();
 	return 0;
 
+fail_ep_cfg:
+	ipa3_disable_data_path(ipa_ep_idx);
 fail_ch_start:
 fail_ch_scratch:
 	gsi_dealloc_channel(ep->gsi_chan_hdl);
@@ -474,49 +525,22 @@ int ipa3_connect_mhi_pipe(struct ipa_mhi_connect_params_internal *in,
 	ep->keep_ipa_awake = in->sys->keep_ipa_awake;
 
 	res = ipa_mhi_start_gsi_channel(client,
-					ipa_ep_idx, &in->start.gsi);
+					ipa_ep_idx, &in->start.gsi,
+					&in->sys->ipa_ep_cfg);
 	if (res) {
 		IPA_MHI_ERR("ipa_mhi_start_gsi_channel failed %d\n",
 			res);
 		goto fail_start_channel;
 	}
 
-	res = ipa3_enable_data_path(ipa_ep_idx);
-	if (res) {
-		IPA_MHI_ERR("enable data path failed res=%d clnt=%d.\n", res,
-			ipa_ep_idx);
-		goto fail_ep_cfg;
-	}
-
-	if (!ep->skip_ep_cfg) {
-		if (ipa3_cfg_ep(ipa_ep_idx, &in->sys->ipa_ep_cfg)) {
-			IPAERR("fail to configure EP.\n");
-			goto fail_ep_cfg;
-		}
-		if (ipa3_cfg_ep_status(ipa_ep_idx, &ep->status)) {
-			IPAERR("fail to configure status of EP.\n");
-			goto fail_ep_cfg;
-		}
-		IPA_MHI_DBG("ep configuration successful\n");
-	} else {
-		IPA_MHI_DBG("skipping ep configuration\n");
-	}
-
 	*clnt_hdl = ipa_ep_idx;
-
-	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(client))
-		ipa3_install_dflt_flt_rules(ipa_ep_idx);
-
 	ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
-	IPA_MHI_DBG("client %d (ep: %d) connected\n", client,
-		ipa_ep_idx);
+	IPA_MHI_DBG("client %d (ep: %d) connected\n", client, ipa_ep_idx);
 
 	IPA_MHI_FUNC_EXIT();
 
 	return 0;
 
-fail_ep_cfg:
-	ipa3_disable_data_path(ipa_ep_idx);
 fail_start_channel:
 	memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
 	return -EPERM;