Преглед на файлове

msm: ipa: gsi: Add support for EV RP DDR access feature

Allocate memory on DDR for event RP and provide it to
the GSI context.
Access the event RP using the DDR pointer instead of
reading the GSI register.

Change-Id: Idaaa352aa8cc20089ce663a2740e6bb121d8258d
Acked-by: Nadav Levintov <[email protected]>
Signed-off-by: Sivan Reinstein <[email protected]>
Sivan Reinstein преди 4 години
родител
ревизия
17e4d9ddd5
променени са 4 файла, в които са добавени 137 реда и са изтрити 10 реда
  1. 51 7
      drivers/platform/msm/gsi/gsi.c
  2. 8 0
      drivers/platform/msm/ipa/ipa_v3/ipa.c
  3. 73 3
      drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
  4. 5 0
      drivers/platform/msm/ipa/ipa_v3/ipa_i.h

+ 51 - 7
drivers/platform/msm/gsi/gsi.c

@@ -659,8 +659,8 @@ static void gsi_handle_ieob(int ee)
 check_again:
 			cntr = 0;
 			empty = true;
-			rp = gsi_readl(gsi_ctx->base +
-				GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(i, ee));
+			rp = ctx->props.gsi_read_event_ring_rp(&ctx->props,
+							       ctx->id, ee);
 			rp |= ctx->ring.rp & 0xFFFFFFFF00000000;
 
 			ctx->ring.rp = rp;
@@ -1646,6 +1646,38 @@ static int gsi_cleanup_xfer_user_data(unsigned long chan_hdl,
 	return 0;
 }
 
+/**
+ * gsi_read_event_ring_rp_ddr - returns the RP value of the event ring,
+ *      read from the DDR shadow location updated by the GSI HW.
+ *
+ * @props: Props structure of the event channel
+ * @id: Event channel index
+ * @ee: EE
+ *
+ * @Return the event ring read pointer value
+ */
+static inline uint64_t gsi_read_event_ring_rp_ddr(struct gsi_evt_ring_props* props,
+	uint8_t id, int ee)
+{
+	return gsi_readl(props->rp_update_vaddr);
+}
+
+/**
+ * gsi_read_event_ring_rp_reg - returns the RP value of the event ring,
+ *      read from the event ring context register.
+ *
+ * @props: Props structure of the event channel
+ * @id: Event channel index
+ * @ee: EE
+ *
+ * @Return the event ring read pointer value
+ */
+static inline uint64_t gsi_read_event_ring_rp_reg(struct gsi_evt_ring_props* props,
+	uint8_t id, int ee)
+{
+	return gsi_readl(gsi_ctx->base + GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(id, ee));
+}
+
 int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props, unsigned long dev_hdl,
 		unsigned long *evt_ring_hdl)
 {
@@ -1689,6 +1721,19 @@ int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props, unsigned long dev_hdl,
 	}
 	GSIDBG("Using %lu as virt evt id\n", evt_id);
 
+	if (props->rp_update_addr != 0) {
+		GSIDBG("Using DDR to read event RP for virt evt id: %lu\n",
+			evt_id);
+		props->gsi_read_event_ring_rp =
+			gsi_read_event_ring_rp_ddr;
+	}
+	else {
+		GSIDBG("Using CONTEXT reg to read event RP for virt evt id: %lu\n",
+			evt_id);
+		props->gsi_read_event_ring_rp =
+			gsi_read_event_ring_rp_reg;
+	}
+
 	ctx = &gsi_ctx->evtr[evt_id];
 	memset(ctx, 0, sizeof(*ctx));
 	mutex_init(&ctx->mlock);
@@ -3781,8 +3826,8 @@ int gsi_poll_n_channel(unsigned long chan_hdl,
 	spin_lock_irqsave(&ctx->evtr->ring.slock, flags);
 	if (ctx->evtr->ring.rp == ctx->evtr->ring.rp_local) {
 		/* update rp to see of we have anything new to process */
-		rp = gsi_readl(gsi_ctx->base +
-			GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(ctx->evtr->id, ee));
+		rp = ctx->evtr->props.gsi_read_event_ring_rp(
+			&ctx->evtr->props, ctx->evtr->id, ee);
 		rp |= ctx->ring.rp & 0xFFFFFFFF00000000ULL;
 
 		ctx->evtr->ring.rp = rp;
@@ -3793,9 +3838,8 @@ int gsi_poll_n_channel(unsigned long chan_hdl,
 				GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(ee));
 			/* do another read to close a small window */
 			__iowmb();
-			rp = gsi_readl(gsi_ctx->base +
-				GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(
-				ctx->evtr->id, ee));
+			rp = ctx->evtr->props.gsi_read_event_ring_rp(
+				&ctx->evtr->props, ctx->evtr->id, ee);
 			rp |= ctx->ring.rp & 0xFFFFFFFF00000000ULL;
 			ctx->evtr->ring.rp = rp;
 			if (rp == ctx->evtr->ring.rp_local) {

+ 8 - 0
drivers/platform/msm/ipa/ipa_v3/ipa.c

@@ -6702,6 +6702,7 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
 		resource_p->do_ram_collection_on_crash;
 	ipa3_ctx->lan_rx_napi_enable = resource_p->lan_rx_napi_enable;
 	ipa3_ctx->tx_napi_enable = resource_p->tx_napi_enable;
+	ipa3_ctx->ipa_gpi_event_rp_ddr = resource_p->ipa_gpi_event_rp_ddr;
 	ipa3_ctx->rmnet_ctl_enable = resource_p->rmnet_ctl_enable;
 	ipa3_ctx->tx_wrapper_cache_max_size = get_tx_wrapper_cache_size(
 			resource_p->tx_wrapper_cache_max_size);
@@ -7469,6 +7470,7 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
 	ipa_drv_res->ipa_fltrt_not_hashable = false;
 	ipa_drv_res->ipa_endp_delay_wa = false;
 	ipa_drv_res->skip_ieob_mask_wa = false;
+	ipa_drv_res->ipa_gpi_event_rp_ddr = false;
 
 	/* Get IPA HW Version */
 	result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-ver",
@@ -7698,6 +7700,12 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
 		ipa_drv_res->lan_rx_napi_enable
 		? "True" : "False");
 
+	ipa_drv_res->ipa_gpi_event_rp_ddr =
+		of_property_read_bool(pdev->dev.of_node,
+		"qcom,ipa-gpi-event-rp-ddr");
+	IPADBG(": Read GPI or GCI Event RP from DDR = %s\n",
+	       ipa_drv_res->ipa_gpi_event_rp_ddr ? "True" : "False");
+
 	ipa_drv_res->tx_napi_enable =
 		of_property_read_bool(pdev->dev.of_node,
 			"qcom,tx-napi");

+ 73 - 3
drivers/platform/msm/ipa/ipa_v3/ipa_dp.c

@@ -16,6 +16,7 @@
 #include "ipahal/ipahal.h"
 #include "ipahal/ipahal_fltrt.h"
 
+#define IPA_GSI_EVENT_RP_SIZE 8
 #define IPA_WAN_AGGR_PKT_CNT 5
 #define IPA_WAN_NAPI_MAX_FRAMES (NAPI_WEIGHT / IPA_WAN_AGGR_PKT_CNT)
 #define IPA_WAN_PAGE_ORDER 3
@@ -1521,6 +1522,16 @@ int ipa3_teardown_sys_pipe(u32 clnt_hdl)
 			ep->gsi_mem_info.evt_ring_len,
 			ep->gsi_mem_info.evt_ring_base_vaddr,
 			ep->gsi_mem_info.evt_ring_base_addr);
+
+		if (ep->gsi_mem_info.evt_ring_rp_vaddr) {
+			dma_free_coherent(ipa3_ctx->pdev,
+				IPA_GSI_EVENT_RP_SIZE,
+				ep->gsi_mem_info.evt_ring_rp_vaddr,
+				ep->gsi_mem_info.evt_ring_rp_addr);
+				ep->gsi_mem_info.evt_ring_rp_addr = 0;
+				ep->gsi_mem_info.evt_ring_rp_vaddr = 0;
+		}
+
 		result = gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
 		if (WARN(result != GSI_STATUS_SUCCESS, "deall evt %d", result))
 			return result;
@@ -4634,6 +4645,7 @@ int ipa3_alloc_common_event_ring(void)
 {
 	struct gsi_evt_ring_props gsi_evt_ring_props;
 	dma_addr_t evt_dma_addr;
+	dma_addr_t evt_rp_dma_addr;
 	int result;
 
 	memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
@@ -4654,7 +4666,23 @@ int ipa3_alloc_common_event_ring(void)
 	gsi_evt_ring_props.ring_base_addr = evt_dma_addr;
 	gsi_evt_ring_props.int_modt = 0;
 	gsi_evt_ring_props.int_modc = 1; /* moderation comes from channel*/
-	gsi_evt_ring_props.rp_update_addr = 0;
+
+	if (ipa3_ctx->ipa_gpi_event_rp_ddr) {
+		gsi_evt_ring_props.rp_update_vaddr =
+			dma_alloc_coherent(ipa3_ctx->pdev,
+					   IPA_GSI_EVENT_RP_SIZE,
+					   &evt_rp_dma_addr, GFP_KERNEL);
+		if (!gsi_evt_ring_props.rp_update_vaddr) {
+			IPAERR("fail to dma alloc %u bytes\n",
+			       IPA_GSI_EVENT_RP_SIZE);
+			result = -ENOMEM;
+			goto fail_alloc_rp;
+		}
+		gsi_evt_ring_props.rp_update_addr = evt_rp_dma_addr;
+	} else {
+		gsi_evt_ring_props.rp_update_addr = 0;
+	}
+
 	gsi_evt_ring_props.exclusive = false;
 	gsi_evt_ring_props.err_cb = ipa_gsi_evt_ring_err_cb;
 	gsi_evt_ring_props.user_data = NULL;
@@ -4663,11 +4691,22 @@ int ipa3_alloc_common_event_ring(void)
 		ipa3_ctx->gsi_dev_hdl, &ipa3_ctx->gsi_evt_comm_hdl);
 	if (result) {
 		IPAERR("gsi_alloc_evt_ring failed %d\n", result);
-		return result;
+		goto fail_alloc_evt_ring;
 	}
 	ipa3_ctx->gsi_evt_comm_ring_rem = IPA_COMMON_EVENT_RING_SIZE;
 
 	return 0;
+fail_alloc_evt_ring:
+	if (gsi_evt_ring_props.rp_update_vaddr) {
+		dma_free_coherent(ipa3_ctx->pdev, IPA_GSI_EVENT_RP_SIZE,
+				  gsi_evt_ring_props.rp_update_vaddr,
+				  evt_rp_dma_addr);
+	}
+fail_alloc_rp:
+	dma_free_coherent(ipa3_ctx->pdev, gsi_evt_ring_props.ring_len,
+			  gsi_evt_ring_props.ring_base_vaddr,
+			  evt_dma_addr);
+	return result;
 }
 
 static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
@@ -4754,9 +4793,11 @@ static int ipa_gsi_setup_event_ring(struct ipa3_ep_context *ep,
 {
 	struct gsi_evt_ring_props gsi_evt_ring_props;
 	dma_addr_t evt_dma_addr;
+	dma_addr_t evt_rp_dma_addr;
 	int result;
 
 	evt_dma_addr = 0;
+	evt_rp_dma_addr = 0;
 	memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
 	gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_GPI_EV;
 	gsi_evt_ring_props.intr = GSI_INTR_IRQ;
@@ -4791,7 +4832,28 @@ static int ipa_gsi_setup_event_ring(struct ipa3_ep_context *ep,
 		ep->client,
 		gsi_evt_ring_props.int_modt,
 		gsi_evt_ring_props.int_modc);
-	gsi_evt_ring_props.rp_update_addr = 0;
+	if (ipa3_ctx->ipa_gpi_event_rp_ddr) {
+		gsi_evt_ring_props.rp_update_vaddr =
+			dma_alloc_coherent(ipa3_ctx->pdev,
+					   IPA_GSI_EVENT_RP_SIZE,
+					   &evt_rp_dma_addr, GFP_KERNEL);
+		if (!gsi_evt_ring_props.rp_update_vaddr) {
+			IPAERR("fail to dma alloc %u bytes\n",
+				IPA_GSI_EVENT_RP_SIZE);
+			result = -ENOMEM;
+			goto fail_alloc_rp;
+		}
+		gsi_evt_ring_props.rp_update_addr = evt_rp_dma_addr;
+	} else {
+		gsi_evt_ring_props.rp_update_addr = 0;
+	}
+
+	/* copy mem info */
+	ep->gsi_mem_info.evt_ring_rp_addr =
+		gsi_evt_ring_props.rp_update_addr;
+	ep->gsi_mem_info.evt_ring_rp_vaddr =
+		gsi_evt_ring_props.rp_update_vaddr;
+
 	gsi_evt_ring_props.exclusive = true;
 	gsi_evt_ring_props.err_cb = ipa_gsi_evt_ring_err_cb;
 	gsi_evt_ring_props.user_data = NULL;
@@ -4804,6 +4866,14 @@ static int ipa_gsi_setup_event_ring(struct ipa3_ep_context *ep,
 	return 0;
 
 fail_alloc_evt_ring:
+	if (gsi_evt_ring_props.rp_update_vaddr) {
+		dma_free_coherent(ipa3_ctx->pdev, IPA_GSI_EVENT_RP_SIZE,
+				  gsi_evt_ring_props.rp_update_vaddr,
+				  evt_rp_dma_addr);
+		ep->gsi_mem_info.evt_ring_rp_addr = 0;
+		ep->gsi_mem_info.evt_ring_rp_vaddr = 0;
+	}
+fail_alloc_rp:
 	if (ep->gsi_mem_info.evt_ring_base_vaddr)
 		dma_free_coherent(ipa3_ctx->pdev, ep->gsi_mem_info.evt_ring_len,
 			ep->gsi_mem_info.evt_ring_base_vaddr,

+ 5 - 0
drivers/platform/msm/ipa/ipa_v3/ipa_i.h

@@ -911,6 +911,8 @@ struct ipa_gsi_ep_mem_info {
 	u16 chan_ring_len;
 	u64 chan_ring_base_addr;
 	void *chan_ring_base_vaddr;
+	u64 evt_ring_rp_addr;
+	void *evt_ring_rp_vaddr;
 };
 
 struct ipa3_status_stats {
@@ -1908,6 +1910,7 @@ struct ipa3_app_clock_vote {
  * @icc_num_paths - number of paths icc would vote for bw
  * @icc_clk - table for icc bw clock value
  * @coal_cmd_pyld: holds the coslescing close frame command payload
+ * @ipa_gpi_event_rp_ddr: use DDR to access event RP for GPI channels
  * @rmnet_ctl_enable: enable pipe support fow low latency data
  * @gsi_fw_file_name: GSI IPA fw file name
  * @uc_fw_file_name: uC IPA fw file name
@@ -2094,6 +2097,7 @@ struct ipa3_context {
 	u32 tx_wrapper_cache_max_size;
 	struct ipa3_app_clock_vote app_clock_vote;
 	bool clients_registered;
+	bool ipa_gpi_event_rp_ddr;
 	bool rmnet_ctl_enable;
 	char *gsi_fw_file_name;
 	char *uc_fw_file_name;
@@ -2150,6 +2154,7 @@ struct ipa3_plat_drv_res {
 	u32 icc_num_paths;
 	const char *icc_path_name[IPA_ICC_PATH_MAX];
 	u32 icc_clk_val[IPA_ICC_LVL_MAX][IPA_ICC_MAX];
+	bool ipa_gpi_event_rp_ddr;
 	bool rmnet_ctl_enable;
 	bool ipa_use_uc_holb_monitor;
 	u32 ipa_holb_monitor_poll_period;