
Merge "msm: ipa: add unit tests for NTN3 offload"

qctecmdr 3 years ago
Parent
Commit
e7414c97c6

+ 70 - 13
drivers/platform/msm/gsi/gsi.c

@@ -2416,6 +2416,7 @@ static void gsi_program_chan_ctx(struct gsi_chan_props *props, unsigned int ee,
 	case GSI_CHAN_PROT_11AD:
 	case GSI_CHAN_PROT_RTK:
 	case GSI_CHAN_PROT_QDSS:
+	case GSI_CHAN_PROT_NTN:
 		ch_k_cntxt_0.chtype_protocol_msb = 1;
 		break;
 	default:
@@ -4936,25 +4937,81 @@ int gsi_get_refetch_reg(unsigned long chan_hdl, bool is_rp)
 }
 EXPORT_SYMBOL(gsi_get_refetch_reg);
 
-int gsi_get_drop_stats(unsigned long ep_id, int scratch_id)
+int gsi_get_drop_stats(unsigned long ep_id, int scratch_id,
+	unsigned long chan_hdl)
 {
-	/* RTK use scratch 5 */
-	if (scratch_id == 5) {
-		/*
-		 * each channel context is 6 lines of 8 bytes, but n in SHRAM_n
-		 * is in 4 bytes offsets, so multiplying ep_id by 6*2=12 will
-		 * give the beginning of the required channel context, and then
-		 * need to add 7 since the channel context layout has the ring
-		 * rbase (8 bytes) + channel scratch 0-4 (20 bytes) so adding
-		 * additional 28/4 = 7 to get to scratch 5 of the required
-		 * channel.
-		 */
-		gsihal_read_reg_n(GSI_GSI_SHRAM_n, ep_id * 12 + 7);
+#define GSI_RTK_ERR_STATS_MASK 0xFFFF
+#define GSI_NTN_ERR_STATS_MASK 0xFFFFFFFF
+#define GSI_AQC_RX_STATUS_MASK 0x1FFF
+#define GSI_AQC_RX_STATUS_SHIFT 0
+#define GSI_AQC_RDM_ERR_MASK 0x1FFF0000
+#define GSI_AQC_RDM_ERR_SHIFT 16
+
+	uint16_t rx_status;
+	uint16_t rdm_err;
+	uint32_t val;
+
+	/* on newer versions we can read the ch scratch directly from reg */
+	if (gsi_ctx->per.ver >= GSI_VER_3_0) {
+		switch (scratch_id) {
+		case 5:
+			return gsihal_read_reg_nk(
+				GSI_EE_n_GSI_CH_k_SCRATCH_5,
+				gsi_ctx->per.ee,
+				chan_hdl) & GSI_RTK_ERR_STATS_MASK;
+			break;
+		case 6:
+			return gsihal_read_reg_nk(
+				GSI_EE_n_GSI_CH_k_SCRATCH_6,
+				gsi_ctx->per.ee,
+				chan_hdl) & GSI_NTN_ERR_STATS_MASK;
+			break;
+		case 7:
+			val = gsihal_read_reg_nk(
+				GSI_EE_n_GSI_CH_k_SCRATCH_7,
+				gsi_ctx->per.ee,
+				chan_hdl);
+			rx_status = (val & GSI_AQC_RX_STATUS_MASK)
+				>> GSI_AQC_RX_STATUS_SHIFT;
+			rdm_err = (val & GSI_AQC_RDM_ERR_MASK)
+				>> (GSI_AQC_RDM_ERR_SHIFT);
+			return rx_status + rdm_err;
+			break;
+		default:
+			GSIERR("invalid scratch id %d\n", scratch_id);
+			return 0;
+		}
+
+	/* on older versions we need to read the scratch from SHRAM */
+	} else {
+		/* RTK use scratch 5 */
+		if (scratch_id == 5) {
+			/*
+			 * each channel context is 6 lines of 8 bytes, but n in
+			 * SHRAM_n is in 4 bytes offsets, so multiplying ep_id
+			 * by 6*2=12 will give the beginning of the required
+			 * channel context, and then need to add 7 since the
+			 * channel context layout has the ring rbase (8 bytes)
+			 * + channel scratch 0-4 (20 bytes) so adding
+			 * additional 28/4 = 7 to get to scratch 5 of the
+			 * required channel.
+			 */
+			return gsihal_read_reg_n(
+				GSI_GSI_SHRAM_n,
+				ep_id * 12 + 7) & GSI_RTK_ERR_STATS_MASK;
+		}
 	}
 	return 0;
 }
 EXPORT_SYMBOL(gsi_get_drop_stats);
 
+int gsi_get_wp(unsigned long chan_hdl)
+{
+	return gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_6, gsi_ctx->per.ee,
+		chan_hdl);
+}
+EXPORT_SYMBOL(gsi_get_wp);
+
 void gsi_wdi3_dump_register(unsigned long chan_hdl)
 {
 	uint32_t val;
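
The SHRAM offset arithmetic used in the pre-GSI-3.0 branch above is easiest to check with a concrete value. A minimal sketch, assuming only the 48-byte channel-context layout described in the comment (the helper name is illustrative, not part of the patch):

	/*
	 * SHRAM_n is indexed in 4-byte words and each channel context is
	 * 6 lines of 8 bytes (48 bytes = 12 words), so channel ep_id starts
	 * at word ep_id * 12. Ring rbase (8 bytes) + scratch 0-4 (20 bytes)
	 * = 28 bytes = 7 words further in, which is scratch 5.
	 */
	static u32 example_shram_scratch5_word(unsigned long ep_id)
	{
		return ep_id * 12 + 7;	/* e.g. ep_id 3 -> SHRAM word 43 */
	}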

+ 33 - 1
drivers/platform/msm/gsi/gsi.h

@@ -144,6 +144,7 @@ enum gsi_evt_chtype {
 	GSI_EVT_CHTYPE_AQC_EV = 0x8,
 	GSI_EVT_CHTYPE_11AD_EV = 0x9,
 	GSI_EVT_CHTYPE_RTK_EV = 0xC,
+	GSI_EVT_CHTYPE_NTN_EV = 0xD,
 };
 
 enum gsi_evt_ring_elem_size {
@@ -227,6 +228,7 @@ enum gsi_chan_prot {
 	GSI_CHAN_PROT_MHIC = 0xA,
 	GSI_CHAN_PROT_QDSS = 0xB,
 	GSI_CHAN_PROT_RTK = 0xC,
+	GSI_CHAN_PROT_NTN = 0xD,
 };
 
 enum gsi_max_prefetch {
@@ -1029,6 +1031,26 @@ union __packed gsi_wdi3_channel_scratch2_reg {
 	 uint32_t reserved2 : 23;
  };
 
+ /**
+ * gsi_ntn_channel_scratch - NTN SW config area of
+ * channel scratch
+ *
+ * @buff_addr_lsb: NTN buffer address LSB
+ * @buff_addr_msb: NTN buffer address MSB
+ * @fix_buff_size: buff size in log2
+ * @ioc_mod_threshold: the threshold for IOC moderation (TX)
+ */
+ struct __packed gsi_ntn_channel_scratch {
+	 uint32_t buff_addr_lsb;
+	 uint32_t buff_addr_msb : 8;
+	 uint32_t fix_buff_size : 4;
+	 uint32_t reserved1 : 20;
+	 uint32_t ioc_mod_threshold : 16;
+	 uint32_t reserved2 : 16;
+	 uint32_t reserved3;
+	 uint32_t reserved4;
+ };
+
 /**
  * gsi_channel_scratch - channel scratch SW config area
  *
@@ -1046,6 +1068,7 @@ union __packed gsi_channel_scratch {
 	struct __packed gsi_wdi2_channel_scratch_new wdi2_new;
 	struct __packed gsi_aqc_channel_scratch aqc;
 	struct __packed gsi_rtk_channel_scratch rtk;
+	struct __packed gsi_ntn_channel_scratch ntn;
 	struct __packed gsi_qdss_channel_scratch qdss;
 	struct __packed {
 		uint32_t word1;
@@ -2238,8 +2261,17 @@ int gsi_get_refetch_reg(unsigned long chan_hdl, bool is_rp);
  *
  * @ep_id: ep index
  * @scratch_id: drop stats on which scratch register
+ * @chan_hdl: gsi channel handle
  */
-int gsi_get_drop_stats(unsigned long ep_id, int scratch_id);
+int gsi_get_drop_stats(unsigned long ep_id, int scratch_id,
+	unsigned long chan_hdl);
+
+/**
+* gsi_get_wp - get channel write pointer for stats
+*
+* @chan_hdl: gsi channel handle
+*/
+int gsi_get_wp(unsigned long chan_hdl);
 
 /**
  * gsi_wdi3_dump_register - dump wdi3 related gsi registers
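
To see how the new gsi_ntn_channel_scratch layout is meant to be used, here is roughly how a caller fills it for the fixed 2 KB NTN buffers; it mirrors the ipa_eth_i.c hunk further down, with pipe assumed to be the ipa_eth_client_pipe_info being connected:

	union __packed gsi_channel_scratch ch_scratch;

	memset(&ch_scratch, 0, sizeof(ch_scratch));
	ch_scratch.ntn.buff_addr_lsb =
		(u32)pipe->info.buffer_pool_base_addr;
	ch_scratch.ntn.buff_addr_msb =		/* field is only 8 bits wide */
		(u32)((u64)pipe->info.buffer_pool_base_addr >> 32);
	ch_scratch.ntn.fix_buff_size = ilog2(2048);	/* 2 KB -> 11 */
	ch_scratch.ntn.ioc_mod_threshold = IPA_ETH_NTN_MODT;	/* TX only */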

+ 1 - 1
drivers/platform/msm/ipa/Kbuild

@@ -47,7 +47,7 @@ ipam-$(CONFIG_IPA3_REGDUMP) += ipa_v3/dump/ipa_reg_dump.o
 ipam-$(CONFIG_IPA_UT) += test/ipa_ut_framework.o test/ipa_test_example.o \
 	test/ipa_test_mhi.o test/ipa_test_dma.o \
 	test/ipa_test_hw_stats.o test/ipa_pm_ut.o \
-	test/ipa_test_wdi3.o
+	test/ipa_test_wdi3.o test/ipa_test_ntn.o
 
 ipanetm-y += ipa_v3/ipa_net.o
 

+ 92 - 51
drivers/platform/msm/ipa/ipa_v3/ipa_client.c

@@ -376,7 +376,7 @@ int ipa3_smmu_map_peer_buff(u64 iova, u32 size, bool map, struct sg_table *sgt,
 	enum ipa_smmu_cb_type cb_type)
 {
 	struct iommu_domain *smmu_domain;
-	int res;
+	int res, ret = 0;
 	phys_addr_t phys;
 	unsigned long va;
 	struct scatterlist *sg;
@@ -417,7 +417,8 @@ int ipa3_smmu_map_peer_buff(u64 iova, u32 size, bool map, struct sg_table *sgt,
 				res = ipa3_iommu_map(smmu_domain, va, phys,
 					len, IOMMU_READ | IOMMU_WRITE);
 				if (res) {
-					IPAERR("Fail to map pa=%pa\n", &phys);
+					IPAERR("Fail to map pa=%pa, va 0x%X\n",
+						&phys, va);
 					return -EINVAL;
 				}
 				va += len;
@@ -437,18 +438,39 @@ int ipa3_smmu_map_peer_buff(u64 iova, u32 size, bool map, struct sg_table *sgt,
 			}
 		}
 	} else {
-		res = iommu_unmap(smmu_domain,
-		rounddown(iova, PAGE_SIZE),
-		roundup(size + iova - rounddown(iova, PAGE_SIZE),
-		PAGE_SIZE));
-		if (res != roundup(size + iova - rounddown(iova, PAGE_SIZE),
-			PAGE_SIZE)) {
-			IPAERR("Fail to unmap 0x%llx\n", iova);
-			return -EINVAL;
+		if (sgt != NULL) {
+			va = rounddown(iova, PAGE_SIZE);
+			for_each_sg(sgt->sgl, sg, sgt->nents, i)
+			{
+				page = sg_page(sg);
+				phys = page_to_phys(page);
+				len = PAGE_ALIGN(sg->offset + sg->length);
+				res = iommu_unmap(smmu_domain, va, len);
+				if (res != len) {
+					IPAERR(
+						"Fail to unmap pa=%pa, va 0x%X, res %d\n"
+						, &phys, va, res);
+					ret = -EINVAL;
+				}
+				va += len;
+				count++;
+			}
+		} else {
+			res = iommu_unmap(smmu_domain,
+				rounddown(iova, PAGE_SIZE),
+				roundup(
+				size + iova - rounddown(iova, PAGE_SIZE),
+					PAGE_SIZE));
+			if (res != roundup(
+			size + iova - rounddown(iova, PAGE_SIZE),
+				PAGE_SIZE)) {
+				IPAERR("Fail to unmap 0x%llx\n", iova);
+				return -EINVAL;
+			}
 		}
 	}
 	IPADBG("Peer buff %s 0x%llx\n", map ? "map" : "unmap", iova);
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL(ipa3_smmu_map_peer_buff);
 
@@ -1992,6 +2014,32 @@ int ipa3_clear_endpoint_delay(u32 clnt_hdl)
 	return 0;
 }
 
+static void ipa3_get_gsi_ring_stats(struct IpaHwRingStats_t *ring,
+	struct ipa3_uc_dbg_stats *ctx_stats, int idx)
+{
+	ring->ringFull = ioread32(
+		ctx_stats->uc_dbg_stats_mmio
+		+ idx * IPA3_UC_DEBUG_STATS_OFF +
+		IPA3_UC_DEBUG_STATS_RINGFULL_OFF);
+
+	ring->ringEmpty = ioread32(
+		ctx_stats->uc_dbg_stats_mmio
+		+ idx * IPA3_UC_DEBUG_STATS_OFF +
+		IPA3_UC_DEBUG_STATS_RINGEMPTY_OFF);
+	ring->ringUsageHigh = ioread32(
+		ctx_stats->uc_dbg_stats_mmio
+		+ idx * IPA3_UC_DEBUG_STATS_OFF +
+		IPA3_UC_DEBUG_STATS_RINGUSAGEHIGH_OFF);
+	ring->ringUsageLow = ioread32(
+		ctx_stats->uc_dbg_stats_mmio
+		+ idx * IPA3_UC_DEBUG_STATS_OFF +
+		IPA3_UC_DEBUG_STATS_RINGUSAGELOW_OFF);
+	ring->RingUtilCount = ioread32(
+		ctx_stats->uc_dbg_stats_mmio
+		+ idx * IPA3_UC_DEBUG_STATS_OFF +
+		IPA3_UC_DEBUG_STATS_RINGUTILCOUNT_OFF);
+}
+
 /**
  * ipa3_get_aqc_gsi_stats() - Query AQC gsi stats from uc
  * @stats:	[inout] stats blob from client populated by driver
@@ -2011,32 +2059,43 @@ int ipa3_get_aqc_gsi_stats(struct ipa_uc_dbg_ring_stats *stats)
 	}
 	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
 	for (i = 0; i < MAX_AQC_CHANNELS; i++) {
-		stats->u.ring[i].ringFull = ioread32(
-			ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio
-			+ i * IPA3_UC_DEBUG_STATS_OFF +
-			IPA3_UC_DEBUG_STATS_RINGFULL_OFF);
-		stats->u.ring[i].ringEmpty = ioread32(
-			ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio
-			+ i * IPA3_UC_DEBUG_STATS_OFF +
-			IPA3_UC_DEBUG_STATS_RINGEMPTY_OFF);
-		stats->u.ring[i].ringUsageHigh = ioread32(
-			ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio
-			+ i * IPA3_UC_DEBUG_STATS_OFF +
-			IPA3_UC_DEBUG_STATS_RINGUSAGEHIGH_OFF);
-		stats->u.ring[i].ringUsageLow = ioread32(
-			ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio
-			+ i * IPA3_UC_DEBUG_STATS_OFF +
-			IPA3_UC_DEBUG_STATS_RINGUSAGELOW_OFF);
-		stats->u.ring[i].RingUtilCount = ioread32(
-			ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio
-			+ i * IPA3_UC_DEBUG_STATS_OFF +
-			IPA3_UC_DEBUG_STATS_RINGUTILCOUNT_OFF);
+		ipa3_get_gsi_ring_stats(stats->u.ring + i,
+			&ipa3_ctx->aqc_ctx.dbg_stats, i);
+	}
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+
+	return 0;
+}
+
+/**
+* ipa3_get_ntn_gsi_stats() - Query NTN gsi stats from uc
+* @stats:	[inout] stats blob from client populated by driver
+*
+* Returns:	0 on success, negative on failure
+*
+* @note Cannot be called from atomic context
+*
+*/
+int ipa3_get_ntn_gsi_stats(struct ipa_uc_dbg_ring_stats *stats)
+{
+	int i;
+
+	if (!ipa3_ctx->ntn_ctx.dbg_stats.uc_dbg_stats_mmio) {
+		IPAERR("bad parms NULL ntn_gsi_stats_mmio\n");
+		return -EINVAL;
+	}
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	for (i = 0; i < MAX_NTN_CHANNELS; i++) {
+		ipa3_get_gsi_ring_stats(stats->u.ring + i,
+			&ipa3_ctx->ntn_ctx.dbg_stats, i);
 	}
 	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 
 
 	return 0;
 }
+
 /**
  * ipa3_get_rtk_gsi_stats() - Query RTK gsi stats from uc
  * @stats:	[inout] stats blob from client populated by driver
@@ -2057,26 +2116,8 @@ int ipa3_get_rtk_gsi_stats(struct ipa_uc_dbg_ring_stats *stats)
 	}
 	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
 	for (i = 0; i < MAX_RTK_CHANNELS; i++) {
-		stats->u.rtk[i].commStats.ringFull = ioread32(
-			ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio
-			+ i * IPA3_UC_DEBUG_STATS_RTK_OFF +
-			IPA3_UC_DEBUG_STATS_RINGFULL_OFF);
-		stats->u.rtk[i].commStats.ringEmpty = ioread32(
-			ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio
-			+ i * IPA3_UC_DEBUG_STATS_RTK_OFF +
-			IPA3_UC_DEBUG_STATS_RINGEMPTY_OFF);
-		stats->u.rtk[i].commStats.ringUsageHigh = ioread32(
-			ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio
-			+ i * IPA3_UC_DEBUG_STATS_RTK_OFF +
-			IPA3_UC_DEBUG_STATS_RINGUSAGEHIGH_OFF);
-		stats->u.rtk[i].commStats.ringUsageLow = ioread32(
-			ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio
-			+ i * IPA3_UC_DEBUG_STATS_RTK_OFF +
-			IPA3_UC_DEBUG_STATS_RINGUSAGELOW_OFF);
-		stats->u.rtk[i].commStats.RingUtilCount = ioread32(
-			ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio
-			+ i * IPA3_UC_DEBUG_STATS_RTK_OFF +
-			IPA3_UC_DEBUG_STATS_RINGUTILCOUNT_OFF);
+		ipa3_get_gsi_ring_stats(&stats->u.rtk[i].commStats,
+			&ipa3_ctx->rtk_ctx.dbg_stats, i);
 		stats->u.rtk[i].trCount = ioread32(
 			ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio
 			+ i * IPA3_UC_DEBUG_STATS_RTK_OFF +

+ 28 - 15
drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c

@@ -3281,9 +3281,16 @@ static ssize_t ipa3_eth_read_perf_status(struct file *file,
 	switch (client->client_type) {
 	case IPA_ETH_CLIENT_AQC107:
 	case IPA_ETH_CLIENT_AQC113:
-		ret = ipa3_get_aqc_gsi_stats(&stats);
-		tx_ep = IPA_CLIENT_AQC_ETHERNET_CONS;
-		rx_ep = IPA_CLIENT_AQC_ETHERNET_PROD;
+	case IPA_ETH_CLIENT_NTN:
+		if (client->client_type == IPA_ETH_CLIENT_NTN) {
+			ret = ipa3_get_ntn_gsi_stats(&stats);
+			tx_ep = IPA_CLIENT_ETHERNET_CONS;
+			rx_ep = IPA_CLIENT_ETHERNET_PROD;
+		} else {
+			ret = ipa3_get_aqc_gsi_stats(&stats);
+			tx_ep = IPA_CLIENT_AQC_ETHERNET_CONS;
+			rx_ep = IPA_CLIENT_AQC_ETHERNET_PROD;
+		}
 		if (!ret) {
 			nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
 			"%s_ringFull=%u\n"
@@ -3323,7 +3330,9 @@ static ssize_t ipa3_eth_read_perf_status(struct file *file,
 		} else {
 			nbytes = scnprintf(dbg_buff,
 				IPA_MAX_MSG_LEN,
-				"Fail to read AQC GSI stats\n");
+				"Fail to read [%s][%s] GSI stats\n",
+				ipa_clients_strings[rx_ep],
+				ipa_clients_strings[tx_ep]);
 			cnt += nbytes;
 		}
 		break;
@@ -3394,7 +3403,7 @@ static ssize_t ipa3_eth_read_perf_status(struct file *file,
 			cnt += nbytes;
 		} else {
 			nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
-				"Fail to read AQC GSI stats\n");
+				"Fail to read RTK GSI stats\n");
 			cnt += nbytes;
 		}
 		break;
@@ -3415,6 +3424,7 @@ static ssize_t ipa3_eth_read_err_status(struct file *file,
 	int tx_ep, rx_ep;
 	struct ipa3_eth_error_stats tx_stats;
 	struct ipa3_eth_error_stats rx_stats;
+	int scratch_num;
 
 	memset(&tx_stats, 0, sizeof(struct ipa3_eth_error_stats));
 	memset(&rx_stats, 0, sizeof(struct ipa3_eth_error_stats));
@@ -3433,42 +3443,45 @@ static ssize_t ipa3_eth_read_err_status(struct file *file,
 	case IPA_ETH_CLIENT_AQC113:
 		tx_ep = IPA_CLIENT_AQC_ETHERNET_CONS;
 		rx_ep = IPA_CLIENT_AQC_ETHERNET_PROD;
-		break;
+		scratch_num = 7;
+		break;
 	case IPA_ETH_CLIENT_RTK8111K:
 	case IPA_ETH_CLIENT_RTK8125B:
 		tx_ep = IPA_CLIENT_RTK_ETHERNET_CONS;
 		rx_ep = IPA_CLIENT_RTK_ETHERNET_PROD;
-		ipa3_eth_get_status(tx_ep, 5, &tx_stats);
-		ipa3_eth_get_status(rx_ep, 5, &rx_stats);
+		scratch_num = 5;
 		break;
+	case IPA_ETH_CLIENT_NTN:
+		tx_ep = IPA_CLIENT_ETHERNET_CONS;
+		rx_ep = IPA_CLIENT_ETHERNET_PROD;
+		scratch_num = 6;
+		break;
 	default:
 		IPAERR("Not supported\n");
 		return 0;
 	}
+	ipa3_eth_get_status(tx_ep, scratch_num, &tx_stats);
+	ipa3_eth_get_status(rx_ep, scratch_num, &rx_stats);
+
 	nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
 		"%s_RP=0x%x\n"
 		"%s_WP=0x%x\n"
-		"%s_SCRATCH5=0x%x\n",
+		"%s_err:%u (scratch %d)\n",
 		ipa_clients_strings[tx_ep],
 		tx_stats.rp,
 		ipa_clients_strings[tx_ep],
 		tx_stats.wp,
 		ipa_clients_strings[tx_ep],
-		tx_stats.err);
+		tx_stats.err, scratch_num);
 	cnt += nbytes;
 	nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
 		"%s_RP=0x%x\n"
 		"%s_WP=0x%x\n"
-		"%s_SCRATCH5=0x%x\n"
-		"%s_err:%u\n",
+		"%s_err:%u (scratch %d)\n",
 		ipa_clients_strings[rx_ep],
 		rx_stats.rp,
 		ipa_clients_strings[rx_ep],
 		rx_stats.wp,
 		ipa_clients_strings[rx_ep],
-		rx_stats.err,
-		ipa_clients_strings[rx_ep],
-		rx_stats.err & 0xff);
+		rx_stats.err, scratch_num);
 	cnt += nbytes;
 done:
 	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);

+ 285 - 57
drivers/platform/msm/ipa/ipa_v3/ipa_eth_i.c

@@ -17,9 +17,13 @@
 #define IPA_ETH_AQC_MODC_FACTOR (10)
 #define AQC_WRB_MODC_FACTOR (10)
 
+#define IPA_ETH_NTN_MODT (32)
+#define IPA_ETH_NTN_MODC (128)
+
+#define NTN_BUFFER_SIZE 2048 /* 2K */
 
 #define IPA_ETH_AGGR_PKT_LIMIT 1
-#define IPA_ETH_AGGR_BYTE_LIMIT 2 /*2 Kbytes Agger hard byte limit*/
+#define IPA_ETH_AGGR_BYTE_LIMIT 2 /* 2 Kbytes Agger hard byte limit */
 
 #define IPA_ETH_MBOX_M (1)
 
@@ -454,6 +458,7 @@ static struct iommu_domain *ipa_eth_get_smmu_domain(
 		return ipa3_get_eth_smmu_domain();
 	if (IPA_CLIENT_IS_SMMU_ETH1_INSTANCE(client_type))
 		return ipa3_get_eth1_smmu_domain();
+
 	return NULL;
 }
 
@@ -495,7 +500,9 @@ static int ipa3_smmu_map_eth_pipes(struct ipa_eth_client_pipe_info *pipe,
 	enum ipa_smmu_cb_type cb_type;
 
 	if (pipe->info.fix_buffer_size > PAGE_SIZE) {
-		IPAERR("invalid data buff size\n");
+		IPAERR("%s: invalid data buff size %d\n",
+			pipe->dir == IPA_ETH_PIPE_DIR_TX ? "TX" : "RX",
+			pipe->info.fix_buffer_size);
 		return -EINVAL;
 	}
 
@@ -511,7 +518,7 @@ static int ipa3_smmu_map_eth_pipes(struct ipa_eth_client_pipe_info *pipe,
 		pipe->info.transfer_ring_sgt,
 		IPA_SMMU_CB_AP);
 	if (result) {
-		IPAERR("failed to %s ntn ring %d\n",
+		IPAERR("failed to %s ring %d\n",
 			map ? "map" : "unmap", result);
 		return -EINVAL;
 	}
@@ -536,41 +543,58 @@ map_buffer:
 		"SMMU cb %d is not shared, continue to map buffers\n", cb_type);
 	}
 
-	smmu_domain = ipa_eth_get_smmu_domain(client_type);
-	if (!smmu_domain) {
-		IPAERR("invalid smmu domain\n");
-		result = -EINVAL;
-		goto fail_map_buffer_smmu_enabled;
-	}
-
-	prev_iova_p = 0;
-	for (i = 0; i < pipe->info.data_buff_list_size; i++) {
-		iova = (u64)pipe->info.data_buff_list[i].iova;
-		pa = (phys_addr_t)pipe->info.data_buff_list[i].pa;
-		IPA_SMMU_ROUND_TO_PAGE(iova, pa, pipe->info.fix_buffer_size,
-			iova_p, pa_p, size_p);
-		/* Add check on every 2nd buffer for AQC smmu-dup issue */
-		if (prev_iova_p == iova_p) {
-			IPADBG_LOW(
-				"current buffer and previous are on the same page, skip page mapping\n"
-			);
-			continue;
+	if (pipe->info.is_buffer_pool_valid) {
+		IPADBG("buffer pool valid\n");
+		result = ipa3_smmu_map_peer_buff(
+			(u64)pipe->info.buffer_pool_base_addr,
+			pipe->info.fix_buffer_size,
+			map,
+			pipe->info.buffer_pool_base_sgt,
+			cb_type);
+		if (result) {
+			IPAERR("failed to %s buffer %d cb_type %d\n",
+				map ? "map" : "unmap", result, cb_type);
+			goto fail_map_buffer_smmu_enabled;
 		}
-		prev_iova_p = iova_p;
-		IPADBG_LOW("%s 0x%llx to 0x%pa size %d\n", map ? "mapping" :
-			"unmapping", iova_p, &pa_p, size_p);
-		if (map) {
-			result = ipa3_iommu_map(smmu_domain, iova_p, pa_p,
-				size_p, IOMMU_READ | IOMMU_WRITE);
-			if (result)
-				IPAERR("Fail to map 0x%llx\n", iova);
-		} else {
-			result = iommu_unmap(smmu_domain, iova_p, size_p);
-			if (result != size_p) {
-				IPAERR("Fail to unmap 0x%llx\n", iova);
+	} else {
+		IPADBG("buffer pool not valid\n");
+		smmu_domain = ipa_eth_get_smmu_domain(client_type);
+		if (!smmu_domain) {
+			IPAERR("invalid smmu domain\n");
+			result = -EINVAL;
+			goto fail_map_buffer_smmu_enabled;
+		}
+
+		prev_iova_p = 0;
+		for (i = 0; i < pipe->info.data_buff_list_size; i++) {
+			iova = (u64)pipe->info.data_buff_list[i].iova;
+			pa = (phys_addr_t)pipe->info.data_buff_list[i].pa;
+			IPA_SMMU_ROUND_TO_PAGE(iova, pa, pipe->info.fix_buffer_size,
+				iova_p, pa_p, size_p);
+			/* Add check on every 2nd buffer for AQC smmu-dup issue */
+			if (prev_iova_p == iova_p) {
+				IPADBG_LOW(
+					"current buffer and previous are on the same page, skip page mapping\n"
+				);
+				continue;
+			}
+			prev_iova_p = iova_p;
+			IPADBG_LOW("%s 0x%llx to 0x%pa size %d\n", map ? "mapping" :
+				"unmapping", iova_p, &pa_p, size_p);
+			if (map) {
+				result = ipa3_iommu_map(smmu_domain, iova_p, pa_p,
+					size_p, IOMMU_READ | IOMMU_WRITE);
+				if (result)
+					IPAERR("Fail to map 0x%llx\n", iova);
+			} else {
+				result = iommu_unmap(smmu_domain, iova_p, size_p);
+				if (result != size_p) {
+					IPAERR("Fail to unmap 0x%llx\n", iova);
+				}
 			}
 		}
 	}
+
 	return 0;
 
 fail_map_buffer_smmu_enabled:
@@ -709,6 +733,139 @@ fail_get_gsi_ep_info:
 	return result;
 }
 
+static int ipa_eth_setup_ntn_gsi_channel(
+	struct ipa_eth_client_pipe_info *pipe,
+	struct ipa3_ep_context *ep)
+{
+	struct gsi_evt_ring_props gsi_evt_ring_props;
+	struct gsi_chan_props gsi_channel_props;
+	union __packed gsi_channel_scratch ch_scratch;
+	union __packed gsi_evt_scratch evt_scratch;
+	const struct ipa_gsi_ep_config *gsi_ep_info;
+	int result, len;
+	u64 bar_addr;
+
+	if (unlikely(!pipe->info.is_transfer_ring_valid)) {
+		IPAERR("NTN transfer ring invalid\n");
+		ipa_assert();
+		return -EFAULT;
+	}
+
+	/* don't assert bit 40 in test mode as we emulate regs on DDR, not
+	 * on PCIe address space */
+	bar_addr = pipe->client_info->test ?
+		pipe->info.client_info.ntn.bar_addr :
+		IPA_ETH_PCIE_SET(pipe->info.client_info.ntn.bar_addr);
+
+	/* setup event ring */
+	memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
+	gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_NTN_EV;
+	gsi_evt_ring_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
+	gsi_evt_ring_props.intr = GSI_INTR_MSI;
+	gsi_evt_ring_props.int_modt = IPA_ETH_NTN_MODT;
+	gsi_evt_ring_props.int_modc = IPA_ETH_NTN_MODC;
+	gsi_evt_ring_props.exclusive = true;
+	gsi_evt_ring_props.err_cb = ipa_eth_gsi_evt_ring_err_cb;
+	gsi_evt_ring_props.user_data = NULL;
+	gsi_evt_ring_props.msi_addr =
+		bar_addr +
+		pipe->info.client_info.ntn.tail_ptr_offs;
+	len = pipe->info.transfer_ring_size;
+	gsi_evt_ring_props.ring_len = len;
+	gsi_evt_ring_props.ring_base_addr =
+		(u64)pipe->info.transfer_ring_base;
+	result = gsi_alloc_evt_ring(&gsi_evt_ring_props,
+		ipa3_ctx->gsi_dev_hdl,
+		&ep->gsi_evt_ring_hdl);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("fail to alloc RX event ring\n");
+		result = -EFAULT;
+		return result;
+	}
+
+	ep->gsi_mem_info.evt_ring_len =
+		gsi_evt_ring_props.ring_len;
+	ep->gsi_mem_info.evt_ring_base_addr =
+		gsi_evt_ring_props.ring_base_addr;
+
+	/* setup channel ring */
+	memset(&gsi_channel_props, 0, sizeof(gsi_channel_props));
+	gsi_channel_props.prot = GSI_CHAN_PROT_NTN;
+	if (pipe->dir == IPA_ETH_PIPE_DIR_TX)
+		gsi_channel_props.dir = GSI_CHAN_DIR_FROM_GSI;
+	else
+		gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI;
+	gsi_ep_info = ipa3_get_gsi_ep_info(ep->client);
+	if (!gsi_ep_info) {
+		IPAERR("Failed getting GSI EP info for client=%d\n",
+			ep->client);
+		result = -EINVAL;
+		goto fail_get_gsi_ep_info;
+	} else
+		gsi_channel_props.ch_id = gsi_ep_info->ipa_gsi_chan_num;
+	gsi_channel_props.evt_ring_hdl = ep->gsi_evt_ring_hdl;
+	gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_16B;
+	if (pipe->dir == IPA_ETH_PIPE_DIR_TX)
+		gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE;
+	else
+		gsi_channel_props.use_db_eng = GSI_CHAN_DIRECT_MODE;
+	gsi_channel_props.db_in_bytes = 1;
+	gsi_channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
+	gsi_channel_props.prefetch_mode =
+		gsi_ep_info->prefetch_mode;
+	gsi_channel_props.empty_lvl_threshold =
+		gsi_ep_info->prefetch_threshold;
+	gsi_channel_props.low_weight = 1;
+	gsi_channel_props.err_cb = ipa_eth_gsi_chan_err_cb;
+	gsi_channel_props.ring_len = len;
+	gsi_channel_props.ring_base_addr =
+		(u64)pipe->info.transfer_ring_base;
+	result = gsi_alloc_channel(&gsi_channel_props, ipa3_ctx->gsi_dev_hdl,
+		&ep->gsi_chan_hdl);
+	if (result != GSI_STATUS_SUCCESS)
+		goto fail_get_gsi_ep_info;
+	ep->gsi_mem_info.chan_ring_len = gsi_channel_props.ring_len;
+	ep->gsi_mem_info.chan_ring_base_addr =
+		gsi_channel_props.ring_base_addr;
+
+	/* write event scratch */
+	memset(&evt_scratch, 0, sizeof(evt_scratch));
+	/* nothing is needed for NTN event scratch */
+
+	/* write ch scratch */
+	memset(&ch_scratch, 0, sizeof(ch_scratch));
+	ch_scratch.ntn.fix_buff_size =
+		ilog2(pipe->info.fix_buffer_size);
+	if (pipe->info.is_buffer_pool_valid) {
+		ch_scratch.ntn.buff_addr_lsb =
+			(u32)pipe->info.buffer_pool_base_addr;
+		ch_scratch.ntn.buff_addr_msb =
+			(u32)((u64)(pipe->info.buffer_pool_base_addr) >> 32);
+	}
+	else {
+		ch_scratch.ntn.buff_addr_lsb =
+			(u32)pipe->info.data_buff_list[0].iova;
+		ch_scratch.ntn.buff_addr_msb =
+			(u32)((u64)(pipe->info.data_buff_list[0].iova) >> 32);
+	}
+
+	if (pipe->dir == IPA_ETH_PIPE_DIR_TX)
+		ch_scratch.ntn.ioc_mod_threshold = IPA_ETH_NTN_MODT;
+
+	result = gsi_write_channel_scratch(ep->gsi_chan_hdl, ch_scratch);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("failed to write channel scratch\n");
+		goto fail_write_scratch;
+	}
+	return 0;
+fail_write_scratch:
+	gsi_dealloc_channel(ep->gsi_chan_hdl);
+	ep->gsi_chan_hdl = ~0;
+fail_get_gsi_ep_info:
+	gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
+	ep->gsi_evt_ring_hdl = ~0;
+	return result;
+}
+
 static int ipa3_eth_get_prot(struct ipa_eth_client_pipe_info *pipe,
 	enum ipa4_hw_protocol *prot)
 {
@@ -724,6 +881,8 @@ static int ipa3_eth_get_prot(struct ipa_eth_client_pipe_info *pipe,
 		*prot = IPA_HW_PROTOCOL_RTK;
 		break;
 	case IPA_ETH_CLIENT_NTN:
+		*prot = IPA_HW_PROTOCOL_NTN3;
+		break;
 	case IPA_ETH_CLIENT_EMAC:
 		*prot = IPA_HW_PROTOCOL_ETH;
 		break;
@@ -758,6 +917,21 @@ int ipa3_eth_connect(
 		IPAERR("undefined client_type\n");
 		return -EFAULT;
 	}
+
+	/* currently all protocols require valid transfer ring */
+	if (!pipe->info.is_transfer_ring_valid) {
+		IPAERR("transfer ring not valid!\n");
+		return -EINVAL;
+	}
+
+	if (pipe->client_info->client_type == IPA_ETH_CLIENT_NTN) {
+		if (pipe->info.fix_buffer_size != NTN_BUFFER_SIZE) {
+			IPAERR("fix buffer size %u not valid for NTN, use 2K\n"
+				, pipe->info.fix_buffer_size);
+			return -EINVAL;
+		}
+	}
+
 	/* need enhancement for vlan support on multiple attach */
 	result = ipa3_is_vlan_mode(IPA_VLAN_IF_ETH, &vlan_mode);
 	if (result) {
@@ -771,11 +945,6 @@ int ipa3_eth_connect(
 		return result;
 	}
 
-	if (prot == IPA_HW_PROTOCOL_ETH) {
-		IPAERR("EMAC\\NTN still not supported using this framework\n");
-		return -EFAULT;
-	}
-
 	result = ipa3_smmu_map_eth_pipes(pipe, client_type, true);
 	if (result) {
 		IPAERR("failed to map SMMU %d\n", result);
@@ -817,19 +986,26 @@ int ipa3_eth_connect(
 	IPADBG("client %d (ep: %d) connected\n", client_type,
 		ep_idx);
 
-	if (prot == IPA_HW_PROTOCOL_RTK) {
-		if (ipa_eth_setup_rtk_gsi_channel(pipe, ep)) {
-			IPAERR("fail to setup eth gsi rx channel\n");
-			result = -EFAULT;
-			goto setup_gsi_ch_fail;
-		}
-	} else if (prot == IPA_HW_PROTOCOL_AQC) {
-		if (ipa_eth_setup_aqc_gsi_channel(pipe, ep)) {
+	switch (prot) {
+	case IPA_HW_PROTOCOL_RTK:
+		result = ipa_eth_setup_rtk_gsi_channel(pipe, ep);
+		break;
+	case IPA_HW_PROTOCOL_AQC:
+		result = ipa_eth_setup_aqc_gsi_channel(pipe, ep);
+		break;
+	case IPA_HW_PROTOCOL_NTN3:
+		result = ipa_eth_setup_ntn_gsi_channel(pipe, ep);
+		break;
+	default:
+		IPAERR("unknown protocol %d\n", prot);
+		result = -EINVAL;
+	}
+	if (result) {
 			IPAERR("fail to setup eth gsi rx channel\n");
 			result = -EFAULT;
 			goto setup_gsi_ch_fail;
-		}
 	}
+
 	if (gsi_query_channel_db_addr(ep->gsi_chan_hdl,
 		&gsi_db_addr_low, &gsi_db_addr_high)) {
 		IPAERR("failed to query gsi rx db addr\n");
@@ -837,7 +1013,8 @@ int ipa3_eth_connect(
 		goto query_ch_db_fail;
 	}
 	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v5_0) {
-		if (prot == IPA_HW_PROTOCOL_AQC) {
+		switch (prot) {
+		case IPA_HW_PROTOCOL_AQC:
 			if (IPA_CLIENT_IS_PROD(client_type)) {
 				if (gsi_query_msi_addr(ep->gsi_chan_hdl,
 					&pipe->info.db_pa)) {
@@ -850,13 +1027,19 @@ int ipa3_eth_connect(
 				pipe->info.db_val = 0;
 				/* only 32 bit lsb is used */
 				db_addr = ioremap((phys_addr_t)(gsi_db_addr_low), 4);
+				if (!db_addr) {
+					IPAERR("ioremap failed\n");
+					result = -EFAULT;
+					goto ioremap_fail;
+				}
 				/* TX: Initialize to end of ring */
 				db_val = (u32)ep->gsi_mem_info.chan_ring_base_addr;
 				db_val += (u32)ep->gsi_mem_info.chan_ring_len;
 				iowrite32(db_val, db_addr);
 				iounmap(db_addr);
 			}
-		} else if (prot == IPA_HW_PROTOCOL_RTK) {
+			break;
+		case IPA_HW_PROTOCOL_RTK:
 			if (gsi_query_msi_addr(ep->gsi_chan_hdl,
 					&pipe->info.db_pa)) {
 				result = -EFAULT;
@@ -865,11 +1048,45 @@ int ipa3_eth_connect(
 			if (IPA_CLIENT_IS_CONS(client_type)) {
 				/* only 32 bit lsb is used */
 				db_addr = ioremap((phys_addr_t)(pipe->info.db_pa), 4);
+				if (!db_addr) {
+					IPAERR("ioremap failed\n");
+					result = -EFAULT;
+					goto ioremap_fail;
+				}
 				/* TX: ring MSI doorbell */
 				db_val = IPA_ETH_MSI_DB_VAL;
 				iowrite32(db_val, db_addr);
 				iounmap(db_addr);
 			}
+			break;
+		case IPA_HW_PROTOCOL_NTN3:
+			pipe->info.db_pa = gsi_db_addr_low;
+			pipe->info.db_val = 0;
+
+			/* only 32 bit lsb is used */
+			db_addr = ioremap((phys_addr_t)(gsi_db_addr_low), 4);
+			if (!db_addr) {
+				IPAERR("ioremap failed\n");
+				result = -EFAULT;
+				goto ioremap_fail;
+			}
+			if (IPA_CLIENT_IS_PROD(client_type)) {
+				/* Rx: Initialize to ring base (i.e point 6) */
+				db_val =
+				(u32)ep->gsi_mem_info.chan_ring_base_addr;
+			} else {
+				/* TX: Initialize to end of ring */
+				db_val =
+				(u32)ep->gsi_mem_info.chan_ring_base_addr;
+				db_val +=
+				(u32)ep->gsi_mem_info.chan_ring_len;
+			}
+			iowrite32(db_val, db_addr);
+			iounmap(db_addr);
+			break;
+		default:
+			/* we can't really get here as we checked prot before */
+			IPAERR("unknown protocol %d\n", prot);
 		}
 	} else {
 		if (IPA_CLIENT_IS_PROD(client_type)) {
@@ -891,6 +1108,11 @@ int ipa3_eth_connect(
 			}
 			/* only 32 bit lsb is used */
 			db_addr = ioremap((phys_addr_t)(gsi_db_addr_low), 4);
+			if (!db_addr) {
+				IPAERR("ioremap failed\n");
+				result = -EFAULT;
+				goto ioremap_fail;
+			}
 			/* Rx: Initialize to ring base (i.e point 6) */
 			db_val = (u32)ep->gsi_mem_info.chan_ring_base_addr;
 			iowrite32(db_val, db_addr);
@@ -910,6 +1132,11 @@ int ipa3_eth_connect(
 			}
 			/* only 32 bit lsb is used */
 			db_addr = ioremap((phys_addr_t)(gsi_db_addr_low), 4);
+			if (!db_addr) {
+				IPAERR("ioremap failed\n");
+				result = -EFAULT;
+				goto ioremap_fail;
+			}
 			/* TX: Initialize to end of ring */
 			db_val = (u32)ep->gsi_mem_info.chan_ring_base_addr;
 			db_val += (u32)ep->gsi_mem_info.chan_ring_len;
@@ -924,6 +1151,11 @@ int ipa3_eth_connect(
 		evt_ring_db_addr_high);
 	/* only 32 bit lsb is used */
 	db_addr = ioremap((phys_addr_t)(evt_ring_db_addr_low), 4);
+	if (!db_addr) {
+		IPAERR("ioremap failed\n");
+		result = -EFAULT;
+		goto ioremap_fail;
+	}
 	/*
 	* IPA/GSI driver should ring the event DB once after
 	* initialization of the event, with a value that is
@@ -996,7 +1228,7 @@ int ipa3_eth_connect(
 	ipa3_eth_save_client_mapping(pipe, client_type,
 		id, ep_idx, ep->gsi_chan_hdl);
 	if ((ipa3_ctx->ipa_hw_type == IPA_HW_v4_5) ||
-		(prot != IPA_HW_PROTOCOL_AQC)) {
+		(prot == IPA_HW_PROTOCOL_RTK)) {
 		result = ipa3_eth_config_uc(true,
 			prot,
 			(pipe->dir == IPA_ETH_PIPE_DIR_TX)
@@ -1026,6 +1258,7 @@ uc_init_peripheral_fail:
 start_channel_fail:
 	ipa3_disable_data_path(ep_idx);
 enable_data_path_fail:
+ioremap_fail:
 query_msi_fail:
 query_ch_db_fail:
 setup_gsi_ch_fail:
@@ -1053,11 +1286,6 @@ int ipa3_eth_disconnect(
 		return result;
 	}
 
-	if (prot == IPA_HW_PROTOCOL_ETH) {
-		IPAERR("EMAC\\NTN still not supported using this framework\n");
-		return -EFAULT;
-	}
-
 	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
 	ep_idx = ipa_get_ep_mapping(client_type);
 	if (ep_idx == -1 || ep_idx >= IPA3_MAX_NUM_PIPES) {
@@ -1095,7 +1323,7 @@ int ipa3_eth_disconnect(
 	}
 
 	if ((ipa3_ctx->ipa_hw_type == IPA_HW_v4_5) ||
-		(prot != IPA_HW_PROTOCOL_AQC)) {
+		(prot == IPA_HW_PROTOCOL_RTK)) {
 		result = ipa3_eth_config_uc(false,
 			prot,
 			(pipe->dir == IPA_ETH_PIPE_DIR_TX)
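
The NTN3 doorbell initialization added in ipa3_eth_connect() boils down to: write only the lower 32 bits, start the RX (producer) channel at the ring base and the TX (consumer) channel at the end of the ring. A condensed sketch of that logic, assuming ep and db_addr as in the hunk above:

	u32 db_val = (u32)ep->gsi_mem_info.chan_ring_base_addr;

	if (!IPA_CLIENT_IS_PROD(client_type))
		db_val += (u32)ep->gsi_mem_info.chan_ring_len;	/* TX: end of ring */
	iowrite32(db_val, db_addr);	/* RX: ring base */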

+ 9 - 0
drivers/platform/msm/ipa/ipa_v3/ipa_i.h

@@ -1740,6 +1740,13 @@ struct ipa3_rtk_ctx {
 	struct ipa3_uc_dbg_stats dbg_stats;
 };
 
+/**
+* struct ipa3_ntn_ctx - IPA ntn context
+*/
+struct ipa3_ntn_ctx {
+	struct ipa3_uc_dbg_stats dbg_stats;
+};
+
 /**
  * struct ipa3_transport_pm - transport power management related members
  * @transport_pm_mutex: Mutex to protect the transport_pm functionality.
@@ -2210,6 +2217,7 @@ struct ipa3_context {
 	struct ipa3_mhip_ctx mhip_ctx;
 	struct ipa3_aqc_ctx aqc_ctx;
 	struct ipa3_rtk_ctx rtk_ctx;
+	struct ipa3_ntn_ctx ntn_ctx;
 	atomic_t ipa_clk_vote;
 
 	int (*client_lock_unlock[IPA_MAX_CLNT])(bool is_lock);
@@ -2829,6 +2837,7 @@ int ipa3_get_wdi3_gsi_stats(struct ipa_uc_dbg_ring_stats *stats);
 int ipa3_get_usb_gsi_stats(struct ipa_uc_dbg_ring_stats *stats);
 int ipa3_get_aqc_gsi_stats(struct ipa_uc_dbg_ring_stats *stats);
 int ipa3_get_rtk_gsi_stats(struct ipa_uc_dbg_ring_stats *stats);
+int ipa3_get_ntn_gsi_stats(struct ipa_uc_dbg_ring_stats *stats);
 int ipa3_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats);
 u16 ipa3_get_smem_restr_bytes(void);
 int ipa3_broadcast_wdi_quota_reach_ind(uint32_t fid, uint64_t num_bytes);

+ 15 - 0
drivers/platform/msm/ipa/ipa_v3/ipa_uc.c

@@ -440,6 +440,17 @@ static void ipa3_uc_save_dbg_stats(u32 size)
 		} else
 			goto unmap;
 		break;
+	case IPA_HW_PROTOCOL_NTN3:
+		if (!ipa3_ctx->ntn_ctx.dbg_stats.uc_dbg_stats_mmio) {
+			ipa3_ctx->ntn_ctx.dbg_stats.uc_dbg_stats_size =
+				size;
+			ipa3_ctx->ntn_ctx.dbg_stats.uc_dbg_stats_ofst =
+				addr_offset;
+			ipa3_ctx->ntn_ctx.dbg_stats.uc_dbg_stats_mmio =
+				mmio;
+		} else
+			goto unmap;
+		break;
 	case IPA_HW_PROTOCOL_WDI:
 		if (!ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio) {
 			ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_size =
@@ -1627,6 +1638,10 @@ int ipa3_uc_debug_stats_dealloc(uint32_t prot_id)
 		iounmap(ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio);
 		ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio = NULL;
 		break;
+	case IPA_HW_PROTOCOL_NTN3:
+		iounmap(ipa3_ctx->ntn_ctx.dbg_stats.uc_dbg_stats_mmio);
+		ipa3_ctx->ntn_ctx.dbg_stats.uc_dbg_stats_mmio = NULL;
+		break;
 	case IPA_HW_PROTOCOL_WDI:
 		iounmap(ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio);
 		ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio = NULL;

+ 3 - 0
drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h

@@ -25,6 +25,7 @@
 
 #define MAX_AQC_CHANNELS 2
 #define MAX_RTK_CHANNELS 2
+#define MAX_NTN_CHANNELS 2
 #define MAX_11AD_CHANNELS 5
 #define MAX_WDI2_CHANNELS 2
 #define MAX_WDI3_CHANNELS 3
@@ -83,6 +84,7 @@ enum ipa3_hw_features {
  * @IPA_HW_PROTOCOL_MHIP: protocol related to MHIP operation in IPA HW
  * @IPA_HW_PROTOCOL_USB : protocol related to USB operation in IPA HW
  * @IPA_HW_PROTOCOL_RTK : protocol related to RTK operation in IPA HW
+ * @IPA_HW_PROTOCOL_NTN3 : protocol related to NTN3 operation in IPA HW
  */
 enum ipa4_hw_protocol {
 	IPA_HW_PROTOCOL_COMMON = 0x0,
@@ -94,6 +96,7 @@ enum ipa4_hw_protocol {
 	IPA_HW_PROTOCOL_MHIP = 0x6,
 	IPA_HW_PROTOCOL_USB = 0x7,
 	IPA_HW_PROTOCOL_RTK = 0x9,
+	IPA_HW_PROTOCOL_NTN3 = 0xA,
 	IPA_HW_PROTOCOL_MAX
 };
 

+ 44 - 4
drivers/platform/msm/ipa/ipa_v3/ipa_utils.c

@@ -11758,7 +11758,9 @@ void ipa3_get_gsi_stats(int prot_id,
 		stats->num_ch = MAX_WDI3_CHANNELS;
 		ipa3_get_wdi3_gsi_stats(stats);
 		break;
-	case IPA_HW_PROTOCOL_ETH:
+	case IPA_HW_PROTOCOL_NTN3:
+		stats->num_ch = MAX_NTN_CHANNELS;
+		ipa3_get_ntn_gsi_stats(stats);
 		break;
 	case IPA_HW_PROTOCOL_MHIP:
 		stats->num_ch = MAX_MHIP_CHANNELS;
@@ -11914,6 +11916,10 @@ int ipa3_get_prot_id(enum ipa_client_type client)
 void ipa3_eth_get_status(u32 client, int scratch_id,
 	struct ipa3_eth_error_stats *stats)
 {
+#define RTK_GSI_SCRATCH_ID 5
+#define AQC_GSI_SCRATCH_ID 7
+#define NTN_GSI_SCRATCH_ID 6
+
 	int ch_id;
 	int ipa_ep_idx;
 
@@ -11922,9 +11928,43 @@ void ipa3_eth_get_status(u32 client, int scratch_id,
 	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED)
 		return;
 	ch_id = ipa3_ctx->ep[ipa_ep_idx].gsi_chan_hdl;
-	stats->rp = gsi_get_refetch_reg(ch_id, true);
-	stats->wp = gsi_get_refetch_reg(ch_id, false);
-	stats->err = gsi_get_drop_stats(ipa_ep_idx, scratch_id);
+
+	/*
+	 * drop stats sometimes exist for RX and sometimes for Tx,
+	 * wp sometimes acquired from ch_cntxt_6 and sometimes from refetch,
+	 * depending on protocol.
+	 */
+	stats->err = 0;
+	switch (client) {
+	case IPA_CLIENT_RTK_ETHERNET_PROD:
+		stats->err = gsi_get_drop_stats(ipa_ep_idx, RTK_GSI_SCRATCH_ID,
+			ch_id);
+	case IPA_CLIENT_RTK_ETHERNET_CONS:
+		stats->wp = gsi_get_refetch_reg(ch_id, false);
+		stats->rp = gsi_get_refetch_reg(ch_id, true);
+		break;
+
+	case IPA_CLIENT_AQC_ETHERNET_PROD:
+		stats->err = gsi_get_drop_stats(ipa_ep_idx, AQC_GSI_SCRATCH_ID,
+			ch_id);
+		stats->wp = gsi_get_wp(ch_id);
+		stats->rp = gsi_get_refetch_reg(ch_id, true);
+		break;
+	case IPA_CLIENT_AQC_ETHERNET_CONS:
+		stats->wp = gsi_get_refetch_reg(ch_id, false);
+		stats->rp = gsi_get_refetch_reg(ch_id, true);
+		break;
+	case IPA_CLIENT_ETHERNET_PROD:
+		stats->wp = gsi_get_wp(ch_id);
+		stats->rp = gsi_get_refetch_reg(ch_id, true);
+		break;
+	case IPA_CLIENT_ETHERNET_CONS:
+		stats->err = gsi_get_drop_stats(ipa_ep_idx, NTN_GSI_SCRATCH_ID,
+			ch_id);
+		stats->wp = gsi_get_refetch_reg(ch_id, false);
+		stats->rp = gsi_get_refetch_reg(ch_id, true);
+		break;
+	}
 	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 }
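
As the comment in ipa3_eth_get_status() notes, the source of each statistic now depends on the client; summarized below (refetch = gsi_get_refetch_reg(), CNTXT_6 = gsi_get_wp(); rp always comes from the refetch register):

	/*
	 * client                   err (drop stats)    wp source
	 * RTK_ETHERNET_PROD        scratch 5           refetch
	 * RTK_ETHERNET_CONS        -                   refetch
	 * AQC_ETHERNET_PROD        scratch 7           CH_k_CNTXT_6
	 * AQC_ETHERNET_CONS        -                   refetch
	 * ETHERNET_PROD (NTN3)     -                   CH_k_CNTXT_6
	 * ETHERNET_CONS (NTN3)     scratch 6           refetch
	 */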
 

+ 1407 - 0
drivers/platform/msm/ipa/test/ipa_test_ntn.c

@@ -0,0 +1,1407 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#include "ipa_ut_framework.h"
+#include <linux/ipa_eth.h>
+#include <linux/ipa.h>
+#include <linux/delay.h>
+#include "ipa_i.h"
+
+#define NUM_TX_BUFS 10
+#define NUM_RX_BUFS 10
+#define NUM_RX_TR_ELE NUM_RX_BUFS
+#define NUM_TX_TR_ELE NUM_TX_BUFS
+
+#define PACKET_HEADER_SIZE 220
+#define ETH_PACKET_SIZE 4
+#define PACKET_CONTENT 0x12345678
+
+#define BUFFER_SIZE 2048 /* 2K */
+
+#define DB_REGISTER_SIZE 4
+#define RX_TAIL_PTR_OFF 0
+#define TX_TAIL_PTR_OFF 8
+
+#define IPA_TEST_NTN_NUM_PIPES 2
+
+struct ipa_test_ntn_context {
+	struct completion init_completion_obj;
+	bool ready;
+	int wait_cnt;
+	struct ipa_eth_client client;
+	struct ipa_eth_client_pipe_info rx_pipe_info;
+	struct ipa_eth_client_pipe_info tx_pipe_info;
+	struct ipa_mem_buffer tx_transfer_ring_addr;
+	struct sg_table *tx_transfer_ring_sgt;
+	struct ipa_mem_buffer rx_transfer_ring_addr;
+	struct sg_table *rx_transfer_ring_sgt;
+	struct ipa_mem_buffer tx_buf;
+	struct sg_table *tx_buff_sgt;
+	struct ipa_eth_buff_smmu_map tx_data_buff_list[NUM_TX_BUFS];
+	struct ipa_mem_buffer rx_buf;
+	struct sg_table *rx_buff_sgt;
+	struct ipa_mem_buffer bar_addr;
+	int rx_db_local;
+	int tx_db_local;
+	int rx_idx;
+	int tx_idx;
+};
+
+static struct ipa_test_ntn_context *test_ntn_ctx;
+
+/* TRE written by NTN (SW) */
+struct tx_transfer_ring_ele {
+	uint32_t res1;
+	uint32_t res2;
+	uint32_t res3;
+	uint32_t res4: 27;
+	uint32_t desc_status : 4;
+	uint32_t own : 1;
+}__packed;
+
+/* event written by GSI */
+struct tx_event_ring_ele {
+	uint32_t buff_addr_LSB;
+	uint32_t buff_addr_MSB;
+	uint32_t buffer_length : 14;
+	uint32_t reserved1 : 17;
+	uint32_t ioc : 1;
+	uint32_t reserved2 : 28;
+	uint32_t ld : 1;
+	uint32_t fd : 1;
+	uint32_t reserved3 : 1;
+	uint32_t own : 1;
+
+}__packed;
+
+/* TRE written by NTN (SW) */
+struct rx_transfer_ring_ele
+{
+	uint32_t reserved1;
+	uint32_t reserved2;
+	uint32_t reserved3;
+	uint32_t packet_length : 14;
+	uint32_t reserverd4 : 14;
+	uint32_t ld : 1;
+	uint32_t fd : 1;
+	uint32_t reserved5 : 1;
+	uint32_t own : 1;
+}__packed;
+
+/* event written by GSI */
+struct rx_event_ring_ele
+{
+	uint32_t buff_addr1;
+	uint32_t res_or_buff_addr1;
+	uint32_t buff_addr2;
+	uint32_t res_or_buff_addr2 : 30;
+	uint32_t ioc : 1;
+	uint32_t own : 1;
+}__packed;
+
+static void ipa_test_ntn_free_dma_buff(struct ipa_mem_buffer *mem)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_WLAN);
+
+	if (!mem) {
+		IPA_UT_ERR("empty pointer\n");
+		return;
+	}
+
+	dma_free_coherent(cb->dev, mem->size, mem->base,
+		mem->phys_base);
+}
+
+static int ipa_test_ntn_alloc_mmio(void)
+{
+	int ret = 0;
+	u32 size;
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_WLAN);
+
+	if (!test_ntn_ctx) {
+		IPA_UT_ERR("test_ntn_ctx is not initialized.\n");
+		return -EFAULT;
+	}
+
+	/* allocate tx transfer ring memory */
+	size = NUM_TX_TR_ELE * sizeof(struct tx_transfer_ring_ele);
+	test_ntn_ctx->tx_transfer_ring_addr.size = size;
+	test_ntn_ctx->tx_transfer_ring_addr.base =
+		dma_alloc_coherent(cb->dev, size,
+			&test_ntn_ctx->tx_transfer_ring_addr.phys_base,
+			GFP_KERNEL);
+	if (!test_ntn_ctx->tx_transfer_ring_addr.phys_base) {
+		IPA_UT_ERR("fail to alloc memory.\n");
+		return -ENOMEM;
+	}
+
+	test_ntn_ctx->tx_transfer_ring_sgt = kzalloc(
+		sizeof(*test_ntn_ctx->tx_transfer_ring_sgt), GFP_KERNEL);
+	if (!test_ntn_ctx->tx_transfer_ring_sgt) {
+		IPA_UT_ERR("fail to alloc memory.\n");
+		ret = -ENOMEM;
+		goto fail_alloc_tx_sgt;
+	}
+
+	ret = dma_get_sgtable(cb->dev,
+		test_ntn_ctx->tx_transfer_ring_sgt,
+		test_ntn_ctx->tx_transfer_ring_addr.base,
+		test_ntn_ctx->tx_transfer_ring_addr.phys_base,
+		size);
+	if (ret) {
+		IPA_UT_ERR("failed to get sgtable\n");
+		ret = -ENOMEM;
+		goto fail_get_tx_sgtable;
+	}
+
+	/* allocate rx transfer ring memory */
+	size = NUM_RX_TR_ELE * sizeof(struct rx_transfer_ring_ele);
+	test_ntn_ctx->rx_transfer_ring_addr.size = size;
+	test_ntn_ctx->rx_transfer_ring_addr.base =
+		dma_alloc_coherent(cb->dev, size,
+			&test_ntn_ctx->rx_transfer_ring_addr.phys_base,
+			GFP_KERNEL);
+	if (!test_ntn_ctx->rx_transfer_ring_addr.phys_base) {
+		IPA_UT_ERR("fail to alloc memory.\n");
+		ret = -ENOMEM;
+		goto fail_rx_transfer_ring;
+	}
+
+	test_ntn_ctx->rx_transfer_ring_sgt = kzalloc(
+		sizeof(*test_ntn_ctx->rx_transfer_ring_sgt), GFP_KERNEL);
+	if (!test_ntn_ctx->rx_transfer_ring_sgt) {
+		IPA_UT_ERR("fail to alloc memory.\n");
+		ret = -ENOMEM;
+		goto fail_alloc_rx_sgt;
+	}
+
+	ret = dma_get_sgtable(cb->dev,
+		test_ntn_ctx->rx_transfer_ring_sgt,
+		test_ntn_ctx->rx_transfer_ring_addr.base,
+		test_ntn_ctx->rx_transfer_ring_addr.phys_base,
+		size);
+	if (ret) {
+		IPA_UT_ERR("failed to get sgtable\n");
+		ret = -ENOMEM;
+		goto fail_get_rx_sgtable;
+	}
+
+	/* allocate tx buffers */
+	size = BUFFER_SIZE * NUM_TX_BUFS;
+	test_ntn_ctx->tx_buf.size = size;
+	test_ntn_ctx->tx_buf.base =
+		dma_alloc_coherent(cb->dev, size,
+			&test_ntn_ctx->tx_buf.phys_base,
+			GFP_KERNEL);
+	if (!test_ntn_ctx->tx_buf.phys_base) {
+		IPA_UT_ERR("fail to alloc memory.\n");
+		ret = -ENOMEM;
+		goto fail_tx_buf;
+	}
+
+	test_ntn_ctx->tx_buff_sgt = kzalloc(
+		sizeof(*test_ntn_ctx->tx_buff_sgt), GFP_KERNEL);
+	if (!test_ntn_ctx->tx_buff_sgt) {
+		IPA_UT_ERR("fail to alloc memory.\n");
+		ret = -ENOMEM;
+		goto fail_alloc_tx_buff_sgt;
+	}
+
+	ret = dma_get_sgtable(cb->dev,
+		test_ntn_ctx->tx_buff_sgt,
+		test_ntn_ctx->tx_buf.base,
+		test_ntn_ctx->tx_buf.phys_base,
+		size);
+	if (ret) {
+		IPA_UT_ERR("failed to get sgtable\n");
+		ret = -ENOMEM;
+		goto fail_get_tx_buf_sgtable;
+	}
+
+	/* allocate rx buffers */
+	size = BUFFER_SIZE * NUM_RX_BUFS;
+	test_ntn_ctx->rx_buf.size = size;
+	test_ntn_ctx->rx_buf.base =
+		dma_alloc_coherent(cb->dev, size,
+			&test_ntn_ctx->rx_buf.phys_base,
+			GFP_KERNEL);
+	if (!test_ntn_ctx->rx_buf.phys_base) {
+		IPA_UT_ERR("fail to alloc memory.\n");
+		ret = -ENOMEM;
+		goto fail_rx_bufs;
+	}
+
+	test_ntn_ctx->rx_buff_sgt = kzalloc(
+		sizeof(*test_ntn_ctx->rx_buff_sgt), GFP_KERNEL);
+	if (!test_ntn_ctx->rx_buff_sgt) {
+		IPA_UT_ERR("fail to alloc memory.\n");
+		ret = -ENOMEM;
+		goto fail_alloc_rx_buff_sgt;
+	}
+
+	ret = dma_get_sgtable(cb->dev,
+		test_ntn_ctx->rx_buff_sgt,
+		test_ntn_ctx->rx_buf.base,
+		test_ntn_ctx->rx_buf.phys_base,
+		size);
+	if (ret) {
+		IPA_UT_ERR("failed to get sgtable\n");
+		ret = -ENOMEM;
+		goto fail_get_rx_buf_sgtable;
+	}
+
+	/*
+	 * allocate PCI bar with two tail pointers -
+	 * addresses need to be 8B aligned
+	 */
+	test_ntn_ctx->bar_addr.size = 2 * DB_REGISTER_SIZE + 8;
+	test_ntn_ctx->bar_addr.base =
+		dma_alloc_coherent(ipa3_ctx->pdev,
+			test_ntn_ctx->bar_addr.size,
+			&test_ntn_ctx->bar_addr.phys_base,
+			GFP_KERNEL);
+	if (!test_ntn_ctx->bar_addr.base) {
+		IPA_UT_ERR("fail to alloc memory\n");
+		ret = -ENOMEM;
+		goto fail_alloc_bar;
+	}
+
+	return ret;
+
+fail_alloc_bar:
+	sg_free_table(test_ntn_ctx->rx_buff_sgt);
+fail_get_rx_buf_sgtable:
+	kfree(test_ntn_ctx->rx_buff_sgt);
+	test_ntn_ctx->rx_buff_sgt = NULL;
+fail_alloc_rx_buff_sgt:
+	ipa_test_ntn_free_dma_buff(&test_ntn_ctx->rx_buf);
+fail_rx_bufs:
+	sg_free_table(test_ntn_ctx->tx_buff_sgt);
+fail_get_tx_buf_sgtable:
+	kfree(test_ntn_ctx->tx_buff_sgt);
+	test_ntn_ctx->tx_buff_sgt = NULL;
+fail_alloc_tx_buff_sgt:
+	ipa_test_ntn_free_dma_buff(&test_ntn_ctx->tx_buf);
+fail_tx_buf:
+	sg_free_table(test_ntn_ctx->rx_transfer_ring_sgt);
+
+fail_get_rx_sgtable:
+	kfree(test_ntn_ctx->rx_transfer_ring_sgt);
+	test_ntn_ctx->rx_transfer_ring_sgt = NULL;
+fail_alloc_rx_sgt:
+	ipa_test_ntn_free_dma_buff(&test_ntn_ctx->rx_transfer_ring_addr);
+fail_rx_transfer_ring:
+	sg_free_table(test_ntn_ctx->tx_transfer_ring_sgt);
+fail_get_tx_sgtable:
+	kfree(test_ntn_ctx->tx_transfer_ring_sgt);
+	test_ntn_ctx->tx_transfer_ring_sgt = NULL;
+fail_alloc_tx_sgt:
+	ipa_test_ntn_free_dma_buff(&test_ntn_ctx->tx_transfer_ring_addr);
+	return ret;
+}
+
+static int ipa_test_ntn_free_mmio(void)
+{
+	if (!test_ntn_ctx) {
+		IPA_UT_ERR("test_ntn_ctx is not initialized.\n");
+		return -EFAULT;
+	}
+
+	/* rx buff */
+	sg_free_table(test_ntn_ctx->rx_buff_sgt);
+	kfree(test_ntn_ctx->rx_buff_sgt);
+	test_ntn_ctx->rx_buff_sgt = NULL;
+	ipa_test_ntn_free_dma_buff(&test_ntn_ctx->rx_buf);
+
+	/* tx buff */
+	sg_free_table(test_ntn_ctx->tx_buff_sgt);
+	kfree(test_ntn_ctx->tx_buff_sgt);
+	test_ntn_ctx->tx_buff_sgt = NULL;
+	ipa_test_ntn_free_dma_buff(&test_ntn_ctx->tx_buf);
+
+	/* rx transfer ring */
+	sg_free_table(test_ntn_ctx->rx_transfer_ring_sgt);
+	kfree(test_ntn_ctx->rx_transfer_ring_sgt);
+	test_ntn_ctx->rx_transfer_ring_sgt = NULL;
+	ipa_test_ntn_free_dma_buff(&test_ntn_ctx->rx_transfer_ring_addr);
+
+	/* tx transfer ring */
+	sg_free_table(test_ntn_ctx->tx_transfer_ring_sgt);
+	kfree(test_ntn_ctx->tx_transfer_ring_sgt);
+	test_ntn_ctx->tx_transfer_ring_sgt = NULL;
+	ipa_test_ntn_free_dma_buff(&test_ntn_ctx->tx_transfer_ring_addr);
+
+	return 0;
+}
+
+static void ipa_test_ntn_ready_cb(void *user_data)
+{
+	IPA_UT_DBG("ready CB entry\n");
+	test_ntn_ctx->ready = true;
+	complete(&test_ntn_ctx->init_completion_obj);
+}
+
+static struct ipa_eth_ready eth_ready = {
+	.notify = ipa_test_ntn_ready_cb,
+	.userdata = NULL
+};
+
+static int ipa_test_ntn_init_rings(void)
+{
+	struct tx_transfer_ring_ele *tx_ele;
+	struct rx_transfer_ring_ele *rx_ele;
+	int i;
+
+	IPA_UT_DBG("filling the rings\n");
+
+	rx_ele =
+		(struct rx_transfer_ring_ele *)
+		(test_ntn_ctx->rx_transfer_ring_addr.base);
+
+	tx_ele =
+		(struct tx_transfer_ring_ele *)
+		(test_ntn_ctx->tx_transfer_ring_addr.base);
+
+	memset(rx_ele, 0, sizeof(*rx_ele) * NUM_RX_TR_ELE);
+
+	for (i = 0; i < NUM_RX_TR_ELE; i++) {
+		rx_ele->fd = 1;
+		rx_ele->ld = 1;
+		rx_ele++;
+	}
+
+	/* all fields should be zero */
+	memset(tx_ele, 0, sizeof(*tx_ele) * NUM_TX_TR_ELE);
+
+	return 0;
+}
+
+static int ipa_test_ntn_suite_setup(void **priv)
+{
+	int ret = 0;
+
+	IPA_UT_DBG("Start NTN Setup\n");
+
+	/* init ipa ntn ctx */
+	if (!ipa3_ctx) {
+		IPA_UT_ERR("No IPA ctx\n");
+		return -EINVAL;
+	}
+
+	test_ntn_ctx = kzalloc(sizeof(struct ipa_test_ntn_context),
+		GFP_KERNEL);
+	if (!test_ntn_ctx) {
+		IPA_UT_ERR("failed to allocate ctx\n");
+		return -ENOMEM;
+	}
+
+	init_completion(&test_ntn_ctx->init_completion_obj);
+
+	/*
+	 * registering ready callback mandatory for init. CB shall be launched
+	 * anyway so connect the pipe from there.
+	 * our driver expects struct memory to be static as it uses it when CB
+	 * is launched.
+	 */
+	ret = ipa_eth_register_ready_cb(&eth_ready);
+	if (ret) {
+		IPA_UT_ERR("failed to register CB\n");
+		goto fail_alloc_mmio;
+	}
+
+	IPA_UT_DBG("IPA %s ready\n", eth_ready.is_eth_ready ? "is" : "is not");
+
+	ret = ipa_test_ntn_alloc_mmio();
+	if (ret) {
+		IPA_UT_ERR("failed to alloc mmio\n");
+		goto fail_alloc_mmio;
+	}
+
+	*priv = test_ntn_ctx;
+	return 0;
+
+fail_alloc_mmio:
+	kfree(test_ntn_ctx);
+	test_ntn_ctx = NULL;
+	return ret;
+}
+
+static void ipa_ntn_test_print_stats()
+{
+	struct ipa_uc_dbg_ring_stats stats;
+	int ret;
+	int tx_ep, rx_ep;
+	struct ipa3_eth_error_stats tx_stats;
+	struct ipa3_eth_error_stats rx_stats;
+
+	/* first get uC stats */
+	ret = ipa3_get_ntn_gsi_stats(&stats);
+	if (ret) {
+		IPA_UT_ERR("failed to get stats\n");
+		return;
+	}
+	IPA_UT_INFO("\nuC stats:\n");
+	IPA_UT_INFO(
+	"NTN_tx_ringFull=%u\n"
+	"NTN_tx_ringEmpty=%u\n"
+	"NTN_tx_ringUsageHigh=%u\n"
+	"NTN_tx_ringUsageLow=%u\n"
+	"NTN_tx_RingUtilCount=%u\n",
+	stats.u.ring[1].ringFull,
+	stats.u.ring[1].ringEmpty,
+	stats.u.ring[1].ringUsageHigh,
+	stats.u.ring[1].ringUsageLow,
+	stats.u.ring[1].RingUtilCount);
+
+	IPA_UT_INFO(
+	"NTN_rx_ringFull=%u\n"
+	"NTN_rx_ringEmpty=%u\n"
+	"NTN_rx_ringUsageHigh=%u\n"
+	"NTN_rx_ringUsageLow=%u\n"
+	"NTN_rx_RingUtilCount=%u\n",
+	stats.u.ring[0].ringFull,
+	stats.u.ring[0].ringEmpty,
+	stats.u.ring[0].ringUsageHigh,
+	stats.u.ring[0].ringUsageLow,
+	stats.u.ring[0].RingUtilCount);
+
+	/* now get gsi stats */
+	tx_ep = IPA_CLIENT_ETHERNET_CONS;
+	rx_ep = IPA_CLIENT_ETHERNET_PROD;
+	ipa3_eth_get_status(tx_ep, 6, &tx_stats);
+	ipa3_eth_get_status(rx_ep, 6, &rx_stats);
+
+	IPA_UT_INFO("\nGSI stats:\n");
+	IPA_UT_INFO(
+		"NTN_TX_RP=0x%x\n"
+		"NTN_TX_WP=0x%x\n"
+		"NTN_TX_err=%u\n",
+		tx_stats.rp,
+		tx_stats.wp,
+		tx_stats.err);
+
+	IPA_UT_INFO(
+		"NTN_RX_RP=0x%x\n"
+		"NTN_RX_WP=0x%x\n"
+		"NTN_RX_err:%u\n",
+		rx_stats.rp,
+		rx_stats.wp,
+		rx_stats.err);
+}
+
+static int ipa_test_ntn_suite_teardown(void *priv)
+{
+	if (!test_ntn_ctx)
+		return  0;
+
+	ipa_test_ntn_free_mmio();
+	kfree(test_ntn_ctx);
+	test_ntn_ctx = NULL;
+
+	return 0;
+}
+
+static int ipa_ntn_test_ready_cb(void *priv)
+{
+	int ret;
+
+	test_ntn_ctx->wait_cnt++;
+	ret = wait_for_completion_timeout(
+		&test_ntn_ctx->init_completion_obj,
+		msecs_to_jiffies(1000));
+	if (!ret) {
+		IPA_UT_ERR("ipa ready timeout, don't run\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static void ipa_ntn_test_del_client_list()
+{
+	struct ipa_eth_client *eth_client = &test_ntn_ctx->client;
+	struct ipa_eth_client_pipe_info *pipe_info, *tmp;
+
+	list_for_each_entry_safe(pipe_info, tmp, &eth_client->pipe_list, link)
+		list_del(&pipe_info->link);
+}
+
+static int ipa_ntn_test_setup_pipes(void)
+{
+	struct ipa_eth_client *client;
+	int ret, i;
+
+	test_ntn_ctx->client.client_type = IPA_ETH_CLIENT_NTN;
+	test_ntn_ctx->client.inst_id = 0;
+	test_ntn_ctx->client.traffic_type = IPA_ETH_PIPE_BEST_EFFORT;
+
+	/* RX pipe */
+	/* ring */
+	test_ntn_ctx->rx_pipe_info.dir = IPA_ETH_PIPE_DIR_RX;
+	test_ntn_ctx->rx_pipe_info.client_info = &test_ntn_ctx->client;
+	test_ntn_ctx->rx_pipe_info.info.is_transfer_ring_valid = true;
+	test_ntn_ctx->rx_pipe_info.info.transfer_ring_base =
+		test_ntn_ctx->rx_transfer_ring_addr.phys_base;
+	test_ntn_ctx->rx_pipe_info.info.transfer_ring_size =
+		test_ntn_ctx->rx_transfer_ring_addr.size;
+	test_ntn_ctx->rx_pipe_info.info.transfer_ring_sgt =
+		test_ntn_ctx->rx_transfer_ring_sgt;
+
+	IPA_UT_DBG("rx TR phys 0x%X, cpu 0x%X, size %d, sgt 0x%X\n",
+		test_ntn_ctx->rx_transfer_ring_addr.phys_base,
+		test_ntn_ctx->rx_transfer_ring_addr.base,
+		test_ntn_ctx->rx_transfer_ring_addr.size,
+		test_ntn_ctx->rx_transfer_ring_sgt);
+
+	/* buff */
+	test_ntn_ctx->rx_pipe_info.info.is_buffer_pool_valid = true;
+	test_ntn_ctx->rx_pipe_info.info.fix_buffer_size = BUFFER_SIZE;
+	test_ntn_ctx->rx_pipe_info.info.buffer_pool_base_addr =
+		test_ntn_ctx->rx_buf.phys_base;
+	test_ntn_ctx->rx_pipe_info.info.buffer_pool_base_sgt =
+		test_ntn_ctx->rx_buff_sgt;
+
+	IPA_UT_DBG("rx buff phys 0x%X, cpu 0x%X, size %d, fix size %d sgt 0x%X\n"
+		, test_ntn_ctx->rx_buf.phys_base,
+		test_ntn_ctx->rx_buf.base,
+		test_ntn_ctx->rx_buf.size,
+		test_ntn_ctx->rx_pipe_info.info.fix_buffer_size,
+		test_ntn_ctx->rx_buff_sgt);
+
+	/* we don't plan to receive skb on RX CB */
+	test_ntn_ctx->rx_pipe_info.info.notify = NULL;
+	test_ntn_ctx->rx_pipe_info.info.priv = NULL;
+
+	/* gsi info */
+	test_ntn_ctx->rx_pipe_info.info.client_info.ntn.bar_addr =
+		test_ntn_ctx->bar_addr.phys_base;
+
+	/*
+	* use the first 4 bytes as the RX tail_ptr and the next 4 for TX,
+	* make sure 8B alignment
+	*/
+	test_ntn_ctx->rx_pipe_info.info.client_info.ntn.tail_ptr_offs =
+		RX_TAIL_PTR_OFF;
+
+	IPA_UT_DBG("tail registers bar: phys 0x%X virt 0x%X\n",
+		test_ntn_ctx->bar_addr.phys_base, test_ntn_ctx->bar_addr.base);
+
+	/* TX pipe */
+	/* ring */
+	test_ntn_ctx->tx_pipe_info.dir = IPA_ETH_PIPE_DIR_TX;
+	test_ntn_ctx->tx_pipe_info.client_info = &test_ntn_ctx->client;
+	test_ntn_ctx->tx_pipe_info.info.is_transfer_ring_valid = true;
+	test_ntn_ctx->tx_pipe_info.info.transfer_ring_base =
+		test_ntn_ctx->tx_transfer_ring_addr.phys_base;
+	test_ntn_ctx->tx_pipe_info.info.transfer_ring_size =
+		test_ntn_ctx->tx_transfer_ring_addr.size;
+	test_ntn_ctx->tx_pipe_info.info.transfer_ring_sgt =
+		test_ntn_ctx->tx_transfer_ring_sgt;
+
+	IPA_UT_DBG("tx TR phys 0x%X, cpu 0x%X, size %d, sgt 0x%X\n",
+		test_ntn_ctx->tx_transfer_ring_addr.phys_base,
+		test_ntn_ctx->tx_transfer_ring_addr.base,
+		test_ntn_ctx->tx_transfer_ring_addr.size,
+		test_ntn_ctx->tx_transfer_ring_sgt);
+
+	/* buff - for tx let's use the buffer list method (test both methods) */
+	test_ntn_ctx->tx_pipe_info.info.is_buffer_pool_valid = false;
+	test_ntn_ctx->tx_pipe_info.info.fix_buffer_size = BUFFER_SIZE;
+	test_ntn_ctx->tx_pipe_info.info.data_buff_list =
+		test_ntn_ctx->tx_data_buff_list;
+	for (i = 0; i < NUM_TX_BUFS; i++) {
+		test_ntn_ctx->tx_pipe_info.info.data_buff_list[i].iova =
+			(phys_addr_t)((u8 *)test_ntn_ctx->tx_buf.phys_base +
+				i * BUFFER_SIZE);
+		test_ntn_ctx->tx_pipe_info.info.data_buff_list[i].pa =
+			page_to_phys(vmalloc_to_page(test_ntn_ctx->tx_buf.base
+				+ (BUFFER_SIZE * i))) |
+			((phys_addr_t)(test_ntn_ctx->tx_buf.base +
+			(BUFFER_SIZE * i)) & ~PAGE_MASK);
+
+		IPA_UT_DBG("tx_pipe_info.info.data_buff_list[%d].iova = 0x%lx",
+			i,
+			test_ntn_ctx->tx_pipe_info.info.data_buff_list[i].iova);
+		IPA_UT_DBG("tx_pipe_info.info.data_buff_list[%d].pa = 0x%lx",
+			i,
+			test_ntn_ctx->tx_pipe_info.info.data_buff_list[i].pa);
+	}
+	test_ntn_ctx->tx_pipe_info.info.data_buff_list_size = NUM_TX_BUFS;
+
+	IPA_UT_DBG("tx buff phys 0x%X, cpu 0x%X, size %d, fix size %d sgt 0x%X\n"
+		, test_ntn_ctx->tx_buf.phys_base,
+		test_ntn_ctx->tx_buf.base,
+		test_ntn_ctx->tx_buf.size,
+		test_ntn_ctx->tx_pipe_info.info.fix_buffer_size,
+		test_ntn_ctx->tx_buff_sgt);
+
+	test_ntn_ctx->tx_pipe_info.info.notify = NULL;
+	test_ntn_ctx->tx_pipe_info.info.priv = NULL;
+
+	test_ntn_ctx->tx_pipe_info.info.client_info.ntn.bar_addr =
+		test_ntn_ctx->bar_addr.phys_base;
+
+	/*
+	 * use the first 4 bytes as the RX tail_ptr and the next 4 for TX,
+	 * make sure 8B alignment
+	 */
+	test_ntn_ctx->tx_pipe_info.info.client_info.ntn.tail_ptr_offs =
+		TX_TAIL_PTR_OFF;
+
+	/* add pipes to list */
+	INIT_LIST_HEAD(&test_ntn_ctx->client.pipe_list);
+	list_add(&test_ntn_ctx->rx_pipe_info.link,
+		&test_ntn_ctx->client.pipe_list);
+	list_add(&test_ntn_ctx->tx_pipe_info.link,
+		&test_ntn_ctx->client.pipe_list);
+
+	test_ntn_ctx->client.test = true;
+	client = &test_ntn_ctx->client;
+	ret = ipa_eth_client_conn_pipes(client);
+	if(ret) {
+		IPA_UT_ERR("ipa_eth_client_conn_pipes failed ret %d\n", ret);
+		goto conn_failed;
+	}
+
+	return 0;
+
+conn_failed:
+	ipa_ntn_test_del_client_list();
+	return ret;
+}
+
+static int ipa_ntn_test_reg_intf(void)
+{
+	struct ipa_eth_intf_info intf;
+	char netdev_name[IPA_RESOURCE_NAME_MAX] = { 0 };
+	int ret = 0;
+	u8 hdr_content = 1;
+
+	memset(&intf, 0, sizeof(intf));
+	snprintf(netdev_name, sizeof(netdev_name), "ntn_test");
+	intf.netdev_name = netdev_name;
+	IPA_UT_INFO("netdev name: %s strlen: %lu\n", intf.netdev_name,
+		strlen(intf.netdev_name));
+
+	intf.hdr[0].hdr = &hdr_content;
+	intf.hdr[0].hdr_len = 1;
+	intf.hdr[0].dst_mac_addr_offset = 0;
+	intf.hdr[0].hdr_type = IPA_HDR_L2_ETHERNET_II;
+
+	intf.hdr[1].hdr = &hdr_content;
+	intf.hdr[1].hdr_len = 1;
+	intf.hdr[1].dst_mac_addr_offset = 0;
+	intf.hdr[1].hdr_type = IPA_HDR_L2_ETHERNET_II;
+
+	intf.pipe_hdl_list =
+		kcalloc(IPA_TEST_NTN_NUM_PIPES,
+			sizeof(*intf.pipe_hdl_list),
+			GFP_KERNEL);
+	if (!intf.pipe_hdl_list) {
+		IPA_UT_ERR("Failed to alloc pipe handle list");
+		return -ENOMEM;
+	}
+
+	intf.pipe_hdl_list[0] = test_ntn_ctx->rx_pipe_info.pipe_hdl;
+	intf.pipe_hdl_list[1] = test_ntn_ctx->tx_pipe_info.pipe_hdl;
+	intf.pipe_hdl_list_size = IPA_TEST_NTN_NUM_PIPES;
+
+	ret = ipa_eth_client_reg_intf(&intf);
+	if (ret) {
+		IPA_UT_ERR("Failed to register IPA interface");
+	}
+
+	kfree(intf.pipe_hdl_list);
+
+	return ret;
+}
+
+static int ipa_ntn_test_unreg_intf(void)
+{
+	struct ipa_eth_intf_info intf;
+	char netdev_name[IPA_RESOURCE_NAME_MAX] = { 0 };
+
+	memset(&intf, 0, sizeof(intf));
+	snprintf(netdev_name, sizeof(netdev_name), "ntn_test");
+	intf.netdev_name = netdev_name;
+	IPA_UT_INFO("netdev name: %s strlen: %lu\n", intf.netdev_name,
+		strlen(intf.netdev_name));
+
+	return (ipa_eth_client_unreg_intf(&intf));
+}
+
+static void ipa_ntn_test_advance_db(u32 *db, int steps,
+	int num_words, int ring_size)
+{
+	*db = (*db + steps * num_words) % ring_size;
+}
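
A worked instance of the pointer math above, using the RX ring sizes defined in this file (the local variable is illustrative; the value the test actually writes to the doorbell also adds the lower 32 bits of the ring's physical base, as the send path below shows):

	u32 db = 0;

	/*
	 * rx_transfer_ring_ele is 16 bytes and NUM_RX_TR_ELE is 10, so the
	 * ring wraps at 160 bytes: 0 -> 16 -> 32 -> ... -> 144 -> 0.
	 */
	ipa_ntn_test_advance_db(&db, 1, sizeof(struct rx_transfer_ring_ele),
		NUM_RX_TR_ELE * sizeof(struct rx_transfer_ring_ele));
	/* db is now 16 */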
+
+static int ipa_ntn_send_one_packet(void)
+{
+	u32 *packet;
+	u32 *packet_recv;
+
+	void __iomem *rx_db;
+	void __iomem *tx_db;
+	struct rx_event_ring_ele *rx_event;
+	u32 *tx_ring_tail;
+	u32 orig_tx_tail;
+	u32 *rx_ring_tail;
+	u32 orig_rx_tail;
+	int loop_cnt;
+	u64 evt_addr;
+	u64 pkt_addr;
+	struct rx_transfer_ring_ele *rx_ele;
+
+	int ret = 0;
+
+	rx_db = ioremap(
+		test_ntn_ctx->rx_pipe_info.info.db_pa, DB_REGISTER_SIZE);
+	if (!rx_db) {
+		IPA_UT_ERR("ioremap of rx db failed\n");
+		return -ENOMEM;
+	}
+
+	tx_db = ioremap(
+		test_ntn_ctx->tx_pipe_info.info.db_pa, DB_REGISTER_SIZE);
+	if (!tx_db) {
+		IPA_UT_ERR("ioremap of tx db failed\n");
+		iounmap(rx_db);
+		return -ENOMEM;
+	}
+
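+	/*
+	 * Single-packet flow: fill one RX buffer, set the packet length in
+	 * its TRE, ring the RX doorbell, then poll the emulated tail
+	 * pointers in the BAR until GSI reports both the RX and TX
+	 * completions, and finally verify the event and the copied payload.
+	 */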
+	/* initialize packet */
+	packet = (u32 *)((u8 *)test_ntn_ctx->rx_buf.base +
+		(test_ntn_ctx->rx_idx * BUFFER_SIZE));
+	pkt_addr = (u64)((u8 *)test_ntn_ctx->rx_buf.phys_base +
+		(test_ntn_ctx->rx_idx * BUFFER_SIZE));
+	*packet = PACKET_CONTENT;
+
+	/* update length in TRE */
+	rx_ele = (struct rx_transfer_ring_ele *)
+		test_ntn_ctx->rx_transfer_ring_addr.base + test_ntn_ctx->rx_idx;
+	rx_ele->packet_length = ETH_PACKET_SIZE;
+
+	/* point to base + 1 */
+	ipa_ntn_test_advance_db(&test_ntn_ctx->rx_db_local, 1,
+		sizeof(struct rx_transfer_ring_ele),
+		test_ntn_ctx->rx_transfer_ring_addr.size);
+
+	tx_ring_tail = (u32 *)((char *)test_ntn_ctx->bar_addr.base +
+		TX_TAIL_PTR_OFF);
+	orig_tx_tail = *tx_ring_tail;
+	rx_ring_tail = (u32 *)((char *)test_ntn_ctx->bar_addr.base +
+		RX_TAIL_PTR_OFF);
+	orig_rx_tail = *rx_ring_tail;
+
+	IPA_UT_DBG("orig tx tail 0x%X\n", orig_tx_tail);
+	IPA_UT_DBG("orig rx tail 0x%X\n", orig_rx_tail);
+
+	/* ring db and send packet */
+	iowrite32(test_ntn_ctx->rx_db_local +
+		lower_32_bits(test_ntn_ctx->rx_transfer_ring_addr.phys_base),
+		rx_db);
+	IPA_UT_DBG("rx_db_local increased to 0x%X\n",
+		test_ntn_ctx->rx_db_local +
+		lower_32_bits(test_ntn_ctx->rx_transfer_ring_addr.phys_base));
+
+	loop_cnt = 0;
+	while ((orig_rx_tail == *rx_ring_tail) ||
+		(orig_tx_tail == *tx_ring_tail)) {
+		loop_cnt++;
+
+		if (loop_cnt == 1000) {
+			IPA_UT_ERR("transfer timeout!\n");
+			IPA_UT_ERR("orig_tx_tail: %X tx_ring_db: %X\n",
+				orig_tx_tail, *tx_ring_tail);
+			IPA_UT_ERR("orig_rx_tail: %X rx_ring_db: %X\n",
+				orig_rx_tail, *rx_ring_tail);
+			IPA_UT_ERR("rx db local: %u\n",
+				test_ntn_ctx->rx_db_local +
+				lower_32_bits(
+				test_ntn_ctx->rx_transfer_ring_addr.phys_base));
+			BUG();
+			ret = -EFAULT;
+			goto err;
+		}
+		usleep_range(1000, 1001);
+	}
+	IPA_UT_DBG("loop_cnt %d\n", loop_cnt);
+	IPA_UT_DBG("rx ring tail 0x%X\n", *rx_ring_tail);
+	IPA_UT_DBG("tx ring tail 0x%X\n", *tx_ring_tail);
+
+	/* verify RX event */
+	rx_event = (struct rx_event_ring_ele *)rx_ele;
+
+	IPA_UT_DBG("address written by GSI is 0x[%X][%X]\n",
+		rx_event->buff_addr2, rx_event->buff_addr1);
+	IPA_UT_DBG("own bit is now %u", rx_event->own);
+
+	if (!rx_event->own) {
+		IPA_UT_ERR("own bit not modified by gsi - failed\n");
+		ret = -EFAULT;
+	}
+
+	evt_addr = ((u64)rx_event->buff_addr2 << 32) |
+		(u64)(rx_event->buff_addr1);
+	IPA_UT_DBG("RX: addr from event 0x%llx, address from buff %llx\n",
+		evt_addr, pkt_addr);
+	if (evt_addr != pkt_addr) {
+		IPA_UT_ERR("addresses are different - fail\n");
+		ret = -EFAULT;
+	}
+
+	/* read received packet */
+	packet_recv = (u32 *)((u8 *)test_ntn_ctx->tx_buf.base +
+		(test_ntn_ctx->tx_idx * BUFFER_SIZE));
+	IPA_UT_DBG("received packet 0x%X\n", *packet_recv);
+
+	if (*packet_recv != *packet) {
+		IPA_UT_ERR("packet content mismatch\n");
+		ret = -EFAULT;
+	}
+
+	/* recycle buffer */
+	*packet_recv = 0;
+
+	/* recycle TRE */
+	/* TX */
+	memset((struct tx_transfer_ring_ele *)
+		test_ntn_ctx->tx_transfer_ring_addr.base + test_ntn_ctx->tx_idx,
+		0, sizeof(struct tx_transfer_ring_ele));
+
+	/* RX */
+	memset(rx_ele, 0, sizeof(struct rx_transfer_ring_ele));
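+	/* mark the recycled TRE as both first and last descriptor */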
+	rx_ele->fd = 1;
+	rx_ele->ld = 1;
+
+	test_ntn_ctx->rx_idx = (test_ntn_ctx->rx_idx + 1) % NUM_RX_TR_ELE;
+	test_ntn_ctx->tx_idx = (test_ntn_ctx->tx_idx + 1) % NUM_TX_TR_ELE;
+	IPA_UT_DBG("now indexes are: rx %d, tx %d\n", test_ntn_ctx->rx_idx,
+		test_ntn_ctx->tx_idx);
+
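+	/* hand the recycled TX element back to GSI by advancing the TX db */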
+	ipa_ntn_test_advance_db(&test_ntn_ctx->tx_db_local, 1,
+		sizeof(struct tx_transfer_ring_ele),
+		test_ntn_ctx->tx_transfer_ring_addr.size);
+	iowrite32(test_ntn_ctx->tx_db_local +
+		lower_32_bits(test_ntn_ctx->tx_transfer_ring_addr.phys_base),
+		tx_db);
+	IPA_UT_DBG("tx_db_local advanced to 0x%X\n",
+		test_ntn_ctx->tx_db_local +
+		lower_32_bits(test_ntn_ctx->tx_transfer_ring_addr.phys_base));
+err:
+	iounmap(rx_db);
+	iounmap(tx_db);
+	return ret;
+}
+
+static int ipa_ntn_teardown_pipes(void)
+{
+	int ret = 0;
+
+	if (ipa_eth_client_disconn_pipes(&test_ntn_ctx->client)) {
+		IPA_UT_ERR("fail to teardown ntn pipes.\n");
+		ret = -EFAULT;
+	}
+
+	test_ntn_ctx->rx_idx = 0;
+	test_ntn_ctx->tx_idx = 0;
+	test_ntn_ctx->tx_db_local = 0;
+	test_ntn_ctx->rx_db_local = 0;
+	return ret;
+}
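+
+/*
+ * Prepare a data-path test: init the rings, connect the pipes, prime the
+ * emulated tail pointers in the BAR, register the dummy interface and put
+ * the NTN RX endpoint into DMA loopback mode towards ETHERNET_CONS.
+ */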
+static int ipa_ntn_test_prepare_test(void)
+{
+	struct ipa_ep_cfg ep_cfg = { { 0 } };
+	int offset = 0;
+	int ret = 0;
+
+	if (ipa_test_ntn_init_rings()) {
+		IPA_UT_ERR("fail to fill rings.\n");
+		return -EFAULT;
+	}
+
+	if (ipa_ntn_test_setup_pipes()) {
+		IPA_UT_ERR("fail to setup ntn pipes.\n");
+		return -EFAULT;
+	}
+
+	offset = sizeof(struct rx_transfer_ring_ele) * (NUM_RX_TR_ELE - 1);
+	IPA_UT_DBG("offset 0x%X\n", offset);
+
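+	/*
+	 * Prime the emulated tail pointers in the BAR: RX points at the last
+	 * ring element, TX at the ring base. GSI updates these locations as
+	 * it processes the rings, and the tests poll them for completion.
+	 */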
+	IPA_UT_DBG("writing to RX tail ptr at 0x%pK the LE value 0x%X",
+		(u32 *)((char *)test_ntn_ctx->bar_addr.base + RX_TAIL_PTR_OFF),
+		lower_32_bits(test_ntn_ctx->rx_transfer_ring_addr.phys_base +
+			offset));
+
+	*((u32 *)((char *)test_ntn_ctx->bar_addr.base + RX_TAIL_PTR_OFF)) =
+		cpu_to_le32(lower_32_bits(
+			test_ntn_ctx->rx_transfer_ring_addr.phys_base +
+			offset));
+
+	/* initialize tx tail to the beginning of the ring */
+	IPA_UT_DBG("writing to TX tail ptr at 0x%pK the LE value 0x%X",
+		(u32 *)((char *)test_ntn_ctx->bar_addr.base + TX_TAIL_PTR_OFF),
+		lower_32_bits(test_ntn_ctx->tx_transfer_ring_addr.phys_base));
+
+	*((u32 *)((char *)test_ntn_ctx->bar_addr.base + TX_TAIL_PTR_OFF)) =
+		cpu_to_le32(lower_32_bits(
+			test_ntn_ctx->tx_transfer_ring_addr.phys_base));
+
+	if (ipa_ntn_test_reg_intf()) {
+		IPA_UT_ERR("fail to reg ntn interface.\n");
+		ret = -EFAULT;
+		goto teardown_pipes;
+	}
+
+	/* configure NTN RX EP in DMA mode */
+	ep_cfg.mode.mode = IPA_DMA;
+	ep_cfg.mode.dst = IPA_CLIENT_ETHERNET_CONS;
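+	/*
+	 * In DMA mode every packet arriving on ETHERNET_PROD is looped
+	 * straight back out of ETHERNET_CONS, so whatever the test queues on
+	 * the RX ring should show up unmodified in the TX buffers.
+	 */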
+
+	ep_cfg.seq.set_dynamic = true;
+
+	if (ipa3_cfg_ep(ipa_get_ep_mapping(IPA_CLIENT_ETHERNET_PROD),
+		&ep_cfg)) {
+		IPA_UT_ERR("fail to configure DMA mode.\n");
+		ret = -EFAULT;
+		goto unreg;
+	}
+
+	return 0;
+
+unreg:
+	if (ipa_ntn_test_unreg_intf()) {
+		IPA_UT_ERR("fail to unregister interface.\n");
+		ret = -EFAULT;
+		goto teardown_pipes;
+	}
+teardown_pipes:
+	if (ipa_ntn_teardown_pipes())
+		ret = -EFAULT;
+	return ret;
+}
+
+static int ipa_ntn_test_single_transfer(void *priv)
+{
+	int ret = 0;
+
+	if (!test_ntn_ctx->ready) {
+		if (test_ntn_ctx->wait_cnt) {
+			IPA_UT_ERR("ipa ready timeout, don't run\n");
+			return -EFAULT;
+		}
+		/* ready cb test hasn't run yet and we need to wait */
+		if (ipa_ntn_test_ready_cb(NULL)) {
+			IPA_UT_ERR("ipa ready timeout, don't run\n");
+			return -EFAULT;
+		}
+	}
+
+	if (ipa_ntn_test_prepare_test()) {
+		IPA_UT_ERR("failed to prepare test.\n");
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	if (ipa_ntn_send_one_packet()) {
+		IPA_UT_ERR("fail to transfer packet.\n");
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	IPA_UT_INFO("one packet sent and received successfully\n");
+
+	ipa_ntn_test_print_stats();
+
+fail:
+	if (ipa_ntn_test_unreg_intf()) {
+		IPA_UT_ERR("fail to unregister interface.\n");
+		ret = -EFAULT;
+	}
+
+	if (ipa_ntn_teardown_pipes())
+		ret = -EFAULT;
+	return ret;
+}
+
+static int ipa_ntn_send_multi_packet_one_by_one(int num)
+{
+	int i;
+
+	IPA_UT_INFO("about to send %d packets\n", num);
+	for (i = 0; i < num; i++) {
+		if (ipa_ntn_send_one_packet()) {
+			IPA_UT_ERR("failed on packet %d\n", i);
+			return -EFAULT;
+		}
+		IPA_UT_DBG("packet %d sent and received successfully\n\n", i);
+	}
+	IPA_UT_INFO("all packets were successful\n\n");
+	return 0;
+}
+
+static int ipa_ntn_test_multi_transfer(void *priv)
+{
+	int ret = 0;
+
+	if (!test_ntn_ctx->ready) {
+		if (test_ntn_ctx->wait_cnt) {
+			IPA_UT_ERR("ipa ready timeout, don't run\n");
+			return -EFAULT;
+		}
+		/* ready cb test hasn't run yet and we need to wait */
+		if (ipa_ntn_test_ready_cb(NULL)) {
+			IPA_UT_ERR("ipa ready timeout, don't run\n");
+			return -EFAULT;
+		}
+	}
+
+	if (ipa_ntn_test_prepare_test()) {
+		IPA_UT_ERR("failed to prepare test.\n");
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	if (ipa_ntn_send_multi_packet_one_by_one(NUM_RX_BUFS / 2)) {
+		IPA_UT_ERR("failed to send packets.\n");
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	IPA_UT_INFO("%d packets sent and received successfully\n",
+		NUM_RX_BUFS / 2);
+
+	ipa_ntn_test_print_stats();
+
+fail:
+	if (ipa_ntn_test_unreg_intf()) {
+		IPA_UT_ERR("fail to unregister interface.\n");
+		ret = -EFAULT;
+	}
+
+	if (ipa_ntn_teardown_pipes())
+		ret = -EFAULT;
+	return ret;
+}
+
+static int ipa_ntn_test_multi_transfer_wrap_around(void *priv)
+{
+	int ret = 0;
+
+	if (!test_ntn_ctx->ready) {
+		if (test_ntn_ctx->wait_cnt) {
+			IPA_UT_ERR("ipa ready timeout, don't run\n");
+			return -EFAULT;
+		}
+		/* ready cb test hasn't run yet and we need to wait */
+		if (ipa_ntn_test_ready_cb(NULL)) {
+			IPA_UT_ERR("ipa ready timeout, don't run\n");
+			return -EFAULT;
+		}
+	}
+
+	if (ipa_ntn_test_prepare_test()) {
+		IPA_UT_ERR("failed to prepare test.\n");
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	if (ipa_ntn_send_multi_packet_one_by_one(NUM_RX_BUFS * 2)) {
+		IPA_UT_ERR("failed to send packets.\n");
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	IPA_UT_INFO("%d packets sent and received successfully\n",
+		NUM_RX_BUFS * 2);
+
+	ipa_ntn_test_print_stats();
+
+fail:
+	if (ipa_ntn_test_unreg_intf()) {
+		IPA_UT_ERR("fail to unregister interface.\n");
+		ret = -EFAULT;
+	}
+
+	if (ipa_ntn_teardown_pipes())
+		ret = -EFAULT;
+	return ret;
+}
+
+static int ipa_ntn_send_packet_burst(void)
+{
+	u32 *packet[NUM_RX_TR_ELE];
+	u32 *packet_recv;
+
+	void __iomem *rx_db;
+	void __iomem *tx_db;
+	struct rx_event_ring_ele *rx_event;
+	u32 *tx_ring_tail;
+	u32 orig_tx_tail;
+	u32 *rx_ring_tail;
+	u32 orig_rx_tail;
+	int loop_cnt;
+	u64 evt_addr;
+	u64 pkt_addr[NUM_RX_TR_ELE];
+	struct rx_transfer_ring_ele *rx_ele;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl = { 0 };
+
+	int i, initial_val, ret = 0;
+
+	rx_db = ioremap(
+		test_ntn_ctx->rx_pipe_info.info.db_pa, DB_REGISTER_SIZE);
+	if (!rx_db) {
+		IPA_UT_ERR("ioremap of rx db failed\n");
+		return -ENOMEM;
+	}
+
+	tx_db = ioremap(
+		test_ntn_ctx->tx_pipe_info.info.db_pa, DB_REGISTER_SIZE);
+	if (!tx_db) {
+		IPA_UT_ERR("ioremap of tx db failed\n");
+		iounmap(rx_db);
+		return -ENOMEM;
+	}
+
+	/* initialize packets */
+	initial_val = PACKET_CONTENT;
+	for (i = 0; i < NUM_RX_TR_ELE - 1; i++, initial_val++) {
+		packet[i] = (u32 *)((u8 *)test_ntn_ctx->rx_buf.base +
+			(i * BUFFER_SIZE));
+		pkt_addr[i] = (u64)((u8 *)test_ntn_ctx->rx_buf.phys_base +
+			(i * BUFFER_SIZE));
+		IPA_UT_DBG("loading packet %d with val 0x%X\n", i, initial_val);
+		*(packet[i]) = initial_val;
+
+		/* update length in TRE */
+		rx_ele = (struct rx_transfer_ring_ele *)
+			test_ntn_ctx->rx_transfer_ring_addr.base + i;
+		rx_ele->packet_length = ETH_PACKET_SIZE;
+	}
+
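+	/*
+	 * Burst flow: queue NUM_RX_TR_ELE - 1 elements while ETHERNET_PROD
+	 * is held in delay, ring the RX doorbell once for the whole batch,
+	 * then release the delay so GSI drains the ring in one shot.
+	 */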
+	/*
+	 * set ep delay of 20ms to make sure uC is able to poll and see the
+	 * ring full stats for RX
+	 */
+	ep_cfg_ctrl.ipa_ep_delay = true;
+	ret = ipa3_cfg_ep_ctrl(
+		ipa_get_ep_mapping(IPA_CLIENT_ETHERNET_PROD),
+		&ep_cfg_ctrl);
+	if (ret) {
+		IPA_UT_ERR("couldn't set delay to ETHERNET_PROD\n");
+		goto err;
+	}
+	IPA_UT_DBG("delay set successfully to ETHERNET_PROD\n");
+
+	/* point db to end of ring */
+	ipa_ntn_test_advance_db(&test_ntn_ctx->rx_db_local, NUM_RX_TR_ELE - 1,
+		sizeof(struct rx_transfer_ring_ele),
+		test_ntn_ctx->rx_transfer_ring_addr.size);
+
+	tx_ring_tail = (u32 *)((char *)test_ntn_ctx->bar_addr.base +
+		TX_TAIL_PTR_OFF);
+	orig_tx_tail = *tx_ring_tail;
+	rx_ring_tail = (u32 *)((char *)test_ntn_ctx->bar_addr.base +
+		RX_TAIL_PTR_OFF);
+	orig_rx_tail = *rx_ring_tail;
+
+	IPA_UT_DBG("orig tx tail 0x%X\n", orig_tx_tail);
+	IPA_UT_DBG("orig rx tail 0x%X\n", orig_rx_tail);
+
+	/* ring db and send packet */
+	iowrite32(test_ntn_ctx->rx_db_local +
+		lower_32_bits(test_ntn_ctx->rx_transfer_ring_addr.phys_base),
+		rx_db);
+	IPA_UT_DBG("rx_db_local increased to 0x%X\n",
+		test_ntn_ctx->rx_db_local +
+		lower_32_bits(test_ntn_ctx->rx_transfer_ring_addr.phys_base));
+
+	IPA_UT_DBG("sleep before removing delay\n");
+	msleep(20);
+	ep_cfg_ctrl.ipa_ep_delay = false;
+	ret = ipa3_cfg_ep_ctrl(
+		ipa_get_ep_mapping(IPA_CLIENT_ETHERNET_PROD),
+		&ep_cfg_ctrl);
+	if (ret) {
+		IPA_UT_ERR("couldn't unset delay to ETHERNET_PROD\n");
+		goto err;
+	}
+	IPA_UT_DBG("delay unset successfully from ETHERNET_PROD\n");
+
+	loop_cnt = 0;
+	while (((*rx_ring_tail - orig_rx_tail) < NUM_RX_TR_ELE - 1) ||
+		((*tx_ring_tail - orig_tx_tail) < NUM_TX_TR_ELE - 1)) {
+		loop_cnt++;
+
+		if (loop_cnt == 1000) {
+			IPA_UT_ERR("transfer timeout!\n");
+			IPA_UT_ERR("orig_tx_tail: %X tx_ring_db: %X\n",
+				orig_tx_tail, *tx_ring_tail);
+			IPA_UT_ERR("orig_rx_tail: %X rx_ring_db: %X\n",
+				orig_rx_tail, *rx_ring_tail);
+			IPA_UT_ERR("rx db local: 0x%X\n",
+				test_ntn_ctx->rx_db_local +
+				lower_32_bits(
+				test_ntn_ctx->rx_transfer_ring_addr.phys_base));
+			BUG();
+			ret = -EFAULT;
+			goto err;
+		}
+		usleep_range(1000, 1001);
+	}
+
+	IPA_UT_DBG("loop_cnt %d\n", loop_cnt);
+	IPA_UT_DBG("rx ring tail 0x%X\n", *rx_ring_tail);
+	IPA_UT_DBG("tx ring tail 0x%X\n", *tx_ring_tail);
+
+	for (i = 0; i < NUM_RX_TR_ELE - 1; i++, initial_val++) {
+		/* verify RX event */
+		rx_ele = (struct rx_transfer_ring_ele *)
+			test_ntn_ctx->rx_transfer_ring_addr.base + i;
+		rx_event = (struct rx_event_ring_ele *)rx_ele;
+
+		IPA_UT_DBG("%d: address written by GSI is 0x[%X][%X]\n",
+			i, rx_event->buff_addr2, rx_event->buff_addr1);
+		IPA_UT_DBG("own bit is now %u", rx_event->own);
+
+		if (!rx_event->own) {
+			IPA_UT_ERR("own bit not modified by gsi - failed\n");
+			ret = -EFAULT;
+		}
+
+		evt_addr = ((u64)rx_event->buff_addr2 << 32) |
+			(u64)(rx_event->buff_addr1);
+		IPA_UT_DBG(
+			"RX: addr from event 0x%llx, address from buff %llx\n",
+			evt_addr, pkt_addr[i]);
+		if (evt_addr != pkt_addr[i]) {
+			IPA_UT_ERR("addresses are different - fail\n");
+			ret = -EFAULT;
+		}
+
+		/* read received packet */
+		packet_recv = (u32 *)((u8 *)test_ntn_ctx->tx_buf.base +
+			(i * BUFFER_SIZE));
+		IPA_UT_DBG("received packet 0x%X\n", *packet_recv);
+
+		if (*packet_recv != *(packet[i])) {
+			IPA_UT_ERR("packet content mismatch 0x%X != 0x%X\n",
+				*packet_recv, *(packet[i]));
+			ret = -EFAULT;
+		}
+		IPA_UT_DBG("packet %d content match!\n", i);
+
+		/* recycle buffer */
+		*packet_recv = 0;
+
+		/* recycle TRE */
+		/* TX */
+		memset((struct tx_transfer_ring_ele *)
+			test_ntn_ctx->tx_transfer_ring_addr.base + i,
+			0, sizeof(struct tx_transfer_ring_ele));
+
+		/* RX */
+		memset(rx_ele, 0, sizeof(struct rx_transfer_ring_ele));
+		rx_ele->fd = 1;
+		rx_ele->ld = 1;
+	}
+
+	ipa_ntn_test_advance_db(&test_ntn_ctx->tx_db_local, NUM_TX_TR_ELE - 1,
+		sizeof(struct tx_transfer_ring_ele),
+		test_ntn_ctx->tx_transfer_ring_addr.size);
+	IPA_UT_DBG("advance tx_db_local to 0x%X\n",
+		test_ntn_ctx->tx_db_local +
+		lower_32_bits(test_ntn_ctx->tx_transfer_ring_addr.phys_base));
+	iowrite32(test_ntn_ctx->tx_db_local +
+		lower_32_bits(test_ntn_ctx->tx_transfer_ring_addr.phys_base),
+		tx_db);
+
+	test_ntn_ctx->rx_idx = NUM_RX_TR_ELE - 1;
+	test_ntn_ctx->tx_idx = NUM_TX_TR_ELE - 1;
+err:
+	iounmap(rx_db);
+	iounmap(tx_db);
+	return ret;
+}
+
+static int ipa_ntn_test_multi_transfer_burst(void *priv)
+{
+	int ret = 0;
+
+	if (!test_ntn_ctx->ready) {
+		if (test_ntn_ctx->wait_cnt) {
+			IPA_UT_ERR("ipa ready timeout, don't run\n");
+			return -EFAULT;
+		}
+		/* ready cb test hasn't run yet and we need to wait */
+		if (ipa_ntn_test_ready_cb(NULL)) {
+			IPA_UT_ERR("ipa ready timeout, don't run\n");
+			return -EFAULT;
+		}
+	}
+
+	if (ipa_ntn_test_prepare_test()) {
+		IPA_UT_ERR("failed to prepare test.\n");
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	if (ipa_ntn_send_packet_burst()) {
+		IPA_UT_ERR("failed to send packets.\n");
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	IPA_UT_INFO("sent %d packets in a burst successfully!\n",
+		NUM_TX_TR_ELE - 1);
+
+	if (ipa_ntn_send_one_packet()) {
+		IPA_UT_ERR("failed to send last packet.\n");
+		ret = -EFAULT;
+		goto fail;
+	}
+	IPA_UT_INFO("sent the last packet successfully!\n");
+
+	ipa_ntn_test_print_stats();
+
+fail:
+	if (ipa_ntn_test_unreg_intf()) {
+		IPA_UT_ERR("fail to unregister interface.\n");
+		ret = -EFAULT;
+	}
+
+	if (ipa_ntn_teardown_pipes())
+		ret = -EFAULT;
+	return ret;
+}
+
+/* Suite definition block */
+IPA_UT_DEFINE_SUITE_START(ntn, "NTN3 tests",
+	ipa_test_ntn_suite_setup, ipa_test_ntn_suite_teardown)
+{
+	IPA_UT_ADD_TEST(ready_cb,
+		"ready callback test",
+		ipa_ntn_test_ready_cb,
+		true, IPA_HW_v5_0, IPA_HW_MAX),
+
+	IPA_UT_ADD_TEST(single_transfer,
+		"single data transfer",
+		ipa_ntn_test_single_transfer,
+		true, IPA_HW_v5_0, IPA_HW_MAX),
+
+	IPA_UT_ADD_TEST(multi_transfer,
+		"multi data transfer without wrap around",
+		ipa_ntn_test_multi_transfer,
+		true, IPA_HW_v5_0, IPA_HW_MAX),
+
+	IPA_UT_ADD_TEST(multi_transfer_w_wrap,
+		"multi data transfer with wrap around",
+		ipa_ntn_test_multi_transfer_wrap_around,
+		true, IPA_HW_v5_0, IPA_HW_MAX),
+
+	IPA_UT_ADD_TEST(multi_transfer_burst,
+		"send entire ring in one shot",
+		ipa_ntn_test_multi_transfer_burst,
+		true, IPA_HW_v5_0, IPA_HW_MAX),
+} IPA_UT_DEFINE_SUITE_END(ntn);

+ 3 - 1
drivers/platform/msm/ipa/test/ipa_ut_suite_list.h

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, 2021, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _IPA_UT_SUITE_LIST_H_
@@ -19,6 +19,7 @@ IPA_UT_DECLARE_SUITE(pm);
 IPA_UT_DECLARE_SUITE(example);
 IPA_UT_DECLARE_SUITE(hw_stats);
 IPA_UT_DECLARE_SUITE(wdi3);
+IPA_UT_DECLARE_SUITE(ntn);
 
 
 /**
@@ -33,6 +34,7 @@ IPA_UT_DEFINE_ALL_SUITES_START
 	IPA_UT_REGISTER_SUITE(example),
 	IPA_UT_REGISTER_SUITE(hw_stats),
 	IPA_UT_REGISTER_SUITE(wdi3),
+	IPA_UT_REGISTER_SUITE(ntn),
 } IPA_UT_DEFINE_ALL_SUITES_END;
 
 #endif /* _IPA_UT_SUITE_LIST_H_ */