Pārlūkot izejas kodu

msm: ipa: create ipa_client for ethernet

Create the ipa_client interface for ethernet offloading.

Change-Id: I8120b0cca9e42a75153fb1468dc1b8bcbd43484c
Signed-off-by: Bojun Pan <[email protected]>
Signed-off-by: Amir Levy <[email protected]>
Amir Levy 4 gadi atpakaļ
vecāks
revīzija
ace2384057

+ 32 - 0
drivers/platform/msm/gsi/gsi.c

@@ -2391,6 +2391,7 @@ static void gsi_program_chan_ctx(struct gsi_chan_props *props, unsigned int ee,
 		break;
 	case GSI_CHAN_PROT_AQC:
 	case GSI_CHAN_PROT_11AD:
+	case GSI_CHAN_PROT_RTK:
 		ch_k_cntxt_0.chtype_protocol_msb = 1;
 		break;
 	default:
@@ -4596,6 +4597,37 @@ void gsi_wdi3_write_evt_ring_db(unsigned long evt_ring_hdl,
 }
 EXPORT_SYMBOL(gsi_wdi3_write_evt_ring_db);
 
+/*
+ * gsi_get_refetch_reg() - read a channel's re-fetch pointer register
+ * @chan_hdl: GSI channel handle, used directly as the channel index k
+ * @is_rp: true to read RE_FETCH_READ_PTR, false for RE_FETCH_WRITE_PTR
+ *
+ * Returns the raw register value for the current EE (gsi_ctx->per.ee).
+ */
+int gsi_get_refetch_reg(unsigned long chan_hdl, bool is_rp)
+{
+	if (is_rp) {
+		return gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR,
+			gsi_ctx->per.ee, chan_hdl);
+	} else {
+		return gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR,
+			gsi_ctx->per.ee, chan_hdl);
+	}
+}
+EXPORT_SYMBOL(gsi_get_refetch_reg);
+
+/*
+ * gsi_get_drop_stats() - read GSI drop statistics from channel scratch
+ * @ep_id: endpoint (channel) index
+ * @scratch_id: which channel scratch register holds the drop counter
+ *
+ * Only scratch 5 (used by RTK) is supported; any other scratch_id
+ * returns 0.
+ */
+int gsi_get_drop_stats(unsigned long ep_id, int scratch_id)
+{
+	/* RTK use scratch 5 */
+	if (scratch_id == 5) {
+		/*
+		 * each channel context is 6 lines of 8 bytes, but n in SHRAM_n
+		 * is in 4 bytes offsets, so multiplying ep_id by 6*2=12 will
+		 * give the beginning of the required channel context, and then
+		 * need to add 7 since the channel context layout has the ring
+		 * rbase (8 bytes) + channel scratch 0-4 (20 bytes) so adding
+		 * additional 28/4 = 7 to get to scratch 5 of the required
+		 * channel.
+		 */
+		return gsihal_read_reg_n(GSI_GSI_SHRAM_n, ep_id * 12 + 7);
+	}
+	return 0;
+}
+EXPORT_SYMBOL(gsi_get_drop_stats);
+
 void gsi_wdi3_dump_register(unsigned long chan_hdl)
 {
 	uint32_t val;

+ 54 - 0
drivers/platform/msm/gsi/gsi.h

@@ -142,6 +142,7 @@ enum gsi_evt_chtype {
 	GSI_EVT_CHTYPE_MHIP_EV = 0x7,
 	GSI_EVT_CHTYPE_AQC_EV = 0x8,
 	GSI_EVT_CHTYPE_11AD_EV = 0x9,
+	GSI_EVT_CHTYPE_RTK_EV = 0xC,
 };
 
 enum gsi_evt_ring_elem_size {
@@ -222,6 +223,9 @@ enum gsi_chan_prot {
 	GSI_CHAN_PROT_MHIP = 0x7,
 	GSI_CHAN_PROT_AQC = 0x8,
 	GSI_CHAN_PROT_11AD = 0x9,
+	GSI_CHAN_PROT_MHIC = 0xA,
+	GSI_CHAN_PROT_QDSS = 0xB,
+	GSI_CHAN_PROT_RTK = 0xC,
 };
 
 enum gsi_max_prefetch {
@@ -951,6 +955,28 @@ union __packed gsi_wdi3_channel_scratch2_reg {
 	} data;
 };
 
+/**
+ * gsi_rtk_channel_scratch - Realtek SW config area of
+ * channel scratch
+ *
+ * @rtk_bar_low: Realtek bar address LSB
+ * @rtk_bar_high: Realtek bar address MSB
+ * @queue_number: dma channel number in rtk
+ * @fix_buff_size: buff size in KB
+ * @rtk_buff_addr_high: buffer addr where TRE points to
+ * @rtk_buff_addr_low: buffer addr where TRE points to
+ *			the descriptor
+ */
+struct __packed gsi_rtk_channel_scratch {
+	uint32_t rtk_bar_low;
+	uint32_t rtk_bar_high : 9;
+	uint32_t queue_number : 5;
+	uint32_t fix_buff_size : 4;
+	uint32_t reserved1 : 6;
+	uint32_t rtk_buff_addr_high : 8;
+	uint32_t rtk_buff_addr_low;
+	uint32_t reserved2;
+};
 
 /**
  * gsi_channel_scratch - channel scratch SW config area
@@ -967,6 +993,7 @@ union __packed gsi_channel_scratch {
 	struct __packed gsi_wdi3_channel_scratch wdi3;
 	struct __packed gsi_mhip_channel_scratch mhip;
 	struct __packed gsi_wdi2_channel_scratch_new wdi2_new;
+	struct __packed gsi_rtk_channel_scratch rtk;
 	struct __packed {
 		uint32_t word1;
 		uint32_t word2;
@@ -1101,6 +1128,17 @@ struct __packed gsi_wdi3_evt_scratch {
 	uint32_t reserved2;
 };
 
+/**
+ * gsi_rtk_evt_scratch - realtek protocol SW config area of
+ * event scratch
+ * @reserved1: reserve bit.
+ * @reserved2: reserve bit.
+ */
+struct __packed gsi_rtk_evt_scratch {
+	uint32_t reserved1;
+	uint32_t reserved2;
+};
+
 /**
  * gsi_evt_scratch - event scratch SW config area
  *
@@ -1112,6 +1150,7 @@ union __packed gsi_evt_scratch {
 	struct __packed gsi_11ad_evt_scratch w11ad;
 	struct __packed gsi_wdi3_evt_scratch wdi3;
 	struct __packed gsi_mhip_evt_scratch mhip;
+	struct __packed gsi_rtk_evt_scratch rtk;
 	struct __packed {
 		uint32_t word1;
 		uint32_t word2;
@@ -2068,6 +2107,21 @@ int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee, int *code);
 void gsi_wdi3_write_evt_ring_db(unsigned long chan_hdl, uint32_t db_addr_low,
 	uint32_t db_addr_high);
 
+/**
+ * gsi_get_refetch_reg - get WP/RP value from re_fetch register
+ *
+ * @chan_hdl: gsi channel handle
+ * @is_rp: rp or wp
+ */
+int gsi_get_refetch_reg(unsigned long chan_hdl, bool is_rp);
+
+/**
+ * gsi_get_drop_stats - get drop stats by GSI
+ *
+ * @ep_id: ep index
+ * @scratch_id: drop stats on which scratch register
+ */
+int gsi_get_drop_stats(unsigned long ep_id, int scratch_id);
 
 /**
  * gsi_wdi3_dump_register - dump wdi3 related gsi registers

+ 4 - 0
drivers/platform/msm/gsi/gsihal/gsihal_reg.c

@@ -161,6 +161,7 @@ static const char *gsireg_name_to_str[GSI_REG_MAX] = {
 	__stringify(GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_k),
 	__stringify(GSI_INTER_EE_n_SRC_EV_CH_IRQ_k),
 	__stringify(GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_k),
+	__stringify(GSI_GSI_SHRAM_n),
 };
 
 /*
@@ -1661,6 +1662,9 @@ static struct gsihal_reg_obj gsihal_reg_objs[GSI_VER_MAX][GSI_REG_MAX] = {
 	[GSI_VER_2_5][GSI_GSI_INST_RAM_n] = {
 	gsireg_construct_dummy, gsireg_parse_dummy,
 	0x0001b000, GSI_GSI_INST_RAM_n_WORD_SZ, 0},
+	[GSI_VER_2_5][GSI_GSI_SHRAM_n] = {
+	gsireg_construct_dummy, gsireg_parse_dummy,
+	0x00002000, GSI_GSI_SHRAM_n_WORD_SZ, 0 },
 
 	/* GSIv2_9 */
 	[GSI_VER_2_9][GSI_EE_n_EV_CH_k_CNTXT_1] = {

+ 1 - 0
drivers/platform/msm/gsi/gsihal/gsihal_reg.h

@@ -151,6 +151,7 @@ enum gsihal_reg_name {
 	GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_k,
 	GSI_INTER_EE_n_SRC_EV_CH_IRQ_k,
 	GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_k,
+	GSI_GSI_SHRAM_n,
 	GSI_REG_MAX
 };
 

+ 3 - 0
drivers/platform/msm/gsi/gsihal/gsihal_reg_i.h

@@ -15,6 +15,9 @@
 /* GSI_GSI_INST_RAM_n */
 #define GSI_GSI_INST_RAM_n_WORD_SZ 0x4
 
+/* GSI_GSI_SHRAM_n */
+#define GSI_GSI_SHRAM_n_WORD_SZ 0x4
+
 #define GSI_GSI_INST_RAM_n_MAXn 4095
 #define GSI_V2_0_GSI_INST_RAM_n_MAXn 6143
 #define GSI_V2_2_GSI_INST_RAM_n_MAXn 4095

+ 1 - 0
drivers/platform/msm/ipa/Makefile

@@ -31,6 +31,7 @@ ipam-y += \
 	ipa_v3/ipahal/ipahal_fltrt.o \
 	ipa_v3/ipahal/ipahal_hw_stats.o \
 	ipa_v3/ipahal/ipahal_nat.o \
+	ipa_v3/ipa_eth_i.o \
 
 ipam-$(CONFIG_RMNET_IPA3) += ipa_v3/rmnet_ipa.o ipa_v3/ipa_qmi_service_v01.o \
 	ipa_v3/ipa_qmi_service.o ipa_v3/rmnet_ctl_ipa.o \

+ 1 - 1
drivers/platform/msm/ipa/ipa_clients/Makefile

@@ -7,4 +7,4 @@ obj-$(CONFIG_ECM_IPA) += ecmipam.o
 ecmipam-objs := ecm_ipa.o
 
 obj-$(CONFIG_IPA_CLIENTS_MANAGER) += ipa_clientsm.o
-ipa_clientsm-objs := ipa_clients_manager.o ipa_usb.o ipa_wdi3.o ipa_gsb.o ipa_uc_offload.o ipa_wigig.o ipa_mhi_client.o
+ipa_clientsm-objs := ipa_clients_manager.o ipa_usb.o ipa_wdi3.o ipa_gsb.o ipa_uc_offload.o ipa_wigig.o ipa_mhi_client.o ipa_eth.o

+ 2 - 0
drivers/platform/msm/ipa/ipa_clients/ipa_clients_i.h

@@ -21,4 +21,6 @@ void ipa_mhi_register(void);
 
 void ipa_wigig_register(void);
 
+void ipa_eth_register(void);
+
 #endif /* _IPA_CLIENTS_I_H */

+ 2 - 0
drivers/platform/msm/ipa/ipa_clients/ipa_clients_manager.c

@@ -26,6 +26,8 @@ static int __init ipa_clients_manager_init(void)
 
 	ipa_wigig_register();
 
+	ipa_eth_register();
+
 	ipa3_notify_clients_registered();
 
 	return 0;

+ 1008 - 0
drivers/platform/msm/ipa/ipa_clients/ipa_eth.c

@@ -0,0 +1,1008 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/msm_ipa.h>
+#include "../ipa_common_i.h"
+#include "../ipa_v3/ipa_pm.h"
+#include "../ipa_v3/ipa_i.h"
+#include <linux/ipa_eth.h>
+
+#define OFFLOAD_DRV_NAME "ipa_eth"
+#define IPA_ETH_DBG(fmt, args...) \
+	do { \
+		pr_debug(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa3_get_ipc_logbuf(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa3_get_ipc_logbuf_low(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_ETH_DBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa3_get_ipc_logbuf_low(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_ETH_ERR(fmt, args...) \
+	do { \
+		pr_err(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa3_get_ipc_logbuf(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa3_get_ipc_logbuf_low(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+struct ipa_eth_ready_cb_wrapper {
+	struct list_head link;
+	struct ipa_eth_ready *info;
+};
+
+struct ipa_eth_per_client_info {
+	u32 pm_hdl;
+	atomic_t ref_cnt;
+	bool existed;
+};
+
+struct ipa_eth_intf {
+	struct list_head link;
+	char netdev_name[IPA_RESOURCE_NAME_MAX];
+	u8 hdr_len;
+	u32 partial_hdr_hdl[IPA_IP_MAX];
+};
+
+struct ipa_eth_context {
+	struct list_head ready_cb_list;
+	struct completion completion;
+	struct ipa_eth_per_client_info
+		client[IPA_ETH_CLIENT_MAX][IPA_ETH_INST_ID_MAX];
+	struct mutex lock;
+	struct workqueue_struct *wq;
+	bool is_eth_ready;
+	struct idr idr;
+	spinlock_t idr_lock;
+	struct list_head head_intf_list;
+	void *client_priv;
+};
+
+static struct ipa_eth_context *ipa_eth_ctx;
+
+static int ipa_eth_uc_rdy_cb(struct notifier_block *nb,
+	unsigned long action, void *data);
+
+static void ipa_eth_ready_notify_work(struct work_struct *work);
+
+static struct notifier_block uc_rdy_cb = {
+	.notifier_call = ipa_eth_uc_rdy_cb,
+};
+
+static DECLARE_WORK(ipa_eth_ready_notify, ipa_eth_ready_notify_work);
+
+/*
+ * ipa_eth_init_internal() - allocate and initialize the ipa_eth context
+ *
+ * Idempotent: returns 0 immediately if the context already exists.
+ * Returns 0 on success, -ENOMEM on allocation failure.
+ */
+static int ipa_eth_init_internal(void)
+{
+	char buff[IPA_RESOURCE_NAME_MAX];
+	int i, j;
+
+	/* already initialized */
+	if (ipa_eth_ctx)
+		return 0;
+
+	ipa_eth_ctx = kzalloc(sizeof(*ipa_eth_ctx), GFP_KERNEL);
+	if (ipa_eth_ctx == NULL) {
+		IPA_ETH_ERR("fail to alloc eth ctx\n");
+		return -ENOMEM;
+	}
+
+	snprintf(buff, IPA_RESOURCE_NAME_MAX, "ipa_eth_wq");
+	ipa_eth_ctx->wq = alloc_workqueue(buff,
+		WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_SYSFS, 1);
+	/*
+	 * On failure only jump to the error label; freeing here as well
+	 * would double-free ipa_eth_ctx.
+	 */
+	if (!ipa_eth_ctx->wq)
+		goto wq_err;
+	mutex_init(&ipa_eth_ctx->lock);
+	INIT_LIST_HEAD(&ipa_eth_ctx->ready_cb_list);
+	ipa_eth_ctx->is_eth_ready = false;
+	for (i = 0; i < IPA_ETH_CLIENT_MAX; i++) {
+		for (j = 0; j < IPA_ETH_INST_ID_MAX; j++) {
+			ipa_eth_ctx->client[i][j].pm_hdl = 0;
+			ipa_eth_ctx->client[i][j].existed = false;
+			atomic_set(&ipa_eth_ctx->client[i][j].ref_cnt, 0);
+		}
+	}
+	idr_init(&ipa_eth_ctx->idr);
+	spin_lock_init(&ipa_eth_ctx->idr_lock);
+	INIT_LIST_HEAD(&ipa_eth_ctx->head_intf_list);
+	ipa_eth_ctx->client_priv = NULL;
+	ipa3_eth_debugfs_init();
+	return 0;
+
+wq_err:
+	kfree(ipa_eth_ctx);
+	ipa_eth_ctx = NULL;
+	return -ENOMEM;
+}
+
+/*
+ * ipa_eth_cleanup_internal() - tear down the ipa_eth context
+ *
+ * Frees the interface list, destroys the workqueue and releases the
+ * context. Idempotent: a NULL context returns 0.
+ *
+ * NOTE(review): entries still on ready_cb_list are not freed here —
+ * callers appear to drain that list first; verify no wrapper leaks.
+ */
+static int ipa_eth_cleanup_internal(void)
+{
+	struct ipa_eth_intf *entry;
+	struct ipa_eth_intf *next;
+
+	/* already deinitialized */
+	if (!ipa_eth_ctx)
+		return 0;
+	/* clear interface list */
+	list_for_each_entry_safe(entry, next,
+		&ipa_eth_ctx->head_intf_list, link) {
+		list_del(&entry->link);
+		kfree(entry);
+	}
+	mutex_destroy(&ipa_eth_ctx->lock);
+	destroy_workqueue(ipa_eth_ctx->wq);
+	kfree(ipa_eth_ctx);
+	ipa_eth_ctx = NULL;
+	return 0;
+}
+
+/*
+ * ipa_eth_uc_rdy_cb() - IPA uC readiness notifier
+ *
+ * Defers client notification to the ipa_eth workqueue.
+ * NOTE(review): assumes ipa_eth_ctx is non-NULL when the uC fires —
+ * true while registration always precedes notifier installation.
+ */
+static int ipa_eth_uc_rdy_cb(struct notifier_block *nb,
+	unsigned long action, void *data)
+{
+	IPA_ETH_DBG("IPA uC is ready for eth");
+	queue_work(ipa_eth_ctx->wq, &ipa_eth_ready_notify);
+	return NOTIFY_OK;
+}
+
+/*
+ * ipa_eth_ready_notify_work() - invoke and drain all pending ready callbacks
+ *
+ * Runs on the ipa_eth workqueue. Marks the driver ready, then calls each
+ * registered callback once and frees its wrapper under the context lock.
+ */
+static void ipa_eth_ready_notify_work(struct work_struct *work)
+{
+	struct ipa_eth_ready_cb_wrapper *entry;
+	struct ipa_eth_ready_cb_wrapper *next;
+
+	IPA_ETH_DBG("ipa_eth ready notify\n");
+	mutex_lock(&ipa_eth_ctx->lock);
+	ipa_eth_ctx->is_eth_ready = true;
+	list_for_each_entry_safe(entry, next,
+		&ipa_eth_ctx->ready_cb_list, link) {
+		if (entry && entry->info && entry->info->notify)
+			entry->info->notify(entry->info->userdata);
+		/* remove from list once notify is done */
+		list_del(&entry->link);
+		kfree(entry);
+	}
+	mutex_unlock(&ipa_eth_ctx->lock);
+}
+
+/*
+ * ipa_eth_register_ready_cb_internal() - register a readiness callback
+ * @ready_info: caller-owned descriptor; ->notify is invoked when the IPA
+ *              uC becomes ready, ->is_eth_ready is set if already ready
+ *
+ * First caller also initializes the ipa_eth context and installs the uC
+ * readiness notifier. Returns 0 on success or a negative errno.
+ */
+static int ipa_eth_register_ready_cb_internal(struct ipa_eth_ready *ready_info)
+{
+	int rc;
+	struct ipa_eth_ready_cb_wrapper *ready_cb;
+
+	/* validate user input */
+	if (!ready_info) {
+		IPA_ETH_ERR("null ready_info");
+		return -EFAULT;
+	}
+
+	if (!ipa_eth_ctx) {
+		rc = ipa_eth_init_internal();
+		if (rc) {
+			/* it is not normal to fail here */
+			IPA_ETH_ERR("initialization failure\n");
+			return rc;
+		}
+		IPA_ETH_DBG("ipa_eth register ready cb\n");
+		mutex_lock(&ipa_eth_ctx->lock);
+		ready_cb = kmalloc(sizeof(struct ipa_eth_ready_cb_wrapper),
+			GFP_KERNEL);
+		if (!ready_cb) {
+			mutex_unlock(&ipa_eth_ctx->lock);
+			/* nothing was linked yet; skip the unlink step */
+			rc = -ENOMEM;
+			goto err_cleanup;
+		}
+		ready_cb->info = ready_info;
+		list_add_tail(&ready_cb->link, &ipa_eth_ctx->ready_cb_list);
+		mutex_unlock(&ipa_eth_ctx->lock);
+		/* rely on uC ready callback, only register once */
+		rc = ipa3_uc_register_ready_cb(&uc_rdy_cb);
+		if (rc) {
+			IPA_ETH_ERR("Failed to register ready cb\n");
+			goto err_unlink;
+		}
+	} else {
+		/* assume only IOSS could register for cb */
+		IPA_ETH_ERR("multiple eth register happens\n");
+		mutex_lock(&ipa_eth_ctx->lock);
+		ready_cb = kmalloc(sizeof(struct ipa_eth_ready_cb_wrapper),
+			GFP_KERNEL);
+		if (!ready_cb) {
+			mutex_unlock(&ipa_eth_ctx->lock);
+			return -ENOMEM;
+		}
+		ready_cb->info = ready_info;
+		list_add_tail(&ready_cb->link, &ipa_eth_ctx->ready_cb_list);
+		/* if already ready, directly callback from wq */
+		if (ipa3_uc_loaded_check())
+			queue_work(ipa_eth_ctx->wq, &ipa_eth_ready_notify);
+		mutex_unlock(&ipa_eth_ctx->lock);
+	}
+
+	/* if uc is already ready, set the output param to true */
+	if (ipa3_uc_loaded_check())
+		ready_info->is_eth_ready = true;
+
+	return 0;
+
+err_unlink:
+	/* undo the list insertion under the lock and free the wrapper */
+	mutex_lock(&ipa_eth_ctx->lock);
+	list_del(&ready_cb->link);
+	mutex_unlock(&ipa_eth_ctx->lock);
+	kfree(ready_cb);
+err_cleanup:
+	ipa_eth_cleanup_internal();
+	return rc;
+}
+
+/*
+ * ipa_eth_unregister_ready_cb_internal() - remove a readiness callback
+ * @ready_info: the descriptor previously passed at registration
+ *
+ * Unlinks and frees the matching wrapper; tears down the ipa_eth context
+ * when the last callback is removed. Returns 0 on success, -EFAULT if
+ * the descriptor was never registered.
+ */
+static int ipa_eth_unregister_ready_cb_internal(struct ipa_eth_ready *ready_info)
+{
+	struct ipa_eth_ready_cb_wrapper *entry;
+	struct ipa_eth_ready_cb_wrapper *next;
+	bool find_ready_info = false;
+
+	/* validate user input */
+	if (!ready_info) {
+		IPA_ETH_ERR("null ready_info");
+		return -EFAULT;
+	}
+
+	if (!ipa_eth_ctx) {
+		IPA_ETH_ERR("unregister called before register\n");
+		return -EFAULT;
+	}
+
+	IPA_ETH_DBG("ipa_eth unregister ready cb\n");
+	mutex_lock(&ipa_eth_ctx->lock);
+	/* safe iteration: the matched entry is unlinked and freed */
+	list_for_each_entry_safe(entry, next, &ipa_eth_ctx->ready_cb_list,
+		link) {
+		if (entry->info == ready_info) {
+			list_del(&entry->link);
+			kfree(entry);
+			find_ready_info = true;
+			break;
+		}
+	}
+	if (!find_ready_info) {
+		IPA_ETH_ERR("unable to unregister, no ready_info\n");
+		mutex_unlock(&ipa_eth_ctx->lock);
+		return -EFAULT;
+	}
+	ready_info->is_eth_ready = false;
+	if (list_empty(&ipa_eth_ctx->ready_cb_list)) {
+		mutex_unlock(&ipa_eth_ctx->lock);
+		ipa_eth_cleanup_internal();
+		return 0;
+	}
+	mutex_unlock(&ipa_eth_ctx->lock);
+	return 0;
+}
+
+/*
+ * ipa_eth_pipe_hdl_alloc() - allocate a pipe handle mapping to @ptr
+ *
+ * NOTE(review): idr_alloc() returns a negative errno on failure, which
+ * is silently truncated into the unsigned return value here — callers
+ * do not check it; confirm failure is acceptable/impossible.
+ */
+static u32 ipa_eth_pipe_hdl_alloc(void *ptr)
+{
+	ipa_eth_hdl_t hdl;
+
+	idr_preload(GFP_KERNEL);
+	spin_lock(&ipa_eth_ctx->idr_lock);
+	hdl = idr_alloc(&ipa_eth_ctx->idr, ptr, 0, 0, GFP_NOWAIT);
+	spin_unlock(&ipa_eth_ctx->idr_lock);
+	idr_preload_end();
+
+	return hdl;
+}
+
+/* ipa_eth_pipe_hdl_remove() - release a pipe handle from the idr */
+static void ipa_eth_pipe_hdl_remove(ipa_eth_hdl_t hdl)
+{
+	spin_lock(&ipa_eth_ctx->idr_lock);
+	idr_remove(&ipa_eth_ctx->idr, hdl);
+	spin_unlock(&ipa_eth_ctx->idr_lock);
+}
+
+/*
+ * ipa_eth_get_ipa_client_type_from_pipe() - map an eth pipe to an IPA client
+ * @pipe: pipe descriptor carrying client_info and direction
+ *
+ * Maps (HW client type, direction) to the IPA client enum. Only
+ * best-effort traffic on instance 0 is mapped; everything else yields
+ * IPA_CLIENT_MAX, which callers treat as "invalid".
+ */
+static enum ipa_client_type
+	ipa_eth_get_ipa_client_type_from_pipe(
+	struct ipa_eth_client_pipe_info *pipe)
+{
+	struct ipa_eth_client *client;
+	int ipa_client_type = IPA_CLIENT_MAX;
+
+	if (!pipe) {
+		IPA_ETH_ERR("invalid pipe\n");
+		return ipa_client_type;
+	}
+
+	client = pipe->client_info;
+	if (!client) {
+		IPA_ETH_ERR("invalid client\n");
+		return ipa_client_type;
+	}
+	switch (client->client_type) {
+	case IPA_ETH_CLIENT_AQC107:
+	case IPA_ETH_CLIENT_AQC113:
+		if (client->traffic_type ==
+			IPA_ETH_PIPE_BEST_EFFORT &&
+			client->inst_id == 0) {
+			/* TX towards IPA is a consumer pipe, RX a producer */
+			if (pipe->dir == IPA_ETH_PIPE_DIR_TX) {
+				ipa_client_type =
+					IPA_CLIENT_AQC_ETHERNET_CONS;
+			} else {
+				ipa_client_type =
+					IPA_CLIENT_AQC_ETHERNET_PROD;
+			}
+		}
+		break;
+	case IPA_ETH_CLIENT_RTK8111K:
+	case IPA_ETH_CLIENT_RTK8125B:
+		if (client->traffic_type ==
+			IPA_ETH_PIPE_BEST_EFFORT &&
+			client->inst_id == 0) {
+			if (pipe->dir == IPA_ETH_PIPE_DIR_TX) {
+				ipa_client_type =
+					IPA_CLIENT_RTK_ETHERNET_CONS;
+			} else {
+				ipa_client_type =
+					IPA_CLIENT_RTK_ETHERNET_PROD;
+			}
+		}
+		break;
+	case IPA_ETH_CLIENT_NTN:
+	case IPA_ETH_CLIENT_EMAC:
+		if (client->traffic_type ==
+			IPA_ETH_PIPE_BEST_EFFORT &&
+			client->inst_id == 0) {
+			if (pipe->dir == IPA_ETH_PIPE_DIR_TX) {
+				ipa_client_type =
+					IPA_CLIENT_ETHERNET_CONS;
+			} else {
+				ipa_client_type =
+					IPA_CLIENT_ETHERNET_PROD;
+			}
+		}
+		break;
+	default:
+		IPA_ETH_ERR("invalid client type%d\n",
+			client->client_type);
+	}
+	return ipa_client_type;
+}
+
+/*
+ * ipa_eth_get_pipe_from_hdl() - look up a pipe by its handle
+ *
+ * Returns the pipe pointer stored at allocation time, or NULL when the
+ * handle is unknown — callers must check for NULL.
+ */
+static struct ipa_eth_client_pipe_info
+	*ipa_eth_get_pipe_from_hdl(ipa_eth_hdl_t hdl)
+{
+	struct ipa_eth_client_pipe_info *pipe;
+
+	spin_lock(&ipa_eth_ctx->idr_lock);
+	pipe = idr_find(&ipa_eth_ctx->idr, hdl);
+	spin_unlock(&ipa_eth_ctx->idr_lock);
+
+	return pipe;
+}
+
+
+/*
+ * ipa_eth_client_connect_pipe() - connect one offload pipe to IPA
+ * @pipe: pipe descriptor; pipe->client_info identifies the HW client
+ *
+ * Allocates a pipe handle and dispatches to the HW-specific connect
+ * routine. Returns 0 on success, -EFAULT on invalid input.
+ */
+static int ipa_eth_client_connect_pipe(
+	struct ipa_eth_client_pipe_info *pipe)
+{
+	enum ipa_client_type client_type;
+	struct ipa_eth_client *client;
+
+	if (!pipe) {
+		IPA_ETH_ERR("invalid pipe\n");
+		return -EFAULT;
+	}
+	client = pipe->client_info;
+	if (!client) {
+		IPA_ETH_ERR("invalid client\n");
+		return -EFAULT;
+	}
+	client_type =
+		ipa_eth_get_ipa_client_type_from_pipe(pipe);
+	if (client_type == IPA_CLIENT_MAX) {
+		IPA_ETH_ERR("invalid client type\n");
+		return -EFAULT;
+	}
+	pipe->pipe_hdl = ipa_eth_pipe_hdl_alloc((void *)pipe);
+	switch (client->client_type) {
+	case IPA_ETH_CLIENT_AQC107:
+	case IPA_ETH_CLIENT_AQC113:
+		ipa3_eth_aqc_connect(pipe, client_type);
+		break;
+	case IPA_ETH_CLIENT_RTK8111K:
+	case IPA_ETH_CLIENT_RTK8125B:
+		ipa3_eth_rtk_connect(pipe, client_type);
+		break;
+	case IPA_ETH_CLIENT_NTN:
+	case IPA_ETH_CLIENT_EMAC:
+		/* add support if needed */
+		break;
+	default:
+		IPA_ETH_ERR("invalid client type%d\n",
+			client->client_type);
+		ipa_eth_pipe_hdl_remove(pipe->pipe_hdl);
+		/* unknown HW type must fail, not report success */
+		return -EFAULT;
+	}
+	return 0;
+}
+
+/*
+ * ipa_eth_client_disconnect_pipe() - disconnect one offload pipe
+ * @pipe: pipe descriptor previously connected
+ *
+ * Dispatches to the HW-specific disconnect routine and releases the
+ * pipe handle. Returns 0 on success, -EFAULT on invalid input.
+ *
+ * NOTE(review): ipa_common_i.h declares these helpers as ipa_eth_*
+ * while the calls here use ipa3_eth_* — confirm which naming the
+ * ipa_v3 implementation actually exports.
+ */
+static int ipa_eth_client_disconnect_pipe(
+	struct ipa_eth_client_pipe_info *pipe)
+{
+	enum ipa_client_type client_type;
+	struct ipa_eth_client *client;
+
+	if (!pipe) {
+		IPA_ETH_ERR("invalid pipe\n");
+		return -EFAULT;
+	}
+
+	client = pipe->client_info;
+	if (!client) {
+		IPA_ETH_ERR("invalid client\n");
+		return -EFAULT;
+	}
+	client_type =
+		ipa_eth_get_ipa_client_type_from_pipe(pipe);
+	if (client_type == IPA_CLIENT_MAX) {
+		IPA_ETH_ERR("invalid client type\n");
+		return -EFAULT;
+	}
+	switch (client->client_type) {
+	case IPA_ETH_CLIENT_AQC107:
+	case IPA_ETH_CLIENT_AQC113:
+		ipa3_eth_aqc_disconnect(pipe, client_type);
+		break;
+	case IPA_ETH_CLIENT_RTK8111K:
+	case IPA_ETH_CLIENT_RTK8125B:
+		ipa3_eth_rtk_disconnect(pipe, client_type);
+		break;
+	case IPA_ETH_CLIENT_NTN:
+	case IPA_ETH_CLIENT_EMAC:
+		ipa3_eth_emac_disconnect(pipe, client_type);
+		break;
+	default:
+		IPA_ETH_ERR("invalid client type%d\n",
+			client->client_type);
+		return -EFAULT;
+	}
+	ipa_eth_pipe_hdl_remove(pipe->pipe_hdl);
+	return 0;
+}
+
+
+/*
+ * ipa_eth_commit_partial_hdr() - build and commit v4/v6 partial headers
+ * @hdr: pre-allocated ioc struct with room for two ipa_hdr_add entries
+ * @netdev_name: used to derive the "<name>_ipv4"/"<name>_ipv6" header names
+ * @hdr_info: per-IP-version template (length, type, dst MAC offset)
+ *
+ * Returns 0 on success, -EINVAL/-EFAULT on bad input or commit failure.
+ */
+static int ipa_eth_commit_partial_hdr(
+	struct ipa_ioc_add_hdr *hdr,
+	const char *netdev_name,
+	struct ipa_eth_hdr_info *hdr_info)
+{
+	int i;
+
+	if (!hdr || !hdr_info || !netdev_name) {
+		IPA_ETH_ERR("Invalid input\n");
+		return -EINVAL;
+	}
+
+	hdr->commit = 1;
+	hdr->num_hdrs = 2;
+
+	snprintf(hdr->hdr[0].name, sizeof(hdr->hdr[0].name),
+			 "%s_ipv4", netdev_name);
+	snprintf(hdr->hdr[1].name, sizeof(hdr->hdr[1].name),
+			 "%s_ipv6", netdev_name);
+	/* index i doubles as the IP version: hdr[0]=v4, hdr[1]=v6 */
+	for (i = IPA_IP_v4; i < IPA_IP_MAX; i++) {
+		hdr->hdr[i].hdr_len = hdr_info[i].hdr_len;
+		memcpy(hdr->hdr[i].hdr, hdr_info[i].hdr, hdr->hdr[i].hdr_len);
+		hdr->hdr[i].type = hdr_info[i].hdr_type;
+		hdr->hdr[i].is_partial = 1;
+		hdr->hdr[i].is_eth2_ofst_valid = 1;
+		hdr->hdr[i].eth2_ofst = hdr_info[i].dst_mac_addr_offset;
+	}
+
+	if (ipa_add_hdr(hdr)) {
+		IPA_ETH_ERR("fail to add partial headers\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+
+/* ipa_eth_pm_cb() - IPA PM event callback; only logs the event */
+static void ipa_eth_pm_cb(void *p, enum ipa_pm_cb_event event)
+{
+	IPA_ETH_ERR("received pm event %d\n", event);
+}
+
+/*
+ * ipa_eth_pm_register() - register this client with IPA PM and vote clocks
+ * @client: eth client; (client_type, inst_id) selects the PM slot
+ *
+ * First registration per slot creates a PM handle and activates it; every
+ * call (re-)associates the client's TX (CONS) pipes with that handle and
+ * bumps the refcount. Caller holds ipa_eth_ctx->lock.
+ *
+ * NOTE(review): if ipa_pm_activate_sync() fails, the freshly registered
+ * PM handle is not deregistered before returning — verify intentional.
+ */
+static int ipa_eth_pm_register(struct ipa_eth_client *client)
+{
+	struct ipa_pm_register_params pm_params;
+	int client_type, inst_id;
+	char name[IPA_RESOURCE_NAME_MAX];
+	struct ipa_eth_client_pipe_info *pipe;
+	int rc;
+
+	/* validate user input */
+	if (!client) {
+		IPA_ETH_ERR("null client");
+		return -EFAULT;
+	}
+	client_type = client->client_type;
+	inst_id = client->inst_id;
+
+	/* slot already registered: just associate the new pipes */
+	if (atomic_read(
+		&ipa_eth_ctx->client[client_type][inst_id].ref_cnt))
+		goto add_pipe_list;
+
+	memset(&pm_params, 0, sizeof(pm_params));
+	snprintf(name, IPA_RESOURCE_NAME_MAX,
+		"ipa_eth_%d_%d", client_type, inst_id);
+	pm_params.name = name;
+	pm_params.callback = ipa_eth_pm_cb;
+	pm_params.user_data = NULL;
+	pm_params.group = IPA_PM_GROUP_DEFAULT;
+	if (ipa_pm_register(&pm_params,
+		&ipa_eth_ctx->client[client_type][inst_id].pm_hdl)) {
+		IPA_ETH_ERR("fail to register ipa pm\n");
+		return -EFAULT;
+	}
+	/* vote IPA clock on */
+	rc = ipa_pm_activate_sync(
+		ipa_eth_ctx->client[client_type][inst_id].pm_hdl);
+	if (rc) {
+		IPA_ETH_ERR("fail to activate ipa pm\n");
+		return -EFAULT;
+	}
+add_pipe_list:
+	list_for_each_entry(pipe, &client->pipe_list,
+		link) {
+		/* only TX (IPA consumer) pipes are PM-associated */
+		if (pipe->dir == IPA_ETH_PIPE_DIR_RX)
+			continue;
+		rc = ipa_pm_associate_ipa_cons_to_client(
+			ipa_eth_ctx->client[client_type][inst_id].pm_hdl,
+			ipa_eth_get_ipa_client_type_from_pipe(pipe));
+		if (rc) {
+			IPA_ETH_ERR("fail to associate cons with PM %d\n", rc);
+			ipa_pm_deregister(
+			ipa_eth_ctx->client[client_type][inst_id].pm_hdl);
+			ipa_eth_ctx->client[client_type][inst_id].pm_hdl = 0;
+			ipa_assert();
+			return rc;
+		}
+	}
+	atomic_inc(
+		&ipa_eth_ctx->client[client_type][inst_id].ref_cnt);
+	return 0;
+}
+
+/*
+ * ipa_eth_pm_deregister() - drop a PM reference for this client slot
+ * @client: eth client; (client_type, inst_id) selects the PM slot
+ *
+ * Deactivates and deregisters the PM handle when the last reference is
+ * dropped. Caller holds ipa_eth_ctx->lock.
+ *
+ * NOTE(review): pm_hdl is left stale (not zeroed) after deregistering —
+ * confirm no later path reads it before re-registration.
+ */
+static int ipa_eth_pm_deregister(struct ipa_eth_client *client)
+{
+	int rc;
+	int client_type, inst_id;
+
+	/* validate user input */
+	if (!client) {
+		IPA_ETH_ERR("null client");
+		return -EFAULT;
+	}
+	client_type = client->client_type;
+	inst_id = client->inst_id;
+	if (atomic_read(
+		&ipa_eth_ctx->client[client_type][inst_id].ref_cnt)
+		== 1) {
+		rc = ipa_pm_deactivate_sync(
+			ipa_eth_ctx->client[client_type][inst_id].pm_hdl);
+		if (rc) {
+			IPA_ETH_ERR("fail to deactivate ipa pm\n");
+			return -EFAULT;
+		}
+		if (ipa_pm_deregister(
+			ipa_eth_ctx->client[client_type][inst_id].pm_hdl)) {
+			IPA_ETH_ERR("fail to deregister ipa pm\n");
+			return -EFAULT;
+		}
+	}
+	atomic_dec(&ipa_eth_ctx->client[client_type][inst_id].ref_cnt);
+	return 0;
+}
+
+/*
+ * ipa_eth_client_conn_pipes_internal() - connect all of a client's pipes
+ * @client: eth client whose pipe_list should be connected
+ *
+ * Registers with IPA PM, connects every pipe in the list, and creates
+ * the per-client debugfs node on first connect. Requires the driver to
+ * be ready (uC loaded). Returns 0 on success, -EFAULT otherwise.
+ */
+static int ipa_eth_client_conn_pipes_internal(struct ipa_eth_client *client)
+{
+	struct ipa_eth_client_pipe_info *pipe;
+	int rc;
+	int client_type, inst_id, traff_type;
+
+	/* validate user input */
+	if (!client) {
+		IPA_ETH_ERR("null client");
+		return -EFAULT;
+	}
+	if (!ipa_eth_ctx) {
+		IPA_ETH_ERR("connect called before register readiness\n");
+		return -EFAULT;
+	}
+
+	if (!ipa_eth_ctx->is_eth_ready) {
+		IPA_ETH_ERR("conn called before IPA eth ready\n");
+		return -EFAULT;
+	}
+	ipa_eth_ctx->client_priv = client->priv;
+	client_type = client->client_type;
+	inst_id = client->inst_id;
+	traff_type = client->traffic_type;
+	IPA_ETH_DBG("ipa_eth conn client %d inst %d, traffic %d\n",
+		client_type, inst_id, traff_type);
+	mutex_lock(&ipa_eth_ctx->lock);
+	rc = ipa_eth_pm_register(client);
+	if (rc) {
+		IPA_ETH_ERR("pm register failed\n");
+		mutex_unlock(&ipa_eth_ctx->lock);
+		return -EFAULT;
+	}
+	list_for_each_entry(pipe, &client->pipe_list,
+		link) {
+		rc = ipa_eth_client_connect_pipe(pipe);
+		if (rc) {
+			/* a failed pipe connect is fatal by design */
+			IPA_ETH_ERR("pipe connect fails\n");
+			ipa_assert();
+		}
+	}
+	if (!ipa_eth_ctx->client[client_type][inst_id].existed) {
+		ipa3_eth_debugfs_add_node(client);
+		ipa_eth_ctx->client[client_type][inst_id].existed = true;
+	}
+	mutex_unlock(&ipa_eth_ctx->lock);
+	return 0;
+}
+
+/*
+ * ipa_eth_client_disconn_pipes_internal() - disconnect all client pipes
+ * @client: eth client whose pipe_list should be disconnected
+ *
+ * Disconnects every pipe in the list and drops the IPA PM reference.
+ * Requires the driver to be ready. Returns 0 on success, -EFAULT
+ * otherwise.
+ */
+static int ipa_eth_client_disconn_pipes_internal(struct ipa_eth_client *client)
+{
+	int rc;
+	struct ipa_eth_client_pipe_info *pipe;
+
+	/* validate user input */
+	if (!client) {
+		IPA_ETH_ERR("null client");
+		return -EFAULT;
+	}
+
+	if (!ipa_eth_ctx) {
+		IPA_ETH_ERR("disconn called before register readiness\n");
+		return -EFAULT;
+	}
+
+	if (!ipa_eth_ctx->is_eth_ready) {
+		IPA_ETH_ERR("disconn called before IPA eth ready\n");
+		return -EFAULT;
+	}
+	IPA_ETH_DBG("ipa_eth disconn client %d inst %d, traffic %d\n",
+		client->client_type, client->inst_id,
+		client->traffic_type);
+	mutex_lock(&ipa_eth_ctx->lock);
+	list_for_each_entry(pipe, &client->pipe_list,
+		link) {
+		rc = ipa_eth_client_disconnect_pipe(pipe);
+		if (rc) {
+			/* was a copy-paste "connect" message before */
+			IPA_ETH_ERR("pipe disconnect fails\n");
+			ipa_assert();
+		}
+	}
+	if (ipa_eth_pm_deregister(client)) {
+		IPA_ETH_ERR("pm deregister failed\n");
+		mutex_unlock(&ipa_eth_ctx->lock);
+		return -EFAULT;
+	}
+	mutex_unlock(&ipa_eth_ctx->lock);
+	return 0;
+}
+
+/*
+ * ipa_eth_client_reg_intf_internal() - register a netdev interface with IPA
+ * @intf: interface info: netdev name, v4/v6 header templates, pipe handles
+ *
+ * Commits partial headers, builds TX/RX interface properties from the
+ * interface's pipe handles, and registers them with IPA. Registering the
+ * same netdev twice is a no-op. Returns 0 on success or negative errno.
+ *
+ * NOTE(review): three suspicious spots to confirm against the original
+ * intent:
+ *  1. the tx_prop loop writes tx_prop[i] (v4) and tx_prop[i+1] (v6) but
+ *     advances i by 1, so for tx.num_props > 1 iteration i overwrites
+ *     the v6 entry written at i-1 — expected layout is likely stride-2
+ *     or [i] / [num_props + i];
+ *  2. tx.num_props *= IPA_IP_MAX happens inside "if (rx.num_props)", so
+ *     a TX-only interface never scales its property count;
+ *  3. ipa_eth_get_pipe_from_hdl() can return NULL and is dereferenced
+ *     without a check.
+ */
+static int ipa_eth_client_reg_intf_internal(struct ipa_eth_intf_info *intf)
+{
+	struct ipa_eth_intf *new_intf;
+	struct ipa_eth_intf *entry;
+	struct ipa_ioc_add_hdr *hdr;
+	struct ipa_tx_intf tx;
+	struct ipa_rx_intf rx;
+	enum ipa_client_type tx_client[IPA_CLIENT_MAX];
+	enum ipa_client_type rx_client[IPA_CLIENT_MAX];
+	struct ipa_ioc_tx_intf_prop *tx_prop =  NULL;
+	struct ipa_ioc_rx_intf_prop *rx_prop = NULL;
+	struct ipa_eth_client_pipe_info *pipe;
+	u32 len;
+	int ret = 0, i;
+
+	if (intf == NULL) {
+		IPA_ETH_ERR("invalid params intf=%pK\n", intf);
+		return -EINVAL;
+	}
+	if (!ipa_eth_ctx) {
+		IPA_ETH_ERR("disconn called before register readiness\n");
+		return -EFAULT;
+	}
+	IPA_ETH_DBG("register interface for netdev %s\n",
+		intf->netdev_name);
+	mutex_lock(&ipa_eth_ctx->lock);
+	list_for_each_entry(entry, &ipa_eth_ctx->head_intf_list, link)
+		if (strcmp(entry->netdev_name, intf->netdev_name) == 0) {
+			IPA_ETH_DBG("intf was added before.\n");
+			mutex_unlock(&ipa_eth_ctx->lock);
+			return 0;
+		}
+	new_intf = kzalloc(sizeof(*new_intf), GFP_KERNEL);
+	if (new_intf == NULL) {
+		IPA_ETH_ERR("fail to alloc new intf\n");
+		mutex_unlock(&ipa_eth_ctx->lock);
+		return -ENOMEM;
+	}
+	INIT_LIST_HEAD(&new_intf->link);
+	strlcpy(new_intf->netdev_name, intf->netdev_name,
+		sizeof(new_intf->netdev_name));
+	new_intf->hdr_len = intf->hdr[0].hdr_len;
+	/* add partial header */
+	len = sizeof(struct ipa_ioc_add_hdr) + 2 * sizeof(struct ipa_hdr_add);
+	hdr = kzalloc(len, GFP_KERNEL);
+	if (hdr == NULL) {
+		IPA_ETH_ERR("fail to alloc %d bytes\n", len);
+		ret = -EFAULT;
+		goto fail_alloc_hdr;
+	}
+
+	if (ipa_eth_commit_partial_hdr(hdr,
+		intf->netdev_name, intf->hdr)) {
+		IPA_ETH_ERR("fail to commit partial headers\n");
+		ret = -EFAULT;
+		goto fail_commit_hdr;
+	}
+
+	/* keep handles for deletion at unregister time */
+	new_intf->partial_hdr_hdl[IPA_IP_v4] = hdr->hdr[IPA_IP_v4].hdr_hdl;
+	new_intf->partial_hdr_hdl[IPA_IP_v6] = hdr->hdr[IPA_IP_v6].hdr_hdl;
+	IPA_ETH_DBG("IPv4 hdr hdl: %d IPv6 hdr hdl: %d\n",
+		hdr->hdr[IPA_IP_v4].hdr_hdl, hdr->hdr[IPA_IP_v6].hdr_hdl);
+
+	memset(&tx, 0, sizeof(struct ipa_tx_intf));
+	memset(&rx, 0, sizeof(struct ipa_rx_intf));
+	for (i = 0; i < intf->pipe_hdl_list_size; i++) {
+		pipe = ipa_eth_get_pipe_from_hdl(intf->pipe_hdl_list[i]);
+		if (pipe->dir == IPA_ETH_PIPE_DIR_TX) {
+			tx_client[tx.num_props] =
+				ipa_eth_get_ipa_client_type_from_pipe(pipe);
+			tx.num_props++;
+		} else {
+			rx_client[rx.num_props] =
+				ipa_eth_get_ipa_client_type_from_pipe(pipe);
+			rx.num_props++;
+		}
+	}
+	/* populate tx prop */
+	if (tx.num_props) {
+		tx_prop = kmalloc(
+			sizeof(*tx_prop) * tx.num_props *
+			IPA_IP_MAX, GFP_KERNEL);
+		if (!tx_prop) {
+			IPAERR("failed to allocate memory\n");
+			ret = -ENOMEM;
+			goto fail_commit_hdr;
+		}
+		memset(tx_prop, 0, sizeof(*tx_prop) *
+			tx.num_props * IPA_IP_MAX);
+		tx.prop = tx_prop;
+		for (i = 0; i < tx.num_props; i++) {
+			tx_prop[i].ip = IPA_IP_v4;
+			tx_prop[i].dst_pipe = tx_client[i];
+			tx_prop[i].hdr_l2_type = intf->hdr[0].hdr_type;
+			strlcpy(tx_prop[i].hdr_name, hdr->hdr[IPA_IP_v4].name,
+				sizeof(tx_prop[i].hdr_name));
+
+			tx_prop[i+1].ip = IPA_IP_v6;
+			tx_prop[i+1].dst_pipe = tx_client[i];
+			tx_prop[i+1].hdr_l2_type = intf->hdr[1].hdr_type;
+			strlcpy(tx_prop[i+1].hdr_name, hdr->hdr[IPA_IP_v6].name,
+				sizeof(tx_prop[i+1].hdr_name));
+		}
+	}
+	/* populate rx prop */
+	if (rx.num_props) {
+		rx_prop = kmalloc(
+			sizeof(*rx_prop) * rx.num_props *
+			IPA_IP_MAX, GFP_KERNEL);
+		if (!rx_prop) {
+			IPAERR("failed to allocate memory\n");
+			ret = -ENOMEM;
+			goto fail_commit_hdr;
+		}
+		memset(rx_prop, 0, sizeof(*rx_prop) *
+			rx.num_props * IPA_IP_MAX);
+		rx.prop = rx_prop;
+		for (i = 0; i < rx.num_props; i++) {
+			rx_prop[i].ip = IPA_IP_v4;
+			rx_prop[i].src_pipe = rx_client[i];
+			rx_prop[i].hdr_l2_type = intf->hdr[0].hdr_type;
+
+			rx_prop[i+1].ip = IPA_IP_v6;
+			rx_prop[i+1].src_pipe = rx_client[i];
+			rx_prop[i+1].hdr_l2_type = intf->hdr[1].hdr_type;
+		}
+		tx.num_props *= IPA_IP_MAX;
+		rx.num_props *= IPA_IP_MAX;
+	}
+	if (ipa_register_intf(intf->netdev_name, &tx, &rx)) {
+		IPA_ETH_ERR("fail to add interface prop\n");
+		ret = -EFAULT;
+		goto fail_commit_hdr;
+	}
+
+	list_add(&new_intf->link, &ipa_eth_ctx->head_intf_list);
+
+	kfree(hdr);
+	kfree(tx_prop);
+	kfree(rx_prop);
+	mutex_unlock(&ipa_eth_ctx->lock);
+	return 0;
+fail_commit_hdr:
+	kfree(hdr);
+	kfree(tx_prop);
+	kfree(rx_prop);
+fail_alloc_hdr:
+	kfree(new_intf);
+	mutex_unlock(&ipa_eth_ctx->lock);
+	return ret;
+}
+
+/*
+ * ipa_eth_client_unreg_intf_internal() - unregister a netdev from IPA
+ * @intf: interface info; only netdev_name is used for the lookup
+ *
+ * Deletes the partial headers committed at registration, deregisters
+ * the interface properties and frees the bookkeeping entry. A name not
+ * found in the list silently returns 0 (hdr stays NULL; kfree(NULL) is
+ * a no-op at the fail label).
+ */
+static int ipa_eth_client_unreg_intf_internal(struct ipa_eth_intf_info *intf)
+{
+	int len, ret = 0;
+	struct ipa_ioc_del_hdr *hdr = NULL;
+	struct ipa_eth_intf *entry;
+	struct ipa_eth_intf *next;
+
+	if (intf == NULL) {
+		IPA_ETH_ERR("invalid params intf=%pK\n", intf);
+		return -EINVAL;
+	}
+	if (!ipa_eth_ctx) {
+		IPA_ETH_ERR("disconn called before register readiness\n");
+		return -EFAULT;
+	}
+	IPA_ETH_DBG("unregister interface for netdev %s\n",
+		intf->netdev_name);
+	mutex_lock(&ipa_eth_ctx->lock);
+	list_for_each_entry_safe(entry, next, &ipa_eth_ctx->head_intf_list,
+		link)
+		if (strcmp(entry->netdev_name, intf->netdev_name) == 0) {
+			len = sizeof(struct ipa_ioc_del_hdr) +
+				IPA_IP_MAX * sizeof(struct ipa_hdr_del);
+			hdr = kzalloc(len, GFP_KERNEL);
+			if (hdr == NULL) {
+				IPA_ETH_ERR("fail to alloc %d bytes\n", len);
+				mutex_unlock(&ipa_eth_ctx->lock);
+				return -ENOMEM;
+			}
+
+			hdr->commit = 1;
+			hdr->num_hdls = 2;
+			hdr->hdl[0].hdl = entry->partial_hdr_hdl[0];
+			hdr->hdl[1].hdl = entry->partial_hdr_hdl[1];
+			IPA_ETH_DBG("IPv4 hdr hdl: %d IPv6 hdr hdl: %d\n",
+				hdr->hdl[0].hdl, hdr->hdl[1].hdl);
+
+			if (ipa_del_hdr(hdr)) {
+				IPA_ETH_ERR("fail to delete partial header\n");
+				ret = -EFAULT;
+				goto fail;
+			}
+
+			if (ipa_deregister_intf(entry->netdev_name)) {
+				IPA_ETH_ERR("fail to del interface props\n");
+				ret = -EFAULT;
+				goto fail;
+			}
+
+			list_del(&entry->link);
+			kfree(entry);
+
+			break;
+		}
+fail:
+	/* shared exit path: also reached on success after the loop */
+	kfree(hdr);
+	mutex_unlock(&ipa_eth_ctx->lock);
+	return ret;
+
+}
+
+/*
+ * ipa_eth_client_set_perf_profile_internal() - set PM throughput vote
+ * @client: eth client selecting the PM handle slot
+ * @profile: requested bandwidth (max_supported_bw_mbps)
+ *
+ * Returns 0 on success, -EINVAL on bad input, -EFAULT on PM failure.
+ */
+static int ipa_eth_client_set_perf_profile_internal(struct ipa_eth_client *client,
+	struct ipa_eth_perf_profile *profile)
+{
+	int client_type, inst_id;
+
+	/* validate both inputs before dereferencing client */
+	if (client == NULL || profile == NULL) {
+		IPA_ETH_ERR("Invalid input\n");
+		return -EINVAL;
+	}
+	client_type = client->client_type;
+	inst_id = client->inst_id;
+
+	if (ipa_pm_set_throughput(
+		ipa_eth_ctx->client[client_type][inst_id].pm_hdl,
+		profile->max_supported_bw_mbps)) {
+		IPA_ETH_ERR("fail to set pm throughput\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* ipa_eth_msg_free_cb() - free the message buffer after IPA delivers it */
+static void ipa_eth_msg_free_cb(void *buff, u32 len, u32 type)
+{
+	kfree(buff);
+}
+
+/*
+ * ipa_eth_client_conn_evt_internal() - send IPA_PERIPHERAL_CONNECT to user
+ * @msg: caller-owned message; copied, the copy is freed by the msg cb
+ *
+ * Returns the ipa_send_msg() result, or -ENOMEM on allocation failure.
+ */
+static int ipa_eth_client_conn_evt_internal(struct ipa_ecm_msg *msg)
+{
+	struct ipa_msg_meta msg_meta;
+	struct ipa_ecm_msg *eth_msg;
+	int ret;
+
+	IPADBG("enter\n");
+
+	eth_msg = kzalloc(sizeof(*eth_msg), GFP_KERNEL);
+	if (eth_msg == NULL)
+		return -ENOMEM;
+	memcpy(eth_msg, msg, sizeof(struct ipa_ecm_msg));
+	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+	msg_meta.msg_len = sizeof(struct ipa_ecm_msg);
+	msg_meta.msg_type = IPA_PERIPHERAL_CONNECT;
+
+	IPADBG("send IPA_PERIPHERAL_CONNECT, len:%d, buff %pK",
+		msg_meta.msg_len, eth_msg);
+	ret = ipa_send_msg(&msg_meta, eth_msg, ipa_eth_msg_free_cb);
+
+	IPADBG("exit\n");
+
+	return ret;
+}
+
+/*
+ * ipa_eth_client_disconn_evt_internal() - send IPA_PERIPHERAL_DISCONNECT
+ * @msg: caller-owned message; copied, the copy is freed by the msg cb
+ *
+ * Returns the ipa_send_msg() result, or -ENOMEM on allocation failure.
+ */
+static int ipa_eth_client_disconn_evt_internal(struct ipa_ecm_msg *msg)
+{
+	struct ipa_msg_meta msg_meta;
+	struct ipa_ecm_msg *eth_msg;
+	int ret;
+
+	IPADBG("enter\n");
+
+	eth_msg = kzalloc(sizeof(*eth_msg), GFP_KERNEL);
+	if (eth_msg == NULL)
+		return -ENOMEM;
+	memcpy(eth_msg, msg, sizeof(struct ipa_ecm_msg));
+	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+	msg_meta.msg_len = sizeof(struct ipa_ecm_msg);
+	msg_meta.msg_type = IPA_PERIPHERAL_DISCONNECT;
+
+	IPADBG("send PERIPHERAL_DISCONNECT, len:%d, buff %pK",
+		msg_meta.msg_len, eth_msg);
+	ret = ipa_send_msg(&msg_meta, eth_msg, ipa_eth_msg_free_cb);
+
+	IPADBG("exit\n");
+
+	return ret;
+}
+
+/*
+ * ipa_eth_register() - publish the ipa_eth API table to the framework
+ *
+ * Called once from the clients manager at init; wires every internal
+ * implementation into the ipa_fmwk dispatch table.
+ */
+void ipa_eth_register(void)
+{
+	struct ipa_eth_data funcs;
+
+	funcs.ipa_eth_register_ready_cb = ipa_eth_register_ready_cb_internal;
+	funcs.ipa_eth_unregister_ready_cb =
+		ipa_eth_unregister_ready_cb_internal;
+	funcs.ipa_eth_client_conn_pipes = ipa_eth_client_conn_pipes_internal;
+	funcs.ipa_eth_client_disconn_pipes =
+		ipa_eth_client_disconn_pipes_internal;
+	funcs.ipa_eth_client_reg_intf = ipa_eth_client_reg_intf_internal;
+	funcs.ipa_eth_client_unreg_intf = ipa_eth_client_unreg_intf_internal;
+	funcs.ipa_eth_client_set_perf_profile =
+		ipa_eth_client_set_perf_profile_internal;
+	funcs.ipa_eth_client_conn_evt = ipa_eth_client_conn_evt_internal;
+	funcs.ipa_eth_client_disconn_evt = ipa_eth_client_disconn_evt_internal;
+
+	if (ipa_fmwk_register_ipa_eth(&funcs))
+		pr_err("failed to register ipa_eth APIs\n");
+}

+ 34 - 1
drivers/platform/msm/ipa/ipa_common_i.h

@@ -13,6 +13,7 @@
 #include <linux/ipa_uc_offload.h>
 #include <linux/ipa_wdi3.h>
 #include <linux/ipa_wigig.h>
+#include <linux/ipa_eth.h>
 #include <linux/ratelimit.h>
 #include "gsi.h"
 
@@ -420,7 +421,11 @@ struct IpaHwOffloadStatsAllocCmdData_t {
  * @ch_num: number of ch supported for given protocol
  */
 struct ipa_uc_dbg_ring_stats {
-	struct IpaHwRingStats_t ring[IPA_MAX_CH_STATS_SUPPORTED];
+	union {
+		struct IpaHwRingStats_t ring[IPA_MAX_CH_STATS_SUPPORTED];
+		struct ipa_uc_dbg_rtk_ring_stats
+			rtk[IPA_MAX_CH_STATS_SUPPORTED];
+	} u;
 	u8 num_ch;
 };
 
@@ -772,4 +777,32 @@ int ipa3_get_smmu_params(struct ipa_smmu_in_params *in,
 */
 int ipa3_tz_unlock_reg(struct ipa_tz_unlock_reg_info *reg_info, u16 num_regs);
 
+int ipa_eth_rtk_connect(
+	struct ipa_eth_client_pipe_info *pipe,
+	enum ipa_client_type client_type);
+
+int ipa_eth_aqc_connect(
+	struct ipa_eth_client_pipe_info *pipe,
+	enum ipa_client_type client_type);
+
+int ipa_eth_emac_connect(
+	struct ipa_eth_client_pipe_info *pipe,
+	enum ipa_client_type client_type);
+
+int ipa_eth_rtk_disconnect(
+	struct ipa_eth_client_pipe_info *pipe,
+	enum ipa_client_type client_type);
+
+int ipa_eth_aqc_disconnect(
+	struct ipa_eth_client_pipe_info *pipe,
+	enum ipa_client_type client_type);
+
+int ipa_eth_emac_disconnect(
+	struct ipa_eth_client_pipe_info *pipe,
+	enum ipa_client_type client_type);
+
+int ipa_eth_client_conn_evt(struct ipa_ecm_msg *msg);
+
+int ipa_eth_client_disconn_evt(struct ipa_ecm_msg *msg);
+
 #endif /* _IPA_COMMON_I_H_ */

+ 75 - 10
drivers/platform/msm/ipa/ipa_v3/ipa_client.c

@@ -762,23 +762,23 @@ int ipa3_get_usb_gsi_stats(struct ipa_uc_dbg_ring_stats *stats)
 	}
 	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
 	for (i = 0; i < MAX_USB_CHANNELS; i++) {
-		stats->ring[i].ringFull = ioread32(
+		stats->u.ring[i].ringFull = ioread32(
 			ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_mmio
 			+ i * IPA3_UC_DEBUG_STATS_OFF +
 			IPA3_UC_DEBUG_STATS_RINGFULL_OFF);
-		stats->ring[i].ringEmpty = ioread32(
+		stats->u.ring[i].ringEmpty = ioread32(
 			ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_mmio
 			+ i * IPA3_UC_DEBUG_STATS_OFF +
 			IPA3_UC_DEBUG_STATS_RINGEMPTY_OFF);
-		stats->ring[i].ringUsageHigh = ioread32(
+		stats->u.ring[i].ringUsageHigh = ioread32(
 			ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_mmio
 			+ i * IPA3_UC_DEBUG_STATS_OFF +
 			IPA3_UC_DEBUG_STATS_RINGUSAGEHIGH_OFF);
-		stats->ring[i].ringUsageLow = ioread32(
+		stats->u.ring[i].ringUsageLow = ioread32(
 			ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_mmio
 			+ i * IPA3_UC_DEBUG_STATS_OFF +
 			IPA3_UC_DEBUG_STATS_RINGUSAGELOW_OFF);
-		stats->ring[i].RingUtilCount = ioread32(
+		stats->u.ring[i].RingUtilCount = ioread32(
 			ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_mmio
 			+ i * IPA3_UC_DEBUG_STATS_OFF +
 			IPA3_UC_DEBUG_STATS_RINGUTILCOUNT_OFF);
@@ -1939,23 +1939,23 @@ int ipa3_get_aqc_gsi_stats(struct ipa_uc_dbg_ring_stats *stats)
 	}
 	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
 	for (i = 0; i < MAX_AQC_CHANNELS; i++) {
-		stats->ring[i].ringFull = ioread32(
+		stats->u.ring[i].ringFull = ioread32(
 			ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio
 			+ i * IPA3_UC_DEBUG_STATS_OFF +
 			IPA3_UC_DEBUG_STATS_RINGFULL_OFF);
-		stats->ring[i].ringEmpty = ioread32(
+		stats->u.ring[i].ringEmpty = ioread32(
 			ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio
 			+ i * IPA3_UC_DEBUG_STATS_OFF +
 			IPA3_UC_DEBUG_STATS_RINGEMPTY_OFF);
-		stats->ring[i].ringUsageHigh = ioread32(
+		stats->u.ring[i].ringUsageHigh = ioread32(
 			ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio
 			+ i * IPA3_UC_DEBUG_STATS_OFF +
 			IPA3_UC_DEBUG_STATS_RINGUSAGEHIGH_OFF);
-		stats->ring[i].ringUsageLow = ioread32(
+		stats->u.ring[i].ringUsageLow = ioread32(
 			ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio
 			+ i * IPA3_UC_DEBUG_STATS_OFF +
 			IPA3_UC_DEBUG_STATS_RINGUSAGELOW_OFF);
-		stats->ring[i].RingUtilCount = ioread32(
+		stats->u.ring[i].RingUtilCount = ioread32(
 			ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio
 			+ i * IPA3_UC_DEBUG_STATS_OFF +
 			IPA3_UC_DEBUG_STATS_RINGUTILCOUNT_OFF);
@@ -1963,5 +1963,70 @@ int ipa3_get_aqc_gsi_stats(struct ipa_uc_dbg_ring_stats *stats)
 	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 
 
+	return 0;
+}
+/**
+ * ipa3_get_rtk_gsi_stats() - Query RTK gsi stats from uc
+ * @stats:	[inout] stats blob from client populated by driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa3_get_rtk_gsi_stats(struct ipa_uc_dbg_ring_stats *stats)
+{
+	u64 low, high;
+	int i;
+
+	if (!ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio) {
+		IPAERR("bad parms NULL eth_gsi_stats_mmio\n");
+		return -EINVAL;
+	}
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	for (i = 0; i < MAX_RTK_CHANNELS; i++) {
+		/* per-channel stats block inside the uC debug MMIO region */
+		void __iomem *base =
+			ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio +
+			i * IPA3_UC_DEBUG_STATS_RTK_OFF;
+		struct ipa_uc_dbg_rtk_ring_stats *ch = &stats->u.rtk[i];
+
+		ch->commStats.ringFull =
+			ioread32(base + IPA3_UC_DEBUG_STATS_RINGFULL_OFF);
+		ch->commStats.ringEmpty =
+			ioread32(base + IPA3_UC_DEBUG_STATS_RINGEMPTY_OFF);
+		ch->commStats.ringUsageHigh =
+			ioread32(base + IPA3_UC_DEBUG_STATS_RINGUSAGEHIGH_OFF);
+		ch->commStats.ringUsageLow =
+			ioread32(base + IPA3_UC_DEBUG_STATS_RINGUSAGELOW_OFF);
+		ch->commStats.RingUtilCount =
+			ioread32(base + IPA3_UC_DEBUG_STATS_RINGUTILCOUNT_OFF);
+		ch->trCount =
+			ioread32(base + IPA3_UC_DEBUG_STATS_TRCOUNT_OFF);
+		ch->erCount =
+			ioread32(base + IPA3_UC_DEBUG_STATS_ERCOUNT_OFF);
+		ch->totalAosCount =
+			ioread32(base + IPA3_UC_DEBUG_STATS_AOSCOUNT_OFF);
+		/* busyTime is a 64-bit counter read as two 32-bit halves */
+		low = ioread32(base + IPA3_UC_DEBUG_STATS_BUSYTIME_OFF);
+		high = ioread32(base + IPA3_UC_DEBUG_STATS_BUSYTIME_OFF +
+			sizeof(u32));
+		ch->busyTime = low | (high << 32);
+	}
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+
+	return 0;
+}

+ 404 - 61
drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c

@@ -34,6 +34,14 @@ struct ipa3_debugfs_file {
 	const struct file_operations fops;
 };
 
+static const char * const ipa_eth_clients_strings[] = {
+	__stringify(AQC107),
+	__stringify(AQC113),
+	__stringify(RTK8111K),
+	__stringify(RTK8125B),
+	__stringify(NTN),
+	__stringify(EMAC),
+};
 
 const char *ipa3_event_name[IPA_EVENT_MAX_NUM] = {
 	__stringify(WLAN_CLIENT_CONNECT),
@@ -102,6 +110,7 @@ const char *ipa3_hdr_proc_type_name[] = {
 };
 
 static struct dentry *dent;
+static struct dentry *dent_eth;
 static char dbg_buff[IPA_MAX_MSG_LEN + 1];
 static char *active_clients_buf;
 
@@ -2327,11 +2336,11 @@ static ssize_t ipa3_read_wdi_gsi_stats(struct file *file,
 			"TX ringUsageHigh=%u\n"
 			"TX ringUsageLow=%u\n"
 			"TX RingUtilCount=%u\n",
-			stats.ring[1].ringFull,
-			stats.ring[1].ringEmpty,
-			stats.ring[1].ringUsageHigh,
-			stats.ring[1].ringUsageLow,
-			stats.ring[1].RingUtilCount);
+			stats.u.ring[1].ringFull,
+			stats.u.ring[1].ringEmpty,
+			stats.u.ring[1].ringUsageHigh,
+			stats.u.ring[1].ringUsageLow,
+			stats.u.ring[1].RingUtilCount);
 		cnt += nbytes;
 		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
 			"RX ringFull=%u\n"
@@ -2339,11 +2348,11 @@ static ssize_t ipa3_read_wdi_gsi_stats(struct file *file,
 			"RX ringUsageHigh=%u\n"
 			"RX ringUsageLow=%u\n"
 			"RX RingUtilCount=%u\n",
-			stats.ring[0].ringFull,
-			stats.ring[0].ringEmpty,
-			stats.ring[0].ringUsageHigh,
-			stats.ring[0].ringUsageLow,
-			stats.ring[0].RingUtilCount);
+			stats.u.ring[0].ringFull,
+			stats.u.ring[0].ringEmpty,
+			stats.u.ring[0].ringUsageHigh,
+			stats.u.ring[0].ringUsageLow,
+			stats.u.ring[0].RingUtilCount);
 		cnt += nbytes;
 	} else {
 		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
@@ -2374,11 +2383,11 @@ static ssize_t ipa3_read_wdi3_gsi_stats(struct file *file,
 			"TX ringUsageHigh=%u\n"
 			"TX ringUsageLow=%u\n"
 			"TX RingUtilCount=%u\n",
-			stats.ring[1].ringFull,
-			stats.ring[1].ringEmpty,
-			stats.ring[1].ringUsageHigh,
-			stats.ring[1].ringUsageLow,
-			stats.ring[1].RingUtilCount);
+			stats.u.ring[1].ringFull,
+			stats.u.ring[1].ringEmpty,
+			stats.u.ring[1].ringUsageHigh,
+			stats.u.ring[1].ringUsageLow,
+			stats.u.ring[1].RingUtilCount);
 		cnt += nbytes;
 		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
 			"RX ringFull=%u\n"
@@ -2386,11 +2395,11 @@ static ssize_t ipa3_read_wdi3_gsi_stats(struct file *file,
 			"RX ringUsageHigh=%u\n"
 			"RX ringUsageLow=%u\n"
 			"RX RingUtilCount=%u\n",
-			stats.ring[0].ringFull,
-			stats.ring[0].ringEmpty,
-			stats.ring[0].ringUsageHigh,
-			stats.ring[0].ringUsageLow,
-			stats.ring[0].RingUtilCount);
+			stats.u.ring[0].ringFull,
+			stats.u.ring[0].ringEmpty,
+			stats.u.ring[0].ringUsageHigh,
+			stats.u.ring[0].ringUsageLow,
+			stats.u.ring[0].RingUtilCount);
 		cnt += nbytes;
 	} else {
 		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
@@ -2439,11 +2448,11 @@ static ssize_t ipa3_read_aqc_gsi_stats(struct file *file,
 			"TX ringUsageHigh=%u\n"
 			"TX ringUsageLow=%u\n"
 			"TX RingUtilCount=%u\n",
-			stats.ring[1].ringFull,
-			stats.ring[1].ringEmpty,
-			stats.ring[1].ringUsageHigh,
-			stats.ring[1].ringUsageLow,
-			stats.ring[1].RingUtilCount);
+			stats.u.ring[1].ringFull,
+			stats.u.ring[1].ringEmpty,
+			stats.u.ring[1].ringUsageHigh,
+			stats.u.ring[1].ringUsageLow,
+			stats.u.ring[1].RingUtilCount);
 		cnt += nbytes;
 		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
 			"RX ringFull=%u\n"
@@ -2451,11 +2460,11 @@ static ssize_t ipa3_read_aqc_gsi_stats(struct file *file,
 			"RX ringUsageHigh=%u\n"
 			"RX ringUsageLow=%u\n"
 			"RX RingUtilCount=%u\n",
-			stats.ring[0].ringFull,
-			stats.ring[0].ringEmpty,
-			stats.ring[0].ringUsageHigh,
-			stats.ring[0].ringUsageLow,
-			stats.ring[0].RingUtilCount);
+			stats.u.ring[0].ringFull,
+			stats.u.ring[0].ringEmpty,
+			stats.u.ring[0].ringUsageHigh,
+			stats.u.ring[0].ringUsageLow,
+			stats.u.ring[0].RingUtilCount);
 		cnt += nbytes;
 	} else {
 		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
@@ -2486,11 +2495,11 @@ static ssize_t ipa3_read_mhip_gsi_stats(struct file *file,
 			"IPA_CLIENT_MHI_PRIME_TETH_CONS ringUsageHigh=%u\n"
 			"IPA_CLIENT_MHI_PRIME_TETH_CONS ringUsageLow=%u\n"
 			"IPA_CLIENT_MHI_PRIME_TETH_CONS RingUtilCount=%u\n",
-			stats.ring[1].ringFull,
-			stats.ring[1].ringEmpty,
-			stats.ring[1].ringUsageHigh,
-			stats.ring[1].ringUsageLow,
-			stats.ring[1].RingUtilCount);
+			stats.u.ring[1].ringFull,
+			stats.u.ring[1].ringEmpty,
+			stats.u.ring[1].ringUsageHigh,
+			stats.u.ring[1].ringUsageLow,
+			stats.u.ring[1].RingUtilCount);
 		cnt += nbytes;
 		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
 			"IPA_CLIENT_MHI_PRIME_TETH_PROD ringFull=%u\n"
@@ -2498,11 +2507,11 @@ static ssize_t ipa3_read_mhip_gsi_stats(struct file *file,
 			"IPA_CLIENT_MHI_PRIME_TETH_PROD ringUsageHigh=%u\n"
 			"IPA_CLIENT_MHI_PRIME_TETH_PROD ringUsageLow=%u\n"
 			"IPA_CLIENT_MHI_PRIME_TETH_PROD RingUtilCount=%u\n",
-			stats.ring[0].ringFull,
-			stats.ring[0].ringEmpty,
-			stats.ring[0].ringUsageHigh,
-			stats.ring[0].ringUsageLow,
-			stats.ring[0].RingUtilCount);
+			stats.u.ring[0].ringFull,
+			stats.u.ring[0].ringEmpty,
+			stats.u.ring[0].ringUsageHigh,
+			stats.u.ring[0].ringUsageLow,
+			stats.u.ring[0].RingUtilCount);
 		cnt += nbytes;
 		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
 			"IPA_CLIENT_MHI_PRIME_RMNET_CONS ringFull=%u\n"
@@ -2510,11 +2519,11 @@ static ssize_t ipa3_read_mhip_gsi_stats(struct file *file,
 			"IPA_CLIENT_MHI_PRIME_RMNET_CONS ringUsageHigh=%u\n"
 			"IPA_CLIENT_MHI_PRIME_RMNET_CONS ringUsageLow=%u\n"
 			"IPA_CLIENT_MHI_PRIME_RMNET_CONS RingUtilCount=%u\n",
-			stats.ring[3].ringFull,
-			stats.ring[3].ringEmpty,
-			stats.ring[3].ringUsageHigh,
-			stats.ring[3].ringUsageLow,
-			stats.ring[3].RingUtilCount);
+			stats.u.ring[3].ringFull,
+			stats.u.ring[3].ringEmpty,
+			stats.u.ring[3].ringUsageHigh,
+			stats.u.ring[3].ringUsageLow,
+			stats.u.ring[3].RingUtilCount);
 		cnt += nbytes;
 		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
 			"IPA_CLIENT_MHI_PRIME_RMNET_PROD ringFull=%u\n"
@@ -2522,11 +2531,11 @@ static ssize_t ipa3_read_mhip_gsi_stats(struct file *file,
 			"IPA_CLIENT_MHI_PRIME_RMNET_PROD ringUsageHigh=%u\n"
 			"IPA_CLIENT_MHI_PRIME_RMNET_PROD ringUsageLow=%u\n"
 			"IPA_CLIENT_MHI_PRIME_RMNET_PROD RingUtilCount=%u\n",
-			stats.ring[2].ringFull,
-			stats.ring[2].ringEmpty,
-			stats.ring[2].ringUsageHigh,
-			stats.ring[2].ringUsageLow,
-			stats.ring[2].RingUtilCount);
+			stats.u.ring[2].ringFull,
+			stats.u.ring[2].ringEmpty,
+			stats.u.ring[2].ringUsageHigh,
+			stats.u.ring[2].ringUsageLow,
+			stats.u.ring[2].RingUtilCount);
 		cnt += nbytes;
 	} else {
 		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
@@ -2558,11 +2567,11 @@ static ssize_t ipa3_read_usb_gsi_stats(struct file *file,
 			"TX ringUsageHigh=%u\n"
 			"TX ringUsageLow=%u\n"
 			"TX RingUtilCount=%u\n",
-			stats.ring[1].ringFull,
-			stats.ring[1].ringEmpty,
-			stats.ring[1].ringUsageHigh,
-			stats.ring[1].ringUsageLow,
-			stats.ring[1].RingUtilCount);
+			stats.u.ring[1].ringFull,
+			stats.u.ring[1].ringEmpty,
+			stats.u.ring[1].ringUsageHigh,
+			stats.u.ring[1].ringUsageLow,
+			stats.u.ring[1].RingUtilCount);
 		cnt += nbytes;
 		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
 			"RX ringFull=%u\n"
@@ -2570,11 +2579,11 @@ static ssize_t ipa3_read_usb_gsi_stats(struct file *file,
 			"RX ringUsageHigh=%u\n"
 			"RX ringUsageLow=%u\n"
 			"RX RingUtilCount=%u\n",
-			stats.ring[0].ringFull,
-			stats.ring[0].ringEmpty,
-			stats.ring[0].ringUsageHigh,
-			stats.ring[0].ringUsageLow,
-			stats.ring[0].RingUtilCount);
+			stats.u.ring[0].ringFull,
+			stats.u.ring[0].ringEmpty,
+			stats.u.ring[0].ringUsageHigh,
+			stats.u.ring[0].ringUsageLow,
+			stats.u.ring[0].RingUtilCount);
 		cnt += nbytes;
 	} else {
 		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
@@ -2905,7 +2914,7 @@ void ipa3_debugfs_init(void)
 	size_t i;
 	struct dentry *file;
 
-	dent = debugfs_create_dir("ipa", 0);
+	dent = debugfs_create_dir("ipa", NULL);
 	if (IS_ERR(dent)) {
 		IPAERR("fail to create folder in debug_fs.\n");
 		return;
@@ -3011,6 +3020,338 @@ struct dentry *ipa_debugfs_get_root(void)
 }
 EXPORT_SYMBOL(ipa_debugfs_get_root);
 
+/*
+ * debugfs read handler: dump the protocol/instance/pipe/channel mapping
+ * table recorded by ipa3_eth_save_client_mapping for every registered
+ * ethernet client.
+ */
+static ssize_t ipa3_eth_read_status(struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int nbytes;
+	int cnt = 0;
+	int i, j, k, type;
+	struct ipa3_eth_info eth_info;
+
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"This feature only support on IPA4.5+\n");
+		cnt += nbytes;
+		goto done;
+	}
+
+	nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"%15s|%10s|%10s|%30s|%10s|%10s\n", "protocol",
+			"instance", "pipe_hdl", "pipe_enum",
+			"pipe_id", "ch_id");
+	cnt += nbytes;
+	for (i = 0; i < IPA_ETH_CLIENT_MAX; i++) {
+		for (j = 0; j < IPA_ETH_INST_ID_MAX; j++) {
+			eth_info = ipa3_ctx->eth_info[i][j];
+			for (k = 0; k < eth_info.num_ch; k++) {
+				/* fix: validity is per-channel (index k),
+				 * not per-instance (index j)
+				 */
+				if (eth_info.map[k].valid) {
+					type = eth_info.map[k].type;
+					nbytes = scnprintf(dbg_buff + cnt,
+						IPA_MAX_MSG_LEN - cnt,
+						"%15s|%10d|%10d|%30s|%10d|%10d\n",
+						ipa_eth_clients_strings[i],
+						j,
+						eth_info.map[k].pipe_hdl,
+						ipa_clients_strings[type],
+						eth_info.map[k].pipe_id,
+						eth_info.map[k].ch_id);
+					cnt += nbytes;
+				}
+			}
+		}
+	}
+done:
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+/* read-only debugfs ops for the "eth/status" mapping-table node */
+static const struct file_operations fops_ipa_eth_status = {
+	.read = ipa3_eth_read_status,
+};
+
+/*
+ * Create the "eth" debugfs directory under the IPA root and its
+ * "status" node. Cleans up the directory if node creation fails.
+ */
+void ipa3_eth_debugfs_init(void)
+{
+	struct dentry *file;
+
+	if (IS_ERR_OR_NULL(dent)) {
+		IPAERR("debugfs root not created\n");
+		return;
+	}
+	dent_eth = debugfs_create_dir("eth", dent);
+	/* fix: validate the directory just created, not the parent */
+	if (IS_ERR_OR_NULL(dent_eth)) {
+		IPAERR("fail to create folder in debug_fs.\n");
+		return;
+	}
+	file = debugfs_create_file("status", IPA_READ_ONLY_MODE,
+		dent_eth, NULL, &fops_ipa_eth_status);
+	if (!file) {
+		IPAERR("could not create status\n");
+		goto fail;
+	}
+	return;
+
+fail:
+	debugfs_remove_recursive(dent_eth);
+}
+
+/*
+ * debugfs read handler for the per-client "<name>_<inst>_stats" node:
+ * fetch the uC GSI ring debug counters for the client's protocol
+ * (AQC or RTK) and format them, TX ring first (index 1) then RX (index 0).
+ */
+static ssize_t ipa3_eth_read_perf_status(struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int nbytes;
+	int cnt = 0;
+	struct ipa_eth_client *client;
+	struct ipa_uc_dbg_ring_stats stats;
+	int tx_ep, rx_ep;
+	int ret;
+
+	/* supported on IPA4.5+, or IPA4.1 on APQ platforms */
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5
+		&& (ipa3_ctx->ipa_hw_type != IPA_HW_v4_1
+		|| ipa3_ctx->platform_type != IPA_PLAT_TYPE_APQ)) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"This feature only support on IPA4.5+\n");
+		cnt += nbytes;
+		goto done;
+	}
+	client = (struct ipa_eth_client *)file->private_data;
+	switch (client->client_type) {
+	case IPA_ETH_CLIENT_AQC107:
+	case IPA_ETH_CLIENT_AQC113:
+		ret = ipa3_get_aqc_gsi_stats(&stats);
+		tx_ep = IPA_CLIENT_AQC_ETHERNET_CONS;
+		rx_ep = IPA_CLIENT_AQC_ETHERNET_PROD;
+		if (!ret) {
+			nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"%s_ringFull=%u\n"
+			"%s_ringEmpty=%u\n"
+			"%s_ringUsageHigh=%u\n"
+			"%s_ringUsageLow=%u\n"
+			"%s_RingUtilCount=%u\n",
+			ipa_clients_strings[tx_ep],
+			stats.u.ring[1].ringFull,
+			ipa_clients_strings[tx_ep],
+			stats.u.ring[1].ringEmpty,
+			ipa_clients_strings[tx_ep],
+			stats.u.ring[1].ringUsageHigh,
+			ipa_clients_strings[tx_ep],
+			stats.u.ring[1].ringUsageLow,
+			ipa_clients_strings[tx_ep],
+			stats.u.ring[1].RingUtilCount);
+			cnt += nbytes;
+			nbytes = scnprintf(dbg_buff + cnt,
+			IPA_MAX_MSG_LEN - cnt,
+			"%s_ringFull=%u\n"
+			"%s_ringEmpty=%u\n"
+			"%s_ringUsageHigh=%u\n"
+			"%s_ringUsageLow=%u\n"
+			"%s_RingUtilCount=%u\n",
+			ipa_clients_strings[rx_ep],
+			stats.u.ring[0].ringFull,
+			ipa_clients_strings[rx_ep],
+			stats.u.ring[0].ringEmpty,
+			ipa_clients_strings[rx_ep],
+			stats.u.ring[0].ringUsageHigh,
+			ipa_clients_strings[rx_ep],
+			stats.u.ring[0].ringUsageLow,
+			ipa_clients_strings[rx_ep],
+			stats.u.ring[0].RingUtilCount);
+			cnt += nbytes;
+		} else {
+			nbytes = scnprintf(dbg_buff,
+				IPA_MAX_MSG_LEN,
+				"Fail to read AQC GSI stats\n");
+			cnt += nbytes;
+		}
+		break;
+	case IPA_ETH_CLIENT_RTK8111K:
+	case IPA_ETH_CLIENT_RTK8125B:
+		ret = ipa3_get_rtk_gsi_stats(&stats);
+		tx_ep = IPA_CLIENT_RTK_ETHERNET_CONS;
+		rx_ep = IPA_CLIENT_RTK_ETHERNET_PROD;
+		if (!ret) {
+			/* fix: label typo "erCound" corrected to "erCount" */
+			nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"%s_ringFull=%u\n"
+			"%s_ringEmpty=%u\n"
+			"%s_ringUsageHigh=%u\n"
+			"%s_ringUsageLow=%u\n"
+			"%s_RingUtilCount=%u\n"
+			"%s_trCount=%u\n"
+			"%s_erCount=%u\n"
+			"%s_totalAoSCount=%u\n"
+			"%s_busytime=%llu\n",
+			ipa_clients_strings[tx_ep],
+			stats.u.rtk[1].commStats.ringFull,
+			ipa_clients_strings[tx_ep],
+			stats.u.rtk[1].commStats.ringEmpty,
+			ipa_clients_strings[tx_ep],
+			stats.u.rtk[1].commStats.ringUsageHigh,
+			ipa_clients_strings[tx_ep],
+			stats.u.rtk[1].commStats.ringUsageLow,
+			ipa_clients_strings[tx_ep],
+			stats.u.rtk[1].commStats.RingUtilCount,
+			ipa_clients_strings[tx_ep],
+			stats.u.rtk[1].trCount,
+			ipa_clients_strings[tx_ep],
+			stats.u.rtk[1].erCount,
+			ipa_clients_strings[tx_ep],
+			stats.u.rtk[1].totalAosCount,
+			ipa_clients_strings[tx_ep],
+			stats.u.rtk[1].busyTime);
+			cnt += nbytes;
+			nbytes = scnprintf(dbg_buff + cnt,
+			IPA_MAX_MSG_LEN - cnt,
+			"%s_ringFull=%u\n"
+			"%s_ringEmpty=%u\n"
+			"%s_ringUsageHigh=%u\n"
+			"%s_ringUsageLow=%u\n"
+			"%s_RingUtilCount=%u\n"
+			"%s_trCount=%u\n"
+			"%s_erCount=%u\n"
+			"%s_totalAoSCount=%u\n"
+			"%s_busytime=%llu\n",
+			ipa_clients_strings[rx_ep],
+			stats.u.rtk[0].commStats.ringFull,
+			ipa_clients_strings[rx_ep],
+			stats.u.rtk[0].commStats.ringEmpty,
+			ipa_clients_strings[rx_ep],
+			stats.u.rtk[0].commStats.ringUsageHigh,
+			ipa_clients_strings[rx_ep],
+			stats.u.rtk[0].commStats.ringUsageLow,
+			ipa_clients_strings[rx_ep],
+			stats.u.rtk[0].commStats.RingUtilCount,
+			ipa_clients_strings[rx_ep],
+			stats.u.rtk[0].trCount,
+			ipa_clients_strings[rx_ep],
+			stats.u.rtk[0].erCount,
+			ipa_clients_strings[rx_ep],
+			stats.u.rtk[0].totalAosCount,
+			ipa_clients_strings[rx_ep],
+			stats.u.rtk[0].busyTime);
+			cnt += nbytes;
+		} else {
+			/* fix: error text said "AQC" in the RTK branch */
+			nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"Fail to read RTK GSI stats\n");
+			cnt += nbytes;
+		}
+		break;
+	default:
+		ret = -EFAULT;
+	}
+
+done:
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+/*
+ * debugfs read handler for the per-client "<name>_<inst>_status" node:
+ * report ring read/write pointers and the scratch-5 error word for the
+ * client's TX/RX endpoints.
+ */
+static ssize_t ipa3_eth_read_err_status(struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int nbytes;
+	int cnt = 0;
+	struct ipa_eth_client *client;
+	int tx_ep, rx_ep;
+	/* fix: zero-initialize; the AQC branch below does not fetch stats,
+	 * so without this the function printed uninitialized stack data.
+	 */
+	struct ipa3_eth_error_stats tx_stats = {0};
+	struct ipa3_eth_error_stats rx_stats = {0};
+
+	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5
+		&& (ipa3_ctx->ipa_hw_type != IPA_HW_v4_1
+		|| ipa3_ctx->platform_type != IPA_PLAT_TYPE_APQ)) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"This feature only support on IPA4.5+\n");
+		cnt += nbytes;
+		goto done;
+	}
+	client = (struct ipa_eth_client *)file->private_data;
+	switch (client->client_type) {
+	case IPA_ETH_CLIENT_AQC107:
+	case IPA_ETH_CLIENT_AQC113:
+		/* NOTE(review): AQC path does not fetch stats here, so the
+		 * values below print as zero - confirm intended.
+		 */
+		tx_ep = IPA_CLIENT_AQC_ETHERNET_CONS;
+		rx_ep = IPA_CLIENT_AQC_ETHERNET_PROD;
+		break;
+	case IPA_ETH_CLIENT_RTK8111K:
+	case IPA_ETH_CLIENT_RTK8125B:
+		tx_ep = IPA_CLIENT_RTK_ETHERNET_CONS;
+		rx_ep = IPA_CLIENT_RTK_ETHERNET_PROD;
+		ipa3_eth_get_status(tx_ep, 5, &tx_stats);
+		ipa3_eth_get_status(rx_ep, 5, &rx_stats);
+		break;
+	default:
+		IPAERR("Not supported\n");
+		return 0;
+	}
+	nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+		"%s_RP=0x%x\n"
+		"%s_WP=0x%x\n"
+		"%s_SCRATCH5=0x%x\n",
+		ipa_clients_strings[tx_ep],
+		tx_stats.rp,
+		ipa_clients_strings[tx_ep],
+		tx_stats.wp,
+		ipa_clients_strings[tx_ep],
+		tx_stats.err);
+	cnt += nbytes;
+	nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+		"%s_RP=0x%x\n"
+		"%s_WP=0x%x\n"
+		"%s_SCRATCH5=0x%x\n"
+		"%s_err:%u\n",
+		ipa_clients_strings[rx_ep],
+		rx_stats.rp,
+		ipa_clients_strings[rx_ep],
+		rx_stats.wp,
+		ipa_clients_strings[rx_ep],
+		rx_stats.err,
+		ipa_clients_strings[rx_ep],
+		rx_stats.err & 0xff);
+	cnt += nbytes;
+done:
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+/* read-only debugfs ops for the per-client "<name>_<inst>_stats" node */
+static const struct file_operations fops_ipa_eth_stats = {
+	.read = ipa3_eth_read_perf_status,
+	.open = ipa3_open_dbg,
+};
+/* read-only debugfs ops for the per-client "<name>_<inst>_status" node */
+static const struct file_operations fops_ipa_eth_client_status = {
+	.read = ipa3_eth_read_err_status,
+	.open = ipa3_open_dbg,
+};
+/*
+ * Add per-client debugfs nodes "<client>_<inst>_stats" and
+ * "<client>_<inst>_status" under the "eth" directory. The client pointer
+ * is stored as the file's private data for the read handlers.
+ */
+void ipa3_eth_debugfs_add_node(struct ipa_eth_client *client)
+{
+	struct dentry *file;
+	int type, inst_id;
+	char name[IPA_RESOURCE_NAME_MAX];
+
+	if (IS_ERR_OR_NULL(dent_eth)) {
+		IPAERR("debugfs eth root not created\n");
+		return;
+	}
+
+	if (client == NULL) {
+		IPAERR_RL("invalid input\n");
+		return;
+	}
+
+	type = client->client_type;
+	inst_id = client->inst_id;
+	snprintf(name, IPA_RESOURCE_NAME_MAX,
+		"%s_%d_stats", ipa_eth_clients_strings[type], inst_id);
+	file = debugfs_create_file(name, IPA_READ_ONLY_MODE,
+		dent_eth, (void *)client, &fops_ipa_eth_stats);
+	/* fix: error messages said "hw_type" (copy-paste) */
+	if (!file) {
+		IPAERR("could not create %s file\n", name);
+		return;
+	}
+	snprintf(name, IPA_RESOURCE_NAME_MAX,
+		"%s_%d_status", ipa_eth_clients_strings[type], inst_id);
+	file = debugfs_create_file(name, IPA_READ_ONLY_MODE,
+		dent_eth, (void *)client, &fops_ipa_eth_client_status);
+	if (!file) {
+		IPAERR("could not create %s file\n", name);
+		goto fail;
+	}
+	return;
+fail:
+	debugfs_remove_recursive(dent_eth);
+}
+
 #else /* !CONFIG_DEBUG_FS */
 #define INVALID_NO_OF_CHAR (-1)
 void ipa3_debugfs_init(void) {}
@@ -3023,4 +3364,6 @@ int _ipa_read_ep_reg_v4_0(char *buf, int max_len, int pipe)
 {
 	return INVALID_NO_OF_CHAR;
 }
+void ipa3_eth_debugfs_init(void) {}
+/* fix: stub must match the CONFIG_DEBUG_FS implementation's name
+ * (ipa3_eth_debugfs_add_node), otherwise callers fail to link when
+ * debugfs is disabled.
+ */
+void ipa3_eth_debugfs_add_node(struct ipa_eth_client *client) {}
 #endif

+ 698 - 0
drivers/platform/msm/ipa/ipa_v3/ipa_eth_i.c

@@ -0,0 +1,698 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+#include "ipa_i.h"
+#include <linux/if_vlan.h>
+#include <linux/ipa_eth.h>
+#include <linux/log2.h>
+
+#define IPA_ETH_RTK_MODT (32)
+#define IPA_ETH_RTK_MODC (128)
+
+#define IPA_ETH_AGGR_PKT_LIMIT 1
+#define IPA_ETH_AGGR_BYTE_LIMIT 2 /*2 Kbytes Agger hard byte limit*/
+
+#define IPA_ETH_MBOX_M (1)
+#define IPA_ETH_RX_MBOX_N (20)
+#define IPA_ETH_TX_MBOX_N (21)
+
+#define IPA_ETH_RX_MBOX_VAL (1)
+#define IPA_ETH_TX_MBOX_VAL (2)
+
+#define IPA_ETH_PCIE_MASK BIT_ULL(40)
+#define IPA_ETH_PCIE_SET(val) (val | IPA_ETH_PCIE_MASK)
+
+/* pipe direction identifiers passed to the uC (see ipa3_eth_config_uc) */
+enum ipa_eth_dir {
+	IPA_ETH_RX = 0,
+	IPA_ETH_TX = 1,
+};
+
+/*
+ * Record the client-type/instance -> (client enum, pipe, channel)
+ * mapping for later debugfs reporting. A slot already marked valid is
+ * left untouched; num_ch counts the valid slots per instance.
+ */
+static void ipa3_eth_save_client_mapping(
+	struct ipa_eth_client_pipe_info *pipe,
+	enum ipa_client_type type, int id,
+	int pipe_id, int ch_id)
+{
+	struct ipa_eth_client *client_info = pipe->client_info;
+	enum ipa_eth_client_type client_type = client_info->client_type;
+	u8 inst_id = client_info->inst_id;
+	u8 pipe_hdl = pipe->pipe_hdl;
+	struct ipa3_eth_info *eth_info =
+		&ipa3_ctx->eth_info[client_type][inst_id];
+
+	if (eth_info->map[id].valid)
+		return;
+
+	eth_info->num_ch++;
+	eth_info->map[id].type = type;
+	eth_info->map[id].pipe_id = pipe_id;
+	eth_info->map[id].ch_id = ch_id;
+	eth_info->map[id].valid = true;
+	eth_info->map[id].pipe_hdl = pipe_hdl;
+}
+
+/*
+ * Send a channel setup (init=true) or teardown (init=false) command for
+ * the given offload protocol/direction/GSI channel to the IPA uC.
+ * Allocates a DMA-coherent command buffer, posts it via ipa3_uc_send_cmd,
+ * and frees the buffer. Returns 0 on success or a negative errno.
+ */
+static int ipa3_eth_config_uc(bool init,
+	u8 protocol,
+	u8 dir,
+	u8 gsi_ch)
+{
+	struct ipa_mem_buffer cmd;
+	enum ipa_cpu_2_hw_offload_commands command;
+	int result;
+
+	IPADBG("config uc %s\n", init ? "init" : "Deinit");
+	if (init) {
+		struct IpaHwOffloadSetUpCmdData_t_v4_0 *cmd_data;
+
+		cmd.size = sizeof(*cmd_data);
+		cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+			&cmd.phys_base, GFP_KERNEL);
+		if (cmd.base == NULL) {
+			IPAERR("fail to get DMA memory.\n");
+			return -ENOMEM;
+		}
+		cmd_data =
+			(struct IpaHwOffloadSetUpCmdData_t_v4_0 *)cmd.base;
+		cmd_data->protocol = protocol;
+		switch (protocol) {
+		case IPA_HW_PROTOCOL_AQC:
+			/* TODO: add support for AQC */
+			break;
+		case IPA_HW_PROTOCOL_RTK:
+			cmd_data->SetupCh_params.RtkSetupCh_params.dir =
+				dir;
+			cmd_data->SetupCh_params.RtkSetupCh_params.gsi_ch =
+				gsi_ch;
+			break;
+		default:
+			IPAERR("invalid protocol%d\n", protocol);
+		}
+		command = IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP;
+
+	} else {
+		struct IpaHwOffloadCommonChCmdData_t_v4_0 *cmd_data;
+
+		cmd.size = sizeof(*cmd_data);
+		cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+			&cmd.phys_base, GFP_KERNEL);
+		if (cmd.base == NULL) {
+			IPAERR("fail to get DMA memory.\n");
+			return -ENOMEM;
+		}
+
+		cmd_data =
+			(struct IpaHwOffloadCommonChCmdData_t_v4_0 *)cmd.base;
+
+		cmd_data->protocol = protocol;
+		switch (protocol) {
+		case IPA_HW_PROTOCOL_AQC:
+			/* TODO: add support for AQC */
+			break;
+		case IPA_HW_PROTOCOL_RTK:
+			cmd_data->CommonCh_params.RtkCommonCh_params.gsi_ch =
+				gsi_ch;
+			break;
+		default:
+			IPAERR("invalid protocol%d\n", protocol);
+		}
+		/* fix: removed a duplicate unconditional write of the RTK
+		 * union member here; it clobbered the command data for
+		 * non-RTK protocols after the switch above.
+		 */
+		command = IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+		command,
+		IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+		false, 10 * HZ);
+	if (result) {
+		IPAERR("fail to %s uc for %s gsi channel %d\n",
+			init ? "init" : "deinit",
+			dir == IPA_ETH_RX ? "Rx" : "Tx", gsi_ch);
+	}
+
+	dma_free_coherent(ipa3_ctx->uc_pdev,
+		cmd.size, cmd.base, cmd.phys_base);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	IPADBG("exit\n");
+	return result;
+}
+
+
+/*
+ * GSI event-ring error callback: log the error identifier and assert.
+ * All event-ring errors are treated as fatal for the offload path.
+ */
+static void ipa_eth_gsi_evt_ring_err_cb(struct gsi_evt_err_notify *notify)
+{
+	const char *err_name = NULL;
+
+	switch (notify->evt_id) {
+	case GSI_EVT_OUT_OF_BUFFERS_ERR:
+		err_name = "GSI_EVT_OUT_OF_BUFFERS_ERR";
+		break;
+	case GSI_EVT_OUT_OF_RESOURCES_ERR:
+		err_name = "GSI_EVT_OUT_OF_RESOURCES_ERR";
+		break;
+	case GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR:
+		err_name = "GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR";
+		break;
+	case GSI_EVT_EVT_RING_EMPTY_ERR:
+		err_name = "GSI_EVT_EVT_RING_EMPTY_ERR";
+		break;
+	default:
+		break;
+	}
+	if (err_name)
+		IPAERR("Got %s\n", err_name);
+	else
+		IPAERR("Unexpected err evt: %d\n", notify->evt_id);
+	ipa_assert();
+}
+
+/*
+ * GSI channel error callback: log the error identifier and assert.
+ * All channel errors are treated as fatal for the offload path.
+ */
+static void ipa_eth_gsi_chan_err_cb(struct gsi_chan_err_notify *notify)
+{
+	const char *err_name = NULL;
+
+	switch (notify->evt_id) {
+	case GSI_CHAN_INVALID_TRE_ERR:
+		err_name = "GSI_CHAN_INVALID_TRE_ERR";
+		break;
+	case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR:
+		err_name = "GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR";
+		break;
+	case GSI_CHAN_OUT_OF_BUFFERS_ERR:
+		err_name = "GSI_CHAN_OUT_OF_BUFFERS_ERR";
+		break;
+	case GSI_CHAN_OUT_OF_RESOURCES_ERR:
+		err_name = "GSI_CHAN_OUT_OF_RESOURCES_ERR";
+		break;
+	case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR:
+		err_name = "GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR";
+		break;
+	case GSI_CHAN_HWO_1_ERR:
+		err_name = "GSI_CHAN_HWO_1_ERR";
+		break;
+	default:
+		break;
+	}
+	if (err_name)
+		IPAERR("Got %s\n", err_name);
+	else
+		IPAERR("Unexpected err evt: %d\n", notify->evt_id);
+	ipa_assert();
+}
+
+
+static int ipa_eth_setup_rtk_gsi_channel(
+	struct ipa_eth_client_pipe_info *pipe,
+	struct ipa3_ep_context *ep)
+{
+	struct gsi_evt_ring_props gsi_evt_ring_props;
+	struct gsi_chan_props gsi_channel_props;
+	union __packed gsi_channel_scratch ch_scratch;
+	union __packed gsi_evt_scratch evt_scratch;
+	const struct ipa_gsi_ep_config *gsi_ep_info;
+	int result, len;
+	int queue_number;
+	u64 bar_addr;
+
+	if (unlikely(!pipe->info.is_transfer_ring_valid)) {
+		IPAERR("RTK transfer ring invalid\n");
+		ipa_assert();
+		return -EFAULT;
+	}
+
+	/* setup event ring */
+	bar_addr =
+		IPA_ETH_PCIE_SET(pipe->info.client_info.rtk.bar_addr);
+	memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
+	gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_RTK_EV;
+	gsi_evt_ring_props.intr = GSI_INTR_MSI;
+	gsi_evt_ring_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
+	if (pipe->dir == IPA_ETH_PIPE_DIR_TX) {
+		gsi_evt_ring_props.int_modt = IPA_ETH_RTK_MODT;
+		gsi_evt_ring_props.int_modc = IPA_ETH_RTK_MODC;
+	}
+	gsi_evt_ring_props.exclusive = true;
+	gsi_evt_ring_props.err_cb = ipa_eth_gsi_evt_ring_err_cb;
+	gsi_evt_ring_props.user_data = NULL;
+	gsi_evt_ring_props.msi_addr =
+		bar_addr +
+		pipe->info.client_info.rtk.dest_tail_ptr_offs;
+	len = pipe->info.transfer_ring_size;
+	gsi_evt_ring_props.ring_len = len;
+	gsi_evt_ring_props.ring_base_addr =
+		(u64)pipe->info.transfer_ring_base;
+	result = gsi_alloc_evt_ring(&gsi_evt_ring_props,
+		ipa3_ctx->gsi_dev_hdl,
+		&ep->gsi_evt_ring_hdl);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("fail to alloc RX event ring\n");
+		return -EFAULT;
+	}
+	ep->gsi_mem_info.evt_ring_len =
+		gsi_evt_ring_props.ring_len;
+	ep->gsi_mem_info.evt_ring_base_addr =
+		gsi_evt_ring_props.ring_base_addr;
+
+	/* setup channel ring */
+	memset(&gsi_channel_props, 0, sizeof(gsi_channel_props));
+	gsi_channel_props.prot = GSI_CHAN_PROT_RTK;
+	if (pipe->dir == IPA_ETH_PIPE_DIR_TX)
+		gsi_channel_props.dir = GSI_CHAN_DIR_FROM_GSI;
+	else
+		gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI;
+		gsi_ep_info = ipa3_get_gsi_ep_info(ep->client);
+	if (!gsi_ep_info) {
+		IPAERR("Failed getting GSI EP info for client=%d\n",
+		       ep->client);
+		result = -EINVAL;
+		goto fail_get_gsi_ep_info;
+	} else
+		gsi_channel_props.ch_id = gsi_ep_info->ipa_gsi_chan_num;
+	gsi_channel_props.evt_ring_hdl = ep->gsi_evt_ring_hdl;
+	gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_16B;
+	gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE;
+	gsi_channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
+	gsi_channel_props.prefetch_mode =
+		gsi_ep_info->prefetch_mode;
+	gsi_channel_props.empty_lvl_threshold =
+		gsi_ep_info->prefetch_threshold;
+	gsi_channel_props.low_weight = 1;
+	gsi_channel_props.err_cb = ipa_eth_gsi_chan_err_cb;
+	gsi_channel_props.ring_len = len;
+	gsi_channel_props.ring_base_addr =
+		(u64)pipe->info.transfer_ring_base;
+	result = gsi_alloc_channel(&gsi_channel_props, ipa3_ctx->gsi_dev_hdl,
+		&ep->gsi_chan_hdl);
+	if (result != GSI_STATUS_SUCCESS)
+		goto fail_get_gsi_ep_info;
+	ep->gsi_mem_info.chan_ring_len = gsi_channel_props.ring_len;
+	ep->gsi_mem_info.chan_ring_base_addr =
+		gsi_channel_props.ring_base_addr;
+
+	/* write event scratch */
+	memset(&evt_scratch, 0, sizeof(evt_scratch));
+	/* nothing is needed for RTK event scratch */
+
+	/* write ch scratch */
+	queue_number = pipe->info.client_info.rtk.queue_number;
+	memset(&ch_scratch, 0, sizeof(ch_scratch));
+	ch_scratch.rtk.rtk_bar_low =
+		(u32)bar_addr;
+	ch_scratch.rtk.rtk_bar_high =
+		(u32)((u64)(bar_addr) >> 32);
+	/*
+	 * RX: Queue Number will be as is received from RTK
+	 * (Range 0 - 15).
+	 * TX: Queue Number will be configured to be
+	 * either 16 or 18.
+	 * (For TX Queue 0: Configure 16)
+	 * (For TX Queue 1: Configure 18)
+	 */
+	ch_scratch.rtk.queue_number =
+		(pipe->dir == IPA_ETH_PIPE_DIR_RX) ?
+		pipe->info.client_info.rtk.queue_number :
+		(queue_number == 0) ? 16 : 18;
+	ch_scratch.rtk.fix_buff_size =
+		ilog2(pipe->info.fix_buffer_size);
+	ch_scratch.rtk.rtk_buff_addr_low =
+		(u32)pipe->info.data_buff_list[0].iova;
+	ch_scratch.rtk.rtk_buff_addr_high =
+		(u32)((u64)(pipe->info.data_buff_list[0].iova) >> 32);
+	result = gsi_write_channel_scratch(ep->gsi_chan_hdl, ch_scratch);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("failed to write evt ring scratch\n");
+		goto fail_write_scratch;
+	}
+	return 0;
+fail_write_scratch:
+	gsi_dealloc_channel(ep->gsi_chan_hdl);
+	ep->gsi_chan_hdl = ~0;
+fail_get_gsi_ep_info:
+	gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
+	ep->gsi_evt_ring_hdl = ~0;
+	return result;
+}
+
+static int ipa3_smmu_map_rtk_pipes(struct ipa_eth_client_pipe_info *pipe,
+	bool map)
+{
+	struct iommu_domain *smmu_domain;
+	int result;
+	int i;
+	u64 iova;
+	phys_addr_t pa;
+	u64 iova_p;
+	phys_addr_t pa_p;
+	u32 size_p;
+
+	if (pipe->info.fix_buffer_size > PAGE_SIZE) {
+		IPAERR("invalid data buff size\n");
+		return -EINVAL;
+	}
+
+	if (ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP]) {
+		IPADBG("AP SMMU is set to s1 bypass\n");
+		return 0;
+	}
+
+	smmu_domain = ipa3_get_smmu_domain();
+	if (!smmu_domain) {
+		IPAERR("invalid smmu domain\n");
+		return -EINVAL;
+	}
+
+	result = ipa3_smmu_map_peer_buff(
+		(u64)pipe->info.transfer_ring_base,
+		pipe->info.transfer_ring_size,
+		map,
+		pipe->info.transfer_ring_sgt,
+		IPA_SMMU_CB_AP);
+	if (result) {
+		IPAERR("failed to %s ntn ring %d\n",
+			map ? "map" : "unmap", result);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < pipe->info.data_buff_list_size; i++) {
+		iova = (u64)pipe->info.data_buff_list[i].iova;
+		pa = (phys_addr_t)pipe->info.data_buff_list[i].pa;
+		IPA_SMMU_ROUND_TO_PAGE(iova, pa, pipe->info.fix_buffer_size,
+			iova_p, pa_p, size_p);
+		IPADBG_LOW("%s 0x%llx to 0x%pa size %d\n", map ? "mapping" :
+			"unmapping", iova_p, &pa_p, size_p);
+		if (map) {
+			result = ipa3_iommu_map(smmu_domain, iova_p, pa_p,
+				size_p, IOMMU_READ | IOMMU_WRITE);
+			if (result)
+				IPAERR("Fail to map 0x%llx\n", iova);
+		} else {
+			result = iommu_unmap(smmu_domain, iova_p, size_p);
+			if (result != size_p) {
+				IPAERR("Fail to unmap 0x%llx\n", iova);
+				goto fail_map_buffer_smmu_enabled;
+			}
+		}
+	}
+	return 0;
+
+fail_map_buffer_smmu_enabled:
+	ipa3_smmu_map_peer_buff(
+		(u64)pipe->info.transfer_ring_base,
+		pipe->info.transfer_ring_size,
+		!map,
+		pipe->info.transfer_ring_sgt,
+		IPA_SMMU_CB_AP);
+	return result;
+}
+
+int ipa3_eth_rtk_connect(
+	struct ipa_eth_client_pipe_info *pipe,
+	enum ipa_client_type client_type)
+{
+	struct ipa3_ep_context *ep;
+	int ep_idx;
+	bool vlan_mode;
+	int result = 0;
+	u32 gsi_db_addr_low, gsi_db_addr_high;
+	void __iomem *db_addr;
+	u32 evt_ring_db_addr_low, evt_ring_db_addr_high, db_val = 0;
+	int id;
+
+	ep_idx = ipa_get_ep_mapping(client_type);
+	if (ep_idx == -1 || ep_idx >= IPA3_MAX_NUM_PIPES) {
+		IPAERR("undefined client_type\n");
+		return -EFAULT;
+	}
+	/* need enhancement for vlan support on multiple attach */
+	result = ipa3_is_vlan_mode(IPA_VLAN_IF_ETH, &vlan_mode);
+	if (result) {
+		IPAERR("Could not determine IPA VLAN mode\n");
+		return result;
+	}
+	result = ipa3_smmu_map_rtk_pipes(pipe, true);
+	if (result) {
+		IPAERR("failed to map SMMU %d\n", result);
+		return result;
+	}
+	ep = &ipa3_ctx->ep[ep_idx];
+	memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	ep->valid = 1;
+	ep->client = client_type;
+	result = ipa3_disable_data_path(ep_idx);
+	if (result) {
+		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+			ep_idx);
+		goto disable_data_path_fail;
+	}
+	ep->cfg.nat.nat_en = IPA_CLIENT_IS_PROD(client_type) ?
+					IPA_SRC_NAT : IPA_BYPASS_NAT;
+	ep->cfg.hdr.hdr_len = vlan_mode ? VLAN_ETH_HLEN : ETH_HLEN;
+	ep->cfg.mode.mode = IPA_BASIC;
+	if (IPA_CLIENT_IS_CONS(client_type)) {
+		ep->cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
+		ep->cfg.aggr.aggr = IPA_GENERIC;
+		ep->cfg.aggr.aggr_byte_limit = IPA_ETH_AGGR_BYTE_LIMIT;
+		ep->cfg.aggr.aggr_pkt_limit = IPA_ETH_AGGR_PKT_LIMIT;
+		ep->cfg.aggr.aggr_hard_byte_limit_en = IPA_ENABLE_AGGR;
+	} else {
+		ep->client_notify = pipe->info.notify;
+		ep->priv = pipe->info.priv;
+	}
+	if (ipa3_cfg_ep(ep_idx, &ep->cfg)) {
+		IPAERR("fail to setup rx pipe cfg\n");
+		goto cfg_ep_fail;
+	}
+	if (IPA_CLIENT_IS_PROD(client_type))
+		ipa3_install_dflt_flt_rules(ep_idx);
+	IPADBG("client %d (ep: %d) connected\n", client_type,
+		ep_idx);
+	if (ipa_eth_setup_rtk_gsi_channel(pipe, ep)) {
+		IPAERR("fail to setup eth gsi rx channel\n");
+		result = -EFAULT;
+		goto setup_rtk_gsi_ch_fail;
+	}
+	if (gsi_query_channel_db_addr(ep->gsi_chan_hdl,
+		&gsi_db_addr_low, &gsi_db_addr_high)) {
+		IPAERR("failed to query gsi rx db addr\n");
+		result = -EFAULT;
+		goto query_ch_db_fail;
+	}
+	/* only 32 bit lsb is used */
+	db_addr = ioremap((phys_addr_t)(gsi_db_addr_low), 4);
+	if (IPA_CLIENT_IS_PROD(client_type)) {
+		/* Rx: Initialize to ring base (i.e point 6) */
+		db_val = (u32)ep->gsi_mem_info.chan_ring_base_addr;
+	} else {
+		/* TX: Initialize to end of ring */
+		db_val = (u32)ep->gsi_mem_info.chan_ring_base_addr;
+		db_val += (u32)ep->gsi_mem_info.chan_ring_len;
+	}
+	iowrite32(db_val, db_addr);
+	iounmap(db_addr);
+	gsi_query_evt_ring_db_addr(ep->gsi_evt_ring_hdl,
+		&evt_ring_db_addr_low, &evt_ring_db_addr_high);
+	IPADBG("evt_ring_hdl %lu, db_addr_low %u db_addr_high %u\n",
+		ep->gsi_evt_ring_hdl, evt_ring_db_addr_low,
+		evt_ring_db_addr_high);
+	/* only 32 bit lsb is used */
+	db_addr = ioremap((phys_addr_t)(evt_ring_db_addr_low), 4);
+	/*
+	 * IPA/GSI driver should ring the event DB once after
+	 * initialization of the event, with a value that is
+	 * outside of the ring range. Eg: ring base = 0x1000,
+	 * ring size = 0x100 => AP can write value > 0x1100
+	 * into the doorbell address. Eg: 0x 1110.
+	 * Use event ring base addr + event ring size + 1 element size.
+	 */
+	db_val = (u32)ep->gsi_mem_info.evt_ring_base_addr;
+	db_val += (u32)ep->gsi_mem_info.evt_ring_len;
+	db_val += GSI_EVT_RING_RE_SIZE_16B;
+	iowrite32(db_val, db_addr);
+	iounmap(db_addr);
+	if (IPA_CLIENT_IS_PROD(client_type)) {
+		/* RX mailbox */
+		pipe->info.db_pa = ipa3_ctx->ipa_wrapper_base +
+			ipahal_get_reg_base() +
+			ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
+			IPA_ETH_MBOX_M,
+			IPA_ETH_RX_MBOX_N);
+		pipe->info.db_val = IPA_ETH_RX_MBOX_VAL;
+	} else {
+		/* TX mailbox */
+		pipe->info.db_pa = ipa3_ctx->ipa_wrapper_base +
+			ipahal_get_reg_base() +
+			ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
+			IPA_ETH_MBOX_M,
+			IPA_ETH_TX_MBOX_N);
+		pipe->info.db_val = IPA_ETH_TX_MBOX_VAL;
+	}
+	/* enable data path */
+	result = ipa3_enable_data_path(ep_idx);
+	if (result) {
+		IPAERR("enable data path failed res=%d clnt=%d\n", result,
+			ep_idx);
+		goto enable_data_path_fail;
+	}
+
+	/* start gsi channel */
+	result = gsi_start_channel(ep->gsi_chan_hdl);
+	if (result) {
+		IPAERR("failed to start gsi tx channel\n");
+		goto start_channel_fail;
+	}
+
+	id = (pipe->dir == IPA_ETH_PIPE_DIR_TX) ? 1 : 0;
+	/* start uC gsi dbg stats monitor */
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
+		ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_RTK].ch_id_info[id].ch_id
+			= ep->gsi_chan_hdl;
+		ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_RTK].ch_id_info[id].dir
+			= pipe->dir;
+		ipa3_uc_debug_stats_alloc(
+			ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_RTK]);
+	}
+	ipa3_eth_save_client_mapping(pipe, client_type,
+		id, ep_idx, ep->gsi_chan_hdl);
+	result = ipa3_eth_config_uc(true,
+		IPA_HW_PROTOCOL_RTK,
+		(pipe->dir == IPA_ETH_PIPE_DIR_TX)
+		? IPA_ETH_TX : IPA_ETH_RX,
+		ep->gsi_chan_hdl);
+	if (result) {
+		IPAERR("failed to config uc\n");
+		goto config_uc_fail;
+	}
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return 0;
+config_uc_fail:
+	/* stop uC gsi dbg stats monitor */
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
+		ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_RTK].ch_id_info[id].ch_id
+			= 0xff;
+		ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_RTK].ch_id_info[id].dir
+			= pipe->dir;
+		ipa3_uc_debug_stats_alloc(
+			ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_RTK]);
+	}
+	ipa3_stop_gsi_channel(ep->gsi_chan_hdl);
+start_channel_fail:
+	ipa3_disable_data_path(ep_idx);
+enable_data_path_fail:
+query_ch_db_fail:
+setup_rtk_gsi_ch_fail:
+cfg_ep_fail:
+disable_data_path_fail:
+	ipa3_smmu_map_rtk_pipes(pipe, false);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return result;
+}
+EXPORT_SYMBOL(ipa3_eth_rtk_connect);
+
/*
 * ipa3_eth_aqc_connect() - connect an AQC ethernet offload pipe.
 * Stub: AQC connect is not implemented here; it always reports success
 * so the common ethernet client dispatch can call it uniformly.
 */
int ipa3_eth_aqc_connect(
	struct ipa_eth_client_pipe_info *pipe,
	enum ipa_client_type client_type)
{
	return 0;
}
EXPORT_SYMBOL(ipa3_eth_aqc_connect);
+
/*
 * ipa3_eth_emac_connect() - connect an EMAC ethernet offload pipe.
 * Stub: EMAC connect is not implemented here; it always reports success
 * so the common ethernet client dispatch can call it uniformly.
 */
int ipa3_eth_emac_connect(
	struct ipa_eth_client_pipe_info *pipe,
	enum ipa_client_type client_type)
{
	return 0;
}
EXPORT_SYMBOL(ipa3_eth_emac_connect);
+
+int ipa3_eth_rtk_disconnect(
+	struct ipa_eth_client_pipe_info *pipe,
+	enum ipa_client_type client_type)
+{
+	int result = 0;
+	struct ipa3_ep_context *ep;
+	int ep_idx;
+	int id;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	ep_idx = ipa_get_ep_mapping(client_type);
+	if (ep_idx == -1 || ep_idx >= IPA3_MAX_NUM_PIPES) {
+		IPAERR("undefined client_type\n");
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return -EFAULT;
+	}
+	ep = &ipa3_ctx->ep[ep_idx];
+	/* disable data path */
+	result = ipa3_disable_data_path(ep_idx);
+	if (result) {
+		IPAERR("enable data path failed res=%d clnt=%d.\n", result,
+			ep_idx);
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return -EFAULT;
+	}
+	id = (pipe->dir == IPA_ETH_PIPE_DIR_TX) ? 1 : 0;
+	/* stop uC gsi dbg stats monitor */
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
+		ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_RTK].ch_id_info[id].ch_id
+			= 0xff;
+		ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_RTK].ch_id_info[id].dir
+			= pipe->dir;
+		ipa3_uc_debug_stats_alloc(
+			ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_RTK]);
+	}
+	/* stop gsi channel */
+	result = ipa3_stop_gsi_channel(ep_idx);
+	if (result) {
+		IPAERR("failed to stop gsi channel %d\n", ep_idx);
+		result = -EFAULT;
+		ipa_assert();
+		goto fail;
+	}
+	result = ipa3_eth_config_uc(false,
+		IPA_HW_PROTOCOL_RTK,
+		(pipe->dir == IPA_ETH_PIPE_DIR_TX)
+		? IPA_ETH_TX : IPA_ETH_RX,
+		ep->gsi_chan_hdl);
+	if (result)
+		IPAERR("failed to config uc\n");
+
+	/* tear down pipe */
+	result = ipa3_reset_gsi_channel(ep_idx);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("failed to reset gsi channel: %d.\n", result);
+		ipa_assert();
+		goto fail;
+	}
+	result = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("failed to reset evt ring: %d.\n", result);
+		ipa_assert();
+		goto fail;
+	}
+	result = ipa3_release_gsi_channel(ep_idx);
+	if (result) {
+		IPAERR("failed to release gsi channel: %d\n", result);
+		ipa_assert();
+		goto fail;
+	}
+	memset(ep, 0, sizeof(struct ipa3_ep_context));
+	IPADBG("client (ep: %d) disconnected\n", ep_idx);
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)
+		ipa3_uc_debug_stats_dealloc(IPA_HW_PROTOCOL_RTK);
+	if (IPA_CLIENT_IS_PROD(client_type))
+		ipa3_delete_dflt_flt_rules(ep_idx);
+	/* unmap th pipe */
+	result = ipa3_smmu_map_rtk_pipes(pipe, false);
+	if (result)
+		IPAERR("failed to unmap SMMU %d\n", result);
+fail:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return result;
+}
+EXPORT_SYMBOL(ipa3_eth_rtk_disconnect);
+
/*
 * ipa3_eth_aqc_disconnect() - disconnect an AQC ethernet offload pipe.
 * Stub: AQC disconnect is not implemented here; it always reports
 * success so the common ethernet client dispatch can call it uniformly.
 */
int ipa3_eth_aqc_disconnect(
	struct ipa_eth_client_pipe_info *pipe,
	enum ipa_client_type client_type)
{
	return 0;
}
EXPORT_SYMBOL(ipa3_eth_aqc_disconnect);
+
/*
 * ipa3_eth_emac_disconnect() - disconnect an EMAC ethernet offload pipe.
 * Stub: EMAC disconnect is not implemented here; it always reports
 * success so the common ethernet client dispatch can call it uniformly.
 */
int ipa3_eth_emac_disconnect(
	struct ipa_eth_client_pipe_info *pipe,
	enum ipa_client_type client_type)
{
	return 0;
}
EXPORT_SYMBOL(ipa3_eth_emac_disconnect);

+ 63 - 2
drivers/platform/msm/ipa/ipa_v3/ipa_i.h

@@ -1442,6 +1442,12 @@ struct ipa3_stats {
 #define IPA3_UC_DEBUG_STATS_RINGUSAGELOW_OFF (12)
 #define IPA3_UC_DEBUG_STATS_RINGUTILCOUNT_OFF (16)
 #define IPA3_UC_DEBUG_STATS_OFF (20)
+#define IPA3_UC_DEBUG_STATS_TRCOUNT_OFF (20)
+#define IPA3_UC_DEBUG_STATS_ERCOUNT_OFF (24)
+#define IPA3_UC_DEBUG_STATS_AOSCOUNT_OFF (28)
+#define IPA3_UC_DEBUG_STATS_BUSYTIME_OFF (32)
+#define IPA3_UC_DEBUG_STATS_RTK_OFF (40)
+
 
 /**
  * struct ipa3_uc_dbg_stats - uC dbg stats for offloading
@@ -1655,6 +1661,12 @@ struct ipa3_aqc_ctx {
 	struct ipa3_uc_dbg_stats dbg_stats;
 };
 
+/**
+ * struct ipa3_rtk_ctx - IPA rtk context
+ */
+struct ipa3_rtk_ctx {
+	struct ipa3_uc_dbg_stats dbg_stats;
+};
 
 /**
  * struct ipa3_transport_pm - transport power management related members
@@ -1830,6 +1842,26 @@ struct ipa3_app_clock_vote {
 	u32 cnt;
 };
 
+struct ipa_eth_client_mapping {
+	enum ipa_client_type type;
+	int pipe_id;
+	int pipe_hdl;
+	int ch_id;
+	bool valid;
+};
+
+struct ipa3_eth_info {
+	u8 num_ch;
+	struct ipa_eth_client_mapping map[IPA_MAX_CH_STATS_SUPPORTED];
+};
+
+struct ipa3_eth_error_stats {
+	int rp;
+	int wp;
+	u32 err;
+};
+
+
 /**
  * struct ipa3_context - IPA context
  * @cdev: cdev context
@@ -1937,6 +1969,7 @@ struct ipa3_app_clock_vote {
  * @rmnet_ctl_enable: enable pipe support fow low latency data
  * @gsi_fw_file_name: GSI IPA fw file name
  * @uc_fw_file_name: uC IPA fw file name
+ * @eth_info: ethernet client mapping
  */
 struct ipa3_context {
 	struct ipa3_char_device_context cdev;
@@ -2098,6 +2131,7 @@ struct ipa3_context {
 	struct ipa3_usb_ctx usb_ctx;
 	struct ipa3_mhip_ctx mhip_ctx;
 	struct ipa3_aqc_ctx aqc_ctx;
+	struct ipa3_rtk_ctx rtk_ctx;
 	atomic_t ipa_clk_vote;
 
 	int (*client_lock_unlock[IPA_MAX_CLNT])(bool is_lock);
@@ -2127,6 +2161,8 @@ struct ipa3_context {
 	bool rmnet_ctl_enable;
 	char *gsi_fw_file_name;
 	char *uc_fw_file_name;
+	struct ipa3_eth_info
+		eth_info[IPA_ETH_CLIENT_MAX][IPA_ETH_INST_ID_MAX];
 };
 
 struct ipa3_plat_drv_res {
@@ -2670,6 +2706,7 @@ int ipa3_get_wdi_gsi_stats(struct ipa_uc_dbg_ring_stats *stats);
 int ipa3_get_wdi3_gsi_stats(struct ipa_uc_dbg_ring_stats *stats);
 int ipa3_get_usb_gsi_stats(struct ipa_uc_dbg_ring_stats *stats);
 int ipa3_get_aqc_gsi_stats(struct ipa_uc_dbg_ring_stats *stats);
+int ipa3_get_rtk_gsi_stats(struct ipa_uc_dbg_ring_stats *stats);
 int ipa3_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats);
 u16 ipa3_get_smem_restr_bytes(void);
 int ipa3_broadcast_wdi_quota_reach_ind(uint32_t fid, uint64_t num_bytes);
@@ -2810,6 +2847,8 @@ struct ipa3_rt_tbl *__ipa3_find_rt_tbl(enum ipa_ip_type ip, const char *name);
 int ipa3_set_single_ndp_per_mbim(bool enable);
 void ipa3_debugfs_init(void);
 void ipa3_debugfs_remove(void);
+void ipa3_eth_debugfs_init(void);
+void ipa3_eth_debugfs_add(struct ipa_eth_client *client);
 
 void ipa3_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size);
 #ifdef IPA_DEBUG
@@ -3116,8 +3155,30 @@ void ipa_eth_exit(void);
 #else
 static inline int ipa_eth_init(void) { return 0; }
 static inline void ipa_eth_exit(void) { }
-#endif // CONFIG_IPA_ETH
-
+#endif
+void ipa3_eth_debugfs_add_node(struct ipa_eth_client *client);
+int ipa3_eth_rtk_connect(
+	struct ipa_eth_client_pipe_info *pipe,
+	enum ipa_client_type client_type);
+int ipa3_eth_aqc_connect(
+	struct ipa_eth_client_pipe_info *pipe,
+	enum ipa_client_type client_type);
+int ipa3_eth_emac_connect(
+	struct ipa_eth_client_pipe_info *pipe,
+	enum ipa_client_type client_type);
+int ipa3_eth_rtk_disconnect(
+	struct ipa_eth_client_pipe_info *pipe,
+	enum ipa_client_type client_type);
+int ipa3_eth_aqc_disconnect(
+	struct ipa_eth_client_pipe_info *pipe,
+	enum ipa_client_type client_type);
+int ipa3_eth_emac_disconnect(
+	struct ipa_eth_client_pipe_info *pipe,
+	enum ipa_client_type client_type);
+int ipa3_eth_client_conn_evt(struct ipa_ecm_msg *msg);
+int ipa3_eth_client_disconn_evt(struct ipa_ecm_msg *msg);
+void ipa3_eth_get_status(u32 client, int scratch_id,
+	struct ipa3_eth_error_stats *stats);
 int ipa3_get_gsi_chan_info(struct gsi_chan_info *gsi_chan_info,
 	unsigned long chan_hdl);
 

+ 5 - 5
drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c

@@ -3239,23 +3239,23 @@ int ipa3_get_mhip_gsi_stats(struct ipa_uc_dbg_ring_stats *stats)
 	}
 	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
 	for (i = 0; i < MAX_MHIP_CHANNELS; i++) {
-		stats->ring[i].ringFull = ioread32(
+		stats->u.ring[i].ringFull = ioread32(
 			ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio
 			+ i * IPA3_UC_DEBUG_STATS_OFF +
 			IPA3_UC_DEBUG_STATS_RINGFULL_OFF);
-		stats->ring[i].ringEmpty = ioread32(
+		stats->u.ring[i].ringEmpty = ioread32(
 			ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio
 			+ i * IPA3_UC_DEBUG_STATS_OFF +
 			IPA3_UC_DEBUG_STATS_RINGEMPTY_OFF);
-		stats->ring[i].ringUsageHigh = ioread32(
+		stats->u.ring[i].ringUsageHigh = ioread32(
 			ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio
 			+ i * IPA3_UC_DEBUG_STATS_OFF +
 			IPA3_UC_DEBUG_STATS_RINGUSAGEHIGH_OFF);
-		stats->ring[i].ringUsageLow = ioread32(
+		stats->u.ring[i].ringUsageLow = ioread32(
 			ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio
 			+ i * IPA3_UC_DEBUG_STATS_OFF +
 			IPA3_UC_DEBUG_STATS_RINGUSAGELOW_OFF);
-		stats->ring[i].RingUtilCount = ioread32(
+		stats->u.ring[i].RingUtilCount = ioread32(
 			ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio
 			+ i * IPA3_UC_DEBUG_STATS_OFF +
 			IPA3_UC_DEBUG_STATS_RINGUTILCOUNT_OFF);

+ 15 - 0
drivers/platform/msm/ipa/ipa_v3/ipa_uc.c

@@ -409,6 +409,17 @@ static void ipa3_uc_save_dbg_stats(u32 size)
 		} else
 			goto unmap;
 		break;
+	case IPA_HW_PROTOCOL_RTK:
+		if (!ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio) {
+			ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_size =
+				size;
+			ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_ofst =
+				addr_offset;
+			ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio =
+				mmio;
+		} else
+			goto unmap;
+		break;
 	case IPA_HW_PROTOCOL_WDI:
 		if (!ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio) {
 			ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_size =
@@ -1484,6 +1495,10 @@ int ipa3_uc_debug_stats_dealloc(uint32_t prot_id)
 		iounmap(ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio);
 		ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio = NULL;
 		break;
+	case IPA_HW_PROTOCOL_RTK:
+		iounmap(ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio);
+		ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio = NULL;
+		break;
 	case IPA_HW_PROTOCOL_WDI:
 		iounmap(ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio);
 		ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio = NULL;

+ 79 - 2
drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h

@@ -24,6 +24,7 @@
 #define DIR_PRODUCER 1
 
 #define MAX_AQC_CHANNELS 2
+#define MAX_RTK_CHANNELS 2
 #define MAX_11AD_CHANNELS 5
 #define MAX_WDI2_CHANNELS 2
 #define MAX_WDI3_CHANNELS 2
@@ -81,6 +82,7 @@ enum ipa3_hw_features {
  * @IPA_HW_PROTOCOL_ETH : protocol related to ETH operation in IPA HW
  * @IPA_HW_PROTOCOL_MHIP: protocol related to MHIP operation in IPA HW
  * @IPA_HW_PROTOCOL_USB : protocol related to USB operation in IPA HW
+ * @IPA_HW_PROTOCOL_RTK : protocol related to RTK operation in IPA HW
  */
 enum ipa4_hw_protocol {
 	IPA_HW_PROTOCOL_COMMON = 0x0,
@@ -91,6 +93,7 @@ enum ipa4_hw_protocol {
 	IPA_HW_PROTOCOL_ETH = 0x5,
 	IPA_HW_PROTOCOL_MHIP = 0x6,
 	IPA_HW_PROTOCOL_USB = 0x7,
+	IPA_HW_PROTOCOL_RTK = 0x9,
 	IPA_HW_PROTOCOL_MAX
 };
 
@@ -589,7 +592,75 @@ struct IpaHw11adInitCmdData_t {
  */
 struct IpaHw11adDeinitCmdData_t {
 	u32 reserved;
-};
+} __packed;
+
/**
 * struct IpaHwRtkSetupCmdData_t - RTK setup channel command data
 * @dir: Direction RX/TX
 * @gsi_ch: GSI Channel number
 * @reserved: 16 bits of padding (struct is __packed, total 4 bytes)
 */
struct IpaHwRtkSetupCmdData_t {
	uint8_t dir;
	uint8_t gsi_ch;
	uint16_t reserved;
} __packed;
+
/**
 * struct IpaHwRtkCommonChCmdData_t - RTK tear down channel command data
 * @gsi_ch: GSI Channel number
 * @reserved_0: 8 bits of padding
 * @reserved_1: 16 bits of padding (struct is __packed, total 4 bytes)
 */
struct IpaHwRtkCommonChCmdData_t {
	uint8_t gsi_ch;
	uint8_t reserved_0;
	uint16_t reserved_1;
} __packed;
+
+/**
+ * struct IpaHwAQCInitCmdData_t - AQC peripheral init command data
+ * @periph_baddr_lsb: Peripheral Base Address LSB (pa/IOVA)
+ * @periph_baddr_msb: Peripheral Base Address MSB (pa/IOVA)
+ */
+struct IpaHwAQCInitCmdData_t {
+	u32 periph_baddr_lsb;
+	u32 periph_baddr_msb;
+} __packed;
+
+/**
+ * struct IpaHwAQCDeinitCmdData_t - AQC peripheral deinit command data
+ * @reserved: Reserved for future
+ */
+struct IpaHwAQCDeinitCmdData_t {
+	u32 reserved;
+} __packed;
+
/**
 * struct IpaHwAQCSetupCmdData_t - AQC setup channel command data
 * @dir: Direction RX/TX
 * @aqc_ch: AQC channel number
 * @gsi_ch: GSI Channel number
 * @reserved: 8 bits of padding (struct is __packed, total 4 bytes)
 */
struct IpaHwAQCSetupCmdData_t {
	u8 dir;
	u8 aqc_ch;
	u8 gsi_ch;
	u8 reserved;
} __packed;
+
+/**
+ * struct IpaHwAQCCommonChCmdData_t - AQC tear down channel command data
+ * @gsi_ch: GSI Channel number
+ * @reserved_0: padding
+ * @reserved_1: padding
+ */
+struct IpaHwAQCCommonChCmdData_t {
+	u8 gsi_ch;
+	u8 reserved_0;
+	u16 reserved_1;
+} __packed;
 
 /**
  * struct IpaHwSetUpCmd  - Structure holding the parameters
@@ -599,7 +670,9 @@ struct IpaHw11adDeinitCmdData_t {
  */
 union IpaHwSetUpCmd {
 	struct Ipa3HwNtnSetUpCmdData_t NtnSetupCh_params;
-	struct IpaHw11adSetupCmdData_t	W11AdSetupCh_params;
+	struct IpaHwAQCSetupCmdData_t AqcSetupCh_params;
+	struct IpaHw11adSetupCmdData_t W11AdSetupCh_params;
+	struct IpaHwRtkSetupCmdData_t RtkSetupCh_params;
 } __packed;
 
 struct IpaHwOffloadSetUpCmdData_t {
@@ -662,6 +735,8 @@ struct IpaHwOffloadSetUpCmdData_t_v4_0 {
  */
 union IpaHwCommonChCmd {
 	union Ipa3HwNtnCommonChCmdData_t NtnCommonCh_params;
+	struct IpaHwAQCCommonChCmdData_t AqcCommonCh_params;
+	struct IpaHwRtkCommonChCmdData_t RtkCommonCh_params;
 	struct IpaHw11adCommonChCmdData_t W11AdCommonCh_params;
 } __packed;
 
@@ -743,6 +818,7 @@ struct IpaHwOffloadCommonChCmdData_t_v4_0 {
  */
 union IpaHwPeripheralInitCmd {
 	struct IpaHw11adInitCmdData_t W11AdInit_params;
+	struct IpaHwAQCInitCmdData_t AqcInit_params;
 } __packed;
 
 struct IpaHwPeripheralInitCmdData_t {
@@ -757,6 +833,7 @@ struct IpaHwPeripheralInitCmdData_t {
  */
 union IpaHwPeripheralDeinitCmd {
 	struct IpaHw11adDeinitCmdData_t W11AdDeinit_params;
+	struct IpaHwAQCDeinitCmdData_t AqcDeinit_params;
 } __packed;
 
 struct IpaHwPeripheralDeinitCmdData_t {

+ 5 - 5
drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c

@@ -430,23 +430,23 @@ int ipa3_get_wdi_gsi_stats(struct ipa_uc_dbg_ring_stats *stats)
 
 	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
 	for (i = 0; i < MAX_WDI2_CHANNELS; i++) {
-		stats->ring[i].ringFull = ioread32(
+		stats->u.ring[i].ringFull = ioread32(
 			ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio
 			+ i * IPA3_UC_DEBUG_STATS_OFF +
 			IPA3_UC_DEBUG_STATS_RINGFULL_OFF);
-		stats->ring[i].ringEmpty = ioread32(
+		stats->u.ring[i].ringEmpty = ioread32(
 			ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio
 			+ i * IPA3_UC_DEBUG_STATS_OFF +
 			IPA3_UC_DEBUG_STATS_RINGEMPTY_OFF);
-		stats->ring[i].ringUsageHigh = ioread32(
+		stats->u.ring[i].ringUsageHigh = ioread32(
 			ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio
 			+ i * IPA3_UC_DEBUG_STATS_OFF +
 			IPA3_UC_DEBUG_STATS_RINGUSAGEHIGH_OFF);
-		stats->ring[i].ringUsageLow = ioread32(
+		stats->u.ring[i].ringUsageLow = ioread32(
 			ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio
 			+ i * IPA3_UC_DEBUG_STATS_OFF +
 			IPA3_UC_DEBUG_STATS_RINGUSAGELOW_OFF);
-		stats->ring[i].RingUtilCount = ioread32(
+		stats->u.ring[i].RingUtilCount = ioread32(
 			ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio
 			+ i * IPA3_UC_DEBUG_STATS_OFF +
 			IPA3_UC_DEBUG_STATS_RINGUTILCOUNT_OFF);

+ 41 - 1
drivers/platform/msm/ipa/ipa_v3/ipa_utils.c

@@ -2341,6 +2341,12 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
 			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP,
 			QMB_MASTER_SELECT_DDR,
 			{ 8, 2, 27, 32, IPA_EE_Q6, GSI_FREE_PRE_FETCH, 3 }, IPA_TX_INSTANCE_NA },
+	[IPA_4_5][IPA_CLIENT_RTK_ETHERNET_PROD] = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{ 10, 13, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 }, IPA_TX_INSTANCE_NA  },
 	/* Only for test purpose */
 	[IPA_4_5][IPA_CLIENT_TEST_PROD]           = {
 			true, IPA_v4_5_GROUP_UL_DL,
@@ -2457,6 +2463,12 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
 			IPA_DPS_HPS_SEQ_TYPE_INVALID,
 			QMB_MASTER_SELECT_DDR,
 			{ 18, 4, 9, 9, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 0 }, IPA_TX_INSTANCE_NA },
+	[IPA_4_5][IPA_CLIENT_RTK_ETHERNET_CONS] = {
+			true, IPA_v4_5_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{ 23, 8, 9, 9, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 }, IPA_TX_INSTANCE_NA  },
 	/* Only for test purpose */
 	/* MBIM aggregation test pipes should have the same QMB as USB_CONS */
 	[IPA_4_5][IPA_CLIENT_TEST_CONS]           = {
@@ -6892,7 +6904,9 @@ int ipa3_write_qmap_id(struct ipa_ioc_write_qmapid *param_in)
 	    param_in->client == IPA_CLIENT_HSIC1_PROD ||
 	    param_in->client == IPA_CLIENT_ODU_PROD ||
 	    param_in->client == IPA_CLIENT_ETHERNET_PROD ||
-		param_in->client == IPA_CLIENT_WIGIG_PROD) {
+		param_in->client == IPA_CLIENT_WIGIG_PROD ||
+		param_in->client == IPA_CLIENT_AQC_ETHERNET_PROD ||
+		param_in->client == IPA_CLIENT_RTK_ETHERNET_PROD) {
 		result = ipa3_cfg_ep_metadata(ipa_ep_idx, &meta);
 	} else if (param_in->client == IPA_CLIENT_WLAN1_PROD ||
 			   param_in->client == IPA_CLIENT_WLAN2_PROD) {
@@ -10138,6 +10152,10 @@ void ipa3_get_gsi_stats(int prot_id,
 		stats->num_ch = MAX_AQC_CHANNELS;
 		ipa3_get_aqc_gsi_stats(stats);
 		break;
+	case IPA_HW_PROTOCOL_RTK:
+		stats->num_ch = MAX_RTK_CHANNELS;
+		ipa3_get_rtk_gsi_stats(stats);
+		break;
 	case IPA_HW_PROTOCOL_11ad:
 		break;
 	case IPA_HW_PROTOCOL_WDI:
@@ -10259,6 +10277,10 @@ int ipa3_get_prot_id(enum ipa_client_type client)
 	case IPA_CLIENT_AQC_ETHERNET_PROD:
 		prot_id = IPA_HW_PROTOCOL_AQC;
 		break;
+	case IPA_CLIENT_RTK_ETHERNET_CONS:
+	case IPA_CLIENT_RTK_ETHERNET_PROD:
+		prot_id = IPA_HW_PROTOCOL_RTK;
+		break;
 	case IPA_CLIENT_MHI_PRIME_TETH_PROD:
 	case IPA_CLIENT_MHI_PRIME_TETH_CONS:
 	case IPA_CLIENT_MHI_PRIME_RMNET_PROD:
@@ -10295,3 +10317,21 @@ int ipa3_get_prot_id(enum ipa_client_type client)
 
 	return prot_id;
 }
+
+
+void ipa3_eth_get_status(u32 client, int scratch_id,
+	struct ipa3_eth_error_stats *stats)
+{
+	int ch_id;
+	int ipa_ep_idx;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	ipa_ep_idx = ipa3_get_ep_mapping(client);
+	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED)
+		return;
+	ch_id = ipa3_ctx->ep[ipa_ep_idx].gsi_chan_hdl;
+	stats->rp = gsi_get_refetch_reg(ch_id, true);
+	stats->wp = gsi_get_refetch_reg(ch_id, false);
+	stats->err = gsi_get_drop_stats(ipa_ep_idx, scratch_id);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+}

+ 5 - 5
drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c

@@ -994,23 +994,23 @@ int ipa3_get_wdi3_gsi_stats(struct ipa_uc_dbg_ring_stats *stats)
 
 	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
 	for (i = 0; i < MAX_WDI3_CHANNELS; i++) {
-		stats->ring[i].ringFull = ioread32(
+		stats->u.ring[i].ringFull = ioread32(
 			ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio
 			+ i * IPA3_UC_DEBUG_STATS_OFF +
 			IPA3_UC_DEBUG_STATS_RINGFULL_OFF);
-		stats->ring[i].ringEmpty = ioread32(
+		stats->u.ring[i].ringEmpty = ioread32(
 			ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio
 			+ i * IPA3_UC_DEBUG_STATS_OFF +
 			IPA3_UC_DEBUG_STATS_RINGEMPTY_OFF);
-		stats->ring[i].ringUsageHigh = ioread32(
+		stats->u.ring[i].ringUsageHigh = ioread32(
 			ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio
 			+ i * IPA3_UC_DEBUG_STATS_OFF +
 			IPA3_UC_DEBUG_STATS_RINGUSAGEHIGH_OFF);
-		stats->ring[i].ringUsageLow = ioread32(
+		stats->u.ring[i].ringUsageLow = ioread32(
 			ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio
 			+ i * IPA3_UC_DEBUG_STATS_OFF +
 			IPA3_UC_DEBUG_STATS_RINGUSAGELOW_OFF);
-		stats->ring[i].RingUtilCount = ioread32(
+		stats->u.ring[i].RingUtilCount = ioread32(
 			ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio
 			+ i * IPA3_UC_DEBUG_STATS_OFF +
 			IPA3_UC_DEBUG_STATS_RINGUTILCOUNT_OFF);