Browse Source

ipa: Remove IPA framework

1. Move the kernel and UAPI header files from the kernel project to the dataipa
2. Add python script which sanitizes and copies UAPI headers
3. Add kernel and UAPI header libraries export to Android makefiles
4. Refactor the driver code to use the internal headers
5. Refactor the driver code to export symbols without the IPA framework
6. Merge ipa_clients and rndis modules into the main IPA module

Change-Id: Ie633f291aefe559ff2c39b26a5a9765371399edc
Signed-off-by: Ilia Lin <[email protected]>
Ilia Lin 2 years ago
parent
commit
b37958da46
93 changed files with 13129 additions and 1258 deletions
  1. 0 2
      dataipa_dlkm_vendor_board.mk
  2. 0 2
      dataipa_dlkm_vendor_product.mk
  3. 39 1
      drivers/platform/msm/Android.bp
  4. 2 10
      drivers/platform/msm/Android.mk
  5. 3 0
      drivers/platform/msm/Kbuild
  6. 2466 0
      drivers/platform/msm/include/linux/ipa.h
  7. 278 0
      drivers/platform/msm/include/linux/ipa_eth.h
  8. 170 0
      drivers/platform/msm/include/linux/ipa_mhi.h
  9. 141 0
      drivers/platform/msm/include/linux/ipa_odu_bridge.h
  10. 101 0
      drivers/platform/msm/include/linux/ipa_qdss.h
  11. 326 0
      drivers/platform/msm/include/linux/ipa_uc_offload.h
  12. 737 0
      drivers/platform/msm/include/linux/ipa_wdi3.h
  13. 487 0
      drivers/platform/msm/include/linux/ipa_wigig.h
  14. 31 0
      drivers/platform/msm/include/linux/msm_gsi.h
  15. 2936 0
      drivers/platform/msm/include/uapi/linux/ipa_qmi_service_v01.h
  16. 3998 0
      drivers/platform/msm/include/uapi/linux/msm_ipa.h
  17. 314 0
      drivers/platform/msm/include/uapi/linux/rmnet_ipa_fd_ioctl.h
  18. 12 2
      drivers/platform/msm/ipa/Kbuild
  19. 0 5
      drivers/platform/msm/ipa/ipa_clients/Kbuild
  20. 5 5
      drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c
  21. 1 1
      drivers/platform/msm/ipa/ipa_clients/ecm_ipa.h
  22. 0 26
      drivers/platform/msm/ipa/ipa_clients/ipa_clients_i.h
  23. 0 48
      drivers/platform/msm/ipa/ipa_clients/ipa_clients_manager.c
  24. 26 41
      drivers/platform/msm/ipa/ipa_clients/ipa_eth.c
  25. 23 33
      drivers/platform/msm/ipa/ipa_clients/ipa_gsb.c
  26. 21 31
      drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
  27. 19 29
      drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
  28. 40 32
      drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
  29. 76 89
      drivers/platform/msm/ipa/ipa_clients/ipa_wdi3.c
  30. 39 53
      drivers/platform/msm/ipa/ipa_clients/ipa_wigig.c
  31. 13 11
      drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c
  32. 4 1
      drivers/platform/msm/ipa/ipa_clients/rndis_ipa.h
  33. 11 23
      drivers/platform/msm/ipa/ipa_common_i.h
  34. 1 1
      drivers/platform/msm/ipa/ipa_rm.c
  35. 1 1
      drivers/platform/msm/ipa/ipa_rm_dependency_graph.h
  36. 1 1
      drivers/platform/msm/ipa/ipa_rm_i.h
  37. 1 1
      drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c
  38. 1 1
      drivers/platform/msm/ipa/ipa_rm_resource.h
  39. 1 1
      drivers/platform/msm/ipa/ipa_test_module/ipa_rm_ut.c
  40. 1 1
      drivers/platform/msm/ipa/ipa_test_module/ipa_rm_ut.h
  41. 1 1
      drivers/platform/msm/ipa/ipa_test_module/ipa_test_module.h
  42. 1 1
      drivers/platform/msm/ipa/ipa_test_module/ipa_test_module_impl.c
  43. 158 178
      drivers/platform/msm/ipa/ipa_v3/ipa.c
  44. 24 24
      drivers/platform/msm/ipa/ipa_v3/ipa_client.c
  45. 5 5
      drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
  46. 1 1
      drivers/platform/msm/ipa/ipa_v3/ipa_defs.h
  47. 68 62
      drivers/platform/msm/ipa/ipa_v3/ipa_dma.c
  48. 49 43
      drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
  49. 10 10
      drivers/platform/msm/ipa/ipa_v3/ipa_eth_i.c
  50. 5 5
      drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
  51. 15 11
      drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
  52. 28 28
      drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c
  53. 8 124
      drivers/platform/msm/ipa/ipa_v3/ipa_i.h
  54. 5 4
      drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c
  55. 12 11
      drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
  56. 13 13
      drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
  57. 23 23
      drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
  58. 7 7
      drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
  59. 10 1
      drivers/platform/msm/ipa/ipa_v3/ipa_net.c
  60. 2 2
      drivers/platform/msm/ipa/ipa_v3/ipa_odl.c
  61. 2 2
      drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
  62. 10 22
      drivers/platform/msm/ipa/ipa_v3/ipa_qdss.c
  63. 1 1
      drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
  64. 1 1
      drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
  65. 17 9
      drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
  66. 9 9
      drivers/platform/msm/ipa/ipa_v3/ipa_stats.c
  67. 1 1
      drivers/platform/msm/ipa/ipa_v3/ipa_trace.h
  68. 4 4
      drivers/platform/msm/ipa/ipa_v3/ipa_tsp.c
  69. 8 7
      drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
  70. 1 1
      drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c
  71. 1 1
      drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
  72. 61 46
      drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
  73. 81 73
      drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
  74. 5 5
      drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c
  75. 8 8
      drivers/platform/msm/ipa/ipa_v3/ipa_wigig_i.c
  76. 1 1
      drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
  77. 1 1
      drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.h
  78. 1 1
      drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
  79. 1 1
      drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
  80. 1 1
      drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
  81. 13 10
      drivers/platform/msm/ipa/ipa_v3/rmnet_ctl_ipa.c
  82. 18 18
      drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
  83. 13 10
      drivers/platform/msm/ipa/ipa_v3/rmnet_ll_ipa.c
  84. 1 1
      drivers/platform/msm/ipa/ipa_v3/teth_bridge.c
  85. 1 1
      drivers/platform/msm/ipa/test/ipa_pm_ut.c
  86. 1 1
      drivers/platform/msm/ipa/test/ipa_test_dma.c
  87. 6 6
      drivers/platform/msm/ipa/test/ipa_test_hw_stats.c
  88. 2 2
      drivers/platform/msm/ipa/test/ipa_test_mhi.c
  89. 4 4
      drivers/platform/msm/ipa/test/ipa_test_ntn.c
  90. 2 2
      drivers/platform/msm/ipa/test/ipa_test_wdi3.c
  91. 1 1
      drivers/platform/msm/ipa/test/ipa_ut_framework.c
  92. 80 0
      drivers/platform/msm/ipa_kernel_headers.py
  93. 1 1
      kernel-tests/Android.bp

+ 0 - 2
dataipa_dlkm_vendor_board.mk

@@ -15,8 +15,6 @@ ifeq ($(call is-board-platform-in-list,$(DATA_DLKM_BOARD_PLATFORMS_LIST)),true)
 BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/gsim.ko
 BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/ipam.ko
 BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/ipanetm.ko
-BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/rndisipam.ko
-BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/ipa_clientsm.ko
 ifeq ($(CONFIG_LOCALVERSION), "-gki-consolidate")
 BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/ipatestm.ko
 endif

+ 0 - 2
dataipa_dlkm_vendor_product.mk

@@ -1,8 +1,6 @@
 PRODUCT_PACKAGES += gsim.ko
 PRODUCT_PACKAGES += ipam.ko
 PRODUCT_PACKAGES += ipanetm.ko
-PRODUCT_PACKAGES += rndisipam.ko
-PRODUCT_PACKAGES += ipa_clientsm.ko
 ifeq ($(CONFIG_LOCALVERSION), "-gki-consolidate")
 PRODUCT_PACKAGES += ipatestm.ko
 endif

+ 39 - 1
drivers/platform/msm/Android.bp

@@ -1,12 +1,42 @@
 headers_src = [
+    "include/uapi/linux/*.h",
+]
+
+test_headers_src = [
     "ipa/ipa_test_module/ipa_test_module.h",
 ]
 
+ipa_headers_out = [
+    "linux/msm_ipa.h",
+    "linux/ipa_qmi_service_v01.h",
+    "linux/rmnet_ipa_fd_ioctl.h",
+]
+
 ipa_test_headers_out = [
     "ipa_test_module.h",
 ]
 
+ipa_kernel_headers_verbose = "--verbose "
 ipa_test_kernel_headers_verbose = "--verbose "
+
+genrule {
+    name: "qti_generate_ipa_kernel_headers",
+    tools: ["headers_install.sh",
+            "unifdef"
+    ],
+    tool_files: [
+         "ipa_kernel_headers.py",
+    ],
+    srcs: headers_src,
+    cmd: "python3 -u $(location ipa_kernel_headers.py) " +
+        ipa_kernel_headers_verbose +
+        "--gen_dir $(genDir) " +
+        "--ipa_include_uapi $(locations include/uapi/linux/*.h) " +
+        "--unifdef $(location unifdef) " +
+        "--headers_install $(location headers_install.sh)",
+    out: ipa_headers_out,
+}
+
 genrule {
     name: "qti_generate_ipa_test_kernel_headers",
     tools: ["headers_install.sh",
@@ -15,7 +45,7 @@ genrule {
     tool_files: [
          "ipa_test_kernel_headers.py",
     ],
-    srcs: headers_src,
+    srcs: test_headers_src,
     cmd: "python3 -u $(location ipa_test_kernel_headers.py) " +
         ipa_test_kernel_headers_verbose +
         "--gen_dir $(genDir) " +
@@ -25,6 +55,14 @@ genrule {
     out: ipa_test_headers_out,
 }
 
+cc_library_headers {
+    name: "qti_ipa_kernel_headers",
+    generated_headers: ["qti_generate_ipa_kernel_headers"],
+    export_generated_headers: ["qti_generate_ipa_kernel_headers"],
+    vendor: true,
+    recovery_available: true
+}
+
 cc_library_headers {
     name: "qti_ipa_test_kernel_headers",
     generated_headers: ["qti_generate_ipa_test_kernel_headers"],

+ 2 - 10
drivers/platform/msm/Android.mk

@@ -27,6 +27,8 @@ LOCAL_SRC_FILES   := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
 LOCAL_MODULE              := ipam.ko
 LOCAL_MODULE_KBUILD_NAME  := ipa/ipam.ko
 LOCAL_MODULE_DEBUG_ENABLE := true
+LOCAL_EXPORT_KO_INCLUDE_DIRS    := $(LOCAL_PATH)/include
+LOCAL_EXPORT_KO_INCLUDE_DIRS    += $(LOCAL_PATH)/include/uapi
 LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
 $(warning $(DLKM_DIR))
 include $(DLKM_DIR)/Build_external_kernelmodule.mk
@@ -41,16 +43,6 @@ LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
 $(warning $(DLKM_DIR))
 include $(DLKM_DIR)/Build_external_kernelmodule.mk
 
-include $(CLEAR_VARS)
-KBUILD_OPTIONS += MODNAME=rndisipam
-LOCAL_SRC_FILES   := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
-LOCAL_MODULE              := rndisipam.ko
-LOCAL_MODULE_KBUILD_NAME  := ipa/ipa_clients/rndisipam.ko
-LOCAL_MODULE_DEBUG_ENABLE := true
-LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
-$(warning $(DLKM_DIR))
-include $(DLKM_DIR)/Build_external_kernelmodule.mk
-
 include $(CLEAR_VARS)
 KBUILD_OPTIONS += MODNAME=ipaclientsm
 LOCAL_SRC_FILES   := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)

+ 3 - 0
drivers/platform/msm/Kbuild

@@ -75,6 +75,9 @@ endif
 endif
 
 ifneq (,$(filter $(CONFIG_IPA3) $(CONFIG_GSI),y m))
+LINUXINCLUDE += -I$(DATAIPADRVTOP)/include
+LINUXINCLUDE += -I$(DATAIPADRVTOP)/include/linux
+LINUXINCLUDE += -I$(DATAIPADRVTOP)/include/uapi
 LINUXINCLUDE += -I$(DATAIPADRVTOP)/gsi
 LINUXINCLUDE += -I$(DATAIPADRVTOP)/gsi/gsihal
 LINUXINCLUDE += -I$(DATAIPADRVTOP)/ipa

+ 2466 - 0
drivers/platform/msm/include/linux/ipa.h

@@ -0,0 +1,2466 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _IPA_H_
+#define _IPA_H_
+
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/msm_ipa.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/ipa_qmi_service_v01.h>
+#include <linux/msm_gsi.h>
+
+#define IPA_APPS_MAX_BW_IN_MBPS 700
+#define IPA_BW_THRESHOLD_MAX 3
+
+#define IPA_MAX_CH_STATS_SUPPORTED 5
+#define IPA_EP_ARR_SIZE 2
+#define IPA_EP_PER_REG 32
+
+/* Notifiers for rmnet driver */
+#define BUFF_ABOVE_HIGH_THRESHOLD_FOR_DEFAULT_PIPE        1
+#define BUFF_ABOVE_HIGH_THRESHOLD_FOR_COAL_PIPE           2
+#define BUFF_BELOW_LOW_THRESHOLD_FOR_DEFAULT_PIPE         3
+#define BUFF_BELOW_LOW_THRESHOLD_FOR_COAL_PIPE            4
+#define BUFF_ABOVE_HIGH_THRESHOLD_FOR_LL_PIPE             5
+#define BUFF_BELOW_LOW_THRESHOLD_FOR_LL_PIPE              6
+#define FREE_PAGE_TASK_SCHEDULED                          7
+#define FREE_PAGE_TASK_SCHEDULED_LL                       8
+
+/**
+ * the attributes of the socksv5 options
+ */
+#define IPA_SOCKSv5_ENTRY_VALID	(1ul << 0)
+#define IPA_SOCKSv5_IPV4	(1ul << 1)
+#define IPA_SOCKSv5_IPV6	(1ul << 2)
+#define IPA_SOCKSv5_OPT_TS	(1ul << 3)
+#define IPA_SOCKSv5_OPT_SACK	(1ul << 4)
+#define IPA_SOCKSv5_OPT_WS_STC	(1ul << 5)
+#define IPA_SOCKSv5_OPT_WS_DMC	(1ul << 6)
+
+#define IPA_SOCKsv5_ADD_COM_ID		15
+#define IPA_SOCKsv5_ADD_V6_V4_COM_PM	1
+#define IPA_SOCKsv5_ADD_V4_V6_COM_PM	2
+#define IPA_SOCKsv5_ADD_V6_V6_COM_PM	3
+
+/**
+ * enum ipa_transport_type
+ * transport type: either GSI or SPS
+ */
+enum ipa_transport_type {
+	IPA_TRANSPORT_TYPE_SPS,
+	IPA_TRANSPORT_TYPE_GSI
+};
+
+/**
+ * enum ipa_nat_en_type - NAT setting type in IPA end-point
+ */
+enum ipa_nat_en_type {
+	IPA_BYPASS_NAT,
+	IPA_SRC_NAT,
+	IPA_DST_NAT,
+};
+
+/**
+ * enum ipa_ipv6ct_en_type - IPv6CT setting type in IPA end-point
+ */
+enum ipa_ipv6ct_en_type {
+	IPA_BYPASS_IPV6CT,
+	IPA_ENABLE_IPV6CT,
+};
+
+/**
+ * enum ipa_mode_type - mode setting type in IPA end-point
+ * @BASIC: basic mode
+ * @ENABLE_FRAMING_HDLC: not currently supported
+ * @ENABLE_DEFRAMING_HDLC: not currently supported
+ * @DMA: all data arriving IPA will not go through IPA logic blocks, this
+ *  allows IPA to work as DMA for specific pipes.
+ */
+enum ipa_mode_type {
+	IPA_BASIC,
+	IPA_ENABLE_FRAMING_HDLC,
+	IPA_ENABLE_DEFRAMING_HDLC,
+	IPA_DMA,
+};
+
+/**
+ *  enum ipa_aggr_en_type - aggregation setting type in IPA
+ *  end-point
+ */
+enum ipa_aggr_en_type {
+	IPA_BYPASS_AGGR,
+	IPA_ENABLE_AGGR,
+	IPA_ENABLE_DEAGGR,
+};
+
+/**
+ *  enum ipa_aggr_type - type of aggregation in IPA end-point
+ */
+enum ipa_aggr_type {
+	IPA_MBIM_16 = 0,
+	IPA_HDLC    = 1,
+	IPA_TLP     = 2,
+	IPA_RNDIS   = 3,
+	IPA_GENERIC = 4,
+	IPA_COALESCE = 5,
+	IPA_QCMAP   = 6,
+};
+
+/**
+ * enum ipa_aggr_mode - global aggregation mode
+ */
+enum ipa_aggr_mode {
+	IPA_MBIM_AGGR,
+	IPA_QCNCM_AGGR,
+};
+
+/**
+ * enum ipa_dp_evt_type - type of event client callback is
+ * invoked for on data path
+ * @IPA_RECEIVE: data is struct sk_buff
+ * @IPA_WRITE_DONE: data is struct sk_buff
+ */
+enum ipa_dp_evt_type {
+	IPA_RECEIVE,
+	IPA_WRITE_DONE,
+};
+
+/**
+ * enum hdr_total_len_or_pad_type - type of value held by TOTAL_LEN_OR_PAD
+ * field in header configuration register.
+ * @IPA_HDR_PAD: field is used as padding length
+ * @IPA_HDR_TOTAL_LEN: field is used as total length
+ */
+enum hdr_total_len_or_pad_type {
+	IPA_HDR_PAD = 0,
+	IPA_HDR_TOTAL_LEN = 1,
+};
+
+/**
+ * struct ipa_ep_cfg_nat - NAT configuration in IPA end-point
+ * @nat_en:	This defines the default NAT mode for the pipe: in case of
+ *		filter miss - the default NAT mode defines the NATing operation
+ *		on the packet. Valid for Input Pipes only (IPA consumer)
+ * @nat_exc_suppress: 1 - NAT exception is supressed and packet will be
+ * routed using configured routing tables.
+ *	0 - NAT exception is allowed and packets will be routed to exception
+ * pipe. Valid for input pipes only (IPA consumer)
+ */
+struct ipa_ep_cfg_nat {
+	enum ipa_nat_en_type nat_en;
+	bool nat_exc_suppress;
+};
+
+/**
+ * struct ipa_ep_cfg_conn_track - IPv6 Connection tracking configuration in
+ *	IPA end-point
+ * @conn_track_en: Defines speculative conn_track action, means if specific
+ *		   pipe needs to have UL/DL IPv6 Connection Tracking or Bypass
+ *		   IPv6 Connection Tracking. 0: Bypass IPv6 Connection Tracking
+ *					     1: IPv6 UL/DL Connection Tracking.
+ *		  Valid for Input Pipes only (IPA consumer)
+ */
+struct ipa_ep_cfg_conn_track {
+	enum ipa_ipv6ct_en_type conn_track_en;
+};
+
+/**
+ * struct ipa_ep_cfg_hdr - header configuration in IPA end-point
+ *
+ * @hdr_len:Header length in bytes to be added/removed. Assuming
+ *			header len is constant per endpoint. Valid for
+ *			both Input and Output Pipes
+ * @hdr_ofst_metadata_valid:	0: Metadata_Ofst  value is invalid, i.e., no
+ *			metadata within header.
+ *			1: Metadata_Ofst  value is valid, i.e., metadata
+ *			within header is in offset Metadata_Ofst Valid
+ *			for Input Pipes only (IPA Consumer) (for output
+ *			pipes, metadata already set within the header)
+ * @hdr_ofst_metadata:	Offset within header in which metadata resides
+ *			Size of metadata - 4bytes
+ *			Example -  Stream ID/SSID/mux ID.
+ *			Valid for  Input Pipes only (IPA Consumer) (for output
+ *			pipes, metadata already set within the header)
+ * @hdr_additional_const_len:	Defines the constant length that should be added
+ *			to the payload length in order for IPA to update
+ *			correctly the length field within the header
+ *			(valid only in case Hdr_Ofst_Pkt_Size_Valid=1)
+ *			Valid for Output Pipes (IPA Producer)
+ *			Starting IPA4.5, this field in H/W requires more bits
+ *			to support larger range, but no spare bits to use.
+ *			So the MSB part is done thourgh the EXT register.
+ *			When accessing this register, need to access the EXT
+ *			register as well.
+ * @hdr_ofst_pkt_size_valid:	0: Hdr_Ofst_Pkt_Size  value is invalid, i.e., no
+ *			length field within the inserted header
+ *			1: Hdr_Ofst_Pkt_Size  value is valid, i.e., a
+ *			packet length field resides within the header
+ *			Valid for Output Pipes (IPA Producer)
+ * @hdr_ofst_pkt_size:	Offset within header in which packet size reside. Upon
+ *			Header Insertion, IPA will update this field within the
+ *			header with the packet length . Assumption is that
+ *			header length field size is constant and is 2Bytes
+ *			Valid for Output Pipes (IPA Producer)
+ *			Starting IPA4.5, this field in H/W requires more bits
+ *			to support larger range, but no spare bits to use.
+ *			So the MSB part is done thourgh the EXT register.
+ *			When accessing this register, need to access the EXT
+ *			register as well.
+ * @hdr_a5_mux:	Determines whether A5 Mux header should be added to the packet.
+ *			This bit is valid only when Hdr_En=01(Header Insertion)
+ *			SW should set this bit for IPA-to-A5 pipes.
+ *			0: Do not insert A5 Mux Header
+ *			1: Insert A5 Mux Header
+ *			Valid for Output Pipes (IPA Producer)
+ * @hdr_remove_additional:	bool switch, remove more of the header
+ *			based on the aggregation configuration (register
+ *			HDR_LEN_INC_DEAGG_HDR)
+ * @hdr_metadata_reg_valid:	bool switch, metadata from
+ *			register INIT_HDR_METADATA_n is valid.
+ *			(relevant only for IPA Consumer pipes)
+ *			Starting IPA4.5, this parameter is irrelevant and H/W
+ *			assumes it is always valid.
+ */
+struct ipa_ep_cfg_hdr {
+	u32  hdr_len;
+	u32  hdr_ofst_metadata_valid;
+	u32  hdr_ofst_metadata;
+	u32  hdr_additional_const_len;
+	u32  hdr_ofst_pkt_size_valid;
+	u32  hdr_ofst_pkt_size;
+	u32  hdr_a5_mux;
+	u32  hdr_remove_additional;
+	u32  hdr_metadata_reg_valid;
+};
+
+/**
+ * struct ipa_ep_cfg_hdr_ext - extended header configuration in IPA end-point
+ * @hdr_pad_to_alignment: Pad packet to specified alignment
+ *	(2^pad to alignment value), i.e. value of 3 means pad to 2^3 = 8 bytes
+ *	alignment. Alignment is to 0,2 up to 32 bytes (IPAv2 does not support 64
+ *	byte alignment). Valid for Output Pipes only (IPA Producer).
+ * @hdr_total_len_or_pad_offset: Offset to length field containing either
+ *	total length or pad length, per hdr_total_len_or_pad config
+ * @hdr_payload_len_inc_padding: 0-IPA_ENDP_INIT_HDR_n's
+ *	HDR_OFST_PKT_SIZE does
+ *	not includes padding bytes size, payload_len = packet length,
+ *	1-IPA_ENDP_INIT_HDR_n's HDR_OFST_PKT_SIZE includes
+ *	padding bytes size, payload_len = packet length + padding
+ * @hdr_total_len_or_pad: field is used as PAD length ot as Total length
+ *	(header + packet + padding)
+ * @hdr_total_len_or_pad_valid: 0-Ignore TOTAL_LEN_OR_PAD field, 1-Process
+ *	TOTAL_LEN_OR_PAD field
+ * @hdr_little_endian: 0-Big Endian, 1-Little Endian
+ * @hdr: The header structure. Used starting IPA4.5 where part of the info
+ *	at the header structure is implemented via the EXT register at the H/W
+ * @hdr_bytes_to_remove_valid: 0-Ignore hdr_bytes_to_remove field, 1-Process
+ *	hdr_bytes_to_remove field
+ * @hdr_bytes_to_remove: desired bytes to remove from top of the packet for
+ *	partial L2 header retention
+ */
+struct ipa_ep_cfg_hdr_ext {
+	u32 hdr_pad_to_alignment;
+	u32 hdr_total_len_or_pad_offset;
+	bool hdr_payload_len_inc_padding;
+	enum hdr_total_len_or_pad_type hdr_total_len_or_pad;
+	bool hdr_total_len_or_pad_valid;
+	bool hdr_little_endian;
+	struct ipa_ep_cfg_hdr *hdr;
+	bool hdr_bytes_to_remove_valid;
+	u32 hdr_bytes_to_remove;
+};
+
+/**
+ * struct ipa_ep_cfg_mode - mode configuration in IPA end-point
+ * @mode:	Valid for Input Pipes only (IPA Consumer)
+ * @dst:	This parameter specifies the output pipe to which the packets
+ *		will be routed to.
+ *		This parameter is valid for Mode=DMA and not valid for
+ *		Mode=Basic
+ *		Valid for Input Pipes only (IPA Consumer)
+ */
+struct ipa_ep_cfg_mode {
+	enum ipa_mode_type mode;
+	enum ipa_client_type dst;
+};
+
+/**
+ * struct ipa_ep_cfg_aggr - aggregation configuration in IPA end-point
+ *
+ * @aggr_en:	Valid for both Input and Output Pipes
+ * @aggr:	aggregation type (Valid for both Input and Output Pipes)
+ * @aggr_byte_limit:	Limit of aggregated packet size in KB (<=32KB) When set
+ *			to 0, there is no size limitation on the aggregation.
+ *			When both, Aggr_Byte_Limit and Aggr_Time_Limit are set
+ *			to 0, there is no aggregation, every packet is sent
+ *			independently according to the aggregation structure
+ *			Valid for Output Pipes only (IPA Producer )
+ * @aggr_time_limit:	Timer to close aggregated packet When set to 0,
+ *			there is no time limitation on the aggregation.  When
+ *			both, Aggr_Byte_Limit and Aggr_Time_Limit are set to 0,
+ *			there is no aggregation, every packet is sent
+ *			independently according to the aggregation structure
+ *			Valid for Output Pipes only (IPA Producer).
+ *			Time unit is -->> usec <<--
+ * @aggr_pkt_limit: Defines if EOF close aggregation or not. if set to false
+ *			HW closes aggregation (sends EOT) only based on its
+ *			aggregation config (byte/time limit, etc). if set to
+ *			true EOF closes aggregation in addition to HW based
+ *			aggregation closure. Valid for Output Pipes only (IPA
+ *			Producer). EOF affects only Pipes configured for
+ *			generic aggregation.
+ * @aggr_hard_byte_limit_en: If set to 1, byte-limit aggregation for this
+ *			pipe will apply a hard-limit behavior which will not
+ *			allow frames to be closed with more than byte-limit
+ *			bytes. If set to 0, previous byte-limit behavior
+ *			will apply - frames close once a packet causes the
+ *			accumulated byte-count to cross the byte-limit
+ *			threshold (closed frame will contain that packet).
+ * @aggr_sw_eof_active: 0: EOF does not close aggregation. HW closes aggregation
+ *			(sends EOT) only based on its aggregation config
+ *			(byte/time limit, etc).
+ *			1: EOF closes aggregation in addition to HW based
+ *			aggregation closure. Valid for Output Pipes only (IPA
+ *			Producer). EOF affects only Pipes configured for generic
+ *			aggregation.
+ * @pulse_generator:	Pulse generator number to be used.
+ *			For internal use.
+ *			Supported starting IPA4.5.
+ * @scaled_time:	Time limit in accordance to the pulse generator
+ *			granularity.
+ *			For internal use
+ *			Supported starting IPA4.5
+ * @aggr_coal_l2: enable L2  coalescing on the specifid dest pipe,
+ *			work only if AGGR_TYPE set to AGGR_TYPE_COALESCING.
+ *			Supported starting IPA5.5
+ */
+struct ipa_ep_cfg_aggr {
+	enum ipa_aggr_en_type aggr_en;
+	enum ipa_aggr_type aggr;
+	u32 aggr_byte_limit;
+	u32 aggr_time_limit;
+	u32 aggr_pkt_limit;
+	u32 aggr_hard_byte_limit_en;
+	bool aggr_sw_eof_active;
+	u8 pulse_generator;
+	u8 scaled_time;
+	bool aggr_coal_l2;
+};
+
+/**
+ * struct ipa_ep_cfg_route - route configuration in IPA end-point
+ * @rt_tbl_hdl:	Defines the default routing table index to be used in case there
+ *		is no filter rule matching, valid for Input Pipes only (IPA
+ *		Consumer). Clients should set this to 0 which will cause default
+ *		v4 and v6 routes setup internally by IPA driver to be used for
+ *		this end-point
+ */
+struct ipa_ep_cfg_route {
+	u32 rt_tbl_hdl;
+};
+
+/**
+ * struct ipa_ep_cfg_holb - head of line blocking configuration in IPA end-point
+ * @en: enable(1 => ok to drop pkt)/disable(0 => never drop pkt)
+ * @tmr_val: duration in units of 128 IPA clk clock cyles [0,511], 1 clk=1.28us
+ *	     IPAv2.5 support 32 bit HOLB timeout value, previous versions
+ *	     supports 16 bit
+ *  IPAv4.2: splitting timer value into 2 fields. Timer value is:
+ *   BASE_VALUE * (2^SCALE)
+ *  IPA4.5: tmr_val is in -->>msec<<--. Range is dynamic based
+ *   on H/W configuration. (IPA4.5 absolute maximum is 0.65535*31 -> ~20sec).
+ * @base_val : IPA4.2 only field. base value of the timer.
+ * @scale : IPA4.2 only field. scale value for timer.
+ * @pulse_generator: Pulse generator number to be used.
+ *  For internal use.
+ *  Supported starting IPA4.5.
+ * @scaled_time: Time limit in accordance to the pulse generator granularity
+ *  For internal use
+ *  Supported starting IPA4.5
+ */
+struct ipa_ep_cfg_holb {
+	u32 tmr_val;
+	u32 base_val;
+	u32 scale;
+	u16 en;
+	u8 pulse_generator;
+	u8 scaled_time;
+};
+
+/**
+ * struct ipa_ep_cfg_deaggr - deaggregation configuration in IPA end-point
+ * @deaggr_hdr_len: Deaggregation Header length in bytes. Valid only for Input
+ *	Pipes, which are configured for 'Generic' deaggregation.
+ * @syspipe_err_detection - If set to 1, enables error detection for
+ *	de-aggregration. Valid only for Input Pipes, which are configured
+ *	for 'Generic' deaggregation.
+ *	Note: if this bit is set, de-aggregated frames must be contiguous
+ *	in memory.
+ * @packet_offset_valid: - 0: PACKET_OFFSET is not used, 1: PACKET_OFFSET is
+ *	used.
+ * @packet_offset_location: Location of packet offset field, which specifies
+ *	the offset to the packet from the start of the packet offset field.
+ * @ignore_min_pkt_err - Ignore packets smaller than header. This is intended
+ *	for use in RNDIS de-aggregated pipes, to silently ignore a redundant
+ *	1-byte trailer in MSFT implementation.
+ * @max_packet_len: DEAGGR Max Packet Length in Bytes. A Packet with higher
+ *	size wil be treated as an error. 0 - Packet Length is not Bound,
+ *	IPA should not check for a Max Packet Length.
+ */
+struct ipa_ep_cfg_deaggr {
+	u32 deaggr_hdr_len;
+	bool syspipe_err_detection;
+	bool packet_offset_valid;
+	u32 packet_offset_location;
+	bool ignore_min_pkt_err;
+	u32 max_packet_len;
+};
+
+/**
+ * enum ipa_cs_offload - checksum offload setting
+ */
+enum ipa_cs_offload {
+	IPA_DISABLE_CS_OFFLOAD,
+	/*
+	 * For enum value = 1, we check the csum required/valid bit which is the
+	 * same bit used for both DL and UL but have different meanings.
+	 * For UL pipe, HW checks if it needs to perform Csum caluclation.
+	 * For DL pipe, HW checks if the csum is valid or invalid
+	 */
+	IPA_ENABLE_CS_OFFLOAD_UL,
+	IPA_ENABLE_CS_DL_QMAP = IPA_ENABLE_CS_OFFLOAD_UL,
+	IPA_ENABLE_CS_OFFLOAD_DL,
+	IPA_CS_RSVD
+};
+
+/**
+ * struct ipa_ep_cfg_cfg - IPA ENDP_INIT Configuration register
+ * @frag_offload_en: - 0 - IP packet fragment handling is disabled. IP packet
+ *	fragments should be sent to SW. SW is responsible for
+ *	configuring filter rules, and IP packet filter exception should be
+ *	used to send all fragments to SW. 1 - IP packet fragment
+ *	handling is enabled. IPA checks for fragments and uses frag
+ *	rules table for processing fragments. Valid only for Input Pipes
+ *	(IPA Consumer)
+ * @cs_offload_en: Checksum offload enable: 00: Disable checksum offload, 01:
+ *	Enable checksum calculation offload (UL) - For output pipe
+ *	(IPA producer) specifies that checksum trailer is to be added.
+ *	For input pipe (IPA consumer) specifies presence of checksum
+ *	header and IPA checksum calculation accordingly. 10: Enable
+ *	checksum calculation offload (DL) - For output pipe (IPA
+ *	producer) specifies that checksum trailer is to be added. For
+ *	input pipe (IPA consumer) specifies IPA checksum calculation.
+ *	11: Reserved
+ * @cs_metadata_hdr_offset: Offset in Words (4 bytes) within header in which
+ *	checksum metadata info header (4 bytes) starts (UL). Values are 0-15, which
+ *	mean 0 - 60 byte checksum header offset. Valid for input
+ *	pipes only (IPA consumer)
+ * @gen_qmb_master_sel: Select bit for ENDP GEN-QMB master. This is used to
+ *	separate DDR & PCIe transactions in-order to limit them as
+ *	a group (using MAX_WRITES/READS limiation). Valid for input and
+ *	output pipes (IPA consumer+producer)
+ * @pipe_replicate_en: 1 - For consumer pipe - consumer DPL will be active.
+ *	For producer pipe - producer DPL will be active.
+ *	0 - packet replication disabled for both consumer and producer pipe.
+ *	Supported from IPA5.5 onwards.
+ */
+struct ipa_ep_cfg_cfg {
+	bool frag_offload_en;
+	enum ipa_cs_offload cs_offload_en;
+	u8 cs_metadata_hdr_offset;
+	u8 gen_qmb_master_sel;
+	u8 tx_instance;
+	bool pipe_replicate_en;
+};
+
+/**
+ * struct ipa_ep_cfg_prod_cfg - IPA ENDP_INIT Producer Configuration register
+ * @tx_instance: - 0 - select TX_0 instance.
+ * 1 - select TX_1 instance.
+ * @tsp_enable: boolean to indicate TSP-enablement per producer pipe.
+ * @max_output_size_drop_enable: enable policing by max output size for TSP
+ * feature. In case of TSP_ENABLE == 1 + valid egress_tc, max output size
+ * policing will be valid regardless to this bit.
+ * @tsp_idx: TSP producer-index. Controls pointer to producer-rate database.
+ * Valid only when TSP_ENABLE field is set. Value should be unique.
+ * @max_output_size: max output size allowed per producer. Value is in 64-byte
+ * resolution for TSP feature
+ * @egress_tc_lowest: Lowest egress traffic-class index assignes to this
+ * producer.
+ * @egress_tc_highest: Highest egress traffic-class index assignes to this
+ * producer.
+ */
+struct ipa_ep_cfg_prod_cfg {
+	u8 tx_instance;
+	bool tsp_enable;
+	bool max_output_size_drop_enable;
+	u8 tsp_idx;
+	u8 max_output_size;
+	u8 egress_tc_lowest;
+	u8 egress_tc_highest;
+};
+
+/**
+ * struct ipa_ep_cfg_metadata_mask - Endpoint initialization hdr metadata mask
+ * @metadata_mask: Mask specifying which metadata bits to write to
+ *	IPA_ENDP_INIT_HDR_n.s HDR_OFST_METADATA. Only
+ *	masked metadata bits (set to 1) will be written. Valid for Output
+ *	Pipes only (IPA Producer)
+ */
+struct ipa_ep_cfg_metadata_mask {
+	u32 metadata_mask;
+};
+
+/**
+ * struct ipa_ep_cfg_metadata - Metadata configuration in IPA end-point
+ * @md:	This defines the metadata from tx data descriptor
+ * @qmap_id: qmap id
+ */
+struct ipa_ep_cfg_metadata {
+	u32 qmap_id;
+};
+
+/**
+ * struct ipa_ep_cfg_seq - HPS/DPS sequencer type configuration in IPA end-point
+ * @set_dynamic:  0 - HPS/DPS seq type is configured statically,
+ *		   1 - HPS/DPS seq type is set to seq_type
+ * @seq_type: HPS/DPS sequencer type configuration
+ */
+struct ipa_ep_cfg_seq {
+	bool set_dynamic;
+	int seq_type;
+};
+
+/**
+ * struct ipa_ep_cfg_ulso - ULSO configurations
+ * @ipid_min_max_idx: A value in the range [0, 2]. Determines the registers
+ *		pair from which to read the minimum and maximum of IPv4 packets ID. It
+ *		is set to 0 as this range is platform specific and there is no need for
+ *		more than one pair values for this range. The minimum and maximum values
+ *		are taken from the device tree in pre_init and are stored in dedicated
+ *		registers.
+ * @is_ulso_pipe: Indicates whether the pipe is in ulso operation mode.
+ */
+struct ipa_ep_cfg_ulso {
+	int ipid_min_max_idx;
+	bool is_ulso_pipe;
+};
+
+/**
+ * struct ipa_ep_cfg - configuration of IPA end-point
+ * @nat:		NAT parameters
+ * @conn_track:		IPv6CT parameters
+ * @hdr:		Header parameters
+ * @hdr_ext:		Extended header parameters
+ * @mode:		Mode parameters
+ * @aggr:		Aggregation parameters
+ * @deaggr:		Deaggregation params
+ * @route:		Routing parameters
+ * @cfg:		Configuration register data
+ * @metadata_mask:	Hdr metadata mask
+ * @meta:		Metadata
+ * @seq:		HPS/DPS sequencers configuration
+ * @ulso:		ULSO configuration
+ * @prod_cfg:	Producer specific Configuration register data
+ */
+struct ipa_ep_cfg {
+	struct ipa_ep_cfg_nat nat;
+	struct ipa_ep_cfg_conn_track conn_track;
+	struct ipa_ep_cfg_hdr hdr;
+	struct ipa_ep_cfg_hdr_ext hdr_ext;
+	struct ipa_ep_cfg_mode mode;
+	struct ipa_ep_cfg_aggr aggr;
+	struct ipa_ep_cfg_deaggr deaggr;
+	struct ipa_ep_cfg_route route;
+	struct ipa_ep_cfg_cfg cfg;
+	struct ipa_ep_cfg_metadata_mask metadata_mask;
+	struct ipa_ep_cfg_metadata meta;
+	struct ipa_ep_cfg_seq seq;
+	struct ipa_ep_cfg_ulso ulso;
+	struct ipa_ep_cfg_prod_cfg prod_cfg;
+};
+
+/**
+ * struct ipa_ep_cfg_ctrl - Control configuration in IPA end-point
+ * @ipa_ep_suspend: 0 - ENDP is enabled, 1 - ENDP is suspended (disabled).
+ *			Valid for PROD Endpoints
+ * @ipa_ep_delay:   0 - ENDP is free-running, 1 - ENDP is delayed.
+ *			SW controls the data flow of an endpoint using this bit.
+ *			Valid for CONS Endpoints
+ */
+struct ipa_ep_cfg_ctrl {
+	bool ipa_ep_suspend;
+	bool ipa_ep_delay;
+};
+
+/**
+ * x should be in bytes
+ */
+#define IPA_NUM_OF_FIFO_DESC(x) (x/sizeof(struct sps_iovec))
+typedef void (*ipa_notify_cb)(void *priv, enum ipa_dp_evt_type evt,
+		       unsigned long data);
+
+/**
+ * enum ipa_wdi_meter_evt_type - type of event client callback is
+ * for AP+STA mode metering
+ * @IPA_GET_WDI_SAP_STATS: get IPA_stats between SAP and STA -
+ *			use ipa_get_wdi_sap_stats structure
+ * @IPA_SET_WIFI_QUOTA: set quota limit on STA -
+ *			use ipa_set_wifi_quota structure
+ * @IPA_SET_WLAN_BW: set wlan BW -
+ *			use ipa_set_wlan_bw structure
+ */
+enum ipa_wdi_meter_evt_type {
+	IPA_GET_WDI_SAP_STATS,
+	IPA_SET_WIFI_QUOTA,
+	IPA_INFORM_WLAN_BW,
+};
+
+struct ipa_get_wdi_sap_stats {
+	/* indicate to reset stats after query */
+	uint8_t reset_stats;
+	/* indicate valid stats from wlan-fw */
+	uint8_t stats_valid;
+	/* Tx: SAP->STA */
+	uint64_t ipv4_tx_packets;
+	uint64_t ipv4_tx_bytes;
+	/* Rx: STA->SAP */
+	uint64_t ipv4_rx_packets;
+	uint64_t ipv4_rx_bytes;
+	uint64_t ipv6_tx_packets;
+	uint64_t ipv6_tx_bytes;
+	uint64_t ipv6_rx_packets;
+	uint64_t ipv6_rx_bytes;
+};
+
+/**
+ * struct ipa_set_wifi_quota - structure used for
+ *                                   IPA_SET_WIFI_QUOTA.
+ *
+ * @quota_bytes:    Quota (in bytes) for the STA interface.
+ * @set_quota:       Indicate whether to set the quota (use 1) or
+ *                   unset the quota.
+ *
+ */
+struct ipa_set_wifi_quota {
+	uint64_t quota_bytes;
+	uint8_t  set_quota;
+	/* indicate valid quota set from wlan-fw */
+	uint8_t set_valid;
+};
+
+/**
+ * struct ipa_inform_wlan_bw - structure used for
+ *                                   IPA_INFORM_WLAN_BW.
+ *
+ * @index:       Indicate which bw-index hit
+ * @throughput:  throughput usage
+ *
+ */
+struct ipa_inform_wlan_bw {
+	uint8_t  index;
+	uint64_t throughput;
+};
+
+typedef void (*ipa_wdi_meter_notifier_cb)(enum ipa_wdi_meter_evt_type evt,
+		       void *data);
+
+
+/**
+ * struct ipa_tx_intf - interface tx properties
+ * @num_props:	number of tx properties
+ * @prop:	the tx properties array
+ */
+struct ipa_tx_intf {
+	u32 num_props;
+	struct ipa_ioc_tx_intf_prop *prop;
+};
+
+/**
+ * struct ipa_rx_intf - interface rx properties
+ * @num_props:	number of rx properties
+ * @prop:	the rx properties array
+ */
+struct ipa_rx_intf {
+	u32 num_props;
+	struct ipa_ioc_rx_intf_prop *prop;
+};
+
+/**
+ * struct ipa_ext_intf - interface ext properties
+ * @excp_pipe_valid:	is next field valid?
+ * @excp_pipe:	exception packets should be routed to this pipe
+ * @num_props:	number of ext properties
+ * @prop:	the ext properties array
+ */
+struct ipa_ext_intf {
+	bool excp_pipe_valid;
+	enum ipa_client_type excp_pipe;
+	u32 num_props;
+	struct ipa_ioc_ext_intf_prop *prop;
+};
+
+/**
+ * struct ipa_sys_connect_params - information needed to setup an IPA end-point
+ * in system-BAM mode
+ * @ipa_ep_cfg:	IPA EP configuration
+ * @client:	the type of client who "owns" the EP
+ * @desc_fifo_sz: size of desc FIFO. This number is used to allocate the desc
+ *		fifo for BAM. For GSI, this size is used by IPA driver as a
+ *		baseline to calculate the GSI ring size in the following way:
+ *		For PROD pipes, GSI ring is 4 * desc_fifo_sz.
+ *		For CONS pipes, GSI ring is 2 * desc_fifo_sz.
+ * @priv:	callback cookie
+ * @notify:	callback
+ *		priv - callback cookie
+ *		evt - type of event
+ *		data - data relevant to event.  May not be valid. See event_type
+ *		enum for valid cases.
+ * @skip_ep_cfg: boolean field that determines if EP should be configured
+ *  by IPA driver
+ * @keep_ipa_awake: when true, IPA will not be clock gated
+ * @napi_obj: client's NAPI object, used when client polling is enabled
+ * @bypass_agg: when true, IPA bypasses the aggregation
+ * @int_modt: GSI event ring interrupt moderation time
+ *		cycles base interrupt moderation (32KHz clock)
+ * @int_modc: GSI event ring interrupt moderation packet counter
+ * @buff_size: Actual buff size of rx_pkt
+ * @ext_ioctl_v2: Flag to determine whether ioctl_v2 received
+ */
+struct ipa_sys_connect_params {
+	struct ipa_ep_cfg ipa_ep_cfg;
+	enum ipa_client_type client;
+	u32 desc_fifo_sz;
+	void *priv;
+	ipa_notify_cb notify;
+	bool skip_ep_cfg;
+	bool keep_ipa_awake;
+	struct napi_struct *napi_obj;
+	bool recycle_enabled;
+	bool bypass_agg;
+	u32 int_modt;
+	u32 int_modc;
+	u32 buff_size;
+	bool ext_ioctl_v2;
+};
+
+/**
+ * struct ipa_tx_meta - metadata for the TX packet
+ * @dma_address: dma mapped address of TX packet
+ * @dma_address_valid: is above field valid?
+ */
+struct ipa_tx_meta {
+	u8 pkt_init_dst_ep;
+	bool pkt_init_dst_ep_valid;
+	bool pkt_init_dst_ep_remote;
+	dma_addr_t dma_address;
+	bool dma_address_valid;
+};
+
+/**
+ * typedef ipa_msg_free_fn - callback function
+ * @param buff - [in] the message payload to free
+ * @param len - [in] size of message payload
+ * @param type - [in] the message type
+ *
+ * Message callback registered by kernel client with IPA driver to
+ * free message payload after IPA driver processing is complete
+ *
+ * No return value
+ */
+typedef void (*ipa_msg_free_fn)(void *buff, u32 len, u32 type);
+
+/**
+ * typedef ipa_msg_pull_fn - callback function
+ * @param buff - [in] where to copy message payload
+ * @param len - [in] size of buffer to copy payload into
+ * @param type - [in] the message type
+ *
+ * Message callback registered by kernel client with IPA driver for
+ * IPA driver to pull messages from the kernel client upon demand from
+ * user-space
+ *
+ * Returns how many bytes were copied into the buffer.
+ */
+typedef int (*ipa_msg_pull_fn)(void *buff, u32 len, u32 type);
+
+/**
+ * enum ipa_voltage_level - IPA Voltage levels
+ */
+enum ipa_voltage_level {
+	IPA_VOLTAGE_UNSPECIFIED,
+	IPA_VOLTAGE_SVS2 = IPA_VOLTAGE_UNSPECIFIED,
+	IPA_VOLTAGE_SVS,
+	IPA_VOLTAGE_NOMINAL,
+	IPA_VOLTAGE_TURBO,
+	IPA_VOLTAGE_MAX,
+};
+
+/**
+ * enum ipa_rm_event - IPA RM events
+ *
+ * Indicate the resource state change
+ */
+enum ipa_rm_event {
+	IPA_RM_RESOURCE_GRANTED,
+	IPA_RM_RESOURCE_RELEASED
+};
+
+typedef void (*ipa_rm_notify_cb)(void *user_data,
+		enum ipa_rm_event event,
+		unsigned long data);
+/**
+ * struct ipa_rm_register_params - information needed to
+ *      register IPA RM client with IPA RM
+ *
+ * @user_data: IPA RM client provided information
+ *		to be passed to notify_cb callback below
+ * @notify_cb: callback which is called by resource
+ *		to notify the IPA RM client about its state
+ *		change IPA RM client is expected to perform non
+ *		blocking operations only in notify_cb and
+ *		release notification context as soon as
+ *		possible.
+ */
+struct ipa_rm_register_params {
+	void *user_data;
+	ipa_rm_notify_cb notify_cb;
+};
+
+/**
+ * struct ipa_rm_create_params - information needed to initialize
+ *				the resource
+ * @name: resource name
+ * @floor_voltage: floor voltage needed for client to operate in maximum
+ *		bandwidth.
+ * @reg_params: register parameters; the contents are ignored
+ *		for a consumer resource, so NULL should be
+ *		provided for consumer resources
+ * @request_resource: function which should be called to request resource,
+ *			NULL should be provided for producer resource
+ * @release_resource: function which should be called to release resource,
+ *			NULL should be provided for producer resource
+ *
+ * IPA RM client is expected to perform non blocking operations only
+ * in request_resource and release_resource functions and
+ * release notification context as soon as possible.
+ */
+struct ipa_rm_create_params {
+	enum ipa_rm_resource_name name;
+	enum ipa_voltage_level floor_voltage;
+	struct ipa_rm_register_params reg_params;
+	int (*request_resource)(void);
+	int (*release_resource)(void);
+};
+
+/**
+ * struct ipa_rm_perf_profile - information regarding IPA RM client performance
+ * profile
+ *
+ * @max_bandwidth_mbps: maximum bandwidth need of the client in Mbps
+ */
+struct ipa_rm_perf_profile {
+	u32 max_supported_bandwidth_mbps;
+};
+
+#define A2_MUX_HDR_NAME_V4_PREF "dmux_hdr_v4_"
+#define A2_MUX_HDR_NAME_V6_PREF "dmux_hdr_v6_"
+
+/**
+ * struct  ipa_tx_data_desc - information needed
+ * to send data packet to HW link: link to data descriptors
+ * priv: client specific private data
+ * @pyld_buffer: pointer to the data buffer that holds frame
+ * @pyld_len: length of the data packet
+ */
+struct ipa_tx_data_desc {
+	struct list_head link;
+	void *priv;
+	void *pyld_buffer;
+	u16  pyld_len;
+};
+
+/**
+ * struct  ipa_rx_data - information needed
+ * to send to wlan driver on receiving data from ipa hw
+ * @skb: skb
+ * @dma_addr: DMA address of this Rx packet
+ */
+struct ipa_rx_data {
+	struct sk_buff *skb;
+	dma_addr_t dma_addr;
+};
+
+/**
+ * enum ipa_irq_type - IPA Interrupt Type
+ * Used to register handlers for IPA interrupts
+ *
+ * Below enum is a logical mapping and not the actual interrupt bit in HW
+ */
+enum ipa_irq_type {
+	IPA_BAD_SNOC_ACCESS_IRQ,
+	IPA_UC_IRQ_0,
+	IPA_UC_IRQ_1,
+	IPA_UC_IRQ_2,
+	IPA_UC_IRQ_3,
+	IPA_UC_IN_Q_NOT_EMPTY_IRQ,
+	IPA_UC_RX_CMD_Q_NOT_FULL_IRQ,
+	IPA_PROC_TO_UC_ACK_Q_NOT_EMPTY_IRQ,
+	IPA_RX_ERR_IRQ,
+	IPA_DEAGGR_ERR_IRQ,
+	IPA_TX_ERR_IRQ,
+	IPA_STEP_MODE_IRQ,
+	IPA_PROC_ERR_IRQ,
+	IPA_TX_SUSPEND_IRQ,
+	IPA_TX_HOLB_DROP_IRQ,
+	IPA_BAM_GSI_IDLE_IRQ,
+	IPA_PIPE_YELLOW_MARKER_BELOW_IRQ,
+	IPA_PIPE_RED_MARKER_BELOW_IRQ,
+	IPA_PIPE_YELLOW_MARKER_ABOVE_IRQ,
+	IPA_PIPE_RED_MARKER_ABOVE_IRQ,
+	IPA_UCP_IRQ,
+	IPA_DCMP_IRQ,
+	IPA_GSI_EE_IRQ,
+	IPA_GSI_IPA_IF_TLV_RCVD_IRQ,
+	IPA_GSI_UC_IRQ,
+	IPA_TLV_LEN_MIN_DSM_IRQ,
+	IPA_DRBIP_PKT_EXCEED_MAX_SIZE_IRQ,
+	IPA_DRBIP_DATA_SCTR_CFG_ERROR_IRQ,
+	IPA_DRBIP_IMM_CMD_NO_FLSH_HZRD_IRQ,
+	IPA_IRQ_MAX
+};
+
+/**
+ * typedef ipa_irq_handler_t - irq handler/callback type
+ * @param ipa_irq_type - [in] interrupt type
+ * @param private_data - [in, out] the client private data
+ * @param interrupt_data - [out] interrupt information data
+ *
+ * callback registered by ipa_add_interrupt_handler function to
+ * handle a specific interrupt type
+ *
+ * No return value
+ */
+typedef void (*ipa_irq_handler_t)(enum ipa_irq_type interrupt,
+				void *private_data,
+				void *interrupt_data);
+
+/**
+ * struct IpaHwBamStats_t - Structure holding the BAM statistics
+ *
+ * @bamFifoFull : Number of times Bam Fifo got full - For In Ch: Good,
+ * For Out Ch: Bad
+ * @bamFifoEmpty : Number of times Bam Fifo got empty - For In Ch: Bad,
+ * For Out Ch: Good
+ * @bamFifoUsageHigh : Number of times Bam fifo usage went above 75% -
+ * For In Ch: Good, For Out Ch: Bad
+ * @bamFifoUsageLow : Number of times Bam fifo usage went below 25% -
+ * For In Ch: Bad, For Out Ch: Good
+ */
+struct IpaHwBamStats_t {
+	u32 bamFifoFull;
+	u32 bamFifoEmpty;
+	u32 bamFifoUsageHigh;
+	u32 bamFifoUsageLow;
+	u32 bamUtilCount;
+} __packed;
+
+/**
+ * struct IpaHwRingStats_t - Structure holding the Ring statistics
+ *
+ * @ringFull : Number of times Transfer Ring got full - For In Ch: Good,
+ * For Out Ch: Bad
+ * @ringEmpty : Number of times Transfer Ring got empty - For In Ch: Bad,
+ * For Out Ch: Good
+ * @ringUsageHigh : Number of times Transfer Ring usage went above 75% -
+ * For In Ch: Good, For Out Ch: Bad
+ * @ringUsageLow : Number of times Transfer Ring usage went below 25% -
+ * For In Ch: Bad, For Out Ch: Good
+ */
+struct IpaHwRingStats_t {
+	u32 ringFull;
+	u32 ringEmpty;
+	u32 ringUsageHigh;
+	u32 ringUsageLow;
+	u32 RingUtilCount;
+} __packed;
+
+/**
+ * struct ipa_uc_dbg_rtk_ring_stats - uC dbg stats info for RTK
+ * offloading protocol
+ * @commStats: common stats
+ * @trCount: transfer ring count
+ * @erCount: event ring count
+ * @totalAosCount: total AoS completion count
+ * @busyTime: total busy time
+ */
+struct ipa_uc_dbg_rtk_ring_stats {
+	struct IpaHwRingStats_t commStats;
+	u32 trCount;
+	u32 erCount;
+	u32 totalAosCount;
+	u64 busyTime;
+} __packed;
+
+/**
+ * struct IpaHwStatsWDIRxInfoData_t - Structure holding the WDI Rx channel
+ * structures
+ *
+ * @max_outstanding_pkts : Number of outstanding packets in Rx Ring
+ * @num_pkts_processed : Number of packets processed - cumulative
+ * @rx_ring_rp_value : Read pointer last advertised to the WLAN FW
+ * @rx_ind_ring_stats : Ring info
+ * @bam_stats : BAM info
+ * @num_bam_int_handled : Number of Bam Interrupts handled by FW
+ * @num_db : Number of times the doorbell was rung
+ * @num_unexpected_db : Number of unexpected doorbells
+ * @num_pkts_in_dis_uninit_state : number of completions we
+ *		received in disabled or uninitialized state
+ * @num_ic_inj_vdev_change : Number of times the Imm Cmd is
+ *		injected due to vdev_id change
+ * @num_ic_inj_fw_desc_change : Number of times the Imm Cmd is
+ *		injected due to fw_desc change
+ * @num_qmb_int_handled : Number of QMB interrupts handled
+ */
+struct IpaHwStatsWDIRxInfoData_t {
+	u32 max_outstanding_pkts;
+	u32 num_pkts_processed;
+	u32 rx_ring_rp_value;
+	struct IpaHwRingStats_t rx_ind_ring_stats;
+	struct IpaHwBamStats_t bam_stats;
+	u32 num_bam_int_handled;
+	u32 num_db;
+	u32 num_unexpected_db;
+	u32 num_pkts_in_dis_uninit_state;
+	u32 num_ic_inj_vdev_change;
+	u32 num_ic_inj_fw_desc_change;
+	u32 num_qmb_int_handled;
+	u32 reserved1;
+	u32 reserved2;
+} __packed;
+
+/**
+ * struct IpaHwStatsWDITxInfoData_t  - Structure holding the WDI Tx channel
+ * structures
+ *
+ * @num_pkts_processed : Number of packets processed - cumulative
+ * @copy_engine_doorbell_value : latest value of doorbell written to copy engine
+ * @num_db_fired : Number of DB from uC FW to Copy engine
+ * @tx_comp_ring_stats : ring info
+ * @bam_stats : BAM info
+ * @num_db : Number of times the doorbell was rung
+ * @num_unexpected_db : Number of unexpected doorbells
+ * @num_bam_int_handled : Number of Bam Interrupts handled by FW
+ * @num_bam_int_in_non_running_state : Number of Bam interrupts while not in
+ * Running state
+ * @num_qmb_int_handled : Number of QMB interrupts handled
+ */
+struct IpaHwStatsWDITxInfoData_t {
+	u32 num_pkts_processed;
+	u32 copy_engine_doorbell_value;
+	u32 num_db_fired;
+	struct IpaHwRingStats_t tx_comp_ring_stats;
+	struct IpaHwBamStats_t bam_stats;
+	u32 num_db;
+	u32 num_unexpected_db;
+	u32 num_bam_int_handled;
+	u32 num_bam_int_in_non_running_state;
+	u32 num_qmb_int_handled;
+	u32 num_bam_int_handled_while_wait_for_bam;
+} __packed;
+
+/**
+ * struct IpaHwStatsWDIInfoData_t - Structure holding the WDI channel structures
+ *
+ * @rx_ch_stats : RX stats
+ * @tx_ch_stats : TX stats
+ */
+struct IpaHwStatsWDIInfoData_t {
+	struct IpaHwStatsWDIRxInfoData_t rx_ch_stats;
+	struct IpaHwStatsWDITxInfoData_t tx_ch_stats;
+} __packed;
+
+
+/**
+ * struct  ipa_wdi_ul_params - WDI_RX configuration
+ * @rdy_ring_base_pa: physical address of the base of the Rx ring (containing
+ * Rx buffers)
+ * @rdy_ring_size: size of the Rx ring in bytes
+ * @rdy_ring_rp_pa: physical address of the location through which IPA uc is
+ * reading (WDI-1.0)
+ * @rdy_comp_ring_base_pa: physical address of the base of the Rx completion
+ * ring (WDI-2.0)
+ * @rdy_comp_ring_wp_pa: physical address of the location through which IPA
+ * uc is writing (WDI-2.0)
+ * @rdy_comp_ring_size: size of the Rx_completion ring in bytes
+ * expected to communicate about the Read pointer into the Rx Ring
+ */
+struct ipa_wdi_ul_params {
+	phys_addr_t rdy_ring_base_pa;
+	u32 rdy_ring_size;
+	phys_addr_t rdy_ring_rp_pa;
+	phys_addr_t rdy_comp_ring_base_pa;
+	phys_addr_t rdy_comp_ring_wp_pa;
+	u32 rdy_comp_ring_size;
+	u32 *rdy_ring_rp_va;
+	u32 *rdy_comp_ring_wp_va;
+	bool is_txr_rn_db_pcie_addr;
+	bool is_evt_rn_db_pcie_addr;
+};
+
+/**
+ * struct  ipa_wdi_ul_params_smmu - WDI_RX configuration (with WLAN SMMU)
+ * @rdy_ring: SG table describing the Rx ring (containing Rx buffers)
+ * @rdy_ring_size: size of the Rx ring in bytes
+ * @rdy_ring_rp_pa: physical address of the location through which IPA uc is
+ * expected to communicate about the Read pointer into the Rx Ring
+ */
+struct ipa_wdi_ul_params_smmu {
+	struct sg_table rdy_ring;
+	u32 rdy_ring_size;
+	phys_addr_t rdy_ring_rp_pa;
+	struct sg_table rdy_comp_ring;
+	phys_addr_t rdy_comp_ring_wp_pa;
+	u32 rdy_comp_ring_size;
+	u32 *rdy_ring_rp_va;
+	u32 *rdy_comp_ring_wp_va;
+	bool is_txr_rn_db_pcie_addr;
+	bool is_evt_rn_db_pcie_addr;
+};
+
+/**
+ * struct  ipa_wdi_dl_params - WDI_TX configuration
+ * @comp_ring_base_pa: physical address of the base of the Tx completion ring
+ * @comp_ring_size: size of the Tx completion ring in bytes
+ * @ce_ring_base_pa: physical address of the base of the Copy Engine Source
+ * Ring
+ * @ce_door_bell_pa: physical address of the doorbell that the IPA uC has to
+ * write into to trigger the copy engine
+ * @ce_ring_size: Copy Engine Ring size in bytes
+ * @num_tx_buffers: Number of pkt buffers allocated
+ */
+struct ipa_wdi_dl_params {
+	phys_addr_t comp_ring_base_pa;
+	u32 comp_ring_size;
+	phys_addr_t ce_ring_base_pa;
+	phys_addr_t ce_door_bell_pa;
+	u32 ce_ring_size;
+	u32 num_tx_buffers;
+	bool is_txr_rn_db_pcie_addr;
+	bool is_evt_rn_db_pcie_addr;
+};
+
+/**
+ * struct  ipa_wdi_dl_params_smmu - WDI_TX configuration (with WLAN SMMU)
+ * @comp_ring: SG table describing the Tx completion ring
+ * @comp_ring_size: size of the Tx completion ring in bytes
+ * @ce_ring: SG table describing the Copy Engine Source Ring
+ * @ce_door_bell_pa: physical address of the doorbell that the IPA uC has to
+ * write into to trigger the copy engine
+ * @ce_ring_size: Copy Engine Ring size in bytes
+ * @num_tx_buffers: Number of pkt buffers allocated
+ */
+struct ipa_wdi_dl_params_smmu {
+	struct sg_table comp_ring;
+	u32 comp_ring_size;
+	struct sg_table ce_ring;
+	phys_addr_t ce_door_bell_pa;
+	u32 ce_ring_size;
+	u32 num_tx_buffers;
+	bool is_txr_rn_db_pcie_addr;
+	bool is_evt_rn_db_pcie_addr;
+};
+
+/**
+ * struct  ipa_wdi_in_params - information provided by WDI client
+ * @sys: IPA EP configuration info
+ * @ul: WDI_RX configuration info
+ * @dl: WDI_TX configuration info
+ * @ul_smmu: WDI_RX configuration info when WLAN uses SMMU
+ * @dl_smmu: WDI_TX configuration info when WLAN uses SMMU
+ * @smmu_enabled: true if WLAN uses SMMU
+ * @wdi_notify: callback to get WDI stats and quota info
+ */
+struct ipa_wdi_in_params {
+	struct ipa_sys_connect_params sys;
+	union {
+		struct ipa_wdi_ul_params ul;
+		struct ipa_wdi_dl_params dl;
+		struct ipa_wdi_ul_params_smmu ul_smmu;
+		struct ipa_wdi_dl_params_smmu dl_smmu;
+	} u;
+	bool smmu_enabled;
+#ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN
+	ipa_wdi_meter_notifier_cb wdi_notify;
+#endif
+};
+
+enum ipa_upstream_type {
+	IPA_UPSTEAM_MODEM = 1,
+	IPA_UPSTEAM_WLAN,
+	IPA_UPSTEAM_MAX
+};
+
+/**
+ * struct  ipa_wdi_out_params - information provided to WDI client
+ * @uc_door_bell_pa: physical address of IPA uc doorbell
+ * @clnt_hdl: opaque handle assigned to client
+ */
+struct ipa_wdi_out_params {
+	phys_addr_t uc_door_bell_pa;
+	u32 clnt_hdl;
+};
+
+/**
+ * struct ipa_wdi_db_params - information provided to retrieve
+ *       physical address of uC doorbell
+ * @client:	type of "client" (IPA_CLIENT_WLAN#_PROD/CONS)
+ * @uc_door_bell_pa: physical address of IPA uc doorbell
+ */
+struct ipa_wdi_db_params {
+	enum ipa_client_type client;
+	phys_addr_t uc_door_bell_pa;
+};
+
+/**
+ * struct  ipa_wdi_uc_ready_params - uC ready CB parameters
+ * @is_uC_ready: uC loaded or not
+ * @priv : callback cookie
+ * @notify:	callback
+ */
+typedef void (*ipa_uc_ready_cb)(void *priv);
+struct ipa_wdi_uc_ready_params {
+	bool is_uC_ready;
+	void *priv;
+	ipa_uc_ready_cb notify;
+};
+
+/**
+ * struct  ipa_wdi_buffer_info - address info of a WLAN allocated buffer
+ * @pa: physical address of the buffer
+ * @iova: IOVA of the buffer as embedded inside the WDI descriptors
+ * @size: size in bytes of the buffer
+ * @result: result of map or unmap operations (out param)
+ *
+ * IPA driver will create/release IOMMU mapping in IPA SMMU from iova->pa
+ */
+struct ipa_wdi_buffer_info {
+	phys_addr_t pa;
+	unsigned long iova;
+	size_t size;
+	int result;
+};
+
+/**
+ * struct  ipa_wdi_bw_info - bandwidth monitoring info from WLAN
+ * @threshold: throughput thresholds to be monitored
+ * @num: number of threshold entries
+ * @stop: true to stop monitoring
+ *
+ * Passed to ipa_uc_bw_monitor() to configure uC bandwidth monitoring
+ */
+struct ipa_wdi_bw_info {
+	uint64_t threshold[IPA_BW_THRESHOLD_MAX];
+	int num;
+	bool stop;
+};
+
+/**
+ * struct  ipa_wdi_tx_info - sw tx info from WLAN
+ * @sta_tx: sw tx stats on sta interface
+ * @ap_tx: sw tx stats on ap interface
+ *
+ * Reported by WLAN to provide software Tx statistics to the IPA driver
+ */
+struct ipa_wdi_tx_info {
+	uint64_t sta_tx;
+	uint64_t ap_tx;
+};
+
+/**
+ * struct ipa_gsi_ep_config - IPA GSI endpoint configurations
+ *
+ * @ipa_ep_num: IPA EP pipe number
+ * @ipa_gsi_chan_num: GSI channel number
+ * @ipa_if_tlv: number of IPA_IF TLV
+ * @ipa_if_aos: number of IPA_IF AOS
+ * @ee: Execution environment
+ * @prefetch_mode: Prefetch mode to be used
+ * @prefetch_threshold: Prefetch empty level threshold.
+ *  relevant for smart and free prefetch modes
+ */
+struct ipa_gsi_ep_config {
+	int ipa_ep_num;
+	int ipa_gsi_chan_num;
+	int ipa_if_tlv;
+	int ipa_if_aos;
+	int ee;
+	enum gsi_prefetch_mode prefetch_mode;
+	uint8_t prefetch_threshold;
+};
+
+/**
+ * struct  ipa_smmu_in_params - information provided from client
+ * @smmu_client: client requesting the smmu info.
+ */
+
+enum ipa_smmu_client_type {
+	IPA_SMMU_WLAN_CLIENT,
+	IPA_SMMU_AP_CLIENT,
+	IPA_SMMU_WIGIG_CLIENT,
+	IPA_SMMU_WLAN1_CLIENT,
+	IPA_SMMU_ETH_CLIENT,
+	IPA_SMMU_ETH1_CLIENT,
+	IPA_SMMU_CLIENT_MAX
+};
+
+struct ipa_smmu_in_params {
+	enum ipa_smmu_client_type smmu_client;
+};
+
+/**
+ * struct  ipa_smmu_out_params - information provided to IPA client
+ * @smmu_enable: IPA S1 SMMU enable/disable status
+ * @shared_cb: is client CB shared (mappings should be done by client only)
+ */
+struct ipa_smmu_out_params {
+	bool smmu_enable;
+	bool shared_cb;
+};
+
+struct iphdr_rsv {
+	struct iphdr ipv4_temp;  /* 20 bytes */
+	uint32_t rsv1;
+	uint32_t rsv2;
+	uint32_t rsv3;
+	uint32_t rsv4;
+	uint32_t rsv5;
+} __packed;
+
+union ip_hdr_temp {
+	struct iphdr_rsv ipv4_rsv;	/* 40 bytes */
+	struct ipv6hdr ipv6_temp;	/* 40 bytes */
+} __packed;
+
+struct ipa_socksv5_uc_tmpl {
+	uint16_t cmd_id;
+	uint16_t rsv;
+	uint32_t cmd_param;
+	uint16_t pkt_count;
+	uint16_t rsv2;
+	uint32_t byte_count;
+	union ip_hdr_temp ip_hdr;
+	/* 2B src/dst port */
+	uint16_t src_port;
+	uint16_t dst_port;
+
+	/* attribute mask */
+	uint32_t ipa_sockv5_mask;
+
+	/* required update 4B/4B Seq/Ack/SACK */
+	uint32_t out_irs;
+	uint32_t out_iss;
+	uint32_t in_irs;
+	uint32_t in_iss;
+
+	/* option 10B: time-stamp */
+	uint32_t out_ircv_tsval;
+	uint32_t in_ircv_tsecr;
+	uint32_t out_ircv_tsecr;
+	uint32_t in_ircv_tsval;
+
+	/* option 2B: window-scaling/dynamic */
+	uint16_t in_isnd_wscale:4;
+	uint16_t out_isnd_wscale:4;
+	uint16_t in_ircv_wscale:4;
+	uint16_t out_ircv_wscale:4;
+	uint16_t MAX_WINDOW_SIZE;
+	/* 11*4 + 40 bytes = 84 bytes */
+	uint32_t rsv3;
+	uint32_t rsv4;
+	uint32_t rsv5;
+	uint32_t rsv6;
+	uint32_t rsv7;
+	uint32_t rsv8;
+	uint32_t rsv9;
+} __packed;
+/*reserve 16 bytes : 16 bytes+ 40 bytes + 44 bytes = 100 bytes (28 bytes left)*/
+
+struct ipa_socksv5_info {
+	/* ipa-uc info */
+	struct ipa_socksv5_uc_tmpl ul_out;
+	struct ipa_socksv5_uc_tmpl dl_out;
+
+	/* ipacm info */
+	struct ipacm_socksv5_info ul_in;
+	struct ipacm_socksv5_info dl_in;
+
+	/* output: handle (index) */
+	uint16_t handle;
+};
+
+struct ipa_ipv6_nat_uc_tmpl {
+	uint16_t cmd_id;
+	uint16_t rsv;
+	uint32_t cmd_param;
+	uint16_t pkt_count;
+	uint16_t rsv2;
+	uint32_t byte_count;
+	uint64_t private_address_lsb;
+	uint64_t private_address_msb;
+	uint64_t public_address_lsb;
+	uint64_t public_address_msb;
+	uint16_t private_port;
+	uint16_t public_port;
+	uint32_t rsv3;
+	uint64_t rsv4;
+	uint64_t rsv5;
+	uint64_t rsv6;
+	uint64_t rsv7;
+	uint64_t rsv8;
+	uint64_t rsv9;
+	uint64_t rsv10;
+	uint64_t rsv11;
+	uint64_t rsv12;
+} __packed;
+
+#if IS_ENABLED(CONFIG_IPA3)
+/*
+ * Configuration
+ */
+
+/**
+ * ipa_cfg_ep_ctrl() -  IPA end-point Control configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg_ctrl:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl);
+
+/*
+ * Routing
+ */
+
+/**
+ * ipa_add_rt_rule() - Add the specified routing rules to SW and optionally
+ * commit to IPA HW
+ * @rules:	[inout] set of routing rules to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules);
+
+/**
+ * ipa_put_rt_tbl() - Release the specified routing table handle
+ * @rt_tbl_hdl:	[in] the routing table handle to release
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_put_rt_tbl(u32 rt_tbl_hdl);
+
+/*
+ * Interface
+ */
+int ipa_register_intf(const char *name,
+	const struct ipa_tx_intf *tx,
+	const struct ipa_rx_intf *rx);
+int ipa_deregister_intf(const char *name);
+
+/*
+ * Aggregation
+ */
+
+/**
+ * ipa_set_aggr_mode() - Set the aggregation mode which is a global setting
+ * @mode:	[in] the desired aggregation mode for e.g. straight MBIM, QCNCM,
+ * etc
+ *
+ * Returns:	0 on success
+ */
+
+int ipa_set_aggr_mode(enum ipa_aggr_mode mode);
+
+/**
+ * ipa_set_qcncm_ndp_sig() - Set the NDP signature used for QCNCM aggregation
+ * mode
+ * @sig:	[in] the first 3 bytes of QCNCM NDP signature (expected to be
+ * "QND")
+ *
+ * Set the NDP signature used for QCNCM aggregation mode. The fourth byte
+ * (expected to be 'P') needs to be set using the header addition mechanism
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_set_qcncm_ndp_sig(char sig[3]);
+
+/**
+ * ipa_set_single_ndp_per_mbim() - Enable/disable single NDP per MBIM frame
+ * configuration
+ * @enable:	[in] true for single NDP/MBIM; false otherwise
+ *
+ * Returns:	0 on success
+ */
+int ipa_set_single_ndp_per_mbim(bool enable);
+
+/*
+ * interrupts
+ */
+
+/**
+ * ipa_add_interrupt_handler() - Adds handler to an interrupt type
+ * @interrupt:		Interrupt type
+ * @handler:		The handler to be added
+ * @deferred_flag:	whether the handler processing should be deferred in
+ *			a workqueue
+ * @private_data:	the client's private data
+ *
+ * Adds handler to an interrupt type and enable the specific bit
+ * in IRQ_EN register, associated interrupt in IRQ_STTS register will be enabled
+ */
+
+int ipa_add_interrupt_handler(enum ipa_irq_type interrupt,
+	ipa_irq_handler_t handler,
+	bool deferred_flag,
+	void *private_data);
+
+/**
+ * ipa_restore_suspend_handler() - restores the original suspend IRQ handler
+ * as it was registered in the IPA init sequence.
+ * Return codes:
+ * 0: success
+ * -EPERM: failed to remove current handler or failed to add original handler
+ */
+int ipa_restore_suspend_handler(void);
+
+/*
+ * Messaging
+ */
+
+/**
+ * ipa_send_msg() - Send "message" from kernel client to IPA driver
+ * @metadata: [in] message metadata
+ * @buff: [in] the payload for message
+ * @callback: [in] free callback
+ *
+ * Client supplies the message metadata and payload which IPA driver buffers
+ * till read by user-space. After read from user space IPA driver invokes the
+ * callback supplied to free the message payload. Client must not touch/free
+ * the message payload after calling this API.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_send_msg(struct ipa_msg_meta *metadata, void *buff,
+		  ipa_msg_free_fn callback);
+
+/*
+ * Data path
+ */
+
+/**
+ * ipa_tx_dp() - Data-path tx handler
+ * @dst:	[in] which IPA destination to route tx packets to
+ * @skb:	[in] the packet to send
+ * @metadata:	[in] TX packet metadata
+ *
+ * Data-path tx handler, this is used for both SW data-path which by-passes most
+ * IPA HW blocks AND the regular HW data-path for WLAN AMPDU traffic only. If
+ * dst is a "valid" CONS type, then SW data-path is used. If dst is the
+ * WLAN_AMPDU PROD type, then HW data-path for WLAN AMPDU is used. Anything else
+ * is an error. For errors, client needs to free the skb as needed. For success,
+ * IPA driver will later invoke client callback if one was supplied. That
+ * callback should free the skb. If no callback supplied, IPA driver will free
+ * the skb internally
+ *
+ * The function will use two descriptors for this send command
+ * (for A5_WLAN_AMPDU_PROD only one descriptor will be sent),
+ * the first descriptor will be used to inform the IPA hardware that
+ * apps need to push data into the IPA (IP_PACKET_INIT immediate command).
+ * Once this send was done from SPS point-of-view the IPA driver will
+ * get notified by the supplied callback - ipa_sps_irq_tx_comp()
+ *
+ * ipa_sps_irq_tx_comp will call the user supplied
+ * callback (from ipa_connect)
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
+		struct ipa_tx_meta *metadata);
+
+/*
+ * ipa_rmnet_ctl_xmit - QMAP Flow control TX
+ *
+ * @skb - tx QMAP control packet
+ *
+ * Note: This need to be called after client receive rmnet_ctl_
+ * ready_cb and want to send TX flow control message.
+ *
+ * This function will return 0 on success, -EAGAIN if the pipe is full.
+ */
+int ipa_rmnet_ctl_xmit(struct sk_buff *skb);
+
+/*
+ * ipa_rmnet_ll_xmit - Low lat data Tx
+ *
+ * @skb - tx low lat data packet
+ *
+ * Note: This need to be called after client receive rmnet_ll_
+ * ready_cb and want to send TX ll data message.
+ *
+ * This function will return 0 on success, -EAGAIN if the pipe is full.
+ */
+int ipa_rmnet_ll_xmit(struct sk_buff *skb);
+
+/*
+ * ipa_register_notifier - Register for IPA atomic notifier
+ *
+ * @fn_ptr - Function pointer to get the notification
+ *
+ * This function will return 0 on success, -EAGAIN if registration fails.
+ */
+int ipa_register_notifier(void *fn_ptr);
+
+/*
+ * ipa_unregister_notifier - Unregister for IPA atomic notifier
+ *
+ * @fn_ptr - Same function pointer used to get the notification
+ *
+ * This function will return 0 on success, -EAGAIN if unregistration fails.
+ */
+int ipa_unregister_notifier(void *fn_ptr);
+
+void ipa_free_skb(struct ipa_rx_data *data);
+
+/*
+ * System pipes
+ */
+
+/**
+ * ipa_setup_sys_pipe() - Setup an IPA end-point in system-BAM mode and perform
+ * IPA EP configuration
+ * @sys_in:	[in] input needed to setup BAM pipe and configure EP
+ * @clnt_hdl:	[out] client handle
+ *
+ *  - configure the end-point registers with the supplied
+ *    parameters from the user.
+ *  - call SPS APIs to create a system-to-bam connection with IPA.
+ *  - allocate descriptor FIFO
+ *  - register callback function(ipa_sps_irq_rx_notify or
+ *    ipa_sps_irq_tx_notify - depends on client type) in case the driver is
+ *    not configured to pulling mode
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl);
+
+/**
+ * ipa_teardown_sys_pipe() - Teardown the system-BAM pipe and cleanup IPA EP
+ * @clnt_hdl:	[in] the handle obtained from ipa_setup_sys_pipe
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_teardown_sys_pipe(u32 clnt_hdl);
+
+int ipa_connect_wdi_pipe(struct ipa_wdi_in_params *in,
+		struct ipa_wdi_out_params *out);
+int ipa_disconnect_wdi_pipe(u32 clnt_hdl);
+int ipa_enable_wdi_pipe(u32 clnt_hdl);
+int ipa_disable_wdi_pipe(u32 clnt_hdl);
+int ipa_resume_wdi_pipe(u32 clnt_hdl);
+int ipa_suspend_wdi_pipe(u32 clnt_hdl);
+int ipa_reg_uc_rdyCB(struct ipa_wdi_uc_ready_params *param);
+int ipa_dereg_uc_rdyCB(void);
+int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs);
+int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls);
+int ipa_get_hdr(struct ipa_ioc_get_hdr *lookup);
+/**
+ * ipa_get_wdi_stats() - Query WDI statistics from uc
+ * @stats:	[inout] stats blob from client populated by driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats);
+int ipa_uc_bw_monitor(struct ipa_wdi_bw_info *info);
+
+/**
+ * ipa_broadcast_wdi_quota_reach_ind() - quota reach
+ * @fid:	[in] input netdev ID
+ * @num_bytes:	[in] number of used bytes
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_broadcast_wdi_quota_reach_ind(uint32_t fid,
+		uint64_t num_bytes);
+
+/*
+ * To retrieve doorbell physical address of
+ * wlan pipes
+ */
+int ipa_uc_wdi_get_dbpa(struct ipa_wdi_db_params *out);
+
+/*
+ * IPADMA
+ */
+ /**
+  * ipa_dma_init() -Initialize IPADMA.
+  *
+  * This function initializes all IPADMA internal data and connects the DMA pipes:
+  *	MEMCPY_DMA_SYNC_PROD ->MEMCPY_DMA_SYNC_CONS
+  *	MEMCPY_DMA_ASYNC_PROD->MEMCPY_DMA_ASYNC_CONS
+  *
+  * Return codes: 0: success
+  *		-EFAULT: IPADMA is already initialized
+  *		-ENOMEM: allocating memory error
+  *		-EPERM: pipe connection failed
+  */
+int ipa_dma_init(void);
+
+/**
+ * ipa_dma_enable() -Vote for IPA clocks.
+ *
+ *Return codes: 0: success
+ *		-EINVAL: IPADMA is not initialized
+ *		-EPERM: Operation not permitted as ipa_dma is already
+ *		 enabled
+ */
+int ipa_dma_enable(void);
+
+/**
+ * ipa_dma_disable()- Unvote for IPA clocks.
+ *
+ * Enters power-save mode.
+ *
+ * Return codes: 0: success
+ *		-EINVAL: IPADMA is not initialized
+ *		-EPERM: Operation not permitted as ipa_dma is already
+ *			disabled
+ *		-EFAULT: can not disable ipa_dma as there are pending
+ *			memcopy works
+ */
+int ipa_dma_disable(void);
+
+/**
+ * ipa_dma_sync_memcpy()- Perform synchronous memcpy using IPA.
+ *
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ *
+ * Return codes: 0: success
+ *		-EINVAL: invalid params
+ *		-EPERM: operation not permitted as ipa_dma isn't enable or
+ *			initialized
+ *		-SPS_ERROR: on sps failures
+ *		-EFAULT: other
+ */
+int ipa_dma_sync_memcpy(u64 dest, u64 src, int len);
+
+/**
+ * ipa_dma_async_memcpy()- Perform asynchronous memcpy using IPA.
+ *
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ * @user_cb: callback function to notify the client when the copy was done.
+ * @user_param: cookie for user_cb.
+ *
+ * Return codes: 0: success
+ *		-EINVAL: invalid params
+ *		-EPERM: operation not permitted as ipa_dma isn't enable or
+ *			initialized
+ *		-SPS_ERROR: on sps failures
+ *		-EFAULT: descr fifo is full.
+ */
+int ipa_dma_async_memcpy(u64 dest, u64 src, int len,
+			void (*user_cb)(void *user1), void *user_param);
+
+
+/**
+ * ipa_dma_destroy() -teardown IPADMA pipes and release ipadma.
+ *
+ * this is a blocking function, returns just after destroying IPADMA.
+ */
+void ipa_dma_destroy(void);
+
+/*
+ * Miscellaneous
+ */
+void ipa_bam_reg_dump(void);
+
+int ipa_get_ep_mapping(enum ipa_client_type client);
+
+bool ipa_is_ready(void);
+
+void ipa_proxy_clk_vote(void);
+void ipa_proxy_clk_unvote(void);
+
+#ifdef CONFIG_DEEPSLEEP
+int ipa_fmwk_deepsleep_entry_ipa(void);
+
+int ipa_fmwk_deepsleep_exit_ipa(void);
+#endif
+
+enum ipa_hw_type ipa_get_hw_type(void);
+
+const struct ipa_gsi_ep_config *ipa_get_gsi_ep_info(
+	enum ipa_client_type client);
+
+int ipa_stop_gsi_channel(u32 clnt_hdl);
+
+typedef void (*ipa_ready_cb)(void *user_data);
+
+typedef void (*ipa_rmnet_ctl_ready_cb)(void *user_data);
+
+typedef void (*ipa_rmnet_ctl_stop_cb)(void *user_data);
+
+typedef void (*ipa_rmnet_ctl_rx_notify_cb)(void *user_data, void *rx_data);
+
+typedef void (*ipa_rmnet_ll_ready_cb)(void *user_data);
+
+typedef void (*ipa_rmnet_ll_stop_cb)(void *user_data);
+
+typedef void (*ipa_rmnet_ll_rx_notify_cb)(void *user_data, void *rx_data);
+
+int ipa_get_default_aggr_time_limit(enum ipa_client_type client,
+	u32 *default_aggr_time_limit);
+
+/**
+ * ipa_register_ipa_ready_cb() - register a callback to be invoked
+ * when IPA core driver initialization is complete.
+ *
+ * @ipa_ready_cb:    CB to be triggered.
+ * @user_data:       Data to be sent to the originator of the CB.
+ *
+ * Note: This function is expected to be utilized when ipa_is_ready
+ * function returns false.
+ * An IPA client may also use this function directly rather than
+ * calling ipa_is_ready beforehand, as if this API returns -EEXIST,
+ * this means IPA initialization is complete (and no callback will
+ * be triggered).
+ * When the callback is triggered, the client MUST perform its
+ * operations in a different context.
+ *
+ * The function will return 0 on success, -ENOMEM on memory issues and
+ * -EEXIST if IPA initialization is complete already.
+ */
+int ipa_register_ipa_ready_cb(void (*ipa_ready_cb)(void *user_data),
+			      void *user_data);
+
+/**
+ * ipa_register_rmnet_ctl_cb() - register callbacks to be invoked
+ * to rmnet_ctl for qmap flow control pipes setup/teardown/rx_notify.
+ *
+ * @ipa_rmnet_ctl_ready_cb:  CB to be called when pipes setup.
+ * @user_data1: user_data for ipa_rmnet_ctl_ready_cb.
+ * @ipa_rmnet_ctl_stop_cb: CB to be called when pipes teardown.
+ * @user_data2: user_data for ipa_rmnet_ctl_stop_cb.
+ * @ipa_rmnet_ctl_rx_notify_cb: CB to be called when receive rx pkts.
+ * @user_data3: user_data for ipa_rmnet_ctl_rx_notify_cb.
+ * @rx_data: RX data buffer.
+ *
+ * Note: This function is expected to be utilized for rmnet_ctl
+ * module when new qmap flow control is enabled.
+ *
+ * The function will return 0 on success, -EAGAIN if IPA not ready,
+ * -ENXIO if the feature is not enabled, -EEXIST if already called.
+ */
+int ipa_register_rmnet_ctl_cb(
+	void (*ipa_rmnet_ctl_ready_cb)(void *user_data1),
+	void *user_data1,
+	void (*ipa_rmnet_ctl_stop_cb)(void *user_data2),
+	void *user_data2,
+	void (*ipa_rmnet_ctl_rx_notify_cb)(void *user_data3, void *rx_data),
+	void *user_data3);
+
+/**
+ * ipa_unregister_rmnet_ctl_cb() - unregister callbacks to be
+ * invoked to rmnet_ctl for qmap flow control pipes
+ * setup/teardown/rx_notify.
+ *
+ * Note: This function is expected to be utilized for rmnet_ctl
+ * module when new qmap flow control is enabled.
+ *
+ * The function will return 0 on success, -EAGAIN if IPA not ready,
+ * -ENXIO if the feature is not enabled.
+ */
+int ipa_unregister_rmnet_ctl_cb(void);
+
+/**
+ * ipa_register_rmnet_ll_cb() - register callbacks to be invoked
+ * to rmnet_ll for low latency data pipes setup/teardown/rx_notify.
+ *
+ * @ipa_rmnet_ll_ready_cb:  CB to be called when pipes setup.
+ * @user_data1: user_data for ipa_rmnet_ll_ready_cb.
+ * @ipa_rmnet_ll_stop_cb: CB to be called when pipes teardown.
+ * @user_data2: user_data for ipa_rmnet_ll_stop_cb.
+ * @ipa_rmnet_ll_rx_notify_cb: CB to be called when receive rx pkts.
+ * @user_data3: user_data for ipa_rmnet_ll_rx_notify_cb.
+ * @rx_data: RX data buffer.
+ *
+ * Note: This function is expected to be utilized for rmnet_ll
+ * module.
+ *
+ * The function will return 0 on success, -EAGAIN if IPA not ready,
+ * -ENXIO if the feature is not enabled, -EEXIST if already called.
+ */
+int ipa_register_rmnet_ll_cb(
+	void (*ipa_rmnet_ll_ready_cb)(void *user_data1),
+	void *user_data1,
+	void (*ipa_rmnet_ll_stop_cb)(void *user_data2),
+	void *user_data2,
+	void (*ipa_rmnet_ll_rx_notify_cb)(void *user_data3, void *rx_data),
+	void *user_data3);
+
+/**
+ * ipa_unregister_rmnet_ll_cb() - unregister callbacks to be
+ * invoked to rmnet_ll for low lat data pipes
+ * setup/teardown/rx_notify.
+ *
+ * Note: This function is expected to be utilized for rmnet_ll
+ * module.
+ *
+ * The function will return 0 on success, -EAGAIN if IPA not ready,
+ * -ENXIO if the feature is not enabled.
+ */
+int ipa_unregister_rmnet_ll_cb(void);
+
+int ipa_get_smmu_params(struct ipa_smmu_in_params *in,
+	struct ipa_smmu_out_params *out);
+/**
+ * ipa_is_vlan_mode - check if a LAN driver should load in VLAN mode
+ * @iface - type of vlan capable device
+ * @res - query result: true for vlan mode, false for non vlan mode
+ *
+ * API must be called after ipa_is_ready() returns true, otherwise it will fail
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_is_vlan_mode(enum ipa_vlan_ifaces iface, bool *res);
+
+/**
+ * ipa_get_lan_rx_napi - returns true if NAPI is enabled in the LAN RX dp
+ */
+bool ipa_get_lan_rx_napi(void);
+/*
+ * ipa_add_socksv5_conn - add socksv5 info to ipa driver
+ */
+int ipa_add_socksv5_conn(struct ipa_socksv5_info *info);
+
+/*
+ * ipa_del_socksv5_conn - del socksv5 info to ipa driver
+ */
+int ipa_del_socksv5_conn(uint32_t handle);
+
+int ipa_mhi_handle_ipa_config_req(struct ipa_config_req_msg_v01 *config_req);
+int ipa_wigig_save_regs(void);
+
+#else /* IS_ENABLED(CONFIG_IPA3) */
+
+/*
+ * Configuration
+ */
+static inline int ipa_cfg_ep_ctrl(u32 clnt_hdl,
+	const struct ipa_ep_cfg_ctrl *ep_ctrl)
+{
+	return -EPERM;
+}
+
+/*
+ * Routing
+ */
+static inline int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
+{
+	return -EPERM;
+}
+
+static inline int ipa_put_rt_tbl(u32 rt_tbl_hdl)
+{
+	return -EPERM;
+}
+
+/*
+ * Interface
+ */
+static inline int ipa_register_intf(const char *name,
+	const struct ipa_tx_intf *tx,
+	const struct ipa_rx_intf *rx)
+{
+	return -EPERM;
+}
+
+/*
+ * Aggregation
+ */
+static inline int ipa_set_aggr_mode(enum ipa_aggr_mode mode)
+{
+	return -EPERM;
+}
+
+static inline int ipa_set_qcncm_ndp_sig(char sig[3])
+{
+	return -EPERM;
+}
+
+static inline int ipa_set_single_ndp_per_mbim(bool enable)
+{
+	return -EPERM;
+}
+
+/*
+ * interrupts
+ */
+static inline int ipa_add_interrupt_handler(enum ipa_irq_type interrupt,
+	ipa_irq_handler_t handler,
+	bool deferred_flag,
+	void *private_data)
+{
+	return -EPERM;
+}
+
+static inline int ipa_restore_suspend_handler(void)
+{
+	return -EPERM;
+}
+
+/*
+ * Messaging
+ */
+static inline int ipa_send_msg(struct ipa_msg_meta *metadata, void *buff,
+		ipa_msg_free_fn callback)
+{
+	return -EPERM;
+}
+
+/*
+ * Data path
+ */
+static inline int ipa_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
+		struct ipa_tx_meta *metadata)
+{
+	return -EPERM;
+}
+
+/*
+ * QMAP Flow control TX
+ */
+static inline int ipa_rmnet_ctl_xmit(struct sk_buff *skb)
+{
+	return -EPERM;
+}
+
+/*
+ * Low Latency data Tx
+ */
+static inline int ipa_rmnet_ll_xmit(struct sk_buff *skb)
+{
+	return -EPERM;
+}
+
+/*
+ * Yellow water mark notifier register
+ */
+static inline int ipa_register_notifier(void *fn_ptr)
+{
+	return -EPERM;
+}
+
+/*
+ * Yellow water mark notifier unregister
+ */
+static inline int ipa_unregister_notifier(void *fn_ptr)
+{
+	return -EPERM;
+}
+
+static inline void ipa_free_skb(struct ipa_rx_data *rx_in)
+{
+}
+
+/*
+ * System pipes
+ */
+
+static inline int ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in,
+		u32 *clnt_hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_teardown_sys_pipe(u32 clnt_hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_connect_wdi_pipe(struct ipa_wdi_in_params *in,
+		struct ipa_wdi_out_params *out)
+{
+	return -EPERM;
+}
+
+static inline int ipa_disconnect_wdi_pipe(u32 clnt_hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_enable_wdi_pipe(u32 clnt_hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_disable_wdi_pipe(u32 clnt_hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_resume_wdi_pipe(u32 clnt_hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_suspend_wdi_pipe(u32 clnt_hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_broadcast_wdi_quota_reach_ind(uint32_t fid,
+		uint64_t num_bytes)
+{
+	return -EPERM;
+}
+
+static inline int ipa_uc_wdi_get_dbpa(
+	struct ipa_wdi_db_params *out)
+{
+	return -EPERM;
+}
+
+/*
+ * IPADMA
+ */
+static inline int ipa_dma_init(void)
+{
+	return -EPERM;
+}
+
+static inline int ipa_dma_enable(void)
+{
+	return -EPERM;
+}
+
+static inline int ipa_dma_disable(void)
+{
+	return -EPERM;
+}
+
+static inline int ipa_dma_sync_memcpy(phys_addr_t dest, phys_addr_t src
+			, int len)
+{
+	return -EPERM;
+}
+
+static inline int ipa_dma_async_memcpy(phys_addr_t dest, phys_addr_t src
+			, int len, void (*user_cb)(void *user1),
+			void *user_param)
+{
+	return -EPERM;
+}
+
+static inline void ipa_dma_destroy(void)
+{
+}
+
+/*
+ * Miscellaneous
+ */
+
+static inline int ipa_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats)
+{
+	return -EPERM;
+}
+
+static inline int ipa_uc_bw_monitor(struct ipa_wdi_bw_info *info)
+{
+	return -EPERM;
+}
+
+static inline int ipa_get_ep_mapping(enum ipa_client_type client)
+{
+	return -EPERM;
+}
+
+static inline bool ipa_is_ready(void)
+{
+	return false;
+}
+
+static inline int ipa_fmwk_deepsleep_entry_ipa(void)
+{
+	return -EPERM;
+}
+
+static inline int ipa_fmwk_deepsleep_exit_ipa(void)
+{
+	return -EPERM;
+}
+
+static inline enum ipa_hw_type ipa_get_hw_type(void)
+{
+	return IPA_HW_None;
+}
+
+static inline int ipa_register_ipa_ready_cb(
+	void (*ipa_ready_cb)(void *user_data),
+	void *user_data)
+{
+	return -EPERM;
+}
+
+static inline int ipa_is_vlan_mode(enum ipa_vlan_ifaces iface, bool *res)
+{
+	return -EPERM;
+}
+
+static inline bool ipa_get_lan_rx_napi(void)
+{
+	return false;
+}
+
+static inline int ipa_add_socksv5_conn(struct ipa_socksv5_info *info)
+{
+	return -EPERM;
+}
+
+static inline int ipa_del_socksv5_conn(uint32_t handle)
+{
+	return -EPERM;
+}
+
+static inline const struct ipa_gsi_ep_config *ipa_get_gsi_ep_info(
+	enum ipa_client_type client)
+{
+	return NULL;
+}
+
+static inline int ipa_stop_gsi_channel(u32 clnt_hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_register_rmnet_ctl_cb(
+	void (*ipa_rmnet_ctl_ready_cb)(void *user_data1),
+	void *user_data1,
+	void (*ipa_rmnet_ctl_stop_cb)(void *user_data2),
+	void *user_data2,
+	void (*ipa_rmnet_ctl_rx_notify_cb)(void *user_data3, void *rx_data),
+	void *user_data3)
+{
+	return -EPERM;
+}
+
+static inline int ipa_unregister_rmnet_ctl_cb(void)
+{
+	return -EPERM;
+}
+
+static inline int ipa3_uc_reg_rdyCB(
+	struct ipa_wdi_uc_ready_params *inout)
+{
+	return -EPERM;
+}
+
+static inline int ipa_register_rmnet_ll_cb(
+	void (*ipa_rmnet_ll_ready_cb)(void *user_data1),
+	void *user_data1,
+	void (*ipa_rmnet_ll_stop_cb)(void *user_data2),
+	void *user_data2,
+	void (*ipa_rmnet_ll_rx_notify_cb)(void *user_data3, void *rx_data),
+	void *user_data3)
+{
+	return -EPERM;
+}
+
+static inline int ipa_get_default_aggr_time_limit(enum ipa_client_type client,
+	u32 *default_aggr_time_limit)
+{
+	return -EPERM;
+}
+
+static inline int ipa_unregister_rmnet_ll_cb(void)
+{
+	return -EPERM;
+}
+
+#endif /* IS_ENABLED(CONFIG_IPA3) */
+
+/* stubs - to be removed once dependent drivers remove references */
+static inline int ipa_reset_endpoint(u32 clnt_hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_clear_endpoint_delay(u32 clnt_hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_commit_hdr(void)
+{
+	return -EPERM;
+}
+
+static inline int ipa_put_hdr(u32 hdr_hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_deregister_pull_msg(struct ipa_msg_meta *metadata)
+{
+	return -EPERM;
+}
+
+/*
+ * Miscellaneous
+ */
+static inline int ipa_rm_delete_resource(
+	enum ipa_rm_resource_name resource_name)
+{
+	return -EPERM;
+}
+
+static inline int ipa_rm_deregister(
+	enum ipa_rm_resource_name resource_name,
+	struct ipa_rm_register_params *reg_params)
+{
+	return -EPERM;
+}
+
+static inline int ipa_rm_set_perf_profile(
+	enum ipa_rm_resource_name resource_name,
+	struct ipa_rm_perf_profile *profile)
+{
+	return -EPERM;
+}
+
+static inline int ipa_rm_add_dependency(
+	enum ipa_rm_resource_name resource_name,
+	enum ipa_rm_resource_name depends_on_name)
+{
+	return -EPERM;
+}
+
+static inline int ipa_rm_add_dependency_sync(
+	enum ipa_rm_resource_name resource_name,
+	enum ipa_rm_resource_name depends_on_name)
+{
+	return -EPERM;
+}
+
+static inline int ipa_rm_delete_dependency(
+	enum ipa_rm_resource_name resource_name,
+	enum ipa_rm_resource_name depends_on_name)
+{
+	return -EPERM;
+}
+
+static inline int ipa_rm_request_resource(
+	enum ipa_rm_resource_name resource_name)
+{
+	return -EPERM;
+}
+
+static inline int ipa_rm_inactivity_timer_init(
+	enum ipa_rm_resource_name resource_name,
+	unsigned long msecs)
+{
+	return -EPERM;
+}
+
+static inline int ipa_rm_release_resource(
+	enum ipa_rm_resource_name resource_name)
+{
+	return -EPERM;
+}
+
+static inline int ipa_rm_notify_completion(enum ipa_rm_event event,
+	enum ipa_rm_resource_name resource_name)
+{
+	return -EPERM;
+}
+
+static inline int ipa_rm_inactivity_timer_destroy(
+	enum ipa_rm_resource_name resource_name)
+{
+	return -EPERM;
+}
+
+static inline int ipa_rm_inactivity_timer_request_resource(
+	enum ipa_rm_resource_name resource_name)
+{
+	return -EPERM;
+}
+
+static inline int ipa_rm_inactivity_timer_release_resource(
+	enum ipa_rm_resource_name resource_name)
+{
+	return -EPERM;
+}
+
+static inline enum ipa_rm_resource_name ipa_get_rm_resource_from_ep(
+	int pipe_idx)
+{
+	return -EPERM;
+}
+
+static inline bool ipa_is_client_handle_valid(u32 clnt_hdl)
+{
+	return false;
+}
+
+static inline enum ipa_client_type ipa_get_client_mapping(int pipe_idx)
+{
+	return -EPERM;
+}
+
+static inline bool ipa_get_modem_cfg_emb_pipe_flt(void)
+{
+	return false;
+}
+
+static inline enum ipa_transport_type ipa_get_transport_type(void)
+{
+	return IPA_TRANSPORT_TYPE_GSI;
+}
+
+static inline struct device *ipa_get_dma_dev(void)
+{
+	return NULL;
+}
+
+static inline struct iommu_domain *ipa_get_smmu_domain(void)
+{
+	return NULL;
+}
+
+static inline int ipa_disable_apps_wan_cons_deaggr(
+	uint32_t agg_size, uint32_t agg_count)
+{
+	return -EPERM;
+}
+
+#endif /* _IPA_H_ */

+ 278 - 0
drivers/platform/msm/include/linux/ipa_eth.h

@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _IPA_ETH_H_
+#define _IPA_ETH_H_
+
+#include "ipa.h"
+#include <linux/msm_ipa.h>
+#include <linux/msm_gsi.h>
+
+/* New architecture prototypes */
+
+typedef void (*ipa_eth_ready_cb)(void *user_data);
+typedef u32 ipa_eth_hdl_t;
+
+/**
+ * struct ipa_eth_ready - eth readiness parameters
+ *
+ * @notify: ipa_eth client ready callback notifier
+ * @userdata: userdata for ipa_eth ready cb
+ * @is_eth_ready: true if ipa_eth client is already ready
+ */
+struct ipa_eth_ready {
+	ipa_eth_ready_cb notify;
+	void *userdata;
+
+	/* out params */
+	bool is_eth_ready;
+};
+
+/**
+ * enum ipa_eth_client_type - names for the various IPA
+ * eth "clients".
+ */
+enum ipa_eth_client_type {
+	IPA_ETH_CLIENT_AQC107,
+	IPA_ETH_CLIENT_AQC113,
+	IPA_ETH_CLIENT_RTK8111K,
+	IPA_ETH_CLIENT_RTK8125B,
+	IPA_ETH_CLIENT_NTN,
+	IPA_ETH_CLIENT_EMAC,
+	IPA_ETH_CLIENT_MAX,
+};
+
+/**
+ * enum ipa_eth_pipe_traffic_type - traffic type for the various IPA
+ * eth "pipes".
+ */
+enum ipa_eth_pipe_traffic_type {
+	IPA_ETH_PIPE_BEST_EFFORT,
+	IPA_ETH_PIPE_LOW_LATENCY,
+	IPA_ETH_PIPE_TRAFFIC_TYPE_MAX,
+};
+
+/**
+ * enum ipa_eth_pipe_direction - pipe directions for the same
+ * ethernet client.
+ */
+enum ipa_eth_pipe_direction {
+	IPA_ETH_PIPE_DIR_TX,
+	IPA_ETH_PIPE_DIR_RX,
+	IPA_ETH_PIPE_DIR_MAX,
+};
+
+#define IPA_ETH_INST_ID_MAX (2)
+
+/**
+ * struct ipa_eth_ntn_setup_info - parameters for ntn ethernet
+ * offloading
+ *
+ * @bar_addr: bar PA to access NTN register
+ * @tail_ptr_offs: tail ptr offset
+ * @ioc_mod_threshold: Descriptors # per interrupt request from
+ * NTN3 HW via descriptor bit as part of the protocol.
+ */
+struct ipa_eth_ntn_setup_info {
+	phys_addr_t bar_addr;
+	phys_addr_t tail_ptr_offs;
+	uint16_t ioc_mod_threshold;
+};
+
+/**
+ * struct ipa_eth_aqc_setup_info - parameters for aqc ethernet
+ * offloading
+ *
+ * @bar_addr: bar PA to access AQC register
+ * @head_ptr_offs: head ptr offset
+ * @aqc_ch: AQC ch number
+ * @dest_tail_ptr_offs: tail ptr offset
+ */
+struct ipa_eth_aqc_setup_info {
+	phys_addr_t bar_addr;
+	phys_addr_t head_ptr_offs;
+	u8 aqc_ch;
+	phys_addr_t dest_tail_ptr_offs;
+};
+
+
+/**
+ * struct ipa_eth_realtek_setup_info - parameters for realtek ethernet
+ * offloading
+ *
+ * @bar_addr: bar PA to access RTK register
+ * @bar_size: bar region size
+ * @queue_number: Which RTK queue to check the status on
+ * @dest_tail_ptr_offs: tail ptr offset
+ */
+struct ipa_eth_realtek_setup_info {
+	phys_addr_t bar_addr;
+	u32 bar_size;
+	u8 queue_number;
+	phys_addr_t dest_tail_ptr_offs;
+};
+
+/**
+ * struct ipa_eth_buff_smmu_map -  IPA iova->pa SMMU mapping
+ * @iova: virtual address of the data buffer
+ * @pa: physical address of the data buffer
+ */
+struct ipa_eth_buff_smmu_map {
+	dma_addr_t iova;
+	phys_addr_t pa;
+};
+
+/**
+ * struct  ipa_eth_pipe_setup_info - info needed for IPA setups
+ * @is_transfer_ring_valid: if transfer ring is needed
+ * @transfer_ring_base:  the base of the transfer ring
+ * @transfer_ring_sgt: sgtable of transfer ring
+ * @transfer_ring_size:  size of the transfer ring
+ * @is_buffer_pool_valid: if buffer pool is needed
+ * @buffer_pool_base_addr:  base of buffer pool address
+ * @buffer_pool_base_sgt:  sgtable of buffer pool
+ * @data_buff_list_size: number of buffers
+ * @data_buff_list: array of data buffer list
+ * @fix_buffer_size: buffer size
+ * @notify:	callback for exception/embedded packets
+ * @priv: priv for exception callback
+ * @client_info: vendor specific pipe setup info
+ * @db_pa: doorbell physical address
+ * @db_val: doorbell value ethernet HW need to ring
+ */
+struct ipa_eth_pipe_setup_info {
+	/* transfer ring info */
+	bool is_transfer_ring_valid;
+	dma_addr_t  transfer_ring_base;
+	struct sg_table *transfer_ring_sgt;
+	u32 transfer_ring_size;
+
+	/* buffer pool info */
+	bool is_buffer_pool_valid;
+	dma_addr_t buffer_pool_base_addr;
+	struct sg_table *buffer_pool_base_sgt;
+
+	/* buffer info */
+	u32 data_buff_list_size;
+	struct ipa_eth_buff_smmu_map *data_buff_list;
+	u32 fix_buffer_size;
+
+	/* client notify cb */
+	ipa_notify_cb notify;
+	void *priv;
+
+	/* vendor specific info */
+	union {
+		struct ipa_eth_aqc_setup_info aqc;
+		struct ipa_eth_realtek_setup_info rtk;
+		struct ipa_eth_ntn_setup_info ntn;
+	} client_info;
+
+	/* output params */
+	phys_addr_t db_pa;
+	u32 db_val;
+};
+
+/**
+ * struct  ipa_eth_client_pipe_info - ETH pipe/gsi related configuration
+ * @link: link of ep for different client function on same ethernet HW
+ * @dir: TX or RX direction
+ * @info: tx/rx pipe setup info
+ * @client_info: client the pipe belongs to
+ * @pipe_hdl: output params, pipe handle
+ */
+struct ipa_eth_client_pipe_info {
+	struct list_head link;
+	enum ipa_eth_pipe_direction dir;
+	struct ipa_eth_pipe_setup_info info;
+	struct ipa_eth_client *client_info;
+
+	/* output params */
+	ipa_eth_hdl_t pipe_hdl;
+};
+
+/**
+ * struct  ipa_eth_client - client info per traffic type
+ * provided by offload client
+ * @client_type: ethernet client type
+ * @inst_id: instance id for dual NIC support
+ * @traffic_type: traffic type
+ * @pipe_list: list of pipes with same traffic type
+ * @priv: private data for client
+ * @test: is test client
+ */
+struct ipa_eth_client {
+	/* vendor driver */
+	enum ipa_eth_client_type client_type;
+	u8 inst_id;
+
+	/* traffic type */
+	enum ipa_eth_pipe_traffic_type traffic_type;
+	struct list_head pipe_list;
+
+	/* client specific priv data*/
+	void *priv;
+	bool test;
+};
+
+/**
+ * struct  ipa_eth_perf_profile - To set BandWidth profile
+ *
+ * @max_supported_bw_mbps: maximum bandwidth needed (in Mbps)
+ */
+struct ipa_eth_perf_profile {
+	u32 max_supported_bw_mbps;
+};
+
+/**
+ * struct ipa_eth_hdr_info - Header to install on IPA HW
+ *
+ * @hdr: header to install on IPA HW
+ * @hdr_len: length of header
+ * @dst_mac_addr_offset: destination mac address offset
+ * @hdr_type: layer two header type
+ */
+struct ipa_eth_hdr_info {
+	u8 *hdr;
+	u8 hdr_len;
+	u8 dst_mac_addr_offset;
+	enum ipa_hdr_l2_type hdr_type;
+};
+
+/**
+ * struct ipa_eth_intf_info - parameters for ipa offload
+ *	interface registration
+ *
+ * @netdev_name: network interface name
+ * @hdr: hdr for ipv4/ipv6
+ * @pipe_hdl_list_size: number of pipes prop needed for this interface
+ * @pipe_hdl_list: array of pipes used for this interface
+ */
+struct ipa_eth_intf_info {
+	const char *netdev_name;
+	struct ipa_eth_hdr_info hdr[IPA_IP_MAX];
+
+	/* tx/rx pipes for same netdev */
+	int pipe_hdl_list_size;
+	ipa_eth_hdl_t *pipe_hdl_list;
+};
+
+int ipa_eth_register_ready_cb(struct ipa_eth_ready *ready_info);
+int ipa_eth_unregister_ready_cb(struct ipa_eth_ready *ready_info);
+int ipa_eth_client_conn_pipes(struct ipa_eth_client *client);
+int ipa_eth_client_disconn_pipes(struct ipa_eth_client *client);
+int ipa_eth_client_reg_intf(struct ipa_eth_intf_info *intf);
+int ipa_eth_client_unreg_intf(struct ipa_eth_intf_info *intf);
+int ipa_eth_client_set_perf_profile(struct ipa_eth_client *client,
+	struct ipa_eth_perf_profile *profile);
+int ipa_eth_client_conn_evt(struct ipa_ecm_msg *msg);
+int ipa_eth_client_disconn_evt(struct ipa_ecm_msg *msg);
+enum ipa_client_type ipa_eth_get_ipa_client_type_from_eth_type(
+	enum ipa_eth_client_type eth_client_type, enum ipa_eth_pipe_direction dir);
+bool ipa_eth_client_exist(
+	enum ipa_eth_client_type eth_client_type, int inst_id);
+
+#endif // _IPA_ETH_H_

+ 170 - 0
drivers/platform/msm/include/linux/ipa_mhi.h

@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef IPA_MHI_H_
+#define IPA_MHI_H_
+
+#include <linux/ipa.h>
+#include <linux/types.h>
+
+/**
+ * enum ipa_mhi_event_type - event type for mhi callback
+ *
+ * @IPA_MHI_EVENT_READY: IPA MHI is ready and IPA uC is loaded. After getting
+ *	this event the MHI client is expected to call the ipa_mhi_start() API
+ * @IPA_MHI_EVENT_DATA_AVAILABLE: downlink data available on MHI channel
+ */
+enum ipa_mhi_event_type {
+	IPA_MHI_EVENT_READY,
+	IPA_MHI_EVENT_DATA_AVAILABLE,
+	IPA_MHI_EVENT_MAX,
+};
+
+enum ipa_mhi_mstate {
+	IPA_MHI_STATE_M0,
+	IPA_MHI_STATE_M1,
+	IPA_MHI_STATE_M2,
+	IPA_MHI_STATE_M3,
+	IPA_MHI_STATE_M_MAX
+};
+
+typedef void (*mhi_client_cb)(void *priv, enum ipa_mhi_event_type event,
+	unsigned long data);
+
+/**
+ * struct ipa_mhi_msi_info - parameters for MSI (Message Signaled Interrupts)
+ * @addr_low: MSI lower base physical address
+ * @addr_hi: MSI higher base physical address
+ * @data: Data Pattern to use when generating the MSI
+ * @mask: Mask indicating number of messages assigned by the host to device
+ *
+ * msi value is written according to this formula:
+ *	((data & ~mask) | (mmio.msiVec & mask))
+ */
+struct ipa_mhi_msi_info {
+	u32 addr_low;
+	u32 addr_hi;
+	u32 data;
+	u32 mask;
+};
+
+/**
+ * struct ipa_mhi_init_params - parameters for IPA MHI initialization API
+ *
+ * @msi: MSI (Message Signaled Interrupts) parameters
+ * @mmio_addr: MHI MMIO physical address
+ * @first_ch_idx: First channel ID for hardware accelerated channels.
+ * @first_er_idx: First event ring ID for hardware accelerated channels.
+ * @assert_bit40: should assert bit 40 in order to access host space.
+ *	if PCIe iATU is configured then not need to assert bit40
+ * @notify: client callback
+ * @priv: client private data to be provided in client callback
+ * @test_mode: flag to indicate if IPA MHI is in unit test mode
+ */
+struct ipa_mhi_init_params {
+	struct ipa_mhi_msi_info msi;
+	u32 mmio_addr;
+	u32 first_ch_idx;
+	u32 first_er_idx;
+	bool assert_bit40;
+	mhi_client_cb notify;
+	void *priv;
+	bool test_mode;
+};
+
+/**
+ * struct ipa_mhi_start_params - parameters for IPA MHI start API
+ *
+ * @host_ctrl_addr: Base address of MHI control data structures
+ * @host_data_addr: Base address of MHI data buffers
+ * @channel_context_array_addr: channel context array address in host address space
+ * @event_context_array_addr: event context array address in host address space
+ */
+struct ipa_mhi_start_params {
+	u32 host_ctrl_addr;
+	u32 host_data_addr;
+	u64 channel_context_array_addr;
+	u64 event_context_array_addr;
+};
+
+/**
+ * struct ipa_mhi_connect_params - parameters for IPA MHI channel connect API
+ *
+ * @sys: IPA EP configuration info
+ * @channel_id: MHI channel id
+ */
+struct ipa_mhi_connect_params {
+	struct ipa_sys_connect_params sys;
+	u8 channel_id;
+};
+
+/* bit #40 in address should be asserted for MHI transfers over pcie */
+#define IPA_MHI_HOST_ADDR(addr) ((addr) | BIT_ULL(40))
+
+#if IS_ENABLED(CONFIG_IPA3)
+
+int ipa_mhi_init(struct ipa_mhi_init_params *params);
+
+int ipa_mhi_start(struct ipa_mhi_start_params *params);
+
+int ipa_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl);
+
+int ipa_mhi_disconnect_pipe(u32 clnt_hdl);
+
+int ipa_mhi_suspend(bool force);
+
+int ipa_mhi_resume(void);
+
+void ipa_mhi_destroy(void);
+
+int ipa_mhi_update_mstate(enum ipa_mhi_mstate mstate_info);
+
+#else /* IS_ENABLED(CONFIG_IPA3) */
+
+static inline int ipa_mhi_init(struct ipa_mhi_init_params *params)
+{
+	return -EPERM;
+}
+
+static inline int ipa_mhi_start(struct ipa_mhi_start_params *params)
+{
+	return -EPERM;
+}
+
+static inline int ipa_mhi_connect_pipe(struct ipa_mhi_connect_params *in,
+	u32 *clnt_hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_mhi_disconnect_pipe(u32 clnt_hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_mhi_suspend(bool force)
+{
+	return -EPERM;
+}
+
+static inline int ipa_mhi_resume(void)
+{
+	return -EPERM;
+}
+
+static inline void ipa_mhi_destroy(void)
+{
+
+}
+
+static inline int ipa_mhi_update_mstate
+			(enum ipa_mhi_mstate mstate_info)
+{
+	return -EPERM;
+}
+
+#endif /* IS_ENABLED(CONFIG_IPA3) */
+
+#endif /* IPA_MHI_H_ */

+ 141 - 0
drivers/platform/msm/include/linux/ipa_odu_bridge.h

@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _IPA_ODO_BRIDGE_H_
+#define _IPA_ODO_BRIDGE_H_
+
+#include "ipa.h"
+
+/**
+ * struct odu_bridge_params - parameters for odu bridge initialization API
+ *
+ * @netdev_name: network interface name
+ * @priv: private data that will be supplied to client's callback
+ * @tx_dp_notify: callback for handling SKB. the following event are supported:
+ *	IPA_WRITE_DONE:	will be called after client called to odu_bridge_tx_dp()
+ *			Client is expected to free the skb.
+ *	IPA_RECEIVE:	will be called for delivering skb to APPS.
+ *			Client is expected to deliver the skb to network stack.
+ * @send_dl_skb: callback for sending skb on downlink direction to adapter.
+ *		Client is expected to free the skb.
+ * @device_ethaddr: device Ethernet address in network order.
+ * @ipa_desc_size: IPA Sys Pipe Desc Size
+ */
+struct odu_bridge_params {
+	const char *netdev_name;
+	void *priv;
+	ipa_notify_cb tx_dp_notify;
+	int (*send_dl_skb)(void *priv, struct sk_buff *skb);
+	u8 device_ethaddr[ETH_ALEN];
+	u32 ipa_desc_size;
+};
+
+/**
+ * struct ipa_bridge_init_params - parameters for IPA bridge initialization API
+ *
+ * @info: structure contains initialization information
+ * @wakeup_request: callback to client to indicate there is downlink data
+ *	available. Client is expected to call ipa_bridge_resume() to start
+ *	receiving data
+ */
+struct ipa_bridge_init_params {
+	struct odu_bridge_params info;
+	void (*wakeup_request)(void *cl_priv);
+};
+
+#if IS_ENABLED(CONFIG_IPA3)
+
+int ipa_bridge_init(struct ipa_bridge_init_params *params, u32 *hdl);
+
+int ipa_bridge_connect(u32 hdl);
+
+int ipa_bridge_set_perf_profile(u32 hdl, u32 bandwidth);
+
+int ipa_bridge_disconnect(u32 hdl);
+
+int ipa_bridge_suspend(u32 hdl);
+
+int ipa_bridge_resume(u32 hdl);
+
+int ipa_bridge_tx_dp(u32 hdl, struct sk_buff *skb,
+	struct ipa_tx_meta *metadata);
+
+int ipa_bridge_cleanup(u32 hdl);
+
+#else /* IS_ENABLED(CONFIG_IPA3) */
+
+/* Stub for CONFIG_IPA3-disabled builds. Must take the same parameter type
+ * as the real prototype above (struct ipa_bridge_init_params *); the
+ * original stub used struct odu_bridge_params *, giving callers a
+ * conflicting API depending on configuration.
+ */
+static inline int ipa_bridge_init(struct ipa_bridge_init_params *params,
+	u32 *hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_bridge_connect(u32 hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_bridge_set_perf_profile(u32 hdl, u32 bandwidth)
+{
+	return -EPERM;
+}
+
+static inline int ipa_bridge_disconnect(u32 hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_bridge_suspend(u32 hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_bridge_resume(u32 hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_bridge_tx_dp(u32 hdl, struct sk_buff *skb,
+struct ipa_tx_meta *metadata)
+{
+	return -EPERM;
+}
+
+static inline int ipa_bridge_cleanup(u32 hdl)
+{
+	return -EPERM;
+}
+
+#endif /* IS_ENABLED(CONFIG_IPA3) */
+
+/* Below API is deprecated. Please use the API above */
+
+static inline int odu_bridge_init(struct odu_bridge_params *params)
+{
+	return -EPERM;
+}
+
+static inline int odu_bridge_disconnect(void)
+{
+	return -EPERM;
+}
+
+static inline int odu_bridge_connect(void)
+{
+	return -EPERM;
+}
+
+static inline int odu_bridge_tx_dp(struct sk_buff *skb,
+						struct ipa_tx_meta *metadata)
+{
+	return -EPERM;
+}
+
+static inline int odu_bridge_cleanup(void)
+{
+	return -EPERM;
+}
+
+#endif /* _IPA_ODO_BRIDGE_H_ */

+ 101 - 0
drivers/platform/msm/include/linux/ipa_qdss.h

@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _IPA_QDSS_H_
+#define _IPA_QDSS_H_
+
+#include "ipa.h"
+
+/**
+ * enum ipa_qdss_notify - these are the only return items
+ * @IPA_QDSS_SUCCESS: will be returned as it is for both conn
+ *						and disconn
+ * @IPA_QDSS_PIPE_CONN_FAILURE: will be returned as negative value
+ * @IPA_QDSS_PIPE_DISCONN_FAILURE: will be returned as negative value
+ */
+enum ipa_qdss_notify {
+	IPA_QDSS_SUCCESS,
+	IPA_QDSS_PIPE_CONN_FAILURE,
+	IPA_QDSS_PIPE_DISCONN_FAILURE,
+};
+
+/**
+ * struct  ipa_qdss_conn_in_params - QDSS -> IPA TX configuration
+ * @data_fifo_base_addr: Base address of the data FIFO used by BAM
+ * @data_fifo_size: Size of the data FIFO
+ * @desc_fifo_base_addr: Base address of the descriptor FIFO by BAM
+ * @desc_fifo_size: Should be configured to 1 by QDSS
+ * @bam_p_evt_dest_addr: equivalent to event_ring_doorbell_pa
+ *			physical address of the doorbell that IPA uC
+ *			will update the headpointer of the event ring.
+ *			QDSS should send BAM_P_EVNT_REG address in this var
+ *			Configured with the GSI Doorbell Address.
+ *			GSI sends Update RP by doing a write to this address
+ * @bam_p_evt_threshold: Threshold level of how many bytes consumed
+ * @override_eot: if override EOT==1, it doesn't check the EOT bit in
+ *			the descriptor
+ */
+struct ipa_qdss_conn_in_params {
+	phys_addr_t  data_fifo_base_addr;
+	u32  data_fifo_size;
+	phys_addr_t desc_fifo_base_addr;
+	u32 desc_fifo_size;
+	phys_addr_t  bam_p_evt_dest_addr;
+	u32 bam_p_evt_threshold;
+	u32 override_eot;
+};
+
+/**
+ * struct  ipa_qdss_conn_out_params - information provided
+ *				to QDSS driver
+ * @rx_db_pa: physical address of IPA doorbell for RX (QDSS->IPA transactions)
+ *		QDSS to take this address and assign it to BAM_P_EVENT_DEST_ADDR
+ */
+struct ipa_qdss_conn_out_params {
+	phys_addr_t ipa_rx_db_pa;
+};
+
+#if IS_ENABLED(CONFIG_IPA3)
+
+/**
+ * ipa_qdss_conn_pipes - Client should call this
+ * function to connect QDSS -> IPA pipe
+ *
+ * @in: [in] input parameters from client
+ * @out: [out] output params to client
+ *
+ * Note: Should not be called from atomic context
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_qdss_conn_pipes(struct ipa_qdss_conn_in_params *in,
+	struct ipa_qdss_conn_out_params *out);
+
+/**
+ * ipa_qdss_disconn_pipes() - Client should call this
+ *		function to disconnect pipes
+ *
+ * Note: Should not be called from atomic context
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_qdss_disconn_pipes(void);
+
+#else /* CONFIG_IPA3 */
+
+static inline int ipa_qdss_conn_pipes(struct ipa_qdss_conn_in_params *in,
+	struct ipa_qdss_conn_out_params *out)
+{
+	return -IPA_QDSS_PIPE_CONN_FAILURE;
+}
+
+static inline int ipa_qdss_disconn_pipes(void)
+{
+	return -IPA_QDSS_PIPE_DISCONN_FAILURE;
+}
+
+#endif /* CONFIG_IPA3 */
+#endif /* _IPA_QDSS_H_ */

+ 326 - 0
drivers/platform/msm/include/linux/ipa_uc_offload.h

@@ -0,0 +1,326 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _IPA_UC_OFFLOAD_H_
+#define _IPA_UC_OFFLOAD_H_
+
+#include "ipa.h"
+
+/**
+ * enum ipa_uc_offload_proto
+ * Protocol type: either WDI or Neutrino
+ *
+ * @IPA_UC_WDI: wdi Protocol
+ * @IPA_UC_NTN: Neutrino Protocol
+ */
+enum ipa_uc_offload_proto {
+	IPA_UC_INVALID = 0,
+	IPA_UC_WDI = 1,
+	IPA_UC_NTN = 2,
+	IPA_UC_NTN_V2X = 3,
+	IPA_UC_MAX_PROT_SIZE
+};
+
+/**
+ * struct ipa_hdr_info - Header to install on IPA HW
+ *
+ * @hdr: header to install on IPA HW
+ * @hdr_len: length of header
+ * @dst_mac_addr_offset: destination mac address offset
+ * @hdr_type: layer two header type
+ */
+struct ipa_hdr_info {
+	u8 *hdr;
+	u8 hdr_len;
+	u8 dst_mac_addr_offset;
+	enum ipa_hdr_l2_type hdr_type;
+};
+
+/**
+ * struct ipa_uc_offload_intf_params - parameters for uC offload
+ *	interface registration
+ *
+ * @netdev_name: network interface name
+ * @notify:	callback for exception/embedded packets
+ * @priv: callback cookie
+ * @hdr_info: header information
+ * @is_meta_data_valid: if set, meta_data/meta_data_mask are valid
+ * @meta_data: metadata if any
+ * @meta_data_mask: metadata mask
+ * @proto: uC offload protocol type
+ * @alt_dst_pipe: alternate routing output pipe
+ */
+struct ipa_uc_offload_intf_params {
+	const char *netdev_name;
+	ipa_notify_cb notify;
+	void *priv;
+	struct ipa_hdr_info hdr_info[IPA_IP_MAX];
+	u8 is_meta_data_valid;
+	u32 meta_data;
+	u32 meta_data_mask;
+	enum ipa_uc_offload_proto proto;
+	enum ipa_client_type alt_dst_pipe;
+};
+
+/**
+ * struct ntn_buff_smmu_map -  IPA iova->pa SMMU mapping
+ * @iova: virtual address of the data buffer
+ * @pa: physical address of the data buffer
+ */
+struct ntn_buff_smmu_map {
+	dma_addr_t iova;
+	phys_addr_t pa;
+};
+
+/**
+ * struct  ipa_ntn_setup_info - NTN TX/Rx configuration
+ * @client: type of "client" (IPA_CLIENT_ODU#_PROD/CONS)
+ * @smmu_enabled: SMMU is enabled for uC or not
+ * @ring_base_pa: physical address of the base of the Tx/Rx ring
+ * @ring_base_iova: virtual address of the base of the Tx/Rx ring
+ * @ring_base_sgt:Scatter table for ntn_rings,contains valid non NULL
+ *			value when ENAC S1-SMMU enabled, else NULL.
+ * @ntn_ring_size: size of the Tx/Rx ring (in terms of elements)
+ * @buff_pool_base_pa: physical address of the base of the Tx/Rx buffer pool
+ * @buff_pool_base_iova: virtual address of the base of the Tx/Rx buffer pool
+ * @buff_pool_base_sgt: Scatter table for buffer pools,contains valid
+ *			non NULL value. When NULL, the pa to iova mapping
+ *			is one-to-one (SMMU disabled, pa == iova).
+ * @num_buffers: Rx/Tx buffer pool size (in terms of elements)
+ * @data_buff_size: size of the each data buffer allocated in DDR
+ * @ntn_reg_base_ptr_pa: physical address of the Tx/Rx NTN Ring's
+ *			tail pointer
+ * @db_mode: 0 means irq mode, 1 means db mode
+ */
+struct ipa_ntn_setup_info {
+	enum ipa_client_type client;
+	bool smmu_enabled;
+	phys_addr_t ring_base_pa;
+	dma_addr_t ring_base_iova;
+	struct sg_table *ring_base_sgt;
+
+	u32 ntn_ring_size;
+
+	phys_addr_t buff_pool_base_pa;
+	dma_addr_t buff_pool_base_iova;
+	struct sg_table *buff_pool_base_sgt;
+
+	struct ntn_buff_smmu_map *data_buff_list;
+
+	u32 num_buffers;
+
+	u32 data_buff_size;
+
+	phys_addr_t ntn_reg_base_ptr_pa;
+
+	u8 db_mode;
+};
+
+/**
+ * struct ipa_uc_offload_out_params - out parameters for uC offload
+ *
+ * @clnt_hndl: Handle that client need to pass during
+ *	further operations
+ */
+struct ipa_uc_offload_out_params {
+	u32 clnt_hndl;
+};
+
+/**
+ * struct  ipa_ntn_conn_in_params - NTN TX/Rx connect parameters
+ * @ul: parameters to connect UL pipe(from Neutrino to IPA)
+ * @dl: parameters to connect DL pipe(from IPA to Neutrino)
+ */
+struct ipa_ntn_conn_in_params {
+	struct ipa_ntn_setup_info ul;
+	struct ipa_ntn_setup_info dl;
+};
+
+/**
+ * struct  ipa_ntn_conn_out_params - information provided
+ *				to uC offload client
+ * @ul_uc_db_pa: physical address of IPA uc doorbell for UL
+ * @dl_uc_db_pa: physical address of IPA uc doorbell for DL
+ * @clnt_hdl: opaque handle assigned to offload client
+ * @ul_uc_db_iomem: iomem address of IPA uc doorbell for UL
+ * @dl_uc_db_iomem: iomem address of IPA uc doorbell for DL
+ */
+struct ipa_ntn_conn_out_params {
+	phys_addr_t ul_uc_db_pa;
+	phys_addr_t dl_uc_db_pa;
+	void __iomem *ul_uc_db_iomem;
+	void __iomem *dl_uc_db_iomem;
+};
+
+/**
+ * struct  ipa_uc_offload_conn_in_params - information provided by
+ *		uC offload client
+ * @clnt_hndl: Handle that return as part of reg interface
+ * @proto: Protocol to use for offload data path
+ * @ntn: uC RX/Tx configuration info
+ */
+struct ipa_uc_offload_conn_in_params {
+	u32 clnt_hndl;
+	union {
+		struct ipa_ntn_conn_in_params ntn;
+	} u;
+};
+
+/**
+ * struct  ipa_uc_offload_conn_out_params - information provided
+ *		to uC offload client
+ * @ul_uc_db_pa: physical address of IPA uc doorbell for UL
+ * @dl_uc_db_pa: physical address of IPA uc doorbell for DL
+ * @clnt_hdl: opaque handle assigned to offload client
+ */
+struct ipa_uc_offload_conn_out_params {
+	union {
+		struct ipa_ntn_conn_out_params ntn;
+	} u;
+};
+
+/**
+ * struct  ipa_perf_profile - To set BandWidth profile
+ *
+ * @client: type of "client" (IPA_CLIENT_ODU#_PROD/CONS)
+ * @proto: uC offload protocol type
+ * @max_supported_bw_mbps: maximum bandwidth needed (in Mbps)
+ */
+struct ipa_perf_profile {
+	enum ipa_client_type client;
+	enum ipa_uc_offload_proto proto;
+	u32 max_supported_bw_mbps;
+};
+
+/**
+ * struct  ipa_uc_ready_params - uC ready CB parameters
+ * @is_uC_ready: uC loaded or not
+ * @priv : callback cookie
+ * @notify:	callback
+ * @proto: uC offload protocol type
+ */
+struct ipa_uc_ready_params {
+	bool is_uC_ready;
+	void *priv;
+	ipa_uc_ready_cb notify;
+	enum ipa_uc_offload_proto proto;
+};
+
+#if IS_ENABLED(CONFIG_IPA3)
+
+/**
+ * ipa_uc_offload_reg_intf - Client should call this function to
+ * init uC offload data path
+ *
+ * @init:	[in] initialization parameters
+ *
+ * Note: Should not be called from atomic context and only
+ * after checking IPA readiness using ipa_register_ipa_ready_cb()
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_uc_offload_reg_intf(
+	struct ipa_uc_offload_intf_params *in,
+	struct ipa_uc_offload_out_params *out);
+
+/**
+ * ipa_uc_offload_cleanup - Client Driver should call this
+ * function before unload and after disconnect
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_uc_offload_cleanup(u32 clnt_hdl);
+
+/**
+ * ipa_uc_offload_conn_pipes - Client should call this
+ * function to connect uC pipe for offload data path
+ *
+ * @in:	[in] input parameters from client
+ * @out: [out] output params to client
+ *
+ * Note: Should not be called from atomic context and only
+ * after checking IPA readiness using ipa_register_ipa_ready_cb()
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_uc_offload_conn_pipes(struct ipa_uc_offload_conn_in_params *in,
+			struct ipa_uc_offload_conn_out_params *out);
+
+/**
+ * ipa_uc_offload_disconn_pipes() - Client should call this
+ *		function to disconnect uC pipe to disable offload data path
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Note: Should not be called from atomic context
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_uc_offload_disconn_pipes(u32 clnt_hdl);
+
+/**
+ * ipa_set_perf_profile() - Client should call this function to
+ *		set IPA clock Band Width based on data rates
+ * @profile: [in] BandWidth profile to use
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_set_perf_profile(struct ipa_perf_profile *profile);
+
+
+/*
+ * To register uC ready callback if uC not ready
+ * and also check uC readiness
+ * if uC not ready only, register callback
+ */
+int ipa_uc_offload_reg_rdyCB(struct ipa_uc_ready_params *param);
+
+/*
+ * To de-register uC ready callback
+ */
+void ipa_uc_offload_dereg_rdyCB(enum ipa_uc_offload_proto proto);
+
+#else /* IS_ENABLED(CONFIG_IPA3) */
+
+static inline int ipa_uc_offload_reg_intf(
+		struct ipa_uc_offload_intf_params *in,
+		struct ipa_uc_offload_out_params *out)
+{
+	return -EPERM;
+}
+
+/* Stub for CONFIG_IPA3-disabled builds. Name must match the real API
+ * ipa_uc_offload_cleanup() declared above; the original "ipa_uC_" typo
+ * left callers with an undefined symbol when CONFIG_IPA3 is off.
+ */
+static inline int ipa_uc_offload_cleanup(u32 clnt_hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_uc_offload_conn_pipes(
+		struct ipa_uc_offload_conn_in_params *in,
+		struct ipa_uc_offload_conn_out_params *out)
+{
+	return -EPERM;
+}
+
+static inline int ipa_uc_offload_disconn_pipes(u32 clnt_hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_set_perf_profile(struct ipa_perf_profile *profile)
+{
+	return -EPERM;
+}
+
+static inline int ipa_uc_offload_reg_rdyCB(struct ipa_uc_ready_params *param)
+{
+	return -EPERM;
+}
+
+static inline void ipa_uc_offload_dereg_rdyCB(enum ipa_uc_offload_proto proto)
+{
+}
+
+#endif /* CONFIG_IPA3 */
+
+#endif /* _IPA_UC_OFFLOAD_H_ */

+ 737 - 0
drivers/platform/msm/include/linux/ipa_wdi3.h

@@ -0,0 +1,737 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018 - 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _IPA_WDI3_H_
+#define _IPA_WDI3_H_
+
+#include <linux/ipa.h>
+
+#define IPA_HW_WDI3_TCL_DATA_CMD_ER_DESC_SIZE 32
+#define IPA_HW_WDI3_IPA2FW_ER_DESC_SIZE 8
+
+#define IPA_HW_WDI3_MAX_ER_DESC_SIZE \
+	(((IPA_HW_WDI3_TCL_DATA_CMD_ER_DESC_SIZE) > \
+	(IPA_HW_WDI3_IPA2FW_ER_DESC_SIZE)) ?  \
+	(IPA_HW_WDI3_TCL_DATA_CMD_ER_DESC_SIZE) : \
+	(IPA_HW_WDI3_IPA2FW_ER_DESC_SIZE))
+
+#define IPA_WDI_MAX_SUPPORTED_SYS_PIPE 3
+
+typedef u32 ipa_wdi_hdl_t;
+
+enum ipa_wdi_version {
+	IPA_WDI_1,
+	IPA_WDI_2,
+	IPA_WDI_3,
+	IPA_WDI_3_V2,
+	IPA_WDI_VER_MAX
+};
+
+#define IPA_WDI3_TX_DIR 1
+#define IPA_WDI3_TX1_DIR 2
+#define IPA_WDI3_RX_DIR 3
+#define IPA_WDI_INST_MAX (2)
+
+/**
+ * struct ipa_wdi_init_in_params - wdi init input parameters
+ *
+ * @wdi_version: wdi version
+ * @notify: uc ready callback
+ * @priv: uc ready callback cookie
+ */
+struct ipa_wdi_init_in_params {
+	enum ipa_wdi_version wdi_version;
+	ipa_uc_ready_cb notify;
+	void *priv;
+#ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN
+	ipa_wdi_meter_notifier_cb wdi_notify;
+#endif
+	int inst_id;
+};
+
+/**
+ * struct ipa_wdi_init_out_params - wdi init output parameters
+ *
+ * @is_uC_ready: is uC ready. No API should be called until uC
+ *	is ready.
+ * @is_smmu_enabled: is smmu enabled
+ * @is_over_gsi: is wdi over GSI or uC
+ * @hdl: handle assigned to this WDI client instance
+ */
+struct ipa_wdi_init_out_params {
+	bool is_uC_ready;
+	bool is_smmu_enabled;
+	bool is_over_gsi;
+	ipa_wdi_hdl_t hdl;
+};
+
+/**
+ * struct ipa_wdi_hdr_info - Header to install on IPA HW
+ *
+ * @hdr: header to install on IPA HW
+ * @hdr_len: length of header
+ * @dst_mac_addr_offset: destination mac address offset
+ * @hdr_type: layer two header type
+ */
+struct ipa_wdi_hdr_info {
+	u8 *hdr;
+	u8 hdr_len;
+	u8 dst_mac_addr_offset;
+	enum ipa_hdr_l2_type hdr_type;
+};
+
+/**
+ * struct ipa_wdi_reg_intf_in_params - parameters for uC offload
+ *	interface registration
+ *
+ * @netdev_name: network interface name
+ * @hdr_info: header information
+ * @is_meta_data_valid: if metadata is valid
+ * @meta_data: metadata if any
+ * @meta_data_mask: metadata mask
+ * @is_tx1_used: to indicate whether 2.4g or 5g iface
+ */
+struct ipa_wdi_reg_intf_in_params {
+	const char *netdev_name;
+	struct ipa_wdi_hdr_info hdr_info[IPA_IP_MAX];
+	enum ipa_client_type alt_dst_pipe;
+	u8 is_meta_data_valid;
+	u32 meta_data;
+	u32 meta_data_mask;
+	u8 is_tx1_used;
+	ipa_wdi_hdl_t hdl;
+};
+
+/**
+ * struct  ipa_wdi_pipe_setup_info - WDI TX/Rx configuration
+ * @ipa_ep_cfg: ipa endpoint configuration
+ * @client: type of "client"
+ * @transfer_ring_base_pa:  physical address of the base of the transfer ring
+ * @transfer_ring_size:  size of the transfer ring
+ * @transfer_ring_doorbell_pa:  physical address of the doorbell that
+	IPA uC will update the tailpointer of the transfer ring
+ * @is_txr_rn_db_pcie_addr: Bool indicated txr ring DB is pcie or not
+ * @event_ring_base_pa:  physical address of the base of the event ring
+ * @event_ring_size:  event ring size
+ * @event_ring_doorbell_pa:  physical address of the doorbell that IPA uC
+	will update the headpointer of the event ring
+ * @is_evt_rn_db_pcie_addr: Bool indicated evt ring DB is pcie or not
+ * @num_pkt_buffers:  Number of pkt buffers allocated. The size of the event
+	ring and the transfer ring has to be at least ( num_pkt_buffers + 1)
+ * @pkt_offset: packet offset (wdi header length)
+ * @desc_format_template[IPA_HW_WDI3_MAX_ER_DESC_SIZE]:  Holds a cached
+	template of the desc format
+ * @rx_bank_id: value used to perform TCL HW setting
+
+ */
+struct ipa_wdi_pipe_setup_info {
+	struct ipa_ep_cfg ipa_ep_cfg;
+	enum ipa_client_type client;
+	phys_addr_t  transfer_ring_base_pa;
+	u32  transfer_ring_size;
+	phys_addr_t  transfer_ring_doorbell_pa;
+	bool is_txr_rn_db_pcie_addr;
+
+	phys_addr_t  event_ring_base_pa;
+	u32  event_ring_size;
+	phys_addr_t  event_ring_doorbell_pa;
+	bool is_evt_rn_db_pcie_addr;
+	u16  num_pkt_buffers;
+
+	u16 pkt_offset;
+
+	u32  desc_format_template[IPA_HW_WDI3_MAX_ER_DESC_SIZE];
+	u8 rx_bank_id;
+};
+
+/**
+ * struct  ipa_wdi_pipe_setup_info_smmu - WDI TX/Rx configuration
+ * @ipa_ep_cfg: ipa endpoint configuration
+ * @client: type of "client"
+ * @transfer_ring_base_pa:  physical address of the base of the transfer ring
+ * @transfer_ring_size:  size of the transfer ring
+ * @transfer_ring_doorbell_pa:  physical address of the doorbell that
+	IPA uC will update the tailpointer of the transfer ring
+ * @is_txr_rn_db_pcie_addr: Bool indicated  txr ring DB is pcie or not
+ * @event_ring_base_pa:  physical address of the base of the event ring
+ * @event_ring_size:  event ring size
+ * @event_ring_doorbell_pa:  physical address of the doorbell that IPA uC
+	will update the headpointer of the event ring
+ * @is_evt_rn_db_pcie_addr: Bool indicated evt ring DB is pcie or not
+ * @num_pkt_buffers:  Number of pkt buffers allocated. The size of the event
+	ring and the transfer ring has to be at least ( num_pkt_buffers + 1)
+ * @pkt_offset: packet offset (wdi header length)
+ * @desc_format_template[IPA_HW_WDI3_MAX_ER_DESC_SIZE]:  Holds a cached
+	template of the desc format
+ * @rx_bank_id: value used to perform TCL HW setting
+
+ */
+struct ipa_wdi_pipe_setup_info_smmu {
+	struct ipa_ep_cfg ipa_ep_cfg;
+	enum ipa_client_type client;
+	struct sg_table  transfer_ring_base;
+	u32  transfer_ring_size;
+	phys_addr_t  transfer_ring_doorbell_pa;
+	bool is_txr_rn_db_pcie_addr;
+
+	struct sg_table  event_ring_base;
+	u32  event_ring_size;
+	phys_addr_t  event_ring_doorbell_pa;
+	bool is_evt_rn_db_pcie_addr;
+	u16  num_pkt_buffers;
+
+	u16 pkt_offset;
+
+	u32  desc_format_template[IPA_HW_WDI3_MAX_ER_DESC_SIZE];
+	u8 rx_bank_id;
+};
+
+/**
+ * struct  ipa_wdi_conn_in_params - information provided by
+ *		uC offload client
+ * @notify: client callback function
+ * @priv: client cookie
+ * @is_smmu_enabled: if smmu is enabled
+ * @num_sys_pipe_needed: number of sys pipe needed
+ * @sys_in: parameters to setup sys pipe in mcc mode
+ * @tx: parameters to connect TX pipe(from IPA to WLAN)
+ * @tx_smmu: smmu parameters to connect TX pipe(from IPA to WLAN)
+ * @rx: parameters to connect RX pipe(from WLAN to IPA)
+ * @rx_smmu: smmu parameters to connect RX pipe(from WLAN to IPA)
+ * @is_tx1_used: to notify extra pipe required/not
+ * @tx1: parameters to connect TX1 pipe(from IPA to WLAN second pipe)
+ * @tx1_smmu: smmu parameters to connect TX1 pipe(from IPA to WLAN second pipe)
+ */
+struct ipa_wdi_conn_in_params {
+	ipa_notify_cb notify;
+	void *priv;
+	bool is_smmu_enabled;
+	u8 num_sys_pipe_needed;
+	struct ipa_sys_connect_params sys_in[IPA_WDI_MAX_SUPPORTED_SYS_PIPE];
+	union {
+		struct ipa_wdi_pipe_setup_info tx;
+		struct ipa_wdi_pipe_setup_info_smmu tx_smmu;
+	} u_tx;
+	union {
+		struct ipa_wdi_pipe_setup_info rx;
+		struct ipa_wdi_pipe_setup_info_smmu rx_smmu;
+	} u_rx;
+	bool is_tx1_used;
+	union {
+		struct ipa_wdi_pipe_setup_info tx;
+		struct ipa_wdi_pipe_setup_info_smmu tx_smmu;
+	} u_tx1;
+	ipa_wdi_hdl_t hdl;
+};
+
+/**
+ * struct  ipa_wdi_conn_out_params - information provided
+ *				to WLAN driver
+ * @tx_uc_db_pa: physical address of IPA uC doorbell for TX
+ * @rx_uc_db_pa: physical address of IPA uC doorbell for RX
+ * @tx1_uc_db_pa: physical address of IPA uC doorbell for TX1
+ * @is_ddr_mapped: flag set to true if address is from DDR
+ */
+struct ipa_wdi_conn_out_params {
+	phys_addr_t tx_uc_db_pa;
+	phys_addr_t rx_uc_db_pa;
+	phys_addr_t tx1_uc_db_pa;
+	bool is_ddr_mapped;
+};
+
+/**
+ * struct  ipa_wdi_perf_profile - To set BandWidth profile
+ *
+ * @client: type of client
+ * @max_supported_bw_mbps: maximum bandwidth needed (in Mbps)
+ */
+struct ipa_wdi_perf_profile {
+	enum ipa_client_type client;
+	u32 max_supported_bw_mbps;
+};
+
+
+/**
+ * struct ipa_wdi_capabilities - wdi capability parameters
+ *
+ * @num_of_instances: Number of WLAN instances supported.
+ */
+struct ipa_wdi_capabilities_out_params {
+	u8 num_of_instances;
+};
+
+#if IS_ENABLED(CONFIG_IPA3)
+
+/**
+ * ipa_wdi_get_capabilities - Client should call this function to
+ * know the WDI capabilities
+ *
+ * Note: Should not be called from atomic context and only
+ * after checking IPA readiness using ipa_register_ipa_ready_cb()
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_wdi_get_capabilities(
+	struct ipa_wdi_capabilities_out_params *out);
+
+/**
+ * ipa_wdi_init - Client should call this function to
+ * init WDI IPA offload data path
+ *
+ * Note: Should not be called from atomic context and only
+ * after checking IPA readiness using ipa_register_ipa_ready_cb()
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_wdi_init(struct ipa_wdi_init_in_params *in,
+	struct ipa_wdi_init_out_params *out);
+
+/** ipa_get_wdi_version - return wdi version
+ *
+ * @Return void
+ */
+int ipa_get_wdi_version(void);
+
+/** ipa_wdi_is_tx1_used - return if DBS mode is active
+ *
+ * @Return bool
+ */
+bool ipa_wdi_is_tx1_used(void);
+
+/**
+ * ipa_wdi_init_per_inst - Client should call this function to
+ * init WDI IPA offload data path
+ *
+ * Note: Should not be called from atomic context and only
+ * after checking IPA readiness using ipa_register_ipa_ready_cb()
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_wdi_init_per_inst(struct ipa_wdi_init_in_params *in,
+	struct ipa_wdi_init_out_params *out);
+
+/**
+ * ipa_wdi_cleanup - Client should call this function to
+ * clean up WDI IPA offload data path
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_wdi_cleanup(void);
+
+/**
+ * ipa_wdi_cleanup_per_inst - Client should call this function to
+ * clean up WDI IPA offload data path
+ *
+ * @hdl: hdl to wdi client
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_wdi_cleanup_per_inst(ipa_wdi_hdl_t hdl);
+
+
+/**
+ * ipa_wdi_reg_intf - Client should call this function to
+ * register interface
+ *
+ * Note: Should not be called from atomic context
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_wdi_reg_intf(
+	struct ipa_wdi_reg_intf_in_params *in);
+
+/**
+ * ipa_wdi_reg_intf_per_inst - Client should call this function to
+ * register interface
+ *
+ * Note: Should not be called from atomic context
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_wdi_reg_intf_per_inst(
+	struct ipa_wdi_reg_intf_in_params *in);
+
+/**
+ * ipa_wdi_dereg_intf - Client Driver should call this
+ * function to deregister before unload and after disconnect
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_wdi_dereg_intf(const char *netdev_name);
+
+/**
+ * ipa_wdi_dereg_intf_per_inst - Client Driver should call this
+ * function to deregister before unload and after disconnect
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_wdi_dereg_intf_per_inst(const char *netdev_name, ipa_wdi_hdl_t hdl);
+
+/**
+ * ipa_wdi_conn_pipes - Client should call this
+ * function to connect pipes
+ *
+ * @in:	[in] input parameters from client
+ * @out: [out] output params to client
+ *
+ * Note: Should not be called from atomic context
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_wdi_conn_pipes(struct ipa_wdi_conn_in_params *in,
+	struct ipa_wdi_conn_out_params *out);
+
+/**
+ * ipa_wdi_conn_pipes_per_inst - Client should call this
+ * function to connect pipes
+ *
+ * @in:	[in] input parameters from client
+ * @out: [out] output params to client
+ *
+ * Note: Should not be called from atomic context
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_wdi_conn_pipes_per_inst(struct ipa_wdi_conn_in_params *in,
+	struct ipa_wdi_conn_out_params *out);
+
+/**
+ * ipa_wdi_disconn_pipes() - Client should call this
+ *		function to disconnect pipes
+ *
+ * Note: Should not be called from atomic context
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_wdi_disconn_pipes(void);
+
+/**
+ * ipa_wdi_disconn_pipes_per_inst() - Client should call this
+ *		function to disconnect pipes
+ *
+ * @hdl: hdl to wdi client
+ * Note: Should not be called from atomic context
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_wdi_disconn_pipes_per_inst(ipa_wdi_hdl_t hdl);
+
+/**
+ * ipa_wdi_enable_pipes() - Client should call this
+ *		function to enable IPA offload data path
+ *
+ * Note: Should not be called from atomic context
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_wdi_enable_pipes(void);
+
+/**
+ * ipa_wdi_enable_pipes_per_inst() - Client should call this
+ *		function to enable IPA offload data path
+ *
+ * @hdl: hdl to wdi client
+ * Note: Should not be called from atomic context
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_wdi_enable_pipes_per_inst(ipa_wdi_hdl_t hdl);
+
+/**
+ * ipa_wdi_disable_pipes() - Client should call this
+ *		function to disable IPA offload data path
+ *
+ * Note: Should not be called from atomic context
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_wdi_disable_pipes(void);
+
+/**
+ * ipa_wdi_disable_pipes_per_inst() - Client should call this
+ *		function to disable IPA offload data path
+ *
+ * @hdl: hdl to wdi client
+ * Note: Should not be called from atomic context
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_wdi_disable_pipes_per_inst(ipa_wdi_hdl_t hdl);
+
+/**
+ * ipa_wdi_set_perf_profile() - Client should call this function to
+ *		set IPA clock bandwidth based on data rates
+ *
+ * @profile: [in] BandWidth profile to use
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_wdi_set_perf_profile(struct ipa_wdi_perf_profile *profile);
+
+/**
+ * ipa_wdi_set_perf_profile_per_inst() - Client should call this function to
+ *		set IPA clock bandwidth based on data rates
+ *
+ * @hdl: hdl to wdi client
+ * @profile: [in] BandWidth profile to use
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_wdi_set_perf_profile_per_inst(ipa_wdi_hdl_t hdl,
+	struct ipa_wdi_perf_profile *profile);
+
+/**
+ * ipa_wdi_create_smmu_mapping() - Create smmu mapping
+ *
+ * @num_buffers: number of buffers
+ *
+ * @info: wdi buffer info
+ */
+int ipa_wdi_create_smmu_mapping(u32 num_buffers,
+	struct ipa_wdi_buffer_info *info);
+
+/**
+ * ipa_wdi_create_smmu_mapping_per_inst() - Create smmu mapping
+ *
+ * @hdl: hdl to wdi client
+ * @num_buffers: number of buffers
+ * @info: wdi buffer info
+ */
+int ipa_wdi_create_smmu_mapping_per_inst(ipa_wdi_hdl_t hdl,
+	u32 num_buffers,
+	struct ipa_wdi_buffer_info *info);
+
+/**
+ * ipa_wdi_release_smmu_mapping() - Release smmu mapping
+ *
+ * @num_buffers: number of buffers
+ *
+ * @info: wdi buffer info
+ */
+int ipa_wdi_release_smmu_mapping(u32 num_buffers,
+	struct ipa_wdi_buffer_info *info);
+
+/**
+ * ipa_wdi_release_smmu_mapping_per_inst() - Release smmu mapping
+ *
+ * @hdl: hdl to wdi client
+ * @num_buffers: number of buffers
+ *
+ * @info: wdi buffer info
+ */
+int ipa_wdi_release_smmu_mapping_per_inst(ipa_wdi_hdl_t hdl,
+	u32 num_buffers,
+	struct ipa_wdi_buffer_info *info);
+
+/**
+ * ipa_wdi_get_stats() - Query WDI statistics
+ * @stats:	[inout] stats blob from client populated by driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa_wdi_get_stats(struct IpaHwStatsWDIInfoData_t *stats);
+
+
+/**
+ * ipa_wdi_bw_monitor() - set wdi BW monitoring
+ * @info:	[inout] info blob from client populated by driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa_wdi_bw_monitor(struct ipa_wdi_bw_info *info);
+
+/**
+ * ipa_wdi_sw_stats() - set wdi BW monitoring
+ * @info:	[inout] info blob from client populated by driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa_wdi_sw_stats(struct ipa_wdi_tx_info *info);
+
+#else /* IS_ENABLED(CONFIG_IPA3) */
+
+/**
+ * ipa_wdi_get_capabilities - stub for builds without CONFIG_IPA3
+ *
+ * Must be static inline: a plain external definition in a header would be
+ * emitted in every translation unit that includes it, causing
+ * multiple-definition link errors.
+ *
+ * @Return -EPERM always (WDI not available)
+ */
+static inline int ipa_wdi_get_capabilities(
+	struct ipa_wdi_capabilities_out_params *out)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wdi_init(struct ipa_wdi_init_in_params *in,
+	struct ipa_wdi_init_out_params *out)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wdi_init_per_inst(
+	struct ipa_wdi_init_in_params *in,
+	struct ipa_wdi_init_out_params *out)
+{
+	return -EPERM;
+}
+
+static inline int ipa_get_wdi_version(void)
+{
+	return -EPERM;
+}
+
+/* Stub for builds without CONFIG_IPA3. Return type must match the bool
+ * prototype in the enabled branch; the original int stub returned -EPERM,
+ * which is truthy and made the predicate claim TX1 is in use on IPA-less
+ * builds.
+ */
+static inline bool ipa_wdi_is_tx1_used(void)
+{
+	return false;
+}
+
+static inline int ipa_wdi_cleanup(void)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wdi_cleanup_per_inst(ipa_wdi_hdl_t hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wdi_reg_intf(
+	struct ipa_wdi_reg_intf_in_params *in)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wdi_reg_intf_per_inst(
+	struct ipa_wdi_reg_intf_in_params *in)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wdi_dereg_intf(const char *netdev_name)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wdi_dereg_intf_per_inst(const char *netdev_name,
+	ipa_wdi_hdl_t hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wdi_conn_pipes(struct ipa_wdi_conn_in_params *in,
+	struct ipa_wdi_conn_out_params *out)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wdi_conn_pipes_per_inst(
+	struct ipa_wdi_conn_in_params *in,
+	struct ipa_wdi_conn_out_params *out)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wdi_disconn_pipes(void)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wdi_disconn_pipes_per_inst(ipa_wdi_hdl_t hdl)
+{
+	return -EPERM;
+}
+
+
+static inline int ipa_wdi_enable_pipes(void)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wdi_enable_pipes_per_inst(ipa_wdi_hdl_t hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wdi_disable_pipes(void)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wdi_disable_pipes_per_inst(ipa_wdi_hdl_t hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wdi_set_perf_profile(
+	struct ipa_wdi_perf_profile *profile)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wdi_set_perf_profile_per_inst(
+	ipa_wdi_hdl_t hdl,
+	struct ipa_wdi_perf_profile *profile)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wdi_create_smmu_mapping(u32 num_buffers,
+	struct ipa_wdi_buffer_info *info)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wdi_create_smmu_mapping_per_inst(
+	ipa_wdi_hdl_t hdl,
+	u32 num_buffers,
+	struct ipa_wdi_buffer_info *info)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wdi_release_smmu_mapping(u32 num_buffers,
+	struct ipa_wdi_buffer_info *info)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wdi_release_smmu_mapping_per_inst(
+	ipa_wdi_hdl_t hdl,
+	u32 num_buffers,
+	struct ipa_wdi_buffer_info *info)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wdi_get_stats(struct IpaHwStatsWDIInfoData_t *stats)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wdi_bw_monitor(struct ipa_wdi_bw_info *info)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wdi_sw_stats(struct ipa_wdi_tx_info *info)
+{
+	return -EPERM;
+}
+
+#endif /* IS_ENABLED(CONFIG_IPA3) */
+
+#endif /* _IPA_WDI3_H_ */

+ 487 - 0
drivers/platform/msm/include/linux/ipa_wigig.h

@@ -0,0 +1,487 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _IPA_WIGIG_H_
+#define _IPA_WIGIG_H_
+
+#include <linux/msm_ipa.h>
+#include "ipa.h"
+
+typedef void (*ipa_wigig_misc_int_cb)(void *priv);
+
+/*
+ * struct ipa_wigig_init_in_params - wigig init input parameters
+ *
+ * @periph_baddr_pa: physical address of wigig HW base
+ * @pseudo_cause_pa: physical address of wigig HW pseudo_cause register
+ * @int_gen_tx_pa: physical address of wigig HW int_gen_tx register
+ * @int_gen_rx_pa: physical address of wigig HW int_gen_rx register
+ * @dma_ep_misc_pa: physical address of wigig HW dma_ep_misc register
+ * @notify: uc ready callback
+ * @int_notify: wigig misc interrupt callback
+ * @priv: uc ready callback cookie
+ */
+struct ipa_wigig_init_in_params {
+	phys_addr_t periph_baddr_pa;
+	phys_addr_t pseudo_cause_pa;
+	phys_addr_t int_gen_tx_pa;
+	phys_addr_t int_gen_rx_pa;
+	phys_addr_t dma_ep_misc_pa;
+	ipa_uc_ready_cb notify;
+	ipa_wigig_misc_int_cb int_notify;
+	void *priv;
+};
+
+/*
+ * struct ipa_wigig_init_out_params - wigig init output parameters
+ *
+ * @is_uc_ready: is uC ready. No API should be called until uC is ready.
+ * @uc_db_pa: physical address of IPA uC doorbell
+ * @lan_rx_napi_enable: if we use NAPI in the LAN rx
+ */
+struct ipa_wigig_init_out_params {
+	bool is_uc_ready;
+	phys_addr_t uc_db_pa;
+	bool lan_rx_napi_enable;
+};
+
+/*
+ * struct ipa_wigig_hdr_info - Header to install on IPA HW
+ *
+ * @hdr: header to install on IPA HW
+ * @hdr_len: length of header
+ * @dst_mac_addr_offset: destination mac address offset
+ * @hdr_type: layer two header type
+ */
+struct ipa_wigig_hdr_info {
+	u8 *hdr;
+	u8 hdr_len;
+	u8 dst_mac_addr_offset;
+	enum ipa_hdr_l2_type hdr_type;
+};
+
+/*
+ * struct ipa_wigig_reg_intf_in_params - parameters for offload interface
+ *	registration
+ *
+ * @netdev_name: network interface name
+ * @netdev_mac: netdev mac address
+ * @hdr_info: header information
+ */
+struct ipa_wigig_reg_intf_in_params {
+	const char *netdev_name;
+	u8 netdev_mac[IPA_MAC_ADDR_SIZE];
+	struct ipa_wigig_hdr_info hdr_info[IPA_IP_MAX];
+};
+
+/*
+ * struct ipa_wigig_pipe_setup_info - WIGIG TX/Rx configuration
+ * @desc_ring_base_pa: physical address of the base of the descriptor ring
+ * @desc_ring_size: size of the descriptor ring in bytes
+ * @desc_ring_HWHEAD_pa: physical address of the wigig descriptor ring HWHEAD
+ * @desc_ring_HWTAIL_pa: physical address of the wigig descriptor ring HWTAIL
+ * @status_ring_base_pa: physical address of the base of the status ring
+ * @status_ring_size: status ring size in bytes
+ * @status_ring_HWHEAD_pa: physical address of the wigig status ring HWHEAD
+ * @status_ring_HWTAIL_pa: physical address of the wigig status ring HWTAIL
+ */
+struct ipa_wigig_pipe_setup_info {
+	phys_addr_t desc_ring_base_pa;
+	u16 desc_ring_size;
+	phys_addr_t desc_ring_HWHEAD_pa;
+	phys_addr_t desc_ring_HWTAIL_pa;
+
+	phys_addr_t status_ring_base_pa;
+	u16 status_ring_size;
+	phys_addr_t status_ring_HWHEAD_pa;
+	phys_addr_t status_ring_HWTAIL_pa;
+};
+
+/*
+ * struct ipa_wigig_pipe_setup_info_smmu - WIGIG TX/Rx configuration smmu mode
+ * @desc_ring_base: sg_table of the base of the descriptor ring
+ * @desc_ring_base_iova: IO virtual address mapped to physical base address
+ * @desc_ring_size: size of the descriptor ring in bytes
+ * @desc_ring_HWHEAD_pa: physical address of the wigig descriptor ring HWHEAD
+ * @desc_ring_HWTAIL_pa: physical address of the wigig descriptor ring HWTAIL
+ * @status_ring_base: sg_table of the base of the status ring
+ * @status_ring_base_iova: IO virtual address mapped to physical base address
+ * @status_ring_size: status ring size in bytes
+ * @status_ring_HWHEAD_pa: physical address of the wigig status ring HWHEAD
+ * @status_ring_HWTAIL_pa: physical address of the wigig status ring HWTAIL
+ */
+struct ipa_wigig_pipe_setup_info_smmu {
+	struct sg_table desc_ring_base;
+	u64 desc_ring_base_iova;
+	u16 desc_ring_size;
+	phys_addr_t desc_ring_HWHEAD_pa;
+	phys_addr_t desc_ring_HWTAIL_pa;
+
+	struct sg_table status_ring_base;
+	u64 status_ring_base_iova;
+	u16 status_ring_size;
+	phys_addr_t status_ring_HWHEAD_pa;
+	phys_addr_t status_ring_HWTAIL_pa;
+};
+
+/*
+ * struct ipa_wigig_rx_pipe_data_buffer_info - WIGIG Rx data buffer
+ *	configuration
+ * @data_buffer_base_pa: physical address of the physically contiguous
+ *			Rx data buffer
+ * @data_buffer_size: size of the data buffer
+ */
+struct ipa_wigig_rx_pipe_data_buffer_info {
+	phys_addr_t data_buffer_base_pa;
+	u32 data_buffer_size;
+};
+
+/*
+ * struct ipa_wigig_rx_pipe_data_buffer_info_smmu - WIGIG Rx data buffer
+ *	configuration smmu mode
+ * @data_buffer_base: sg_table of the physically contiguous
+ *			Rx data buffer
+ * @data_buffer_base_iova: IO virtual address mapped to physical base address
+ * @data_buffer_size: size of the data buffer
+ */
+struct ipa_wigig_rx_pipe_data_buffer_info_smmu {
+	struct sg_table data_buffer_base;
+	u64 data_buffer_base_iova;
+	u32 data_buffer_size;
+};
+
+/*
+ * struct ipa_wigig_conn_rx_in_params - information provided by
+ *				WIGIG offload client for Rx pipe
+ * @notify: client callback function
+ * @priv: client cookie
+ * @pipe: parameters to connect Rx pipe (WIGIG to IPA)
+ * @dbuff: Rx data buffer info
+ */
+struct ipa_wigig_conn_rx_in_params {
+	ipa_notify_cb notify;
+	void *priv;
+	struct ipa_wigig_pipe_setup_info pipe;
+	struct ipa_wigig_rx_pipe_data_buffer_info dbuff;
+};
+
+/*
+ * struct ipa_wigig_conn_rx_in_params_smmu - information provided by
+ *				WIGIG offload client for Rx pipe
+ * @notify: client callback function
+ * @priv: client cookie
+ * @pipe_smmu: parameters to connect Rx pipe (WIGIG to IPA) smmu mode
+ * @dbuff_smmu: Rx data buffer info smmu mode
+ */
+struct ipa_wigig_conn_rx_in_params_smmu {
+	ipa_notify_cb notify;
+	void *priv;
+	struct ipa_wigig_pipe_setup_info_smmu pipe_smmu;
+	struct ipa_wigig_rx_pipe_data_buffer_info_smmu dbuff_smmu;
+};
+
+/*
+ * struct ipa_wigig_conn_out_params - information provided
+ *				to WIGIG driver
+ * @client: client type allocated by IPA driver
+ */
+struct ipa_wigig_conn_out_params {
+	enum ipa_client_type client;
+};
+
+/*
+ * struct ipa_wigig_tx_pipe_data_buffer_info - WIGIG Tx data buffer
+ *	configuration
+ * @data_buffer_size: size of a single data buffer
+ */
+struct ipa_wigig_tx_pipe_data_buffer_info {
+	u32 data_buffer_size;
+};
+
+/*
+ * struct ipa_wigig_tx_pipe_data_buffer_info_smmu - WIGIG Tx data buffer
+ *				configuration smmu mode
+ * @data_buffer_base: sg_tables of the Tx data buffers
+ * @data_buffer_base_iova: IO virtual address mapped to physical base address
+ * @num_buffers: number of buffers
+ * @data_buffer_size: size of a single data buffer
+ */
+struct ipa_wigig_tx_pipe_data_buffer_info_smmu {
+	struct sg_table *data_buffer_base;
+	u64 *data_buffer_base_iova;
+	u32 num_buffers;
+	u32 data_buffer_size;
+};
+
+/*
+ * struct ipa_wigig_conn_tx_in_params - information provided by
+ *		wigig offload client for Tx pipe
+ * @pipe: parameters to connect Tx pipe (IPA to WIGIG)
+ * @dbuff: Tx data buffer info
+ * @int_gen_tx_bit_num: bit in int_gen_tx register associated with this client
+ * @client_mac: MAC address of client to be connected
+ */
+struct ipa_wigig_conn_tx_in_params {
+	struct ipa_wigig_pipe_setup_info pipe;
+	struct ipa_wigig_tx_pipe_data_buffer_info dbuff;
+	u8 int_gen_tx_bit_num;
+	u8 client_mac[IPA_MAC_ADDR_SIZE];
+};
+
+/*
+ * struct ipa_wigig_conn_tx_in_params_smmu - information provided by
+ *		wigig offload client for Tx pipe
+ * @pipe_smmu: parameters to connect Tx pipe (IPA to WIGIG) smmu mode
+ * @dbuff_smmu: Tx data buffer info smmu mode
+ * @int_gen_tx_bit_num: bit in int_gen_tx register associated with this client
+ * @client_mac: MAC address of client to be connected
+ */
+struct ipa_wigig_conn_tx_in_params_smmu {
+	struct ipa_wigig_pipe_setup_info_smmu pipe_smmu;
+	struct ipa_wigig_tx_pipe_data_buffer_info_smmu dbuff_smmu;
+	u8 int_gen_tx_bit_num;
+	u8 client_mac[IPA_MAC_ADDR_SIZE];
+};
+
+#if IS_ENABLED(CONFIG_IPA3)
+
+/*
+ * ipa_wigig_init - Client should call this function to
+ * init WIGIG IPA offload data path
+ *
+ * Note: Should not be called from atomic context
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_wigig_init(struct ipa_wigig_init_in_params *in,
+	struct ipa_wigig_init_out_params *out);
+
+/*
+ * ipa_wigig_cleanup - Client should call this function to
+ * clean up WIGIG IPA offload data path
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_wigig_cleanup(void);
+
+/*
+ * ipa_wigig_is_smmu_enabled - get smmu state
+ *
+ * @Return true if smmu is enabled, false if disabled
+ */
+bool ipa_wigig_is_smmu_enabled(void);
+
+/*
+ * ipa_wigig_reg_intf - Client should call this function to
+ * register interface
+ *
+ * Note: Should not be called from atomic context
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_wigig_reg_intf(struct ipa_wigig_reg_intf_in_params *in);
+
+/*
+ * ipa_wigig_dereg_intf - Client Driver should call this
+ * function to deregister before unload and after disconnect
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_wigig_dereg_intf(const char *netdev_name);
+
+/*
+ * ipa_wigig_conn_rx_pipe - Client should call this
+ * function to connect the rx (UL) pipe
+ *
+ * @in: [in] input parameters from client
+ * @out: [out] output params to client
+ *
+ * Note: Non SMMU mode only, Should not be called from atomic context
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_wigig_conn_rx_pipe(struct ipa_wigig_conn_rx_in_params *in,
+	struct ipa_wigig_conn_out_params *out);
+
+/*
+ * ipa_wigig_conn_rx_pipe_smmu - Client should call this
+ * function to connect the rx (UL) pipe
+ *
+ * @in: [in] input parameters from client
+ * @out: [out] output params to client
+ *
+ * Note: SMMU mode only, Should not be called from atomic context
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_wigig_conn_rx_pipe_smmu(struct ipa_wigig_conn_rx_in_params_smmu *in,
+	struct ipa_wigig_conn_out_params *out);
+
+/*
+ * ipa_wigig_conn_client - Client should call this
+ * function to connect one of the tx (DL) pipes when a WIGIG client connects
+ *
+ * @in: [in] input parameters from client
+ * @out: [out] output params to client
+ *
+ * Note: Non SMMU mode only, Should not be called from atomic context
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_wigig_conn_client(struct ipa_wigig_conn_tx_in_params *in,
+	struct ipa_wigig_conn_out_params *out);
+
+/*
+ * ipa_wigig_conn_client_smmu - Client should call this
+ * function to connect one of the tx (DL) pipes when a WIGIG client connects
+ *
+ * @in: [in] input parameters from client
+ * @out: [out] output params to client
+ *
+ * Note: SMMU mode only, Should not be called from atomic context
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_wigig_conn_client_smmu(struct ipa_wigig_conn_tx_in_params_smmu *in,
+	struct ipa_wigig_conn_out_params *out);
+
+/*
+ * ipa_wigig_disconn_pipe() - Client should call this
+ *		function to disconnect a pipe
+ *
+ * @client: [in] pipe to be disconnected
+ *
+ * Note: Should not be called from atomic context
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_wigig_disconn_pipe(enum ipa_client_type client);
+
+/*
+ * ipa_wigig_enable_pipe() - Client should call this
+ *		function to enable IPA offload data path
+ *
+ * @client: [in] pipe to be enabled
+ * Note: Should not be called from atomic context
+ *
+ * Returns: 0 on success, negative on failure
+ */
+
+int ipa_wigig_enable_pipe(enum ipa_client_type client);
+
+/*
+ * ipa_wigig_disable_pipe() - Client should call this
+ *		function to disable IPA offload data path
+ *
+ * @client: [in] pipe to be disabled
+ * Note: Should not be called from atomic context
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_wigig_disable_pipe(enum ipa_client_type client);
+
+/*
+ * ipa_wigig_tx_dp() - transmit tx packet through IPA to 11ad HW
+ *
+ * @dst: [in] destination ipa client pipe to be used
+ * @skb: [in] skb to be transmitted
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_wigig_tx_dp(enum ipa_client_type dst, struct sk_buff *skb);
+
+/**
+ * ipa_wigig_set_perf_profile() - Client should call this function to
+ *		set IPA clock bandwidth based on data rates
+ *
+ * @max_supported_bw_mbps: [in] maximum bandwidth needed (in Mbps)
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_wigig_set_perf_profile(u32 max_supported_bw_mbps);
+
+#else /* IS_ENABLED(CONFIG_IPA3) */
+static inline int ipa_wigig_init(struct ipa_wigig_init_in_params *in,
+	struct ipa_wigig_init_out_params *out)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wigig_cleanup(void)
+{
+	return -EPERM;
+}
+
+/*
+ * CONFIG_IPA3-disabled stub: the return type is bool, so the usual -EPERM
+ * stub value would implicitly convert to true and falsely report SMMU as
+ * enabled. Report false ("not enabled") when IPA is compiled out.
+ */
+static inline bool ipa_wigig_is_smmu_enabled(void)
+{
+	return false;
+}
+
+static inline int ipa_wigig_reg_intf(struct ipa_wigig_reg_intf_in_params *in)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wigig_dereg_intf(const char *netdev_name)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wigig_conn_rx_pipe(
+	struct ipa_wigig_conn_rx_in_params *in,
+	struct ipa_wigig_conn_out_params *out)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wigig_conn_rx_pipe_smmu(
+	struct ipa_wigig_conn_rx_in_params_smmu *in,
+	struct ipa_wigig_conn_out_params *out)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wigig_conn_client(
+	struct ipa_wigig_conn_tx_in_params *in,
+	struct ipa_wigig_conn_out_params *out)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wigig_conn_client_smmu(
+	struct ipa_wigig_conn_tx_in_params_smmu *in,
+	struct ipa_wigig_conn_out_params *out)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wigig_disconn_pipe(enum ipa_client_type client)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wigig_enable_pipe(enum ipa_client_type client)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wigig_disable_pipe(enum ipa_client_type client)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wigig_tx_dp(enum ipa_client_type dst,
+	struct sk_buff *skb)
+{
+	return -EPERM;
+}
+
+static inline int ipa_wigig_set_perf_profile(u32 max_supported_bw_mbps)
+{
+	return -EPERM;
+}
+#endif /* IS_ENABLED(CONFIG_IPA3) */
+#endif /* _IPA_WIGIG_H_ */

+ 31 - 0
drivers/platform/msm/include/linux/msm_gsi.h

@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef MSM_GSI_H
+#define MSM_GSI_H
+#include <linux/types.h>
+#include <linux/interrupt.h>
+
+/* enum gsi_chan_dir - GSI channel direction, relative to the GSI HW */
+enum gsi_chan_dir {
+	GSI_CHAN_DIR_FROM_GSI = 0x0,
+	GSI_CHAN_DIR_TO_GSI = 0x1
+};
+
+/**
+ * enum gsi_prefetch_mode - prefetch buffer mode for a GSI channel
+ * @GSI_USE_PREFETCH_BUFS: Channel will use normal prefetch buffers if possible
+ * @GSI_ESCAPE_BUF_ONLY: Channel will always use escape buffers only
+ * @GSI_SMART_PRE_FETCH: Channel will work in smart prefetch mode.
+ *	relevant starting GSI 2.5
+ * @GSI_FREE_PRE_FETCH: Channel will work in free prefetch mode.
+ *	relevant starting GSI 2.5
+ */
+enum gsi_prefetch_mode {
+	GSI_USE_PREFETCH_BUFS = 0x0,
+	GSI_ESCAPE_BUF_ONLY = 0x1,
+	GSI_SMART_PRE_FETCH = 0x2,
+	GSI_FREE_PRE_FETCH = 0x3,
+};
+
+#endif

+ 2936 - 0
drivers/platform/msm/include/uapi/linux/ipa_qmi_service_v01.h

@@ -0,0 +1,2936 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+/*
+ * This header file defines the types and structures that were defined in
+ * ipa. It contains the constant values defined, enums, structures,
+ * messages, and service message IDs (in that order) Structures that were
+ * defined in the IDL as messages contain mandatory elements, optional
+ * elements, a combination of mandatory and optional elements (mandatory
+ * always come before optionals in the structure), or nothing (null message)
+
+ * An optional element in a message is preceded by a __u8 value that must be
+ * set to true if the element is going to be included. When decoding a received
+ * message, the __u8 values will be set to true or false by the decode
+ * routine, and should be checked before accessing the values that they
+ * correspond to.
+
+ * Variable sized arrays are defined as static sized arrays with an unsigned
+ * integer (32 bit) preceding it that must be set to the number of elements
+ * in the array that are valid. For Example:
+
+ * __u32 test_opaque_len;
+ * __u8 test_opaque[16];
+
+ * If only 4 elements are added to test_opaque[] then test_opaque_len must be
+ * set to 4 before sending the message.  When decoding, the _len value is set
+ * by the decode routine and should be checked so that the correct number of
+ * elements in the array will be accessed.
+ */
+#ifndef IPA_QMI_SERVICE_V01_H
+#define IPA_QMI_SERVICE_V01_H
+
+#include <linux/types.h>
+
+#define QMI_IPA_REMOTE_MHI_CHANNELS_NUM_MAX_V01 6
+#define QMI_IPA_MAX_FILTERS_EX_V01 128
+#define QMI_IPA_MAX_FILTERS_EX2_V01 256
+#define QMI_IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS_V01 2
+#define QMI_IPA_MAX_FILTERS_V01 64
+#define QMI_IPA_IPFLTR_NUM_MEQ_128_EQNS_V01 2
+#define QMI_IPA_ENDP_DESC_NUM_MAX_V01 31
+#define QMI_IPA_MAX_APN_V01 8
+/* Currently max we can use is only 1. But for scalability purpose
+ * we are having max value as 8.
+ */
+#define QMI_IPA_MAX_CLIENT_DST_PIPES_V01 8
+#define QMI_IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS_V01 2
+#define QMI_IPA_MAX_UL_FIREWALL_RULES_V01 64
+#define QMI_IPA_REMOTE_MHI_MEMORY_MAPPING_NUM_MAX_V01 6
+#define QMI_IPA_IPFLTR_NUM_MEQ_32_EQNS_V01 2
+#define QMI_IPA_MAX_PIPES_V01 20
+#define QMI_IPA_MAX_PER_CLIENTS_V01 64
+
+/*
+ * Indicates presence of newly added member to support HW stats.
+ */
+#define IPA_QMI_SUPPORTS_STATS
+#define IPA_QMI_SUPPORT_MHI_DEFAULT
+
+#define IPA_INT_MAX	((int)(~0U>>1))
+#define IPA_INT_MIN	(-IPA_INT_MAX - 1)
+
+/* IPA definition as msm_qmi_interface.h */
+
+enum ipa_qmi_result_type_v01 {
+	/* To force a 32 bit signed enum. Do not change or use*/
+	IPA_QMI_RESULT_TYPE_MIN_ENUM_VAL_V01 = IPA_INT_MIN,
+	IPA_QMI_RESULT_SUCCESS_V01 = 0,
+	IPA_QMI_RESULT_FAILURE_V01 = 1,
+	IPA_QMI_RESULT_TYPE_MAX_ENUM_VAL_V01 = IPA_INT_MAX,
+};
+
+enum ipa_qmi_error_type_v01 {
+	/* To force a 32 bit signed enum. Do not change or use*/
+	IPA_QMI_ERROR_TYPE_MIN_ENUM_VAL_V01 = IPA_INT_MIN,
+	IPA_QMI_ERR_NONE_V01 = 0x0000,
+	IPA_QMI_ERR_MALFORMED_MSG_V01 = 0x0001,
+	IPA_QMI_ERR_NO_MEMORY_V01 = 0x0002,
+	IPA_QMI_ERR_INTERNAL_V01 = 0x0003,
+	IPA_QMI_ERR_CLIENT_IDS_EXHAUSTED_V01 = 0x0005,
+	IPA_QMI_ERR_INVALID_ID_V01 = 0x0029,
+	IPA_QMI_ERR_ENCODING_V01 = 0x003A,
+	IPA_QMI_ERR_INCOMPATIBLE_STATE_V01 = 0x005A,
+	IPA_QMI_ERR_NOT_SUPPORTED_V01 = 0x005E,
+	IPA_QMI_ERROR_TYPE_MAX_ENUM_VAL_V01 = IPA_INT_MAX,
+};
+
+struct ipa_qmi_response_type_v01 {
+	__u16 result;
+	__u16 error;
+};
+
+enum ipa_platform_type_enum_v01 {
+	IPA_PLATFORM_TYPE_ENUM_MIN_ENUM_VAL_V01 =
+	-2147483647, /* To force a 32 bit signed enum.  Do not change or use */
+	QMI_IPA_PLATFORM_TYPE_INVALID_V01 = 0,
+	/*  Invalid platform identifier */
+	QMI_IPA_PLATFORM_TYPE_TN_V01 = 1,
+	/*  Platform identifier -	Data card device */
+	QMI_IPA_PLATFORM_TYPE_LE_V01 = 2,
+	/*  Platform identifier -	Data router device */
+	QMI_IPA_PLATFORM_TYPE_MSM_ANDROID_V01 = 3,
+	/*  Platform identifier -	MSM device with Android HLOS */
+	QMI_IPA_PLATFORM_TYPE_MSM_WINDOWS_V01 = 4,
+	/*  Platform identifier -	MSM device with Windows HLOS */
+	QMI_IPA_PLATFORM_TYPE_MSM_QNX_V01 = 5,
+	/*  Platform identifier -	MSM device with QNX HLOS */
+	QMI_IPA_PLATFORM_TYPE_LE_MHI_V01 = 6,
+	/* Platform identifier - MDM device with LE HLOS, MHI data router */
+	IPA_PLATFORM_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+	/* To force a 32 bit signed enum.  Do not change or use */
+};
+
+#define QMI_IPA_PLATFORM_TYPE_LE_MHI_V01 \
+			QMI_IPA_PLATFORM_TYPE_LE_MHI_V01
+
+struct ipa_hdr_tbl_info_type_v01 {
+	__u32 modem_offset_start;
+	/*	Offset from the start of IPA Shared memory from which
+	 *	modem driver may insert header table entries.
+	 */
+	__u32 modem_offset_end;
+	/*	Offset from the start of IPA shared mem beyond which modem
+	 *	driver shall not insert header table entries. The space
+	 *	available for the modem driver shall include the
+	 *	modem_offset_start and modem_offset_end.
+	 */
+};  /* Type */
+
+struct ipa_route_tbl_info_type_v01 {
+	__u32 route_tbl_start_addr;
+	/*	Identifies the start of the routing table. Denotes the offset
+	 *	from the start of the IPA Shared Mem
+	 */
+
+	__u32 num_indices;
+	/*	Number of indices (starting from 0) that is being allocated to
+	 *	the modem. The number indicated here is also included in the
+	 *	allocation. The value of num_indices shall not exceed 31
+	 *	(5 bits used to specify the routing table index), unless there
+	 *	is a change in the hardware.
+	 */
+};  /* Type */
+
+#define IPA_RQOS_FILTER_STATS_INFO
+struct ipa_filter_stats_info_type_v01 {
+	__u32 hw_filter_stats_start_addr;
+	/*	Identifies the start of the filter stats. Denotes the offset
+	 *	from the start of the IPA Shared Mem
+	 */
+
+	__u32 hw_filter_stats_size;
+	/*	Identifies size in bytes of the HW filter statistics table. */
+
+	__u8 hw_filter_stats_start_index;
+	/* Identifies the start index of the modem driver managed
+	 * indices in the hw filter statistics table.
+	 */
+
+	__u8 hw_filter_stats_end_index;
+	 /* Identifies the end index of the modem driver managed
+	  * indices in the hw filter statistics table.
+	  */
+};  /* Type */
+
+struct ipa_modem_mem_info_type_v01 {
+
+	__u32 block_start_addr;
+	/*	Identifies the start of the memory block allocated for the
+	 *	modem. Denotes the offset from the start of the IPA Shared Mem
+	 */
+
+	__u32 size;
+	/*	Size of the block allocated for the modem driver */
+};  /* Type */
+
+struct ipa_hdr_proc_ctx_tbl_info_type_v01 {
+
+	__u32 modem_offset_start;
+	/*  Offset from the start of IPA shared memory from which the modem
+	 *	driver may insert header processing context table entries.
+	 */
+
+	__u32 modem_offset_end;
+	/*  Offset from the start of IPA shared memory beyond which the modem
+	 *	driver may not insert header proc table entries. The space
+	 *	available for the modem driver includes modem_offset_start and
+	 *	modem_offset_end.
+	 */
+};  /* Type */
+
+struct ipa_zip_tbl_info_type_v01 {
+
+	__u32 modem_offset_start;
+	/*  Offset from the start of IPA shared memory from which the modem
+	 *	driver may insert compression/decompression command entries.
+	 */
+
+	__u32 modem_offset_end;
+	/*  Offset from the start of IPA shared memory beyond which the modem
+	 *	driver may not insert compression/decompression command entries.
+	 *	The space available for the modem driver includes
+	 *  modem_offset_start and modem_offset_end.
+	 */
+};  /* Type */
+
+/**
+ * Request Message; Requests the modem IPA driver
+ * to perform initialization
+ */
+struct ipa_init_modem_driver_req_msg_v01 {
+
+	/* Optional */
+	/*  Platform info */
+	__u8 platform_type_valid;
+	/* Must be set to true if platform_type is being passed */
+	enum ipa_platform_type_enum_v01 platform_type;
+	/*   Provides information about the platform (ex. TN/MN/LE/MSM,etc) */
+
+	/* Optional */
+	/*  Header table info */
+	__u8 hdr_tbl_info_valid;
+	/* Must be set to true if hdr_tbl_info is being passed */
+	struct ipa_hdr_tbl_info_type_v01 hdr_tbl_info;
+	/*	Provides information about the header table */
+
+	/* Optional */
+	/*  IPV4 Routing table info */
+	__u8 v4_route_tbl_info_valid;
+	/* Must be set to true if v4_route_tbl_info is being passed */
+	struct ipa_route_tbl_info_type_v01 v4_route_tbl_info;
+	/*	Provides information about the IPV4 routing table */
+
+	/* Optional */
+	/*  IPV6 Routing table info */
+	__u8 v6_route_tbl_info_valid;
+	/* Must be set to true if v6_route_tbl_info is being passed */
+	struct ipa_route_tbl_info_type_v01 v6_route_tbl_info;
+	/*	Provides information about the IPV6 routing table */
+
+	/* Optional */
+	/*  IPV4 Filter table start address */
+	__u8 v4_filter_tbl_start_addr_valid;
+	/* Must be set to true if v4_filter_tbl_start_addr is being passed */
+	__u32 v4_filter_tbl_start_addr;
+	/*	Provides information about the starting address of IPV4 filter
+	 *	table in IPAv2 or non-hashable IPv4 filter table in IPAv3.
+	 *	Denotes the offset from the start of the IPA Shared Mem
+	 */
+
+	/* Optional */
+	/* IPV6 Filter table start address */
+	__u8 v6_filter_tbl_start_addr_valid;
+	/* Must be set to true if v6_filter_tbl_start_addr is being passed */
+	__u32 v6_filter_tbl_start_addr;
+	/*	Provides information about the starting address of IPV6 filter
+	 *	table in IPAv2 or non-hashable IPv6 filter table in IPAv3.
+	 *	Denotes the offset from the start of the IPA Shared Mem
+	 */
+
+	/* Optional */
+	/*  Modem memory block */
+	__u8 modem_mem_info_valid;
+	/* Must be set to true if modem_mem_info is being passed */
+	struct ipa_modem_mem_info_type_v01 modem_mem_info;
+	/*  Provides information about the start address and the size of
+	 *	the memory block that is being allocated to the modem driver.
+	 *	Denotes the physical address
+	 */
+
+	/* Optional */
+	/*  Destination end point for control commands from modem */
+	__u8 ctrl_comm_dest_end_pt_valid;
+	/* Must be set to true if ctrl_comm_dest_end_pt is being passed */
+	__u32 ctrl_comm_dest_end_pt;
+	/*  Provides information about the destination end point on the
+	 *	application processor to which the modem driver can send
+	 *	control commands. The value of this parameter cannot exceed
+	 *	19 since IPA only supports 20 end points.
+	 */
+
+	/* Optional */
+	/*  Modem Bootup Information */
+	__u8 is_ssr_bootup_valid;
+	/* Must be set to true if is_ssr_bootup is being passed */
+	__u8 is_ssr_bootup;
+	/*	Specifies whether the modem is booting up after a modem only
+	 *	sub-system restart or not. This will let the modem driver
+	 *	know that it doesn't have to reinitialize some of the HW
+	 *	blocks because IPA has not been reset since the previous
+	 *	initialization.
+	 */
+
+	/* Optional */
+	/*  Header Processing Context Table Information */
+	__u8 hdr_proc_ctx_tbl_info_valid;
+	/* Must be set to true if hdr_proc_ctx_tbl_info is being passed */
+	struct ipa_hdr_proc_ctx_tbl_info_type_v01 hdr_proc_ctx_tbl_info;
+	/* Provides information about the header processing context table.
+	 */
+
+	/* Optional */
+	/*  Compression Decompression Table Information */
+	__u8 zip_tbl_info_valid;
+	/* Must be set to true if zip_tbl_info is being passed */
+	struct ipa_zip_tbl_info_type_v01 zip_tbl_info;
+	/* Provides information about the zip table.
+	 */
+
+	/* Optional */
+	/*  IPv4 Hashable Routing Table Information */
+	/** Must be set to true if v4_hash_route_tbl_info is being passed */
+	__u8 v4_hash_route_tbl_info_valid;
+	struct ipa_route_tbl_info_type_v01 v4_hash_route_tbl_info;
+
+	/* Optional */
+	/*  IPv6 Hashable Routing Table Information */
+	/** Must be set to true if v6_hash_route_tbl_info is being passed */
+	__u8 v6_hash_route_tbl_info_valid;
+	struct ipa_route_tbl_info_type_v01 v6_hash_route_tbl_info;
+
+	/*
+	 * Optional
+	 * IPv4 Hashable Filter Table Start Address
+	 * Must be set to true if v4_hash_filter_tbl_start_addr
+	 * is being passed
+	 */
+	__u8 v4_hash_filter_tbl_start_addr_valid;
+	__u32 v4_hash_filter_tbl_start_addr;
+	/* Identifies the starting address of the IPv4 hashable filter
+	 * table in IPAv3 onwards. Denotes the offset from the start of
+	 * the IPA shared memory.
+	 */
+
+	/* Optional
+	 * IPv6 Hashable Filter Table Start Address
+	 * Must be set to true if v6_hash_filter_tbl_start_addr
+	 * is being passed
+	 */
+	__u8 v6_hash_filter_tbl_start_addr_valid;
+	__u32 v6_hash_filter_tbl_start_addr;
+	/* Identifies the starting address of the IPv6 hashable filter
+	 * table in IPAv3 onwards. Denotes the offset from the start of
+	 * the IPA shared memory.
+	 */
+
+	/* Optional
+	 * Modem HW Stats Quota Base address
+	 * Must be set to true if hw_stats_quota_base_addr
+	 * is being passed
+	 */
+	__u8 hw_stats_quota_base_addr_valid;
+	__u32 hw_stats_quota_base_addr;
+
+	/* Optional
+	 * Modem HW Stats Quota Size
+	 * Must be set to true if hw_stats_quota_size
+	 * is being passed
+	 */
+	__u8 hw_stats_quota_size_valid;
+	__u32 hw_stats_quota_size;
+
+	/* Optional
+	 * Modem HW Drop Stats Table Start Address
+	 * Must be set to true if hw_drop_stats_base_addr
+	 * is being passed
+	 */
+	__u8 hw_drop_stats_base_addr_valid;
+	__u32 hw_drop_stats_base_addr;
+
+	/* Optional
+	 * Modem HW Drop Stats Table size
+	 * Must be set to true if hw_drop_stats_table_size
+	 * is being passed
+	 */
+	__u8 hw_drop_stats_table_size_valid;
+	__u32 hw_drop_stats_table_size;
+
+	/* optional
+	 * Modem HW flt stats info
+	 * Must be set to true if filter_stats_info
+	 * is being passed
+	 */
+	__u8 hw_fiter_stats_info_valid;
+	struct ipa_filter_stats_info_type_v01 hw_filter_stats_info;
+
+	/* optional
+	 * Filter table smem info
+	 * Must be set to true if smem_info
+	 * is being passed(Currently not using it)
+	 */
+	__u8 smem_info_valid;
+	struct ipa_modem_mem_info_type_v01 smem_info;
+
+	/* optional
+	 * IPA Peripheral stats info
+	 * Must be set to true if per_stats_info
+	 * is being passed
+	 */
+	__u8 per_stats_smem_info_valid;
+	struct ipa_modem_mem_info_type_v01 per_stats_smem_info;
+};  /* Message */
+
+/* Response Message; Requests the modem IPA driver about initialization */
+struct ipa_init_modem_driver_resp_msg_v01 {
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type.*/
+
+	/* Optional */
+	/* Destination end point for control commands from master driver */
+	__u8 ctrl_comm_dest_end_pt_valid;
+	/* Must be set to true if ctrl_comm_dest_ep is being passed */
+	__u32 ctrl_comm_dest_end_pt;
+	/*	Provides information about the destination end point on the
+	 *	modem processor to which the master driver can send control
+	 *	commands. The value of this parameter cannot exceed 19 since
+	 *	IPA only supports 20 end points. This field is looked at only
+	 *	if the result in TLV RESULT_CODE is	QMI_RESULT_SUCCESS
+	 */
+
+	/* Optional */
+	/*  Default end point */
+	__u8 default_end_pt_valid;
+	/* Must be set to true if default_end_pt is being passed */
+	__u32 default_end_pt;
+	/*  Provides information about the default end point. The master
+	 *	driver may or may not set the register in the hardware with
+	 *	this value. The value of this parameter cannot exceed 19
+	 *	since IPA only supports 20 end points. This field is looked
+	 *	at only if the result in TLV RESULT_CODE is QMI_RESULT_SUCCESS
+	 */
+
+	/* Optional */
+	/*  Modem Driver Initialization Pending */
+	__u8 modem_driver_init_pending_valid;
+	/* Must be set to true if modem_driver_init_pending is being passed */
+	__u8 modem_driver_init_pending;
+	/*
+	 * Identifies if second level message handshake is needed
+	 *	between drivers to indicate when IPA HWP loading is completed.
+	 *	If this is set by modem driver, AP driver will need to wait
+	 *	for a INIT_MODEM_DRIVER_CMPLT message before communicating with
+	 *	IPA HWP.
+	 */
+};  /* Message */
+
+/*
+ * Request Message; Request from Modem IPA driver to indicate
+ *	modem driver init completion
+ */
+struct ipa_init_modem_driver_cmplt_req_msg_v01 {
+	/* Mandatory */
+	/*  Modem Driver init complete status; */
+	__u8 status;
+	/*
+	 * Specifies whether the modem driver initialization is complete
+	 *	including the micro controller image loading.
+	 */
+};  /* Message */
+
+/*
+ * Response Message; Request from Modem IPA driver to indicate
+ *	modem driver init completion
+ */
+struct ipa_init_modem_driver_cmplt_resp_msg_v01 {
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/**<   Standard response type.*/
+};  /* Message */
+
+/*	Request Message; This is the message that is exchanged between the
+ *	control point and the service in order to register for indications.
+ */
+struct ipa_indication_reg_req_msg_v01 {
+	/* Optional */
+	/*  Master driver initialization completion */
+	__u8 master_driver_init_complete_valid;
+	/* Must be set to true if master_driver_init_complete is being passed */
+	__u8 master_driver_init_complete;
+	/*  If set to TRUE, this field indicates that the client is
+	 *	interested in getting indications about the completion
+	 *	of the initialization sequence of the master driver.
+	 *	Setting this field in the request message makes sense
+	 *	only when the QMI_IPA_INDICATION_REGISTER_REQ is being
+	 *	originated from the modem driver
+	 */
+
+	/* Optional */
+	/*  Data Usage Quota Reached */
+	__u8 data_usage_quota_reached_valid;
+	/*  Must be set to true if data_usage_quota_reached is being passed */
+	__u8 data_usage_quota_reached;
+	/*  If set to TRUE, this field indicates that the client wants to
+	 *  receive indications about reaching the data usage quota that was
+	 *  previously set via QMI_IPA_SET_DATA_USAGE_QUOTA. Setting this field
+	 *  in the request message makes sense only when the
+	 *  QMI_IPA_INDICATION_REGISTER_REQ is being originated from the Master
+	 *  driver
+	 */
+
+	/* Optional */
+	/* IPA MHI Ready Indication */
+	__u8 ipa_mhi_ready_ind_valid;
+	/*  Must be set to true if ipa_mhi_ready_ind is being passed */
+	__u8 ipa_mhi_ready_ind;
+	/*
+	 * If set to TRUE, this field indicates that the client wants to
+	 * receive indications about MHI ready for Channel allocations.
+	 */
+
+	/* Optional */
+	/*  Endpoint Desc Info Indication */
+	__u8 endpoint_desc_ind_valid;
+	/* Must be set to true if endpoint_desc_ind is being passed */
+	__u8 endpoint_desc_ind;
+	/*
+	 * If set to TRUE, this field indicates that the client wants to
+	 * receive indications for Endpoint descriptor information via
+	 * QMI_IPA_ENDP_DESC_INDICATION. Setting this field in the request
+	 * message makes sense only when the QMI_IPA_INDICATION_REGISTER_REQ
+	 * is being originated from the master driver.
+	 */
+
+	/* Optional */
+	/* BW CHANGE Indication */
+	__u8 bw_change_ind_valid;
+	/* Must be set to true if bw_change_ind is being passed */
+	__u8 bw_change_ind;
+	/*
+	 * If set to TRUE, this field indicates that the client wants to
+	 * receive indications for BW change information via
+	 * QMI_IPA_BW_CHANGE_INDICATION. Setting this field in the request
+	 * message makes sense only when the QMI_IPA_INDICATION_REGISTER_REQ
+	 * is being originated from the master driver.
+	 */
+};  /* Message */
+
+
+/* Response Message; This is the message that is exchanged between the
+ *	control point and the service in order to register for indications.
+ */
+struct ipa_indication_reg_resp_msg_v01 {
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type. */
+};  /* Message */
+
+
+/*	Indication Message; Indication sent to the Modem IPA driver from
+ *	master IPA driver about initialization being complete.
+ */
+struct ipa_master_driver_init_complt_ind_msg_v01 {
+	/* Mandatory */
+	/*  Master driver initialization completion status */
+	struct ipa_qmi_response_type_v01 master_driver_init_status;
+	/*	Indicates the status of initialization. If everything went
+	 *	as expected, this field is set to SUCCESS. ERROR is set
+	 *	otherwise. Extended error info may be used to convey
+	 *	additional information about the error
+	 */
+};  /* Message */
+
+struct ipa_ipfltr_range_eq_16_type_v01 {
+	__u8 offset;
+	/*	Specifies the offset from the IHL (Internet Header Length) */
+
+	__u16 range_low;
+	/*	Specifies the lower bound of the range */
+
+	__u16 range_high;
+	/*	Specifies the upper bound of the range */
+};  /* Type */
+
+struct ipa_ipfltr_mask_eq_32_type_v01 {
+	__u8 offset;
+	/*	Specifies the offset either from IHL or from the start of
+	 *	the IP packet. This depends on the equation that this structure
+	 *	is used in.
+	 */
+
+	__u32 mask;
+	/*	Specifies the mask that has to be used in the comparison.
+	 *	The field is ANDed with the mask and compared against the value.
+	 */
+
+	__u32 value;
+	/*	Specifies the 32 bit value that is used in the comparison. */
+};  /* Type */
+
+struct ipa_ipfltr_eq_16_type_v01 {
+	__u8 offset;
+	/*  Specifies the offset into the packet */
+
+	__u16 value;
+	/* Specifies the 16 bit value that should be used in the comparison. */
+};  /* Type */
+
+struct ipa_ipfltr_eq_32_type_v01 {
+	__u8 offset;
+	/* Specifies the offset into the packet */
+
+	__u32 value;
+	/* Specifies the 32 bit value that should be used in the comparison. */
+};  /* Type */
+
+struct ipa_ipfltr_mask_eq_128_type_v01 {
+	__u8 offset;
+	/* Specifies the offset into the packet */
+
+	__u8 mask[16];
+	/*  Specifies the mask that has to be used in the comparison.
+	 *	The field is ANDed with the mask and compared against the value.
+	 */
+
+	__u8 value[16];
+	/* Specifies the 128 bit value that should be used in the comparison. */
+};  /* Type */
+
+
+struct ipa_filter_rule_type_v01 {
+	__u16 rule_eq_bitmap;
+	/* 16-bit Bitmask to indicate how many eqs are valid in this rule */
+
+	__u8 tos_eq_present;
+	/*
+	 * tos_eq_present field has two meanings:
+	 * IPA ver < 4.5:
+	 *  specifies if a type of service check rule is present
+	 *  (as the field name reveals).
+	 * IPA ver >= 4.5:
+	 *  specifies if a tcp pure ack check rule is present
+	 */
+
+	__u8 tos_eq;
+	/* The value to check against the type of service (ipv4) field */
+
+	__u8 protocol_eq_present;
+	/* Specifies if a protocol check rule is present */
+
+	__u8 protocol_eq;
+	/* The value to check against the protocol field */
+
+	__u8 num_ihl_offset_range_16;
+	/*  The number of 16 bit range check rules at the location
+	 *	determined by IP header length plus a given offset
+	 *	in this rule. See the definition of
+	 *	ipa_ipfltr_range_eq_16_type for better understanding.
+	 *	The value of this field cannot exceed
+	 *	IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS which is set as 2
+	 */
+
+	struct ipa_ipfltr_range_eq_16_type_v01
+		ihl_offset_range_16[QMI_IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS_V01];
+	/*	Array of the registered IP header length offset 16 bit range
+	 *	check rules.
+	 */
+
+	__u8 num_offset_meq_32;
+	/*  The number of 32 bit masked comparison rules present
+	 *  in this rule
+	 */
+
+	struct ipa_ipfltr_mask_eq_32_type_v01
+		offset_meq_32[QMI_IPA_IPFLTR_NUM_MEQ_32_EQNS_V01];
+	/*  An array of all the possible 32bit masked comparison rules
+	 *	in this rule
+	 */
+
+	__u8 tc_eq_present;
+	/*  Specifies if the traffic class rule is present in this rule */
+
+	__u8 tc_eq;
+	/* The value against which the IPV4 traffic class field has to
+	 * be checked
+	 */
+
+	__u8 flow_eq_present;
+	/* Specifies if the "flow equals" rule is present in this rule */
+
+	__u32 flow_eq;
+	/* The value against which the IPV6 flow field has to be checked */
+
+	__u8 ihl_offset_eq_16_present;
+	/*	Specifies if there is a 16 bit comparison required at the
+	 *	location in the packet determined by "Internet Header Length
+	 *	+ specified offset"
+	 */
+
+	struct ipa_ipfltr_eq_16_type_v01 ihl_offset_eq_16;
+	/* The 16 bit comparison equation */
+
+	__u8 ihl_offset_eq_32_present;
+	/*	Specifies if there is a 32 bit comparison required at the
+	 *	location in the packet determined by "Internet Header Length
+	 *	+ specified offset"
+	 */
+
+	struct ipa_ipfltr_eq_32_type_v01 ihl_offset_eq_32;
+	/*	The 32 bit comparison equation */
+
+	__u8 num_ihl_offset_meq_32;
+	/*	The number of 32 bit masked comparison equations in this
+	 *	rule. The location of the packet to be compared is
+	 *	determined by the IP Header length + the given offset
+	 */
+
+	struct ipa_ipfltr_mask_eq_32_type_v01
+		ihl_offset_meq_32[QMI_IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS_V01];
+	/*	Array of 32 bit masked comparison equations.
+	 */
+
+	__u8 num_offset_meq_128;
+	/*	The number of 128 bit comparison equations in this rule */
+
+	struct ipa_ipfltr_mask_eq_128_type_v01
+		offset_meq_128[QMI_IPA_IPFLTR_NUM_MEQ_128_EQNS_V01];
+	/*	Array of 128 bit comparison equations. The location in the
+	 *	packet is determined by the specified offset
+	 */
+
+	__u8 metadata_meq32_present;
+	/*  Boolean indicating if the 32 bit masked comparison equation
+	 *	is present or not. Comparison is done against the metadata
+	 *	in IPA. Metadata can either be extracted from the packet
+	 *	header or from the "metadata" register.
+	 */
+
+	struct ipa_ipfltr_mask_eq_32_type_v01
+			metadata_meq32;
+	/* The metadata 32 bit masked comparison equation */
+
+	__u8 ipv4_frag_eq_present;
+	/* Specifies if the IPv4 Fragment equation is present in this rule */
+};  /* Type */
+
+
+struct ipa_filter_rule_req2_type_v01 {
+	__u16 rule_eq_bitmap;
+	/* 16-bit Bitmask to indicate how many eqs are valid in this rule */
+
+	__u8 pure_ack_eq_present;
+	/*
+	 *  specifies if a tcp pure ack check rule is present
+	 */
+
+	__u8 pure_ack_eq;
+	/* The value to check for the TCP pure ACK rule */
+
+	__u8 protocol_eq_present;
+	/* Specifies if a protocol check rule is present */
+
+	__u8 protocol_eq;
+	/* The value to check against the protocol field */
+
+	__u8 num_ihl_offset_range_16;
+	/*  The number of 16 bit range check rules at the location
+	 *	determined by IP header length plus a given offset
+	 *	in this rule. See the definition of
+	 *	ipa_ipfltr_range_eq_16_type for better understanding.
+	 *	The value of this field cannot exceed
+	 *	IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS which is set as 2
+	 */
+
+	struct ipa_ipfltr_range_eq_16_type_v01
+		ihl_offset_range_16[QMI_IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS_V01];
+	/*	Array of the registered IP header length offset 16 bit range
+	 *	check rules.
+	 */
+
+	__u8 num_offset_meq_32;
+	/*  The number of 32 bit masked comparison rules present
+	 *  in this rule
+	 */
+
+	struct ipa_ipfltr_mask_eq_32_type_v01
+		offset_meq_32[QMI_IPA_IPFLTR_NUM_MEQ_32_EQNS_V01];
+	/*  An array of all the possible 32bit masked comparison rules
+	 *	in this rule
+	 */
+
+	__u8 tc_eq_present;
+	/*  Specifies if the traffic class rule is present in this rule */
+
+	__u8 tc_eq;
+	/* The value against which the IPV4 traffic class field has to
+	 * be checked
+	 */
+
+	__u8 flow_eq_present;
+	/* Specifies if the "flow equals" rule is present in this rule */
+
+	__u32 flow_eq;
+	/* The value against which the IPV6 flow field has to be checked */
+
+	__u8 ihl_offset_eq_16_present;
+	/*	Specifies if there is a 16 bit comparison required at the
+	 *	location in the packet determined by "Internet Header Length
+	 *	+ specified offset"
+	 */
+
+	struct ipa_ipfltr_eq_16_type_v01 ihl_offset_eq_16;
+	/* The 16 bit comparison equation */
+
+	__u8 ihl_offset_eq_32_present;
+	/*	Specifies if there is a 32 bit comparison required at the
+	 *	location in the packet determined by "Internet Header Length
+	 *	+ specified offset"
+	 */
+
+	struct ipa_ipfltr_eq_32_type_v01 ihl_offset_eq_32;
+	/*	The 32 bit comparison equation */
+
+	__u8 num_ihl_offset_meq_32;
+	/*	The number of 32 bit masked comparison equations in this
+	 *	rule. The location of the packet to be compared is
+	 *	determined by the IP Header length + the given offset
+	 */
+
+	struct ipa_ipfltr_mask_eq_32_type_v01
+		ihl_offset_meq_32[QMI_IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS_V01];
+	/*	Array of 32 bit masked comparison equations.
+	 */
+
+	__u8 num_offset_meq_128;
+	/*	The number of 128 bit comparison equations in this rule */
+
+	struct ipa_ipfltr_mask_eq_128_type_v01
+		offset_meq_128[QMI_IPA_IPFLTR_NUM_MEQ_128_EQNS_V01];
+	/*	Array of 128 bit comparison equations. The location in the
+	 *	packet is determined by the specified offset
+	 */
+
+	__u8 metadata_meq32_present;
+	/*  Boolean indicating if the 32 bit masked comparison equation
+	 *	is present or not. Comparison is done against the metadata
+	 *	in IPA. Metadata can either be extracted from the packet
+	 *	header or from the "metadata" register.
+	 */
+
+	struct ipa_ipfltr_mask_eq_32_type_v01
+			metadata_meq32;
+	/* The metadata 32 bit masked comparison equation */
+
+	__u8 ipv4_frag_eq_present;
+	/* Specifies if the IPv4 Fragment equation is present in this rule */
+};  /* Type */
+
+enum ipa_ip_type_enum_v01 {
+	IPA_IP_TYPE_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+	/* To force a 32 bit signed enum.  Do not change or use */
+	QMI_IPA_IP_TYPE_INVALID_V01 = 0,
+	/*  Invalid IP type identifier */
+	QMI_IPA_IP_TYPE_V4_V01 = 1,
+	/*  IP V4 type */
+	QMI_IPA_IP_TYPE_V6_V01 = 2,
+	/*  IP V6 type */
+	QMI_IPA_IP_TYPE_V4V6_V01 = 3,
+	/*  Applies to both IP types */
+	IPA_IP_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+	/* To force a 32 bit signed enum.  Do not change or use */
+};
+
+
+enum ipa_filter_action_enum_v01 {
+	IPA_FILTER_ACTION_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+	/* To force a 32 bit signed enum. Do not change or use */
+	QMI_IPA_FILTER_ACTION_INVALID_V01 = 0,
+	/*  Invalid action on filter hit */
+	QMI_IPA_FILTER_ACTION_SRC_NAT_V01 = 1,
+	/*  Pass packet to NAT block for Source NAT */
+	QMI_IPA_FILTER_ACTION_DST_NAT_V01 = 2,
+	/*  Pass packet to NAT block for Destination NAT */
+	QMI_IPA_FILTER_ACTION_ROUTING_V01 = 3,
+	/*  Pass packet to Routing block */
+	QMI_IPA_FILTER_ACTION_EXCEPTION_V01 = 4,
+	/*  Treat packet as exception and send to exception pipe */
+	IPA_FILTER_ACTION_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+	/* To force a 32 bit signed enum.  Do not change or use */
+};
+
+struct ipa_filter_spec_type_v01 {
+	__u32 filter_spec_identifier;
+	/*	This field is used to identify a filter spec in the list
+	 *	of filter specs being sent from the client. This field
+	 *	is applicable only in the filter install request and response.
+	 */
+
+	enum ipa_ip_type_enum_v01 ip_type;
+	/*	This field identifies the IP type for which this rule is
+	 *	applicable. The driver needs to identify the filter table
+	 *	(V6 or V4) and this field is essential for that
+	 */
+
+	struct ipa_filter_rule_type_v01 filter_rule;
+	/*	This field specifies the rules in the filter spec. These rules
+	 *	are the ones that are matched against fields in the packet.
+	 */
+
+	enum ipa_filter_action_enum_v01 filter_action;
+	/*	This field specifies the action to be taken when a filter match
+	 *	occurs. The remote side should install this information into the
+	 *	hardware along with the filter equations.
+	 */
+
+	__u8 is_routing_table_index_valid;
+	/*	Specifies whether the routing table index is present or not.
+	 *	If the action is "QMI_IPA_FILTER_ACTION_EXCEPTION", this
+	 *	parameter need not be provided.
+	 */
+
+	__u32 route_table_index;
+	/*	This is the index in the routing table that should be used
+	 *	to route the packets if the filter rule is hit
+	 */
+
+	__u8 is_mux_id_valid;
+	/*	Specifies whether the mux_id is valid */
+
+	__u32 mux_id;
+	/*	This field identifies the QMAP MUX ID. As a part of QMAP
+	 *	protocol, several data calls may be multiplexed over the
+	 *	same physical transport channel. This identifier is used to
+	 *	identify one such data call. The maximum value for this
+	 *	identifier is 255.
+	 */
+};  /* Type */
+
+struct ipa_filter_spec_ex_type_v01 {
+	enum ipa_ip_type_enum_v01 ip_type;
+	/*	This field identifies the IP type for which this rule is
+	 *	applicable. The driver needs to identify the filter table
+	 *	(V6 or V4) and this field is essential for that
+	 */
+
+	struct ipa_filter_rule_type_v01 filter_rule;
+	/*	This field specifies the rules in the filter spec. These rules
+	 *	are the ones that are matched against fields in the packet.
+	 */
+
+	enum ipa_filter_action_enum_v01 filter_action;
+	/*	This field specifies the action to be taken when a filter match
+	 *	occurs. The remote side should install this information into the
+	 *	hardware along with the filter equations.
+	 */
+
+	__u8 is_routing_table_index_valid;
+	/*	Specifies whether the routing table index is present or not.
+	 *	If the action is "QMI_IPA_FILTER_ACTION_EXCEPTION", this
+	 *	parameter need not be provided.
+	 */
+
+	__u32 route_table_index;
+	/*	This is the index in the routing table that should be used
+	 *	to route the packets if the filter rule is hit
+	 */
+
+	__u8 is_mux_id_valid;
+	/*	Specifies whether the mux_id is valid */
+
+	__u32 mux_id;
+	/*	This field identifies the QMAP MUX ID. As a part of QMAP
+	 *	protocol, several data calls may be multiplexed over the
+	 *	same physical transport channel. This identifier is used to
+	 *	identify one such data call. The maximum value for this
+	 *	identifier is 255.
+	 */
+
+	__u32 rule_id;
+	/* Rule Id of the given filter. The Rule Id is populated in the rule
+	 * header when installing the rule in IPA.
+	 */
+
+	__u8 is_rule_hashable;
+	/* Specifies whether the given rule is hashable.
+	 */
+};  /* Type */
+
+struct ipa_filter_spec_ex2_type_v01 {
+	enum ipa_ip_type_enum_v01 ip_type;
+	/*	This field identifies the IP type for which this rule is
+	 *	applicable. The driver needs to identify the filter table
+	 *	(V6 or V4) and this field is essential for that
+	 */
+
+	struct ipa_filter_rule_req2_type_v01 filter_rule;
+	/*	This field specifies the rules in the filter spec. These rules
+	 *	are the ones that are matched against fields in the packet.
+	 */
+
+	enum ipa_filter_action_enum_v01 filter_action;
+	/*	This field specifies the action to be taken when a filter match
+	 *	occurs. The remote side should install this information into the
+	 *	hardware along with the filter equations.
+	 */
+
+	__u8 is_routing_table_index_valid;
+	/*	Specifies whether the routing table index is present or not.
+	 *	If the action is "QMI_IPA_FILTER_ACTION_EXCEPTION", this
+	 *	parameter need not be provided.
+	 */
+
+	__u32 route_table_index;
+	/*	This is the index in the routing table that should be used
+	 *	to route the packets if the filter rule is hit
+	 */
+
+	__u8 is_mux_id_valid;
+	/*	Specifies whether the mux_id is valid */
+
+	__u32 mux_id;
+	/*	This field identifies the QMAP MUX ID. As a part of QMAP
+	 *	protocol, several data calls may be multiplexed over the
+	 *	same physical transport channel. This identifier is used to
+	 *	identify one such data call. The maximum value for this
+	 *	identifier is 255.
+	 */
+
+	__u32 rule_id;
+	/* Rule Id of the given filter. The Rule Id is populated in the rule
+	 * header when installing the rule in IPA.
+	 */
+
+	__u8 is_rule_hashable;
+	/* Specifies whether the given rule is hashable.
+	 */
+};  /* Type */
+
+/*  Request Message; This is the message that is exchanged between the
+ *	control point and the service in order to request the installation
+ *	of filtering rules in the hardware block by the remote side.
+ */
+struct ipa_install_fltr_rule_req_msg_v01 {
+	/* Optional
+	 * IP type that this rule applies to
+	 * Filter specification to be installed in the hardware
+	 */
+	__u8 filter_spec_list_valid;
+	/* Must be set to true if filter_spec_list is being passed */
+	__u32 filter_spec_list_len;
+	/* Must be set to # of elements in filter_spec_list */
+	struct ipa_filter_spec_type_v01
+		filter_spec_list[QMI_IPA_MAX_FILTERS_V01];
+	/*	This structure defines the list of filters that have
+	 *		to be installed in the hardware. The driver installing
+	 *		these rules shall do so in the same order as specified
+	 *		in this list.
+	 */
+
+	/* Optional */
+	/*  Pipe index to install rule */
+	__u8 source_pipe_index_valid;
+	/* Must be set to true if source_pipe_index is being passed */
+	__u32 source_pipe_index;
+	/*	This is the source pipe on which the filter rule is to be
+	 *	installed. The requestor may not always know the pipe
+	 *	indices. If not specified, the receiver shall install
+	 *	this rule on all the pipes that it controls through
+	 *	which data may be fed into IPA.
+	 */
+
+	/* Optional */
+	/*  Total number of IPv4 filters in the filter spec list */
+	__u8 num_ipv4_filters_valid;
+	/* Must be set to true if num_ipv4_filters is being passed */
+	__u32 num_ipv4_filters;
+	/*   Number of IPv4 rules included in filter spec list */
+
+	/* Optional */
+	/*  Total number of IPv6 filters in the filter spec list */
+	__u8 num_ipv6_filters_valid;
+	/* Must be set to true if num_ipv6_filters is being passed */
+	__u32 num_ipv6_filters;
+	/* Number of IPv6 rules included in filter spec list */
+
+	/* Optional */
+	/*  List of XLAT filter indices in the filter spec list */
+	__u8 xlat_filter_indices_list_valid;
+	/* Must be set to true if xlat_filter_indices_list
+	 * is being passed
+	 */
+	__u32 xlat_filter_indices_list_len;
+	/* Must be set to # of elements in xlat_filter_indices_list */
+	__u32 xlat_filter_indices_list[QMI_IPA_MAX_FILTERS_V01];
+	/* List of XLAT filter indices. Filter rules at specified indices
+	 * will need to be modified by the receiver if the PDN is XLAT
+	 * before installing them on the associated IPA consumer pipe.
+	 */
+
+	/* Optional */
+	/*  Extended Filter Specification */
+	__u8 filter_spec_ex_list_valid;
+	/* Must be set to true if filter_spec_ex_list is being passed */
+	__u32 filter_spec_ex_list_len;
+	/* Must be set to # of elements in filter_spec_ex_list */
+	struct ipa_filter_spec_ex_type_v01
+		filter_spec_ex_list[QMI_IPA_MAX_FILTERS_V01];
+	/*
+	 * List of filter specifications of filters that must be installed in
+	 *	the IPAv3.x hardware.
+	 *	The driver installing these rules must do so in the same
+	 *	order as specified in this list.
+	 */
+
+	/* Optional */
+	/*  Extended Type 2 Filter Specification */
+	__u8 filter_spec_ex2_list_valid;
+	/* Must be set to true if filter_spec_ex2_list is being passed */
+	__u32 filter_spec_ex2_list_len;
+	/* Must be set to # of elements in filter_spec_ex2_list */
+	struct ipa_filter_spec_ex2_type_v01
+		filter_spec_ex2_list[QMI_IPA_MAX_FILTERS_V01];
+
+	/* Optional */
+	/* List of modem UL Filters in the Spec List which need be to
+	 * replicated with AP UL firewall filters
+	 */
+	__u8 ul_firewall_indices_list_valid;
+	/* Must be set to true if ul_firewall_indices_list is being passed */
+	__u32 ul_firewall_indices_list_len;
+	__u32 ul_firewall_indices_list[QMI_IPA_MAX_FILTERS_V01];
+	/* List of UL firewall filter indices.
+	 * Filter rules at specified indices must be replicated across
+	 * the firewall filters by the receiver and installed on the
+	 * associated IPA consumer pipe.
+	 */
+};  /* Message */
+
+struct ipa_filter_rule_identifier_to_handle_map_v01 {
+	__u32 filter_spec_identifier;
+	/*	This field is used to identify a filter spec in the list of
+	 *	filter specs being sent from the client. This field is
+	 *	applicable only in the filter install request and response.
+	 */
+	__u32 filter_handle;
+	/*  This field is used to identify a rule in any subsequent message.
+	 *	This is a value that is provided by the server to the control
+	 *	point
+	 */
+};  /* Type */
+
+/* Response Message; This is the message that is exchanged between the
+ * control point and the service in order to request the
+ * installation of filtering rules in the hardware block by
+ * the remote side.
+ */
+struct ipa_install_fltr_rule_resp_msg_v01 {
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/*	Standard response type.
+	 *	Contains the following data members:
+	 *	- qmi_result_type -- QMI_RESULT_SUCCESS or QMI_RESULT_FAILURE
+	 *	- qmi_error_type  -- Error code. Possible error code values are
+	 *	described in the error codes section of each message definition.
+	 */
+
+	/* Optional */
+	/*  Filter Handle List */
+	__u8 filter_handle_list_valid;
+	/* Must be set to true if filter_handle_list is being passed */
+	__u32 filter_handle_list_len;
+	/* Must be set to # of elements in filter_handle_list */
+	struct ipa_filter_rule_identifier_to_handle_map_v01
+		filter_handle_list[QMI_IPA_MAX_FILTERS_V01];
+	/*
+	 * List of handles returned to the control point. Each handle is
+	 *	mapped to the rule identifier that was specified in the
+	 *	request message. Any further reference to the rule is done
+	 *	using the filter handle.
+	 */
+
+	/* Optional */
+	/*  Rule id List */
+	__u8 rule_id_valid;
+	/* Must be set to true if rule_id is being passed */
+	__u32 rule_id_len;
+	/* Must be set to # of elements in rule_id */
+	__u32 rule_id[QMI_IPA_MAX_FILTERS_V01];
+	/*
+	 * List of rule ids returned to the control point.
+	 *	Any further reference to the rule is done using the
+	 *	filter rule id specified in this list.
+	 */
+};  /* Message */
+
+struct ipa_filter_handle_to_index_map_v01 {
+	__u32 filter_handle;
+	/*	This is a handle that was given to the remote client that
+	 *	requested the rule addition.
+	 */
+	__u32 filter_index;
+	/*	This index denotes the location in a filter table, where the
+	 *	filter rule has been installed. The maximum value of this
+	 *	field is 64.
+	 */
+};  /* Type */
+
+/* Request Message; This is the message that is exchanged between the
+ * control point and the service in order to notify the remote driver
+ * of the installation of the filter rule supplied earlier by the
+ * remote driver.
+ */
+struct ipa_fltr_installed_notif_req_msg_v01 {
+	/*	Mandatory	*/
+	/*  Pipe index	*/
+	__u32 source_pipe_index;
+	/*	This is the source pipe on which the filter rule has been
+	 *	installed or was attempted to be installed
+	 */
+
+	/* Mandatory */
+	/*  Installation Status */
+	enum ipa_qmi_result_type_v01 install_status;
+	/*	This is the status of installation. If this indicates
+	 *	SUCCESS, other optional fields carry additional
+	 *	information
+	 */
+
+	/* Mandatory */
+	/*  List of Filter Indices */
+	__u32 filter_index_list_len;
+	/* Must be set to # of elements in filter_index_list */
+	struct ipa_filter_handle_to_index_map_v01
+		filter_index_list[QMI_IPA_MAX_FILTERS_V01];
+	/*
+	 * Provides the list of filter indices and the corresponding
+	 *	filter handle. If the installation_status indicates a
+	 *	failure, the filter indices must be set to a reserved
+	 *	index (255).
+	 */
+
+	/* Optional */
+	/*  Embedded pipe index */
+	__u8 embedded_pipe_index_valid;
+	/* Must be set to true if embedded_pipe_index is being passed */
+	__u32 embedded_pipe_index;
+	/*	This index denotes the embedded pipe number on which a call to
+	 *	the same PDN has been made. If this field is set, it denotes
+	 *	that this is a use case where PDN sharing is happening. The
+	 *	embedded pipe is used to send data from the embedded client
+	 *	in the device
+	 */
+
+	/* Optional */
+	/*  Retain Header Configuration */
+	__u8 retain_header_valid;
+	/* Must be set to true if retain_header is being passed */
+	__u8 retain_header;
+	/*	This field indicates if the driver installing the rule has
+	 *	turned on the "retain header" bit. If this is true, the
+	 *	header that is removed by IPA is reinserted after the
+	 *	packet processing is completed.
+	 */
+
+	/* Optional */
+	/*  Embedded call Mux Id */
+	__u8 embedded_call_mux_id_valid;
+	/* Must be set to true if embedded_call_mux_id is being passed */
+	__u32 embedded_call_mux_id;
+	/*	This identifies one of the many calls that have been originated
+	 *	on the embedded pipe. This is how we identify the PDN gateway
+	 *	to which traffic from the source pipe has to flow.
+	 */
+
+	/* Optional */
+	/*  Total number of IPv4 filters in the filter index list */
+	__u8 num_ipv4_filters_valid;
+	/* Must be set to true if num_ipv4_filters is being passed */
+	__u32 num_ipv4_filters;
+	/* Number of IPv4 rules included in filter index list */
+
+	/* Optional */
+	/*  Total number of IPv6 filters in the filter index list */
+	__u8 num_ipv6_filters_valid;
+	/* Must be set to true if num_ipv6_filters is being passed */
+	__u32 num_ipv6_filters;
+	/* Number of IPv6 rules included in filter index list */
+
+	/* Optional */
+	/*  Start index on IPv4 filters installed on source pipe */
+	__u8 start_ipv4_filter_idx_valid;
+	/* Must be set to true if start_ipv4_filter_idx is being passed */
+	__u32 start_ipv4_filter_idx;
+	/* Start index of IPv4 rules in filter index list */
+
+	/* Optional */
+	/*  Start index on IPv6 filters installed on source pipe */
+	__u8 start_ipv6_filter_idx_valid;
+	/* Must be set to true if start_ipv6_filter_idx is being passed */
+	__u32 start_ipv6_filter_idx;
+	/* Start index of IPv6 rules in filter index list */
+
+	/* Optional */
+	/*  List of Rule Ids */
+	__u8 rule_id_valid;
+	/* Must be set to true if rule_id is being passed */
+	__u32 rule_id_len;
+	/* Must be set to # of elements in rule_id */
+	__u32 rule_id[QMI_IPA_MAX_FILTERS_V01];
+	/*
+	 * Provides the list of Rule Ids of rules added in IPA on the given
+	 *	source pipe index. If the install_status TLV indicates a
+	 *	failure, the Rule Ids in this list must be set to a reserved
+	 *	index (255).
+	 */
+
+	/* Optional */
+	/*	List of destination pipe IDs. */
+	__u8 dst_pipe_id_valid;
+	/* Must be set to true if dst_pipe_id is being passed. */
+	__u32 dst_pipe_id_len;
+	/* Must be set to # of elements in dst_pipe_id. */
+	__u32 dst_pipe_id[QMI_IPA_MAX_CLIENT_DST_PIPES_V01];
+	/* Provides the list of destination pipe IDs for a source pipe. */
+
+	/* Optional */
+	/*  List of Rule IDs extended */
+	__u8 rule_id_ex_valid;
+	/* Must be set to true if rule_id_ex is being passed. */
+	__u32 rule_id_ex_len;
+	/* Must be set to # of elements in rule_id_ex */
+	__u32 rule_id_ex[QMI_IPA_MAX_FILTERS_EX2_V01];
+	/* Provides the list of Rule IDs of rules added in IPA on the
+	 * given source pipe index. If the install_status TLV indicates
+	 * a failure, the Rule IDs in this list must be set to a
+	 * reserved index (255).
+	 */
+};  /* Message */
+
+/* Response Message; This is the message that is exchanged between the
+ * control point and the service in order to notify the remote driver
+ * of the installation of the filter rule supplied earlier by the
+ * remote driver.
+ */
+struct ipa_fltr_installed_notif_resp_msg_v01 {
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type */
+};  /* Message */
+
+/* Request Message; Notifies the remote driver of the need to clear the data
+ * path to prevent the IPA from being blocked at the head of the processing
+ * pipeline
+ */
+struct ipa_enable_force_clear_datapath_req_msg_v01 {
+	/* Mandatory */
+	/*  Pipe Mask */
+	__u32 source_pipe_bitmask;
+	/* Set of consumer (source) pipes that must be clear of
+	 * active data transfers.
+	 */
+
+	/* Mandatory */
+	/* Request ID */
+	__u32 request_id;
+	/* Identifies the ID of the request that is sent to the server
+	 * The same request ID is used in the message to remove the force_clear
+	 * request. The server is expected to keep track of the request ID and
+	 * the source_pipe_bitmask so that it can revert as needed
+	 */
+
+	/* Optional */
+	/*  Source Throttle State */
+	__u8 throttle_source_valid;
+	/* Must be set to true if throttle_source is being passed */
+	__u8 throttle_source;
+	/*  Specifies whether the server is to throttle the data from
+	 *	these consumer (source) pipes after clearing the existing
+	 *	data present in the IPA that were pulled from these pipes
+	 *	The server is expected to put all the source pipes in the
+	 *	source_pipe_bitmask in the same state
+	 */
+
+	/* Optional */
+	/* Pipe Mask Ext State */
+	__u8 source_pipe_bitmask_ext_valid;
+	/*  Pipe Mask Ext */
+	__u32 source_pipe_bitmask_ext[4];
+	/* Set of consumer (source) pipes that must be clear of
+	 * active data transfers.
+	 * The extended mask supports up to 128 endpoints to accommodate newer
+	 * architectures, which use more than 32 endpoints.
+	 * If this new field is used, the old field source_pipe_bitmask
+	 * shall be ignored.
+	 */
+};  /* Message */
+
+/* Response Message; Notifies the remote driver of the need to clear the
+ * data path to prevent the IPA from being blocked at the head of the
+ * processing pipeline
+ */
+struct ipa_enable_force_clear_datapath_resp_msg_v01 {
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type */
+};  /* Message */
+
+/* Request Message; Notifies the remote driver that the forceful clearing
+ * of the data path can be lifted
+ */
+struct ipa_disable_force_clear_datapath_req_msg_v01 {
+	/* Mandatory */
+	/* Request ID */
+	__u32 request_id;
+	/* Identifies the request that was sent to the server to
+	 * forcibly clear the data path. This request simply undoes
+	 * the operation done in that request
+	 */
+};  /* Message */
+
+/* Response Message; Notifies the remote driver that the forceful clearing
+ * of the data path can be lifted
+ */
+struct ipa_disable_force_clear_datapath_resp_msg_v01 {
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type */
+};  /* Message */
+
+enum ipa_peripheral_speed_enum_v01 {
+	IPA_PERIPHERAL_SPEED_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+	/* To force a 32 bit signed enum.  Do not change or use */
+	QMI_IPA_PER_USB_FS_V01 = 1,
+	/*  Full-speed USB connection */
+	QMI_IPA_PER_USB_HS_V01 = 2,
+	/*  High-speed USB connection */
+	QMI_IPA_PER_USB_SS_V01 = 3,
+	/*  Super-speed USB connection */
+	QMI_IPA_PER_WLAN_V01 = 4,
+	/*  WLAN connection */
+	IPA_PERIPHERAL_SPEED_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+	/* To force a 32 bit signed enum.  Do not change or use*/
+};
+
+enum ipa_pipe_mode_enum_v01 {
+	IPA_PIPE_MODE_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+	/* To force a 32 bit signed enum.  Do not change or use */
+	QMI_IPA_PIPE_MODE_HW_V01 = 1,
+	/*  Pipe is connected with a hardware block */
+	QMI_IPA_PIPE_MODE_SW_V01 = 2,
+	/*  Pipe is controlled by the software */
+	IPA_PIPE_MODE_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+	/* To force a 32 bit signed enum.  Do not change or use */
+};
+
+enum ipa_peripheral_type_enum_v01 {
+	IPA_PERIPHERAL_TYPE_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+	/* To force a 32 bit signed enum.  Do not change or use */
+	QMI_IPA_PERIPHERAL_USB_V01 = 1,
+	/*  Specifies a USB peripheral */
+	QMI_IPA_PERIPHERAL_HSIC_V01 = 2,
+	/*  Specifies an HSIC peripheral */
+	QMI_IPA_PERIPHERAL_PCIE_V01 = 3,
+	/*  Specifies a PCIe	peripheral */
+	IPA_PERIPHERAL_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+	/* To force a 32 bit signed enum.  Do not change or use */
+};
+
+struct ipa_config_req_msg_v01 {
+	/* Optional */
+	/*  Peripheral Type */
+	__u8 peripheral_type_valid;
+	/* Must be set to true if peripheral_type is being passed */
+	enum ipa_peripheral_type_enum_v01 peripheral_type;
+	/* Informs the remote driver about the peripheral for
+	 * which this configuration information is relevant. Values:
+	 *	- QMI_IPA_PERIPHERAL_USB (1) -- Specifies a USB peripheral
+	 *	- QMI_IPA_PERIPHERAL_HSIC(2) -- Specifies an HSIC peripheral
+	 *	- QMI_IPA_PERIPHERAL_PCIE(3) -- Specifies a PCIe peripheral
+	 */
+
+	/* Optional */
+	/*  HW Deaggregation Support */
+	__u8 hw_deaggr_supported_valid;
+	/* Must be set to true if hw_deaggr_supported is being passed */
+	__u8 hw_deaggr_supported;
+	/* Informs the remote driver whether the local IPA driver
+	 * allows de-aggregation to be performed in the hardware
+	 */
+
+	/* Optional */
+	/*  Maximum Aggregation Frame Size */
+	__u8 max_aggr_frame_size_valid;
+	/* Must be set to true if max_aggr_frame_size is being passed */
+	__u32 max_aggr_frame_size;
+	/* Specifies the maximum size of the aggregated frame that
+	 * the remote driver can expect from this execution environment
+	 *	- Valid range: 128 bytes to 32768 bytes
+	 */
+
+	/* Optional */
+	/*  IPA Ingress Pipe Mode */
+	__u8 ipa_ingress_pipe_mode_valid;
+	/* Must be set to true if ipa_ingress_pipe_mode is being passed */
+
+	enum ipa_pipe_mode_enum_v01 ipa_ingress_pipe_mode;
+	/* Indicates to the remote driver if the ingress pipe into the
+	 *	IPA is in direct connection with another hardware block or
+	 *	if the producer of data to this ingress pipe is a software
+	 *  module. Values:
+	 *	-QMI_IPA_PIPE_MODE_HW(1) --Pipe is connected with hardware block
+	 *	-QMI_IPA_PIPE_MODE_SW(2) --Pipe is controlled by the software
+	 */
+
+	/* Optional */
+	/*  Peripheral Speed Info */
+	__u8 peripheral_speed_info_valid;
+	/* Must be set to true if peripheral_speed_info is being passed */
+
+	enum ipa_peripheral_speed_enum_v01 peripheral_speed_info;
+	/* Indicates the speed that the peripheral connected to the IPA supports
+	 * Values:
+	 *	- QMI_IPA_PER_USB_FS (1) --  Full-speed USB connection
+	 *	- QMI_IPA_PER_USB_HS (2) --  High-speed USB connection
+	 *	- QMI_IPA_PER_USB_SS (3) --  Super-speed USB connection
+	 *  - QMI_IPA_PER_WLAN   (4) --  WLAN connection
+	 */
+
+	/* Optional */
+	/*  Downlink Accumulation Time limit */
+	__u8 dl_accumulation_time_limit_valid;
+	/* Must be set to true if dl_accumulation_time_limit is being passed */
+	__u32 dl_accumulation_time_limit;
+	/* Informs the remote driver about the time for which data
+	 * is accumulated in the downlink direction before it is pushed into the
+	 * IPA (downlink is with respect to the WWAN air interface)
+	 * - Units: milliseconds
+	 * - Maximum value: 255
+	 */
+
+	/* Optional */
+	/*  Downlink Accumulation Packet limit */
+	__u8 dl_accumulation_pkt_limit_valid;
+	/* Must be set to true if dl_accumulation_pkt_limit is being passed */
+	__u32 dl_accumulation_pkt_limit;
+	/* Informs the remote driver about the number of packets
+	 * that are to be accumulated in the downlink direction before it is
+	 * pushed into the IPA - Maximum value: 1023
+	 */
+
+	/* Optional */
+	/*  Downlink Accumulation Byte Limit */
+	__u8 dl_accumulation_byte_limit_valid;
+	/* Must be set to true if dl_accumulation_byte_limit is being passed */
+	__u32 dl_accumulation_byte_limit;
+	/* Informs the remote driver about the number of bytes
+	 * that are to be accumulated in the downlink direction before it
+	 * is pushed into the IPA - Maximum value: TBD
+	 */
+
+	/* Optional */
+	/*  Uplink Accumulation Time Limit */
+	__u8 ul_accumulation_time_limit_valid;
+	/* Must be set to true if ul_accumulation_time_limit is being passed */
+	__u32 ul_accumulation_time_limit;
+	/* Informs the remote driver about the time for which data
+	 * is to be accumulated in the uplink direction before it is pushed into
+	 * the IPA (downlink is with respect to the WWAN air interface).
+	 * - Units: milliseconds
+	 * - Maximum value: 255
+	 */
+
+	/* Optional */
+	/*  HW Control Flags */
+	__u8 hw_control_flags_valid;
+	/* Must be set to true if hw_control_flags is being passed */
+	__u32 hw_control_flags;
+	/* Informs the remote driver about the hardware control flags:
+	 *	- Bit 0: IPA_HW_FLAG_HALT_SYSTEM_ON_NON_TERMINAL_FAILURE --
+	 *	Indicates to the hardware that it must not continue with
+	 *	any subsequent operation even if the failure is not terminal
+	 *	- Bit 1: IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_ERORR --
+	 *	Indicates to the hardware that it is not required to report
+	 *	channel errors to the host.
+	 *	- Bit 2: IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_WAKE_UP --
+	 *	Indicates to the hardware that it is not required to generate
+	 *	wake-up events to the host.
+	 *	- Bit 4: IPA_HW_FLAG_WORK_OVER_DDR --
+	 *	Indicates to the hardware that it is accessing addresses in
+	 *  the DDR and not over PCIe
+	 *	- Bit 5: IPA_HW_FLAG_INTERRUPT_MODE_CTRL_FLAG --
+	 *	Indicates whether the device must
+	 *	raise an event to let the host know that it is going into an
+	 *	interrupt mode (no longer polling for data/buffer availability)
+	 */
+
+	/* Optional */
+	/*  Uplink MSI Event Threshold */
+	__u8 ul_msi_event_threshold_valid;
+	/* Must be set to true if ul_msi_event_threshold is being passed */
+	__u32 ul_msi_event_threshold;
+	/* Informs the remote driver about the threshold that will
+	 * cause an interrupt (MSI) to be fired to the host. This ensures
+	 * that the remote driver does not accumulate an excessive number of
+	 * events before firing an interrupt.
+	 * This threshold is applicable for data moved in the UL direction.
+	 * - Maximum value: 65535
+	 */
+
+	/* Optional */
+	/*  Downlink MSI Event Threshold */
+	__u8 dl_msi_event_threshold_valid;
+	/* Must be set to true if dl_msi_event_threshold is being passed */
+	__u32 dl_msi_event_threshold;
+	/* Informs the remote driver about the threshold that will
+	 * cause an interrupt (MSI) to be fired to the host. This ensures
+	 * that the remote driver does not accumulate an excessive number of
+	 * events before firing an interrupt
+	 * This threshold is applicable for data that is moved in the
+	 * DL direction - Maximum value: 65535
+	 */
+
+	/* Optional */
+	/*  Uplink Fifo Size */
+	__u8 ul_fifo_size_valid;
+	/* Must be set to true if ul_fifo_size is being passed */
+	__u32 ul_fifo_size;
+	/*
+	 * Informs the remote driver about the total Uplink xDCI
+	 *	buffer size that holds the complete aggregated frame
+	 *	or BAM data fifo size of the peripheral channel/pipe(in Bytes).
+	 *	This deprecates the max_aggr_frame_size field. This TLV
+	 *	deprecates max_aggr_frame_size TLV from version 1.9 onwards
+	 *	and the max_aggr_frame_size TLV will be ignored in the presence
+	 *	of this TLV.
+	 */
+
+	/* Optional */
+	/*  Downlink Fifo Size */
+	__u8 dl_fifo_size_valid;
+	/* Must be set to true if dl_fifo_size is being passed */
+	__u32 dl_fifo_size;
+	/*
+	 * Informs the remote driver about the total Downlink xDCI buffering
+	 *	capacity or BAM data fifo size of the peripheral channel/pipe.
+	 *	(In Bytes). dl_fifo_size = n * dl_buf_size. This deprecates the
+	 *	max_aggr_frame_size field. If this value is set
+	 *	max_aggr_frame_size is ignored.
+	 */
+
+	/* Optional */
+	/*  Downlink Buffer Size */
+	__u8 dl_buf_size_valid;
+	/* Must be set to true if dl_buf_size is being passed */
+	__u32 dl_buf_size;
+	/* Informs the remote driver about the single xDCI buffer size.
+	 * This is applicable only in GSI mode(in Bytes).\n
+	 */
+};  /* Message */
+
+/* Response Message; Notifies the remote driver of the configuration
+ * information
+ */
+struct ipa_config_resp_msg_v01 {
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/**<   Standard response type.*/
+}; /* Message */
+
+enum ipa_stats_type_enum_v01 {
+	IPA_STATS_TYPE_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+	/* To force a 32 bit signed enum.  Do not change or use */
+	QMI_IPA_STATS_TYPE_INVALID_V01 = 0,
+	/* Invalid stats type identifier */
+	QMI_IPA_STATS_TYPE_PIPE_V01 = 1,
+	/* Pipe stats type */
+	QMI_IPA_STATS_TYPE_FILTER_RULES_V01 = 2,
+	/* Filter rule stats type */
+	IPA_STATS_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+	/* To force a 32 bit signed enum.  Do not change or use */
+};
+
+struct ipa_pipe_stats_info_type_v01 {
+	__u32 pipe_index;
+	/* Pipe index for statistics to be retrieved. */
+
+	__u64 num_ipv4_packets;
+	/* Accumulated number of IPv4 packets over this pipe. */
+
+	__u64 num_ipv4_bytes;
+	/* Accumulated number of IPv4 bytes over this pipe. */
+
+	__u64 num_ipv6_packets;
+	/* Accumulated number of IPv6 packets over this pipe. */
+
+	__u64 num_ipv6_bytes;
+	/* Accumulated number of IPv6 bytes over this pipe. */
+};
+
+struct ipa_stats_type_filter_rule_v01 {
+	__u32 filter_rule_index;
+	/* Filter rule index for statistics to be retrieved. */
+
+	__u64 num_packets;
+	/* Accumulated number of packets over this filter rule. */
+};
+
+/* Request Message; Retrieve the data statistics collected on modem
+ * IPA driver.
+ */
+struct ipa_get_data_stats_req_msg_v01 {
+	/* Mandatory */
+	/*  Stats Type  */
+	enum ipa_stats_type_enum_v01 ipa_stats_type;
+	/* Indicates the type of statistics to be retrieved. */
+
+	/* Optional */
+	/* Reset Statistics */
+	__u8 reset_stats_valid;
+	/* Must be set to true if reset_stats is being passed */
+	__u8 reset_stats;
+	/* Option to reset the specific type of data statistics
+	 * currently collected.
+	 */
+};  /* Message */
+
+/* Response Message; Retrieve the data statistics collected
+ * on modem IPA driver.
+ */
+struct ipa_get_data_stats_resp_msg_v01 {
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type. */
+
+	/* Optional */
+	/*  Stats Type  */
+	__u8 ipa_stats_type_valid;
+	/* Must be set to true if ipa_stats_type is passed */
+	enum ipa_stats_type_enum_v01 ipa_stats_type;
+	/* Indicates the type of statistics that are retrieved. */
+
+	/* Optional */
+	/*  Uplink Source Pipe Statistics List */
+	__u8 ul_src_pipe_stats_list_valid;
+	/* Must be set to true if ul_src_pipe_stats_list is being passed */
+	__u32 ul_src_pipe_stats_list_len;
+	/* Must be set to # of elements in ul_src_pipe_stats_list */
+	struct ipa_pipe_stats_info_type_v01
+		ul_src_pipe_stats_list[QMI_IPA_MAX_PIPES_V01];
+	/* List of all Uplink pipe statistics that are retrieved. */
+
+	/* Optional */
+	/*  Downlink Destination Pipe Statistics List */
+	__u8 dl_dst_pipe_stats_list_valid;
+	/* Must be set to true if dl_dst_pipe_stats_list is being passed */
+	__u32 dl_dst_pipe_stats_list_len;
+	/* Must be set to # of elements in dl_dst_pipe_stats_list */
+	struct ipa_pipe_stats_info_type_v01
+		dl_dst_pipe_stats_list[QMI_IPA_MAX_PIPES_V01];
+	/* List of all Downlink pipe statistics that are retrieved. */
+
+	/* Optional */
+	/*  Downlink Filter Rule Stats List */
+	__u8 dl_filter_rule_stats_list_valid;
+	/* Must be set to true if dl_filter_rule_stats_list is being passed */
+	__u32 dl_filter_rule_stats_list_len;
+	/* Must be set to # of elements in dl_filter_rule_stats_list */
+	struct ipa_stats_type_filter_rule_v01
+		dl_filter_rule_stats_list[QMI_IPA_MAX_FILTERS_V01];
+	/* List of all Downlink filter rule statistics retrieved. */
+};  /* Message */
+
+struct ipa_apn_data_stats_info_type_v01 {
+	__u32 mux_id;
+	/* Indicates the MUX ID associated with the APN for which the data
+	 * usage statistics is queried
+	 */
+
+	__u64 num_ul_packets;
+	/* Accumulated number of uplink packets corresponding to
+	 * this Mux ID
+	 */
+
+	__u64 num_ul_bytes;
+	/* Accumulated number of uplink bytes corresponding to
+	 * this Mux ID
+	 */
+
+	__u64 num_dl_packets;
+	/* Accumulated number of downlink packets corresponding
+	 * to this Mux ID
+	 */
+
+	__u64 num_dl_bytes;
+	/* Accumulated number of downlink bytes corresponding to
+	 * this Mux ID
+	 */
+};  /* Type */
+
+/* Request Message; Retrieve the APN data statistics collected from modem */
+struct ipa_get_apn_data_stats_req_msg_v01 {
+	/* Optional */
+	/*  Mux ID List */
+	__u8 mux_id_list_valid;
+	/* Must be set to true if mux_id_list is being passed */
+	__u32 mux_id_list_len;
+	/* Must be set to # of elements in mux_id_list */
+	__u32 mux_id_list[QMI_IPA_MAX_APN_V01];
+	/* The list of MUX IDs associated with APNs for which the data usage
+	 * statistics is being retrieved
+	 */
+};  /* Message */
+
+/* Response Message; Retrieve the APN data statistics collected from modem */
+struct ipa_get_apn_data_stats_resp_msg_v01 {
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type.*/
+
+	/* Optional */
+	/* APN Data Statistics List */
+	__u8 apn_data_stats_list_valid;
+	/* Must be set to true if apn_data_stats_list is being passed */
+	__u32 apn_data_stats_list_len;
+	/* Must be set to # of elements in apn_data_stats_list */
+	struct ipa_apn_data_stats_info_type_v01
+		apn_data_stats_list[QMI_IPA_MAX_APN_V01];
+	/* List of APN data retrieved as per request on mux_id.
+	 * For now, only one APN monitoring is supported on modem driver.
+	 * Making this as list for expandability to support more APNs in future.
+	 */
+};  /* Message */
+
+struct ipa_data_usage_quota_info_type_v01 {
+	__u32 mux_id;
+	/* Indicates the MUX ID associated with the APN for which the data usage
+	 * quota needs to be set
+	 */
+
+	__u64 num_Mbytes;
+	/* Number of Mega-bytes of quota value to be set on this APN associated
+	 * with this Mux ID.
+	 */
+};  /* Type */
+
+#define IPA_DATA_WARNING_QUOTA
+
+/* Request Message; Master driver sets a data usage quota value on
+ * modem driver
+ */
+struct ipa_set_data_usage_quota_req_msg_v01 {
+	/* Optional */
+	/* APN Quota List */
+	__u8 apn_quota_list_valid;
+	/* Must be set to true if apn_quota_list is being passed */
+	__u32 apn_quota_list_len;
+	/* Must be set to # of elements in apn_quota_list */
+	struct ipa_data_usage_quota_info_type_v01
+		apn_quota_list[QMI_IPA_MAX_APN_V01];
+	/* The list of APNs on which a data usage quota to be set on modem
+	 * driver. For now, only one APN monitoring is supported on modem
+	 * driver. Making this as list for expandability to support more
+	 * APNs in future.
+	 */
+
+	/* Optional */
+	/* APN Warning List */
+	__u8 apn_warning_list_valid;
+	/* Must be set to true if apn_warning_list is being passed */
+	__u32 apn_warning_list_len;
+	/* Must be set to # of elements in apn_warning_list */
+	struct ipa_data_usage_quota_info_type_v01
+		apn_warning_list[QMI_IPA_MAX_APN_V01];
+	/* The list of APNs on which a data usage warning to be set on modem
+	 * driver. For now, only one APN monitoring is supported on modem
+	 * driver. Making this as list for expandability to support more
+	 * APNs in future.
+	 */
+
+};  /* Message */
+
+/* Response Message; Master driver sets a data usage on modem driver. */
+struct ipa_set_data_usage_quota_resp_msg_v01 {
+	/* Mandatory */
+	/* Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type.*/
+};  /* Message */
+
+/* Indication Message; Modem driver sends this indication to master
+ * driver when the data usage quota is reached
+ */
+struct ipa_data_usage_quota_reached_ind_msg_v01 {
+	/* Mandatory */
+	/*  APN Quota List */
+	struct ipa_data_usage_quota_info_type_v01 apn;
+	/* This message indicates which APN has the previously set quota
+	 * or warning reached. For now, only one APN monitoring is supported
+	 * on modem driver.
+	 */
+	/* Optional */
+	/* Warning Limit reached indication */
+	/* Must be set to true if is_warning_limit is being passed */
+	__u8 is_warning_limit_valid;
+	__u8 is_warning_limit;
+	/* If set to TRUE, Warning Limit is reached.
+	 * If set to FALSE, Quota Limit is reached.
+	 */
+};  /* Message */
+
+/* Request Message; Master driver request modem driver to terminate
+ * the current data usage quota monitoring session.
+ */
+struct ipa_stop_data_usage_quota_req_msg_v01 {
+	/* Optional */
+	/* Stop monitoring Quota Limit */
+	/* Must be set to true if is_quota_limit is being passed */
+	__u8 is_quota_limit_valid;
+	__u8 is_quota_limit;
+	/* If set to TRUE, Quota Limit will not be monitored */
+
+	/* Optional */
+	/* Stop monitoring Warning Limit */
+	/* Must be set to true if is_warning_limit is being passed */
+	__u8 is_warning_limit_valid;
+	__u8 is_warning_limit;
+	/* If set to TRUE, Warning Limit will not be monitored */
+};  /* Message */
+
+/* Response Message; Master driver request modem driver to terminate
+ * the current quota or warning limit monitoring session.
+ */
+struct ipa_stop_data_usage_quota_resp_msg_v01 {
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/**<   Standard response type.*/
+};  /* Message */
+
+/* Request Message; Requests installation of filtering rules in the hardware
+ * block on the remote side.
+ */
+struct ipa_install_fltr_rule_req_ex_msg_v01 {
+
+	/* Optional */
+	/*  Extended Filter Specification  */
+	__u8 filter_spec_ex_list_valid;
+	/* Must be set to true if filter_spec_ex_list is being passed */
+	__u32 filter_spec_ex_list_len;
+	/* Must be set to # of elements in filter_spec_ex_list */
+	struct ipa_filter_spec_ex_type_v01
+		filter_spec_ex_list[QMI_IPA_MAX_FILTERS_EX_V01];
+	/* List of filter specifications of filters that must be installed in
+	 * the IPAv3.x hardware.
+	 * The driver installing these rules must do so in the same order as
+	 * specified in this list.
+	 */
+
+	/* Optional */
+	/* Pipe Index to Install Rule */
+	__u8 source_pipe_index_valid;
+	/* Must be set to true if source_pipe_index is being passed */
+	__u32 source_pipe_index;
+	/* Pipe index to install the filter rule.
+	 * The requester may not always know the pipe indices. If not specified,
+	 * the receiver must install this rule on all pipes that it controls,
+	 * through which data may be fed into the IPA.
+	 */
+
+	/* Optional */
+	/* Total Number of IPv4 Filters in the Filter Spec List */
+	__u8 num_ipv4_filters_valid;
+	/* Must be set to true if num_ipv4_filters is being passed */
+	__u32 num_ipv4_filters;
+	/* Number of IPv4 rules included in the filter specification list. */
+
+	/* Optional */
+	/* Total Number of IPv6 Filters in the Filter Spec List */
+	__u8 num_ipv6_filters_valid;
+	/* Must be set to true if num_ipv6_filters is being passed */
+	__u32 num_ipv6_filters;
+	/* Number of IPv6 rules included in the filter specification list. */
+
+	/* Optional */
+	/* List of XLAT Filter Indices in the Filter Spec List */
+	__u8 xlat_filter_indices_list_valid;
+	/* Must be set to true if xlat_filter_indices_list is being passed */
+	__u32 xlat_filter_indices_list_len;
+	/* Must be set to # of elements in xlat_filter_indices_list */
+	__u32 xlat_filter_indices_list[QMI_IPA_MAX_FILTERS_EX_V01];
+	/* List of XLAT filter indices.
+	 * Filter rules at specified indices must be modified by the
+	 * receiver if the PDN is XLAT before installing them on the associated
+	 * IPA consumer pipe.
+	 */
+
+	/* Optional */
+	/* Extended Type 2 Filter Specification */
+	__u8 filter_spec_ex2_list_valid;
+	/* Must be set to true if filter_spec_ex2_list is being passed */
+	__u32 filter_spec_ex2_list_len;
+	/* Must be set to # of elements in filter_spec_ex2_list */
+	struct ipa_filter_spec_ex2_type_v01
+		filter_spec_ex2_list[QMI_IPA_MAX_FILTERS_V01];
+	/* Optional */
+	/* List of modem UL Filters in the Spec List which need be to
+	 * replicated with AP UL firewall filters
+	 */
+	__u8 ul_firewall_indices_list_valid;
+	/* Must be set to true if ul_firewall_indices_list is being passed */
+	__u32 ul_firewall_indices_list_len;
+	/* Must be set to # of elements in ul_firewall_indices_list */
+	__u32 ul_firewall_indices_list[QMI_IPA_MAX_FILTERS_V01];
+	/* List of UL firewall filter indices.
+	 * Filter rules at specified indices must be replicated across
+	 * the firewall filters by the receiver and installed on the
+	 * associated IPA consumer pipe.
+	 */
+};  /* Message */
+
+/* Response Message; Requests installation of filtering rules in the hardware
+ * block on the remote side.
+ */
+struct ipa_install_fltr_rule_resp_ex_msg_v01 {
+	/* Mandatory */
+	/* Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type.
+	 * Standard response type. Contains the following data members:
+	 * - qmi_result_type -- QMI_RESULT_SUCCESS or QMI_RESULT_FAILURE
+	 * - qmi_error_type  -- Error code. Possible error code values are
+	 *					 described in the error codes
+	 *					 section of each message
+	 *					 definition.
+	 */
+
+	/* Optional */
+	/* Rule ID List */
+	__u8 rule_id_valid;
+	__u32 rule_id_len;
+	__u32 rule_id[QMI_IPA_MAX_FILTERS_EX_V01];
+	/* List of rule IDs returned to the control point.
+	 * Any further reference to the rule is done using the filter rule ID
+	 * specified in this list.
+	 */
+};  /* Message */
+
+/*
+ * Request Message; Requests the modem IPA driver to enable or
+ * disable collection of per client statistics.
+ */
+struct ipa_enable_per_client_stats_req_msg_v01 {
+
+	/* Mandatory */
+	/* Collect statistics per client; */
+	__u8 enable_per_client_stats;
+	/*
+	 * Indicates whether to start or stop collecting
+	 * per client statistics.
+	 */
+};  /* Message */
+
+/*
+ * Response Message; Requests the modem IPA driver to enable or disable
+ * collection of per client statistics.
+ */
+struct ipa_enable_per_client_stats_resp_msg_v01 {
+
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type. */
+};  /* Message */
+
+struct ipa_per_client_stats_info_type_v01 {
+
+	__u32 client_id;
+	/*
+	 * Id of the client on APPS processor side for which Modem processor
+	 * needs to send uplink/downlink statistics.
+	 */
+
+	__u32 src_pipe_id;
+	/*
+	 * IPA consumer pipe on which client on APPS side sent uplink
+	 * data to modem.
+	 */
+
+	__u64 num_ul_ipv4_bytes;
+	/*
+	 * Accumulated number of uplink IPv4 bytes for a client.
+	 */
+
+	__u64 num_ul_ipv6_bytes;
+	/*
+	 * Accumulated number of uplink IPv6 bytes for a client.
+	 */
+
+	__u64 num_dl_ipv4_bytes;
+	/*
+	 * Accumulated number of downlink IPv4 bytes for a client.
+	 */
+
+	__u64 num_dl_ipv6_bytes;
+	/*
+	 * Accumulated number of downlink IPv6 bytes for a client.
+	 */
+
+
+	__u32 num_ul_ipv4_pkts;
+	/*
+	 * Accumulated number of uplink IPv4 packets for a client.
+	 */
+
+	__u32 num_ul_ipv6_pkts;
+	/*
+	 * Accumulated number of uplink IPv6 packets for a client.
+	 */
+
+	__u32 num_dl_ipv4_pkts;
+	/*
+	 * Accumulated number of downlink IPv4 packets for a client.
+	 */
+
+	__u32 num_dl_ipv6_pkts;
+	/*
+	 * Accumulated number of downlink IPv6 packets for a client.
+	 */
+};  /* Type */
+
+/*
+ * Request Message; Requests the modem IPA driver to provide statistics
+ * for a given client.
+ */
+struct ipa_get_stats_per_client_req_msg_v01 {
+
+	/* Mandatory */
+	/*  Client id */
+	__u32 client_id;
+	/*
+	 * Id of the client on APPS processor side for which Modem processor
+	 * needs to send uplink/downlink statistics. if client id is specified
+	 * as 0xffffffff, then Q6 will send the stats for all the clients of
+	 * the specified source pipe.
+	 */
+
+	/* Mandatory */
+	/*  Source pipe id */
+	__u32 src_pipe_id;
+	/*
+	 * IPA consumer pipe on which client on APPS side sent uplink
+	 * data to modem. In future, this implementation can be extended
+	 * to provide 0xffffffff as the source pipe id, where Q6 will send
+	 * the stats of all the clients across all different tethered-pipes.
+	 */
+
+	/* Optional */
+	/*  Reset client statistics. */
+	__u8 reset_stats_valid;
+	/* Must be set to true if reset_stats is being passed. */
+	__u8 reset_stats;
+	/*
+	 * Option to reset the statistics currently collected by modem for this
+	 * particular client.
+	 */
+};  /* Message */
+
+/*
+ * Response Message; Requests the modem IPA driver to provide statistics
+ * for a given client.
+ */
+struct ipa_get_stats_per_client_resp_msg_v01 {
+
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type. */
+
+	/* Optional */
+	/*  Per clients Statistics List */
+	__u8 per_client_stats_list_valid;
+	/* Must be set to true if per_client_stats_list is being passed. */
+	__u32 per_client_stats_list_len;
+	/* Must be set to # of elements in per_client_stats_list. */
+	struct ipa_per_client_stats_info_type_v01
+		per_client_stats_list[QMI_IPA_MAX_PER_CLIENTS_V01];
+	/*
+	 * List of all per client statistics that are retrieved.
+	 */
+};  /* Message */
+
+struct ipa_ul_firewall_rule_type_v01 {
+
+	enum ipa_ip_type_enum_v01 ip_type;
+	/*
+	 * IP type for which this rule is applicable.
+	 * The driver must identify the filter table (v6 or v4), and this
+	 * field is essential for that. Values:
+	 * - QMI_IPA_IP_TYPE_INVALID (0) --  Invalid IP type identifier
+	 * - QMI_IPA_IP_TYPE_V4 (1) --  IPv4 type
+	 * - QMI_IPA_IP_TYPE_V6 (2) --  IPv6 type
+	 */
+
+	struct ipa_filter_rule_type_v01 filter_rule;
+	/*
+	 * Rules in the filter specification. These rules are the
+	 * ones that are matched against fields in the packet.
+	 * Currently we only send IPv6 whitelist rules to Q6.
+	 */
+};  /* Type */
+
+/*
+ * Request Message; Requests the remote IPA driver to install uplink
+ * firewall rules.
+ */
+struct ipa_configure_ul_firewall_rules_req_msg_v01 {
+
+	/* Optional */
+	/*  Uplink Firewall Specification  */
+	__u32 firewall_rules_list_len;
+	/* Must be set to # of elements in firewall_rules_list. */
+	struct ipa_ul_firewall_rule_type_v01
+		firewall_rules_list[QMI_IPA_MAX_UL_FIREWALL_RULES_V01];
+	/*
+	 * List of uplink firewall specifications of filters that must be
+	 * installed.
+	 */
+
+	__u32 mux_id;
+	/*
+	 * QMAP Mux ID. As a part of the QMAP protocol,
+	 * several data calls may be multiplexed over the same physical
+	 * transport channel. This identifier is used to identify one
+	 * such data call. The maximum value for this identifier is 255.
+	 */
+
+	/* Optional */
+	__u8 disable_valid;
+	/* Must be set to true if disable is being passed. */
+	__u8 disable;
+	/*
+	 * Indicates whether uplink firewall needs to be enabled or disabled.
+	 */
+
+	/* Optional */
+	__u8 are_blacklist_filters_valid;
+	/* Must be set to true if are_blacklist_filters is being passed. */
+	__u8 are_blacklist_filters;
+	/*
+	 * Indicates whether the filters received as part of this message are
+	 * blacklist filters. i.e. drop uplink packets matching these rules.
+	 */
+};  /* Message */
+
+/*
+ * Response Message; Requests the remote IPA driver to install
+ * uplink firewall rules.
+ */
+struct ipa_configure_ul_firewall_rules_resp_msg_v01 {
+
+	/* Mandatory */
+	/* Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/*
+	 * Standard response type.
+	 * Standard response type. Contains the following data members:
+	 * qmi_result_type -- QMI_RESULT_SUCCESS or QMI_RESULT_FAILURE
+	 * qmi_error_type  -- Error code. Possible error code values are
+	 * described in the error codes section of each message definition.
+	 */
+};  /* Message */
+
+enum ipa_ul_firewall_status_enum_v01 {
+	IPA_UL_FIREWALL_STATUS_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+	/* To force a 32 bit signed enum.  Do not change or use*/
+	QMI_IPA_UL_FIREWALL_STATUS_SUCCESS_V01 = 0,
+	/* Indicates that the uplink firewall rules
+	 * are configured successfully.
+	 */
+	QMI_IPA_UL_FIREWALL_STATUS_FAILURE_V01 = 1,
+	/* Indicates that the uplink firewall rules
+	 * are not configured successfully.
+	 */
+	IPA_UL_FIREWALL_STATUS_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+	/* To force a 32 bit signed enum.  Do not change or use*/
+};
+
+struct ipa_ul_firewall_config_result_type_v01 {
+
+	enum ipa_ul_firewall_status_enum_v01 is_success;
+	/*
+	 * Indicates whether the uplink firewall rules are configured
+	 * successfully.
+	 */
+
+	__u32 mux_id;
+	/*
+	 * QMAP Mux ID. As a part of the QMAP protocol,
+	 * several data calls may be multiplexed over the same physical
+	 * transport channel. This identifier is used to identify one
+	 * such data call. The maximum value for this identifier is 255.
+	 */
+};
+
+/*
+ * Indication Message; Reports the result of configuring the
+ * uplink firewall rules.
+ */
+struct ipa_configure_ul_firewall_rules_ind_msg_v01 {
+	struct ipa_ul_firewall_config_result_type_v01 result;
+};  /* Message */
+
+
+struct ipa_mhi_ch_init_info_type_v01 {
+	__u8 ch_id;
+	/* Remote MHI channel ID */
+
+	__u8 er_id;
+	/* Remote MHI Event ring ID */
+
+	__u32 ch_doorbell_addr;
+	/* TR Channel Doorbell addr */
+
+	__u32 er_doorbell_addr;
+	/* Event ring Doorbell addr */
+
+	__u32 direction_type;
+	/* Direction type */
+};
+
+struct ipa_mhi_smmu_info_type_v01 {
+	__u64 iova_ctl_base_addr;
+	/* IOVA mapped Control Region base address */
+
+	__u64 iova_ctl_size;
+	/* IOVA Control region size */
+
+	__u64 iova_data_base_addr;
+	/* IOVA mapped Data Region base address */
+
+	__u64 iova_data_size;
+	/* IOVA Data Region size */
+};
+
+struct ipa_mhi_ready_indication_msg_v01 {
+	/* Mandatory */
+	__u32 ch_info_arr_len;
+	/* Must be set to # of elements in ch_info_arr. */
+	struct ipa_mhi_ch_init_info_type_v01
+		ch_info_arr[QMI_IPA_REMOTE_MHI_CHANNELS_NUM_MAX_V01];
+	/* Channel Information array */
+
+	/* Mandatory */
+	__u8 smmu_info_valid;
+	/* Must be set to true if smmu_info is being passed. */
+	struct ipa_mhi_smmu_info_type_v01 smmu_info;
+	/* SMMU enabled indication */
+};
+#define IPA_MHI_READY_INDICATION_MSG_V01_MAX_MSG_LEN 123
+
+struct ipa_mhi_mem_addr_info_type_v01 {
+	__u64 pa;
+	/* Memory region start physical addr */
+
+	__u64 iova;
+	/* Memory region start iova mapped addr */
+
+	__u64 size;
+	/* Memory region size */
+};
+
+enum ipa_mhi_brst_mode_enum_v01 {
+	IPA_MHI_BRST_MODE_ENUM_MIN_VAL_V01 = IPA_INT_MIN,
+
+	QMI_IPA_BURST_MODE_DEFAULT_V01 = 0,
+	/*
+	 * Default - burst mode enabled for hardware channels,
+	 * disabled for software channels
+	 */
+
+	QMI_IPA_BURST_MODE_ENABLED_V01 = 1,
+	/* Burst mode is enabled for this channel */
+
+	QMI_IPA_BURST_MODE_DISABLED_V01 = 2,
+	/* Burst mode is disabled for this channel */
+
+	IPA_MHI_BRST_MODE_ENUM_MAX_VAL_V01 = IPA_INT_MAX,
+};
+
+/* Transfer ring (TR) context for one remote MHI channel */
+struct ipa_mhi_tr_info_type_v01 {
+	__u8 ch_id;
+	/* TR Channel ID */
+
+	__u16 poll_cfg;
+	/*
+	 * Poll Configuration - Default or timer to poll the
+	 * MHI context in milliseconds
+	 */
+
+	enum ipa_mhi_brst_mode_enum_v01 brst_mode_type;
+	/* Burst mode configuration */
+
+	__u64 ring_iova;
+	/* IOVA mapped ring base address */
+
+	__u64 ring_len;
+	/* Ring Length in bytes */
+
+	__u64 rp;
+	/* IOVA mapped Read pointer address */
+
+	__u64 wp;
+	/* IOVA mapped write pointer address */
+};
+
+/* Event ring (ER) context for one remote MHI event ring */
+struct ipa_mhi_er_info_type_v01 {
+	__u8 er_id;
+	/* Event ring ID */
+
+	__u32 intmod_cycles;
+	/* Interrupt moderation cycles */
+
+	__u32 intmod_count;
+	/* Interrupt moderation count */
+
+	__u32 msi_addr;
+	/* IOVA mapped MSI address for this ER */
+
+	__u64 ring_iova;
+	/* IOVA mapped ring base address */
+
+	__u64 ring_len;
+	/* Ring length in bytes */
+
+	__u64 rp;
+	/* IOVA mapped Read pointer address */
+
+	__u64 wp;
+	/* IOVA mapped Write pointer address */
+};
+
+/*
+ * Request Message; passes TR/ER contexts and PA-IOVA mappings needed to
+ * allocate the remote MHI channels (see QMI_IPA_MHI_ALLOC_CHANNEL_REQ_V01).
+ */
+struct ipa_mhi_alloc_channel_req_msg_v01 {
+	/* Mandatory */
+	__u32 tr_info_arr_len;
+	/* Must be set to # of elements in tr_info_arr. */
+	struct ipa_mhi_tr_info_type_v01
+		tr_info_arr[QMI_IPA_REMOTE_MHI_CHANNELS_NUM_MAX_V01];
+	/* Array of TR context information for Remote MHI channels */
+
+	/* Mandatory */
+	__u32 er_info_arr_len;
+	/* Must be set to # of elements in er_info_arr. */
+	struct ipa_mhi_er_info_type_v01
+		er_info_arr[QMI_IPA_REMOTE_MHI_CHANNELS_NUM_MAX_V01];
+	/* Array of ER context information for Remote MHI channels */
+
+	/* Mandatory */
+	__u32 ctrl_addr_map_info_len;
+	/* Must be set to # of elements in ctrl_addr_map_info. */
+
+	struct ipa_mhi_mem_addr_info_type_v01
+	ctrl_addr_map_info[QMI_IPA_REMOTE_MHI_MEMORY_MAPPING_NUM_MAX_V01];
+	/*
+	 * List of PA-IOVA address mappings for control regions
+	 * used by Modem
+	 */
+
+	/* Mandatory */
+	__u32 data_addr_map_info_len;
+	/* Must be set to # of elements in data_addr_map_info. */
+	struct ipa_mhi_mem_addr_info_type_v01
+	data_addr_map_info[QMI_IPA_REMOTE_MHI_MEMORY_MAPPING_NUM_MAX_V01];
+	/* List of PA-IOVA address mappings for data regions used by Modem */
+};
+#define IPA_MHI_ALLOC_CHANNEL_REQ_MSG_V01_MAX_MSG_LEN 808
+
+/* Per-channel result entry for the MHI channel allocation response */
+struct ipa_mhi_ch_alloc_resp_type_v01 {
+	__u8 ch_id;
+	/* Remote MHI channel ID */
+
+	__u8 is_success;
+	/* Channel Allocation Status */
+};
+
+/* Response Message; per-channel results of remote MHI channel allocation */
+struct ipa_mhi_alloc_channel_resp_msg_v01 {
+	/* Mandatory */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type. Contains the following data members:
+	 * - qmi_result_type -- QMI_RESULT_SUCCESS or QMI_RESULT_FAILURE
+	 * - qmi_error_type  -- Error code. Possible error code values
+	 *			are described in the error codes section
+	 *			of each message definition.
+	 */
+
+	/* Optional */
+	__u8 alloc_resp_arr_valid;
+	/* Must be set to true if alloc_resp_arr is being passed. */
+	__u32 alloc_resp_arr_len;
+	/* Must be set to # of elements in alloc_resp_arr. */
+	struct ipa_mhi_ch_alloc_resp_type_v01
+		alloc_resp_arr[QMI_IPA_REMOTE_MHI_CHANNELS_NUM_MAX_V01];
+	/* MHI channel allocation response array */
+};
+#define IPA_MHI_ALLOC_CHANNEL_RESP_MSG_V01_MAX_MSG_LEN 23
+
+/* IPA clock rate levels, lowest (LOW_SVS) to highest (TURBO) */
+enum ipa_clock_rate_enum_v01 {
+	IPA_CLOCK_RATE_ENUM_MIN_ENUM_VAL_V01 = IPA_INT_MIN,
+
+	QMI_IPA_CLOCK_RATE_INVALID_V01 = 0,
+
+	QMI_IPA_CLOCK_RATE_LOW_SVS_V01 = 1,
+
+	QMI_IPA_CLOCK_RATE_SVS_V01 = 2,
+
+	QMI_IPA_CLOCK_RATE_NOMINAL_V01 = 3,
+
+	QMI_IPA_CLOCK_RATE_TURBO_V01 = 4,
+
+	IPA_CLOCK_RATE_ENUM_MAX_ENUM_VAL_V01 = IPA_INT_MAX,
+};
+
+/*
+ * Request Message; votes the MHI clock on or off, optionally carrying a
+ * throughput value and a requested IPA clock rate.
+ */
+struct ipa_mhi_clk_vote_req_msg_v01 {
+	/* Mandatory */
+	__u8 mhi_vote;
+	/*
+	 * MHI vote request
+	 * TRUE  - ON
+	 * FALSE - OFF
+	 */
+	/* Optional */
+	/*  Throughput Value */
+	__u8 tput_value_valid;
+	/* Must be set to true if tput_value is being passed. */
+	__u32 tput_value;
+
+	/* Optional */
+	/*  IPA Clock Rate */
+	__u8 clk_rate_valid;
+	/* Must be set to true if clk_rate is being passed. */
+	enum ipa_clock_rate_enum_v01 clk_rate;
+};
+#define IPA_MHI_CLK_VOTE_REQ_MSG_V01_MAX_MSG_LEN 18
+
+/* Response Message; result of the MHI clock vote request */
+struct ipa_mhi_clk_vote_resp_msg_v01 {
+	/* Mandatory */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type. Contains the following data members:
+	 * - qmi_result_type -- QMI_RESULT_SUCCESS or QMI_RESULT_FAILURE
+	 * - qmi_error_type  -- Error code. Possible error code values
+	 *			are described in the error codes section
+	 *			of each message definition.
+	 */
+};
+#define IPA_MHI_CLK_VOTE_RESP_MSG_V01_MAX_MSG_LEN 7
+
+/* Request Message; asks the remote side to clean up MHI state */
+struct ipa_mhi_cleanup_req_msg_v01 {
+	/* Optional */
+	__u8 cleanup_valid;
+	/* Must be set to true if cleanup is being passed. */
+	__u8 cleanup;
+	/*
+	 * A flag indicating the type of action:
+	 * 1 - Cleanup Request
+	 */
+};
+#define IPA_MHI_CLEANUP_REQ_MSG_V01_MAX_MSG_LEN 4
+
+/* Response Message; result of the MHI cleanup request */
+struct ipa_mhi_cleanup_resp_msg_v01 {
+	/* Mandatory */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type. Contains the following data members:
+	 * - qmi_result_type -- QMI_RESULT_SUCCESS or QMI_RESULT_FAILURE
+	 * - qmi_error_type  -- Error code. Possible error code values
+	 *			are described in the error codes section
+	 *			of each message definition.
+	 */
+};
+#define IPA_MHI_CLEANUP_RESP_MSG_V01_MAX_MSG_LEN 7
+
+/* Peripheral endpoint descriptor types */
+enum ipa_ep_desc_type_enum_v01 {
+	/* To force a 32 bit signed enum. Do not change or use */
+	IPA_EP_DESC_TYPE_ENUM_MIN_VAL_V01 = IPA_INT_MIN,
+	DATA_EP_DESC_TYPE_RESERVED_V01 = 0x00,
+	DATA_EP_DESC_TYPE_EMB_CONS_V01 = 0x01,
+	DATA_EP_DESC_TYPE_EMB_PROD_V01 = 0x02,
+	DATA_EP_DESC_TYPE_RSC_PROD_V01 = 0x03,
+	DATA_EP_DESC_TYPE_QDSS_PROD_V01 = 0x04,
+	DATA_EP_DESC_TYPE_DPL_PROD_V01 = 0x05,
+	DATA_EP_DESC_TYPE_TETH_CONS_V01 = 0x06,
+	DATA_EP_DESC_TYPE_TETH_PROD_V01 = 0x07,
+	DATA_EP_DESC_TYPE_TETH_RMNET_CONS_V01 = 0x08,
+	DATA_EP_DESC_TYPE_TETH_RMNET_PROD_V01 = 0x09,
+	DATA_EP_DESC_TYPE_EMB_FLOW_CTL_CONS_V01 = 0x0A,
+	DATA_EP_DESC_TYPE_EMB_FLOW_CTL_PROD_V01 = 0x0B,
+	IPA_EP_DESC_TYPE_ENUM_MAX_VAL_V01 = IPA_INT_MAX,
+};
+
+/* Interconnect types an endpoint may belong to */
+enum ipa_ic_type_enum_v01 {
+	/* To force a 32 bit signed enum. Do not change or use */
+	IPA_IC_TYPE_ENUM_MIN_VAL_V01 = IPA_INT_MIN,
+	DATA_IC_TYPE_RESERVED_V01 = 0x00,
+	DATA_IC_TYPE_MHI_V01 = 0x01,
+	DATA_IC_TYPE_MHI_PRIME_V01 = 0x02,
+	DATA_IC_TYPE_USB_V01 = 0x03,
+	DATA_IC_TYPE_AP_V01 = 0x04,
+	DATA_IC_TYPE_Q6_V01 = 0x05,
+	DATA_IC_TYPE_UC_V01 = 0x06,
+	IPA_IC_TYPE_ENUM_MAX_VAL_V01 = IPA_INT_MAX,
+};
+
+/* Endpoint status values */
+enum ipa_ep_status_type_v01 {
+	/* To force a 32 bit signed enum. Do not change or use */
+	IPA_EP_STATUS_TYPE_MIN_VAL_V01 = IPA_INT_MIN,
+	DATA_EP_STATUS_RESERVED_V01 = 0x00,
+	DATA_EP_STATUS_STATIC_V01 = 0x01,
+	DATA_EP_STATUS_CONNECTED_V01 = 0x02,
+	DATA_EP_STATUS_DISCONNECTED_V01 = 0x03,
+	IPA_EP_STATUS_TYPE_MAX_VAL_V01 = IPA_INT_MAX,
+};
+
+/* Identity and status of one peripheral endpoint */
+struct ipa_ep_id_type_v01 {
+	/* Interconnect type. See ipa_ic_desc_type_enum type */
+	enum ipa_ic_type_enum_v01 ic_type;
+	/* Peripheral end point type */
+	enum ipa_ep_desc_type_enum_v01 ep_type;
+	/* Peripheral interface number */
+	__u32 ep_id;
+	/* Status of endpoint */
+	enum ipa_ep_status_type_v01 ep_status;
+};
+
+/* Indication Message; describes the peripheral endpoints (ID + status) */
+struct ipa_endp_desc_indication_msg_v01 {
+	/* Optional */
+	__u8 ep_info_valid;
+	/* Must be set to true if ep_info is being passed */
+	__u32 ep_info_len;
+	/* Must be set to # of elements in ep_info */
+	struct ipa_ep_id_type_v01 ep_info[QMI_IPA_ENDP_DESC_NUM_MAX_V01];
+	/* Optional */
+	__u8 num_eps_valid;
+	/* Must be set to true if num_eps is being passed */
+	/* Total number of endpoints */
+	__u32 num_eps;
+}; /* Message */
+#define IPA_ENDP_DESC_INDICATION_MSG_V01_MAX_MSG_LEN 507
+
+/* Aggregation protocol types */
+enum ipa_aggr_enum_type_v01 {
+	IPA_AGGR_ENUM_TYPE_MIN_VAL_V01 = IPA_INT_MIN,
+	DATA_AGGR_TYPE_RESERVED_V01 = 0x00,
+	DATA_AGGR_TYPE_QMAP_V01 = 0x01,
+	DATA_AGGR_TYPE_QMAPv5_V01 = 0x02,
+	DATA_AGGR_TYPE_INHERITED_V01 = 0x03,
+	IPA_AGGR_ENUM_TYPE_MAX_VAL_V01 = IPA_INT_MAX,
+};
+
+/* Aggregation configuration for one MHI-prime endpoint */
+struct ipa_mhi_prime_aggr_info_type_v01 {
+	/* Interconnect type */
+	enum ipa_ic_type_enum_v01 ic_type;
+	/* Peripheral end point type */
+	enum ipa_ep_desc_type_enum_v01 ep_type;
+	/* Bytes count in KB */
+	__u32 bytes_count;
+	/* packet count */
+	__u32 pkt_count;
+	/* aggr_type */
+	enum ipa_aggr_enum_type_v01 aggr_type;
+}; /* Message */
+#define IPA_MHI_PRIME_AGGR_INFO_REQ_MSG_V01_MAX_MSG_LEN 631
+
+/* Request Message; carries per-endpoint aggregation info for MHI prime */
+struct ipa_mhi_prime_aggr_info_req_msg_v01 {
+	/* optional */
+	__u8 aggr_info_valid;
+	/* Aggregation info for MHI prime */
+	/* Must be set to true if aggr_info is being passed*/
+	__u32 aggr_info_len;
+	/* Must be set to # of elements in aggr_info */
+	struct ipa_mhi_prime_aggr_info_type_v01
+		aggr_info[QMI_IPA_ENDP_DESC_NUM_MAX_V01];
+	/* optional */
+	/* Must be set to true if num_eps is being passed*/
+	__u8 num_eps_valid;
+	/* Total number of endpoints */
+	__u32 num_eps;
+}; /* Message */
+#define IPA_MHI_PRIME_AGGR_INFO_RESP_MSG_V01_MAX_MSG_LEN 7
+
+/* Response Message; result of the MHI-prime aggregation info request */
+struct ipa_mhi_prime_aggr_info_resp_msg_v01 {
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+}; /* Message */
+
+/*
+ * Request Message; adds an offload connection (filter rules, XLAT filter
+ * indices, embedded-call mux ID, default MHI path).
+ */
+struct ipa_add_offload_connection_req_msg_v01 {
+	/* optional */
+	/* Must be set to true if num_ipv4_filters is being passed*/
+	__u8 num_ipv4_filters_valid;
+	/* Must be set to # of ipv4_filters*/
+	__u32 num_ipv4_filters;
+	/* optional */
+	/* Must be set to true if num_ipv6_filters is being passed*/
+	__u8 num_ipv6_filters_valid;
+	/* Must be set to # of ipv6_filters*/
+	__u32 num_ipv6_filters;
+	/* optional */
+	__u8 xlat_filter_indices_list_valid;
+	/* Must be set to true if xlat_filter_indices_list is being passed*/
+	__u32 xlat_filter_indices_list_len;
+	/* Must be set to # of  xlat_filter_indices_list*/
+	__u32 xlat_filter_indices_list[QMI_IPA_MAX_FILTERS_V01];
+	/* optional */
+	/* Must be set to true if filter_spec_ex2_list is being passed*/
+	__u8 filter_spec_ex2_list_valid;
+	/* Must be set to # of  filter_spec_ex2_list*/
+	__u32 filter_spec_ex2_list_len;
+	struct ipa_filter_spec_ex2_type_v01
+		filter_spec_ex2_list[QMI_IPA_MAX_FILTERS_V01];
+	/* Optional */
+	/*  Mux ID for embedded call */
+	__u8 embedded_call_mux_id_valid;
+	/* Must be set to true if embedded_call_mux_id is being passed */
+	__u32 embedded_call_mux_id;
+	/* Mux ID for the new embedded call */
+	/* Optional */
+	/*  Default MHI path */
+	__u8 default_mhi_path_valid;
+	/* Must be set to true if default_mhi_path is being passed */
+	__u8 default_mhi_path;
+	/* Default MHI path */
+}; /* Message */
+#define IPA_ADD_OFFLOAD_CONNECTION_REQ_MSG_V01_MAX_MSG_LEN 11361
+
+/*
+ * Response Message; result of adding an offload connection, mapping each
+ * installed filter rule identifier to its handle.
+ */
+struct ipa_add_offload_connection_resp_msg_v01 {
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* optional */
+	/* Must be set to true if filter_handle_list is being passed*/
+	__u8 filter_handle_list_valid;
+	/* Must be set to # of  filter_handle_list*/
+	__u32 filter_handle_list_len;
+	struct ipa_filter_rule_identifier_to_handle_map_v01
+		filter_handle_list[QMI_IPA_MAX_FILTERS_V01];
+}; /* Message */
+#define IPA_ADD_OFFLOAD_CONNECTION_RESP_MSG_V01_MAX_MSG_LEN 523
+
+/*
+ * Request Message; removes a previously added offload connection, either by
+ * explicit filter handles or by cleaning all rules.
+ */
+struct ipa_remove_offload_connection_req_msg_v01 {
+	/* optional */
+	/* Must be set to true if filter_handle_list is being passed*/
+	__u8 filter_handle_list_valid;
+	/* Must be set to # of  filter_handle_list*/
+	__u32 filter_handle_list_len;
+	struct ipa_filter_rule_identifier_to_handle_map_v01
+		filter_handle_list[QMI_IPA_MAX_FILTERS_V01];
+	/* Optional */
+	/*  Clean All rules */
+	__u8 clean_all_rules_valid;
+	/* Must be set to true if clean_all_rules is being passed */
+	__u8 clean_all_rules;
+	/* Clean All rules */
+}; /* Message */
+#define IPA_REMOVE_OFFLOAD_CONNECTION_REQ_MSG_V01_MAX_MSG_LEN 520
+
+/* Response Message; result of removing an offload connection */
+struct ipa_remove_offload_connection_resp_msg_v01 {
+	/* optional */
+	/* Must be set to true if resp is being passed*/
+	__u8 resp_valid;
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+}; /* Message */
+#define IPA_REMOVE_OFFLOAD_CONNECTION_RESP_MSG_V01_MAX_MSG_LEN 7
+
+/* Indication Message; reports a change in peak UL/DL bandwidth, in Kbps */
+struct ipa_bw_change_ind_msg_v01 {
+	/* optional */
+	/* Must be set to true if peak_bw_ul is being passed*/
+	__u8 peak_bw_ul_valid;
+	/* Must be set to true if peak_bw_dl is being passed*/
+	__u8 peak_bw_dl_valid;
+	/* Kbps */
+	__u32 peak_bw_ul;
+	/* Kbps */
+	__u32 peak_bw_dl;
+}; /* Message */
+#define IPA_BW_CHANGE_IND_MSG_V01_MAX_MSG_LEN 14
+
+/*
+ * NAT table move direction.
+ * NOTE(review): unlike the other enums in this file, this one carries no
+ * IPA_INT_MIN/IPA_INT_MAX sentinels forcing a 32-bit signed type — confirm
+ * the intended on-the-wire width against the QMI IDL.
+ */
+enum ipa_move_nat_type_enum_v01 {
+	QMI_IPA_MOVE_NAT_TO_DDR_V01 = 0,
+	QMI_IPA_MOVE_NAT_TO_SRAM_V01 = 1,
+};
+
+/*
+ * Request Message; Requests remote IPA driver to move IPA NAT table
+ * according to requested direction TO_DDR\TO_SRAM.
+ */
+struct ipa_move_nat_req_msg_v01 {
+	enum ipa_move_nat_type_enum_v01 nat_move_direction;
+};
+#define IPA_MOVE_NAT_REQ_MSG_V01_MAX_MSG_LEN 8
+
+/*
+ * Response Message; Requests remote IPA driver to move IPA NAT table
+ * according to requested direction TO_DDR\TO_SRAM.
+ */
+struct ipa_move_nat_resp_msg_v01 {
+
+	/* Mandatory */
+	/* Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/*
+	 * Standard response type. Contains the following data members:
+	 * qmi_result_type -- QMI_RESULT_SUCCESS or QMI_RESULT_FAILURE
+	 * qmi_error_type  -- Error code. Possible error code values are
+	 * described in the error codes section of each message definition.
+	 */
+};  /* Message */
+#define IPA_MOVE_NAT_RESP_MSG_V01_MAX_MSG_LEN 7
+
+/*
+ * Indication Message; Indication sent to the Modem IPA driver from
+ * master IPA driver about NAT table move result.
+ */
+struct ipa_move_nat_table_complt_ind_msg_v01 {
+	/* Mandatory */
+	/*  Master driver initialization completion status */
+	struct ipa_qmi_response_type_v01 nat_table_move_status;
+	/*	Indicates the status of the NAT table move. If everything went
+	 *	as expected, this field is set to SUCCESS. ERROR is set
+	 *	otherwise. Extended error info may be used to convey
+	 *	additional information about the error
+	 */
+};  /* Message */
+#define QMI_IPA_NAT_TABLE_MOVE_COMPLETE_IND_MAX_MSG_LEN_V01 7
+
+/* Service message ID definitions (each request/response pair shares an ID) */
+#define QMI_IPA_INDICATION_REGISTER_REQ_V01 0x0020
+#define QMI_IPA_INDICATION_REGISTER_RESP_V01 0x0020
+#define QMI_IPA_INIT_MODEM_DRIVER_REQ_V01 0x0021
+#define QMI_IPA_INIT_MODEM_DRIVER_RESP_V01 0x0021
+#define QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_V01 0x0022
+#define QMI_IPA_INSTALL_FILTER_RULE_REQ_V01 0x0023
+#define QMI_IPA_INSTALL_FILTER_RULE_RESP_V01 0x0023
+#define QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01 0x0024
+#define QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_V01 0x0024
+#define QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_REQ_V01 0x0025
+#define QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_RESP_V01 0x0025
+#define QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_REQ_V01 0x0026
+#define QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_V01 0x0026
+#define QMI_IPA_CONFIG_REQ_V01 0x0027
+#define QMI_IPA_CONFIG_RESP_V01 0x0027
+#define QMI_IPA_DISABLE_LINK_LOW_PWR_STATE_REQ_V01 0x0028
+#define QMI_IPA_DISABLE_LINK_LOW_PWR_STATE_RESP_V01 0x0028
+#define QMI_IPA_ENABLE_LINK_LOW_PWR_STATE_REQ_V01 0x0029
+#define QMI_IPA_ENABLE_LINK_LOW_PWR_STATE_RESP_V01 0x0029
+#define QMI_IPA_GET_DATA_STATS_REQ_V01 0x0030
+#define QMI_IPA_GET_DATA_STATS_RESP_V01 0x0030
+#define QMI_IPA_GET_APN_DATA_STATS_REQ_V01 0x0031
+#define QMI_IPA_GET_APN_DATA_STATS_RESP_V01 0x0031
+#define QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01 0x0032
+#define QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_V01 0x0032
+#define QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_V01 0x0033
+#define QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01 0x0034
+#define QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_V01 0x0034
+#define QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_V01 0x0035
+#define QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_V01 0x0035
+#define QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_V01 0x0037
+#define QMI_IPA_INSTALL_FILTER_RULE_EX_RESP_V01 0x0037
+#define QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_V01 0x0038
+#define QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_V01 0x0038
+#define QMI_IPA_GET_STATS_PER_CLIENT_REQ_V01 0x0039
+#define QMI_IPA_GET_STATS_PER_CLIENT_RESP_V01 0x0039
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_V01 0x003A
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_RESP_V01 0x003A
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_V01 0x003A
+#define QMI_IPA_MHI_CLK_VOTE_REQ_V01 0x003B
+#define QMI_IPA_MHI_CLK_VOTE_RESP_V01 0x003B
+#define QMI_IPA_MHI_READY_IND_V01 0x003C
+#define QMI_IPA_MHI_ALLOC_CHANNEL_REQ_V01 0x003D
+#define QMI_IPA_MHI_ALLOC_CHANNEL_RESP_V01 0x003D
+#define QMI_IPA_MHI_CLEANUP_REQ_V01 0x003E
+#define QMI_IPA_MHI_CLEANUP_RESP_V01 0x003E
+#define QMI_IPA_ENDP_DESC_INDICATION_V01 0x003F
+#define QMI_IPA_MHI_PRIME_AGGR_INFO_REQ_V01 0x0040
+#define QMI_IPA_MHI_PRIME_AGGR_INFO_RESP_V01 0x0040
+#define QMI_IPA_ADD_OFFLOAD_CONNECTION_REQ_V01 0x0041
+#define QMI_IPA_ADD_OFFLOAD_CONNECTION_RESP_V01 0x0041
+#define QMI_IPA_REMOVE_OFFLOAD_CONNECTION_REQ_V01 0x0042
+#define QMI_IPA_REMOVE_OFFLOAD_CONNECTION_RESP_V01 0x0042
+#define QMI_IPA_BW_CHANGE_INDICATION_V01 0x0044
+#define QMI_IPA_MOVE_NAT_REQ_V01 0x0046
+#define QMI_IPA_MOVE_NAT_RESP_V01 0x0046
+#define QMI_IPA_MOVE_NAT_COMPLETE_IND_V01 0x0046
+
+/* Maximum encoded (wire) lengths of the messages above */
+#define QMI_IPA_INIT_MODEM_DRIVER_REQ_MAX_MSG_LEN_V01 197
+#define QMI_IPA_INIT_MODEM_DRIVER_RESP_MAX_MSG_LEN_V01 25
+#define QMI_IPA_INDICATION_REGISTER_REQ_MAX_MSG_LEN_V01 16
+#define QMI_IPA_INDICATION_REGISTER_RESP_MAX_MSG_LEN_V01 7
+#define QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01 33705
+#define QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01 783
+#define QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01 1899
+#define QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01 7
+#define QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_MAX_MSG_LEN_V01 7
+#define QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_MAX_MSG_LEN_V01 19
+
+
+#define QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_REQ_MAX_MSG_LEN_V01 37
+#define QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_REQ_MAX_MSG_LEN_V01 7
+#define QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_RESP_MAX_MSG_LEN_V01 7
+#define QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_MAX_MSG_LEN_V01 7
+
+
+#define QMI_IPA_CONFIG_REQ_MAX_MSG_LEN_V01 102
+#define QMI_IPA_CONFIG_RESP_MAX_MSG_LEN_V01 7
+#define QMI_IPA_DISABLE_LINK_LOW_PWR_STATE_REQ_MAX_MSG_LEN_V01 18
+#define QMI_IPA_DISABLE_LINK_LOW_PWR_STATE_RESP_MAX_MSG_LEN_V01 7
+#define QMI_IPA_ENABLE_LINK_LOW_PWR_STATE_REQ_MAX_MSG_LEN_V01 7
+#define QMI_IPA_ENABLE_LINK_LOW_PWR_STATE_RESP_MAX_MSG_LEN_V01 7
+#define QMI_IPA_GET_DATA_STATS_REQ_MAX_MSG_LEN_V01 11
+#define QMI_IPA_GET_DATA_STATS_RESP_MAX_MSG_LEN_V01 2234
+#define QMI_IPA_GET_APN_DATA_STATS_REQ_MAX_MSG_LEN_V01 36
+#define QMI_IPA_GET_APN_DATA_STATS_RESP_MAX_MSG_LEN_V01 299
+#define QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_MAX_MSG_LEN_V01 200
+#define QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_MAX_MSG_LEN_V01 7
+#define QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_MAX_MSG_LEN_V01 8
+#define QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_MAX_MSG_LEN_V01 7
+
+#define QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_MAX_MSG_LEN_V01 4
+#define QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_MAX_MSG_LEN_V01 7
+
+#define QMI_IPA_INSTALL_FILTER_RULE_EX_REQ_MAX_MSG_LEN_V01 34021
+#define QMI_IPA_INSTALL_FILTER_RULE_EX_RESP_MAX_MSG_LEN_V01 523
+
+#define QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_MAX_MSG_LEN_V01 4
+#define QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_MAX_MSG_LEN_V01 7
+
+#define QMI_IPA_GET_STATS_PER_CLIENT_REQ_MAX_MSG_LEN_V01 18
+#define QMI_IPA_GET_STATS_PER_CLIENT_RESP_MAX_MSG_LEN_V01 3595
+
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_MAX_MSG_LEN_V01 9875
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_RESP_MAX_MSG_LEN_V01 7
+#define QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_MAX_MSG_LEN_V01 11
+/* Service Object Accessor */
+
+/* This is the largest MAX_MSG_LEN we have for all the messages
+ * we expect to receive. This argument will be used in
+ * qmi_handle_init to allocate a receive buffer for the socket
+ * associated with our qmi_handle
+ */
+#define QMI_IPA_MAX_MSG_LEN 22685
+
+#endif/* IPA_QMI_SERVICE_V01_H */

+ 3998 - 0
drivers/platform/msm/include/uapi/linux/msm_ipa.h

@@ -0,0 +1,3998 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _UAPI_MSM_IPA_H_
+#define _UAPI_MSM_IPA_H_
+
+#ifndef __KERNEL__
+#include <stdio.h>
+#include <stdint.h>
+#include <stddef.h>
+#include <sys/stat.h>
+#endif
+#include <linux/ioctl.h>
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+/**
+ * unique magic number of the IPA device
+ */
+#define IPA_IOC_MAGIC 0xCF
+
+/**
+ * IPA device full path
+ */
+#define IPA_DEV_NAME "/dev/ipa"
+
+/**
+ * IPA NAT table character device name
+ */
+#define IPA_NAT_DEV_NAME "ipaNatTable"
+
+/**
+ * IPA IPv6CT table character device name
+ */
+#define IPA_IPV6CT_DEV_NAME "ipaIpv6CTTable"
+
+/**
+ * name of the default routing tables for v4 and v6
+ */
+#define IPA_DFLT_RT_TBL_NAME "ipa_dflt_rt"
+
+/**
+ * name for default value of invalid protocol of NAT
+ */
+#define IPAHAL_NAT_INVALID_PROTOCOL   0xFF
+
+/**
+ * commands supported by IPA driver
+ */
+#define IPA_IOCTL_ADD_HDR                       0
+#define IPA_IOCTL_DEL_HDR                       1
+#define IPA_IOCTL_ADD_RT_RULE                   2
+#define IPA_IOCTL_DEL_RT_RULE                   3
+#define IPA_IOCTL_ADD_FLT_RULE                  4
+#define IPA_IOCTL_DEL_FLT_RULE                  5
+#define IPA_IOCTL_COMMIT_HDR                    6
+#define IPA_IOCTL_RESET_HDR                     7
+#define IPA_IOCTL_COMMIT_RT                     8
+#define IPA_IOCTL_RESET_RT                      9
+#define IPA_IOCTL_COMMIT_FLT                    10
+#define IPA_IOCTL_RESET_FLT                     11
+#define IPA_IOCTL_DUMP                          12
+#define IPA_IOCTL_GET_RT_TBL                    13
+#define IPA_IOCTL_PUT_RT_TBL                    14
+#define IPA_IOCTL_COPY_HDR                      15
+#define IPA_IOCTL_QUERY_INTF                    16
+#define IPA_IOCTL_QUERY_INTF_TX_PROPS           17
+#define IPA_IOCTL_QUERY_INTF_RX_PROPS           18
+#define IPA_IOCTL_GET_HDR                       19
+#define IPA_IOCTL_PUT_HDR                       20
+#define IPA_IOCTL_SET_FLT                       21
+#define IPA_IOCTL_ALLOC_NAT_MEM                 22
+#define IPA_IOCTL_V4_INIT_NAT                   23
+#define IPA_IOCTL_TABLE_DMA_CMD                 24
+#define IPA_IOCTL_NAT_DMA                       IPA_IOCTL_TABLE_DMA_CMD
+#define IPA_IOCTL_INIT_IPV6CT_TABLE             25
+#define IPA_IOCTL_V4_DEL_NAT                    26
+#define IPA_IOCTL_PULL_MSG                      27
+#define IPA_IOCTL_GET_NAT_OFFSET                28
+#define IPA_IOCTL_RM_ADD_DEPENDENCY             29
+#define IPA_IOCTL_RM_DEL_DEPENDENCY             30
+#define IPA_IOCTL_GENERATE_FLT_EQ               31
+#define IPA_IOCTL_QUERY_INTF_EXT_PROPS          32
+#define IPA_IOCTL_QUERY_EP_MAPPING              33
+#define IPA_IOCTL_QUERY_RT_TBL_INDEX            34
+#define IPA_IOCTL_WRITE_QMAPID                  35
+#define IPA_IOCTL_MDFY_FLT_RULE                 36
+#define IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD 37
+#define IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL 38
+#define IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED    39
+#define IPA_IOCTL_ADD_HDR_PROC_CTX              40
+#define IPA_IOCTL_DEL_HDR_PROC_CTX              41
+#define IPA_IOCTL_MDFY_RT_RULE                  42
+#define IPA_IOCTL_ADD_RT_RULE_AFTER             43
+#define IPA_IOCTL_ADD_FLT_RULE_AFTER            44
+#define IPA_IOCTL_GET_HW_VERSION                45
+#define IPA_IOCTL_ADD_RT_RULE_EXT               46
+#define IPA_IOCTL_ADD_VLAN_IFACE                47
+#define IPA_IOCTL_DEL_VLAN_IFACE                48
+#define IPA_IOCTL_ADD_L2TP_VLAN_MAPPING         49
+#define IPA_IOCTL_DEL_L2TP_VLAN_MAPPING         50
+#define IPA_IOCTL_NAT_MODIFY_PDN                51
+#define IPA_IOCTL_ALLOC_NAT_TABLE               52
+#define IPA_IOCTL_ALLOC_IPV6CT_TABLE            53
+#define IPA_IOCTL_DEL_NAT_TABLE                 54
+#define IPA_IOCTL_DEL_IPV6CT_TABLE              55
+#define IPA_IOCTL_CLEANUP                       56
+#define IPA_IOCTL_QUERY_WLAN_CLIENT             57
+#define IPA_IOCTL_GET_VLAN_MODE                 58
+#define IPA_IOCTL_ADD_BRIDGE_VLAN_MAPPING       59
+#define IPA_IOCTL_DEL_BRIDGE_VLAN_MAPPING       60
+#define IPA_IOCTL_ODL_QUERY_ADAPL_EP_INFO       61
+#define IPA_IOCTL_ODL_GET_AGG_BYTE_LIMIT        62
+#define IPA_IOCTL_ODL_QUERY_MODEM_CONFIG        63
+#define IPA_IOCTL_GSB_CONNECT                   64
+#define IPA_IOCTL_GSB_DISCONNECT                65
+#define IPA_IOCTL_WIGIG_FST_SWITCH              66
+#define IPA_IOCTL_ADD_RT_RULE_V2                67
+#define IPA_IOCTL_ADD_RT_RULE_EXT_V2            68
+#define IPA_IOCTL_ADD_RT_RULE_AFTER_V2          69
+#define IPA_IOCTL_MDFY_RT_RULE_V2               70
+#define IPA_IOCTL_ADD_FLT_RULE_V2               71
+#define IPA_IOCTL_ADD_FLT_RULE_AFTER_V2         72
+#define IPA_IOCTL_MDFY_FLT_RULE_V2              73
+#define IPA_IOCTL_FNR_COUNTER_ALLOC             74
+#define IPA_IOCTL_FNR_COUNTER_DEALLOC           75
+#define IPA_IOCTL_FNR_COUNTER_QUERY             76
+#define IPA_IOCTL_SET_FNR_COUNTER_INFO          77
+#define IPA_IOCTL_GET_NAT_IN_SRAM_INFO          78
+#define IPA_IOCTL_APP_CLOCK_VOTE                79
+#define IPA_IOCTL_PDN_CONFIG                    80
+#define IPA_IOCTL_SET_MAC_FLT                   81
+#define IPA_IOCTL_GET_PHERIPHERAL_EP_INFO       82
+#define IPA_IOCTL_ADD_UC_ACT_ENTRY              83
+#define IPA_IOCTL_DEL_UC_ACT_ENTRY              84
+#define IPA_IOCTL_SET_SW_FLT                    85
+#define IPA_IOCTL_SET_PKT_THRESHOLD             87
+#define IPA_IOCTL_ADD_EoGRE_MAPPING             88
+#define IPA_IOCTL_DEL_EoGRE_MAPPING             89
+#define IPA_IOCTL_SET_IPPT_SW_FLT               90
+#define IPA_IOCTL_ADD_MACSEC_MAPPING            92
+#define IPA_IOCTL_DEL_MACSEC_MAPPING            93
+#define IPA_IOCTL_SET_NAT_EXC_RT_TBL_IDX        94
+#define IPA_IOCTL_SET_CONN_TRACK_EXC_RT_TBL_IDX 95
+#define IPA_IOCTL_COAL_EVICT_POLICY             96
+#define IPA_IOCTL_SET_EXT_ROUTER_MODE           97
+/**
+ * max size of the header to be inserted
+ */
+#define IPA_HDR_MAX_SIZE 255
+
+/**
+ * max size of the name of the resource (routing table, header)
+ */
+#define IPA_RESOURCE_NAME_MAX 32
+
+/**
+ * max number of interface properties
+ */
+#define IPA_NUM_PROPS_MAX 35
+
+/**
+ * size of the mac address
+ */
+#define IPA_MAC_ADDR_SIZE  6
+
+/**
+ * max number of mbim streams
+ */
+#define IPA_MBIM_MAX_STREAM_NUM 8
+
+/**
+ * size of the IPv6 gateway address in WAN messages,
+ * expressed in 32-bit words (4 x 32 = 128 bits)
+ */
+#define IPA_WAN_MSG_IPv6_ADDR_GW_LEN 4
+
+/**
+ * max number of lan clients supported per device type
+ * for LAN stats via HW.
+ */
+#define IPA_MAX_NUM_HW_PATH_CLIENTS 16
+
+/**
+ * max number of destination pipes possible for a client.
+ */
+#define QMI_IPA_MAX_CLIENT_DST_PIPES 4
+
+/**
+ * Max number of clients supported for mac based exception
+ */
+
+#define IPA_MAX_NUM_MAC_FLT 32
+#define IPA_MAX_NUM_IPv4_SEGS_FLT 16
+#define IPA_MAX_NUM_IFACE_FLT 4
+
+
+/**
+ * MAX number of the FLT_RT stats counter supported.
+ */
+#define IPA_MAX_FLT_RT_CNT_INDEX (128)
+#define IPA_FLT_RT_HW_COUNTER (120)
+#define IPA_FLT_RT_SW_COUNTER \
+	(IPA_MAX_FLT_RT_CNT_INDEX - IPA_FLT_RT_HW_COUNTER)
+#define IPA_MAX_FLT_RT_CLIENTS 60
+
+/**
+ * Max number of ports/IPs IPPT exception
+ */
+
+#define IPA_MAX_IPPT_NUM_PORT_FLT 5
+
+/**
+ * New feature flag for CV2X config.
+ */
+
+#define IPA_CV2X_SUPPORT
+
+/**
+ * the attributes of the rule (routing or filtering)
+ */
+#define IPA_FLT_TOS			(1ul << 0)
+#define IPA_FLT_PROTOCOL		(1ul << 1)
+#define IPA_FLT_SRC_ADDR		(1ul << 2)
+#define IPA_FLT_DST_ADDR		(1ul << 3)
+#define IPA_FLT_SRC_PORT_RANGE		(1ul << 4)
+#define IPA_FLT_DST_PORT_RANGE		(1ul << 5)
+#define IPA_FLT_TYPE			(1ul << 6)
+#define IPA_FLT_CODE			(1ul << 7)
+#define IPA_FLT_SPI			(1ul << 8)
+#define IPA_FLT_SRC_PORT		(1ul << 9)
+#define IPA_FLT_DST_PORT		(1ul << 10)
+#define IPA_FLT_TC			(1ul << 11)
+#define IPA_FLT_FLOW_LABEL		(1ul << 12)
+#define IPA_FLT_NEXT_HDR		(1ul << 13)
+#define IPA_FLT_META_DATA		(1ul << 14)
+#define IPA_FLT_FRAGMENT		(1ul << 15)
+#define IPA_FLT_TOS_MASKED		(1ul << 16)
+#define IPA_FLT_MAC_SRC_ADDR_ETHER_II	(1ul << 17)
+#define IPA_FLT_MAC_DST_ADDR_ETHER_II	(1ul << 18)
+#define IPA_FLT_MAC_SRC_ADDR_802_3	(1ul << 19)
+#define IPA_FLT_MAC_DST_ADDR_802_3	(1ul << 20)
+#define IPA_FLT_MAC_ETHER_TYPE		(1ul << 21)
+#define IPA_FLT_MAC_DST_ADDR_L2TP	(1ul << 22)
+#define IPA_FLT_TCP_SYN			(1ul << 23)
+#define IPA_FLT_TCP_SYN_L2TP		(1ul << 24)
+#define IPA_FLT_L2TP_INNER_IP_TYPE	(1ul << 25)
+#define IPA_FLT_L2TP_INNER_IPV4_DST_ADDR (1ul << 26)
+#define IPA_FLT_IS_PURE_ACK		(1ul << 27)
+#define IPA_FLT_VLAN_ID			(1ul << 28)
+#define IPA_FLT_MAC_SRC_ADDR_802_1Q	(1ul << 29)
+#define IPA_FLT_MAC_DST_ADDR_802_1Q	(1ul << 30)
+#define IPA_FLT_L2TP_UDP_INNER_MAC_DST_ADDR (1ul << 31)
+
+/* Extended attributes for the rule (routing or filtering) */
+#define IPA_FLT_EXT_L2TP_UDP_TCP_SYN        (1ul << 0)
+#define IPA_FLT_EXT_L2TP_UDP_INNER_ETHER_TYPE       (1ul << 1)
+#define IPA_FLT_EXT_MTU     (1ul << 2)
+#define IPA_FLT_EXT_L2TP_UDP_INNER_NEXT_HDR		(1ul << 3)
+#define IPA_FLT_EXT_NEXT_HDR				(1ul << 4)
+
+
+/**
+ * maximal number of NAT PDNs in the PDN config table
+ */
+#define IPA_MAX_PDN_NUM 16
+#define IPA_MAX_PDN_NUM_v4 5
+
+/**
+ * Macros duplicated from ipa_lnx_spearhead_stats.h and
+ * ipa_lnx_stats.h. All three macros should match.
+ * This needs to be updated whenever the header file structure
+ * and structure length macros are updated to match exactly
+ * the same. This is done to overcome backward and forward
+ * compatibility between userspace and driver spearhead structures.
+ */
+/* IPA Linux basic stats structure macros */
+#define IPA_LNX_PG_RECYCLE_STATS_STRUCT_LEN 32
+#define IPA_LNX_EXCEPTION_STATS_STRUCT_LEN 40
+#define IPA_LNX_ODL_EP_STATS_STRUCT_LEN 16
+#define IPA_LNX_HOLB_DISCARD_STATS_STRUCT_LEN 16
+#define IPA_LNX_HOLB_MONITOR_STATS_STRUCT_LEN 16
+#define IPA_LNX_HOLB_DROP_AND_MON_STATS_STRUCT_LEN (8 + 16 + 16)
+#define IPA_LNX_GENERIC_STATS_STRUCT_LEN (40 + 32 + 40 + 16 + 40)
+/* IPA Linux clock stats structures */
+#define IPA_LNX_PM_CLIENT_STATS_STRUCT_LEN 24
+#define IPA_LNX_CLOCK_STATS_STRUCT_LEN (24 + 24)
+/* Generic instance structures */
+#define IPA_LNX_GSI_RX_DEBUG_STATS_STRUCT_LEN 48
+#define IPA_LNX_GSI_TX_DEBUG_STATS_STRUCT_LEN 56
+#define IPA_LNX_GSI_DEBUG_STATS_STRUCT_LEN (8 + 48 + 56)
+#define IPA_LNX_PIPE_INFO_STATS_STRUCT_LEN 120
+/* IPA Linux wlan instance stats structures */
+#define IPA_LNX_WLAN_INSTANCE_INFO_STRUCT_LEN (32 + 112 + 120)
+#define IPA_LNX_WLAN_INST_STATS_STRUCT_LEN (8 + 264)
+/* IPA Linux eth instance stats structures */
+#define IPA_LNX_ETH_INSTANCE_INFO_STRUCT_LEN (16 + 112 + 120)
+#define IPA_LNX_ETH_INST_STATS_STRUCT_LEN (8 + 248)
+/* IPA Linux usb instance stats structures */
+#define IPA_LNX_USB_INSTANCE_INFO_STRUCT_LEN (16 + 112 + 120)
+#define IPA_LNX_USB_INST_STATS_STRUCT_LEN (8 + 248)
+/* IPA Linux mhip instance stats structures */
+#define IPA_LNX_MHIP_INSTANCE_INFO_STRUCT_LEN (16 + 112 + 120)
+#define IPA_LNX_MHIP_INST_STATS_STRUCT_LEN (8 + 248)
+/* IPA Linux consolidated stats structure */
+#define IPA_LNX_CONSOLIDATED_STATS_STRUCT_LEN (8 + 48)
+/* IPA Linux Instance allocation info structures */
+#define IPA_LNX_EACH_INST_ALLOC_INFO_STRUCT_LEN (24 + 12 + 12 + 16)
+#define IPA_LNX_STATS_ALL_INFO_STRUCT_LEN (32 + 128 + 128 + 128)
+#define IPA_LNX_STATS_SPEARHEAD_CTX_STRUCT_LEN (8 + 4 + 416)
+
+/**
+ * enum ipa_client_type - names for the various IPA "clients"
+ * these are from the perspective of the clients, for e.g.
+ * HSIC1_PROD means HSIC client is the producer and IPA is the
+ * consumer.
+ * PROD clients are always even, and CONS clients are always odd.
+ * Add new clients in the end of the list or replace reserved one,
+ * update IPA_CLIENT_MAX and update the strings array ipa_clients_strings[]
+ * while keeping the ordering of the clients the same
+ */
+enum ipa_client_type {
+	IPA_CLIENT_HSIC1_PROD			= 0,
+	IPA_CLIENT_HSIC1_CONS			= 1,
+
+	IPA_CLIENT_HSIC2_PROD			= 2,
+	IPA_CLIENT_HSIC2_CONS			= 3,
+
+	IPA_CLIENT_HSIC3_PROD			= 4,
+	IPA_CLIENT_HSIC3_CONS			= 5,
+
+	IPA_CLIENT_HSIC4_PROD			= 6,
+	IPA_CLIENT_HSIC4_CONS			= 7,
+
+	IPA_CLIENT_HSIC5_PROD			= 8,
+	IPA_CLIENT_HSIC5_CONS			= 9,
+
+	IPA_CLIENT_WLAN1_PROD			= 10,
+	IPA_CLIENT_WLAN1_CONS			= 11,
+
+	IPA_CLIENT_A5_WLAN_AMPDU_PROD		= 12,
+	IPA_CLIENT_WLAN2_CONS			= 13,
+
+	IPA_CLIENT_WLAN3_PROD			= 14,
+	IPA_CLIENT_WLAN3_CONS			= 15,
+
+	/* RESERVED PROD			= 16, */
+	IPA_CLIENT_WLAN4_CONS			= 17,
+
+	IPA_CLIENT_USB_PROD			= 18,
+	IPA_CLIENT_USB_CONS			= 19,
+
+	IPA_CLIENT_USB2_PROD			= 20,
+	IPA_CLIENT_USB2_CONS			= 21,
+
+	IPA_CLIENT_USB3_PROD			= 22,
+	IPA_CLIENT_USB3_CONS			= 23,
+
+	IPA_CLIENT_USB4_PROD			= 24,
+	IPA_CLIENT_USB4_CONS			= 25,
+
+	IPA_CLIENT_UC_USB_PROD			= 26,
+	IPA_CLIENT_USB_DPL_CONS			= 27,
+
+	IPA_CLIENT_A2_EMBEDDED_PROD		= 28,
+	IPA_CLIENT_A2_EMBEDDED_CONS		= 29,
+
+	IPA_CLIENT_A2_TETHERED_PROD		= 30,
+	IPA_CLIENT_A2_TETHERED_CONS		= 31,
+
+	IPA_CLIENT_APPS_LAN_PROD		= 32,
+	IPA_CLIENT_APPS_LAN_CONS		= 33,
+
+	IPA_CLIENT_APPS_WAN_PROD		= 34,
+	IPA_CLIENT_APPS_LAN_WAN_PROD = IPA_CLIENT_APPS_WAN_PROD,
+	IPA_CLIENT_APPS_WAN_CONS		= 35,
+
+	IPA_CLIENT_APPS_CMD_PROD		= 36,
+	IPA_CLIENT_A5_LAN_WAN_CONS		= 37,
+
+	IPA_CLIENT_ODU_PROD			= 38,
+	IPA_CLIENT_ODU_EMB_CONS			= 39,
+
+	/* RESERVED PROD			= 40, */
+	IPA_CLIENT_ODU_TETH_CONS		= 41,
+
+	IPA_CLIENT_MHI_PROD			= 42,
+	IPA_CLIENT_MHI_CONS			= 43,
+
+	IPA_CLIENT_MEMCPY_DMA_SYNC_PROD		= 44,
+	IPA_CLIENT_MEMCPY_DMA_SYNC_CONS		= 45,
+
+	IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD	= 46,
+	IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS	= 47,
+
+	IPA_CLIENT_ETHERNET_PROD		= 48,
+	IPA_CLIENT_ETHERNET_CONS		= 49,
+
+	IPA_CLIENT_Q6_LAN_PROD			= 50,
+	IPA_CLIENT_Q6_LAN_CONS			= 51,
+
+	IPA_CLIENT_Q6_WAN_PROD			= 52,
+	IPA_CLIENT_Q6_WAN_CONS			= 53,
+
+	IPA_CLIENT_Q6_CMD_PROD			= 54,
+	IPA_CLIENT_Q6_DUN_CONS			= 55,
+
+	IPA_CLIENT_Q6_DECOMP_PROD		= 56,
+	IPA_CLIENT_Q6_DECOMP_CONS		= 57,
+
+	IPA_CLIENT_Q6_DECOMP2_PROD		= 58,
+	IPA_CLIENT_Q6_DECOMP2_CONS		= 59,
+
+	/* RESERVED PROD			= 60, */
+	IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS	= 61,
+
+	IPA_CLIENT_TEST_PROD			= 62,
+	IPA_CLIENT_TEST_CONS			= 63,
+
+	IPA_CLIENT_TEST1_PROD			= 64,
+	IPA_CLIENT_TEST1_CONS			= 65,
+
+	IPA_CLIENT_TEST2_PROD			= 66,
+	IPA_CLIENT_TEST2_CONS			= 67,
+
+	IPA_CLIENT_TEST3_PROD			= 68,
+	IPA_CLIENT_TEST3_CONS			= 69,
+
+	IPA_CLIENT_TEST4_PROD			= 70,
+	IPA_CLIENT_TEST4_CONS			= 71,
+
+	/* RESERVED PROD			= 72, */
+	IPA_CLIENT_DUMMY_CONS			= 73,
+
+	IPA_CLIENT_Q6_DL_NLO_DATA_PROD		= 74,
+	IPA_CLIENT_Q6_UL_NLO_DATA_CONS		= 75,
+
+	/* RESERVED PROD			= 76, */
+	IPA_CLIENT_Q6_UL_NLO_ACK_CONS		= 77,
+
+	/* RESERVED PROD			= 78, */
+	IPA_CLIENT_Q6_QBAP_STATUS_CONS		= 79,
+
+	/* RESERVED PROD			= 80, */
+	IPA_CLIENT_MHI_DPL_CONS			= 81,
+
+	/* RESERVED PROD			= 82, */
+	IPA_CLIENT_ODL_DPL_CONS			= 83,
+
+	IPA_CLIENT_Q6_AUDIO_DMA_MHI_PROD	= 84,
+	IPA_CLIENT_Q6_AUDIO_DMA_MHI_CONS	= 85,
+
+	IPA_CLIENT_WIGIG_PROD			= 86,
+	IPA_CLIENT_WIGIG1_CONS			= 87,
+
+	/* RESERVED PROD			= 88, */
+	IPA_CLIENT_WIGIG2_CONS			= 89,
+
+	/* RESERVED PROD			= 90, */
+	IPA_CLIENT_WIGIG3_CONS			= 91,
+
+	/* RESERVED PROD			= 92, */
+	IPA_CLIENT_WIGIG4_CONS			= 93,
+
+	/* RESERVED PROD			= 94, */
+	IPA_CLIENT_APPS_WAN_COAL_CONS		= 95,
+
+	IPA_CLIENT_MHI_PRIME_TETH_PROD		= 96,
+	IPA_CLIENT_MHI_PRIME_TETH_CONS		= 97,
+
+	IPA_CLIENT_MHI_PRIME_RMNET_PROD		= 98,
+	IPA_CLIENT_MHI_PRIME_RMNET_CONS		= 99,
+
+	IPA_CLIENT_MHI_PRIME_DPL_PROD		= 100,
+	IPA_CLIENT_MHI_COAL_CONS			= 101,
+
+	IPA_CLIENT_AQC_ETHERNET_PROD		= 102,
+	IPA_CLIENT_AQC_ETHERNET_CONS		= 103,
+
+	IPA_CLIENT_APPS_WAN_LOW_LAT_PROD	= 104,
+	IPA_CLIENT_APPS_WAN_LOW_LAT_CONS	= 105,
+
+	IPA_CLIENT_QDSS_PROD    = 106,
+	IPA_CLIENT_MHI_QDSS_CONS        = 107,
+
+	IPA_CLIENT_RTK_ETHERNET_PROD		= 108,
+	IPA_CLIENT_RTK_ETHERNET_CONS		= 109,
+
+	IPA_CLIENT_MHI_LOW_LAT_PROD		= 110,
+	IPA_CLIENT_MHI_LOW_LAT_CONS		= 111,
+
+	IPA_CLIENT_MHI2_PROD	= 112,
+	IPA_CLIENT_MHI2_CONS	= 113,
+
+	IPA_CLIENT_Q6_CV2X_PROD	= 114,
+	IPA_CLIENT_Q6_CV2X_CONS	= 115,
+
+	IPA_CLIENT_ETHERNET2_PROD = 116,
+	IPA_CLIENT_ETHERNET2_CONS = 117,
+
+	/* RESERVED PROD			= 118, */
+	IPA_CLIENT_WLAN2_CONS1			= 119,
+
+	IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD	= 120,
+	IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS	= 121,
+
+	IPA_CLIENT_Q6_DL_NLO_LL_DATA_PROD		= 122,
+	/* RESERVED CONS			= 123, */
+
+	/* RESERVED PROD                        = 124, */
+	IPA_CLIENT_TPUT_CONS                    = 125,
+
+	/* RESERVED PROD                        = 126, */
+	IPA_CLIENT_APPS_LAN_COAL_CONS           = 127,
+
+	IPA_CLIENT_IPSEC_DECAP_PROD		= 128,
+	IPA_CLIENT_IPSEC_DECAP_RECOVERABLE_ERR_CONS = 129,
+
+	IPA_CLIENT_IPSEC_ENCAP_PROD		= 130,
+	IPA_CLIENT_IPSEC_DECAP_NON_RECOVERABLE_ERR_CONS = 131,
+
+	IPA_CLIENT_Q6_DL_NLO_DATA_XLAT_PROD     = 132,
+	IPA_CLIENT_IPSEC_ENCAP_ERR_CONS		= 133,
+};
+
+#define IPA_CLIENT_MAX (IPA_CLIENT_IPSEC_ENCAP_ERR_CONS + 1)
+
+/*
+ * Compatibility alias (WLAN2_PROD) plus self-referential no-op defines.
+ * The "#define X X" idiom makes each enum constant visible to the
+ * preprocessor, so client code can probe for its presence with #ifdef
+ * (enum constants alone cannot be tested at preprocessor time).
+ */
+#define IPA_CLIENT_WLAN2_PROD IPA_CLIENT_A5_WLAN_AMPDU_PROD
+#define IPA_CLIENT_Q6_DL_NLO_DATA_PROD IPA_CLIENT_Q6_DL_NLO_DATA_PROD
+#define IPA_CLIENT_Q6_UL_NLO_ACK_CONS IPA_CLIENT_Q6_UL_NLO_ACK_CONS
+#define IPA_CLIENT_Q6_QBAP_STATUS_CONS IPA_CLIENT_Q6_QBAP_STATUS_CONS
+#define IPA_CLIENT_MHI_DPL_CONS IPA_CLIENT_MHI_DPL_CONS
+#define IPA_CLIENT_Q6_AUDIO_DMA_MHI_PROD IPA_CLIENT_Q6_AUDIO_DMA_MHI_PROD
+#define IPA_CLIENT_Q6_AUDIO_DMA_MHI_CONS IPA_CLIENT_Q6_AUDIO_DMA_MHI_CONS
+#define IPA_CLIENT_WIGIG_PROD IPA_CLIENT_WIGIG_PROD
+#define IPA_CLIENT_WIGIG1_CONS IPA_CLIENT_WIGIG1_CONS
+#define IPA_CLIENT_WIGIG2_CONS IPA_CLIENT_WIGIG2_CONS
+#define IPA_CLIENT_WIGIG3_CONS IPA_CLIENT_WIGIG3_CONS
+#define IPA_CLIENT_WIGIG4_CONS IPA_CLIENT_WIGIG4_CONS
+#define IPA_CLIENT_APPS_WAN_COAL_CONS IPA_CLIENT_APPS_WAN_COAL_CONS
+#define IPA_CLIENT_MHI_PRIME_TETH_PROD IPA_CLIENT_MHI_PRIME_TETH_PROD
+#define IPA_CLIENT_MHI_PRIME_TETH_CONS IPA_CLIENT_MHI_PRIME_TETH_CONS
+#define IPA_CLIENT_MHI_PRIME_RMNET_PROD IPA_CLIENT_MHI_PRIME_RMNET_PROD
+#define IPA_CLIENT_MHI_PRIME_RMNET_CONS IPA_CLIENT_MHI_PRIME_RMNET_CONS
+#define IPA_CLIENT_MHI_PRIME_DPL_PROD IPA_CLIENT_MHI_PRIME_DPL_PROD
+#define IPA_CLIENT_AQC_ETHERNET_PROD IPA_CLIENT_AQC_ETHERNET_PROD
+#define IPA_CLIENT_AQC_ETHERNET_CONS IPA_CLIENT_AQC_ETHERNET_CONS
+#define IPA_CLIENT_MHI_QDSS_CONS IPA_CLIENT_MHI_QDSS_CONS
+#define IPA_CLIENT_QDSS_PROD IPA_CLIENT_QDSS_PROD
+#define IPA_CLIENT_WLAN2_CONS1 IPA_CLIENT_WLAN2_CONS1
+#define IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD
+#define IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS
+#define IPA_CLIENT_Q6_DL_NLO_LL_DATA_PROD IPA_CLIENT_Q6_DL_NLO_LL_DATA_PROD
+#define IPA_CLIENT_APPS_LAN_COAL_CONS IPA_CLIENT_APPS_LAN_COAL_CONS
+#define IPA_CLIENT_MHI_COAL_CONS IPA_CLIENT_MHI_COAL_CONS
+#define IPA_CLIENT_IPSEC_DECAP_PROD IPA_CLIENT_IPSEC_DECAP_PROD
+#define IPA_CLIENT_IPSEC_ENCAP_PROD IPA_CLIENT_IPSEC_ENCAP_PROD
+#define IPA_CLIENT_Q6_DL_NLO_DATA_XLAT_PROD IPA_CLIENT_Q6_DL_NLO_DATA_XLAT_PROD
+#define IPA_CLIENT_IPSEC_DECAP_RECOVERABLE_ERR_CONS IPA_CLIENT_IPSEC_DECAP_RECOVERABLE_ERR_CONS
+#define IPA_CLIENT_IPSEC_DECAP_NON_RECOVERABLE_ERR_CONS \
+	IPA_CLIENT_IPSEC_DECAP_NON_RECOVERABLE_ERR_CONS
+#define IPA_CLIENT_IPSEC_ENCAP_ERR_CONS IPA_CLIENT_IPSEC_ENCAP_ERR_CONS
+
+#define IPA_CLIENT_IS_APPS_CONS(client) \
+	((client) == IPA_CLIENT_APPS_LAN_CONS || \
+	(client) == IPA_CLIENT_APPS_LAN_COAL_CONS || \
+	(client) == IPA_CLIENT_APPS_WAN_CONS || \
+	(client) == IPA_CLIENT_APPS_WAN_COAL_CONS || \
+	(client) == IPA_CLIENT_APPS_WAN_LOW_LAT_CONS || \
+	(client) == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS)
+
+#define IPA_CLIENT_IS_APPS_PROD(client) \
+	((client) == IPA_CLIENT_APPS_LAN_PROD || \
+	(client) == IPA_CLIENT_APPS_WAN_PROD || \
+	(client) == IPA_CLIENT_APPS_WAN_LOW_LAT_PROD || \
+	(client) == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD)
+
+#define IPA_CLIENT_IS_USB_CONS(client) \
+	((client) == IPA_CLIENT_USB_CONS || \
+	(client) == IPA_CLIENT_USB2_CONS || \
+	(client) == IPA_CLIENT_USB3_CONS || \
+	(client) == IPA_CLIENT_USB_DPL_CONS || \
+	(client) == IPA_CLIENT_USB4_CONS)
+
+#define IPA_CLIENT_IS_WAN_CONS(client) \
+	((client) == IPA_CLIENT_APPS_WAN_CONS || \
+	 (client) == IPA_CLIENT_APPS_WAN_COAL_CONS)
+
+#define IPA_CLIENT_IS_LAN_CONS(client) \
+	((client) == IPA_CLIENT_APPS_LAN_CONS || \
+	 (client) == IPA_CLIENT_APPS_LAN_COAL_CONS)
+
+#define IPA_CLIENT_IS_LAN_or_WAN_CONS(client) \
+	((client) == IPA_CLIENT_APPS_LAN_CONS || \
+	 (client) == IPA_CLIENT_APPS_WAN_CONS)
+
+#define IPA_CLIENT_IS_APPS_COAL_CONS(client) \
+	((client) == IPA_CLIENT_APPS_LAN_COAL_CONS || \
+	 (client) == IPA_CLIENT_APPS_WAN_COAL_CONS)
+
+#define IPA_CLIENT_IS_LOW_LAT_CONS(client) \
+	((client) == IPA_CLIENT_APPS_WAN_LOW_LAT_CONS)
+
+#define IPA_CLIENT_IS_WLAN_CONS(client) \
+	((client) == IPA_CLIENT_WLAN1_CONS || \
+	(client) == IPA_CLIENT_WLAN2_CONS || \
+	(client) == IPA_CLIENT_WLAN3_CONS || \
+	(client) == IPA_CLIENT_WLAN2_CONS1 || \
+	(client) == IPA_CLIENT_WLAN4_CONS)
+
+#define IPA_CLIENT_IS_ODU_CONS(client) \
+	((client) == IPA_CLIENT_ODU_EMB_CONS || \
+	(client) == IPA_CLIENT_ODU_TETH_CONS)
+
+#define IPA_CLIENT_IS_Q6_CONS(client) \
+	((client) == IPA_CLIENT_Q6_LAN_CONS || \
+	(client) == IPA_CLIENT_Q6_WAN_CONS || \
+	(client) == IPA_CLIENT_Q6_DUN_CONS || \
+	(client) == IPA_CLIENT_Q6_DECOMP_CONS || \
+	(client) == IPA_CLIENT_Q6_DECOMP2_CONS || \
+	(client) == IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS || \
+	(client) == IPA_CLIENT_Q6_UL_NLO_DATA_CONS || \
+	(client) == IPA_CLIENT_Q6_UL_NLO_ACK_CONS || \
+	(client) == IPA_CLIENT_Q6_QBAP_STATUS_CONS || \
+	(client) == IPA_CLIENT_Q6_CV2X_CONS || \
+	(client) == IPA_CLIENT_Q6_AUDIO_DMA_MHI_CONS)
+
+#define IPA_CLIENT_IS_Q6_PROD(client) \
+	((client) == IPA_CLIENT_Q6_LAN_PROD || \
+	(client) == IPA_CLIENT_Q6_WAN_PROD || \
+	(client) == IPA_CLIENT_Q6_CMD_PROD || \
+	(client) == IPA_CLIENT_Q6_DECOMP_PROD || \
+	(client) == IPA_CLIENT_Q6_DECOMP2_PROD || \
+	(client) == IPA_CLIENT_Q6_DL_NLO_LL_DATA_PROD || \
+	(client) == IPA_CLIENT_Q6_DL_NLO_DATA_PROD || \
+	(client) == IPA_CLIENT_Q6_CV2X_PROD || \
+	(client) == IPA_CLIENT_Q6_AUDIO_DMA_MHI_PROD)
+
+#define IPA_CLIENT_IS_Q6_NON_ZIP_CONS(client) \
+	((client) == IPA_CLIENT_Q6_LAN_CONS || \
+	(client) == IPA_CLIENT_Q6_WAN_CONS || \
+	(client) == IPA_CLIENT_Q6_DUN_CONS || \
+	(client) == IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS || \
+	(client) == IPA_CLIENT_Q6_UL_NLO_DATA_CONS || \
+	(client) == IPA_CLIENT_Q6_UL_NLO_ACK_CONS || \
+	(client) == IPA_CLIENT_Q6_QBAP_STATUS_CONS || \
+	(client) == IPA_CLIENT_Q6_CV2X_CONS || \
+	(client) == IPA_CLIENT_Q6_AUDIO_DMA_MHI_CONS)
+
+#define IPA_CLIENT_IS_Q6_ZIP_CONS(client) \
+	((client) == IPA_CLIENT_Q6_DECOMP_CONS || \
+	(client) == IPA_CLIENT_Q6_DECOMP2_CONS)
+
+#define IPA_CLIENT_IS_Q6_NON_ZIP_PROD(client) \
+	((client) == IPA_CLIENT_Q6_LAN_PROD || \
+	(client) == IPA_CLIENT_Q6_WAN_PROD || \
+	(client) == IPA_CLIENT_Q6_CMD_PROD || \
+	(client) == IPA_CLIENT_Q6_DL_NLO_DATA_PROD || \
+	(client) == IPA_CLIENT_Q6_DL_NLO_LL_DATA_PROD || \
+	(client) == IPA_CLIENT_Q6_CV2X_PROD || \
+	(client) == IPA_CLIENT_Q6_AUDIO_DMA_MHI_PROD)
+
+#define IPA_CLIENT_IS_Q6_ZIP_PROD(client) \
+	((client) == IPA_CLIENT_Q6_DECOMP_PROD || \
+	(client) == IPA_CLIENT_Q6_DECOMP2_PROD)
+
+#define IPA_CLIENT_IS_MEMCPY_DMA_CONS(client) \
+	((client) == IPA_CLIENT_MEMCPY_DMA_SYNC_CONS || \
+	(client) == IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS)
+
+#define IPA_CLIENT_IS_MEMCPY_DMA_PROD(client) \
+	((client) == IPA_CLIENT_MEMCPY_DMA_SYNC_PROD || \
+	(client) == IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD)
+
+#define IPA_CLIENT_IS_MHI(client) \
+	((client) == IPA_CLIENT_MHI_CONS || \
+	(client) == IPA_CLIENT_MHI_PROD || \
+	(client) == IPA_CLIENT_MHI2_PROD || \
+	(client) == IPA_CLIENT_MHI2_CONS || \
+	(client) == IPA_CLIENT_MHI_DPL_CONS || \
+	(client) == IPA_CLIENT_MHI_LOW_LAT_CONS || \
+	(client) == IPA_CLIENT_MHI_LOW_LAT_PROD || \
+	(client) == IPA_CLIENT_MHI_QDSS_CONS || \
+	(client) == IPA_CLIENT_MHI_COAL_CONS)
+
+#define IPA_CLIENT_IS_TEST_PROD(client) \
+	((client) == IPA_CLIENT_TEST_PROD || \
+	(client) == IPA_CLIENT_TEST1_PROD || \
+	(client) == IPA_CLIENT_TEST2_PROD || \
+	(client) == IPA_CLIENT_TEST3_PROD || \
+	(client) == IPA_CLIENT_TEST4_PROD)
+
+#define IPA_CLIENT_IS_TEST_CONS(client) \
+	((client) == IPA_CLIENT_TEST_CONS || \
+	(client) == IPA_CLIENT_TEST1_CONS || \
+	(client) == IPA_CLIENT_TEST2_CONS || \
+	(client) == IPA_CLIENT_TEST3_CONS || \
+	(client) == IPA_CLIENT_TEST4_CONS)
+
+#define IPA_CLIENT_IS_TEST(client) \
+	(IPA_CLIENT_IS_TEST_PROD(client) || IPA_CLIENT_IS_TEST_CONS(client))
+
+/**
+ * The following is used to describe the types of memory NAT can
+ * reside in.
+ *
+ * PLEASE KEEP THE FOLLOWING IN SYNC WITH ipa3_nat_mem_in_as_str()
+ * BELOW.
+ */
+enum ipa3_nat_mem_in {
+	IPA_NAT_MEM_IN_DDR  = 0,
+	IPA_NAT_MEM_IN_SRAM = 1,
+
+	IPA_NAT_MEM_IN_MAX
+};
+
+/* True iff t names a valid NAT memory location (DDR or SRAM) */
+#define IPA_VALID_NAT_MEM_IN(t) \
+	((t) >= IPA_NAT_MEM_IN_DDR && (t) < IPA_NAT_MEM_IN_MAX)
+
+/**
+ * enum ipa_ip_type - Address family: IPv4 or IPv6
+ *
+ * PLEASE KEEP THE FOLLOWING IN SYNC WITH ipa_ip_type_as_str()
+ * BELOW.
+ */
+enum ipa_ip_type {
+	IPA_IP_v4,
+	IPA_IP_v6,
+	IPA_IP_MAX
+};
+
+/* True iff t is one of the real address families (v4 or v6) */
+#define VALID_IPA_IP_TYPE(t) \
+	((t) >= IPA_IP_v4 && (t) < IPA_IP_MAX)
+
+/**
+ * enum ipa_rule_type - Type of routing or filtering rule
+ * Hashable: Rule will be located at the hashable tables
+ * Non_Hashable: Rule will be located at the non-hashable tables
+ */
+enum ipa_rule_type {
+	IPA_RULE_HASHABLE,
+	IPA_RULE_NON_HASHABLE,
+};
+/* Number of distinct ipa_rule_type values */
+#define IPA_RULE_TYPE_MAX (IPA_RULE_NON_HASHABLE + 1)
+
+/**
+ * enum ipa_flt_action - action field of filtering rule
+ *
+ * Pass to routing: 5'd0
+ * Pass to source NAT: 5'd1
+ * Pass to destination NAT: 5'd2
+ * Pass to default output pipe (e.g., Apps or Modem): 5'd3
+ */
+enum ipa_flt_action {
+	IPA_PASS_TO_ROUTING,	/* 5'd0 */
+	IPA_PASS_TO_SRC_NAT,	/* 5'd1 */
+	IPA_PASS_TO_DST_NAT,	/* 5'd2 */
+	IPA_PASS_TO_EXCEPTION	/* 5'd3: default output pipe (Apps/Modem) */
+};
+
+/**
+ * enum ipa_wlan_event - Events for wlan client
+ *
+ * wlan client connect: New wlan client connected
+ * wlan client disconnect: wlan client disconnected
+ * wlan client power save: wlan client moved to power save
+ * wlan client normal: wlan client moved out of power save
+ * sw routing enable: ipa routing is disabled
+ * sw routing disable: ipa routing is enabled
+ * wlan ap connect: wlan AP(access point) is up
+ * wlan ap disconnect: wlan AP(access point) is down
+ * wlan sta connect: wlan STA(station) is up
+ * wlan sta disconnect: wlan STA(station) is down
+ * wlan client connect ex: new wlan client connected
+ * wlan scc switch: wlan interfaces in scc mode
+ * wlan mcc switch: wlan interfaces in mcc mode
+ * wlan wdi enable: wdi data path completed
+ * wlan wdi disable: wdi data path teardown
+ */
+enum ipa_wlan_event {
+	WLAN_CLIENT_CONNECT,
+	WLAN_CLIENT_DISCONNECT,
+	WLAN_CLIENT_POWER_SAVE_MODE,
+	WLAN_CLIENT_NORMAL_MODE,
+	SW_ROUTING_ENABLE,
+	SW_ROUTING_DISABLE,
+	WLAN_AP_CONNECT,
+	WLAN_AP_DISCONNECT,
+	WLAN_STA_CONNECT,
+	WLAN_STA_DISCONNECT,
+	WLAN_CLIENT_CONNECT_EX,
+	WLAN_SWITCH_TO_SCC,
+	WLAN_SWITCH_TO_MCC,
+	WLAN_WDI_ENABLE,
+	WLAN_WDI_DISABLE,
+	IPA_WLAN_EVENT_MAX
+};
+
+/**
+ * enum ipa_wan_event - Events for the WAN client
+ *
+ * wan default route add/del
+ * wan embms connect: new WAN eMBMS interface connected
+ *
+ * Values continue the numbering of enum ipa_wlan_event (the first
+ * entry starts at IPA_WLAN_EVENT_MAX), so all of the IPA event enums
+ * below share a single contiguous event-ID space.
+ */
+enum ipa_wan_event {
+	WAN_UPSTREAM_ROUTE_ADD = IPA_WLAN_EVENT_MAX,
+	WAN_UPSTREAM_ROUTE_DEL,
+	WAN_EMBMS_CONNECT,
+	WAN_XLAT_CONNECT,
+	IPA_WAN_EVENT_MAX
+};
+
+enum ipa_ecm_event {
+	ECM_CONNECT = IPA_WAN_EVENT_MAX,
+	ECM_DISCONNECT,
+	IPA_ECM_EVENT_MAX,
+};
+
+enum ipa_tethering_stats_event {
+	IPA_TETHERING_STATS_UPDATE_STATS = IPA_ECM_EVENT_MAX,
+	IPA_TETHERING_STATS_UPDATE_NETWORK_STATS,
+	IPA_TETHERING_STATS_EVENT_MAX,
+};
+
+
+enum ipa_quota_event {
+	IPA_QUOTA_REACH = IPA_TETHERING_STATS_EVENT_MAX,
+	IPA_QUOTA_EVENT_MAX,
+};
+
+enum ipa_ssr_event {
+	IPA_SSR_BEFORE_SHUTDOWN = IPA_QUOTA_EVENT_MAX,
+	IPA_SSR_AFTER_POWERUP,
+	IPA_SSR_EVENT_MAX,
+};
+
+enum ipa_vlan_l2tp_event {
+	ADD_VLAN_IFACE = IPA_SSR_EVENT_MAX,
+	DEL_VLAN_IFACE,
+	ADD_L2TP_VLAN_MAPPING,
+	DEL_L2TP_VLAN_MAPPING,
+	IPA_VLAN_L2TP_EVENT_MAX,
+};
+
+enum ipa_per_client_stats_event {
+	IPA_PER_CLIENT_STATS_CONNECT_EVENT = IPA_VLAN_L2TP_EVENT_MAX,
+	IPA_PER_CLIENT_STATS_DISCONNECT_EVENT,
+	IPA_PER_CLIENT_STATS_EVENT_MAX,
+};
+
+enum ipa_vlan_bridge_event {
+	ADD_BRIDGE_VLAN_MAPPING = IPA_PER_CLIENT_STATS_EVENT_MAX,
+	DEL_BRIDGE_VLAN_MAPPING,
+	BRIDGE_VLAN_MAPPING_MAX,
+};
+
+enum ipa_wlan_fw_ssr_event {
+	WLAN_FWR_SSR_BEFORE_SHUTDOWN = BRIDGE_VLAN_MAPPING_MAX,
+	IPA_WLAN_FW_SSR_EVENT_MAX,
+#define IPA_WLAN_FW_SSR_EVENT_MAX IPA_WLAN_FW_SSR_EVENT_MAX
+};
+
+enum ipa_gsb_event {
+	IPA_GSB_CONNECT = IPA_WLAN_FW_SSR_EVENT_MAX,
+	IPA_GSB_DISCONNECT,
+	IPA_GSB_EVENT_MAX,
+};
+
+enum ipa_coalesce_event {
+	IPA_COALESCE_ENABLE = IPA_GSB_EVENT_MAX,
+	IPA_COALESCE_DISABLE,
+	IPA_COALESCE_EVENT_MAX
+#define IPA_COALESCE_EVENT_MAX IPA_COALESCE_EVENT_MAX
+};
+
+enum ipa_mtu_event {
+	IPA_SET_MTU = IPA_COALESCE_EVENT_MAX,
+	IPA_MTU_EVENT_MAX
+#define IPA_MTU_EVENT_MAX IPA_MTU_EVENT_MAX
+};
+
+enum ipa_peripheral_event {
+	IPA_PERIPHERAL_CONNECT = ECM_CONNECT,
+	IPA_PERIPHERAL_DISCONNECT = ECM_DISCONNECT
+};
+
+#define WIGIG_CLIENT_CONNECT (IPA_MTU_EVENT_MAX)
+#define WIGIG_FST_SWITCH (WIGIG_CLIENT_CONNECT + 1)
+#define WIGIG_EVENT_MAX (WIGIG_FST_SWITCH + 1)
+
+enum ipa_pdn_config_event {
+	IPA_PDN_DEFAULT_MODE_CONFIG = WIGIG_EVENT_MAX, /* Default mode. */
+	IPA_PDN_IP_COLLISION_MODE_CONFIG, /* IP Collision detected. */
+	IPA_PDN_IP_PASSTHROUGH_MODE_CONFIG, /* IP Passthrough mode. */
+	IPA_PDN_CONFIG_EVENT_MAX
+#define IPA_PDN_CONFIG_EVENT_MAX IPA_PDN_CONFIG_EVENT_MAX
+};
+
+enum ipa_mac_flt_event {
+	IPA_MAC_FLT_EVENT = IPA_PDN_CONFIG_EVENT_MAX,
+	IPA_MAC_FLT_EVENT_MAX
+#define IPA_MAC_FLT_EVENT_MAX IPA_MAC_FLT_EVENT_MAX
+};
+
+enum ipa_sockv5_event {
+	IPA_SOCKV5_ADD = IPA_MAC_FLT_EVENT_MAX,
+	IPA_SOCKV5_DEL,
+	IPA_SOCKV5_EVENT_MAX
+#define IPA_SOCKV5_EVENT_MAX IPA_SOCKV5_EVENT_MAX
+};
+
+enum ipa_warning_limit_event {
+	IPA_WARNING_LIMIT_REACHED = IPA_SOCKV5_EVENT_MAX,
+	IPA_WARNING_LIMIT_EVENT_MAX,
+#define IPA_WARNING_LIMIT_EVENT_MAX IPA_WARNING_LIMIT_EVENT_MAX
+};
+
+enum ipa_sw_flt_event {
+	IPA_SW_FLT_EVENT = IPA_WARNING_LIMIT_EVENT_MAX,
+	IPA_SW_FLT_EVENT_MAX
+#define IPA_SW_FLT_EVENT_MAX IPA_SW_FLT_EVENT_MAX
+};
+
+enum ipa_pkt_threshold_event {
+	IPA_PKT_THRESHOLD_EVENT = IPA_SW_FLT_EVENT_MAX,
+	IPA_PKT_THRESHOLD_EVENT_MAX
+#define IPA_PKT_THRESHOLD_EVENT_MAX IPA_PKT_THRESHOLD_EVENT_MAX
+};
+
+
+enum ipa_move_nat_table_event {
+	IPA_MOVE_NAT_TABLE = IPA_PKT_THRESHOLD_EVENT_MAX,
+	IPA_MOVE_NAT_EVENT_MAX
+#define IPA_MOVE_NAT_EVENT_MAX IPA_MOVE_NAT_EVENT_MAX
+};
+
+enum ipa_eogre_event {
+	IPA_EoGRE_UP_EVENT = IPA_MOVE_NAT_EVENT_MAX,
+	IPA_EoGRE_DOWN_EVENT,
+	IPA_EoGRE_EVENT_MAX
+#define IPA_EoGRE_EVENT_MAX IPA_EoGRE_EVENT_MAX
+};
+
+enum ipa_ippt_sw_flt_event {
+	IPA_IPPT_SW_FLT_EVENT = IPA_EoGRE_EVENT_MAX,
+	IPA_IPPT_SW_FLT_EVENT_MAX
+#define IPA_IPPT_SW_FLT_EVENT_MAX IPA_IPPT_SW_FLT_EVENT_MAX
+};
+
+enum ipa_macsec_event {
+	IPA_MACSEC_ADD_EVENT = IPA_IPPT_SW_FLT_EVENT_MAX,
+	IPA_MACSEC_DEL_EVENT,
+	IPA_MACSEC_EVENT_MAX
+#define IPA_MACSEC_EVENT_MAX IPA_MACSEC_EVENT_MAX
+};
+
+enum ipa_ext_route_evt {
+	IPA_SET_EXT_ROUTER_MODE_EVENT = IPA_MACSEC_EVENT_MAX,
+	IPA_SET_EXT_ROUTER_MODE_EVENT_MAX
+#define IPA_SET_EXT_ROUTER_MODE_EVENT_MAX IPA_SET_EXT_ROUTER_MODE_EVENT_MAX
+};
+
+#define IPA_EVENT_MAX_NUM (IPA_SET_EXT_ROUTER_MODE_EVENT_MAX)
+#define IPA_EVENT_MAX ((int)IPA_EVENT_MAX_NUM)
+
+/**
+ * enum ipa_rm_resource_name - IPA RM clients identification names
+ *
+ * PROD resources are always even, and CONS resources are always odd.
+ * Add new clients in the end of the list and update IPA_RM_RESOURCE_MAX
+ */
+enum ipa_rm_resource_name {
+	IPA_RM_RESOURCE_Q6_PROD				= 0,
+	IPA_RM_RESOURCE_Q6_CONS				= 1,
+
+	IPA_RM_RESOURCE_USB_PROD			= 2,
+	IPA_RM_RESOURCE_USB_CONS			= 3,
+
+	IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD		= 4,
+	IPA_RM_RESOURCE_USB_DPL_CONS			= 5,
+
+	IPA_RM_RESOURCE_HSIC_PROD			= 6,
+	IPA_RM_RESOURCE_HSIC_CONS			= 7,
+
+	IPA_RM_RESOURCE_STD_ECM_PROD			= 8,
+	IPA_RM_RESOURCE_APPS_CONS			= 9,
+
+	IPA_RM_RESOURCE_RNDIS_PROD			= 10,
+	/* RESERVED CONS				= 11, */
+
+	IPA_RM_RESOURCE_WWAN_0_PROD			= 12,
+	/* RESERVED CONS				= 13, */
+
+	IPA_RM_RESOURCE_WLAN_PROD			= 14,
+	IPA_RM_RESOURCE_WLAN_CONS			= 15,
+
+	IPA_RM_RESOURCE_ODU_ADAPT_PROD			= 16,
+	IPA_RM_RESOURCE_ODU_ADAPT_CONS			= 17,
+
+	IPA_RM_RESOURCE_MHI_PROD			= 18,
+	IPA_RM_RESOURCE_MHI_CONS			= 19,
+
+	IPA_RM_RESOURCE_ETHERNET_PROD			= 20,
+	IPA_RM_RESOURCE_ETHERNET_CONS			= 21,
+};
+#define IPA_RM_RESOURCE_MAX (IPA_RM_RESOURCE_ETHERNET_CONS + 1)
+
+/**
+ * enum ipa_hw_type - IPA hardware version type
+ * @IPA_HW_None: IPA hardware version not defined
+ * @IPA_HW_v1_0: IPA hardware version 1.0
+ * @IPA_HW_v1_1: IPA hardware version 1.1
+ * @IPA_HW_v2_0: IPA hardware version 2.0
+ * @IPA_HW_v2_1: IPA hardware version 2.1
+ * @IPA_HW_v2_5: IPA hardware version 2.5
+ * @IPA_HW_v2_6: IPA hardware version 2.6
+ * @IPA_HW_v2_6L: IPA hardware version 2.6L
+ * @IPA_HW_v3_0: IPA hardware version 3.0
+ * @IPA_HW_v3_1: IPA hardware version 3.1
+ * @IPA_HW_v3_5: IPA hardware version 3.5
+ * @IPA_HW_v3_5_1: IPA hardware version 3.5.1
+ * @IPA_HW_v4_0: IPA hardware version 4.0
+ * @IPA_HW_v4_1: IPA hardware version 4.1
+ * @IPA_HW_v4_2: IPA hardware version 4.2
+ * @IPA_HW_v4_5: IPA hardware version 4.5
+ * @IPA_HW_v4_7: IPA hardware version 4.7
+ * @IPA_HW_v4_9: IPA hardware version 4.9
+ * @IPA_HW_v4_11: IPA hardware version 4.11
+ * @IPA_HW_v5_0: IPA hardware version 5.0
+ * @IPA_HW_v5_1: IPA hardware version 5.1
+ * @IPA_HW_v5_2: IPA hardware version 5.2
+ * @IPA_HW_v5_5: IPA hardware version 5.5
+ * @IPA_HW_v6_0: IPA hardware version 6.0
+ */
+enum ipa_hw_type {
+	IPA_HW_None = 0,
+	IPA_HW_v1_0 = 1,
+	IPA_HW_v1_1 = 2,
+	IPA_HW_v2_0 = 3,
+	IPA_HW_v2_1 = 4,
+	IPA_HW_v2_5 = 5,
+	IPA_HW_v2_6 = IPA_HW_v2_5,	/* v2.6 is treated as an alias of v2.5 */
+	IPA_HW_v2_6L = 6,
+	/* values 7-9 are intentionally unused */
+	IPA_HW_v3_0 = 10,
+	IPA_HW_v3_1 = 11,
+	IPA_HW_v3_5 = 12,
+	IPA_HW_v3_5_1 = 13,
+	IPA_HW_v4_0 = 14,
+	IPA_HW_v4_1 = 15,
+	IPA_HW_v4_2 = 16,
+	IPA_HW_v4_5 = 17,
+	IPA_HW_v4_7 = 18,
+	IPA_HW_v4_9 = 19,
+	IPA_HW_v4_11 = 20,
+	IPA_HW_v5_0 = 21,
+	IPA_HW_v5_1 = 22,
+	IPA_HW_v5_2 = 23,
+	IPA_HW_v5_5 = 24,
+	IPA_HW_v6_0 = 25,
+};
+
+#define IPA_HW_MAX (IPA_HW_v6_0 + 1)
+
+/*
+ * Self-referential no-op defines: make each HW version constant
+ * testable with #ifdef by client code (enum constants alone are
+ * invisible to the preprocessor).
+ */
+#define IPA_HW_v4_0 IPA_HW_v4_0
+#define IPA_HW_v4_1 IPA_HW_v4_1
+#define IPA_HW_v4_2 IPA_HW_v4_2
+#define IPA_HW_v4_5 IPA_HW_v4_5
+#define IPA_HW_v4_7 IPA_HW_v4_7
+#define IPA_HW_v4_9 IPA_HW_v4_9
+#define IPA_HW_v4_11 IPA_HW_v4_11
+#define IPA_HW_v5_0 IPA_HW_v5_0
+#define IPA_HW_v5_1 IPA_HW_v5_1
+#define IPA_HW_v5_2 IPA_HW_v5_2
+#define IPA_HW_v5_5 IPA_HW_v5_5
+#define IPA_HW_v6_0 IPA_HW_v6_0
+
+/**
+ * struct ipa_rule_attrib - attributes of a routing/filtering
+ * rule, all in LE
+ * @attrib_mask: what attributes are valid
+ * @src_port_lo: low port of src port range
+ * @src_port_hi: high port of src port range
+ * @dst_port_lo: low port of dst port range
+ * @dst_port_hi: high port of dst port range
+ * @type: ICMP/IGMP type
+ * @code: ICMP/IGMP code
+ * @spi: IPSec SPI
+ * @src_port: exact src port
+ * @dst_port: exact dst port
+ * @meta_data: metadata val
+ * @meta_data_mask: metadata mask
+ * @u.v4.tos: type of service
+ * @u.v4.protocol: protocol
+ * @u.v4.src_addr: src address value
+ * @u.v4.src_addr_mask: src address mask
+ * @u.v4.dst_addr: dst address value
+ * @u.v4.dst_addr_mask: dst address mask
+ * @u.v6.tc: traffic class
+ * @u.v6.flow_label: flow label
+ * @u.v6.next_hdr: next header
+ * @u.v6.src_addr: src address val
+ * @u.v6.src_addr_mask: src address mask
+ * @u.v6.dst_addr: dst address val
+ * @u.v6.dst_addr_mask: dst address mask
+ * @vlan_id: vlan id value
+ * @payload_length: Payload length.
+ * @ext_attrib_mask: Extended attributes.
+ * @l2tp_udp_next_hdr: next header in L2TP tunneling
+ * @frag_encoding: is-frag equation
+ */
+struct ipa_rule_attrib {
+	uint32_t attrib_mask;
+	uint16_t src_port_lo;
+	uint16_t src_port_hi;
+	uint16_t dst_port_lo;
+	uint16_t dst_port_hi;
+	uint8_t type;
+	uint8_t code;
+	uint8_t tos_value;
+	uint8_t tos_mask;
+	uint32_t spi;
+	uint16_t src_port;
+	uint16_t dst_port;
+	uint32_t meta_data;
+	uint32_t meta_data_mask;
+	uint8_t src_mac_addr[ETH_ALEN];
+	uint8_t src_mac_addr_mask[ETH_ALEN];
+	uint8_t dst_mac_addr[ETH_ALEN];
+	uint8_t dst_mac_addr_mask[ETH_ALEN];
+	uint16_t ether_type;
+	union {
+		struct {
+			uint8_t tos;
+			uint8_t protocol;
+			uint32_t src_addr;
+			uint32_t src_addr_mask;
+			uint32_t dst_addr;
+			uint32_t dst_addr_mask;
+		} v4;
+		struct {
+			uint8_t tc;
+			uint32_t flow_label;
+			uint8_t next_hdr;
+			uint32_t src_addr[4];
+			uint32_t src_addr_mask[4];
+			uint32_t dst_addr[4];
+			uint32_t dst_addr_mask[4];
+		} v6;
+	} u;
+	__u16 vlan_id;
+	__u16 payload_length;
+	__u32 ext_attrib_mask;
+	__u8 l2tp_udp_next_hdr;
+	__u8 is_frag_encoding;
+	__u32 padding2;
+};
+
+
+/*! @brief The maximum number of Mask Equal 32 Eqns */
+#define IPA_IPFLTR_NUM_MEQ_32_EQNS 2
+
+/*! @brief The maximum number of IHL offset Mask Equal 32 Eqns */
+#define IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS 2
+
+/*! @brief The maximum number of Mask Equal 128 Eqns */
+#define IPA_IPFLTR_NUM_MEQ_128_EQNS 2
+
+/*! @brief The maximum number of IHL offset Range Check 16 Eqns */
+#define IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS 2
+
+/*! @brief Offset and 16 bit comparison equation */
+struct ipa_ipfltr_eq_16 {
+	int8_t offset;
+	uint16_t value;
+};
+
+/*! @brief Offset and 32 bit comparison equation */
+struct ipa_ipfltr_eq_32 {
+	int8_t offset;
+	uint32_t value;
+};
+
+/*! @brief Offset and 128 bit masked comparison equation */
+struct ipa_ipfltr_mask_eq_128 {
+	int8_t offset;
+	uint8_t mask[16];
+	uint8_t value[16];
+};
+
+/*! @brief Offset and 32 bit masked comparison equation */
+struct ipa_ipfltr_mask_eq_32 {
+	int8_t offset;
+	uint32_t mask;
+	uint32_t value;
+};
+
+/*! @brief Equation for identifying a range. Ranges are inclusive */
+struct ipa_ipfltr_range_eq_16 {
+	int8_t offset;
+	uint16_t range_low;
+	uint16_t range_high;
+};
+
+/*! @brief Rule equations which are set according to DS filter installation */
+struct ipa_ipfltri_rule_eq {
+	/*! 16-bit Bitmask to indicate how many eqs are valid in this rule  */
+	uint16_t rule_eq_bitmap;
+
+	/*
+	 * tos_eq_present field has two meanings:
+	 * IPA ver < 4.5:
+	 *  specifies if a type of service check rule is present
+	 *  (as the field name reveals).
+	 * IPA ver >= 4.5:
+	 *  specifies if a tcp pure ack check rule is present
+	 */
+	uint8_t tos_eq_present;
+	/*! The value to check against the type of service (ipv4) field */
+	uint8_t tos_eq;
+	/*! Specifies if a protocol check rule is present */
+	uint8_t protocol_eq_present;
+	/*! The value to check against the protocol (ipv6) field */
+	uint8_t protocol_eq;
+	/*! The number of ip header length offset 16 bit range check
+	 * rules in this rule
+	 */
+	uint8_t num_ihl_offset_range_16;
+	/*! An array of the registered ip header length offset 16 bit
+	 * range check rules
+	 */
+	struct ipa_ipfltr_range_eq_16
+		ihl_offset_range_16[IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS];
+	/*! The number of mask equal 32 rules present in this rule */
+	uint8_t num_offset_meq_32;
+	/*! An array of all the possible mask equal 32 rules in this rule */
+	struct ipa_ipfltr_mask_eq_32
+		offset_meq_32[IPA_IPFLTR_NUM_MEQ_32_EQNS];
+	/*! Specifies if the traffic class rule is present in this rule */
+	uint8_t tc_eq_present;
+	/*! The value to check the traffic class (ipv4) field against */
+	uint8_t tc_eq;
+	/*! Specifies if the flow equals rule is present in this rule */
+	uint8_t fl_eq_present;
+	/*! The value to check the flow (ipv6) field against */
+	uint32_t fl_eq;
+	/*! The number of ip header length offset 16 bit equations in this
+	 * rule
+	 */
+	uint8_t ihl_offset_eq_16_present;
+	/*! The ip header length offset 16 bit equation */
+	struct ipa_ipfltr_eq_16 ihl_offset_eq_16;
+	/*! The number of ip header length offset 32 bit equations in this
+	 * rule
+	 */
+	uint8_t ihl_offset_eq_32_present;
+	/*! The ip header length offset 32 bit equation */
+	struct ipa_ipfltr_eq_32 ihl_offset_eq_32;
+	/*! The number of ip header length offset 32 bit mask equations in
+	 * this rule
+	 */
+	uint8_t num_ihl_offset_meq_32;
+	/*! The ip header length offset 32 bit mask equation */
+	struct ipa_ipfltr_mask_eq_32
+		ihl_offset_meq_32[IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS];
+	/*! The number of ip header length offset 128 bit equations in this
+	 * rule
+	 */
+	uint8_t num_offset_meq_128;
+	/*! The ip header length offset 128 bit equation */
+	struct ipa_ipfltr_mask_eq_128
+		offset_meq_128[IPA_IPFLTR_NUM_MEQ_128_EQNS];
+	/*! The metadata 32 bit masked comparison equation present or not */
+	/* Metadata based rules are added internally by IPA driver */
+	uint8_t metadata_meq32_present;
+	/*! The metadata 32 bit masked comparison equation */
+	struct ipa_ipfltr_mask_eq_32 metadata_meq32;
+	/*! Specifies if the Fragment equation is present in this rule */
+	uint8_t ipv4_frag_eq_present;
+	/*! The IS-FRAG equation enhancement change since IPA6.0
+	 * values: IS-FRAG-0, Is-Primary-1, Is-Secondary-2, Not-Frag-3
+	 */
+	uint8_t is_frag_encoding;
+};
+
+/**
+ * struct ipa_flt_rule - attributes of a filtering rule
+ * @retain_hdr: bool switch to instruct IPA core to add back to the packet
+ *  the header removed as part of header removal
+ * @to_uc: bool switch to pass packet to micro-controller
+ * @action: action field
+ * @rt_tbl_hdl: handle of table from "get"
+ * @attrib: attributes of the rule
+ * @eq_attrib: attributes of the rule in equation form (valid when
+ * eq_attrib_type is true)
+ * @rt_tbl_idx: index of RT table referred to by filter rule (valid when
+ * eq_attrib_type is true and non-exception action)
+ * @eq_attrib_type: true if equation level form used to specify attributes
+ * @max_prio: bool switch. is this rule with Max priority? meaning on rule hit,
+ *  IPA will use the rule and will not look for other rules that may have
+ *  higher priority
+ * @hashable: bool switch. is this rule hashable or not?
+ *  ipa uses hashable rules to cache their hit results to be used in
+ *  consecutive packets
+ * @rule_id: rule_id to be assigned to the filter rule. In case client specifies
+ *  rule_id as 0 the driver will assign a new rule_id
+ * @set_metadata: bool switch. should metadata replacement at the NAT block
+ *  take place?
+ * @pdn_idx: if action is "pass to source\destination NAT" then a comparison
+ * against the PDN index in the matching PDN entry will take place as an
+ * additional condition for NAT hit.
+ */
+struct ipa_flt_rule {
+	uint8_t retain_hdr;
+	uint8_t to_uc;
+	enum ipa_flt_action action;
+	uint32_t rt_tbl_hdl;
+	struct ipa_rule_attrib attrib;
+	struct ipa_ipfltri_rule_eq eq_attrib;
+	uint32_t rt_tbl_idx;
+	uint8_t eq_attrib_type;
+	uint8_t max_prio;
+	uint8_t hashable;
+	uint16_t rule_id;
+	uint8_t set_metadata;
+	uint8_t pdn_idx;
+};
+
+#define IPA_FLTRT_TTL_UPDATE
+
+/**
+ * struct ipa_flt_rule_v2 - attributes of a filtering rule
+ * @retain_hdr: bool switch to instruct IPA core to add back to the packet
+ *  the header removed as part of header removal
+ * @to_uc: bool switch to pass packet to micro-controller
+ * @action: action field
+ * @rt_tbl_hdl: handle of table from "get"
+ * @attrib: attributes of the rule
+ * @eq_attrib: attributes of the rule in equation form (valid when
+ * eq_attrib_type is true)
+ * @rt_tbl_idx: index of RT table referred to by filter rule (valid when
+ * eq_attrib_type is true and non-exception action)
+ * @eq_attrib_type: true if equation level form used to specify attributes
+ * @max_prio: bool switch. is this rule with Max priority? meaning on rule hit,
+ *  IPA will use the rule and will not look for other rules that may have
+ *  higher priority
+ * @hashable: bool switch. is this rule hashable or not?
+ *  ipa uses hashable rules to cache their hit results to be used in
+ *  consecutive packets
+ * @rule_id: rule_id to be assigned to the filter rule. In case client specifies
+ *  rule_id as 0 the driver will assign a new rule_id
+ * @set_metadata: bool switch. should metadata replacement at the NAT block
+ *  take place?
+ * @pdn_idx: if action is "pass to source\destination NAT" then a comparison
+ * against the PDN index in the matching PDN entry will take place as an
+ * additional condition for NAT hit.
+ * @enable_stats: is true when we want to enable stats for this
+ * flt rule.
+ * @cnt_idx: if 0 means disable, otherwise use for index.
+ * will be assigned by ipa driver.
+ * @close_aggr_irq_mod: close aggregation/coalescing and close GSI
+ * interrupt moderation
+ * @ttl_update: bool to indicate whether TTL update is needed or not.
+ * @qos_class: QOS classification value.
+ */
+struct ipa_flt_rule_v2 {
+	uint8_t retain_hdr;
+	uint8_t to_uc;
+	enum ipa_flt_action action;
+	uint32_t rt_tbl_hdl;
+	struct ipa_rule_attrib attrib;
+	struct ipa_ipfltri_rule_eq eq_attrib;
+	uint32_t rt_tbl_idx;
+	uint8_t eq_attrib_type;
+	uint8_t max_prio;
+	uint8_t hashable;
+	uint16_t rule_id;
+	uint8_t set_metadata;
+	uint8_t pdn_idx;
+	uint8_t enable_stats;
+	uint8_t cnt_idx;
+	uint8_t close_aggr_irq_mod;
+	uint8_t ttl_update;
+	uint8_t qos_class;
+};
+
+/**
+ * enum ipa_hdr_l2_type - L2 header type
+ * IPA_HDR_L2_NONE: L2 header which isn't Ethernet II and isn't 802_3
+ * IPA_HDR_L2_ETHERNET_II: L2 header of type Ethernet II
+ * IPA_HDR_L2_802_3: L2 header of type 802_3
+ * IPA_HDR_L2_802_1Q: L2 header of type 802_1Q
+ */
+enum ipa_hdr_l2_type {
+	IPA_HDR_L2_NONE,
+	IPA_HDR_L2_ETHERNET_II,
+	IPA_HDR_L2_802_3,
+	IPA_HDR_L2_802_1Q,
+};
+#define IPA_HDR_L2_MAX (IPA_HDR_L2_802_1Q + 1)
+
+#define IPA_HDR_L2_802_1Q IPA_HDR_L2_802_1Q
+
+/**
+ * enum ipa_hdr_proc_type - Processing context type
+ *
+ * IPA_HDR_PROC_NONE:                   No processing context
+ * IPA_HDR_PROC_ETHII_TO_ETHII:         Process Ethernet II to Ethernet II
+ * IPA_HDR_PROC_ETHII_TO_802_3:         Process Ethernet II to 802_3
+ * IPA_HDR_PROC_802_3_TO_ETHII:         Process 802_3 to Ethernet II
+ * IPA_HDR_PROC_802_3_TO_802_3:         Process 802_3 to 802_3
+ * IPA_HDR_PROC_L2TP_HEADER_ADD:
+ * IPA_HDR_PROC_L2TP_HEADER_REMOVE:
+ * IPA_HDR_PROC_ETHII_TO_ETHII_EX:      Process Ethernet II to Ethernet II with
+ *                                      generic lengths of src and dst headers
+ * IPA_HDR_PROC_L2TP_UDP_HEADER_ADD:    Process WLAN To Ethernet packets to
+ *                                      add L2TP UDP header.
+ * IPA_HDR_PROC_L2TP_UDP_HEADER_REMOVE: Process Ethernet To WLAN packets to
+ *                                      remove L2TP UDP header.
+ * IPA_HDR_PROC_SET_DSCP:
+ * IPA_HDR_PROC_EoGRE_HEADER_ADD:       Add IPV[46] GRE header
+ * IPA_HDR_PROC_EoGRE_HEADER_REMOVE:    Remove IPV[46] GRE header
+ */
+enum ipa_hdr_proc_type {
+	IPA_HDR_PROC_NONE,
+	IPA_HDR_PROC_ETHII_TO_ETHII,
+	IPA_HDR_PROC_ETHII_TO_802_3,
+	IPA_HDR_PROC_802_3_TO_ETHII,
+	IPA_HDR_PROC_802_3_TO_802_3,
+	IPA_HDR_PROC_L2TP_HEADER_ADD,
+	IPA_HDR_PROC_L2TP_HEADER_REMOVE,
+	IPA_HDR_PROC_ETHII_TO_ETHII_EX,
+	IPA_HDR_PROC_L2TP_UDP_HEADER_ADD,
+	IPA_HDR_PROC_L2TP_UDP_HEADER_REMOVE,
+	IPA_HDR_PROC_SET_DSCP,
+	IPA_HDR_PROC_EoGRE_HEADER_ADD,
+	IPA_HDR_PROC_EoGRE_HEADER_REMOVE,
+};
+#define IPA_HDR_PROC_MAX (IPA_HDR_PROC_EoGRE_HEADER_REMOVE + 1)
+
+/**
+ * struct ipa_rt_rule - attributes of a routing rule
+ * @dst: dst "client"
+ * @hdr_hdl: handle to the dynamic header
+ *	it is not an index or an offset
+ * @hdr_proc_ctx_hdl: handle to header processing context. if it is provided
+ *	hdr_hdl shall be 0
+ * @attrib: attributes of the rule
+ * @max_prio: bool switch. is this rule with Max priority? meaning on rule hit,
+ *  IPA will use the rule and will not look for other rules that may have
+ *  higher priority
+ * @hashable: bool switch. is this rule hashable or not?
+ *  ipa uses hashable rules to cache their hit results to be used in
+ *  consecutive packets
+ * @retain_hdr: bool switch to instruct IPA core to add back to the packet
+ *  the header removed as part of header removal
+ * @coalesce: bool to decide whether packets should be coalesced or not
+ */
+struct ipa_rt_rule {
+	enum ipa_client_type dst;
+	uint32_t hdr_hdl;
+	uint32_t hdr_proc_ctx_hdl;
+	struct ipa_rule_attrib attrib;
+	uint8_t max_prio;
+	uint8_t hashable;
+	uint8_t retain_hdr;
+	uint8_t coalesce;
+};
+#define IPA_RT_SUPPORT_COAL
+
+/**
+ * struct ipa_rt_rule_v2 - attributes of a routing rule
+ * @dst: dst "client"
+ * @hdr_hdl: handle to the dynamic header
+ *	it is not an index or an offset
+ * @hdr_proc_ctx_hdl: handle to header processing context. if it is provided
+ *	hdr_hdl shall be 0
+ * @attrib: attributes of the rule
+ * @max_prio: bool switch. is this rule with Max priority? meaning on rule hit,
+ *  IPA will use the rule and will not look for other rules that may have
+ *  higher priority
+ * @hashable: bool switch. is this rule hashable or not?
+ *  ipa uses hashable rules to cache their hit results to be used in
+ *  consecutive packets
+ * @retain_hdr: bool switch to instruct IPA core to add back to the packet
+ *  the header removed as part of header removal
+ * @coalesce: bool to decide whether packets should be coalesced or not
+ * @enable_stats: is true when we want to enable stats for this
+ * rt rule.
+ * @cnt_idx: if enable_stats is 1 and cnt_idx is 0, then cnt_idx
+ * will be assigned by ipa driver.
+ * @close_aggr_irq_mod: close aggregation/coalescing and close GSI
+ * interrupt moderation
+ * @ttl_update: bool to indicate whether TTL update is needed or not.
+ * @qos_class: QOS classification value.
+ * @skip_ingress: bool to skip ingress policing.
+ */
+struct ipa_rt_rule_v2 {
+	enum ipa_client_type dst;
+	uint32_t hdr_hdl;
+	uint32_t hdr_proc_ctx_hdl;
+	struct ipa_rule_attrib attrib;
+	uint8_t max_prio;
+	uint8_t hashable;
+	uint8_t retain_hdr;
+	uint8_t coalesce;
+	uint8_t enable_stats;
+	uint8_t cnt_idx;
+	uint8_t close_aggr_irq_mod;
+	uint8_t ttl_update;
+	uint8_t qos_class;
+	uint8_t skip_ingress;
+};
+
+/**
+ * struct ipa_hdr_add - header descriptor includes in and out
+ * parameters
+ * @name: name of the header
+ * @hdr: actual header to be inserted
+ * @hdr_len: size of above header
+ * @type: l2 header type
+ * @is_partial: header not fully specified
+ * @hdr_hdl: out parameter, handle to header, valid when status is 0
+ * @status:	out parameter, status of header add operation,
+ *		0 for success,
+ *		-1 for failure
+ * @is_eth2_ofst_valid: is eth2_ofst field valid?
+ * @eth2_ofst: offset to start of Ethernet-II/802.3 header
+ */
+struct ipa_hdr_add {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint8_t hdr[IPA_HDR_MAX_SIZE];
+	uint8_t hdr_len;
+	enum ipa_hdr_l2_type type;
+	uint8_t is_partial;
+	uint32_t hdr_hdl;
+	int status;
+	uint8_t is_eth2_ofst_valid;
+	uint16_t eth2_ofst;
+};
+
+/**
+ * struct ipa_ioc_add_hdr - header addition parameters (support
+ * multiple headers and commit)
+ * @commit: should headers be written to IPA HW also?
+ * @num_hdrs: num of headers that follow
+ * @ipa_hdr_add hdr:	all headers need to go here back to
+ *			back, no pointers
+ */
+struct ipa_ioc_add_hdr {
+	uint8_t commit;
+	uint8_t num_hdrs;
+	struct ipa_hdr_add hdr[0];
+};
+
+/**
+ * struct ipa_l2tp_header_add_procparams -
+ * @eth_hdr_retained: Specifies if Ethernet header is retained or not
+ * @input_ip_version: Specifies if Input header is IPV4(0) or IPV6(1)
+ * @output_ip_version: Specifies if template header is IPV4(0) or IPV6(1)
+ * @second_pass: Specifies if second pass is required or not
+ */
+struct ipa_l2tp_header_add_procparams {
+	__u32 eth_hdr_retained:1;
+	__u32 input_ip_version:1;
+	__u32 output_ip_version:1;
+	__u32 second_pass:1;
+	__u32 reserved:28;
+	__u32 padding;
+};
+
+/**
+ * struct ipa_l2tp_header_remove_procparams -
+ * @hdr_len_remove: Specifies how much of the header needs to
+ *	be removed in bytes
+ * @eth_hdr_retained: Specifies if Ethernet header is retained or not
+ * @hdr_ofst_pkt_size_valid: Specifies if the Header offset is valid
+ * @hdr_ofst_pkt_size: If hdr_ofst_pkt_size_valid =1, this indicates where the
+ *	packet size field (2 bytes) resides
+ * @hdr_endianness: 0:little endian, 1:big endian
+ */
+struct ipa_l2tp_header_remove_procparams {
+	uint32_t hdr_len_remove:8;
+	uint32_t eth_hdr_retained:1;
+	/* Following fields are valid if eth_hdr_retained =1 ( bridge mode) */
+	uint32_t hdr_ofst_pkt_size_valid:1;
+	uint32_t hdr_ofst_pkt_size:6;
+	uint32_t hdr_endianness:1;
+	uint32_t reserved:15;
+};
+
+/**
+ * struct ipa_l2tp_hdr_proc_ctx_params -
+ * @hdr_add_param: parameters for header add
+ * @hdr_remove_param: parameters for header remove
+ * @is_dst_pipe_valid: if dst pipe is valid
+ * @dst_pipe: destination pipe
+ */
+struct ipa_l2tp_hdr_proc_ctx_params {
+	struct ipa_l2tp_header_add_procparams hdr_add_param;
+	struct ipa_l2tp_header_remove_procparams hdr_remove_param;
+	uint8_t is_dst_pipe_valid;
+	enum ipa_client_type dst_pipe;
+};
+
+#define IPA_EoGRE_MAX_PCP_IDX 8 /* From 802.1Q tag format (reflects IEEE P802.1p) */
+#define IPA_EoGRE_MAX_VLAN    8 /* Our supported number of VLAN id's */
+
+/* vlan 12 bits + pcp 3 bites <-> dscp 6 bits */
+struct IpaDscpVlanPcpMap_t {
+	/*
+	 * valid only lower 12 bits
+	 */
+	uint16_t vlan[IPA_EoGRE_MAX_VLAN];
+	/*
+	 * dscp[vlan][pcp], valid only lower 6 bits, using pcp as index
+	 */
+	uint8_t dscp[IPA_EoGRE_MAX_VLAN][IPA_EoGRE_MAX_PCP_IDX];
+	uint8_t num_vlan; /* indicate how many vlans valid */
+	uint8_t reserved0;
+} __packed;
+
+struct ipa_ipgre_info {
+	/* ip address type */
+	enum ipa_ip_type iptype;
+	/* ipv4 */
+	uint32_t ipv4_src;
+	uint32_t ipv4_dst;
+	/* ipv6 */
+	uint32_t ipv6_src[4];
+	uint32_t ipv6_dst[4];
+	/* gre header info */
+	uint16_t gre_protocol;
+};
+
+struct ipa_ioc_eogre_info {
+	/* ip and gre info */
+	struct ipa_ipgre_info ipgre_info;
+	/* mapping info */
+	struct IpaDscpVlanPcpMap_t map_info;
+};
+
+/**
+ * struct ipa_eogre_header_add_procparams -
+ * @eth_hdr_retained:  Specifies if Ethernet header is retained or not
+ * @input_ip_version:  Specifies if Input header is IPV4(0) or IPV6(1)
+ * @output_ip_version: Specifies if template header's outer IP is IPV4(0) or IPV6(1)
+ * @second_pass:       Specifies if the data should be processed again.
+ */
+struct ipa_eogre_header_add_procparams {
+	uint32_t eth_hdr_retained :1;
+	uint32_t input_ip_version :1;
+	uint32_t output_ip_version :1;
+	uint32_t second_pass :1;
+	uint32_t reserved :28;
+};
+
+/**
+ * struct ipa_eogre_header_remove_procparams -
+ * @hdr_len_remove: Specifies how much (in bytes) of the header needs
+ *                  to be removed
+ */
+struct ipa_eogre_header_remove_procparams {
+	uint32_t hdr_len_remove:8; /* 44 bytes for IPV6, 24 for IPV4 */
+	uint32_t reserved:24;
+};
+
+/**
+ * struct ipa_eogre_hdr_proc_ctx_params -
+ * @hdr_add_param: parameters for header add
+ * @hdr_remove_param: parameters for header remove
+ */
+struct ipa_eogre_hdr_proc_ctx_params {
+	struct ipa_eogre_header_add_procparams hdr_add_param;
+	struct ipa_eogre_header_remove_procparams hdr_remove_param;
+};
+
+/**
+ * struct ipa_eth_II_to_eth_II_ex_procparams -
+ * @input_ethhdr_negative_offset: Specifies where the ethernet hdr offset is
+ *	(in bytes) from the start of the input IP hdr
+ * @output_ethhdr_negative_offset: Specifies where the ethernet hdr offset is
+ *	(in bytes) from the end of the template hdr
+ * @reserved: for future use
+ */
+struct ipa_eth_II_to_eth_II_ex_procparams {
+	uint32_t input_ethhdr_negative_offset : 8;
+	uint32_t output_ethhdr_negative_offset : 8;
+	uint32_t reserved : 16;
+};
+
+#define L2TP_USER_SPACE_SPECIFY_DST_PIPE
+
+/**
+ * struct ipa_hdr_proc_ctx_add - processing context descriptor includes
+ * in and out parameters
+ * @type: processing context type
+ * @hdr_hdl: in parameter, handle to header
+ * @l2tp_params: l2tp parameters
+ * @eogre_params: eogre parameters
+ * @generic_params: generic proc_ctx params
+ * @proc_ctx_hdl: out parameter, handle to proc_ctx, valid when status is 0
+ * @status:	out parameter, status of header add operation,
+ *		0 for success,
+ *		-1 for failure
+ */
+struct ipa_hdr_proc_ctx_add {
+	enum ipa_hdr_proc_type type;
+	uint32_t hdr_hdl;
+	uint32_t proc_ctx_hdl;
+	int status;
+	struct ipa_l2tp_hdr_proc_ctx_params l2tp_params;
+	struct ipa_eogre_hdr_proc_ctx_params eogre_params;
+	struct ipa_eth_II_to_eth_II_ex_procparams generic_params;
+};
+
+#define IPA_L2TP_HDR_PROC_SUPPORT
+
+/**
+ * struct ipa_ioc_add_hdr_proc_ctx - processing context addition parameters (support
+ * multiple processing context and commit)
+ * @commit: should processing context be written to IPA HW also?
+ * @num_proc_ctxs: num of processing context that follow
+ * @proc_ctx:	all processing context need to go here back to
+ *			back, no pointers
+ */
+struct ipa_ioc_add_hdr_proc_ctx {
+	uint8_t commit;
+	uint8_t num_proc_ctxs;
+	struct ipa_hdr_proc_ctx_add proc_ctx[0];
+};
+
+/**
+ * struct ipa_ioc_copy_hdr - retrieve a copy of the specified
+ * header - caller can then derive the complete header
+ * @name: name of the header resource
+ * @hdr:	out parameter, contents of specified header,
+ *	valid only when ioctl return val is non-negative
+ * @hdr_len: out parameter, size of above header
+ *	valid only when ioctl return val is non-negative
+ * @type: l2 header type
+ *	valid only when ioctl return val is non-negative
+ * @is_partial:	out parameter, indicates whether specified header is partial
+ *		valid only when ioctl return val is non-negative
+ * @is_eth2_ofst_valid: is eth2_ofst field valid?
+ * @eth2_ofst: offset to start of Ethernet-II/802.3 header
+ */
+struct ipa_ioc_copy_hdr {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint8_t hdr[IPA_HDR_MAX_SIZE];
+	uint8_t hdr_len;
+	enum ipa_hdr_l2_type type;
+	uint8_t is_partial;
+	uint8_t is_eth2_ofst_valid;
+	uint16_t eth2_ofst;
+};
+
+/**
+ * struct ipa_ioc_get_hdr - header entry lookup parameters, if lookup was
+ * successful caller must call put to release the reference count when done
+ * @name: name of the header resource
+ * @hdl:	out parameter, handle of header entry
+ *		valid only when ioctl return val is non-negative
+ */
+struct ipa_ioc_get_hdr {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint32_t hdl;
+};
+
+/**
+ * struct ipa_hdr_del - header descriptor includes in and out
+ * parameters
+ *
+ * @hdl: handle returned from header add operation
+ * @status:	out parameter, status of header remove operation,
+ *		0 for success,
+ *		-1 for failure
+ */
+struct ipa_hdr_del {
+	uint32_t hdl;
+	int status;
+};
+
+/**
+ * struct ipa_ioc_del_hdr - header deletion parameters (support
+ * multiple headers and commit)
+ * @commit: should headers be removed from IPA HW also?
+ * @num_hdls: num of headers being removed
+ * @ipa_hdr_del hdl: all handles need to go here back to back, no pointers
+ */
+struct ipa_ioc_del_hdr {
+	uint8_t commit;
+	uint8_t num_hdls;
+	struct ipa_hdr_del hdl[0];
+};
+
+/**
+ * struct ipa_hdr_proc_ctx_del - processing context descriptor includes
+ * in and out parameters
+ * @hdl: handle returned from processing context add operation
+ * @status:	out parameter, status of header remove operation,
+ *		0 for success,
+ *		-1 for failure
+ */
+struct ipa_hdr_proc_ctx_del {
+	uint32_t hdl;
+	int status;
+};
+
+/**
+ * ipa_ioc_del_hdr_proc_ctx - processing context deletion parameters (support
+ * multiple headers and commit)
+ * @commit: should processing contexts be removed from IPA HW also?
+ * @num_hdls: num of processing contexts being removed
+ * @ipa_hdr_proc_ctx_del hdl:	all handles need to go here back to back,
+ *				no pointers
+ */
+struct ipa_ioc_del_hdr_proc_ctx {
+	uint8_t commit;
+	uint8_t num_hdls;
+	struct ipa_hdr_proc_ctx_del hdl[0];
+};
+
+/**
+ * struct ipa_rt_rule_add - routing rule descriptor includes in
+ * and out parameters
+ * @rule: actual rule to be added
+ * @at_rear:	add at back of routing table, it is NOT possible to add rules at
+ *		the rear of the "default" routing tables
+ * @rt_rule_hdl: output parameter, handle to rule, valid when status is 0
+ * @status:	output parameter, status of routing rule add operation,
+ *		0 for success,
+ *		-1 for failure
+ */
+struct ipa_rt_rule_add {
+	struct ipa_rt_rule rule;
+	uint8_t at_rear;
+	uint32_t rt_rule_hdl;
+	int status;
+};
+
+/**
+ * struct ipa_rt_rule_add_v2 - routing rule descriptor includes
+ * in and out parameters
+ * @rule: actual rule to be added
+ * @at_rear:	add at back of routing table, it is NOT possible to add rules at
+ *		the rear of the "default" routing tables
+ * @rt_rule_hdl: output parameter, handle to rule, valid when status is 0
+ * @status:	output parameter, status of routing rule add operation,
+ *		0 for success,
+ *		-1 for failure
+ */
+struct ipa_rt_rule_add_v2 {
+	uint8_t at_rear;
+	uint32_t rt_rule_hdl;
+	int status;
+	struct ipa_rt_rule_v2 rule;
+};
+
+
+/**
+ * struct ipa_ioc_add_rt_rule - routing rule addition parameters (supports
+ * multiple rules and commit);
+ *
+ * all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @rt_tbl_name: name of routing table resource
+ * @num_rules: number of routing rules that follow
+ * @ipa_rt_rule_add rules: all rules need to go back to back here, no pointers
+ */
+struct ipa_ioc_add_rt_rule {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	char rt_tbl_name[IPA_RESOURCE_NAME_MAX];
+	uint8_t num_rules;
+	struct ipa_rt_rule_add rules[0];
+};
+
+/**
+ * struct ipa_ioc_add_rt_rule_v2 - routing rule addition
+ * parameters (supports multiple rules and commit);
+ *
+ * all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @rt_tbl_name: name of routing table resource
+ * @num_rules: number of routing rules that follow
+ * @rule_add_size: sizeof(struct ipa_rt_rule_add_v2)
+ * @reserved1: reserved bits for alignment
+ * @reserved2: reserved bits for alignment
+ * @ipa_rt_rule_add rules: all rules need to go back to back here, no pointers
+ */
+struct ipa_ioc_add_rt_rule_v2 {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	char rt_tbl_name[IPA_RESOURCE_NAME_MAX];
+	uint8_t num_rules;
+	uint32_t rule_add_size;
+	uint32_t reserved1;
+	uint8_t reserved2;
+	uint64_t rules;
+};
+
+/**
+ * struct ipa_ioc_add_rt_rule_after - routing rule addition after a specific
+ * rule parameters(supports multiple rules and commit);
+ *
+ * all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @rt_tbl_name: name of routing table resource
+ * @num_rules: number of routing rules that follow
+ * @add_after_hdl: the rules will be added after this specific rule
+ * @ipa_rt_rule_add rules: all rules need to go back to back here, no pointers
+ *			   at_rear field will be ignored when using this IOCTL
+ */
+struct ipa_ioc_add_rt_rule_after {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	char rt_tbl_name[IPA_RESOURCE_NAME_MAX];
+	uint8_t num_rules;
+	uint32_t add_after_hdl;
+	struct ipa_rt_rule_add rules[0];
+};
+
+/**
+ * struct ipa_ioc_add_rt_rule_after_v2 - routing rule addition
+ * after a specific rule parameters(supports multiple rules and
+ * commit);
+ *
+ * all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @rt_tbl_name: name of routing table resource
+ * @num_rules: number of routing rules that follow
+ * @add_after_hdl: the rules will be added after this specific rule
+ * @rule_add_size: sizeof(struct ipa_rt_rule_add_v2)
+ * @reserved: reserved bits for alignment
+ * @ipa_rt_rule_add rules: all rules need to go back to back here, no pointers
+ *			   at_rear field will be ignored when using this IOCTL
+ */
+struct ipa_ioc_add_rt_rule_after_v2 {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	char rt_tbl_name[IPA_RESOURCE_NAME_MAX];
+	uint8_t num_rules;
+	uint32_t add_after_hdl;
+	uint32_t rule_add_size;
+	uint8_t reserved;
+	uint64_t rules;
+};
+
+/**
+ * struct ipa_rt_rule_mdfy - routing rule descriptor includes
+ * in and out parameters
+ * @rule: actual rule to be added
+ * @rt_rule_hdl: handle of the rule to be modified
+ * @status:	output parameter, status of routing rule modify  operation,
+ *		0 for success,
+ *		-1 for failure
+ *
+ */
+struct ipa_rt_rule_mdfy {
+	struct ipa_rt_rule rule;
+	uint32_t rt_rule_hdl;
+	int status;
+};
+
+/**
+ * struct ipa_rt_rule_mdfy_v2 - routing rule descriptor includes
+ * in and out parameters
+ * @rule: actual rule to be added
+ * @rt_rule_hdl: handle of the rule to be modified
+ * @status:	output parameter, status of routing rule modify  operation,
+ *		0 for success,
+ *		-1 for failure
+ *
+ */
+struct ipa_rt_rule_mdfy_v2 {
+	uint32_t rt_rule_hdl;
+	int status;
+	struct ipa_rt_rule_v2 rule;
+};
+
+/**
+ * struct ipa_ioc_mdfy_rt_rule - routing rule modify parameters (supports
+ * multiple rules and commit)
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @num_rules: number of routing rules that follow
+ * @rules: all rules need to go back to back here, no pointers
+ */
+struct ipa_ioc_mdfy_rt_rule {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	uint8_t num_rules;
+	struct ipa_rt_rule_mdfy rules[0];
+};
+
+/**
+ * struct ipa_ioc_mdfy_rt_rule_v2 - routing rule modify
+ * parameters (supports multiple rules and commit)
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @num_rules: number of routing rules that follow
+ * @rule_mdfy_size: sizeof(struct ipa_rt_rule_mdfy_v2)
+ * @reserved: reserved bits for alignment
+ * @rules: all rules need to go back to back here, no pointers
+ */
+struct ipa_ioc_mdfy_rt_rule_v2 {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	uint8_t num_rules;
+	uint32_t rule_mdfy_size;
+	uint8_t reserved;
+	uint64_t rules;
+};
+
+/**
+ * struct ipa_rt_rule_del - routing rule descriptor includes in
+ * and out parameters
+ * @hdl: handle returned from route rule add operation
+ * @status:	output parameter, status of route rule delete operation,
+ *		0 for success,
+ *		-1 for failure
+ */
+struct ipa_rt_rule_del {
+	uint32_t hdl;
+	int status;
+};
+
+/**
+ * struct ipa_rt_rule_add_ext - routing rule descriptor includes in
+ * and out parameters
+ * @rule: actual rule to be added
+ * @at_rear:	add at back of routing table, it is NOT possible to add rules at
+ *		the rear of the "default" routing tables
+ * @rt_rule_hdl: output parameter, handle to rule, valid when status is 0
+ * @status:	output parameter, status of routing rule add operation,
+ *		0 for success,
+ *		-1 for failure
+ * @rule_id: rule_id to be assigned to the routing rule. In case client
+ *  specifies rule_id as 0 the driver will assign a new rule_id
+ */
+struct ipa_rt_rule_add_ext {
+	struct ipa_rt_rule rule;
+	uint8_t at_rear;
+	uint32_t rt_rule_hdl;
+	int status;
+	uint16_t rule_id;
+};
+
+/**
+ * struct ipa_rt_rule_add_ext_v2 - routing rule descriptor
+ * includes in and out parameters
+ * @rule: actual rule to be added
+ * @at_rear:	add at back of routing table, it is NOT possible to add rules at
+ *		the rear of the "default" routing tables
+ * @rt_rule_hdl: output parameter, handle to rule, valid when status is 0
+ * @status:	output parameter, status of routing rule add operation,
+ *		0 for success,
+ *		-1 for failure
+ * @rule_id: rule_id to be assigned to the routing rule. In case client
+ *  specifies rule_id as 0 the driver will assign a new rule_id
+ */
+struct ipa_rt_rule_add_ext_v2 {
+	uint8_t at_rear;
+	uint32_t rt_rule_hdl;
+	int status;
+	uint16_t rule_id;
+	struct ipa_rt_rule_v2 rule;
+};
+
+/**
+ * struct ipa_ioc_add_rt_rule_ext - routing rule addition
+ * parameters (supports multiple rules and commit with rule_id);
+ *
+ * all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @rt_tbl_name: name of routing table resource
+ * @num_rules: number of routing rules that follow
+ * @ipa_rt_rule_add_ext rules: all rules need to go back to back here,
+ *  no pointers
+ */
+struct ipa_ioc_add_rt_rule_ext {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	char rt_tbl_name[IPA_RESOURCE_NAME_MAX];
+	uint8_t num_rules;
+	struct ipa_rt_rule_add_ext rules[0];
+};
+
+/**
+ * struct ipa_ioc_add_rt_rule_ext_v2 - routing rule addition
+ * parameters (supports multiple rules and commit with rule_id);
+ *
+ * all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @rt_tbl_name: name of routing table resource
+ * @num_rules: number of routing rules that follow
+ * @rule_add_ext_size: sizeof(struct ipa_rt_rule_add_ext_v2)
+ * @reserved1: reserved bits for alignment
+ * @reserved2: reserved bits for alignment
+ * @ipa_rt_rule_add_ext rules: all rules need to go back to back here,
+ *  no pointers
+ */
+struct ipa_ioc_add_rt_rule_ext_v2 {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	char rt_tbl_name[IPA_RESOURCE_NAME_MAX];
+	uint8_t num_rules;
+	uint32_t rule_add_ext_size;
+	uint32_t reserved1;
+	uint8_t reserved2;
+	uint64_t rules;
+};
+
+
+/**
+ * struct ipa_ioc_del_rt_rule - routing rule deletion parameters (supports
+ * multiple headers and commit)
+ * @commit: should rules be removed from IPA HW also?
+ * @ip: IP family of rules
+ * @num_hdls: num of rules being removed
+ * @ipa_rt_rule_del hdl: all handles need to go back to back here, no pointers
+ */
+struct ipa_ioc_del_rt_rule {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	uint8_t num_hdls;
+	struct ipa_rt_rule_del hdl[0];
+};
+
+/**
+ * struct ipa_ioc_get_rt_tbl_indx - routing table index lookup parameters
+ * @ip: IP family of table
+ * @name: name of routing table resource
+ * @idx:	output parameter, routing table index, valid only when ioctl
+ *		return val is non-negative
+ */
+struct ipa_ioc_get_rt_tbl_indx {
+	enum ipa_ip_type ip;
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint32_t idx;
+};
+
+/**
+ * struct ipa_flt_rule_add - filtering rule descriptor includes
+ * in and out parameters
+ * @rule: actual rule to be added
+ * @at_rear: add at back of filtering table?
+ * @flt_rule_hdl: out parameter, handle to rule, valid when status is 0
+ * @status:	output parameter, status of filtering rule add   operation,
+ *		0 for success,
+ *		-1 for failure
+ *
+ */
+struct ipa_flt_rule_add {
+	struct ipa_flt_rule rule;
+	uint8_t at_rear;
+	uint32_t flt_rule_hdl;
+	int status;
+};
+
+/**
+ * struct ipa_flt_rule_add_v2 - filtering rule descriptor
+ * includes in and out parameters
+ * @rule: actual rule to be added
+ * @at_rear: add at back of filtering table?
+ * @flt_rule_hdl: out parameter, handle to rule, valid when status is 0
+ * @status:	output parameter, status of filtering rule add   operation,
+ *		0 for success,
+ *		-1 for failure
+ *
+ */
+struct ipa_flt_rule_add_v2 {
+	uint8_t at_rear;
+	uint32_t flt_rule_hdl;
+	int status;
+	struct ipa_flt_rule_v2 rule;
+};
+
+/**
+ * struct ipa_ioc_add_flt_rule - filtering rule addition parameters (supports
+ * multiple rules and commit)
+ * all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @ep:	which "clients" pipe does this rule apply to?
+ *	valid only when global is 0
+ * @global: does this apply to global filter table of specific IP family
+ * @num_rules: number of filtering rules that follow
+ * @rules: all rules need to go back to back here, no pointers
+ */
+struct ipa_ioc_add_flt_rule {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	enum ipa_client_type ep;
+	uint8_t global;
+	uint8_t num_rules;
+	struct ipa_flt_rule_add rules[0];
+};
+
+/**
+ * struct ipa_ioc_add_flt_rule_v2 - filtering rule addition
+ * parameters (supports multiple rules and commit)
+ * all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @ep:	which "clients" pipe does this rule apply to?
+ *	valid only when global is 0
+ * @global: does this apply to global filter table of specific IP family
+ * @num_rules: number of filtering rules that follow
+ * @flt_rule_size: sizeof(struct ipa_flt_rule_add_v2)
+ * @reserved1: reserved bits for alignment
+ * @reserved2: reserved bits for alignment
+ * @reserved3: reserved bits for alignment
+ * @rules: all rules need to go back to back here, no pointers
+ */
+struct ipa_ioc_add_flt_rule_v2 {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	enum ipa_client_type ep;
+	uint8_t global;
+	uint8_t num_rules;
+	uint32_t flt_rule_size;
+	uint32_t reserved1;
+	uint16_t reserved2;
+	uint8_t reserved3;
+	uint64_t rules;
+};
+
+
+/**
+ * struct ipa_ioc_add_flt_rule_after - filtering rule addition after specific
+ * rule parameters (supports multiple rules and commit)
+ * all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @ep:	which "clients" pipe does this rule apply to?
+ * @num_rules: number of filtering rules that follow
+ * @add_after_hdl: rules will be added after the rule with this handle
+ * @rules: all rules need to go back to back here, no pointers. at rear field
+ *	   is ignored when using this IOCTL
+ */
+struct ipa_ioc_add_flt_rule_after {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	enum ipa_client_type ep;
+	uint8_t num_rules;
+	uint32_t add_after_hdl;
+	struct ipa_flt_rule_add rules[0];
+};
+
+/**
+ * struct ipa_ioc_add_flt_rule_after_v2 - filtering rule
+ * addition after specific rule parameters (supports multiple
+ * rules and commit) all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @ep:	which "clients" pipe does this rule apply to?
+ * @num_rules: number of filtering rules that follow
+ * @add_after_hdl: rules will be added after the rule with this handle
+ * @flt_rule_size: sizeof(struct ipa_flt_rule_add_v2)
+ * @reserved: reserved bits for alignment
+ * @rules: all rules need to go back to back here, no pointers. at rear field
+ *	   is ignored when using this IOCTL
+ */
+struct ipa_ioc_add_flt_rule_after_v2 {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	enum ipa_client_type ep;
+	uint8_t num_rules;
+	uint32_t add_after_hdl;
+	uint32_t flt_rule_size;
+	uint32_t reserved;
+	uint64_t rules;
+};
+
+/**
+ * struct ipa_flt_rule_mdfy - filtering rule descriptor includes
+ * in and out parameters
+ * @rule: actual rule to be added
+ * @rule_hdl: handle to rule
+ * @status:	output parameter, status of filtering rule modify  operation,
+ *		0 for success,
+ *		-1 for failure
+ *
+ */
+struct ipa_flt_rule_mdfy {
+	struct ipa_flt_rule rule;
+	uint32_t rule_hdl;
+	int status;
+};
+
+/**
+ * struct ipa_flt_rule_mdfy_v2 - filtering rule descriptor
+ * includes in and out parameters
+ * @rule: actual rule to be added
+ * @rule_hdl: handle to rule
+ * @status:	output parameter, status of filtering rule modify  operation,
+ *		0 for success,
+ *		-1 for failure
+ *
+ */
+struct ipa_flt_rule_mdfy_v2 {
+	uint32_t rule_hdl;
+	int status;
+	struct ipa_flt_rule_v2 rule;
+};
+
+/**
+ * struct ipa_ioc_mdfy_flt_rule - filtering rule modify parameters (supports
+ * multiple rules and commit)
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @num_rules: number of filtering rules that follow
+ * @rules: all rules need to go back to back here, no pointers
+ */
+struct ipa_ioc_mdfy_flt_rule {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	uint8_t num_rules;
+	/* legacy zero-length array: @num_rules entries are laid out in-line */
+	struct ipa_flt_rule_mdfy rules[0];
+};
+
+/**
+ * struct ipa_ioc_mdfy_flt_rule_v2 - filtering rule modify
+ * parameters (supports multiple rules and commit)
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @num_rules: number of filtering rules that follow
+ * @rule_mdfy_size: sizeof(struct ipa_flt_rule_mdfy_v2)
+ * @reserved: reserved bits for alignment
+ * @rules: all rules need to go back to back here, no pointers
+ */
+struct ipa_ioc_mdfy_flt_rule_v2 {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	uint8_t num_rules;
+	uint32_t rule_mdfy_size;
+	uint8_t reserved;
+	/*
+	 * NOTE(review): unlike the v1 struct above, this is a __u64; in the
+	 * other _v2 ioctl structs of this header such a field carries a
+	 * user-space pointer cast to (__u64)(uintptr_t) - confirm against
+	 * the ioctl handler before relying on the "no pointers" wording.
+	 */
+	uint64_t rules;
+};
+
+/**
+ * struct ipa_flt_rule_del - filtering rule descriptor includes
+ * in and out parameters
+ *
+ * @hdl: handle returned from filtering rule add operation
+ * @status:	output parameter, status of filtering rule delete operation,
+ *		0 for success,
+ *		-1 for failure
+ */
+struct ipa_flt_rule_del {
+	uint32_t hdl;
+	int status;
+};
+
+/**
+ * struct ipa_ioc_del_flt_rule - filtering rule deletion parameters (supports
+ * multiple headers and commit)
+ * @commit: should rules be removed from IPA HW also?
+ * @ip: IP family of rules
+ * @num_hdls: num of rules being removed
+ * @hdl: all handles need to go back to back here, no pointers
+ */
+struct ipa_ioc_del_flt_rule {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	uint8_t num_hdls;
+	/* legacy zero-length array: @num_hdls entries are laid out in-line */
+	struct ipa_flt_rule_del hdl[0];
+};
+
+/**
+ * struct ipa_ioc_get_rt_tbl - routing table lookup parameters, if lookup was
+ * successful caller must call put to release the reference
+ * count when done
+ * @ip: IP family of table
+ * @name: name of routing table resource
+ * @hdl:	output parameter, handle of routing table, valid only when ioctl
+ *		return val is non-negative
+ */
+struct ipa_ioc_get_rt_tbl {
+	enum ipa_ip_type ip;
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint32_t hdl;
+};
+
+/**
+ * struct ipa_ioc_query_intf - used to lookup number of tx and
+ * rx properties of interface
+ * @name: name of interface
+ * @num_tx_props:	output parameter, number of tx properties
+ *			valid only when ioctl return val is non-negative
+ * @num_rx_props:	output parameter, number of rx properties
+ *			valid only when ioctl return val is non-negative
+ * @num_ext_props:	output parameter, number of ext properties
+ *			valid only when ioctl return val is non-negative
+ * @excp_pipe:		exception packets of this interface should be
+ *			routed to this pipe
+ */
+struct ipa_ioc_query_intf {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint32_t num_tx_props;
+	uint32_t num_rx_props;
+	uint32_t num_ext_props;
+	enum ipa_client_type excp_pipe;
+};
+
+/**
+ * struct ipa_ioc_tx_intf_prop - interface tx property
+ * @ip: IP family of routing rule
+ * @attrib: routing rule
+ * @dst_pipe: routing output pipe
+ * @alt_dst_pipe: alternate routing output pipe
+ * @hdr_name: name of associated header if any, empty string when no header
+ * @hdr_l2_type: type of associated header if any, use NONE when no header
+ */
+struct ipa_ioc_tx_intf_prop {
+	enum ipa_ip_type ip;
+	struct ipa_rule_attrib attrib;
+	enum ipa_client_type dst_pipe;
+	enum ipa_client_type alt_dst_pipe;
+	char hdr_name[IPA_RESOURCE_NAME_MAX];
+	enum ipa_hdr_l2_type hdr_l2_type;
+};
+
+/**
+ * struct ipa_ioc_query_intf_tx_props - interface tx properties
+ * @name: name of interface
+ * @num_tx_props: number of TX properties
+ * @tx[0]: output parameter, the tx properties go here back to back
+ */
+struct ipa_ioc_query_intf_tx_props {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint32_t num_tx_props;
+	struct ipa_ioc_tx_intf_prop tx[0];
+};
+
+/**
+ * struct ipa_ioc_ext_intf_prop - interface extended property
+ * @ip: IP family of routing rule
+ * @eq_attrib: attributes of the rule in equation form
+ * @action: action field
+ * @rt_tbl_idx: index of RT table referred to by filter rule
+ * @mux_id: MUX_ID
+ * @filter_hdl: handle of filter (as specified by provider of filter rule)
+ * @is_xlat_rule: it is xlat flt rule or not
+ * @rule_id: ID of the rule
+ * @is_rule_hashable: is the rule hashable or not
+ * @replicate_needed: whether the rule needs replication - TODO confirm
+ *		      exact semantics against the driver code
+ */
+struct ipa_ioc_ext_intf_prop {
+	enum ipa_ip_type ip;
+	struct ipa_ipfltri_rule_eq eq_attrib;
+	enum ipa_flt_action action;
+	uint32_t rt_tbl_idx;
+	uint8_t mux_id;
+	uint32_t filter_hdl;
+	uint8_t is_xlat_rule;
+	uint32_t rule_id;
+	uint8_t is_rule_hashable;
+/* NOTE(review): bare marker macro, presumably for user-space compile-time
+ * feature detection - it defines no value.
+ */
+#define IPA_V6_UL_WL_FIREWALL_HANDLE
+	uint8_t replicate_needed;
+};
+
+/**
+ * struct ipa_ioc_query_intf_ext_props - interface ext properties
+ * @name: name of interface
+ * @num_ext_props: number of EXT properties
+ * @ext[0]: output parameter, the ext properties go here back to back
+ */
+struct ipa_ioc_query_intf_ext_props {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint32_t num_ext_props;
+	struct ipa_ioc_ext_intf_prop ext[0];
+};
+
+/**
+ * struct ipa_ioc_rx_intf_prop - interface rx property
+ * @ip: IP family of filtering rule
+ * @attrib: filtering rule
+ * @src_pipe: input pipe
+ * @hdr_l2_type: type of associated header if any, use NONE when no header
+ */
+struct ipa_ioc_rx_intf_prop {
+	enum ipa_ip_type ip;
+	struct ipa_rule_attrib attrib;
+	enum ipa_client_type src_pipe;
+	enum ipa_hdr_l2_type hdr_l2_type;
+};
+
+/**
+ * struct ipa_ioc_query_intf_rx_props - interface rx properties
+ * @name: name of interface
+ * @num_rx_props: number of RX properties
+ * @rx: output parameter, the rx properties go here back to back
+ */
+struct ipa_ioc_query_intf_rx_props {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint32_t num_rx_props;
+	struct ipa_ioc_rx_intf_prop rx[0];
+};
+
+/**
+ * struct ipa_ioc_nat_alloc_mem - nat table memory allocation
+ * properties
+ * @dev_name: input parameter, the name of table
+ * @size: input parameter, size of table in bytes
+ * @offset: output parameter, offset into page in case of system memory
+ */
+struct ipa_ioc_nat_alloc_mem {
+	char dev_name[IPA_RESOURCE_NAME_MAX];
+	/*
+	 * NOTE(review): size_t and __kernel_off_t have different widths on
+	 * 32- vs 64-bit user space, so this UAPI layout is not fixed-size;
+	 * confirm the driver provides a compat-ioctl path for it.
+	 */
+	size_t size;
+	__kernel_off_t offset;
+};
+
+/**
+ * struct ipa_ioc_nat_ipv6ct_table_alloc - NAT/IPv6CT table memory allocation
+ * properties
+ * @size: input parameter, size of table in bytes
+ * @offset: output parameter, offset into page in case of system memory
+ */
+struct ipa_ioc_nat_ipv6ct_table_alloc {
+	size_t size;
+	__kernel_off_t offset;
+};
+
+/**
+ * struct ipa_ioc_v4_nat_init - nat table initialization parameters
+ * @tbl_index: input parameter, index of the table
+ * @ipv4_rules_offset: input parameter, ipv4 rules address offset
+ * @expn_rules_offset: input parameter, ipv4 expansion rules address offset
+ * @index_offset: input parameter, index rules offset
+ * @index_expn_offset: input parameter, index expansion rules offset
+ * @table_entries: input parameter, ipv4 rules table number of entries
+ * @expn_table_entries: input parameter, ipv4 expansion rules table number of
+ *                      entries
+ * @ip_addr: input parameter, public ip address
+ * @mem_type: input parameter, type of memory the table resides in
+ * @focus_change: input parameter, are we moving to/from sram or ddr
+ */
+struct ipa_ioc_v4_nat_init {
+	uint8_t  tbl_index;
+	uint32_t ipv4_rules_offset;
+	uint32_t expn_rules_offset;
+
+	uint32_t index_offset;
+	uint32_t index_expn_offset;
+
+	uint16_t table_entries;
+	uint16_t expn_table_entries;
+	uint32_t ip_addr;
+
+	uint8_t  mem_type;
+	uint8_t  focus_change;
+};
+
+/**
+ * struct ipa_ioc_ipv6ct_init - IPv6CT table initialization parameters
+ * @base_table_offset: input parameter, IPv6CT base table address offset
+ * @expn_table_offset: input parameter, IPv6CT expansion table address offset
+ * @table_entries: input parameter, IPv6CT table number of entries
+ * @expn_table_entries: input parameter, IPv6CT expansion table number of
+ *                      entries
+ * @tbl_index: input parameter, index of the table
+ */
+struct ipa_ioc_ipv6ct_init {
+	uint32_t base_table_offset;
+	uint32_t expn_table_offset;
+	uint16_t table_entries;
+	uint16_t expn_table_entries;
+	uint8_t tbl_index;
+};
+
+/**
+ * struct ipa_ioc_v4_nat_del - nat table delete parameter
+ * @table_index: input parameter, index of the table
+ * @public_ip_addr: input parameter, public ip address
+ */
+struct ipa_ioc_v4_nat_del {
+	uint8_t table_index;
+	uint32_t public_ip_addr;
+};
+
+/**
+ * struct ipa_ioc_nat_ipv6ct_table_del - NAT/IPv6CT table delete parameter
+ * @table_index: input parameter, index of the table
+ * @mem_type: input parameter, type of memory the table resides in
+ */
+struct ipa_ioc_nat_ipv6ct_table_del {
+	uint8_t table_index;
+	uint8_t mem_type;
+};
+
+/**
+ * struct ipa_ioc_nat_dma_one - nat/ipv6ct dma command parameter
+ * @table_index: input parameter, index of the table
+ * @base_addr:	type of table, from which the base address of the table
+ *		can be inferred
+ * @offset: destination offset within the NAT table
+ * @data: data to be written.
+ */
+struct ipa_ioc_nat_dma_one {
+	uint8_t table_index;
+	uint8_t base_addr;
+
+	uint32_t offset;
+	uint16_t data;
+
+};
+
+/**
+ * struct ipa_ioc_nat_dma_cmd - To hold multiple nat/ipv6ct dma commands
+ * @entries: number of dma commands in use
+ * @dma: data pointer to the dma commands
+ * @mem_type: input parameter, type of memory the table resides in
+ */
+struct ipa_ioc_nat_dma_cmd {
+	uint8_t entries;
+	uint8_t mem_type;
+	/* legacy zero-length array: @entries commands are laid out in-line */
+	struct ipa_ioc_nat_dma_one dma[0];
+};
+
+/**
+ * struct ipa_ioc_nat_pdn_entry - PDN entry modification data
+ * @pdn_index: index of the entry in the PDN config table to be changed
+ * @public_ip: PDN's public ip
+ * @src_metadata: PDN's source NAT metadata for metadata replacement
+ * @dst_metadata: PDN's destination NAT metadata for metadata replacement
+ */
+struct ipa_ioc_nat_pdn_entry {
+	uint8_t pdn_index;
+	uint32_t public_ip;
+	uint32_t src_metadata;
+	uint32_t dst_metadata;
+};
+
+/**
+ * struct ipa_ioc_vlan_iface_info - add vlan interface
+ * @name: interface name
+ * @vlan_id: VLAN ID
+ */
+struct ipa_ioc_vlan_iface_info {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint8_t vlan_id;
+};
+
+/**
+ * enum ipa_l2tp_tunnel_type - IP or UDP
+ */
+enum ipa_l2tp_tunnel_type {
+	IPA_L2TP_TUNNEL_IP = 1,
+	IPA_L2TP_TUNNEL_UDP = 2
+/* the self-referential #define lets user space test for this enumerator
+ * with #ifdef at compile time
+ */
+#define IPA_L2TP_TUNNEL_UDP IPA_L2TP_TUNNEL_UDP
+};
+
+/**
+ * struct ipa_ioc_l2tp_vlan_mapping_info - l2tp->vlan mapping info
+ * @iptype: l2tp tunnel IP type
+ * @l2tp_iface_name: l2tp interface name
+ * @l2tp_session_id: l2tp session id
+ * @vlan_iface_name: vlan interface name
+ * @tunnel_type: l2tp tunnel type
+ * @src_port: UDP source port
+ * @dst_port: UDP destination port
+ * @mtu: MTU of the L2TP interface
+ * @padding: explicit padding for alignment
+ */
+struct ipa_ioc_l2tp_vlan_mapping_info {
+	enum ipa_ip_type iptype;
+	char l2tp_iface_name[IPA_RESOURCE_NAME_MAX];
+	uint8_t l2tp_session_id;
+	char vlan_iface_name[IPA_RESOURCE_NAME_MAX];
+	enum ipa_l2tp_tunnel_type tunnel_type;
+	__u16 src_port;
+	__u16 dst_port;
+	__u16 mtu;
+	__u8 padding;
+};
+
+/**
+ * struct ipa_ioc_gsb_info - connect/disconnect
+ * @name: interface name
+ */
+struct ipa_ioc_gsb_info {
+	char name[IPA_RESOURCE_NAME_MAX];
+};
+
+#define QUERY_MAX_EP_PAIRS	2
+
+/* per-peripheral-instance EP pair IDs (see struct ipa_ep_pair_info.ep_id) */
+#define IPA_USB0_EP_ID		11
+#define IPA_USB1_EP_ID		12
+
+#define IPA_PCIE0_EP_ID		21
+#define IPA_PCIE1_EP_ID		22
+
+#define IPA_ETH0_EP_ID		31
+#define IPA_ETH1_EP_ID		32
+
+enum ipa_peripheral_ep_type {
+	IPA_DATA_EP_TYP_RESERVED = 0,
+	IPA_DATA_EP_TYP_HSIC = 1,
+	IPA_DATA_EP_TYP_HSUSB = 2,
+	IPA_DATA_EP_TYP_PCIE = 3,
+	IPA_DATA_EP_TYP_EMBEDDED = 4,
+	IPA_DATA_EP_TYP_BAM_DMUX = 5,
+	IPA_DATA_EP_TYP_ETH,
+};
+
+enum ipa_data_ep_prot_type {
+	IPA_PROT_RMNET = 0,
+	IPA_PROT_RMNET_CV2X = 1,
+	IPA_PROT_MAX
+};
+
+struct ipa_ep_pair_info {
+	__u32 consumer_pipe_num;
+	__u32 producer_pipe_num;
+	__u32 ep_id;
+	__u32 padding;
+};
+
+/**
+ * struct ipa_ioc_get_ep_info - query usb/pcie ep info
+ * @ep_type: type USB/PCIE - i/p param
+ * @max_ep_pairs: max number of ep_pairs (constant),
+ *		(QUERY_MAX_EP_PAIRS)
+ * @num_ep_pairs: number of ep_pairs - o/p param
+ * @ep_pair_size: sizeof(ipa_ep_pair_info) * max_ep_pairs
+ * @padding: explicit padding for alignment
+ * @info: structure contains ep pair info
+ * @teth_prot : RMNET/CV2X --i/p param
+ * @teth_prot_valid - validity of i/p param protocol
+ */
+struct ipa_ioc_get_ep_info {
+	enum ipa_peripheral_ep_type ep_type;
+	__u32 ep_pair_size;
+	__u8 max_ep_pairs;
+	__u8 num_ep_pairs;
+	__u16 padding;
+	/*
+	 * NOTE(review): __u64, not an embedded struct - appears to carry a
+	 * user-space pointer to an array of struct ipa_ep_pair_info cast to
+	 * (__u64)(uintptr_t); confirm against the ioctl handler.
+	 */
+	__u64 info;
+	enum ipa_data_ep_prot_type teth_prot;
+	__u8 teth_prot_valid;
+};
+
+/**
+ * struct ipa_set_pkt_threshold
+ * @pkt_threshold_enable: indicate pkt_thr enable or not
+ * @pkt_threshold: if pkt_threshold_enable = true, given the values
+ */
+struct ipa_set_pkt_threshold {
+	uint8_t pkt_threshold_enable;
+	int pkt_threshold;
+};
+
+/**
+ * struct ipa_ioc_set_pkt_threshold
+ * @ioctl_ptr: has to be typecasted to (__u64)(uintptr_t)
+ * @ioctl_data_size:
+ * Eg: For ipa_set_pkt_threshold = sizeof(ipa_set_pkt_threshold)
+ */
+struct ipa_ioc_set_pkt_threshold {
+	__u64 ioctl_ptr;
+	__u32 ioctl_data_size;
+	__u32 padding;
+};
+
+/**
+ * struct ipa_ioc_wigig_fst_switch - switch between wigig and wlan
+ * @netdev_name: wigig interface name
+ * @client_mac_addr: client to switch between netdevs
+ * @to_wigig: shall wlan client switch to wigig or the opposite?
+ */
+struct ipa_ioc_wigig_fst_switch {
+	uint8_t netdev_name[IPA_RESOURCE_NAME_MAX];
+	uint8_t client_mac_addr[IPA_MAC_ADDR_SIZE];
+	int to_wigig;
+};
+
+/**
+ * struct ipa_msg_meta - Format of the message metadata.
+ * @msg_type: the type of the message
+ * @rsvd: reserved bits for future use.
+ * @msg_len: the length of the message in bytes
+ *
+ * For push model:
+ * Client in user-space should issue a read on the device (/dev/ipa) with a
+ * sufficiently large buffer in a continuous loop, call will block when there is
+ * no message to read. Upon return, client can read the ipa_msg_meta from start
+ * of buffer to find out type and length of message
+ * size of buffer supplied >= (size of largest message + size of metadata)
+ *
+ * For pull model:
+ * Client in user-space can also issue a pull msg IOCTL to device (/dev/ipa)
+ * with a payload containing space for the ipa_msg_meta and the message specific
+ * payload length.
+ * size of buffer supplied == (len of specific message  + size of metadata)
+ */
+struct ipa_msg_meta {
+	uint8_t msg_type;
+	uint8_t rsvd;
+	uint16_t msg_len;
+};
+
+/**
+ * struct ipa_wlan_msg - To hold information about wlan client
+ * @name: name of the wlan interface
+ * @mac_addr: mac address of wlan client
+ * @if_index: netdev interface index
+ *
+ * wlan drivers need to pass name of wlan iface and mac address of
+ * wlan client along with ipa_wlan_event, whenever a wlan client is
+ * connected/disconnected/moved to power save/come out of power save
+ */
+struct ipa_wlan_msg {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint8_t mac_addr[IPA_MAC_ADDR_SIZE];
+	int16_t if_index;
+};
+
+/**
+ * enum ipa_wlan_hdr_attrib_type - attribute type
+ * in wlan client header
+ *
+ * WLAN_HDR_ATTRIB_MAC_ADDR: attrib type mac address
+ * WLAN_HDR_ATTRIB_STA_ID: attrib type station id
+ */
+enum ipa_wlan_hdr_attrib_type {
+	WLAN_HDR_ATTRIB_MAC_ADDR,
+	WLAN_HDR_ATTRIB_STA_ID
+};
+
+/**
+ * struct ipa_wlan_hdr_attrib_val - header attribute value
+ * @attrib_type: type of attribute
+ * @offset: offset of attribute within header
+ * @u.mac_addr: mac address, valid when @attrib_type is MAC_ADDR
+ * @u.sta_id: station id, valid when @attrib_type is STA_ID
+ */
+struct ipa_wlan_hdr_attrib_val {
+	enum ipa_wlan_hdr_attrib_type attrib_type;
+	uint8_t offset;
+	union {
+		uint8_t mac_addr[IPA_MAC_ADDR_SIZE];
+		uint8_t sta_id;
+	} u;
+};
+
+/**
+ * struct ipa_wlan_msg_ex - To hold information about wlan client
+ * @name: name of the wlan interface
+ * @num_of_attribs: number of attributes
+ * @attrib_val: holds attribute values
+ *
+ * wlan drivers need to pass name of wlan iface and mac address
+ * of wlan client or station id along with ipa_wlan_event,
+ * whenever a wlan client is connected/disconnected/moved to
+ * power save/come out of power save
+ */
+struct ipa_wlan_msg_ex {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint8_t num_of_attribs;
+	/* legacy zero-length array: @num_of_attribs entries follow in-line */
+	struct ipa_wlan_hdr_attrib_val attribs[0];
+};
+
+/**
+ * struct ipa_wigig_msg- To hold information about wigig event
+ * @name: name of the wigig interface
+ * @client_mac_addr: the relevant wigig client mac address
+ * @ipa_client: TX pipe associated with the wigig client in case of connect
+ * @to_wigig: FST switch direction wlan->wigig?
+ */
+struct ipa_wigig_msg {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint8_t client_mac_addr[IPA_MAC_ADDR_SIZE];
+	union {
+		enum ipa_client_type ipa_client;
+		uint8_t to_wigig;
+	} u;
+};
+
+/**
+ * struct ipa_ecm_msg - To hold information about ecm interface
+ * @name: name of the ecm interface
+ * @ifindex: netdev interface index
+ */
+struct ipa_ecm_msg {
+	char name[IPA_RESOURCE_NAME_MAX];
+	int ifindex;
+};
+
+/**
+ * struct ipa_wan_msg - To hold information about wan client
+ * @upstream_ifname: name of the wan interface
+ * @tethered_ifname: name of the tethered interface
+ * @ip: IP family of the gateway addresses below
+ * @ipv4_addr_gw: gateway ipv4 address
+ * @ipv6_addr_gw: gateway ipv6 address
+ *
+ * CnE need to pass the name of default wan iface when connected/disconnected.
+ * CNE need to pass the gw info in wlan AP+STA mode.
+ * netmgr need to pass the name of wan eMBMS iface when connected.
+ */
+struct ipa_wan_msg {
+	char upstream_ifname[IPA_RESOURCE_NAME_MAX];
+	char tethered_ifname[IPA_RESOURCE_NAME_MAX];
+	enum ipa_ip_type ip;
+	uint32_t ipv4_addr_gw;
+	uint32_t ipv6_addr_gw[IPA_WAN_MSG_IPv6_ADDR_GW_LEN];
+};
+
+/* uc activation command Ids */
+#define IPA_SOCKSV5_ADD_COM_ID		15
+#define IPA_IPv6_NAT_COM_ID		16
+
+/**
+ * ipa_kernel_tests_socksv5_uc_tmpl - uc activation entry info
+ * @cmd_id: uc command id
+ * @cmd_param: uC command param
+ * @ipa_kernel_tests_ip_hdr_temp: ip header
+ * @src_port: source port
+ * @dst_port: destination port
+ * @ipa_sockv5_mask: uc attribute mask for options/etc
+ * @out_irs: 4B/4B Seq/Ack/SACK
+ * @out_iss
+ * @in_irs
+ * @in_iss
+ * @out_ircv_tsval: timestamp attributes
+ * @in_ircv_tsecr
+ * @out_ircv_tsecr
+ * @in_ircv_tsval
+ * @in_isnd_wscale: window scale attributes
+ * @out_isnd_wscale
+ * @in_ircv_wscale
+ * @out_ircv_wscale
+ * @direction: 1 for UL 0 for DL
+ * @handle: uc activation table index
+ */
+struct ipa_kernel_tests_socksv5_uc_tmpl {
+	/* direction 1 = UL, 0 = DL */
+	__u8 direction;
+	__u8 padding1;
+	/* output: handle (index) */
+	__u16 handle;
+	__u16 cmd_id;
+	__u16 padding2;
+	__u32 cmd_param;
+
+	__be32 ip_src_addr;
+	__be32 ip_dst_addr;
+	__be32 ipv6_src_addr[4];
+	__be32 ipv6_dst_addr[4];
+
+	/* 2B src/dst port */
+	__u16 src_port;
+	__u16 dst_port;
+
+	/* attribute mask */
+	__u32 ipa_sockv5_mask;
+
+	/* required update 4B/4B Seq/Ack/SACK */
+	__u32 out_irs;
+	__u32 out_iss;
+	__u32 in_irs;
+	__u32 in_iss;
+
+	/* option 10B: time-stamp */
+	__u32 out_ircv_tsval;
+	__u32 in_ircv_tsecr;
+	__u32 out_ircv_tsecr;
+	__u32 in_ircv_tsval;
+
+	/* option 2B: window-scaling/dynamic */
+	__u16 in_isnd_wscale : 4;
+	__u16 out_isnd_wscale : 4;
+	__u16 in_ircv_wscale : 4;
+	__u16 out_ircv_wscale : 4;
+	__u32 padding3;
+
+};
+
+/**
+ * struct ipacm_socksv5_info - To hold information about socksv5 connections
+ * @ip_type: ip type
+ * @ipv4_src: ipv4 src address
+ * @ipv4_dst: ipv4 dst address
+ * @ipv6_src: ipv6 src address
+ * @ipv6_dst: ipv6 dst address
+ * @src_port: src port number
+ * @dst_port: dst port number
+ * @index: the uc activation tbl index
+ */
+
+struct ipacm_socksv5_info {
+	/* ip-type */
+	enum ipa_ip_type ip_type;
+
+	/* ipv4 */
+	__u32 ipv4_src;
+	__u32 ipv4_dst;
+
+	/* ipv6 */
+	__u32 ipv6_src[4];
+	__u32 ipv6_dst[4];
+
+	/* 2B src/dst port */
+	__u16 src_port;
+	__u16 dst_port;
+
+	/* uc-tbl index */
+	__u16 index;
+	__u16 padding;
+};
+
+/**
+ * struct ipa_socksv5_msg - To hold information about socksv5 client
+ * @ul_in: uplink connection info
+ * @dl_in: downlink connection info
+ * @handle: used for ipacm to distinguish connections
+ *
+ * NOTE(review): the paragraph originally here was copied verbatim from
+ * struct ipa_wan_msg and described WAN ifaces, not socksv5; per @handle,
+ * IPACM uses the handle (uc activation table index) to tell connections
+ * apart.
+ */
+struct ipa_socksv5_msg {
+	struct ipacm_socksv5_info ul_in;
+	struct ipacm_socksv5_info dl_in;
+
+	/* handle (index) */
+	__u16 handle;
+	__u16 padding;
+};
+
+/**
+ * struct ipa_ioc_ipv6_nat_uc_act_entry - To hold information about IPv6 NAT
+ *	uC entry
+ * @cmd_id[in]: IPv6 NAT uC CMD ID - used for identifying uc activation type
+ * @private_address_lsb[in]: client private address lsb
+ * @private_address_msb[in]: client private address msb
+ * @public_address_lsb[in]: client public address lsb
+ * @public_address_msb[in]: client public address msb
+ * @private_port[in]: client private port
+ * @public_port[in]: client public port
+ * @index[out]: uC activation entry index
+ */
+struct ipa_ioc_ipv6_nat_uc_act_entry {
+	__u16 cmd_id;
+	__u16 index;
+	__u32 padding;
+	__u32 private_port;
+	__u32 public_port;
+	__u64 private_address_lsb;
+	__u64 private_address_msb;
+	__u64 public_address_lsb;
+	__u64 public_address_msb;
+};
+
+/**
+ * union ipa_ioc_uc_activation_entry - To hold information about uC activation
+ *	entry
+ * @socks[in]: fill here if entry is Socksv5 entry
+ * @ipv6_nat[in]: fill here if entry is IPv6 NAT entry
+ */
+union ipa_ioc_uc_activation_entry {
+	struct ipa_kernel_tests_socksv5_uc_tmpl socks;
+	struct ipa_ioc_ipv6_nat_uc_act_entry ipv6_nat;
+};
+
+/**
+ * struct ipa_ioc_rm_dependency - parameters for add/delete dependency
+ * @resource_name: name of dependent resource
+ * @depends_on_name: name of its dependency
+ */
+struct ipa_ioc_rm_dependency {
+	enum ipa_rm_resource_name resource_name;
+	enum ipa_rm_resource_name depends_on_name;
+};
+
+/**
+ * struct ipa_ioc_generate_flt_eq - filter equation generation parameters
+ * @ip: IP family of the rule
+ * @attrib: filtering rule attributes, input
+ * @eq_attrib: the rule in equation form - presumably the output; confirm
+ *	       against the ioctl handler
+ */
+struct ipa_ioc_generate_flt_eq {
+	enum ipa_ip_type ip;
+	struct ipa_rule_attrib attrib;
+	struct ipa_ipfltri_rule_eq eq_attrib;
+};
+
+/**
+ * struct ipa_ioc_write_qmapid - to write mux id to endpoint metadata register
+ * @client: IPA client type of the target endpoint
+ * @qmap_id: mux id of wan
+ */
+struct ipa_ioc_write_qmapid {
+	enum ipa_client_type client;
+	uint8_t qmap_id;
+};
+
+/**
+ * struct ipa_flt_rt_counter_alloc - flt/rt counter id allocation
+ * @num_counters: input param, num of counters need to be allocated
+ * @allow_less: input param, if true, success even few counter than request
+ * @start_id: output param, allocated start_id, 0 when allocation fails
+ * @end_id: output param, allocated end_id, 0 when allocation fails
+ */
+struct ipa_flt_rt_counter_alloc {
+	uint8_t num_counters;
+	uint8_t allow_less;
+	uint8_t start_id;
+	uint8_t end_id;
+};
+
+/**
+ * struct ipa_ioc_flt_rt_counter_alloc - flt/rt counter id allocation ioctl
+ * @hdl: output param, hdl used for deallocation, negative if allocation fails
+ * @hw_counter: HW counters for HW process
+ * @sw_counter: SW counters for uC / non-HW process
+ */
+struct ipa_ioc_flt_rt_counter_alloc {
+	int hdl;
+	struct ipa_flt_rt_counter_alloc hw_counter;
+	struct ipa_flt_rt_counter_alloc sw_counter;
+};
+
+/**
+ * struct ipa_flt_rt_stats - flt/rt stats info
+ * @num_pkts: number of packets
+ * @num_pkts_hash: number of packets in hash entry
+ * @num_bytes: number of bytes
+ */
+struct ipa_flt_rt_stats {
+	uint32_t num_pkts;
+	uint32_t num_pkts_hash;
+	uint64_t num_bytes;
+};
+
+/**
+ * struct ipa_ioc_flt_rt_query - flt/rt counter id query
+ * @start_id: start counter id for query
+ * @end_id: end counter id for query
+ * @reset: this query need hw counter to be reset or not
+ * @stats_size: sizeof(ipa_flt_rt_stats)
+ * @reserved: reserved bits for alignment
+ * @stats: structure contains the query result
+ */
+struct ipa_ioc_flt_rt_query {
+	uint8_t start_id;
+	uint8_t end_id;
+	uint8_t reset;
+	uint32_t stats_size;
+	uint8_t reserved;
+	/*
+	 * NOTE(review): __u64 - following the ioctl_ptr pattern used
+	 * elsewhere in this header, this appears to be a user-space pointer
+	 * to ipa_flt_rt_stats cast to (__u64)(uintptr_t); confirm.
+	 */
+	uint64_t stats;
+};
+
+enum ipacm_client_enum {
+	IPACM_CLIENT_USB = 1,
+	IPACM_CLIENT_WLAN,
+	IPACM_CLIENT_MAX
+};
+
+/* marker define for user-space compile-time feature detection */
+#define IPACM_SUPPORT_OF_LAN_STATS_FOR_ODU_CLIENTS
+
+enum ipacm_per_client_device_type {
+	IPACM_CLIENT_DEVICE_TYPE_USB = 0,
+	IPACM_CLIENT_DEVICE_TYPE_WLAN = 1,
+	IPACM_CLIENT_DEVICE_TYPE_ETH = 2,
+	IPACM_CLIENT_DEVICE_TYPE_ODU = 3,
+	IPACM_CLIENT_DEVICE_MAX
+};
+
+/**
+ * max number of device types supported.
+ */
+#define IPACM_MAX_CLIENT_DEVICE_TYPES IPACM_CLIENT_DEVICE_MAX
+
+/**
+ * struct ipa_lan_client_msg - To hold information about lan client
+ * @lanIface: Name of the lan interface
+ * @mac: Mac address of the client.
+ */
+struct ipa_lan_client_msg {
+	char lanIface[IPA_RESOURCE_NAME_MAX];
+	uint8_t mac[IPA_MAC_ADDR_SIZE];
+};
+
+/**
+ * struct ipa_lan_client - lan client data
+ * @mac: MAC Address of the client.
+ * @client_idx: Client Index.
+ * @inited: Bool to indicate whether client info is set.
+ */
+struct ipa_lan_client {
+	uint8_t mac[IPA_MAC_ADDR_SIZE];
+	int8_t client_idx;
+	uint8_t inited;
+};
+
+/**
+ * struct ipa_lan_client_cntr_index
+ * @ul_cnt_idx: H/w counter index for uplink stats
+ * @dl_cnt_idx: H/w counter index for downlink stats
+ */
+struct ipa_lan_client_cntr_index {
+	__u8 ul_cnt_idx;
+	__u8 dl_cnt_idx;
+};
+
+/**
+ * struct ipa_tether_device_info - tether device info indicated from IPACM
+ * @ul_src_pipe: Source pipe of the lan client.
+ * @hdr_len: Header length of the client.
+ * @num_clients: Number of clients connected.
+ */
+struct ipa_tether_device_info {
+	__s32 ul_src_pipe;
+	__u8 hdr_len;
+	__u8 padding1;
+	__u16 padding2;
+	__u32 num_clients;
+	struct ipa_lan_client lan_client[IPA_MAX_NUM_HW_PATH_CLIENTS];
+	struct ipa_lan_client_cntr_index
+		lan_client_indices[IPA_MAX_NUM_HW_PATH_CLIENTS];
+};
+
+/**
+ * enum ipa_vlan_ifaces - vlan interfaces types
+ */
+enum ipa_vlan_ifaces {
+	IPA_VLAN_IF_ETH,
+	IPA_VLAN_IF_ETH0,
+	IPA_VLAN_IF_ETH1,
+	IPA_VLAN_IF_RNDIS,
+	IPA_VLAN_IF_ECM
+};
+
+#define IPA_VLAN_IF_EMAC IPA_VLAN_IF_ETH
+#define IPA_VLAN_IF_MAX (IPA_VLAN_IF_ECM + 1)
+
+/**
+ * struct ipa_ioc_get_vlan_mode - get vlan mode of a Lan interface
+ * @iface: Lan interface type to be queried.
+ * @is_vlan_mode: output parameter, is interface in vlan mode, valid only when
+ *		ioctl return val is non-negative
+ */
+struct ipa_ioc_get_vlan_mode {
+	enum ipa_vlan_ifaces iface;
+	uint32_t is_vlan_mode;
+};
+
+/**
+ * struct ipa_ioc_bridge_vlan_mapping_info - vlan to bridge mapping info
+ * @bridge_name: bridge interface name
+ * @vlan_id: vlan ID bridge is mapped to
+ * @bridge_ipv4: bridge interface ipv4 address
+ * @subnet_mask: bridge interface subnet mask
+ * @lan2lan_sw: indicate lan2lan traffic take sw-path or not
+ */
+struct ipa_ioc_bridge_vlan_mapping_info {
+	char bridge_name[IPA_RESOURCE_NAME_MAX];
+	uint8_t lan2lan_sw;
+	uint16_t vlan_id;
+	uint32_t bridge_ipv4;
+	uint32_t subnet_mask;
+};
+
+/**
+ * struct ipa_coalesce_info - coalesce config per QMAP mux id
+ * @qmap_id: mux id of the interface
+ * @tcp_enable: enable TCP coalescing
+ * @udp_enable: enable UDP coalescing
+ */
+struct ipa_coalesce_info {
+	uint8_t qmap_id;
+	uint8_t tcp_enable;
+	uint8_t udp_enable;
+};
+
+/**
+ * struct ipa_mtu_info - MTU info per interface and IP family
+ * @if_name: name of the interface
+ * @ip_type: IP family (v4/v6/both)
+ * @mtu_v4: IPv4 MTU
+ * @mtu_v6: IPv6 MTU
+ */
+struct ipa_mtu_info {
+	char if_name[IPA_RESOURCE_NAME_MAX];
+	enum ipa_ip_type ip_type;
+	uint16_t mtu_v4;
+	uint16_t mtu_v6;
+};
+
+/**
+ * struct ipa_odl_ep_info - ODL endpoint info
+ * @cons_pipe_num: consumer pipe number
+ * @prod_pipe_num: producer pipe number
+ * @peripheral_iface_id: peripheral interface id
+ * @ep_type: endpoint type
+ */
+struct ipa_odl_ep_info {
+	__u32 cons_pipe_num;
+	__u32 prod_pipe_num;
+	__u32 peripheral_iface_id;
+	__u32 ep_type;
+};
+
+/**
+ * struct odl_agg_pipe_info - ODL aggregation pipe info
+ * @agg_byte_limit: aggregation byte limit
+ */
+struct odl_agg_pipe_info {
+	__u16 agg_byte_limit;
+};
+
+/**
+ * struct ipa_odl_modem_config - ODL modem configuration state
+ * @config_status: configuration status
+ */
+struct ipa_odl_modem_config {
+	__u8 config_status;
+};
+
+/**
+ * struct ipa_ioc_fnr_index_info - FnR counter index offsets
+ * @hw_counter_offset: offset of the HW counters
+ * @sw_counter_offset: offset of the SW counters
+ */
+struct ipa_ioc_fnr_index_info {
+	uint8_t hw_counter_offset;
+	uint8_t sw_counter_offset;
+};
+
+enum ipacm_hw_index_counter_type {
+	UL_HW = 0,
+	DL_HW,
+	DL_ALL,
+	UL_ALL,
+};
+
+enum ipacm_hw_index_counter_virtual_type {
+	UL_HW_CACHE = 0,
+	DL_HW_CACHE,
+	UL_WLAN_TX,
+	DL_WLAN_TX
+};
+
+/**
+ * struct ipa_ioc_pdn_config - provide pdn configuration
+ * @dev_name: PDN interface name
+ * @pdn_cfg_type: type of the pdn config applied.
+ * @enable: enable/disable pdn config type.
+ * @u.collison_cfg.pdn_ip_addr: pdn_ip_address used in collision config.
+ * @u.passthrough_cfg.pdn_ip_addr: pdn_ip_address used in passthrough config.
+ * @u.passthrough_cfg.device_type: Device type of the client.
+ * @u.passthrough_cfg.vlan_id: VLAN ID of the client.
+ * @u.passthrough_cfg.client_mac_addr: client mac for which passthrough
+ *	is enabled.
+ * @u.passthrough_cfg.skip_nat: skip NAT processing.
+ * @default_pdn: bool to indicate the config is for default pdn.
+ */
+struct ipa_ioc_pdn_config {
+	char dev_name[IPA_RESOURCE_NAME_MAX];
+	enum ipa_pdn_config_event pdn_cfg_type;
+	__u8 enable;
+	union {
+		struct ipa_pdn_ip_collision_cfg {
+			__u32 pdn_ip_addr;
+		} collison_cfg;
+
+		struct ipa_pdn_ip_passthrough_cfg {
+			__u32 pdn_ip_addr;
+			enum ipacm_per_client_device_type device_type;
+			__u16 vlan_id;
+			__u8 client_mac_addr[IPA_MAC_ADDR_SIZE];
+			__u8 skip_nat;
+		} passthrough_cfg;
+	} u;
+	__u8 default_pdn;
+};
+
+/**
+ * struct ipa_ioc_mac_client_list_type- mac addr exception list
+ * @mac_addr: an array to hold clients mac addrs
+ * @num_of_clients: holds num of clients to blacklist or whitelist
+ * @flt_state: true to block current mac addrs and false to clean
+ *		up all previous mac addrs
+ */
+struct ipa_ioc_mac_client_list_type {
+	int num_of_clients;
+	__u8 mac_addr[IPA_MAX_NUM_MAC_FLT][IPA_MAC_ADDR_SIZE];
+	__u8 flt_state;
+	__u8 padding;
+};
+
+/**
+ * struct ipa_sw_flt_list_type- exception list
+ * @mac_enable: true to block current mac addrs and false to clean
+ *		up all previous mac addrs
+ * @num_of_mac: holds num of clients to blacklist
+ * @mac_addr: an array to hold clients mac addrs
+ * @ipv4_segs_enable: true to block current ipv4 addrs and false to clean
+ *		up all previous ipv4 addrs
+ * @ipv4_segs_ipv6_offload: reserved flexibility for future use.
+ *		true will indicate ipv6 could be still offloaded and
+ *		default is set to false as sw-path for ipv6 as well.
+ * @num_of_ipv4_segs: holds num of ipv4 segs to blacklist
+ * @ipv4_segs: an array to hold clients ipv4 segs addrs
+ * @iface_enable: true to block current ifaces and false to clean
+ *		up all previous ifaces
+ * @num_of_iface: holds num of ifaces to blacklist
+ * @iface: an array to hold netdev ifaces
+ */
+struct ipa_sw_flt_list_type {
+	uint8_t mac_enable;
+	int num_of_mac;
+	uint8_t mac_addr[IPA_MAX_NUM_MAC_FLT][IPA_MAC_ADDR_SIZE];
+	uint8_t ipv4_segs_enable;
+	uint8_t ipv4_segs_ipv6_offload;
+	int num_of_ipv4_segs;
+	/* each entry is a pair of addresses describing an ipv4 segment */
+	uint32_t ipv4_segs[IPA_MAX_NUM_IPv4_SEGS_FLT][2];
+	uint8_t iface_enable;
+	int num_of_iface;
+	char iface[IPA_MAX_NUM_IFACE_FLT][IPA_RESOURCE_NAME_MAX];
+};
+
+/**
+ * struct ipa_ippt_sw_flt_list_type- exception list
+ * @ipv4_enable: true to block ipv4 addrs given below and false to clean
+ *		up all previous ipv4 addrs
+ * @num_of_ipv4: holds num of ipv4 to SW-exception
+ * @ipv4: an array to hold ipv4 addrs to SW-exception
+ * @port_enable: true to block current ports and false to clean
+ *		up all previous ports
+ * @num_of_port: holds num of ports to SW-exception
+ * @port: an array to hold connection ports to SW-exception
+ */
+
+struct ipa_ippt_sw_flt_list_type {
+	uint8_t ipv4_enable;
+	int num_of_ipv4;
+	uint32_t ipv4[IPA_MAX_PDN_NUM];
+	uint8_t port_enable;
+	int num_of_port;
+	uint16_t port[IPA_MAX_IPPT_NUM_PORT_FLT];
+};
+
+/**
+ * struct ipa_ioc_sw_flt_list_type
+ * @ioctl_ptr: has to be typecasted to (__u64)(uintptr_t)
+ * @ioctl_data_size:
+ * Eg: For ipa_sw_flt_list_type = sizeof(ipa_sw_flt_list_type)
+ * Eg: For ipa_ippt_sw_flt_list_type = sizeof(ipa_ippt_sw_flt_list_type)
+ */
+struct ipa_ioc_sw_flt_list_type {
+	__u64 ioctl_ptr;
+	__u32 ioctl_data_size;
+	__u32 padding;
+};
+
+/**
+ * struct ipa_macsec_map - mapping between ethX to macsecY
+ * @phy_name: name of the physical NIC (ethX)
+ *	- must be equal to an existing physical NIC name
+ * @macsec_name: name of the macsec NIC (macsecY)
+ */
+struct ipa_macsec_map {
+	char phy_name[IPA_RESOURCE_NAME_MAX];
+	char macsec_name[IPA_RESOURCE_NAME_MAX];
+};
+
+/**
+ * struct ipa_ioc_macsec_info - provide macsec info
+ * @ioctl_ptr: has to be typecasted to (__u64)(uintptr_t)
+ * @ioctl_data_size:
+ * Eg: For ipa_macsec_map = sizeof(ipa_macsec_map)
+ */
+struct ipa_ioc_macsec_info {
+	__u64 ioctl_ptr;
+	__u32 ioctl_data_size;
+	__u32 padding;
+};
+
+
+enum ipa_ext_router_mode {
+	IPA_PREFIX_DISABLED = 0,
+	IPA_PREFIX_SHARING,
+	IPA_PREFIX_DELEGATION
+};
+
+/**
+ * struct ipa_ioc_ext_router_info - provide ext_router info
+ * @mode: prefix sharing, prefix delegation, or disabled mode
+ * @pdn_name: PDN interface name
+ * @ipv6_addr: the prefix addr used for dummy or delegated prefixes
+ * @ipv6_mask: the ipv6 mask used to mask above addr to get the correct prefix
+ */
+struct ipa_ioc_ext_router_info {
+	enum ipa_ext_router_mode mode;
+	char pdn_name[IPA_RESOURCE_NAME_MAX];
+	uint32_t ipv6_addr[4];
+	uint32_t ipv6_mask[4];
+};
+
+/**
+ *   actual IOCTLs supported by IPA driver
+ */
+/*
+ * Note: per the long-standing convention in this header, the size-cookie
+ * argument of _IOWR/_IOW below is a struct *pointer* type (e.g.
+ * "struct ipa_ioc_add_hdr *"), so the size encoded in the ioctl number is
+ * sizeof(a pointer), not sizeof(the struct itself).
+ */
+#define IPA_IOC_COAL_EVICT_POLICY _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_COAL_EVICT_POLICY, \
+					struct ipa_ioc_coal_evict_policy *)
+#define IPA_IOC_ADD_HDR _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_HDR, \
+					struct ipa_ioc_add_hdr *)
+#define IPA_IOC_DEL_HDR _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_DEL_HDR, \
+					struct ipa_ioc_del_hdr *)
+#define IPA_IOC_ADD_RT_RULE _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_RT_RULE, \
+					struct ipa_ioc_add_rt_rule *)
+#define IPA_IOC_ADD_RT_RULE_V2 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_RT_RULE_V2, \
+					struct ipa_ioc_add_rt_rule_v2 *)
+#define IPA_IOC_ADD_RT_RULE_EXT _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_RT_RULE_EXT, \
+					struct ipa_ioc_add_rt_rule_ext *)
+#define IPA_IOC_ADD_RT_RULE_EXT_V2 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_RT_RULE_EXT_V2, \
+					struct ipa_ioc_add_rt_rule_ext_v2 *)
+#define IPA_IOC_ADD_RT_RULE_AFTER _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_RT_RULE_AFTER, \
+					struct ipa_ioc_add_rt_rule_after *)
+#define IPA_IOC_ADD_RT_RULE_AFTER_V2 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_RT_RULE_AFTER_V2, \
+					struct ipa_ioc_add_rt_rule_after_v2 *)
+#define IPA_IOC_DEL_RT_RULE _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_DEL_RT_RULE, \
+					struct ipa_ioc_del_rt_rule *)
+#define IPA_IOC_ADD_FLT_RULE _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_FLT_RULE, \
+					struct ipa_ioc_add_flt_rule *)
+#define IPA_IOC_ADD_FLT_RULE_V2 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_FLT_RULE_V2, \
+					struct ipa_ioc_add_flt_rule_v2 *)
+#define IPA_IOC_ADD_FLT_RULE_AFTER _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_FLT_RULE_AFTER, \
+					struct ipa_ioc_add_flt_rule_after *)
+#define IPA_IOC_ADD_FLT_RULE_AFTER_V2 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_FLT_RULE_AFTER_V2, \
+					struct ipa_ioc_add_flt_rule_after_v2 *)
+#define IPA_IOC_DEL_FLT_RULE _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_DEL_FLT_RULE, \
+					struct ipa_ioc_del_flt_rule *)
+#define IPA_IOC_COMMIT_HDR _IO(IPA_IOC_MAGIC,\
+					IPA_IOCTL_COMMIT_HDR)
+#define IPA_IOC_RESET_HDR _IO(IPA_IOC_MAGIC,\
+					IPA_IOCTL_RESET_HDR)
+#define IPA_IOC_COMMIT_RT _IOW(IPA_IOC_MAGIC, \
+					IPA_IOCTL_COMMIT_RT, \
+					enum ipa_ip_type)
+#define IPA_IOC_RESET_RT _IOW(IPA_IOC_MAGIC, \
+					IPA_IOCTL_RESET_RT, \
+					enum ipa_ip_type)
+#define IPA_IOC_COMMIT_FLT _IOW(IPA_IOC_MAGIC, \
+					IPA_IOCTL_COMMIT_FLT, \
+					enum ipa_ip_type)
+#define IPA_IOC_RESET_FLT _IOW(IPA_IOC_MAGIC, \
+			IPA_IOCTL_RESET_FLT, \
+			enum ipa_ip_type)
+#define IPA_IOC_DUMP _IO(IPA_IOC_MAGIC, \
+			IPA_IOCTL_DUMP)
+#define IPA_IOC_GET_RT_TBL _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_RT_TBL, \
+				struct ipa_ioc_get_rt_tbl *)
+#define IPA_IOC_PUT_RT_TBL _IOW(IPA_IOC_MAGIC, \
+				IPA_IOCTL_PUT_RT_TBL, \
+				uint32_t)
+#define IPA_IOC_COPY_HDR _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_COPY_HDR, \
+				struct ipa_ioc_copy_hdr *)
+#define IPA_IOC_QUERY_INTF _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_QUERY_INTF, \
+				struct ipa_ioc_query_intf *)
+#define IPA_IOC_QUERY_INTF_TX_PROPS _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_QUERY_INTF_TX_PROPS, \
+				struct ipa_ioc_query_intf_tx_props *)
+#define IPA_IOC_QUERY_INTF_RX_PROPS _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_QUERY_INTF_RX_PROPS, \
+					struct ipa_ioc_query_intf_rx_props *)
+#define IPA_IOC_QUERY_INTF_EXT_PROPS _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_QUERY_INTF_EXT_PROPS, \
+					struct ipa_ioc_query_intf_ext_props *)
+#define IPA_IOC_GET_HDR _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_HDR, \
+				struct ipa_ioc_get_hdr *)
+#define IPA_IOC_PUT_HDR _IOW(IPA_IOC_MAGIC, \
+				IPA_IOCTL_PUT_HDR, \
+				uint32_t)
+#define IPA_IOC_ALLOC_NAT_MEM _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ALLOC_NAT_MEM, \
+				struct ipa_ioc_nat_alloc_mem *)
+#define IPA_IOC_ALLOC_NAT_TABLE _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ALLOC_NAT_TABLE, \
+				struct ipa_ioc_nat_ipv6ct_table_alloc *)
+#define IPA_IOC_ALLOC_IPV6CT_TABLE _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ALLOC_IPV6CT_TABLE, \
+				struct ipa_ioc_nat_ipv6ct_table_alloc *)
+#define IPA_IOC_V4_INIT_NAT _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_V4_INIT_NAT, \
+				struct ipa_ioc_v4_nat_init *)
+#define IPA_IOC_INIT_IPV6CT_TABLE _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_INIT_IPV6CT_TABLE, \
+				struct ipa_ioc_ipv6ct_init *)
+#define IPA_IOC_NAT_DMA _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_NAT_DMA, \
+				struct ipa_ioc_nat_dma_cmd *)
+#define IPA_IOC_TABLE_DMA_CMD _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_TABLE_DMA_CMD, \
+				struct ipa_ioc_nat_dma_cmd *)
+#define IPA_IOC_V4_DEL_NAT _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_V4_DEL_NAT, \
+				struct ipa_ioc_v4_nat_del *)
+#define IPA_IOC_DEL_NAT_TABLE _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_DEL_NAT_TABLE, \
+				struct ipa_ioc_nat_ipv6ct_table_del *)
+#define IPA_IOC_DEL_IPV6CT_TABLE _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_DEL_IPV6CT_TABLE, \
+				struct ipa_ioc_nat_ipv6ct_table_del *)
+#define IPA_IOC_GET_NAT_OFFSET _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_NAT_OFFSET, \
+				uint32_t *)
+#define IPA_IOC_NAT_MODIFY_PDN _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_NAT_MODIFY_PDN, \
+				struct ipa_ioc_nat_pdn_entry *)
+#define IPA_IOC_SET_FLT _IOW(IPA_IOC_MAGIC, \
+			IPA_IOCTL_SET_FLT, \
+			uint32_t)
+#define IPA_IOC_PULL_MSG _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_PULL_MSG, \
+				struct ipa_msg_meta *)
+#define IPA_IOC_RM_ADD_DEPENDENCY _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_RM_ADD_DEPENDENCY, \
+				struct ipa_ioc_rm_dependency *)
+#define IPA_IOC_RM_DEL_DEPENDENCY _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_RM_DEL_DEPENDENCY, \
+				struct ipa_ioc_rm_dependency *)
+#define IPA_IOC_GENERATE_FLT_EQ _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GENERATE_FLT_EQ, \
+				struct ipa_ioc_generate_flt_eq *)
+#define IPA_IOC_QUERY_EP_MAPPING _IOR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_QUERY_EP_MAPPING, \
+				uint32_t)
+#define IPA_IOC_QUERY_RT_TBL_INDEX _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_QUERY_RT_TBL_INDEX, \
+				struct ipa_ioc_get_rt_tbl_indx *)
+#define IPA_IOC_WRITE_QMAPID  _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_WRITE_QMAPID, \
+				struct ipa_ioc_write_qmapid *)
+#define IPA_IOC_MDFY_FLT_RULE _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_MDFY_FLT_RULE, \
+					struct ipa_ioc_mdfy_flt_rule *)
+#define IPA_IOC_MDFY_FLT_RULE_V2 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_MDFY_FLT_RULE_V2, \
+					struct ipa_ioc_mdfy_flt_rule_v2 *)
+#define IPA_IOC_MDFY_RT_RULE _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_MDFY_RT_RULE, \
+					struct ipa_ioc_mdfy_rt_rule *)
+#define IPA_IOC_MDFY_RT_RULE_V2 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_MDFY_RT_RULE_V2, \
+					struct ipa_ioc_mdfy_rt_rule_v2 *)
+
+#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD, \
+				struct ipa_wan_msg *)
+
+#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL, \
+				struct ipa_wan_msg *)
+#define IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED, \
+				struct ipa_wan_msg *)
+#define IPA_IOC_ADD_HDR_PROC_CTX _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ADD_HDR_PROC_CTX, \
+				struct ipa_ioc_add_hdr_proc_ctx *)
+#define IPA_IOC_DEL_HDR_PROC_CTX _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_DEL_HDR_PROC_CTX, \
+				struct ipa_ioc_del_hdr_proc_ctx *)
+
+#define IPA_IOC_GET_HW_VERSION _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_HW_VERSION, \
+				enum ipa_hw_type *)
+
+#define IPA_IOC_ADD_VLAN_IFACE _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ADD_VLAN_IFACE, \
+				struct ipa_ioc_vlan_iface_info *)
+
+#define IPA_IOC_DEL_VLAN_IFACE _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_DEL_VLAN_IFACE, \
+				struct ipa_ioc_vlan_iface_info *)
+
+#define IPA_IOC_ADD_L2TP_VLAN_MAPPING _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ADD_L2TP_VLAN_MAPPING, \
+				struct ipa_ioc_l2tp_vlan_mapping_info *)
+
+#define IPA_IOC_DEL_L2TP_VLAN_MAPPING _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_DEL_L2TP_VLAN_MAPPING, \
+				struct ipa_ioc_l2tp_vlan_mapping_info *)
+#define IPA_IOC_GET_VLAN_MODE _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_VLAN_MODE, \
+				struct ipa_ioc_get_vlan_mode *)
+#define IPA_IOC_ADD_BRIDGE_VLAN_MAPPING _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ADD_BRIDGE_VLAN_MAPPING, \
+				struct ipa_ioc_bridge_vlan_mapping_info)
+
+#define IPA_IOC_DEL_BRIDGE_VLAN_MAPPING _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_DEL_BRIDGE_VLAN_MAPPING, \
+				struct ipa_ioc_bridge_vlan_mapping_info)
+#define IPA_IOC_CLEANUP _IO(IPA_IOC_MAGIC,\
+					IPA_IOCTL_CLEANUP)
+#define IPA_IOC_QUERY_WLAN_CLIENT _IO(IPA_IOC_MAGIC,\
+					IPA_IOCTL_QUERY_WLAN_CLIENT)
+
+#define IPA_IOC_ODL_QUERY_ADAPL_EP_INFO _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ODL_QUERY_ADAPL_EP_INFO, \
+				struct ipa_odl_ep_info)
+#define IPA_IOC_ODL_GET_AGG_BYTE_LIMIT _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ODL_GET_AGG_BYTE_LIMIT, \
+				struct odl_agg_pipe_info)
+
+#define IPA_IOC_ODL_QUERY_MODEM_CONFIG _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ODL_QUERY_MODEM_CONFIG, \
+				struct ipa_odl_modem_config)
+
+#define IPA_IOC_GSB_CONNECT _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GSB_CONNECT, \
+				struct ipa_ioc_gsb_info)
+
+#define IPA_IOC_GSB_DISCONNECT _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GSB_DISCONNECT, \
+				struct ipa_ioc_gsb_info)
+
+#define IPA_IOC_WIGIG_FST_SWITCH _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_WIGIG_FST_SWITCH, \
+				struct ipa_ioc_wigig_fst_switch)
+
+#define IPA_IOC_FNR_COUNTER_ALLOC _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_FNR_COUNTER_ALLOC, \
+				struct ipa_ioc_flt_rt_counter_alloc)
+
+#define IPA_IOC_FNR_COUNTER_DEALLOC _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_FNR_COUNTER_DEALLOC, \
+				int)
+
+#define IPA_IOC_FNR_COUNTER_QUERY _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_FNR_COUNTER_QUERY, \
+				struct ipa_ioc_flt_rt_query)
+
+#define IPA_IOC_SET_FNR_COUNTER_INFO _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_SET_FNR_COUNTER_INFO, \
+				struct ipa_ioc_fnr_index_info)
+
+#define IPA_IOC_GET_NAT_IN_SRAM_INFO _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_NAT_IN_SRAM_INFO, \
+				struct ipa_nat_in_sram_info)
+
+#define IPA_IOC_APP_CLOCK_VOTE _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_APP_CLOCK_VOTE, \
+				uint32_t)
+
+#define IPA_IOC_PDN_CONFIG _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_PDN_CONFIG, \
+				struct ipa_ioc_pdn_config)
+
+#define IPA_IOC_SET_MAC_FLT _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_SET_MAC_FLT, \
+				struct ipa_ioc_mac_client_list_type)
+
+#define IPA_IOC_GET_PHERIPHERAL_EP_INFO _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_PHERIPHERAL_EP_INFO, \
+				struct ipa_ioc_get_ep_info)
+
+#define IPA_IOC_ADD_UC_ACT_ENTRY _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ADD_UC_ACT_ENTRY, \
+				union ipa_ioc_uc_activation_entry)
+
+#define IPA_IOC_DEL_UC_ACT_ENTRY _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_DEL_UC_ACT_ENTRY, \
+				__u16)
+
+#define IPA_IOC_SET_SW_FLT _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_SET_SW_FLT, \
+				struct ipa_ioc_sw_flt_list_type)
+
+#define IPA_IOC_SET_PKT_THRESHOLD _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_SET_PKT_THRESHOLD, \
+				struct ipa_ioc_set_pkt_threshold)
+
+#define IPA_IOC_ADD_EoGRE_MAPPING _IOWR(IPA_IOC_MAGIC,	\
+				IPA_IOCTL_ADD_EoGRE_MAPPING, \
+				struct ipa_ioc_eogre_info)
+#define IPA_IOC_DEL_EoGRE_MAPPING _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_DEL_EoGRE_MAPPING, \
+				struct ipa_ioc_eogre_info)
+
+#define IPA_IOC_SET_IPPT_SW_FLT _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_SET_IPPT_SW_FLT, \
+				struct ipa_ioc_sw_flt_list_type)
+
+#define IPA_IOC_ADD_MACSEC_MAPPING _IOWR(IPA_IOC_MAGIC,	\
+				IPA_IOCTL_ADD_MACSEC_MAPPING, \
+				struct ipa_ioc_macsec_info)
+#define IPA_IOC_DEL_MACSEC_MAPPING _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_DEL_MACSEC_MAPPING, \
+				struct ipa_ioc_macsec_info)
+
+#define IPA_IOC_SET_NAT_EXC_RT_TBL_IDX _IOW(IPA_IOC_MAGIC, \
+				IPA_IOCTL_SET_NAT_EXC_RT_TBL_IDX, \
+				uint32_t)
+
+#define IPA_IOC_SET_CONN_TRACK_EXC_RT_TBL_IDX _IOW(IPA_IOC_MAGIC, \
+				IPA_IOCTL_SET_CONN_TRACK_EXC_RT_TBL_IDX, \
+				uint32_t)
+
+#define IPA_IOC_SET_EXT_ROUTER_MODE _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_SET_EXT_ROUTER_MODE, \
+				struct ipa_ioc_ext_router_info)
+/*
+ * unique magic number of the Tethering bridge ioctls
+ */
+#define TETH_BRIDGE_IOC_MAGIC 0xCE
+
+/*
+ * Ioctls supported by Tethering bridge driver
+ */
+#define TETH_BRIDGE_IOCTL_SET_BRIDGE_MODE	0
+#define TETH_BRIDGE_IOCTL_SET_AGGR_PARAMS	1
+#define TETH_BRIDGE_IOCTL_GET_AGGR_PARAMS	2
+#define TETH_BRIDGE_IOCTL_GET_AGGR_CAPABILITIES	3
+#define TETH_BRIDGE_IOCTL_MAX			4
+
+
+/**
+ * enum teth_link_protocol_type - link protocol (IP / Ethernet)
+ */
+enum teth_link_protocol_type {
+	TETH_LINK_PROTOCOL_IP,
+	TETH_LINK_PROTOCOL_ETHERNET,
+	TETH_LINK_PROTOCOL_MAX,
+};
+
+/**
+ * enum teth_aggr_protocol_type - Aggregation protocol (MBIM / TLP)
+ */
+enum teth_aggr_protocol_type {
+	TETH_AGGR_PROTOCOL_NONE,
+	TETH_AGGR_PROTOCOL_MBIM,
+	TETH_AGGR_PROTOCOL_TLP,
+	TETH_AGGR_PROTOCOL_MAX,
+};
+
+/**
+ * struct teth_aggr_params_link - Aggregation parameters for uplink/downlink
+ * @aggr_prot:			Aggregation protocol (MBIM / TLP)
+ * @max_transfer_size_byte:	Maximal size of aggregated packet in bytes.
+ *				Default value is 16*1024.
+ * @max_datagrams:		Maximal number of IP packets in an aggregated
+ *				packet. Default value is 16
+ */
+struct teth_aggr_params_link {
+	enum teth_aggr_protocol_type aggr_prot;
+	uint32_t max_transfer_size_byte;
+	uint32_t max_datagrams;
+};
+
+
+/**
+ * struct teth_aggr_params - Aggregation parameters
+ * @ul:	Uplink parameters
+ * @dl: Downlink parameters
+ */
+struct teth_aggr_params {
+	struct teth_aggr_params_link ul;
+	struct teth_aggr_params_link dl;
+};
+
+/**
+ * struct teth_aggr_capabilities - Aggregation capabilities
+ * @num_protocols:		Number of protocols described in the array
+ * @prot_caps[]:		Array of aggregation capabilities per protocol
+ */
+struct teth_aggr_capabilities {
+	uint16_t num_protocols;
+	struct teth_aggr_params_link prot_caps[0];
+};
+
+/**
+ * struct teth_ioc_set_bridge_mode
+ * @link_protocol: link protocol (IP / Ethernet)
+ * @lcid: logical channel number
+ */
+struct teth_ioc_set_bridge_mode {
+	enum teth_link_protocol_type link_protocol;
+	uint16_t lcid;
+};
+
+/**
+ * struct teth_ioc_aggr_params
+ * @aggr_params: Aggregation parameters
+ * @lcid: logical channel number
+ */
+struct teth_ioc_aggr_params {
+	struct teth_aggr_params aggr_params;
+	uint16_t lcid;
+};
+
+/**
+ * struct ipa_ioc_coal_evict_policy -
+ *
+ *   Structure used with the IPA_IOCTL_COAL_EVICT_POLICY ioctl to
+ *   control TCP/UDP eviction policy.
+ *
+ * @coal_vp_thrshld:
+ *
+ *   Connections that are opened below this value will not get
+ *   evicted. Valid till v5_2.
+ *
+ * @coal_eviction_en:
+ *
+ *   bool -> Enable eviction
+ *
+ * @coal_vp_gran_sel:
+ *
+ *   Select the appropriate time granularity: four possible values (0-3)
+ *   Valid from v5_5.
+ *
+ * @coal_vp_udp_thrshld:
+ *
+ *   Coalescing eviction threshold. LRU VP stickiness/inactivity
+ *   defined by this threshold for UDP connections.  0 means all UDP
+ *   connections are non-sticky. Valid from v5_5.
+ *
+ * @coal_vp_tcp_thrshld:
+ *
+ *   Coalescing eviction threshold. LRU VP stickiness/inactivity
+ *   defined by this threshold for TCP connections.  0 means all TCP
+ *   connections are non-sticky. Valid from v5_5.
+ *
+ * @coal_vp_udp_thrshld_en:
+ *
+ *   bool -> Coalescing eviction enable for UDP connections when a UDP
+ *   packet arrives. 0 disables these evictions. Valid from v5_5.
+ *
+ * @coal_vp_tcp_thrshld_en:
+ *
+ *   bool -> Coalescing eviction enable for TCP connections when a TCP
+ *   packet arrives. 0 disables these evictions. Valid from v5_5.
+ *
+ * @coal_vp_tcp_num:
+ *
+ *   Configured TCP NUM value. SW defines when a TCP/UDP connection is
+ *   treated as excess during the eviction process. Valid from v5_5.
+ */
+struct ipa_ioc_coal_evict_policy {
+	uint32_t coal_vp_thrshld;
+	uint32_t reserved1; /* reserved bits for alignment */
+	uint8_t  coal_eviction_en;
+	uint8_t  coal_vp_gran_sel;
+	uint8_t  coal_vp_udp_thrshld;
+	uint8_t  coal_vp_tcp_thrshld;
+	uint8_t  coal_vp_udp_thrshld_en;
+	uint8_t  coal_vp_tcp_thrshld_en;
+	uint8_t  coal_vp_tcp_num;
+	uint8_t  reserved2; /* reserved bits for alignment */
+};
+
+/**
+ * struct ipa_nat_in_sram_info - query for nat in sram particulars
+ * @sram_mem_available_for_nat: Amount of SRAM available to fit nat table
+ * @nat_table_offset_into_mmap: Offset into mmap'd vm where table will be
+ * @best_nat_in_sram_size_rqst: The size to request for mmap
+ *
+ * The last two elements above are required to deal with situations
+ * where the SRAM's physical address and size don't play nice with
+ * mmap's page size and boundary attributes.
+ */
+struct ipa_nat_in_sram_info {
+	uint32_t sram_mem_available_for_nat;
+	uint32_t nat_table_offset_into_mmap;
+	uint32_t best_nat_in_sram_size_rqst;
+};
+
+/**
+ * enum ipa_app_clock_vote_type
+ *
+ * The types of votes that can be accepted by the
+ * IPA_IOC_APP_CLOCK_VOTE ioctl
+ */
+enum ipa_app_clock_vote_type {
+	IPA_APP_CLK_DEVOTE     = 0,
+	IPA_APP_CLK_VOTE       = 1,
+	IPA_APP_CLK_RESET_VOTE = 2,
+};
+
+#define TETH_BRIDGE_IOC_SET_BRIDGE_MODE _IOW(TETH_BRIDGE_IOC_MAGIC, \
+				TETH_BRIDGE_IOCTL_SET_BRIDGE_MODE, \
+				struct teth_ioc_set_bridge_mode *)
+#define TETH_BRIDGE_IOC_SET_AGGR_PARAMS _IOW(TETH_BRIDGE_IOC_MAGIC, \
+				TETH_BRIDGE_IOCTL_SET_AGGR_PARAMS, \
+				struct teth_ioc_aggr_params *)
+#define TETH_BRIDGE_IOC_GET_AGGR_PARAMS _IOR(TETH_BRIDGE_IOC_MAGIC, \
+				TETH_BRIDGE_IOCTL_GET_AGGR_PARAMS, \
+				struct teth_ioc_aggr_params *)
+#define TETH_BRIDGE_IOC_GET_AGGR_CAPABILITIES _IOWR(TETH_BRIDGE_IOC_MAGIC, \
+				TETH_BRIDGE_IOCTL_GET_AGGR_CAPABILITIES, \
+				struct teth_aggr_capabilities *)
+
+/*
+ * unique magic number of the ODU bridge ioctls
+ */
+#define ODU_BRIDGE_IOC_MAGIC 0xCD
+
+/*
+ * Ioctls supported by ODU bridge driver
+ */
+#define ODU_BRIDGE_IOCTL_SET_MODE	0
+#define ODU_BRIDGE_IOCTL_SET_LLV6_ADDR	1
+#define ODU_BRIDGE_IOCTL_MAX		2
+
+
+/**
+ * enum odu_bridge_mode - bridge mode
+ *			(ROUTER MODE / BRIDGE MODE)
+ */
+enum odu_bridge_mode {
+	ODU_BRIDGE_MODE_ROUTER,
+	ODU_BRIDGE_MODE_BRIDGE,
+	ODU_BRIDGE_MODE_MAX,
+};
+
+#define ODU_BRIDGE_IOC_SET_MODE _IOW(ODU_BRIDGE_IOC_MAGIC, \
+				ODU_BRIDGE_IOCTL_SET_MODE, \
+				enum odu_bridge_mode)
+
+#define ODU_BRIDGE_IOC_SET_LLV6_ADDR _IOW(ODU_BRIDGE_IOC_MAGIC, \
+				ODU_BRIDGE_IOCTL_SET_LLV6_ADDR, \
+				struct in6_addr *)
+
+#endif /* _UAPI_MSM_IPA_H_ */

+ 314 - 0
drivers/platform/msm/include/uapi/linux/rmnet_ipa_fd_ioctl.h

@@ -0,0 +1,314 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _RMNET_IPA_FD_IOCTL_H
+#define _RMNET_IPA_FD_IOCTL_H
+
+#include <linux/ioctl.h>
+#include <linux/ipa_qmi_service_v01.h>
+#include <linux/msm_ipa.h>
+
+/**
+ * unique magic number of the IPA_WAN device
+ */
+#define WAN_IOC_MAGIC 0x69
+
+#define WAN_IOCTL_ADD_FLT_RULE		0
+#define WAN_IOCTL_ADD_FLT_INDEX		1
+#define WAN_IOCTL_VOTE_FOR_BW_MBPS	2
+#define WAN_IOCTL_POLL_TETHERING_STATS  3
+#define WAN_IOCTL_SET_DATA_QUOTA        4
+#define WAN_IOCTL_SET_TETHER_CLIENT_PIPE 5
+#define WAN_IOCTL_QUERY_TETHER_STATS     6
+#define WAN_IOCTL_RESET_TETHER_STATS     7
+#define WAN_IOCTL_QUERY_DL_FILTER_STATS  8
+#define WAN_IOCTL_ADD_FLT_RULE_EX        9
+#define WAN_IOCTL_QUERY_TETHER_STATS_ALL  10
+#define WAN_IOCTL_NOTIFY_WAN_STATE  11
+#define WAN_IOCTL_ADD_UL_FLT_RULE          12
+#define WAN_IOCTL_ENABLE_PER_CLIENT_STATS    13
+#define WAN_IOCTL_QUERY_PER_CLIENT_STATS     14
+#define WAN_IOCTL_SET_LAN_CLIENT_INFO        15
+#define WAN_IOCTL_CLEAR_LAN_CLIENT_INFO      16
+#define WAN_IOCTL_SEND_LAN_CLIENT_MSG        17
+#define WAN_IOCTL_ADD_OFFLOAD_CONNECTION     18
+#define WAN_IOCTL_RMV_OFFLOAD_CONNECTION     19
+#define WAN_IOCTL_GET_WAN_MTU                20
+#define WAN_IOCTL_SET_DATA_QUOTA_WARNING     21
+#define WAN_IOCTL_NOTIFY_NAT_MOVE_RES        22
+
+/* User space may not have this defined. */
+#ifndef IFNAMSIZ
+#define IFNAMSIZ 16
+#endif
+
+/**
+ * struct wan_ioctl_poll_tethering_stats - structure used for
+ *                                         WAN_IOCTL_POLL_TETHERING_STATS IOCTL.
+ *
+ * @polling_interval_secs: Polling interval in seconds.
+ * @reset_stats:           Indicate whether to reset the stats (use 1) or not.
+ *
+ * The structure to be used by the user space in order to request for the
+ * tethering stats to be polled. Setting the interval to 0 indicates to stop
+ * the polling process.
+ */
+struct wan_ioctl_poll_tethering_stats {
+	uint64_t polling_interval_secs;
+	uint8_t  reset_stats;
+};
+
+/**
+ * struct wan_ioctl_set_data_quota - structure used for
+ *                                   WAN_IOCTL_SET_DATA_QUOTA IOCTL.
+ *
+ * @interface_name:  Name of the interface on which to set the quota.
+ * @quota_mbytes:    Quota (in Mbytes) for the above interface.
+ * @set_quota:       Indicate whether to set the quota (use 1) or
+ *                   unset the quota.
+ *
+ * The structure to be used by the user space in order to request
+ * a quota to be set on a specific interface (by specifying its name).
+ */
+struct wan_ioctl_set_data_quota {
+	char     interface_name[IFNAMSIZ];
+	uint64_t quota_mbytes;
+	uint8_t  set_quota;
+};
+
+/**
+ * struct wan_ioctl_set_data_quota_warning - structure used for
+ *                                   WAN_IOCTL_SET_DATA_QUOTA_WARNING IOCTL.
+ *
+ * @interface_name:  Name of the interface on which to set the quota or
+ *                   warning.
+ * @quota_mbytes:    Quota (in Mbytes) for the above interface.
+ * @set_quota:       Indicate whether to set the quota (use 1) or
+ *                   unset the quota.
+ * @set_warning:     Indicate whether to set the warning (use 1) or
+ *                   unset the warning.
+ * @padding2:        Explicit padding; reserved to keep the uapi
+ *                   struct layout stable.
+ * @warning_mbytes:  Warning (in Mbytes) for the above interface.
+ *
+ * The structure to be used by the user space in order to request
+ * a quota and/or usage warning on a specific interface.
+ */
+struct wan_ioctl_set_data_quota_warning {
+	char     interface_name[IFNAMSIZ];
+	uint64_t quota_mbytes;
+	uint8_t  set_quota;
+	uint8_t  set_warning;
+	uint16_t padding2;
+	uint64_t warning_mbytes;
+};
+
+struct wan_ioctl_set_tether_client_pipe {
+	/* enum of tether interface */
+	enum ipacm_client_enum ipa_client;
+	uint8_t reset_client;
+	uint32_t ul_src_pipe_len;
+	uint32_t ul_src_pipe_list[QMI_IPA_MAX_PIPES_V01];
+	uint32_t dl_dst_pipe_len;
+	uint32_t dl_dst_pipe_list[QMI_IPA_MAX_PIPES_V01];
+};
+
+struct wan_ioctl_query_tether_stats {
+	/* Name of the upstream interface */
+	char upstreamIface[IFNAMSIZ];
+	/* Name of the tethered interface */
+	char tetherIface[IFNAMSIZ];
+	/* enum of tether interface */
+	enum ipacm_client_enum ipa_client;
+	uint64_t ipv4_tx_packets;
+	uint64_t ipv4_tx_bytes;
+	uint64_t ipv4_rx_packets;
+	uint64_t ipv4_rx_bytes;
+	uint64_t ipv6_tx_packets;
+	uint64_t ipv6_tx_bytes;
+	uint64_t ipv6_rx_packets;
+	uint64_t ipv6_rx_bytes;
+};
+
+struct wan_ioctl_query_tether_stats_all {
+	/* Name of the upstream interface */
+	char upstreamIface[IFNAMSIZ];
+	/* enum of tether interface */
+	enum ipacm_client_enum ipa_client;
+	uint8_t reset_stats;
+	uint64_t tx_bytes;
+	uint64_t rx_bytes;
+};
+
+struct wan_ioctl_reset_tether_stats {
+	/* Name of the upstream interface, not support now */
+	char upstreamIface[IFNAMSIZ];
+	/* Indicate whether to reset the stats (use 1) or not */
+	uint8_t reset_stats;
+};
+
+struct wan_ioctl_query_dl_filter_stats {
+	/* Indicate whether to reset the filter stats (use 1) or not */
+	uint8_t reset_stats;
+	/* Modem response QMI */
+	struct ipa_get_data_stats_resp_msg_v01 stats_resp;
+	/* provide right index to 1st firewall rule */
+	uint32_t index;
+};
+
+struct wan_ioctl_notify_wan_state {
+	uint8_t up;
+	/* Name of the upstream interface */
+	char upstreamIface[IFNAMSIZ];
+#define WAN_IOCTL_NOTIFY_WAN_INTF_NAME WAN_IOCTL_NOTIFY_WAN_INTF_NAME
+};
+struct wan_ioctl_send_lan_client_msg {
+	/* Lan client info. */
+	struct ipa_lan_client_msg lan_client;
+	/* Event to indicate whether client is
+	 * connected or disconnected.
+	 */
+	enum ipa_per_client_stats_event client_event;
+};
+
+struct wan_ioctl_lan_client_info {
+	/* Device type of the client. */
+	enum ipacm_per_client_device_type device_type;
+	/* MAC Address of the client. */
+	uint8_t mac[IPA_MAC_ADDR_SIZE];
+	/* Init client. */
+	uint8_t client_init;
+	/* Client Index */
+	int8_t client_idx;
+	/* Header length of the client. */
+	uint8_t hdr_len;
+	/* Source pipe of the lan client. */
+	enum ipa_client_type ul_src_pipe;
+	/* Counter indices for h/w fnr stats */
+#define IPA_HW_FNR_STATS
+	uint8_t ul_cnt_idx;
+	uint8_t dl_cnt_idx;
+};
+
+struct wan_ioctl_per_client_info {
+	/* MAC Address of the client. */
+	uint8_t mac[IPA_MAC_ADDR_SIZE];
+	/* Ipv4 UL traffic bytes. */
+	uint64_t ipv4_tx_bytes;
+	/* Ipv4 DL traffic bytes. */
+	uint64_t ipv4_rx_bytes;
+	/* Ipv6 UL traffic bytes. */
+	uint64_t ipv6_tx_bytes;
+	/* Ipv6 DL traffic bytes. */
+	uint64_t ipv6_rx_bytes;
+};
+
+struct wan_ioctl_query_per_client_stats {
+	/* Device type of the client. */
+	enum ipacm_per_client_device_type device_type;
+	/* Indicate whether to reset the stats (use 1) or not */
+	uint8_t reset_stats;
+	/* Indicates whether client is disconnected. */
+	uint8_t disconnect_clnt;
+	/* Number of clients. */
+	uint8_t num_clients;
+	/* Client information. */
+	struct wan_ioctl_per_client_info
+		client_info[IPA_MAX_NUM_HW_PATH_CLIENTS];
+};
+
+#define WAN_IOC_ADD_FLT_RULE _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_ADD_FLT_RULE, \
+		struct ipa_install_fltr_rule_req_msg_v01 *)
+
+#define WAN_IOC_ADD_FLT_RULE_INDEX _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_ADD_FLT_INDEX, \
+		struct ipa_fltr_installed_notif_req_msg_v01 *)
+
+#define WAN_IOC_VOTE_FOR_BW_MBPS _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_VOTE_FOR_BW_MBPS, \
+		uint32_t *)
+
+#define WAN_IOC_POLL_TETHERING_STATS _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_POLL_TETHERING_STATS, \
+		struct wan_ioctl_poll_tethering_stats *)
+
+#define WAN_IOC_SET_DATA_QUOTA _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_SET_DATA_QUOTA, \
+		struct wan_ioctl_set_data_quota *)
+
+#define WAN_IOC_SET_TETHER_CLIENT_PIPE _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_SET_TETHER_CLIENT_PIPE, \
+		struct wan_ioctl_set_tether_client_pipe *)
+
+#define WAN_IOC_QUERY_TETHER_STATS _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_QUERY_TETHER_STATS, \
+		struct wan_ioctl_query_tether_stats *)
+
+#define WAN_IOC_RESET_TETHER_STATS _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_RESET_TETHER_STATS, \
+		struct wan_ioctl_reset_tether_stats *)
+
+#define WAN_IOC_QUERY_DL_FILTER_STATS _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_QUERY_DL_FILTER_STATS, \
+		struct wan_ioctl_query_dl_filter_stats *)
+
+#define WAN_IOC_ADD_FLT_RULE_EX _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_ADD_FLT_RULE_EX, \
+		struct ipa_install_fltr_rule_req_ex_msg_v01 *)
+
+#define WAN_IOC_QUERY_TETHER_STATS_ALL _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_QUERY_TETHER_STATS_ALL, \
+		struct wan_ioctl_query_tether_stats_all *)
+
+#define WAN_IOC_NOTIFY_WAN_STATE _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_NOTIFY_WAN_STATE, \
+		struct wan_ioctl_notify_wan_state *)
+
+#define WAN_IOC_ADD_UL_FLT_RULE _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_ADD_UL_FLT_RULE, \
+		struct ipa_configure_ul_firewall_rules_req_msg_v01 *)
+
+#define WAN_IOC_ENABLE_PER_CLIENT_STATS _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_ENABLE_PER_CLIENT_STATS, \
+		bool *)
+
+#define WAN_IOC_QUERY_PER_CLIENT_STATS _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_QUERY_PER_CLIENT_STATS, \
+		struct wan_ioctl_query_per_client_stats *)
+
+#define WAN_IOC_SET_LAN_CLIENT_INFO _IOWR(WAN_IOC_MAGIC, \
+			WAN_IOCTL_SET_LAN_CLIENT_INFO, \
+			struct wan_ioctl_lan_client_info *)
+
+#define WAN_IOC_SEND_LAN_CLIENT_MSG _IOWR(WAN_IOC_MAGIC, \
+				WAN_IOCTL_SEND_LAN_CLIENT_MSG, \
+				struct wan_ioctl_send_lan_client_msg *)
+
+#define WAN_IOC_CLEAR_LAN_CLIENT_INFO _IOWR(WAN_IOC_MAGIC, \
+			WAN_IOCTL_CLEAR_LAN_CLIENT_INFO, \
+			struct wan_ioctl_lan_client_info *)
+
+#define WAN_IOC_ADD_OFFLOAD_CONNECTION _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_ADD_OFFLOAD_CONNECTION, \
+		struct ipa_add_offload_connection_req_msg_v01 *)
+
+#define WAN_IOC_RMV_OFFLOAD_CONNECTION _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_RMV_OFFLOAD_CONNECTION, \
+		struct ipa_remove_offload_connection_req_msg_v01 *)
+
+#define WAN_IOC_GET_WAN_MTU _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_GET_WAN_MTU, \
+		struct ipa_mtu_info *)
+
+#define WAN_IOC_SET_DATA_QUOTA_WARNING _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_SET_DATA_QUOTA_WARNING, \
+		struct wan_ioctl_set_data_quota_warning)
+
+#define WAN_IOC_NOTIFY_NAT_MOVE_RES _IOWR(WAN_IOC_MAGIC, \
+	WAN_IOCTL_NOTIFY_NAT_MOVE_RES, \
+	bool)
+#endif /* _RMNET_IPA_FD_IOCTL_H */

+ 12 - 2
drivers/platform/msm/ipa/Kbuild

@@ -33,15 +33,25 @@ ipam-y += \
 	ipa_v3/ipahal/ipahal_hw_stats.o \
 	ipa_v3/ipahal/ipahal_nat.o \
 	ipa_v3/ipa_eth_i.o \
-	ipa_v3/ipa_stats.o \
+	ipa_v3/ipa_stats.o
 
 ipam-$(CONFIG_IPA_TSP) += ipa_v3/ipa_tsp.o \
-	ipa_v3/ipahal/ipahal_tsp.o \
+	ipa_v3/ipahal/ipahal_tsp.o
 
 ipam-$(CONFIG_RMNET_IPA3) += ipa_v3/rmnet_ipa.o ipa_v3/ipa_qmi_service_v01.o \
 	ipa_v3/ipa_qmi_service.o ipa_v3/rmnet_ctl_ipa.o \
 	ipa_v3/rmnet_ipa_fd_ioctl.o ipa_v3/rmnet_ll_ipa.o
 
+ipam-$(CONFIG_IPA_CLIENTS_MANAGER) += ipa_clients/ipa_usb.o \
+	ipa_clients/ipa_wdi3.o \
+	ipa_clients/ipa_gsb.o \
+	ipa_clients/ipa_uc_offload.o \
+	ipa_clients/ipa_wigig.o \
+	ipa_clients/ipa_mhi_client.o \
+	ipa_clients/ipa_eth.o
+
+ipam-$(CONFIG_RNDIS_IPA) += ipa_clients/rndis_ipa.o
+
 ipam-$(CONFIG_IPA3_MHI_PRIME_MANAGER) += ipa_v3/ipa_mpm.o
 
 ipam-$(CONFIG_IPA3_MHI_PROXY) += ipa_v3/ipa_mhi_proxy.o

+ 0 - 5
drivers/platform/msm/ipa/ipa_clients/Kbuild

@@ -1,10 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0-only
 
-obj-$(CONFIG_RNDIS_IPA) += rndisipam.o
-rndisipam-objs := rndis_ipa.o
-
 obj-$(CONFIG_ECM_IPA) += ecmipam.o
 ecmipam-objs := ecm_ipa.o
 
-obj-$(CONFIG_IPA_CLIENTS_MANAGER) += ipa_clientsm.o
-ipa_clientsm-objs := ipa_clients_manager.o ipa_usb.o ipa_wdi3.o ipa_gsb.o ipa_uc_offload.o ipa_wigig.o ipa_mhi_client.o ipa_eth.o

+ 5 - 5
drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c

@@ -1061,7 +1061,7 @@ static int ecm_ipa_rules_cfg(struct ecm_ipa_dev *ecm_ipa_ctx,
 
 	hdrs->commit = 1;
 	hdrs->num_hdrs = 2;
-	result = ipa3_add_hdr(hdrs);
+	result = ipa_add_hdr(hdrs);
 	if (result) {
 		ECM_IPA_ERROR("Fail on Header-Insertion(%d)\n", result);
 		goto out_free_mem;
@@ -1114,9 +1114,9 @@ static void ecm_ipa_rules_destroy(struct ecm_ipa_dev *ecm_ipa_ctx)
 	ipv6 = &del_hdr->hdl[1];
 	ipv6->hdl = ecm_ipa_ctx->eth_ipv6_hdr_hdl;
 
-	result = ipa3_del_hdr(del_hdr);
+	result = ipa_del_hdr(del_hdr);
 	if (result || ipv4->status || ipv6->status)
-		ECM_IPA_ERROR("ipa3_del_hdr failed\n");
+		ECM_IPA_ERROR("ipa_del_hdr failed\n");
 	kfree(del_hdr);
 }
 
@@ -1177,7 +1177,7 @@ static int ecm_ipa_register_properties(struct ecm_ipa_dev *ecm_ipa_ctx)
 	rx_ipv6_property->hdr_l2_type = hdr_l2_type;
 	rx_properties.num_props = 2;
 
-	result = ipa3_register_intf("ecm0", &tx_properties, &rx_properties);
+	result = ipa_register_intf("ecm0", &tx_properties, &rx_properties);
 	if (result)
 		ECM_IPA_ERROR("fail on Tx/Rx properties registration\n");
 
@@ -1191,7 +1191,7 @@ static void ecm_ipa_deregister_properties(void)
 	int result;
 
 	ECM_IPA_LOG_ENTRY();
-	result = ipa3_deregister_intf("ecm0");
+	result = ipa_deregister_intf("ecm0");
 	if (result)
 		ECM_IPA_DEBUG("Fail on Tx prop deregister\n");
 	ECM_IPA_LOG_EXIT();

+ 1 - 1
drivers/platform/msm/ipa/ipa_clients/ecm_ipa.h

@@ -6,7 +6,7 @@
 #ifndef _ECM_IPA_H_
 #define _ECM_IPA_H_
 
-#include <linux/ipa.h>
+#include "ipa.h"
 
 /*
  * @priv: private data given upon ipa_connect

+ 0 - 26
drivers/platform/msm/ipa/ipa_clients/ipa_clients_i.h

@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
-* Copyright (c) 2020, The Linux Foundation. All rights reserved.
-*/
-
-#if !defined(_IPA_CLIENTS_I_H)
-#define _IPA_CLIENTS_I_H
-
-int ipa3_usb_init(void);
-void ipa3_usb_exit(void);
-
-void ipa_wdi3_register(void);
-
-void ipa_gsb_register(void);
-
-void ipa_odu_bridge_register(void);
-
-void ipa_uc_offload_register(void);
-
-void ipa_mhi_register(void);
-
-void ipa_wigig_register(void);
-
-void ipa_eth_register(void);
-
-#endif /* _IPA_CLIENTS_I_H */

+ 0 - 48
drivers/platform/msm/ipa/ipa_clients/ipa_clients_manager.c

@@ -1,48 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2020, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include "ipa_clients_i.h"
-#include "ipa_i.h"
-
-static int __init ipa_clients_manager_init(void)
-{
-	pr_info("IPA clients manager init\n");
-
-	ipa3_usb_init();
-
-	ipa_wdi3_register();
-
-	ipa_gsb_register();
-
-	ipa_uc_offload_register();
-
-	ipa_mhi_register();
-
-	ipa_wigig_register();
-
-	ipa_eth_register();
-
-	ipa3_notify_clients_registered();
-
-	ipa3_qdss_register();
-
-	return 0;
-}
-subsys_initcall(ipa_clients_manager_init);
-
-static void __exit ipa_clients_manager_exit(void)
-{
-	pr_debug("IPA clients manger exit\n");
-
-	ipa3_usb_exit();
-}
-module_exit(ipa_clients_manager_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("IPA HW clients manager");

+ 26 - 41
drivers/platform/msm/ipa/ipa_clients/ipa_eth.c

@@ -7,7 +7,7 @@
 #include "../ipa_common_i.h"
 #include "../ipa_v3/ipa_pm.h"
 #include "../ipa_v3/ipa_i.h"
-#include <linux/ipa_eth.h>
+#include "ipa_eth.h"
 
 #define OFFLOAD_DRV_NAME "ipa_eth"
 #define IPA_ETH_DBG(fmt, args...) \
@@ -213,7 +213,7 @@ static void ipa_eth_ready_notify_work(struct work_struct *work)
 	mutex_unlock(&ipa_eth_ctx->lock);
 }
 
-static int ipa_eth_register_ready_cb_internal(struct ipa_eth_ready *ready_info)
+int ipa_eth_register_ready_cb(struct ipa_eth_ready *ready_info)
 {
 	int rc;
 	struct ipa_eth_ready_cb_wrapper *ready_cb;
@@ -278,8 +278,9 @@ err_uc:
 	ipa_eth_cleanup_internal();
 	return rc;
 }
+EXPORT_SYMBOL(ipa_eth_register_ready_cb);
 
-static int ipa_eth_unregister_ready_cb_internal(struct ipa_eth_ready *ready_info)
+int ipa_eth_unregister_ready_cb(struct ipa_eth_ready *ready_info)
 {
 	struct ipa_eth_ready_cb_wrapper *entry;
 	bool find_ready_info = false;
@@ -322,6 +323,7 @@ static int ipa_eth_unregister_ready_cb_internal(struct ipa_eth_ready *ready_info
 	mutex_unlock(&ipa_eth_ctx->lock);
 	return 0;
 }
+EXPORT_SYMBOL(ipa_eth_unregister_ready_cb);
 
 static u32 ipa_eth_pipe_hdl_alloc(void *ptr)
 {
@@ -550,7 +552,7 @@ static int ipa_eth_commit_partial_hdr(
 		hdr->hdr[i].eth2_ofst = hdr_info[i].dst_mac_addr_offset;
 	}
 
-	if (ipa3_add_hdr(hdr)) {
+	if (ipa_add_hdr(hdr)) {
 		IPA_ETH_ERR("fail to add partial headers\n");
 		return -EFAULT;
 	}
@@ -656,7 +658,7 @@ static int ipa_eth_pm_deregister(struct ipa_eth_client *client)
 	return 0;
 }
 
-static int ipa_eth_client_conn_pipes_internal(struct ipa_eth_client *client)
+int ipa_eth_client_conn_pipes(struct ipa_eth_client *client)
 {
 	struct ipa_eth_client_pipe_info *pipe;
 	int rc;
@@ -704,8 +706,9 @@ static int ipa_eth_client_conn_pipes_internal(struct ipa_eth_client *client)
 	mutex_unlock(&ipa_eth_ctx->lock);
 	return 0;
 }
+EXPORT_SYMBOL(ipa_eth_client_conn_pipes);
 
-static int ipa_eth_client_disconn_pipes_internal(struct ipa_eth_client *client)
+int ipa_eth_client_disconn_pipes(struct ipa_eth_client *client)
 {
 	int rc;
 	struct ipa_eth_client_pipe_info *pipe;
@@ -762,13 +765,14 @@ static int ipa_eth_client_disconn_pipes_internal(struct ipa_eth_client *client)
 	mutex_unlock(&ipa_eth_ctx->lock);
 	return 0;
 }
+EXPORT_SYMBOL(ipa_eth_client_disconn_pipes);
 
 static void ipa_eth_msg_free_cb(void *buff, u32 len, u32 type)
 {
 	kfree(buff);
 }
 
-static int ipa_eth_client_conn_evt_internal(struct ipa_ecm_msg *msg)
+int ipa_eth_client_conn_evt(struct ipa_ecm_msg *msg)
 {
 	struct ipa_msg_meta msg_meta;
 	struct ipa_ecm_msg *eth_msg;
@@ -791,8 +795,9 @@ static int ipa_eth_client_conn_evt_internal(struct ipa_ecm_msg *msg)
 
 	return ret;
 }
+EXPORT_SYMBOL(ipa_eth_client_conn_evt);
 
-static int ipa_eth_client_disconn_evt_internal(struct ipa_ecm_msg *msg)
+int ipa_eth_client_disconn_evt(struct ipa_ecm_msg *msg)
 {
 	struct ipa_msg_meta msg_meta;
 	struct ipa_ecm_msg *eth_msg;
@@ -815,8 +820,9 @@ static int ipa_eth_client_disconn_evt_internal(struct ipa_ecm_msg *msg)
 
 	return ret;
 }
+EXPORT_SYMBOL(ipa_eth_client_disconn_evt);
 
-static int ipa_eth_client_reg_intf_internal(struct ipa_eth_intf_info *intf)
+int ipa_eth_client_reg_intf(struct ipa_eth_intf_info *intf)
 {
 	struct ipa_eth_intf *new_intf;
 	struct ipa_eth_intf *entry;
@@ -1090,8 +1096,9 @@ fail_alloc_hdr:
 	mutex_unlock(&ipa_eth_ctx->lock);
 	return ret;
 }
+EXPORT_SYMBOL(ipa_eth_client_reg_intf);
 
-static int ipa_eth_client_unreg_intf_internal(struct ipa_eth_intf_info *intf)
+int ipa_eth_client_unreg_intf(struct ipa_eth_intf_info *intf)
 {
 	int len, ret = 0;
 	struct ipa_ioc_del_hdr *hdr = NULL;
@@ -1143,13 +1150,13 @@ static int ipa_eth_client_unreg_intf_internal(struct ipa_eth_intf_info *intf)
 			IPA_ETH_DBG("IPv4 hdr hdl: %d IPv6 hdr hdl: %d\n",
 				hdr->hdl[0].hdl, hdr->hdl[1].hdl);
 
-			if (ipa3_del_hdr(hdr)) {
+			if (ipa_del_hdr(hdr)) {
 				IPA_ETH_ERR("fail to delete partial header\n");
 				ret = -EFAULT;
 				goto fail;
 			}
 
-			if (ipa3_deregister_intf(entry->netdev_name)) {
+			if (ipa_deregister_intf(entry->netdev_name)) {
 				IPA_ETH_ERR("fail to del interface props\n");
 				ret = -EFAULT;
 				goto fail;
@@ -1173,8 +1180,9 @@ fail:
 	return ret;
 
 }
+EXPORT_SYMBOL(ipa_eth_client_unreg_intf);
 
-static int ipa_eth_client_set_perf_profile_internal(struct ipa_eth_client *client,
+int ipa_eth_client_set_perf_profile(struct ipa_eth_client *client,
 	struct ipa_eth_perf_profile *profile)
 {
 	int client_type, inst_id;
@@ -1196,8 +1204,9 @@ static int ipa_eth_client_set_perf_profile_internal(struct ipa_eth_client *clien
 
 	return 0;
 }
+EXPORT_SYMBOL(ipa_eth_client_set_perf_profile);
 
-enum ipa_client_type ipa_eth_get_ipa_client_type_from_eth_type_internal(
+enum ipa_client_type ipa_eth_get_ipa_client_type_from_eth_type(
 	enum ipa_eth_client_type eth_client_type, enum ipa_eth_pipe_direction dir)
 {
 	int ipa_client_type = IPA_CLIENT_MAX;
@@ -1242,36 +1251,12 @@ enum ipa_client_type ipa_eth_get_ipa_client_type_from_eth_type_internal(
 	}
 	return ipa_client_type;
 }
+EXPORT_SYMBOL(ipa_eth_get_ipa_client_type_from_eth_type);
 
-bool ipa_eth_client_exist_internal(enum ipa_eth_client_type eth_client_type, int inst_id)
+bool ipa_eth_client_exist(enum ipa_eth_client_type eth_client_type, int inst_id)
 {
 	if (ipa_eth_ctx)
 		return ipa_eth_ctx->client[eth_client_type][inst_id].existed;
 	else return false;
 }
-
-void ipa_eth_register(void)
-{
-	struct ipa_eth_data funcs;
-
-	funcs.ipa_eth_register_ready_cb = ipa_eth_register_ready_cb_internal;
-	funcs.ipa_eth_unregister_ready_cb =
-		ipa_eth_unregister_ready_cb_internal;
-	funcs.ipa_eth_client_conn_pipes = ipa_eth_client_conn_pipes_internal;
-	funcs.ipa_eth_client_disconn_pipes =
-		ipa_eth_client_disconn_pipes_internal;
-	funcs.ipa_eth_client_reg_intf = ipa_eth_client_reg_intf_internal;
-	funcs.ipa_eth_client_unreg_intf = ipa_eth_client_unreg_intf_internal;
-	funcs.ipa_eth_client_set_perf_profile =
-		ipa_eth_client_set_perf_profile_internal;
-#if IPA_ETH_API_VER < 2
-	funcs.ipa_eth_client_conn_evt = ipa_eth_client_conn_evt_internal;
-	funcs.ipa_eth_client_disconn_evt = ipa_eth_client_disconn_evt_internal;
-#endif
-	funcs.ipa_eth_get_ipa_client_type_from_eth_type =
-		ipa_eth_get_ipa_client_type_from_eth_type_internal;
-	funcs.ipa_eth_client_exist = ipa_eth_client_exist_internal;
-
-	if (ipa_fmwk_register_ipa_eth(&funcs))
-		pr_err("failed to register ipa_eth APIs\n");
-}
+EXPORT_SYMBOL(ipa_eth_client_exist);

+ 23 - 33
drivers/platform/msm/ipa/ipa_clients/ipa_gsb.c

@@ -16,13 +16,12 @@
 #include <linux/types.h>
 #include <linux/ipv6.h>
 #include <net/addrconf.h>
-#include <linux/ipa.h>
+#include "ipa.h"
 #include <linux/cdev.h>
-#include <linux/ipa_odu_bridge.h>
+#include "ipa_odu_bridge.h"
 #include "ipa_common_i.h"
 #include "ipa_pm.h"
 #include "ipa_i.h"
-#include <linux/ipa_fmwk.h>
 
 #define IPA_GSB_DRV_NAME "ipa_gsb"
 
@@ -331,7 +330,7 @@ static int ipa_gsb_commit_partial_hdr(struct ipa_gsb_iface_info *iface_info)
 				IPA_GSB_SKB_DUMMY_HEADER) = htons(ETH_P_IPV6);
 	}
 
-	if (ipa3_add_hdr(hdr)) {
+	if (ipa_add_hdr(hdr)) {
 		IPA_GSB_ERR("fail to add partial headers\n");
 		kfree(hdr);
 		return -EFAULT;
@@ -364,7 +363,7 @@ static void ipa_gsb_delete_partial_hdr(struct ipa_gsb_iface_info *iface_info)
 	del_hdr->hdl[IPA_IP_v4].hdl = iface_info->partial_hdr_hdl[IPA_IP_v4];
 	del_hdr->hdl[IPA_IP_v6].hdl = iface_info->partial_hdr_hdl[IPA_IP_v6];
 
-	if (ipa3_del_hdr(del_hdr) != 0)
+	if (ipa_del_hdr(del_hdr) != 0)
 		IPA_GSB_ERR("failed to delete partial hdr\n");
 
 	IPA_GSB_DBG("deleted partial hdr hdl for ipv4: %d\n",
@@ -418,7 +417,7 @@ static int ipa_gsb_reg_intf_props(struct ipa_gsb_iface_info *iface_info)
 	rx_prop[1].attrib.meta_data = iface_info->iface_hdl;
 	rx_prop[1].attrib.meta_data_mask = 0xFF;
 
-	if (ipa3_register_intf(iface_info->netdev_name, &tx, &rx)) {
+	if (ipa_register_intf(iface_info->netdev_name, &tx, &rx)) {
 		IPA_GSB_ERR("fail to add interface prop\n");
 		return -EFAULT;
 	}
@@ -428,7 +427,7 @@ static int ipa_gsb_reg_intf_props(struct ipa_gsb_iface_info *iface_info)
 
 static void ipa_gsb_dereg_intf_props(struct ipa_gsb_iface_info *iface_info)
 {
-	if (ipa3_deregister_intf(iface_info->netdev_name) != 0)
+	if (ipa_deregister_intf(iface_info->netdev_name) != 0)
 		IPA_GSB_ERR("fail to dereg intf props\n");
 
 	IPA_GSB_DBG("deregistered iface props for %s\n",
@@ -487,7 +486,7 @@ fail_pm_reg:
 	return ret;
 }
 
-static int ipa_bridge_init_internal(struct ipa_bridge_init_params *params, u32 *hdl)
+int ipa_bridge_init(struct ipa_bridge_init_params *params, u32 *hdl)
 {
 	int i, ret;
 	struct ipa_gsb_iface_info *new_intf;
@@ -604,6 +603,7 @@ fail_alloc_mem:
 	mutex_unlock(&ipa_gsb_ctx->lock);
 	return ret;
 }
+EXPORT_SYMBOL(ipa_bridge_init);
 
 static void ipa_gsb_deregister_pm(void)
 {
@@ -613,7 +613,7 @@ static void ipa_gsb_deregister_pm(void)
 	ipa_gsb_ctx->pm_hdl = ~0;
 }
 
-static int ipa_bridge_cleanup_internal(u32 hdl)
+int ipa_bridge_cleanup(u32 hdl)
 {
 	int i;
 
@@ -668,6 +668,7 @@ static int ipa_bridge_cleanup_internal(u32 hdl)
 	mutex_unlock(&ipa_gsb_ctx->lock);
 	return 0;
 }
+EXPORT_SYMBOL(ipa_bridge_cleanup);
 
 static void ipa_gsb_cons_cb(void *priv, enum ipa_dp_evt_type evt,
 	unsigned long data)
@@ -845,7 +846,7 @@ fail_prod:
 	return res;
 }
 
-static int ipa_bridge_connect_internal(u32 hdl)
+int ipa_bridge_connect(u32 hdl)
 {
 	int ret;
 
@@ -906,6 +907,7 @@ static int ipa_bridge_connect_internal(u32 hdl)
 	mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
 	return 0;
 }
+EXPORT_SYMBOL(ipa_bridge_connect);
 
 static int ipa_gsb_disconnect_sys_pipe(void)
 {
@@ -931,7 +933,7 @@ static int ipa_gsb_disconnect_sys_pipe(void)
 	return 0;
 }
 
-static int ipa_bridge_disconnect_internal(u32 hdl)
+int ipa_bridge_disconnect(u32 hdl)
 {
 	int ret = 0;
 
@@ -998,8 +1000,9 @@ fail:
 	mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
 	return ret;
 }
+EXPORT_SYMBOL(ipa_bridge_disconnect);
 
-static int ipa_bridge_resume_internal(u32 hdl)
+int ipa_bridge_resume(u32 hdl)
 {
 	int ret;
 
@@ -1065,8 +1068,9 @@ static int ipa_bridge_resume_internal(u32 hdl)
 	mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
 	return 0;
 }
+EXPORT_SYMBOL(ipa_bridge_resume);
 
-static int ipa_bridge_suspend_internal(u32 hdl)
+int ipa_bridge_suspend(u32 hdl)
 {
 	int ret;
 
@@ -1107,7 +1111,7 @@ static int ipa_bridge_suspend_internal(u32 hdl)
 
 	mutex_lock(&ipa_gsb_ctx->lock);
 	if (ipa_gsb_ctx->num_resumed_iface == 1) {
-		ret = ipa3_stop_gsi_channel(
+		ret = ipa_stop_gsi_channel(
 			ipa_gsb_ctx->cons_hdl);
 		if (ret) {
 			IPA_GSB_ERR(
@@ -1139,8 +1143,9 @@ static int ipa_bridge_suspend_internal(u32 hdl)
 	mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
 	return 0;
 }
+EXPORT_SYMBOL(ipa_bridge_suspend);
 
-static int ipa_bridge_set_perf_profile_internal(u32 hdl, u32 bandwidth)
+int ipa_bridge_set_perf_profile(u32 hdl, u32 bandwidth)
 {
 	int ret;
 
@@ -1166,8 +1171,9 @@ static int ipa_bridge_set_perf_profile_internal(u32 hdl, u32 bandwidth)
 	mutex_unlock(&ipa_gsb_ctx->iface_lock[hdl]);
 	return ret;
 }
+EXPORT_SYMBOL(ipa_bridge_set_perf_profile);
 
-static int ipa_bridge_tx_dp_internal(u32 hdl, struct sk_buff *skb,
+int ipa_bridge_tx_dp(u32 hdl, struct sk_buff *skb,
 	struct ipa_tx_meta *metadata)
 {
 	struct ipa_gsb_mux_hdr *mux_hdr;
@@ -1228,23 +1234,7 @@ static int ipa_bridge_tx_dp_internal(u32 hdl, struct sk_buff *skb,
 
 	return 0;
 }
-
-void ipa_gsb_register(void)
-{
-	struct ipa_gsb_data funcs;
-
-	funcs.ipa_bridge_init = ipa_bridge_init_internal;
-	funcs.ipa_bridge_connect = ipa_bridge_connect_internal;
-	funcs.ipa_bridge_set_perf_profile = ipa_bridge_set_perf_profile_internal;
-	funcs.ipa_bridge_disconnect = ipa_bridge_disconnect_internal;
-	funcs.ipa_bridge_suspend = ipa_bridge_suspend_internal;
-	funcs.ipa_bridge_resume = ipa_bridge_resume_internal;
-	funcs.ipa_bridge_tx_dp = ipa_bridge_tx_dp_internal;
-	funcs.ipa_bridge_cleanup = ipa_bridge_cleanup_internal;
-
-	if (ipa_fmwk_register_gsb(&funcs))
-		pr_err("failed to register ipa_gsb APIs\n");
-}
+EXPORT_SYMBOL(ipa_bridge_tx_dp);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("ipa gsb driver");

+ 21 - 31
drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c

@@ -8,7 +8,7 @@
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
-#include <linux/ipa.h>
+#include "ipa.h"
 #include <linux/msm_gsi.h>
 #include <linux/ipa_mhi.h>
 #include "gsi.h"
@@ -16,7 +16,6 @@
 #include "ipa_pm.h"
 #include "ipa_i.h"
 #include "ipahal.h"
-#include <linux/ipa_fmwk.h>
 
 #define IPA_MHI_DRV_NAME "ipa_mhi_client"
 
@@ -654,7 +653,7 @@ static int ipa_mhi_set_state(enum ipa_mhi_state new_state)
  * Return codes: 0	  : success
  *		 negative : error
  */
-static int ipa_mhi_start_internal(struct ipa_mhi_start_params *params)
+int ipa_mhi_start(struct ipa_mhi_start_params *params)
 {
 	int res;
 	struct ipa_mhi_init_engine init_params;
@@ -735,6 +734,7 @@ fail_pm_activate:
 	ipa_mhi_set_state(IPA_MHI_STATE_INITIALIZED);
 	return res;
 }
+EXPORT_SYMBOL(ipa_mhi_start);
 
 /**
  * ipa_mhi_get_channel_context() - Get corresponding channel context
@@ -1080,7 +1080,7 @@ static int ipa_mhi_suspend_gsi_channel(struct ipa_mhi_channel_ctx *channel)
 	if (clnt_hdl < 0)
 		return -EFAULT;
 
-	res = ipa3_stop_gsi_channel(clnt_hdl);
+	res = ipa_stop_gsi_channel(clnt_hdl);
 	if (res != 0 && res != -GSI_STATUS_AGAIN &&
 	    res != -GSI_STATUS_TIMED_OUT) {
 		IPA_MHI_ERR("GSI stop channel failed %d\n", res);
@@ -1267,7 +1267,7 @@ static enum ipa_client_type ipa3_mhi_get_client_by_chid(u32 chid)
  * Return codes: 0	  : success
  *		 negative : error
  */
-static int ipa_mhi_connect_pipe_internal(struct ipa_mhi_connect_params *in, u32 *clnt_hdl)
+int ipa_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl)
 {
 	int res;
 	unsigned long flags;
@@ -1400,6 +1400,7 @@ fail_start_channel:
 	IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
 	return -EPERM;
 }
+EXPORT_SYMBOL(ipa_mhi_connect_pipe);
 
 /**
  * ipa_mhi_disconnect_pipe() - Disconnect pipe from IPA and reset corresponding
@@ -1415,7 +1416,7 @@ fail_start_channel:
  * Return codes: 0	  : success
  *		 negative : error
  */
-static int ipa_mhi_disconnect_pipe_internal(u32 clnt_hdl)
+int ipa_mhi_disconnect_pipe(u32 clnt_hdl)
 {
 	int res;
 	enum ipa_client_type client;
@@ -1474,6 +1475,7 @@ fail_reset_channel:
 	IPA_ACTIVE_CLIENTS_DEC_EP(client);
 	return res;
 }
+EXPORT_SYMBOL(ipa_mhi_disconnect_pipe);
 
 static int ipa_mhi_suspend_channels(struct ipa_mhi_channel_ctx *channels,
 	int max_channels)
@@ -1870,7 +1872,7 @@ fail_suspend_dl_channel:
  * Return codes: 0	  : success
  *		 negative : error
  */
-static int ipa_mhi_suspend_internal(bool force)
+int ipa_mhi_suspend(bool force)
 {
 	int res;
 	bool empty;
@@ -1956,6 +1958,7 @@ fail_suspend_dl_channel:
 	ipa_mhi_set_state(IPA_MHI_STATE_STARTED);
 	return res;
 }
+EXPORT_SYMBOL(ipa_mhi_suspend);
 
 /**
  * ipa_mhi_resume() - Resume MHI accelerated channels
@@ -1971,7 +1974,7 @@ fail_suspend_dl_channel:
  * Return codes: 0	  : success
  *		 negative : error
  */
-static int ipa_mhi_resume_internal(void)
+int ipa_mhi_resume(void)
 {
 	int res;
 
@@ -2038,6 +2041,7 @@ fail_pm_activate:
 	ipa_mhi_set_state(IPA_MHI_STATE_SUSPENDED);
 	return res;
 }
+EXPORT_SYMBOL(ipa_mhi_resume);
 
 
 static int  ipa_mhi_destroy_channels(struct ipa_mhi_channel_ctx *channels,
@@ -2056,7 +2060,7 @@ static int  ipa_mhi_destroy_channels(struct ipa_mhi_channel_ctx *channels,
 		if (channel->state != IPA_HW_MHI_CHANNEL_STATE_DISABLE) {
 			clnt_hdl = ipa_get_ep_mapping(channel->client);
 			IPA_MHI_DBG("disconnect pipe (ep: %d)\n", clnt_hdl);
-			res = ipa_mhi_disconnect_pipe_internal(clnt_hdl);
+			res = ipa_mhi_disconnect_pipe(clnt_hdl);
 			if (res) {
 				IPA_MHI_ERR(
 					"failed to disconnect pipe %d, err %d\n"
@@ -2136,7 +2140,7 @@ static void ipa_mhi_deregister_pm(void)
  * MHI resources.
  * When this function returns ipa_mhi can re-initialize.
  */
-static void ipa_mhi_destroy_internal(void)
+void ipa_mhi_destroy(void)
 {
 	int res;
 
@@ -2170,6 +2174,7 @@ static void ipa_mhi_destroy_internal(void)
 fail:
 	ipa_assert();
 }
+EXPORT_SYMBOL(ipa_mhi_destroy);
 
 static void ipa_mhi_pm_cb(void *p, enum ipa_pm_cb_event event)
 {
@@ -2264,7 +2269,7 @@ fail_pm_cons:
  * Return codes: 0	  : success
  *		 negative : error
  */
-static int ipa_mhi_init_internal(struct ipa_mhi_init_params *params)
+int ipa_mhi_init(struct ipa_mhi_init_params *params)
 {
 	int res;
 
@@ -2358,6 +2363,7 @@ fail_create_wq:
 fail_alloc_ctx:
 	return res;
 }
+EXPORT_SYMBOL(ipa_mhi_init);
 
 /**
  * ipa_mhi_handle_ipa_config_req() - hanle IPA CONFIG QMI message
@@ -2369,12 +2375,13 @@ fail_alloc_ctx:
  * Return codes: 0	  : success
  *		 negative : error
  */
-static int ipa_mhi_handle_ipa_config_req_cb(struct ipa_config_req_msg_v01 *config_req)
+int ipa_mhi_handle_ipa_config_req(struct ipa_config_req_msg_v01 *config_req)
 {
 	IPA_MHI_FUNC_ENTRY();
 	IPA_MHI_FUNC_EXIT();
 	return 0;
 }
+EXPORT_SYMBOL(ipa_mhi_handle_ipa_config_req);
 
 int ipa_mhi_is_using_dma(bool *flag)
 {
@@ -2408,7 +2415,7 @@ int ipa_mhi_is_using_dma(bool *flag)
  * Return codes: 0	  : success
  *		 negative : error
  */
-int ipa_mhi_update_mstate_internal(enum ipa_mhi_mstate mstate_info)
+int ipa_mhi_update_mstate(enum ipa_mhi_mstate mstate_info)
 {
 	IPA_MHI_FUNC_ENTRY();
 
@@ -2425,24 +2432,7 @@ int ipa_mhi_update_mstate_internal(enum ipa_mhi_mstate mstate_info)
 	IPA_MHI_FUNC_EXIT();
 	return 0;
 }
-
-void ipa_mhi_register(void)
-{
-	struct ipa_mhi_data funcs;
-
-	funcs.ipa_mhi_init = ipa_mhi_init_internal;
-	funcs.ipa_mhi_start = ipa_mhi_start_internal;
-	funcs.ipa_mhi_connect_pipe = ipa_mhi_connect_pipe_internal;
-	funcs.ipa_mhi_disconnect_pipe = ipa_mhi_disconnect_pipe_internal;
-	funcs.ipa_mhi_suspend = ipa_mhi_suspend_internal;
-	funcs.ipa_mhi_resume = ipa_mhi_resume_internal;
-	funcs.ipa_mhi_destroy = ipa_mhi_destroy_internal;
-	funcs.ipa_mhi_handle_ipa_config_req = ipa_mhi_handle_ipa_config_req_cb;
-	funcs.ipa_mhi_update_mstate = ipa_mhi_update_mstate_internal;
-	if (ipa_fmwk_register_ipa_mhi(&funcs))
-		pr_err("failed to register ipa_mhi APIs\n");
-}
-EXPORT_SYMBOL(ipa_mhi_register);
+EXPORT_SYMBOL(ipa_mhi_update_mstate);
 
 
 MODULE_LICENSE("GPL v2");

+ 19 - 29
drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c

@@ -3,12 +3,11 @@
  * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
  */
 
-#include <linux/ipa_uc_offload.h>
+#include "ipa_uc_offload.h"
 #include <linux/msm_ipa.h>
 #include <linux/if_vlan.h>
 #include "ipa_common_i.h"
 #include "ipa_pm.h"
-#include <linux/ipa_fmwk.h>
 
 #define IPA_NTN_DMA_POOL_ALIGNMENT 8
 #define OFFLOAD_DRV_NAME "ipa_uc_offload"
@@ -100,7 +99,7 @@ static int ipa_commit_partial_hdr(
 		hdr->hdr[i].eth2_ofst = hdr_info[i].dst_mac_addr_offset;
 	}
 
-	if (ipa3_add_hdr(hdr)) {
+	if (ipa_add_hdr(hdr)) {
 		IPA_UC_OFFLOAD_ERR("fail to add partial headers\n");
 		return -EFAULT;
 	}
@@ -273,7 +272,7 @@ static int ipa_uc_offload_ntn_reg_intf(
 		rx_prop[1].attrib.meta_data_mask = inp->meta_data_mask;
 	}
 
-	if (ipa3_register_intf(inp->netdev_name, &tx, &rx)) {
+	if (ipa_register_intf(inp->netdev_name, &tx, &rx)) {
 		IPA_UC_OFFLOAD_ERR("fail to add interface prop\n");
 		memset(ntn_ctx, 0, sizeof(*ntn_ctx));
 		ret = -EFAULT;
@@ -295,7 +294,7 @@ fail_alloc:
 	return ret;
 }
 
-static int ipa_uc_offload_reg_intf_internal(
+int ipa_uc_offload_reg_intf(
 	struct ipa_uc_offload_intf_params *inp,
 	struct ipa_uc_offload_out_params *outp)
 {
@@ -354,6 +353,7 @@ static int ipa_uc_offload_reg_intf_internal(
 
 	return ret;
 }
+EXPORT_SYMBOL(ipa_uc_offload_reg_intf);
 
 
 static int ipa_uc_ntn_alloc_conn_smmu_info(struct ipa_ntn_setup_info *dest,
@@ -465,7 +465,7 @@ fail:
 	return result;
 }
 
-static int ipa_uc_offload_conn_pipes_internal(struct ipa_uc_offload_conn_in_params *inp,
+int ipa_uc_offload_conn_pipes(struct ipa_uc_offload_conn_in_params *inp,
 			struct ipa_uc_offload_conn_out_params *outp)
 {
 	int ret = 0;
@@ -509,6 +509,7 @@ static int ipa_uc_offload_conn_pipes_internal(struct ipa_uc_offload_conn_in_para
 
 	return ret;
 }
+EXPORT_SYMBOL(ipa_uc_offload_conn_pipes);
 
 static int ipa_uc_ntn_disconn_pipes(struct ipa_uc_offload_ctx *ntn_ctx)
 {
@@ -550,7 +551,7 @@ static int ipa_uc_ntn_disconn_pipes(struct ipa_uc_offload_ctx *ntn_ctx)
 	return ret;
 }
 
-static int ipa_uc_offload_disconn_pipes_internal(u32 clnt_hdl)
+int ipa_uc_offload_disconn_pipes(u32 clnt_hdl)
 {
 	struct ipa_uc_offload_ctx *offload_ctx;
 	int ret = 0;
@@ -586,6 +587,7 @@ static int ipa_uc_offload_disconn_pipes_internal(u32 clnt_hdl)
 
 	return ret;
 }
+EXPORT_SYMBOL(ipa_uc_offload_disconn_pipes);
 
 static int ipa_uc_ntn_cleanup(struct ipa_uc_offload_ctx *ntn_ctx)
 {
@@ -604,13 +606,13 @@ static int ipa_uc_ntn_cleanup(struct ipa_uc_offload_ctx *ntn_ctx)
 	hdr->hdl[0].hdl = ntn_ctx->partial_hdr_hdl[0];
 	hdr->hdl[1].hdl = ntn_ctx->partial_hdr_hdl[1];
 
-	if (ipa3_del_hdr(hdr)) {
+	if (ipa_del_hdr(hdr)) {
 		IPA_UC_OFFLOAD_ERR("fail to delete partial header\n");
 		result = -EFAULT;
 		goto fail;
 	}
 
-	if (ipa3_deregister_intf(ntn_ctx->netdev_name)) {
+	if (ipa_deregister_intf(ntn_ctx->netdev_name)) {
 		IPA_UC_OFFLOAD_ERR("fail to delete interface prop\n");
 		result = -EFAULT;
 		goto fail;
@@ -621,7 +623,7 @@ fail:
 	return result;
 }
 
-static int ipa_uc_offload_cleanup_internal(u32 clnt_hdl)
+int ipa_uc_offload_cleanup(u32 clnt_hdl)
 {
 	struct ipa_uc_offload_ctx *offload_ctx;
 	int ret = 0;
@@ -667,6 +669,7 @@ static int ipa_uc_offload_cleanup_internal(u32 clnt_hdl)
 
 	return ret;
 }
+EXPORT_SYMBOL(ipa_uc_offload_cleanup);
 
 /**
  * ipa_uc_offload_uc_rdyCB() - To register uC ready CB if uC not
@@ -677,7 +680,7 @@ static int ipa_uc_offload_cleanup_internal(u32 clnt_hdl)
  * Returns:	0 on success, negative on failure
  *
  */
-int ipa_uc_offload_reg_rdyCB_internal(struct ipa_uc_ready_params *inp)
+int ipa_uc_offload_reg_rdyCB(struct ipa_uc_ready_params *inp)
 {
 	int ret = 0;
 
@@ -697,14 +700,16 @@ int ipa_uc_offload_reg_rdyCB_internal(struct ipa_uc_ready_params *inp)
 
 	return ret;
 }
+EXPORT_SYMBOL(ipa_uc_offload_reg_rdyCB);
 
-void ipa_uc_offload_dereg_rdyCB_internal(enum ipa_uc_offload_proto proto)
+void ipa_uc_offload_dereg_rdyCB(enum ipa_uc_offload_proto proto)
 {
 	if (proto == IPA_UC_NTN || proto == IPA_UC_NTN_V2X)
 		ipa3_ntn_uc_dereg_rdyCB();
 }
+EXPORT_SYMBOL(ipa_uc_offload_dereg_rdyCB);
 
-int ipa_set_perf_profile_internal(struct ipa_perf_profile *profile)
+int ipa_set_perf_profile(struct ipa_perf_profile *profile)
 {
 	if (!profile) {
 		IPA_UC_OFFLOAD_ERR("Invalid input\n");
@@ -724,20 +729,5 @@ int ipa_set_perf_profile_internal(struct ipa_perf_profile *profile)
 		ipa_uc_offload_ctx[IPA_UC_NTN]->pm_hdl,
 		profile->max_supported_bw_mbps);
 }
+EXPORT_SYMBOL(ipa_set_perf_profile);
 
-void ipa_uc_offload_register(void)
-{
-	struct ipa_uc_offload_data funcs;
-
-	funcs.ipa_uc_offload_reg_intf = ipa_uc_offload_reg_intf_internal;
-	funcs.ipa_uc_offload_cleanup = ipa_uc_offload_cleanup_internal;
-	funcs.ipa_uc_offload_conn_pipes = ipa_uc_offload_conn_pipes_internal;
-	funcs.ipa_uc_offload_disconn_pipes =
-		ipa_uc_offload_disconn_pipes_internal;
-	funcs.ipa_set_perf_profile = ipa_set_perf_profile_internal;
-	funcs.ipa_uc_offload_reg_rdyCB = ipa_uc_offload_reg_rdyCB_internal;
-	funcs.ipa_uc_offload_dereg_rdyCB = ipa_uc_offload_dereg_rdyCB_internal;
-
-	if (ipa_fmwk_register_uc_offload(&funcs))
-		pr_err("failed to register uc_offload APIs\n");
-}

+ 40 - 32
drivers/platform/msm/ipa/ipa_clients/ipa_usb.c

@@ -7,9 +7,8 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/debugfs.h>
-#include <linux/ipa.h>
+#include "ipa.h"
 #include <linux/ipa_usb.h>
-#include <linux/ipa_fmwk.h>
 #include "rndis_ipa.h"
 #include "ecm_ipa.h"
 #include "ipa_i.h"
@@ -758,7 +757,7 @@ static int ipa_usb_set_lock_unlock(bool is_lock)
 	return 0;
 }
 
-static int ipa_usb_init_teth_prot_internal(enum ipa_usb_teth_prot teth_prot,
+int ipa_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot,
 			   struct ipa_usb_teth_params *teth_params,
 			   int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event,
 			   void *),
@@ -955,6 +954,7 @@ bad_params:
 	mutex_unlock(&ipa3_usb_ctx->general_mutex);
 	return result;
 }
+EXPORT_SYMBOL(ipa_usb_init_teth_prot);
 
 static void ipa3_usb_gsi_evt_err_cb(struct gsi_evt_err_notify *notify)
 {
@@ -993,8 +993,7 @@ static bool ipa3_usb_check_chan_params(struct ipa_usb_xdci_chan_params *params)
 	IPA_USB_DBG_LOW("depcmd_hi_addr = %x\n",
 		params->xfer_scratch.depcmd_hi_addr);
 
-	if (params->client >= IPA_CLIENT_MAX  ||
-		params->teth_prot < 0 ||
+	if (params->teth_prot < 0 ||
 		params->teth_prot >= IPA_USB_MAX_TETH_PROT_SIZE ||
 		params->xfer_ring_len % GSI_CHAN_RE_SIZE_16B ||
 		params->xfer_scratch.const_buffer_size < 1 ||
@@ -1004,10 +1003,6 @@ static bool ipa3_usb_check_chan_params(struct ipa_usb_xdci_chan_params *params)
 	}
 	switch (params->teth_prot) {
 	case IPA_USB_DIAG:
-		if (!IPA_CLIENT_IS_CONS(params->client)) {
-			IPA_USB_ERR("DPL supports only DL channel\n");
-			return false;
-		}
 	case IPA_USB_RNDIS:
 	case IPA_USB_ECM:
 		if (ipa3_usb_ctx->teth_prot_ctx[params->teth_prot].state ==
@@ -1143,9 +1138,12 @@ static int ipa3_usb_request_xdci_channel(
 		&ipa3_usb_ctx->teth_prot_ctx[teth_prot].teth_prot_params.ecm;
 
 	memset(&chan_params, 0, sizeof(struct ipa_request_gsi_channel_params));
-	memcpy(&chan_params.ipa_ep_cfg, &params->ipa_ep_cfg,
-		sizeof(struct ipa_ep_cfg));
-	chan_params.client = params->client;
+	chan_params.ipa_ep_cfg.mode.mode = IPA_BASIC;
+	if (params->dir == GSI_CHAN_DIR_TO_GSI)
+		chan_params.client = IPA_CLIENT_USB_PROD;
+	else
+		chan_params.client = (params->teth_prot == IPA_USB_DIAG) ?
+			IPA_CLIENT_USB_DPL_CONS : IPA_CLIENT_USB_CONS;
 	switch (params->teth_prot) {
 	case IPA_USB_RNDIS:
 		chan_params.priv = rndis_ptr->private;
@@ -1239,7 +1237,7 @@ static int ipa3_usb_request_xdci_channel(
 	chan_params.evt_scratch.xdci.gevntcount_hi_addr =
 		params->gevntcount_hi_addr;
 	chan_params.chan_params.prot = GSI_CHAN_PROT_XDCI;
-	chan_params.chan_params.dir = params->dir;
+	chan_params.chan_params.dir = (enum gsi_chan_dir)(params->dir);
 	/* chan_id is set in ipa3_request_gsi_channel() */
 	chan_params.chan_params.re_size = GSI_CHAN_RE_SIZE_16B;
 	chan_params.chan_params.ring_len = params->xfer_ring_len;
@@ -1911,7 +1909,7 @@ static void ipa_usb_debugfs_init(void){}
 static void ipa_usb_debugfs_remove(void){}
 #endif /* CONFIG_DEBUG_FS */
 
-static int ipa_usb_xdci_connect_internal(struct ipa_usb_xdci_chan_params *ul_chan_params,
+int ipa_usb_xdci_connect(struct ipa_usb_xdci_chan_params *ul_chan_params,
 			 struct ipa_usb_xdci_chan_params *dl_chan_params,
 			 struct ipa_req_chan_out_params *ul_out_params,
 			 struct ipa_req_chan_out_params *dl_out_params,
@@ -1993,6 +1991,7 @@ bad_params:
 	mutex_unlock(&ipa3_usb_ctx->general_mutex);
 	return result;
 }
+EXPORT_SYMBOL(ipa_usb_xdci_connect);
 
 static int ipa3_usb_check_disconnect_prot(enum ipa_usb_teth_prot teth_prot)
 {
@@ -2078,7 +2077,7 @@ static int ipa_usb_xdci_dismiss_channels(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
 	return 0;
 }
 
-static int ipa_usb_xdci_disconnect_internal(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
 			    enum ipa_usb_teth_prot teth_prot)
 {
 	int result = 0;
@@ -2188,8 +2187,9 @@ bad_params:
 	return result;
 
 }
+EXPORT_SYMBOL(ipa_usb_xdci_disconnect);
 
-static int ipa_usb_deinit_teth_prot_internal(enum ipa_usb_teth_prot teth_prot)
+int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot)
 {
 	int result = -EFAULT;
 	enum ipa3_usb_transport_type ttype;
@@ -2309,6 +2309,7 @@ bad_params:
 	mutex_unlock(&ipa3_usb_ctx->general_mutex);
 	return result;
 }
+EXPORT_SYMBOL(ipa_usb_deinit_teth_prot);
 
 /* Assumes lock already acquired */
 static int ipa3_usb_suspend_no_remote_wakeup(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
@@ -2394,7 +2395,7 @@ fail_exit:
 	return result;
 }
 
-static int ipa_usb_xdci_suspend_internal(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
 	enum ipa_usb_teth_prot teth_prot, bool with_remote_wakeup)
 {
 	int result = 0;
@@ -2473,6 +2474,7 @@ bad_params:
 	mutex_unlock(&ipa3_usb_ctx->general_mutex);
 	return result;
 }
+EXPORT_SYMBOL(ipa_usb_xdci_suspend);
 
 /* Assumes lock already acquired */
 static int ipa3_usb_resume_no_remote_wakeup(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
@@ -2552,7 +2554,7 @@ fail_exit:
 	return result;
 }
 
-static int ipa_usb_xdci_resume_internal(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
 	enum ipa_usb_teth_prot teth_prot)
 {
 	int result = -EFAULT;
@@ -2639,13 +2641,13 @@ static int ipa_usb_xdci_resume_internal(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
 	return 0;
 
 state_change_connected_fail:
-	result = ipa3_stop_gsi_channel(dl_clnt_hdl);
+	result = ipa_stop_gsi_channel(dl_clnt_hdl);
 	if (result)
 		IPA_USB_ERR("Error stopping DL/DPL channel: %d\n",
 			result);
 start_dl_fail:
 	if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
-		result = ipa3_stop_gsi_channel(ul_clnt_hdl);
+		result = ipa_stop_gsi_channel(ul_clnt_hdl);
 		if (result)
 			IPA_USB_ERR("Error stopping UL channel: %d\n", result);
 	}
@@ -2661,14 +2663,26 @@ bad_params:
 	mutex_unlock(&ipa3_usb_ctx->general_mutex);
 	return result;
 }
+EXPORT_SYMBOL(ipa_usb_xdci_resume);
 
-static bool ipa_usb_is_teth_prot_connected_internal(enum ipa_usb_teth_prot usb_teth_prot)
+bool ipa_usb_is_teth_prot_connected(enum ipa_usb_teth_prot usb_teth_prot)
 {
 	if (ipa3_usb_ctx)
 		if (ipa3_usb_ctx->teth_prot_ctx[usb_teth_prot].state == IPA_USB_TETH_PROT_CONNECTED)
 			return true;
 	return false;
 }
+EXPORT_SYMBOL(ipa_usb_is_teth_prot_connected);
+
+static struct ipa_usb_ops usb_ops = {
+	ipa_usb_init_teth_prot,
+	ipa_usb_xdci_connect,
+	ipa_usb_xdci_disconnect,
+	ipa_usb_deinit_teth_prot,
+	ipa_usb_xdci_suspend,
+	ipa_usb_xdci_resume,
+	ipa_usb_is_teth_prot_connected,
+};
 
 int ipa3_usb_init(void)
 {
@@ -2676,7 +2690,6 @@ int ipa3_usb_init(void)
 	unsigned long flags;
 	int res;
 	struct ipa3_usb_pm_context *pm_ctx;
-	struct ipa_usb_data funcs;
 
 	pr_info("ipa_usb driver init\n");
 
@@ -2726,17 +2739,12 @@ int ipa3_usb_init(void)
 
 	ipa_usb_debugfs_init();
 
-	funcs.ipa_usb_init_teth_prot = ipa_usb_init_teth_prot_internal;
-	funcs.ipa_usb_xdci_connect = ipa_usb_xdci_connect_internal;
-	funcs.ipa_usb_xdci_disconnect = ipa_usb_xdci_disconnect_internal;
-	funcs.ipa_usb_deinit_teth_prot = ipa_usb_deinit_teth_prot_internal;
-	funcs.ipa_usb_xdci_suspend = ipa_usb_xdci_suspend_internal;
-	funcs.ipa_usb_xdci_resume = ipa_usb_xdci_resume_internal;
-	funcs.ipa_usb_is_teth_prot_connected =
-		ipa_usb_is_teth_prot_connected_internal;
-	if (ipa_fmwk_register_ipa_usb(&funcs)) {
-		pr_err("failed to register ipa_usb APIs\n");
+	res = ipa_register_ipa_ready_cb(ipa_ready_callback, (void *)&usb_ops);
+	if (res < 0) {
+		pr_err("failed to register USB ops CB\n");
+			goto ipa_usb_workqueue_fail;
 	}
+	pr_err("ILIA: ipa_ready_callback registered\n");
 
 	pr_info("exit: IPA_USB init success!\n");
 

+ 76 - 89
drivers/platform/msm/ipa/ipa_clients/ipa_wdi3.c

@@ -4,7 +4,7 @@
  * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
-#include <linux/ipa_wdi3.h>
+#include "ipa_wdi3.h"
 #include <linux/msm_ipa.h>
 #include <linux/string.h>
 #include "ipa_common_i.h"
@@ -110,20 +110,22 @@ static int assign_hdl_for_inst(int inst_id)
 	return hdl;
 }
 
-static int ipa_get_wdi_version_internal(void)
+int ipa_get_wdi_version(void)
 {
 	if (ipa_wdi_ctx_list[0])
 		return ipa_wdi_ctx_list[0]->wdi_version;
 	/* default version is IPA_WDI_3 */
 	return IPA_WDI_3;
 }
+EXPORT_SYMBOL(ipa_get_wdi_version);
 
-static bool ipa_wdi_is_tx1_used_internal(void)
+bool ipa_wdi_is_tx1_used(void)
 {
 	if (ipa_wdi_ctx_list[0])
 		return ipa_wdi_ctx_list[0]->is_tx1_used;
 	return 0;
 }
+EXPORT_SYMBOL(ipa_wdi_is_tx1_used);
 
 static void ipa_wdi_pm_cb(void *p, enum ipa_pm_cb_event event)
 {
@@ -158,7 +160,7 @@ static int ipa_wdi_commit_partial_hdr(
 		hdr->hdr[i].eth2_ofst = hdr_info[i].dst_mac_addr_offset;
 	}
 
-	if (ipa3_add_hdr(hdr)) {
+	if (ipa_add_hdr(hdr)) {
 		IPA_WDI_ERR("fail to add partial headers\n");
 		return -EFAULT;
 	}
@@ -174,7 +176,7 @@ static int ipa_wdi_commit_partial_hdr(
  *
  * @Return 0 on success, negative on failure
  */
-static int ipa_wdi_get_capabilities_internal(
+int ipa_wdi_get_capabilities(
 	struct ipa_wdi_capabilities_out_params *out)
 {
 	if (out == NULL) {
@@ -186,6 +188,7 @@ static int ipa_wdi_get_capabilities_internal(
 	IPA_WDI_DBG("Wdi Capability: %d\n", out->num_of_instances);
 	return 0;
 }
+EXPORT_SYMBOL(ipa_wdi_get_capabilities);
 
 /**
  * function to init WDI IPA offload data path
@@ -195,7 +198,7 @@ static int ipa_wdi_get_capabilities_internal(
  *
  * @Return 0 on success, negative on failure
  */
-static int ipa_wdi_init_per_inst_internal(struct ipa_wdi_init_in_params *in,
+int ipa_wdi_init_per_inst(struct ipa_wdi_init_in_params *in,
 	struct ipa_wdi_init_out_params *out)
 {
 	struct ipa_wdi_uc_ready_params uc_ready_params;
@@ -248,7 +251,7 @@ static int ipa_wdi_init_per_inst_internal(struct ipa_wdi_init_in_params *in,
 	else
 		smmu_in.smmu_client = IPA_SMMU_WLAN1_CLIENT;
 
-	if (ipa3_get_smmu_params(&smmu_in, &smmu_out))
+	if (ipa_get_smmu_params(&smmu_in, &smmu_out))
 		out->is_smmu_enabled = false;
 	else
 		out->is_smmu_enabled = smmu_out.smmu_enable;
@@ -264,6 +267,7 @@ static int ipa_wdi_init_per_inst_internal(struct ipa_wdi_init_in_params *in,
 
 	return 0;
 }
+EXPORT_SYMBOL(ipa_wdi_init_per_inst);
 
 /**
  * function to register interface
@@ -272,7 +276,7 @@ static int ipa_wdi_init_per_inst_internal(struct ipa_wdi_init_in_params *in,
  *
  * @Return 0 on success, negative on failure
  */
-static int ipa_wdi_reg_intf_per_inst_internal(
+int ipa_wdi_reg_intf_per_inst(
 	struct ipa_wdi_reg_intf_in_params *in)
 {
 	struct ipa_ioc_add_hdr *hdr;
@@ -430,7 +434,7 @@ static int ipa_wdi_reg_intf_per_inst_internal(
 		rx_prop[1].attrib.meta_data = in->meta_data;
 		rx_prop[1].attrib.meta_data_mask = in->meta_data_mask;
 	}
-	if (ipa3_register_intf(in->netdev_name, &tx, &rx)) {
+	if (ipa_register_intf(in->netdev_name, &tx, &rx)) {
 		IPA_WDI_ERR("fail to add interface prop\n");
 		ret = -EFAULT;
 	}
@@ -449,6 +453,7 @@ fail_alloc_hdr:
 	mutex_unlock(&ipa_wdi_ctx_list[in->hdl]->lock);
 	return ret;
 }
+EXPORT_SYMBOL(ipa_wdi_reg_intf_per_inst);
 
 /**
  * function to connect pipes
@@ -460,7 +465,7 @@ fail_alloc_hdr:
  *
  * @Return 0 on success, negative on failure
  */
-static int ipa_wdi_conn_pipes_per_inst_internal(struct ipa_wdi_conn_in_params *in,
+int ipa_wdi_conn_pipes_per_inst(struct ipa_wdi_conn_in_params *in,
 	struct ipa_wdi_conn_out_params *out)
 {
 	int i, j, ret = 0;
@@ -574,7 +579,7 @@ static int ipa_wdi_conn_pipes_per_inst_internal(struct ipa_wdi_conn_in_params *i
 				in->u_rx.rx.is_txr_rn_db_pcie_addr;
 			in_rx.u.ul.is_evt_rn_db_pcie_addr =
 				in->u_rx.rx.is_evt_rn_db_pcie_addr;
-			if (ipa3_connect_wdi_pipe(&in_rx, &out_rx)) {
+			if (ipa_connect_wdi_pipe(&in_rx, &out_rx)) {
 				IPA_WDI_ERR("fail to setup rx pipe\n");
 				ret = -EFAULT;
 				goto fail_connect_pipe;
@@ -603,7 +608,7 @@ static int ipa_wdi_conn_pipes_per_inst_internal(struct ipa_wdi_conn_in_params *i
 				in->u_tx.tx.is_txr_rn_db_pcie_addr;
 			in_tx.u.dl.is_evt_rn_db_pcie_addr =
 				in->u_tx.tx.is_evt_rn_db_pcie_addr;
-			if (ipa3_connect_wdi_pipe(&in_tx, &out_tx)) {
+			if (ipa_connect_wdi_pipe(&in_tx, &out_tx)) {
 				IPA_WDI_ERR("fail to setup tx pipe\n");
 				ret = -EFAULT;
 				goto fail;
@@ -634,7 +639,7 @@ static int ipa_wdi_conn_pipes_per_inst_internal(struct ipa_wdi_conn_in_params *i
 				in->u_rx.rx_smmu.is_txr_rn_db_pcie_addr;
 			in_rx.u.ul_smmu.is_evt_rn_db_pcie_addr =
 				in->u_rx.rx_smmu.is_evt_rn_db_pcie_addr;
-			if (ipa3_connect_wdi_pipe(&in_rx, &out_rx)) {
+			if (ipa_connect_wdi_pipe(&in_rx, &out_rx)) {
 				IPA_WDI_ERR("fail to setup rx pipe\n");
 				ret = -EFAULT;
 				goto fail_connect_pipe;
@@ -663,7 +668,7 @@ static int ipa_wdi_conn_pipes_per_inst_internal(struct ipa_wdi_conn_in_params *i
 				in->u_tx.tx_smmu.is_txr_rn_db_pcie_addr;
 			in_tx.u.dl_smmu.is_evt_rn_db_pcie_addr =
 				in->u_tx.tx_smmu.is_evt_rn_db_pcie_addr;
-			if (ipa3_connect_wdi_pipe(&in_tx, &out_tx)) {
+			if (ipa_connect_wdi_pipe(&in_tx, &out_tx)) {
 				IPA_WDI_ERR("fail to setup tx pipe\n");
 				ret = -EFAULT;
 				goto fail;
@@ -684,7 +689,7 @@ static int ipa_wdi_conn_pipes_per_inst_internal(struct ipa_wdi_conn_in_params *i
 	return 0;
 
 fail:
-	ipa3_disconnect_wdi_pipe(ipa_wdi_ctx_list[in->hdl]->rx_pipe_hdl);
+	ipa_disconnect_wdi_pipe(ipa_wdi_ctx_list[in->hdl]->rx_pipe_hdl);
 fail_connect_pipe:
 	ipa_pm_deregister(ipa_wdi_ctx_list[in->hdl]->ipa_pm_hdl);
 
@@ -693,6 +698,7 @@ fail_setup_sys_pipe:
 		ipa_teardown_sys_pipe(ipa_wdi_ctx_list[in->hdl]->sys_pipe_hdl[j]);
 	return ret;
 }
+EXPORT_SYMBOL(ipa_wdi_conn_pipes_per_inst);
 
 /**
  * function to enable IPA offload data path
@@ -702,7 +708,7 @@ fail_setup_sys_pipe:
  *
  * Returns: 0 on success, negative on failure
  */
-static int ipa_wdi_enable_pipes_per_inst_internal(ipa_wdi_hdl_t hdl)
+int ipa_wdi_enable_pipes_per_inst(ipa_wdi_hdl_t hdl)
 {
 	int ret;
 	int ipa_ep_idx_tx, ipa_ep_idx_rx;
@@ -763,19 +769,19 @@ static int ipa_wdi_enable_pipes_per_inst_internal(ipa_wdi_hdl_t hdl)
 			IPA_WDI_ERR("pipe handle not valid\n");
 			return -EFAULT;
 		}
-		if (ipa3_enable_wdi_pipe(ipa_wdi_ctx_list[hdl]->tx_pipe_hdl)) {
+		if (ipa_enable_wdi_pipe(ipa_wdi_ctx_list[hdl]->tx_pipe_hdl)) {
 			IPA_WDI_ERR("fail to enable wdi tx pipe\n");
 			return -EFAULT;
 		}
-		if (ipa3_resume_wdi_pipe(ipa_wdi_ctx_list[hdl]->tx_pipe_hdl)) {
+		if (ipa_resume_wdi_pipe(ipa_wdi_ctx_list[hdl]->tx_pipe_hdl)) {
 			IPA_WDI_ERR("fail to resume wdi tx pipe\n");
 			return -EFAULT;
 		}
-		if (ipa3_enable_wdi_pipe(ipa_wdi_ctx_list[hdl]->rx_pipe_hdl)) {
+		if (ipa_enable_wdi_pipe(ipa_wdi_ctx_list[hdl]->rx_pipe_hdl)) {
 			IPA_WDI_ERR("fail to enable wdi rx pipe\n");
 			return -EFAULT;
 		}
-		if (ipa3_resume_wdi_pipe(ipa_wdi_ctx_list[hdl]->rx_pipe_hdl)) {
+		if (ipa_resume_wdi_pipe(ipa_wdi_ctx_list[hdl]->rx_pipe_hdl)) {
 			IPA_WDI_ERR("fail to resume wdi rx pipe\n");
 			return -EFAULT;
 		}
@@ -783,6 +789,7 @@ static int ipa_wdi_enable_pipes_per_inst_internal(ipa_wdi_hdl_t hdl)
 
 	return 0;
 }
+EXPORT_SYMBOL(ipa_wdi_enable_pipes_per_inst);
 
 /**
  * set IPA clock bandwidth based on data rates
@@ -792,7 +799,7 @@ static int ipa_wdi_enable_pipes_per_inst_internal(ipa_wdi_hdl_t hdl)
  *
  * Returns: 0 on success, negative on failure
  */
-static int ipa_wdi_set_perf_profile_per_inst_internal(ipa_wdi_hdl_t hdl,
+int ipa_wdi_set_perf_profile_per_inst(ipa_wdi_hdl_t hdl,
 	struct ipa_wdi_perf_profile *profile)
 {
 	int res = 0;
@@ -828,6 +835,7 @@ static int ipa_wdi_set_perf_profile_per_inst_internal(ipa_wdi_hdl_t hdl,
 
 	return res;
 }
+EXPORT_SYMBOL(ipa_wdi_set_perf_profile_per_inst);
 
 /**
  * function to create smmu mapping
@@ -836,7 +844,7 @@ static int ipa_wdi_set_perf_profile_per_inst_internal(ipa_wdi_hdl_t hdl,
  * @num_buffers: number of buffers
  * @info: wdi buffer info
  */
-static int ipa_wdi_create_smmu_mapping_per_inst_internal(ipa_wdi_hdl_t hdl,
+int ipa_wdi_create_smmu_mapping_per_inst(ipa_wdi_hdl_t hdl,
 	u32 num_buffers,
 	struct ipa_wdi_buffer_info *info)
 {
@@ -886,6 +894,7 @@ static int ipa_wdi_create_smmu_mapping_per_inst_internal(ipa_wdi_hdl_t hdl,
 
 	return ret;
 }
+EXPORT_SYMBOL(ipa_wdi_create_smmu_mapping_per_inst);
 
 
 /**
@@ -896,7 +905,7 @@ static int ipa_wdi_create_smmu_mapping_per_inst_internal(ipa_wdi_hdl_t hdl,
  *
  * @info: wdi buffer info
  */
-static int ipa_wdi_release_smmu_mapping_per_inst_internal(ipa_wdi_hdl_t hdl,
+int ipa_wdi_release_smmu_mapping_per_inst(ipa_wdi_hdl_t hdl,
 	u32 num_buffers,
 	struct ipa_wdi_buffer_info *info)
 {
@@ -935,6 +944,7 @@ static int ipa_wdi_release_smmu_mapping_per_inst_internal(ipa_wdi_hdl_t hdl,
 
 	return ret;
 }
+EXPORT_SYMBOL(ipa_wdi_release_smmu_mapping_per_inst);
 
 /**
  * clean up WDI IPA offload data path
@@ -943,7 +953,7 @@ static int ipa_wdi_release_smmu_mapping_per_inst_internal(ipa_wdi_hdl_t hdl,
  *
  * @Return 0 on success, negative on failure
  */
-static int ipa_wdi_cleanup_per_inst_internal(ipa_wdi_hdl_t hdl)
+int ipa_wdi_cleanup_per_inst(ipa_wdi_hdl_t hdl)
 {
 	struct ipa_wdi_intf_info *entry;
 	struct ipa_wdi_intf_info *next;
@@ -973,13 +983,14 @@ static int ipa_wdi_cleanup_per_inst_internal(ipa_wdi_hdl_t hdl)
 	ipa_wdi_ctx_list[hdl] = NULL;
 	return 0;
 }
+EXPORT_SYMBOL(ipa_wdi_cleanup_per_inst);
 
 /**
  * function to deregister before unload and after disconnect
  *
  * @Return 0 on success, negative on failure
  */
-static int ipa_wdi_dereg_intf_per_inst_internal(const char *netdev_name,ipa_wdi_hdl_t hdl)
+int ipa_wdi_dereg_intf_per_inst(const char *netdev_name, ipa_wdi_hdl_t hdl)
 {
 	int len, ret = 0;
 	struct ipa_ioc_del_hdr *hdr = NULL;
@@ -1029,13 +1040,13 @@ static int ipa_wdi_dereg_intf_per_inst_internal(const char *netdev_name,ipa_wdi_
 			IPA_WDI_DBG("IPv4 hdr hdl: %d IPv6 hdr hdl: %d\n",
 				hdr->hdl[0].hdl, hdr->hdl[1].hdl);
 
-			if (ipa3_del_hdr(hdr)) {
+			if (ipa_del_hdr(hdr)) {
 				IPA_WDI_ERR("fail to delete partial header\n");
 				ret = -EFAULT;
 				goto fail;
 			}
 
-			if (ipa3_deregister_intf(entry->netdev_name)) {
+			if (ipa_deregister_intf(entry->netdev_name)) {
 				IPA_WDI_ERR("fail to del interface props\n");
 				ret = -EFAULT;
 				goto fail;
@@ -1052,6 +1063,7 @@ fail:
 	mutex_unlock(&ipa_wdi_ctx_list[hdl]->lock);
 	return ret;
 }
+EXPORT_SYMBOL(ipa_wdi_dereg_intf_per_inst);
 
 /**
  * function to disconnect pipes
@@ -1061,7 +1073,7 @@ fail:
  *
  * Returns: 0 on success, negative on failure
  */
-static int ipa_wdi_disconn_pipes_per_inst_internal(ipa_wdi_hdl_t hdl)
+int ipa_wdi_disconn_pipes_per_inst(ipa_wdi_hdl_t hdl)
 {
 	int i, ipa_ep_idx_rx, ipa_ep_idx_tx;
 	int ipa_ep_idx_tx1 = IPA_EP_NOT_ALLOCATED;
@@ -1116,11 +1128,11 @@ static int ipa_wdi_disconn_pipes_per_inst_internal(ipa_wdi_hdl_t hdl)
 			return -EFAULT;
 		}
 	} else {
-		if (ipa3_disconnect_wdi_pipe(ipa_wdi_ctx_list[hdl]->tx_pipe_hdl)) {
+		if (ipa_disconnect_wdi_pipe(ipa_wdi_ctx_list[hdl]->tx_pipe_hdl)) {
 			IPA_WDI_ERR("fail to tear down wdi tx pipes\n");
 			return -EFAULT;
 		}
-		if (ipa3_disconnect_wdi_pipe(ipa_wdi_ctx_list[hdl]->rx_pipe_hdl)) {
+		if (ipa_disconnect_wdi_pipe(ipa_wdi_ctx_list[hdl]->rx_pipe_hdl)) {
 			IPA_WDI_ERR("fail to tear down wdi rx pipes\n");
 			return -EFAULT;
 		}
@@ -1133,6 +1145,7 @@ static int ipa_wdi_disconn_pipes_per_inst_internal(ipa_wdi_hdl_t hdl)
 
 	return 0;
 }
+EXPORT_SYMBOL(ipa_wdi_disconn_pipes_per_inst);
 
 /**
  * function to disable IPA offload data path
@@ -1142,7 +1155,7 @@ static int ipa_wdi_disconn_pipes_per_inst_internal(ipa_wdi_hdl_t hdl)
  *
  * Returns: 0 on success, negative on failure
  */
-static int ipa_wdi_disable_pipes_per_inst_internal(ipa_wdi_hdl_t hdl)
+int ipa_wdi_disable_pipes_per_inst(ipa_wdi_hdl_t hdl)
 {
 	int ret;
 	int ipa_ep_idx_tx, ipa_ep_idx_rx;
@@ -1191,19 +1204,19 @@ static int ipa_wdi_disable_pipes_per_inst_internal(ipa_wdi_hdl_t hdl)
 			return -EFAULT;
 		}
 	} else {
-		if (ipa3_suspend_wdi_pipe(ipa_wdi_ctx_list[hdl]->tx_pipe_hdl)) {
+		if (ipa_suspend_wdi_pipe(ipa_wdi_ctx_list[hdl]->tx_pipe_hdl)) {
 			IPA_WDI_ERR("fail to suspend wdi tx pipe\n");
 			return -EFAULT;
 		}
-		if (ipa3_disable_wdi_pipe(ipa_wdi_ctx_list[hdl]->tx_pipe_hdl)) {
+		if (ipa_disable_wdi_pipe(ipa_wdi_ctx_list[hdl]->tx_pipe_hdl)) {
 			IPA_WDI_ERR("fail to disable wdi tx pipe\n");
 			return -EFAULT;
 		}
-		if (ipa3_suspend_wdi_pipe(ipa_wdi_ctx_list[hdl]->rx_pipe_hdl)) {
+		if (ipa_suspend_wdi_pipe(ipa_wdi_ctx_list[hdl]->rx_pipe_hdl)) {
 			IPA_WDI_ERR("fail to suspend wdi rx pipe\n");
 			return -EFAULT;
 		}
-		if (ipa3_disable_wdi_pipe(ipa_wdi_ctx_list[hdl]->rx_pipe_hdl)) {
+		if (ipa_disable_wdi_pipe(ipa_wdi_ctx_list[hdl]->rx_pipe_hdl)) {
 			IPA_WDI_ERR("fail to disable wdi rx pipe\n");
 			return -EFAULT;
 		}
@@ -1217,8 +1230,9 @@ static int ipa_wdi_disable_pipes_per_inst_internal(ipa_wdi_hdl_t hdl)
 
 	return 0;
 }
+EXPORT_SYMBOL(ipa_wdi_disable_pipes_per_inst);
 
-static int ipa_wdi_init_internal(struct ipa_wdi_init_in_params *in,
+int ipa_wdi_init(struct ipa_wdi_init_in_params *in,
 	struct ipa_wdi_init_out_params *out)
 {
 	if (in == NULL) {
@@ -1227,30 +1241,34 @@ static int ipa_wdi_init_internal(struct ipa_wdi_init_in_params *in,
 	}
 
 	in->inst_id = DEFAULT_INSTANCE_ID;
-	return ipa_wdi_init_per_inst_internal(in, out);
+	return ipa_wdi_init_per_inst(in, out);
 }
+EXPORT_SYMBOL(ipa_wdi_init);
 
-static int ipa_wdi_cleanup_internal(void)
+int ipa_wdi_cleanup(void)
 {
-	return ipa_wdi_cleanup_per_inst_internal(0);
+	return ipa_wdi_cleanup_per_inst(0);
 }
+EXPORT_SYMBOL(ipa_wdi_cleanup);
 
-static int ipa_wdi_reg_intf_internal(struct ipa_wdi_reg_intf_in_params *in)
+int ipa_wdi_reg_intf(struct ipa_wdi_reg_intf_in_params *in)
 {
 	if (in == NULL) {
 		IPA_WDI_ERR("invalid params in=%pK\n", in);
 		return -EINVAL;
 	}
 	in->hdl = 0;
-	return ipa_wdi_reg_intf_per_inst_internal(in);
+	return ipa_wdi_reg_intf_per_inst(in);
 }
+EXPORT_SYMBOL(ipa_wdi_reg_intf);
 
-static int ipa_wdi_dereg_intf_internal(const char *netdev_name)
+int ipa_wdi_dereg_intf(const char *netdev_name)
 {
-	return ipa_wdi_dereg_intf_per_inst_internal(netdev_name, 0);
+	return ipa_wdi_dereg_intf_per_inst(netdev_name, 0);
 }
+EXPORT_SYMBOL(ipa_wdi_dereg_intf);
 
-static int ipa_wdi_conn_pipes_internal(struct ipa_wdi_conn_in_params *in,
+int ipa_wdi_conn_pipes(struct ipa_wdi_conn_in_params *in,
 			struct ipa_wdi_conn_out_params *out)
 {
 	if (!(in && out)) {
@@ -1259,67 +1277,36 @@ static int ipa_wdi_conn_pipes_internal(struct ipa_wdi_conn_in_params *in,
 	}
 
 	in->hdl = 0;
-	return ipa_wdi_conn_pipes_per_inst_internal(in, out);
+	return ipa_wdi_conn_pipes_per_inst(in, out);
 }
+EXPORT_SYMBOL(ipa_wdi_conn_pipes);
 
-static int ipa_wdi_disconn_pipes_internal(void)
+int ipa_wdi_disconn_pipes(void)
 {
-	return ipa_wdi_disconn_pipes_per_inst_internal(0);
+	return ipa_wdi_disconn_pipes_per_inst(0);
 }
+EXPORT_SYMBOL(ipa_wdi_disconn_pipes);
 
-static int ipa_wdi_enable_pipes_internal(void)
+int ipa_wdi_enable_pipes(void)
 {
-	return ipa_wdi_enable_pipes_per_inst_internal(0);
+	return ipa_wdi_enable_pipes_per_inst(0);
 }
+EXPORT_SYMBOL(ipa_wdi_enable_pipes);
 
-static int ipa_wdi_disable_pipes_internal(void)
+int ipa_wdi_disable_pipes(void)
 {
-	return ipa_wdi_disable_pipes_per_inst_internal(0);
+	return ipa_wdi_disable_pipes_per_inst(0);
 }
+EXPORT_SYMBOL(ipa_wdi_disable_pipes);
 
-static int ipa_wdi_set_perf_profile_internal(struct ipa_wdi_perf_profile *profile)
+int ipa_wdi_set_perf_profile(struct ipa_wdi_perf_profile *profile)
 {
 	if (profile == NULL) {
 		IPA_WDI_ERR("Invalid input\n");
 		return -EINVAL;
 	}
 
-	return ipa_wdi_set_perf_profile_per_inst_internal(0, profile);
+	return ipa_wdi_set_perf_profile_per_inst(0, profile);
 }
+EXPORT_SYMBOL(ipa_wdi_set_perf_profile);
 
-void ipa_wdi3_register(void)
-{
-	struct ipa_wdi3_data funcs;
-
-	funcs.ipa_wdi_bw_monitor = ipa_uc_bw_monitor;
-	funcs.ipa_wdi_cleanup = ipa_wdi_cleanup_internal;
-	funcs.ipa_wdi_conn_pipes = ipa_wdi_conn_pipes_internal;
-	funcs.ipa_wdi_create_smmu_mapping = ipa3_create_wdi_mapping;
-	funcs.ipa_wdi_dereg_intf = ipa_wdi_dereg_intf_internal;
-	funcs.ipa_wdi_disable_pipes = ipa_wdi_disable_pipes_internal;
-	funcs.ipa_wdi_disconn_pipes = ipa_wdi_disconn_pipes_internal;
-	funcs.ipa_wdi_enable_pipes = ipa_wdi_enable_pipes_internal;
-	funcs.ipa_wdi_get_stats = ipa_get_wdi_stats;
-	funcs.ipa_wdi_init = ipa_wdi_init_internal;
-	funcs.ipa_wdi_reg_intf = ipa_wdi_reg_intf_internal;
-	funcs.ipa_wdi_release_smmu_mapping = ipa3_release_wdi_mapping;
-	funcs.ipa_wdi_set_perf_profile = ipa_wdi_set_perf_profile_internal;
-	funcs.ipa_wdi_sw_stats = ipa3_set_wlan_tx_info;
-	funcs.ipa_get_wdi_version = ipa_get_wdi_version_internal;
-	funcs.ipa_wdi_is_tx1_used = ipa_wdi_is_tx1_used_internal;
-	funcs.ipa_wdi_get_capabilities = ipa_wdi_get_capabilities_internal;
-	funcs.ipa_wdi_init_per_inst = ipa_wdi_init_per_inst_internal;
-	funcs.ipa_wdi_cleanup_per_inst = ipa_wdi_cleanup_per_inst_internal;
-	funcs.ipa_wdi_reg_intf_per_inst = ipa_wdi_reg_intf_per_inst_internal;
-	funcs.ipa_wdi_dereg_intf_per_inst = ipa_wdi_dereg_intf_per_inst_internal;
-	funcs.ipa_wdi_conn_pipes_per_inst = ipa_wdi_conn_pipes_per_inst_internal;
-	funcs.ipa_wdi_disconn_pipes_per_inst = ipa_wdi_disconn_pipes_per_inst_internal;
-	funcs.ipa_wdi_enable_pipes_per_inst = ipa_wdi_enable_pipes_per_inst_internal;
-	funcs.ipa_wdi_disable_pipes_per_inst = ipa_wdi_disable_pipes_per_inst_internal;
-	funcs.ipa_wdi_set_perf_profile_per_inst = ipa_wdi_set_perf_profile_per_inst_internal;
-	funcs.ipa_wdi_create_smmu_mapping_per_inst = ipa_wdi_create_smmu_mapping_per_inst_internal;
-	funcs.ipa_wdi_release_smmu_mapping_per_inst = ipa_wdi_release_smmu_mapping_per_inst_internal;
-
-	if (ipa_fmwk_register_ipa_wdi3(&funcs))
-		pr_err("failed to register ipa_wdi3 APIs\n");
-}

+ 39 - 53
drivers/platform/msm/ipa/ipa_clients/ipa_wigig.c

@@ -3,12 +3,11 @@
  * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
  */
 
-#include <linux/ipa_wigig.h>
+#include "ipa_wigig.h"
 #include <linux/debugfs.h>
 #include <linux/string.h>
 #include "ipa_common_i.h"
 #include "ipa_pm.h"
-#include <linux/ipa_fmwk.h>
 
 #define OFFLOAD_DRV_NAME "ipa_wigig"
 #define IPA_WIGIG_DBG(fmt, args...) \
@@ -124,7 +123,7 @@ static int ipa_wigig_init_debugfs(struct dentry *parent) { return 0; }
 static inline void ipa_wigig_deinit_debugfs(void) { }
 #endif
 
-static int ipa_wigig_init_internal(struct ipa_wigig_init_in_params *in,
+int ipa_wigig_init(struct ipa_wigig_init_in_params *in,
 	struct ipa_wigig_init_out_params *out)
 {
 	struct ipa_wdi_uc_ready_params inout;
@@ -182,8 +181,9 @@ static int ipa_wigig_init_internal(struct ipa_wigig_init_in_params *in,
 
 	return 0;
 }
+EXPORT_SYMBOL(ipa_wigig_init);
 
-static int ipa_wigig_cleanup_internal(void)
+int ipa_wigig_cleanup(void)
 {
 	struct ipa_wigig_intf_info *entry;
 	struct ipa_wigig_intf_info *next;
@@ -210,8 +210,9 @@ static int ipa_wigig_cleanup_internal(void)
 	IPA_WIGIG_DBG("exit\n");
 	return 0;
 }
+EXPORT_SYMBOL(ipa_wigig_cleanup);
 
-static bool ipa_wigig_is_smmu_enabled_internal(void)
+bool ipa_wigig_is_smmu_enabled(void)
 {
 	struct ipa_smmu_in_params in;
 	struct ipa_smmu_out_params out;
@@ -219,12 +220,13 @@ static bool ipa_wigig_is_smmu_enabled_internal(void)
 	IPA_WIGIG_DBG("\n");
 
 	in.smmu_client = IPA_SMMU_WIGIG_CLIENT;
-	ipa3_get_smmu_params(&in, &out);
+	ipa_get_smmu_params(&in, &out);
 
 	IPA_WIGIG_DBG("exit (%d)\n", out.smmu_enable);
 
 	return out.smmu_enable;
 }
+EXPORT_SYMBOL(ipa_wigig_is_smmu_enabled);
 
 static int ipa_wigig_init_smmu_params(void)
 {
@@ -235,7 +237,7 @@ static int ipa_wigig_init_smmu_params(void)
 	IPA_WIGIG_DBG("\n");
 
 	in.smmu_client = IPA_SMMU_WIGIG_CLIENT;
-	ret = ipa3_get_smmu_params(&in, &out);
+	ret = ipa_get_smmu_params(&in, &out);
 	if (ret) {
 		IPA_WIGIG_ERR("couldn't get SMMU params %d\n", ret);
 		return ret;
@@ -284,7 +286,7 @@ static int ipa_wigig_commit_partial_hdr(
 		hdr->hdr[i].eth2_ofst = hdr_info[i].dst_mac_addr_offset;
 	}
 
-	if (ipa3_add_hdr(hdr)) {
+	if (ipa_add_hdr(hdr)) {
 		IPA_WIGIG_ERR("fail to add partial headers\n");
 		return -EFAULT;
 	}
@@ -315,7 +317,7 @@ static int ipa_wigig_get_devname(char *netdev_name)
 	return 0;
 }
 
-static int ipa_wigig_reg_intf_internal(
+int ipa_wigig_reg_intf(
 	struct ipa_wigig_reg_intf_in_params *in)
 {
 	struct ipa_wigig_intf_info *new_intf;
@@ -423,7 +425,7 @@ static int ipa_wigig_reg_intf_internal(
 	rx_prop[1].src_pipe = IPA_CLIENT_WIGIG_PROD;
 	rx_prop[1].hdr_l2_type = in->hdr_info[1].hdr_type;
 
-	if (ipa3_register_intf(in->netdev_name, &tx, &rx)) {
+	if (ipa_register_intf(in->netdev_name, &tx, &rx)) {
 		IPA_WIGIG_ERR("fail to add interface prop\n");
 		ret = -EFAULT;
 		goto fail_register;
@@ -445,7 +447,7 @@ static int ipa_wigig_reg_intf_internal(
 	IPA_WIGIG_DBG("exit\n");
 	return 0;
 fail_sendmsg:
-	ipa3_deregister_intf(in->netdev_name);
+	ipa_deregister_intf(in->netdev_name);
 fail_register:
 	del_hdr = kzalloc(sizeof(struct ipa_ioc_del_hdr) +
 		2 * sizeof(struct ipa_hdr_del), GFP_KERNEL);
@@ -454,7 +456,7 @@ fail_register:
 		del_hdr->num_hdls = 2;
 		del_hdr->hdl[0].hdl = new_intf->partial_hdr_hdl[IPA_IP_v4];
 		del_hdr->hdl[1].hdl = new_intf->partial_hdr_hdl[IPA_IP_v6];
-		ipa3_del_hdr(del_hdr);
+		ipa_del_hdr(del_hdr);
 		kfree(del_hdr);
 	}
 	new_intf->partial_hdr_hdl[IPA_IP_v4] = 0;
@@ -467,8 +469,9 @@ fail:
 	mutex_unlock(&ipa_wigig_ctx->lock);
 	return ret;
 }
+EXPORT_SYMBOL(ipa_wigig_reg_intf);
 
-static int ipa_wigig_dereg_intf_internal(const char *netdev_name)
+int ipa_wigig_dereg_intf(const char *netdev_name)
 {
 	int len, ret;
 	struct ipa_ioc_del_hdr *hdr = NULL;
@@ -509,14 +512,14 @@ static int ipa_wigig_dereg_intf_internal(const char *netdev_name)
 			IPA_WIGIG_DBG("IPv4 hdr hdl: %d IPv6 hdr hdl: %d\n",
 				hdr->hdl[0].hdl, hdr->hdl[1].hdl);
 
-			if (ipa3_del_hdr(hdr)) {
+			if (ipa_del_hdr(hdr)) {
 				IPA_WIGIG_ERR(
 					"fail to delete partial header\n");
 				ret = -EFAULT;
 				goto fail;
 			}
 
-			if (ipa3_deregister_intf(entry->netdev_name)) {
+			if (ipa_deregister_intf(entry->netdev_name)) {
 				IPA_WIGIG_ERR("fail to del interface props\n");
 				ret = -EFAULT;
 				goto fail;
@@ -543,6 +546,7 @@ fail:
 	mutex_unlock(&ipa_wigig_ctx->lock);
 	return ret;
 }
+EXPORT_SYMBOL(ipa_wigig_dereg_intf);
 
 static void ipa_wigig_pm_cb(void *p, enum ipa_pm_cb_event event)
 {
@@ -605,7 +609,7 @@ static u8 ipa_wigig_pipe_to_bit_val(int client)
 	return shift_val;
 }
 
-static int ipa_wigig_conn_rx_pipe_internal(struct ipa_wigig_conn_rx_in_params *in,
+int ipa_wigig_conn_rx_pipe(struct ipa_wigig_conn_rx_in_params *in,
 	struct ipa_wigig_conn_out_params *out)
 {
 	int ret;
@@ -695,6 +699,7 @@ fail_msi:
 fail_pm:
 	return ret;
 }
+EXPORT_SYMBOL(ipa_wigig_conn_rx_pipe);
 
 static int ipa_wigig_client_to_idx(enum ipa_client_type client,
 	unsigned int *idx)
@@ -970,7 +975,7 @@ fail_map_desc_h:
 	return ret;
 }
 
-static int ipa_wigig_save_regs_cb(void)
+int ipa_wigig_save_regs(void)
 {
 	void __iomem *desc_ring_h = NULL, *desc_ring_t = NULL,
 		*status_ring_h = NULL, *status_ring_t = NULL,
@@ -1094,6 +1099,7 @@ fail_map_gen_tx:
 fail_map_gen_rx:
 	return ret;
 }
+EXPORT_SYMBOL(ipa_wigig_save_regs);
 
 static void ipa_wigig_clean_rx_buff_smmu_info(void)
 {
@@ -1381,7 +1387,7 @@ static int ipa_wigig_clean_smmu_info(enum ipa_client_type client)
 
 	return 0;
 }
-static int ipa_wigig_conn_rx_pipe_smmu_internal(
+int ipa_wigig_conn_rx_pipe_smmu(
 	struct ipa_wigig_conn_rx_in_params_smmu *in,
 	struct ipa_wigig_conn_out_params *out)
 {
@@ -1478,8 +1484,9 @@ fail_msi:
 fail_pm:
 	return ret;
 }
+EXPORT_SYMBOL(ipa_wigig_conn_rx_pipe_smmu);
 
-static int ipa_wigig_set_perf_profile_internal(u32 max_supported_bw_mbps)
+int ipa_wigig_set_perf_profile(u32 max_supported_bw_mbps)
 {
 	IPA_WIGIG_DBG("setting throughput to %d\n", max_supported_bw_mbps);
 
@@ -1498,6 +1505,7 @@ static int ipa_wigig_set_perf_profile_internal(u32 max_supported_bw_mbps)
 
 	return 0;
 }
+EXPORT_SYMBOL(ipa_wigig_set_perf_profile);
 
 static int ipa_wigig_store_client_mac(enum ipa_client_type client,
 	const char *mac)
@@ -1531,7 +1539,7 @@ static int ipa_wigig_clean_client_mac(enum ipa_client_type client)
 	return ipa_wigig_store_client_mac(client, zero_mac);
 }
 
-static int ipa_wigig_conn_client_internal(struct ipa_wigig_conn_tx_in_params *in,
+int ipa_wigig_conn_client(struct ipa_wigig_conn_tx_in_params *in,
 	struct ipa_wigig_conn_out_params *out)
 {
 	char dev_name[IPA_RESOURCE_NAME_MAX];
@@ -1610,8 +1618,9 @@ fail_convert_client_to_idx:
 	ipa3_disconn_wigig_pipe_i(out->client, NULL, NULL);
 	return -EINVAL;
 }
+EXPORT_SYMBOL(ipa_wigig_conn_client);
 
-static int ipa_wigig_conn_client_smmu_internal(
+int ipa_wigig_conn_client_smmu(
 	struct ipa_wigig_conn_tx_in_params_smmu *in,
 	struct ipa_wigig_conn_out_params *out)
 {
@@ -1696,6 +1705,7 @@ fail_sendmsg:
 	ipa3_disconn_wigig_pipe_i(out->client, &in->pipe_smmu, &in->dbuff_smmu);
 	return ret;
 }
+EXPORT_SYMBOL(ipa_wigig_conn_client_smmu);
 
 static inline int ipa_wigig_validate_client_type(enum ipa_client_type client)
 {
@@ -1714,7 +1724,7 @@ static inline int ipa_wigig_validate_client_type(enum ipa_client_type client)
 	return 0;
 }
 
-static int ipa_wigig_disconn_pipe_internal(enum ipa_client_type client)
+int ipa_wigig_disconn_pipe(enum ipa_client_type client)
 {
 	int ret;
 	char dev_name[IPA_RESOURCE_NAME_MAX];
@@ -1816,8 +1826,9 @@ static int ipa_wigig_disconn_pipe_internal(enum ipa_client_type client)
 	IPA_WIGIG_DBG("exit\n");
 	return 0;
 }
+EXPORT_SYMBOL(ipa_wigig_disconn_pipe);
 
-static int ipa_wigig_enable_pipe_internal(enum ipa_client_type client)
+int ipa_wigig_enable_pipe(enum ipa_client_type client)
 {
 	int ret;
 
@@ -1850,8 +1861,9 @@ fail_pm_active:
 	ipa3_disable_wigig_pipe_i(client);
 	return ret;
 }
+EXPORT_SYMBOL(ipa_wigig_enable_pipe);
 
-static int ipa_wigig_disable_pipe_internal(enum ipa_client_type client)
+int ipa_wigig_disable_pipe(enum ipa_client_type client)
 {
 	int ret;
 
@@ -1877,8 +1889,9 @@ static int ipa_wigig_disable_pipe_internal(enum ipa_client_type client)
 	IPA_WIGIG_DBG("exit\n");
 	return 0;
 }
+EXPORT_SYMBOL(ipa_wigig_disable_pipe);
 
-static int ipa_wigig_tx_dp_internal(enum ipa_client_type dst, struct sk_buff *skb)
+int ipa_wigig_tx_dp(enum ipa_client_type dst, struct sk_buff *skb)
 {
 	int ret;
 
@@ -1895,6 +1908,7 @@ static int ipa_wigig_tx_dp_internal(enum ipa_client_type dst, struct sk_buff *sk
 	IPA_WIGIG_DBG_LOW("exit\n");
 	return 0;
 }
+EXPORT_SYMBOL(ipa_wigig_tx_dp);
 
 
 #ifdef CONFIG_DEBUG_FS
@@ -2049,31 +2063,3 @@ fail_conn_clients:
 }
 #endif
 
-void ipa_wigig_register(void)
-{
-	struct ipa_wigig_data funcs;
-
-	funcs.ipa_wigig_init = ipa_wigig_init_internal;
-	funcs.ipa_wigig_cleanup = ipa_wigig_cleanup_internal;
-	funcs.ipa_wigig_is_smmu_enabled = ipa_wigig_is_smmu_enabled_internal;
-	funcs.ipa_wigig_reg_intf = ipa_wigig_reg_intf_internal;
-	funcs.ipa_wigig_dereg_intf = ipa_wigig_dereg_intf_internal;
-	funcs.ipa_wigig_conn_rx_pipe = ipa_wigig_conn_rx_pipe_internal;
-	funcs.ipa_wigig_conn_rx_pipe_smmu =
-		ipa_wigig_conn_rx_pipe_smmu_internal;
-	funcs.ipa_wigig_conn_client = ipa_wigig_conn_client_internal;
-	funcs.ipa_wigig_conn_client_smmu =
-		ipa_wigig_conn_client_smmu_internal;
-	funcs.ipa_wigig_disconn_pipe = ipa_wigig_disconn_pipe_internal;
-	funcs.ipa_wigig_enable_pipe = ipa_wigig_enable_pipe_internal;
-	funcs.ipa_wigig_disable_pipe = ipa_wigig_disable_pipe_internal;
-	funcs.ipa_wigig_tx_dp = ipa_wigig_tx_dp_internal;
-	funcs.ipa_wigig_set_perf_profile =
-		ipa_wigig_set_perf_profile_internal;
-	funcs.ipa_wigig_save_regs = ipa_wigig_save_regs_cb;
-
-
-	if (ipa_fmwk_register_ipa_wigig(&funcs))
-		pr_err("failed to register ipa_wigig APIs\n");
-}
-EXPORT_SYMBOL(ipa_wigig_register);

+ 13 - 11
drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c

@@ -18,7 +18,7 @@
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
 #include <linux/sched.h>
-#include <linux/ipa.h>
+#include "ipa.h"
 #include <linux/random.h>
 #include <linux/workqueue.h>
 #include <linux/version.h>
@@ -1631,7 +1631,7 @@ static void rndis_ipa_xmit_error_aftercare_wq(struct work_struct *work)
  *  for IPA driver
  * eth_type: the Ethernet type for this header-insertion header
  * hdr_name: string that shall represent this header in IPA data base
- * add_hdr: output for caller to be used with ipa3_add_hdr() to configure
+ * add_hdr: output for caller to be used with ipa_add_hdr() to configure
  *  the IPA core
  * dst_mac: tethered PC MAC (Ethernet) address to be added to packets
  *  for IPA->USB pipe
@@ -1784,7 +1784,7 @@ static int rndis_ipa_hdrs_cfg(
 
 	hdrs->num_hdrs = 2;
 	hdrs->commit = 1;
-	result = ipa3_add_hdr(hdrs);
+	result = ipa_add_hdr(hdrs);
 	if (result) {
 		RNDIS_IPA_ERROR("Fail on Header-Insertion(%d)\n", result);
 		goto fail_add_hdr;
@@ -1839,9 +1839,9 @@ static int rndis_ipa_hdrs_destroy(struct rndis_ipa_dev *rndis_ipa_ctx)
 	ipv6 = &del_hdr->hdl[1];
 	ipv6->hdl = rndis_ipa_ctx->eth_ipv6_hdr_hdl;
 
-	result = ipa3_del_hdr(del_hdr);
+	result = ipa_del_hdr(del_hdr);
 	if (result || ipv4->status || ipv6->status)
-		RNDIS_IPA_ERROR("ipa3_del_hdr failed\n");
+		RNDIS_IPA_ERROR("ipa_del_hdr failed\n");
 	else
 		RNDIS_IPA_DEBUG("hdrs deletion done\n");
 
@@ -1925,7 +1925,7 @@ static int rndis_ipa_register_properties(char *netdev_name, bool is_vlan_mode)
 	rx_ipv6_property->hdr_l2_type = hdr_l2_type;
 	rx_properties.num_props = 2;
 
-	result = ipa3_register_intf("rndis0", &tx_properties, &rx_properties);
+	result = ipa_register_intf("rndis0", &tx_properties, &rx_properties);
 	if (result)
 		RNDIS_IPA_ERROR("fail on Tx/Rx properties registration\n");
 	else
@@ -1948,7 +1948,7 @@ static int  rndis_ipa_deregister_properties(char *netdev_name)
 
 	RNDIS_IPA_LOG_ENTRY();
 
-	result = ipa3_deregister_intf(netdev_name);
+	result = ipa_deregister_intf(netdev_name);
 	if (result) {
 		RNDIS_IPA_DEBUG("Fail on Tx prop deregister\n");
 		return result;
@@ -2555,7 +2555,7 @@ static ssize_t rndis_ipa_debugfs_atomic_read
 	return simple_read_from_buffer(ubuf, count, ppos, atomic_str, nbytes);
 }
 
-static int __init rndis_ipa_init_module(void)
+int rndis_ipa_init_module(void)
 {
 	ipa_rndis_logbuf = ipc_log_context_create(IPA_RNDIS_IPC_LOG_PAGES,
 		"ipa_rndis", MINIDUMP_MASK);
@@ -2565,8 +2565,9 @@ static int __init rndis_ipa_init_module(void)
 	pr_info("RNDIS_IPA module is loaded.\n");
 	return 0;
 }
+EXPORT_SYMBOL(rndis_ipa_init_module);
 
-static void __exit rndis_ipa_cleanup_module(void)
+void rndis_ipa_cleanup_module(void)
 {
 	if (ipa_rndis_logbuf)
 		ipc_log_context_destroy(ipa_rndis_logbuf);
@@ -2574,9 +2575,10 @@ static void __exit rndis_ipa_cleanup_module(void)
 
 	pr_info("RNDIS_IPA module is unloaded.\n");
 }
+EXPORT_SYMBOL(rndis_ipa_cleanup_module);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("RNDIS_IPA network interface");
 
-late_initcall(rndis_ipa_init_module);
-module_exit(rndis_ipa_cleanup_module);
+/* Module init/exit are now invoked by the parent IPA module, which calls
+ * rndis_ipa_init_module()/rndis_ipa_cleanup_module() directly. */

+ 4 - 1
drivers/platform/msm/ipa/ipa_clients/rndis_ipa.h

@@ -6,7 +6,7 @@
 #ifndef _RNDIS_IPA_H_
 #define _RNDIS_IPA_H_
 
-#include <linux/ipa.h>
+#include "ipa.h"
 
 /*
  * @priv: private data given upon ipa_connect
@@ -64,6 +64,9 @@ int rndis_ipa_pipe_disconnect_notify(void *private);
 
 void rndis_ipa_cleanup(void *private);
 
+int rndis_ipa_init_module(void);
+void rndis_ipa_cleanup_module(void);
+
 #else /* IS_ENABLED(CONFIG_RNDIS_IPA) */
 
 static inline int rndis_ipa_init(struct ipa_usb_init_params *params)

+ 11 - 23
drivers/platform/msm/ipa/ipa_common_i.h

@@ -5,16 +5,16 @@
 
 #ifndef _IPA_COMMON_I_H_
 #define _IPA_COMMON_I_H_
-#include <linux/ipa_mhi.h>
 #include <linux/ipa_qmi_service_v01.h>
 #include <linux/errno.h>
 #include <linux/ipc_logging.h>
-#include <linux/ipa.h>
-#include <linux/ipa_uc_offload.h>
-#include <linux/ipa_wdi3.h>
-#include <linux/ipa_wigig.h>
-#include <linux/ipa_eth.h>
+#include "ipa.h"
+#include "ipa_uc_offload.h"
+#include "ipa_wdi3.h"
+#include "ipa_wigig.h"
+#include "ipa_eth.h"
 #include <linux/ipa_usb.h>
+#include <linux/ipa_mhi.h>
 #include <linux/ratelimit.h>
 #include "ipa_stats.h"
 #include "gsi.h"
@@ -176,7 +176,7 @@ do {\
  *   2) It assigns a value to index idx
  */
 #define IPA_CLIENT_IS_MAPPED(x, idx) \
-	((idx = ipa3_get_ep_mapping(x)) != IPA_EP_NOT_ALLOCATED)
+	((idx = ipa_get_ep_mapping(x)) != IPA_EP_NOT_ALLOCATED)
 /*
  * Same behavior as the macro above; but in addition, determines if
  * the client is valid as well.
@@ -184,10 +184,10 @@ do {\
 #define IPA_CLIENT_IS_MAPPED_VALID(x, idx) \
 	(IPA_CLIENT_IS_MAPPED(x, idx) && ipa3_ctx->ep[idx].valid == 1)
 #define IPA_CLIENT_IS_ETH_PROD(x) \
-	((x == ipa3_get_ep_mapping(IPA_CLIENT_ETHERNET_PROD)) || \
-	 (x == ipa3_get_ep_mapping(IPA_CLIENT_ETHERNET2_PROD)) || \
-	 (x == ipa3_get_ep_mapping(IPA_CLIENT_AQC_ETHERNET_PROD)) || \
-	 (x == ipa3_get_ep_mapping(IPA_CLIENT_RTK_ETHERNET_PROD)))
+	((x == ipa_get_ep_mapping(IPA_CLIENT_ETHERNET_PROD)) || \
+	 (x == ipa_get_ep_mapping(IPA_CLIENT_ETHERNET2_PROD)) || \
+	 (x == ipa_get_ep_mapping(IPA_CLIENT_AQC_ETHERNET_PROD)) || \
+	 (x == ipa_get_ep_mapping(IPA_CLIENT_RTK_ETHERNET_PROD)))
 
 #define IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC (1000)
 #define IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC (2000)
@@ -731,16 +731,10 @@ int ipa3_add_hdr_hpc_usr(struct ipa_ioc_add_hdr *hdrs, bool user_only);
 
 int ipa3_del_hdr_hpc(struct ipa_ioc_del_hdr *hdrs);
 
-int ipa3_add_hdr(struct ipa_ioc_add_hdr *hdrs);
-
-int ipa3_del_hdr(struct ipa_ioc_del_hdr *hdls);
-
 int ipa3_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs, bool user_only);
 
 int ipa3_reset_hdr(bool user_only);
 
-int ipa3_get_hdr(struct ipa_ioc_get_hdr *lookup);
-
 /*
 * Header Processing Context
 */
@@ -846,12 +840,9 @@ int ipa3_remove_interrupt_handler(enum ipa_irq_type interrupt);
 /*
 * Interface
 */
-int ipa3_register_intf(const char *name, const struct ipa_tx_intf *tx,
-	const struct ipa_rx_intf *rx);
 int ipa3_register_intf_ext(const char *name, const struct ipa_tx_intf *tx,
 	const struct ipa_rx_intf *rx,
 	const struct ipa_ext_intf *ext);
-int ipa3_deregister_intf(const char *name);
 
 /*
 * Miscellaneous
@@ -863,9 +854,6 @@ int ipa3_uc_debug_stats_dealloc(uint32_t protocol);
 void ipa3_get_gsi_stats(int prot_id,
 	struct ipa_uc_dbg_ring_stats *stats);
 int ipa3_get_prot_id(enum ipa_client_type client);
-bool ipa_is_client_handle_valid(u32 clnt_hdl);
-int ipa3_get_smmu_params(struct ipa_smmu_in_params *in,
-	struct ipa_smmu_out_params *out);
 
 /**
 * ipa_tz_unlock_reg - Unlocks memory regions so that they become accessible

+ 1 - 1
drivers/platform/msm/ipa/ipa_rm.c

@@ -5,7 +5,7 @@
 
 #include <linux/slab.h>
 #include <linux/workqueue.h>
-#include <linux/ipa.h>
+#include "ipa.h"
 #include "ipa_rm_dependency_graph.h"
 #include "ipa_rm_i.h"
 #include "ipa_common_i.h"

+ 1 - 1
drivers/platform/msm/ipa/ipa_rm_dependency_graph.h

@@ -7,7 +7,7 @@
 #define _IPA_RM_DEPENDENCY_GRAPH_H_
 
 #include <linux/list.h>
-#include <linux/ipa.h>
+#include "ipa.h"
 #include "ipa_rm_resource.h"
 
 struct ipa_rm_dep_graph {

+ 1 - 1
drivers/platform/msm/ipa/ipa_rm_i.h

@@ -7,7 +7,7 @@
 #define _IPA_RM_I_H_
 
 #include <linux/workqueue.h>
-#include <linux/ipa.h>
+#include "ipa.h"
 #include "ipa_rm_resource.h"
 #include "ipa_common_i.h"
 

+ 1 - 1
drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c

@@ -10,7 +10,7 @@
 #include <linux/timer.h>
 #include <linux/unistd.h>
 #include <linux/workqueue.h>
-#include <linux/ipa.h>
+#include "ipa.h"
 #include "ipa_rm_i.h"
 
 #define MAX_WS_NAME 20

+ 1 - 1
drivers/platform/msm/ipa/ipa_rm_resource.h

@@ -7,7 +7,7 @@
 #define _IPA_RM_RESOURCE_H_
 
 #include <linux/list.h>
-#include <linux/ipa.h>
+#include "ipa.h"
 #include "ipa_rm_peers_list.h"
 
 /**

+ 1 - 1
drivers/platform/msm/ipa/ipa_test_module/ipa_rm_ut.c

@@ -6,7 +6,7 @@
 #include <linux/fs.h>
 #include <linux/slab.h>
 #include <linux/workqueue.h>
-#include <linux/ipa.h>
+#include "ipa.h"
 #include <linux/msm_ipa.h>
 #include <linux/kernel.h>
 #include "ipa_rm_ut.h"

+ 1 - 1
drivers/platform/msm/ipa/ipa_test_module/ipa_rm_ut.h

@@ -13,7 +13,7 @@
  */
 
 #include <linux/msm_ipa.h>
-#include <linux/ipa.h>
+#include "ipa.h"
 
 int build_rmnet_bridge_use_case_graph(
 		int (*create_resource)(struct ipa_rm_create_params *create_params),

+ 1 - 1
drivers/platform/msm/ipa/ipa_test_module/ipa_test_module.h

@@ -11,7 +11,7 @@
 #include <linux/msm_ipa.h>
 #include <linux/ioctl.h>
 #ifdef _KERNEL_
-#include <linux/ipa.h>
+#include "ipa.h"
 #endif
 
 #define IPA_TEST_IOC_MAGIC 0xA5

+ 1 - 1
drivers/platform/msm/ipa/ipa_test_module/ipa_test_module_impl.c

@@ -18,7 +18,7 @@
 #include <linux/dma-mapping.h>	/* dma_alloc_coherent() */
 #include <linux/io.h>
 #include <linux/uaccess.h>
-#include <linux/ipa.h>
+#include "ipa.h"
 #include <linux/sched.h>
 #include <linux/skbuff.h>	/* sk_buff */
 #include <linux/kfifo.h>  /* Kernel FIFO Implementation */

+ 158 - 178
drivers/platform/msm/ipa/ipa_v3/ipa.c

@@ -129,6 +129,21 @@ struct tz_smmu_ipa_protect_region_s {
 	u32 size_bytes;
 } __packed;
 
+/**
+ * struct ipa_ready_cb_info - A list of all the registrations
+ *  for an indication of IPA driver readiness
+ *
+ * @link: linked list link
+ * @ready_cb: callback
+ * @user_data: User data
+ *
+ */
+struct ipa_ready_cb_info {
+	struct list_head link;
+	ipa_ready_cb ready_cb;
+	void *user_data;
+};
+
 static void ipa3_start_tag_process(struct work_struct *work);
 static DECLARE_WORK(ipa3_tag_work, ipa3_start_tag_process);
 
@@ -193,13 +208,14 @@ int ipa3_pci_drv_probe(struct pci_dev *pci_dev,
  *
  * Return value: enum ipa_hw_type
  */
-enum ipa_hw_type ipa_get_hw_type_internal(void)
+enum ipa_hw_type ipa_get_hw_type(void)
 {
 	if (ipa3_ctx == NULL)
 		return IPA_HW_None;
 
 	return ipa3_ctx->ipa_hw_type;
 }
+EXPORT_SYMBOL(ipa_get_hw_type);
 
 /**
  * ipa_is_test_prod_flt_in_sram_internal() - Return true if test prod FLT tbl is in SRAM
@@ -214,7 +230,7 @@ bool ipa_is_test_prod_flt_in_sram_internal(enum ipa_ip_type ip)
 	if (ipa3_ctx == NULL)
 		return false;
 
-	gsi_ep_info_cfg = ipa3_get_gsi_ep_info(IPA_CLIENT_TEST_PROD);
+	gsi_ep_info_cfg = ipa_get_gsi_ep_info(IPA_CLIENT_TEST_PROD);
 	if(gsi_ep_info_cfg == NULL)
 		return false;
 
@@ -1029,9 +1045,9 @@ static int ipa3_send_wan_msg(unsigned long usr_param,
 	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
 	msg_meta.msg_type = msg_type;
 	msg_meta.msg_len = sizeof(struct ipa_wan_msg);
-	retval = ipa3_send_msg(&msg_meta, wan_msg, ipa3_wan_msg_free_cb);
+	retval = ipa_send_msg(&msg_meta, wan_msg, ipa3_wan_msg_free_cb);
 	if (retval) {
-		IPAERR_RL("ipa3_send_msg failed: %d\n", retval);
+		IPAERR_RL("ipa_send_msg failed: %d\n", retval);
 		kfree(wan_msg);
 		return retval;
 	}
@@ -1137,10 +1153,10 @@ static int ipa3_send_pdn_config_msg(unsigned long usr_param)
 			pdn_info->u.passthrough_cfg.client_mac_addr[5]);
 	}
 
-	retval = ipa3_send_msg(&msg_meta, buff,
+	retval = ipa_send_msg(&msg_meta, buff,
 		ipa3_pdn_config_msg_free_cb);
 	if (retval) {
-		IPAERR("ipa3_send_msg failed: %d, msg_type %d\n",
+		IPAERR("ipa_send_msg failed: %d, msg_type %d\n",
 			retval,
 			msg_meta.msg_type);
 		kfree(buff);
@@ -1221,10 +1237,10 @@ static int ipa3_send_vlan_l2tp_msg(unsigned long usr_param, uint8_t msg_type)
 		return -EFAULT;
 	}
 
-	retval = ipa3_send_msg(&msg_meta, buff,
+	retval = ipa_send_msg(&msg_meta, buff,
 		ipa3_vlan_l2tp_msg_free_cb);
 	if (retval) {
-		IPAERR("ipa3_send_msg failed: %d, msg_type %d\n",
+		IPAERR("ipa_send_msg failed: %d, msg_type %d\n",
 			retval,
 			msg_type);
 		kfree(buff);
@@ -1268,11 +1284,11 @@ static void ipa3_get_usb_ep_info(
 		pair_info[i].ep_id = -1;
 	}
 
-	ep_index = ipa3_get_ep_mapping(IPA_CLIENT_USB2_PROD);
+	ep_index = ipa_get_ep_mapping(IPA_CLIENT_USB2_PROD);
 
 	if ((ep_index != -1) && ipa3_ctx->ep[ep_index].valid) {
 		pair_info[ep_info->num_ep_pairs].consumer_pipe_num = ep_index;
-		ep_index = ipa3_get_ep_mapping(IPA_CLIENT_USB2_CONS);
+		ep_index = ipa_get_ep_mapping(IPA_CLIENT_USB2_CONS);
 		if ((ep_index != -1) && (ipa3_ctx->ep[ep_index].valid)) {
 			pair_info[ep_info->num_ep_pairs].producer_pipe_num =
 				ep_index;
@@ -1295,11 +1311,11 @@ static void ipa3_get_usb_ep_info(
 		}
 	}
 
-	ep_index = ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD);
+	ep_index = ipa_get_ep_mapping(IPA_CLIENT_USB_PROD);
 
 	if ((ep_index != -1) && ipa3_ctx->ep[ep_index].valid) {
 		pair_info[ep_info->num_ep_pairs].consumer_pipe_num = ep_index;
-		ep_index = ipa3_get_ep_mapping(IPA_CLIENT_USB_CONS);
+		ep_index = ipa_get_ep_mapping(IPA_CLIENT_USB_CONS);
 		if ((ep_index != -1) && (ipa3_ctx->ep[ep_index].valid)) {
 			pair_info[ep_info->num_ep_pairs].producer_pipe_num =
 				ep_index;
@@ -1341,11 +1357,11 @@ static void ipa3_get_pcie_ep_info(
 	 * Legacy codes for ipa4.X version
 	 */
 	if (ipa3_ctx->ipa_hw_type < IPA_HW_v5_0) {
-		ep_index = ipa3_get_ep_mapping(IPA_CLIENT_MHI2_PROD);
+		ep_index = ipa_get_ep_mapping(IPA_CLIENT_MHI2_PROD);
 
 		if ((ep_index != -1) && ipa3_ctx->ep[ep_index].valid) {
 			pair_info[ep_info->num_ep_pairs].consumer_pipe_num = ep_index;
-			ep_index = ipa3_get_ep_mapping(IPA_CLIENT_MHI2_CONS);
+			ep_index = ipa_get_ep_mapping(IPA_CLIENT_MHI2_CONS);
 			if ((ep_index != -1) && (ipa3_ctx->ep[ep_index].valid)) {
 				pair_info[ep_info->num_ep_pairs].producer_pipe_num =
 				ep_index;
@@ -1369,11 +1385,11 @@ static void ipa3_get_pcie_ep_info(
 		}
 	}
 
-	ep_index = ipa3_get_ep_mapping(IPA_CLIENT_MHI_PROD);
+	ep_index = ipa_get_ep_mapping(IPA_CLIENT_MHI_PROD);
 
 	if ((ep_index != -1) && ipa3_ctx->ep[ep_index].valid) {
 		pair_info[ep_info->num_ep_pairs].consumer_pipe_num = ep_index;
-		ep_index = ipa3_get_ep_mapping(IPA_CLIENT_MHI_CONS);
+		ep_index = ipa_get_ep_mapping(IPA_CLIENT_MHI_CONS);
 		if ((ep_index != -1) && (ipa3_ctx->ep[ep_index].valid)) {
 			pair_info[ep_info->num_ep_pairs].producer_pipe_num =
 				ep_index;
@@ -1415,11 +1431,11 @@ static void ipa3_get_eth_ep_info(
 		pair_info[i].ep_id = -1;
 	}
 
-	ep_index = ipa3_get_ep_mapping(IPA_CLIENT_ETHERNET2_PROD);
+	ep_index = ipa_get_ep_mapping(IPA_CLIENT_ETHERNET2_PROD);
 
 	if ((ep_index != -1) && ipa3_ctx->ep[ep_index].valid) {
 		pair_info[ep_info->num_ep_pairs].consumer_pipe_num = ep_index;
-		ep_index = ipa3_get_ep_mapping(IPA_CLIENT_ETHERNET2_CONS);
+		ep_index = ipa_get_ep_mapping(IPA_CLIENT_ETHERNET2_CONS);
 		if ((ep_index != -1) && (ipa3_ctx->ep[ep_index].valid)) {
 			pair_info[ep_info->num_ep_pairs].producer_pipe_num =
 				ep_index;
@@ -1441,11 +1457,11 @@ static void ipa3_get_eth_ep_info(
 		}
 	}
 
-	ep_index = ipa3_get_ep_mapping(IPA_CLIENT_ETHERNET_PROD);
+	ep_index = ipa_get_ep_mapping(IPA_CLIENT_ETHERNET_PROD);
 
 	if ((ep_index != -1) && ipa3_ctx->ep[ep_index].valid) {
 		pair_info[ep_info->num_ep_pairs].consumer_pipe_num = ep_index;
-		ep_index = ipa3_get_ep_mapping(IPA_CLIENT_ETHERNET_CONS);
+		ep_index = ipa_get_ep_mapping(IPA_CLIENT_ETHERNET_CONS);
 		if ((ep_index != -1) && (ipa3_ctx->ep[ep_index].valid)) {
 			pair_info[ep_info->num_ep_pairs].producer_pipe_num =
 				ep_index;
@@ -1531,10 +1547,10 @@ static int ipa3_send_gsb_msg(unsigned long usr_param, uint8_t msg_type)
 		return -EFAULT;
 	}
 
-	retval = ipa3_send_msg(&msg_meta, buff,
+	retval = ipa_send_msg(&msg_meta, buff,
 		ipa3_gsb_msg_free_cb);
 	if (retval) {
-		IPAERR("ipa3_send_msg failed: %d, msg_type %d\n",
+		IPAERR("ipa_send_msg failed: %d, msg_type %d\n",
 			retval,
 			msg_type);
 		kfree(buff);
@@ -2573,10 +2589,10 @@ static int ipa3_send_mac_flt_list(unsigned long usr_param)
 		((struct ipa_ioc_mac_client_list_type *)buff)->num_of_clients,
 		((struct ipa_ioc_mac_client_list_type *)buff)->flt_state);
 
-	retval = ipa3_send_msg(&msg_meta, buff,
+	retval = ipa_send_msg(&msg_meta, buff,
 		ipa3_general_free_cb);
 	if (retval) {
-		IPAERR("ipa3_send_msg failed: %d, msg_type %d\n",
+		IPAERR("ipa_send_msg failed: %d, msg_type %d\n",
 		retval,
 		msg_meta.msg_type);
 		kfree(buff);
@@ -2637,10 +2653,10 @@ static int ipa3_send_pkt_threshold(unsigned long usr_param)
 		((struct ipa_set_pkt_threshold *)buff2)->pkt_threshold_enable,
 		((struct ipa_set_pkt_threshold *)buff2)->pkt_threshold);
 
-	retval = ipa3_send_msg(&msg_meta, buff2,
+	retval = ipa_send_msg(&msg_meta, buff2,
 		ipa3_general_free_cb);
 	if (retval) {
-		IPAERR("ipa3_send_msg failed: %d, msg_type %d\n",
+		IPAERR("ipa_send_msg failed: %d, msg_type %d\n",
 		retval,
 		msg_meta.msg_type);
 		kfree(buff1);
@@ -2701,10 +2717,10 @@ static int ipa3_send_sw_flt_list(unsigned long usr_param)
 		((struct ipa_sw_flt_list_type *)buff)->num_of_iface,
 		((struct ipa_sw_flt_list_type *)buff)->iface_enable);
 
-	retval = ipa3_send_msg(&msg_meta, buff,
+	retval = ipa_send_msg(&msg_meta, buff,
 		ipa3_general_free_cb);
 	if (retval) {
-		IPAERR("ipa3_send_msg failed: %d, msg_type %d\n",
+		IPAERR("ipa_send_msg failed: %d, msg_type %d\n",
 		retval,
 		msg_meta.msg_type);
 		kfree(buff);
@@ -2760,10 +2776,10 @@ static int ipa3_send_ippt_sw_flt_list(unsigned long usr_param)
 		((struct ipa_ippt_sw_flt_list_type *)buff)->num_of_port,
 		((struct ipa_ippt_sw_flt_list_type *)buff)->port_enable);
 
-	retval = ipa3_send_msg(&msg_meta, buff,
+	retval = ipa_send_msg(&msg_meta, buff,
 		ipa3_general_free_cb);
 	if (retval) {
-		IPAERR("ipa3_send_msg failed: %d, msg_type %d\n",
+		IPAERR("ipa_send_msg failed: %d, msg_type %d\n",
 		retval,
 		msg_meta.msg_type);
 		kfree(buff);
@@ -2800,10 +2816,10 @@ int ipa3_send_macsec_info(enum ipa_macsec_event event_type, struct ipa_macsec_ma
 	/*
 	 * Post event to ipacm
 	 */
-	res = ipa3_send_msg(&msg_meta, map, ipa3_general_free_cb);
+	res = ipa_send_msg(&msg_meta, map, ipa3_general_free_cb);
 
 	if (res) {
-		IPAERR_RL("ipa3_send_msg failed: %d\n", res);
+		IPAERR_RL("ipa_send_msg failed: %d\n", res);
 		kfree(map);
 		goto done;
 	}
@@ -2854,7 +2870,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	if (_IOC_TYPE(cmd) != IPA_IOC_MAGIC)
 		return -ENOTTY;
 
-	if (!ipa3_is_ready()) {
+	if (!ipa_is_ready()) {
 		IPAERR("IPA not ready, waiting for init completion\n");
 		wait_for_completion(&ipa3_ctx->init_completion_obj);
 	}
@@ -3475,7 +3491,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		}
 		break;
 	case IPA_IOC_PUT_RT_TBL:
-		retval = ipa3_put_rt_tbl(arg);
+		retval = ipa_put_rt_tbl(arg);
 		break;
 	case IPA_IOC_GET_HDR:
 		if (copy_from_user(header, (const void __user *)arg,
@@ -3483,7 +3499,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 			retval = -EFAULT;
 			break;
 		}
-		if (ipa3_get_hdr((struct ipa_ioc_get_hdr *)header)) {
+		if (ipa_get_hdr((struct ipa_ioc_get_hdr *)header)) {
 			retval = -EFAULT;
 			break;
 		}
@@ -3726,7 +3742,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		}
 	case IPA_IOC_QUERY_EP_MAPPING:
 		{
-			retval = ipa3_get_ep_mapping(arg);
+			retval = ipa_get_ep_mapping(arg);
 			break;
 		}
 	case IPA_IOC_QUERY_RT_TBL_INDEX:
@@ -3875,7 +3891,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 			retval = -EFAULT;
 			break;
 		}
-		retval = ipa3_is_vlan_mode(
+		retval = ipa_is_vlan_mode(
 			vlan_mode.iface,
 			&is_vlan_mode);
 		if (retval)
@@ -4482,7 +4498,7 @@ int ipa3_setup_dflt_rt_tables(void)
 	rt_rule_entry->rule.hdr_hdl = ipa3_ctx->excp_hdr_hdl;
 	rt_rule_entry->rule.retain_hdr = 1;
 
-	if (ipa3_add_rt_rule(rt_rule)) {
+	if (ipa_add_rt_rule(rt_rule)) {
 		IPAERR("fail to add dflt v4 rule\n");
 		kfree(rt_rule);
 		return -EPERM;
@@ -4492,7 +4508,7 @@ int ipa3_setup_dflt_rt_tables(void)
 
 	/* setup a default v6 route to point to A5 */
 	rt_rule->ip = IPA_IP_v6;
-	if (ipa3_add_rt_rule(rt_rule)) {
+	if (ipa_add_rt_rule(rt_rule)) {
 		IPAERR("fail to add dflt v6 rule\n");
 		kfree(rt_rule);
 		return -EPERM;
@@ -4535,7 +4551,7 @@ static int ipa3_setup_exception_path(void)
 		strlcpy(hdr_entry->name, IPA_LAN_RX_HDR_NAME, IPA_RESOURCE_NAME_MAX);
 		hdr_entry->hdr_len = IPA_LAN_RX_HEADER_LENGTH;
 
-		if (ipa3_add_hdr(hdr)) {
+		if (ipa_add_hdr(hdr)) {
 			IPAERR("fail to add exception hdr\n");
 			ret = -EPERM;
 			goto bail;
@@ -4557,8 +4573,8 @@ static int ipa3_setup_exception_path(void)
 		ipa3_ctx->excp_hdr_hdl = hdr_entry->hdr_hdl;
 
 		/* set the route register to pass exception packets to Apps */
-		route.route_def_pipe = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
-		route.route_frag_def_pipe = ipa3_get_ep_mapping(
+		route.route_def_pipe = ipa_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+		route.route_frag_def_pipe = ipa_get_ep_mapping(
 			IPA_CLIENT_APPS_LAN_CONS);
 		route.route_def_hdr_table = !hdr_entry_internal->is_lcl;
 		route.route_def_retain_hdr = 1;
@@ -4692,10 +4708,10 @@ static void ipa3_q6_pipe_flow_control(bool delay)
 
 	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
 		if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
-			ep_idx = ipa3_get_ep_mapping(client_idx);
+			ep_idx = ipa_get_ep_mapping(client_idx);
 			if (ep_idx == -1)
 				continue;
-			gsi_ep_cfg = ipa3_get_gsi_ep_info(client_idx);
+			gsi_ep_cfg = ipa_get_gsi_ep_info(client_idx);
 			if (!gsi_ep_cfg) {
 				IPAERR("failed to get GSI config\n");
 				ipa_assert();
@@ -4728,7 +4744,7 @@ static void ipa3_q6_pipe_delay(bool delay)
 
 	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
 		if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
-			ep_idx = ipa3_get_ep_mapping(client_idx);
+			ep_idx = ipa_get_ep_mapping(client_idx);
 			if (ep_idx == -1)
 				continue;
 
@@ -4757,7 +4773,7 @@ static void ipa3_q6_avoid_holb(void)
 
 	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
 		if (IPA_CLIENT_IS_Q6_CONS(client_idx)) {
-			ep_idx = ipa3_get_ep_mapping(client_idx);
+			ep_idx = ipa_get_ep_mapping(client_idx);
 			if (ep_idx == -1)
 				continue;
 
@@ -4805,11 +4821,11 @@ static void ipa3_halt_q6_gsi_channels(bool prod)
 	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
 		if (IPA_CLIENT_IS_Q6_CONS(client_idx)
 			|| (IPA_CLIENT_IS_Q6_PROD(client_idx) && prod)) {
-			ep_idx = ipa3_get_ep_mapping(client_idx);
+			ep_idx = ipa_get_ep_mapping(client_idx);
 			if (ep_idx == -1)
 				continue;
 
-			gsi_ep_cfg = ipa3_get_gsi_ep_info(client_idx);
+			gsi_ep_cfg = ipa_get_gsi_ep_info(client_idx);
 			if (!gsi_ep_cfg) {
 				IPAERR("failed to get GSI config\n");
 				ipa_assert();
@@ -4918,7 +4934,7 @@ static int ipa3_q6_clean_q6_flt_tbls(enum ipa_ip_type ip,
 		goto free_cmd_pyld;
 	}
 
-	coal_ep = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+	coal_ep = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
 	/* IC to close the coal frame before HPS Clear if coal is enabled */
 	if (coal_ep != IPA_EP_NOT_ALLOCATED && !ipa3_ctx->ulso_wa) {
 		u32 offset = 0;
@@ -5088,11 +5104,11 @@ static int ipa3_q6_clean_q6_rt_tbls(enum ipa_ip_type ip,
 	}
 
 	/* IC to close the coal frame before HPS Clear if coal is enabled */
-	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1
+	if (ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1
 		&& !ipa3_ctx->ulso_wa) {
 		u32 offset = 0;
 
-		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+		i = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
 		reg_write_coal_close.skip_pipeline_clear = false;
 		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
 		if (ipa3_ctx->ipa_hw_type < IPA_HW_v5_0)
@@ -5219,11 +5235,11 @@ static int ipa3_q6_clean_q6_tables(void)
 	}
 
 	/* IC to close the coal frame before HPS Clear if coal is enabled */
-	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1
+	if (ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1
 		&& !ipa3_ctx->ulso_wa) {
 		u32 offset = 0;
 
-		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+		i = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
 		reg_write_coal_close.skip_pipeline_clear = false;
 		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
 		if (ipa3_ctx->ipa_hw_type < IPA_HW_v5_0)
@@ -5320,11 +5336,11 @@ static int ipa3_q6_set_ex_path_to_apps(void)
 		return -ENOMEM;
 
 	/* IC to close the coal frame before HPS Clear if coal is enabled */
-	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1
+	if (ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1
 		&& !ipa3_ctx->ulso_wa) {
 		u32 offset = 0;
 
-		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+		i = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
 		reg_write_coal_close.skip_pipeline_clear = false;
 		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
 		if (ipa3_ctx->ipa_hw_type < IPA_HW_v5_0)
@@ -5353,7 +5369,7 @@ static int ipa3_q6_set_ex_path_to_apps(void)
 
 	/* Set the exception path to AP */
 	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
-		ep_idx = ipa3_get_ep_mapping(client_idx);
+		ep_idx = ipa_get_ep_mapping(client_idx);
 		if (ep_idx == -1 || (ep_idx >= ipa3_get_max_num_pipes()))
 			continue;
 
@@ -5533,7 +5549,7 @@ void ipa3_q6_post_shutdown_cleanup(void)
 
 	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++)
 		if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
-			ep_idx = ipa3_get_ep_mapping(client_idx);
+			ep_idx = ipa_get_ep_mapping(client_idx);
 			if (ep_idx == -1)
 				continue;
 
@@ -5661,15 +5677,15 @@ int _ipa_init_sram_v3(void)
 		IPA_MEM_PART(modem_hdr_proc_ctx_ofst) - 4);
 	ipa3_sram_set_canary(ipa_sram_mmio,
 		IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
-	if (ipa_get_hw_type_internal() >= IPA_HW_v4_5
-		&& ipa_get_hw_type_internal() < IPA_HW_v5_0) {
+	if (ipa_get_hw_type() >= IPA_HW_v4_5
+		&& ipa_get_hw_type() < IPA_HW_v5_0) {
 		/* 4.5, 4.7, 4.9, 4.11 */
 		ipa3_sram_set_canary(ipa_sram_mmio,
 			IPA_MEM_PART(nat_tbl_ofst) - 12);
 	}
 
-	if (ipa_get_hw_type_internal() >= IPA_HW_v4_0) {
-		if (ipa_get_hw_type_internal() < IPA_HW_v4_5) {
+	if (ipa_get_hw_type() >= IPA_HW_v4_0) {
+		if (ipa_get_hw_type() < IPA_HW_v4_5) {
 			/* 4.0, 4.1, 4.2 */
 			ipa3_sram_set_canary(ipa_sram_mmio,
 				IPA_MEM_PART(pdn_config_ofst) - 4);
@@ -5679,7 +5695,7 @@ int _ipa_init_sram_v3(void)
 				IPA_MEM_PART(stats_quota_q6_ofst) - 4);
 			ipa3_sram_set_canary(ipa_sram_mmio,
 				IPA_MEM_PART(stats_quota_q6_ofst));
-		} else if (ipa_get_hw_type_internal() < IPA_HW_v5_0) {
+		} else if (ipa_get_hw_type() < IPA_HW_v5_0) {
 			/* 4.5, 4.7, 4.11 */
 			ipa3_sram_set_canary(ipa_sram_mmio,
 				IPA_MEM_PART(stats_quota_q6_ofst) - 12);
@@ -5693,14 +5709,14 @@ int _ipa_init_sram_v3(void)
 	}
 
 	/* all excluding 3.5.1, 4.0, 4.1, 4.2 */
-	if (ipa_get_hw_type_internal() <= IPA_HW_v3_5 ||
-		ipa_get_hw_type_internal() >= IPA_HW_v4_5) {
+	if (ipa_get_hw_type() <= IPA_HW_v3_5 ||
+		ipa_get_hw_type() >= IPA_HW_v4_5) {
 		ipa3_sram_set_canary(ipa_sram_mmio,
 			IPA_MEM_PART(modem_ofst) - 4);
 		ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst));
 	}
 
-	if (ipa_get_hw_type_internal() == IPA_HW_v5_0) {
+	if (ipa_get_hw_type() == IPA_HW_v5_0) {
 		ipa3_sram_set_canary(ipa_sram_mmio,
 			IPA_MEM_PART(apps_v4_flt_nhash_ofst) - 4);
 		ipa3_sram_set_canary(ipa_sram_mmio,
@@ -5711,14 +5727,14 @@ int _ipa_init_sram_v3(void)
 			IPA_MEM_PART(stats_fnr_ofst));
 	}
 
-	if (ipa_get_hw_type_internal() >= IPA_HW_v5_0) {
+	if (ipa_get_hw_type() >= IPA_HW_v5_0) {
 		ipa3_sram_set_canary(ipa_sram_mmio,
 			IPA_MEM_PART(pdn_config_ofst - 4));
 		ipa3_sram_set_canary(ipa_sram_mmio,
 			IPA_MEM_PART(pdn_config_ofst));
 	} else {
 		ipa3_sram_set_canary(ipa_sram_mmio,
-			(ipa_get_hw_type_internal() >= IPA_HW_v3_5) ?
+			(ipa_get_hw_type() >= IPA_HW_v3_5) ?
 			IPA_MEM_PART(uc_descriptor_ram_ofst) :
 			IPA_MEM_PART(end_ofst));
 	}
@@ -5784,11 +5800,11 @@ int _ipa_init_hdr_v3_0(void)
 	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
 
 	/* IC to close the coal frame before HPS Clear if coal is enabled */
-	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1
+	if (ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1
 		&& !ipa3_ctx->ulso_wa) {
 		u32 offset = 0;
 
-		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+		i = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
 		reg_write_coal_close.skip_pipeline_clear = false;
 		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
 		if (ipa3_ctx->ipa_hw_type < IPA_HW_v5_0)
@@ -6226,7 +6242,7 @@ static int ipa3_setup_apps_pipes(void)
 	sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
 	sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
 	sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_APPS_LAN_CONS;
-	if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_cmd)) {
+	if (ipa_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_cmd)) {
 		IPAERR(":setup sys pipe (APPS_CMD_PROD) failed.\n");
 		result = -EPERM;
 		goto fail_ch20_wa;
@@ -6316,7 +6332,7 @@ static int ipa3_setup_apps_pipes(void)
 		 * source EP call-back
 		 */
 		spin_lock_init(&ipa3_ctx->disconnect_lock);
-		if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_data_in)) {
+		if (ipa_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_data_in)) {
 			IPAERR(":setup sys pipe (LAN_COAL_CONS) failed.\n");
 			result = -EPERM;
 			goto fail_flt_hash_tuple;
@@ -6350,7 +6366,7 @@ static int ipa3_setup_apps_pipes(void)
 		 * This lock intended to protect the access to the source EP call-back
 		 */
 		spin_lock_init(&ipa3_ctx->disconnect_lock);
-		if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_data_in)) {
+		if (ipa_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_data_in)) {
 			IPAERR(":setup sys pipe (LAN_CONS) failed.\n");
 			result = -EPERM;
 			goto fail_flt_hash_tuple;
@@ -6372,7 +6388,7 @@ static int ipa3_setup_apps_pipes(void)
 			sys_in.ipa_ep_cfg.hdr_ext.hdr_bytes_to_remove_valid = true;
 			sys_in.ipa_ep_cfg.hdr_ext.hdr_bytes_to_remove = QMAP_HDR_LEN;
 		}
-		if (ipa3_setup_sys_pipe(&sys_in,
+		if (ipa_setup_sys_pipe(&sys_in,
 			&ipa3_ctx->clnt_hdl_data_out)) {
 			IPAERR(":setup sys pipe (LAN_PROD) failed.\n");
 			result = -EPERM;
@@ -6384,7 +6400,7 @@ static int ipa3_setup_apps_pipes(void)
 
 fail_lan_data_out:
 	if ( ipa3_ctx->clnt_hdl_data_in )
-		ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
+		ipa_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
 fail_flt_hash_tuple:
 	if (ipa3_ctx->dflt_v6_rt_rule_hdl)
 		__ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl);
@@ -6392,7 +6408,7 @@ fail_flt_hash_tuple:
 		__ipa3_del_rt_rule(ipa3_ctx->dflt_v4_rt_rule_hdl);
 	if (ipa3_ctx->excp_hdr_hdl)
 		__ipa3_del_hdr(ipa3_ctx->excp_hdr_hdl, false);
-	ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_cmd);
+	ipa_teardown_sys_pipe(ipa3_ctx->clnt_hdl_cmd);
 fail_ch20_wa:
 	return result;
 }
@@ -6400,13 +6416,13 @@ fail_ch20_wa:
 static void ipa3_teardown_apps_pipes(void)
 {
 	if (!ipa3_ctx->ipa_config_is_mhi)
-		ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_out);
+		ipa_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_out);
 	if ( ipa3_ctx->clnt_hdl_data_in )
-		ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
+		ipa_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
 	__ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl);
 	__ipa3_del_rt_rule(ipa3_ctx->dflt_v4_rt_rule_hdl);
 	__ipa3_del_hdr(ipa3_ctx->excp_hdr_hdl, false);
-	ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_cmd);
+	ipa_teardown_sys_pipe(ipa3_ctx->clnt_hdl_cmd);
 	ipa3_dealloc_common_event_ring();
 }
 
@@ -7422,13 +7438,13 @@ void ipa3_suspend_handler(enum ipa_irq_type interrupt,
 }
 
 /**
- * ipa3_restore_suspend_handler() - restores the original suspend IRQ handler
+ * ipa_restore_suspend_handler() - restores the original suspend IRQ handler
  * as it was registered in the IPA init sequence.
  * Return codes:
  * 0: success
  * -EPERM: failed to remove current handler or failed to add original handler
  */
-int ipa3_restore_suspend_handler(void)
+int ipa_restore_suspend_handler(void)
 {
 	int result = 0;
 
@@ -7438,7 +7454,7 @@ int ipa3_restore_suspend_handler(void)
 		return -EPERM;
 	}
 
-	result = ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
+	result = ipa_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
 			ipa3_suspend_handler, false, NULL);
 	if (result) {
 		IPAERR("register handler for suspend interrupt failed\n");
@@ -7449,6 +7465,7 @@ int ipa3_restore_suspend_handler(void)
 
 	return result;
 }
+EXPORT_SYMBOL(ipa_restore_suspend_handler);
 
 static void ipa3_transport_release_resource(struct work_struct *work)
 {
@@ -7487,7 +7504,7 @@ int ipa3_init_interrupts(void)
 	}
 
 	/*add handler for suspend interrupt*/
-	result = ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
+	result = ipa_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
 			ipa3_suspend_handler, false, NULL);
 	if (result) {
 		IPAERR("register handler for suspend interrupt failed\n");
@@ -7724,7 +7741,7 @@ static int ipa3_alloc_gsi_channel(void)
 
 	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
 		type = ipa3_get_client_by_pipe(i);
-		gsi_ep_cfg = ipa3_get_gsi_ep_info(type);
+		gsi_ep_cfg = ipa_get_gsi_ep_info(type);
 		IPADBG("for ep %d client is %d\n", i, type);
 		if (!gsi_ep_cfg)
 			continue;
@@ -7759,94 +7776,60 @@ static inline void ipa3_disable_napi_lan_rx(void)
 		napi_disable(&ipa3_ctx->napi_lan_rx);
 }
 
-static inline void ipa3_register_to_fmwk(void)
-{
-	struct ipa_core_data data;
-
-	data.ipa_tx_dp = ipa3_tx_dp;
-	data.ipa_get_hw_type = ipa_get_hw_type_internal;
-	data.ipa_is_vlan_mode = ipa3_is_vlan_mode;
-	data.ipa_get_smmu_params = ipa3_get_smmu_params;
-	data.ipa_get_lan_rx_napi = ipa3_get_lan_rx_napi;
-	data.ipa_dma_init = ipa3_dma_init;
-	data.ipa_dma_enable = ipa3_dma_enable;
-	data.ipa_dma_disable = ipa3_dma_disable;
-	data.ipa_dma_sync_memcpy = ipa3_dma_sync_memcpy;
-	data.ipa_dma_async_memcpy = ipa3_dma_async_memcpy;
-	data.ipa_dma_destroy = ipa3_dma_destroy;
-	data.ipa_get_ep_mapping = ipa3_get_ep_mapping;
-	data.ipa_send_msg = ipa3_send_msg;
-	data.ipa_free_skb = ipa3_free_skb;
-	data.ipa_setup_sys_pipe = ipa3_setup_sys_pipe;
-	data.ipa_teardown_sys_pipe = ipa3_teardown_sys_pipe;
-	data.ipa_get_wdi_stats = ipa3_get_wdi_stats;
-	data.ipa_uc_bw_monitor = ipa3_uc_bw_monitor;
-	data.ipa_broadcast_wdi_quota_reach_ind =
-		ipa3_broadcast_wdi_quota_reach_ind;
-	data.ipa_uc_wdi_get_dbpa = ipa3_uc_wdi_get_dbpa;
-	data.ipa_cfg_ep_ctrl = ipa3_cfg_ep_ctrl;
-	data.ipa_add_rt_rule = ipa3_add_rt_rule;
-	data.ipa_put_rt_tbl = ipa3_put_rt_tbl;
-	data.ipa_register_intf = ipa3_register_intf;
-	data.ipa_deregister_intf = ipa3_deregister_intf;
-	data.ipa_add_hdr = ipa3_add_hdr;
-	data.ipa_get_hdr = ipa3_get_hdr;
-	data.ipa_del_hdr = ipa3_del_hdr;
-	data.ipa_set_aggr_mode = ipa3_set_aggr_mode;
-	data.ipa_set_qcncm_ndp_sig = ipa3_set_qcncm_ndp_sig;
-	data.ipa_set_single_ndp_per_mbim = ipa3_set_single_ndp_per_mbim;
-	data.ipa_add_interrupt_handler = ipa3_add_interrupt_handler;
-	data.ipa_restore_suspend_handler = ipa3_restore_suspend_handler;
-	data.ipa_get_gsi_ep_info = ipa3_get_gsi_ep_info;
-	data.ipa_stop_gsi_channel = ipa3_stop_gsi_channel;
-	data.ipa_rmnet_ctl_xmit = ipa3_rmnet_ctl_xmit;
-	data.ipa_register_rmnet_ctl_cb = ipa3_register_rmnet_ctl_cb;
-	data.ipa_unregister_rmnet_ctl_cb = ipa3_unregister_rmnet_ctl_cb;
-	if (ipa3_ctx->use_pm_wrapper) {
-		data.ipa_enable_wdi_pipe = ipa_pm_wrapper_enable_wdi_pipe;
-		data.ipa_disable_wdi_pipe = ipa_pm_wrapper_disable_pipe;
-		data.ipa_connect_wdi_pipe = ipa_pm_wrapper_connect_wdi_pipe;
-		data.ipa_disconnect_wdi_pipe = ipa_pm_wrapper_disconnect_wdi_pipe;
+static inline void ipa_trigger_ipa_ready_cbs(void)
+{
+	struct ipa_ready_cb_info *info;
+	struct ipa_ready_cb_info *next;
+
+	/* Call all the CBs */
+	list_for_each_entry_safe(info, next,
+		&ipa3_ctx->ipa_ready_cb_list, link) {
+		if (info->ready_cb)
+			info->ready_cb(info->user_data);
+
+		list_del(&info->link);
+		kfree(info);
 	}
-	else {
-		data.ipa_enable_wdi_pipe = ipa3_enable_wdi_pipe;
-		data.ipa_disable_wdi_pipe = ipa3_disable_wdi_pipe;
-		data.ipa_connect_wdi_pipe = ipa3_connect_wdi_pipe;
-		data.ipa_disconnect_wdi_pipe = ipa3_disconnect_wdi_pipe;
-	}
-	data.ipa_resume_wdi_pipe = ipa3_resume_wdi_pipe;
-	data.ipa_suspend_wdi_pipe = ipa3_suspend_wdi_pipe;
-	data.ipa_uc_reg_rdyCB = ipa3_uc_reg_rdyCB;
-	data.ipa_uc_dereg_rdyCB = ipa3_uc_dereg_rdyCB;
-	data.ipa_rmnet_ll_xmit = ipa3_rmnet_ll_xmit;
-	data.ipa_register_rmnet_ll_cb = ipa3_register_rmnet_ll_cb;
-	data.ipa_unregister_rmnet_ll_cb = ipa3_unregister_rmnet_ll_cb;
-	data.ipa_register_notifier =
-		ipa3_register_notifier;
-	data.ipa_unregister_notifier =
-		ipa3_unregister_notifier;
-	data.ipa_add_socksv5_conn = ipa3_add_socksv5_conn;
-	data.ipa_del_socksv5_conn = ipa3_del_socksv5_conn;
-
-	if (ipa_fmwk_register_ipa(&data)) {
-		IPAERR("couldn't register to IPA framework\n");
+}
+
+int ipa_register_ipa_ready_cb(void(*ipa_ready_cb)(void *user_data),
+	void *user_data)
+{
+	struct ipa_ready_cb_info *cb_info = NULL;
+
+	if (!ipa3_ctx) {
+		IPAERR("ipa framework hasn't been initialized yet\n");
+		return -EPERM;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	if (ipa3_ctx->ipa_initialization_complete) {
+		IPADBG("IPA driver finished initialization already\n");
+		mutex_unlock(&ipa3_ctx->lock);
+		return -EEXIST;
+	}
+
+	cb_info = kmalloc(sizeof(struct ipa_ready_cb_info), GFP_KERNEL);
+	if (!cb_info) {
+		mutex_unlock(&ipa3_ctx->lock);
+		return -ENOMEM;
 	}
+
+	cb_info->ready_cb = ipa_ready_cb;
+	cb_info->user_data = user_data;
+
+	list_add_tail(&cb_info->link, &ipa3_ctx->ipa_ready_cb_list);
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return 0;
 }
+EXPORT_SYMBOL(ipa_register_ipa_ready_cb);
 
 void ipa3_notify_clients_registered(void)
 {
-	bool reg = false;
-
 	mutex_lock(&ipa3_ctx->lock);
-	if (ipa3_ctx->ipa_initialization_complete)
-		reg = true;
 	ipa3_ctx->clients_registered = true;
 	mutex_unlock(&ipa3_ctx->lock);
-
-	if (reg) {
-		IPADBG("register to fmwk\n");
-		ipa3_register_to_fmwk();
-	}
 }
 EXPORT_SYMBOL(ipa3_notify_clients_registered);
 
@@ -7908,7 +7891,6 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
 	struct ipa3_flt_tbl_nhash_lcl *lcl_tbl;
 	int i;
 	struct idr *idr;
-	bool reg = false;
 	enum ipa_ip_type ip;
 #if IS_ENABLED(CONFIG_QCOM_VA_MINIDUMP)
 	struct ipa_minidump_data *mini_dump;
@@ -8063,7 +8045,7 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
 			if (ipa3_ctx->flt_tbl_nhash_lcl[ip] &&
 			    (IPA_CLIENT_IS_ETH_PROD(i) ||
 			     ((ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_TEST) &&
-			      (i == ipa3_get_ep_mapping(IPA_CLIENT_TEST_PROD))))) {
+			      (i == ipa_get_ep_mapping(IPA_CLIENT_TEST_PROD))))) {
 				flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] = false;
 				lcl_tbl = kcalloc(1, sizeof(struct ipa3_flt_tbl_nhash_lcl),
 						  GFP_KERNEL);
@@ -8254,22 +8236,18 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
 	else
 		IPADBG(":mpm init init ok\n");
 
+	ipa3_usb_init();
+
 	mutex_lock(&ipa3_ctx->lock);
 	ipa3_ctx->ipa_initialization_complete = true;
-	if (ipa3_ctx->clients_registered)
-		reg = true;
 	mutex_unlock(&ipa3_ctx->lock);
 	ipa3_enable_napi_lan_rx();
-	if (reg) {
-		IPADBG("register to fmwk\n");
-		ipa3_register_to_fmwk();
-	}
-
 	/* init uc-activation tbl*/
 	ipa3_setup_uc_act_tbl();
+	ipa_trigger_ipa_ready_cbs();
 
 #ifdef CONFIG_DEEPSLEEP
-	if (!ipa3_is_ready())
+	if (!ipa_is_ready())
 		ipa_fmwk_deepsleep_exit_ipa();
 #endif
 	complete_all(&ipa3_ctx->init_completion_obj);
@@ -8751,7 +8729,7 @@ static ssize_t ipa3_write(struct file *file, const char __user *buf,
 	}
 
 	/* Prevent consequent calls from trying to load the FW again. */
-	if (ipa3_is_ready())
+	if (ipa_is_ready())
 		return count;
 
 	ipa_fw_load_sm_handle_event(IPA_FW_LOAD_EVNT_FWFILE_READY);
@@ -8993,7 +8971,7 @@ int ipa_set_pkt_init_ex_hdr_ofst(struct ipa_pkt_init_ex_hdr_ofst_set
 	if (!lookup)
 		return -EINVAL;
 
-	dst_ep_idx = ipa3_get_ep_mapping(lookup->ep);
+	dst_ep_idx = ipa_get_ep_mapping(lookup->ep);
 	IPADBG("dst_ep_idx=%d\n", dst_ep_idx);
 	if (-1 == dst_ep_idx) {
 		IPAERR("Client %u is not mapped\n", lookup->ep);
@@ -11851,10 +11829,11 @@ struct ipa3_context *ipa3_get_ctx(void)
 }
 EXPORT_SYMBOL(ipa3_get_ctx);
 
-bool ipa3_get_lan_rx_napi(void)
+bool ipa_get_lan_rx_napi(void)
 {
 	return ipa3_ctx->lan_rx_napi_enable;
 }
+EXPORT_SYMBOL(ipa_get_lan_rx_napi);
 
 
 #ifdef CONFIG_DEEPSLEEP
@@ -11865,6 +11844,7 @@ static void ipa3_deepsleep_suspend(void)
 
 	/* To allow default routing table delection using this flag */
 	ipa3_ctx->deepsleep = true;
+	ipa3_usb_exit();
 	/*Disabling the LAN NAPI*/
 	ipa3_disable_napi_lan_rx();
 	/*NOt allow uC related operations until uC load again*/
@@ -12015,9 +11995,9 @@ int ipa3_iommu_map(struct iommu_domain *domain,
 EXPORT_SYMBOL(ipa3_iommu_map);
 
 /**
- * ipa3_get_smmu_params()- Return the ipa3 smmu related params.
+ * ipa_get_smmu_params()- Return the ipa3 smmu related params.
  */
-int ipa3_get_smmu_params(struct ipa_smmu_in_params *in,
+int ipa_get_smmu_params(struct ipa_smmu_in_params *in,
 	struct ipa_smmu_out_params *out)
 {
 	bool is_smmu_enable = false;
@@ -12104,7 +12084,7 @@ int ipa3_get_smmu_params(struct ipa_smmu_in_params *in,
 
 	return 0;
 }
-EXPORT_SYMBOL(ipa3_get_smmu_params);
+EXPORT_SYMBOL(ipa_get_smmu_params);
 
 #define MAX_LEN 96
 

+ 24 - 24
drivers/platform/msm/ipa/ipa_v3/ipa_client.c

@@ -104,7 +104,7 @@ int ipa3_enable_data_path(u32 clnt_hdl)
 		    !ipa3_should_pipe_be_suspended(ep->client))) {
 			memset(&ep_cfg_ctrl, 0, sizeof(ep_cfg_ctrl));
 			ep_cfg_ctrl.ipa_ep_suspend = false;
-			res = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+			res = ipa_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
 		}
 	}
 
@@ -149,7 +149,7 @@ int ipa3_disable_data_path(u32 clnt_hdl)
 		if (IPA_CLIENT_IS_CONS(ep->client)) {
 			memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
 			ep_cfg_ctrl.ipa_ep_suspend = true;
-			res = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+			res = ipa_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
 		}
 
 		udelay(IPA_PKT_FLUSH_TO_US);
@@ -598,7 +598,7 @@ int ipa3_request_gsi_channel(struct ipa_request_gsi_channel_params *params,
 		return -EINVAL;
 	}
 
-	ipa_ep_idx = ipa3_get_ep_mapping(params->client);
+	ipa_ep_idx = ipa_get_ep_mapping(params->client);
 	if (ipa_ep_idx == -1) {
 		IPAERR("fail to alloc EP.\n");
 		goto fail;
@@ -688,9 +688,9 @@ int ipa3_request_gsi_channel(struct ipa_request_gsi_channel_params *params,
 		goto write_evt_scratch_fail;
 	}
 
-	gsi_ep_cfg_ptr = ipa3_get_gsi_ep_info(ep->client);
+	gsi_ep_cfg_ptr = ipa_get_gsi_ep_info(ep->client);
 	if (gsi_ep_cfg_ptr == NULL) {
-		IPAERR("Error ipa3_get_gsi_ep_info ret NULL\n");
+		IPAERR("Error ipa_get_gsi_ep_info ret NULL\n");
 		result = -EFAULT;
 		goto write_evt_scratch_fail;
 	}
@@ -882,7 +882,7 @@ int ipa3_xdci_connect(u32 clnt_hdl)
 	goto exit;
 
 stop_ch:
-	(void)ipa3_stop_gsi_channel(clnt_hdl);
+	(void)ipa_stop_gsi_channel(clnt_hdl);
 exit:
 	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
 	return result;
@@ -925,7 +925,7 @@ int ipa3_xdci_start(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid)
 		ep_cfg_ctrl.ipa_ep_delay = true;
 		ep->ep_delay_set = true;
 
-		result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+		result = ipa_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
 		if (result)
 			IPAERR("client (ep: %d) failed result=%d\n",
 			clnt_hdl, result);
@@ -1116,7 +1116,7 @@ static int ipa3_xdci_stop_gsi_channel(u32 clnt_hdl, bool *stop_in_proc)
 		return -EINVAL;
 	}
 
-	res = ipa3_stop_gsi_channel(clnt_hdl);
+	res = ipa_stop_gsi_channel(clnt_hdl);
 	if (res != 0 && res != -GSI_STATUS_AGAIN &&
 		res != -GSI_STATUS_TIMED_OUT) {
 		IPAERR("xDCI stop channel failed res=%d\n", res);
@@ -1189,7 +1189,7 @@ int ipa3_remove_secondary_flow_ctrl(int gsi_chan_hdl)
 	if (result == GSI_STATUS_SUCCESS) {
 		code = 0;
 		result = gsi_flow_control_ee(gsi_chan_hdl,
-			ipa3_get_ep_mapping_from_gsi(gsi_chan_hdl), 0, false, true, &code);
+			ipa_get_ep_mapping_from_gsi(gsi_chan_hdl), 0, false, true, &code);
 		if (result == GSI_STATUS_SUCCESS) {
 			IPADBG("flow control sussess ch %d code %d\n",
 					gsi_chan_hdl, code);
@@ -1240,7 +1240,7 @@ static int ipa3_stop_ul_chan_with_data_drain(u32 qmi_req_id,
 	if (remove_delay && ep->ep_delay_set == true && !stop_in_proc) {
 		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
 		ep_cfg_ctrl.ipa_ep_delay = false;
-		result = ipa3_cfg_ep_ctrl(clnt_hdl,
+		result = ipa_cfg_ep_ctrl(clnt_hdl,
 			&ep_cfg_ctrl);
 		if (result) {
 			IPAERR
@@ -1327,7 +1327,7 @@ exit:
 	if (remove_delay && ep->ep_delay_set == true && !stop_in_proc) {
 		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
 		ep_cfg_ctrl.ipa_ep_delay = false;
-		result = ipa3_cfg_ep_ctrl(clnt_hdl,
+		result = ipa_cfg_ep_ctrl(clnt_hdl,
 			&ep_cfg_ctrl);
 		if (result) {
 			IPAERR
@@ -1365,7 +1365,7 @@ int ipa3_set_reset_client_prod_pipe_delay(bool set_reset,
 		return -EINVAL;
 	}
 
-	pipe_idx = ipa3_get_ep_mapping(client);
+	pipe_idx = ipa_get_ep_mapping(client);
 
 	if (pipe_idx == IPA_EP_NOT_ALLOCATED) {
 		IPAERR("client (%d) not valid\n", client);
@@ -1378,7 +1378,7 @@ int ipa3_set_reset_client_prod_pipe_delay(bool set_reset,
 	client_lock_unlock_cb(client, true);
 	if (ep->valid && ep->skip_ep_cfg) {
 		ep->ep_delay_set = ep_ctrl.ipa_ep_delay;
-		result = ipa3_cfg_ep_ctrl(pipe_idx, &ep_ctrl);
+		result = ipa_cfg_ep_ctrl(pipe_idx, &ep_ctrl);
 		if (result)
 			IPAERR("client (ep: %d) failed result=%d\n",
 				pipe_idx, result);
@@ -1418,7 +1418,7 @@ int ipa3_start_stop_client_prod_gsi_chnl(enum ipa_client_type client,
 		return -EINVAL;
 	}
 
-	pipe_idx = ipa3_get_ep_mapping(client);
+	pipe_idx = ipa_get_ep_mapping(client);
 
 	if (pipe_idx == IPA_EP_NOT_ALLOCATED) {
 		IPAERR("client (%d) not valid\n", client);
@@ -1441,7 +1441,7 @@ int ipa3_start_stop_client_prod_gsi_chnl(enum ipa_client_type client,
 						ep->gsi_chan_hdl, code);
 			}
 		} else
-			result = ipa3_stop_gsi_channel(pipe_idx);
+			result = ipa_stop_gsi_channel(pipe_idx);
 	}
 	client_lock_unlock_cb(client, false);
 	return result;
@@ -1466,7 +1466,7 @@ int ipa3_set_reset_client_cons_pipe_sus_holb(bool set_reset,
 		return -EINVAL;
 	}
 
-	pipe_idx = ipa3_get_ep_mapping(client);
+	pipe_idx = ipa_get_ep_mapping(client);
 
 	if (pipe_idx == IPA_EP_NOT_ALLOCATED) {
 		IPAERR("client (%d) not valid\n", client);
@@ -1529,7 +1529,7 @@ void ipa3_xdci_ep_delay_rm(u32 clnt_hdl)
 			IPA_ACTIVE_CLIENTS_INC_EP
 				(ipa3_get_client_mapping(clnt_hdl));
 
-		result = ipa3_cfg_ep_ctrl(clnt_hdl,
+		result = ipa_cfg_ep_ctrl(clnt_hdl,
 			&ep_cfg_ctrl);
 
 		if (!ep->keep_ipa_awake)
@@ -1587,7 +1587,7 @@ int ipa3_xdci_disconnect(u32 clnt_hdl, bool should_force_clear, u32 qmi_req_id)
 	} else {
 		IPADBG("Stopping CONS channel - hdl=%d clnt=%d\n",
 			clnt_hdl, ep->client);
-		result = ipa3_stop_gsi_channel(clnt_hdl);
+		result = ipa_stop_gsi_channel(clnt_hdl);
 		if (result) {
 			IPAERR("Error stopping channel (CONS client): %d\n",
 				result);
@@ -1597,7 +1597,7 @@ int ipa3_xdci_disconnect(u32 clnt_hdl, bool should_force_clear, u32 qmi_req_id)
 			/* Unsuspend the pipe */
 			memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
 			ep_cfg_ctrl.ipa_ep_suspend = false;
-			ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+			ipa_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
 		}
 	}
 	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
@@ -1764,7 +1764,7 @@ int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
 		/* Suspend the DL/DPL EP */
 		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
 		ep_cfg_ctrl.ipa_ep_suspend = true;
-		ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+		ipa_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
 	}
 
 	/*
@@ -1789,7 +1789,7 @@ int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
 	}
 
 	/* Stop DL channel */
-	result = ipa3_stop_gsi_channel(dl_clnt_hdl);
+	result = ipa_stop_gsi_channel(dl_clnt_hdl);
 	if (result) {
 		IPAERR("Error stopping DL/DPL channel: %d\n", result);
 		result = -EFAULT;
@@ -1838,7 +1838,7 @@ unsuspend_dl_and_exit:
 		/* Unsuspend the DL EP */
 		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
 		ep_cfg_ctrl.ipa_ep_suspend = false;
-		ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+		ipa_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
 	}
 disable_clk_and_exit:
 	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
@@ -1925,7 +1925,7 @@ int ipa3_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl, bool is_dpl)
 		/* Unsuspend the DL/DPL EP */
 		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
 		ep_cfg_ctrl.ipa_ep_suspend = false;
-		ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+		ipa_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
 	}
 
 	/* Start DL channel */
@@ -2029,7 +2029,7 @@ int ipa3_clear_endpoint_delay(u32 clnt_hdl)
 	/* If flow is disabled at this point, restore the ep state.*/
 	ep_ctrl.ipa_ep_delay = false;
 	ep_ctrl.ipa_ep_suspend = false;
-	ipa3_cfg_ep_ctrl(clnt_hdl, &ep_ctrl);
+	ipa_cfg_ep_ctrl(clnt_hdl, &ep_ctrl);
 
 	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
 

+ 5 - 5
drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c

@@ -1109,7 +1109,7 @@ static ssize_t ipa3_read_rt(struct file *file, char __user *ubuf, size_t count,
 					>> 5;
 				pr_err("rule_idx:%d dst:%d ep:%d S:%u ",
 					i, entry->rule.dst,
-					ipa3_get_ep_mapping(entry->rule.dst),
+					ipa_get_ep_mapping(entry->rule.dst),
 					!ipa3_ctx->hdr_proc_ctx_tbl_lcl);
 				pr_err("proc_ctx[32B]:%u attrib_mask:%08x ",
 					ofst_words,
@@ -1121,7 +1121,7 @@ static ssize_t ipa3_read_rt(struct file *file, char __user *ubuf, size_t count,
 					ofst = 0;
 				pr_err("rule_idx:%d dst:%d ep:%d S:%u ",
 					i, entry->rule.dst,
-					ipa3_get_ep_mapping(entry->rule.dst),
+					ipa_get_ep_mapping(entry->rule.dst),
 					!(entry->hdr && entry->hdr->is_lcl));
 				pr_err("hdr_ofst[words]:%u attrib_mask:%08x ",
 					ofst >> 2,
@@ -1800,7 +1800,7 @@ static ssize_t ipa3_read_wstats(struct file *file, char __user *ubuf,
 			HEAD_FRMT_STR, "Client IPA_CLIENT_WLAN1_PROD Stats:");
 		cnt += nbytes;
 
-		ipa_ep_idx = ipa3_get_ep_mapping(client);
+		ipa_ep_idx = ipa_get_ep_mapping(client);
 		if (ipa_ep_idx == -1) {
 			nbytes = scnprintf(dbg_buff + cnt,
 				IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up");
@@ -1858,7 +1858,7 @@ static ssize_t ipa3_read_wstats(struct file *file, char __user *ubuf,
 		"Client IPA_CLIENT_WLAN1_CONS Stats:");
 	cnt += nbytes;
 	while (1) {
-		ipa_ep_idx = ipa3_get_ep_mapping(client);
+		ipa_ep_idx = ipa_get_ep_mapping(client);
 		if (ipa_ep_idx == -1) {
 			nbytes = scnprintf(dbg_buff + cnt,
 				IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up");
@@ -2051,7 +2051,7 @@ static ssize_t ipa3_read_wdi(struct file *file, char __user *ubuf,
 	int cnt = 0;
 	struct IpaHwStatsWDITxInfoData_t *tx_ch_ptr;
 
-	if (!ipa3_get_wdi_stats(&stats)) {
+	if (!ipa_get_wdi_stats(&stats)) {
 		tx_ch_ptr = &stats.tx_ch_stats;
 		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
 			"TX num_pkts_processed=%u\n"

+ 1 - 1
drivers/platform/msm/ipa/ipa_v3/ipa_defs.h

@@ -5,7 +5,7 @@
 
 #ifndef _IPA_DEFS_H_
 #define _IPA_DEFS_H_
-#include <linux/ipa.h>
+#include "ipa.h"
 
 /**
  * struct ipa_rt_rule_i - attributes of a routing rule

+ 68 - 62
drivers/platform/msm/ipa/ipa_v3/ipa_dma.c

@@ -10,7 +10,7 @@
 #include <linux/kernel.h>
 #include <linux/msm_ipa.h>
 #include <linux/mutex.h>
-#include <linux/ipa.h>
+#include "ipa.h"
 #include <linux/msm_gsi.h>
 #include <linux/dmapool.h>
 #include "ipa_i.h"
@@ -116,15 +116,15 @@ struct ipa3_dma_ctx {
 static struct ipa3_dma_ctx *ipa3_dma_ctx;
 
 /**
- * struct ipa3_dma_init_refcnt_ctrl -IPADMA driver init control information
+ * struct ipa_dma_init_refcnt_ctrl -IPADMA driver init control information
  * @ref_cnt: reference count for initialization operations
  * @lock: lock for the reference count
  */
-struct ipa3_dma_init_refcnt_ctrl {
+struct ipa_dma_init_refcnt_ctrl {
 	unsigned int ref_cnt;
 	struct mutex lock;
 };
-static struct ipa3_dma_init_refcnt_ctrl *ipa3_dma_init_refcnt_ctrl;
+static struct ipa_dma_init_refcnt_ctrl *ipa_dma_init_refcnt_ctrl;
 
 /**
  * ipa3_dma_setup() - One time setup for IPA DMA
@@ -139,20 +139,20 @@ int ipa3_dma_setup(void)
 {
 	IPADMA_FUNC_ENTRY();
 
-	if (ipa3_dma_init_refcnt_ctrl) {
+	if (ipa_dma_init_refcnt_ctrl) {
 		IPADMA_ERR("Setup already done\n");
 		return -EFAULT;
 	}
 
-	ipa3_dma_init_refcnt_ctrl =
-		kzalloc(sizeof(*(ipa3_dma_init_refcnt_ctrl)), GFP_KERNEL);
+	ipa_dma_init_refcnt_ctrl =
+		kzalloc(sizeof(*(ipa_dma_init_refcnt_ctrl)), GFP_KERNEL);
 
-	if (!ipa3_dma_init_refcnt_ctrl) {
+	if (!ipa_dma_init_refcnt_ctrl) {
 		IPADMA_ERR("kzalloc error.\n");
 		return -ENOMEM;
 	}
 
-	mutex_init(&ipa3_dma_init_refcnt_ctrl->lock);
+	mutex_init(&ipa_dma_init_refcnt_ctrl->lock);
 
 	IPADMA_FUNC_EXIT();
 	return 0;
@@ -171,17 +171,17 @@ void ipa3_dma_shutdown(void)
 {
 	IPADMA_FUNC_ENTRY();
 
-	if (!ipa3_dma_init_refcnt_ctrl)
+	if (!ipa_dma_init_refcnt_ctrl)
 		return;
 
-	kfree(ipa3_dma_init_refcnt_ctrl);
-	ipa3_dma_init_refcnt_ctrl = NULL;
+	kfree(ipa_dma_init_refcnt_ctrl);
+	ipa_dma_init_refcnt_ctrl = NULL;
 
 	IPADMA_FUNC_EXIT();
 }
 
 /**
- * ipa3_dma_init() -Initialize IPADMA.
+ * ipa_dma_init() -Initialize IPADMA.
  *
  * This function initialize all IPADMA internal data and connect in dma:
  *	MEMCPY_DMA_SYNC_PROD ->MEMCPY_DMA_SYNC_CONS
@@ -195,7 +195,7 @@ void ipa3_dma_shutdown(void)
  *		-ENOMEM: allocating memory error
  *		-EPERM: pipe connection failed
  */
-int ipa3_dma_init(void)
+int ipa_dma_init(void)
 {
 	struct ipa3_dma_ctx *ipa_dma_ctx_t;
 	struct ipa_sys_connect_params sys_in;
@@ -205,21 +205,21 @@ int ipa3_dma_init(void)
 
 	IPADMA_FUNC_ENTRY();
 
-	if (!ipa3_dma_init_refcnt_ctrl) {
+	if (!ipa_dma_init_refcnt_ctrl) {
 		IPADMA_ERR("Setup isn't done yet!\n");
 		return -EINVAL;
 	}
 
-	mutex_lock(&ipa3_dma_init_refcnt_ctrl->lock);
-	if (ipa3_dma_init_refcnt_ctrl->ref_cnt > 0) {
+	mutex_lock(&ipa_dma_init_refcnt_ctrl->lock);
+	if (ipa_dma_init_refcnt_ctrl->ref_cnt > 0) {
 		IPADMA_DBG("Already initialized refcnt=%d\n",
-			ipa3_dma_init_refcnt_ctrl->ref_cnt);
+			ipa_dma_init_refcnt_ctrl->ref_cnt);
 		if (!ipa3_dma_ctx) {
 			IPADMA_ERR("Context missing. refcnt=%d\n",
-				ipa3_dma_init_refcnt_ctrl->ref_cnt);
+				ipa_dma_init_refcnt_ctrl->ref_cnt);
 			res = -EFAULT;
 		} else {
-			ipa3_dma_init_refcnt_ctrl->ref_cnt++;
+			ipa_dma_init_refcnt_ctrl->ref_cnt++;
 		}
 		goto init_unlock;
 	}
@@ -230,7 +230,7 @@ int ipa3_dma_init(void)
 		goto init_unlock;
 	}
 
-	if (!ipa3_is_ready()) {
+	if (!ipa_is_ready()) {
 		IPADMA_ERR("IPA is not ready yet\n");
 		res = -EINVAL;
 		goto init_unlock;
@@ -316,7 +316,7 @@ int ipa3_dma_init(void)
 	sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
 	sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_MEMCPY_DMA_SYNC_CONS;
 	sys_in.skip_ep_cfg = false;
-	if (ipa3_setup_sys_pipe(&sys_in,
+	if (ipa_setup_sys_pipe(&sys_in,
 		&ipa_dma_ctx_t->ipa_dma_sync_prod_hdl)) {
 		IPADMA_ERR(":setup sync prod pipe failed\n");
 		res = -EPERM;
@@ -331,7 +331,7 @@ int ipa3_dma_init(void)
 	sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
 	sys_in.notify = NULL;
 	sys_in.priv = NULL;
-	if (ipa3_setup_sys_pipe(&sys_in,
+	if (ipa_setup_sys_pipe(&sys_in,
 		&ipa_dma_ctx_t->ipa_dma_sync_cons_hdl)) {
 		IPADMA_ERR(":setup sync cons pipe failed.\n");
 		res = -EPERM;
@@ -348,7 +348,7 @@ int ipa3_dma_init(void)
 	sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS;
 	sys_in.skip_ep_cfg = false;
 	sys_in.notify = NULL;
-	if (ipa3_setup_sys_pipe(&sys_in,
+	if (ipa_setup_sys_pipe(&sys_in,
 		&ipa_dma_ctx_t->ipa_dma_async_prod_hdl)) {
 		IPADMA_ERR(":setup async prod pipe failed.\n");
 		res = -EPERM;
@@ -363,7 +363,7 @@ int ipa3_dma_init(void)
 	sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
 	sys_in.notify = ipa3_dma_async_memcpy_notify_cb;
 	sys_in.priv = NULL;
-	if (ipa3_setup_sys_pipe(&sys_in,
+	if (ipa_setup_sys_pipe(&sys_in,
 		&ipa_dma_ctx_t->ipa_dma_async_cons_hdl)) {
 		IPADMA_ERR(":setup async cons pipe failed.\n");
 		res = -EPERM;
@@ -371,18 +371,18 @@ int ipa3_dma_init(void)
 	}
 	ipa3_dma_debugfs_init();
 	ipa3_dma_ctx = ipa_dma_ctx_t;
-	ipa3_dma_init_refcnt_ctrl->ref_cnt = 1;
+	ipa_dma_init_refcnt_ctrl->ref_cnt = 1;
 	IPADMA_DBG("ASYNC MEMCPY pipes are connected\n");
 
 	IPADMA_FUNC_EXIT();
 	goto init_unlock;
 
 fail_async_cons:
-	ipa3_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_async_prod_hdl);
+	ipa_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_async_prod_hdl);
 fail_async_prod:
-	ipa3_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_sync_cons_hdl);
+	ipa_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_sync_cons_hdl);
 fail_sync_cons:
-	ipa3_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_sync_prod_hdl);
+	ipa_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_sync_prod_hdl);
 fail_sync_prod:
 	dma_free_coherent(ipa3_ctx->pdev, IPA_DMA_DUMMY_BUFF_SZ * 4,
 		ipa_dma_ctx_t->ipa_dma_dummy_src_sync.base,
@@ -393,24 +393,25 @@ fail_mem_ctrl:
 	kfree(ipa_dma_ctx_t);
 	ipa3_dma_ctx = NULL;
 init_unlock:
-	mutex_unlock(&ipa3_dma_init_refcnt_ctrl->lock);
+	mutex_unlock(&ipa_dma_init_refcnt_ctrl->lock);
 	return res;
 
 }
+EXPORT_SYMBOL(ipa_dma_init);
 
 /**
- * ipa3_dma_enable() -Vote for IPA clocks.
+ * ipa_dma_enable() -Vote for IPA clocks.
  *
  * Can be executed several times (re-entrant)
  *
  *Return codes: 0: success
  *		-EINVAL: IPADMA is not initialized
  */
-int ipa3_dma_enable(void)
+int ipa_dma_enable(void)
 {
 	IPADMA_FUNC_ENTRY();
 	if ((ipa3_dma_ctx == NULL) ||
-		(ipa3_dma_init_refcnt_ctrl->ref_cnt < 1)) {
+		(ipa_dma_init_refcnt_ctrl->ref_cnt < 1)) {
 		IPADMA_ERR("IPADMA isn't initialized, can't enable\n");
 		return -EINVAL;
 	}
@@ -429,6 +430,7 @@ int ipa3_dma_enable(void)
 	IPADMA_FUNC_EXIT();
 	return 0;
 }
+EXPORT_SYMBOL(ipa_dma_enable);
 
 static bool ipa3_dma_work_pending(void)
 {
@@ -449,7 +451,7 @@ static bool ipa3_dma_work_pending(void)
 }
 
 /**
- * ipa3_dma_disable()- Unvote for IPA clocks.
+ * ipa_dma_disable()- Unvote for IPA clocks.
  *
  * enter to power save mode.
  *
@@ -460,7 +462,7 @@ static bool ipa3_dma_work_pending(void)
  *		-EFAULT: can not disable ipa_dma as there are pending
  *			memcopy works
  */
-int ipa3_dma_disable(void)
+int ipa_dma_disable(void)
 {
 	unsigned long flags;
 	int res = 0;
@@ -468,7 +470,7 @@ int ipa3_dma_disable(void)
 
 	IPADMA_FUNC_ENTRY();
 	if ((ipa3_dma_ctx == NULL) ||
-		(ipa3_dma_init_refcnt_ctrl->ref_cnt < 1)) {
+		(ipa_dma_init_refcnt_ctrl->ref_cnt < 1)) {
 		IPADMA_ERR("IPADMA isn't initialized, can't disable\n");
 		return -EINVAL;
 	}
@@ -503,9 +505,10 @@ completed:
 	mutex_unlock(&ipa3_dma_ctx->enable_lock);
 	return res;
 }
+EXPORT_SYMBOL(ipa_dma_disable);
 
 /**
- * ipa3_dma_sync_memcpy()- Perform synchronous memcpy using IPA.
+ * ipa_dma_sync_memcpy()- Perform synchronous memcpy using IPA.
  *
  * @dest: physical address to store the copied data.
  * @src: physical address of the source data to copy.
@@ -518,7 +521,7 @@ completed:
  *		-gsi_status : on GSI failures
  *		-EFAULT: other
  */
-int ipa3_dma_sync_memcpy(u64 dest, u64 src, int len)
+int ipa_dma_sync_memcpy(u64 dest, u64 src, int len)
 {
 	int ep_idx;
 	int res;
@@ -558,7 +561,7 @@ int ipa3_dma_sync_memcpy(u64 dest, u64 src, int len)
 	atomic_inc(&ipa3_dma_ctx->sync_memcpy_pending_cnt);
 	spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
 
-	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_SYNC_CONS);
+	ep_idx = ipa_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_SYNC_CONS);
 	if (-1 == ep_idx) {
 		IPADMA_ERR("Client %u is not mapped\n",
 			IPA_CLIENT_MEMCPY_DMA_SYNC_CONS);
@@ -566,7 +569,7 @@ int ipa3_dma_sync_memcpy(u64 dest, u64 src, int len)
 	}
 	cons_sys = ipa3_ctx->ep[ep_idx].sys;
 
-	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_SYNC_PROD);
+	ep_idx = ipa_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_SYNC_PROD);
 	if (-1 == ep_idx) {
 		IPADMA_ERR("Client %u is not mapped\n",
 			IPA_CLIENT_MEMCPY_DMA_SYNC_PROD);
@@ -749,9 +752,10 @@ fail_mem_alloc:
 		complete(&ipa3_dma_ctx->done);
 	return res;
 }
+EXPORT_SYMBOL(ipa_dma_sync_memcpy);
 
 /**
- * ipa3_dma_async_memcpy()- Perform asynchronous memcpy using IPA.
+ * ipa_dma_async_memcpy()- Perform asynchronous memcpy using IPA.
  *
  * @dest: physical address to store the copied data.
  * @src: physical address of the source data to copy.
@@ -766,7 +770,7 @@ fail_mem_alloc:
  *		-gsi_status : on GSI failures
  *		-EFAULT: descr fifo is full.
  */
-int ipa3_dma_async_memcpy(u64 dest, u64 src, int len,
+int ipa_dma_async_memcpy(u64 dest, u64 src, int len,
 		void (*user_cb)(void *user1), void *user_param)
 {
 	int ep_idx;
@@ -805,7 +809,7 @@ int ipa3_dma_async_memcpy(u64 dest, u64 src, int len,
 	atomic_inc(&ipa3_dma_ctx->async_memcpy_pending_cnt);
 	spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
 
-	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS);
+	ep_idx = ipa_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS);
 	if (-1 == ep_idx) {
 		IPADMA_ERR("Client %u is not mapped\n",
 			IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS);
@@ -813,7 +817,7 @@ int ipa3_dma_async_memcpy(u64 dest, u64 src, int len,
 	}
 	cons_sys = ipa3_ctx->ep[ep_idx].sys;
 
-	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD);
+	ep_idx = ipa_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD);
 	if (-1 == ep_idx) {
 		IPADMA_ERR("Client %u is not mapped\n",
 			IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD);
@@ -945,6 +949,7 @@ fail_mem_alloc:
 		complete(&ipa3_dma_ctx->done);
 	return res;
 }
+EXPORT_SYMBOL(ipa_dma_async_memcpy);
 
 /**
  * ipa3_dma_uc_memcpy() - Perform a memcpy action using IPA uC
@@ -1003,30 +1008,30 @@ dec_and_exit:
 }
 
 /**
- * ipa3_dma_destroy() -teardown IPADMA pipes and release ipadma.
+ * ipa_dma_destroy() -teardown IPADMA pipes and release ipadma.
  *
  * this is a blocking function, returns just after destroying IPADMA.
  */
-void ipa3_dma_destroy(void)
+void ipa_dma_destroy(void)
 {
 	int res = 0;
 
 	IPADMA_FUNC_ENTRY();
 
-	if (!ipa3_dma_init_refcnt_ctrl) {
+	if (!ipa_dma_init_refcnt_ctrl) {
 		IPADMA_ERR("Setup isn't done\n");
 		return;
 	}
 
-	mutex_lock(&ipa3_dma_init_refcnt_ctrl->lock);
-	if (ipa3_dma_init_refcnt_ctrl->ref_cnt > 1) {
+	mutex_lock(&ipa_dma_init_refcnt_ctrl->lock);
+	if (ipa_dma_init_refcnt_ctrl->ref_cnt > 1) {
 		IPADMA_DBG("Multiple initialization done. refcnt=%d\n",
-			ipa3_dma_init_refcnt_ctrl->ref_cnt);
-		ipa3_dma_init_refcnt_ctrl->ref_cnt--;
+			ipa_dma_init_refcnt_ctrl->ref_cnt);
+		ipa_dma_init_refcnt_ctrl->ref_cnt--;
 		goto completed;
 	}
 
-	if ((!ipa3_dma_ctx) || (ipa3_dma_init_refcnt_ctrl->ref_cnt == 0)) {
+	if ((!ipa3_dma_ctx) || (ipa_dma_init_refcnt_ctrl->ref_cnt == 0)) {
 		IPADMA_ERR("IPADMA isn't initialized ctx=%pK\n", ipa3_dma_ctx);
 		goto completed;
 	}
@@ -1042,19 +1047,19 @@ void ipa3_dma_destroy(void)
 		goto completed;
 	}
 
-	res = ipa3_teardown_sys_pipe(ipa3_dma_ctx->ipa_dma_async_cons_hdl);
+	res = ipa_teardown_sys_pipe(ipa3_dma_ctx->ipa_dma_async_cons_hdl);
 	if (res)
 		IPADMA_ERR("teardown IPADMA ASYNC CONS failed\n");
 	ipa3_dma_ctx->ipa_dma_async_cons_hdl = 0;
-	res = ipa3_teardown_sys_pipe(ipa3_dma_ctx->ipa_dma_sync_cons_hdl);
+	res = ipa_teardown_sys_pipe(ipa3_dma_ctx->ipa_dma_sync_cons_hdl);
 	if (res)
 		IPADMA_ERR("teardown IPADMA SYNC CONS failed\n");
 	ipa3_dma_ctx->ipa_dma_sync_cons_hdl = 0;
-	res = ipa3_teardown_sys_pipe(ipa3_dma_ctx->ipa_dma_async_prod_hdl);
+	res = ipa_teardown_sys_pipe(ipa3_dma_ctx->ipa_dma_async_prod_hdl);
 	if (res)
 		IPADMA_ERR("teardown IPADMA ASYNC PROD failed\n");
 	ipa3_dma_ctx->ipa_dma_async_prod_hdl = 0;
-	res = ipa3_teardown_sys_pipe(ipa3_dma_ctx->ipa_dma_sync_prod_hdl);
+	res = ipa_teardown_sys_pipe(ipa3_dma_ctx->ipa_dma_sync_prod_hdl);
 	if (res)
 		IPADMA_ERR("teardown IPADMA SYNC PROD failed\n");
 	ipa3_dma_ctx->ipa_dma_sync_prod_hdl = 0;
@@ -1067,12 +1072,13 @@ void ipa3_dma_destroy(void)
 	kfree(ipa3_dma_ctx);
 	ipa3_dma_ctx = NULL;
 
-	ipa3_dma_init_refcnt_ctrl->ref_cnt = 0;
+	ipa_dma_init_refcnt_ctrl->ref_cnt = 0;
 	IPADMA_FUNC_EXIT();
 
 completed:
-	mutex_unlock(&ipa3_dma_init_refcnt_ctrl->lock);
+	mutex_unlock(&ipa_dma_init_refcnt_ctrl->lock);
 }
+EXPORT_SYMBOL(ipa_dma_destroy);
 
 /**
  * ipa3_dma_async_memcpy_notify_cb() - Callback function which will be called
@@ -1093,7 +1099,7 @@ void ipa3_dma_async_memcpy_notify_cb(void *priv
 
 	IPADMA_FUNC_ENTRY();
 
-	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS);
+	ep_idx = ipa_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS);
 	if (ep_idx < 0) {
 		IPADMA_ERR("IPA Client mapping failed\n");
 		return;
@@ -1128,7 +1134,7 @@ static ssize_t ipa3_dma_debugfs_read(struct file *file, char __user *ubuf,
 {
 	int nbytes = 0;
 
-	if (!ipa3_dma_init_refcnt_ctrl) {
+	if (!ipa_dma_init_refcnt_ctrl) {
 		nbytes += scnprintf(&dbg_buff[nbytes],
 			IPADMA_MAX_MSG_LEN - nbytes,
 			"Setup was not done\n");
@@ -1140,12 +1146,12 @@ static ssize_t ipa3_dma_debugfs_read(struct file *file, char __user *ubuf,
 		nbytes += scnprintf(&dbg_buff[nbytes],
 			IPADMA_MAX_MSG_LEN - nbytes,
 			"Status:\n	Not initialized (ref_cnt=%d)\n",
-			ipa3_dma_init_refcnt_ctrl->ref_cnt);
+			ipa_dma_init_refcnt_ctrl->ref_cnt);
 	} else {
 		nbytes += scnprintf(&dbg_buff[nbytes],
 			IPADMA_MAX_MSG_LEN - nbytes,
 			"Status:\n	Initialized (ref_cnt=%d)\n",
-			ipa3_dma_init_refcnt_ctrl->ref_cnt);
+			ipa_dma_init_refcnt_ctrl->ref_cnt);
 		nbytes += scnprintf(&dbg_buff[nbytes],
 			IPADMA_MAX_MSG_LEN - nbytes,
 			"	%s (ref_cnt=%d)\n",

+ 49 - 43
drivers/platform/msm/ipa/ipa_v3/ipa_dp.c

@@ -627,7 +627,7 @@ int ipa3_send(struct ipa3_sys_context *sys,
 	if (unlikely(!in_atomic))
 		mem_flag = GFP_KERNEL;
 
-	gsi_ep_cfg = ipa3_get_gsi_ep_info(sys->ep->client);
+	gsi_ep_cfg = ipa_get_gsi_ep_info(sys->ep->client);
 	if (unlikely(!gsi_ep_cfg)) {
 		IPAERR("failed to get gsi EP config for client=%d\n",
 			sys->ep->client);
@@ -927,7 +927,7 @@ int ipa3_send_cmd(u16 num_desc, struct ipa3_desc *descr)
 	for (i = 0; i < num_desc; i++)
 		IPADBG("sending imm cmd %d\n", descr[i].opcode);
 
-	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
+	ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
 	if (-1 == ep_idx) {
 		IPAERR("Client %u is not mapped\n",
 			IPA_CLIENT_APPS_CMD_PROD);
@@ -997,7 +997,7 @@ int ipa3_send_cmd_timeout(u16 num_desc, struct ipa3_desc *descr, u32 timeout)
 	for (i = 0; i < num_desc; i++)
 		IPADBG("sending imm cmd %d\n", descr[i].opcode);
 
-	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
+	ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
 	if (-1 == ep_idx) {
 		IPAERR("Client %u is not mapped\n",
 			IPA_CLIENT_APPS_CMD_PROD);
@@ -1111,16 +1111,16 @@ void __ipa3_update_curr_poll_state(enum ipa_client_type client, int state)
 
 	switch (client) {
 		case IPA_CLIENT_APPS_WAN_COAL_CONS:
-			ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
+			ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
 			break;
 		case IPA_CLIENT_APPS_WAN_CONS:
-			ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+			ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
 			break;
 		case IPA_CLIENT_APPS_LAN_COAL_CONS:
-			ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+			ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
 			break;
 		case IPA_CLIENT_APPS_LAN_CONS:
-			ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_COAL_CONS);
+			ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_LAN_COAL_CONS);
 			break;
 		default:
 			break;
@@ -1331,7 +1331,7 @@ int ipa3_setup_tput_pipe(void)
 	sys_in.client = IPA_CLIENT_TPUT_CONS;
 	sys_in.desc_fifo_sz = IPA_SYS_TPUT_EP_DESC_FIFO_SZ;
 
-	ipa_ep_idx = ipa3_get_ep_mapping(sys_in.client);
+	ipa_ep_idx = ipa_get_ep_mapping(sys_in.client);
 	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
 		IPAERR("Invalid client.\n");
 		return -EFAULT;
@@ -1423,7 +1423,7 @@ static void ipa3_tasklet_find_freepage(unsigned long data)
 }
 
 /**
- * ipa3_setup_sys_pipe() - Setup an IPA GPI pipe and perform
+ * ipa_setup_sys_pipe() - Setup an IPA GPI pipe and perform
  * IPA EP configuration
  * @sys_in:	[in] input needed to setup the pipe and configure EP
  * @clnt_hdl:	[out] client handle
@@ -1435,7 +1435,7 @@ static void ipa3_tasklet_find_freepage(unsigned long data)
  *
  * Returns:	0 on success, negative on failure
  */
-int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
+int ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
 {
 	struct ipa3_ep_context *ep;
 	int i, ipa_ep_idx;
@@ -1473,8 +1473,8 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
 		goto fail_gen;
 	}
 
-	wan_coal_ep_id = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
-	lan_coal_ep_id = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_COAL_CONS);
+	wan_coal_ep_id = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+	lan_coal_ep_id = ipa_get_ep_mapping(IPA_CLIENT_APPS_LAN_COAL_CONS);
 
 	/* save the input config parameters */
 	if (IPA_CLIENT_IS_APPS_COAL_CONS(sys_in->client))
@@ -1844,7 +1844,7 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
 			if (!sys_in->ext_ioctl_v2) {
 				sys_in->client = IPA_CLIENT_APPS_WAN_CONS;
 				sys_in->ipa_ep_cfg = ep_cfg_copy;
-				result = ipa3_setup_sys_pipe(sys_in, &wan_handle);
+				result = ipa_setup_sys_pipe(sys_in, &wan_handle);
 			}
 
 		} else { /* (sys_in->client == IPA_CLIENT_APPS_LAN_COAL_CONS) */
@@ -1855,7 +1855,7 @@ int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
 				sys_in->client = IPA_CLIENT_APPS_LAN_CONS;
 				sys_in->ipa_ep_cfg = ep_cfg_copy;
 				sys_in->notify = ipa3_lan_rx_cb;
-				result = ipa3_setup_sys_pipe(sys_in, &lan_handle);
+				result = ipa_setup_sys_pipe(sys_in, &lan_handle);
 			}
 		}
 
@@ -1917,6 +1917,7 @@ fail_gen:
 	IPA_STATS_INC_CNT(ipa3_ctx->stats.pipe_setup_fail_cnt);
 	return result;
 }
+EXPORT_SYMBOL(ipa_setup_sys_pipe);
 
 static void delete_avail_tx_wrapper_list(struct ipa3_ep_context *ep)
 {
@@ -1935,12 +1936,12 @@ static void delete_avail_tx_wrapper_list(struct ipa3_ep_context *ep)
 }
 
 /**
- * ipa3_teardown_sys_pipe() - Teardown the GPI pipe and cleanup IPA EP
- * @clnt_hdl:	[in] the handle obtained from ipa3_setup_sys_pipe
+ * ipa_teardown_sys_pipe() - Teardown the GPI pipe and cleanup IPA EP
+ * @clnt_hdl:	[in] the handle obtained from ipa_setup_sys_pipe
  *
  * Returns:	0 on success, negative on failure
  */
-int ipa3_teardown_sys_pipe(u32 clnt_hdl)
+int ipa_teardown_sys_pipe(u32 clnt_hdl)
 {
 	struct ipa3_ep_context *ep;
 	int empty;
@@ -1993,7 +1994,7 @@ int ipa3_teardown_sys_pipe(u32 clnt_hdl)
 
 	/* channel stop might fail on timeout if IPA is busy */
 	for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
-		result = ipa3_stop_gsi_channel(clnt_hdl);
+		result = ipa_stop_gsi_channel(clnt_hdl);
 		if (result == GSI_STATUS_SUCCESS)
 			break;
 
@@ -2147,6 +2148,7 @@ int ipa3_teardown_sys_pipe(u32 clnt_hdl)
 
 	return 0;
 }
+EXPORT_SYMBOL(ipa_teardown_sys_pipe);
 
 /**
  * ipa3_teardown_pipe()
@@ -2156,7 +2158,7 @@ int ipa3_teardown_sys_pipe(u32 clnt_hdl)
  *   with the passed client handle and the endpoint context that the
  *   handle represents.
  *
- * @clnt_hdl:  [in] A handle obtained from ipa3_setup_sys_pipe
+ * @clnt_hdl:  [in] A handle obtained from ipa_setup_sys_pipe
  *
  * Returns:	0 on success, negative on failure
  */
@@ -2172,7 +2174,7 @@ static int ipa3_teardown_pipe(u32 clnt_hdl)
 
 	/* channel stop might fail on timeout if IPA is busy */
 	for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
-		result = ipa3_stop_gsi_channel(clnt_hdl);
+		result = ipa_stop_gsi_channel(clnt_hdl);
 		if (result == GSI_STATUS_SUCCESS)
 			break;
 
@@ -2251,7 +2253,7 @@ void ipa3_tx_cmd_comp(void *user1, int user2)
 }
 
 /**
- * ipa3_tx_dp() - Data-path tx handler
+ * ipa_tx_dp() - Data-path tx handler
  * @dst:	[in] which IPA destination to route tx packets to
  * @skb:	[in] the packet to send
  * @metadata:	[in] TX packet meta-data
@@ -2274,7 +2276,7 @@ void ipa3_tx_cmd_comp(void *user1, int user2)
  *
  * Returns:	0 on success, negative on failure
  */
-int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
+int ipa_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
 		struct ipa_tx_meta *meta)
 {
 	struct ipa3_desc *desc;
@@ -2307,15 +2309,15 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
 	 *
 	 */
 	if (IPA_CLIENT_IS_CONS(dst)) {
-		src_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_PROD);
+		src_ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_LAN_PROD);
 		if (-1 == src_ep_idx) {
 			IPAERR("Client %u is not mapped\n",
 				IPA_CLIENT_APPS_LAN_PROD);
 			goto fail_gen;
 		}
-		dst_ep_idx = ipa3_get_ep_mapping(dst);
+		dst_ep_idx = ipa_get_ep_mapping(dst);
 	} else {
-		src_ep_idx = ipa3_get_ep_mapping(dst);
+		src_ep_idx = ipa_get_ep_mapping(dst);
 		if (-1 == src_ep_idx) {
 			IPAERR("Client %u is not mapped\n", dst);
 			goto fail_gen;
@@ -2333,14 +2335,14 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
 		goto fail_pipe_not_valid;
 	}
 
-	trace_ipa3_tx_dp(skb,sys->ep->client);
+	trace_ipa_tx_dp(skb,sys->ep->client);
 	num_frags = skb_shinfo(skb)->nr_frags;
 	/*
 	 * make sure TLV FIFO supports the needed frags.
 	 * 2 descriptors are needed for IP_PACKET_INIT and TAG_STATUS.
 	 * 1 descriptor needed for the linear portion of skb.
 	 */
-	gsi_ep = ipa3_get_gsi_ep_info(ipa3_ctx->ep[src_ep_idx].client);
+	gsi_ep = ipa_get_gsi_ep_info(ipa3_ctx->ep[src_ep_idx].client);
 	if (unlikely(gsi_ep == NULL)) {
 		IPAERR("failed to get EP %d GSI info\n", src_ep_idx);
 		goto fail_gen;
@@ -2525,6 +2527,7 @@ fail_gen:
 fail_pipe_not_valid:
 	return -EPIPE;
 }
+EXPORT_SYMBOL(ipa_tx_dp);
 
 static void ipa3_wq_handle_rx(struct work_struct *work)
 {
@@ -2840,7 +2843,7 @@ static struct ipa3_rx_pkt_wrapper * ipa3_get_free_page
 	return NULL;
 }
 
-int ipa3_register_notifier(void *fn_ptr)
+int ipa_register_notifier(void *fn_ptr)
 {
 	if (fn_ptr == NULL)
 		return -EFAULT;
@@ -2861,8 +2864,9 @@ int ipa3_register_notifier(void *fn_ptr)
 	spin_unlock(&ipa3_ctx->notifier_lock);
 	return 0;
 }
+EXPORT_SYMBOL(ipa_register_notifier);
 
-int ipa3_unregister_notifier(void *fn_ptr)
+int ipa_unregister_notifier(void *fn_ptr)
 {
 	if (fn_ptr == NULL)
 		return -EFAULT;
@@ -2876,6 +2880,7 @@ int ipa3_unregister_notifier(void *fn_ptr)
 	spin_unlock(&ipa3_ctx->notifier_lock);
 	return 0;
 }
+EXPORT_SYMBOL(ipa_unregister_notifier);
 
  static void ipa3_replenish_rx_page_recycle(struct ipa3_sys_context *sys)
 {
@@ -4234,7 +4239,7 @@ static int ipa3_wan_rx_pyld_hdlr(struct sk_buff *skb,
 			IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_aggr_close);
 			continue;
 		}
-		ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
+		ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
 		if (status.endp_dest_idx != ep_idx) {
 			IPAERR("expected endp_dest_idx %d received %d\n",
 					ep_idx, status.endp_dest_idx);
@@ -4316,7 +4321,7 @@ static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags)
 	return __dev_alloc_skb(len, flags);
 }
 
-static void ipa3_free_skb_rx(struct sk_buff *skb)
+static void ipa_free_skb_rx(struct sk_buff *skb)
 {
 	dev_kfree_skb_any(skb);
 }
@@ -5466,7 +5471,7 @@ static void ipa3_rx_napi_chain(struct ipa3_sys_context *sys,
 					 * For coalescing, we have 2 transfer
 					 * rings to replenish
 					 */
-					ipa_ep_idx = ipa3_get_ep_mapping(
+					ipa_ep_idx = ipa_get_ep_mapping(
 						IPA_CLIENT_APPS_WAN_CONS);
 					if (ipa_ep_idx ==
 						IPA_EP_NOT_ALLOCATED) {
@@ -5528,7 +5533,7 @@ static void ipa3_wlan_wq_rx_common(struct ipa3_sys_context *sys,
 	rx_skb->truesize = rx_pkt_expected->len + sizeof(struct sk_buff);
 	sys->ep->wstats.tx_pkts_rcvd++;
 	if (sys->len <= IPA_WLAN_RX_POOL_SZ_LOW_WM) {
-		ipa3_free_skb(&rx_pkt_expected->data);
+		ipa_free_skb(&rx_pkt_expected->data);
 		sys->ep->wstats.tx_pkts_dropped++;
 	} else {
 		sys->ep->wstats.tx_pkts_sent++;
@@ -5656,7 +5661,7 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
 		 */
 		sys->ep->status.status_en = true;
 		sys->ep->status.status_ep =
-			ipa3_get_ep_mapping(IPA_CLIENT_Q6_WAN_CONS);
+			ipa_get_ep_mapping(IPA_CLIENT_Q6_WAN_CONS);
 		/* Enable status supression to disable sending status for
 		 * every packet.
 		 */
@@ -5700,7 +5705,7 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
 			sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ(
 				IPA_GENERIC_RX_BUFF_BASE_SZ);
 			sys->get_skb = ipa3_get_skb_ipa_rx;
-			sys->free_skb = ipa3_free_skb_rx;
+			sys->free_skb = ipa_free_skb_rx;
 			if (IPA_CLIENT_IS_APPS_COAL_CONS(in->client))
 				in->ipa_ep_cfg.aggr.aggr = IPA_COALESCE;
 			else
@@ -5805,7 +5810,7 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
 			sys->pyld_hdlr = NULL;
 			sys->repl_hdlr = ipa3_replenish_wlan_rx_cache;
 			sys->get_skb = ipa3_get_skb_ipa_rx;
-			sys->free_skb = ipa3_free_skb_rx;
+			sys->free_skb = ipa_free_skb_rx;
 			sys->free_rx_wrapper = ipa3_free_rx_wrapper;
 			in->ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
 		} else if (IPA_CLIENT_IS_ODU_CONS(in->client)) {
@@ -5825,7 +5830,7 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
 				sys->rx_pool_sz = IPA_ODU_RX_POOL_SZ;
 			sys->pyld_hdlr = ipa3_odu_rx_pyld_hdlr;
 			sys->get_skb = ipa3_get_skb_ipa_rx;
-			sys->free_skb = ipa3_free_skb_rx;
+			sys->free_skb = ipa_free_skb_rx;
 			/* recycle skb for GSB use case */
 			if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
 				sys->free_rx_wrapper =
@@ -5883,7 +5888,7 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
 				IPA_GENERIC_RX_BUFF_SZ(IPA_ODL_RX_BUFF_SZ);
 			sys->pyld_hdlr = ipa3_odl_dpl_rx_pyld_hdlr;
 			sys->get_skb = ipa3_get_skb_ipa_rx;
-			sys->free_skb = ipa3_free_skb_rx;
+			sys->free_skb = ipa_free_skb_rx;
 			sys->free_rx_wrapper = ipa3_recycle_rx_wrapper;
 			sys->repl_hdlr = ipa3_replenish_rx_cache_recycle;
 			sys->rx_pool_sz = in->desc_fifo_sz /
@@ -5983,7 +5988,7 @@ int ipa3_tx_dp_mul(enum ipa_client_type src,
 
 	spin_lock_bh(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
 
-	ep_idx = ipa3_get_ep_mapping(src);
+	ep_idx = ipa_get_ep_mapping(src);
 	if (unlikely(ep_idx == -1)) {
 		IPAERR("dest EP does not exist.\n");
 		goto fail_send;
@@ -6063,7 +6068,7 @@ fail_send:
 
 }
 
-void ipa3_free_skb(struct ipa_rx_data *data)
+void ipa_free_skb(struct ipa_rx_data *data)
 {
 	struct ipa3_rx_pkt_wrapper *rx_pkt;
 
@@ -6081,6 +6086,7 @@ void ipa3_free_skb(struct ipa_rx_data *data)
 
 	spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
 }
+EXPORT_SYMBOL(ipa_free_skb);
 
 /* Functions added to support kernel tests */
 
@@ -6106,7 +6112,7 @@ int ipa3_sys_setup(struct ipa_sys_connect_params *sys_in,
 		goto fail_gen;
 	}
 
-	ipa_ep_idx = ipa3_get_ep_mapping(sys_in->client);
+	ipa_ep_idx = ipa_get_ep_mapping(sys_in->client);
 	if (ipa_ep_idx == -1) {
 		IPAERR("Invalid client :%d\n", sys_in->client);
 		goto fail_gen;
@@ -6766,7 +6772,7 @@ static int ipa_gsi_setup_transfer_ring(struct ipa3_ep_context *ep,
 			gsi_channel_props.max_re_expected = ep->sys->rx_pool_sz;
 	}
 
-	gsi_ep_info = ipa3_get_gsi_ep_info(ep->client);
+	gsi_ep_info = ipa_get_gsi_ep_info(ep->client);
 	if (!gsi_ep_info) {
 		IPAERR("Failed getting GSI EP info for client=%d\n",
 		       ep->client);
@@ -7100,7 +7106,7 @@ int ipa3_rx_poll(u32 clnt_hdl, int weight)
 		return cnt;
 	}
 
-	ipa_ep_idx = ipa3_get_ep_mapping(
+	ipa_ep_idx = ipa_get_ep_mapping(
 		IPA_CLIENT_APPS_WAN_CONS);
 	if (ipa_ep_idx ==
 		IPA_EP_NOT_ALLOCATED) {

+ 10 - 10
drivers/platform/msm/ipa/ipa_v3/ipa_eth_i.c

@@ -4,7 +4,7 @@
  */
 #include "ipa_i.h"
 #include <linux/if_vlan.h>
-#include <linux/ipa_eth.h>
+#include "ipa_eth.h"
 #include <linux/log2.h>
 
 #define IPA_ETH_RTK_MODT (32)
@@ -361,7 +361,7 @@ static int ipa_eth_setup_rtk_gsi_channel(
 		gsi_channel_props.dir = GSI_CHAN_DIR_FROM_GSI;
 	else
 		gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI;
-	gsi_ep_info = ipa3_get_gsi_ep_info(ep->client);
+	gsi_ep_info = ipa_get_gsi_ep_info(ep->client);
 	if (!gsi_ep_info) {
 		IPAERR("Failed getting GSI EP info for client=%d\n",
 		       ep->client);
@@ -651,7 +651,7 @@ static int ipa_eth_setup_aqc_gsi_channel(
 		gsi_channel_props.dir = GSI_CHAN_DIR_FROM_GSI;
 	else
 		gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI;
-	gsi_ep_info = ipa3_get_gsi_ep_info(ep->client);
+	gsi_ep_info = ipa_get_gsi_ep_info(ep->client);
 	if (!gsi_ep_info) {
 		IPAERR("Failed getting GSI EP info for client=%d\n",
 		       ep->client);
@@ -782,7 +782,7 @@ static int ipa_eth_setup_ntn_gsi_channel(
 		gsi_channel_props.dir = GSI_CHAN_DIR_FROM_GSI;
 	else
 		gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI;
-	gsi_ep_info = ipa3_get_gsi_ep_info(ep->client);
+	gsi_ep_info = ipa_get_gsi_ep_info(ep->client);
 	if (!gsi_ep_info) {
 		IPAERR("Failed getting GSI EP info for client=%d\n",
 			ep->client);
@@ -935,26 +935,26 @@ int ipa3_eth_connect(
 
 	/* multiple attach support */
 	if (strnstr(net_dev->name, STR_ETH0_IFACE, strlen(net_dev->name))) {
-		result = ipa3_is_vlan_mode(IPA_VLAN_IF_ETH0, &vlan_mode);
+		result = ipa_is_vlan_mode(IPA_VLAN_IF_ETH0, &vlan_mode);
 		if (result) {
 			IPAERR("Could not determine IPA VLAN mode\n");
 			return result;
 		}
 	} else if (strnstr(net_dev->name, STR_ETH1_IFACE, strlen(net_dev->name))) {
-		result = ipa3_is_vlan_mode(IPA_VLAN_IF_ETH1, &vlan_mode);
+		result = ipa_is_vlan_mode(IPA_VLAN_IF_ETH1, &vlan_mode);
 		if (result) {
 			IPAERR("Could not determine IPA VLAN mode\n");
 			return result;
 		}
 	} else {
-		result = ipa3_is_vlan_mode(IPA_VLAN_IF_ETH, &vlan_mode);
+		result = ipa_is_vlan_mode(IPA_VLAN_IF_ETH, &vlan_mode);
 		if (result) {
 			IPAERR("Could not determine IPA VLAN mode\n");
 			return result;
 		}
 	}
 #else
-	result = ipa3_is_vlan_mode(IPA_VLAN_IF_ETH, &vlan_mode);
+	result = ipa_is_vlan_mode(IPA_VLAN_IF_ETH, &vlan_mode);
 	if (result) {
 		IPAERR("Could not determine IPA VLAN mode\n");
 		return result;
@@ -1276,7 +1276,7 @@ config_uc_fail:
 			ipa3_ctx->gsi_info[prot]);
 	}
 uc_init_peripheral_fail:
-	ipa3_stop_gsi_channel(ep->gsi_chan_hdl);
+	ipa_stop_gsi_channel(ep->gsi_chan_hdl);
 start_channel_fail:
 	ipa3_disable_data_path(ep_idx);
 enable_data_path_fail:
@@ -1336,7 +1336,7 @@ int ipa3_eth_disconnect(
 			ipa3_ctx->gsi_info[prot]);
 	}
 	/* stop gsi channel */
-	result = ipa3_stop_gsi_channel(ep_idx);
+	result = ipa_stop_gsi_channel(ep_idx);
 	if (result) {
 		IPAERR("failed to stop gsi channel %d\n", ep_idx);
 		result = -EFAULT;

+ 5 - 5
drivers/platform/msm/ipa/ipa_v3/ipa_flt.c

@@ -453,14 +453,14 @@ static bool ipa_flt_skip_pipe_config(int pipe)
 
 	ep = &ipa3_ctx->ep[pipe];
 
-	if ((ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_PROD) == pipe
+	if ((ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_PROD) == pipe
 		&& ipa3_ctx->modem_cfg_emb_pipe_flt)
 		&& ep->client == IPA_CLIENT_APPS_WAN_PROD) {
 		IPADBG_LOW("skip %d\n", pipe);
 		return true;
 	}
 
-	if ((ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD) == pipe
+	if ((ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD) == pipe
 		&& ipa3_ctx->modem_cfg_emb_pipe_flt)
 		&& ep->client == IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD) {
 		IPADBG_LOW("skip %d\n", pipe);
@@ -608,11 +608,11 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip)
 	}
 
 	/* IC to close the coal frame before HPS Clear if coal is enabled */
-	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1
+	if (ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1
 		&& !ipa3_ctx->ulso_wa) {
 		u32 offset = 0;
 
-		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+		i = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
 		reg_write_coal_close.skip_pipeline_clear = false;
 		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
 		if (ipa3_ctx->ipa_hw_type < IPA_HW_v5_0)
@@ -1196,7 +1196,7 @@ error:
 
 static int __ipa_add_flt_get_ep_idx(enum ipa_client_type ep, int *ipa_ep_idx)
 {
-	*ipa_ep_idx = ipa3_get_ep_mapping(ep);
+	*ipa_ep_idx = ipa_get_ep_mapping(ep);
 	if (*ipa_ep_idx < 0 || *ipa_ep_idx >= ipa3_get_max_num_pipes()) {
 		IPAERR_RL("ep not valid ep=%d\n", ep);
 		return -EINVAL;

+ 15 - 11
drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c

@@ -80,7 +80,7 @@ static int ipa3_hdr_proc_ctx_to_hw_format(struct ipa_mem_buffer *mem,
 			entry->type, entry->offset_entry->offset);
 
 		if (entry->l2tp_params.is_dst_pipe_valid) {
-			ep = ipa3_get_ep_mapping(entry->l2tp_params.dst_pipe);
+			ep = ipa_get_ep_mapping(entry->l2tp_params.dst_pipe);
 
 			if (ep >= 0) {
 				cfg_ptr = &ipa3_ctx->ep[ep].cfg;
@@ -211,11 +211,11 @@ int __ipa_commit_hdr_v3_0(void)
 	}
 
 	/* IC to close the coal frame before HPS Clear if coal is enabled */
-	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1
+	if (ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1
 		&& !ipa3_ctx->ulso_wa) {
 		u32 offset = 0;
 
-		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+		i = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
 		reg_write_coal_close.skip_pipeline_clear = false;
 		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
 		if (ipa3_ctx->ipa_hw_type < IPA_HW_v5_0)
@@ -994,7 +994,7 @@ int ipa3_del_hdr_hpc(struct ipa_ioc_del_hdr *hdrs)
 EXPORT_SYMBOL(ipa3_del_hdr_hpc);
 
 /**
- * ipa3_add_hdr() - add the specified headers to SW and optionally commit them
+ * ipa_add_hdr() - add the specified headers to SW and optionally commit them
  * to IPA HW
  * @hdrs:	[inout] set of headers to add
  *
@@ -1002,11 +1002,11 @@ EXPORT_SYMBOL(ipa3_del_hdr_hpc);
  *
  * Note:	Should not be called from atomic context
  */
-int ipa3_add_hdr(struct ipa_ioc_add_hdr *hdrs)
+int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs)
 {
 	return ipa3_add_hdr_usr(hdrs, false);
 }
-EXPORT_SYMBOL(ipa3_add_hdr);
+EXPORT_SYMBOL(ipa_add_hdr);
 
 /**
  * ipa3_add_hdr_usr() - add the specified headers to SW
@@ -1096,7 +1096,7 @@ bail:
 }
 
 /**
- * ipa3_del_hdr() - Remove the specified headers from SW
+ * ipa_del_hdr() - Remove the specified headers from SW
  * and optionally commit them to IPA HW
  * @hdls:	[inout] set of headers to delete
  *
@@ -1104,11 +1104,11 @@ bail:
  *
  * Note:	Should not be called from atomic context
  */
-int ipa3_del_hdr(struct ipa_ioc_del_hdr *hdls)
+int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls)
 {
 	return ipa3_del_hdr_by_user(hdls, false);
 }
-EXPORT_SYMBOL(ipa3_del_hdr);
+EXPORT_SYMBOL(ipa_del_hdr);
 
 /**
  * ipa3_add_hdr_proc_ctx() - add the specified headers to SW
@@ -1215,6 +1215,7 @@ int ipa3_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls)
 {
 	return ipa3_del_hdr_proc_ctx_by_user(hdls, false);
 }
+EXPORT_SYMBOL(ipa3_del_hdr_proc_ctx);
 
 /**
  * ipa3_commit_hdr() - commit to IPA HW the current header table in SW
@@ -1246,6 +1247,7 @@ bail:
 	mutex_unlock(&ipa3_ctx->lock);
 	return result;
 }
+EXPORT_SYMBOL(ipa3_commit_hdr);
 
 /**
  * ipa3_reset_hdr() - reset the current header table in SW (does not commit to
@@ -1419,6 +1421,7 @@ int ipa3_reset_hdr(bool user_only)
 	mutex_unlock(&ipa3_ctx->lock);
 	return 0;
 }
+EXPORT_SYMBOL(ipa3_reset_hdr);
 
 static struct ipa3_hdr_entry *__ipa_find_hdr(const char *name)
 {
@@ -1453,7 +1456,7 @@ static struct ipa3_hdr_proc_ctx_entry* __ipa_find_hdr_proc_ctx(const char *name)
 }
 
 /**
- * ipa3_get_hdr() - Lookup the specified header resource
+ * ipa_get_hdr() - Lookup the specified header resource
  * @lookup:	[inout] header to lookup and its handle
  *
  * lookup the specified header resource and return handle if it exists
@@ -1463,7 +1466,7 @@ static struct ipa3_hdr_proc_ctx_entry* __ipa_find_hdr_proc_ctx(const char *name)
  * Note:	Should not be called from atomic context
  *		Caller should call ipa3_put_hdr later if this function succeeds
  */
-int ipa3_get_hdr(struct ipa_ioc_get_hdr *lookup)
+int ipa_get_hdr(struct ipa_ioc_get_hdr *lookup)
 {
 	struct ipa3_hdr_entry *entry;
 	int result = -1;
@@ -1483,6 +1486,7 @@ int ipa3_get_hdr(struct ipa_ioc_get_hdr *lookup)
 
 	return result;
 }
+EXPORT_SYMBOL(ipa_get_hdr);
 
 /**
  * ipa3_get_hdr_offset() - Get the the offset of the specified header resource

+ 28 - 28
drivers/platform/msm/ipa/ipa_v3/ipa_hw_stats.c

@@ -18,7 +18,7 @@
 static inline u32 ipa_hw_stats_get_ep_bit_n_idx(enum ipa_client_type client,
 	u32 *reg_idx)
 {
-	int ep = ipa3_get_ep_mapping(client);
+	int ep = ipa_get_ep_mapping(client);
 
 	if (ep == IPA_EP_NOT_ALLOCATED)
 		return 0;
@@ -87,7 +87,7 @@ int ipa_hw_stats_init(void)
 		if (ipa_hw_stats_get_ep_bit_n_idx(
 			IPA_CLIENT_MHI_PRIME_TETH_PROD,
 			&reg_idx)) {
-			ep_index = ipa3_get_ep_mapping(
+			ep_index = ipa_get_ep_mapping(
 				IPA_CLIENT_MHI_PRIME_TETH_PROD);
 			if (ep_index == -1) {
 				IPAERR("Invalid client.\n");
@@ -173,7 +173,7 @@ int ipa_hw_stats_init(void)
 		if (ipa_hw_stats_get_ep_bit_n_idx(
 			IPA_CLIENT_Q6_WAN_PROD,
 			&reg_idx)) {
-			ep_index = ipa3_get_ep_mapping(IPA_CLIENT_Q6_WAN_PROD);
+			ep_index = ipa_get_ep_mapping(IPA_CLIENT_Q6_WAN_PROD);
 			if (ep_index == -1) {
 				IPAERR("Invalid client.\n");
 				ret = -EINVAL;
@@ -220,7 +220,7 @@ int ipa_hw_stats_init(void)
 		if (ipa_hw_stats_get_ep_bit_n_idx(
 			IPA_CLIENT_Q6_DL_NLO_DATA_PROD,
 			&reg_idx) && (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)) {
-			ep_index = ipa3_get_ep_mapping(
+			ep_index = ipa_get_ep_mapping(
 				IPA_CLIENT_Q6_DL_NLO_DATA_PROD);
 			if (ep_index == -1) {
 				IPAERR("Invalid client.\n");
@@ -267,7 +267,7 @@ int ipa_hw_stats_init(void)
 		if (ipa_hw_stats_get_ep_bit_n_idx(
 			IPA_CLIENT_Q6_DL_NLO_LL_DATA_PROD,
 			&reg_idx) && (ipa3_ctx->ipa_hw_type >= IPA_HW_v5_0)) {
-			ep_index = ipa3_get_ep_mapping(
+			ep_index = ipa_get_ep_mapping(
 					IPA_CLIENT_Q6_DL_NLO_LL_DATA_PROD);
 			if (ep_index == -1) {
 				IPAERR("Invalid client.\n");
@@ -315,7 +315,7 @@ int ipa_hw_stats_init(void)
 	if (ipa_hw_stats_get_ep_bit_n_idx(
 		IPA_CLIENT_USB_PROD,
 		&reg_idx)) {
-		ep_index = ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD);
+		ep_index = ipa_get_ep_mapping(IPA_CLIENT_USB_PROD);
 		if (ep_index == -1) {
 			IPAERR("Invalid client.\n");
 			ret = -EINVAL;
@@ -344,7 +344,7 @@ int ipa_hw_stats_init(void)
 	if (ipa_hw_stats_get_ep_bit_n_idx(
 		IPA_CLIENT_WLAN1_PROD,
 		&reg_idx)) {
-		ep_index = ipa3_get_ep_mapping(IPA_CLIENT_WLAN1_PROD);
+		ep_index = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_PROD);
 		if (ep_index == -1) {
 			IPAERR("Invalid client.\n");
 			ret = -EINVAL;
@@ -373,7 +373,7 @@ int ipa_hw_stats_init(void)
 	if (ipa_hw_stats_get_ep_bit_n_idx(
 		IPA_CLIENT_WLAN2_PROD,
 		&reg_idx)) {
-		ep_index = ipa3_get_ep_mapping(IPA_CLIENT_WLAN2_PROD);
+		ep_index = ipa_get_ep_mapping(IPA_CLIENT_WLAN2_PROD);
 		if (ep_index == -1) {
 			IPAERR("Invalid client.\n");
 			ret = -EINVAL;
@@ -402,7 +402,7 @@ int ipa_hw_stats_init(void)
 	if (ipa_hw_stats_get_ep_bit_n_idx(
 		IPA_CLIENT_WIGIG_PROD,
 		&reg_idx)) {
-		ep_index = ipa3_get_ep_mapping(IPA_CLIENT_WIGIG_PROD);
+		ep_index = ipa_get_ep_mapping(IPA_CLIENT_WIGIG_PROD);
 		if (ep_index == -1) {
 			IPAERR("Invalid client.\n");
 			ret = -EINVAL;
@@ -453,7 +453,7 @@ static void ipa_close_coal_frame(struct ipahal_imm_cmd_pyld **coal_cmd_pyld)
 	struct ipahal_imm_cmd_register_write reg_write_coal_close;
 	u32 offset = 0;
 
-	i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+	i = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
 	reg_write_coal_close.skip_pipeline_clear = false;
 	reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
 	if (ipa3_ctx->ipa_hw_type < IPA_HW_v5_0)
@@ -546,7 +546,7 @@ int ipa_init_quota_stats(u32 *pipe_bitmask)
 	}
 
 	/* IC to close the coal frame before HPS Clear if coal is enabled */
-	ipa_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+	ipa_ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
 	if (ipa_ep_idx != IPA_EP_NOT_ALLOCATED && !ipa3_ctx->ulso_wa) {
 		ipa_close_coal_frame(&coal_cmd_pyld);
 		if (!coal_cmd_pyld) {
@@ -717,7 +717,7 @@ int ipa_get_quota_stats(struct ipa_quota_stats_all *out)
 	}
 
 	/* IC to close the coal frame before HPS Clear if coal is enabled */
-	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
+	if (ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
 		IPA_EP_NOT_ALLOCATED && !ipa3_ctx->ulso_wa) {
 		ipa_close_coal_frame(&cmd_pyld[num_cmd]);
 		if (!cmd_pyld[num_cmd]) {
@@ -772,7 +772,7 @@ int ipa_get_quota_stats(struct ipa_quota_stats_all *out)
 	 * hardware stats are 0 now
 	 */
 	for (i = 0; i < IPA_CLIENT_MAX; i++) {
-		int ep_idx = ipa3_get_ep_mapping(i);
+		int ep_idx = ipa_get_ep_mapping(i);
 
 		if (ep_idx == -1 || ep_idx >= ipa3_get_max_num_pipes())
 			continue;
@@ -941,7 +941,7 @@ int ipa_init_teth_stats(struct ipa_teth_stats_endpoints *in)
 	}
 
 	/* IC to close the coal frame before HPS Clear if coal is enabled */
-	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
+	if (ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
 		IPA_EP_NOT_ALLOCATED && !ipa3_ctx->ulso_wa) {
 		ipa_close_coal_frame(&coal_cmd_pyld);
 		if (!coal_cmd_pyld) {
@@ -1125,7 +1125,7 @@ int ipa_get_teth_stats(void)
 	}
 
 	/* IC to close the coal frame before HPS Clear if coal is enabled */
-	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
+	if (ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
 		IPA_EP_NOT_ALLOCATED && !ipa3_ctx->ulso_wa) {
 		ipa_close_coal_frame(&cmd_pyld[num_cmd]);
 		if (!cmd_pyld[num_cmd]) {
@@ -1188,8 +1188,8 @@ int ipa_get_teth_stats(void)
 	 */
 	for (i = 0; i < IPA_CLIENT_MAX; i++) {
 		for (j = 0; j < IPA_CLIENT_MAX; j++) {
-			int prod_idx = ipa3_get_ep_mapping(i);
-			int cons_idx = ipa3_get_ep_mapping(j);
+			int prod_idx = ipa_get_ep_mapping(i);
+			int cons_idx = ipa_get_ep_mapping(j);
 
 			if (prod_idx == -1 ||
 				prod_idx >= ipa3_get_max_num_pipes())
@@ -1266,7 +1266,7 @@ int ipa_query_teth_stats(enum ipa_client_type prod,
 		ipa3_ctx->hw_stats->teth_stats_enabled))
 		return 0;
 
-	if (!IPA_CLIENT_IS_PROD(prod) || ipa3_get_ep_mapping(prod) == -1) {
+	if (!IPA_CLIENT_IS_PROD(prod) || ipa_get_ep_mapping(prod) == -1) {
 		IPAERR("invalid prod %d\n", prod);
 		return -EINVAL;
 	}
@@ -1349,7 +1349,7 @@ int ipa_reset_all_teth_stats(void)
 
 	/* reading stats will reset them in hardware */
 	for (i = 0; i < IPA_CLIENT_MAX; i++) {
-		if (IPA_CLIENT_IS_PROD(i) && ipa3_get_ep_mapping(i) != -1) {
+		if (IPA_CLIENT_IS_PROD(i) && ipa_get_ep_mapping(i) != -1) {
 			ret = ipa_get_teth_stats();
 			if (ret) {
 				IPAERR("ipa_get_teth_stats failed %d\n", ret);
@@ -1422,7 +1422,7 @@ int ipa_init_flt_rt_stats(void)
 	}
 
 	/* IC to close the coal frame before HPS Clear if coal is enabled */
-	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
+	if (ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
 		IPA_EP_NOT_ALLOCATED && !ipa3_ctx->ulso_wa) {
 		ipa_close_coal_frame(&coal_cmd_pyld);
 		if (!coal_cmd_pyld) {
@@ -1621,7 +1621,7 @@ static int __ipa_get_flt_rt_stats(struct ipa_ioc_flt_rt_query *query)
 	}
 
 	/* IC to close the coal frame before HPS Clear if coal is enabled */
-	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
+	if (ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
 		IPA_EP_NOT_ALLOCATED && !ipa3_ctx->ulso_wa) {
 		ipa_close_coal_frame(&cmd_pyld[num_cmd]);
 		if (!cmd_pyld[num_cmd]) {
@@ -1956,7 +1956,7 @@ int ipa_init_drop_stats(u32 *pipe_bitmask)
 	}
 
 	/* IC to close the coal frame before HPS Clear if coal is enabled */
-	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
+	if (ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
 		IPA_EP_NOT_ALLOCATED && !ipa3_ctx->ulso_wa) {
 		ipa_close_coal_frame(&coal_cmd_pyld);
 		if (!coal_cmd_pyld) {
@@ -2130,7 +2130,7 @@ int ipa_get_drop_stats(struct ipa_drop_stats_all *out)
 	}
 
 	/* IC to close the coal frame before HPS Clear if coal is enabled */
-	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
+	if (ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) !=
 		IPA_EP_NOT_ALLOCATED && !ipa3_ctx->ulso_wa) {
 		ipa_close_coal_frame(&cmd_pyld[num_cmd]);
 		if (!cmd_pyld[num_cmd]) {
@@ -2185,7 +2185,7 @@ int ipa_get_drop_stats(struct ipa_drop_stats_all *out)
 	 * hardware stats are 0 now
 	 */
 	for (i = 0; i < IPA_CLIENT_MAX; i++) {
-		int ep_idx = ipa3_get_ep_mapping(i);
+		int ep_idx = ipa_get_ep_mapping(i);
 
 		if (ep_idx == -1 || ep_idx >= ipa3_get_max_num_pipes())
 			continue;
@@ -2320,7 +2320,7 @@ static ssize_t ipa_debugfs_print_quota_stats(struct file *file,
 		return res;
 	}
 	for (i = 0; i < IPA_CLIENT_MAX; i++) {
-		int ep_idx = ipa3_get_ep_mapping(i);
+		int ep_idx = ipa_get_ep_mapping(i);
 
 		if (ep_idx == -1)
 			continue;
@@ -2413,7 +2413,7 @@ static ssize_t ipa_debugfs_print_tethering_stats(struct file *file,
 	}
 
 	for (i = 0; i < IPA_CLIENT_MAX; i++) {
-		int ep_idx = ipa3_get_ep_mapping(i);
+		int ep_idx = ipa_get_ep_mapping(i);
 
 		if (ep_idx == -1)
 			continue;
@@ -2437,7 +2437,7 @@ static ssize_t ipa_debugfs_print_tethering_stats(struct file *file,
 		}
 
 		for (j = 0; j < IPA_CLIENT_MAX; j++) {
-			int cons_idx = ipa3_get_ep_mapping(j);
+			int cons_idx = ipa_get_ep_mapping(j);
 
 			if (cons_idx == -1)
 				continue;
@@ -2642,7 +2642,7 @@ static ssize_t ipa_debugfs_print_drop_stats(struct file *file,
 	}
 
 	for (i = 0; i < IPA_CLIENT_MAX; i++) {
-		int ep_idx = ipa3_get_ep_mapping(i);
+		int ep_idx = ipa_get_ep_mapping(i);
 
 		if (ep_idx == -1)
 			continue;

+ 8 - 124
drivers/platform/msm/ipa/ipa_v3/ipa_i.h

@@ -19,8 +19,9 @@
 #include <linux/notifier.h>
 #include <linux/interrupt.h>
 #include <linux/netdevice.h>
-#include <linux/ipa.h>
+#include "ipa.h"
 #include <linux/ipa_usb.h>
+#include "ipa_qdss.h"
 #include <linux/iommu.h>
 #include <linux/version.h>
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0))
@@ -40,7 +41,6 @@
 #include <linux/mailbox_client.h>
 #include <linux/mailbox/qmp.h>
 #include <linux/rmnet_ipa_fd_ioctl.h>
-#include <linux/ipa_fmwk.h>
 #include "ipa_uc_holb_monitor.h"
 #include <soc/qcom/minidump.h>
 
@@ -2962,8 +2962,6 @@ int ipa3_request_gsi_channel(struct ipa_request_gsi_channel_params *params,
 
 int ipa3_release_gsi_channel(u32 clnt_hdl);
 
-int ipa3_stop_gsi_channel(u32 clnt_hdl);
-
 int ipa3_reset_gsi_channel(u32 clnt_hdl);
 
 int ipa3_reset_gsi_event_ring(u32 clnt_hdl);
@@ -3034,16 +3032,10 @@ int ipa3_cfg_ep_metadata_mask(u32 clnt_hdl,
 int ipa3_cfg_ep_holb_by_client(enum ipa_client_type client,
 				const struct ipa_ep_cfg_holb *ipa_ep_cfg);
 
-int ipa3_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl);
-
 int ipa3_cfg_ep_ulso(u32 clnt_hdl, const struct ipa_ep_cfg_ulso *ep_ulso);
 
 int ipa3_setup_uc_act_tbl(void);
 
-int ipa3_add_socksv5_conn(struct ipa_socksv5_info *info);
-
-int ipa3_del_socksv5_conn(uint32_t handle);
-
 /*
  * Header removal / addition
  */
@@ -3074,8 +3066,6 @@ int ipa3_del_hdr_proc_ctx_by_user(struct ipa_ioc_del_hdr_proc_ctx *hdls,
 /*
  * Routing
  */
-int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules);
-
 int ipa3_add_rt_rule_ext(struct ipa_ioc_add_rt_rule_ext *rules);
 
 int ipa3_add_rt_rule_ext_v2(struct ipa_ioc_add_rt_rule_ext_v2 *rules,
@@ -3088,8 +3078,6 @@ int ipa3_add_rt_rule_after_v2(struct ipa_ioc_add_rt_rule_after_v2
 
 int ipa3_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup);
 
-int ipa3_put_rt_tbl(u32 rt_tbl_hdl);
-
 int ipa3_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in);
 
 int ipa3_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *rules);
@@ -3151,8 +3139,6 @@ bool lan_coal_enabled( void );
 /*
  * Messaging
  */
-int ipa3_send_msg(struct ipa_msg_meta *meta, void *buff,
-		  ipa_msg_free_fn callback);
 int ipa3_resend_wlan_msg(void);
 int ipa3_register_pull_msg(struct ipa_msg_meta *meta, ipa_msg_pull_fn callback);
 int ipa3_deregister_pull_msg(struct ipa_msg_meta *meta);
@@ -3160,27 +3146,9 @@ int ipa3_deregister_pull_msg(struct ipa_msg_meta *meta);
 /*
  * Interface
  */
-int ipa3_register_intf(const char *name, const struct ipa_tx_intf *tx,
-		       const struct ipa_rx_intf *rx);
 int ipa3_register_intf_ext(const char *name, const struct ipa_tx_intf *tx,
 		       const struct ipa_rx_intf *rx,
 		       const struct ipa_ext_intf *ext);
-int ipa3_deregister_intf(const char *name);
-
-/*
- * Aggregation
- */
-int ipa3_set_aggr_mode(enum ipa_aggr_mode mode);
-
-int ipa3_set_qcncm_ndp_sig(char sig[3]);
-
-int ipa3_set_single_ndp_per_mbim(bool enable);
-
-/*
- * Data path
- */
-int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
-		struct ipa_tx_meta *metadata);
 
 /*
  * To transfer multiple data packets
@@ -3190,24 +3158,10 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
 int ipa3_tx_dp_mul(enum ipa_client_type dst,
 			struct ipa_tx_data_desc *data_desc);
 
-void ipa3_free_skb(struct ipa_rx_data *data);
-
 /*
  * System pipes
  */
 int ipa3_setup_tput_pipe(void);
-
-int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl);
-
-int ipa3_teardown_sys_pipe(u32 clnt_hdl);
-
-int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in,
-		struct ipa_wdi_out_params *out);
-int ipa3_connect_gsi_wdi_pipe(struct ipa_wdi_in_params *in,
-		struct ipa_wdi_out_params *out);
-
-int ipa3_disconnect_wdi_pipe(u32 clnt_hdl);
-int ipa3_enable_wdi_pipe(u32 clnt_hdl);
 int ipa_pm_wrapper_wdi_set_perf_profile_internal(struct ipa_wdi_perf_profile *profile);
 int ipa_pm_wrapper_connect_wdi_pipe(struct ipa_wdi_in_params *in,
 			struct ipa_wdi_out_params *out);
@@ -3215,12 +3169,9 @@ int ipa_pm_wrapper_disconnect_wdi_pipe(u32 clnt_hdl);
 int ipa_pm_wrapper_enable_wdi_pipe(u32 clnt_hdl);
 int ipa_pm_wrapper_disable_pipe(u32 clnt_hdl);
 int ipa3_enable_gsi_wdi_pipe(u32 clnt_hdl);
-int ipa3_disable_wdi_pipe(u32 clnt_hdl);
 int ipa3_disable_gsi_wdi_pipe(u32 clnt_hdl);
 int ipa3_disconnect_gsi_wdi_pipe(u32 clnt_hdl);
-int ipa3_resume_wdi_pipe(u32 clnt_hdl);
 int ipa3_resume_gsi_wdi_pipe(u32 clnt_hdl);
-int ipa3_suspend_wdi_pipe(u32 clnt_hdl);
 int ipa3_get_wdi_gsi_stats(struct ipa_uc_dbg_ring_stats *stats);
 int ipa3_get_wdi3_gsi_stats(struct ipa_uc_dbg_ring_stats *stats);
 int ipa3_get_usb_gsi_stats(struct ipa_uc_dbg_ring_stats *stats);
@@ -3228,18 +3179,10 @@ bool ipa_usb_is_teth_prot_connected(enum ipa_usb_teth_prot usb_teth_prot);
 int ipa3_get_aqc_gsi_stats(struct ipa_uc_dbg_ring_stats *stats);
 int ipa3_get_rtk_gsi_stats(struct ipa_uc_dbg_ring_stats *stats);
 int ipa3_get_ntn_gsi_stats(struct ipa_uc_dbg_ring_stats *stats);
-int ipa3_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats);
 u16 ipa3_get_smem_restr_bytes(void);
-int ipa3_broadcast_wdi_quota_reach_ind(uint32_t fid, uint64_t num_bytes);
 
 int ipa3_wigig_init_debugfs_i(struct dentry *dent);
 
-/*
- * To retrieve doorbell physical address of
- * wlan pipes
- */
-int ipa3_uc_wdi_get_dbpa(struct ipa_wdi_db_params *out);
-
 /*
  * To register uC ready callback if uC not ready
  * and also check uC readiness
@@ -3280,44 +3223,12 @@ int ipa3_inform_wlan_bw(struct ipa_inform_wlan_bw *wdi_bw);
 /*
  * IPADMA
  */
-int ipa3_dma_init(void);
-
-int ipa3_dma_enable(void);
-
-int ipa3_dma_disable(void);
-
-int ipa3_dma_sync_memcpy(u64 dest, u64 src, int len);
-
-int ipa3_dma_async_memcpy(u64 dest, u64 src, int len,
-			void (*user_cb)(void *user1), void *user_param);
-
 int ipa3_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len);
 
-void ipa3_dma_destroy(void);
-
-/*
- * MHI
- */
-
-/*
- * mux id
- */
-
-/*
- * interrupts
- */
-int ipa3_add_interrupt_handler(enum ipa_irq_type interrupt,
-		ipa_irq_handler_t handler,
-		bool deferred_flag,
-		void *private_data);
-
 /*
  * Miscellaneous
  */
-int ipa3_get_ep_mapping(enum ipa_client_type client);
-int ipa3_get_ep_mapping_from_gsi(int ch_id);
-
-bool ipa3_is_ready(void);
+int ipa_get_ep_mapping_from_gsi(int ch_id);
 
 int ipa3_ctx_get_type(enum ipa_type_mode type);
 bool ipa3_ctx_get_flag(enum ipa_flag flag);
@@ -3341,8 +3252,6 @@ u8 ipa3_get_qmb_master_sel(enum ipa_client_type client);
 
 u8 ipa3_get_tx_instance(enum ipa_client_type client);
 
-bool ipa3_get_lan_rx_napi(void);
-
 bool ipa3_get_qmap_pipe_enable(void);
 
 struct device *ipa3_get_pdev(void);
@@ -3365,7 +3274,7 @@ int ipa3_send(struct ipa3_sys_context *sys,
 		u32 num_desc,
 		struct ipa3_desc *desc,
 		bool in_atomic);
-int ipa3_get_ep_mapping(enum ipa_client_type client);
+int ipa_get_ep_mapping(enum ipa_client_type client);
 int ipa_get_ep_group(enum ipa_client_type client);
 
 int ipa3_generate_hw_rule(enum ipa_ip_type ip,
@@ -3374,7 +3283,7 @@ int ipa3_generate_hw_rule(enum ipa_ip_type ip,
 			 u16 *en_rule);
 int ipa3_init_hw(void);
 struct ipa3_rt_tbl *__ipa3_find_rt_tbl(enum ipa_ip_type ip, const char *name);
-int ipa3_set_single_ndp_per_mbim(bool enable);
+int ipa_set_single_ndp_per_mbim(bool enable);
 void ipa3_debugfs_init(void);
 void ipa3_debugfs_remove(void);
 void ipa3_eth_debugfs_init(void);
@@ -3500,6 +3409,9 @@ int ipa3_write_qmapid_wdi3_gsi_pipe(u32 clnt_hdl, u8 qmap_id);
 int ipa3_tag_process(struct ipa3_desc *desc, int num_descs,
 		    unsigned long timeout);
 
+int ipa3_usb_init(void);
+void ipa3_usb_exit(void);
+
 void ipa3_q6_pre_shutdown_cleanup(void);
 void ipa3_q6_post_shutdown_cleanup(void);
 void ipa3_q6_pre_powerup_cleanup(void);
@@ -3539,11 +3451,8 @@ int ipa3_uc_add_holb_monitor(uint16_t gsi_ch, uint32_t action_mask,
 	uint32_t max_stuck_count, uint8_t ee);
 int ipa3_uc_del_holb_monitor(uint16_t gsi_ch, uint8_t ee);
 int ipa3_uc_disable_holb_monitor(void);
-int ipa3_uc_bw_monitor(struct ipa_wdi_bw_info *info);
 int ipa3_uc_setup_event_ring(void);
 void ipa3_tag_destroy_imm(void *user1, int user2);
-const struct ipa_gsi_ep_config *ipa3_get_gsi_ep_info
-	(enum ipa_client_type client);
 void ipa3_uc_rg10_write_reg(enum ipahal_reg_name reg, u32 n, u32 val);
 
 int ipa3_wigig_init_i(void);
@@ -3637,7 +3546,6 @@ int ipa3_rt_read_tbl_from_hw(u32 tbl_idx,
 	bool hashable,
 	struct ipahal_rt_rule_entry entry[],
 	int *num_entry);
-int ipa3_restore_suspend_handler(void);
 int ipa3_inject_dma_task_for_gsi(void);
 int ipa3_uc_panic_notifier(struct notifier_block *this,
 	unsigned long event, void *ptr);
@@ -3651,34 +3559,12 @@ int emulator_load_fws(
 	u32 transport_mem_size,
 	enum gsi_ver);
 int ipa3_rmnet_ctl_init(void);
-int ipa3_register_rmnet_ctl_cb(
-	void (*ipa_rmnet_ctl_ready_cb)(void *user_data1),
-	void *user_data1,
-	void (*ipa_rmnet_ctl_stop_cb)(void *user_data2),
-	void *user_data2,
-	void (*ipa_rmnet_ctl_rx_notify_cb)(
-	void *user_data3, void *rx_data),
-	void *user_data3);
-int ipa3_unregister_rmnet_ctl_cb(void);
-int ipa3_rmnet_ctl_xmit(struct sk_buff *skb);
 int ipa3_setup_apps_low_lat_prod_pipe(bool rmnet_config,
 	struct rmnet_egress_param *egress_param);
 int ipa3_setup_apps_low_lat_cons_pipe(bool rmnet_config,
 	struct rmnet_ingress_param *ingress_param);
 int ipa3_teardown_apps_low_lat_pipes(void);
 int ipa3_rmnet_ll_init(void);
-int ipa3_register_rmnet_ll_cb(
-	void (*ipa_rmnet_ll_ready_cb)(void *user_data1),
-	void *user_data1,
-	void (*ipa_rmnet_ll_stop_cb)(void *user_data2),
-	void *user_data2,
-	void (*ipa_rmnet_ll_rx_notify_cb)(
-	void *user_data3, void *rx_data),
-	void *user_data3);
-int ipa3_unregister_rmnet_ll_cb(void);
-int ipa3_rmnet_ll_xmit(struct sk_buff *skb);
-int ipa3_register_notifier(void *fn_ptr);
-int ipa3_unregister_notifier(void *fn_ptr);
 int ipa3_setup_apps_low_lat_data_prod_pipe(
 	struct rmnet_egress_param *egress_param,
 	struct net_device *dev);
@@ -3710,7 +3596,6 @@ int ipa3_set_clock_plan_from_pm(int idx);
 void __ipa_gsi_irq_rx_scedule_poll(struct ipa3_sys_context *sys);
 void ipa3_init_imm_cmd_desc(struct ipa3_desc *desc,
 	struct ipahal_imm_cmd_pyld *cmd_pyld);
-int ipa3_is_vlan_mode(enum ipa_vlan_ifaces iface, bool *res);
 uint ipa3_get_emulation_type(void);
 int ipa3_get_transport_info(
 	phys_addr_t *phys_addr_ptr,
@@ -3864,7 +3749,6 @@ int ipa3_uc_send_disable_flow_control(void);
 int ipa3_uc_send_update_flow_control(uint32_t bitmask,
 	uint8_t  add_delete);
 
-enum ipa_hw_type ipa_get_hw_type_internal(void);
 bool ipa_is_test_prod_flt_in_sram_internal(enum ipa_ip_type ip);
 /* check if modem is up */
 bool ipa3_is_modem_up(void);

+ 5 - 4
drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c

@@ -422,7 +422,7 @@ irq_handler_t ipa3_get_isr(void)
 }
 
 /**
- * ipa3_add_interrupt_handler() - Adds handler to an interrupt type
+ * ipa_add_interrupt_handler() - Adds handler to an interrupt type
  * @interrupt:		Interrupt type
  * @handler:		The handler to be added
  * @deferred_flag:	whether the handler processing should be deferred in
@@ -432,7 +432,7 @@ irq_handler_t ipa3_get_isr(void)
  * Adds handler to an interrupt type and enable the specific bit
  * in IRQ_EN register, associated interrupt in IRQ_STTS register will be enabled
  */
-int ipa3_add_interrupt_handler(enum ipa_irq_type interrupt,
+int ipa_add_interrupt_handler(enum ipa_irq_type interrupt,
 		ipa_irq_handler_t handler,
 		bool deferred_flag,
 		void *private_data)
@@ -480,7 +480,7 @@ int ipa3_add_interrupt_handler(enum ipa_irq_type interrupt,
 			for (client_idx = 0;
 				client_idx < IPA_CLIENT_MAX;
 				client_idx++) {
-				ep_idx = ipa3_get_ep_mapping(client_idx);
+				ep_idx = ipa_get_ep_mapping(client_idx);
 				if ((ep_idx != IPA_EP_NOT_ALLOCATED) &&
 					!(IPA_CLIENT_IS_Q6_CONS(client_idx) ||
 					IPA_CLIENT_IS_Q6_PROD(client_idx))) {
@@ -503,7 +503,7 @@ int ipa3_add_interrupt_handler(enum ipa_irq_type interrupt,
 				client_idx++) {
 				if (IPA_CLIENT_IS_Q6_CONS(client_idx) ||
 					IPA_CLIENT_IS_Q6_PROD(client_idx)) {
-					ep_idx = ipa3_get_ep_mapping(client_idx);
+					ep_idx = ipa_get_ep_mapping(client_idx);
 					IPADBG(
 						"modem ep_idx(%d) client_idx = %d\n"
 						, ep_idx, client_idx);
@@ -520,6 +520,7 @@ int ipa3_add_interrupt_handler(enum ipa_irq_type interrupt,
 	}
 	return 0;
 }
+EXPORT_SYMBOL(ipa_add_interrupt_handler);
 
 /**
  * ipa3_remove_interrupt_handler() - Removes handler to an interrupt type

+ 12 - 11
drivers/platform/msm/ipa/ipa_v3/ipa_intf.c

@@ -47,12 +47,12 @@ struct ipa3_pull_msg {
  *
  * Note:	Should not be called from atomic context
  */
-int ipa3_register_intf(const char *name, const struct ipa_tx_intf *tx,
+int ipa_register_intf(const char *name, const struct ipa_tx_intf *tx,
 		       const struct ipa_rx_intf *rx)
 {
 	return ipa3_register_intf_ext(name, tx, rx, NULL);
 }
-EXPORT_SYMBOL(ipa3_register_intf);
+EXPORT_SYMBOL(ipa_register_intf);
 
 /**
  * ipa3_register_intf_ext() - register "logical" interface which has only
@@ -155,7 +155,7 @@ int ipa3_register_intf_ext(const char *name, const struct ipa_tx_intf *tx,
 }
 
 /**
- * ipa3_deregister_intf() - de-register previously registered logical interface
+ * ipa_deregister_intf() - de-register previously registered logical interface
  * @name: [in] interface name
  *
  * De-register a previously registered interface
@@ -164,7 +164,7 @@ int ipa3_register_intf_ext(const char *name, const struct ipa_tx_intf *tx,
  *
  * Note:	Should not be called from atomic context
  */
-int ipa3_deregister_intf(const char *name)
+int ipa_deregister_intf(const char *name)
 {
 	struct ipa3_intf *entry;
 	struct ipa3_intf *next;
@@ -192,7 +192,7 @@ int ipa3_deregister_intf(const char *name)
 
 	return result;
 }
-EXPORT_SYMBOL(ipa3_deregister_intf);
+EXPORT_SYMBOL(ipa_deregister_intf);
 
 /**
  * ipa3_query_intf() - query logical interface properties
@@ -376,7 +376,7 @@ int ipa3_query_intf_ext_props(struct ipa_ioc_query_intf_ext_props *ext)
 	return result;
 }
 
-static void ipa3_send_msg_free(void *buff, u32 len, u32 type)
+static void ipa_send_msg_free(void *buff, u32 len, u32 type)
 {
 	kfree(buff);
 }
@@ -429,7 +429,7 @@ static int wlan_msg_process(struct ipa_msg_meta *meta, void *buff)
 			}
 			memcpy(data_dup, buff, meta->msg_len);
 			msg_dup->buff = data_dup;
-			msg_dup->callback = ipa3_send_msg_free;
+			msg_dup->callback = ipa_send_msg_free;
 		} else {
 			IPAERR("msg_len %d\n", meta->msg_len);
 			kfree(msg_dup);
@@ -483,7 +483,7 @@ static int wlan_msg_process(struct ipa_msg_meta *meta, void *buff)
 }
 
 /**
- * ipa3_send_msg() - Send "message" from kernel client to IPA driver
+ * ipa_send_msg() - Send "message" from kernel client to IPA driver
  * @meta: [in] message meta-data
  * @buff: [in] the payload for message
  * @callback: [in] free callback
@@ -497,7 +497,7 @@ static int wlan_msg_process(struct ipa_msg_meta *meta, void *buff)
  *
  * Note:	Should not be called from atomic context
  */
-int ipa3_send_msg(struct ipa_msg_meta *meta, void *buff,
+int ipa_send_msg(struct ipa_msg_meta *meta, void *buff,
 		  ipa_msg_free_fn callback)
 {
 	struct ipa3_push_msg *msg;
@@ -527,7 +527,7 @@ int ipa3_send_msg(struct ipa_msg_meta *meta, void *buff,
 			return -ENOMEM;
 		}
 		msg->buff = data;
-		msg->callback = ipa3_send_msg_free;
+		msg->callback = ipa_send_msg_free;
 	}
 
 	mutex_lock(&ipa3_ctx->msg_lock);
@@ -546,6 +546,7 @@ int ipa3_send_msg(struct ipa_msg_meta *meta, void *buff,
 
 	return 0;
 }
+EXPORT_SYMBOL(ipa_send_msg);
 
 /**
  * ipa3_resend_wlan_msg() - Resend cached "message" to IPACM
@@ -593,7 +594,7 @@ int ipa3_resend_wlan_msg(void)
 			return -ENOMEM;
 		}
 		msg->buff = data;
-		msg->callback = ipa3_send_msg_free;
+		msg->callback = ipa_send_msg_free;
 		mutex_lock(&ipa3_ctx->msg_lock);
 		list_add_tail(&msg->link, &ipa3_ctx->msg_list);
 		mutex_unlock(&ipa3_ctx->msg_lock);

+ 13 - 13
drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c

@@ -8,7 +8,7 @@
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
-#include <linux/ipa.h>
+#include "ipa.h"
 #include <linux/msm_gsi.h>
 #include <linux/ipa_mhi.h>
 #include "gsi.h"
@@ -76,7 +76,7 @@ bool ipa3_mhi_stop_gsi_channel(enum ipa_client_type client)
 	struct ipa3_ep_context *ep;
 
 	IPA_MHI_FUNC_ENTRY();
-	ipa_ep_idx = ipa3_get_ep_mapping(client);
+	ipa_ep_idx = ipa_get_ep_mapping(client);
 	if (ipa_ep_idx == -1) {
 		IPA_MHI_ERR("Invalid client.\n");
 		return -EINVAL;
@@ -111,7 +111,7 @@ static int ipa3_mhi_reset_gsi_channel(enum ipa_client_type client)
 
 	IPA_MHI_FUNC_ENTRY();
 
-	clnt_hdl = ipa3_get_ep_mapping(client);
+	clnt_hdl = ipa_get_ep_mapping(client);
 	if (clnt_hdl < 0)
 		return -EFAULT;
 
@@ -138,7 +138,7 @@ int ipa3_mhi_reset_channel_internal(enum ipa_client_type client)
 		return res;
 	}
 
-	res = ipa3_disable_data_path(ipa3_get_ep_mapping(client));
+	res = ipa3_disable_data_path(ipa_get_ep_mapping(client));
 	if (res) {
 		IPA_MHI_ERR("ipa3_disable_data_path failed %d\n", res);
 		return res;
@@ -156,7 +156,7 @@ int ipa3_mhi_start_channel_internal(enum ipa_client_type client)
 
 	IPA_MHI_FUNC_ENTRY();
 
-	ipa_ep_idx = ipa3_get_ep_mapping(client);
+	ipa_ep_idx = ipa_get_ep_mapping(client);
 	if (ipa_ep_idx < 0) {
 		IPA_MHI_ERR("Invalid client %d\n", client);
 		return -EINVAL;
@@ -212,7 +212,7 @@ static int ipa_mhi_start_gsi_channel(enum ipa_client_type client,
 	ep = &ipa3_ctx->ep[ipa_ep_idx];
 
 	msi = params->msi;
-	ep_cfg = ipa3_get_gsi_ep_info(client);
+	ep_cfg = ipa_get_gsi_ep_info(client);
 	if (!ep_cfg) {
 		IPA_MHI_ERR("Wrong parameter, ep_cfg is NULL\n");
 		return -EPERM;
@@ -417,7 +417,7 @@ static int ipa_mhi_start_gsi_channel(enum ipa_client_type client,
 		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
 		ep_cfg_ctrl.ipa_ep_delay = true;
 		ep->ep_delay_set = true;
-		res = ipa3_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl);
+		res = ipa_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl);
 		if (res)
 			IPA_MHI_ERR("client (ep: %d) failed result=%d\n",
 			ipa_ep_idx, res);
@@ -499,7 +499,7 @@ int ipa3_mhi_init_engine(struct ipa_mhi_init_engine *params)
 	}
 
 	/* Initialize IPA MHI engine */
-	gsi_ep_info = ipa3_get_gsi_ep_info(IPA_CLIENT_MHI_PROD);
+	gsi_ep_info = ipa_get_gsi_ep_info(IPA_CLIENT_MHI_PROD);
 	if (!gsi_ep_info) {
 		IPAERR("MHI PROD has no ep allocated\n");
 		ipa_assert();
@@ -553,7 +553,7 @@ int ipa3_connect_mhi_pipe(struct ipa_mhi_connect_params_internal *in,
 	in->start.gsi.evchid += ipa3_ctx->mhi_evid_limits[0];
 
 	client = in->sys->client;
-	ipa_ep_idx = ipa3_get_ep_mapping(client);
+	ipa_ep_idx = ipa_get_ep_mapping(client);
 	if (ipa_ep_idx == -1) {
 		IPA_MHI_ERR("Invalid client.\n");
 		return -EINVAL;
@@ -633,7 +633,7 @@ int ipa3_disconnect_mhi_pipe(u32 clnt_hdl)
 	if (ep->ep_delay_set) {
 		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
 		ep_cfg_ctrl.ipa_ep_delay = false;
-		res = ipa3_cfg_ep_ctrl(clnt_hdl,
+		res = ipa_cfg_ep_ctrl(clnt_hdl,
 			&ep_cfg_ctrl);
 		if (res) {
 			IPAERR
@@ -676,7 +676,7 @@ int ipa3_mhi_resume_channels_internal(enum ipa_client_type client,
 
 	IPA_MHI_FUNC_ENTRY();
 
-	ipa_ep_idx = ipa3_get_ep_mapping(client);
+	ipa_ep_idx = ipa_get_ep_mapping(client);
 	if (ipa_ep_idx < 0) {
 		IPA_MHI_ERR("Invalid client %d\n", client);
 		return -EINVAL;
@@ -754,7 +754,7 @@ int ipa3_mhi_query_ch_info(enum ipa_client_type client,
 
 	IPA_MHI_FUNC_ENTRY();
 
-	ipa_ep_idx = ipa3_get_ep_mapping(client);
+	ipa_ep_idx = ipa_get_ep_mapping(client);
 	if (ipa_ep_idx < 0) {
 		IPA_MHI_ERR("Invalid client %d\n", client);
 		return -EINVAL;
@@ -804,7 +804,7 @@ int ipa3_mhi_destroy_channel(enum ipa_client_type client)
 	int ipa_ep_idx;
 	struct ipa3_ep_context *ep;
 
-	ipa_ep_idx = ipa3_get_ep_mapping(client);
+	ipa_ep_idx = ipa_get_ep_mapping(client);
 	if (ipa_ep_idx < 0) {
 		IPA_MHI_ERR("Invalid client %d\n", client);
 		return -EINVAL;

+ 23 - 23
drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c

@@ -508,10 +508,10 @@ static int ipa_mpm_set_dma_mode(enum ipa_client_type src_pipe,
 	memset(cmd_pyld, 0, sizeof(cmd_pyld));
 
 	/* First step is to clear IPA Pipeline before changing DMA mode */
-	if (ipa3_get_ep_mapping(src_pipe) != IPA_EP_NOT_ALLOCATED) {
+	if (ipa_get_ep_mapping(src_pipe) != IPA_EP_NOT_ALLOCATED) {
 		u32 offset = 0;
 
-		i = ipa3_get_ep_mapping(src_pipe);
+		i = ipa_get_ep_mapping(src_pipe);
 		reg_write_coal_close.skip_pipeline_clear = false;
 		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
 		if (ipa3_ctx->ipa_hw_type < IPA_HW_v5_0)
@@ -935,7 +935,7 @@ static int ipa_mpm_connect_mhip_gsi_pipe(enum ipa_client_type mhip_client,
 		(mhi_idx >= IPA_MPM_MHIP_CH_ID_MAX))
 		goto fail_gen;
 
-	ipa_ep_idx = ipa3_get_ep_mapping(mhip_client);
+	ipa_ep_idx = ipa_get_ep_mapping(mhip_client);
 	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
 		IPA_MPM_ERR("fail to find channel EP.\n");
 		goto fail_gen;
@@ -1213,7 +1213,7 @@ static int ipa_mpm_connect_mhip_gsi_pipe(enum ipa_client_type mhip_client,
 
 fail_start_channel:
 	ipa3_disable_data_path(ipa_ep_idx);
-	ipa3_stop_gsi_channel(ipa_ep_idx);
+	ipa_stop_gsi_channel(ipa_ep_idx);
 fail_alloc_channel:
 	ipa3_release_gsi_channel(ipa_ep_idx);
 fail_smmu_map_db:
@@ -1247,7 +1247,7 @@ static void ipa_mpm_clean_mhip_chan(int mhi_idx,
 	dir = IPA_CLIENT_IS_PROD(mhip_client) ?
 		DMA_TO_HIPA : DMA_FROM_HIPA;
 
-	ipa_ep_idx = ipa3_get_ep_mapping(mhip_client);
+	ipa_ep_idx = ipa_get_ep_mapping(mhip_client);
 	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
 		IPA_MPM_ERR("fail to find channel EP.\n");
 		return;
@@ -1258,7 +1258,7 @@ static void ipa_mpm_clean_mhip_chan(int mhi_idx,
 		ipa3_disable_data_path(ipa_ep_idx);
 
 	/* Release channel */
-	result = ipa3_stop_gsi_channel(ipa_ep_idx);
+	result = ipa_stop_gsi_channel(ipa_ep_idx);
 	if (result) {
 		IPA_MPM_ERR("Stop channel for MHIP_Client =  %d failed\n",
 					mhip_client);
@@ -1735,12 +1735,12 @@ static enum mhip_status_type ipa_mpm_start_stop_mhip_chan(
 	get_ipa3_client(probe_id, &ul_chan, &dl_chan);
 
 	if (mhip_chan == IPA_MPM_MHIP_CHAN_UL) {
-		ipa_ep_idx = ipa3_get_ep_mapping(ul_chan);
+		ipa_ep_idx = ipa_get_ep_mapping(ul_chan);
 	} else if (mhip_chan == IPA_MPM_MHIP_CHAN_DL) {
-		ipa_ep_idx = ipa3_get_ep_mapping(dl_chan);
+		ipa_ep_idx = ipa_get_ep_mapping(dl_chan);
 	} else if (mhip_chan == IPA_MPM_MHIP_CHAN_BOTH) {
-		ipa_ep_idx = ipa3_get_ep_mapping(ul_chan);
-		ipa_ep_idx = ipa3_get_ep_mapping(dl_chan);
+		ipa_ep_idx = ipa_get_ep_mapping(ul_chan);
+		ipa_ep_idx = ipa_get_ep_mapping(dl_chan);
 	}
 
 	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
@@ -1833,7 +1833,7 @@ static enum mhip_status_type ipa_mpm_start_stop_mhip_chan(
 
 		if (mhip_chan == IPA_MPM_MHIP_CHAN_UL) {
 			/* First Stop UL GSI channel before unvote PCIe clock */
-			result = ipa3_stop_gsi_channel(ipa_ep_idx);
+			result = ipa_stop_gsi_channel(ipa_ep_idx);
 
 			if (result) {
 				IPA_MPM_ERR("UL chan stop failed\n");
@@ -1845,7 +1845,7 @@ static enum mhip_status_type ipa_mpm_start_stop_mhip_chan(
 		}
 
 		if (mhip_chan == IPA_MPM_MHIP_CHAN_DL) {
-			result = ipa3_stop_gsi_channel(ipa_ep_idx);
+			result = ipa_stop_gsi_channel(ipa_ep_idx);
 			if (result) {
 				IPA_MPM_ERR("Fail to stop DL channel\n");
 				goto gsi_chan_fail;
@@ -1937,7 +1937,7 @@ int ipa_mpm_notify_wan_state(struct wan_ioctl_notify_wan_state *state)
 		  * This info will be used to set delay on the end points upon
 		  * hitting RED water mark.
 		  */
-		ep_cfg = ipa3_get_gsi_ep_info(IPA_CLIENT_WLAN2_PROD);
+		ep_cfg = ipa_get_gsi_ep_info(IPA_CLIENT_WLAN2_PROD);
 
 		if (!ep_cfg)
 			IPA_MPM_ERR("ep = %d not allocated yet\n",
@@ -1945,7 +1945,7 @@ int ipa_mpm_notify_wan_state(struct wan_ioctl_notify_wan_state *state)
 		else
 			flow_ctrl_mask |= 1 << (ep_cfg->ipa_gsi_chan_num);
 
-		ep_cfg = ipa3_get_gsi_ep_info(IPA_CLIENT_USB_PROD);
+		ep_cfg = ipa_get_gsi_ep_info(IPA_CLIENT_USB_PROD);
 
 		if (!ep_cfg)
 			IPA_MPM_ERR("ep = %d not allocated yet\n",
@@ -2153,7 +2153,7 @@ static void ipa_mpm_read_channel(enum ipa_client_type chan)
 	struct ipa3_ep_context *ep;
 	int res;
 
-	ipa_ep_idx = ipa3_get_ep_mapping(chan);
+	ipa_ep_idx = ipa_get_ep_mapping(chan);
 
 	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
 		IPAERR("failed to get idx");
@@ -2439,7 +2439,7 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
 		 * Ring the event DB to a value outside the
 		 * ring range such that rp and wp never meet.
 		 */
-		ipa_ep_idx = ipa3_get_ep_mapping(ul_prod);
+		ipa_ep_idx = ipa_get_ep_mapping(ul_prod);
 
 		if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
 			IPA_MPM_ERR("fail to alloc EP.\n");
@@ -2517,7 +2517,7 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
 		iounmap(db_addr);
 
 		/* Ring DL EVENT RING CONSUMER (DEVICE IPA CONSUMER) Doorbell */
-		ipa_ep_idx = ipa3_get_ep_mapping(dl_cons);
+		ipa_ep_idx = ipa_get_ep_mapping(dl_cons);
 
 		if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
 			IPA_MPM_ERR("fail to alloc EP.\n");
@@ -2564,12 +2564,12 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
 		}
 		if (ul_prod != IPA_CLIENT_MAX) {
 			/* No teth started yet, disable UL channel */
-			ipa_ep_idx = ipa3_get_ep_mapping(ul_prod);
+			ipa_ep_idx = ipa_get_ep_mapping(ul_prod);
 			if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
 				IPA_MPM_ERR("fail to alloc EP.\n");
 				goto fail_stop_channel;
 			}
-			ret = ipa3_stop_gsi_channel(ipa_ep_idx);
+			ret = ipa_stop_gsi_channel(ipa_ep_idx);
 			if (ret) {
 				IPA_MPM_ERR("MHIP Stop channel err = %d\n",
 					ret);
@@ -2590,7 +2590,7 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
 
 		/* Lift the delay for rmnet USB prod pipe */
 		if (probe_id == IPA_MPM_MHIP_CH_ID_1) {
-			pipe_idx = ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD);
+			pipe_idx = ipa_get_ep_mapping(IPA_CLIENT_USB_PROD);
 			ipa3_xdci_ep_delay_rm(pipe_idx);
 			/* Register for BW indication from Q6*/
 			if (!ipa3_qmi_reg_dereg_for_bw(true))
@@ -2624,7 +2624,7 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
 	mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
 	/* Update Flow control Monitoring, only for the teth UL Prod pipes */
 	if (probe_id == IPA_MPM_MHIP_CH_ID_0) {
-		ipa_ep_idx = ipa3_get_ep_mapping(ul_prod);
+		ipa_ep_idx = ipa_get_ep_mapping(ul_prod);
 		ep = &ipa3_ctx->ep[ipa_ep_idx];
 		ret = ipa3_uc_send_enable_flow_control(ep->gsi_chan_hdl,
 			ipa3_ctx->mpm_uc_thresh);
@@ -2959,7 +2959,7 @@ int ipa_mpm_mhip_xdci_pipe_enable(enum ipa_usb_teth_prot xdci_teth_prot)
 		if (!ipa3_qmi_reg_dereg_for_bw(true))
 			IPA_MPM_DBG("Fail regst QMI BW Indctn,might be SSR");
 
-		pipe_idx = ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD);
+		pipe_idx = ipa_get_ep_mapping(IPA_CLIENT_USB_PROD);
 
 		/* Lift the delay for rmnet USB prod pipe */
 		ipa3_xdci_ep_delay_rm(pipe_idx);
@@ -3152,7 +3152,7 @@ static int ipa_mpm_populate_smmu_info(struct platform_device *pdev)
 
 	/* get IPA SMMU enabled status */
 	smmu_in.smmu_client = IPA_SMMU_AP_CLIENT;
-	if (ipa3_get_smmu_params(&smmu_in, &smmu_out))
+	if (ipa_get_smmu_params(&smmu_in, &smmu_out))
 		ipa_mpm_ctx->dev_info.ipa_smmu_enabled = false;
 	else
 		ipa_mpm_ctx->dev_info.ipa_smmu_enabled =

+ 7 - 7
drivers/platform/msm/ipa/ipa_v3/ipa_nat.c

@@ -1215,11 +1215,11 @@ static int ipa3_nat_send_init_cmd(struct ipahal_imm_cmd_ip_v4_nat_init *cmd,
 	memset(cmd_pyld, 0, sizeof(cmd_pyld));
 
 	/* IC to close the coal frame before HPS Clear if coal is enabled */
-	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1
+	if (ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1
 		&& !ipa3_ctx->ulso_wa) {
 		u32 offset = 0;
 
-		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+		i = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
 		reg_write_coal_close.skip_pipeline_clear = false;
 		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
 		if (ipa3_ctx->ipa_hw_type < IPA_HW_v5_0)
@@ -1326,10 +1326,10 @@ static int ipa3_ipv6ct_send_init_cmd(struct ipahal_imm_cmd_ip_v6_ct_init *cmd)
 	memset(cmd_pyld, 0, sizeof(cmd_pyld));
 
 	/* IC to close the coal frame before HPS Clear if coal is enabled */
-	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1
+	if (ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1
 		&& !ipa3_ctx->ulso_wa) {
 		u32 offset = 0;
-		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+		i = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
 		reg_write_coal_close.skip_pipeline_clear = false;
 		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
 		if (ipa3_ctx->ipa_hw_type < IPA_HW_v5_0)
@@ -1984,7 +1984,7 @@ int ipa3_table_dma_cmd(
 	 * IPA_MAX_NUM_OF_TABLE_DMA_CMD_DESC - 1 to overcome
 	 * buffer overflow of ipa3_desc array.
 	 */
-	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1)
+	if (ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1)
 		max_dma_table_cmds -= 1;
 
 	if (!dma->entries || dma->entries > (max_dma_table_cmds - 1)) {
@@ -2007,11 +2007,11 @@ int ipa3_table_dma_cmd(
 	}
 
 	/* IC to close the coal frame before HPS Clear if coal is enabled */
-	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1
+	if (ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1
 		&& !ipa3_ctx->ulso_wa) {
 		u32 offset = 0;
 
-		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+		i = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
 		reg_write_coal_close.skip_pipeline_clear = false;
 		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
 		if (ipa3_ctx->ipa_hw_type < IPA_HW_v5_0)

+ 10 - 1
drivers/platform/msm/ipa/ipa_v3/ipa_net.c

@@ -18,9 +18,10 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/ipa.h>
+#include "ipa.h"
 #include "ipa_i.h"
 #include "ipa_qmi_service.h"
+#include "rndis_ipa.h"
 
 static int __init ipa_late_init(void)
 {
@@ -35,6 +36,13 @@ static int __init ipa_late_init(void)
 		ipa3_wwan_cleanup();
 	}
 
+	rc = rndis_ipa_init_module();
+	if (rc) {
+		IPAERR("rndis_ipa_init_module failed: %d\n",
+			   rc);
+		rndis_ipa_cleanup_module();
+	}
+
 	return rc;
 }
 fs_initcall(ipa_late_init);
@@ -43,6 +51,7 @@ static void __exit ipa_late_exit(void)
 {
 	IPADBG("IPA late exit\n");
 	ipa3_wwan_cleanup();
+	rndis_ipa_cleanup_module();
 }
 module_exit(ipa_late_exit);
 

+ 2 - 2
drivers/platform/msm/ipa/ipa_v3/ipa_odl.c

@@ -327,7 +327,7 @@ int ipa_setup_odl_pipe(void)
 		ipa_odl_ep_cfg->ipa_ep_cfg.hdr.hdr_len = 0;
 	}
 
-	ret = ipa3_setup_sys_pipe(ipa_odl_ep_cfg,
+	ret = ipa_setup_sys_pipe(ipa_odl_ep_cfg,
 			&ipa3_odl_ctx->odl_client_hdl);
 	return ret;
 
@@ -497,7 +497,7 @@ void ipa3_odl_pipe_cleanup(bool is_ssr)
 	/*Since init will not be done again*/
 	ipa3_odl_ctx->odl_state.odl_init = true;
 
-	ipa3_teardown_sys_pipe(ipa3_odl_ctx->odl_client_hdl);
+	ipa_teardown_sys_pipe(ipa3_odl_ctx->odl_client_hdl);
 	ipa3_odl_ctx->odl_client_hdl = -1;
 	/*Assume QTI will never close this node once opened*/
 	if (ipa_odl_opened)

+ 2 - 2
drivers/platform/msm/ipa/ipa_v3/ipa_pm.c

@@ -905,7 +905,7 @@ int ipa_pm_associate_ipa_cons_to_client(u32 hdl, enum ipa_client_type consumer)
 		return -EPERM;
 	}
 
-	idx = ipa3_get_ep_mapping(consumer);
+	idx = ipa_get_ep_mapping(consumer);
 
 	if (idx < 0) {
 		mutex_unlock(&ipa_pm_ctx->client_mutex);
@@ -1554,7 +1554,7 @@ bool ipa_get_pm_client_stats_filled(struct pm_client_stats *pm_stats_ptr,
 
 int ipa_pm_get_pm_clnt_throughput(enum ipa_client_type client_type)
 {
-	int idx = ipa3_get_ep_mapping(client_type);
+	int idx = ipa_get_ep_mapping(client_type);
 	int throughput;
 
 	mutex_lock(&ipa_pm_ctx->client_mutex);

+ 10 - 22
drivers/platform/msm/ipa/ipa_v3/ipa_qdss.c

@@ -3,10 +3,8 @@
  * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
  */
 
-#include <linux/ipa_qdss.h>
 #include <linux/msm_ipa.h>
 #include <linux/string.h>
-#include <linux/ipa_qdss.h>
 #include "ipa_i.h"
 
 #define IPA_HOLB_TMR_VALUE 0
@@ -58,7 +56,7 @@ static void ipa3_qdss_gsi_chan_err_cb(struct gsi_chan_err_notify *notify)
 	ipa_assert();
 }
 
-int ipa3_conn_qdss_pipes(struct ipa_qdss_conn_in_params *in,
+int ipa_qdss_conn_pipes(struct ipa_qdss_conn_in_params *in,
 	struct ipa_qdss_conn_out_params *out)
 {
 	struct gsi_chan_props gsi_channel_props;
@@ -76,13 +74,13 @@ int ipa3_conn_qdss_pipes(struct ipa_qdss_conn_in_params *in,
 		return -IPA_QDSS_PIPE_CONN_FAILURE;
 	}
 
-	ipa_ep_idx_tx = ipa3_get_ep_mapping(IPA_CLIENT_MHI_QDSS_CONS);
+	ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_MHI_QDSS_CONS);
 	if ((ipa_ep_idx_tx) < 0 || (!ipa3_ctx->ipa_config_is_mhi)) {
 		IPA_QDSS_ERR("getting EP map failed\n");
 		return -IPA_QDSS_PIPE_CONN_FAILURE;
 	}
 
-	ipa_ep_idx_rx = ipa3_get_ep_mapping(IPA_CLIENT_QDSS_PROD);
+	ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_QDSS_PROD);
 	if ((ipa_ep_idx_rx == -1) ||
 		(ipa_ep_idx_rx >= IPA3_MAX_NUM_PIPES)) {
 		IPA_QDSS_ERR("out of range ipa_ep_idx_rx = %d\n",
@@ -113,7 +111,7 @@ int ipa3_conn_qdss_pipes(struct ipa_qdss_conn_in_params *in,
 	gsi_channel_props.prot = GSI_CHAN_PROT_QDSS;
 	gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI;
 
-	gsi_ep_info = ipa3_get_gsi_ep_info(ep_rx->client);
+	gsi_ep_info = ipa_get_gsi_ep_info(ep_rx->client);
 	if (!gsi_ep_info) {
 		IPA_QDSS_ERR("Failed getting GSI EP info for client=%d\n",
 			ep_rx->client);
@@ -179,7 +177,7 @@ int ipa3_conn_qdss_pipes(struct ipa_qdss_conn_in_params *in,
 	ep_cfg.mode.mode = IPA_DMA;
 	ep_cfg.mode.dst = IPA_CLIENT_MHI_QDSS_CONS;
 	ep_cfg.seq.set_dynamic = true;
-	if (ipa3_cfg_ep(ipa3_get_ep_mapping(IPA_CLIENT_QDSS_PROD),
+	if (ipa3_cfg_ep(ipa_get_ep_mapping(IPA_CLIENT_QDSS_PROD),
 		&ep_cfg)) {
 		IPA_QDSS_ERR("Setting DMA mode failed\n");
 		goto fail_write_scratch;
@@ -203,8 +201,9 @@ fail:
 	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 	return -IPA_QDSS_PIPE_CONN_FAILURE;
 }
+EXPORT_SYMBOL(ipa_qdss_conn_pipes);
 
-int ipa3_disconn_qdss_pipes(void)
+int ipa_qdss_disconn_pipes(void)
 {
 	int result = 0;
 	int ipa_ep_idx_rx;
@@ -223,7 +222,7 @@ int ipa3_disconn_qdss_pipes(void)
 	}
 
 	/* Stop QDSS_rx gsi channel / release channel */
-	result = ipa3_stop_gsi_channel(ipa_ep_idx_rx);
+	result = ipa_stop_gsi_channel(ipa_ep_idx_rx);
 	if (result) {
 		IPA_QDSS_ERR("Failed stopping QDSS gsi channel\n");
 		goto fail;
@@ -242,7 +241,7 @@ int ipa3_disconn_qdss_pipes(void)
 	ep_cfg.mode.mode = IPA_BASIC;
 	ep_cfg.mode.dst = IPA_CLIENT_MHI_QDSS_CONS;
 	ep_cfg.seq.set_dynamic = true;
-	if (ipa3_cfg_ep(ipa3_get_ep_mapping(IPA_CLIENT_QDSS_PROD),
+	if (ipa3_cfg_ep(ipa_get_ep_mapping(IPA_CLIENT_QDSS_PROD),
 		&ep_cfg)) {
 		IPAERR("Resetting DMA mode failed\n");
 	}
@@ -260,15 +259,4 @@ fail:
 	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 	return -IPA_QDSS_PIPE_DISCONN_FAILURE;
 }
-
-void ipa3_qdss_register(void)
-{
-	struct ipa_qdss_data funcs;
-
-	funcs.ipa_qdss_conn_pipes = ipa3_conn_qdss_pipes;
-	funcs.ipa_qdss_disconn_pipes = ipa3_disconn_qdss_pipes;
-
-	if(ipa_fmwk_register_ipa_qdss(&funcs))
-		pr_err("failed to register ipa_qdss APIs\n");
-}
-EXPORT_SYMBOL(ipa3_qdss_register);
+EXPORT_SYMBOL(ipa_qdss_disconn_pipes);

+ 1 - 1
drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c

@@ -12,7 +12,7 @@
 #include <linux/debugfs.h>
 #include <linux/delay.h>
 #include <linux/uaccess.h>
-#include <linux/ipa.h>
+#include "ipa.h"
 #include <linux/vmalloc.h>
 
 #include "ipa_qmi_service.h"

+ 1 - 1
drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h

@@ -6,7 +6,7 @@
 #ifndef IPA_QMI_SERVICE_H
 #define IPA_QMI_SERVICE_H
 
-#include <linux/ipa.h>
+#include "ipa.h"
 #include <linux/ipa_qmi_service_v01.h>
 #include <uapi/linux/msm_rmnet.h>
 #include <linux/soc/qcom/qmi.h>

+ 17 - 9
drivers/platform/msm/ipa/ipa_v3/ipa_rt.c

@@ -60,7 +60,7 @@ static int ipa_generate_rt_hw_rule(enum ipa_ip_type ip,
 	}
 
 	gen_params.ipt = ip;
-	gen_params.dst_pipe_idx = ipa3_get_ep_mapping(entry->rule.dst);
+	gen_params.dst_pipe_idx = ipa_get_ep_mapping(entry->rule.dst);
 	if (gen_params.dst_pipe_idx == -1) {
 		IPAERR_RL("Wrong destination pipe specified in RT rule\n");
 		WARN_ON_RATELIMIT_IPA(1);
@@ -572,11 +572,11 @@ int __ipa_commit_rt_v3(enum ipa_ip_type ip)
 	}
 
 	/* IC to close the coal frame before HPS Clear if coal is enabled */
-	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1
+	if (ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1
 		&& !ipa3_ctx->ulso_wa) {
 		u32 offset = 0;
 
-		i = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+		i = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
 		reg_write_coal_close.skip_pipeline_clear = false;
 		reg_write_coal_close.pipeline_clear_options = IPAHAL_HPS_CLEAR;
 		if (ipa3_ctx->ipa_hw_type < IPA_HW_v5_0)
@@ -1285,7 +1285,7 @@ static void __ipa_convert_rt_mdfy_out(struct ipa_rt_rule_mdfy_i rule_in,
 }
 
 /**
- * ipa3_add_rt_rule() - Add the specified routing rules to SW and optionally
+ * ipa_add_rt_rule() - Add the specified routing rules to SW and optionally
  * commit to IPA HW
  * @rules:	[inout] set of routing rules to add
  *
@@ -1294,10 +1294,11 @@ static void __ipa_convert_rt_mdfy_out(struct ipa_rt_rule_mdfy_i rule_in,
  * Note:	Should not be called from atomic context
  */
 
-int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
+int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
 {
 	return ipa3_add_rt_rule_usr(rules, false);
 }
+EXPORT_SYMBOL(ipa_add_rt_rule);
 
 /**
  * ipa3_add_rt_rule_v2() - Add the specified routing rules to SW
@@ -1313,6 +1314,7 @@ int ipa3_add_rt_rule_v2(struct ipa_ioc_add_rt_rule_v2 *rules)
 {
 	return ipa3_add_rt_rule_usr_v2(rules, false);
 }
+EXPORT_SYMBOL(ipa3_add_rt_rule_v2);
 
 /**
  * ipa3_add_rt_rule_usr() - Add the specified routing rules to SW and optionally
@@ -1368,6 +1370,7 @@ bail:
 	mutex_unlock(&ipa3_ctx->lock);
 	return ret;
 }
+EXPORT_SYMBOL(ipa3_add_rt_rule_usr);
 
 /**
  * ipa3_add_rt_rule_usr_v2() - Add the specified routing rules
@@ -1427,6 +1430,7 @@ bail:
 	mutex_unlock(&ipa3_ctx->lock);
 	return ret;
 }
+EXPORT_SYMBOL(ipa3_add_rt_rule_usr_v2);
 
 
 /**
@@ -1480,6 +1484,7 @@ bail:
 	mutex_unlock(&ipa3_ctx->lock);
 	return ret;
 }
+EXPORT_SYMBOL(ipa3_add_rt_rule_ext);
 
 /**
  * ipa3_add_rt_rule_ext_v2() - Add the specified routing rules
@@ -1537,6 +1542,7 @@ bail:
 	mutex_unlock(&ipa3_ctx->lock);
 	return ret;
 }
+EXPORT_SYMBOL(ipa3_add_rt_rule_ext_v2);
 
 /**
  * ipa3_add_rt_rule_after() - Add the given routing rules after the
@@ -1646,6 +1652,7 @@ bail:
 	mutex_unlock(&ipa3_ctx->lock);
 	return ret;
 }
+EXPORT_SYMBOL(ipa3_add_rt_rule_after);
 
 /**
  * ipa3_add_rt_rule_after_v2() - Add the given routing rules
@@ -1758,6 +1765,7 @@ bail:
 	mutex_unlock(&ipa3_ctx->lock);
 	return ret;
 }
+EXPORT_SYMBOL(ipa3_add_rt_rule_after_v2);
 
 int __ipa3_del_rt_rule(u32 rule_hdl)
 {
@@ -2086,7 +2094,7 @@ int ipa3_reset_rt(enum ipa_ip_type ip, bool user_only)
  * Returns:	0 on success, negative on failure
  *
  * Note:	Should not be called from atomic context
- *	Caller should call ipa3_put_rt_tbl later if this function succeeds
+ *	Caller should call ipa_put_rt_tbl later if this function succeeds
  */
 int ipa3_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup)
 {
@@ -2123,14 +2131,14 @@ ret:
 EXPORT_SYMBOL(ipa3_get_rt_tbl);
 
 /**
- * ipa3_put_rt_tbl() - Release the specified routing table handle
+ * ipa_put_rt_tbl() - Release the specified routing table handle
  * @rt_tbl_hdl:	[in] the routing table handle to release
  *
  * Returns:	0 on success, negative on failure
  *
  * Note:	Should not be called from atomic context
  */
-int ipa3_put_rt_tbl(u32 rt_tbl_hdl)
+int ipa_put_rt_tbl(u32 rt_tbl_hdl)
 {
 	struct ipa3_rt_tbl *entry;
 	enum ipa_ip_type ip = IPA_IP_MAX;
@@ -2178,7 +2186,7 @@ ret:
 
 	return result;
 }
-
+EXPORT_SYMBOL(ipa_put_rt_tbl);
 
 static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy_i *rtrule)
 {

+ 9 - 9
drivers/platform/msm/ipa/ipa_v3/ipa_stats.c

@@ -277,7 +277,7 @@ static int ipa_get_generic_stats(unsigned long arg)
 	/* HOLB Discard stats */
 	holb_disc_stats_ptr = &generic_stats->holb_stats.holb_disc_stats[0];
 	for (i = 0; i < IPA_CLIENT_MAX; i++) {
-		int ep_idx = ipa3_get_ep_mapping(i);
+		int ep_idx = ipa_get_ep_mapping(i);
 
 		if ((ep_idx == -1) || (!IPA_CLIENT_IS_CONS(i)) ||
 			(IPA_CLIENT_IS_TEST(i)))
@@ -415,7 +415,7 @@ static void ipa_get_gsi_pipe_info(
 	pipe_info_ptr_local->gsi_chan_ring_wp =
 		gsi_read_chan_ring_wp(ep->gsi_chan_hdl, gsi_get_peripheral_ee());
 
-	gsi_ep_info = ipa3_get_gsi_ep_info(ep->client);
+	gsi_ep_info = ipa_get_gsi_ep_info(ep->client);
 	pipe_info_ptr_local->gsi_ipa_if_tlv =
 		gsi_ep_info ? gsi_ep_info->ipa_if_tlv : 0;
 	pipe_info_ptr_local->gsi_ipa_if_aos =
@@ -785,7 +785,7 @@ static int ipa_get_wlan_inst_stats(unsigned long arg)
 				uint64_t)pipe_info_ptr +
 				(j * sizeof(struct ipa_lnx_pipe_info)));
 
-			ep_idx = ipa3_get_ep_mapping(
+			ep_idx = ipa_get_ep_mapping(
 				ipa_lnx_agent_ctx.alloc_info.wlan_inst_info[
 				i].pipes_client_type[j]);
 			if (ep_idx == -1) {
@@ -1090,7 +1090,7 @@ static int ipa_get_eth_inst_stats(unsigned long arg)
 				uint64_t)pipe_info_ptr + (j *
 				sizeof(struct ipa_lnx_pipe_info)));
 
-			ep_idx = ipa3_get_ep_mapping(
+			ep_idx = ipa_get_ep_mapping(
 				ipa_lnx_agent_ctx.alloc_info.eth_inst_info[
 					i].pipes_client_type[j]);
 			if (ep_idx == -1) {
@@ -1266,7 +1266,7 @@ static int ipa_get_usb_inst_stats(unsigned long arg)
 				uint64_t)pipe_info_ptr + (j *
 				sizeof(struct ipa_lnx_pipe_info)));
 
-			ep_idx = ipa3_get_ep_mapping(
+			ep_idx = ipa_get_ep_mapping(
 				ipa_lnx_agent_ctx.alloc_info.usb_inst_info[
 					i].pipes_client_type[j]);
 			if (ep_idx == -1) {
@@ -1443,7 +1443,7 @@ static int ipa_get_mhip_inst_stats(unsigned long arg)
 			pipe_info_ptr_local = (struct ipa_lnx_pipe_info *)((uint64_t)
 				pipe_info_ptr + (j * sizeof(struct ipa_lnx_pipe_info)));
 
-			ep_idx = ipa3_get_ep_mapping(
+			ep_idx = ipa_get_ep_mapping(
 				ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[
 					i].pipes_client_type[j]);
 			if (ep_idx == -1) {
@@ -1529,7 +1529,7 @@ static int ipa_stats_get_alloc_info(unsigned long arg)
 	if (ipa_lnx_agent_ctx.log_type_mask &
 		SPRHD_IPA_LOG_TYPE_GENERIC_STATS) {
 		for (i = 0; i < IPA_CLIENT_MAX; i++) {
-			int ep_idx = ipa3_get_ep_mapping(i);
+			int ep_idx = ipa_get_ep_mapping(i);
 
 			if ((ep_idx == -1) || (!IPA_CLIENT_IS_CONS(i)) ||
 				(IPA_CLIENT_IS_TEST(i)))
@@ -1556,8 +1556,8 @@ static int ipa_stats_get_alloc_info(unsigned long arg)
 
 	/* For WLAN instance */
 	if (ipa_lnx_agent_ctx.log_type_mask & SPRHD_IPA_LOG_TYPE_WLAN_STATS) {
-		ipa_ep_idx_tx = ipa3_get_ep_mapping(IPA_CLIENT_WLAN2_CONS);
-		ipa_ep_idx_rx = ipa3_get_ep_mapping(IPA_CLIENT_WLAN2_PROD);
+		ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN2_CONS);
+		ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_WLAN2_PROD);
 		if ((ipa_ep_idx_tx == -1) || (ipa_ep_idx_rx == -1) ||
 			!ipa3_ctx->ep[ipa_ep_idx_tx].valid ||
 			!ipa3_ctx->ep[ipa_ep_idx_rx].valid) {

+ 1 - 1
drivers/platform/msm/ipa/ipa_v3/ipa_trace.h

@@ -255,7 +255,7 @@ TRACE_EVENT(
 );
 
 TRACE_EVENT(
-	ipa3_tx_dp,
+	ipa_tx_dp,
 
 	TP_PROTO(const struct sk_buff *skb, unsigned long client),
 

+ 4 - 4
drivers/platform/msm/ipa/ipa_v3/ipa_tsp.c

@@ -192,7 +192,7 @@ int ipa_tsp_get_egr_ep(u8 index, struct ipa_ioc_tsp_egress_prod_params *output)
 	output->client = ipa3_ctx->tsp.egr_ep_config[index];
 
 	regval = ipahal_read_reg_n_fields(IPA_ENDP_INIT_PROD_CFG_n,
-		ipa3_get_ep_mapping(output->client), (void *)&prod_cfg);
+		ipa_get_ep_mapping(output->client), (void *)&prod_cfg);
 
 	output->max_out_bytes = prod_cfg.max_output_size << 6; // max_output_size*64
 	output->policing_by_max_out = prod_cfg.max_output_size_drop_enable;
@@ -231,7 +231,7 @@ int ipa_tsp_set_egr_ep(u8 index, const struct ipa_ioc_tsp_egress_prod_params *in
 	ep_tc_mask = GENMASK(input->tc_hi, input->tc_lo);
 	new_tc_range_mask = ipa3_ctx->tsp.egr_tc_range_mask;
 
-	ep_index = ipa3_get_ep_mapping(ipa3_ctx->tsp.egr_ep_config[index]);
+	ep_index = ipa_get_ep_mapping(ipa3_ctx->tsp.egr_ep_config[index]);
 	regval = ipahal_read_reg_n_fields(
 		IPA_ENDP_INIT_PROD_CFG_n, ep_index, (void *)&prod_cfg);
 
@@ -263,7 +263,7 @@ int ipa_tsp_set_egr_ep(u8 index, const struct ipa_ioc_tsp_egress_prod_params *in
 	prod_cfg.max_output_size_drop_enable = input->policing_by_max_out;
 	prod_cfg.egress_tc_lowest = input->tc_lo;
 	prod_cfg.egress_tc_highest = input->tc_hi;
-	if (ipa3_cfg_ep_prod_cfg(ipa3_get_ep_mapping(input->client), &prod_cfg) != 0) {
+	if (ipa3_cfg_ep_prod_cfg(ipa_get_ep_mapping(input->client), &prod_cfg) != 0) {
 		IPAERR("Failed configuring the producer EP.\n");
 		return -EFAULT;
 	}
@@ -344,7 +344,7 @@ int ipa_tsp_reset(void)
 
 	for (i = 0;
 	      i < ipa3_ctx->tsp.egr_ep_max && ipa3_ctx->tsp.egr_ep_config[i] < IPA_CLIENT_MAX; i++)
-		ipa3_cfg_ep_prod_cfg(ipa3_get_ep_mapping(ipa3_ctx->tsp.egr_ep_config[i]),
+		ipa3_cfg_ep_prod_cfg(ipa_get_ep_mapping(ipa3_ctx->tsp.egr_ep_config[i]),
 		      &prod_cfg);
 
 	if (ipa3_ctx->tsp.ingr_tc_tbl.base)

+ 8 - 7
drivers/platform/msm/ipa/ipa_v3/ipa_uc.c

@@ -595,7 +595,7 @@ static void ipa3_event_ring_hdlr(void)
 			e_q->Protocol,
 			e_q->Value.quota_param.ThreasholdReached,
 			e_q->Value.quota_param.usage);
-			if (ipa3_broadcast_wdi_quota_reach_ind(0,
+			if (ipa_broadcast_wdi_quota_reach_ind(0,
 				e_q->Value.quota_param.usage))
 				IPAERR_RL("failed on quota_reach for %d\n",
 						e_q->Protocol);
@@ -1127,7 +1127,7 @@ int ipa3_uc_interface_init(void)
 	}
 
 	if (!ipa3_ctx->apply_rg10_wa) {
-		result = ipa3_add_interrupt_handler(IPA_UC_IRQ_0,
+		result = ipa_add_interrupt_handler(IPA_UC_IRQ_0,
 			ipa3_uc_event_handler, true,
 			ipa3_ctx);
 		if (result) {
@@ -1136,7 +1136,7 @@ int ipa3_uc_interface_init(void)
 			goto irq_fail0;
 		}
 
-		result = ipa3_add_interrupt_handler(IPA_UC_IRQ_1,
+		result = ipa_add_interrupt_handler(IPA_UC_IRQ_1,
 			ipa3_uc_response_hdlr, true,
 			ipa3_ctx);
 		if (result) {
@@ -1145,7 +1145,7 @@ int ipa3_uc_interface_init(void)
 			goto irq_fail1;
 		}
 
-		result = ipa3_add_interrupt_handler(IPA_UC_IRQ_2,
+		result = ipa_add_interrupt_handler(IPA_UC_IRQ_2,
 			ipa3_uc_wigig_misc_int_handler, true,
 			ipa3_ctx);
 		if (result) {
@@ -1210,7 +1210,7 @@ void ipa3_uc_load_notify(void)
 
 	ipa3_init_interrupts();
 
-	result = ipa3_add_interrupt_handler(IPA_UC_IRQ_0,
+	result = ipa_add_interrupt_handler(IPA_UC_IRQ_0,
 		ipa3_uc_event_handler, true,
 		ipa3_ctx);
 	if (result)
@@ -1292,7 +1292,7 @@ int ipa3_uc_is_gsi_channel_empty(enum ipa_client_type ipa_client)
 	union IpaHwChkChEmptyCmdData_t cmd;
 	int ret;
 
-	gsi_ep_info = ipa3_get_gsi_ep_info(ipa_client);
+	gsi_ep_info = ipa_get_gsi_ep_info(ipa_client);
 	if (!gsi_ep_info) {
 		IPAERR("Failed getting GSI EP info for client=%d\n",
 		       ipa_client);
@@ -1792,7 +1792,7 @@ free_cmd:
 	return res;
 }
 
-int ipa3_uc_bw_monitor(struct ipa_wdi_bw_info *info)
+int ipa_uc_bw_monitor(struct ipa_wdi_bw_info *info)
 {
 	int i, ind, res = 0;
 	struct ipa_mem_buffer cmd;
@@ -1889,6 +1889,7 @@ free_cmd:
 
 	return res;
 }
+EXPORT_SYMBOL(ipa_uc_bw_monitor);
 
 int ipa3_set_wlan_tx_info(struct ipa_wdi_tx_info *info)
 {

+ 1 - 1
drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c

@@ -3,7 +3,7 @@
  * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  */
 
-#include <linux/ipa.h>
+#include "ipa.h"
 #include "ipa_i.h"
 
 /* MHI uC interface definitions */

+ 1 - 1
drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h

@@ -6,7 +6,7 @@
 #ifndef _IPA_UC_OFFLOAD_I_H_
 #define _IPA_UC_OFFLOAD_I_H_
 
-#include <linux/ipa.h>
+#include "ipa.h"
 #include "ipa_i.h"
 
 /*

+ 61 - 46
drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c

@@ -475,7 +475,7 @@ int ipa3_get_wdi_gsi_stats(struct ipa_uc_dbg_ring_stats *stats)
 }
 
 /**
- * ipa3_get_wdi_stats() - Query WDI statistics from uc
+ * ipa_get_wdi_stats() - Query WDI statistics from uc
  * @stats:	[inout] stats blob from client populated by driver
  *
  * Returns:	0 on success, negative on failure
@@ -483,7 +483,7 @@ int ipa3_get_wdi_gsi_stats(struct ipa_uc_dbg_ring_stats *stats)
  * @note Cannot be called from atomic context
  *
  */
-int ipa3_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats)
+int ipa_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats)
 {
 #define TX_STATS(y) stats->tx_ch_stats.y = \
 	ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio->tx_ch_stats.y
@@ -545,6 +545,7 @@ int ipa3_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats)
 
 	return 0;
 }
+EXPORT_SYMBOL(ipa_get_wdi_stats);
 
 int ipa3_wdi_init(void)
 {
@@ -1155,7 +1156,7 @@ static int ipa3_wdi2_gsi_alloc_channel_ring(
 	int result = -EFAULT;
 	const struct ipa_gsi_ep_config *ep_cfg;
 
-	ep_cfg = ipa3_get_gsi_ep_info(client);
+	ep_cfg = ipa_get_gsi_ep_info(client);
 	if (!ep_cfg) {
 		IPAERR("Failed getting GSI EP info for client=%d\n",
 				client);
@@ -1226,7 +1227,7 @@ int ipa3_connect_gsi_wdi_pipe(struct ipa_wdi_in_params *in,
 	uint32_t addr_low, addr_high;
 	bool is_evt_rn_db_pcie_addr, is_txr_rn_db_pcie_addr;
 
-	ipa_ep_idx = ipa3_get_ep_mapping(in->sys.client);
+	ipa_ep_idx = ipa_get_ep_mapping(in->sys.client);
 	if (ipa_ep_idx == -1) {
 		IPAERR("fail to alloc EP.\n");
 		goto fail;
@@ -1582,7 +1583,7 @@ int ipa3_connect_gsi_wdi_pipe(struct ipa_wdi_in_params *in,
 	if (IPA_CLIENT_IS_PROD(in->sys.client)) {
 		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
 		ep_cfg_ctrl.ipa_ep_delay = true;
-		ipa3_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl);
+		ipa_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl);
 	}
 
 	ep->gsi_mem_info.chan_ring_len = gsi_channel_props.ring_len;
@@ -1735,7 +1736,7 @@ fail:
 }
 
 /**
- * ipa3_connect_wdi_pipe() - WDI client connect
+ * ipa_connect_wdi_pipe() - WDI client connect
  * @in:	[in] input parameters from client
  * @out: [out] output params to client
  *
@@ -1743,7 +1744,7 @@ fail:
  *
  * Note:	Should not be called from atomic context
  */
-int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in,
+int ipa_connect_wdi_pipe(struct ipa_wdi_in_params *in,
 		struct ipa_wdi_out_params *out)
 {
 	int ipa_ep_idx;
@@ -1760,6 +1761,9 @@ int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in,
 	phys_addr_t pa;
 	u32 len;
 
+	if (ipa3_ctx->use_pm_wrapper)
+		return ipa_pm_wrapper_connect_wdi_pipe(in, out);
+
 	if (in == NULL || out == NULL || in->sys.client >= IPA_CLIENT_MAX) {
 		IPAERR("bad parm. in=%pK out=%pK\n", in, out);
 		if (in)
@@ -1792,7 +1796,7 @@ int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in,
 	if (result)
 		return result;
 
-	ipa_ep_idx = ipa3_get_ep_mapping(in->sys.client);
+	ipa_ep_idx = ipa_get_ep_mapping(in->sys.client);
 	if (ipa_ep_idx == -1) {
 		IPAERR("fail to alloc EP.\n");
 		goto fail;
@@ -2199,7 +2203,7 @@ int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in,
 	if (IPA_CLIENT_IS_PROD(in->sys.client)) {
 		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
 		ep_cfg_ctrl.ipa_ep_delay = true;
-		ipa3_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl);
+		ipa_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl);
 	}
 
 	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
@@ -2269,7 +2273,7 @@ dma_alloc_fail:
 fail:
 	return result;
 }
-EXPORT_SYMBOL(ipa3_connect_wdi_pipe);
+EXPORT_SYMBOL(ipa_connect_wdi_pipe);
 
 int ipa3_disconnect_gsi_wdi_pipe(u32 clnt_hdl)
 {
@@ -2316,19 +2320,22 @@ fail_dealloc_channel:
 }
 
 /**
- * ipa3_disconnect_wdi_pipe() - WDI client disconnect
+ * ipa_disconnect_wdi_pipe() - WDI client disconnect
  * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
  *
  * Returns:	0 on success, negative on failure
  *
  * Note:	Should not be called from atomic context
  */
-int ipa3_disconnect_wdi_pipe(u32 clnt_hdl)
+int ipa_disconnect_wdi_pipe(u32 clnt_hdl)
 {
 	int result = 0;
 	struct ipa3_ep_context *ep;
 	union IpaHwWdiCommonChCmdData_t tear;
 
+	if (ipa3_ctx->use_pm_wrapper)
+		return ipa_pm_wrapper_disconnect_wdi_pipe(clnt_hdl);
+
 	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
 	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
 		IPAERR("bad parm, %d\n", clnt_hdl);
@@ -2383,7 +2390,7 @@ uc_timeout:
 	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
 	return result;
 }
-EXPORT_SYMBOL(ipa3_disconnect_wdi_pipe);
+EXPORT_SYMBOL(ipa_disconnect_wdi_pipe);
 
 int ipa3_enable_gsi_wdi_pipe(u32 clnt_hdl)
 {
@@ -2401,7 +2408,7 @@ int ipa3_enable_gsi_wdi_pipe(u32 clnt_hdl)
 		return -EFAULT;
 	}
 
-	ipa_ep_idx = ipa3_get_ep_mapping(ipa3_get_client_mapping(clnt_hdl));
+	ipa_ep_idx = ipa_get_ep_mapping(ipa3_get_client_mapping(clnt_hdl));
 	if (ipa_ep_idx == -1) {
 		IPAERR("fail to alloc EP.\n");
 		return -EPERM;
@@ -2410,7 +2417,7 @@ int ipa3_enable_gsi_wdi_pipe(u32 clnt_hdl)
 	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
 
 	memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
-	ipa3_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl);
+	ipa_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl);
 
 	if (IPA_CLIENT_IS_CONS(ep->client)) {
 		memset(&holb_cfg, 0, sizeof(holb_cfg));
@@ -2460,9 +2467,9 @@ int ipa3_disable_gsi_wdi_pipe(u32 clnt_hdl)
 			clnt_hdl, ep->client);
 		/* remove delay on wlan-prod pipe*/
 		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
-		ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+		ipa_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
 
-		cons_hdl = ipa3_get_ep_mapping(IPA_CLIENT_WLAN1_CONS);
+		cons_hdl = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_CONS);
 		if (cons_hdl == IPA_EP_NOT_ALLOCATED) {
 			IPAERR("Client %u is not mapped\n",
 				IPA_CLIENT_WLAN1_CONS);
@@ -2486,7 +2493,7 @@ int ipa3_disable_gsi_wdi_pipe(u32 clnt_hdl)
 	if (IPA_CLIENT_IS_PROD(ep->client)) {
 		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
 		ep_cfg_ctrl.ipa_ep_delay = true;
-		ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+		ipa_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
 	}
 	ep->gsi_offload_state &= ~IPA_WDI_ENABLED;
 	IPADBG("client (ep: %d) disabled\n", clnt_hdl);
@@ -2496,20 +2503,23 @@ gsi_timeout:
 	return result;
 }
 /**
- * ipa3_enable_wdi_pipe() - WDI client enable
+ * ipa_enable_wdi_pipe() - WDI client enable
  * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
  *
  * Returns:	0 on success, negative on failure
  *
  * Note:	Should not be called from atomic context
  */
-int ipa3_enable_wdi_pipe(u32 clnt_hdl)
+int ipa_enable_wdi_pipe(u32 clnt_hdl)
 {
 	int result = 0;
 	struct ipa3_ep_context *ep;
 	union IpaHwWdiCommonChCmdData_t enable;
 	struct ipa_ep_cfg_holb holb_cfg;
 
+	if (ipa3_ctx->use_pm_wrapper)
+		return ipa_pm_wrapper_enable_wdi_pipe(clnt_hdl);
+
 	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
 	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
 		IPAERR("bad parm, %d\n", clnt_hdl);
@@ -2558,17 +2568,17 @@ uc_timeout:
 	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
 	return result;
 }
-EXPORT_SYMBOL(ipa3_enable_wdi_pipe);
+EXPORT_SYMBOL(ipa_enable_wdi_pipe);
 
 /**
- * ipa3_disable_wdi_pipe() - WDI client disable
+ * ipa_disable_wdi_pipe() - WDI client disable
  * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
  *
  * Returns:	0 on success, negative on failure
  *
  * Note:	Should not be called from atomic context
  */
-int ipa3_disable_wdi_pipe(u32 clnt_hdl)
+int ipa_disable_wdi_pipe(u32 clnt_hdl)
 {
 	int result = 0;
 	struct ipa3_ep_context *ep;
@@ -2576,6 +2586,9 @@ int ipa3_disable_wdi_pipe(u32 clnt_hdl)
 	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
 	u32 cons_hdl;
 
+	if (ipa3_ctx->use_pm_wrapper)
+		return ipa_pm_wrapper_disable_pipe(clnt_hdl);
+
 	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
 	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
 		IPAERR("bad parm, %d\n", clnt_hdl);
@@ -2617,9 +2630,9 @@ int ipa3_disable_wdi_pipe(u32 clnt_hdl)
 			clnt_hdl, ep->client);
 		/* remove delay on wlan-prod pipe*/
 		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
-		ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+		ipa_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
 
-		cons_hdl = ipa3_get_ep_mapping(IPA_CLIENT_WLAN1_CONS);
+		cons_hdl = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_CONS);
 		if (cons_hdl == IPA_EP_NOT_ALLOCATED) {
 			IPAERR("Client %u is not mapped\n",
 				IPA_CLIENT_WLAN1_CONS);
@@ -2655,7 +2668,7 @@ int ipa3_disable_wdi_pipe(u32 clnt_hdl)
 	if (IPA_CLIENT_IS_PROD(ep->client)) {
 		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
 		ep_cfg_ctrl.ipa_ep_delay = true;
-		ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+		ipa_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
 	}
 	ep->uc_offload_state &= ~IPA_WDI_ENABLED;
 	IPADBG("client (ep: %d) disabled\n", clnt_hdl);
@@ -2665,7 +2678,7 @@ uc_timeout:
 	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
 	return result;
 }
-EXPORT_SYMBOL(ipa3_disable_wdi_pipe);
+EXPORT_SYMBOL(ipa_disable_wdi_pipe);
 
 int ipa3_resume_gsi_wdi_pipe(u32 clnt_hdl)
 {
@@ -2688,7 +2701,7 @@ int ipa3_resume_gsi_wdi_pipe(u32 clnt_hdl)
 	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
 
 	memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
-	result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+	result = ipa_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
 	if (result)
 		IPAERR("client (ep: %d) fail un-susp/delay result=%d\n",
 				clnt_hdl, result);
@@ -2746,14 +2759,14 @@ int ipa3_resume_gsi_wdi_pipe(u32 clnt_hdl)
 }
 
 /**
- * ipa3_resume_wdi_pipe() - WDI client resume
+ * ipa_resume_wdi_pipe() - WDI client resume
  * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
  *
  * Returns:	0 on success, negative on failure
  *
  * Note:	Should not be called from atomic context
  */
-int ipa3_resume_wdi_pipe(u32 clnt_hdl)
+int ipa_resume_wdi_pipe(u32 clnt_hdl)
 {
 	int result = 0;
 	struct ipa3_ep_context *ep;
@@ -2796,7 +2809,7 @@ int ipa3_resume_wdi_pipe(u32 clnt_hdl)
 	}
 
 	memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
-	result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+	result = ipa_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
 	if (result)
 		IPAERR("client (ep: %d) fail un-susp/delay result=%d\n",
 				clnt_hdl, result);
@@ -2809,7 +2822,7 @@ int ipa3_resume_wdi_pipe(u32 clnt_hdl)
 uc_timeout:
 	return result;
 }
-EXPORT_SYMBOL(ipa3_resume_wdi_pipe);
+EXPORT_SYMBOL(ipa_resume_wdi_pipe);
 
 int ipa3_suspend_gsi_wdi_pipe(u32 clnt_hdl)
 {
@@ -2825,7 +2838,7 @@ int ipa3_suspend_gsi_wdi_pipe(u32 clnt_hdl)
 	union __packed gsi_channel_scratch gsi_scratch;
 	struct IpaHwOffloadStatsAllocCmdData_t *pcmd_t = NULL;
 
-	ipa_ep_idx = ipa3_get_ep_mapping(ipa3_get_client_mapping(clnt_hdl));
+	ipa_ep_idx = ipa_get_ep_mapping(ipa3_get_client_mapping(clnt_hdl));
 	if (ipa_ep_idx < 0) {
 		IPAERR("IPA client mapping failed\n");
 		return -EPERM;
@@ -2867,7 +2880,7 @@ int ipa3_suspend_gsi_wdi_pipe(u32 clnt_hdl)
 			}
 		}
 retry_gsi_stop:
-		res = ipa3_stop_gsi_channel(ipa_ep_idx);
+		res = ipa_stop_gsi_channel(ipa_ep_idx);
 		if (res != 0 && res != -GSI_STATUS_AGAIN &&
 				res != -GSI_STATUS_TIMED_OUT) {
 			IPAERR("failed to stop channel res = %d\n", res);
@@ -2927,14 +2940,14 @@ fail_stop_channel:
 }
 
 /**
- * ipa3_suspend_wdi_pipe() - WDI client suspend
+ * ipa_suspend_wdi_pipe() - WDI client suspend
  * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
  *
  * Returns:	0 on success, negative on failure
  *
  * Note:	Should not be called from atomic context
  */
-int ipa3_suspend_wdi_pipe(u32 clnt_hdl)
+int ipa_suspend_wdi_pipe(u32 clnt_hdl)
 {
 	int result = 0;
 	struct ipa3_ep_context *ep;
@@ -3018,7 +3031,7 @@ int ipa3_suspend_wdi_pipe(u32 clnt_hdl)
 	if (IPA_CLIENT_IS_CONS(ep->client)) {
 		if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
 			ep_cfg_ctrl.ipa_ep_suspend = true;
-			result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+			result = ipa_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
 			if (result)
 				IPAERR("(ep: %d) failed to suspend result=%d\n",
 						clnt_hdl, result);
@@ -3027,7 +3040,7 @@ int ipa3_suspend_wdi_pipe(u32 clnt_hdl)
 		}
 	} else {
 		ep_cfg_ctrl.ipa_ep_delay = true;
-		result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+		result = ipa_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
 		if (result)
 			IPAERR("client (ep: %d) failed to delay result=%d\n",
 					clnt_hdl, result);
@@ -3058,7 +3071,7 @@ int ipa3_suspend_wdi_pipe(u32 clnt_hdl)
 uc_timeout:
 	return result;
 }
-EXPORT_SYMBOL(ipa3_suspend_wdi_pipe);
+EXPORT_SYMBOL(ipa_suspend_wdi_pipe);
 
 /**
  * ipa_broadcast_wdi_quota_reach_ind() - quota reach
@@ -3067,7 +3080,7 @@ EXPORT_SYMBOL(ipa3_suspend_wdi_pipe);
  *
  * Returns:	0 on success, negative on failure
  */
-int ipa3_broadcast_wdi_quota_reach_ind(uint32_t fid,
+int ipa_broadcast_wdi_quota_reach_ind(uint32_t fid,
 	uint64_t num_bytes)
 {
 	IPAERR_RL("Quota reached indication on fid(%d) Mbytes(%lu)\n",
@@ -3075,6 +3088,7 @@ int ipa3_broadcast_wdi_quota_reach_ind(uint32_t fid,
 	ipa3_broadcast_quota_reach_ind(0, IPA_UPSTEAM_WLAN, false);
 	return 0;
 }
+EXPORT_SYMBOL(ipa_broadcast_wdi_quota_reach_ind);
 
 int ipa3_write_qmapid_gsi_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
 {
@@ -3211,7 +3225,7 @@ int ipa3_uc_dereg_rdyCB(void)
 
 
 /**
- * ipa3_uc_wdi_get_dbpa() - To retrieve
+ * ipa_uc_wdi_get_dbpa() - To retrieve
  * doorbell physical address of wlan pipes
  * @param:  [in/out] input/output parameters
  *          from/to client
@@ -3219,7 +3233,7 @@ int ipa3_uc_dereg_rdyCB(void)
  * Returns:	0 on success, negative on failure
  *
  */
-int ipa3_uc_wdi_get_dbpa(
+int ipa_uc_wdi_get_dbpa(
 	struct ipa_wdi_db_params *param)
 {
 	if (param == NULL || param->client >= IPA_CLIENT_MAX) {
@@ -3245,6 +3259,7 @@ int ipa3_uc_wdi_get_dbpa(
 
 	return 0;
 }
+EXPORT_SYMBOL(ipa_uc_wdi_get_dbpa);
 
 static void ipa3_uc_wdi_loaded_handler(void)
 {
@@ -3384,7 +3399,7 @@ int ipa_pm_wrapper_connect_wdi_pipe(struct ipa_wdi_in_params *in,
 		ipa_pm_wdi_ctx.curr_pm_state = IPA_PM_WDI_PM_REGISTERED;
 	}
 
-	if (ipa3_connect_wdi_pipe(in,out)) {
+	if (ipa_connect_wdi_pipe(in,out)) {
 		IPAERR("fail to setup pipe\n");
 		ret = -EFAULT;
 		return ret;
@@ -3402,7 +3417,7 @@ int ipa_pm_wrapper_disconnect_wdi_pipe(u32 clnt_hdl)
 		IPAERR("Unexpected current ipa pm state\n");
 		return -EFAULT;
 	}
-	if (ipa3_disconnect_wdi_pipe(clnt_hdl)) {
+	if (ipa_disconnect_wdi_pipe(clnt_hdl)) {
 		IPAERR("fail to tear down pipe\n");
 		return -EFAULT;
 	}
@@ -3440,7 +3455,7 @@ int ipa_pm_wrapper_enable_wdi_pipe(u32 clnt_hdl)
 		ipa_pm_wdi_ctx.curr_pm_state = IPA_PM_WDI_PM_ACTIVATE;
 	}
 
-	if (ipa3_enable_wdi_pipe(clnt_hdl)) {
+	if (ipa_enable_wdi_pipe(clnt_hdl)) {
 		IPAERR("fail to enable wdi pipe\n");
 		return -EFAULT;
 	}
@@ -3458,7 +3473,7 @@ int ipa_pm_wrapper_disable_pipe(u32 clnt_hdl)
 		return -EFAULT;
 	}
 
-	if (ipa3_disable_wdi_pipe(clnt_hdl)) {
+	if (ipa_disable_wdi_pipe(clnt_hdl)) {
 		IPAERR("fail to disable wdi pipe\n");
 		return -EFAULT;
 	}

+ 81 - 73
drivers/platform/msm/ipa/ipa_v3/ipa_utils.c

@@ -6724,11 +6724,11 @@ int ipa3_get_clients_from_rm_resource(
 
 	switch (resource) {
 	case IPA_RM_RESOURCE_USB_CONS:
-		if (ipa3_get_ep_mapping(IPA_CLIENT_USB_CONS) != -1)
+		if (ipa_get_ep_mapping(IPA_CLIENT_USB_CONS) != -1)
 			clients->names[i++] = IPA_CLIENT_USB_CONS;
 		break;
 	case IPA_RM_RESOURCE_USB_DPL_CONS:
-		if (ipa3_get_ep_mapping(IPA_CLIENT_USB_DPL_CONS) != -1)
+		if (ipa_get_ep_mapping(IPA_CLIENT_USB_DPL_CONS) != -1)
 			clients->names[i++] = IPA_CLIENT_USB_DPL_CONS;
 		break;
 	case IPA_RM_RESOURCE_HSIC_CONS:
@@ -6751,7 +6751,7 @@ int ipa3_get_clients_from_rm_resource(
 		clients->names[i++] = IPA_CLIENT_ETHERNET_CONS;
 		break;
 	case IPA_RM_RESOURCE_USB_PROD:
-		if (ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD) != -1)
+		if (ipa_get_ep_mapping(IPA_CLIENT_USB_PROD) != -1)
 			clients->names[i++] = IPA_CLIENT_USB_PROD;
 		break;
 	case IPA_RM_RESOURCE_HSIC_PROD:
@@ -6785,7 +6785,7 @@ bool ipa3_should_pipe_be_suspended(enum ipa_client_type client)
 	struct ipa3_ep_context *ep;
 	int ipa_ep_idx;
 
-	ipa_ep_idx = ipa3_get_ep_mapping(client);
+	ipa_ep_idx = ipa_get_ep_mapping(client);
 	if (ipa_ep_idx == -1) {
 		IPAERR("Invalid client.\n");
 		WARN_ON(1);
@@ -6844,7 +6844,7 @@ static bool ipa3_should_pipe_channel_be_stopped(enum ipa_client_type client)
 	if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0)
 		return false;
 
-	ipa_ep_idx = ipa3_get_ep_mapping(client);
+	ipa_ep_idx = ipa_get_ep_mapping(client);
 	if (ipa_ep_idx == -1) {
 		IPAERR("Invalid client.\n");
 		WARN_ON(1);
@@ -6891,7 +6891,7 @@ int ipa3_suspend_resource_sync(enum ipa_rm_resource_name resource)
 
 	for (index = 0; index < clients.length; index++) {
 		client = clients.names[index];
-		ipa_ep_idx = ipa3_get_ep_mapping(client);
+		ipa_ep_idx = ipa_get_ep_mapping(client);
 		if (ipa_ep_idx == -1) {
 			IPAERR("Invalid client.\n");
 			res = -EINVAL;
@@ -6904,7 +6904,7 @@ int ipa3_suspend_resource_sync(enum ipa_rm_resource_name resource)
 				/* suspend endpoint */
 				memset(&suspend, 0, sizeof(suspend));
 				suspend.ipa_ep_suspend = true;
-				ipa3_cfg_ep_ctrl(ipa_ep_idx, &suspend);
+				ipa_cfg_ep_ctrl(ipa_ep_idx, &suspend);
 				pipe_suspended = true;
 			}
 		}
@@ -6913,7 +6913,7 @@ int ipa3_suspend_resource_sync(enum ipa_rm_resource_name resource)
 			ipa3_should_pipe_channel_be_stopped(client)) {
 			if (ipa3_ctx->ep[ipa_ep_idx].valid) {
 				/* Stop GSI channel */
-				res = ipa3_stop_gsi_channel(ipa_ep_idx);
+				res = ipa_stop_gsi_channel(ipa_ep_idx);
 				if (res) {
 					IPAERR("failed stop gsi ch %lu\n",
 					ipa3_ctx->ep[ipa_ep_idx].gsi_chan_hdl);
@@ -6963,7 +6963,7 @@ int ipa3_suspend_resource_no_block(enum ipa_rm_resource_name resource)
 
 	for (index = 0; index < clients.length; index++) {
 		client = clients.names[index];
-		ipa_ep_idx = ipa3_get_ep_mapping(client);
+		ipa_ep_idx = ipa_get_ep_mapping(client);
 		if (ipa_ep_idx == -1) {
 			IPAERR("Invalid client.\n");
 			res = -EINVAL;
@@ -6976,7 +6976,7 @@ int ipa3_suspend_resource_no_block(enum ipa_rm_resource_name resource)
 				/* suspend endpoint */
 				memset(&suspend, 0, sizeof(suspend));
 				suspend.ipa_ep_suspend = true;
-				ipa3_cfg_ep_ctrl(ipa_ep_idx, &suspend);
+				ipa_cfg_ep_ctrl(ipa_ep_idx, &suspend);
 			}
 		}
 
@@ -7025,7 +7025,7 @@ int ipa3_resume_resource(enum ipa_rm_resource_name resource)
 
 	for (index = 0; index < clients.length; index++) {
 		client = clients.names[index];
-		ipa_ep_idx = ipa3_get_ep_mapping(client);
+		ipa_ep_idx = ipa_get_ep_mapping(client);
 		if (ipa_ep_idx == -1) {
 			IPAERR("Invalid client.\n");
 			res = -EINVAL;
@@ -7042,7 +7042,7 @@ int ipa3_resume_resource(enum ipa_rm_resource_name resource)
 			if (ipa3_ctx->ep[ipa_ep_idx].valid) {
 				memset(&suspend, 0, sizeof(suspend));
 				suspend.ipa_ep_suspend = false;
-				ipa3_cfg_ep_ctrl(ipa_ep_idx, &suspend);
+				ipa_cfg_ep_ctrl(ipa_ep_idx, &suspend);
 			}
 		}
 
@@ -7558,12 +7558,12 @@ int ipa3_init_hw(void)
 }
 
 /**
- * ipa3_get_ep_mapping() - provide endpoint mapping
+ * ipa_get_ep_mapping() - provide endpoint mapping
  * @client: client type
  *
  * Return value: endpoint mapping
  */
-int ipa3_get_ep_mapping(enum ipa_client_type client)
+int ipa_get_ep_mapping(enum ipa_client_type client)
 {
 	int ipa_ep_idx;
 	u8 hw_idx;
@@ -7586,14 +7586,15 @@ int ipa3_get_ep_mapping(enum ipa_client_type client)
 
 	return ipa_ep_idx;
 }
+EXPORT_SYMBOL(ipa_get_ep_mapping);
 
 /**
- * ipa3_get_ep_mapping_from_gsi() - provide endpoint mapping
+ * ipa_get_ep_mapping_from_gsi() - provide endpoint mapping
  * @ch_id: GSI Virt CH id
  *
  * Return value: endpoint mapping
  */
-int ipa3_get_ep_mapping_from_gsi(int ch_id)
+int ipa_get_ep_mapping_from_gsi(int ch_id)
 {
 	int ipa_ep_idx = IPA_EP_NOT_ALLOCATED;
 	u8 hw_idx;
@@ -7619,12 +7620,12 @@ int ipa3_get_ep_mapping_from_gsi(int ch_id)
 }
 
 /**
- * ipa3_get_gsi_ep_info() - provide gsi ep information
+ * ipa_get_gsi_ep_info() - provide gsi ep information
  * @client: IPA client value
  *
  * Return value: pointer to ipa_gsi_ep_info
  */
-const struct ipa_gsi_ep_config *ipa3_get_gsi_ep_info
+const struct ipa_gsi_ep_config *ipa_get_gsi_ep_info
 	(enum ipa_client_type client)
 {
 	int ep_idx;
@@ -7632,7 +7633,7 @@ const struct ipa_gsi_ep_config *ipa3_get_gsi_ep_info
 
 	hw_idx = ipa3_ctx->hw_type_index;
 
-	ep_idx = ipa3_get_ep_mapping(client);
+	ep_idx = ipa_get_ep_mapping(client);
 	if (ep_idx == IPA_EP_NOT_ALLOCATED)
 		return NULL;
 
@@ -7642,6 +7643,7 @@ const struct ipa_gsi_ep_config *ipa3_get_gsi_ep_info
 	return &(ipa3_ep_mapping[hw_idx]
 		[client].ipa_gsi_ep_info);
 }
+EXPORT_SYMBOL(ipa_get_gsi_ep_info);
 
 /**
  * ipa_get_ep_group() - provide endpoint group by client
@@ -8560,13 +8562,13 @@ int ipa3_cfg_ep_ulso(u32 clnt_hdl, const struct ipa_ep_cfg_ulso *ep_ulso)
 }
 
 /**
- * ipa3_cfg_ep_ctrl() -  IPA end-point Control configuration
+ * ipa_cfg_ep_ctrl() -  IPA end-point Control configuration
  * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
  * @ipa_ep_cfg_ctrl:	[in] IPA end-point configuration params
  *
  * Returns:	0 on success, negative on failure
  */
-int ipa3_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl)
+int ipa_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl)
 {
 	int code = 0, result;
 	struct ipa3_ep_context *ep;
@@ -8630,6 +8632,7 @@ int ipa3_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl)
 
 	return 0;
 }
+EXPORT_SYMBOL(ipa_cfg_ep_ctrl);
 
 const char *ipa3_get_mode_type_str(enum ipa_mode_type mode)
 {
@@ -8675,7 +8678,7 @@ int ipa3_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ep_mode)
 		return -EINVAL;
 	}
 
-	ep = ipa3_get_ep_mapping(ep_mode->dst);
+	ep = ipa_get_ep_mapping(ep_mode->dst);
 	if (ep == -1 && ep_mode->mode == IPA_DMA) {
 		IPAERR("dst %d does not exist in DMA mode\n", ep_mode->dst);
 		return -EINVAL;
@@ -8684,7 +8687,7 @@ int ipa3_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ep_mode)
 	WARN_ON(ep_mode->mode == IPA_DMA && IPA_CLIENT_IS_PROD(ep_mode->dst));
 
 	if (!IPA_CLIENT_IS_CONS(ep_mode->dst))
-		ep = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+		ep = ipa_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
 
 	IPADBG("pipe=%d mode=%d(%s), dst_client_number=%d\n",
 			clnt_hdl,
@@ -9210,7 +9213,7 @@ success:
 int ipa3_cfg_ep_holb_by_client(enum ipa_client_type client,
 				const struct ipa_ep_cfg_holb *ep_holb)
 {
-	return ipa3_cfg_ep_holb(ipa3_get_ep_mapping(client), ep_holb);
+	return ipa3_cfg_ep_holb(ipa_get_ep_mapping(client), ep_holb);
 }
 
 /**
@@ -9338,7 +9341,7 @@ int ipa3_write_qmap_id(struct ipa_ioc_write_qmapid *param_in)
 		goto fail;
 	}
 
-	ipa_ep_idx = ipa3_get_ep_mapping(param_in->client);
+	ipa_ep_idx = ipa_get_ep_mapping(param_in->client);
 	if (ipa_ep_idx == -1) {
 		IPAERR_RL("Invalid client.\n");
 		goto fail;
@@ -9404,13 +9407,13 @@ void ipa3_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size)
 }
 
 /**
- * ipa3_set_aggr_mode() - Set the aggregation mode which is a global setting
+ * ipa_set_aggr_mode() - Set the aggregation mode which is a global setting
  * @mode:	[in] the desired aggregation mode for e.g. straight MBIM, QCNCM,
  * etc
  *
  * Returns:	0 on success
  */
-int ipa3_set_aggr_mode(enum ipa_aggr_mode mode)
+int ipa_set_aggr_mode(enum ipa_aggr_mode mode)
 {
 	struct ipahal_reg_qcncm qcncm;
 
@@ -9429,9 +9432,10 @@ int ipa3_set_aggr_mode(enum ipa_aggr_mode mode)
 
 	return 0;
 }
+EXPORT_SYMBOL(ipa_set_aggr_mode);
 
 /**
- * ipa3_set_qcncm_ndp_sig() - Set the NDP signature used for QCNCM aggregation
+ * ipa_set_qcncm_ndp_sig() - Set the NDP signature used for QCNCM aggregation
  * mode
  * @sig:	[in] the first 3 bytes of QCNCM NDP signature (expected to be
  * "QND")
@@ -9441,7 +9445,7 @@ int ipa3_set_aggr_mode(enum ipa_aggr_mode mode)
  *
  * Returns:	0 on success, negative on failure
  */
-int ipa3_set_qcncm_ndp_sig(char sig[3])
+int ipa_set_qcncm_ndp_sig(char sig[3])
 {
 	struct ipahal_reg_qcncm qcncm;
 
@@ -9462,15 +9466,16 @@ int ipa3_set_qcncm_ndp_sig(char sig[3])
 
 	return 0;
 }
+EXPORT_SYMBOL(ipa_set_qcncm_ndp_sig);
 
 /**
- * ipa3_set_single_ndp_per_mbim() - Enable/disable single NDP per MBIM frame
+ * ipa_set_single_ndp_per_mbim() - Enable/disable single NDP per MBIM frame
  * configuration
  * @enable:	[in] true for single NDP/MBIM; false otherwise
  *
  * Returns:	0 on success
  */
-int ipa3_set_single_ndp_per_mbim(bool enable)
+int ipa_set_single_ndp_per_mbim(bool enable)
 {
 	struct ipahal_reg_single_ndp_mode mode;
 
@@ -9487,6 +9492,7 @@ int ipa3_set_single_ndp_per_mbim(bool enable)
 
 	return 0;
 }
+EXPORT_SYMBOL(ipa_set_single_ndp_per_mbim);
 
 /**
  * ipa3_straddle_boundary() - Checks whether a memory buffer straddles a
@@ -10312,7 +10318,7 @@ int ipa3_tag_process(struct ipa3_desc desc[],
 	 * by immediate command. So, REQUIRED_TAG_PROCESS_DESCRIPTORS
 	 * should be incremented by 1 to overcome buffer overflow.
 	 */
-	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1)
+	if (ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1)
 		req_num_tag_desc += 1;
 
 	/* Not enough room for the required descriptors for the tag process */
@@ -10323,7 +10329,7 @@ int ipa3_tag_process(struct ipa3_desc desc[],
 		return -ENOMEM;
 	}
 
-	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
+	ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
 	if (-1 == ep_idx) {
 		IPAERR("Client %u is not mapped\n",
 			IPA_CLIENT_APPS_CMD_PROD);
@@ -10349,8 +10355,8 @@ int ipa3_tag_process(struct ipa3_desc desc[],
 	}
 
 	/* IC to close the coal frame before HPS Clear if coal is enabled */
-	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1) {
-		ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+	if (ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1) {
+		ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
 		reg_write_coal_close.skip_pipeline_clear = false;
 		if (ipa3_ctx->ulso_wa) {
 			reg_write_coal_close.pipeline_clear_options = IPAHAL_SRC_GRP_CLEAR;
@@ -10427,7 +10433,7 @@ int ipa3_tag_process(struct ipa3_desc desc[],
 
 	/* IP_PACKET_INIT IC for tag status to be sent to apps */
 	pktinit_cmd.destination_pipe_index =
-		ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+		ipa_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
 	cmd_pyld = ipahal_construct_imm_cmd(
 		IPA_IMM_CMD_IP_PACKET_INIT, &pktinit_cmd, false);
 	if (!cmd_pyld) {
@@ -10599,7 +10605,7 @@ static int ipa3_tag_generate_force_close_desc(struct ipa3_desc desc[],
 			continue;
 		/* Skip Coalescing pipe when ulso wa is enabled. */
 		if (ipa3_ctx->ulso_wa &&
-			(i == ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS)))
+			(i == ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS)))
 			continue;
 		IPADBG("Force close ep: %d\n", i);
 		if (desc_idx + 1 > desc_size) {
@@ -10705,12 +10711,12 @@ fail_free_desc:
 }
 
 /**
- * ipa3_is_ready() - check if IPA module was initialized
+ * ipa_is_ready() - check if IPA module was initialized
  * successfully
  *
  * Return value: true for yes; false for no
  */
-bool ipa3_is_ready(void)
+bool ipa_is_ready(void)
 {
 	bool complete;
 
@@ -10721,6 +10727,7 @@ bool ipa3_is_ready(void)
 	mutex_unlock(&ipa3_ctx->lock);
 	return complete;
 }
+EXPORT_SYMBOL(ipa_is_ready);
 
 /**
  * ipa3_is_client_handle_valid() - check if IPA client handle is valid handle
@@ -10895,7 +10902,7 @@ void ipa3_set_tag_process_before_gating(bool val)
 EXPORT_SYMBOL(ipa3_set_tag_process_before_gating);
 
 /**
- * ipa3_is_vlan_mode - check if a LAN driver should load in VLAN mode
+ * ipa_is_vlan_mode - check if a LAN driver should load in VLAN mode
  * @iface - type of vlan capable device
  * @res - query result: true for vlan mode, false for non vlan mode
  *
@@ -10903,7 +10910,7 @@ EXPORT_SYMBOL(ipa3_set_tag_process_before_gating);
  *
  * Returns: 0 on success, negative on failure
  */
-int ipa3_is_vlan_mode(enum ipa_vlan_ifaces iface, bool *res)
+int ipa_is_vlan_mode(enum ipa_vlan_ifaces iface, bool *res)
 {
 	if (!res) {
 		IPAERR("NULL out param\n");
@@ -10915,7 +10922,7 @@ int ipa3_is_vlan_mode(enum ipa_vlan_ifaces iface, bool *res)
 		return -EINVAL;
 	}
 
-	if (!ipa3_is_ready()) {
+	if (!ipa_is_ready()) {
 		IPAERR("IPA is not ready yet\n");
 		return -ENODEV;
 	}
@@ -10925,6 +10932,7 @@ int ipa3_is_vlan_mode(enum ipa_vlan_ifaces iface, bool *res)
 	IPADBG("Driver %d vlan mode is %d\n", iface, *res);
 	return 0;
 }
+EXPORT_SYMBOL(ipa_is_vlan_mode);
 
 /**
  * ipa_is_modem_pipe()- Checks if pipe is owned by the modem
@@ -10945,7 +10953,7 @@ bool ipa_is_modem_pipe(int pipe_idx)
 		if (!IPA_CLIENT_IS_Q6_CONS(client_idx) &&
 			!IPA_CLIENT_IS_Q6_PROD(client_idx))
 			continue;
-		if (ipa3_get_ep_mapping(client_idx) == pipe_idx)
+		if (ipa_get_ep_mapping(client_idx) == pipe_idx)
 			return true;
 	}
 
@@ -11720,7 +11728,7 @@ static bool ipa3_gsi_channel_is_quite(struct ipa3_ep_context *ep)
 	return empty;
 }
 
-static int __ipa3_stop_gsi_channel(u32 clnt_hdl)
+static int __ipa_stop_gsi_channel(u32 clnt_hdl)
 {
 	struct ipa_mem_buffer mem;
 	int res = 0;
@@ -11836,7 +11844,7 @@ static int __ipa3_stop_gsi_channel(u32 clnt_hdl)
 }
 
 /**
- * ipa3_stop_gsi_channel()- Stops a GSI channel in IPA
+ * ipa_stop_gsi_channel()- Stops a GSI channel in IPA
  * @chan_hdl: GSI channel handle
  *
  * This function implements the sequence to stop a GSI channel
@@ -11844,17 +11852,17 @@ static int __ipa3_stop_gsi_channel(u32 clnt_hdl)
  *
  * Return value: 0 on success, negative otherwise
  */
-int ipa3_stop_gsi_channel(u32 clnt_hdl)
+int ipa_stop_gsi_channel(u32 clnt_hdl)
 {
 	int res;
 
 	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
-	res = __ipa3_stop_gsi_channel(clnt_hdl);
+	res = __ipa_stop_gsi_channel(clnt_hdl);
 	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
 
 	return res;
 }
-EXPORT_SYMBOL(ipa3_stop_gsi_channel);
+EXPORT_SYMBOL(ipa_stop_gsi_channel);
 
 static int _ipa_suspend_resume_pipe(enum ipa_client_type client, bool suspend)
 {
@@ -11863,7 +11871,7 @@ static int _ipa_suspend_resume_pipe(enum ipa_client_type client, bool suspend)
 	struct ipa3_ep_context *ep;
 	int res;
 
-	ipa_ep_idx = ipa3_get_ep_mapping(client);
+	ipa_ep_idx = ipa_get_ep_mapping(client);
 	if (ipa_ep_idx < 0) {
 		IPADBG("client %d not configured\n", client);
 		return 0;
@@ -11880,7 +11888,7 @@ static int _ipa_suspend_resume_pipe(enum ipa_client_type client, bool suspend)
 			client == IPA_CLIENT_APPS_LAN_CONS) {
 			memset(&cfg, 0, sizeof(cfg));
 			cfg.ipa_ep_suspend = suspend;
-			ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg);
+			ipa_cfg_ep_ctrl(ipa_ep_idx, &cfg);
 			if (suspend)
 				ipa3_gsi_poll_after_suspend(ep);
 			else if (!atomic_read(&ep->sys->curr_polling_state))
@@ -11898,7 +11906,7 @@ static int _ipa_suspend_resume_pipe(enum ipa_client_type client, bool suspend)
 	 */
 
 	if (suspend) {
-		res = __ipa3_stop_gsi_channel(ipa_ep_idx);
+		res = __ipa_stop_gsi_channel(ipa_ep_idx);
 		if (res) {
 			IPAERR("failed to stop LAN channel\n");
 			ipa_assert();
@@ -12040,7 +12048,7 @@ int ipa3_suspend_apps_pipes(bool suspend)
 		goto undo_odl_cons;
 	}
 
-	odl_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_ODL_DPL_CONS);
+	odl_ep_idx = ipa_get_ep_mapping(IPA_CLIENT_ODL_DPL_CONS);
 	if (odl_ep_idx != IPA_EP_NOT_ALLOCATED && ipa3_ctx->ep[odl_ep_idx].valid) {
 		memset(&holb_cfg, 0, sizeof(holb_cfg));
 		if (suspend)
@@ -12074,7 +12082,7 @@ int ipa3_suspend_apps_pipes(bool suspend)
 		struct ipahal_reg_tx_wrapper tx;
 		int ep_idx;
 
-		ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+		ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
 		if (ep_idx == IPA_EP_NOT_ALLOCATED ||
 				(!ipa3_ctx->ep[ep_idx].valid))
 			goto do_prod;
@@ -12677,7 +12685,7 @@ static void ipa_gsi_setup_reg(void)
 
 	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
 		type = ipa3_get_client_by_pipe(i);
-		gsi_ep_info_cfg = ipa3_get_gsi_ep_info(type);
+		gsi_ep_info_cfg = ipa_get_gsi_ep_info(type);
 		IPAERR("for ep %d client is %d gsi_ep_info_cfg=%pK\n",
 			i, type, gsi_ep_info_cfg);
 		if (!gsi_ep_info_cfg)
@@ -12691,7 +12699,7 @@ static void ipa_gsi_setup_reg(void)
 	/* setup IPA_ENDP_GSI_CFG_AOS_n reg */
 	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
 		type = ipa3_get_client_by_pipe(i);
-		gsi_ep_info_cfg = ipa3_get_gsi_ep_info(type);
+		gsi_ep_info_cfg = ipa_get_gsi_ep_info(type);
 		if (!gsi_ep_info_cfg)
 			continue;
 		reg_val = ((gsi_ep_info_cfg->ipa_if_aos << 16) & 0x00FF0000);
@@ -12703,7 +12711,7 @@ static void ipa_gsi_setup_reg(void)
 	/* setup GSI_MAP_EE_n_CH_k_VP_TABLE reg */
 	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
 		type = ipa3_get_client_by_pipe(i);
-		gsi_ep_info_cfg = ipa3_get_gsi_ep_info(type);
+		gsi_ep_info_cfg = ipa_get_gsi_ep_info(type);
 		if (!gsi_ep_info_cfg)
 			continue;
 		reg_val = i & 0xFF;
@@ -12716,7 +12724,7 @@ static void ipa_gsi_setup_reg(void)
 	/* setup IPA_ENDP_GSI_CFG1_n reg */
 	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
 		type = ipa3_get_client_by_pipe(i);
-		gsi_ep_info_cfg = ipa3_get_gsi_ep_info(type);
+		gsi_ep_info_cfg = ipa_get_gsi_ep_info(type);
 		if (!gsi_ep_info_cfg)
 			continue;
 		reg_val = (1 << 31) + (1 << 16);
@@ -13230,7 +13238,7 @@ void __ipa_ntn3_prod_stats_get(struct ipa_ntn3_stats_rx *stats, enum ipa_client_
 	int ch_id, ipa_ep_idx;
 
 	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
-	ipa_ep_idx = ipa3_get_ep_mapping(client);
+	ipa_ep_idx = ipa_get_ep_mapping(client);
 	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED)
 		return;
 	ch_id = ipa3_ctx->ep[ipa_ep_idx].gsi_chan_hdl;
@@ -13255,7 +13263,7 @@ void __ipa_ntn3_cons_stats_get(struct ipa_ntn3_stats_tx *stats, enum ipa_client_
 	int ch_id, ipa_ep_idx;
 
 	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
-	ipa_ep_idx = ipa3_get_ep_mapping(client);
+	ipa_ep_idx = ipa_get_ep_mapping(client);
 	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED)
 		return;
 	ch_id = ipa3_ctx->ep[ipa_ep_idx].gsi_chan_hdl;
@@ -13298,7 +13306,7 @@ void ipa3_eth_get_status(u32 client, int scratch_id,
 	int ipa_ep_idx;
 
 	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
-	ipa_ep_idx = ipa3_get_ep_mapping(client);
+	ipa_ep_idx = ipa_get_ep_mapping(client);
 	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED)
 		return;
 	ch_id = ipa3_ctx->ep[ipa_ep_idx].gsi_chan_hdl;
@@ -13420,7 +13428,7 @@ int ipa_hdrs_hpc_destroy(u32 hdr_hdl)
 
 	result = ipa3_del_hdr_hpc(del_wrapper);
 	if (result || hdr_del->status)
-		IPAERR("ipa3_del_hdr failed\n");
+		IPAERR("ipa_del_hdr failed\n");
 	kfree(del_wrapper);
 
     return result;
@@ -13608,10 +13616,10 @@ int ipa3_send_eogre_info(
 	/*
 	 * Post event to ipacm
 	 */
-	res = ipa3_send_msg(&msg_meta, eogre_info, ipa3_eogre_info_free_cb);
+	res = ipa_send_msg(&msg_meta, eogre_info, ipa3_eogre_info_free_cb);
 
 	if (res) {
-		IPAERR_RL("ipa3_send_msg failed: %d\n", res);
+		IPAERR_RL("ipa_send_msg failed: %d\n", res);
 		kfree(eogre_info);
 		goto done;
 	}
@@ -13626,9 +13634,9 @@ int ipa_send_mhi_endp_ind_to_modem(void)
 	struct ipa_endp_desc_indication_msg_v01 req;
 	struct ipa_ep_id_type_v01 *ep_info;
 	int ipa_mhi_prod_ep_idx =
-		ipa3_get_ep_mapping(IPA_CLIENT_MHI_LOW_LAT_PROD);
+		ipa_get_ep_mapping(IPA_CLIENT_MHI_LOW_LAT_PROD);
 	int ipa_mhi_cons_ep_idx =
-		ipa3_get_ep_mapping(IPA_CLIENT_MHI_LOW_LAT_CONS);
+		ipa_get_ep_mapping(IPA_CLIENT_MHI_LOW_LAT_CONS);
 
 	mutex_lock(&ipa3_ctx->lock);
 	/* only modem up and MHI ctrl pipes are ready, then send QMI*/
@@ -13751,13 +13759,13 @@ static void ipa3_socksv5_msg_free_cb(void *buff, u32 len, u32 type)
 }
 
 /**
- * ipa3_add_socksv5_conn() - IPA add socksv5_conn
+ * ipa_add_socksv5_conn() - IPA add socksv5_conn
  *
  * Returns:	0 on success, negative on failure
  *
  * Note:	Should not be called from atomic context
  */
-int ipa3_add_socksv5_conn(struct ipa_socksv5_info *info)
+int ipa_add_socksv5_conn(struct ipa_socksv5_info *info)
 {
 	int res = 0;
 	void *rp_va, *wp_va;
@@ -13852,9 +13860,9 @@ int ipa3_add_socksv5_conn(struct ipa_socksv5_info *info)
 	msg_meta.msg_type = IPA_SOCKV5_ADD;
 	msg_meta.msg_len = sizeof(struct ipa_socksv5_msg);
 	/* post event to ipacm*/
-	res = ipa3_send_msg(&msg_meta, socksv5_msg, ipa3_socksv5_msg_free_cb);
+	res = ipa_send_msg(&msg_meta, socksv5_msg, ipa3_socksv5_msg_free_cb);
 	if (res) {
-		IPAERR_RL("ipa3_send_msg failed: %d\n", res);
+		IPAERR_RL("ipa_send_msg failed: %d\n", res);
 		kfree(socksv5_msg);
 		goto error;
 	}
@@ -13911,13 +13919,13 @@ void ipa3_default_evict_register( void )
 }
 
 /**
- * ipa3_del_socksv5_conn() - IPA add socksv5_conn
+ * ipa_del_socksv5_conn() - IPA add socksv5_conn
  *
  * Returns:	0 on success, negative on failure
  *
  * Note:	Should not be called from atomic context
  */
-int ipa3_del_socksv5_conn(uint32_t handle)
+int ipa_del_socksv5_conn(uint32_t handle)
 {
 	int res = 0;
 	void *rp_va;
@@ -13988,10 +13996,10 @@ int ipa3_del_socksv5_conn(uint32_t handle)
 	memcpy(socksv5_handle, &handle, sizeof(handle));
 	msg_meta.msg_type = IPA_SOCKV5_DEL;
 	msg_meta.msg_len = sizeof(uint32_t);
-	res = ipa3_send_msg(&msg_meta, socksv5_handle,
+	res = ipa_send_msg(&msg_meta, socksv5_handle,
 		ipa3_socksv5_msg_free_cb);
 	if (res) {
-		IPAERR_RL("ipa3_send_msg failed: %d\n", res);
+		IPAERR_RL("ipa_send_msg failed: %d\n", res);
 		kfree(socksv5_handle);
 	}
 

+ 5 - 5
drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c

@@ -4,7 +4,7 @@
  */
 
 #include "ipa_i.h"
-#include <linux/ipa_wdi3.h>
+#include "ipa_wdi3.h"
 
 #define UPDATE_RP_MODERATION_CONFIG 1
 #define UPDATE_RP_MODERATION_THRESHOLD 8
@@ -184,7 +184,7 @@ static int ipa3_setup_wdi3_gsi_channel(u8 is_smmu_enabled,
 	else
 		gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI;
 
-	gsi_ep_info = ipa3_get_gsi_ep_info(ep->client);
+	gsi_ep_info = ipa_get_gsi_ep_info(ep->client);
 	if (!gsi_ep_info) {
 		IPAERR("Failed getting GSI EP info for client=%d\n",
 		       ep->client);
@@ -1261,7 +1261,7 @@ int ipa3_disable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx,
 	}
 
 	/* stop gsi rx channel */
-	result = ipa3_stop_gsi_channel(ipa_ep_idx_rx);
+	result = ipa_stop_gsi_channel(ipa_ep_idx_rx);
 	if (result) {
 		IPAERR("failed to stop gsi rx channel\n");
 		result = -EFAULT;
@@ -1269,7 +1269,7 @@ int ipa3_disable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx,
 	}
 
 	/* stop gsi tx channel */
-	result = ipa3_stop_gsi_channel(ipa_ep_idx_tx);
+	result = ipa_stop_gsi_channel(ipa_ep_idx_tx);
 	if (result) {
 		IPAERR("failed to stop gsi tx channel\n");
 		result = -EFAULT;
@@ -1277,7 +1277,7 @@ int ipa3_disable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx,
 	}
 	/* stop gsi tx1 channel */
 	if (ipa_ep_idx_tx1 >= 0) {
-		result = ipa3_stop_gsi_channel(ipa_ep_idx_tx1);
+		result = ipa_stop_gsi_channel(ipa_ep_idx_tx1);
 		if (result) {
 			IPAERR("failed to stop gsi tx1 channel\n");
 			result = -EFAULT;

+ 8 - 8
drivers/platform/msm/ipa/ipa_v3/ipa_wigig_i.c

@@ -7,7 +7,7 @@
 #include <linux/if_ether.h>
 #include <linux/log2.h>
 #include <linux/debugfs.h>
-#include <linux/ipa_wigig.h>
+#include "ipa_wigig.h"
 
 #define IPA_WIGIG_DESC_RING_EL_SIZE	32
 #define IPA_WIGIG_STATUS_RING_EL_SIZE	16
@@ -977,7 +977,7 @@ int ipa3_conn_wigig_rx_pipe_i(void *in, struct ipa_wigig_conn_out_params *out,
 		return -EFAULT;
 	}
 
-	ep_gsi = ipa3_get_gsi_ep_info(rx_client);
+	ep_gsi = ipa_get_gsi_ep_info(rx_client);
 	if (!ep_gsi) {
 		IPAERR("Failed getting GSI EP info for client=%d\n",
 			rx_client);
@@ -1271,7 +1271,7 @@ int ipa3_conn_wigig_client_i(void *in,
 		return -EFAULT;
 	}
 
-	ep_gsi = ipa3_get_gsi_ep_info(tx_client);
+	ep_gsi = ipa_get_gsi_ep_info(tx_client);
 	if (!ep_gsi) {
 		IPAERR("Failed getting GSI EP info for client=%d\n",
 			tx_client);
@@ -1405,7 +1405,7 @@ int ipa3_disconn_wigig_pipe_i(enum ipa_client_type client,
 		return -EFAULT;
 	}
 
-	ep_gsi = ipa3_get_gsi_ep_info(client);
+	ep_gsi = ipa_get_gsi_ep_info(client);
 	if (!ep_gsi) {
 		IPAERR("Failed getting GSI EP info for client=%d\n",
 			client);
@@ -1687,7 +1687,7 @@ int ipa3_enable_wigig_pipe_i(enum ipa_client_type client)
 		goto fail_enable_datapath;
 
 	memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
-	ipa3_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl);
+	ipa_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl);
 
 	/* ring the event db (outside the ring boundary)*/
 	val = ep->gsi_mem_info.evt_ring_base_addr +
@@ -1747,7 +1747,7 @@ int ipa3_enable_wigig_pipe_i(enum ipa_client_type client)
 	return 0;
 
 fail_ring_ch:
-	res = ipa3_stop_gsi_channel(ipa_ep_idx);
+	res = ipa_stop_gsi_channel(ipa_ep_idx);
 	if (res != 0 && res != -GSI_STATUS_AGAIN &&
 		res != -GSI_STATUS_TIMED_OUT) {
 		IPAERR("failed to stop channel res = %d\n", res);
@@ -1837,7 +1837,7 @@ int ipa3_disable_wigig_pipe_i(enum ipa_client_type client)
 		}
 	}
 retry_gsi_stop:
-	res = ipa3_stop_gsi_channel(ipa_ep_idx);
+	res = ipa_stop_gsi_channel(ipa_ep_idx);
 	if (res != 0 && res != -GSI_STATUS_AGAIN &&
 		res != -GSI_STATUS_TIMED_OUT) {
 		IPAERR("failed to stop channel res = %d\n", res);
@@ -1872,7 +1872,7 @@ retry_gsi_stop:
 	if (IPA_CLIENT_IS_PROD(ep->client)) {
 		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
 		ep_cfg_ctrl.ipa_ep_delay = true;
-		ipa3_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl);
+		ipa_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl);
 	}
 
 	ep->gsi_offload_state &= ~IPA_WIGIG_ENABLED;

+ 1 - 1
drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c

@@ -3,7 +3,7 @@
  * Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
  */
 
-#include <linux/ipa.h>
+#include "ipa.h"
 #include <linux/errno.h>
 #include <linux/ipc_logging.h>
 #include <linux/debugfs.h>

+ 1 - 1
drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_hw_stats.h

@@ -6,7 +6,7 @@
 #ifndef _IPAHAL_HW_STATS_H_
 #define _IPAHAL_HW_STATS_H_
 
-#include <linux/ipa.h>
+#include "ipa.h"
 
 #define IPAHAL_MAX_PIPES 32
 #define IPAHAL_MAX_PIPES_PER_REG 32

+ 1 - 1
drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h

@@ -6,7 +6,7 @@
 #ifndef _IPAHAL_I_H_
 #define _IPAHAL_I_H_
 
-#include <linux/ipa.h>
+#include "ipa.h"
 #include "ipa_common_i.h"
 
 #define IPAHAL_DRV_NAME "ipahal"

+ 1 - 1
drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c

@@ -5,7 +5,7 @@
  */
 
 #include <linux/init.h>
-#include <linux/ipa.h>
+#include "ipa.h"
 #include <linux/kernel.h>
 #include <linux/msm_ipa.h>
 #include "ipahal_i.h"

+ 1 - 1
drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h

@@ -7,7 +7,7 @@
 #ifndef _IPAHAL_REG_H_
 #define _IPAHAL_REG_H_
 
-#include <linux/ipa.h>
+#include "ipa.h"
 
 /*
  * Registers names

+ 13 - 10
drivers/platform/msm/ipa/ipa_v3/rmnet_ctl_ipa.c

@@ -6,7 +6,7 @@
 #include <linux/string.h>
 #include <linux/skbuff.h>
 #include <linux/workqueue.h>
-#include <linux/ipa.h>
+#include "ipa.h"
 #include <uapi/linux/msm_rmnet.h>
 #include "ipa_i.h"
 
@@ -85,8 +85,8 @@ int ipa3_rmnet_ctl_init(void)
 		return -EINVAL;
 	}
 
-	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_LOW_LAT_PROD) == -1 ||
-		ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_LOW_LAT_CONS) == -1)
+	if (ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_LOW_LAT_PROD) == -1 ||
+		ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_LOW_LAT_CONS) == -1)
 	{
 		IPAERR("invalid low lat endpoints\n");
 		return -EINVAL;
@@ -118,7 +118,7 @@ int ipa3_rmnet_ctl_init(void)
 	return 0;
 }
 
-int ipa3_register_rmnet_ctl_cb(
+int ipa_register_rmnet_ctl_cb(
 	void (*ipa_rmnet_ctl_ready_cb)(void *user_data1),
 	void *user_data1,
 	void (*ipa_rmnet_ctl_stop_cb)(void *user_data2),
@@ -167,8 +167,9 @@ int ipa3_register_rmnet_ctl_cb(
 	IPADBG("rmnet_ctl registered successfually\n");
 	return 0;
 }
+EXPORT_SYMBOL(ipa_register_rmnet_ctl_cb);
 
-int ipa3_unregister_rmnet_ctl_cb(void)
+int ipa_unregister_rmnet_ctl_cb(void)
 {
 	/* check ipa3_ctx existed or not */
 	if (!ipa3_ctx) {
@@ -210,6 +211,7 @@ int ipa3_unregister_rmnet_ctl_cb(void)
 	IPADBG("rmnet_ctl unregistered successfually\n");
 	return 0;
 }
+EXPORT_SYMBOL(ipa_unregister_rmnet_ctl_cb);
 
 int ipa3_setup_apps_low_lat_cons_pipe(bool rmnet_config,
 	struct rmnet_ingress_param *ingress_param)
@@ -433,7 +435,7 @@ int ipa3_teardown_apps_low_lat_pipes(void)
 			rmnet_ctl_ipa3_ctx->state = IPA_RMNET_CTL_REGD;
 	}
 	if (rmnet_ctl_ipa3_ctx->pipe_state & IPA_RMNET_CTL_PIPE_RX_READY) {
-		ret = ipa3_teardown_sys_pipe(
+		ret = ipa_teardown_sys_pipe(
 			rmnet_ctl_ipa3_ctx->ipa3_to_apps_low_lat_hdl);
 		if (ret < 0) {
 			IPAERR("Failed to teardown APPS->IPA low lat pipe\n");
@@ -444,7 +446,7 @@ int ipa3_teardown_apps_low_lat_pipes(void)
 	}
 
 	if (rmnet_ctl_ipa3_ctx->pipe_state & IPA_RMNET_CTL_PIPE_TX_READY) {
-		ret = ipa3_teardown_sys_pipe(
+		ret = ipa_teardown_sys_pipe(
 			rmnet_ctl_ipa3_ctx->apps_to_ipa3_low_lat_hdl);
 		if (ret < 0) {
 			return ret;
@@ -456,7 +458,7 @@ int ipa3_teardown_apps_low_lat_pipes(void)
 	return ret;
 }
 
-int ipa3_rmnet_ctl_xmit(struct sk_buff *skb)
+int ipa_rmnet_ctl_xmit(struct sk_buff *skb)
 {
 	int ret;
 	int len;
@@ -533,7 +535,7 @@ int ipa3_rmnet_ctl_xmit(struct sk_buff *skb)
 	 * both data packets and command will be routed to
 	 * IPA_CLIENT_Q6_WAN_CONS based on DMA settings
 	 */
-	ret = ipa3_tx_dp(IPA_CLIENT_APPS_WAN_LOW_LAT_PROD, skb, NULL);
+	ret = ipa_tx_dp(IPA_CLIENT_APPS_WAN_LOW_LAT_PROD, skb, NULL);
 	if (ret) {
 		if (ret == -EPIPE) {
 			IPAERR("Low lat fatal: pipe is not valid\n");
@@ -567,6 +569,7 @@ out:
 	spin_unlock_irqrestore(&rmnet_ctl_ipa3_ctx->tx_lock, flags);
 	return ret;
 }
+EXPORT_SYMBOL(ipa_rmnet_ctl_xmit);
 
 static void rmnet_ctl_wakeup_ipa(struct work_struct *work)
 {
@@ -598,7 +601,7 @@ static void rmnet_ctl_wakeup_ipa(struct work_struct *work)
 		 * both data packets and command will be routed to
 		 * IPA_CLIENT_Q6_WAN_CONS based on DMA settings
 		 */
-		ret = ipa3_tx_dp(IPA_CLIENT_APPS_WAN_LOW_LAT_PROD, skb, NULL);
+		ret = ipa_tx_dp(IPA_CLIENT_APPS_WAN_LOW_LAT_PROD, skb, NULL);
 		if (ret) {
 			if (ret == -EPIPE) {
 				/* try to drain skb from queue if pipe teardown */

+ 18 - 18
drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c

@@ -29,7 +29,7 @@
 #include <linux/remoteproc/qcom_rproc.h>
 #include "ipa_qmi_service.h"
 #include <linux/rmnet_ipa_fd_ioctl.h>
-#include <linux/ipa.h>
+#include "ipa.h"
 #include <uapi/linux/ip.h>
 #include <uapi/linux/msm_rmnet.h>
 #include <net/ipv6.h>
@@ -267,7 +267,7 @@ static int ipa3_setup_a7_qmap_hdr(void)
 	} else
 		hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */
 
-	if (ipa3_add_hdr(hdr)) {
+	if (ipa_add_hdr(hdr)) {
 		IPAWANERR("fail to add IPA_A7_QMAP hdr\n");
 		ret = -EPERM;
 		goto bail;
@@ -306,9 +306,9 @@ static void ipa3_del_a7_qmap_hdr(void)
 	hdl_entry = &del_hdr->hdl[0];
 	hdl_entry->hdl = rmnet_ipa3_ctx->qmap_hdr_hdl;
 
-	ret = ipa3_del_hdr(del_hdr);
+	ret = ipa_del_hdr(del_hdr);
 	if (ret || hdl_entry->status)
-		IPAWANERR("ipa3_del_hdr failed\n");
+		IPAWANERR("ipa_del_hdr failed\n");
 	else
 		IPAWANDBG("hdrs deletion done\n");
 
@@ -341,9 +341,9 @@ static void ipa3_del_qmap_hdr(uint32_t hdr_hdl)
 	hdl_entry = &del_hdr->hdl[0];
 	hdl_entry->hdl = hdr_hdl;
 
-	ret = ipa3_del_hdr(del_hdr);
+	ret = ipa_del_hdr(del_hdr);
 	if (ret || hdl_entry->status)
-		IPAWANERR("ipa3_del_hdr failed\n");
+		IPAWANERR("ipa_del_hdr failed\n");
 	else
 		IPAWANDBG("header deletion done\n");
 
@@ -418,7 +418,7 @@ static int ipa3_add_qmap_hdr(uint32_t mux_id, uint32_t *hdr_hdl)
 	IPAWANDBG("header (%s) with mux-id: (%d)\n",
 		hdr_name,
 		hdr_entry->hdr[1]);
-	if (ipa3_add_hdr(hdr)) {
+	if (ipa_add_hdr(hdr)) {
 		IPAWANERR("fail to add IPA_QMAP hdr\n");
 		ret = -EPERM;
 		goto bail;
@@ -584,7 +584,7 @@ static int ipa3_setup_low_lat_rt_rules(void)
 		IPA_FLT_META_DATA;
 	/* Low lat routing is based on metadata */
 	rt_rule_entry[WAN_RT_COMMON].rule.attrib.meta_data =
-		ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS);
+		ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS);
 	rt_rule_entry[WAN_RT_COMMON].rule.attrib.meta_data_mask =
 		0xFF;
 
@@ -596,7 +596,7 @@ static int ipa3_setup_low_lat_rt_rules(void)
 	rt_rule_entry[WAN_RT_ICMP].rule.attrib.attrib_mask =
 		IPA_FLT_META_DATA;
 	rt_rule_entry[WAN_RT_ICMP].rule.attrib.meta_data =
-		ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS);
+		ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS);
 	rt_rule_entry[WAN_RT_ICMP].rule.attrib.meta_data_mask =
 		0xFF;
 	rt_rule_entry[WAN_RT_ICMP].rule.attrib.attrib_mask |=
@@ -1300,7 +1300,7 @@ static void ipa3_cleanup_deregister_intf(void)
 		v_name = rmnet_ipa3_ctx->mux_channel[i].vchannel_name;
 
 		if (rmnet_ipa3_ctx->mux_channel[i].ul_flt_reg) {
-			ret = ipa3_deregister_intf(v_name);
+			ret = ipa_deregister_intf(v_name);
 			if (ret < 0) {
 				IPAWANERR("de-register device %s(%d) failed\n",
 					v_name,
@@ -1365,9 +1365,9 @@ static int __ipa_wwan_close(struct net_device *dev)
 		 * remote side to hang if tried to open again
 		 */
 		reinit_completion(&wwan_ptr->resource_granted_completion);
-		rc = ipa3_deregister_intf(dev->name);
+		rc = ipa_deregister_intf(dev->name);
 		if (rc) {
-			IPAWANERR("[%s]: ipa3_deregister_intf failed %d\n",
+			IPAWANERR("[%s]: ipa_deregister_intf failed %d\n",
 			       dev->name, rc);
 			return rc;
 		}
@@ -1523,7 +1523,7 @@ send:
 	 * both data packets and command will be routed to
 	 * IPA_CLIENT_Q6_WAN_CONS based on status configuration
 	 */
-	ret = ipa3_tx_dp(IPA_CLIENT_APPS_WAN_PROD, skb, NULL);
+	ret = ipa_tx_dp(IPA_CLIENT_APPS_WAN_PROD, skb, NULL);
 	if (ret) {
 		atomic_dec(&wwan_ptr->outstanding_pkts);
 		if (ret == -EPIPE) {
@@ -1695,7 +1695,7 @@ static int ipa_send_wan_pipe_ind_to_modem(int ingress_eps_mask)
 		req.num_eps_valid = true;
 		req.num_eps++;
 		ep_info = &req.ep_info[req.ep_info_len - 1];
-		ep_info->ep_id = ipa3_get_ep_mapping(
+		ep_info->ep_id = ipa_get_ep_mapping(
 			IPA_CLIENT_APPS_WAN_LOW_LAT_CONS);
 		ep_info->ic_type = DATA_IC_TYPE_AP_V01;
 		ep_info->ep_type = DATA_EP_DESC_TYPE_EMB_FLOW_CTL_PROD_V01;
@@ -1703,7 +1703,7 @@ static int ipa_send_wan_pipe_ind_to_modem(int ingress_eps_mask)
 		req.ep_info_len++;
 		req.num_eps++;
 		ep_info = &req.ep_info[req.ep_info_len - 1];
-		ep_info->ep_id = ipa3_get_ep_mapping(
+		ep_info->ep_id = ipa_get_ep_mapping(
 			IPA_CLIENT_APPS_WAN_LOW_LAT_PROD);
 		ep_info->ic_type = DATA_IC_TYPE_AP_V01;
 		ep_info->ep_type = DATA_EP_DESC_TYPE_EMB_FLOW_CTL_CONS_V01;
@@ -3526,7 +3526,7 @@ static int ipa3_wwan_probe(struct platform_device *pdev)
 
 	pr_info("rmnet_ipa3 started initialization\n");
 
-	if (!ipa3_is_ready()) {
+	if (!ipa_is_ready()) {
 		IPAWANDBG("IPA driver not ready, registering callback\n");
 		ret = ipa_register_ipa_ready_cb(ipa3_ready_cb, (void *)pdev);
 
@@ -3728,12 +3728,12 @@ static int ipa3_wwan_remove(struct platform_device *pdev)
 		if (ret < 0)
 			IPAWANERR("Failed to teardown IPA->APPS LL pipe\n");
 	}
-	ret = ipa3_teardown_sys_pipe(rmnet_ipa3_ctx->ipa3_to_apps_hdl);
+	ret = ipa_teardown_sys_pipe(rmnet_ipa3_ctx->ipa3_to_apps_hdl);
 	if (ret < 0)
 		IPAWANERR("Failed to teardown IPA->APPS pipe\n");
 	else
 		rmnet_ipa3_ctx->ipa3_to_apps_hdl = -1;
-	ret = ipa3_teardown_sys_pipe(rmnet_ipa3_ctx->apps_to_ipa3_hdl);
+	ret = ipa_teardown_sys_pipe(rmnet_ipa3_ctx->apps_to_ipa3_hdl);
 	if (ret < 0)
 		IPAWANERR("Failed to teardown APPS->IPA pipe\n");
 	else

+ 13 - 10
drivers/platform/msm/ipa/ipa_v3/rmnet_ll_ipa.c

@@ -7,7 +7,7 @@
 #include <linux/string.h>
 #include <linux/skbuff.h>
 #include <linux/workqueue.h>
-#include <linux/ipa.h>
+#include "ipa.h"
 #include <uapi/linux/msm_rmnet.h>
 #include "ipa_i.h"
 
@@ -240,8 +240,8 @@ int ipa3_rmnet_ll_init(void)
 		return -EINVAL;
 	}
 
-	if (ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD) == -1 ||
-		ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS) == -1)
+	if (ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD) == -1 ||
+		ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS) == -1)
 	{
 		IPAERR("invalid low lat data endpoints\n");
 		return -EINVAL;
@@ -275,7 +275,7 @@ int ipa3_rmnet_ll_init(void)
 	return 0;
 }
 
-int ipa3_register_rmnet_ll_cb(
+int ipa_register_rmnet_ll_cb(
 	void (*ipa_rmnet_ll_ready_cb)(void *user_data1),
 	void *user_data1,
 	void (*ipa_rmnet_ll_stop_cb)(void *user_data2),
@@ -324,8 +324,9 @@ int ipa3_register_rmnet_ll_cb(
 	IPADBG("rmnet_ll registered successfually\n");
 	return 0;
 }
+EXPORT_SYMBOL(ipa_register_rmnet_ll_cb);
 
-int ipa3_unregister_rmnet_ll_cb(void)
+int ipa_unregister_rmnet_ll_cb(void)
 {
 	/* check ipa3_ctx existed or not */
 	if (!ipa3_ctx) {
@@ -367,6 +368,7 @@ int ipa3_unregister_rmnet_ll_cb(void)
 	IPADBG("rmnet_ll unregistered successfually\n");
 	return 0;
 }
+EXPORT_SYMBOL(ipa_unregister_rmnet_ll_cb);
 
 int ipa3_setup_apps_low_lat_data_cons_pipe(
 	struct rmnet_ingress_param *ingress_param,
@@ -605,7 +607,7 @@ int ipa3_teardown_apps_low_lat_data_pipes(void)
 			rmnet_ll_ipa3_ctx->state = IPA_RMNET_LL_REGD;
 	}
 	if (rmnet_ll_ipa3_ctx->pipe_state & IPA_RMNET_LL_PIPE_RX_READY) {
-		ret = ipa3_teardown_sys_pipe(
+		ret = ipa_teardown_sys_pipe(
 			rmnet_ll_ipa3_ctx->ipa3_to_apps_low_lat_data_hdl);
 		if (ret < 0) {
 			IPAERR("Failed to teardown APPS->IPA low lat data pipe\n");
@@ -616,7 +618,7 @@ int ipa3_teardown_apps_low_lat_data_pipes(void)
 	}
 
 	if (rmnet_ll_ipa3_ctx->pipe_state & IPA_RMNET_LL_PIPE_TX_READY) {
-		ret = ipa3_teardown_sys_pipe(
+		ret = ipa_teardown_sys_pipe(
 			rmnet_ll_ipa3_ctx->apps_to_ipa3_low_lat_data_hdl);
 		if (ret < 0) {
 			return ret;
@@ -628,7 +630,7 @@ int ipa3_teardown_apps_low_lat_data_pipes(void)
 	return ret;
 }
 
-int ipa3_rmnet_ll_xmit(struct sk_buff *skb)
+int ipa_rmnet_ll_xmit(struct sk_buff *skb)
 {
 	int ret;
 	int len, free_desc = 0;
@@ -718,7 +720,7 @@ int ipa3_rmnet_ll_xmit(struct sk_buff *skb)
 	 * both data packets and command will be routed to
 	 * IPA_CLIENT_Q6_WAN_CONS based on DMA settings
 	 */
-	ret = ipa3_tx_dp(IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD, skb, NULL);
+	ret = ipa_tx_dp(IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD, skb, NULL);
 	if (ret) {
 		if (ret == -EPIPE) {
 			IPAERR("Low lat data fatal: pipe is not valid\n");
@@ -757,6 +759,7 @@ out:
 	spin_unlock_irqrestore(&rmnet_ll_ipa3_ctx->tx_lock, flags);
 	return (free_desc > 0) ? free_desc : 0;
 }
+EXPORT_SYMBOL(ipa_rmnet_ll_xmit);
 
 static void rmnet_ll_wakeup_ipa(struct work_struct *work)
 {
@@ -788,7 +791,7 @@ static void rmnet_ll_wakeup_ipa(struct work_struct *work)
 		 * both data packets and command will be routed to
 		 * IPA_CLIENT_Q6_WAN_CONS based on DMA settings
 		 */
-		ret = ipa3_tx_dp(IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD, skb, NULL);
+		ret = ipa_tx_dp(IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_PROD, skb, NULL);
 		if (ret) {
 			if (ret == -EPIPE) {
 				/* try to drain skb from queue if pipe teardown */

+ 1 - 1
drivers/platform/msm/ipa/ipa_v3/teth_bridge.c

@@ -15,7 +15,7 @@
 #include <linux/mutex.h>
 #include <linux/skbuff.h>
 #include <linux/types.h>
-#include <linux/ipa.h>
+#include "ipa.h"
 #include <linux/netdevice.h>
 #include "ipa_i.h"
 

+ 1 - 1
drivers/platform/msm/ipa/test/ipa_pm_ut.c

@@ -3,7 +3,7 @@
  * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
  */
 
-#include <linux/ipa.h>
+#include "ipa.h"
 #include "ipa_pm.h"
 #include "ipa_i.h"
 #include "ipa_ut_framework.h"

+ 1 - 1
drivers/platform/msm/ipa/test/ipa_test_dma.c

@@ -3,7 +3,7 @@
  * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
  */
 
-#include <linux/ipa.h>
+#include "ipa.h"
 #include "ipa_i.h"
 #include "ipa_ut_framework.h"
 

+ 6 - 6
drivers/platform/msm/ipa/test/ipa_test_hw_stats.c

@@ -745,7 +745,7 @@ static int ipa_test_hw_stats_query_drop_stats(void *priv)
                goto fail;
 
        for (i = 0; i <= IPA_CLIENT_MAX; i++) {
-               ep_idx = ipa3_get_ep_mapping(i);
+               ep_idx = ipa_get_ep_mapping(i);
                if (ep_idx == -1 || !IPA_CLIENT_IS_CONS(i) || IPA_CLIENT_IS_TEST(i))
                        continue;
 
@@ -798,7 +798,7 @@ static int ipa_test_hw_stats_query_teth_stats(void *priv)
        }
 
        for (i = 0; i < IPA_CLIENT_MAX; i++) {
-               int ep_idx = ipa3_get_ep_mapping(i);
+               int ep_idx = ipa_get_ep_mapping(i);
 
                if (ep_idx == -1)
                        continue;
@@ -821,7 +821,7 @@ static int ipa_test_hw_stats_query_teth_stats(void *priv)
                }
 
                for (j = 0; j < IPA_CLIENT_MAX; j++) {
-                       int cons_idx = ipa3_get_ep_mapping(j);
+                       int cons_idx = ipa_get_ep_mapping(j);
 
                        if (cons_idx == -1)
                                continue;
@@ -898,7 +898,7 @@ static int ipa_test_hw_stats_query_quota_stats(void *priv)
        }
 
        for (i = 0; i < IPA_CLIENT_MAX; i++) {
-               ep_idx = ipa3_get_ep_mapping(i);
+               ep_idx = ipa_get_ep_mapping(i);
 
                if (ep_idx == -1)
                        continue;
@@ -1022,9 +1022,9 @@ static int ipa_test_hw_stats_set_bw(void *priv)
 	info->threshold[1] = 400;
 	info->threshold[2] = 600;
 
-	ret = ipa3_uc_bw_monitor(info);
+	ret = ipa_uc_bw_monitor(info);
 	if (ret < 0) {
-		IPA_UT_ERR("ipa3_uc_bw_monitor fails\n");
+		IPA_UT_ERR("ipa_uc_bw_monitor fails\n");
 		ret = -ENOMEM;
 	}
 

+ 2 - 2
drivers/platform/msm/ipa/test/ipa_test_mhi.c

@@ -4,12 +4,12 @@
  */
 
 #include <linux/delay.h>
-#include <linux/ipa_mhi.h>
-#include <linux/ipa.h>
+#include "ipa.h"
 #include "ipa_i.h"
 #include "gsi.h"
 #include "gsihal.h"
 #include "ipa_ut_framework.h"
+#include <linux/ipa_mhi.h>
 
 #define IPA_MHI_TEST_NUM_CHANNELS		8
 #define IPA_MHI_TEST_NUM_EVENT_RINGS		8

+ 4 - 4
drivers/platform/msm/ipa/test/ipa_test_ntn.c

@@ -4,8 +4,8 @@
  */
 
 #include "ipa_ut_framework.h"
-#include <linux/ipa_eth.h>
-#include <linux/ipa.h>
+#include "ipa_eth.h"
+#include "ipa.h"
 #include <linux/delay.h>
 #include "ipa_i.h"
 
@@ -1279,7 +1279,7 @@ static int ipa_ntn_send_packet_burst(void)
 	 * ring full stats for RX
 	 */
 	ep_cfg_ctrl.ipa_ep_delay = true;
-	ret = ipa3_cfg_ep_ctrl(
+	ret = ipa_cfg_ep_ctrl(
 		ipa_get_ep_mapping(test_ntn_ctx->prod_client_type),
 		&ep_cfg_ctrl);
 	if (ret) {
@@ -1314,7 +1314,7 @@ static int ipa_ntn_send_packet_burst(void)
 	IPA_UT_DBG("sleep before removing delay\n");
 	msleep(20);
 	ep_cfg_ctrl.ipa_ep_delay = false;
-	ret = ipa3_cfg_ep_ctrl(
+	ret = ipa_cfg_ep_ctrl(
 		ipa_get_ep_mapping(test_ntn_ctx->prod_client_type),
 		&ep_cfg_ctrl);
 	if (ret) {

+ 2 - 2
drivers/platform/msm/ipa/test/ipa_test_wdi3.c

@@ -4,8 +4,8 @@
  */
 
 #include "ipa_ut_framework.h"
-#include <linux/ipa_wdi3.h>
-#include <linux/ipa.h>
+#include "ipa_wdi3.h"
+#include "ipa.h"
 #include <linux/delay.h>
 #include "ipa_i.h"
 

+ 1 - 1
drivers/platform/msm/ipa/test/ipa_ut_framework.c

@@ -7,7 +7,7 @@
 #include <linux/module.h>
 #include <linux/debugfs.h>
 #include <linux/device.h>
-#include <linux/ipa.h>
+#include "ipa.h"
 #include "ipa_i.h"
 #include "ipa_ut_framework.h"
 #include "ipa_ut_suite_list.h"

+ 80 - 0
drivers/platform/msm/ipa_kernel_headers.py

@@ -0,0 +1,80 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+
+import argparse
+import filecmp
+import os
+import re
+import subprocess
+import sys
+
def run_headers_install(verbose, gen_dir, headers_install, unifdef, prefix, h):
    """Sanitize one UAPI header with the kernel's headers_install script.

    Copies header *h* (which must live under *prefix*) into *gen_dir*,
    preserving its path relative to *prefix*.

    Args:
        verbose: When True, print the command being executed.
        gen_dir: Destination directory for the sanitized header.
        headers_install: Path to the headers_install shell script.
        unifdef: Path to the unifdef tool used by headers_install.
        prefix: Directory prefix that *h* is expected to start with.
        h: Absolute path of the source header file.

    Returns:
        True on success; False if *h* lacks *prefix* or the command fails.
    """
    if not h.startswith(prefix):
        print('error: expected prefix [%s] on header [%s]' % (prefix, h))
        return False

    # Mirror the header's prefix-relative path under gen_dir.
    out_h = os.path.join(gen_dir, h[len(prefix):])
    # headers_install.sh locates unifdef through this environment variable.
    env = os.environ.copy()
    env["LOC_UNIFDEF"] = unifdef
    cmd = ["sh", headers_install, h, out_h]

    if verbose:
        print('run_headers_install: cmd is %s' % cmd)

    result = subprocess.call(cmd, env=env)

    if result != 0:
        print('error: run_headers_install: cmd %s failed %d' % (cmd, result))
        return False
    return True
+
def gen_ipa_headers(verbose, gen_dir, headers_install, unifdef, ipa_include_uapi):
    """Sanitize every listed UAPI header and return the number of failures."""
    failures = 0
    for header in ipa_include_uapi:
        # Everything up to and including ".../include/uapi/" is the prefix
        # that gets stripped when the header is mirrored into gen_dir.
        root = header.split('/include/uapi/')[0]
        prefix = os.path.join(root, 'include', 'uapi') + os.sep

        ok = run_headers_install(
            verbose, gen_dir, headers_install, unifdef, prefix, header)
        if not ok:
            failures += 1
    return failures
+
def main():
    """Parse command line arguments and perform top level control."""
    arg_parser = argparse.ArgumentParser(
            description=__doc__,
            formatter_class=argparse.RawDescriptionHelpFormatter)

    # Every invocation of this script accepts the same set of options.
    arg_parser.add_argument(
            '--verbose', action='store_true',
            help='Print output that describes the workings of this script.')
    arg_parser.add_argument(
            '--gen_dir', required=True,
            help='Where to place the generated files.')
    arg_parser.add_argument(
            '--ipa_include_uapi', required=True, nargs='*',
            help='The list of header files.')
    arg_parser.add_argument(
            '--headers_install', required=True,
            help='The headers_install tool to process input headers.')
    arg_parser.add_argument(
            '--unifdef', required=True,
            help='The unifdef tool used by headers_install.')

    opts = arg_parser.parse_args()

    # Echo the resolved options before doing any work.
    if opts.verbose:
        print('gen_dir [%s]' % opts.gen_dir)
        print('ipa_include_uapi [%s]' % opts.ipa_include_uapi)
        print('headers_install [%s]' % opts.headers_install)
        print('unifdef [%s]' % opts.unifdef)

    return gen_ipa_headers(opts.verbose, opts.gen_dir,
            opts.headers_install, opts.unifdef, opts.ipa_include_uapi)
+
if __name__ == '__main__':
    # Propagate the failure count to the shell as the process exit status.
    raise SystemExit(main())

+ 1 - 1
kernel-tests/Android.bp

@@ -3,7 +3,7 @@ cc_binary {
 
     cflags: ["-Wno-missing-field-initializers"] + ["-Wno-int-to-pointer-cast"] + ["-Wno-int-conversion"],
 
-    header_libs: ["device_kernel_headers"]+["qti_kernel_headers"]+["qti_ipa_test_kernel_headers"],
+    header_libs: ["device_kernel_headers"]+["qti_kernel_headers"]+["qti_ipa_kernel_headers"]+["qti_ipa_test_kernel_headers"],
 
     srcs: [
         "DataPathTestFixture.cpp",