
msm: IPA: ULSO LAN USB - RNDIS driver change

Support for ULSO LAN USB use case in rndis datapath.

Change-Id: I2035e5fcc7c927cc3e5d7f5652fb017c304b5ad5
Acked-by: Eliad Ben Yishay <[email protected]>
Signed-off-by: Amir Levy <[email protected]>
Amir Levy 3 years ago
parent
commit
b14a195498

+ 296 - 28
drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c

@@ -11,6 +11,7 @@
 #include <linux/in.h>
 #include <linux/stddef.h>
 #include <linux/ip.h>
+#include <linux/ipv6.h>
 #include <linux/fs.h>
 #include <linux/module.h>
 #include <linux/msm_ipa.h>
@@ -34,6 +35,8 @@
 #define NETDEV_NAME "rndis"
 #define IPV4_HDR_NAME "rndis_eth_ipv4"
 #define IPV6_HDR_NAME "rndis_eth_ipv6"
+#define RNDIS_HDR_NAME "rndis"
+#define ULSO_MAX_SIZE 64000
 #define IPA_TO_USB_CLIENT IPA_CLIENT_USB_CONS
 #define INACTIVITY_MSEC_DELAY 100
 #define DEFAULT_OUTSTANDING_HIGH 64
@@ -110,6 +113,13 @@ static void *ipa_rndis_logbuf;
 #define RNDIS_IPA_LOG_ENTRY() RNDIS_IPA_DEBUG("begin\n")
 #define RNDIS_IPA_LOG_EXIT()  RNDIS_IPA_DEBUG("end\n")
 
+#define IPV4_IS_TCP(iph) ((iph)->protocol == IPPROTO_TCP)
+#define IPV4_IS_UDP(iph) ((iph)->protocol == IPPROTO_UDP)
+#define IPV6_IS_TCP(iph) (((struct ipv6hdr *)iph)->nexthdr == IPPROTO_TCP)
+#define IPV6_IS_UDP(iph) (((struct ipv6hdr *)iph)->nexthdr == IPPROTO_UDP)
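+
+/*
+ * Worst-case IP + TCP header overhead (20B IPv4 + 20B TCP, 40B IPv6 +
+ * 20B TCP) subtracted from the netdev MTU when deriving gso_size.
+ */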
+#define IPV4_DELTA 40
+#define IPV6_DELTA 60
+
 /**
  * enum rndis_ipa_state - specify the current driver internal state
  *  which is guarded by a state machine.
@@ -195,6 +205,8 @@ enum rndis_ipa_operation {
  * @pm_hdl: handle for IPA PM framework
  * @is_vlan_mode: should driver work in vlan mode?
  * @netif_rx_function: holds the correct network stack API, needed for NAPI
+ * @is_ulso_mode: indicator for ulso support
+ * @rndis_hdr_hdl: hdr handle of rndis header
  */
 struct rndis_ipa_dev {
 	struct net_device *net;
@@ -225,6 +237,8 @@ struct rndis_ipa_dev {
 	u32 pm_hdl;
 	bool is_vlan_mode;
 	int (*netif_rx_function)(struct sk_buff *skb);
+	bool is_ulso_mode;
+	u32 rndis_hdr_hdl;
 };
 
 /**
@@ -243,6 +257,34 @@ struct rndis_pkt_hdr {
 	__le32  zeroes[7];
 } __packed__;
 
+/**
+ * struct qmap_hdr - QMAP header with the ULSO extension header
+ * @pad: padding bits, set to 0
+ * @next_hdr: 1 - a qmap extension header follows, 0 - it does not
+ * @cd: 0 - data, 1 - command
+ * @mux_id: logical channel (mux) id
+ * @packet_len_with_pad: packet length excluding the qmap header
+ * @ext_next_hdr: always zero
+ * @hdr_type: type of the extension header
+ * @additional_hdr_size: distance from the end of the qmap header to the
+ *		start of the ip header
+ * @reserved: reserved, set to 0
+ * @zero_checksum: 0 - compute checksum, 1 - zero checksum
+ * @ip_id_cfg: 0 - running ip id per segment, 1 - constant ip id
+ * @segment_size: maximum segment size for the segmentation operation
+ */
+struct qmap_hdr {
+	u16 pad: 6;
+	u16 next_hdr: 1;
+	u16 cd: 1;
+	u16 mux_id: 8;
+	u16 packet_len_with_pad: 16;
+	u16 ext_next_hdr: 1;
+	u16 hdr_type: 7;
+	u16 additional_hdr_size: 5;
+	u16 reserved: 1;
+	u16 zero_checksum: 1;
+	u16 ip_id_cfg: 1;
+	u16 segment_size: 16;
+} __packed;
+
 static int rndis_ipa_open(struct net_device *net);
 static void rndis_ipa_packet_receive_notify
 	(void *private, enum ipa_dp_evt_type evt, unsigned long data);
@@ -260,15 +302,17 @@ static int rndis_ipa_stop(struct net_device *net);
 static void rndis_ipa_enable_data_path(struct rndis_ipa_dev *rndis_ipa_ctx);
 static struct sk_buff *rndis_encapsulate_skb(struct sk_buff *skb,
 	struct rndis_ipa_dev *rndis_ipa_ctx);
+static struct sk_buff *qmap_encapsulate_skb(struct sk_buff *skb);
 static void rndis_ipa_xmit_error(struct sk_buff *skb);
 static void rndis_ipa_xmit_error_aftercare_wq(struct work_struct *work);
 static void rndis_ipa_prepare_header_insertion
 	(int eth_type,
 	const char *hdr_name, struct ipa_hdr_add *add_hdr,
 	const void *dst_mac, const void *src_mac, bool is_vlan_mode);
-static int rndis_ipa_hdrs_cfg
-	(struct rndis_ipa_dev *rndis_ipa_ctx,
+static int rndis_ipa_hdrs_cfg(struct rndis_ipa_dev *rndis_ipa_ctx,
 	const void *dst_mac, const void *src_mac);
+static int rndis_ipa_hdrs_hpc_cfg(struct rndis_ipa_dev *rndis_ipa_ctx);
+static int rndis_ipa_hdrs_hpc_destroy(struct rndis_ipa_dev *rndis_ipa_ctx);
 static int rndis_ipa_hdrs_destroy(struct rndis_ipa_dev *rndis_ipa_ctx);
 static struct net_device_stats *rndis_ipa_get_stats(struct net_device *net);
 static int rndis_ipa_register_properties(char *netdev_name, bool is_vlan_mode);
@@ -330,7 +374,7 @@ static const struct file_operations rndis_ipa_aggr_ops = {
 static struct ipa_ep_cfg ipa_to_usb_ep_cfg = {
 	.mode = {
 		.mode = IPA_BASIC,
-		.dst  = IPA_CLIENT_APPS_LAN_CONS,
+		.dst = IPA_CLIENT_APPS_LAN_CONS,
 	},
 	.hdr = {
 		.hdr_len = ETH_HLEN + sizeof(struct rndis_pkt_hdr),
@@ -338,14 +382,14 @@ static struct ipa_ep_cfg ipa_to_usb_ep_cfg = {
 		.hdr_ofst_metadata = 0,
 		.hdr_additional_const_len = ETH_HLEN,
 		.hdr_ofst_pkt_size_valid = true,
-		.hdr_ofst_pkt_size = 3 * sizeof(u32),
+		.hdr_ofst_pkt_size = offsetof(struct rndis_pkt_hdr, data_len),
 		.hdr_a5_mux = false,
 		.hdr_remove_additional = false,
 		.hdr_metadata_reg_valid = false,
 	},
 	.hdr_ext = {
 		.hdr_pad_to_alignment = 0,
-		.hdr_total_len_or_pad_offset = 1 * sizeof(u32),
+		.hdr_total_len_or_pad_offset = offsetof(struct rndis_pkt_hdr, msg_len),
 		.hdr_payload_len_inc_padding = false,
 		.hdr_total_len_or_pad = IPA_HDR_TOTAL_LEN,
 		.hdr_total_len_or_pad_valid = true,
@@ -484,6 +528,35 @@ static struct rndis_pkt_hdr rndis_template_hdr = {
 	.zeroes = {0},
 };
 
+/**
+ * qmap_template_hdr - QMAP template structure for RNDIS_IPA SW insertion
+ * @pad: Set to 0
+ * @next_hdr: extension header exists - 1
+ * @cd: Data packet - 0
+ * @mux_id: Always 0
+ * @packet_len_with_pad: Set dynamically
+ * @ext_next_hdr: Always 0
+ * @hdr_type: Set to ULSO - 0x3
+ * @additional_hdr_size: Set to the VLAN tag size in VLAN mode, otherwise 0
+ * @zero_checksum: Always compute checksum - 0
+ * @ip_id_cfg: Always use a running ip id per segment - 0
+ * @segment_size: Set dynamically
+ */
+static struct qmap_hdr qmap_template_hdr = {
+	.pad = 0,
+	.next_hdr = 1,
+	.cd = 0,
+	.mux_id = 0,
+	.packet_len_with_pad = 0,
+	.ext_next_hdr = 0,
+	.hdr_type = 0x3,
+	.additional_hdr_size = 0,
+	.reserved = 0,
+	.zero_checksum = 0,
+	.ip_id_cfg = 0,
+	.segment_size = 0,
+};
+
 static void rndis_ipa_msg_free_cb(void *buff, u32 len, u32 type)
 {
 	kfree(buff);
@@ -610,18 +683,36 @@ int rndis_ipa_init(struct ipa_usb_init_params *params)
 		RNDIS_IPA_ERROR_RL("couldn't acquire vlan mode, is ipa ready?\n");
 		goto fail_get_vlan_mode;
 	}
-
 	RNDIS_IPA_DEBUG("is_vlan_mode %d\n", rndis_ipa_ctx->is_vlan_mode);
 
-	result = rndis_ipa_hdrs_cfg
-			(rndis_ipa_ctx,
-			params->host_ethaddr,
+	rndis_ipa_ctx->is_ulso_mode = ipa3_is_ulso_supported();
+	RNDIS_IPA_DEBUG("is_ulso_mode=%d\n", rndis_ipa_ctx->is_ulso_mode);
+
+	result = rndis_ipa_hdrs_cfg(rndis_ipa_ctx, params->host_ethaddr,
 			params->device_ethaddr);
 	if (result) {
 		RNDIS_IPA_ERROR("fail on ipa hdrs set\n");
 		goto fail_hdrs_cfg;
 	}
-	RNDIS_IPA_DEBUG("IPA header-insertion configed for Ethernet+RNDIS\n");
+	RNDIS_IPA_DEBUG("IPA header-insertion configured for Ethernet\n");
+
+	if (rndis_ipa_ctx->is_ulso_mode) {
+		result = rndis_ipa_hdrs_hpc_cfg(rndis_ipa_ctx);
+		if (result) {
+			RNDIS_IPA_ERROR("fail on ipa hdrs hpc set\n");
+			goto fail_add_hdrs_hpc;
+		}
+		RNDIS_IPA_DEBUG("IPA header-insertion configured for RNDIS\n");
+
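+		/*
+		 * Advertise checksum, scatter-gather, HW GRO and TSO/USO
+		 * offloads so the stack can hand large unsegmented packets
+		 * to the driver for ULSO.
+		 */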
+		rndis_ipa_ctx->net->hw_features = NETIF_F_RXCSUM;
+		rndis_ipa_ctx->net->hw_features |=
+		    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+		rndis_ipa_ctx->net->hw_features |= NETIF_F_SG;
+		rndis_ipa_ctx->net->hw_features |= NETIF_F_GRO_HW;
+		rndis_ipa_ctx->net->hw_features |= NETIF_F_GSO_UDP_L4;
+		rndis_ipa_ctx->net->hw_features |= NETIF_F_ALL_TSO;
+		rndis_ipa_ctx->net->gso_max_size = ULSO_MAX_SIZE;
+	}
 
 	result = rndis_ipa_register_properties(net->name,
 		rndis_ipa_ctx->is_vlan_mode);
@@ -651,6 +742,10 @@ int rndis_ipa_init(struct ipa_usb_init_params *params)
 		RNDIS_IPA_DEBUG("LAN RX NAPI enabled = False");
 	}
 
+	if (rndis_ipa_ctx->is_vlan_mode)
+		qmap_template_hdr.additional_hdr_size =
+			VLAN_ETH_HLEN - ETH_HLEN;
+
 	rndis_ipa = rndis_ipa_ctx;
 	params->ipa_rx_notify = rndis_ipa_packet_receive_notify;
 	params->ipa_tx_notify = rndis_ipa_tx_complete_notify;
@@ -667,6 +762,9 @@ int rndis_ipa_init(struct ipa_usb_init_params *params)
 fail_register_netdev:
 	rndis_ipa_deregister_properties(net->name);
 fail_register_tx:
+	if (rndis_ipa_ctx->is_ulso_mode)
+		rndis_ipa_hdrs_hpc_destroy(rndis_ipa_ctx);
+fail_add_hdrs_hpc:
 	rndis_ipa_hdrs_destroy(rndis_ipa_ctx);
 fail_hdrs_cfg:
 fail_get_vlan_mode:
@@ -938,6 +1036,7 @@ static netdev_tx_t rndis_ipa_start_xmit(struct sk_buff *skb,
 	int ret;
 	netdev_tx_t status = NETDEV_TX_BUSY;
 	struct rndis_ipa_dev *rndis_ipa_ctx = netdev_priv(net);
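+	/* remember the pre-encapsulation length to detect an ULSO fallback */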
+	unsigned int skb_len = skb->len;
 
 	netif_trans_update(net);
 
@@ -984,7 +1083,41 @@ static netdev_tx_t rndis_ipa_start_xmit(struct sk_buff *skb,
 		goto out;
 	}
 
-	skb = rndis_encapsulate_skb(skb, rndis_ipa_ctx);
+	if (rndis_ipa_ctx->is_ulso_mode &&
+		(net->features & (NETIF_F_ALL_TSO | NETIF_F_GSO_UDP_L4))) {
+		struct iphdr *iph = NULL;
+
+		/*
+		 * gso_size must be set here because the TX offload features
+		 * are enabled, meaning that for a small packet the stack will
+		 * not compute its checksum and the hardware must do it, so the
+		 * packet has to be marked as a GSO packet by setting gso_size
+		 * to a non-zero value. The value is only used internally by
+		 * the IPA driver, so it does not matter which non-zero value
+		 * is used.
+		 */
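+		/*
+		 * Only TCP or UDP payloads are QMAP-encapsulated for ULSO;
+		 * any other protocol leaves skb->len unchanged and falls back
+		 * to plain RNDIS encapsulation below.
+		 */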
+		if (ntohs(skb->protocol) == ETH_P_IP) {
+			iph = ip_hdr(skb);
+			if (IPV4_IS_TCP(iph) || IPV4_IS_UDP(iph)) {
+				skb = qmap_encapsulate_skb(skb);
+				skb_shinfo(skb)->gso_size =
+					net->mtu - IPV4_DELTA;
+			}
+		} else if (ntohs(skb->protocol) == ETH_P_IPV6) {
+			iph = ip_hdr(skb);
+			if (IPV6_IS_TCP(iph) || IPV6_IS_UDP(iph)) {
+				skb = qmap_encapsulate_skb(skb);
+				skb_shinfo(skb)->gso_size =
+					net->mtu - IPV6_DELTA;
+			}
+		}
+	} else {
+		skb = rndis_encapsulate_skb(skb, rndis_ipa_ctx);
+	}
+	/* This indicates no encapsulation was done - ULSO mode with bad skb */
+	if (unlikely(skb_len == skb->len)) {
+		skb_shinfo(skb)->gso_size = 0;
+		skb = rndis_encapsulate_skb(skb, rndis_ipa_ctx);
+	}
 	trace_rndis_tx_dp(skb->protocol);
 	ret = ipa_tx_dp(IPA_TO_USB_CLIENT, skb, NULL);
 	if (ret) {
@@ -1404,6 +1537,14 @@ void rndis_ipa_cleanup(void *private)
 	}
 	RNDIS_IPA_DEBUG("deregister Tx/Rx properties was successful\n");
 
+	if (rndis_ipa_ctx->is_ulso_mode) {
+		ret = rndis_ipa_hdrs_hpc_destroy(rndis_ipa_ctx);
+		if (ret)
+			RNDIS_IPA_ERROR("rndis_ipa_hdrs_hpc_destroy failed\n");
+		else
+			RNDIS_IPA_DEBUG("rndis_ipa_hdrs_hpc_destroy success\n");
+	}
+
 	ret = rndis_ipa_hdrs_destroy(rndis_ipa_ctx);
 	if (ret)
 		RNDIS_IPA_ERROR(
@@ -1443,6 +1584,8 @@ static void rndis_ipa_enable_data_path(struct rndis_ipa_dev *rndis_ipa_ctx)
 		RNDIS_IPA_DEBUG("device_ready_notify() not supplied\n");
 	}
 
+	/* maximum segment size for HW segmentation, in network byte order */
+	qmap_template_hdr.segment_size = htons(rndis_ipa_ctx->net->mtu -
+		sizeof(qmap_template_hdr));
 	netif_start_queue(rndis_ipa_ctx->net);
 	RNDIS_IPA_DEBUG("netif_start_queue() was called\n");
 }
@@ -1570,6 +1713,95 @@ static void rndis_ipa_prepare_header_insertion(
 	}
 }
 
+/**
+ * rndis_ipa_hdrs_hpc_cfg() - configure hpc header insertion in IPA core
+ * @rndis_ipa_ctx: main driver context
+ *
+ * This function adds headers that are used by the hpc header insertion
+ * mechanism.
+ *
+ * Returns negative errno, or zero on success
+ */
+static int rndis_ipa_hdrs_hpc_cfg(struct rndis_ipa_dev *rndis_ipa_ctx)
+{
+	struct ipa_ioc_add_hdr *hdrs;
+	struct ipa_hdr_add *rndis_hdr;
+	struct ipa_pkt_init_ex_hdr_ofst_set lookup;
+	int result = 0;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	hdrs = kzalloc(sizeof(*hdrs) + sizeof(*rndis_hdr), GFP_KERNEL);
+	if (!hdrs) {
+		result = -ENOMEM;
+		goto fail_mem;
+	}
+	rndis_hdr = &hdrs->hdr[0];
+	strlcpy(rndis_hdr->name, RNDIS_HDR_NAME, sizeof(rndis_hdr->name));
+	memcpy(rndis_hdr->hdr, &rndis_template_hdr, sizeof(rndis_template_hdr));
+	rndis_hdr->hdr_len = sizeof(rndis_template_hdr);
+	rndis_hdr->hdr_hdl = -1;
+	rndis_hdr->is_partial = false;
+	rndis_hdr->status = -1;
+	hdrs->num_hdrs = 1;
+	hdrs->commit = 1;
+
+	result = ipa3_add_hdr_hpc(hdrs);
+	if (result) {
+		RNDIS_IPA_ERROR("Fail on Header-Insertion(%d)\n", result);
+		goto fail_add_hdr;
+	}
+	if (rndis_hdr->status) {
+		RNDIS_IPA_ERROR("Fail on Header-Insertion rndis(%d)\n",
+			rndis_hdr->status);
+		result = rndis_hdr->status;
+		goto fail_add_hdr;
+	}
+
+	rndis_ipa_ctx->rndis_hdr_hdl = rndis_hdr->hdr_hdl;
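+
+	/* point the USB pipe's PACKET_INIT_EX command at the RNDIS header */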
+	lookup.ep = IPA_TO_USB_CLIENT;
+	strlcpy(lookup.name, RNDIS_HDR_NAME, sizeof(lookup.name));
+	result = ipa_set_pkt_init_ex_hdr_ofst(&lookup, true);
+	if (result)
+		goto fail_add_hdr;
+
+	RNDIS_IPA_LOG_EXIT();
+
+fail_add_hdr:
+	kfree(hdrs);
+fail_mem:
+	return result;
+}
+
+/**
+ * rndis_ipa_hdrs_hpc_destroy() - remove the IPA headers hpc configuration done
+ * for the driver data path bridging.
+ * @rndis_ipa_ctx: the driver context
+ *
+ * Revert the work done in rndis_ipa_hdrs_hpc_cfg()
+ */
+static int rndis_ipa_hdrs_hpc_destroy(struct rndis_ipa_dev *rndis_ipa_ctx)
+{
+	struct ipa_ioc_del_hdr *del_hdr;
+	struct ipa_hdr_del *rndis_hdr;
+	int result;
+
+	del_hdr = kzalloc(sizeof(*del_hdr) + sizeof(*rndis_hdr), GFP_KERNEL);
+	if (!del_hdr)
+		return -ENOMEM;
+
+	del_hdr->commit = 1;
+	del_hdr->num_hdls = 1;
+	rndis_hdr = &del_hdr->hdl[0];
+	rndis_hdr->hdl = rndis_ipa_ctx->rndis_hdr_hdl;
+
+	result = ipa3_del_hdr(del_hdr);
+	if (result || rndis_hdr->status)
+		RNDIS_IPA_ERROR("ipa3_del_hdr failed\n");
+	kfree(del_hdr);
+
+	return result;
+}
+
 /**
  * rndis_ipa_hdrs_cfg() - configure header insertion block in IPA core
  *  to allow HW bridging
@@ -1596,9 +1828,8 @@ static int rndis_ipa_hdrs_cfg(
 
 	RNDIS_IPA_LOG_ENTRY();
 
-	hdrs = kzalloc
-		(sizeof(*hdrs) + sizeof(*ipv4_hdr) + sizeof(*ipv6_hdr),
-		GFP_KERNEL);
+	hdrs = kzalloc(sizeof(*hdrs) + sizeof(*ipv4_hdr) + sizeof(*ipv6_hdr),
+		GFP_KERNEL);
 	if (!hdrs) {
 		result = -ENOMEM;
 		goto fail_mem;
@@ -1606,15 +1837,13 @@ static int rndis_ipa_hdrs_cfg(
 
 	ipv4_hdr = &hdrs->hdr[0];
 	ipv6_hdr = &hdrs->hdr[1];
-	rndis_ipa_prepare_header_insertion
-		(ETH_P_IP, IPV4_HDR_NAME,
+	rndis_ipa_prepare_header_insertion(ETH_P_IP, IPV4_HDR_NAME,
 		ipv4_hdr, dst_mac, src_mac, rndis_ipa_ctx->is_vlan_mode);
-	rndis_ipa_prepare_header_insertion
-		(ETH_P_IPV6, IPV6_HDR_NAME,
+	rndis_ipa_prepare_header_insertion(ETH_P_IPV6, IPV6_HDR_NAME,
 		ipv6_hdr, dst_mac, src_mac, rndis_ipa_ctx->is_vlan_mode);
 
-	hdrs->commit = 1;
 	hdrs->num_hdrs = 2;
+	hdrs->commit = 1;
 	result = ipa3_add_hdr(hdrs);
 	if (result) {
 		RNDIS_IPA_ERROR("Fail on Header-Insertion(%d)\n", result);
@@ -1665,7 +1894,6 @@ static int rndis_ipa_hdrs_destroy(struct rndis_ipa_dev *rndis_ipa_ctx)
 
 	del_hdr->commit = 1;
 	del_hdr->num_hdls = 2;
-
 	ipv4 = &del_hdr->hdl[0];
 	ipv4->hdl = rndis_ipa_ctx->eth_ipv4_hdr_hdl;
 	ipv6 = &del_hdr->hdl[1];
@@ -1840,7 +2068,6 @@ static int rndis_ipa_deregister_pm_client(struct rndis_ipa_dev *rndis_ipa_ctx)
 	return 0;
 }
 
-
 /**
  * rndis_encapsulate_skb() - encapsulate the given Ethernet skb with
  *  an RNDIS header
@@ -1849,7 +2076,7 @@ static int rndis_ipa_deregister_pm_client(struct rndis_ipa_dev *rndis_ipa_ctx)
  *
  * Shall use a template header for RNDIS and update it with the given
  * skb values.
- * Ethernet is expected to be already encapsulate the packet.
+ * Ethernet 2 header should already be encapsulated in the packet.
  */
 static struct sk_buff *rndis_encapsulate_skb(struct sk_buff *skb,
 	struct rndis_ipa_dev *rndis_ipa_ctx)
@@ -1861,6 +2088,7 @@ static struct sk_buff *rndis_encapsulate_skb(struct sk_buff *skb,
 	if (unlikely(skb_headroom(skb) < sizeof(rndis_template_hdr))) {
 		struct sk_buff *new_skb = skb_copy_expand(skb,
 			sizeof(rndis_template_hdr), 0, GFP_ATOMIC);
+
 		if (!new_skb) {
 			RNDIS_IPA_ERROR_RL("no memory for skb expand\n");
 			return skb;
@@ -1888,6 +2116,43 @@ static struct sk_buff *rndis_encapsulate_skb(struct sk_buff *skb,
 	return skb;
 }
 
+/**
+ * qmap_encapsulate_skb() - encapsulate the given Ethernet skb with
+ *  a QMAP header
+ * @skb: packet to be encapsulated with the QMAP header
+ *
+ * Shall use a template header for QMAP and update it with the given
+ * skb values.
+ * Ethernet 2 header should already be encapsulated in the packet.
+ */
+static struct sk_buff *qmap_encapsulate_skb(struct sk_buff *skb)
+{
+	struct qmap_hdr *qh_ptr;
+	struct qmap_hdr qh = qmap_template_hdr;
+
+	/* if there is no room in this skb, allocate a new one */
+	if (unlikely(skb_headroom(skb) < sizeof(qmap_template_hdr))) {
+		struct sk_buff *new_skb = skb_copy_expand(skb,
+			sizeof(qmap_template_hdr), 0, GFP_ATOMIC);
+
+		if (!new_skb) {
+			RNDIS_IPA_ERROR("no memory for skb expand\n");
+			return skb;
+		}
+		RNDIS_IPA_DEBUG("skb expanded. old %pK new %pK\n",
+			skb, new_skb);
+		dev_kfree_skb_any(skb);
+		skb = new_skb;
+	}
+
+	/* make room at the head of the SKB to put the QMAP header */
+	qh_ptr = (struct qmap_hdr *)skb_push(skb, sizeof(qh));
+	qh.packet_len_with_pad = htons(skb->len);
+	memcpy(qh_ptr, &qh, sizeof(*qh_ptr));
+
+	return skb;
+}
+
 /**
  * rx_filter() - logic that decide if the current skb is to be filtered out
  * @skb: skb that may be sent up to the network stack
@@ -1976,20 +2241,21 @@ static int rndis_ipa_ep_registers_cfg(
 	}
 
 	if (is_vlan_mode) {
-		usb_to_ipa_ep_cfg->hdr.hdr_len =
-			VLAN_ETH_HLEN + add;
+		usb_to_ipa_ep_cfg->hdr.hdr_len = VLAN_ETH_HLEN + add;
 		ipa_to_usb_ep_cfg.hdr.hdr_len =
 			VLAN_ETH_HLEN + sizeof(struct rndis_pkt_hdr);
 		ipa_to_usb_ep_cfg.hdr.hdr_additional_const_len = VLAN_ETH_HLEN;
+		qmap_template_hdr.additional_hdr_size =
+				VLAN_ETH_HLEN - ETH_HLEN;
 	} else {
-		usb_to_ipa_ep_cfg->hdr.hdr_len =
-			ETH_HLEN + add;
+		usb_to_ipa_ep_cfg->hdr.hdr_len = ETH_HLEN + add;
 		ipa_to_usb_ep_cfg.hdr.hdr_len =
 			ETH_HLEN + sizeof(struct rndis_pkt_hdr);
 		ipa_to_usb_ep_cfg.hdr.hdr_additional_const_len = ETH_HLEN;
 	}
 
 	usb_to_ipa_ep_cfg->deaggr.max_packet_len = max_xfer_size_bytes_to_dev;
+
 	result = ipa3_cfg_ep(usb_to_ipa_hdl, usb_to_ipa_ep_cfg);
 	if (result) {
 		pr_err("failed to configure USB to IPA point\n");
@@ -2006,8 +2272,7 @@ static int rndis_ipa_ep_registers_cfg(
 	} else {
 		ipa_to_usb_ep_cfg.aggr.aggr_time_limit =
 			DEFAULT_AGGR_TIME_LIMIT;
-		ipa_to_usb_ep_cfg.aggr.aggr_pkt_limit =
-			DEFAULT_AGGR_PKT_LIMIT;
+		ipa_to_usb_ep_cfg.aggr.aggr_pkt_limit = DEFAULT_AGGR_PKT_LIMIT;
 	}
 
 	RNDIS_IPA_DEBUG(
@@ -2020,6 +2285,9 @@ static int rndis_ipa_ep_registers_cfg(
 	/* enable hdr_metadata_reg_valid */
 	usb_to_ipa_ep_cfg->hdr.hdr_metadata_reg_valid = true;
 
+	if (ipa3_is_ulso_supported())
+		ipa_to_usb_ep_cfg.ulso.is_ulso_pipe = true;
+
 	/*xlat config in vlan mode */
 	if (is_vlan_mode) {
 		usb_to_ipa_ep_cfg->hdr.hdr_ofst_metadata_valid = 1;

+ 21 - 0
drivers/platform/msm/ipa/ipa_common_i.h

@@ -173,6 +173,17 @@ do {\
 #define IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC (3000)
 #define IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC (5000)
 
+/**
+ * struct ipa_pkt_init_ex_hdr_ofst_set - header entry lookup parameters; if
+ * the lookup is successful then the ep's pkt_init_ex offset will be set.
+ * @name: name of the header resource
+ * @ep:	[in] the client whose IC (PKT_INIT_EX) header offset will be set
+ */
+struct ipa_pkt_init_ex_hdr_ofst_set {
+	char name[IPA_RESOURCE_NAME_MAX];
+	enum ipa_client_type ep;
+};
+
 enum ipa_active_client_log_type {
 	EP,
 	SIMPLE,
@@ -660,6 +671,10 @@ int ipa_cfg_ep_holb_by_client(enum ipa_client_type client,
 /*
 * Header removal / addition
 */
+int ipa3_add_hdr_hpc(struct ipa_ioc_add_hdr *hdrs);
+
+int ipa3_add_hdr_hpc_usr(struct ipa_ioc_add_hdr *hdrs, bool user_only);
+
 int ipa3_add_hdr(struct ipa_ioc_add_hdr *hdrs);
 
 int ipa3_del_hdr(struct ipa_ioc_del_hdr *hdls);
@@ -668,6 +683,8 @@ int ipa3_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs, bool user_only);
 
 int ipa3_reset_hdr(bool user_only);
 
+int ipa3_get_hdr(struct ipa_ioc_get_hdr *lookup);
+
 /*
 * Header Processing Context
 */
@@ -840,4 +857,8 @@ int ipa_eth_client_disconn_evt(struct ipa_ecm_msg *msg);
 /* ULSO mode Query */
 bool ipa3_is_ulso_supported(void);
 
+/* IPA_PACKET_INIT_EX IC to pipe API */
+int ipa_set_pkt_init_ex_hdr_ofst(
+	struct ipa_pkt_init_ex_hdr_ofst_set *lookup, bool proc_ctx);
+
 #endif /* _IPA_COMMON_I_H_ */

+ 98 - 43
drivers/platform/msm/ipa/ipa_v3/ipa.c

@@ -158,8 +158,7 @@ struct ipa3_context *ipa3_ctx = NULL;
 EXPORT_SYMBOL(ipa3_ctx);
 
 int ipa3_plat_drv_probe(struct platform_device *pdev_p);
-int ipa3_pci_drv_probe(
-	struct pci_dev            *pci_dev,
+int ipa3_pci_drv_probe(struct pci_dev *pci_dev,
 	const struct pci_device_id *ent);
 
 /**
@@ -239,7 +238,7 @@ static const struct pci_error_handlers ipa_pci_err_handler = {
 };
 
 static struct pci_driver ipa_pci_driver = {
-	.name     = ipa_pci_driver_name,
+	.name = ipa_pci_driver_name,
 	.id_table = ipa_pci_tbl,
 	.probe    = ipa3_pci_drv_probe,
 	.remove   = ipa_pci_remove,
@@ -3672,7 +3671,7 @@ static int ipa3_setup_exception_path(void)
 
 	/* install the basic exception header */
 	hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) + 1 *
-		      sizeof(struct ipa_hdr_add), GFP_KERNEL);
+		sizeof(struct ipa_hdr_add), GFP_KERNEL);
 	if (!hdr)
 		return -ENOMEM;
 
@@ -3777,7 +3776,7 @@ static int ipa3_init_smem_region(int memory_region_size,
 
 /**
  * ipa3_init_q6_smem() - Initialize Q6 general memory and
- *                      header memory regions in IPA.
+ *	header memory regions in IPA.
  *
  * Return codes:
  * 0: success
@@ -4568,7 +4567,7 @@ void ipa3_update_ssr_state(bool is_ssr)
 
 /**
  * ipa3_q6_pre_shutdown_cleanup() - A cleanup for all Q6 related configuration
- *                    in IPA HW. This is performed in case of SSR.
+ *	in IPA HW. This is performed in case of SSR.
  *
  * This is a mandatory procedure, in case one of the steps fails, the
  * AP needs to restart.
@@ -6896,9 +6895,11 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
 	/* The following will retrieve and save the gsi fw version */
 	ipa_save_gsi_ver();
 
-	/* IPA version 3.0 IPAHAL initialized at pre_init as there is no SMMU.
+	/*
+	 * IPA version 3.0 IPAHAL initialized at pre_init as there is no SMMU.
 	 * In normal mode need to wait until SMMU is attached and
-         * thus initialization done here*/
+	 * thus initialization done here
+	 */
 	if (ipa3_ctx->ipa_hw_type != IPA_HW_v3_0) {
 		if (ipahal_init(ipa3_ctx->ipa_hw_type, ipa3_ctx->mmio,
 				ipa3_ctx->ipa_cfg_offset, ipa3_ctx->pdev)) {
@@ -7800,8 +7801,8 @@ static int ipa_alloc_pkt_init_ex(void)
 	cmd_mask.rt_retain_hdr = true;
 	cmd_mask.rt_pipe_dest_idx = true;
 	for (cmd.rt_pipe_dest_idx = 0;
-	      cmd.rt_pipe_dest_idx < ipa3_ctx->ipa_num_pipes;
-	      cmd.rt_pipe_dest_idx++) {
+		cmd.rt_pipe_dest_idx < ipa3_ctx->ipa_num_pipes;
+		cmd.rt_pipe_dest_idx++) {
 		result = ipahal_modify_imm_cmd(IPA_IMM_CMD_IP_PACKET_INIT_EX,
 			cmd_pyld->data, &cmd, &cmd_mask);
 		if (unlikely(result != 0)) {
@@ -7855,6 +7856,63 @@ free_imm:
 	return result;
 }
 
+/**
+ * ipa_set_pkt_init_ex_hdr_ofst() - Set pkt_init_ex header offset for the ep
+ * @lookup: header and ep identifying parameters
+ * @proc_ctx: true to use the header's processing context offset instead of
+ *	the header offset
+ *
+ * Returns 0 on success, negative on failure
+ */
+int ipa_set_pkt_init_ex_hdr_ofst(struct ipa_pkt_init_ex_hdr_ofst_set *lookup,
+	bool proc_ctx)
+{
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	struct ipahal_imm_cmd_ip_packet_init_ex cmd = {0};
+	u32 offset;
+	int res = 0;
+	int dst_ep_idx;
+
+	if (!lookup)
+		return -EINVAL;
+
+	dst_ep_idx = ipa3_get_ep_mapping(lookup->ep);
+	IPADBG("dst_ep_idx=%d\n", dst_ep_idx);
+	if (dst_ep_idx == -1) {
+		IPAERR("Client %u is not mapped\n", lookup->ep);
+		return -EINVAL;
+	}
+	if (proc_ctx)
+		res = ipa3_get_hdr_proc_ctx_offset(lookup->name, &offset);
+	else
+		res = ipa3_get_hdr_offset(lookup->name, &offset);
+	if (res != 0)
+		return res;
+
+	cmd.rt_hdr_offset = offset;
+	IPADBG("cmd.rt_hdr_offset=%d\n", cmd.rt_hdr_offset);
+	cmd.frag_disable = true;
+	cmd.nat_disable = true;
+	cmd.filter_disable = true;
+	cmd.route_disable = true;
+	cmd.hdr_removal_insertion_disable = false;
+	cmd.cs_disable = false;
+	cmd.flt_retain_hdr = true;
+	cmd.rt_retain_hdr = true;
+	cmd.rt_pipe_dest_idx = dst_ep_idx;
+	cmd.rt_proc_ctx = proc_ctx;
+	cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_IP_PACKET_INIT_EX,
+		&cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("failed to construct IMM cmd\n");
+		return -ENOMEM;
+	}
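+	/* each destination pipe owns one PACKET_INIT_EX slot; overwrite it */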
+	memcpy(ipa3_ctx->pkt_init_ex_mem.base + dst_ep_idx * cmd_pyld->len,
+		cmd_pyld->data, cmd_pyld->len);
+	ipahal_destroy_imm_cmd(cmd_pyld);
+	return 0;
+}
+EXPORT_SYMBOL(ipa_set_pkt_init_ex_hdr_ofst);
+
 /*
  * SCM call to check if secure dump is allowed.
  *
@@ -9149,9 +9207,9 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
 		of_property_read_bool(pdev->dev.of_node,
 		"qcom,ipa-gpi-event-rp-ddr");
 	IPADBG(": Read GPI or GCI Event RP from DDR = %s\n",
-	       ipa_drv_res->ipa_gpi_event_rp_ddr ? "True" : "False");
+		ipa_drv_res->ipa_gpi_event_rp_ddr ? "True" : "False");
 
-	ipa_drv_res->tx_napi_enable = 
+	ipa_drv_res->tx_napi_enable =
 		of_property_read_bool(pdev->dev.of_node,
 		"qcom,tx-napi");
 	IPADBG(": Enable tx NAPI = %s\n",
@@ -9160,9 +9218,8 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
 
 	ipa_drv_res->tx_poll = of_property_read_bool(pdev->dev.of_node,
 		"qcom,tx-poll");
-	IPADBG(": Enable tx polling = %s\n",
-	       ipa_drv_res->tx_poll
-	       ? "True" : "False");
+	IPADBG(": Enable tx polling = %s\n", ipa_drv_res->tx_poll
+		? "True" : "False");
 
 	ipa_drv_res->rmnet_ctl_enable =
 		of_property_read_bool(pdev->dev.of_node,
@@ -9411,16 +9468,16 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
 		ipa_drv_res->emulator_intcntrlr_mem_size =
 		    resource_size(resource);
 		IPADBG(":using intctrl-base at 0x%x of size 0x%x\n",
-		       ipa_drv_res->emulator_intcntrlr_mem_base,
-		       ipa_drv_res->emulator_intcntrlr_mem_size);
+			ipa_drv_res->emulator_intcntrlr_mem_base,
+			ipa_drv_res->emulator_intcntrlr_mem_size);
 	}
 
 	ipa_drv_res->entire_ipa_block_size = 0x100000;
 	result = of_property_read_u32(pdev->dev.of_node,
-				      "qcom,entire-ipa-block-size",
-				      &ipa_drv_res->entire_ipa_block_size);
+		"qcom,entire-ipa-block-size",
+		&ipa_drv_res->entire_ipa_block_size);
 	IPADBG(": entire_ipa_block_size = %d\n",
-	       ipa_drv_res->entire_ipa_block_size);
+		ipa_drv_res->entire_ipa_block_size);
 
 	/*
 	 * We'll read register-collection-on-crash here, but log it
@@ -9437,7 +9494,7 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
 	    of_property_read_bool(pdev->dev.of_node,
 				  "qcom,testbus-collection-on-crash");
 	IPADBG(": doing testbus collection on crash = %u\n",
-	       ipa_drv_res->do_testbus_collection_on_crash);
+		ipa_drv_res->do_testbus_collection_on_crash);
 
 	/*
 	 * We'll read non-tn-collection-on-crash here...
@@ -9446,7 +9503,7 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
 	    of_property_read_bool(pdev->dev.of_node,
 				  "qcom,non-tn-collection-on-crash");
 	IPADBG(": doing non-tn collection on crash = %u\n",
-	       ipa_drv_res->do_non_tn_collection_on_crash);
+		ipa_drv_res->do_non_tn_collection_on_crash);
 
 	/*
 	 * We'll read ram-collection-on-crash here...
@@ -9464,7 +9521,7 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
 		ipa_drv_res->do_register_collection_on_crash = true;
 
 	IPADBG(": doing register collection on crash = %u\n",
-	       ipa_drv_res->do_register_collection_on_crash);
+		ipa_drv_res->do_register_collection_on_crash);
 
 	result = of_property_read_u32(
 		pdev->dev.of_node,
@@ -10557,9 +10614,7 @@ void ipa_pc_qmp_enable(void)
  *            PCIe Version
  *************************************************************/
 
-int ipa3_pci_drv_probe(
-	struct pci_dev            *pci_dev,
-	const struct pci_device_id *ent)
+int ipa3_pci_drv_probe(struct pci_dev *pci_dev, const struct pci_device_id *ent)
 {
 	int result;
 	struct ipa3_plat_drv_res *ipa_drv_res;
@@ -10634,7 +10689,7 @@ int ipa3_pci_drv_probe(
 
 	result =
 		of_property_read_u32(NULL, "emulator-bar0-offset",
-				     &bar0_offset);
+			&bar0_offset);
 	if (result) {
 		IPAERR(":get resource failed for emulator-bar0-offset!\n");
 		pci_release_region(pci_dev, 0);
@@ -10643,8 +10698,8 @@ int ipa3_pci_drv_probe(
 	}
 	IPADBG(":using emulator-bar0-offset 0x%08X\n", bar0_offset);
 
-	ipa_start     = ipa_drv_res->ipa_mem_base;
-	gsi_start     = ipa_drv_res->transport_mem_base;
+	ipa_start = ipa_drv_res->ipa_mem_base;
+	gsi_start = ipa_drv_res->transport_mem_base;
 	intctrl_start = ipa_drv_res->emulator_intcntrlr_mem_base;
 
 	/*
@@ -10661,8 +10716,8 @@ int ipa3_pci_drv_probe(
 	mem_start = pci_resource_start(pci_dev, 0);
 	mem_end   = pci_resource_end(pci_dev, 0);
 
-	IPADBG("PCI START                = 0x%x\n", mem_start);
-	IPADBG("PCI END                  = 0x%x\n", mem_end);
+	IPADBG("PCI START = 0x%x\n", mem_start);
+	IPADBG("PCI END = 0x%x\n", mem_end);
 
 	ipa_drv_res->ipa_mem_base = mem_start + bar0_offset;
 
@@ -10675,20 +10730,20 @@ int ipa3_pci_drv_probe(
 	ipa_drv_res->emulator_intcntrlr_mem_base =
 	    ipa_drv_res->ipa_mem_base + (intctrl_start - ipa_start);
 
-	IPADBG("ipa_mem_base                = 0x%x\n",
-	       ipa_drv_res->ipa_mem_base);
-	IPADBG("ipa_mem_size                = 0x%x\n",
-	       ipa_drv_res->ipa_mem_size);
+	IPADBG("ipa_mem_base = 0x%x\n",
+		ipa_drv_res->ipa_mem_base);
+	IPADBG("ipa_mem_size = 0x%x\n",
+		ipa_drv_res->ipa_mem_size);
 
-	IPADBG("transport_mem_base          = 0x%x\n",
-	       ipa_drv_res->transport_mem_base);
-	IPADBG("transport_mem_size          = 0x%x\n",
-	       ipa_drv_res->transport_mem_size);
+	IPADBG("transport_mem_base = 0x%x\n",
+		ipa_drv_res->transport_mem_base);
+	IPADBG("transport_mem_size = 0x%x\n",
+		ipa_drv_res->transport_mem_size);
 
 	IPADBG("emulator_intcntrlr_mem_base = 0x%x\n",
-	       ipa_drv_res->emulator_intcntrlr_mem_base);
+		ipa_drv_res->emulator_intcntrlr_mem_base);
 	IPADBG("emulator_intcntrlr_mem_size = 0x%x\n",
-	       ipa_drv_res->emulator_intcntrlr_mem_size);
+		ipa_drv_res->emulator_intcntrlr_mem_size);
 
 	bits = (ipa_drv_res->use_64_bit_dma_mask) ? 64 : 32;
 
@@ -10734,12 +10789,12 @@ int ipa3_get_transport_info(
 {
 	if (!phys_addr_ptr || !size_ptr) {
 		IPAERR("Bad arg: phys_addr_ptr(%pK) and/or size_ptr(%pK)\n",
-		       phys_addr_ptr, size_ptr);
+			phys_addr_ptr, size_ptr);
 		return -EINVAL;
 	}
 
 	*phys_addr_ptr = ipa3_res.transport_mem_base;
-	*size_ptr      = ipa3_res.transport_mem_size;
+	*size_ptr = ipa3_res.transport_mem_size;
 
 	return 0;
 }

+ 214 - 3
drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c

@@ -531,8 +531,8 @@ bad_len:
 	return -EPERM;
 }
 
-
-static int __ipa_add_hdr(struct ipa_hdr_add *hdr, bool user)
+static int __ipa_add_hdr(struct ipa_hdr_add *hdr, bool user,
+	struct ipa3_hdr_entry **entry_out)
 {
 	struct ipa3_hdr_entry *entry;
 	struct ipa_hdr_offset_entry *offset = NULL;
@@ -650,6 +650,8 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr, bool user)
 	entry->id = id;
 	hdr->hdr_hdl = id;
 	entry->ref_cnt++;
+	if (entry_out)
+		*entry_out = entry;
 
 	return 0;
 
@@ -714,6 +716,40 @@ static int __ipa3_del_hdr_proc_ctx(u32 proc_ctx_hdl,
 	return 0;
 }
 
+static int __ipa_add_hpc_hdr_insertion(struct ipa_hdr_add *hdr, bool user)
+{
+	struct ipa3_hdr_entry *entry = NULL;
+	struct ipa_hdr_proc_ctx_add proc_ctx;
+
+	hdr->status = IPA_HDR_TO_DDR_PATTERN;
+
+	if (__ipa_add_hdr(hdr, user, &entry))
+		goto error;
+
+	IPADBG("adding processing context for header %s\n", hdr->name);
+	proc_ctx.type = IPA_HDR_PROC_NONE;
+	proc_ctx.hdr_hdl = hdr->hdr_hdl;
+	if (__ipa_add_hdr_proc_ctx(&proc_ctx, false, user)) {
+		IPAERR("failed to add hdr proc ctx\n");
+		goto fail_add_proc_ctx;
+	}
+	entry->proc_ctx = (struct ipa3_hdr_proc_ctx_entry *)
+		ipa3_id_find(proc_ctx.proc_ctx_hdl);
+	if (!entry->proc_ctx) {
+		IPAERR_RL("ipa3_id_find failed\n");
+		goto fail_id_find;
+	}
+
+	return 0;
+
+fail_id_find:
+	__ipa3_del_hdr_proc_ctx(proc_ctx.proc_ctx_hdl, true, user);
+fail_add_proc_ctx:
+	__ipa3_del_hdr(hdr->hdr_hdl, user);
+error:
+	return -EPERM;
+}
+
 int __ipa3_del_hdr(u32 hdr_hdl, bool by_user)
 {
 	struct ipa3_hdr_entry *entry;
@@ -785,6 +821,67 @@ int __ipa3_del_hdr(u32 hdr_hdl, bool by_user)
 	return 0;
 }
 
+/**
+ * ipa3_add_hdr_hpc() - add the specified headers to SW and optionally commit them
+ * to IPA HW
+ * @hdrs:	[inout] set of headers to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_hdr_hpc(struct ipa_ioc_add_hdr *hdrs)
+{
+	return ipa3_add_hdr_hpc_usr(hdrs, false);
+}
+EXPORT_SYMBOL(ipa3_add_hdr_hpc);
+
+/**
+ * ipa3_add_hdr_hpc_usr() - add the specified headers to SW
+ * and optionally commit them to IPA HW
+ * @hdrs:		[inout] set of headers to add
+ * @user_only:	[in] indicate installed from user
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_hdr_hpc_usr(struct ipa_ioc_add_hdr *hdrs, bool user_only)
+{
+	int i;
+	int result = -EFAULT;
+
+	if (hdrs == NULL || hdrs->num_hdrs == 0) {
+		IPAERR_RL("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	IPADBG("adding %d headers to IPA driver internal data struct\n",
+		hdrs->num_hdrs);
+	for (i = 0; i < hdrs->num_hdrs; i++) {
+		if (__ipa_add_hpc_hdr_insertion(&hdrs->hdr[i], user_only)) {
+			IPAERR_RL("failed to add hdr hpc %d\n", i);
+			hdrs->hdr[i].status = -1;
+			hdrs->hdr[i].status = -1;
+		} else {
+			hdrs->hdr[i].status = 0;
+		}
+	}
+
+	if (hdrs->commit) {
+		IPADBG("committing all headers to IPA core");
+		if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
+			result = -EPERM;
+			goto bail;
+		}
+	}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return result;
+}
+
 /**
  * ipa3_add_hdr() - add the specified headers to SW and optionally commit them
  * to IPA HW
@@ -824,7 +921,7 @@ int ipa3_add_hdr_usr(struct ipa_ioc_add_hdr *hdrs, bool user_only)
 	IPADBG("adding %d headers to IPA driver internal data struct\n",
 			hdrs->num_hdrs);
 	for (i = 0; i < hdrs->num_hdrs; i++) {
-		if (__ipa_add_hdr(&hdrs->hdr[i], user_only)) {
+		if (__ipa_add_hdr(&hdrs->hdr[i], user_only, NULL)) {
 			IPAERR_RL("failed to add hdr %d\n", i);
 			hdrs->hdr[i].status = -1;
 		} else {
@@ -1239,6 +1336,17 @@ static struct ipa3_hdr_entry *__ipa_find_hdr(const char *name)
 	return NULL;
 }
 
+static struct ipa3_hdr_proc_ctx_entry *__ipa_find_hdr_proc_ctx(const char *name)
+{
+	struct ipa3_hdr_entry *entry;
+
+	entry = __ipa_find_hdr(name);
+	if (entry && entry->proc_ctx)
+		return entry->proc_ctx;
+
+	return NULL;
+}
+
 /**
  * ipa3_get_hdr() - Lookup the specified header resource
  * @lookup:	[inout] header to lookup and its handle
@@ -1271,6 +1379,109 @@ int ipa3_get_hdr(struct ipa_ioc_get_hdr *lookup)
 	return result;
 }
 
+/**
+ * ipa3_get_hdr_offset() - Get the offset of the specified header resource
+ * @name: [in] name of header to lookup
+ * @offset: [out] offset of the specified header
+ *
+ * Lookup the specified header resource and return its offset if it exists.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_get_hdr_offset(char *name, u32 *offset)
+{
+	struct ipa3_hdr_entry *entry;
+	int result = -1;
+
+	if (!name || !offset) {
+		IPAERR_RL("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	name[IPA_RESOURCE_NAME_MAX-1] = '\0';
+	entry = __ipa_find_hdr(name);
+	if (entry && entry->offset_entry) {
+		*offset = entry->offset_entry->offset;
+		result = 0;
+	}
+
+	mutex_unlock(&ipa3_ctx->lock);
+	return result;
+}
+
+/**
+ * ipa3_get_hdr_proc_ctx_hdl() - Lookup the specified hpc resource
+ * @lookup:	[inout] hpc to lookup and its handle
+ *
+ * Lookup the specified hpc resource and return handle if it exists.
+ * The hpc handle returned is that of the hpc pointed to by the hdr
+ * associated with lookup->name.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_get_hdr_proc_ctx_hdl(struct ipa_ioc_get_hdr *lookup)
+{
+	struct ipa3_hdr_proc_ctx_entry *entry;
+	int result = -1;
+
+	if (lookup == NULL) {
+		IPAERR_RL("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	lookup->name[IPA_RESOURCE_NAME_MAX-1] = '\0';
+	entry = __ipa_find_hdr_proc_ctx(lookup->name);
+	if (entry) {
+		lookup->hdl = entry->id;
+		result = 0;
+	}
+
+	mutex_unlock(&ipa3_ctx->lock);
+	return result;
+}
+
+/**
+ * ipa3_get_hdr_proc_ctx_offset() - Lookup the specified hpc resource
+ * @name:	[in] hpc name to lookup
+ * @offset:	[out] offset of the hpc to return
+ *
+ * Lookup the specified hpc resource and return offset if it exists.
+ * The offset returned is that of the hpc pointed to by the hdr associated
+ * with name. The offset is in 32 byte units and includes the hpc table
+ * start offset.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_get_hdr_proc_ctx_offset(char *name, u32 *offset)
+{
+	struct ipa3_hdr_proc_ctx_entry *entry;
+	int result = -1;
+
+	if (!name || !offset) {
+		IPAERR_RL("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	name[IPA_RESOURCE_NAME_MAX-1] = '\0';
+	entry = __ipa_find_hdr_proc_ctx(name);
+	if (entry && entry->offset_entry) {
+		/* offset is in 32 Bytes chunks */
+		*offset = (entry->offset_entry->offset +
+			ipa3_ctx->hdr_proc_ctx_tbl.start_offset) >> 5;
+		result = 0;
+	}
+
+	mutex_unlock(&ipa3_ctx->lock);
+	return result;
+}
+
 /**
  * __ipa3_release_hdr() - drop reference to header and cause
  * deletion if reference count permits

+ 5 - 1
drivers/platform/msm/ipa/ipa_v3/ipa_i.h

@@ -2696,7 +2696,11 @@ int ipa3_del_hdr_by_user(struct ipa_ioc_del_hdr *hdls, bool by_user);
 
 int ipa3_commit_hdr(void);
 
-int ipa3_get_hdr(struct ipa_ioc_get_hdr *lookup);
+int ipa3_get_hdr_offset(char *name, u32 *offset);
+
+int ipa3_get_hdr_proc_ctx_hdl(struct ipa_ioc_get_hdr *lookup);
+
+int ipa3_get_hdr_proc_ctx_offset(char *name, u32 *offset);
 
 int ipa3_put_hdr(u32 hdr_hdl);
 

+ 7 - 7
drivers/platform/msm/ipa/ipa_v3/ipa_utils.c

@@ -5887,16 +5887,16 @@ const char *ipa_clients_strings[IPA_CLIENT_MAX] = {
 	__stringify(IPA_CLIENT_APPS_WAN_COAL_CONS),
 	__stringify(IPA_CLIENT_MHI_PRIME_TETH_PROD),
 	__stringify(IPA_CLIENT_MHI_PRIME_TETH_CONS),
-        __stringify(IPA_CLIENT_MHI_PRIME_RMNET_PROD),
-        __stringify(IPA_CLIENT_MHI_PRIME_RMNET_CONS),
+	__stringify(IPA_CLIENT_MHI_PRIME_RMNET_PROD),
+	__stringify(IPA_CLIENT_MHI_PRIME_RMNET_CONS),
 	__stringify(IPA_CLIENT_MHI_PRIME_DPL_PROD),
 	__stringify(RESERVERD_CONS_101),
 	__stringify(IPA_CLIENT_AQC_ETHERNET_PROD),
 	__stringify(IPA_CLIENT_AQC_ETHERNET_CONS),
 	__stringify(IPA_CLIENT_APPS_WAN_LOW_LAT_PROD),
 	__stringify(IPA_CLIENT_APPS_WAN_LOW_LAT_CONS),
-        __stringify(IPA_CLIENT_QDSS_PROD),
-        __stringify(IPA_CLIENT_MHI_QDSS_CONS),
+	__stringify(IPA_CLIENT_QDSS_PROD),
+	__stringify(IPA_CLIENT_MHI_QDSS_CONS),
 	__stringify(IPA_CLIENT_RTK_ETHERNET_PROD),
 	__stringify(IPA_CLIENT_RTK_ETHERNET_CONS),
 	__stringify(IPA_CLIENT_MHI_LOW_LAT_PROD),
@@ -7091,7 +7091,7 @@ enum ipa_client_type ipa3_get_client_by_pipe(int pipe_idx)
 
 	if (j == IPA_CLIENT_MAX)
 		IPADBG("Got to IPA_CLIENT_MAX (%d) while searching for (%d)\n",
-		       j, pipe_idx);
+			j, pipe_idx);
 
 	return j;
 }
@@ -7655,8 +7655,7 @@ int ipa3_cfg_ep_hdr_ext(u32 clnt_hdl,
  *
  * Note:	Should not be called from atomic context
  */
-int ipa3_cfg_ep_ulso(u32 clnt_hdl,
-		       const struct ipa_ep_cfg_ulso *ep_ulso)
+int ipa3_cfg_ep_ulso(u32 clnt_hdl, const struct ipa_ep_cfg_ulso *ep_ulso)
 {
 	struct ipa3_ep_context *ep;
 
@@ -12022,3 +12021,4 @@ bool ipa3_is_ulso_supported(void)
 
 	return ipa3_ctx->ulso_supported;
 }
+EXPORT_SYMBOL(ipa3_is_ulso_supported);