
qcacmn: Enable WLAN host data path support for IPA WDI3.0

Change to support WLAN Napier host autonomy data path architecture.

Change-Id: I07f7592d547bb796a3c12bbc4745cee22e2c0022
CRs-Fixed: 2064810
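
For reference, a minimal bring-up sketch showing how a client (e.g. the HDD/IPA glue layer) might drive the new CDP IPA APIs added by this change. The wrapper name, callback arguments and chosen parameter values below are illustrative assumptions, not part of this commit:

#include <cdp_txrx_ipa.h>

/* Hypothetical bring-up order; cb pointers are owned by the IPA glue layer */
static QDF_STATUS wlan_ipa_wdi3_connect(ol_txrx_soc_handle soc,
					struct cdp_pdev *pdev,
					void *ipa_i2w_cb, void *ipa_w2i_cb)
{
	uint32_t tx_pipe_handle, rx_pipe_handle;

	/* Collect pre-allocated ring/buffer info into pdev->ipa_resource */
	if (cdp_ipa_get_resource(soc, pdev) != QDF_STATUS_SUCCESS)
		return QDF_STATUS_E_FAILURE;

	/* Connect WDI3 Tx/Rx pipes; IPA returns the uC doorbell paddrs */
	if (cdp_ipa_setup(soc, pdev, ipa_i2w_cb, ipa_w2i_cb, NULL, 0, NULL,
			  true, &tx_pipe_handle, &rx_pipe_handle) !=
	    QDF_STATUS_SUCCESS)
		return QDF_STATUS_E_FAILURE;

	cdp_ipa_set_doorbell_paddr(soc, pdev);	/* program WBM/REO doorbells */
	cdp_ipa_enable_pipes(soc, pdev);	/* resume WDI3 traffic */
	cdp_ipa_enable_autonomy(soc, pdev);	/* route RX to the IPA REO ring */

	return QDF_STATUS_SUCCESS;
}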
Yun Park 7 years ago
parent
commit
fde6b9e551

+ 324 - 75
dp/inc/cdp_txrx_ipa.h

@@ -30,115 +30,111 @@
  */
 #ifndef _CDP_TXRX_IPA_H_
 #define _CDP_TXRX_IPA_H_
+#include <ipa.h>
 #include <cdp_txrx_mob_def.h>
 #include "cdp_txrx_handle.h"
+
+#ifdef IPA_OFFLOAD
 /**
- * cdp_ipa_get_resource() - Get allocated wlan resources for ipa data path
+ * cdp_ipa_get_resource() - Get allocated WLAN resources for IPA data path
  * @soc - data path soc handle
  * @pdev - device instance pointer
- * @ipa_res - ipa resources pointer
  *
- * Get allocated wlan resources for ipa data path
+ * Get allocated WLAN resources for IPA data path
  *
- * return none
+ * return QDF_STATUS_SUCCESS
  */
-static inline void
-cdp_ipa_get_resource(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
-		 struct ol_txrx_ipa_resources *ipa_res)
+static inline QDF_STATUS
+cdp_ipa_get_resource(ol_txrx_soc_handle soc, struct cdp_pdev *pdev)
 {
-	if (!soc || !soc->ops || !soc->ops->ipa_ops) {
+	if (!soc || !soc->ops || !soc->ops->ipa_ops || !pdev) {
 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
 			"%s invalid instance", __func__);
-		return;
+		return QDF_STATUS_E_FAILURE;
 	}
 
 	if (soc->ops->ipa_ops->ipa_get_resource)
-		return soc->ops->ipa_ops->ipa_get_resource(pdev, ipa_res);
+		return soc->ops->ipa_ops->ipa_get_resource(pdev);
 
-	return;
+	return QDF_STATUS_SUCCESS;
 }
 
 /**
- * cdp_ipa_set_doorbell_paddr() - give IPA db paddr to fw
+ * cdp_ipa_set_doorbell_paddr() - give IPA db paddr to FW
  * @soc - data path soc handle
  * @pdev - device instance pointer
- * @ipa_tx_uc_doorbell_paddr - tx db paddr
- * @ipa_rx_uc_doorbell_paddr - rx db paddr
  *
- * give IPA db paddr to fw
+ * give IPA db paddr to FW
  *
- * return none
+ * return QDF_STATUS_SUCCESS
  */
-static inline void
-cdp_ipa_set_doorbell_paddr(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
-		 qdf_dma_addr_t ipa_tx_uc_doorbell_paddr,
-		 qdf_dma_addr_t ipa_rx_uc_doorbell_paddr)
+static inline QDF_STATUS
+cdp_ipa_set_doorbell_paddr(ol_txrx_soc_handle soc, struct cdp_pdev *pdev)
 {
-	if (!soc || !soc->ops || !soc->ops->ipa_ops) {
+	if (!soc || !soc->ops || !soc->ops->ipa_ops || !pdev) {
 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
 			"%s invalid instance", __func__);
-		return;
+		return QDF_STATUS_E_FAILURE;
 	}
 
 	if (soc->ops->ipa_ops->ipa_set_doorbell_paddr)
-		return soc->ops->ipa_ops->ipa_set_doorbell_paddr(pdev,
-			ipa_tx_uc_doorbell_paddr, ipa_rx_uc_doorbell_paddr);
+		return soc->ops->ipa_ops->ipa_set_doorbell_paddr(pdev);
 
-	return;
+	return QDF_STATUS_SUCCESS;
 }
 
 /**
- * cdp_ipa_set_active() - activate/de-ctivate wlan fw ipa data path
+ * cdp_ipa_set_active() - activate/de-activate IPA offload path
  * @soc - data path soc handle
  * @pdev - device instance pointer
  * @uc_active - activate or de-activate
  * @is_tx - toggle tx or rx data path
  *
- * activate/de-ctivate wlan fw ipa data path
+ * activate/de-activate IPA offload path
  *
- * return none
+ * return QDF_STATUS_SUCCESS
  */
-static inline void
+static inline QDF_STATUS
 cdp_ipa_set_active(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
 		 bool uc_active, bool is_tx)
 {
-	if (!soc || !soc->ops || !soc->ops->ipa_ops) {
+	if (!soc || !soc->ops || !soc->ops->ipa_ops || !pdev) {
 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
 			"%s invalid instance", __func__);
-		return;
+		return QDF_STATUS_E_FAILURE;
 	}
 
 	if (soc->ops->ipa_ops->ipa_set_active)
 		return soc->ops->ipa_ops->ipa_set_active(pdev, uc_active,
 				is_tx);
 
-	return;
+	return QDF_STATUS_SUCCESS;
 }
 
 /**
- * cdp_ipa_op_response() - event handler from fw
+ * cdp_ipa_op_response() - event handler from FW
  * @soc - data path soc handle
  * @pdev - device instance pointer
  * @op_msg - event contents from firmware
  *
- * event handler from fw
+ * event handler from FW
  *
- * return none
+ * return QDF_STATUS_SUCCESS
  */
-static inline void
+static inline QDF_STATUS
 cdp_ipa_op_response(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
 		uint8_t *op_msg)
 {
-	if (!soc || !soc->ops || !soc->ops->ipa_ops) {
+	if (!soc || !soc->ops || !soc->ops->ipa_ops || !pdev) {
 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
 			"%s invalid instance", __func__);
-		return;
+		return QDF_STATUS_E_FAILURE;
 	}
 
 	if (soc->ops->ipa_ops->ipa_op_response)
 		return soc->ops->ipa_ops->ipa_op_response(pdev, op_msg);
 
-	return;
+	return QDF_STATUS_SUCCESS;
 }
 
 /**
@@ -146,51 +142,51 @@ cdp_ipa_op_response(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
  * @soc - data path soc handle
  * @pdev - device instance pointer
  * @op_cb - event handler callback function pointer
- * @osif_dev -  osif instance pointer
+ * @usr_ctxt - user context to be registered
  *
  * register event handler function pointer
  *
- * return none
+ * return QDF_STATUS_SUCCESS
  */
-static inline void
+static inline QDF_STATUS
 cdp_ipa_register_op_cb(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
-		 ipa_op_cb_type op_cb, void *osif_dev)
+		 ipa_uc_op_cb_type op_cb, void *usr_ctxt)
 {
-	if (!soc || !soc->ops || !soc->ops->ipa_ops) {
+	if (!soc || !soc->ops || !soc->ops->ipa_ops || !pdev) {
 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
 			"%s invalid instance", __func__);
-		return;
+		return QDF_STATUS_E_FAILURE;
 	}
 
 	if (soc->ops->ipa_ops->ipa_register_op_cb)
 		return soc->ops->ipa_ops->ipa_register_op_cb(pdev, op_cb,
-			osif_dev);
+							     usr_ctxt);
 
-	return;
+	return QDF_STATUS_SUCCESS;
 }
 
 /**
- * cdp_ipa_get_stat() - get ipa data path stats from fw
+ * cdp_ipa_get_stat() - get IPA data path stats from FW
  * @soc - data path soc handle
  * @pdev - device instance pointer
  *
- * get ipa data path stats from fw async
+ * get IPA data path stats from FW async
  *
- * return none
+ * return QDF_STATUS_SUCCESS
  */
-static inline void
+static inline QDF_STATUS
 cdp_ipa_get_stat(ol_txrx_soc_handle soc, struct cdp_pdev *pdev)
 {
-	if (!soc || !soc->ops || !soc->ops->ipa_ops) {
+	if (!soc || !soc->ops || !soc->ops->ipa_ops || !pdev) {
 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
 			"%s invalid instance", __func__);
-		return;
+		return QDF_STATUS_E_FAILURE;
 	}
 
 	if (soc->ops->ipa_ops->ipa_get_stat)
 		return soc->ops->ipa_ops->ipa_get_stat(pdev);
 
-	return;
+	return QDF_STATUS_SUCCESS;
 }
 
 /**
@@ -203,7 +199,7 @@ cdp_ipa_get_stat(ol_txrx_soc_handle soc, struct cdp_pdev *pdev)
 static inline qdf_nbuf_t cdp_ipa_tx_send_data_frame(ol_txrx_soc_handle soc,
 				struct cdp_vdev *vdev, qdf_nbuf_t skb)
 {
-	if (!soc || !soc->ops || !soc->ops->ipa_ops) {
+	if (!soc || !soc->ops || !soc->ops->ipa_ops || !vdev) {
 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
 			"%s invalid instance", __func__);
 		return skb;
@@ -220,45 +216,48 @@ static inline qdf_nbuf_t cdp_ipa_tx_send_data_frame(ol_txrx_soc_handle soc,
  * @pdev: physical device instance
  * @value: partition base value
  *
- * Return: none
+ * Return: QDF_STATUS
  */
-static inline void cdp_ipa_set_uc_tx_partition_base(ol_txrx_soc_handle soc,
+static inline QDF_STATUS
+cdp_ipa_set_uc_tx_partition_base(ol_txrx_soc_handle soc,
 				struct cdp_cfg *cfg_pdev, uint32_t value)
 {
-	if (!soc || !soc->ops || !soc->ops->ipa_ops) {
+	if (!soc || !soc->ops || !soc->ops->ipa_ops || !cfg_pdev) {
 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
 			"%s invalid instance", __func__);
-		return;
+		return QDF_STATUS_E_FAILURE;
 	}
 
 	if (soc->ops->ipa_ops->ipa_set_uc_tx_partition_base)
-		return soc->ops->ipa_ops->ipa_set_uc_tx_partition_base(cfg_pdev,
-								       value);
+		soc->ops->ipa_ops->ipa_set_uc_tx_partition_base(cfg_pdev,
+								value);
 
-	return;
+	return QDF_STATUS_SUCCESS;
 }
 
+#ifdef FEATURE_METERING
 /**
  * cdp_ipa_uc_get_share_stats() - get Tx/Rx byte stats from FW
  * @pdev: physical device instance
  * @value: reset stats
  *
- * Return: none
+ * Return: QDF_STATUS
  */
-static inline void cdp_ipa_uc_get_share_stats(ol_txrx_soc_handle soc,
-				struct cdp_pdev *pdev, uint8_t value)
+static inline QDF_STATUS
+cdp_ipa_uc_get_share_stats(ol_txrx_soc_handle soc,
+			struct cdp_pdev *pdev, uint8_t value)
 {
-	if (!soc || !soc->ops || !soc->ops->ipa_ops) {
+	if (!soc || !soc->ops || !soc->ops->ipa_ops || !pdev) {
 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
 			"%s invalid instance", __func__);
-		return;
+		return QDF_STATUS_E_FAILURE;
 	}
 
 	if (soc->ops->ipa_ops->ipa_uc_get_share_stats)
 		return soc->ops->ipa_ops->ipa_uc_get_share_stats(pdev,
 								 value);
 
-	return;
+	return QDF_STATUS_SUCCESS;
 }
 
 /**
@@ -266,22 +265,272 @@ static inline void cdp_ipa_uc_get_share_stats(ol_txrx_soc_handle soc,
  * @pdev: physical device instance
  * @value: quota limit bytes
  *
- * Return: none
+ * Return: QDF_STATUS
  */
-static inline void cdp_ipa_uc_set_quota(ol_txrx_soc_handle soc,
-				struct cdp_pdev *pdev, uint64_t value)
+static inline QDF_STATUS
+cdp_ipa_uc_set_quota(ol_txrx_soc_handle soc,
+		struct cdp_pdev *pdev, uint64_t value)
 {
-	if (!soc || !soc->ops || !soc->ops->ipa_ops) {
+	if (!soc || !soc->ops || !soc->ops->ipa_ops || !pdev) {
 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
 			"%s invalid instance", __func__);
-		return;
+		return QDF_STATUS_E_FAILURE;
 	}
 
-	if (soc->ops->ipa_ops->ipa_uc_get_share_stats)
+	if (soc->ops->ipa_ops->ipa_uc_set_quota)
 		return soc->ops->ipa_ops->ipa_uc_set_quota(pdev,
 							   value);
 
-	return;
+	return QDF_STATUS_SUCCESS;
+}
+#endif
+
+/**
+ * cdp_ipa_enable_autonomy() - Enable autonomy RX data path
+ * @soc: data path soc handle
+ * @pdev: handle to the device instance
+ *
+ * IPA Data path is enabled and resumed.
+ * All autonomy data path elements are ready to deliver packets.
+ * All RX packets should be routed to the IPA_REO ring so that IPA can
+ * receive packets from WLAN
+ *
+ * Return: QDF_STATUS
+ */
+static inline QDF_STATUS
+cdp_ipa_enable_autonomy(ol_txrx_soc_handle soc, struct cdp_pdev *pdev)
+{
+	if (!soc || !soc->ops || !soc->ops->ipa_ops || !pdev) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			"%s invalid instance", __func__);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	if (soc->ops->ipa_ops->ipa_enable_autonomy)
+		return soc->ops->ipa_ops->ipa_enable_autonomy(pdev);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdp_ipa_disable_autonomy() - Disable autonomy RX data path
+ * @soc: data path soc handle
+ * @pdev: handle to the device instance
+ *
+ * IPA data path is disabled and suspended.
+ * RX packets are no longer routed to the IPA_REO ring; they are delivered
+ * to the regular SW REO rings instead
+ *
+ * Return: QDF_STATUS
+ */
+static inline QDF_STATUS
+cdp_ipa_disable_autonomy(ol_txrx_soc_handle soc, struct cdp_pdev *pdev)
+{
+	if (!soc || !soc->ops || !soc->ops->ipa_ops || !pdev) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			"%s invalid instance", __func__);
+		return QDF_STATUS_E_FAILURE;
+	}
+	if (soc->ops->ipa_ops->ipa_disable_autonomy)
+		return soc->ops->ipa_ops->ipa_disable_autonomy(pdev);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdp_ipa_setup() - Setup and connect IPA pipes
+ * @soc: data path soc handle
+ * @pdev: handle to the device instance
+ * @ipa_i2w_cb: IPA to WLAN callback
+ * @ipa_w2i_cb: WLAN to IPA callback
+ * @ipa_wdi_meter_notifier_cb: IPA WDI metering callback
+ * @ipa_desc_size: IPA descriptor size
+ * @ipa_priv: handle to the HTT instance
+ * @is_rm_enabled: Is IPA RM enabled or not
+ * @tx_pipe_handle: pointer to Tx pipe handle
+ * @rx_pipe_handle: pointer to Rx pipe handle
+ *
+ * Return: QDF_STATUS
+ */
+static inline QDF_STATUS
+cdp_ipa_setup(ol_txrx_soc_handle soc, struct cdp_pdev *pdev, void *ipa_i2w_cb,
+	      void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb,
+	      uint32_t ipa_desc_size, void *ipa_priv, bool is_rm_enabled,
+	      uint32_t *tx_pipe_handle, uint32_t *rx_pipe_handle)
+
+{
+	if (!soc || !soc->ops || !soc->ops->ipa_ops || !pdev) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			"%s invalid instance", __func__);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	if (soc->ops->ipa_ops->ipa_setup)
+		return soc->ops->ipa_ops->ipa_setup(pdev, ipa_i2w_cb,
+						    ipa_w2i_cb,
+						    ipa_wdi_meter_notifier_cb,
+						    ipa_desc_size, ipa_priv,
+						    is_rm_enabled,
+						    tx_pipe_handle,
+						    rx_pipe_handle);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdp_ipa_cleanup() - Disconnect IPA pipes
+ * @soc: data path soc handle
+ * @tx_pipe_handle: Tx pipe handle
+ * @rx_pipe_handle: Rx pipe handle
+ *
+ * Return: QDF_STATUS
+ */
+static inline QDF_STATUS
+cdp_ipa_cleanup(ol_txrx_soc_handle soc, uint32_t tx_pipe_handle,
+		uint32_t rx_pipe_handle)
+{
+	if (!soc || !soc->ops || !soc->ops->ipa_ops) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			"%s invalid instance", __func__);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	if (soc->ops->ipa_ops->ipa_cleanup)
+		return soc->ops->ipa_ops->ipa_cleanup(tx_pipe_handle,
+						      rx_pipe_handle);
+
+	return QDF_STATUS_SUCCESS;
 }
+
+/**
+ * cdp_ipa_setup_iface() - Setup IPA header and register interface
+ * @soc: data path soc handle
+ * @ifname: Interface name
+ * @mac_addr: Interface MAC address
+ * @prod_client: IPA prod client type
+ * @cons_client: IPA cons client type
+ * @session_id: Session ID
+ * @is_ipv6_enabled: Is IPV6 enabled or not
+ *
+ * Return: QDF_STATUS
+ */
+static inline QDF_STATUS
+cdp_ipa_setup_iface(ol_txrx_soc_handle soc, char *ifname, uint8_t *mac_addr,
+		    enum ipa_client_type prod_client,
+		    enum ipa_client_type cons_client,
+		    uint8_t session_id, bool is_ipv6_enabled)
+{
+	if (!soc || !soc->ops || !soc->ops->ipa_ops) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			"%s invalid instance", __func__);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	if (soc->ops->ipa_ops->ipa_setup_iface)
+		return soc->ops->ipa_ops->ipa_setup_iface(ifname, mac_addr,
+							  prod_client,
+							  cons_client,
+							  session_id,
+							  is_ipv6_enabled);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdp_ipa_cleanup_iface() - Cleanup IPA header and deregister interface
+ * @soc: data path soc handle
+ * @ifname: Interface name
+ * @is_ipv6_enabled: Is IPV6 enabled or not
+ *
+ * Return: QDF_STATUS
+ */
+static inline QDF_STATUS
+cdp_ipa_cleanup_iface(ol_txrx_soc_handle soc, char *ifname,
+		      bool is_ipv6_enabled)
+{
+	if (!soc || !soc->ops || !soc->ops->ipa_ops) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			"%s invalid instance", __func__);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	if (soc->ops->ipa_ops->ipa_cleanup_iface)
+		return soc->ops->ipa_ops->ipa_cleanup_iface(ifname,
+							    is_ipv6_enabled);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdp_ipa_enable_pipes() - Enable and resume traffic on Tx/Rx pipes
+ * @soc: data path soc handle
+ * @pdev: handle to the device instance
+ *
+ * Return: QDF_STATUS
+ */
+static inline QDF_STATUS
+cdp_ipa_enable_pipes(ol_txrx_soc_handle soc, struct cdp_pdev *pdev)
+{
+	if (!soc || !soc->ops || !soc->ops->ipa_ops || !pdev) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			"%s invalid instance", __func__);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	if (soc->ops->ipa_ops->ipa_enable_pipes)
+		return soc->ops->ipa_ops->ipa_enable_pipes(pdev);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdp_ipa_disable_pipes() - Suspend traffic and disable Tx/Rx pipes
+ * @soc: data path soc handle
+ * @pdev: handle to the device instance
+ *
+ * Return: QDF_STATUS
+ */
+static inline QDF_STATUS
+cdp_ipa_disable_pipes(ol_txrx_soc_handle soc, struct cdp_pdev *pdev)
+{
+	if (!soc || !soc->ops || !soc->ops->ipa_ops || !pdev) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			"%s invalid instance", __func__);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	if (soc->ops->ipa_ops->ipa_disable_pipes)
+		return soc->ops->ipa_ops->ipa_disable_pipes(pdev);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * cdp_ipa_set_perf_level() - Set IPA clock bandwidth based on data rates
+ * @soc: data path soc handle
+ * @client: WLAN Client ID
+ * @max_supported_bw_mbps: Maximum bandwidth needed (in Mbps)
+ *
+ * Return: 0 on success, negative errno on error
+ */
+static inline QDF_STATUS
+cdp_ipa_set_perf_level(ol_txrx_soc_handle soc, int client,
+		       uint32_t max_supported_bw_mbps)
+{
+	if (!soc || !soc->ops || !soc->ops->ipa_ops) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			"%s invalid instance", __func__);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	if (soc->ops->ipa_ops->ipa_set_perf_level)
+		return soc->ops->ipa_ops->ipa_set_perf_level(client,
+				max_supported_bw_mbps);
+
+	return QDF_STATUS_SUCCESS;
+}
+#endif /* IPA_OFFLOAD */
+
 #endif /* _CDP_TXRX_IPA_H_ */
 

+ 10 - 1
dp/inc/cdp_txrx_mob_def.h

@@ -246,6 +246,7 @@ struct txrx_pdev_cfg_param_t {
 	struct ol_tx_sched_wrr_ac_specs_t ac_specs[TX_WMM_AC_NUM];
 };
 
+#ifdef IPA_OFFLOAD
 /**
  * ol_txrx_ipa_resources - Resources needed for IPA
  */
@@ -267,7 +268,15 @@ struct ol_txrx_ipa_resources {
 	uint32_t rx2_rdy_ring_size;
 	qdf_dma_addr_t rx2_proc_done_idx_paddr;
 	void *rx2_proc_done_idx_vaddr;
+
+	/* IPA UC doorbell registers paddr */
+	qdf_dma_addr_t tx_comp_doorbell_paddr;
+	qdf_dma_addr_t rx_ready_doorbell_paddr;
+
+	uint32_t tx_pipe_handle;
+	uint32_t rx_pipe_handle;
 };
+#endif
 
 struct ol_txrx_ocb_chan_info {
 	uint32_t chan_freq;
@@ -401,7 +410,7 @@ typedef void (*tx_pause_callback)(uint8_t vdev_id,
 		enum netif_action_type action,
 		enum netif_reason_type reason);
 
-typedef void (*ipa_op_cb_type)(uint8_t *op_msg,
+typedef void (*ipa_uc_op_cb_type)(uint8_t *op_msg,
 			void *osif_ctxt);
 
 #endif /* __CDP_TXRX_MOB_DEF_H */
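
As a hedged illustration of the renamed ipa_uc_op_cb_type, a client could register an OP-message handler through cdp_ipa_register_op_cb() roughly as follows (the handler body and ipa_ctxt are placeholders, not part of this change):

/* Handler matching the new ipa_uc_op_cb_type signature */
static void wlan_ipa_uc_op_event_handler(uint8_t *op_msg, void *osif_ctxt)
{
	/* decode op_msg and forward the event to the IPA glue layer */
}

	/* in the IPA glue init path */
	cdp_ipa_register_op_cb(soc, pdev, wlan_ipa_uc_op_event_handler,
			       ipa_ctxt);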

+ 37 - 15
dp/inc/cdp_txrx_ops.h

@@ -25,7 +25,6 @@
 #ifndef _CDP_TXRX_CMN_OPS_H_
 #define _CDP_TXRX_CMN_OPS_H_
 
-
 #include <cdp_txrx_cmn_struct.h>
 #ifdef CONFIG_WIN
 #include <cdp_txrx_stats_struct.h>
@@ -33,6 +32,9 @@
 #include "cdp_txrx_handle.h"
 #include <cdp_txrx_mon_struct.h>
 #include "wlan_objmgr_psoc_obj.h"
+#ifdef IPA_OFFLOAD
+#include <ipa.h>
+#endif
 
 /******************************************************************************
  *
@@ -757,6 +759,7 @@ struct cdp_lflowctl_ops {
 	void (*vdev_unpause)(struct cdp_vdev *vdev, uint32_t reason);
 };
 
+#ifdef IPA_OFFLOAD
 /**
  * struct cdp_ipa_ops - mcl ipa data path ops
  * @ipa_get_resource:
@@ -768,26 +771,43 @@ struct cdp_lflowctl_ops {
  * @ipa_tx_data_frame:
  */
 struct cdp_ipa_ops {
-	void (*ipa_get_resource)(struct cdp_pdev *pdev,
-		struct ol_txrx_ipa_resources *ipa_res);
-	void (*ipa_set_doorbell_paddr)(struct cdp_pdev *pdev,
-		qdf_dma_addr_t ipa_tx_uc_doorbell_paddr,
-		qdf_dma_addr_t ipa_rx_uc_doorbell_paddr);
-	void (*ipa_set_active)(struct cdp_pdev *pdev,
-		bool uc_active, bool is_tx);
-	void (*ipa_op_response)(struct cdp_pdev *pdev, uint8_t *op_msg);
-	void (*ipa_register_op_cb)(struct cdp_pdev *pdev,
+	QDF_STATUS (*ipa_get_resource)(struct cdp_pdev *pdev);
+	QDF_STATUS (*ipa_set_doorbell_paddr)(struct cdp_pdev *pdev);
+	QDF_STATUS (*ipa_set_active)(struct cdp_pdev *pdev, bool uc_active,
+		bool is_tx);
+	QDF_STATUS (*ipa_op_response)(struct cdp_pdev *pdev, uint8_t *op_msg);
+	QDF_STATUS (*ipa_register_op_cb)(struct cdp_pdev *pdev,
 		void (*ipa_uc_op_cb_type)(uint8_t *op_msg, void *osif_ctxt),
-		void *osif_dev);
-	void (*ipa_get_stat)(struct cdp_pdev *pdev);
+		void *usr_ctxt);
+	QDF_STATUS (*ipa_get_stat)(struct cdp_pdev *pdev);
 	qdf_nbuf_t (*ipa_tx_data_frame)(struct cdp_vdev *vdev, qdf_nbuf_t skb);
-	void (*ipa_set_uc_tx_partition_base)(struct cdp_cfg *cfg_pdev,
+	void (*ipa_set_uc_tx_partition_base)(struct cdp_cfg *pdev,
 		uint32_t value);
-	void (*ipa_uc_get_share_stats)(struct cdp_pdev *pdev,
+#ifdef FEATURE_METERING
+	QDF_STATUS (*ipa_uc_get_share_stats)(struct cdp_pdev *pdev,
 		uint8_t reset_stats);
-	void (*ipa_uc_set_quota)(struct cdp_pdev *pdev,
+	QDF_STATUS (*ipa_uc_set_quota)(struct cdp_pdev *pdev,
 		uint64_t quota_bytes);
+#endif
+	QDF_STATUS (*ipa_enable_autonomy)(struct cdp_pdev *pdev);
+	QDF_STATUS (*ipa_disable_autonomy)(struct cdp_pdev *pdev);
+	QDF_STATUS (*ipa_setup)(struct cdp_pdev *pdev, void *ipa_i2w_cb,
+		void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb,
+		uint32_t ipa_desc_size, void *ipa_priv, bool is_rm_enabled,
+		uint32_t *tx_pipe_handle, uint32_t *rx_pipe_handle);
+	QDF_STATUS (*ipa_cleanup)(uint32_t tx_pipe_handle,
+		uint32_t rx_pipe_handle);
+	QDF_STATUS (*ipa_setup_iface)(char *ifname, uint8_t *mac_addr,
+		enum ipa_client_type prod_client,
+		enum ipa_client_type cons_client,
+		uint8_t session_id, bool is_ipv6_enabled);
+	QDF_STATUS (*ipa_cleanup_iface)(char *ifname, bool is_ipv6_enabled);
+	QDF_STATUS (*ipa_enable_pipes)(struct cdp_pdev *pdev);
+	QDF_STATUS (*ipa_disable_pipes)(struct cdp_pdev *pdev);
+	QDF_STATUS (*ipa_set_perf_level)(int client,
+		uint32_t max_supported_bw_mbps);
 };
+#endif
 
 /**
  * struct cdp_bus_ops - mcl bus suspend/resume ops
@@ -919,7 +939,9 @@ struct cdp_ops {
 	struct cdp_cfg_ops          *cfg_ops;
 	struct cdp_flowctl_ops      *flowctl_ops;
 	struct cdp_lflowctl_ops     *l_flowctl_ops;
+#ifdef IPA_OFFLOAD
 	struct cdp_ipa_ops          *ipa_ops;
+#endif
 	struct cdp_bus_ops          *bus_ops;
 	struct cdp_ocb_ops          *ocb_ops;
 	struct cdp_peer_ops         *peer_ops;

+ 14 - 4
dp/wifi3.0/dp_htt.c

@@ -367,25 +367,35 @@ int htt_srng_setup(void *htt_soc, int mac_id, void *hal_srng,
 	case RXDMA_BUF:
 #ifdef QCA_HOST2FW_RXBUF_RING
 		if (srng_params.ring_id ==
-		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF)) {
+		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0)) {
 			htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
 			htt_ring_type = HTT_SW_TO_SW_RING;
+#ifdef IPA_OFFLOAD
+		} else if (srng_params.ring_id ==
+		    (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2)) {
+			htt_ring_id = HTT_HOST2_TO_FW_RXBUF_RING;
+			htt_ring_type = HTT_SW_TO_SW_RING;
+#endif
 #else
 		if (srng_params.ring_id ==
-			(HAL_SRNG_WMAC1_SW2RXDMA0_BUF +
+			(HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 +
 			  (mac_id * HAL_MAX_RINGS_PER_LMAC))) {
 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
 			htt_ring_type = HTT_SW_TO_HW_RING;
 #endif
 		} else if (srng_params.ring_id ==
+#ifdef IPA_OFFLOAD
+			 (HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 +
+#else
 			 (HAL_SRNG_WMAC1_SW2RXDMA1_BUF +
+#endif
 			  (mac_id * HAL_MAX_RINGS_PER_LMAC))) {
 			htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
 			htt_ring_type = HTT_SW_TO_HW_RING;
 		} else {
 			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-					   "%s: Ring %d currently not supported\n",
-					   __func__, srng_params.ring_id);
+				   "%s: Ring %d currently not supported\n",
+				   __func__, srng_params.ring_id);
 			goto fail1;
 		}
 

+ 1 - 0
dp/wifi3.0/dp_internal.h

@@ -252,6 +252,7 @@ void *dp_peer_find_by_local_id(struct cdp_pdev *pdev_handle, uint8_t local_id);
 QDF_STATUS dp_peer_state_update(struct cdp_pdev *pdev_handle, uint8_t *peer_mac,
 		enum ol_txrx_peer_state state);
 QDF_STATUS dp_get_vdevid(void *peer_handle, uint8_t *vdev_id);
+struct cdp_vdev *dp_get_vdev_by_sta_id(uint8_t sta_id);
 struct cdp_vdev *dp_get_vdev_for_peer(void *peer);
 uint8_t *dp_peer_get_peer_mac_addr(void *peer);
 int dp_get_peer_state(void *peer_handle);

+ 548 - 0
dp/wifi3.0/dp_ipa.c

@@ -0,0 +1,548 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+*/
+
+#ifdef IPA_OFFLOAD
+
+#include <ipa_wdi3.h>
+#include <qdf_types.h>
+#include <qdf_lock.h>
+#include <hal_api.h>
+#include <hif.h>
+#include <htt.h>
+#include <wdi_event.h>
+#include <queue.h>
+#include "dp_types.h"
+#include "dp_tx.h"
+#include "dp_ipa.h"
+
+/**
+ * dp_ipa_get_resource() - Client requests resource information
+ * @ppdev - handle to the device instance
+ *
+ *  IPA client will request IPA UC related resource information
+ *  Resource information will be distributed to IPA module
+ *  All of the required resources should be pre-allocated
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS dp_ipa_get_resource(struct cdp_pdev *ppdev)
+{
+	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
+	struct dp_soc *soc = pdev->soc;
+	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
+
+	ipa_res->tx_ring_base_paddr =
+		soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr;
+	ipa_res->tx_ring_size =
+		soc->ipa_uc_tx_rsc.ipa_tcl_ring_size;
+	ipa_res->tx_num_alloc_buffer =
+		(uint32_t)soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt;
+
+	ipa_res->tx_comp_ring_base_paddr =
+		soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr;
+	ipa_res->tx_comp_ring_size =
+		soc->ipa_uc_tx_rsc.ipa_wbm_ring_size;
+
+	ipa_res->rx_rdy_ring_base_paddr =
+		soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr;
+	ipa_res->rx_rdy_ring_size =
+		soc->ipa_uc_rx_rsc.ipa_reo_ring_size;
+
+	ipa_res->rx_refill_ring_base_paddr =
+		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr;
+	ipa_res->rx_refill_ring_size =
+		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size;
+
+	if ((0 == ipa_res->tx_comp_ring_base_paddr) ||
+			(0 == ipa_res->rx_rdy_ring_base_paddr))
+		return QDF_STATUS_E_FAILURE;
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * dp_ipa_set_doorbell_paddr() - Set doorbell register physical address to SRNG
+ * @ppdev - handle to the device instance
+ *
+ * Set TX_COMP_DOORBELL register physical address to WBM Head_Ptr_MemAddr_LSB
+ * Set RX_READ_DOORBELL register physical address to REO Head_Ptr_MemAddr_LSB
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS dp_ipa_set_doorbell_paddr(struct cdp_pdev *ppdev)
+{
+	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
+	struct dp_soc *soc = pdev->soc;
+	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
+
+	hal_srng_set_hp_paddr(soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].
+			      hal_srng, ipa_res->tx_comp_doorbell_paddr);
+	hal_srng_set_hp_paddr(soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].
+			      hal_srng, ipa_res->rx_ready_doorbell_paddr);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * dp_ipa_op_response() - Handle OP command response from firmware
+ * @ppdev - handle to the device instance
+ * @op_msg: op response message from firmware
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS dp_ipa_op_response(struct cdp_pdev *ppdev, uint8_t *op_msg)
+{
+	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
+
+	if (pdev->ipa_uc_op_cb) {
+		pdev->ipa_uc_op_cb(op_msg, pdev->usr_ctxt);
+	} else {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+		    "%s: IPA callback function is not registered", __func__);
+		qdf_mem_free(op_msg);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * dp_ipa_register_op_cb() - Register OP handler function
+ * @ppdev - handle to the device instance
+ * @op_cb: handler function pointer
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS dp_ipa_register_op_cb(struct cdp_pdev *ppdev,
+				 ipa_uc_op_cb_type op_cb,
+				 void *usr_ctxt)
+{
+	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
+
+	pdev->ipa_uc_op_cb = op_cb;
+	pdev->usr_ctxt = usr_ctxt;
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * dp_ipa_get_stat() - Get firmware wdi status
+ * @ppdev - handle to the device instance
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS dp_ipa_get_stat(struct cdp_pdev *ppdev)
+{
+	/* TBD */
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * dp_tx_send_ipa_data_frame() - send IPA data frame
+ * @vdev: vdev
+ * @skb: skb
+ *
+ * Return: NULL on success, skb on failure
+ */
+qdf_nbuf_t dp_tx_send_ipa_data_frame(struct cdp_vdev *vdev, qdf_nbuf_t skb)
+{
+	qdf_nbuf_t ret;
+
+	/* Terminate the (single-element) list of tx frames */
+	qdf_nbuf_set_next(skb, NULL);
+	ret = dp_tx_send((struct dp_vdev_t *)vdev, skb);
+	if (ret) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			  "Failed to tx");
+		return ret;
+	}
+
+	return NULL;
+}
+
+/**
+ * dp_ipa_enable_autonomy() - Enable autonomy RX path
+ * @pdev - handle to the device instance
+ *
+ * Set all RX packet route to IPA REO ring
+ * Program Destination_Ring_Ctrl_IX_0 REO register to point IPA REO ring
+ * Return: QDF_STATUS
+ */
+QDF_STATUS dp_ipa_enable_autonomy(struct cdp_pdev *ppdev)
+{
+	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
+	struct dp_soc *soc = pdev->soc;
+	uint32_t remap_val;
+
+	/* Call HAL API to remap REO rings to REO2IPA ring */
+	remap_val = HAL_REO_REMAP_VAL(REO_REMAP_TCL, REO_REMAP_TCL) |
+		    HAL_REO_REMAP_VAL(REO_REMAP_SW1, REO_REMAP_SW4) |
+		    HAL_REO_REMAP_VAL(REO_REMAP_SW2, REO_REMAP_SW4) |
+		    HAL_REO_REMAP_VAL(REO_REMAP_SW3, REO_REMAP_SW4) |
+		    HAL_REO_REMAP_VAL(REO_REMAP_SW4, REO_REMAP_SW4) |
+		    HAL_REO_REMAP_VAL(REO_REMAP_RELEASE, REO_REMAP_RELEASE) |
+		    HAL_REO_REMAP_VAL(REO_REMAP_FW, REO_REMAP_FW) |
+		    HAL_REO_REMAP_VAL(REO_REMAP_UNUSED, REO_REMAP_FW);
+	hal_reo_remap_IX0(soc->hal_soc, remap_val);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * dp_ipa_disable_autonomy() - Disable autonomy RX path
+ * @ppdev - handle to the device instance
+ *
+ * Disable RX packet routing to IPA REO
+ * Program Destination_Ring_Ctrl_IX_0 REO register to disable
+ * Return: QDF_STATUS
+ */
+QDF_STATUS dp_ipa_disable_autonomy(struct cdp_pdev *ppdev)
+{
+	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
+	struct dp_soc *soc = pdev->soc;
+	uint32_t remap_val;
+
+	/* Call HAL API to remap REO rings to REO2IPA ring */
+	remap_val = HAL_REO_REMAP_VAL(REO_REMAP_TCL, REO_REMAP_TCL) |
+		    HAL_REO_REMAP_VAL(REO_REMAP_SW1, REO_REMAP_SW1) |
+		    HAL_REO_REMAP_VAL(REO_REMAP_SW2, REO_REMAP_SW2) |
+		    HAL_REO_REMAP_VAL(REO_REMAP_SW3, REO_REMAP_SW3) |
+		    HAL_REO_REMAP_VAL(REO_REMAP_SW4, REO_REMAP_SW2) |
+		    HAL_REO_REMAP_VAL(REO_REMAP_RELEASE, REO_REMAP_RELEASE) |
+		    HAL_REO_REMAP_VAL(REO_REMAP_FW, REO_REMAP_FW) |
+		    HAL_REO_REMAP_VAL(REO_REMAP_UNUSED, REO_REMAP_FW);
+	hal_reo_remap_IX0(soc->hal_soc, remap_val);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/* This should be configurable per H/W configuration enable status */
+#define L3_HEADER_PADDING	2
+
+/**
+ * dp_ipa_setup() - Setup and connect IPA pipes
+ * @ppdev - handle to the device instance
+ * @ipa_i2w_cb: IPA to WLAN callback
+ * @ipa_w2i_cb: WLAN to IPA callback
+ * @ipa_wdi_meter_notifier_cb: IPA WDI metering callback
+ * @ipa_desc_size: IPA descriptor size
+ * @ipa_priv: handle to the HTT instance
+ * @is_rm_enabled: Is IPA RM enabled or not
+ * @tx_pipe_handle: pointer to Tx pipe handle
+ * @rx_pipe_handle: pointer to Rx pipe handle
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS dp_ipa_setup(struct cdp_pdev *ppdev, void *ipa_i2w_cb,
+			void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb,
+			uint32_t ipa_desc_size, void *ipa_priv,
+			bool is_rm_enabled, uint32_t *tx_pipe_handle,
+			uint32_t *rx_pipe_handle)
+{
+	struct dp_pdev *pdev = (struct dp_pdev *)ppdev;
+	struct dp_soc *soc = pdev->soc;
+	struct dp_ipa_resources *ipa_res = &pdev->ipa_resource;
+	struct ipa_wdi3_setup_info tx;
+	struct ipa_wdi3_setup_info rx;
+	struct ipa_wdi3_conn_in_params pipe_in;
+	struct ipa_wdi3_conn_out_params pipe_out;
+	int ret;
+
+	qdf_mem_zero(&pipe_in, sizeof(struct ipa_wdi3_conn_in_params));
+	qdf_mem_zero(&pipe_out, sizeof(struct ipa_wdi3_conn_out_params));
+
+	/* TX PIPE */
+	/**
+	 * Transfer Ring: WBM Ring
+	 * Transfer Ring Doorbell PA: WBM Tail Pointer Address
+	 * Event Ring: TCL ring
+	 * Event Ring Doorbell PA: TCL Head Pointer Address
+	 */
+	tx.ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
+	tx.ipa_ep_cfg.hdr.hdr_len = DP_IPA_UC_WLAN_TX_HDR_LEN;
+	tx.ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 0;
+	tx.ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 0;
+	tx.ipa_ep_cfg.hdr.hdr_additional_const_len = 0;
+	tx.ipa_ep_cfg.mode.mode = IPA_BASIC;
+	tx.ipa_ep_cfg.hdr_ext.hdr_little_endian = true;
+	tx.client = IPA_CLIENT_WLAN1_CONS;
+	tx.transfer_ring_base_pa = ipa_res->tx_comp_ring_base_paddr;
+	tx.transfer_ring_size = ipa_res->tx_comp_ring_size;
+	tx.transfer_ring_doorbell_pa = /* WBM Tail Pointer Address */
+		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
+	tx.event_ring_base_pa = ipa_res->tx_ring_base_paddr;
+	tx.event_ring_size = ipa_res->tx_ring_size;
+	tx.event_ring_doorbell_pa = /* TCL Head Pointer Address */
+		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
+	tx.num_pkt_buffers = ipa_res->tx_num_alloc_buffer;
+	tx.pkt_offset = 0;
+
+	/* RX PIPE */
+	/**
+	 * Transfer Ring: REO Ring
+	 * Transfer Ring Doorbell PA: REO Tail Pointer Address
+	 * Event Ring: FW ring
+	 * Event Ring Doorbell PA: FW Head Pointer Address
+	 */
+	rx.ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
+	rx.ipa_ep_cfg.hdr.hdr_len = DP_IPA_UC_WLAN_RX_HDR_LEN;
+	rx.ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 0;
+	rx.ipa_ep_cfg.hdr.hdr_metadata_reg_valid = 1;
+	rx.ipa_ep_cfg.mode.mode = IPA_BASIC;
+	rx.client = IPA_CLIENT_WLAN1_PROD;
+	rx.transfer_ring_base_pa = ipa_res->rx_rdy_ring_base_paddr;
+	rx.transfer_ring_size = ipa_res->rx_rdy_ring_size;
+	rx.transfer_ring_doorbell_pa = /* REO Tail Pointer Address */
+		soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
+	rx.event_ring_base_pa = ipa_res->rx_refill_ring_base_paddr;
+	rx.event_ring_size = ipa_res->rx_refill_ring_size;
+	rx.event_ring_doorbell_pa = /* FW Head Pointer Address */
+		soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr;
+	rx.pkt_offset = RX_PKT_TLVS_LEN + L3_HEADER_PADDING;
+
+	pipe_in.notify = ipa_w2i_cb;
+	pipe_in.priv = ipa_priv;
+	memcpy(&pipe_in.tx, &tx, sizeof(struct ipa_wdi3_setup_info));
+	memcpy(&pipe_in.rx, &rx, sizeof(struct ipa_wdi3_setup_info));
+
+	/* Connect WDI IPA PIPE */
+	ret = ipa_wdi3_conn_pipes(&pipe_in, &pipe_out);
+	if (ret) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			  "ipa_wdi3_conn_pipes: IPA pipe setup failed: ret=%d",
+			  ret);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	/* IPA uC Doorbell registers */
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
+		"Tx DB PA=0x%x, Rx DB PA=0x%x",
+		(unsigned int)pipe_out.tx_uc_db_pa,
+		(unsigned int)pipe_out.rx_uc_db_pa);
+
+	ipa_res->tx_comp_doorbell_paddr = pipe_out.tx_uc_db_pa;
+	ipa_res->rx_ready_doorbell_paddr = pipe_out.rx_uc_db_pa;
+
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
+		  "Tx: %s=%p, %s=%d, %s=%p, %s=%p, %s=%d, %s=%p, %s=%d, %s=%p",
+		  "transfer_ring_base_pa",
+		  (void *)pipe_in.tx.transfer_ring_base_pa,
+		  "transfer_ring_size",
+		  pipe_in.tx.transfer_ring_size,
+		  "transfer_ring_doorbell_pa",
+		  (void *)pipe_in.tx.transfer_ring_doorbell_pa,
+		  "event_ring_base_pa",
+		  (void *)pipe_in.tx.event_ring_base_pa,
+		  "event_ring_size",
+		  pipe_in.tx.event_ring_size,
+		  "event_ring_doorbell_pa",
+		  (void *)pipe_in.tx.event_ring_doorbell_pa,
+		  "num_pkt_buffers",
+		  pipe_in.tx.num_pkt_buffers,
+		  "tx_comp_doorbell_paddr",
+		  (void *)ipa_res->tx_comp_doorbell_paddr);
+
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
+		  "Rx: %s=%p, %s=%d, %s=%p, %s=%p, %s=%d, %s=%p, %s=%d, %s=%p",
+		  "transfer_ring_base_pa",
+		  (void *)pipe_in.rx.transfer_ring_base_pa,
+		  "transfer_ring_size",
+		  pipe_in.rx.transfer_ring_size,
+		  "transfer_ring_doorbell_pa",
+		  (void *)pipe_in.rx.transfer_ring_doorbell_pa,
+		  "event_ring_base_pa",
+		  (void *)pipe_in.rx.event_ring_base_pa,
+		  "event_ring_size",
+		  pipe_in.rx.event_ring_size,
+		  "event_ring_doorbell_pa",
+		  (void *)pipe_in.rx.event_ring_doorbell_pa,
+		  "num_pkt_buffers",
+		  pipe_in.rx.num_pkt_buffers,
+		  "tx_comp_doorbell_paddr",
+		  (void *)ipa_res->rx_ready_doorbell_paddr);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * dp_ipa_cleanup() - Disconnect IPA pipes
+ * @tx_pipe_handle: Tx pipe handle
+ * @rx_pipe_handle: Rx pipe handle
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS dp_ipa_cleanup(uint32_t tx_pipe_handle, uint32_t rx_pipe_handle)
+{
+	int ret;
+
+	ret = ipa_wdi3_disconn_pipes();
+	if (ret) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+		    "ipa_wdi3_disconn_pipes: IPA pipe cleanup failed: ret=%d",
+		    ret);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * dp_ipa_setup_iface() - Setup IPA header and register interface
+ * @ifname: Interface name
+ * @mac_addr: Interface MAC address
+ * @prod_client: IPA prod client type
+ * @cons_client: IPA cons client type
+ * @session_id: Session ID
+ * @is_ipv6_enabled: Is IPV6 enabled or not
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr,
+			      enum ipa_client_type prod_client,
+			      enum ipa_client_type cons_client,
+			      uint8_t session_id, bool is_ipv6_enabled)
+{
+	struct ipa_wdi3_reg_intf_in_params in;
+	struct ipa_wdi3_hdr_info hdr_info;
+	struct dp_ipa_uc_tx_hdr uc_tx_hdr;
+	int ret = -EINVAL;
+
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
+		  "Add Partial hdr: %s, %pM",
+		  ifname, mac_addr);
+
+	qdf_mem_zero(&hdr_info, sizeof(struct ipa_wdi3_hdr_info));
+	qdf_ether_addr_copy(uc_tx_hdr.eth.h_source, mac_addr);
+
+	/* IPV4 header */
+	uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IP);
+
+	hdr_info.hdr = (uint8_t *)&uc_tx_hdr;
+	hdr_info.hdr_len = DP_IPA_UC_WLAN_TX_HDR_LEN;
+	hdr_info.hdr_type = IPA_HDR_L2_ETHERNET_II;
+	hdr_info.dst_mac_addr_offset = DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET;
+
+	in.netdev_name = ifname;
+	memcpy(&(in.hdr_info[0]), &hdr_info, sizeof(struct ipa_wdi3_hdr_info));
+	in.is_meta_data_valid = 1;
+	in.meta_data = htonl(session_id << 16);
+	in.meta_data_mask = htonl(0x00FF0000);
+
+	/* IPV6 header */
+	if (is_ipv6_enabled) {
+		uc_tx_hdr.eth.h_proto = qdf_htons(ETH_P_IPV6);
+		memcpy(&(in.hdr_info[1]), &hdr_info,
+				sizeof(struct ipa_wdi3_hdr_info));
+	}
+
+	ret = ipa_wdi3_reg_intf(&in);
+	if (ret) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+		    "ipa_wdi3_reg_intf: register IPA interface failed: ret=%d",
+		    ret);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	return QDF_STATUS_SUCCESS;
+}
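
For clarity, the metadata programming above packs the WLAN session ID into bits 16..23 of the 32-bit IPA metadata word; a worked example with an illustrative value:

/* session_id = 5:
 *   in.meta_data      = htonl(5 << 16)       -> 0x00050000 in host order
 *   in.meta_data_mask = htonl(0x00FF0000)
 * IPA recovers the session ID as (ntohl(meta_data) & 0x00FF0000) >> 16 == 5
 */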
+
+/**
+ * dp_ipa_cleanup_iface() - Cleanup IPA header and deregister interface
+ * @ifname: Interface name
+ * @is_ipv6_enabled: Is IPV6 enabled or not
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS dp_ipa_cleanup_iface(char *ifname, bool is_ipv6_enabled)
+{
+	int ret;
+
+	ret = ipa_wdi3_dereg_intf(ifname);
+	if (ret) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+		  "ipa_wdi3_dereg_intf: IPA pipe deregistration failed: ret=%d",
+		  ret);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * dp_ipa_enable_pipes() - Enable and resume traffic on Tx/Rx pipes
+ * @ppdev - handle to the device instance
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS dp_ipa_enable_pipes(struct cdp_pdev *ppdev)
+{
+	QDF_STATUS result;
+
+	result = ipa_wdi3_enable_pipes();
+	if (result) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+				"%s: Enable WDI PIPE fail, code %d",
+				__func__, result);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * dp_ipa_disable_pipes() - Suspend traffic and disable Tx/Rx pipes
+ * @ppdev - handle to the device instance
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS dp_ipa_disable_pipes(struct cdp_pdev *ppdev)
+{
+	QDF_STATUS result;
+
+	result = ipa_wdi3_disable_pipes();
+	if (result) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+				"%s: Disable WDI PIPE fail, code %d",
+				__func__, result);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * dp_ipa_set_perf_level() - Set IPA clock bandwidth based on data rates
+ * @client: Client type
+ * @max_supported_bw_mbps: Maximum bandwidth needed (in Mbps)
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS dp_ipa_set_perf_level(int client, uint32_t max_supported_bw_mbps)
+{
+	struct ipa_wdi3_perf_profile profile;
+	QDF_STATUS result;
+
+	profile.client = client;
+	profile.max_supported_bw_mbps = max_supported_bw_mbps;
+
+	result = ipa_wdi3_set_perf_profile(&profile);
+	if (result) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+				"%s: ipa_wdi3_set_perf_profile fail, code %d",
+				__func__, result);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	return QDF_STATUS_SUCCESS;
+}
+#endif
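
A corresponding teardown sketch, mirroring the bring-up sequence shown after the commit message; the ordering is an assumption, not mandated by this change:

static void wlan_ipa_wdi3_disconnect(ol_txrx_soc_handle soc,
				     struct cdp_pdev *pdev,
				     uint32_t tx_pipe_handle,
				     uint32_t rx_pipe_handle)
{
	cdp_ipa_disable_autonomy(soc, pdev);	/* route RX back to SW REO rings */
	cdp_ipa_disable_pipes(soc, pdev);	/* suspend WDI3 traffic */
	cdp_ipa_cleanup(soc, tx_pipe_handle, rx_pipe_handle);
}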

+ 75 - 0
dp/wifi3.0/dp_ipa.h

@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DP_IPA_H_
+#define _DP_IPA_H_
+
+#ifdef IPA_OFFLOAD
+
+#define IPA_TCL_DATA_RING_IDX	2
+#define IPA_TX_COMP_RING_IDX	2
+#define IPA_REO_DEST_RING_IDX	3
+
+/**
+ * struct dp_ipa_uc_tx_hdr - full tx header registered to IPA hardware
+ * @eth:     ether II header
+ */
+struct dp_ipa_uc_tx_hdr {
+	struct ethhdr eth;
+} __packed;
+
+/**
+ * struct dp_ipa_uc_rx_hdr - full rx header registered to IPA hardware
+ * @eth:     ether II header
+ */
+struct dp_ipa_uc_rx_hdr {
+	struct ethhdr eth;
+} __packed;
+
+#define DP_IPA_UC_WLAN_TX_HDR_LEN      sizeof(struct dp_ipa_uc_tx_hdr)
+#define DP_IPA_UC_WLAN_RX_HDR_LEN      sizeof(struct dp_ipa_uc_rx_hdr)
+#define DP_IPA_UC_WLAN_HDR_DES_MAC_OFFSET	0
+
+QDF_STATUS dp_ipa_get_resource(struct cdp_pdev *pdev);
+QDF_STATUS dp_ipa_set_doorbell_paddr(struct cdp_pdev *pdev);
+QDF_STATUS dp_ipa_uc_set_active(struct cdp_pdev *pdev, bool uc_active,
+		bool is_tx);
+QDF_STATUS dp_ipa_op_response(struct cdp_pdev *pdev, uint8_t *op_msg);
+QDF_STATUS dp_ipa_register_op_cb(struct cdp_pdev *pdev, ipa_uc_op_cb_type op_cb,
+		void *usr_ctxt);
+QDF_STATUS dp_ipa_get_stat(struct cdp_pdev *pdev);
+qdf_nbuf_t dp_tx_send_ipa_data_frame(struct cdp_vdev *vdev, qdf_nbuf_t skb);
+QDF_STATUS dp_ipa_enable_autonomy(struct cdp_pdev *pdev);
+QDF_STATUS dp_ipa_disable_autonomy(struct cdp_pdev *pdev);
+QDF_STATUS dp_ipa_setup(struct cdp_pdev *pdev, void *ipa_i2w_cb,
+		void *ipa_w2i_cb, void *ipa_wdi_meter_notifier_cb,
+		uint32_t ipa_desc_size, void *ipa_priv, bool is_rm_enabled,
+		uint32_t *tx_pipe_handle, uint32_t *rx_pipe_handle);
+QDF_STATUS dp_ipa_cleanup(uint32_t tx_pipe_handle,
+		uint32_t rx_pipe_handle);
+QDF_STATUS dp_ipa_remove_header(char *name);
+int dp_ipa_add_header_info(char *ifname, uint8_t *mac_addr,
+		uint8_t session_id, bool is_ipv6_enabled);
+int dp_ipa_register_interface(char *ifname, bool is_ipv6_enabled);
+QDF_STATUS dp_ipa_setup_iface(char *ifname, uint8_t *mac_addr,
+		enum ipa_client_type prod_client,
+		enum ipa_client_type cons_client,
+		uint8_t session_id, bool is_ipv6_enabled);
+QDF_STATUS dp_ipa_cleanup_iface(char *ifname, bool is_ipv6_enabled);
+QDF_STATUS dp_ipa_enable_pipes(struct cdp_pdev *pdev);
+QDF_STATUS dp_ipa_disable_pipes(struct cdp_pdev *pdev);
+QDF_STATUS dp_ipa_set_perf_level(int client,
+		uint32_t max_supported_bw_mbps);
+
+#endif
+#endif /* _DP_IPA_H_ */

+ 375 - 21
dp/wifi3.0/dp_main.c

@@ -48,6 +48,8 @@ cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
 	return;
 }
 #endif
+#include <ol_cfg.h>
+#include "dp_ipa.h"
 
 #define DP_INTR_POLL_TIMER_MS	10
 #define DP_WDS_AGING_TIMER_DEFAULT_MS	6000
@@ -58,6 +60,14 @@ cdp_dump_flow_pool_info(struct cdp_soc_t *soc)
 #define DP_CURR_FW_STATS_AVAIL 19
 #define DP_HTT_DBG_EXT_STATS_MAX 256
 
+#ifdef IPA_OFFLOAD
+/* Exclude IPA rings from the interrupt context */
+#define TX_RING_MASK_VAL	0x7
+#define RX_RING_MASK_VAL	0x7
+#else
+#define TX_RING_MASK_VAL	0xF
+#define RX_RING_MASK_VAL	0xF
+#endif
 /**
  * default_dscp_tid_map - Default DSCP-TID mapping
  *
@@ -319,6 +329,8 @@ static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
 	 * See if these settings need to passed from DP layer
 	 */
 	ring_params.flags = 0;
+	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
+		  FL("Ring type: %d, num:%d"), ring_type, ring_num);
 
 	num_entries = (num_entries > max_entries) ? max_entries : num_entries;
 	srng->hal_srng = NULL;
@@ -417,6 +429,201 @@ static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
 				srng->base_paddr_unaligned, 0);
 }
 
+#ifdef IPA_OFFLOAD
+/**
+ * dp_tx_ipa_uc_detach - Free autonomy TX resources
+ * @soc: data path instance
+ * @pdev: core txrx pdev context
+ *
+ * Free allocated TX buffers with WBM SRNG
+ *
+ * Return: none
+ */
+static void dp_tx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
+{
+	int idx;
+
+	for (idx = 0; idx < soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
+		if (soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr[idx])
+			qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr[idx]);
+	}
+
+	qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr);
+	soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr = NULL;
+}
+
+/**
+ * dp_rx_ipa_uc_detach - free autonomy RX resources
+ * @soc: data path instance
+ * @pdev: core txrx pdev context
+ *
+ * This function will detach DP RX into main device context
+ * will free DP Rx resources.
+ *
+ * Return: none
+ */
+static void dp_rx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
+{
+}
+
+static int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
+{
+	/* TX resource detach */
+	dp_tx_ipa_uc_detach(soc, pdev);
+
+	/* RX resource detach */
+	dp_rx_ipa_uc_detach(soc, pdev);
+
+	dp_srng_cleanup(soc, &pdev->ipa_rx_refill_buf_ring, RXDMA_BUF, 2);
+
+	return QDF_STATUS_SUCCESS;	/* success */
+}
+
+/* Hard coded config parameters until dp_ops_cfg.cfg_attach implemented */
+#define CFG_IPA_UC_TX_BUF_SIZE_DEFAULT            (2048)
+
+/**
+ * dp_tx_ipa_uc_attach - Allocate autonomy TX resources
+ * @soc: data path instance
+ * @pdev: Physical device handle
+ *
+ * Allocate TX buffers from non-cacheable memory
+ * Attach allocated TX buffers to the WBM SRNG
+ *
+ * Return: int
+ */
+static int dp_tx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
+{
+	uint32_t tx_buffer_count;
+	uint32_t ring_base_align = 8;
+	void *buffer_vaddr_unaligned;
+	void *buffer_vaddr;
+	qdf_dma_addr_t buffer_paddr_unaligned;
+	qdf_dma_addr_t buffer_paddr;
+	void *wbm_srng = soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
+	uint32_t paddr_lo;
+	uint32_t paddr_hi;
+	void *ring_entry;
+	int ring_size = ((struct hal_srng *)wbm_srng)->ring_size;
+	int retval = QDF_STATUS_SUCCESS;
+	/*
+	 * Uncomment when dp_ops_cfg.cfg_attach is implemented
+	 * unsigned int uc_tx_buf_sz =
+	 *		dp_cfg_ipa_uc_tx_buf_size(pdev->osif_pdev);
+	 */
+	unsigned int uc_tx_buf_sz = CFG_IPA_UC_TX_BUF_SIZE_DEFAULT;
+	unsigned int alloc_size = uc_tx_buf_sz + ring_base_align - 1;
+
+	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+		  "requested %d buffers to be posted to wbm ring",
+			 ring_size);
+
+	soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr = qdf_mem_malloc(ring_size *
+			sizeof(*soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr));
+	if (!soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			  "%s: IPA WBM Ring mem_info alloc fail", __func__);
+		return -ENOMEM;
+	}
+
+	hal_srng_access_start(soc->hal_soc, wbm_srng);
+
+	/* Allocate TX buffers as many as possible */
+	for (tx_buffer_count = 0;
+		tx_buffer_count < ring_size; tx_buffer_count++) {
+
+		ring_entry = hal_srng_src_get_next(soc->hal_soc, wbm_srng);
+		if (!ring_entry) {
+			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+				  "Failed to get WBM ring entry\n");
+			goto fail;
+		}
+
+		buffer_vaddr_unaligned = qdf_mem_alloc_consistent(soc->osdev,
+			soc->osdev->dev, alloc_size, &buffer_paddr_unaligned);
+		if (!buffer_vaddr_unaligned) {
+			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+				  "IPA WDI TX buffer alloc fail %d allocated\n",
+				  tx_buffer_count);
+			break;
+		}
+
+		buffer_vaddr = buffer_vaddr_unaligned +
+			((unsigned long)buffer_vaddr_unaligned %
+			ring_base_align);
+		buffer_paddr = buffer_paddr_unaligned +
+			((unsigned long)(buffer_vaddr) -
+			 (unsigned long)buffer_vaddr_unaligned);
+
+		paddr_lo = ((u64)buffer_paddr & 0x00000000ffffffff);
+		paddr_hi = ((u64)buffer_paddr & 0x0000001f00000000) >> 32;
+		HAL_WBM_PADDR_LO_SET(ring_entry, paddr_lo);
+		HAL_WBM_PADDR_HI_SET(ring_entry, paddr_hi);
+
+		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr[tx_buffer_count] =
+			buffer_vaddr;
+	}
+
+	hal_srng_access_end(soc->hal_soc, wbm_srng);
+	soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt = tx_buffer_count;
+
+	return retval;
+
+fail:
+	qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr);
+	return retval;
+}
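
As a worked example of the physical-address split above (the 0x1f mask assumes a 37-bit address space):

/* buffer_paddr = 0x1_2345_6789:
 *   paddr_lo = 0x23456789   (bits 31..0)
 *   paddr_hi = 0x00000001   (bits 36..32)
 */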
+
+/**
+ * dp_rx_ipa_uc_attach - Allocate autonomy RX resources
+ * @soc: data path instance
+ * @pdev: core txrx pdev context
+ *
+ * This function will attach a DP RX instance into the main
+ * device (SOC) context.
+ *
+ * Return: QDF_STATUS_SUCCESS: success
+ *         QDF_STATUS_E_RESOURCES: Error return
+ */
+static int dp_rx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
+{
+	return QDF_STATUS_SUCCESS;
+}
+
+static int dp_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
+{
+	int error;
+
+	/* TX resource attach */
+	error = dp_tx_ipa_uc_attach(soc, pdev);
+	if (error) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			  "DP IPA UC TX attach fail code %d\n", error);
+		return error;
+	}
+
+	/* RX resource attach */
+	error = dp_rx_ipa_uc_attach(soc, pdev);
+	if (error) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			  "DP IPA UC RX attach fail code %d\n", error);
+		dp_tx_ipa_uc_detach(soc, pdev);
+		return error;
+	}
+
+	return QDF_STATUS_SUCCESS;	/* success */
+}
+#else
+static int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
+{
+	return QDF_STATUS_SUCCESS;
+}
+static int dp_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
+{
+	return QDF_STATUS_SUCCESS;
+}
+#endif
+
 /* TODO: Need this interface from HIF */
 void *hif_get_hal_handle(void *hif_handle);
 
@@ -578,8 +785,8 @@ static QDF_STATUS dp_soc_interrupt_attach_poll(void *txrx_soc)
 
 	for (i = 0; i < wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx); i++) {
 		soc->intr_ctx[i].dp_intr_id = i;
-		soc->intr_ctx[i].tx_ring_mask = 0xF;
-		soc->intr_ctx[i].rx_ring_mask = 0xF;
+		soc->intr_ctx[i].tx_ring_mask = TX_RING_MASK_VAL;
+		soc->intr_ctx[i].rx_ring_mask = RX_RING_MASK_VAL;
 		soc->intr_ctx[i].rx_mon_ring_mask = 0x1;
 		soc->intr_ctx[i].rx_err_ring_mask = 0x1;
 		soc->intr_ctx[i].rx_wbm_rel_ring_mask = 0x1;
@@ -1330,6 +1537,10 @@ static int dp_soc_cmn_setup(struct dp_soc *soc)
 					FL("dp_srng_setup failed for tcl_data_ring[%d]"), i);
 				goto fail1;
 			}
+			/*
+			 * TBD: Set IPA WBM ring size with ini IPA UC tx buffer
+			 * count
+			 */
 			if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
 				WBM2SW_RELEASE, i, 0, tx_comp_ring_size)) {
 				QDF_TRACE(QDF_MODULE_ID_DP,
@@ -1603,6 +1814,99 @@ void dp_soc_reset_intr_mask(struct dp_soc *soc, struct dp_pdev *pdev)
 	wlan_cfg_set_rxdma2host_ring_mask(soc->wlan_cfg_ctx, pdev->pdev_id, 0x0);
 }
 
+/*
+ * dp_ipa_ring_resource_setup() - setup IPA ring resources
+ * @soc: data path SoC handle
+ *
+ * Return: none
+ */
+#ifdef IPA_OFFLOAD
+static inline int dp_ipa_ring_resource_setup(struct dp_soc *soc,
+		struct dp_pdev *pdev)
+{
+	void *hal_srng;
+	struct hal_srng_params srng_params;
+	qdf_dma_addr_t hp_addr, tp_addr;
+
+	/* IPA TCL_DATA Ring - HAL_SRNG_SW2TCL4 */
+	hal_srng = soc->tcl_data_ring[IPA_TCL_DATA_RING_IDX].hal_srng;
+	hal_get_srng_params(soc->hal_soc, hal_srng, &srng_params);
+
+	soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr =
+		srng_params.ring_base_paddr;
+	soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_vaddr =
+		srng_params.ring_base_vaddr;
+	soc->ipa_uc_tx_rsc.ipa_tcl_ring_size =
+		srng_params.num_entries * srng_params.entry_size;
+	hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_srng);
+	soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr = hp_addr;
+
+
+	/* IPA TX COMP Ring - HAL_SRNG_WBM2SW3_RELEASE */
+	hal_srng = soc->tx_comp_ring[IPA_TX_COMP_RING_IDX].hal_srng;
+	hal_get_srng_params(soc->hal_soc, hal_srng, &srng_params);
+
+	soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr =
+		srng_params.ring_base_paddr;
+	soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_vaddr =
+		srng_params.ring_base_vaddr;
+	soc->ipa_uc_tx_rsc.ipa_wbm_ring_size =
+		srng_params.num_entries * srng_params.entry_size;
+	tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_srng);
+	soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr = tp_addr;
+
+	/* IPA REO_DEST Ring - HAL_SRNG_REO2SW4 */
+	hal_srng = soc->reo_dest_ring[IPA_REO_DEST_RING_IDX].hal_srng;
+	hal_get_srng_params(soc->hal_soc, hal_srng, &srng_params);
+
+	soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr =
+		srng_params.ring_base_paddr;
+	soc->ipa_uc_rx_rsc.ipa_reo_ring_base_vaddr =
+		srng_params.ring_base_vaddr;
+	soc->ipa_uc_rx_rsc.ipa_reo_ring_size =
+		srng_params.num_entries * srng_params.entry_size;
+	tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_srng);
+	soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr = tp_addr;
+
+	/* IPA RX_REFILL_BUF Ring - ipa_rx_refill_buf_ring */
+	if (dp_srng_setup(soc, &pdev->ipa_rx_refill_buf_ring, RXDMA_BUF, 2,
+				pdev->pdev_id, RXDMA_BUF_RING_SIZE)) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+				"%s: dp_srng_setup failed IPA rx refill ring\n",
+				__func__);
+		return -EFAULT;
+	}
+
+	hal_srng = pdev->ipa_rx_refill_buf_ring.hal_srng;
+	hal_get_srng_params(soc->hal_soc, hal_srng, &srng_params);
+	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr =
+		srng_params.ring_base_paddr;
+	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_vaddr =
+		srng_params.ring_base_vaddr;
+	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size =
+		srng_params.num_entries * srng_params.entry_size;
+	hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_srng);
+	soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_hp_paddr = hp_addr;
+
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+		"%s: ring_base_paddr:%p, ring_base_vaddr:%p"
+		"_entries:%d, hp_addr:%p\n",
+		__func__,
+		(void *)srng_params.ring_base_paddr,
+		(void *)srng_params.ring_base_vaddr,
+		srng_params.num_entries,
+		(void *)hp_addr);
+
+	return 0;
+}
+#else
+static inline int dp_ipa_ring_resource_setup(struct dp_soc *soc,
+					     struct dp_pdev *pdev)
+{
+	return 0;
+}
+#endif
+
 /*
 * dp_pdev_attach_wifi3() - attach txrx pdev
 * @osif_pdev: Opaque PDEV handle from OSIF/HDD
@@ -1704,7 +2008,6 @@ static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
 		soc->num_reo_dest_rings++;
 
 	}
-
 	if (dp_srng_setup(soc, &pdev->rx_refill_buf_ring, RXDMA_BUF, 0, pdev_id,
 		RXDMA_REFILL_RING_SIZE)) {
 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
@@ -1755,11 +2058,20 @@ static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
 		goto fail1;
 	}
 
+	if (dp_ipa_ring_resource_setup(soc, pdev))
+		goto fail1;
+
+	if (dp_ipa_uc_attach(soc, pdev) != QDF_STATUS_SUCCESS) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"%s: dp_ipa_uc_attach failed\n", __func__);
+		goto fail1;
+	}
+
 	/* Rx specific init */
 	if (dp_rx_pdev_attach(pdev)) {
-			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
-				FL("dp_rx_pdev_attach failed "));
-			goto fail0;
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			FL("dp_rx_pdev_attach failed "));
+		goto fail0;
 	}
 	DP_STATS_INIT(pdev);
 
@@ -1880,7 +2192,9 @@ static void dp_pdev_detach_wifi3(struct cdp_pdev *txrx_pdev, int force)
 	dp_neighbour_peers_detach(pdev);
 	qdf_spinlock_destroy(&pdev->tx_mutex);
 
-	/* Setup per PDEV REO rings if configured */
+	dp_ipa_uc_detach(soc, pdev);
+
+	/* Cleanup per PDEV REO rings if configured */
 	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
 		dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
 			REO_DST, pdev->pdev_id);
@@ -1936,7 +2250,6 @@ static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
 /*
  * dp_soc_detach_wifi3() - Detach txrx SOC
  * @txrx_soc: DP SOC handle, struct cdp_soc_t is first element of struct dp_soc.
- *
  */
 static void dp_soc_detach_wifi3(void *txrx_soc)
 {
@@ -2022,18 +2335,38 @@ static void dp_soc_detach_wifi3(void *txrx_soc)
 }
 
 /*
-* dp_rxdma_ring_config() - configure the RX DMA rings
-*
-* This function is used to configure the MAC rings.
-* On MCL host provides buffers in Host2FW ring
-* FW refills (copies) buffers to the ring and updates
-* ring_idx in register
-*
-* @soc: data path SoC handle
-* @pdev: Physical device handle
-*
-* Return: void
-*/
+ * dp_config_ipa_rx_refill_buf_ring() - setup IPA RX Refill buffer ring
+ * @soc: data path SoC handle
+ * @pdev: physical device handle
+ *
+ * Return: void
+ */
+#ifdef IPA_OFFLOAD
+static inline void dp_config_ipa_rx_refill_buf_ring(struct dp_soc *soc,
+						   struct dp_pdev *pdev)
+{
+	htt_srng_setup(soc->htt_handle, 0,
+		       pdev->ipa_rx_refill_buf_ring.hal_srng, RXDMA_BUF);
+}
+#else
+static inline void dp_config_ipa_rx_refill_buf_ring(struct dp_soc *soc,
+						   struct dp_pdev *pdev)
+{
+}
+#endif
+
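dp_config_ipa_rx_refill_buf_ring() only maps the already-allocated IPA refill ring into HTT; the allocation itself is done in dp_ipa.c, which is not part of this excerpt. A hedged sketch of what that allocation plausibly looks like (the ring index 2 matches the third RXDMA_BUF ring enabled under IPA_OFFLOAD in hal_srng.c; the size choice is an assumption):

    /* Hypothetical allocation of the IPA-owned RXDMA_BUF refill ring. */
    static int example_alloc_ipa_rx_refill_ring(struct dp_soc *soc,
                                                struct dp_pdev *pdev)
    {
            /* ring 0 is the regular host refill ring; ring 2 is the extra
             * ring available when RXDMA_BUF max_rings is bumped to 3
             */
            return dp_srng_setup(soc, &pdev->ipa_rx_refill_buf_ring,
                                 RXDMA_BUF, 2, pdev->pdev_id,
                                 RXDMA_REFILL_RING_SIZE);
    }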
+/*
+ * dp_rxdma_ring_config() - configure the RX DMA rings
+ *
+ * This function is used to configure the MAC rings.
+ * On MCL, the host provides buffers in the Host2FW ring; the FW refills
+ * (copies) buffers to the MAC ring and updates ring_idx in a register.
+ *
+ * @soc: data path SoC handle
+ *
+ * Return: void
+ */
 #ifdef QCA_HOST2FW_RXBUF_RING
 static void dp_rxdma_ring_config(struct dp_soc *soc)
 {
@@ -2054,6 +2387,8 @@ static void dp_rxdma_ring_config(struct dp_soc *soc)
 				 pdev->rx_refill_buf_ring.hal_srng,
 				 RXDMA_BUF);
 
+			dp_config_ipa_rx_refill_buf_ring(soc, pdev);
+
 			if (soc->cdp_soc.ol_ops->
 				is_hw_dbs_2x2_capable) {
 				dbs_enable = soc->cdp_soc.ol_ops->
@@ -4547,9 +4882,25 @@ static struct cdp_lflowctl_ops dp_ops_l_flowctl = {
 	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
 };
 
+#ifdef IPA_OFFLOAD
 static struct cdp_ipa_ops dp_ops_ipa = {
-	/* WIFI 3.0 DP NOT IMPLEMENTED YET */
+	.ipa_get_resource = dp_ipa_get_resource,
+	.ipa_set_doorbell_paddr = dp_ipa_set_doorbell_paddr,
+	.ipa_op_response = dp_ipa_op_response,
+	.ipa_register_op_cb = dp_ipa_register_op_cb,
+	.ipa_get_stat = dp_ipa_get_stat,
+	.ipa_tx_data_frame = dp_tx_send_ipa_data_frame,
+	.ipa_enable_autonomy = dp_ipa_enable_autonomy,
+	.ipa_disable_autonomy = dp_ipa_disable_autonomy,
+	.ipa_setup = dp_ipa_setup,
+	.ipa_cleanup = dp_ipa_cleanup,
+	.ipa_setup_iface = dp_ipa_setup_iface,
+	.ipa_cleanup_iface = dp_ipa_cleanup_iface,
+	.ipa_enable_pipes = dp_ipa_enable_pipes,
+	.ipa_disable_pipes = dp_ipa_disable_pipes,
+	.ipa_set_perf_level = dp_ipa_set_perf_level
 };
+#endif
 
 static struct cdp_bus_ops dp_ops_bus = {
 	.bus_suspend = dp_bus_suspend,
@@ -4582,6 +4933,7 @@ static struct cdp_peer_ops dp_ops_peer = {
 	.peer_find_by_local_id = dp_peer_find_by_local_id,
 	.peer_state_update = dp_peer_state_update,
 	.get_vdevid = dp_get_vdevid,
+	.get_vdev_by_sta_id = dp_get_vdev_by_sta_id,
 	.peer_get_peer_mac_addr = dp_peer_get_peer_mac_addr,
 	.get_vdev_for_peer = dp_get_vdev_for_peer,
 	.get_peer_state = dp_get_peer_state,
@@ -4607,7 +4959,9 @@ static struct cdp_ops dp_txrx_ops = {
 	.cfg_ops = &dp_ops_cfg,
 	.flowctl_ops = &dp_ops_flowctl,
 	.l_flowctl_ops = &dp_ops_l_flowctl,
+#ifdef IPA_OFFLOAD
 	.ipa_ops = &dp_ops_ipa,
+#endif
 	.bus_ops = &dp_ops_bus,
 	.ocb_ops = &dp_ops_ocb,
 	.peer_ops = &dp_ops_peer,

+ 29 - 0
dp/wifi3.0/dp_peer.c

@@ -26,6 +26,7 @@
 #include <hal_reo.h>
 #ifdef CONFIG_MCL
 #include <cds_ieee80211_common.h>
+#include <cds_api.h>
 #endif
 #include <cdp_txrx_handle.h>
 #include <wlan_cfg.h>
@@ -1747,6 +1748,34 @@ QDF_STATUS dp_get_vdevid(void *peer_handle, uint8_t *vdev_id)
 	return QDF_STATUS_SUCCESS;
 }
 
+struct cdp_vdev *dp_get_vdev_by_sta_id(uint8_t sta_id)
+{
+	struct dp_peer *peer = NULL;
+	struct dp_pdev *pdev = NULL;
+
+	if (sta_id >= WLAN_MAX_STA_COUNT) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
+			  "Invalid sta id passed");
+		return NULL;
+	}
+
+	pdev = cds_get_context(QDF_MODULE_ID_TXRX);
+	if (!pdev) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
+			  "PDEV not found for sta_id [%d]", sta_id);
+		return NULL;
+	}
+
+	peer = dp_peer_find_by_local_id((struct cdp_pdev *)pdev, sta_id);
+	if (!peer) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
+			  "PEER [%d] not found", sta_id);
+		return NULL;
+	}
+
+	return (struct cdp_vdev *)peer->vdev;
+}
+
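dp_get_vdev_by_sta_id() gives IPA/HDD callers a direct station-ID-to-vdev lookup, hiding the pdev/peer resolution done above. An illustrative caller (the wrapper itself is hypothetical):

    /* Resolve the vdev on which traffic for a given station ID should go. */
    static struct cdp_vdev *example_vdev_for_sta(uint8_t sta_id)
    {
            struct cdp_vdev *vdev = dp_get_vdev_by_sta_id(sta_id);

            if (!vdev) {
                    /* invalid sta_id, missing TXRX context, or unknown peer */
                    return NULL;
            }

            return vdev;
    }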
 /**
  * dp_get_vdev_for_peer() - Get virtual interface instance which peer belongs
  * @peer - peer instance

+ 71 - 1
dp/wifi3.0/dp_types.h

@@ -143,7 +143,6 @@ union dp_rx_desc_list_elem_t;
  * hardware
  */
 #define DP_SW2HW_MACID(id) ((id) + 1)
-
 #define DP_HW2SW_MACID(id) ((id) > 0 ? ((id) - 1) : 0)
 #define DP_MAC_ADDR_LEN 6
 
@@ -770,7 +769,67 @@ struct dp_soc {
 	uint32_t htt_msg_len;
 	/* work queue to process htt stats */
 	qdf_work_t htt_stats_work;
+
+#ifdef IPA_OFFLOAD
+	/* IPA uC datapath offload WLAN Tx resources */
+	struct {
+		/* Resource info to be passed to IPA */
+		qdf_dma_addr_t ipa_tcl_ring_base_paddr;
+		void *ipa_tcl_ring_base_vaddr;
+		uint32_t ipa_tcl_ring_size;
+		qdf_dma_addr_t ipa_tcl_hp_paddr;
+		uint32_t alloc_tx_buf_cnt;
+
+		qdf_dma_addr_t ipa_wbm_ring_base_paddr;
+		void *ipa_wbm_ring_base_vaddr;
+		uint32_t ipa_wbm_ring_size;
+		qdf_dma_addr_t ipa_wbm_tp_paddr;
+
+		/* TX buffers populated into the WBM ring */
+		void **tx_buf_pool_vaddr;
+	} ipa_uc_tx_rsc;
+
+	/* IPA uC datapath offload WLAN Rx resources */
+	struct {
+		/* Resource info to be passed to IPA */
+		qdf_dma_addr_t ipa_reo_ring_base_paddr;
+		void *ipa_reo_ring_base_vaddr;
+		uint32_t ipa_reo_ring_size;
+		qdf_dma_addr_t ipa_reo_tp_paddr;
+
+		/* Resource info to be passed to firmware and IPA */
+		qdf_dma_addr_t ipa_rx_refill_buf_ring_base_paddr;
+		void *ipa_rx_refill_buf_ring_base_vaddr;
+		uint32_t ipa_rx_refill_buf_ring_size;
+		qdf_dma_addr_t ipa_rx_refill_buf_hp_paddr;
+	} ipa_uc_rx_rsc;
+#endif
 };
+
+#ifdef IPA_OFFLOAD
+/**
+ * struct dp_ipa_resources - Resources needed for IPA
+ */
+struct dp_ipa_resources {
+	qdf_dma_addr_t tx_ring_base_paddr;
+	uint32_t tx_ring_size;
+	uint32_t tx_num_alloc_buffer;
+
+	qdf_dma_addr_t tx_comp_ring_base_paddr;
+	uint32_t tx_comp_ring_size;
+
+	qdf_dma_addr_t rx_rdy_ring_base_paddr;
+	uint32_t rx_rdy_ring_size;
+
+	qdf_dma_addr_t rx_refill_ring_base_paddr;
+	uint32_t rx_refill_ring_size;
+
+	/* IPA UC doorbell registers paddr */
+	qdf_dma_addr_t tx_comp_doorbell_paddr;
+	qdf_dma_addr_t rx_ready_doorbell_paddr;
+};
+#endif
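struct dp_ipa_resources repackages the soc-level ipa_uc_tx_rsc/ipa_uc_rx_rsc capture into the per-pdev form handed to HDD/IPA. The copy lives in dp_ipa.c (not shown in this excerpt); a plausible sketch of the field mapping, offered as an assumption:

    /* Hypothetical: fill pdev->ipa_resource from the captured soc-level info. */
    static void example_fill_ipa_resource(struct dp_soc *soc,
                                          struct dp_pdev *pdev)
    {
            struct dp_ipa_resources *res = &pdev->ipa_resource;

            res->tx_ring_base_paddr =
                    soc->ipa_uc_tx_rsc.ipa_tcl_ring_base_paddr;
            res->tx_ring_size = soc->ipa_uc_tx_rsc.ipa_tcl_ring_size;
            res->tx_num_alloc_buffer = soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt;

            res->tx_comp_ring_base_paddr =
                    soc->ipa_uc_tx_rsc.ipa_wbm_ring_base_paddr;
            res->tx_comp_ring_size = soc->ipa_uc_tx_rsc.ipa_wbm_ring_size;

            res->rx_rdy_ring_base_paddr =
                    soc->ipa_uc_rx_rsc.ipa_reo_ring_base_paddr;
            res->rx_rdy_ring_size = soc->ipa_uc_rx_rsc.ipa_reo_ring_size;

            res->rx_refill_ring_base_paddr =
                    soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_base_paddr;
            res->rx_refill_ring_size =
                    soc->ipa_uc_rx_rsc.ipa_rx_refill_buf_ring_size;
    }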
+
 #define MAX_RX_MAC_RINGS 2
 /* Same as NAC_MAX_CLENT */
 #define DP_NAC_MAX_CLIENT  24
@@ -811,6 +870,11 @@ struct dp_pdev {
 	/* Ring used to replenish rx buffers (maybe to the firmware of MAC) */
 	struct dp_srng rx_refill_buf_ring;
 
+#ifdef IPA_OFFLOAD
+	/* Ring used to replenish IPA rx buffers */
+	struct dp_srng ipa_rx_refill_buf_ring;
+#endif
+
 	/* Empty ring used by firmware to post rx buffers to the MAC */
 	struct dp_srng rx_mac_buf_ring[MAX_RX_MAC_RINGS];
 
@@ -930,6 +994,12 @@ struct dp_pdev {
 	/* Number of VAPs with mcast enhancement enabled */
 	qdf_atomic_t mc_num_vap_attached;
 
+#ifdef IPA_OFFLOAD
+	ipa_uc_op_cb_type ipa_uc_op_cb;
+	void *usr_ctxt;
+	struct dp_ipa_resources ipa_resource;
+#endif
+
 	/* TBD */
 
 	/* map this pdev to a particular Reo Destination ring */

+ 20 - 0
dp/wifi3.0/hal_rx.h

@@ -172,6 +172,26 @@ enum hal_rx_ret_buf_manager {
 		(cookie << BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_LSB) & \
 		BUFFER_ADDR_INFO_1_SW_BUFFER_COOKIE_MASK)
 
+/*
+ * macro to set the LSW of the nbuf data physical address
+ * in the WBM ring entry
+ */
+#define HAL_WBM_PADDR_LO_SET(buff_addr_info, paddr_lo) \
+		((*(((unsigned int *) buff_addr_info) + \
+		(BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_OFFSET >> 2))) = \
+		(paddr_lo << BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_LSB) & \
+		BUFFER_ADDR_INFO_0_BUFFER_ADDR_31_0_MASK)
+
+/*
+ * macro to set bits 39:32 (the LSB of the MSW) of the nbuf data physical
+ * address in the WBM ring entry
+ */
+#define HAL_WBM_PADDR_HI_SET(buff_addr_info, paddr_hi) \
+		((*(((unsigned int *) buff_addr_info) + \
+		(BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_OFFSET >> 2))) = \
+		(paddr_hi << BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_LSB) & \
+		BUFFER_ADDR_INFO_1_BUFFER_ADDR_39_32_MASK)
+
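Together these macros spread a 40-bit nbuf DMA address over the two words of a WBM buffer_addr_info descriptor, e.g. when pre-populating the IPA Tx WBM ring with buffers. An illustrative helper (hypothetical; the cookie and return-buffer-manager fields would be set with the neighbouring macros):

    /* Program a 40-bit buffer DMA address into a WBM ring entry. */
    static inline void example_wbm_set_buf_paddr(void *ring_entry,
                                                 qdf_dma_addr_t paddr)
    {
            HAL_WBM_PADDR_LO_SET(ring_entry, (uint32_t)(paddr & 0xffffffff));
            HAL_WBM_PADDR_HI_SET(ring_entry, (uint32_t)(paddr >> 32));
    }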
 /*
  * macro to set the manager into the rxdma ring entry
  */

+ 36 - 10
hal/wifi3.0/hal_api.h

@@ -308,6 +308,42 @@ extern void hal_get_shadow_config(void *hal_soc,
 extern void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
 	int mac_id, struct hal_srng_params *ring_params);
 
+/* Remapping ids of REO rings */
+#define REO_REMAP_TCL 0
+#define REO_REMAP_SW1 1
+#define REO_REMAP_SW2 2
+#define REO_REMAP_SW3 3
+#define REO_REMAP_SW4 4
+#define REO_REMAP_RELEASE 5
+#define REO_REMAP_FW 6
+#define REO_REMAP_UNUSED 7
+
+/*
+ * Currently this macro works only for IX0, since all the rings being remapped
+ * are controlled through HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0
+ */
+#define HAL_REO_REMAP_VAL(_ORIGINAL_DEST, _NEW_DEST) \
+	HAL_REO_REMAP_VAL_(_ORIGINAL_DEST, _NEW_DEST)
+/* allow the destination macros to be expanded */
+#define HAL_REO_REMAP_VAL_(_ORIGINAL_DEST, _NEW_DEST) \
+	(_NEW_DEST << \
+	 (HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_DEST_RING_MAPPING_ ## \
+	  _ORIGINAL_DEST ## _SHFT))
+
+/**
+ * hal_reo_remap_IX0() - Remap REO ring destination
+ * @hal: HAL SOC handle
+ * @remap_val: Remap value
+ */
+extern void hal_reo_remap_IX0(struct hal_soc *hal, uint32_t remap_val);
+
+/**
+ * hal_srng_set_hp_paddr() - Set physical address to SRNG head pointer
+ * @sring: SRNG pointer
+ * @paddr: physical address
+ */
+extern void hal_srng_set_hp_paddr(struct hal_srng *sring, uint64_t paddr);
+
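HAL_REO_REMAP_VAL() shifts the new destination into the IX0 bit field belonging to the original destination, so OR-ing one value per destination and writing the result with hal_reo_remap_IX0() reroutes REO traffic in a single register write. An illustrative composition (routing SW1-SW3 to SW4 is an assumption here, not taken from dp_ipa.c):

    /* Steer rx that would land on SW1-SW3 to SW4; leave TCL/FW/release alone. */
    static void example_remap_reo_to_sw4(struct hal_soc *hal)
    {
            uint32_t ix0 =
                    HAL_REO_REMAP_VAL(REO_REMAP_TCL, REO_REMAP_TCL) |
                    HAL_REO_REMAP_VAL(REO_REMAP_SW1, REO_REMAP_SW4) |
                    HAL_REO_REMAP_VAL(REO_REMAP_SW2, REO_REMAP_SW4) |
                    HAL_REO_REMAP_VAL(REO_REMAP_SW3, REO_REMAP_SW4) |
                    HAL_REO_REMAP_VAL(REO_REMAP_SW4, REO_REMAP_SW4) |
                    HAL_REO_REMAP_VAL(REO_REMAP_RELEASE, REO_REMAP_RELEASE) |
                    HAL_REO_REMAP_VAL(REO_REMAP_FW, REO_REMAP_FW);

            hal_reo_remap_IX0(hal, ix0);
    }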
 /**
  * hal_srng_cleanup - Deinitialize HW SRNG ring.
  * @hal_soc: Opaque HAL SOC handle
@@ -939,11 +975,6 @@ static inline qdf_dma_addr_t hal_srng_get_hp_addr(void *hal_soc, void *hal_ring)
 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
 
-	if (!(srng->flags & HAL_SRNG_LMAC_RING)) {
-		/* Currently this interface is required only for LMAC rings */
-		return (qdf_dma_addr_t)NULL;
-	}
-
 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
 		return hal->shadow_wrptr_mem_paddr +
 		  ((unsigned long)(srng->u.src_ring.hp_addr) -
@@ -967,11 +998,6 @@ static inline qdf_dma_addr_t hal_srng_get_tp_addr(void *hal_soc, void *hal_ring)
 	struct hal_srng *srng = (struct hal_srng *)hal_ring;
 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
 
-	if (!(srng->flags & HAL_SRNG_LMAC_RING)) {
-		/* Currently this interface is required only for LMAC rings */
-		return (qdf_dma_addr_t)NULL;
-	}
-
 	if (srng->ring_dir == HAL_SRNG_SRC_RING) {
 		return hal->shadow_rdptr_mem_paddr +
 			((unsigned long)(srng->u.src_ring.tp_addr) -

+ 17 - 10
hal/wifi3.0/hal_internal.h

@@ -150,18 +150,25 @@ enum hal_srng_ring_id {
 	HAL_SRNG_UMAC_ID_END = 127,
 	/* LMAC rings - The following set will be replicated for each LMAC */
 	HAL_SRNG_LMAC1_ID_START = 128,
-	HAL_SRNG_WMAC1_SW2RXDMA0_BUF = HAL_SRNG_LMAC1_ID_START,
-	HAL_SRNG_WMAC1_SW2RXDMA1_BUF = 129,
-	HAL_SRNG_WMAC1_SW2RXDMA2_BUF = 130,
-	HAL_SRNG_WMAC1_SW2RXDMA0_STATBUF = 131,
-	HAL_SRNG_WMAC1_SW2RXDMA1_STATBUF = 132,
-	HAL_SRNG_WMAC1_RXDMA2SW0 = 133,
-	HAL_SRNG_WMAC1_RXDMA2SW1 = 134,
-	HAL_SRNG_WMAC1_SW2RXDMA1_DESC = 135,
+	HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 = HAL_SRNG_LMAC1_ID_START,
+#ifdef IPA_OFFLOAD
+	HAL_SRNG_WMAC1_SW2RXDMA0_BUF1 = (HAL_SRNG_LMAC1_ID_START + 1),
+	HAL_SRNG_WMAC1_SW2RXDMA0_BUF2 = (HAL_SRNG_LMAC1_ID_START + 2),
+	HAL_SRNG_WMAC1_SW2RXDMA1_BUF = (HAL_SRNG_WMAC1_SW2RXDMA0_BUF2 + 1),
+#else
+	HAL_SRNG_WMAC1_SW2RXDMA1_BUF = (HAL_SRNG_WMAC1_SW2RXDMA0_BUF0 + 1),
+#endif
+	HAL_SRNG_WMAC1_SW2RXDMA2_BUF = (HAL_SRNG_WMAC1_SW2RXDMA1_BUF + 1),
+	HAL_SRNG_WMAC1_SW2RXDMA0_STATBUF = (HAL_SRNG_WMAC1_SW2RXDMA2_BUF + 1),
+	HAL_SRNG_WMAC1_SW2RXDMA1_STATBUF =
+					(HAL_SRNG_WMAC1_SW2RXDMA0_STATBUF + 1),
+	HAL_SRNG_WMAC1_RXDMA2SW0 = (HAL_SRNG_WMAC1_SW2RXDMA1_STATBUF + 1),
+	HAL_SRNG_WMAC1_RXDMA2SW1 = (HAL_SRNG_WMAC1_RXDMA2SW0 + 1),
+	HAL_SRNG_WMAC1_SW2RXDMA1_DESC = (HAL_SRNG_WMAC1_RXDMA2SW1 + 1),
 #ifdef WLAN_FEATURE_CIF_CFR
-	HAL_SRNG_WIFI_POS_SRC_DMA_RING = 136,
+	HAL_SRNG_WIFI_POS_SRC_DMA_RING = (HAL_SRNG_WMAC1_SW2RXDMA1_DESC + 1),
 #endif
-	/* 137-142 unused */
+	/* remaining IDs up to 142 unused */
 	HAL_SRNG_LMAC1_ID_END = 143
 };
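Because the two extra IPA refill-ring IDs shift every subsequent LMAC ring ID, the values are now chained off their predecessors instead of hard-coded. A compile-time guard of the following kind (illustrative only, not part of this change) keeps the chain inside the reserved 128-143 LMAC window:

    /* Last chained LMAC ring ID must stay below HAL_SRNG_LMAC1_ID_END. */
    #ifdef WLAN_FEATURE_CIF_CFR
    _Static_assert(HAL_SRNG_WIFI_POS_SRC_DMA_RING < HAL_SRNG_LMAC1_ID_END,
                   "LMAC SRNG IDs overflow the LMAC1 window");
    #else
    _Static_assert(HAL_SRNG_WMAC1_SW2RXDMA1_DESC < HAL_SRNG_LMAC1_ID_END,
                   "LMAC SRNG IDs overflow the LMAC1 window");
    #endif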
 

+ 41 - 18
hal/wifi3.0/hal_srng.c

@@ -422,12 +422,12 @@ static struct hal_hw_srng_config hw_srng_table[] = {
 		},
 	},
 	{ /* RXDMA_BUF */
-		.start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA0_BUF,
+		.start_ring_id = HAL_SRNG_WMAC1_SW2RXDMA0_BUF0,
+#ifdef IPA_OFFLOAD
+		.max_rings = 3,
+#else
 		.max_rings = 2,
-		/* TODO: Check if the additional IPA buffer ring needs to be
-		 * setup here (in which case max_rings should be set to 2),
-		 * or it will be setup by IPA host driver
-		 */
+#endif
 		.entry_size = sizeof(struct wbm_buffer_ring) >> 2,
 		.lmac_ring = TRUE,
 		.ring_dir = HAL_SRNG_SRC_RING,
@@ -578,8 +578,8 @@ static void hal_update_srng_hp_tp_address(void *hal_soc,
 }
 
 QDF_STATUS hal_set_one_shadow_config(void *hal_soc,
-				      int ring_type,
-				      int ring_num)
+				     int ring_type,
+				     int ring_num)
 {
 	uint32_t target_register;
 	struct hal_soc *hal = (struct hal_soc *)hal_soc;
@@ -608,9 +608,9 @@ QDF_STATUS hal_set_one_shadow_config(void *hal_soc,
 				      ring_num);
 
 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
-			"%s: target_reg %x, shadow_index %x, ring_type %d, ring num %d\n",
-		       __func__, target_register, shadow_config_index,
-		       ring_type, ring_num);
+	    "%s: target_reg %x, shadow_index %x, ring_type %d, ring num %d\n",
+	    __func__, target_register, shadow_config_index,
+	    ring_type, ring_num);
 
 	return QDF_STATUS_SUCCESS;
 }
@@ -813,8 +813,6 @@ extern void hal_detach(void *hal_soc)
 	return;
 }
 
-
-
 /**
  * hal_srng_src_hw_init - Private function to initialize SRNG
  * source ring HW
@@ -955,6 +953,31 @@ static inline void hal_ce_dst_setup(struct hal_soc *hal, struct hal_srng *srng,
 	HAL_REG_WRITE(hal, reg_addr, reg_val);
 }
 
+/**
+ * hal_reo_remap_IX0() - Remap REO ring destination
+ * @hal: HAL SOC handle
+ * @remap_val: Remap value
+ */
+void hal_reo_remap_IX0(struct hal_soc *hal, uint32_t remap_val)
+{
+	uint32_t reg_offset = HWIO_REO_R0_DESTINATION_RING_CTRL_IX_0_ADDR(
+				SEQ_WCSS_UMAC_REO_REG_OFFSET);
+	HAL_REG_WRITE(hal, reg_offset, remap_val);
+}
+
+/**
+ * hal_srng_set_hp_paddr() - Set physical address to SRNG head pointer
+ * @sring: SRNG pointer
+ * @paddr: physical address
+ */
+void hal_srng_set_hp_paddr(struct hal_srng *sring,
+				uint64_t paddr)
+{
+	SRNG_DST_REG_WRITE(sring, HP_ADDR_LSB,
+			   paddr & 0xffffffff);
+	SRNG_DST_REG_WRITE(sring, HP_ADDR_MSB,
+			   paddr >> 32);
+}
 /**
  * hal_srng_dst_hw_init - Private function to initialize SRNG
  * destination ring HW
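hal_srng_set_hp_paddr() re-targets where the hardware posts a destination ring's head-pointer update, which is how the WDI 3.0 path points ring updates at an IPA doorbell register instead of host memory. A hedged sketch of that use (the function shown is hypothetical; the real wiring is done by dp_ipa_set_doorbell_paddr() in dp_ipa.c, not part of this excerpt):

    /* Make HW post the IPA-bound REO ring's head pointer to the IPA doorbell. */
    static void example_map_rx_doorbell(struct dp_pdev *pdev,
                                        void *ipa_reo_dst_hal_srng)
    {
            struct hal_srng *srng = (struct hal_srng *)ipa_reo_dst_hal_srng;

            hal_srng_set_hp_paddr(srng,
                                  pdev->ipa_resource.rx_ready_doorbell_paddr);
    }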
@@ -1165,9 +1188,9 @@ void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
 
 			if (CHECK_SHADOW_REGISTERS) {
 				QDF_TRACE(QDF_MODULE_ID_TXRX,
-					  QDF_TRACE_LEVEL_ERROR,
-					  "%s: Ring (%d, %d) missing shadow config\n",
-					  __func__, ring_type, ring_num);
+				    QDF_TRACE_LEVEL_ERROR,
+				    "%s: Ring (%d, %d) missing shadow config\n",
+				    __func__, ring_type, ring_num);
 			}
 		} else {
 			hal_validate_shadow_register(hal,
@@ -1200,9 +1223,9 @@ void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
 
 			if (CHECK_SHADOW_REGISTERS) {
 				QDF_TRACE(QDF_MODULE_ID_TXRX,
-					  QDF_TRACE_LEVEL_ERROR,
-					  "%s: Ring (%d, %d) missing shadow config\n",
-					  __func__, ring_type, ring_num);
+				    QDF_TRACE_LEVEL_ERROR,
+				    "%s: Ring (%d, %d) missing shadow config\n",
+				    __func__, ring_type, ring_num);
 			}
 		} else {
 			hal_validate_shadow_register(hal,

+ 2 - 0
qdf/inc/qdf_mem.h

@@ -134,6 +134,8 @@ void qdf_mem_free_consistent(qdf_device_t osdev, void *dev, qdf_size_t size,
 
 void qdf_mem_zero_outline(void *buf, qdf_size_t size);
 
+void qdf_ether_addr_copy(void *dst_addr, const void *src_addr);
+
 /**
  * qdf_mem_cmp() - memory compare
  * @memory1: pointer to one location in memory to compare.

+ 24 - 0
qdf/linux/src/qdf_mem.c

@@ -1708,3 +1708,27 @@ void qdf_mem_exit(void)
 	qdf_mem_debug_exit();
 }
 EXPORT_SYMBOL(qdf_mem_exit);
+
+/**
+ * qdf_ether_addr_copy() - copy an Ethernet address
+ *
+ * @dst_addr: A six-byte array Ethernet address destination
+ * @src_addr: A six-byte array Ethernet address source
+ *
+ * Please note: dst & src must both be aligned to u16.
+ *
+ * Return: none
+ */
+void qdf_ether_addr_copy(void *dst_addr, const void *src_addr)
+{
+	if ((dst_addr == NULL) || (src_addr == NULL)) {
+		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+			  "%s called with NULL parameter, source:%p destination:%p",
+			  __func__, src_addr, dst_addr);
+		QDF_ASSERT(0);
+		return;
+	}
+	ether_addr_copy(dst_addr, src_addr);
+}
+EXPORT_SYMBOL(qdf_ether_addr_copy);
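A short usage example (the buffers are illustrative; the explicit alignment attribute satisfies the u16-alignment requirement noted above):

    /* Duplicate a MAC address with the qdf wrapper. */
    static void example_copy_mac(void)
    {
            uint8_t src[6] __attribute__((aligned(2))) =
                    { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
            uint8_t dst[6] __attribute__((aligned(2)));

            qdf_ether_addr_copy(dst, src);
    }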
+

+ 8 - 0
wlan_cfg/wlan_cfg.c

@@ -36,7 +36,11 @@
 #define WLAN_CFG_PER_PDEV_RX_RING 0
 #define NUM_RXDMA_RINGS_PER_PDEV 2
 #define WLAN_LRO_ENABLE 1
+#ifdef IPA_OFFLOAD
+#define WLAN_CFG_TX_RING_SIZE 2048
+#else
 #define WLAN_CFG_TX_RING_SIZE 512
+#endif
 #define WLAN_CFG_TX_COMP_RING_SIZE 1024
 
 /* Tx Descriptor and Tx Extension Descriptor pool sizes */
@@ -161,7 +165,11 @@
 
 #define WLAN_CFG_MAX_CLIENTS 64
 
+#ifdef IPA_OFFLOAD
+#define WLAN_CFG_PER_PDEV_TX_RING 0
+#else
 #define WLAN_CFG_PER_PDEV_TX_RING 1
+#endif
 #define WLAN_CFG_NUM_TCL_DATA_RINGS 3
 #define WLAN_CFG_NUM_REO_DEST_RING 4