Browse Source

qcacmn: Add Mcast enhancement feature support

Feature to convert multicast packets and send them as unicast frames.

Change-Id: I64d44ac337bd366c6799226fd69f89cd3e46e65d
CRs-Fixed: 1116432
Ishank Jain 8 years ago
parent
commit
c838b133c1

+ 11 - 0
dp/inc/cdp_txrx_cmn_struct.h

@@ -307,6 +307,12 @@ typedef void (*ol_txrx_rx_mon_fp)(ol_osif_vdev_handle vdev,
 typedef int (*ol_txrx_proxy_arp_fp)(ol_osif_vdev_handle vdev,
 					    qdf_nbuf_t netbuf);
 
/**
 * ol_txrx_mcast_me_fp - function pointer for multicast enhancement
 * @vdev: OS shim vdev handle
 * @netbuf: multicast frame to be converted to unicast
 *
 * Registered by the OS shim (stored as vdev->me_convert) and invoked by the
 * datapath to hand a multicast frame up for mcast-to-ucast conversion.
 * NOTE(review): a non-zero return appears to mean "frame consumed by the
 * converter" (the caller then frees the original nbuf) — confirm against the
 * OS shim implementation.
 */
typedef int (*ol_txrx_mcast_me_fp)(ol_osif_vdev_handle vdev,
						qdf_nbuf_t netbuf);
+
 /**
  * ol_txrx_stats_callback - statistics notify callback
  */
@@ -379,6 +385,7 @@ struct ol_txrx_ops {
 
 	/* proxy arp function pointer - specified by OS shim, stored by txrx */
 	ol_txrx_proxy_arp_fp      proxy_arp;
+	ol_txrx_mcast_me_fp          me_convert;
 };
 
 /**
@@ -692,6 +699,10 @@ struct cdp_tx_ingress_stats {
 		uint32_t dropped_send_fail;
 		/* total unicast packets transmitted */
 		uint32_t ucast;
+		/* Segment allocation failure */
+		uint32_t fail_seg_alloc;
+		/* NBUF clone failure */
+		uint32_t clone_fail;
 	} mcast_en;
 
 	/* Packets dropped on the Tx side */

+ 6 - 0
dp/wifi3.0/dp_internal.h

@@ -269,4 +269,10 @@ extern QDF_STATUS dp_reo_send_cmd(struct dp_soc *soc,
 
 extern void dp_reo_status_ring_handler(struct dp_soc *soc);
 void dp_aggregate_vdev_stats(struct dp_vdev *vdev);
+uint16_t dp_tx_me_send_convert_ucast(struct cdp_vdev *vdev_handle,
+		qdf_nbuf_t nbuf, uint8_t newmac[][DP_MAC_ADDR_LEN],
+		uint8_t new_mac_cnt);
+void dp_tx_me_alloc_descriptor(struct cdp_pdev *pdev);
+
+void dp_tx_me_free_descriptor(struct cdp_pdev *pdev);
 #endif /* #ifndef _DP_INTERNAL_H_ */

+ 10 - 1
dp/wifi3.0/dp_main.c

@@ -1599,6 +1599,7 @@ static struct cdp_vdev *dp_vdev_attach_wifi3(struct cdp_pdev *txrx_pdev,
 	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
 	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
 	vdev->dscp_tid_map_id = 0;
+	vdev->mcast_enhancement_en = 0;
 
 	/* TODO: Initialize default HTT meta data that will be used in
 	 * TCL descriptors for packets transmitted from this VDEV
@@ -1655,6 +1656,8 @@ static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
 	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
 #endif
 #endif
+	vdev->me_convert = txrx_ops->me_convert;
+
 	/* TODO: Enable the following once Tx code is integrated */
 	txrx_ops->tx.tx = dp_tx_send;
 
@@ -3050,6 +3053,8 @@ static void dp_set_vdev_param(struct cdp_vdev *vdev_handle,
 	switch (param) {
 	case CDP_ENABLE_NAWDS:
 		vdev->nawds_enabled = val;
+	case CDP_ENABLE_MCAST_EN:
+		vdev->mcast_enhancement_en = val;
 	default:
 		break;
 	}
@@ -3355,7 +3360,11 @@ static struct cdp_ctrl_ops dp_ops_ctrl = {
 };
 
 static struct cdp_me_ops dp_ops_me = {
-	/* TODO */
+#ifdef ATH_SUPPORT_IQUE
+	.tx_me_alloc_descriptor = dp_tx_me_alloc_descriptor,
+	.tx_me_free_descriptor = dp_tx_me_free_descriptor,
+	.tx_me_convert_ucast = dp_tx_me_send_convert_ucast,
+#endif
 };
 
 static struct cdp_mon_ops dp_ops_mon = {

+ 246 - 6
dp/wifi3.0/dp_tx.c

@@ -566,6 +566,8 @@ failure:
 	DP_STATS_INC(vdev, tx_i.dropped.desc_na, 1);
 	DP_STATS_INC_PKT(vdev, tx_i.dropped.dropped_pkt, 1,
 			qdf_nbuf_len(nbuf));
+	if (qdf_unlikely(tx_desc->flags & DP_TX_DESC_FLAG_ME))
+		dp_tx_me_free_buf(pdev, tx_desc->me_buffer);
 	dp_tx_desc_release(tx_desc, desc_pool_id);
 	return NULL;
 }
@@ -962,6 +964,9 @@ qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
 		return nbuf;
 	}
 
+	if (msdu_info->frm_type == dp_tx_frm_me)
+		nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
+
 	i = 0;
 
 	/*
@@ -976,6 +981,12 @@ qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
 		tx_desc = dp_tx_prepare_desc(vdev, nbuf, msdu_info,
 				tx_q->desc_pool_id);
 
+		if (msdu_info->frm_type == dp_tx_frm_me) {
+			tx_desc->me_buffer =
+				msdu_info->u.sg_info.curr_seg->frags[0].vaddr;
+			tx_desc->flags |= DP_TX_DESC_FLAG_ME;
+		}
+
 		if (!tx_desc) {
 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
 				  "%s Tx_desc prepare Fail vdev %p queue %d\n",
@@ -985,6 +996,9 @@ qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
 					tx_i.dropped.dropped_pkt, 1,
 					qdf_nbuf_len(nbuf));
 
+			if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
+				dp_tx_me_free_buf(pdev, tx_desc->me_buffer);
+			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
 			goto done;
 		}
 
@@ -1003,6 +1017,10 @@ qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
 			DP_STATS_INC_PKT(pdev,
 					tx_i.dropped.dropped_pkt, 1,
 					qdf_nbuf_len(nbuf));
+
+			if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
+				dp_tx_me_free_buf(pdev, tx_desc->me_buffer);
+
 			dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
 			goto done;
 		}
@@ -1041,14 +1059,14 @@ qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
 		 * each converted frame (for a client) is represented as
 		 * 1 segment
 		 */
-		if (msdu_info->frm_type == dp_tx_frm_sg) {
+		if ((msdu_info->frm_type == dp_tx_frm_sg) ||
+				(msdu_info->frm_type == dp_tx_frm_me)) {
 			if (msdu_info->u.sg_info.curr_seg->next) {
 				msdu_info->u.sg_info.curr_seg =
 					msdu_info->u.sg_info.curr_seg->next;
 				nbuf = msdu_info->u.sg_info.curr_seg->nbuf;
 			}
 		}
-
 		i++;
 	}
 
@@ -1344,21 +1362,25 @@ qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
 		goto send_multiple;
 	}
 
+#ifdef ATH_SUPPORT_IQUE
 	/* Mcast to Ucast Conversion*/
-	if (qdf_unlikely(vdev->mcast_enhancement_en == 1)) {
+	if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
 		eh = (struct ether_header *)qdf_nbuf_data(nbuf);
 		if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
-			nbuf = dp_tx_prepare_me(vdev, nbuf, &msdu_info);
 			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
 				  "%s Mcast frm for ME %p\n", __func__, vdev);
 
 			DP_STATS_INC_PKT(vdev,
 					tx_i.mcast_en.mcast_pkt, 1,
 					qdf_nbuf_len(nbuf));
-
-			goto send_multiple;
+			if (dp_tx_prepare_send_me(vdev, nbuf)) {
+				qdf_nbuf_free(nbuf);
+				return NULL;
+			}
+			return nbuf;
 		}
 	}
+#endif
 
 	/* RAW */
 	if (qdf_unlikely(vdev->tx_encap_type == htt_cmn_pkt_type_raw)) {
@@ -1781,6 +1803,10 @@ static void dp_tx_comp_process_desc(struct dp_soc *soc,
 		DP_HIST_PACKET_COUNT_INC(desc->pdev->pdev_id);
 		DP_TRACE(NONE, "pdev_id: %u", desc->pdev->pdev_id);
 		next = desc->next;
+
+		if (desc->flags & DP_TX_DESC_FLAG_ME)
+			dp_tx_me_free_buf(desc->pdev, desc->me_buffer);
+
 		dp_tx_desc_release(desc, desc->pool_id);
 		desc = next;
 	}
@@ -2167,3 +2193,217 @@ fail:
 	dp_tx_soc_detach(soc);
 	return QDF_STATUS_E_RESOURCES;
 }
+
+/*
+ * dp_tx_me_mem_free(): Function to free allocated memory in mcast enahncement
+ * pdev: pointer to DP PDEV structure
+ * seg_info_head: Pointer to the head of list
+ *
+ * return: void
+ */
+static inline void dp_tx_me_mem_free(struct dp_pdev *pdev,
+		struct dp_tx_seg_info_s *seg_info_head)
+{
+	struct dp_tx_me_buf_t *mc_uc_buf;
+	struct dp_tx_seg_info_s *seg_info_new = NULL;
+	qdf_nbuf_t nbuf = NULL;
+	uint64_t phy_addr;
+
+	while (seg_info_head) {
+		nbuf = seg_info_head->nbuf;
+		mc_uc_buf = (struct dp_tx_me_buf_t *)
+			seg_info_new->frags[0].vaddr;
+		phy_addr = seg_info_head->frags[0].paddr_hi;
+		phy_addr =  (phy_addr << 32) | seg_info_head->frags[0].paddr_lo;
+		qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
+				phy_addr,
+				QDF_DMA_TO_DEVICE , DP_MAC_ADDR_LEN);
+		dp_tx_me_free_buf(pdev, mc_uc_buf);
+		qdf_nbuf_free(nbuf);
+		seg_info_new = seg_info_head;
+		seg_info_head = seg_info_head->next;
+		qdf_mem_free(seg_info_new);
+	}
+}
+
+/**
+ * dp_tx_me_send_convert_ucast(): fuction to convert multicast to unicast
+ * @vdev: DP VDEV handle
+ * @nbuf: Multicast nbuf
+ * @newmac: Table of the clients to which packets have to be sent
+ * @new_mac_cnt: No of clients
+ *
+ * return: no of converted packets
+ */
+uint16_t
+dp_tx_me_send_convert_ucast(struct cdp_vdev *vdev_handle, qdf_nbuf_t nbuf,
+		uint8_t newmac[][DP_MAC_ADDR_LEN], uint8_t new_mac_cnt)
+{
+	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
+	struct dp_pdev *pdev = vdev->pdev;
+	struct ether_header *eh;
+	uint8_t *data;
+	uint16_t len;
+
+	/* reference to frame dst addr */
+	uint8_t *dstmac;
+	/* copy of original frame src addr */
+	uint8_t srcmac[DP_MAC_ADDR_LEN];
+
+	/* local index into newmac */
+	uint8_t new_mac_idx = 0;
+	struct dp_tx_me_buf_t *mc_uc_buf;
+	qdf_nbuf_t  nbuf_clone;
+	struct dp_tx_msdu_info_s msdu_info;
+	struct dp_tx_seg_info_s *seg_info_head = NULL;
+	struct dp_tx_seg_info_s *seg_info_tail = NULL;
+	struct dp_tx_seg_info_s *seg_info_new;
+	struct dp_tx_frag_info_s data_frag;
+	qdf_dma_addr_t paddr_data;
+	qdf_dma_addr_t paddr_mcbuf = 0;
+	uint8_t empty_entry_mac[DP_MAC_ADDR_LEN] = {0};
+	QDF_STATUS status;
+
+	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
+
+	eh = (struct ether_header *) nbuf;
+	qdf_mem_copy(srcmac, eh->ether_shost, DP_MAC_ADDR_LEN);
+
+	len = qdf_nbuf_len(nbuf);
+
+	data = qdf_nbuf_data(nbuf);
+
+	status = qdf_nbuf_map(vdev->osdev, nbuf,
+			QDF_DMA_TO_DEVICE);
+
+	if (status) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+				"Mapping failure Error:%d", status);
+		DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
+		return 0;
+	}
+
+	paddr_data = qdf_nbuf_get_frag_paddr(nbuf, 0) + IEEE80211_ADDR_LEN;
+
+	/*preparing data fragment*/
+	data_frag.vaddr = qdf_nbuf_data(nbuf) + IEEE80211_ADDR_LEN;
+	data_frag.paddr_lo = (uint32_t)paddr_data;
+	data_frag.paddr_hi = ((uint64_t)paddr_data & 0xffffffff00000000) >> 32;
+	data_frag.len = len - DP_MAC_ADDR_LEN;
+
+	for (new_mac_idx = 0; new_mac_idx < new_mac_cnt; new_mac_idx++) {
+		dstmac = newmac[new_mac_idx];
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
+				"added mac addr (%pM)", dstmac);
+
+		/* Check for NULL Mac Address */
+		if (!qdf_mem_cmp(dstmac, empty_entry_mac, DP_MAC_ADDR_LEN))
+			continue;
+
+		/* frame to self mac. skip */
+		if (!qdf_mem_cmp(dstmac, srcmac, DP_MAC_ADDR_LEN))
+			continue;
+
+		/*
+		 * TODO: optimize to avoid malloc in per-packet path
+		 * For eg. seg_pool can be made part of vdev structure
+		 */
+		seg_info_new = qdf_mem_malloc(sizeof(*seg_info_new));
+
+		if (!seg_info_new) {
+			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+					"alloc failed");
+			DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc, 1);
+			goto fail_seg_alloc;
+		}
+
+		mc_uc_buf = dp_tx_me_alloc_buf(pdev);
+		if (mc_uc_buf == NULL)
+			goto fail_buf_alloc;
+
+		/*
+		 * TODO: Check if we need to clone the nbuf
+		 * Or can we just use the reference for all cases
+		 */
+		if (new_mac_idx < (new_mac_cnt - 1)) {
+			nbuf_clone = qdf_nbuf_clone((qdf_nbuf_t)nbuf);
+			if (nbuf_clone == NULL) {
+				DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail, 1);
+				goto fail_clone;
+			}
+		} else {
+			/*
+			 * Update the ref
+			 * to account for frame sent without cloning
+			 */
+			qdf_nbuf_ref(nbuf);
+			nbuf_clone = nbuf;
+		}
+
+		qdf_mem_copy(mc_uc_buf->data, dstmac, DP_MAC_ADDR_LEN);
+
+		status = qdf_mem_map_nbytes_single(vdev->osdev, mc_uc_buf->data,
+				QDF_DMA_TO_DEVICE, DP_MAC_ADDR_LEN,
+				&paddr_mcbuf);
+
+		if (status) {
+			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+					"Mapping failure Error:%d", status);
+			DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
+			goto fail_map;
+		}
+
+		seg_info_new->frags[0].vaddr =  (uint8_t *)mc_uc_buf;
+		seg_info_new->frags[0].paddr_lo = (uint32_t) paddr_mcbuf;
+		seg_info_new->frags[0].paddr_hi =
+			((u64)paddr_mcbuf & 0xffffffff00000000) >> 32;
+		seg_info_new->frags[0].len = DP_MAC_ADDR_LEN;
+
+		seg_info_new->frags[1] = data_frag;
+		seg_info_new->nbuf = nbuf_clone;
+		seg_info_new->frag_cnt = 2;
+		seg_info_new->total_len = len;
+
+		seg_info_new->next = NULL;
+
+		if (seg_info_head == NULL)
+			seg_info_head = seg_info_new;
+		else
+			seg_info_tail->next = seg_info_new;
+
+		seg_info_tail = seg_info_new;
+	}
+
+	if (!seg_info_head)
+		return 0;
+
+	msdu_info.u.sg_info.curr_seg = seg_info_head;
+	msdu_info.num_seg = new_mac_cnt;
+	msdu_info.frm_type = dp_tx_frm_me;
+
+	DP_STATS_INC(vdev, tx_i.mcast_en.ucast, new_mac_cnt);
+	dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
+
+	while (seg_info_head->next) {
+		seg_info_new = seg_info_head;
+		seg_info_head = seg_info_head->next;
+		qdf_mem_free(seg_info_new);
+	}
+	qdf_mem_free(seg_info_head);
+
+	return new_mac_cnt;
+
+fail_map:
+	qdf_nbuf_free(nbuf_clone);
+
+fail_clone:
+	dp_tx_me_free_buf(pdev, mc_uc_buf);
+
+fail_buf_alloc:
+	qdf_mem_free(seg_info_new);
+
+fail_seg_alloc:
+	dp_tx_me_mem_free(pdev, seg_info_head);
+	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
+	return 0;
+}

+ 4 - 5
dp/wifi3.0/dp_tx.h

@@ -32,6 +32,7 @@
 #define DP_TX_DESC_FLAG_MESH		0x10
 #define DP_TX_DESC_FLAG_QUEUED_TX		0x20
 #define DP_TX_DESC_FLAG_COMPLETED_TX		0x40
+#define DP_TX_DESC_FLAG_ME		0x80
 
 #define DP_TX_FREE_SINGLE_BUF(soc, buf)                  \
 do {                                                           \
@@ -154,6 +155,9 @@ qdf_nbuf_t dp_tx_send(void *data_vdev, qdf_nbuf_t nbuf);
 uint32_t dp_tx_comp_handler(struct dp_soc *soc, uint32_t ring_id,
 		uint32_t budget);
 
+int32_t
+dp_tx_prepare_send_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
+
 /* TODO TX_FEATURE_NOT_YET */
 static inline void dp_tx_comp_process_exception(struct dp_tx_desc_s *tx_desc)
 {
@@ -163,10 +167,5 @@ static inline QDF_STATUS dp_tx_flow_control(struct dp_vdev *vdev)
 {
 	return  QDF_STATUS_SUCCESS;
 }
-static inline qdf_nbuf_t dp_tx_prepare_me(struct dp_vdev *vdev,
-		qdf_nbuf_t nbuf, struct dp_tx_msdu_info_s *msdu_info)
-{
-	return nbuf;
-}
 /* TODO TX_FEATURE_NOT_YET */
 #endif

+ 42 - 0
dp/wifi3.0/dp_tx_desc.h

@@ -41,6 +41,7 @@
 #define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
 #define TX_DESC_LOCK_LOCK(lock)    qdf_spin_lock(lock)
 #define TX_DESC_LOCK_UNLOCK(lock)  qdf_spin_unlock(lock)
+#define MAX_POOL_BUFF_COUNT 10000
 
 QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
 		uint16_t num_elem);
@@ -303,4 +304,45 @@ static inline void dp_tx_tso_desc_free(struct dp_soc *soc,
 	soc->tx_tso_desc[pool_id].num_free++;
 	TX_DESC_LOCK_UNLOCK(&soc->tx_tso_desc[pool_id].lock);
 }
+/*
+ * dp_tx_me_alloc_buf() Alloc descriptor from me pool
+ * @pdev DP_PDEV handle for datapath
+ *
+ * Return:dp_tx_me_buf_t(buf)
+ */
+static inline struct dp_tx_me_buf_t*
+dp_tx_me_alloc_buf(struct dp_pdev *pdev)
+{
+	struct dp_tx_me_buf_t *buf = NULL;
+	qdf_spin_lock_bh(&pdev->tx_mutex);
+	if (pdev->me_buf.freelist) {
+		buf = pdev->me_buf.freelist;
+		pdev->me_buf.freelist = pdev->me_buf.freelist->next;
+		pdev->me_buf.buf_in_use++;
+	} else {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+				"Error allocating memory in pool");
+		qdf_spin_unlock_bh(&pdev->tx_mutex);
+		return NULL;
+	}
+	qdf_spin_unlock_bh(&pdev->tx_mutex);
+	return buf;
+}
+
+/*
+ * dp_tx_me_free_buf() - Free me descriptor and add it to pool
+ * @pdev: DP_PDEV handle for datapath
+ * @buf : Allocated ME BUF
+ *
+ * Return:void
+ */
+static inline void
+dp_tx_me_free_buf(struct dp_pdev *pdev, struct dp_tx_me_buf_t *buf)
+{
+	qdf_spin_lock_bh(&pdev->tx_mutex);
+	buf->next = pdev->me_buf.freelist;
+	pdev->me_buf.freelist = buf;
+	pdev->me_buf.buf_in_use--;
+	qdf_spin_unlock_bh(&pdev->tx_mutex);
+}
 #endif /* DP_TX_DESC_H */

+ 192 - 0
dp/wifi3.0/dp_tx_me.c

@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "dp_types.h"
+#include "qdf_nbuf.h"
+#include "qdf_atomic.h"
+#include "qdf_types.h"
+#include "dp_tx.h"
+#include "dp_tx_desc.h"
+#include "dp_internal.h"
+
+#ifdef ATH_SUPPORT_IQUE
+#define MAX_ME_BUF_CHUNK 1424
+#define ME_US_TO_SEC(_x) ((_x)/(1000 * 1000))
+#define ME_CLEAN_WAIT_TIMEOUT (200000) /*200ms*/
+#define ME_CLEAN_WAIT_COUNT 400
+
+/**
+ * dp_tx_me_init():Initialize ME buffer ppol
+ * @pdev: DP PDEV handle
+ *
+ * Return:0 on Succes 1 on failure
+ */
+static inline uint16_t
+dp_tx_me_init(struct dp_pdev *pdev)
+{
+
+	uint16_t i, mc_uc_buf_len, num_pool_elems;
+	uint32_t pool_size;
+
+	struct dp_tx_me_buf_t *p;
+
+	mc_uc_buf_len = sizeof(struct dp_tx_me_buf_t);
+
+	num_pool_elems = MAX_ME_BUF_CHUNK;
+	/* Add flow control buffer count */
+	pool_size = (mc_uc_buf_len) * num_pool_elems;
+	pdev->me_buf.size = mc_uc_buf_len;
+	if (pdev->me_buf.vaddr == NULL) {
+		qdf_spin_lock_bh(&pdev->tx_mutex);
+		pdev->me_buf.vaddr = qdf_mem_malloc(pool_size);
+		if (pdev->me_buf.vaddr == NULL) {
+			qdf_spin_unlock_bh(&pdev->tx_mutex);
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
+				"Error allocating memory pool");
+			return 1;
+		}
+		pdev->me_buf.buf_in_use = 0;
+		pdev->me_buf.freelist =
+			(struct dp_tx_me_buf_t *) pdev->me_buf.vaddr;
+		/*
+		 * me_buf looks like this
+		 * |=======+==========================|
+		 * | ptr   |         Dst MAC          |
+		 * |=======+==========================|
+		 */
+		p = pdev->me_buf.freelist;
+		for (i = 0; i < num_pool_elems-1; i++) {
+			p->next = (struct dp_tx_me_buf_t *)
+				((char *)p + pdev->me_buf.size +
+				sizeof(struct dp_tx_me_buf_t));
+			p = p->next;
+		}
+		p->next = NULL;
+		qdf_spin_unlock_bh(&pdev->tx_mutex);
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
+				"ME Pool succesfully initialized vaddr - %x \
+				paddr - %x\n num_elems = %d buf_size - %d"
+				"pool_size = %d",
+				pdev->me_buf.vaddr,
+				(unsigned int)pdev->me_buf.paddr,
+				(unsigned int)num_pool_elems,
+				(unsigned int)pdev->me_buf.size,
+				(unsigned int)pool_size);
+	} else {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
+				"ME Already Enabled!!");
+	}
+	return 0;
+}
+
+/**
+ * dp_tx_me_alloc_descriptor():Allocate ME descriptor
+ * @pdev_handle: DP PDEV handle
+ *
+ * Return:void
+ */
+void
+dp_tx_me_alloc_descriptor(struct cdp_pdev *pdev_handle)
+{
+	struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle;
+	if (qdf_atomic_read(&pdev->mc_num_vap_attached) == 0) {
+		dp_tx_me_init(pdev);
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
+				FL("Enable MCAST_TO_UCAST "));
+	}
+	qdf_atomic_inc(&pdev->mc_num_vap_attached);
+}
+
/**
 * dp_tx_me_exit() - Free memory and other cleanup required for
 * multicast unicast conversion
 * @pdev: DP_PDEV handle
 *
 * Drains in-flight ME frames (polling buf_in_use with the lock dropped
 * across each sleep), then frees the pool. Asserts if frames are still
 * pending after ME_CLEAN_WAIT_COUNT sleeps.
 *
 * Return: void
 */
static inline void
dp_tx_me_exit(struct dp_pdev *pdev)
{
	/* Add flow control buffer count */
	/* total wall-clock budget of the drain loop, in seconds (for the
	 * log message below)
	 */
	uint32_t wait_time = ME_US_TO_SEC(ME_CLEAN_WAIT_TIMEOUT *
			ME_CLEAN_WAIT_COUNT);

	if (pdev->me_buf.vaddr) {
		uint16_t wait_cnt = 0;
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				"Disabling Mcastenhance"
				"This may take some time");
		qdf_spin_lock_bh(&pdev->tx_mutex);
		/* Release the lock while sleeping so tx-completion can call
		 * dp_tx_me_free_buf() and decrement buf_in_use; re-acquire
		 * before re-reading the counter.
		 */
		while ((pdev->me_buf.buf_in_use > 0) &&
				(wait_cnt < ME_CLEAN_WAIT_COUNT)) {
			qdf_spin_unlock_bh(&pdev->tx_mutex);
			OS_SLEEP(ME_CLEAN_WAIT_TIMEOUT);
			wait_cnt++;
			qdf_spin_lock_bh(&pdev->tx_mutex);
		}
		/* NOTE(review): this asserts while tx_mutex is held — the
		 * lock is never released on this path; confirm intended.
		 */
		if (pdev->me_buf.buf_in_use > 0) {
			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
					"Tx-comp pending for %d "
					"ME frames after waiting %ds!!\n",
					pdev->me_buf.buf_in_use, wait_time);
			qdf_assert_always(0);
		}

		qdf_mem_free(pdev->me_buf.vaddr);
		pdev->me_buf.vaddr = NULL;
		pdev->me_buf.freelist = NULL;
		qdf_spin_unlock_bh(&pdev->tx_mutex);
	} else {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
				"ME Already Disabled !!!");
	}
}
+
+/**
+ * dp_tx_me_free_descriptor():free ME descriptor
+ * @pdev_handle:DP_PDEV handle
+ *
+ * Return:void
+ */
+void
+dp_tx_me_free_descriptor(struct cdp_pdev *pdev_handle)
+{
+	struct dp_pdev *pdev = (struct dp_pdev *) pdev_handle;
+	qdf_atomic_dec(&pdev->mc_num_vap_attached);
+	if (atomic_read(&pdev->mc_num_vap_attached) == 0) {
+		dp_tx_me_exit(pdev);
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
+				"Disable MCAST_TO_UCAST");
+	}
+}
+
+/**
+ * dp_tx_prepare_send_me(): Call to the umac to get the list of clients
+ * @vdev: DP VDEV handle
+ * @nbuf: Multicast buffer
+ *
+ * Return: no of packets transmitted
+ */
+int32_t
+dp_tx_prepare_send_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
+{
+	if (vdev->me_convert)
+		return vdev->me_convert(vdev->osif_vdev, nbuf);
+	return 0;
+}
+#endif

+ 29 - 6
dp/wifi3.0/dp_types.h

@@ -523,7 +523,7 @@ struct dp_soc {
 	/* HAL SOC handle */
 	void *hal_soc;
 
-    /* DP Interrupts */
+	/* DP Interrupts */
 	struct dp_intr intr_ctx[DP_MAX_INTERRUPT_CONTEXTS];
 
 	/* REO destination rings */
@@ -610,10 +610,9 @@ struct dp_soc {
 	struct {
 		int size;
 		uint32_t paddr;
-		char *vaddr;
+		uint32_t *vaddr;
 		struct dp_tx_me_buf_t *freelist;
 		int buf_in_use;
-		int nonpool_buf_in_use;
 		qdf_dma_mem_context(memctx);
 	} me_buf;
 
@@ -625,9 +624,6 @@ struct dp_soc {
 	 */
 	DP_MUTEX_TYPE peer_ref_mutex;
 
-	/* Number of VAPs with mcast enhancement enabled */
-	atomic_t mc_num_vap_attached;
-
 	/* maximum value for peer_id */
 	int max_peers;
 
@@ -748,6 +744,9 @@ struct dp_pdev {
 	/* monitor mode mutex */
 	qdf_spinlock_t mon_mutex;
 
+	/*tx_mutex for me*/
+	DP_MUTEX_TYPE tx_mutex;
+
 	/* Band steering  */
 	/* TBD */
 
@@ -782,6 +781,19 @@ struct dp_pdev {
 	uint32_t mon_ppdu_status;
 	struct cdp_mon_status rx_mon_recv_status;
 
+	/* pool addr for mcast enhance buff */
+	struct {
+		int size;
+		uint32_t paddr;
+		char *vaddr;
+		struct dp_tx_me_buf_t *freelist;
+		int buf_in_use;
+		qdf_dma_mem_context(memctx);
+	} me_buf;
+
+	/* Number of VAPs with mcast enhancement enabled */
+	qdf_atomic_t mc_num_vap_attached;
+
 	/* TBD */
 
 	/* map this pdev to a particular Reo Destination ring */
@@ -845,6 +857,7 @@ struct dp_vdev {
 	/* callback to hand rx monitor 802.11 MPDU to the OS shim */
 	ol_txrx_rx_mon_fp osif_rx_mon;
 
+	ol_txrx_mcast_me_fp me_convert;
 	/* deferred vdev deletion state */
 	struct {
 		/* VDEV delete pending */
@@ -993,4 +1006,14 @@ struct dp_invalid_peer_msg {
 	uint8_t vdev_id;
 };
 #endif
+
/*
 * struct dp_tx_me_buf_t - pool element for mcast-to-ucast conversion
 * @data: destination MAC address fragment prepended to a converted frame
 * @next: next free element in the pdev freelist (meaningful only while
 *        the element sits on the freelist)
 */
struct dp_tx_me_buf_t {
	uint8_t data[DP_MAC_ADDR_LEN];
	struct dp_tx_me_buf_t *next;
};
 #endif /* _DP_TYPES_H_ */