Merge "qca-wifi: Move WIN specific code out of cmndev"

Linux Build Service Account 6 years ago
parent
commit
02c71a8024

+ 81 - 1
dp/inc/cdp_txrx_extd_struct.h

@@ -79,10 +79,11 @@ struct cdp_rx_indication_mpdu_info {
 	uint32_t nf;
 	uint64_t timestamp;
 	uint32_t length;
-	uint8_t per_chain_rssi[MAX_CHAIN];
+	uint8_t per_chain_rssi[CDP_MAX_RX_CHAINS];
 	uint8_t channel;
 };
 
+#ifdef __KERNEL__
 /**
  * struct cdp_rx_indication_mpdu- Rx MPDU plus MPDU info
  * @mpdu_info: defined in cdp_rx_indication_mpdu_info
@@ -93,5 +94,84 @@ struct cdp_rx_indication_mpdu {
 	struct cdp_rx_indication_mpdu_info mpdu_info;
 	qdf_nbuf_t nbuf;
 };
+#endif /* __KERNEL__ */
 #endif /* WLAN_RX_PKT_CAPTURE_ENH */
+struct ol_ath_dbg_rx_rssi {
+	uint8_t     rx_rssi_pri20;
+	uint8_t     rx_rssi_sec20;
+	uint8_t     rx_rssi_sec40;
+	uint8_t     rx_rssi_sec80;
+};
+
+struct ol_ath_radiostats {
+	uint64_t    tx_beacon;
+	uint32_t    tx_buf_count;
+	int32_t     tx_mgmt;
+	int32_t     rx_mgmt;
+	uint32_t    rx_num_mgmt;
+	uint32_t    rx_num_ctl;
+	uint32_t    tx_rssi;
+	uint32_t    rx_rssi_comb;
+	struct      ol_ath_dbg_rx_rssi rx_rssi_chain0;
+	struct      ol_ath_dbg_rx_rssi rx_rssi_chain1;
+	struct      ol_ath_dbg_rx_rssi rx_rssi_chain2;
+	struct      ol_ath_dbg_rx_rssi rx_rssi_chain3;
+	uint32_t    rx_overrun;
+	uint32_t    rx_phyerr;
+	uint32_t    ackrcvbad;
+	uint32_t    rtsbad;
+	uint32_t    rtsgood;
+	uint32_t    fcsbad;
+	uint32_t    nobeacons;
+	uint32_t    mib_int_count;
+	uint32_t    rx_looplimit_start;
+	uint32_t    rx_looplimit_end;
+	uint8_t     ap_stats_tx_cal_enable;
+	uint8_t     self_bss_util;
+	uint8_t     obss_util;
+	uint8_t     ap_rx_util;
+	uint8_t     free_medium;
+	uint8_t     ap_tx_util;
+	uint8_t     obss_rx_util;
+	uint8_t     non_wifi_util;
+	uint32_t    tgt_asserts;
+	int16_t     chan_nf;
+	int16_t     chan_nf_sec80;
+	uint64_t    wmi_tx_mgmt;
+	uint64_t    wmi_tx_mgmt_completions;
+	uint32_t    wmi_tx_mgmt_completion_err;
+	uint32_t    peer_delete_req;
+	uint32_t    peer_delete_resp;
+	uint32_t    rx_mgmt_rssi_drop;
+	uint32_t    tx_frame_count;
+	uint32_t    rx_frame_count;
+	uint32_t    rx_clear_count;
+	uint32_t    cycle_count;
+	uint32_t    phy_err_count;
+	uint32_t    chan_tx_pwr;
+	uint32_t    be_nobuf;
+	uint32_t    tx_packets;
+	uint32_t    rx_packets;
+	uint32_t    tx_num_data;
+	uint32_t    rx_num_data;
+	uint32_t    tx_mcs[10];
+	uint32_t    rx_mcs[10];
+	uint64_t    rx_bytes;
+	uint64_t    tx_bytes;
+	uint32_t    tx_compaggr;
+	uint32_t    rx_aggr;
+	uint32_t    tx_bawadv;
+	uint32_t    tx_compunaggr;
+	uint32_t    rx_badcrypt;
+	uint32_t    rx_badmic;
+	uint32_t    rx_crcerr;
+	uint32_t    rx_last_msdu_unset_cnt;
+	uint32_t    rx_data_bytes;
+	uint32_t    tx_retries;
+};
+
+/* Enumeration of PDEV configuration parameters */
+enum _ol_hal_param_t {
+	OL_HAL_CONFIG_DMA_BEACON_RESPONSE_TIME = 0
+};
 #endif /* _CDP_TXRX_EXTD_STRUCT_H_ */
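Note: the counters at the tail of ol_ath_radiostats (tx_frame_count, rx_frame_count, rx_clear_count, cycle_count) are raw MAC cycle counters. Below is a minimal consumer-side sketch of how a channel-busy percentage is commonly derived from two snapshots of the struct above, assuming free-running 32-bit counters that wrap and treating rx_clear_count as medium-busy cycles (its usual meaning in ath drivers). The helper names are hypothetical and not part of this change.

#include <stdint.h>

/* hypothetical helpers, illustration only */
static uint32_t ol_stats_delta(uint32_t cur, uint32_t prev)
{
	return cur - prev; /* unsigned arithmetic absorbs 32-bit wrap */
}

static uint32_t ol_stats_busy_percent(const struct ol_ath_radiostats *cur,
				      const struct ol_ath_radiostats *prev)
{
	uint32_t cycles = ol_stats_delta(cur->cycle_count, prev->cycle_count);
	uint32_t busy = ol_stats_delta(cur->rx_clear_count,
				       prev->rx_clear_count);

	if (!cycles)
		return 0;
	/* widen before dividing so busy * 100 cannot overflow 32 bits */
	return (uint32_t)(((uint64_t)busy * 100) / cycles);
}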

+ 469 - 0
dp/wifi3.0/dp_txrx_me.c

@@ -0,0 +1,469 @@
+/*
+ * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hal_hw_headers.h"
+#include "dp_types.h"
+#include "qdf_nbuf.h"
+#include "qdf_atomic.h"
+#include "qdf_types.h"
+#include "dp_tx.h"
+#include "dp_tx_desc.h"
+#include "dp_internal.h"
+#include "dp_txrx_me.h"
+#define MAX_ME_BUF_CHUNK 1424
+#define ME_US_TO_SEC(_x) ((_x) / (1000 * 1000))
+#define ME_CLEAN_WAIT_TIMEOUT (200000) /*200ms*/
+#define ME_CLEAN_WAIT_COUNT 400
+
+/**
+ * dp_tx_me_init(): Initialize ME buffer pool
+ * @pdev: DP PDEV handle
+ *
+ * Return: 0 on success, 1 on failure
+ */
+static inline uint16_t
+dp_tx_me_init(struct dp_pdev *pdev)
+{
+	uint16_t i, mc_uc_buf_len, num_pool_elems;
+	uint32_t pool_size;
+
+	struct dp_tx_me_buf_t *p;
+
+	mc_uc_buf_len = sizeof(struct dp_tx_me_buf_t);
+
+	num_pool_elems = MAX_ME_BUF_CHUNK;
+	/* Add flow control buffer count */
+	pool_size = (mc_uc_buf_len) * num_pool_elems;
+	pdev->me_buf.size = mc_uc_buf_len;
+	if (!(pdev->me_buf.vaddr)) {
+		qdf_spin_lock_bh(&pdev->tx_mutex);
+		pdev->me_buf.vaddr = qdf_mem_malloc(pool_size);
+		if (!(pdev->me_buf.vaddr)) {
+			qdf_spin_unlock_bh(&pdev->tx_mutex);
+			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
+				  "Error allocating memory pool");
+			return 1;
+		}
+		pdev->me_buf.buf_in_use = 0;
+		pdev->me_buf.freelist =
+			(struct dp_tx_me_buf_t *)pdev->me_buf.vaddr;
+		/*
+		 * me_buf looks like this
+		 * |=======+==========================|
+		 * | ptr   |         Dst MAC          |
+		 * |=======+==========================|
+		 */
+		p = pdev->me_buf.freelist;
+		for (i = 0; i < num_pool_elems - 1; i++) {
+			p->next = (struct dp_tx_me_buf_t *)
+				((char *)p + pdev->me_buf.size);
+			p = p->next;
+		}
+		p->next = NULL;
+		qdf_spin_unlock_bh(&pdev->tx_mutex);
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
+			  "ME Pool successfully initialized vaddr - %x",
+			  pdev->me_buf.vaddr);
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
+			  "paddr - %x\n", (unsigned int)pdev->me_buf.paddr);
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
+			  "num_elems = %d", (unsigned int)num_pool_elems);
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
+			  "buf_size - %d", (unsigned int)pdev->me_buf.size);
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
+			  "pool_size = %d", (unsigned int)pool_size);
+	} else {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
+			  "ME Already Enabled!!");
+	}
+	return 0;
+}
+
+/**
+ * dp_tx_me_alloc_descriptor(): Allocate ME descriptor
+ * @pdev_handle: DP PDEV handle
+ *
+ * Return: void
+ */
+void
+dp_tx_me_alloc_descriptor(struct cdp_pdev *pdev_handle)
+{
+	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
+
+	if (qdf_atomic_read(&pdev->mc_num_vap_attached) == 0) {
+		dp_tx_me_init(pdev);
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
+			  FL("Enable MCAST_TO_UCAST "));
+	}
+	qdf_atomic_inc(&pdev->mc_num_vap_attached);
+}
+
+/**
+ * dp_tx_me_exit(): Free memory and do other cleanup required for
+ * multicast-to-unicast conversion
+ * @pdev: DP_PDEV handle
+ *
+ * Return: void
+ */
+void
+dp_tx_me_exit(struct dp_pdev *pdev)
+{
+	/* Add flow control buffer count */
+	uint32_t wait_time = ME_US_TO_SEC(ME_CLEAN_WAIT_TIMEOUT *
+			ME_CLEAN_WAIT_COUNT);
+
+	if (pdev->me_buf.vaddr) {
+		uint16_t wait_cnt = 0;
+
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
+			  "Disabling Mcastenhance,This may take some time");
+		qdf_spin_lock_bh(&pdev->tx_mutex);
+		while ((pdev->me_buf.buf_in_use > 0) &&
+		       (wait_cnt < ME_CLEAN_WAIT_COUNT)) {
+			qdf_spin_unlock_bh(&pdev->tx_mutex);
+			OS_SLEEP(ME_CLEAN_WAIT_TIMEOUT);
+			wait_cnt++;
+			qdf_spin_lock_bh(&pdev->tx_mutex);
+		}
+		if (pdev->me_buf.buf_in_use > 0) {
+			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
+				  "Tx-comp pending for %d",
+				  pdev->me_buf.buf_in_use);
+			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
+				  "ME frames after waiting %ds!!",
+				  wait_time);
+			qdf_assert_always(0);
+		}
+
+		qdf_mem_free(pdev->me_buf.vaddr);
+		pdev->me_buf.vaddr = NULL;
+		pdev->me_buf.freelist = NULL;
+		qdf_spin_unlock_bh(&pdev->tx_mutex);
+	} else {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
+			  "ME Already Disabled !!!");
+	}
+}
+
+/**
+ * dp_tx_me_desc_flush() - release ME resources associated with tx_desc
+ * @pdev: DP_PDEV handle
+ *
+ * This function frees all outstanding ME buffers for which the free at
+ * Tx completion did not happen or for which no completion was received.
+ */
+void dp_tx_me_desc_flush(struct dp_pdev *pdev)
+{
+	uint8_t i, num_pool;
+	uint32_t j;
+	uint32_t num_desc, page_id, offset;
+	uint16_t num_desc_per_page;
+	struct dp_soc *soc = pdev->soc;
+	struct dp_tx_desc_s *tx_desc = NULL;
+	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
+
+	num_desc = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
+	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
+
+	for (i = 0; i < num_pool; i++) {
+		tx_desc_pool = &soc->tx_desc[i];
+		if (!tx_desc_pool || !tx_desc_pool->desc_pages.cacheable_pages)
+			continue;
+
+		num_desc_per_page =
+			tx_desc_pool->desc_pages.num_element_per_page;
+		for (j = 0; j < num_desc; j++) {
+			page_id = j / num_desc_per_page;
+			offset = j % num_desc_per_page;
+			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
+
+			if (tx_desc && (tx_desc->pdev == pdev) &&
+			    (tx_desc->flags & DP_TX_DESC_FLAG_ME) &&
+			     (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)) {
+				dp_tx_comp_free_buf(soc, tx_desc);
+				dp_tx_desc_release(tx_desc, i);
+			}
+		}
+	}
+}
+
+/**
+ * dp_tx_me_free_descriptor(): free ME descriptor
+ * @pdev_handle: DP_PDEV handle
+ *
+ * Return: void
+ */
+void
+dp_tx_me_free_descriptor(struct cdp_pdev *pdev_handle)
+{
+	struct dp_pdev *pdev = (struct dp_pdev *)pdev_handle;
+
+	qdf_atomic_dec(&pdev->mc_num_vap_attached);
+	if (qdf_atomic_read(&pdev->mc_num_vap_attached) == 0) {
+		dp_tx_me_desc_flush(pdev);
+		dp_tx_me_exit(pdev);
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
+			  "Disable MCAST_TO_UCAST");
+	}
+}
+
+/**
+ * dp_tx_prepare_send_me(): Call into the umac to convert and send the
+ * multicast frame to the list of clients
+ * @vdev: DP VDEV handle
+ * @nbuf: Multicast buffer
+ *
+ * Return: QDF_STATUS_SUCCESS if at least one packet was transmitted,
+ *	   QDF_STATUS_E_FAILURE otherwise
+ */
+QDF_STATUS
+dp_tx_prepare_send_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
+{
+	if (vdev->me_convert) {
+		if (vdev->me_convert(vdev->osif_vdev, nbuf) > 0)
+			return QDF_STATUS_SUCCESS;
+	}
+
+	return QDF_STATUS_E_FAILURE;
+}
+
+/**
+ * dp_tx_me_mem_free(): Function to free allocated memory in mcast enhancement
+ * @pdev: pointer to DP PDEV structure
+ * @seg_info_head: Pointer to the head of the segment list
+ *
+ * Return: void
+ */
+static void dp_tx_me_mem_free(struct dp_pdev *pdev,
+			      struct dp_tx_seg_info_s *seg_info_head)
+{
+	struct dp_tx_me_buf_t *mc_uc_buf;
+	struct dp_tx_seg_info_s *seg_info_new = NULL;
+	qdf_nbuf_t nbuf = NULL;
+	uint64_t phy_addr;
+
+	while (seg_info_head) {
+		nbuf = seg_info_head->nbuf;
+		mc_uc_buf = (struct dp_tx_me_buf_t *)
+			seg_info_head->frags[0].vaddr;
+		phy_addr = seg_info_head->frags[0].paddr_hi;
+		phy_addr =  (phy_addr << 32) | seg_info_head->frags[0].paddr_lo;
+		qdf_mem_unmap_nbytes_single(pdev->soc->osdev,
+					    phy_addr,
+					    QDF_DMA_TO_DEVICE, QDF_MAC_ADDR_SIZE);
+		dp_tx_me_free_buf(pdev, mc_uc_buf);
+		qdf_nbuf_free(nbuf);
+		seg_info_new = seg_info_head;
+		seg_info_head = seg_info_head->next;
+		qdf_mem_free(seg_info_new);
+	}
+}
+
+/**
+ * dp_tx_me_send_convert_ucast(): function to convert multicast to unicast
+ * @vdev_handle: DP VDEV handle
+ * @nbuf: Multicast nbuf
+ * @newmac: Table of the clients to which packets have to be sent
+ * @new_mac_cnt: Number of clients
+ *
+ * Return: number of converted packets
+ */
+uint16_t
+dp_tx_me_send_convert_ucast(struct cdp_vdev *vdev_handle, qdf_nbuf_t nbuf,
+		uint8_t newmac[][QDF_MAC_ADDR_SIZE], uint8_t new_mac_cnt)
+{
+	struct dp_vdev *vdev = (struct dp_vdev *) vdev_handle;
+	struct dp_pdev *pdev = vdev->pdev;
+	qdf_ether_header_t *eh;
+	uint8_t *data;
+	uint16_t len;
+
+	/* reference to frame dst addr */
+	uint8_t *dstmac;
+	/* copy of original frame src addr */
+	uint8_t srcmac[QDF_MAC_ADDR_SIZE];
+
+	/* local index into newmac */
+	uint8_t new_mac_idx = 0;
+	struct dp_tx_me_buf_t *mc_uc_buf;
+	qdf_nbuf_t  nbuf_clone;
+	struct dp_tx_msdu_info_s msdu_info;
+	struct dp_tx_seg_info_s *seg_info_head = NULL;
+	struct dp_tx_seg_info_s *seg_info_tail = NULL;
+	struct dp_tx_seg_info_s *seg_info_new;
+	qdf_dma_addr_t paddr_data;
+	qdf_dma_addr_t paddr_mcbuf = 0;
+	uint8_t empty_entry_mac[QDF_MAC_ADDR_SIZE] = {0};
+	QDF_STATUS status;
+
+	qdf_mem_zero(&msdu_info, sizeof(msdu_info));
+
+	dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
+
+	eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
+	qdf_mem_copy(srcmac, eh->ether_shost, QDF_MAC_ADDR_SIZE);
+
+	len = qdf_nbuf_len(nbuf);
+
+	data = qdf_nbuf_data(nbuf);
+
+	status = qdf_nbuf_map(vdev->osdev, nbuf,
+			QDF_DMA_TO_DEVICE);
+
+	if (status) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+				"Mapping failure Error:%d", status);
+		DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
+		qdf_nbuf_free(nbuf);
+		return 1;
+	}
+
+	paddr_data = qdf_nbuf_mapped_paddr_get(nbuf) + QDF_MAC_ADDR_SIZE;
+
+	for (new_mac_idx = 0; new_mac_idx < new_mac_cnt; new_mac_idx++) {
+		dstmac = newmac[new_mac_idx];
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
+				"added mac addr (%pM)", dstmac);
+
+		/* Check for NULL Mac Address */
+		if (!qdf_mem_cmp(dstmac, empty_entry_mac, QDF_MAC_ADDR_SIZE))
+			continue;
+
+		/* frame to self mac. skip */
+		if (!qdf_mem_cmp(dstmac, srcmac, QDF_MAC_ADDR_SIZE))
+			continue;
+
+		/*
+		 * optimize to avoid malloc in per-packet path
+		 * For example, seg_pool can be made part of the vdev structure
+		 */
+		seg_info_new = qdf_mem_malloc(sizeof(*seg_info_new));
+
+		if (!seg_info_new) {
+			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+					"alloc failed");
+			DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc, 1);
+			goto fail_seg_alloc;
+		}
+
+		mc_uc_buf = dp_tx_me_alloc_buf(pdev);
+		if (!mc_uc_buf)
+			goto fail_buf_alloc;
+
+		/*
+		 * Check if we need to clone the nbuf
+		 * Or can we just use the reference for all cases
+		 */
+		if (new_mac_idx < (new_mac_cnt - 1)) {
+			nbuf_clone = qdf_nbuf_clone((qdf_nbuf_t)nbuf);
+			if (!nbuf_clone) {
+				DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail, 1);
+				goto fail_clone;
+			}
+		} else {
+			/*
+			 * Update the ref
+			 * to account for frame sent without cloning
+			 */
+			qdf_nbuf_ref(nbuf);
+			nbuf_clone = nbuf;
+		}
+
+		qdf_mem_copy(mc_uc_buf->data, dstmac, QDF_MAC_ADDR_SIZE);
+
+		status = qdf_mem_map_nbytes_single(vdev->osdev, mc_uc_buf->data,
+				QDF_DMA_TO_DEVICE, QDF_MAC_ADDR_SIZE,
+				&paddr_mcbuf);
+
+		if (status) {
+			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+					"Mapping failure Error:%d", status);
+			DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error, 1);
+			goto fail_map;
+		}
+
+		seg_info_new->frags[0].vaddr =  (uint8_t *)mc_uc_buf;
+		seg_info_new->frags[0].paddr_lo = (uint32_t) paddr_mcbuf;
+		seg_info_new->frags[0].paddr_hi =
+			(uint16_t)((uint64_t)paddr_mcbuf >> 32);
+		seg_info_new->frags[0].len = QDF_MAC_ADDR_SIZE;
+
+		/* preparing data fragment */
+		seg_info_new->frags[1].vaddr =
+			qdf_nbuf_data(nbuf) + QDF_MAC_ADDR_SIZE;
+		seg_info_new->frags[1].paddr_lo = (uint32_t)paddr_data;
+		seg_info_new->frags[1].paddr_hi =
+			(uint16_t)(((uint64_t)paddr_data) >> 32);
+		seg_info_new->frags[1].len = len - QDF_MAC_ADDR_SIZE;
+
+		seg_info_new->nbuf = nbuf_clone;
+		seg_info_new->frag_cnt = 2;
+		seg_info_new->total_len = len;
+
+		seg_info_new->next = NULL;
+
+		if (!seg_info_head)
+			seg_info_head = seg_info_new;
+		else
+			seg_info_tail->next = seg_info_new;
+
+		seg_info_tail = seg_info_new;
+	}
+
+	if (!seg_info_head)
+		goto free_return;
+
+	msdu_info.u.sg_info.curr_seg = seg_info_head;
+	msdu_info.num_seg = new_mac_cnt;
+	msdu_info.frm_type = dp_tx_frm_me;
+
+	msdu_info.tid = HTT_INVALID_TID;
+	if (qdf_unlikely(vdev->mcast_enhancement_en > 0) &&
+	    qdf_unlikely(pdev->hmmc_tid_override_en))
+		msdu_info.tid = pdev->hmmc_tid;
+
+	DP_STATS_INC(vdev, tx_i.mcast_en.ucast, new_mac_cnt);
+	dp_tx_send_msdu_multiple(vdev, nbuf, &msdu_info);
+
+	while (seg_info_head->next) {
+		seg_info_new = seg_info_head;
+		seg_info_head = seg_info_head->next;
+		qdf_mem_free(seg_info_new);
+	}
+	qdf_mem_free(seg_info_head);
+
+	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
+	qdf_nbuf_free(nbuf);
+	return new_mac_cnt;
+
+fail_map:
+	qdf_nbuf_free(nbuf_clone);
+
+fail_clone:
+	dp_tx_me_free_buf(pdev, mc_uc_buf);
+
+fail_buf_alloc:
+	qdf_mem_free(seg_info_new);
+
+fail_seg_alloc:
+	dp_tx_me_mem_free(pdev, seg_info_head);
+
+free_return:
+	qdf_nbuf_unmap(pdev->soc->osdev, nbuf, QDF_DMA_TO_DEVICE);
+	qdf_nbuf_free(nbuf);
+	return 1;
+}
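Note: dp_tx_me_alloc_buf() and dp_tx_me_free_buf(), called above, are defined elsewhere in the datapath. The following is a minimal sketch of the freelist pop/push they are expected to perform on the pool laid out by dp_tx_me_init(), under the tx_mutex locking and field names used in this file; treat it as illustration, not the actual implementation.

/* illustration only; mirrors the pool built by dp_tx_me_init() */
static inline struct dp_tx_me_buf_t *me_pool_pop(struct dp_pdev *pdev)
{
	struct dp_tx_me_buf_t *buf;

	qdf_spin_lock_bh(&pdev->tx_mutex);
	buf = pdev->me_buf.freelist;
	if (buf) {
		pdev->me_buf.freelist = buf->next;
		pdev->me_buf.buf_in_use++; /* drained by dp_tx_me_exit() */
	}
	qdf_spin_unlock_bh(&pdev->tx_mutex);
	return buf;
}

static inline void me_pool_push(struct dp_pdev *pdev,
				struct dp_tx_me_buf_t *buf)
{
	qdf_spin_lock_bh(&pdev->tx_mutex);
	buf->next = pdev->me_buf.freelist;
	pdev->me_buf.freelist = buf;
	pdev->me_buf.buf_in_use--;
	qdf_spin_unlock_bh(&pdev->tx_mutex);
}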

+ 30 - 0
dp/wifi3.0/dp_txrx_me.h

@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef _DP_TXRX_ME_H_
+#define _DP_TXRX_ME_H_
+uint16_t dp_tx_me_send_convert_ucast(struct cdp_vdev *vdev_handle,
+				     qdf_nbuf_t nbuf,
+				     uint8_t newmac[][QDF_MAC_ADDR_SIZE],
+				     uint8_t new_mac_cnt);
+void dp_tx_me_alloc_descriptor(struct cdp_pdev *pdev);
+
+void dp_tx_me_free_descriptor(struct cdp_pdev *pdev);
+void dp_tx_me_exit(struct dp_pdev *pdev);
+QDF_STATUS
+dp_tx_prepare_send_me(struct dp_vdev *vdev, qdf_nbuf_t nbuf);
+#endif /* _DP_TXRX_ME_H_ */
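Note: a hypothetical call-order sketch for the API declared in this header, inferred from the reference counting in dp_tx_me_alloc_descriptor()/dp_tx_me_free_descriptor(): the first attached mcast-enhanced vdev initializes the pool, the tx path attempts the umac conversion, and the last detached vdev flushes and frees the pool. The *_example functions are invented for illustration.

static void me_vdev_attach_example(struct cdp_pdev *pdev)
{
	dp_tx_me_alloc_descriptor(pdev); /* first caller inits the pool */
}

static void me_tx_example(struct dp_vdev *vdev, qdf_nbuf_t mcast_nbuf)
{
	/* on failure the caller falls back to plain mcast transmission */
	if (dp_tx_prepare_send_me(vdev, mcast_nbuf) != QDF_STATUS_SUCCESS)
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
			  "ME conversion not done, send as mcast");
}

static void me_vdev_detach_example(struct cdp_pdev *pdev)
{
	dp_tx_me_free_descriptor(pdev); /* last caller flushes and frees */
}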

+ 379 - 0
dp/wifi3.0/dp_txrx_wds.c

@@ -0,0 +1,379 @@
+/*
+ * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+#include "dp_peer.h"
+#include "hal_rx.h"
+#include "hal_api.h"
+#include "qdf_nbuf.h"
+#include "dp_types.h"
+#include "dp_internal.h"
+#include "dp_txrx_wds.h"
+
+/* Generic AST entry aging timer value */
+#define DP_AST_AGING_TIMER_DEFAULT_MS	1000
+
+static void dp_ast_aging_timer_fn(void *soc_hdl)
+{
+	struct dp_soc *soc = (struct dp_soc *)soc_hdl;
+	struct dp_pdev *pdev;
+	struct dp_vdev *vdev;
+	struct dp_peer *peer;
+	struct dp_ast_entry *ase, *temp_ase;
+	int i;
+	bool check_wds_ase = false;
+
+	if (soc->wds_ast_aging_timer_cnt++ >= DP_WDS_AST_AGING_TIMER_CNT) {
+		soc->wds_ast_aging_timer_cnt = 0;
+		check_wds_ase = true;
+	}
+
+	/* Peer list access lock */
+	qdf_spin_lock_bh(&soc->peer_ref_mutex);
+
+	/* AST list access lock */
+	qdf_spin_lock_bh(&soc->ast_lock);
+
+	for (i = 0; i < MAX_PDEV_CNT && soc->pdev_list[i]; i++) {
+		pdev = soc->pdev_list[i];
+		qdf_spin_lock_bh(&pdev->vdev_list_lock);
+		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
+			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
+				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
+					/*
+					 * Do not expire static ast entries
+					 * and HM WDS entries
+					 */
+					if (ase->type !=
+					    CDP_TXRX_AST_TYPE_WDS &&
+					    ase->type !=
+					    CDP_TXRX_AST_TYPE_MEC &&
+					    ase->type !=
+					    CDP_TXRX_AST_TYPE_DA)
+						continue;
+
+					/* Expire MEC entry every n sec.
+					 * This needs to be expired in
+					 * case the STA backbone is made
+					 * the AP backbone; in that case
+					 * it needs to be re-added as a
+					 * WDS entry.
+					 */
+					if (ase->is_active && ase->type ==
+					    CDP_TXRX_AST_TYPE_MEC) {
+						ase->is_active = FALSE;
+						continue;
+					} else if (ase->is_active &&
+						   check_wds_ase) {
+						ase->is_active = FALSE;
+						continue;
+					}
+
+					if (ase->type ==
+					    CDP_TXRX_AST_TYPE_MEC) {
+						DP_STATS_INC(soc,
+							     ast.aged_out, 1);
+						dp_peer_del_ast(soc, ase);
+					} else if (check_wds_ase) {
+						DP_STATS_INC(soc,
+							     ast.aged_out, 1);
+						dp_peer_del_ast(soc, ase);
+					}
+				}
+			}
+		}
+		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
+	}
+
+	qdf_spin_unlock_bh(&soc->ast_lock);
+	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
+
+	if (qdf_atomic_read(&soc->cmn_init_done))
+		qdf_timer_mod(&soc->ast_aging_timer,
+			      DP_AST_AGING_TIMER_DEFAULT_MS);
+}
+
+/*
+ * dp_soc_wds_attach() - Setup WDS timer and AST table
+ * @soc:		Datapath SOC handle
+ *
+ * Return: None
+ */
+void dp_soc_wds_attach(struct dp_soc *soc)
+{
+	soc->wds_ast_aging_timer_cnt = 0;
+	qdf_timer_init(soc->osdev, &soc->ast_aging_timer,
+		       dp_ast_aging_timer_fn, (void *)soc,
+		       QDF_TIMER_TYPE_WAKE_APPS);
+
+	qdf_timer_mod(&soc->ast_aging_timer, DP_AST_AGING_TIMER_DEFAULT_MS);
+}
+
+/*
+ * dp_soc_wds_detach() - Detach WDS data structures and timers
+ * @soc: DP SOC handle
+ *
+ * Return: None
+ */
+void dp_soc_wds_detach(struct dp_soc *soc)
+{
+	qdf_timer_stop(&soc->ast_aging_timer);
+	qdf_timer_free(&soc->ast_aging_timer);
+}
+
+/**
+ * dp_rx_da_learn() - Add AST entry based on DA lookup
+ *			This is a WAR for HK 1.0 and will
+ *			be removed in HK 2.0
+ *
+ * @soc: core txrx main context
+ * @rx_tlv_hdr: start address of rx tlvs
+ * @ta_peer: Transmitter peer entry
+ * @nbuf: nbuf to retrieve destination mac for which AST will be added
+ *
+ * Return: None
+ */
+void
+dp_rx_da_learn(struct dp_soc *soc,
+	       uint8_t *rx_tlv_hdr,
+	       struct dp_peer *ta_peer,
+	       qdf_nbuf_t nbuf)
+{
+	/* For HKv2, DA port learning is not needed */
+	if (qdf_likely(soc->ast_override_support))
+		return;
+
+	if (qdf_unlikely(!ta_peer))
+		return;
+
+	if (qdf_unlikely(ta_peer->vdev->opmode != wlan_op_mode_ap))
+		return;
+
+	if (!soc->da_war_enabled)
+		return;
+
+	if (qdf_unlikely(!qdf_nbuf_is_da_valid(nbuf) &&
+			 !qdf_nbuf_is_da_mcbc(nbuf))) {
+		dp_peer_add_ast(soc,
+				ta_peer,
+				qdf_nbuf_data(nbuf),
+				CDP_TXRX_AST_TYPE_DA,
+				IEEE80211_NODE_F_WDS_HM);
+	}
+}
+
+/**
+ * dp_tx_mec_handler() - Tx MEC Notify Handler
+ * @vdev: pointer to dp vdev handle
+ * @status: Tx completion status from HTT descriptor
+ *
+ * Handles MEC notify event sent from fw to Host
+ *
+ * Return: none
+ */
+void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
+{
+	struct dp_soc *soc;
+	uint32_t flags = IEEE80211_NODE_F_WDS_HM;
+	struct dp_peer *peer;
+	uint8_t mac_addr[QDF_MAC_ADDR_SIZE], i;
+
+	if (!vdev->mec_enabled)
+		return;
+
+	/* MEC required only in STA mode */
+	if (vdev->opmode != wlan_op_mode_sta)
+		return;
+
+	soc = vdev->pdev->soc;
+	qdf_spin_lock_bh(&soc->peer_ref_mutex);
+	peer = TAILQ_FIRST(&vdev->peer_list);
+	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
+
+	if (!peer) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
+			  FL("peer is NULL"));
+		return;
+	}
+
+	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
+		  "%s Tx MEC Handler",
+		  __func__);
+
+	for (i = 0; i < QDF_MAC_ADDR_SIZE; i++)
+		mac_addr[(QDF_MAC_ADDR_SIZE - 1) - i] =
+					status[(QDF_MAC_ADDR_SIZE - 2) + i];
+
+	if (qdf_mem_cmp(mac_addr, vdev->mac_addr.raw, QDF_MAC_ADDR_SIZE))
+		dp_peer_add_ast(soc,
+				peer,
+				mac_addr,
+				CDP_TXRX_AST_TYPE_MEC,
+				flags);
+}
+
+/**
+ * dp_txrx_set_wds_rx_policy() - API to store the WDS rx policy
+ * @vdev_handle: datapath vdev handle
+ * @val: WDS rx policy bitmap (WDS_POLICY_RX_UCAST_4ADDR /
+ *	 WDS_POLICY_RX_MCAST_4ADDR)
+ *
+ * Return: void
+ */
+#ifdef WDS_VENDOR_EXTENSION
+void
+dp_txrx_set_wds_rx_policy(struct cdp_vdev *vdev_handle, u_int32_t val)
+{
+	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
+	struct dp_peer *peer;
+
+	if (vdev->opmode == wlan_op_mode_ap) {
+		/* for ap, set it on bss_peer */
+		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
+			if (peer->bss_peer) {
+				peer->wds_ecm.wds_rx_filter = 1;
+				peer->wds_ecm.wds_rx_ucast_4addr =
+					(val & WDS_POLICY_RX_UCAST_4ADDR) ?
+					1 : 0;
+				peer->wds_ecm.wds_rx_mcast_4addr =
+					(val & WDS_POLICY_RX_MCAST_4ADDR) ?
+					1 : 0;
+				break;
+			}
+		}
+	} else if (vdev->opmode == wlan_op_mode_sta) {
+		peer = TAILQ_FIRST(&vdev->peer_list);
+		if (!peer)
+			return;
+		peer->wds_ecm.wds_rx_filter = 1;
+		peer->wds_ecm.wds_rx_ucast_4addr =
+			(val & WDS_POLICY_RX_UCAST_4ADDR) ? 1 : 0;
+		peer->wds_ecm.wds_rx_mcast_4addr =
+			(val & WDS_POLICY_RX_MCAST_4ADDR) ? 1 : 0;
+	}
+}
+
+/**
+ * dp_txrx_peer_wds_tx_policy_update() - API to set tx wds policy
+ *
+ * @peer_handle: datapath peer handle
+ * @wds_tx_ucast: policy for unicast transmission
+ * @wds_tx_mcast: policy for multicast transmission
+ *
+ * Return: void
+ */
+void
+dp_txrx_peer_wds_tx_policy_update(struct cdp_peer *peer_handle,
+				  int wds_tx_ucast, int wds_tx_mcast)
+{
+	struct dp_peer *peer = (struct dp_peer *)peer_handle;
+
+	if (wds_tx_ucast || wds_tx_mcast) {
+		peer->wds_enabled = 1;
+		peer->wds_ecm.wds_tx_ucast_4addr = wds_tx_ucast;
+		peer->wds_ecm.wds_tx_mcast_4addr = wds_tx_mcast;
+	} else {
+		peer->wds_enabled = 0;
+		peer->wds_ecm.wds_tx_ucast_4addr = 0;
+		peer->wds_ecm.wds_tx_mcast_4addr = 0;
+	}
+
+	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
+		  "Policy Update set to :\n");
+	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
+		  "peer->wds_enabled %d\n", peer->wds_enabled);
+	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
+		  "peer->wds_ecm.wds_tx_ucast_4addr %d\n",
+		  peer->wds_ecm.wds_tx_ucast_4addr);
+	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
+		  "peer->wds_ecm.wds_tx_mcast_4addr %d\n",
+		  peer->wds_ecm.wds_tx_mcast_4addr);
+}
+
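+/**
+ * dp_wds_rx_policy_check() - Check if an rx frame is allowed by the
+ *			      WDS rx policy configured on this vdev
+ * @rx_tlv_hdr: start address of rx tlvs
+ * @vdev: datapath vdev handle
+ * @peer: transmitter (ta) peer
+ *
+ * Return: 1 if the frame passes the policy check, 0 if it must be dropped
+ */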
+int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr,
+			   struct dp_vdev *vdev,
+			   struct dp_peer *peer)
+{
+	struct dp_peer *bss_peer;
+	int fr_ds, to_ds, rx_3addr, rx_4addr;
+	int rx_policy_ucast, rx_policy_mcast;
+	int rx_mcast = hal_rx_msdu_end_da_is_mcbc_get(rx_tlv_hdr);
+
+	if (vdev->opmode == wlan_op_mode_ap) {
+		TAILQ_FOREACH(bss_peer, &vdev->peer_list, peer_list_elem) {
+			if (bss_peer->bss_peer) {
+				/* if wds policy check is not enabled on
+				 * this vdev, accept all frames
+				 */
+				if (!bss_peer->wds_ecm.wds_rx_filter)
+					return 1;
+				break;
+			}
+		}
+		rx_policy_ucast = bss_peer->wds_ecm.wds_rx_ucast_4addr;
+		rx_policy_mcast = bss_peer->wds_ecm.wds_rx_mcast_4addr;
+	} else {             /* sta mode */
+		if (!peer->wds_ecm.wds_rx_filter)
+			return 1;
+		rx_policy_ucast = peer->wds_ecm.wds_rx_ucast_4addr;
+		rx_policy_mcast = peer->wds_ecm.wds_rx_mcast_4addr;
+	}
+
+	/* ------------------------------------------------
+	 *                       self
+	 * peer-             rx  rx-
+	 * wds  ucast mcast dir policy accept note
+	 * ------------------------------------------------
+	 * 1     1     0     11  x1     1      AP configured to accept ds-to-ds Rx ucast from wds peers, constraint met; so, accept
+	 * 1     1     0     01  x1     0      AP configured to accept ds-to-ds Rx ucast from wds peers, constraint not met; so, drop
+	 * 1     1     0     10  x1     0      AP configured to accept ds-to-ds Rx ucast from wds peers, constraint not met; so, drop
+	 * 1     1     0     00  x1     0      bad frame, won't see it
+	 * 1     0     1     11  1x     1      AP configured to accept ds-to-ds Rx mcast from wds peers, constraint met; so, accept
+	 * 1     0     1     01  1x     0      AP configured to accept ds-to-ds Rx mcast from wds peers, constraint not met; so, drop
+	 * 1     0     1     10  1x     0      AP configured to accept ds-to-ds Rx mcast from wds peers, constraint not met; so, drop
+	 * 1     0     1     00  1x     0      bad frame, won't see it
+	 * 1     1     0     11  x0     0      AP configured to accept from-ds Rx ucast from wds peers, constraint not met; so, drop
+	 * 1     1     0     01  x0     0      AP configured to accept from-ds Rx ucast from wds peers, constraint not met; so, drop
+	 * 1     1     0     10  x0     1      AP configured to accept from-ds Rx ucast from wds peers, constraint met; so, accept
+	 * 1     1     0     00  x0     0      bad frame, won't see it
+	 * 1     0     1     11  0x     0      AP configured to accept from-ds Rx mcast from wds peers, constraint not met; so, drop
+	 * 1     0     1     01  0x     0      AP configured to accept from-ds Rx mcast from wds peers, constraint not met; so, drop
+	 * 1     0     1     10  0x     1      AP configured to accept from-ds Rx mcast from wds peers, constraint met; so, accept
+	 * 1     0     1     00  0x     0      bad frame, won't see it
+	 *
+	 * 0     x     x     11  xx     0      we only accept to-ds Rx frames from non-wds peers in this mode.
+	 * 0     x     x     01  xx     1
+	 * 0     x     x     10  xx     0
+	 * 0     x     x     00  xx     0      bad frame, won't see it
+	 * ------------------------------------------------
+	 */
+
+	fr_ds = hal_rx_mpdu_get_fr_ds(rx_tlv_hdr);
+	to_ds = hal_rx_mpdu_get_to_ds(rx_tlv_hdr);
+	rx_3addr = fr_ds ^ to_ds;
+	rx_4addr = fr_ds & to_ds;
+
+	if (vdev->opmode == wlan_op_mode_ap) {
+		if ((!peer->wds_enabled && rx_3addr && to_ds) ||
+		    (peer->wds_enabled && !rx_mcast &&
+		     (rx_4addr == rx_policy_ucast)) ||
+		    (peer->wds_enabled && rx_mcast &&
+		     (rx_4addr == rx_policy_mcast)))
+			return 1;
+	} else {           /* sta mode */
+		if ((!rx_mcast && (rx_4addr == rx_policy_ucast)) ||
+		    (rx_mcast && (rx_4addr == rx_policy_mcast)))
+			return 1;
+	}
+	return 0;
+}
+#endif
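Note: the byte reversal in dp_tx_mec_handler() above is easy to misread. The loop assumes the HTT completion status words carry the MEC destination MAC in reverse byte order starting at status[4]; that layout is an assumption inferred from the loop bounds, not confirmed by this diff. A standalone replay of just that loop, with made-up input, showing the effect:

#include <stdint.h>
#include <stdio.h>

#define QDF_MAC_ADDR_SIZE 6

int main(void)
{
	/* assumed layout: MAC in reverse byte order at status[4..9] */
	uint8_t status[10] = { 0, 0, 0, 0,
			       0x66, 0x55, 0x44, 0x33, 0x22, 0x11 };
	uint8_t mac_addr[QDF_MAC_ADDR_SIZE];
	int i;

	/* same index arithmetic as dp_tx_mec_handler() */
	for (i = 0; i < QDF_MAC_ADDR_SIZE; i++)
		mac_addr[(QDF_MAC_ADDR_SIZE - 1) - i] =
					status[(QDF_MAC_ADDR_SIZE - 2) + i];

	for (i = 0; i < QDF_MAC_ADDR_SIZE; i++)
		printf("%02x%c", mac_addr[i],
		       i == QDF_MAC_ADDR_SIZE - 1 ? '\n' : ':');
	/* prints 11:22:33:44:55:66 */
	return 0;
}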

+ 359 - 0
dp/wifi3.0/dp_txrx_wds.h

@@ -0,0 +1,359 @@
+/*
+ * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _DP_TXRX_WDS_H_
+#define _DP_TXRX_WDS_H_
+
+/* WDS AST entry aging timer value */
+#define DP_WDS_AST_AGING_TIMER_DEFAULT_MS	120000
+#define DP_WDS_AST_AGING_TIMER_CNT \
+((DP_WDS_AST_AGING_TIMER_DEFAULT_MS / DP_AST_AGING_TIMER_DEFAULT_MS) - 1)
+void dp_soc_wds_attach(struct dp_soc *soc);
+void dp_soc_wds_detach(struct dp_soc *soc);
+
+void
+dp_rx_da_learn(struct dp_soc *soc,
+	       uint8_t *rx_tlv_hdr,
+	       struct dp_peer *ta_peer,
+	       qdf_nbuf_t nbuf);
+
+void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status);
+#ifdef FEATURE_AST
+/**
+ * dp_peer_delete_ast_entries(): Delete all AST entries for a peer
+ * @soc: datapath soc handle
+ * @peer: datapath peer handle
+ *
+ * Delete the AST entries belonging to a peer
+ */
+static inline void dp_peer_delete_ast_entries(struct dp_soc *soc,
+					      struct dp_peer *peer)
+{
+	struct dp_ast_entry *ast_entry, *temp_ast_entry;
+
+	qdf_spin_lock_bh(&soc->ast_lock);
+	DP_PEER_ITERATE_ASE_LIST(peer, ast_entry, temp_ast_entry)
+		dp_peer_del_ast(soc, ast_entry);
+
+	peer->self_ast_entry = NULL;
+	qdf_spin_unlock_bh(&soc->ast_lock);
+}
+
+static void dp_peer_teardown_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
+{
+	struct dp_vdev *vdev = (struct dp_vdev *)vdev_hdl;
+	struct dp_peer *peer = (struct dp_peer *)peer_hdl;
+	struct dp_soc *soc = (struct dp_soc *)vdev->pdev->soc;
+
+	/*
+	 * For the BSS peer, a new peer is not created on alloc_node if a
+	 * peer with the same address already exists; instead the refcnt of
+	 * the existing peer is increased. Correspondingly, in the delete
+	 * path only the refcnt is decreased, and the peer is only deleted
+	 * when all references are gone. So delete_in_progress should not
+	 * be set for the bss_peer unless only 2 references remain (the
+	 * peer map reference and the peer hash table reference).
+	 */
+	if (peer->bss_peer && (qdf_atomic_read(&peer->ref_cnt) > 2))
+		return;
+
+	peer->delete_in_progress = true;
+	dp_peer_delete_ast_entries(soc, peer);
+}
+#endif /* FEATURE_AST */
+
+#ifdef FEATURE_WDS
+static inline bool dp_tx_da_search_override(struct dp_vdev *vdev)
+{
+	struct dp_soc *soc = vdev->pdev->soc;
+
+	/*
+	 * If AST index override support is available (HKv2 etc.),
+	 * the DA search flag should always be enabled.
+	 *
+	 * If AST index override support is not available (HKv1),
+	 * DA search flag should be used for all modes except QWRAP
+	 */
+	if (soc->ast_override_support || !vdev->proxysta_vdev)
+		return true;
+
+	return false;
+}
+#endif /* FEATURE_WDS */
+
+#ifdef WDS_VENDOR_EXTENSION
+void
+dp_txrx_peer_wds_tx_policy_update(struct cdp_peer *peer_handle,
+				  int wds_tx_ucast, int wds_tx_mcast);
+void
+dp_txrx_set_wds_rx_policy(struct cdp_vdev *vdev_handle,
+			  u_int32_t val);
+#endif /* WDS_VENDOR_EXTENSION */
+
+/**
+ * dp_rx_wds_add_or_update_ast() - Add or update the ast entry.
+ *
+ * @soc: core txrx main context
+ * @ta_peer: WDS repeater peer
+ * @wds_src_mac: source MAC address of the frame
+ * @is_ad4_valid: 4-address valid flag
+ * @is_sa_valid: source address valid flag
+ * @is_chfrag_start: frag start flag
+ * @sa_idx: source-address index for peer
+ * @sa_sw_peer_id: software source-address peer-id
+ *
+ * Return: void
+ */
+static inline void
+dp_rx_wds_add_or_update_ast(struct dp_soc *soc, struct dp_peer *ta_peer,
+			    uint8_t *wds_src_mac, uint8_t is_ad4_valid,
+			    uint8_t is_sa_valid, uint8_t is_chfrag_start,
+			    uint16_t sa_idx, uint16_t sa_sw_peer_id)
+{
+	struct dp_peer *sa_peer;
+	struct dp_ast_entry *ast;
+	uint32_t flags = IEEE80211_NODE_F_WDS_HM;
+	uint32_t ret = 0;
+	struct dp_neighbour_peer *neighbour_peer = NULL;
+	struct dp_pdev *pdev = ta_peer->vdev->pdev;
+
+	/* For AP mode: Do wds source port learning only if it is a
+	 * 4-address mpdu
+	 *
+	 * For STA mode: Frames from the RootAP backend will be in
+	 * 3-address mode, till the RootAP does the WDS source port
+	 * learning; hence in repeater/STA mode we enable learning even
+	 * in 3-address mode, to avoid the RootAP backbone getting
+	 * wrongly learnt as MEC on the repeater
+	 */
+	if (ta_peer->vdev->opmode != wlan_op_mode_sta) {
+		if (!(is_chfrag_start && is_ad4_valid))
+			return;
+	} else {
+		/* For HKv2, source port learning is not needed in STA mode
+		 * as we have support in HW
+		 */
+		if (soc->ast_override_support)
+			return;
+	}
+
+	if (qdf_unlikely(!is_sa_valid)) {
+		ret = dp_peer_add_ast(soc, ta_peer, wds_src_mac,
+				      CDP_TXRX_AST_TYPE_WDS, flags);
+		return;
+	}
+
+	qdf_spin_lock_bh(&soc->ast_lock);
+	ast = soc->ast_table[sa_idx];
+	qdf_spin_unlock_bh(&soc->ast_lock);
+
+	if (!ast) {
+		/*
+		 * In HKv1, it is possible that HW retains the AST entry in
+		 * GSE cache on 1 radio , even after the AST entry is deleted
+		 * (on another radio).
+		 *
+		 * Due to this, host might still get sa_is_valid indications
+		 * for frames with SA not really present in AST table.
+		 *
+		 * So we go ahead and send an add_ast command to FW in such
+		 * cases where sa is reported still as valid, so that FW will
+		 * invalidate this GSE cache entry and new AST entry gets
+		 * cached.
+		 */
+		if (!soc->ast_override_support) {
+			ret = dp_peer_add_ast(soc, ta_peer, wds_src_mac,
+					      CDP_TXRX_AST_TYPE_WDS, flags);
+			return;
+		}
+		if (soc->ast_override_support) {
+			/* In HKv2 smart monitor case, when NAC client is
+			 * added first and this client roams within BSS to
+			 * connect to RE, since we have an AST entry for
+			 * NAC we get sa_is_valid bit set. So we check if
+			 * smart monitor is enabled and send add_ast command
+			 * to FW.
+			 */
+			if (pdev->neighbour_peers_added) {
+				qdf_spin_lock_bh(&pdev->neighbour_peer_mutex);
+				TAILQ_FOREACH(neighbour_peer,
+					      &pdev->neighbour_peers_list,
+					      neighbour_peer_list_elem) {
+					if (!qdf_mem_cmp(&neighbour_peer->
+							 neighbour_peers_macaddr,
+							 wds_src_mac,
+							 QDF_MAC_ADDR_SIZE)) {
+						ret = dp_peer_add_ast
+							(soc,
+							 ta_peer,
+							 wds_src_mac,
+							 CDP_TXRX_AST_TYPE_WDS,
+							 flags);
+						QDF_TRACE
+							(QDF_MODULE_ID_DP,
+							 QDF_TRACE_LEVEL_INFO,
+							 "sa valid and nac roamed to wds");
+						break;
+					}
+				}
+				qdf_spin_unlock_bh(&pdev->neighbour_peer_mutex);
+			}
+			return;
+		}
+	}
+
+	if ((ast->type == CDP_TXRX_AST_TYPE_WDS_HM) ||
+	    (ast->type == CDP_TXRX_AST_TYPE_WDS_HM_SEC))
+		return;
+
+	/*
+	 * Ensure we are updating the right AST entry by
+	 * validating ast_idx.
+	 * There is a possibility we might arrive here without
+	 * AST MAP event, so this check is mandatory
+	 */
+	if (ast->is_mapped && (ast->ast_idx == sa_idx))
+		ast->is_active = TRUE;
+
+	if (sa_sw_peer_id != ta_peer->peer_ids[0]) {
+		sa_peer = ast->peer;
+
+		if ((ast->type != CDP_TXRX_AST_TYPE_STATIC) &&
+		    (ast->type != CDP_TXRX_AST_TYPE_SELF) &&
+		    (ast->type != CDP_TXRX_AST_TYPE_STA_BSS)) {
+			if (ast->pdev_id != ta_peer->vdev->pdev->pdev_id) {
+				/* This case is when a STA roams from one
+				 * repeater to another repeater, but these
+				 * repeaters are connected to root AP on
+				 * different radios.
+				 * Ex: rptr1 connected to ROOT AP over 5G
+				 * and rptr2 connected to ROOT AP over 2G
+				 * radio
+				 */
+				qdf_spin_lock_bh(&soc->ast_lock);
+				dp_peer_del_ast(soc, ast);
+				qdf_spin_unlock_bh(&soc->ast_lock);
+			} else {
+				/* this case is when a STA roams from one
+				 * repeater to another repeater, but within
+				 * the same radio.
+				 */
+				qdf_spin_lock_bh(&soc->ast_lock);
+				dp_peer_update_ast(soc, ta_peer, ast, flags);
+				qdf_spin_unlock_bh(&soc->ast_lock);
+				return;
+			}
+		}
+		/*
+		 * Do not kickout STA if it belongs to a different radio.
+		 * For DBDC repeater, it is possible to arrive here
+		 * for multicast loopback frames originated from connected
+		 * clients and looped back (intrabss) by Root AP
+		 */
+		if (ast->pdev_id != ta_peer->vdev->pdev->pdev_id)
+			return;
+
+		/*
+		 * Kickout, when direct associated peer(SA) roams
+		 * to another AP and reachable via TA peer
+		 */
+		if ((sa_peer->vdev->opmode == wlan_op_mode_ap) &&
+		    !sa_peer->delete_in_progress) {
+			sa_peer->delete_in_progress = true;
+			if (soc->cdp_soc.ol_ops->peer_sta_kickout) {
+				soc->cdp_soc.ol_ops->peer_sta_kickout(
+						sa_peer->vdev->pdev->ctrl_pdev,
+						wds_src_mac);
+			}
+		}
+	}
+}
+
+/**
+ * dp_rx_wds_srcport_learn() - Add or update the STA PEER which
+ *				is behind the WDS repeater.
+ *
+ * @soc: core txrx main context
+ * @rx_tlv_hdr: base address of RX TLV header
+ * @ta_peer: WDS repeater peer
+ * @nbuf: rx pkt
+ *
+ * Return: void
+ */
+static inline void
+dp_rx_wds_srcport_learn(struct dp_soc *soc,
+			uint8_t *rx_tlv_hdr,
+			struct dp_peer *ta_peer,
+			qdf_nbuf_t nbuf)
+{
+	uint16_t sa_sw_peer_id = hal_rx_msdu_end_sa_sw_peer_id_get(rx_tlv_hdr);
+	uint8_t sa_is_valid = hal_rx_msdu_end_sa_is_valid_get(rx_tlv_hdr);
+	uint8_t wds_src_mac[QDF_MAC_ADDR_SIZE];
+	uint16_t sa_idx;
+	uint8_t is_chfrag_start = 0;
+	uint8_t is_ad4_valid = 0;
+
+	if (qdf_unlikely(!ta_peer))
+		return;
+
+	is_chfrag_start = qdf_nbuf_is_rx_chfrag_start(nbuf);
+	if (is_chfrag_start)
+		is_ad4_valid = hal_rx_get_mpdu_mac_ad4_valid(rx_tlv_hdr);
+
+	qdf_mem_copy(wds_src_mac, (qdf_nbuf_data(nbuf) + QDF_MAC_ADDR_SIZE),
+		     QDF_MAC_ADDR_SIZE);
+
+	/*
+	 * Get the AST entry from HW SA index and mark it as active
+	 */
+	sa_idx = hal_rx_msdu_end_sa_idx_get(rx_tlv_hdr);
+
+	dp_rx_wds_add_or_update_ast(soc, ta_peer, wds_src_mac, is_ad4_valid,
+				    sa_is_valid, is_chfrag_start,
+				    sa_idx, sa_sw_peer_id);
+}
+
+/*
+ * dp_rx_ast_set_active() - set the active flag of the ast entry
+ *			    corresponding to a hw index.
+ * @soc: core txrx main context
+ * @sa_idx: hw idx
+ * @is_active: active flag
+ *
+ * Return: QDF_STATUS_SUCCESS if the entry was found and updated,
+ *	   QDF_STATUS_E_FAILURE otherwise
+ */
+static inline QDF_STATUS dp_rx_ast_set_active(struct dp_soc *soc,
+					      uint16_t sa_idx, bool is_active)
+{
+	struct dp_ast_entry *ast;
+
+	qdf_spin_lock_bh(&soc->ast_lock);
+	ast = soc->ast_table[sa_idx];
+
+	/*
+	 * Ensure we are updating the right AST entry by
+	 * validating ast_idx.
+	 * There is a possibility we might arrive here without
+	 * AST MAP event, so this check is mandatory
+	 */
+	if (ast && ast->is_mapped && (ast->ast_idx == sa_idx)) {
+		ast->is_active = is_active;
+		qdf_spin_unlock_bh(&soc->ast_lock);
+		return QDF_STATUS_SUCCESS;
+	}
+
+	qdf_spin_unlock_bh(&soc->ast_lock);
+	return QDF_STATUS_E_FAILURE;
+}
+#endif /* _DP_TXRX_WDS_H_ */
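Note: the aging macros above combine with DP_AST_AGING_TIMER_DEFAULT_MS = 1000 from dp_txrx_wds.c: the timer ticks every second, MEC entries are aged on every tick, and WDS/DA entries only when the tick counter passes DP_WDS_AST_AGING_TIMER_CNT = (120000 / 1000) - 1 = 119, i.e. roughly every 120 s. A standalone replay of the counter logic from dp_ast_aging_timer_fn(), under those values:

#include <stdio.h>

#define DP_AST_AGING_TIMER_DEFAULT_MS		1000
#define DP_WDS_AST_AGING_TIMER_DEFAULT_MS	120000
#define DP_WDS_AST_AGING_TIMER_CNT \
((DP_WDS_AST_AGING_TIMER_DEFAULT_MS / DP_AST_AGING_TIMER_DEFAULT_MS) - 1)

int main(void)
{
	unsigned int cnt = 0, tick;

	for (tick = 1; tick <= 300; tick++) {
		/* same test as dp_ast_aging_timer_fn() */
		if (cnt++ >= DP_WDS_AST_AGING_TIMER_CNT) {
			cnt = 0;
			printf("WDS/DA AST sweep on tick %u (~%u s)\n",
			       tick,
			       tick * DP_AST_AGING_TIMER_DEFAULT_MS / 1000);
		}
	}
	/* prints sweeps at ticks 120 and 240 over a 5-minute window */
	return 0;
}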