
qcacmn: Lithium data path initialization

1. Initial version of Lithium data path initialization
2. HAL changes for REO and WBM setup
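
Illustrative only (not part of this change): a minimal sketch of the intended
attach sequence using the APIs added here, assuming the opaque handles
(osif_soc, hif_handle, htc_handle, osdev, ctrl_pdev, ol_ops) already come from
the driver's existing attach flow:

	/* hypothetical bring-up order for the wifi3.0 data path */
	void *dp_soc, *dp_pdev;

	dp_soc = dp_soc_attach_wifi3(osif_soc, hif_handle, htc_handle,
			osdev, ol_ops);
	if (!dp_soc)
		return;		/* SOC attach failed */

	/* common SOC rings are set up during the first PDEV attach */
	dp_pdev = dp_pdev_attach_wifi3(dp_soc, ctrl_pdev, htc_handle, osdev, 0);
	if (!dp_pdev)
		return;		/* PDEV attach failed */

	/* once FW is up: HTT version request + RXDMA ring setup in the target */
	dp_soc_attach_target_wifi3(dp_soc);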

Change-Id: I52f638faf6e60c2a932acc3c76788bf8946c30db
Karunakar Dasineni 8 years ago
commit 9b814ce7e5
8 changed files with 3967 additions and 0 deletions
  1. dp/wifi3.0/dp_htt.c (+705, -0)
  2. dp/wifi3.0/dp_htt.h (+95, -0)
  3. dp/wifi3.0/dp_internal.h (+65, -0)
  4. dp/wifi3.0/dp_main.c (+1363, -0)
  5. dp/wifi3.0/dp_peer.c (+841, -0)
  6. dp/wifi3.0/dp_types.h (+514, -0)
  7. hal/wifi3.0/hal_rx.c (+272, -0)
  8. hal/wifi3.0/hal_wbm.c (+112, -0)

+ 705 - 0
dp/wifi3.0/dp_htt.c

@@ -0,0 +1,705 @@
+/*
+ * Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <htt.h>
+#include <hal_api.h>
+#include <dp_htt.h>
+
+#define HTT_HTC_PKT_POOL_INIT_SIZE 64
+
+#define HTT_MSG_BUF_SIZE(msg_bytes) \
+	((msg_bytes) + HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING)
+
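+/*
+ * Handlers for target-to-host peer map/unmap and security indication
+ * events; invoked from dp_htt_t2h_msg_handler() below.
+ */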
+void dp_rx_peer_map_handler(void *soc_handle, uint16_t peer_id,
+	uint8_t vdev_id, uint8_t *peer_mac_addr);
+void dp_rx_peer_unmap_handler(void *soc_handle, uint16_t peer_id);
+void dp_rx_sec_ind_handler(void *soc_handle, uint16_t peer_id,
+	enum htt_sec_type sec_type, int is_unicast,
+	u_int32_t *michael_key, u_int32_t *rx_pn);
+
+/*
+ * htt_htc_pkt_alloc() - Allocate HTC packet buffer
+ * @htt_soc:	HTT SOC handle
+ *
+ * Return: Pointer to htc packet buffer
+ */
+static struct dp_htt_htc_pkt *
+htt_htc_pkt_alloc(struct htt_soc *soc)
+{
+	struct dp_htt_htc_pkt_union *pkt = NULL;
+
+	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
+	if (soc->htt_htc_pkt_freelist) {
+		pkt = soc->htt_htc_pkt_freelist;
+		soc->htt_htc_pkt_freelist = soc->htt_htc_pkt_freelist->u.next;
+	}
+	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
+
+	if (pkt == NULL)
+		pkt = qdf_mem_malloc(sizeof(*pkt));
+	return &pkt->u.pkt; /* not actually a dereference */
+}
+
+/*
+ * htt_htc_pkt_free() - Free HTC packet buffer
+ * @htt_soc:	HTT SOC handle
+ */
+static void
+htt_htc_pkt_free(struct htt_soc *soc, struct dp_htt_htc_pkt *pkt)
+{
+	struct dp_htt_htc_pkt_union *u_pkt =
+		(struct dp_htt_htc_pkt_union *)pkt;
+
+	HTT_TX_MUTEX_ACQUIRE(&soc->htt_tx_mutex);
+	u_pkt->u.next = soc->htt_htc_pkt_freelist;
+	soc->htt_htc_pkt_freelist = u_pkt;
+	HTT_TX_MUTEX_RELEASE(&soc->htt_tx_mutex);
+}
+
+/*
+ * htt_htc_pkt_pool_free() - Free HTC packet pool
+ * @htt_soc:	HTT SOC handle
+ */
+static void
+htt_htc_pkt_pool_free(struct htt_soc *soc)
+{
+	struct dp_htt_htc_pkt_union *pkt, *next;
+	pkt = soc->htt_htc_pkt_freelist;
+	while (pkt) {
+		next = pkt->u.next;
+		qdf_mem_free(pkt);
+		pkt = next;
+	}
+	soc->htt_htc_pkt_freelist = NULL;
+}
+
+/*
+ * htt_t2h_mac_addr_deswizzle() - Swap MAC addr bytes if host/target endianness differ
+ * @tgt_mac_addr:	Target MAC
+ * @buffer:		Output buffer
+ */
+static u_int8_t *
+htt_t2h_mac_addr_deswizzle(u_int8_t *tgt_mac_addr, u_int8_t *buffer)
+{
+#ifdef BIG_ENDIAN_HOST
+	/*
+	 * The host endianness is opposite of the target endianness.
+	 * To make u_int32_t elements come out correctly, the target->host
+	 * upload has swizzled the bytes in each u_int32_t element of the
+	 * message.
+	 * For byte-array message fields like the MAC address, this
+	 * upload swizzling puts the bytes in the wrong order, and needs
+	 * to be undone.
+	 */
+	buffer[0] = tgt_mac_addr[3];
+	buffer[1] = tgt_mac_addr[2];
+	buffer[2] = tgt_mac_addr[1];
+	buffer[3] = tgt_mac_addr[0];
+	buffer[4] = tgt_mac_addr[7];
+	buffer[5] = tgt_mac_addr[6];
+	return buffer;
+#else
+	/*
+	 * The host endianness matches the target endianness -
+	 * we can use the mac addr directly from the message buffer.
+	 */
+	return tgt_mac_addr;
+#endif
+}
+
+/*
+ * dp_htt_h2t_send_complete_free_netbuf() - Free completed buffer
+ * @soc:	SOC handle
+ * @status:	Completion status
+ * @netbuf:	HTT buffer
+ */
+static void
+dp_htt_h2t_send_complete_free_netbuf(
+	void *soc, A_STATUS status, qdf_nbuf_t netbuf)
+{
+	qdf_nbuf_free(netbuf);
+}
+
+/*
+ * dp_htt_h2t_send_complete() - H2T completion handler
+ * @context:	Opaque context (HTT SOC handle)
+ * @htc_pkt:	HTC packet
+ */
+void
+dp_htt_h2t_send_complete(void *context, HTC_PACKET *htc_pkt)
+{
+	void (*send_complete_part2)(
+		void *soc, A_STATUS status, qdf_nbuf_t msdu);
+	struct htt_soc *soc =  (struct htt_soc *) context;
+	struct dp_htt_htc_pkt *htt_pkt;
+	qdf_nbuf_t netbuf;
+
+	send_complete_part2 = htc_pkt->pPktContext;
+
+	htt_pkt = container_of(htc_pkt, struct dp_htt_htc_pkt, htc_pkt);
+
+	/* process (free or keep) the netbuf that held the message */
+	netbuf = (qdf_nbuf_t) htc_pkt->pNetBufContext;
+	/*
+	 * adf sendcomplete is required for windows only
+	 */
+	/* qdf_nbuf_set_sendcompleteflag(netbuf, TRUE); */
+	if (send_complete_part2 != NULL) {
+		send_complete_part2(
+			htt_pkt->soc_ctxt, htc_pkt->Status, netbuf);
+	}
+	/* free the htt_htc_pkt / HTC_PACKET object */
+	htt_htc_pkt_free(soc, htt_pkt);
+}
+
+/*
+ * htt_h2t_ver_req_msg() - Send HTT version request message to target
+ * @htt_soc:	HTT SOC handle
+ *
+ * Return: 0 on success; error code on failure
+ */
+static int htt_h2t_ver_req_msg(struct htt_soc *soc)
+{
+	struct dp_htt_htc_pkt *pkt;
+	qdf_nbuf_t msg;
+	uint32_t *msg_word;
+
+	msg = qdf_nbuf_alloc(
+		soc->osdev,
+		HTT_MSG_BUF_SIZE(HTT_VER_REQ_BYTES),
+		/* reserve room for the HTC header */
+		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
+	if (!msg)
+		return QDF_STATUS_E_NOMEM;
+
+	/*
+	 * Set the length of the message.
+	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
+	 * separately during the below call to qdf_nbuf_push_head.
+	 * The contribution from the HTC header is added separately inside HTC.
+	 */
+	if (qdf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES) == NULL) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"%s: Failed to expand tail for HTT_H2T_MSG_TYPE_VERSION_REQ msg\n",
+			__func__);
+		qdf_nbuf_free(msg);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	/* fill in the message contents */
+	msg_word = (u_int32_t *) qdf_nbuf_data(msg);
+
+	/* rewind beyond alignment pad to get to the HTC header reserved area */
+	qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
+
+	*msg_word = 0;
+	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_VERSION_REQ);
+
+	pkt = htt_htc_pkt_alloc(soc);
+	if (!pkt) {
+		qdf_nbuf_free(msg);
+		return QDF_STATUS_E_FAILURE;
+	}
+	pkt->soc_ctxt = NULL; /* not used during send-done callback */
+
+	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
+		dp_htt_h2t_send_complete_free_netbuf, qdf_nbuf_data(msg),
+		qdf_nbuf_len(msg), soc->htc_endpoint,
+		1); /* tag - not relevant here */
+
+	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, msg);
+	htc_send_pkt(soc->htc_soc, &pkt->htc_pkt);
+	return 0;
+}
+
+/*
+ * htt_srng_setup() - Send SRNG setup message to target
+ * @htt_soc:	HTT SOC handle
+ * @pdev_id:	PDEV Id
+ * @hal_srng:	Opaque HAL SRNG pointer
+ * @hal_ring_type:	SRNG ring type
+ *
+ * Return: 0 on success; error code on failure
+ */
+int htt_srng_setup(void *htt_soc, int pdev_id, void *hal_srng,
+	int hal_ring_type)
+{
+	struct htt_soc *soc = (struct htt_soc *)htt_soc;
+	struct dp_htt_htc_pkt *pkt;
+	qdf_nbuf_t htt_msg;
+	uint32_t *msg_word;
+	struct hal_srng_params srng_params;
+	qdf_dma_addr_t hp_addr, tp_addr;
+	uint32_t ring_entry_size =
+		hal_srng_get_entrysize(soc->hal_soc, hal_ring_type);
+	int htt_ring_type, htt_ring_id;
+
+	/* Sizes should be set in 4-byte words */
+	ring_entry_size = ring_entry_size >> 2;
+
+	htt_msg = qdf_nbuf_alloc(soc->osdev,
+		HTT_MSG_BUF_SIZE(HTT_SRING_SETUP_SZ),
+		/* reserve room for the HTC header */
+		HTC_HEADER_LEN + HTC_HDR_ALIGNMENT_PADDING, 4, TRUE);
+	if (!htt_msg)
+		goto fail0;
+
+	hal_get_srng_params(soc->hal_soc, hal_srng, &srng_params);
+	hp_addr = hal_srng_get_hp_addr(soc->hal_soc, hal_srng);
+	tp_addr = hal_srng_get_tp_addr(soc->hal_soc, hal_srng);
+
+	switch (hal_ring_type) {
+	case RXDMA_BUF:
+#if QCA_HOST2FW_RXBUF_RING
+		htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
+		htt_ring_type = HTT_SW_TO_SW_RING;
+#else
+		htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
+		htt_ring_type = HTT_SW_TO_HW_RING;
+#endif
+		break;
+	case RXDMA_MONITOR_BUF:
+		htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
+		htt_ring_type = HTT_SW_TO_HW_RING;
+		break;
+	case RXDMA_MONITOR_STATUS:
+		htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
+		htt_ring_type = HTT_SW_TO_HW_RING;
+		break;
+	case RXDMA_MONITOR_DST:
+		htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
+		htt_ring_type = HTT_HW_TO_SW_RING;
+		break;
+	case RXDMA_DST:
+	default:
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"%s: Ring currently not supported\n", __func__);
+		goto fail1;
+	}
+
+	/*
+	 * Set the length of the message.
+	 * The contribution from the HTC_HDR_ALIGNMENT_PADDING is added
+	 * separately during the below call to qdf_nbuf_push_head.
+	 * The contribution from the HTC header is added separately inside HTC.
+	 */
+	if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"%s: Failed to expand tail for SRING_SETUP msg\n",
+			__func__);
+		goto fail1;
+	}
+
+	msg_word = (uint32_t *)qdf_nbuf_data(htt_msg);
+
+	/* rewind beyond alignment pad to get to the HTC header reserved area */
+	qdf_nbuf_push_head(htt_msg, HTC_HDR_ALIGNMENT_PADDING);
+
+	/* word 0 */
+	*msg_word = 0;
+	HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_SRING_SETUP);
+	HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, pdev_id);
+	HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type);
+	/* TODO: Discuss with FW on changing this to unique ID and using
+	 * htt_ring_type to send the type of ring
+	 */
+	HTT_SRING_SETUP_RING_ID_SET(*msg_word, htt_ring_id);
+
+	/* word 1 */
+	msg_word++;
+	*msg_word = 0;
+	HTT_SRING_SETUP_RING_BASE_ADDR_LO_SET(*msg_word,
+		srng_params.ring_base_paddr & 0xffffffff);
+
+	/* word 2 */
+	msg_word++;
+	*msg_word = 0;
+	HTT_SRING_SETUP_RING_BASE_ADDR_HI_SET(*msg_word,
+		(uint64_t)srng_params.ring_base_paddr >> 32);
+
+	/* word 3 */
+	msg_word++;
+	*msg_word = 0;
+	HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size);
+	HTT_SRING_SETUP_RING_SIZE_SET(*msg_word,
+		(ring_entry_size * srng_params.num_entries));
+	if (htt_ring_type == HTT_SW_TO_HW_RING)
+		HTT_SRING_SETUP_RING_MISC_CFG_LOOPCOUNT_DISABLE_SET(*msg_word,
+		1);
+
+	HTT_SRING_SETUP_RING_MISC_CFG_MSI_SWAP_SET(*msg_word,
+		!!(srng_params.flags & HAL_SRNG_MSI_SWAP));
+	HTT_SRING_SETUP_RING_MISC_CFG_TLV_SWAP_SET(*msg_word,
+		!!(srng_params.flags & HAL_SRNG_DATA_TLV_SWAP));
+	HTT_SRING_SETUP_RING_MISC_CFG_HOST_FW_SWAP_SET(*msg_word,
+		!!(srng_params.flags & HAL_SRNG_RING_PTR_SWAP));
+
+	/* word 4 */
+	msg_word++;
+	*msg_word = 0;
+	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
+		hp_addr & 0xffffffff);
+
+	/* word 5 */
+	msg_word++;
+	*msg_word = 0;
+	HTT_SRING_SETUP_HEAD_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
+		(uint64_t)hp_addr >> 32);
+
+	/* word 6 */
+	msg_word++;
+	*msg_word = 0;
+	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_LO_SET(*msg_word,
+		tp_addr & 0xffffffff);
+
+	/* word 7 */
+	msg_word++;
+	*msg_word = 0;
+	HTT_SRING_SETUP_TAIL_OFFSET32_REMOTE_BASE_ADDR_HI_SET(*msg_word,
+		(uint64_t)tp_addr >> 32);
+
+	/* word 8 */
+	msg_word++;
+	*msg_word = 0;
+	HTT_SRING_SETUP_RING_MSI_ADDR_LO_SET(*msg_word,
+		srng_params.msi_addr & 0xffffffff);
+
+	/* word 9 */
+	msg_word++;
+	*msg_word = 0;
+	HTT_SRING_SETUP_RING_MSI_ADDR_HI_SET(*msg_word,
+		(uint64_t)(srng_params.msi_addr) >> 32);
+
+	/* word 10 */
+	msg_word++;
+	*msg_word = 0;
+	HTT_SRING_SETUP_RING_MSI_DATA_SET(*msg_word,
+		srng_params.msi_data);
+
+	/* word 11 */
+	msg_word++;
+	*msg_word = 0;
+	HTT_SRING_SETUP_INTR_BATCH_COUNTER_TH_SET(*msg_word,
+		srng_params.intr_batch_cntr_thres_entries *
+		ring_entry_size);
+	HTT_SRING_SETUP_INTR_TIMER_TH_SET(*msg_word,
+		srng_params.intr_timer_thres_us >> 3);
+
+	/* word 12 */
+	msg_word++;
+	*msg_word = 0;
+	if (srng_params.flags & HAL_SRNG_LOW_THRES_INTR_ENABLE) {
+		/* TODO: Setting low threshold to 1/8th of ring size - see
+		 * if this needs to be configurable
+		 */
+		HTT_SRING_SETUP_INTR_LOW_TH_SET(*msg_word,
+			srng_params.low_threshold);
+	}
+	/* "response_required" field should be set if a HTT response message is
+	 * required after setting up the ring.
+	 */
+	pkt = htt_htc_pkt_alloc(soc);
+	if (!pkt)
+		goto fail1;
+
+	pkt->soc_ctxt = NULL; /* not used during send-done callback */
+
+	SET_HTC_PACKET_INFO_TX(
+		&pkt->htc_pkt,
+		dp_htt_h2t_send_complete_free_netbuf,
+		qdf_nbuf_data(htt_msg),
+		qdf_nbuf_len(htt_msg),
+		soc->htc_endpoint,
+		1); /* tag - not relevant here */
+
+	SET_HTC_PACKET_NET_BUF_CONTEXT(&pkt->htc_pkt, htt_msg);
+	htc_send_pkt(soc->htc_soc, &pkt->htc_pkt);
+	return 0;
+
+fail1:
+	qdf_nbuf_free(htt_msg);
+fail0:
+	return QDF_STATUS_E_FAILURE;
+}
+
+/*
+ * htt_soc_attach_target() - SOC level HTT setup
+ * @htt_soc:	HTT SOC handle
+ *
+ * Return: 0 on success; error code on failure
+ */
+int htt_soc_attach_target(void *htt_soc)
+{
+	struct htt_soc *soc = (struct htt_soc *)htt_soc;
+
+	return htt_h2t_ver_req_msg(soc);
+}
+
+
+/*
+ * dp_htt_t2h_msg_handler() - Generic Target to host Msg/event handler
+ * @context:	Opaque context (HTT SOC handle)
+ * @pkt:	HTC packet
+ */
+static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
+{
+	struct htt_soc *soc = (struct htt_soc *) context;
+	qdf_nbuf_t htt_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
+	u_int32_t *msg_word;
+	enum htt_t2h_msg_type msg_type;
+
+	/* check for successful message reception */
+	if (pkt->Status != A_OK) {
+		if (pkt->Status != A_ECANCELED)
+			soc->stats.htc_err_cnt++;
+
+		qdf_nbuf_free(htt_t2h_msg);
+		return;
+	}
+
+	/* TODO: Check if we should pop the HTC/HTT header alignment padding */
+
+	msg_word = (u_int32_t *) qdf_nbuf_data(htt_t2h_msg);
+	msg_type = HTT_T2H_MSG_TYPE_GET(*msg_word);
+	switch (msg_type) {
+	case HTT_T2H_MSG_TYPE_PEER_MAP:
+		{
+			u_int8_t mac_addr_deswizzle_buf[HTT_MAC_ADDR_LEN];
+			u_int8_t *peer_mac_addr;
+			u_int16_t peer_id;
+			u_int8_t vdev_id;
+
+			peer_id = HTT_RX_PEER_MAP_PEER_ID_GET(*msg_word);
+			vdev_id = HTT_RX_PEER_MAP_VDEV_ID_GET(*msg_word);
+			peer_mac_addr = htt_t2h_mac_addr_deswizzle(
+				(u_int8_t *) (msg_word+1),
+				&mac_addr_deswizzle_buf[0]);
+
+			dp_rx_peer_map_handler(
+				soc->dp_soc, peer_id, vdev_id, peer_mac_addr);
+			break;
+		}
+	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
+		{
+			u_int16_t peer_id;
+			peer_id = HTT_RX_PEER_UNMAP_PEER_ID_GET(*msg_word);
+
+			dp_rx_peer_unmap_handler(soc->dp_soc, peer_id);
+			break;
+		}
+	case HTT_T2H_MSG_TYPE_SEC_IND:
+		{
+			u_int16_t peer_id;
+			enum htt_sec_type sec_type;
+			int is_unicast;
+
+			peer_id = HTT_SEC_IND_PEER_ID_GET(*msg_word);
+			sec_type = HTT_SEC_IND_SEC_TYPE_GET(*msg_word);
+			is_unicast = HTT_SEC_IND_UNICAST_GET(*msg_word);
+			/* point to the first part of the Michael key */
+			msg_word++;
+			dp_rx_sec_ind_handler(
+				soc->dp_soc, peer_id, sec_type, is_unicast,
+				msg_word, msg_word + 2);
+			break;
+		}
+#ifdef notyet
+#ifndef REMOVE_PKT_LOG
+	case HTT_T2H_MSG_TYPE_PKTLOG:
+		{
+			u_int32_t *pl_hdr;
+			pl_hdr = (msg_word + 1);
+			wdi_event_handler(WDI_EVENT_OFFLOAD_ALL, soc->dp_soc,
+				pl_hdr, HTT_INVALID_PEER, WDI_NO_VAL);
+			break;
+		}
+#endif
+#endif /* notyet */
+	case HTT_T2H_MSG_TYPE_VERSION_CONF:
+		{
+			soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
+			soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
+			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
+				"target uses HTT version %d.%d; host uses %d.%d\n",
+				soc->tgt_ver.major, soc->tgt_ver.minor,
+				HTT_CURRENT_VERSION_MAJOR,
+				HTT_CURRENT_VERSION_MINOR);
+			if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
+				QDF_TRACE(QDF_MODULE_ID_TXRX,
+					QDF_TRACE_LEVEL_ERROR,
+					"*** Incompatible host/target HTT versions!\n");
+			}
+			/* abort if the target is incompatible with the host */
+			qdf_assert(soc->tgt_ver.major ==
+				HTT_CURRENT_VERSION_MAJOR);
+			if (soc->tgt_ver.minor != HTT_CURRENT_VERSION_MINOR) {
+				QDF_TRACE(QDF_MODULE_ID_TXRX,
+					QDF_TRACE_LEVEL_WARN,
+					"*** Warning: host/target HTT versions"
+					" are different, though compatible!\n");
+			}
+			break;
+		}
+	default:
+		break;
+	};
+
+	/* Free the indication buffer */
+	qdf_nbuf_free(htt_t2h_msg);
+}
+
+/*
+ * dp_htt_h2t_full() - Send full handler (called from HTC)
+ * @context:	Opaque context (HTT SOC handle)
+ * @pkt:	HTC packet
+ *
+ * Return: HTC_SEND_FULL_ACTION
+ */
+static HTC_SEND_FULL_ACTION
+dp_htt_h2t_full(void *context, HTC_PACKET *pkt)
+{
+	return HTC_SEND_FULL_KEEP;
+}
+
+/*
+ * htt_htc_soc_attach() - Register SOC level HTT instance with HTC
+ * @htt_soc:	HTT SOC handle
+ *
+ * Return: 0 on success; error code on failure
+ */
+static int
+htt_htc_soc_attach(struct htt_soc *soc)
+{
+	HTC_SERVICE_CONNECT_REQ connect;
+	HTC_SERVICE_CONNECT_RESP response;
+	A_STATUS status;
+
+	qdf_mem_set(&connect, sizeof(connect), 0);
+	qdf_mem_set(&response, sizeof(response), 0);
+
+	connect.pMetaData = NULL;
+	connect.MetaDataLength = 0;
+	connect.EpCallbacks.pContext = soc;
+	connect.EpCallbacks.EpTxComplete = dp_htt_h2t_send_complete;
+	connect.EpCallbacks.EpTxCompleteMultiple = NULL;
+	connect.EpCallbacks.EpRecv = dp_htt_t2h_msg_handler;
+
+	/* rx buffers currently are provided by HIF, not by EpRecvRefill */
+	connect.EpCallbacks.EpRecvRefill = NULL;
+
+	/* N/A, fill is done by HIF */
+	connect.EpCallbacks.RecvRefillWaterMark = 1;
+
+	connect.EpCallbacks.EpSendFull = dp_htt_h2t_full;
+	/*
+	 * Specify how deep to let a queue get before htc_send_pkt will
+	 * call the EpSendFull function due to excessive send queue depth.
+	 */
+	connect.MaxSendQueueDepth = DP_HTT_MAX_SEND_QUEUE_DEPTH;
+
+	/* disable flow control for HTT data message service */
+	connect.ConnectionFlags |= HTC_CONNECT_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
+
+	/* connect to the HTT data message service */
+	connect.service_id = HTT_DATA_MSG_SVC;
+
+	status = htc_connect_service(soc->htc_soc, &connect, &response);
+
+	if (status != A_OK)
+		return QDF_STATUS_E_FAILURE;
+
+	soc->htc_endpoint = response.Endpoint;
+
+	return 0; /* success */
+}
+
+/*
+ * htt_soc_attach() - SOC level HTT initialization
+ * @dp_soc:	Opaque Data path SOC handle
+ * @osif_soc:	Opaque OSIF SOC handle
+ * @htc_soc:	SOC level HTC handle
+ * @hal_soc:	Opaque HAL SOC handle
+ * @osdev:	QDF device
+ *
+ * Return: HTT handle on success; NULL on failure
+ */
+void *
+htt_soc_attach(void *dp_soc, void *osif_soc, HTC_HANDLE htc_soc,
+	void *hal_soc, qdf_device_t osdev)
+{
+	struct htt_soc *soc;
+	int i;
+
+	soc = qdf_mem_malloc(sizeof(*soc));
+
+	if (!soc)
+		goto fail1;
+
+	soc->osdev = osdev;
+	soc->osif_soc = osif_soc;
+	soc->dp_soc = dp_soc;
+	soc->htc_soc = htc_soc;
+	soc->hal_soc = hal_soc;
+
+	/* TODO: See if any NSS related context is required in htt_soc */
+
+	soc->htt_htc_pkt_freelist = NULL;
+
+	if (htt_htc_soc_attach(soc))
+		goto fail2;
+
+	/* TODO: See if any Rx data specific initialization is required. For
+	 * MCL use cases, the data will be received as a single packet and
+	 * should not require any descriptor or reorder handling
+	 */
+
+	HTT_TX_MUTEX_INIT(&soc->htt_tx_mutex);
+
+	/* pre-allocate some HTC_PACKET objects */
+	for (i = 0; i < HTT_HTC_PKT_POOL_INIT_SIZE; i++) {
+		struct dp_htt_htc_pkt_union *pkt;
+		pkt = qdf_mem_malloc(sizeof(*pkt));
+		if (!pkt)
+			break;
+
+		htt_htc_pkt_free(soc, &pkt->u.pkt);
+	}
+
+	return soc;
+
+fail2:
+	qdf_mem_free(soc);
+
+fail1:
+	return NULL;
+}
+
+
+/*
+ * htt_soc_detach() - Detach SOC level HTT
+ * @htt_soc:	HTT SOC handle
+ */
+void
+htt_soc_detach(void *htt_soc)
+{
+	struct htt_soc *soc = (struct htt_soc *)htt_soc;
+
+	htt_htc_pkt_pool_free(soc);
+	HTT_TX_MUTEX_DESTROY(&soc->htt_tx_mutex);
+	qdf_mem_free(soc);
+}
+

+ 95 - 0
dp/wifi3.0/dp_htt.h

@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _DP_HTT_H_
+#define _DP_HTT_H_
+
+#include <qdf_types.h>
+#include <qdf_lock.h>
+#include <qdf_nbuf.h>
+#include <htc_api.h>
+
+#define HTT_TX_MUTEX_TYPE qdf_spinlock_t
+
+#define HTT_TX_MUTEX_INIT(_mutex)				\
+	qdf_spinlock_create(_mutex)
+
+#define HTT_TX_MUTEX_ACQUIRE(_mutex)			\
+	qdf_spin_lock_bh(_mutex)
+
+#define HTT_TX_MUTEX_RELEASE(_mutex)			\
+	qdf_spin_unlock_bh(_mutex)
+
+#define HTT_TX_MUTEX_DESTROY(_mutex)			\
+	qdf_spinlock_destroy(_mutex)
+
+#define DP_HTT_MAX_SEND_QUEUE_DEPTH 64
+
+#ifndef HTT_MAC_ADDR_LEN
+#define HTT_MAC_ADDR_LEN 6
+#endif
+
+struct dp_htt_htc_pkt {
+	void *soc_ctxt;
+	qdf_dma_addr_t nbuf_paddr;
+	HTC_PACKET htc_pkt;
+};
+
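+/*
+ * Free HTC packets are chained through u.next on soc->htt_htc_pkt_freelist;
+ * the same memory is handed out again as u.pkt by htt_htc_pkt_alloc().
+ */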
+struct dp_htt_htc_pkt_union {
+	union {
+		struct dp_htt_htc_pkt pkt;
+		struct dp_htt_htc_pkt_union *next;
+	} u;
+};
+
+struct htt_soc {
+	void *osif_soc;
+	void *dp_soc;
+	void *hal_soc;
+	HTC_HANDLE htc_soc;
+	qdf_device_t osdev;
+	HTC_ENDPOINT_ID htc_endpoint;
+	struct dp_htt_htc_pkt_union *htt_htc_pkt_freelist;
+	struct {
+		u_int8_t major;
+		u_int8_t minor;
+	} tgt_ver;
+	struct {
+		u_int8_t major;
+		u_int8_t minor;
+	} wifi_ip_ver;
+
+	struct {
+		int htc_err_cnt;
+	} stats;
+
+	HTT_TX_MUTEX_TYPE htt_tx_mutex;
+};
+
+void *
+htt_soc_attach(void *txrx_soc, void *osif_soc, HTC_HANDLE htc_soc,
+	void *hal_soc, qdf_device_t osdev);
+
+void htt_soc_detach(void *soc);
+
+int htt_srng_setup(void *htt_soc, int pdev_id, void *hal_srng,
+	int hal_ring_type);
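+/*
+ * Example (see dp_soc_attach_target_wifi3() in dp_main.c):
+ *	htt_srng_setup(soc->htt_handle, pdev_id,
+ *		pdev->rxdma_buf_ring.hal_srng, RXDMA_BUF);
+ */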
+
+int htt_soc_attach_target(void *htt_soc);
+
+#endif /* _DP_HTT_H_ */

+ 65 - 0
dp/wifi3.0/dp_internal.h

@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _DP_INTERNAL_H_
+#define _DP_INTERNAL_H_
+
+#if DP_PRINT_ENABLE
+#include <stdarg.h>       /* va_list */
+#include <qdf_types.h> /* qdf_vprint */
+
+enum {
+	/* FATAL_ERR - print only irrecoverable error messages */
+	DP_PRINT_LEVEL_FATAL_ERR,
+
+	/* ERR - include non-fatal err messages */
+	DP_PRINT_LEVEL_ERR,
+
+	/* WARN - include warnings */
+	DP_PRINT_LEVEL_WARN,
+
+	/* INFO1 - include fundamental, infrequent events */
+	DP_PRINT_LEVEL_INFO1,
+
+	/* INFO2 - include non-fundamental but infrequent events */
+	DP_PRINT_LEVEL_INFO2,
+};
+
+
+#define dp_print(level, fmt, ...) do { \
+	if (level <= g_txrx_print_level) \
+		qdf_print(fmt, ## __VA_ARGS__); \
+} while (0)
+#define DP_PRINT(level, fmt, ...) do { \
+	dp_print(level, "DP: " fmt, ## __VA_ARGS__); \
+} while (0)
+#else
+#define DP_PRINT(level, fmt, ...)
+#endif /* DP_PRINT_ENABLE */
+
+extern int dp_peer_find_attach(struct dp_soc *soc);
+extern void dp_peer_find_detach(struct dp_soc *soc);
+extern void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer);
+extern void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer);
+extern void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer);
+extern void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer);
+extern void dp_peer_unref_delete(void *peer_handle);
+extern void dp_rx_discard(struct dp_vdev *vdev, struct dp_peer *peer,
+	unsigned tid, qdf_nbuf_t msdu_list);
+
+#endif /* #ifndef _DP_INTERNAL_H_ */

+ 1363 - 0
dp/wifi3.0/dp_main.c

@@ -0,0 +1,1363 @@
+/*
+ * Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <qdf_types.h>
+#include <qdf_lock.h>
+#include <hal_api.h>
+#include <hif.h>
+#include <htt.h>
+#include <wdi_event.h>
+#include <queue.h>
+#include "dp_htt.h"
+#include "dp_types.h"
+#include "dp_internal.h"
+
+/**
+ * dp_srng_setup() - Internal function to setup SRNG rings used by data path
+ */
+static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
+	int ring_type, int ring_num, int pdev_id, uint32_t num_entries)
+{
+	void *hal_soc = soc->hal_soc;
+	uint32_t entry_size = hal_srng_get_entrysize(hal_soc, ring_type);
+	/* TODO: See if we should get align size from hal */
+	uint32_t ring_base_align = 8;
+	struct hal_srng_params ring_params;
+
+
+	srng->hal_srng = NULL;
+	srng->alloc_size = (num_entries * entry_size) + ring_base_align - 1;
+	srng->base_vaddr_unaligned = qdf_mem_alloc_consistent(
+		soc->osdev, NULL, srng->alloc_size,
+		&(srng->base_paddr_unaligned));
+
+	if (!srng->base_vaddr_unaligned) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"%s: alloc failed - ring_type: %d, ring_num %d\n",
+			__func__, ring_type, ring_num);
+		return QDF_STATUS_E_NOMEM;
+	}
+
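+	/*
+	 * alloc_size reserves (ring_base_align - 1) extra bytes so the ring
+	 * base can be offset within the allocation for alignment; the
+	 * physical base address is offset by the same amount as the virtual
+	 * base address.
+	 */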
+	ring_params.ring_base_vaddr = srng->base_vaddr_unaligned +
+		((unsigned long)srng->base_vaddr_unaligned % ring_base_align);
+	ring_params.ring_base_paddr = srng->base_paddr_unaligned +
+		((unsigned long)(ring_params.ring_base_vaddr) -
+		(unsigned long)srng->base_vaddr_unaligned);
+	ring_params.num_entries = num_entries;
+
+	/* TODO: Check MSI support and get MSI settings from HIF layer */
+	ring_params.msi_data = 0;
+	ring_params.msi_addr = 0;
+
+	/* TODO: Setup interrupt timer and batch counter thresholds for
+	 * interrupt mitigation based on ring type
+	 */
+	ring_params.intr_timer_thres_us = 8;
+	ring_params.intr_batch_cntr_thres_entries = 1;
+
+	/* TODO: Currently hal layer takes care of endianness related settings.
+	 * See if these settings need to passed from DP layer
+	 */
+	ring_params.flags = 0;
+
+	/* Enable low threshold interrupts for rx buffer rings (regular and
+	 * monitor buffer rings.
+	 * TODO: See if this is required for any other ring
+	 */
+	if ((ring_type == RXDMA_BUF) || (ring_type == RXDMA_MONITOR_BUF)) {
+		/* TODO: Setting low threshold to 1/8th of ring size
+		 * see if this needs to be configurable
+		 */
+		ring_params.low_threshold = num_entries >> 3;
+		ring_params.flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
+	}
+
+	srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
+		pdev_id, &ring_params);
+	return 0;
+}
+
+/**
+ * dp_srng_cleanup - Internal function to cleanup SRNG rings used by data path
+ * Any buffers allocated and attached to ring entries are expected to be freed
+ * before calling this function.
+ */
+static void dp_srng_cleanup(struct dp_soc *soc, struct dp_srng *srng,
+	int ring_type, int ring_num)
+{
+	if (!srng->hal_srng) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"%s: Ring type: %d, num:%d not setup\n",
+			__func__, ring_type, ring_num);
+		return;
+	}
+
+	hal_srng_cleanup(soc->hal_soc, srng->hal_srng);
+
+	qdf_mem_free_consistent(soc->osdev, NULL,
+				srng->alloc_size,
+				srng->base_vaddr_unaligned,
+				srng->base_paddr_unaligned, 0);
+}
+
+/* TODO: Need this interface from HIF */
+void *hif_get_hal_handle(void *hif_handle);
+
+/*
+ * dp_soc_attach_wifi3() - Attach txrx SOC
+ * @osif_soc:		Opaque SOC handle from OSIF/HDD
+ * @hif_handle:	Opaque HIF handle
+ * @htc_handle:	Opaque HTC handle
+ * @qdf_osdev:	QDF device
+ * @ol_ops:	Offload (OL_IF) callback table
+ *
+ * Return: DP SOC handle on success, NULL on failure
+ */
+void *dp_soc_attach_wifi3(void *osif_soc, void *hif_handle,
+	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev,
+	struct ol_if_ops *ol_ops)
+{
+	struct dp_soc *soc = qdf_mem_malloc(sizeof(*soc));
+
+	if (!soc) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"%s: DP SOC memory allocation failed\n", __func__);
+		goto fail0;
+	}
+
+	soc->osif_soc = osif_soc;
+	soc->osdev = qdf_osdev;
+	soc->ol_ops = ol_ops;
+	soc->hif_handle = hif_handle;
+	soc->hal_soc = hif_get_hal_handle(hif_handle);
+	soc->htt_handle = htt_soc_attach(soc, osif_soc, htc_handle,
+		soc->hal_soc, qdf_osdev);
+	if (soc->htt_handle == NULL) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"%s: HTT attach failed\n", __func__);
+		goto fail1;
+	}
+
+#ifdef notyet
+	if (wdi_event_attach(soc)) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"%s: WDI event attach failed\n", __func__);
+		goto fail2;
+	}
+#endif
+
+	return (void *)soc;
+
+#ifdef notyet
+fail2:
+	htt_soc_detach(soc->htt_handle);
+#endif
+fail1:
+	qdf_mem_free(soc);
+fail0:
+	return NULL;
+}
+
+/* Temporary definitions to be moved to wlan_cfg */
+static inline uint32_t wlan_cfg_get_max_clients(void *wlan_cfg_ctx)
+{
+	return 512;
+}
+
+static inline uint32_t wlan_cfg_max_alloc_size(void *wlan_cfg_ctx)
+{
+	/* Change this to a lower value to enforce scattered idle list mode */
+	return 32 << 20;
+}
+
+static inline int wlan_cfg_per_pdev_tx_ring(void *wlan_cfg_ctx)
+{
+	return 1;
+}
+
+static inline int wlan_cfg_num_tcl_data_rings(void *wlan_cfg_ctx)
+{
+	return 1;
+}
+
+static inline int wlan_cfg_per_pdev_rx_ring(void *wlan_cfg_ctx)
+{
+	return 1;
+}
+
+static inline int wlan_cfg_num_reo_dest_rings(void *wlan_cfg_ctx)
+{
+	return 4;
+}
+
+static inline int wlan_cfg_pkt_type(void *wlan_cfg_ctx)
+{
+	return htt_pkt_type_ethernet;
+}
+
+#define AVG_MAX_MPDUS_PER_TID 128
+#define AVG_TIDS_PER_CLIENT 2
+#define AVG_FLOWS_PER_TID 2
+#define AVG_MSDUS_PER_FLOW 128
+#define AVG_MSDUS_PER_MPDU 4
+
+/*
+ * Allocate and setup link descriptor pool that will be used by HW for
+ * various link and queue descriptors and managed by WBM
+ */
+static int dp_hw_link_desc_pool_setup(struct dp_soc *soc)
+{
+	int link_desc_size = hal_get_link_desc_size(soc->hal_soc);
+	int link_desc_align = hal_get_link_desc_align(soc->hal_soc);
+	uint32_t max_clients = wlan_cfg_get_max_clients(soc->wlan_cfg_ctx);
+	uint32_t num_mpdus_per_link_desc =
+		hal_num_mpdus_per_link_desc(soc->hal_soc);
+	uint32_t num_msdus_per_link_desc =
+		hal_num_msdus_per_link_desc(soc->hal_soc);
+	uint32_t num_mpdu_links_per_queue_desc =
+		hal_num_mpdu_links_per_queue_desc(soc->hal_soc);
+	uint32_t max_alloc_size = wlan_cfg_max_alloc_size(soc->wlan_cfg_ctx);
+	uint32_t total_link_descs, total_mem_size;
+	uint32_t num_mpdu_link_descs, num_mpdu_queue_descs;
+	uint32_t num_tx_msdu_link_descs, num_rx_msdu_link_descs;
+	uint32_t num_link_desc_banks;
+	uint32_t last_bank_size = 0;
+	uint32_t entry_size, num_entries;
+	int i;
+
+	/* Only Tx queue descriptors are allocated from the common link
+	 * descriptor pool. Rx queue descriptors (REO queue extension
+	 * descriptors) are not included here because they are expected to be
+	 * allocated contiguously with the REO queue descriptors
+	 */
+	num_mpdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
+		AVG_MAX_MPDUS_PER_TID) / num_mpdus_per_link_desc;
+
+	num_mpdu_queue_descs = num_mpdu_link_descs /
+		num_mpdu_links_per_queue_desc;
+
+	num_tx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
+		AVG_FLOWS_PER_TID * AVG_MSDUS_PER_FLOW) /
+		num_msdus_per_link_desc;
+
+	num_rx_msdu_link_descs = (max_clients * AVG_TIDS_PER_CLIENT *
+		AVG_MAX_MPDUS_PER_TID * AVG_MSDUS_PER_MPDU) / 6;
+
+	num_entries = num_mpdu_link_descs + num_mpdu_queue_descs +
+		num_tx_msdu_link_descs + num_rx_msdu_link_descs;
+
+	/* Round up to power of 2 */
+	total_link_descs = 1;
+	while (total_link_descs < num_entries)
+		total_link_descs <<= 1;
+
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
+		"%s: total_link_descs: %u, link_desc_size: %d\n",
+		__func__, total_link_descs, link_desc_size);
+	total_mem_size =  total_link_descs * link_desc_size;
+
+	total_mem_size += link_desc_align;
+
+	if (total_mem_size <= max_alloc_size) {
+		num_link_desc_banks = 0;
+		last_bank_size = total_mem_size;
+	} else {
+		num_link_desc_banks = (total_mem_size) /
+			(max_alloc_size - link_desc_align);
+		last_bank_size = total_mem_size %
+			(max_alloc_size - link_desc_align);
+	}
+
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
+		"%s: total_mem_size: %d, num_link_desc_banks: %u\n",
+		__func__, total_mem_size, num_link_desc_banks);
+
+	for (i = 0; i < num_link_desc_banks; i++) {
+		soc->link_desc_banks[i].base_vaddr_unaligned =
+			qdf_mem_alloc_consistent(soc->osdev, NULL,
+			max_alloc_size,
+			&(soc->link_desc_banks[i].base_paddr_unaligned));
+		soc->link_desc_banks[i].size = max_alloc_size;
+
+		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)(
+			soc->link_desc_banks[i].base_vaddr_unaligned) +
+			((unsigned long)(
+			soc->link_desc_banks[i].base_vaddr_unaligned) %
+			link_desc_align));
+
+		soc->link_desc_banks[i].base_paddr = (unsigned long)(
+			soc->link_desc_banks[i].base_paddr_unaligned) +
+			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
+			(unsigned long)(
+			soc->link_desc_banks[i].base_vaddr_unaligned));
+
+		if (!soc->link_desc_banks[i].base_vaddr_unaligned) {
+			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+				"%s: Link descriptor memory alloc failed\n",
+				__func__);
+			goto fail;
+		}
+	}
+
+	if (last_bank_size) {
+		/* Allocate last bank in case total memory required is not an
+		 * exact multiple of max_alloc_size
+		 */
+		soc->link_desc_banks[i].base_vaddr_unaligned =
+			qdf_mem_alloc_consistent(soc->osdev, NULL,
+			last_bank_size,
+			&(soc->link_desc_banks[i].base_paddr_unaligned));
+		soc->link_desc_banks[i].size = last_bank_size;
+
+		soc->link_desc_banks[i].base_vaddr = (void *)((unsigned long)
+			(soc->link_desc_banks[i].base_vaddr_unaligned) +
+			((unsigned long)(
+			soc->link_desc_banks[i].base_vaddr_unaligned) %
+			link_desc_align));
+
+		soc->link_desc_banks[i].base_paddr =
+			(unsigned long)(
+			soc->link_desc_banks[i].base_paddr_unaligned) +
+			((unsigned long)(soc->link_desc_banks[i].base_vaddr) -
+			(unsigned long)(
+			soc->link_desc_banks[i].base_vaddr_unaligned));
+	}
+
+
+	/* Allocate and setup link descriptor idle list for HW internal use */
+	entry_size = hal_srng_get_entrysize(soc->hal_soc, WBM_IDLE_LINK);
+	total_mem_size = entry_size * total_link_descs;
+
+	if (total_mem_size <= max_alloc_size) {
+		void *desc;
+
+		if (dp_srng_setup(soc, &soc->wbm_idle_link_ring,
+			WBM_IDLE_LINK, 0, 0, total_link_descs)) {
+			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+				"%s: Link desc idle ring setup failed\n",
+				__func__);
+			goto fail;
+		}
+
+		hal_srng_access_start_unlocked(soc->hal_soc,
+			soc->wbm_idle_link_ring.hal_srng);
+
+		for (i = 0; i < MAX_LINK_DESC_BANKS &&
+			soc->link_desc_banks[i].base_paddr; i++) {
+			uint32_t num_entries = (soc->link_desc_banks[i].size -
+				((unsigned long)(
+				soc->link_desc_banks[i].base_vaddr) -
+				(unsigned long)(
+				soc->link_desc_banks[i].base_vaddr_unaligned)))
+				/ link_desc_size;
+			unsigned long paddr = (unsigned long)(
+				soc->link_desc_banks[i].base_paddr);
+
+			while (num_entries && (desc = hal_srng_src_get_next(
+				soc->hal_soc,
+				soc->wbm_idle_link_ring.hal_srng))) {
+				hal_set_link_desc_addr(desc, i, paddr);
+				num_entries--;
+				paddr += link_desc_size;
+			}
+		}
+		hal_srng_access_end_unlocked(soc->hal_soc,
+			soc->wbm_idle_link_ring.hal_srng);
+	} else {
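+		/*
+		 * Idle link descriptor list does not fit within a single
+		 * max_alloc_size allocation; provide it to HW through a set
+		 * of scatter buffers registered via hal_setup_link_idle_list()
+		 * instead of filling the WBM idle link ring directly.
+		 */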
+		uint32_t num_scatter_bufs;
+		uint32_t num_entries_per_buf;
+		uint32_t rem_entries;
+		uint8_t *scatter_buf_ptr;
+		uint16_t scatter_buf_num;
+
+		soc->wbm_idle_scatter_buf_size =
+			hal_idle_list_scatter_buf_size(soc->hal_soc);
+		num_entries_per_buf = hal_idle_scatter_buf_num_entries(
+			soc->hal_soc, soc->wbm_idle_scatter_buf_size);
+		num_scatter_bufs = (total_mem_size /
+			soc->wbm_idle_scatter_buf_size) + ((total_mem_size %
+			soc->wbm_idle_scatter_buf_size) ? 1 : 0);
+
+		for (i = 0; i < num_scatter_bufs; i++) {
+			soc->wbm_idle_scatter_buf_base_vaddr[i] =
+				qdf_mem_alloc_consistent(soc->osdev, NULL,
+				soc->wbm_idle_scatter_buf_size,
+				&(soc->wbm_idle_scatter_buf_base_paddr[i]));
+			if (soc->wbm_idle_scatter_buf_base_vaddr[i] == NULL) {
+				QDF_TRACE(QDF_MODULE_ID_TXRX,
+					QDF_TRACE_LEVEL_ERROR,
+					"%s:Scatter list memory alloc failed\n",
+					__func__);
+				goto fail;
+			}
+		}
+
+		/* Populate idle list scatter buffers with link descriptor
+		 * pointers
+		 */
+		scatter_buf_num = 0;
+		scatter_buf_ptr = (uint8_t *)(
+			soc->wbm_idle_scatter_buf_base_vaddr[scatter_buf_num]);
+		rem_entries = num_entries_per_buf;
+
+		for (i = 0; i < MAX_LINK_DESC_BANKS &&
+			soc->link_desc_banks[i].base_paddr; i++) {
+			uint32_t num_link_descs =
+				(soc->link_desc_banks[i].size -
+				((unsigned long)(
+				soc->link_desc_banks[i].base_vaddr) -
+				(unsigned long)(
+				soc->link_desc_banks[i].base_vaddr_unaligned))) /
+				link_desc_size;
+			unsigned long paddr = (unsigned long)(
+				soc->link_desc_banks[i].base_paddr);
+			void *desc = NULL;
+
+			while (num_link_descs && (desc =
+				hal_srng_src_get_next(soc->hal_soc,
+				soc->wbm_idle_link_ring.hal_srng))) {
+				hal_set_link_desc_addr((void *)scatter_buf_ptr,
+					i, paddr);
+				num_link_descs--;
+				paddr += link_desc_size;
+				if (rem_entries) {
+					rem_entries--;
+					scatter_buf_ptr += link_desc_size;
+				} else {
+					rem_entries = num_entries_per_buf;
+					scatter_buf_num++;
+					scatter_buf_ptr = (uint8_t *)(
+						soc->wbm_idle_scatter_buf_base_vaddr[
+						scatter_buf_num]);
+				}
+			}
+		}
+		/* Setup link descriptor idle list in HW */
+		hal_setup_link_idle_list(soc->hal_soc,
+			soc->wbm_idle_scatter_buf_base_paddr,
+			soc->wbm_idle_scatter_buf_base_vaddr,
+			num_scatter_bufs, soc->wbm_idle_scatter_buf_size,
+			(uint32_t)(scatter_buf_ptr - (uint8_t *)(
+			soc->wbm_idle_scatter_buf_base_vaddr[
+			scatter_buf_num])));
+	}
+	return 0;
+
+fail:
+	if (soc->wbm_idle_link_ring.hal_srng) {
+		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
+			WBM_IDLE_LINK, 0);
+	}
+
+	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
+		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
+			qdf_mem_free_consistent(soc->osdev, NULL,
+				soc->wbm_idle_scatter_buf_size,
+				soc->wbm_idle_scatter_buf_base_vaddr[i],
+				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
+		}
+	}
+
+	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
+		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
+			qdf_mem_free_consistent(soc->osdev, NULL,
+				soc->link_desc_banks[i].size,
+				soc->link_desc_banks[i].base_vaddr_unaligned,
+				soc->link_desc_banks[i].base_paddr_unaligned,
+				0);
+		}
+	}
+	return QDF_STATUS_E_FAILURE;
+}
+
+/*
+ * Free link descriptor pool that was set up for HW use
+ */
+static void dp_hw_link_desc_pool_cleanup(struct dp_soc *soc)
+{
+	int i;
+
+	if (soc->wbm_idle_link_ring.hal_srng) {
+		dp_srng_cleanup(soc, &soc->wbm_idle_link_ring,
+			WBM_IDLE_LINK, 0);
+	}
+
+	for (i = 0; i < MAX_IDLE_SCATTER_BUFS; i++) {
+		if (soc->wbm_idle_scatter_buf_base_vaddr[i]) {
+			qdf_mem_free_consistent(soc->osdev, NULL,
+				soc->wbm_idle_scatter_buf_size,
+				soc->wbm_idle_scatter_buf_base_vaddr[i],
+				soc->wbm_idle_scatter_buf_base_paddr[i], 0);
+		}
+	}
+
+	for (i = 0; i < MAX_LINK_DESC_BANKS; i++) {
+		if (soc->link_desc_banks[i].base_vaddr_unaligned) {
+			qdf_mem_free_consistent(soc->osdev, NULL,
+				soc->link_desc_banks[i].size,
+				soc->link_desc_banks[i].base_vaddr_unaligned,
+				soc->link_desc_banks[i].base_paddr_unaligned,
+				0);
+		}
+	}
+}
+
+/* TODO: Following should be configurable */
+#define WBM_RELEASE_RING_SIZE 64
+#define TCL_DATA_RING_SIZE 512
+#define TCL_CMD_RING_SIZE 32
+#define TCL_STATUS_RING_SIZE 32
+#define REO_DST_RING_SIZE 2048
+#define REO_REINJECT_RING_SIZE 32
+#define RX_RELEASE_RING_SIZE 256
+#define REO_EXCEPTION_RING_SIZE 128
+#define REO_CMD_RING_SIZE 32
+#define REO_STATUS_RING_SIZE 32
+#define RXDMA_BUF_RING_SIZE 8192
+#define RXDMA_MONITOR_BUF_RING_SIZE 8192
+#define RXDMA_MONITOR_DST_RING_SIZE 2048
+#define RXDMA_MONITOR_STATUS_RING_SIZE 2048
+
+/*
+ * dp_soc_cmn_setup() - Common SoC level initialization
+ * @soc:		Datapath SOC handle
+ *
+ * This is an internal function used to setup common SOC data structures,
+ * to be called from PDEV attach after receiving HW mode capabilities from FW
+ */
+static int dp_soc_cmn_setup(struct dp_soc *soc)
+{
+	int i;
+
+	if (soc->cmn_init_done)
+		return 0;
+
+	if (dp_peer_find_attach(soc))
+		goto fail0;
+
+	if (dp_hw_link_desc_pool_setup(soc))
+		goto fail1;
+
+	/* Setup SRNG rings */
+	/* Common rings */
+	if (dp_srng_setup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0, 0,
+		WBM_RELEASE_RING_SIZE)) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"%s: dp_srng_setup failed for wbm_desc_rel_ring\n",
+			__func__);
+		goto fail1;
+	}
+
+
+	/* Tx data rings */
+	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
+		soc->num_tcl_data_rings =
+			wlan_cfg_num_tcl_data_rings(soc->wlan_cfg_ctx);
+		for (i = 0; i < soc->num_tcl_data_rings; i++) {
+			if (dp_srng_setup(soc, &soc->tcl_data_ring[i],
+				TCL_DATA, i, 0, TCL_DATA_RING_SIZE)) {
+				QDF_TRACE(QDF_MODULE_ID_TXRX,
+					QDF_TRACE_LEVEL_ERROR,
+					"%s: dp_srng_setup failed for tcl_data_ring[%d]\n",
+					__func__, i);
+				goto fail1;
+			}
+			if (dp_srng_setup(soc, &soc->tx_comp_ring[i],
+				WBM2SW_RELEASE, i, 0, TCL_DATA_RING_SIZE)) {
+				QDF_TRACE(QDF_MODULE_ID_TXRX,
+					QDF_TRACE_LEVEL_ERROR,
+					"%s: dp_srng_setup failed for tx_comp_ring[%d]\n",
+					__func__, i);
+				goto fail1;
+			}
+		}
+	} else {
+		/* This will be incremented during per pdev ring setup */
+		soc->num_tcl_data_rings = 0;
+	}
+
+	/* TCL command and status rings */
+	if (dp_srng_setup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0, 0,
+		TCL_CMD_RING_SIZE)) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"%s: dp_srng_setup failed for tcl_cmd_ring\n",
+			__func__);
+		goto fail1;
+	}
+
+	if (dp_srng_setup(soc, &soc->tcl_status_ring, TCL_STATUS, 0, 0,
+		TCL_STATUS_RING_SIZE)) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"%s: dp_srng_setup failed for tcl_status_ring\n",
+			__func__);
+		goto fail1;
+	}
+
+
+	/* TBD: call dp_tx_init to setup Tx SW descriptors and MSDU extension
+	 * descriptors
+	 */
+
+	/* Rx data rings */
+	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
+		soc->num_reo_dest_rings =
+			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
+		for (i = 0; i < soc->num_reo_dest_rings; i++) {
+			/* TODO: Get number of rings and ring sizes from
+			 * wlan_cfg
+			 */
+			if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
+				i, 0, REO_DST_RING_SIZE)) {
+				QDF_TRACE(QDF_MODULE_ID_TXRX,
+					QDF_TRACE_LEVEL_ERROR,
+					"%s: dp_srng_setup failed for reo_dest_ring[%d]\n",
+					__func__, i);
+				goto fail1;
+			}
+		}
+	} else {
+		/* This will be incremented during per pdev ring setup */
+		soc->num_reo_dest_rings = 0;
+	}
+
+	/* TBD: call dp_rx_init to setup Rx SW descriptors */
+
+	/* REO reinjection ring */
+	if (dp_srng_setup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0, 0,
+		REO_REINJECT_RING_SIZE)) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"%s: dp_srng_setup failed for reo_reinject_ring\n",
+			__func__);
+		goto fail1;
+	}
+
+
+	/* Rx release ring */
+	if (dp_srng_setup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 3, 0,
+		RX_RELEASE_RING_SIZE)) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"%s: dp_srng_setup failed for rx_rel_ring\n",
+			__func__);
+		goto fail1;
+	}
+
+
+	/* Rx exception ring */
+	if (dp_srng_setup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0,
+		MAX_REO_DEST_RINGS, REO_EXCEPTION_RING_SIZE)) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"%s: dp_srng_setup failed for reo_exception_ring\n",
+			__func__);
+		goto fail1;
+	}
+
+
+	/* REO command and status rings */
+	if (dp_srng_setup(soc, &soc->reo_cmd_ring, REO_CMD, 0, 0,
+		REO_CMD_RING_SIZE)) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"%s: dp_srng_setup failed for reo_cmd_ring\n",
+			__func__);
+		goto fail1;
+	}
+
+	if (dp_srng_setup(soc, &soc->reo_status_ring, REO_STATUS, 0, 0,
+		REO_STATUS_RING_SIZE)) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"%s: dp_srng_setup failed for reo_status_ring\n",
+			__func__);
+		goto fail1;
+	}
+
+
+	/* Setup HW REO */
+	hal_reo_setup(soc->hal_soc);
+
+	soc->cmn_init_done = 1;
+	return 0;
+fail1:
+	/*
+	 * Cleanup will be done as part of soc_detach, which will
+	 * be called on pdev attach failure
+	 */
+fail0:
+	return QDF_STATUS_E_FAILURE;
+}
+
+static void dp_pdev_detach_wifi3(void *txrx_pdev, int force);
+
+/*
+* dp_pdev_attach_wifi3() - attach txrx pdev
+* @txrx_soc: Datapath SOC handle
+* @ctrl_pdev: Opaque PDEV handle from OSIF/HDD
+* @htc_handle: HTC handle for host-target interface
+* @qdf_osdev: QDF OS device
+* @pdev_id: PDEV ID
+*
+* Return: DP PDEV handle on success, NULL on failure
+*/
+void *dp_pdev_attach_wifi3(void *txrx_soc, void *ctrl_pdev,
+	HTC_HANDLE htc_handle, qdf_device_t qdf_osdev, int pdev_id)
+{
+	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
+	struct dp_pdev *pdev = qdf_mem_malloc(sizeof(*pdev));
+
+	if (!pdev) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"%s: DP PDEV memory allocation failed\n", __func__);
+		goto fail0;
+	}
+
+	pdev->soc = soc;
+	pdev->osif_pdev = ctrl_pdev;
+	pdev->pdev_id = pdev_id;
+	soc->pdev_list[pdev_id] = pdev;
+
+	TAILQ_INIT(&pdev->vdev_list);
+	pdev->vdev_count = 0;
+
+	if (dp_soc_cmn_setup(soc)) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"%s: dp_soc_cmn_setup failed\n", __func__);
+		goto fail0;
+	}
+
+	/* Setup per PDEV TCL rings if configured */
+	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
+		if (dp_srng_setup(soc, &soc->tcl_data_ring[pdev_id], TCL_DATA,
+			pdev_id, pdev_id, TCL_DATA_RING_SIZE)) {
+			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+				"%s: dp_srng_setup failed for tcl_data_ring\n",
+				__func__);
+			goto fail0;
+		}
+		if (dp_srng_setup(soc, &soc->tx_comp_ring[pdev_id],
+			WBM2SW_RELEASE, pdev_id, pdev_id, TCL_DATA_RING_SIZE)) {
+			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+				"%s: dp_srng_setup failed for tx_comp_ring\n",
+				__func__);
+			goto fail0;
+		}
+		soc->num_tcl_data_rings++;
+	}
+
+	/* Setup per PDEV REO rings if configured */
+	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
+		if (dp_srng_setup(soc, &soc->reo_dest_ring[pdev_id], REO_DST,
+			pdev_id, pdev_id, REO_DST_RING_SIZE)) {
+			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+				"%s: dp_srng_setup failed for reo_dest_ring\n",
+				__func__);
+			goto fail0;
+		}
+		soc->num_reo_dest_rings++;
+
+	}
+
+	if (dp_srng_setup(soc, &pdev->rxdma_buf_ring, RXDMA_BUF, 0, pdev_id,
+		RXDMA_BUF_RING_SIZE)) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"%s: dp_srng_setup failed for rxdma_buf_ring\n",
+			__func__);
+		goto fail0;
+	}
+
+	/* TODO: RXDMA destination ring is not planned to be used currently.
+	 * Setup the ring when required
+	 */
+	if (dp_srng_setup(soc, &pdev->rxdma_mon_buf_ring, RXDMA_MONITOR_BUF, 0,
+		pdev_id, RXDMA_MONITOR_BUF_RING_SIZE)) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"%s: dp_srng_setup failed for rxdma_mon_buf_ring\n",
+			__func__);
+		goto fail0;
+	}
+
+	if (dp_srng_setup(soc, &pdev->rxdma_mon_dst_ring, RXDMA_MONITOR_DST, 0,
+		pdev_id, RXDMA_MONITOR_DST_RING_SIZE)) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"%s: dp_srng_setup failed for rxdma_mon_dst_ring\n",
+			__func__);
+		goto fail0;
+	}
+
+
+	if (dp_srng_setup(soc, &pdev->rxdma_mon_status_ring,
+		RXDMA_MONITOR_STATUS, 0, pdev_id,
+		RXDMA_MONITOR_STATUS_RING_SIZE)) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"%s: dp_srng_setup failed for rxdma_mon_status_ring\n",
+			__func__);
+		goto fail0;
+	}
+
+
+	return (void *)pdev;
+
+fail0:
+	if (pdev)
+		dp_pdev_detach_wifi3((void *)pdev, 0);
+	return NULL;
+}
+
+/*
+* dp_pdev_detach_wifi3() - detach txrx pdev
+* @txrx_pdev: Datapath PDEV handle
+* @force: Force detach
+*
+*/
+static void dp_pdev_detach_wifi3(void *txrx_pdev, int force)
+{
+	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
+	struct dp_soc *soc = pdev->soc;
+
+	if (wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
+		dp_srng_cleanup(soc, &soc->tcl_data_ring[pdev->pdev_id],
+			TCL_DATA, pdev->pdev_id);
+		dp_srng_cleanup(soc, &soc->tx_comp_ring[pdev->pdev_id],
+			WBM2SW_RELEASE, pdev->pdev_id);
+	}
+
+	/* Setup per PDEV REO rings if configured */
+	if (wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
+		dp_srng_cleanup(soc, &soc->reo_dest_ring[pdev->pdev_id],
+			REO_DST, pdev->pdev_id);
+	}
+
+	dp_srng_cleanup(soc, &pdev->rxdma_buf_ring, RXDMA_BUF, 0);
+
+	dp_srng_cleanup(soc, &pdev->rxdma_mon_buf_ring, RXDMA_MONITOR_BUF, 0);
+
+	dp_srng_cleanup(soc, &pdev->rxdma_mon_dst_ring, RXDMA_MONITOR_DST, 0);
+
+	dp_srng_cleanup(soc, &pdev->rxdma_mon_status_ring,
+		RXDMA_MONITOR_STATUS, 0);
+
+	soc->pdev_list[pdev->pdev_id] = NULL;
+
+	qdf_mem_free(pdev);
+}
+
+/*
+ * dp_soc_detach_wifi3() - Detach txrx SOC
+ * @txrx_soc: DP SOC handle
+ *
+ */
+void dp_soc_detach_wifi3(void *txrx_soc)
+{
+	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
+	int i;
+
+	soc->cmn_init_done = 0;
+
+	for (i = 0; i < MAX_PDEV_CNT; i++) {
+		if (soc->pdev_list[i])
+			dp_pdev_detach_wifi3((void *)soc->pdev_list[i], 1);
+	}
+
+	dp_peer_find_detach(soc);
+
+	/* TBD: Call Tx and Rx cleanup functions to free buffers and
+	 * SW descriptors
+	 */
+
+	/* Free the ring memories */
+	/* Common rings */
+	dp_srng_cleanup(soc, &soc->wbm_desc_rel_ring, SW2WBM_RELEASE, 0);
+
+	/* Tx data rings */
+	if (!wlan_cfg_per_pdev_tx_ring(soc->wlan_cfg_ctx)) {
+		for (i = 0; i < soc->num_tcl_data_rings; i++) {
+			dp_srng_cleanup(soc, &soc->tcl_data_ring[i],
+				TCL_DATA, i);
+			dp_srng_cleanup(soc, &soc->tx_comp_ring[i],
+				WBM2SW_RELEASE, i);
+		}
+	}
+
+	/* TCL command and status rings */
+	dp_srng_cleanup(soc, &soc->tcl_cmd_ring, TCL_CMD, 0);
+	dp_srng_cleanup(soc, &soc->tcl_status_ring, TCL_STATUS, 0);
+
+	/* Rx data rings */
+	if (!wlan_cfg_per_pdev_rx_ring(soc->wlan_cfg_ctx)) {
+		soc->num_reo_dest_rings =
+			wlan_cfg_num_reo_dest_rings(soc->wlan_cfg_ctx);
+		for (i = 0; i < soc->num_reo_dest_rings; i++) {
+			/* TODO: Get number of rings and ring sizes
+			 * from wlan_cfg
+			 */
+			dp_srng_cleanup(soc, &soc->reo_dest_ring[i],
+				REO_DST, i);
+		}
+	}
+	/* REO reinjection ring */
+	dp_srng_cleanup(soc, &soc->reo_reinject_ring, REO_REINJECT, 0);
+
+	/* Rx release ring */
+	dp_srng_cleanup(soc, &soc->rx_rel_ring, WBM2SW_RELEASE, 0);
+
+	/* Rx exception ring */
+	/* TODO: Better to store ring_type and ring_num in
+	 * dp_srng during setup
+	 */
+	dp_srng_cleanup(soc, &soc->reo_exception_ring, REO_EXCEPTION, 0);
+
+	/* REO command and status rings */
+	dp_srng_cleanup(soc, &soc->reo_cmd_ring, REO_CMD, 0);
+	dp_srng_cleanup(soc, &soc->reo_status_ring, REO_STATUS, 0);
+
+	htt_soc_detach(soc->htt_handle);
+}
+
+/*
+ * dp_soc_attach_target_wifi3() - SOC initialization in the target
+ * @txrx_soc: Datapath SOC handle
+ */
+int dp_soc_attach_target_wifi3(void *txrx_soc)
+{
+	struct dp_soc *soc = (struct dp_soc *)txrx_soc;
+	int i;
+
+	htt_soc_attach_target(soc->htt_handle);
+
+	for (i = 0; i < MAX_PDEV_CNT; i++) {
+		struct dp_pdev *pdev = soc->pdev_list[i];
+		if (pdev) {
+			htt_srng_setup(soc->htt_handle, i,
+				pdev->rxdma_buf_ring.hal_srng, RXDMA_BUF);
+#ifdef notyet /* FW doesn't handle monitor rings yet */
+			htt_srng_setup(soc->htt_handle, i,
+				pdev->rxdma_mon_buf_ring.hal_srng,
+				RXDMA_MONITOR_BUF);
+			htt_srng_setup(soc->htt_handle, i,
+				pdev->rxdma_mon_dst_ring.hal_srng,
+				RXDMA_MONITOR_DST);
+			htt_srng_setup(soc->htt_handle, i,
+				pdev->rxdma_mon_status_ring.hal_srng,
+				RXDMA_MONITOR_STATUS);
+#endif
+		}
+	}
+	return 0;
+}
+
+/*
+* dp_vdev_attach_wifi3() - attach txrx vdev
+* @txrx_pdev: Datapath PDEV handle
+* @vdev_mac_addr: MAC address of the virtual interface
+* @vdev_id: VDEV Id
+* @wlan_op_mode: VDEV operating mode
+*
+* Return: DP VDEV handle on success, NULL on failure
+*/
+void *dp_vdev_attach_wifi3(void *txrx_pdev,
+	uint8_t *vdev_mac_addr, uint8_t vdev_id, enum wlan_op_mode op_mode)
+{
+	struct dp_pdev *pdev = (struct dp_pdev *)txrx_pdev;
+	struct dp_soc *soc = pdev->soc;
+	struct dp_vdev *vdev = qdf_mem_malloc(sizeof(*vdev));
+
+	if (!vdev) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"%s: DP VDEV memory allocation failed\n", __func__);
+		goto fail0;
+	}
+
+	vdev->pdev = pdev;
+	vdev->vdev_id = vdev_id;
+	vdev->opmode = op_mode;
+
+	vdev->osif_rx = NULL;
+	vdev->osif_rx_mon = NULL;
+	vdev->osif_vdev = NULL;
+
+	vdev->delete.pending = 0;
+	vdev->safemode = 0;
+	vdev->drop_unenc = 1;
+#ifdef notyet
+	vdev->filters_num = 0;
+#endif
+
+	qdf_mem_copy(
+		&vdev->mac_addr.raw[0], vdev_mac_addr, OL_TXRX_MAC_ADDR_LEN);
+
+	vdev->tx_encap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
+	vdev->rx_decap_type = wlan_cfg_pkt_type(soc->wlan_cfg_ctx);
+
+	/* TODO: Initialize default HTT meta data that will be used in
+	 * TCL descriptors for packets transmitted from this VDEV
+	 */
+
+	TAILQ_INIT(&vdev->peer_list);
+
+	/* add this vdev into the pdev's list */
+	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
+	pdev->vdev_count++;
+
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+		"Created vdev %p (%02x:%02x:%02x:%02x:%02x:%02x)\n", vdev,
+		vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
+		vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
+		vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
+
+	return (void *)vdev;
+
+fail0:
+	return NULL;
+}
+
+/**
+ * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
+ * @vdev: Datapath VDEV handle
+ * @osif_vdev: OSIF vdev handle
+ * @txrx_ops: Tx and Rx operations
+ *
+ * Return: DP VDEV handle on success, NULL on failure
+ */
+void dp_vdev_register_wifi3(void *vdev_handle, void *osif_vdev,
+	struct ol_txrx_ops *txrx_ops)
+{
+	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
+	vdev->osif_vdev = osif_vdev;
+	vdev->osif_rx = txrx_ops->rx.rx;
+	vdev->osif_rx_mon = txrx_ops->rx.mon;
+#ifdef notyet
+#if ATH_SUPPORT_WAPI
+	vdev->osif_check_wai = txrx_ops->rx.wai_check;
+#endif
+#if UMAC_SUPPORT_PROXY_ARP
+	vdev->osif_proxy_arp = txrx_ops->proxy_arp;
+#endif
+#endif
+#ifdef notyet
+	/* TODO: Enable the following once Tx code is integrated */
+	txrx_ops->tx.tx = dp_tx_send;
+#endif
+}
+
+/*
+ * dp_vdev_detach_wifi3() - Detach txrx vdev
+ * @txrx_vdev:		Datapath VDEV handle
+ * @callback:		Callback OL_IF on completion of detach
+ * @cb_context:	Callback context
+ *
+ */
+void dp_vdev_detach_wifi3(void *vdev_handle,
+	ol_txrx_vdev_delete_cb callback, void *cb_context)
+{
+	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
+	struct dp_pdev *pdev = vdev->pdev;
+	struct dp_soc *soc = pdev->soc;
+
+	/* preconditions */
+	qdf_assert(vdev);
+
+	/* remove the vdev from its parent pdev's list */
+	TAILQ_REMOVE(&pdev->vdev_list, vdev, vdev_list_elem);
+
+	/*
+	 * Use peer_ref_mutex while accessing peer_list, in case
+	 * a peer is in the process of being removed from the list.
+	 */
+	qdf_spin_lock_bh(&soc->peer_ref_mutex);
+	/* check that the vdev has no peers allocated */
+	if (!TAILQ_EMPTY(&vdev->peer_list)) {
+		/* debug print - will be removed later */
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
+			"%s: not deleting vdev object %p (%02x:%02x:%02x:%02x:%02x:%02x)"
+			"until deletion finishes for all its peers\n",
+			__func__, vdev,
+			vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
+			vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
+			vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
+		/* indicate that the vdev needs to be deleted */
+		vdev->delete.pending = 1;
+		vdev->delete.callback = callback;
+		vdev->delete.context = cb_context;
+		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
+		return;
+	}
+	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
+
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
+		"%s: deleting vdev object %p (%02x:%02x:%02x:%02x:%02x:%02x)\n",
+		__func__, vdev,
+		vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
+		vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
+		vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
+
+	qdf_mem_free(vdev);
+
+	if (callback)
+		callback(cb_context);
+}
+
+/*
+ * dp_peer_attach_wifi3() - attach txrx peer
+ * @vdev_handle: Datapath VDEV handle
+ * @peer_mac_addr: Peer MAC address
+ *
+ * Return: DP peer handle on success, NULL on failure
+ */
+void *dp_peer_attach_wifi3(void *vdev_handle, uint8_t *peer_mac_addr)
+{
+	struct dp_peer *peer;
+	int i;
+	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
+	struct dp_pdev *pdev;
+	struct dp_soc *soc;
+
+	/* preconditions */
+	qdf_assert(vdev);
+	qdf_assert(peer_mac_addr);
+
+	pdev = vdev->pdev;
+	soc = pdev->soc;
+#ifdef notyet
+	peer = (struct dp_peer *)qdf_mempool_alloc(soc->osdev,
+		soc->mempool_ol_ath_peer);
+#else
+	peer = (struct dp_peer *)qdf_mem_malloc(sizeof(*peer));
+#endif
+
+	if (!peer)
+		return NULL; /* failure */
+
+	qdf_mem_zero(peer, sizeof(struct dp_peer));
+
+	/* store provided params */
+	peer->vdev = vdev;
+	qdf_mem_copy(
+		&peer->mac_addr.raw[0], peer_mac_addr, OL_TXRX_MAC_ADDR_LEN);
+
+	/* TODO: See if rx_opt_proc is really required */
+	peer->rx_opt_proc = soc->rx_opt_proc;
+
+	dp_peer_rx_init(pdev, peer);
+
+	/* initialize the peer_id */
+	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++)
+		peer->peer_ids[i] = HTT_INVALID_PEER;
+
+	qdf_spin_lock_bh(&soc->peer_ref_mutex);
+
+	qdf_atomic_init(&peer->ref_cnt);
+
+	/* keep one reference for attach */
+	qdf_atomic_inc(&peer->ref_cnt);
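+	/*
+	 * A second reference is taken later by the HTT peer map handler
+	 * (dp_peer_find_add_id -> dp_peer_find_hash_find) and dropped again
+	 * when the PEER_UNMAP message arrives.
+	 */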
+
+	/* add this peer into the vdev's list */
+	TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
+	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
+
+	/* TODO: See if hash based search is required */
+	dp_peer_find_hash_add(soc, peer);
+
+	if (soc->ol_ops->peer_set_default_routing) {
+		/* TODO: Check on the destination ring number to be passed
+		 * to FW
+		 */
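+		/* Literal args map to hash_based = 0, ring_num = 1 (see ol_if_ops) */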
+		soc->ol_ops->peer_set_default_routing(soc->osif_soc,
+			peer->mac_addr.raw, peer->vdev->vdev_id, 0, 1);
+	}
+
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
+		"vdev %p created peer %p (%02x:%02x:%02x:%02x:%02x:%02x)\n",
+		vdev, peer,
+		peer->mac_addr.raw[0], peer->mac_addr.raw[1],
+		peer->mac_addr.raw[2], peer->mac_addr.raw[3],
+		peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
+	/*
+	 * If the peer MAC address matches the vdev MAC address, this is the
+	 * BSS peer - mark it and cache it in the vdev
+	 */
+	if (memcmp(peer->mac_addr.raw, vdev->mac_addr.raw, 6) == 0) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
+			"vdev bss_peer!!!!\n");
+		peer->bss_peer = 1;
+		vdev->vap_bss_peer = peer;
+	}
+
+	return (void *)peer;
+}
+
+/*
+ * dp_peer_authorize() - authorize txrx peer
+ * @peer_handle:		Datapath peer handle
+ * @authorize: Non-zero to authorize the peer, zero to de-authorize
+ *
+ */
+void dp_peer_authorize(void *peer_handle, uint32_t authorize)
+{
+	struct dp_peer *peer = (struct dp_peer *)peer_handle;
+	struct dp_soc *soc;
+
+	if (peer != NULL) {
+		soc = peer->vdev->pdev->soc;
+
+		qdf_spin_lock_bh(&soc->peer_ref_mutex);
+		peer->authorize = authorize ? 1 : 0;
+#ifdef notyet /* ATH_BAND_STEERING */
+		peer->peer_bs_inact_flag = 0;
+		peer->peer_bs_inact = soc->pdev_bs_inact_reload;
+#endif
+		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
+	}
+}
+
+/*
+ * dp_peer_unref_delete() - unref and delete peer
+ * @peer_handle:		Datapath peer handle
+ *
+ */
+void dp_peer_unref_delete(void *peer_handle)
+{
+	struct dp_peer *peer = (struct dp_peer *)peer_handle;
+	struct dp_vdev *vdev = peer->vdev;
+	struct dp_soc *soc = vdev->pdev->soc;
+	struct dp_peer *tmppeer;
+	int found = 0;
+	uint16_t peer_id;
+
+	/*
+	 * Hold the lock all the way from checking if the peer ref count
+	 * is zero until the peer references are removed from the hash
+	 * table and vdev list (if the peer ref count is zero).
+	 * This protects against a new HL tx operation starting to use the
+	 * peer object just after this function concludes it's done being used.
+	 * Furthermore, the lock needs to be held while checking whether the
+	 * vdev's list of peers is empty, to make sure that list is not modified
+	 * concurrently with the empty check.
+	 */
+	qdf_spin_lock_bh(&soc->peer_ref_mutex);
+	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
+		peer_id = peer->peer_ids[0];
+
+		/*
+		 * Make sure that the reference to the peer in
+		 * peer object map is removed
+		 */
+		if (peer_id != HTT_INVALID_PEER)
+			soc->peer_id_to_obj_map[peer_id] = NULL;
+
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
+			"Deleting peer %p (%02x:%02x:%02x:%02x:%02x:%02x)\n",
+			peer, peer->mac_addr.raw[0], peer->mac_addr.raw[1],
+			peer->mac_addr.raw[2], peer->mac_addr.raw[3],
+			peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
+
+		/* remove the reference to the peer from the hash table */
+		dp_peer_find_hash_remove(soc, peer);
+
+		TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
+			if (tmppeer == peer) {
+				found = 1;
+				break;
+			}
+		}
+		if (found) {
+			TAILQ_REMOVE(&peer->vdev->peer_list, peer,
+				peer_list_elem);
+		} else {
+			/*Ignoring the remove operation as peer not found*/
+			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
+				"WARN peer %p not found in vdev (%p)->peer_list:%p\n",
+				peer, vdev, &peer->vdev->peer_list);
+		}
+
+		/* cleanup the Rx reorder queues for this peer */
+		dp_peer_rx_cleanup(vdev, peer);
+
+		/* check whether the parent vdev has no peers left */
+		if (TAILQ_EMPTY(&vdev->peer_list)) {
+			/*
+			 * Now that there are no references to the peer, we can
+			 * release the peer reference lock.
+			 */
+			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
+			/*
+			 * Check if the parent vdev was waiting for its peers
+			 * to be deleted, in order for it to be deleted too.
+			 */
+			if (vdev->delete.pending) {
+				ol_txrx_vdev_delete_cb vdev_delete_cb =
+					vdev->delete.callback;
+				void *vdev_delete_context =
+					vdev->delete.context;
+
+				QDF_TRACE(QDF_MODULE_ID_TXRX,
+					QDF_TRACE_LEVEL_INFO_HIGH,
+					"%s: deleting vdev object %p "
+					"(%02x:%02x:%02x:%02x:%02x:%02x)"
+					" - its last peer is done\n",
+					__func__, vdev,
+					vdev->mac_addr.raw[0],
+					vdev->mac_addr.raw[1],
+					vdev->mac_addr.raw[2],
+					vdev->mac_addr.raw[3],
+					vdev->mac_addr.raw[4],
+					vdev->mac_addr.raw[5]);
+				/* all peers are gone, go ahead and delete it */
+				qdf_mem_free(vdev);
+				if (vdev_delete_cb)
+					vdev_delete_cb(vdev_delete_context);
+			}
+		} else {
+			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
+		}
+#ifdef notyet
+		qdf_mempool_free(soc->osdev, soc->mempool_ol_ath_peer, peer);
+#else
+		qdf_mem_free(peer);
+#endif
+
+#ifdef notyet /* See why this should be done in DP layer */
+		qdf_atomic_inc(&soc->peer_count);
+#endif
+	} else {
+		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
+	}
+}
+
+/*
+ * dp_peer_detach_wifi3() - Detach txrx peer
+ * @peer_handle:		Datapath peer handle
+ *
+ */
+void dp_peer_detach_wifi3(void *peer_handle)
+{
+	struct dp_peer *peer = (struct dp_peer *)peer_handle;
+
+	/* redirect the peer's rx delivery function to point to a
+	 * discard func
+	 */
+	peer->rx_opt_proc = dp_rx_discard;
+
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
+		"%s:peer %p (%02x:%02x:%02x:%02x:%02x:%02x)\n", __func__, peer,
+		  peer->mac_addr.raw[0], peer->mac_addr.raw[1],
+		  peer->mac_addr.raw[2], peer->mac_addr.raw[3],
+		  peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
+
+	/*
+	 * Remove the reference added during peer_attach.
+	 * The peer will still be left allocated until the
+	 * PEER_UNMAP message arrives to remove the other
+	 * reference, added by the PEER_MAP message.
+	 */
+	dp_peer_unref_delete(peer_handle);
+}

+ 841 - 0
dp/wifi3.0/dp_peer.c

@@ -0,0 +1,841 @@
+/*
+ * Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <qdf_types.h>
+#include <qdf_lock.h>
+#include "dp_htt.h"
+#include "dp_types.h"
+#include "dp_internal.h"
+#include <hal_api.h>
+
+/* Temporary definitions to be moved to wlan_cfg */
+static inline uint32_t wlan_cfg_max_peer_id(void *wlan_cfg_ctx)
+{
+	/* TODO: This should be calculated based on target capabilities */
+	return 2048;
+}
+
+static inline int dp_peer_find_mac_addr_cmp(
+	union dp_align_mac_addr *mac_addr1,
+	union dp_align_mac_addr *mac_addr2)
+{
+	return !((mac_addr1->align4.bytes_abcd == mac_addr2->align4.bytes_abcd)
+		/*
+		 * Intentionally use & rather than &&.
+		 * because the operands are binary rather than generic boolean,
+		 * the functionality is equivalent.
+		 * Using && has the advantage of short-circuited evaluation,
+		 * but using & has the advantage of no conditional branching,
+		 * which is a more significant benefit.
+		 */
+		&
+		(mac_addr1->align4.bytes_ef == mac_addr2->align4.bytes_ef));
+}
+
+static inline struct dp_peer *dp_peer_find_by_id(
+	struct dp_soc *soc, uint16_t peer_id)
+{
+	struct dp_peer *peer;
+	peer = (peer_id == HTT_INVALID_PEER) ? NULL :
+		soc->peer_id_to_obj_map[peer_id];
+	/*
+	 * Currently, peer IDs are assigned to vdevs as well as peers.
+	 * If the peer ID is for a vdev, the peer_id_to_obj_map entry
+	 * will hold NULL rather than a valid peer pointer.
+	 */
+	return peer;
+}
+
+static int dp_peer_find_map_attach(struct dp_soc *soc)
+{
+	uint32_t max_peers, peer_map_size;
+
+	/* allocate the peer ID -> peer object map */
+	max_peers = wlan_cfg_max_peer_id(soc->wlan_cfg_ctx) + 1;
+	soc->max_peers = max_peers;
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
+		"\n<=== cfg max peer id %d ====>\n", max_peers);
+	peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
+	soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
+	if (!soc->peer_id_to_obj_map) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"%s: peer map memory allocation failed\n", __func__);
+		return QDF_STATUS_E_NOMEM;
+	}
+
+	/*
+	 * The peer_id_to_obj_map doesn't really need to be initialized,
+	 * since elements are only used after they have been individually
+	 * initialized.
+	 * However, it is convenient for debugging to have all elements
+	 * that are not in use set to 0.
+	 */
+	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
+#ifdef notyet /* ATH_BAND_STEERING */
+		OS_INIT_TIMER(soc->osdev, &(soc->bs_inact_timer),
+			dp_peer_find_inact_timeout_handler, (void *)soc,
+			QDF_TIMER_TYPE_WAKE_APPS);
+#endif
+	return 0; /* success */
+}
+
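+/*
+ * dp_log2_ceil() - smallest n such that (1 << n) >= value; used below to
+ * round the peer hash table size up to a power of two (e.g. 1500 -> 11,
+ * 2048 -> 11).
+ */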
+static int dp_log2_ceil(unsigned value)
+{
+	unsigned tmp = value;
+	int log2 = -1;
+
+	while (tmp) {
+		log2++;
+		tmp >>= 1;
+	}
+	if (1 << log2 != value)
+		log2++;
+	return log2;
+}
+
+static int dp_peer_find_add_id_to_obj(
+	struct dp_peer *peer,
+	uint16_t peer_id)
+{
+	int i;
+
+	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
+		if (peer->peer_ids[i] == HTT_INVALID_PEER) {
+			peer->peer_ids[i] = peer_id;
+			return 0; /* success */
+		}
+	}
+	return QDF_STATUS_E_FAILURE; /* failure */
+}
+
+#define DP_PEER_HASH_LOAD_MULT  2
+#define DP_PEER_HASH_LOAD_SHIFT 0
+
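+/*
+ * The hash table is sized to (max peer id + 1) * DP_PEER_HASH_LOAD_MULT
+ * entries (shifted down by DP_PEER_HASH_LOAD_SHIFT), rounded up to a power
+ * of two so the bin index can be derived with a simple mask.
+ */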
+static int dp_peer_find_hash_attach(struct dp_soc *soc)
+{
+	int i, hash_elems, log2;
+
+	/* allocate the peer MAC address -> peer object hash table */
+	hash_elems = wlan_cfg_max_peer_id(soc->wlan_cfg_ctx) + 1;
+	hash_elems *= DP_PEER_HASH_LOAD_MULT;
+	hash_elems >>= DP_PEER_HASH_LOAD_SHIFT;
+	log2 = dp_log2_ceil(hash_elems);
+	hash_elems = 1 << log2;
+
+	soc->peer_hash.mask = hash_elems - 1;
+	soc->peer_hash.idx_bits = log2;
+	/* allocate an array of TAILQ peer object lists */
+	soc->peer_hash.bins = qdf_mem_malloc(
+		hash_elems * sizeof(TAILQ_HEAD(anonymous_tail_q, dp_peer)));
+	if (!soc->peer_hash.bins)
+		return QDF_STATUS_E_NOMEM;
+
+	for (i = 0; i < hash_elems; i++)
+		TAILQ_INIT(&soc->peer_hash.bins[i]);
+
+	return 0;
+}
+
+static void dp_peer_find_hash_detach(struct dp_soc *soc)
+{
+	qdf_mem_free(soc->peer_hash.bins);
+}
+
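+/*
+ * The hash bin index is derived by XOR-folding the three aligned 16-bit
+ * halves of the MAC address and masking to the table size; the exact 16-bit
+ * values depend on host endianness, which is fine since the same folding is
+ * used for both insert and lookup.
+ */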
+static inline unsigned dp_peer_find_hash_index(struct dp_soc *soc,
+	union dp_align_mac_addr *mac_addr)
+{
+	unsigned index;
+
+	index =
+		mac_addr->align2.bytes_ab ^
+		mac_addr->align2.bytes_cd ^
+		mac_addr->align2.bytes_ef;
+	index ^= index >> soc->peer_hash.idx_bits;
+	index &= soc->peer_hash.mask;
+	return index;
+}
+
+
+void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
+{
+	unsigned index;
+
+	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
+	qdf_spin_lock_bh(&soc->peer_ref_mutex);
+	/*
+	 * It is important to add the new peer at the tail of the peer list
+	 * with the bin index.  Together with having the hash_find function
+	 * search from head to tail, this ensures that if two entries with
+	 * the same MAC address are stored, the one added first will be
+	 * found first.
+	 */
+	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
+	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
+}
+
+#if ATH_SUPPORT_WRAP
+struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
+	uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id)
+#else
+struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
+	uint8_t *peer_mac_addr, int mac_addr_is_aligned)
+#endif
+{
+	union dp_align_mac_addr local_mac_addr_aligned, *mac_addr;
+	unsigned index;
+	struct dp_peer *peer;
+
+	if (mac_addr_is_aligned) {
+		mac_addr = (union dp_align_mac_addr *) peer_mac_addr;
+	} else {
+		qdf_mem_copy(
+			&local_mac_addr_aligned.raw[0],
+			peer_mac_addr, DP_MAC_ADDR_LEN);
+		mac_addr = &local_mac_addr_aligned;
+	}
+	index = dp_peer_find_hash_index(soc, mac_addr);
+	qdf_spin_lock_bh(&soc->peer_ref_mutex);
+	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
+#if ATH_SUPPORT_WRAP
+		/* ProxySTA may have multiple BSS peer with same MAC address,
+		 * modified find will take care of finding the correct BSS peer.
+		 */
+		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
+			(peer->vdev->vdev_id == vdev_id)) {
+#else
+		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0) {
+#endif
+			/* found it - increment the ref count before releasing
+			 * the lock
+			 */
+			qdf_atomic_inc(&peer->ref_cnt);
+			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
+			return peer;
+		}
+	}
+	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
+	return NULL; /* failure */
+}
+
+void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
+{
+	unsigned index;
+	struct dp_peer *tmppeer = NULL;
+	int found = 0;
+
+	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
+	/* Check if tail is not empty before delete*/
+	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
+	/*
+	 * DO NOT take the peer_ref_mutex lock here - it needs to be taken
+	 * by the caller.
+	 * The caller needs to hold the lock from the time the peer object's
+	 * reference count is decremented and tested up through the time the
+	 * reference to the peer object is removed from the hash table, by
+	 * this function.
+	 * Holding the lock only while removing the peer object reference
+	 * from the hash table keeps the hash table consistent, but does not
+	 * protect against a new HL tx context starting to use the peer object
+	 * if it looks up the peer object from its MAC address just after the
+	 * peer ref count is decremented to zero, but just before the peer
+	 * object reference is removed from the hash table.
+	 */
+	 TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
+		if (tmppeer == peer) {
+			found = 1;
+			break;
+		}
+	}
+	QDF_ASSERT(found);
+	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
+}
+
+void dp_peer_find_hash_erase(struct dp_soc *soc)
+{
+	int i;
+
+	/*
+	 * Not really necessary to take peer_ref_mutex lock - by this point,
+	 * it's known that the soc is no longer in use.
+	 */
+	for (i = 0; i <= soc->peer_hash.mask; i++) {
+		if (!TAILQ_EMPTY(&soc->peer_hash.bins[i])) {
+			struct dp_peer *peer, *peer_next;
+
+			/*
+			 * TAILQ_FOREACH_SAFE must be used here to avoid any
+			 * memory access violation after peer is freed
+			 */
+			TAILQ_FOREACH_SAFE(peer, &soc->peer_hash.bins[i],
+				hash_list_elem, peer_next) {
+				/*
+				 * Don't remove the peer from the hash table -
+				 * that would modify the list we are currently
+				 * traversing, and it's not necessary anyway.
+				 */
+				/*
+				 * Artificially adjust the peer's ref count to
+				 * 1, so it will get deleted by
+				 * dp_peer_unref_delete.
+				 */
+				/* set to zero */
+				qdf_atomic_init(&peer->ref_cnt);
+				/* incr to one */
+				qdf_atomic_inc(&peer->ref_cnt);
+				dp_peer_unref_delete(peer);
+			}
+		}
+	}
+}
+
+static void dp_peer_find_map_detach(struct dp_soc *soc)
+{
+#ifdef notyet /* ATH_BAND_STEERING */
+	OS_FREE_TIMER(&(soc->bs_inact_timer));
+#endif
+	qdf_mem_free(soc->peer_id_to_obj_map);
+}
+
+int dp_peer_find_attach(struct dp_soc *soc)
+{
+	if (dp_peer_find_map_attach(soc))
+		return 1;
+
+	if (dp_peer_find_hash_attach(soc)) {
+		dp_peer_find_map_detach(soc);
+		return 1;
+	}
+	return 0; /* success */
+}
+
+static inline void dp_peer_find_add_id(struct dp_soc *soc,
+	uint8_t *peer_mac_addr, uint16_t peer_id, uint8_t vdev_id)
+{
+	struct dp_peer *peer;
+
+	QDF_ASSERT(peer_id <= wlan_cfg_max_peer_id(soc->wlan_cfg_ctx));
+	/* check if there's already a peer object with this MAC address */
+#if ATH_SUPPORT_WRAP
+	peer = dp_peer_find_hash_find(soc, peer_mac_addr,
+		0 /* is aligned */, vdev_id);
+#else
+	peer = dp_peer_find_hash_find(soc, peer_mac_addr, 0 /* is aligned */);
+#endif
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+		"%s: peer %p ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x\n",
+		__func__, peer, peer_id, vdev_id, peer_mac_addr[0],
+		peer_mac_addr[1], peer_mac_addr[2], peer_mac_addr[3],
+		peer_mac_addr[4], peer_mac_addr[5]);
+
+	if (peer) {
+		/* peer's ref count was already incremented by
+		 * peer_find_hash_find
+		 */
+		soc->peer_id_to_obj_map[peer_id] = peer;
+
+		if (dp_peer_find_add_id_to_obj(peer, peer_id)) {
+			/* TBDXXX: assert for now */
+			QDF_ASSERT(0);
+		}
+
+		return;
+	}
+}
+
+void
+dp_rx_peer_map_handler(void *soc_handle, uint16_t peer_id, uint8_t vdev_id,
+	uint8_t *peer_mac_addr)
+{
+	struct dp_soc *soc = (struct dp_soc *)soc_handle;
+
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
+		"peer_map_event (soc:%p): peer_id %d, peer_mac "
+		"%02x:%02x:%02x:%02x:%02x:%02x, vdev_id %d\n", soc, peer_id,
+		peer_mac_addr[0], peer_mac_addr[1], peer_mac_addr[2],
+		peer_mac_addr[3], peer_mac_addr[4], peer_mac_addr[5], vdev_id);
+
+	dp_peer_find_add_id(soc, peer_mac_addr, peer_id, vdev_id);
+}
+
+void
+dp_rx_peer_unmap_handler(void *soc_handle, uint16_t peer_id)
+{
+	struct dp_peer *peer;
+	struct dp_soc *soc = (struct dp_soc *)soc_handle;
+	uint8_t i;
+	peer = dp_peer_find_by_id(soc, peer_id);
+
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
+		"peer_unmap_event (soc:%p) peer_id %d peer %p\n",
+		soc, peer_id, peer);
+
+	/*
+	 * Currently peer IDs are assigned for vdevs as well as peers.
+	 * If the peer ID is for a vdev, then the peer pointer stored
+	 * in peer_id_to_obj_map will be NULL.
+	 */
+	if (!peer)
+		return;
+
+	soc->peer_id_to_obj_map[peer_id] = NULL;
+	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
+		if (peer->peer_ids[i] == peer_id) {
+			peer->peer_ids[i] = HTT_INVALID_PEER;
+			break;
+		}
+	}
+
+	/*
+	 * Remove a reference to the peer.
+	 * If there are no more references, delete the peer object.
+	 */
+	dp_peer_unref_delete(peer);
+}
+
+void
+dp_peer_find_detach(struct dp_soc *soc)
+{
+	dp_peer_find_map_detach(soc);
+	dp_peer_find_hash_detach(soc);
+}
+
+/*
+ * dp_rx_tid_update_wifi3() - Update receive TID state
+ * @peer: Datapath peer handle
+ * @tid: TID
+ * @ba_window_size: BlockAck window size
+ * @start_seq: Starting sequence number
+ *
+ * Return: 0 on success, error code on failure
+ */
+int dp_rx_tid_update_wifi3(struct dp_peer *peer, int tid, uint32_t
+ba_window_size, uint32_t start_seq)
+{
+	/* TODO: Implement this once REO command API is available */
+	return 0;
+}
+
+/*
+ * dp_rx_tid_setup_wifi3() - Setup receive TID state
+ * @peer: Datapath peer handle
+ * @tid: TID
+ * @ba_window_size: BlockAck window size
+ * @start_seq: Starting sequence number
+ *
+ * Return: 0 on success, error code on failure
+ */
+int dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
+	uint32_t ba_window_size, uint32_t start_seq)
+{
+	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
+	struct dp_vdev *vdev = peer->vdev;
+	struct dp_soc *soc = vdev->pdev->soc;
+	uint32_t hw_qdesc_size;
+	uint32_t hw_qdesc_align;
+	int hal_pn_type;
+	void *hw_qdesc_vaddr;
+
+	if (rx_tid->hw_qdesc_vaddr_unaligned != NULL)
+		return dp_rx_tid_update_wifi3(peer, tid, ba_window_size,
+			start_seq);
+
+#ifdef notyet
+	hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc, ba_window_size);
+#else
+	/* TODO: Allocating HW queue descriptors based on max BA window size
+	 * for all QOS TIDs so that same descriptor can be used later when
+	 * ADDBA request is received. This should be changed to allocate HW
+	 * queue descriptors based on BA window size being negotiated (0 for
+	 * non BA cases), and reallocate when BA window size changes and also
+	 * send WMI message to FW to change the REO queue descriptor in Rx
+	 * peer entry as part of dp_rx_tid_update.
+	 */
+	if (tid != DP_NON_QOS_TID)
+		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
+			HAL_RX_MAX_BA_WINDOW);
+	else
+		hw_qdesc_size = hal_get_reo_qdesc_size(soc->hal_soc,
+			ba_window_size);
+#endif
+	hw_qdesc_align = hal_get_reo_qdesc_align(soc->hal_soc);
+	/* To avoid unnecessary extra allocation for alignment, try allocating
+	 * exact size and see if we already have aligned address.
+	 */
+	rx_tid->hw_qdesc_alloc_size = hw_qdesc_size;
+	rx_tid->hw_qdesc_vaddr_unaligned = qdf_mem_alloc_consistent(
+		soc->osdev, NULL, rx_tid->hw_qdesc_alloc_size,
+		&(rx_tid->hw_qdesc_paddr_unaligned));
+
+	if (!rx_tid->hw_qdesc_vaddr_unaligned) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"%s: Rx tid HW desc alloc failed: tid %d\n",
+			__func__, tid);
+		return QDF_STATUS_E_NOMEM;
+	}
+
+	if ((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
+		hw_qdesc_align) {
+		/* Address allocated above is not aligned. Allocate extra
+		 * memory for alignment
+		 */
+		qdf_mem_free_consistent(soc->osdev, NULL,
+				rx_tid->hw_qdesc_alloc_size,
+				rx_tid->hw_qdesc_vaddr_unaligned,
+				rx_tid->hw_qdesc_paddr_unaligned, 0);
+
+		rx_tid->hw_qdesc_alloc_size =
+			hw_qdesc_size + hw_qdesc_align - 1;
+		rx_tid->hw_qdesc_vaddr_unaligned = qdf_mem_alloc_consistent(
+			soc->osdev, NULL, rx_tid->hw_qdesc_alloc_size,
+			&(rx_tid->hw_qdesc_paddr_unaligned));
+
+		if (!rx_tid->hw_qdesc_vaddr_unaligned) {
+			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+				"%s: Rx tid HW desc alloc failed: tid %d\n",
+				__func__, tid);
+			return QDF_STATUS_E_NOMEM;
+		}
+
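+		/* Round the unaligned address up to the next hw_qdesc_align boundary */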
+		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned +
+			(hw_qdesc_align -
+			((unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned) %
+			hw_qdesc_align));
+
+		rx_tid->hw_qdesc_paddr = rx_tid->hw_qdesc_paddr_unaligned +
+			((unsigned long)hw_qdesc_vaddr -
+			(unsigned long)(rx_tid->hw_qdesc_vaddr_unaligned));
+	} else {
+		hw_qdesc_vaddr = rx_tid->hw_qdesc_vaddr_unaligned;
+		rx_tid->hw_qdesc_paddr = rx_tid->hw_qdesc_paddr_unaligned;
+	}
+
+	/* TODO: Ensure that sec_type is set before ADDBA is received.
+	 * Currently this is set based on htt indication
+	 * HTT_T2H_MSG_TYPE_SEC_IND from target
+	 */
+	switch (peer->security[dp_sec_ucast].sec_type) {
+	case htt_sec_type_tkip_nomic:
+	case htt_sec_type_aes_ccmp:
+	case htt_sec_type_aes_ccmp_256:
+	case htt_sec_type_aes_gcmp:
+	case htt_sec_type_aes_gcmp_256:
+		hal_pn_type = HAL_PN_WPA;
+		break;
+	case htt_sec_type_wapi:
+		if (vdev->opmode == wlan_op_mode_ap)
+			hal_pn_type = HAL_PN_WAPI_EVEN;
+		else
+			hal_pn_type = HAL_PN_WAPI_UNEVEN;
+		break;
+	default:
+		hal_pn_type = HAL_PN_NONE;
+		break;
+	}
+
+	hal_reo_qdesc_setup(soc->hal_soc, tid, ba_window_size, start_seq,
+		hw_qdesc_vaddr, rx_tid->hw_qdesc_paddr, hal_pn_type);
+
+	if (soc->ol_ops->peer_rx_reorder_queue_setup) {
+		soc->ol_ops->peer_rx_reorder_queue_setup(soc->osif_soc,
+			peer->vdev->vdev_id, peer->mac_addr.raw,
+			rx_tid->hw_qdesc_paddr, tid, tid);
+	}
+	return 0;
+}
+
+/*
+ * Rx TID deletion callback to free memory allocated for HW queue descriptor
+ */
+static void dp_rx_tid_delete_cb(struct dp_pdev *pdev, void *cb_ctxt, int status)
+{
+	struct dp_soc *soc = pdev->soc;
+	struct dp_rx_tid *rx_tid = (struct dp_rx_tid *)cb_ctxt;
+
+	if (status) {
+		/* Should not happen normally. Just print error for now */
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"%s: Rx tid HW desc deletion failed: tid %d\n",
+				__func__, rx_tid->tid);
+	}
+
+	qdf_mem_free_consistent(soc->osdev, NULL,
+				rx_tid->hw_qdesc_alloc_size,
+				rx_tid->hw_qdesc_vaddr_unaligned,
+				rx_tid->hw_qdesc_paddr_unaligned, 0);
+
+	rx_tid->hw_qdesc_vaddr_unaligned = NULL;
+	rx_tid->hw_qdesc_alloc_size = 0;
+}
+
+/*
+ * dp_rx_tid_delete_wifi3() - Delete receive TID queue
+ * @peer: Datapath peer handle
+ * @tid: TID
+ *
+ * Return: 0 on success, error code on failure
+ */
+int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
+{
+#ifdef notyet /* TBD: Enable this once REO command interface is available */
+	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
+	dp_rx_tid_hw_update_valid(rx_tid->hw_qdesc_paddr, 0,
+		dp_rx_tid_delete_cb, (void *)rx_tid);
+#endif
+	return 0;
+}
+
+/*
+ * dp_peer_rx_init() - Initialize receive TID state
+ * @pdev: Datapath pdev
+ * @peer: Datapath peer
+ *
+ */
+void dp_peer_rx_init(struct dp_pdev *pdev, struct dp_peer *peer)
+{
+	int tid;
+	struct dp_rx_tid *rx_tid;
+	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
+		rx_tid = &peer->rx_tid[tid];
+		rx_tid->array = &rx_tid->base;
+		rx_tid->base.head = rx_tid->base.tail = NULL;
+		rx_tid->tid = tid;
+		rx_tid->defrag_timeout_ms = 0;
+		rx_tid->ba_win_size = 0;
+		rx_tid->ba_status = DP_RX_BA_INACTIVE;
+
+		rx_tid->defrag_waitlist_elem.tqe_next = NULL;
+		rx_tid->defrag_waitlist_elem.tqe_prev = NULL;
+
+#ifdef notyet /* TODO: See if this is required for exception handling */
+		/* invalid sequence number */
+		peer->tids_last_seq[tid] = 0xffff;
+#endif
+	}
+
+	/* Setup default (non-qos) rx tid queue */
+	dp_rx_tid_setup_wifi3(peer, DP_NON_QOS_TID, 1, 0);
+	/*
+	 * Set security defaults: no PN check, no security. The target may
+	 * send a HTT SEC_IND message to overwrite these defaults.
+	 */
+	peer->security[dp_sec_ucast].sec_type =
+		peer->security[dp_sec_mcast].sec_type = htt_sec_type_none;
+}
+
+/*
+ * dp_peer_rx_cleanup() - Cleanup receive TID state
+ * @vdev: Datapath vdev
+ * @peer: Datapath peer
+ *
+ */
+void dp_peer_rx_cleanup(struct dp_vdev *vdev, struct dp_peer *peer)
+{
+	int tid;
+	struct dp_rx_tid *rx_tid;
+	uint32_t tid_delete_mask = 0;
+	for (tid = 0; tid < DP_MAX_TIDS; tid++) {
+		rx_tid = &peer->rx_tid[tid];
+		if (rx_tid->hw_qdesc_vaddr_unaligned != NULL) {
+			dp_rx_tid_delete_wifi3(peer, tid);
+			tid_delete_mask |= (1 << tid);
+		}
+	}
+#ifdef notyet /* See if FW can remove queues as part of peer cleanup */
+	if (soc->ol_ops->peer_rx_reorder_queue_remove) {
+		soc->ol_ops->peer_rx_reorder_queue_remove(soc->osif_soc,
+			peer->vdev->vdev_id, peer->mac_addr.raw,
+			tid_delete_mask);
+	}
+#endif
+}
+
+/*
+* dp_addba_requestprocess_wifi3() - Process ADDBA request from peer
+*
+* @peer_handle: Datapath peer handle
+* @dialogtoken: dialogtoken from ADDBA frame
+* @baparamset: BlockAck parameters received in ADDBA frame
+* @batimeout: BA timeout received in ADDBA frame
+* @basequencectrl: BA sequence control received in ADDBA frame
+*
+* Return: 0 on success, error code on failure
+*/
+int dp_addba_requestprocess_wifi3(void *peer_handle, uint8_t dialogtoken,
+	struct ieee80211_ba_parameterset *baparamset, uint16_t batimeout,
+	struct ieee80211_ba_seqctrl basequencectrl)
+{
+	struct dp_peer *peer = (struct dp_peer *)peer_handle;
+	uint16_t tid = baparamset->tid;
+	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
+
+	if ((rx_tid->ba_status == DP_RX_BA_ACTIVE) &&
+			(rx_tid->hw_qdesc_vaddr_unaligned != NULL))
+		rx_tid->ba_status = DP_RX_BA_INACTIVE;
+
+	if (dp_rx_tid_setup_wifi3(peer, tid, baparamset->buffersize,
+		basequencectrl.startseqnum)) {
+		/* TODO: Should we send addba reject in this case */
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	rx_tid->ba_win_size = baparamset->buffersize;
+	rx_tid->dialogtoken = dialogtoken;
+	rx_tid->statuscode = IEEE80211_STATUS_SUCCESS;
+	rx_tid->ba_status = DP_RX_BA_ACTIVE;
+
+	return 0;
+}
+
+/*
+* dp_addba_responsesetup_wifi3() - Set up ADDBA response parameters
+*
+* @peer_handle: Datapath peer handle
+* @tid: TID number
+* @dialogtoken: output dialogtoken
+* @statuscode: output status code
+* @baparamset: Output structure to populate BA response parameters
+* @batimeout: Output BA timeout
+*/
+void dp_addba_responsesetup_wifi3(void *peer_handle, uint8_t tid,
+	uint8_t *dialogtoken, uint16_t *statuscode,
+	struct ieee80211_ba_parameterset *baparamset, uint16_t *batimeout)
+{
+	struct dp_peer *peer = (struct dp_peer *)peer_handle;
+	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
+
+	/* setup ADDBA response parameters */
+	*dialogtoken = rx_tid->dialogtoken;
+	*statuscode = rx_tid->statuscode;
+	baparamset->amsdusupported = IEEE80211_BA_AMSDU_SUPPORTED;
+	baparamset->bapolicy = IEEE80211_BA_POLICY_IMMEDIATE;
+	baparamset->tid = tid;
+	baparamset->buffersize = rx_tid->ba_win_size;
+	*batimeout  = 0;
+}
+
+/*
+* dp_delba_process_wifi3() - Process DELBA from peer
+* @peer_handle: Datapath peer handle
+* @delbaparamset: DELBA parameters received in DELBA frame
+* @reasoncode: Reason code received in DELBA frame
+*
+* Return: 0 on success, error code on failure
+*/
+int dp_delba_process_wifi3(void *peer_handle,
+	struct ieee80211_delba_parameterset *delbaparamset, uint16_t reasoncode)
+{
+	struct dp_peer *peer = (struct dp_peer *)peer_handle;
+	uint16_t tid = delbaparamset->tid;
+	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
+
+	if (rx_tid->ba_status != DP_RX_BA_ACTIVE)
+		return QDF_STATUS_E_FAILURE;
+
+	/* TODO: See if we can delete the existing REO queue descriptor and
+	 * replace with a new one without queue extension descriptors to save
+	 * memory
+	 */
+	dp_rx_tid_update_wifi3(peer, tid, 0, 0);
+
+	rx_tid->ba_status = DP_RX_BA_INACTIVE;
+
+	return 0;
+}
+
+void dp_rx_discard(struct dp_vdev *vdev, struct dp_peer *peer, unsigned tid,
+	qdf_nbuf_t msdu_list)
+{
+	while (msdu_list) {
+		qdf_nbuf_t msdu = msdu_list;
+
+		msdu_list = qdf_nbuf_next(msdu_list);
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
+			"discard rx %p from partly-deleted peer %p "
+			"(%02x:%02x:%02x:%02x:%02x:%02x)\n",
+			msdu, peer,
+			peer->mac_addr.raw[0], peer->mac_addr.raw[1],
+			peer->mac_addr.raw[2], peer->mac_addr.raw[3],
+			peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
+		qdf_nbuf_free(msdu);
+	}
+}
+
+void
+dp_rx_sec_ind_handler(void *soc_handle, uint16_t peer_id,
+	enum htt_sec_type sec_type, int is_unicast, u_int32_t *michael_key,
+	u_int32_t *rx_pn)
+{
+	struct dp_soc *soc = (struct dp_soc *)soc_handle;
+	struct dp_peer *peer;
+	int sec_index;
+
+	peer = dp_peer_find_by_id(soc, peer_id);
+	if (!peer) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			"Couldn't find peer from ID %d - skipping security inits\n",
+			peer_id);
+		return;
+	}
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
+		"sec spec for peer %p (%02x:%02x:%02x:%02x:%02x:%02x): "
+		"%s key of type %d\n",
+		peer,
+		peer->mac_addr.raw[0], peer->mac_addr.raw[1],
+		peer->mac_addr.raw[2], peer->mac_addr.raw[3],
+		peer->mac_addr.raw[4], peer->mac_addr.raw[5],
+		is_unicast ? "ucast" : "mcast",
+		sec_type);
+	sec_index = is_unicast ? dp_sec_ucast : dp_sec_mcast;
+	peer->security[sec_index].sec_type = sec_type;
+#if notyet /* TODO: See if this is required for defrag support */
+	/* michael key only valid for TKIP, but for simplicity,
+	 * copy it anyway
+	 */
+	qdf_mem_copy(
+		&peer->security[sec_index].michael_key[0],
+		michael_key,
+		sizeof(peer->security[sec_index].michael_key));
+#ifdef BIG_ENDIAN_HOST
+	OL_IF_SWAPBO(peer->security[sec_index].michael_key[0],
+				 sizeof(peer->security[sec_index].michael_key));
+#endif /* BIG_ENDIAN_HOST */
+#endif
+
+#ifdef notyet /* TODO: Check if this is required for wifi3.0 */
+	if (sec_type != htt_sec_type_wapi) {
+		qdf_mem_set(peer->tids_last_pn_valid, _EXT_TIDS, 0x00);
+	} else {
+		for (i = 0; i < DP_MAX_TIDS; i++) {
+			/*
+			 * Setting PN valid bit for WAPI sec_type,
+			 * since WAPI PN has to be started with predefined value
+			 */
+			peer->tids_last_pn_valid[i] = 1;
+			qdf_mem_copy(
+				(u_int8_t *) &peer->tids_last_pn[i],
+				(u_int8_t *) rx_pn, sizeof(union htt_rx_pn_t));
+			peer->tids_last_pn[i].pn128[1] =
+				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[1]);
+			peer->tids_last_pn[i].pn128[0] =
+				qdf_cpu_to_le64(peer->tids_last_pn[i].pn128[0]);
+		}
+	}
+#endif
+	/* TODO: Update HW TID queue with PN check parameters (pn type for
+	 * all security types and last pn for WAPI) once REO command API
+	 * is available
+	 */
+}
+

+ 514 - 0
dp/wifi3.0/dp_types.h

@@ -0,0 +1,514 @@
+/*
+ * Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _DP_TYPES_H_
+#define _DP_TYPES_H_
+
+#include <qdf_types.h>
+#include <qdf_nbuf.h>
+#include <qdf_lock.h>
+#include <qdf_atomic.h>
+#include <qdf_util.h>
+#include <queue.h>
+
+#include <cdp_txrx_cmn.h>
+#include <wdi_event_api.h>    /* WDI subscriber event list */
+
+#define MAX_PDEV_CNT 3
+#define MAX_LINK_DESC_BANKS 8
+#define MAX_TXDESC_POOLS 4
+#define MAX_RXDESC_POOLS 4
+#define MAX_REO_DEST_RINGS 4
+#define MAX_TCL_DATA_RINGS 4
+#define DP_MAX_TX_RINGS 8
+#define DP_MAX_RX_RINGS 8
+#define MAX_IDLE_SCATTER_BUFS 16
+
+struct dp_soc_cmn;
+struct dp_pdev;
+struct dp_vdev;
+union dp_tx_desc_list_elem_t;
+union dp_rx_desc_list_elem_t;
+
+#define DP_MUTEX_TYPE qdf_spinlock_t
+
+struct dp_srng {
+	void *hal_srng;
+	void *base_vaddr_unaligned;
+	qdf_dma_addr_t base_paddr_unaligned;
+	uint32_t alloc_size;
+	int irq;
+};
+
+struct dp_rx_reorder_array_elem {
+	qdf_nbuf_t head;
+	qdf_nbuf_t tail;
+};
+
+#define DP_RX_BA_INACTIVE 0
+#define DP_RX_BA_ACTIVE 1
+
+/* Rx TID */
+struct dp_rx_tid {
+	/* TID */
+	int tid;
+
+	/* REO TID queue descriptors */
+	void *hw_qdesc_vaddr_unaligned;
+	qdf_dma_addr_t hw_qdesc_paddr_unaligned;
+	qdf_dma_addr_t hw_qdesc_paddr;
+	uint32_t hw_qdesc_alloc_size;
+
+	/* RX ADDBA session state */
+	int ba_status;
+
+	/* RX BA window size */
+	uint16_t ba_win_size;
+
+	/* TODO: Check the following while adding defragmentation support */
+	struct dp_rx_reorder_array_elem *array;
+	/* base - single rx reorder element used for non-aggr cases */
+	struct dp_rx_reorder_array_elem base;
+
+	/* only used for defrag right now */
+	TAILQ_ENTRY(dp_rx_tid) defrag_waitlist_elem;
+	uint32_t defrag_timeout_ms;
+	uint16_t dialogtoken;
+	uint16_t statuscode;
+};
+
+struct ol_if_ops {
+	void (*peer_set_default_routing)(void *scn_handle,
+		uint8_t *peer_macaddr, uint8_t vdev_id, bool hash_based,
+		uint8_t ring_num);
+	int (*peer_rx_reorder_queue_setup)(void *ol_soc_handle,
+		uint8_t vdev_id, uint8_t *peer_mac, qdf_dma_addr_t hw_qdesc,
+		int tid, uint16_t queue_num);
+	int (*peer_rx_reorder_queue_remove)(void *ol_soc_handle,
+		uint8_t vdev_id, uint8_t *peer_macaddr, uint32_t tid_mask);
+	/* TODO: Add any other control path calls required to OL_IF/WMA layer */
+};
+
+/* SOC level structure for data path */
+struct dp_soc {
+	/* Common base structure - Should be the first member */
+#ifdef notyet /* TODO: dp_soc_cmn should be defined in cmn headers */
+	struct dp_soc_cmn soc_cmn;
+#endif
+	/* Callbacks to OL_IF layer */
+	struct ol_if_ops *ol_ops;
+
+	/* SoC/softc handle from OSIF layer */
+	void *osif_soc;
+
+	/* OS device abstraction */
+	qdf_device_t osdev;
+
+	/* WLAN config context */
+	void *wlan_cfg_ctx;
+
+	/* HTT handle for host-fw interaction */
+	void *htt_handle;
+
+	/* Common init done */
+	bool cmn_init_done;
+
+	/* Opaque hif handle */
+	struct hif_opaque_softc *hif_handle;
+
+	/* PDEVs on this SOC */
+	struct dp_pdev *pdev_list[MAX_PDEV_CNT];
+
+	/* Number of PDEVs */
+	uint8_t pdev_count;
+
+	/* Link descriptor memory banks */
+	struct {
+		void *base_vaddr_unaligned;
+		void *base_vaddr;
+		qdf_dma_addr_t base_paddr_unaligned;
+		qdf_dma_addr_t base_paddr;
+		uint32_t size;
+	} link_desc_banks[MAX_LINK_DESC_BANKS];
+
+	/* Link descriptor Idle list for HW internal use (SRNG mode) */
+	struct dp_srng wbm_idle_link_ring;
+
+	/* Link descriptor Idle list for HW internal use (scatter buffer mode)
+	 */
+	qdf_dma_addr_t wbm_idle_scatter_buf_base_paddr[MAX_IDLE_SCATTER_BUFS];
+	void *wbm_idle_scatter_buf_base_vaddr[MAX_IDLE_SCATTER_BUFS];
+	uint32_t wbm_idle_scatter_buf_size;
+
+	/* Tx SW descriptor pool */
+	struct {
+		uint32_t pool_size;
+		union dp_tx_desc_list_elem_t *array;
+		union dp_tx_desc_list_elem_t *freelist;
+	} tx_desc[MAX_TXDESC_POOLS];
+
+	/* Rx SW descriptor pool */
+	struct {
+		uint32_t pool_size;
+		union dp_rx_desc_list_elem_t *array;
+		union dp_rx_desc_list_elem_t *freelist;
+	} rx_desc[MAX_RXDESC_POOLS];
+
+	/* HAL SOC handle */
+	void *hal_soc;
+
+	/* REO destination rings */
+	struct dp_srng reo_dest_ring[MAX_REO_DEST_RINGS];
+
+	/* Number of REO destination rings */
+	uint8_t num_reo_dest_rings;
+
+	/* REO exception ring - TBD: See if this should be combined with reo_dest_ring */
+	struct dp_srng reo_exception_ring;
+
+	/* REO reinjection ring */
+	struct dp_srng reo_reinject_ring;
+
+	/* REO command ring */
+	struct dp_srng reo_cmd_ring;
+
+	/* REO command status ring */
+	struct dp_srng reo_status_ring;
+
+	/* WBM Rx release ring */
+	struct dp_srng rx_rel_ring;
+
+	/* Number of TCL data rings */
+	uint8_t num_tcl_data_rings;
+
+	/* TCL data ring */
+	struct dp_srng tcl_data_ring[MAX_TCL_DATA_RINGS];
+
+	/* TCL command ring */
+	struct dp_srng tcl_cmd_ring;
+
+	/* TCL command status ring */
+	struct dp_srng tcl_status_ring;
+
+	/* WBM Tx completion rings */
+	struct dp_srng tx_comp_ring[MAX_TCL_DATA_RINGS];
+
+	/* Common WBM link descriptor release ring (SW to WBM) */
+	struct dp_srng wbm_desc_rel_ring;
+
+	/* Tx ring map for interrupt processing */
+	struct dp_srng *tx_ring_map[DP_MAX_TX_RINGS];
+
+	/* Rx ring map for interrupt processing */
+	struct dp_srng *rx_ring_map[DP_MAX_RX_RINGS];
+
+	/* WDI event handlers */
+	struct wdi_event_subscribe_t **wdi_event_list;
+
+	/* peer ID to peer object map (array of pointers to peer objects) */
+	struct dp_peer **peer_id_to_obj_map;
+
+	struct {
+		unsigned mask;
+		unsigned idx_bits;
+		TAILQ_HEAD(, dp_peer) * bins;
+	} peer_hash;
+
+	/* rx defrag state - TBD: do we need this per radio? */
+	struct {
+		struct {
+			TAILQ_HEAD(, dp_rx_tid) waitlist;
+			uint32_t timeout_ms;
+		} defrag;
+		struct {
+			int defrag_timeout_check;
+			int dup_check;
+		} flags;
+	} rx;
+
+	/* optional rx processing function */
+	void (*rx_opt_proc)(
+		struct dp_vdev *vdev,
+		struct dp_peer *peer,
+		unsigned tid,
+		qdf_nbuf_t msdu_list);
+
+	/* pool addr for mcast enhance buff */
+	struct {
+		int size;
+		uint32_t paddr;
+		char *vaddr;
+		struct dp_tx_me_buf_t *freelist;
+		int buf_in_use;
+		int nonpool_buf_in_use;
+		qdf_dma_mem_context(memctx);
+	} me_buf;
+
+	/**
+	 * peer ref mutex:
+	 * 1. Protect peer object lookups until the returned peer object's
+	 *	reference count is incremented.
+	 * 2. Provide mutex when accessing peer object lookup structures.
+	 */
+	DP_MUTEX_TYPE peer_ref_mutex;
+
+	/* Number of VAPs with mcast enhancement enabled */
+	atomic_t mc_num_vap_attached;
+
+	/* maximum value for peer_id */
+	int max_peers;
+
+	/* SoC level data path statistics */
+	struct {
+		/* TBD */
+	} stats;
+};
+
+
+/* PDEV level structure for data path */
+struct dp_pdev {
+	/* PDEV handle from OSIF layer TBD: see if we really need osif_pdev */
+	void *osif_pdev;
+
+	/* PDEV Id */
+	int pdev_id;
+
+	/* TXRX SOC handle */
+	struct dp_soc *soc;
+
+	/* RXDMA buffer replenish ring */
+	struct dp_srng rxdma_buf_ring;
+
+	/* RXDMA monitor buffer replenish ring */
+	struct dp_srng rxdma_mon_buf_ring;
+
+	/* RXDMA monitor destination ring */
+	struct dp_srng rxdma_mon_dst_ring;
+
+	/* RXDMA monitor status ring. TBD: Check format of this ring */
+	struct dp_srng rxdma_mon_status_ring;
+
+	/**
+	 * TODO: See if we need a ring map here for LMAC rings.
+	 * 1. Monitor rings are currently planned to be processed on receiving
+	 * PPDU end interrupts and hence won't need ring based interrupts.
+	 * 2. Rx buffer rings will be replenished during REO destination
+	 * processing and don't require regular interrupt handling - we will
+	 * only handle low water mark interrupts, which are not expected
+	 * frequently
+	 */
+
+	/* VDEV list */
+	TAILQ_HEAD(, dp_vdev) vdev_list;
+
+	/* Number of vdevs this device has */
+	uint16_t vdev_count;
+
+	/* PDEV transmit lock */
+	qdf_spinlock_t tx_lock;
+
+#ifdef notyet
+	/* Pktlog pdev */
+	ol_pktlog_dev_t *pl_dev;
+#endif
+
+	/* Monitor mode interface and status storage */
+	struct dp_vdev *monitor_vdev;
+
+	/* monitor mode mutex */
+	qdf_spinlock_t mon_mutex;
+
+	/* Band steering  */
+	/* TBD */
+
+	/* PDEV level data path statistics */
+	struct {
+		/* TBD */
+	} stats;
+
+	/* Global RX decap mode for the device */
+	enum htt_pkt_type rx_decap_mode;
+
+	/* Enhanced Stats is enabled */
+	bool ap_stats_tx_cal_enable;
+
+	/* TBD */
+};
+
+struct dp_peer;
+
+#define DP_MAC_ADDR_LEN 6
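+/*
+ * MAC address overlaid with aligned 16-bit and 32-bit views so that peer
+ * lookup code (dp_peer_find_mac_addr_cmp(), the peer hash) can compare and
+ * fold addresses word-at-a-time instead of byte-by-byte.
+ */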
+union dp_align_mac_addr {
+	uint8_t raw[DP_MAC_ADDR_LEN];
+	struct {
+		uint16_t bytes_ab;
+		uint16_t bytes_cd;
+		uint16_t bytes_ef;
+	} align2;
+	struct {
+		uint32_t bytes_abcd;
+		uint16_t bytes_ef;
+	} align4;
+};
+
+#define MAX_HTT_METADATA_LEN 32
+
+/* VDEV structure for data path state */
+struct dp_vdev {
+	/* physical device that is the parent of this virtual device */
+	struct dp_pdev *pdev;
+
+	/* Handle to the OS shim SW's virtual device */
+	ol_osif_vdev_handle osif_vdev;
+
+	/* vdev_id - ID used to specify a particular vdev to the target */
+	uint8_t vdev_id;
+
+	/* MAC address */
+	union dp_align_mac_addr mac_addr;
+
+	/* node in the pdev's list of vdevs */
+	TAILQ_ENTRY(dp_vdev) vdev_list_elem;
+
+	/* dp_peer list */
+	TAILQ_HEAD(, dp_peer) peer_list;
+
+	/* callback to hand rx frames to the OS shim */
+	ol_txrx_rx_fp osif_rx;
+
+#ifdef notyet
+	/* callback to check if the msdu is an WAI (WAPI) frame */
+	ol_rx_check_wai_fp osif_check_wai;
+#endif
+
+	/* proxy arp function */
+	ol_txrx_proxy_arp_fp osif_proxy_arp;
+
+	/* callback to hand rx monitor 802.11 MPDU to the OS shim */
+	ol_txrx_rx_mon_fp osif_rx_mon;
+
+	/* deferred vdev deletion state */
+	struct {
+		/* VDEV delete pending */
+		int pending;
+		/*
+		* callback and a context argument to provide a
+		* notification for when the vdev is deleted.
+		*/
+		ol_txrx_vdev_delete_cb callback;
+		void *context;
+	} delete;
+
+	/* safe mode control to bypass the encrypt and decipher process */
+	uint32_t safemode;
+
+	/* rx filter related */
+	uint32_t drop_unenc;
+#if notyet
+	privacy_exemption privacy_filters[MAX_PRIVACY_FILTERS];
+	uint32_t filters_num;
+#endif
+
+	/* VDEV operating mode */
+	enum wlan_op_mode opmode;
+
+	/* Tx encapsulation type for this VAP */
+	enum htt_pkt_type tx_encap_type;
+	/* Rx Decapsulation type for this VAP */
+	enum htt_pkt_type rx_decap_type;
+
+	/* BSS peer */
+	struct dp_peer *vap_bss_peer;
+
+	/* NAWDS enabled */
+	bool nawds_enabled;
+
+	/* Default HTT meta data for this VDEV */
+	/* TBD: check alignment constraints */
+	uint8_t htt_metadata[MAX_HTT_METADATA_LEN];
+	uint32_t htt_metadata_size;
+
+	/* Mesh mode vdev */
+	uint32_t mesh_vdev;
+
+	/* TBD */
+};
+
+
+enum {
+	dp_sec_mcast = 0,
+	dp_sec_ucast
+};
+
+#define MAX_NUM_PEER_ID_PER_PEER 8
+#define DP_MAX_TIDS 17
+#define DP_NON_QOS_TID 16
+/* Peer structure for data path state */
+struct dp_peer {
+	/* VDEV to which this peer is associated */
+	struct dp_vdev *vdev;
+
+	qdf_atomic_t ref_cnt;
+
+	/* TODO: See if multiple peer IDs are required in wifi3.0 */
+	/* peer ID(s) for this peer */
+	uint16_t peer_ids[MAX_NUM_PEER_ID_PER_PEER];
+
+	union dp_align_mac_addr mac_addr;
+
+	/* node in the vdev's list of peers */
+	TAILQ_ENTRY(dp_peer) peer_list_elem;
+	/* node in the hash table bin's list of peers */
+	TAILQ_ENTRY(dp_peer) hash_list_elem;
+
+	/* TID structures */
+	struct dp_rx_tid rx_tid[DP_MAX_TIDS];
+
+	/* TBD: No transmit TID state required? */
+
+	struct {
+		enum htt_sec_type sec_type;
+#if notyet /* TODO: See if this is required for defrag support */
+		u_int32_t michael_key[2]; /* relevant for TKIP */
+#endif
+	} security[2]; /* 0 -> multicast, 1 -> unicast */
+
+	/*
+	* rx proc function: this either is a copy of pdev's rx_opt_proc for
+	* regular rx processing, or has been redirected to a /dev/null discard
+	* function when peer deletion is in progress.
+	*/
+	void (*rx_opt_proc)(struct dp_vdev *vdev, struct dp_peer *peer,
+		unsigned tid, qdf_nbuf_t msdu_list);
+
+	/* set when node is authorized */
+	uint8_t authorize:1;
+
+	/* Band steering: Set when node is inactive */
+	uint8_t peer_bs_inact_flag:1;
+
+	/* NAWDS Flag and Bss Peer bit */
+	uint8_t nawds_enabled:1,
+				bss_peer:1,
+				wapi:1;
+
+	/* TBD */
+};
+
+#endif /* _DP_TYPES_H_ */
+

+ 272 - 0
hal/wifi3.0/hal_rx.c

@@ -0,0 +1,272 @@
+/*
+ * Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "wcss_seq_hwiobase.h"
+#include "wcss_seq_hwioreg.h"
+#include "sw_xml_headers.h"
+#include "tlv_hdr.h"
+#include "hal_api.h"
+
+/* TODO: See if the following definition is available in HW headers */
+#define HAL_REO_OWNED 4
+#define HAL_REO_QUEUE_DESC 8
+#define HAL_REO_QUEUE_EXT_DESC 9
+
+#define PN_SIZE_24 0
+#define PN_SIZE_48 1
+#define PN_SIZE_128 2
+
+/* TODO: Using associated link desc counter 1 for Rx. Check with FW on
+ * how these counters are assigned
+ */
+#define HAL_RX_LINK_DESC_CNTR 1
+/* TODO: Following definition should be from HW headers */
+#define HAL_DESC_REO_OWNED 4
+
+/* TODO: Move this to common header file */
+static inline void hal_uniform_desc_hdr_setup(uint32_t *desc, uint32_t owner,
+	uint32_t buffer_type)
+{
+	HAL_DESC_SET_FIELD(desc, UNIFORM_DESCRIPTOR_HEADER_0, OWNER,
+		owner);
+	HAL_DESC_SET_FIELD(desc, UNIFORM_DESCRIPTOR_HEADER_0, BUFFER_TYPE,
+		buffer_type);
+}
+
+#ifndef TID_TO_WME_AC
+#define WME_AC_BE 0 /* best effort */
+#define WME_AC_BK 1 /* background */
+#define WME_AC_VI 2 /* video */
+#define WME_AC_VO 3 /* voice */
+
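+/* 802.11 TID to WMM access category: TIDs 0,3 -> BE; 1,2 -> BK; 4,5 -> VI; others -> VO */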
+#define TID_TO_WME_AC(_tid) ( \
+	(((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \
+	(((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \
+	(((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
+	WME_AC_VO)
+#endif
+#define HAL_NON_QOS_TID 16
+
+/**
+ * hal_reo_qdesc_setup - Setup HW REO queue descriptor
+ *
+ * @hal_soc: Opaque HAL SOC handle
+ * @tid: TID
+ * @ba_window_size: BlockAck window size
+ * @start_seq: Starting sequence number
+ * @hw_qdesc_vaddr: Virtual address of REO queue descriptor memory
+ * @hw_qdesc_paddr: Physical address of REO queue descriptor memory
+ * @pn_type: PN check type (HAL_PN_NONE/HAL_PN_WPA/HAL_PN_WAPI_EVEN/HAL_PN_WAPI_UNEVEN)
+ *
+ */
+void hal_reo_qdesc_setup(void *hal_soc, int tid, uint32_t ba_window_size,
+	uint32_t start_seq, void *hw_qdesc_vaddr, qdf_dma_addr_t hw_qdesc_paddr,
+	int pn_type)
+{
+	uint32_t *reo_queue_desc = (uint32_t *)hw_qdesc_vaddr;
+	uint32_t *reo_queue_ext_desc;
+	uint32_t reg_val;
+	uint32_t pn_enable, pn_size;
+
+	qdf_mem_zero(hw_qdesc_vaddr, sizeof(struct rx_reo_queue));
+
+	hal_uniform_desc_hdr_setup(reo_queue_desc, HAL_DESC_REO_OWNED,
+		HAL_REO_QUEUE_DESC);
+
+	/* This a just a SW meta data and will be copied to REO destination
+	 * descriptors indicated by hardware.
+	 * TODO: Setting TID in this field. See if we should set something else.
+	 */
+	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_1,
+		RECEIVE_QUEUE_NUMBER, tid);
+	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
+		VLD, 1);
+	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
+		ASSOCIATED_LINK_DESCRIPTOR_COUNTER, HAL_RX_LINK_DESC_CNTR);
+
+	/*
+	 * Fields DISABLE_DUPLICATE_DETECTION and SOFT_REORDER_ENABLE will be 0
+	 */
+
+	reg_val = TID_TO_WME_AC(tid);
+	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, AC, reg_val);
+
+	/* Check the purpose of RTY field.
+	 * HW documentation says "Retry bit is checked if this bit is set"
+	 */
+
+	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, CHK_2K_MODE, 1);
+	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, OOR_MODE, 1);
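+	/* The BA window size field is programmed as (window size - 1) */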
+	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, BA_WINDOW_SIZE,
+		ba_window_size - 1);
+
+	switch (pn_type) {
+	case HAL_PN_WPA:
+		pn_enable = 1;
+		pn_size = PN_SIZE_48;
+		break;
+	case HAL_PN_WAPI_EVEN:
+	case HAL_PN_WAPI_UNEVEN:
+		pn_enable = 1;
+		pn_size = PN_SIZE_128;
+		break;
+	default:
+		pn_enable = 0;
+		/* PN size is a don't-care when the PN check is disabled */
+		pn_size = PN_SIZE_24;
+		break;
+	}
+
+	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_CHECK_NEEDED,
+		pn_enable);
+
+	if (pn_type == HAL_PN_WAPI_EVEN)
+		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
+			PN_SHALL_BE_EVEN, 1);
+	else if (pn_type == HAL_PN_WAPI_UNEVEN)
+		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
+			PN_SHALL_BE_UNEVEN, 1);
+
+	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_HANDLING_ENABLE,
+		pn_enable);
+
+	HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2, PN_SIZE,
+		pn_size);
+
+	/* TODO: Check if RX_REO_QUEUE_2_IGNORE_AMPDU_FLAG needs to be set
+	 * in any other cases
+	 */
+	if ((ba_window_size <= 1) || (tid == HAL_NON_QOS_TID)) {
+		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_2,
+			IGNORE_AMPDU_FLAG, 1);
+	}
+
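+	/* 802.11 sequence numbers are 12 bits; only mark SVLD/SSN valid when in range */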
+	if (start_seq <= 0xfff) {
+		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SVLD, 1);
+		HAL_DESC_SET_FIELD(reo_queue_desc, RX_REO_QUEUE_3, SSN,
+			start_seq);
+	}
+
+	/* TODO: Check if we should set start PN for WAPI */
+
+#ifdef notyet
+	/* Setup first queue extension if BA window size is more than 1 */
+	if (ba_window_size > 1) {
+		reo_queue_ext_desc =
+			(uint32_t *)(((struct rx_reo_queue *)reo_queue_desc) +
+			1);
+		qdf_mem_zero(reo_queue_ext_desc,
+			sizeof(struct rx_reo_queue_ext));
+		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
+			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
+	}
+	/* Setup second queue extension if BA window size is more than 105 */
+	if (ba_window_size > 105) {
+		reo_queue_ext_desc = (uint32_t *)
+			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
+		qdf_mem_zero(reo_queue_ext_desc,
+			sizeof(struct rx_reo_queue_ext));
+		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
+			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
+	}
+	/* Setup third queue extension if BA window size is more than 210 */
+	if (ba_window_size > 210) {
+		reo_queue_ext_desc = (uint32_t *)
+			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
+		qdf_mem_zero(reo_queue_ext_desc,
+			sizeof(struct rx_reo_queue_ext));
+		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
+			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
+	}
+#else
+	/* TODO: HW queue descriptors are currently allocated for max BA
+	 * window size for all QOS TIDs so that same descriptor can be used
+	 * later when ADDBA request is received. This should be changed to
+	 * allocate HW queue descriptors based on BA window size being
+	 * negotiated (0 for non BA cases), and reallocate when BA window
+	 * size changes and also send WMI message to FW to change the REO
+	 * queue descriptor in Rx peer entry as part of dp_rx_tid_update.
+	 */
+	if (tid != HAL_NON_QOS_TID) {
+		reo_queue_ext_desc = (uint32_t *)
+			(((struct rx_reo_queue *)reo_queue_desc) + 1);
+		qdf_mem_zero(reo_queue_ext_desc, 3 *
+			sizeof(struct rx_reo_queue_ext));
+		/* Initialize first reo queue extension descriptor */
+		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
+			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
+		/* Initialize second reo queue extension descriptor */
+		reo_queue_ext_desc = (uint32_t *)
+			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
+		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
+			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
+		/* Initialize third reo queue extension descriptor */
+		reo_queue_ext_desc = (uint32_t *)
+			(((struct rx_reo_queue_ext *)reo_queue_ext_desc) + 1);
+		hal_uniform_desc_hdr_setup(reo_queue_ext_desc,
+			HAL_DESC_REO_OWNED, HAL_REO_QUEUE_EXT_DESC);
+	}
+#endif
+}
+
+
+/**
+ * hal_reo_setup - Initialize HW REO block
+ *
+ * @hal_soc: Opaque HAL SOC handle
+ */
+void hal_reo_setup(void *hal_soc)
+{
+	struct hal_soc *soc = (struct hal_soc *)hal_soc;
+
+	HAL_REG_WRITE(soc, HWIO_REO_R0_GENERAL_ENABLE_ADDR(
+		SEQ_WCSS_UMAC_REO_REG_OFFSET),
+		HAL_SM(HWIO_REO_R0_GENERAL_ENABLE,
+		FRAGMENT_DEST_RING, HAL_SRNG_REO_EXCEPTION) |
+		HAL_SM(HWIO_REO_R0_GENERAL_ENABLE, AGING_LIST_ENABLE, 1) |
+		HAL_SM(HWIO_REO_R0_GENERAL_ENABLE, AGING_FLUSH_ENABLE, 1));
+	/* Other ring enable bits and REO_ENABLE will be set by FW */
+
+	/* TODO: Setup destination ring mapping if enabled */
+
+	/* TODO: Error destination ring setting is left to default.
+	 * Default setting is to send all errors to release ring.
+	 */
+
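+	/*
+	 * Aging thresholds for the four REO destination lists (IX_0..IX_3),
+	 * programmed as HAL_DEFAULT_REO_TIMEOUT_MS converted to microseconds
+	 * (assuming the aging registers are in microsecond units).
+	 */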
+	HAL_REG_WRITE(soc,
+		HWIO_REO_R0_AGING_THRESHOLD_IX_0_ADDR(
+		SEQ_WCSS_UMAC_REO_REG_OFFSET),
+		HAL_DEFAULT_REO_TIMEOUT_MS * 1000);
+
+	HAL_REG_WRITE(soc,
+		HWIO_REO_R0_AGING_THRESHOLD_IX_1_ADDR(
+		SEQ_WCSS_UMAC_REO_REG_OFFSET),
+		(HAL_DEFAULT_REO_TIMEOUT_MS * 1000));
+
+	HAL_REG_WRITE(soc,
+		HWIO_REO_R0_AGING_THRESHOLD_IX_2_ADDR(
+		SEQ_WCSS_UMAC_REO_REG_OFFSET),
+		(HAL_DEFAULT_REO_TIMEOUT_MS * 1000));
+
+	HAL_REG_WRITE(soc,
+		HWIO_REO_R0_AGING_THRESHOLD_IX_3_ADDR(
+		SEQ_WCSS_UMAC_REO_REG_OFFSET),
+		(HAL_DEFAULT_REO_TIMEOUT_MS * 1000));
+
+	/* TODO: Check if the following registers should be set up by the host:
+	 * AGING_CONTROL
+	 * HIGH_MEMORY_THRESHOLD
+	 * GLOBAL_LINK_DESC_COUNT_THRESH_IX_0[1,2]
+	 * GLOBAL_LINK_DESC_COUNT_CTRL
+	 */
+}
+

+ 112 - 0
hal/wifi3.0/hal_wbm.c

@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "wcss_seq_hwiobase.h"
+#include "wcss_seq_hwioreg.h"
+#include "sw_xml_headers.h"
+#include "reo_destination_ring.h"
+#include "tcl_data_cmd.h"
+#include "tlv_hdr.h"
+#include "hal_api.h"
+
+/**
+ * hal_setup_link_idle_list - Setup scattered idle list using the
+ * buffer list provided
+ *
+ * @hal_soc: Opaque HAL SOC handle
+ * @scatter_bufs_base_paddr: Array of physical base addresses
+ * @scatter_bufs_base_vaddr: Array of virtual base addresses
+ * @num_scatter_bufs: Number of scatter buffers in the above lists
+ * @scatter_buf_size: Size of each scatter buffer
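+ * @last_buf_end_offset: Offset of the end of the last valid entry in the
+ * last scatter buffer, used to program the idle list tail pointer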
+ *
+ */
+void hal_setup_link_idle_list(void *hal_soc,
+	qdf_dma_addr_t scatter_bufs_base_paddr[],
+	void *scatter_bufs_base_vaddr[], uint32_t num_scatter_bufs,
+	uint32_t scatter_buf_size, uint32_t last_buf_end_offset)
+{
+	int i;
+	uint32_t *prev_buf_link_ptr = NULL;
+	struct hal_soc *soc = (struct hal_soc *)hal_soc;
+
+	/* Link the scatter buffers */
+	for (i = 0; i < num_scatter_bufs; i++) {
+		if (i > 0) {
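+			/* Publish the 40-bit physical base address of this
+			 * buffer as the next-buffer link at the tail of the
+			 * previous scatter buffer
+			 */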
+			prev_buf_link_ptr[0] =
+				scatter_bufs_base_paddr[i] & 0xffffffff;
+			prev_buf_link_ptr[1] =
+				((uint64_t)(scatter_bufs_base_paddr[i]) >> 32) &
+				HWIO_WBM_R0_SCATTERED_LINK_DESC_LIST_BASE_MSB_BASE_ADDRESS_39_32_BMSK;
+		}
+		prev_buf_link_ptr = (uint32_t *)(scatter_bufs_base_vaddr[i] +
+			scatter_buf_size - WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE);
+	}
+
+	/* TBD: Setup IDLE_LIST_CTRL and IDLE_LIST_SIZE registers - the
+	 * current definitions in the HW headers don't match those in the
+	 * WBM MLD document; pending confirmation from the HW team
+	 */
+
+	HAL_REG_WRITE(soc,
+		HWIO_WBM_R0_SCATTERED_LINK_DESC_LIST_BASE_LSB_ADDR(
+		SEQ_WCSS_UMAC_WBM_REG_OFFSET),
+		scatter_bufs_base_paddr[0] & 0xffffffff);
+	HAL_REG_WRITE(soc,
+		HWIO_WBM_R0_SCATTERED_LINK_DESC_LIST_BASE_MSB_ADDR(
+		SEQ_WCSS_UMAC_WBM_REG_OFFSET),
+		((uint64_t)(scatter_bufs_base_paddr[0]) >> 32) &
+		HWIO_WBM_R0_SCATTERED_LINK_DESC_LIST_BASE_MSB_BASE_ADDRESS_39_32_BMSK);
+	/* The ADDRESS_MATCH_TAG field in the above register is expected to
+	 * match the upper bits of the link pointer. The above write sets
+	 * this field to zero, and the upper bits of the link pointers are
+	 * also set to zero while linking the scatter buffers above
+	 */
+
+	/* Setup head and tail pointers for the idle list */
+	HAL_REG_WRITE(soc,
+		HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_HEAD_INFO_IX0_ADDR(
+		SEQ_WCSS_UMAC_WBM_REG_OFFSET),
+		scatter_bufs_base_paddr[0] & 0xffffffff);
+	HAL_REG_WRITE(soc,
+		HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_HEAD_INFO_IX1_ADDR(
+		SEQ_WCSS_UMAC_WBM_REG_OFFSET),
+		HAL_SM(HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_HEAD_INFO_IX1,
+		BUFFER_ADDRESS_39_32,
+		((uint64_t)(scatter_bufs_base_paddr[0]) >> 32)) |
+		HAL_SM(HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_HEAD_INFO_IX1,
+		HEAD_POINTER_OFFSET, 0));
+
+	HAL_REG_WRITE(soc,
+		HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_HEAD_INFO_IX0_ADDR(
+		SEQ_WCSS_UMAC_WBM_REG_OFFSET),
+		scatter_bufs_base_paddr[0] & 0xffffffff);
+
+	HAL_REG_WRITE(soc,
+		HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_TAIL_INFO_IX0_ADDR(
+		SEQ_WCSS_UMAC_WBM_REG_OFFSET),
+		scatter_bufs_base_paddr[num_scatter_bufs - 1] & 0xffffffff);
+	HAL_REG_WRITE(soc,
+		HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_TAIL_INFO_IX1_ADDR(
+		SEQ_WCSS_UMAC_WBM_REG_OFFSET),
+		HAL_SM(HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_TAIL_INFO_IX1,
+		BUFFER_ADDRESS_39_32,
+		((uint64_t)(scatter_bufs_base_paddr[num_scatter_bufs - 1]) >>
+		32)) | HAL_SM(HWIO_WBM_R0_SCATTERED_LINK_DESC_PTR_TAIL_INFO_IX1,
+		TAIL_POINTER_OFFSET, last_buf_end_offset << 2));
+}
+
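
For context, hal_setup_link_idle_list() expects the caller to have already allocated the scatter buffers and to treat the last WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE bytes of each buffer as the next-buffer link that the linking loop above fills in. A hypothetical caller-side sketch is shown below (assumes qdf_types.h, qdf_mem.h and hal_api.h); the buffer count, buffer size, last_buf_end_offset calculation and function name are illustrative assumptions, not part of this change.

	/* Sketch only: allocate DMA-coherent scatter buffers and hand them to
	 * the HAL to build the WBM idle link descriptor list
	 */
	#define NUM_IDLE_SCATTER_BUFS	2		/* assumed */
	#define IDLE_SCATTER_BUF_SIZE	(32 * 1024)	/* assumed */

	static void wbm_idle_list_setup_sketch(void *hal_soc, qdf_device_t osdev)
	{
		qdf_dma_addr_t paddr[NUM_IDLE_SCATTER_BUFS];
		void *vaddr[NUM_IDLE_SCATTER_BUFS];
		uint32_t last_buf_end_offset;
		int i;

		for (i = 0; i < NUM_IDLE_SCATTER_BUFS; i++) {
			vaddr[i] = qdf_mem_alloc_consistent(osdev, osdev->dev,
					IDLE_SCATTER_BUF_SIZE, &paddr[i]);
			if (!vaddr[i])
				return;	/* error handling elided in sketch */
		}

		/* Assumed: the last buffer is used up to the start of its
		 * reserved next-pointer area
		 */
		last_buf_end_offset = IDLE_SCATTER_BUF_SIZE -
			WBM_IDLE_SCATTER_BUF_NEXT_PTR_SIZE;

		hal_setup_link_idle_list(hal_soc, paddr, vaddr,
			NUM_IDLE_SCATTER_BUFS, IDLE_SCATTER_BUF_SIZE,
			last_buf_end_offset);
	}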