Browse Source

qcacmn: Add Lithium RX Core Processing Infrastructure

Add Lithium Rx basic packet processing path from REO.
Implement the following in this patch:

 - Rx buffer replenishment directly to RxDMA rings (WIN)

 - Rx descriptor management

 - Rx Path Processing for non-error packets

 - RxDMA optimization (access on 128 byte boundary)

Change-Id: I25aea3a79d5494a0447bc7ca919acb87b74f2760
CRs-Fixed: 1074199
Debashis Dutt 8 years ago
parent
commit
c4c52dc1fd
4 changed files with 779 additions and 0 deletions
  1. 44 0
      dp/wifi3.0/dp_peer.h
  2. 439 0
      dp/wifi3.0/dp_rx.c
  3. 149 0
      dp/wifi3.0/dp_rx.h
  4. 147 0
      dp/wifi3.0/dp_rx_desc.c

+ 44 - 0
dp/wifi3.0/dp_peer.h

@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <qdf_types.h>
+#include <qdf_lock.h>
+#include "dp_types.h"
+
+#define DP_INVALID_PEER_ID 0xffff
+
+/**
+ * dp_peer_find_by_id() - Look up the DP peer object for a peer id
+ *
+ * @soc		: core DP soc context
+ * @peer_id	: peer id whose object is to be retrieved
+ *
+ * Return: struct dp_peer*: Pointer to DP peer object, or NULL when
+ *	   the id is out of range
+ */
+static inline struct dp_peer *
+dp_peer_find_by_id(struct dp_soc *soc,
+		   uint16_t peer_id)
+{
+	/* Ids beyond max_peers can have no mapping */
+	if (peer_id >= soc->max_peers)
+		return NULL;
+
+	/* TODO: Hold lock */
+	return soc->peer_id_to_obj_map[peer_id];
+}

+ 439 - 0
dp/wifi3.0/dp_rx.c

@@ -0,0 +1,439 @@
+/*
+ * Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "dp_types.h"
+#include "dp_rx.h"
+#include "dp_peer.h"
+#include "hal_rx.h"
+#include "hal_api.h"
+#include "qdf_nbuf.h"
+#include <ieee80211.h>
+
+#ifdef RXDMA_OPTIMIZATION
+#define RX_BUFFER_ALIGNMENT	128
+#else /* RXDMA_OPTIMIZATION */
+#define RX_BUFFER_ALIGNMENT	4
+#endif /* RXDMA_OPTIMIZATION */
+
+#define RX_BUFFER_SIZE		2048
+#define RX_BUFFER_RESERVATION	0
+
+/*
+ * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
+ *			       called during dp rx initialization
+ *			       and at the end of dp_rx_process.
+ *
+ * @dp_soc: core txrx main context
+ * @mac_id: mac_id which is one of 3 mac_ids
+ * @num_req_buffers: number of buffers requested for replenish
+ * @desc_list: list of descs if called from dp_rx_process
+ *	       or NULL during dp rx initialization or out of buffer
+ *	       interrupt.
+ * @tail: tail of the local free-desc list (in/out)
+ * @owner: who owns the nbuf (host, NSS etc...)
+ * Return: return success or failure
+ */
+QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
+				 uint32_t num_req_buffers,
+				 union dp_rx_desc_list_elem_t **desc_list,
+				 union dp_rx_desc_list_elem_t **tail,
+				 uint8_t owner)
+{
+	uint32_t num_alloc_desc;
+	uint16_t num_desc_to_free = 0;
+	struct dp_pdev *dp_pdev = dp_soc->pdev_list[mac_id];
+	uint32_t num_entries_avail;
+	uint32_t count;
+	int sync_hw_ptr = 1;
+	qdf_dma_addr_t paddr;
+	qdf_nbuf_t rx_netbuf;
+	void *rxdma_ring_entry;
+	union dp_rx_desc_list_elem_t *next;
+	struct dp_srng *dp_rxdma_srng = &dp_pdev->rxdma_buf_ring;
+	void *rxdma_srng = dp_rxdma_srng->hal_srng;
+
+	if (!rxdma_srng) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			"rxdma srng not initialized");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+		"requested %d buffers for replenish", num_req_buffers);
+
+	/*
+	 * if desc_list is NULL, allocate the descs from freelist
+	 */
+	if (!(*desc_list)) {
+		num_alloc_desc = dp_rx_get_free_desc_list(dp_soc, mac_id,
+							  num_req_buffers,
+							  desc_list,
+							  tail);
+
+		if (!num_alloc_desc) {
+			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+				"no free rx_descs in freelist");
+			return QDF_STATUS_E_NOMEM;
+		}
+
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			"%d rx desc allocated", num_alloc_desc);
+		num_req_buffers = num_alloc_desc;
+	}
+
+	hal_srng_access_start(dp_soc->hal_soc, rxdma_srng);
+	num_entries_avail = hal_srng_src_num_avail(dp_soc->hal_soc,
+						   rxdma_srng,
+						   sync_hw_ptr);
+
+	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			"no of available entries in rxdma ring: %d",
+			num_entries_avail);
+
+	if (num_entries_avail < num_req_buffers) {
+		num_desc_to_free = num_req_buffers - num_entries_avail;
+		num_req_buffers = num_entries_avail;
+	}
+
+	for (count = 0; count < num_req_buffers; count++) {
+		/*
+		 * Allocate and map the buffer before claiming a ring
+		 * entry, so that an allocation failure does not leave a
+		 * stale (unpopulated) entry visible to RXDMA.
+		 */
+		rx_netbuf = qdf_nbuf_alloc(dp_pdev->osif_pdev,
+					RX_BUFFER_SIZE,
+					RX_BUFFER_RESERVATION,
+					RX_BUFFER_ALIGNMENT,
+					FALSE);
+
+		if (qdf_unlikely(!rx_netbuf)) {
+			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+				"rx nbuf alloc failed, stopping replenish");
+			break;
+		}
+
+		qdf_nbuf_map_single(dp_soc->osdev, rx_netbuf,
+				    QDF_DMA_BIDIRECTIONAL);
+
+		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);
+
+		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
+							 rxdma_srng);
+
+		next = (*desc_list)->next;
+
+		(*desc_list)->rx_desc.nbuf = rx_netbuf;
+		hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
+						(*desc_list)->rx_desc.cookie,
+						owner);
+
+		*desc_list = next;
+	}
+
+	hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
+
+	/* report the count actually posted, not the count requested */
+	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+		"successfully replenished %d buffers", count);
+	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+		"%d rx desc added back to free list", num_desc_to_free);
+
+	/*
+	 * add any available free desc back to the free list
+	 */
+	if (*desc_list)
+		dp_rx_add_desc_list_to_free_list(dp_soc, desc_list,
+						 tail, mac_id);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * dp_rx_intrabss_fwd() - Implements the Intra-BSS forwarding logic
+ *
+ * @soc: core txrx main context
+ * @rx_desc	: Rx descriptor
+ * @msdu_info	: place holder to store Rx MSDU Details from Rx desc
+ * @mpdu_info	: place holder to store Rx MPDU Details from Rx desc
+ * @is_term: Value filled in by this function, if logic determines this
+ *	     to be a terminating packet
+ *
+ * NOTE: Stub implementation -- always returns false (nothing is
+ * forwarded), only logs that the feature is missing, and leaves
+ * @is_term untouched.
+ *
+ * Return: bool: true if it is forwarded else false
+ */
+static bool
+dp_rx_intrabss_fwd(struct dp_soc *soc,
+		   struct dp_rx_desc *rx_desc,
+		   struct hal_rx_msdu_desc_info *msdu_info,
+		   struct hal_rx_mpdu_desc_info *mpdu_info,
+		   bool *is_term)
+{
+	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+		"%s %d : Intra-BSS forwarding not implemented",
+			__func__, __LINE__);
+	return false;
+}
+
+
+/**
+ * dp_rx_process() - Brain of the Rx processing functionality
+ *		     Called from the bottom half (tasklet/NET_RX_SOFTIRQ)
+ * @soc: core txrx main context
+ * @hal_ring: opaque pointer to the HAL Rx Ring, which will be serviced
+ * @quota: No. of units (packets) that can be serviced in one shot.
+ *
+ * This function implements the core of Rx functionality. This is
+ * expected to handle only non-error frames.
+ *
+ * Return: uint32_t: No. of elements processed
+ */
+uint32_t
+dp_rx_process(struct dp_soc *soc, void *hal_ring, uint32_t quota)
+{
+	void *hal_soc;
+	void *ring_desc;
+	struct dp_rx_desc *rx_desc;
+	qdf_nbuf_t nbuf;
+	union dp_rx_desc_list_elem_t *head = NULL;
+	union dp_rx_desc_list_elem_t *tail = NULL;
+	bool is_term;
+	uint32_t rx_bufs_used = 0, rx_buf_cookie, l2_hdr_offset;
+	uint16_t peer_id;
+	struct dp_peer *peer = NULL;
+	struct hal_rx_msdu_desc_info msdu_desc_info;
+	struct hal_rx_mpdu_desc_info mpdu_desc_info;
+	qdf_nbuf_t head_msdu, tail_msdu;
+	enum hal_reo_error_status error;
+	uint32_t pkt_len;
+
+	/* Debug -- Remove later */
+	qdf_assert(soc && hal_ring);
+
+	hal_soc = soc->hal_soc;
+
+	/* Debug -- Remove later */
+	qdf_assert(hal_soc);
+
+	if (qdf_unlikely(hal_srng_access_start(hal_soc, hal_ring))) {
+
+		/*
+		 * Need API to convert from hal_ring pointer to
+		 * Ring Type / Ring Id combo
+		 */
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			"%s %d : HAL RING Access Failed -- %p\n",
+			__func__, __LINE__, hal_ring);
+		hal_srng_access_end(hal_soc, hal_ring);
+		goto done;
+	}
+
+	head_msdu = tail_msdu = NULL;
+
+	while (qdf_likely((ring_desc =
+				hal_srng_dst_get_next(hal_soc, hal_ring))
+				&& quota--)) {
+
+		error = HAL_RX_ERROR_STATUS_GET(ring_desc);
+
+		if (qdf_unlikely(error == HAL_REO_ERROR_DETECTED)) {
+			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			"%s %d : HAL RING 0x%p:error %d\n",
+			__func__, __LINE__, hal_ring, error);
+			/* Don't know how to deal with this -- assert */
+			qdf_assert(0);
+		}
+
+		rx_buf_cookie = HAL_RX_REO_BUF_COOKIE_GET(ring_desc);
+
+		rx_desc = dp_rx_cookie_2_va(soc, rx_buf_cookie);
+
+		qdf_assert(rx_desc);
+
+		rx_bufs_used++;
+
+		/* Get MSDU DESC info */
+		hal_rx_msdu_desc_info_get(ring_desc, &msdu_desc_info);
+
+		nbuf = rx_desc->nbuf;
+		/* TODO */
+		/*
+		 * Need a separate API for unmapping based on
+		 * physical address
+		 */
+		qdf_nbuf_unmap_single(soc->osdev, nbuf,
+					QDF_DMA_BIDIRECTIONAL);
+
+		rx_desc->rx_buf_start = qdf_nbuf_data(nbuf);
+
+		/*
+		 * HW structures call this L3 header padding -- even though
+		 * this is actually the offset from the buffer beginning
+		 * where the L2 header begins.
+		 */
+		l2_hdr_offset =
+			hal_rx_msdu_end_l3_hdr_padding_get(
+						rx_desc->rx_buf_start);
+
+		pkt_len = msdu_desc_info.msdu_len +
+				l2_hdr_offset + RX_PKT_TLVS_LEN;
+
+		/* Set length in nbuf */
+		qdf_nbuf_set_pktlen(nbuf, pkt_len);
+
+		/*
+		 * Check if DMA completed -- msdu_done is the last bit
+		 * to be written
+		 */
+		if (!hal_rx_attn_msdu_done_get(rx_desc->rx_buf_start)) {
+
+			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			"%s %d : HAL RING 0x%p\n",
+			__func__, __LINE__, hal_ring);
+
+			print_hex_dump(KERN_ERR,
+			       "\t Pkt Desc:", DUMP_PREFIX_NONE, 32, 4,
+				rx_desc->rx_buf_start, 128, false);
+
+			qdf_assert(0);
+		}
+
+		/*
+		 * Advance the packet start pointer by total size of
+		 * pre-header TLV's
+		 */
+		qdf_nbuf_pull_head(nbuf, RX_PKT_TLVS_LEN);
+
+		if (l2_hdr_offset)
+			qdf_nbuf_pull_head(nbuf, l2_hdr_offset);
+
+		/* TODO -- Remove --  Just for initial debug */
+
+		print_hex_dump(KERN_ERR, "\t Pkt Buf:",
+			DUMP_PREFIX_NONE, 32, 4,
+			qdf_nbuf_data(nbuf), 128, false);
+
+		/* Get the MPDU DESC info */
+		hal_rx_mpdu_info_get(ring_desc, &mpdu_desc_info);
+
+		/* TODO */
+		/* WDS Source Port Learning */
+
+		/* Intrabss-fwd */
+		if (dp_rx_intrabss_fwd(soc, rx_desc,
+			&msdu_desc_info, &mpdu_desc_info, &is_term))
+			continue; /* Get next descriptor */
+
+		peer_id = DP_PEER_METADATA_PEER_ID_GET(
+				mpdu_desc_info.peer_meta_data);
+
+		peer = dp_peer_find_by_id(soc, peer_id);
+
+		/* TODO */
+		/*
+		 * In case of roaming peer object may not be
+		 * immediately available -- need to handle this
+		 * Cannot drop these packets right away.
+		 */
+		/* Peer lookup failed */
+		if (!peer) {
+
+			/* Drop & free packet */
+			qdf_nbuf_free(rx_desc->nbuf);
+
+			/* Statistics */
+
+			/* Add free rx_desc to a free list */
+			dp_rx_add_to_free_desc_list(&head, &tail, rx_desc);
+
+			continue;
+		}
+
+		if (qdf_unlikely(!head_msdu))
+			head_msdu = rx_desc->nbuf;
+		else
+			qdf_nbuf_set_next(tail_msdu, rx_desc->nbuf);
+
+		tail_msdu = rx_desc->nbuf;
+
+		dp_rx_add_to_free_desc_list(&head, &tail, rx_desc);
+	}
+
+	hal_srng_access_end(hal_soc, hal_ring);
+
+	if (!head_msdu)
+		return 0;
+
+	/* Replenish buffers */
+	/* Assume MAC id = 0, owner = 0 */
+	dp_rx_buffers_replenish(soc, 0, rx_bufs_used, &head, &tail,
+				HAL_RX_BUF_RBM_SW3_BM);
+
+	qdf_nbuf_set_next(tail_msdu, NULL);
+
+
+	/*
+	 * TODO - this assumes all packets reaped belong to one peer/vdev, which
+	 * may not be true, call this inside while loop for each change in vdev
+	 *
+	 * NOTE: peer holds the result of the *last* lookup; if that lookup
+	 * failed while earlier frames were chained on head_msdu, peer is
+	 * NULL here -- guard the dereference (frames are not delivered in
+	 * that case).
+	 */
+	if (qdf_likely(peer && peer->vdev && peer->vdev->osif_rx))
+		peer->vdev->osif_rx(peer->vdev->osif_vdev, head_msdu);
+
+done:
+	return rx_bufs_used; /* Assume no scale factor for now */
+}
+
+/**
+ * dp_rx_pdev_detach() - detach dp rx
+ * @pdev: core txrx pdev context
+ *
+ * Tears down the DP RX instance for this pdev: frees the sw rx
+ * descriptor pool and destroys the lock that guarded it.
+ *
+ * Return: void
+ */
+void
+dp_rx_pdev_detach(struct dp_pdev *pdev)
+{
+	struct dp_soc *soc = pdev->soc;
+	uint8_t pool_id = pdev->pdev_id;
+
+	/* Pool first, then the lock protecting it */
+	dp_rx_desc_pool_free(soc, pool_id);
+	qdf_spinlock_destroy(&soc->rx_desc_mutex[pool_id]);
+}
+
+/**
+ * dp_rx_pdev_attach() - attach DP RX
+ * @pdev: core txrx pdev context
+ *
+ * This function will attach a DP RX instance into the main
+ * device (SOC) context. Will allocate dp rx resource and
+ * initialize resources.
+ *
+ * Return: QDF_STATUS_SUCCESS: success
+ *         QDF_STATUS_E_RESOURCES: Error return
+ */
+QDF_STATUS
+dp_rx_pdev_attach(struct dp_pdev *pdev)
+{
+	uint8_t pdev_id = pdev->pdev_id;
+	struct dp_soc *soc = pdev->soc;
+	struct dp_srng rxdma_srng;
+	uint32_t rxdma_entries;
+	union dp_rx_desc_list_elem_t *desc_list = NULL;
+	union dp_rx_desc_list_elem_t *tail = NULL;
+
+	qdf_spinlock_create(&soc->rx_desc_mutex[pdev_id]);
+	pdev = soc->pdev_list[pdev_id];
+	rxdma_srng = pdev->rxdma_buf_ring;
+
+	rxdma_entries = rxdma_srng.alloc_size/hal_srng_get_entrysize(
+						     soc->hal_soc, RXDMA_BUF);
+
+	/* Propagate pool allocation failure instead of ignoring it */
+	if (dp_rx_desc_pool_alloc(soc, pdev_id) != QDF_STATUS_SUCCESS) {
+		qdf_spinlock_destroy(&soc->rx_desc_mutex[pdev_id]);
+		return QDF_STATUS_E_RESOURCES;
+	}
+
+	/* For Rx buffers, WBM release ring is SW RING 3,for all pdev's */
+	dp_rx_buffers_replenish(soc, pdev_id, rxdma_entries,
+				&desc_list, &tail, HAL_RX_BUF_RBM_SW3_BM);
+
+	return QDF_STATUS_SUCCESS;
+}

+ 149 - 0
dp/wifi3.0/dp_rx.h

@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _DP_RX_H
+#define _DP_RX_H
+
+#include "hal_rx.h"
+
+#define DP_PEER_METADATA_PEER_ID_MASK	0x0000ffff
+#define DP_PEER_METADATA_PEER_ID_SHIFT	0
+#define DP_PEER_METADATA_VDEV_ID_MASK	0x00070000
+#define DP_PEER_METADATA_VDEV_ID_SHIFT	16
+
+#define DP_PEER_METADATA_PEER_ID_GET(_peer_metadata)		\
+	(((_peer_metadata) & DP_PEER_METADATA_PEER_ID_MASK)	\
+			>> DP_PEER_METADATA_PEER_ID_SHIFT)
+
+#define DP_PEER_METADATA_ID_GET(_peer_metadata)			\
+	(((_peer_metadata) & DP_PEER_METADATA_VDEV_ID_MASK)	\
+			>> DP_PEER_METADATA_VDEV_ID_SHIFT)
+
+/**
+ * struct dp_rx_desc
+ *
+ * SW-side state for one posted Rx buffer; the HW ring entry refers
+ * back to this descriptor via @cookie.
+ *
+ * @nbuf		: VA of the "skb" posted
+ * @rx_buf_start	: VA of the original Rx buffer, before
+ *			  movement of any skb->data pointer
+ * @cookie		: index into the sw array which holds
+ *			  the sw Rx descriptors
+ *			  Cookie space is 21 bits:
+ *			  lower 18 bits -- index
+ *			  upper  3 bits -- pool_id
+ * @pool_id		: pool Id for which this allocated.
+ *			  Can only be used if there is no flow
+ *			  steering
+ *
+ * NOTE(review): @cookie is declared uint16_t but the documented cookie
+ * space is 21 bits (18-bit index + 3-bit pool id) -- confirm whether
+ * this field needs to be widened to uint32_t.
+ */
+struct dp_rx_desc {
+	qdf_nbuf_t nbuf;
+	uint8_t *rx_buf_start;
+	uint16_t cookie;
+	uint8_t	 pool_id;
+};
+
+#define RX_DESC_COOKIE_INDEX_SHIFT		0
+#define RX_DESC_COOKIE_INDEX_MASK		0x3ffff /* 18 bits */
+#define RX_DESC_COOKIE_POOL_ID_SHIFT		18
+#define RX_DESC_COOKIE_POOL_ID_MASK		0x1c0000
+
+#define DP_RX_DESC_COOKIE_POOL_ID_GET(_cookie)		\
+	(((_cookie) & RX_DESC_COOKIE_POOL_ID_MASK) >>	\
+			RX_DESC_COOKIE_POOL_ID_SHIFT)
+
+#define DP_RX_DESC_COOKIE_INDEX_GET(_cookie)		\
+	(((_cookie) & RX_DESC_COOKIE_INDEX_MASK) >>	\
+			RX_DESC_COOKIE_INDEX_SHIFT)
+
+/**
+ * union dp_rx_desc_list_elem_t
+ *
+ * Element of the sw rx descriptor pool array. Only one view is live
+ * at a time: @next while the element sits on the freelist, @rx_desc
+ * while it tracks a posted buffer (the views share storage).
+ *
+ * @next		: Next pointer to form free list
+ * @rx_desc		: DP Rx descriptor
+ */
+union dp_rx_desc_list_elem_t {
+	union dp_rx_desc_list_elem_t *next;
+	struct dp_rx_desc rx_desc;
+};
+
+/**
+ * dp_rx_cookie_2_va() - Converts cookie to a virtual address of
+ *			 the Rx descriptor.
+ * @soc: core txrx main context
+ * @cookie: cookie used to lookup virtual address
+ *
+ * Return: void *: Virtual Address of the Rx descriptor
+ */
+static inline
+void *dp_rx_cookie_2_va(struct dp_soc *soc, uint32_t cookie)
+{
+	uint8_t pool = DP_RX_DESC_COOKIE_POOL_ID_GET(cookie);
+	uint16_t idx = DP_RX_DESC_COOKIE_INDEX_GET(cookie);
+
+	/* TODO */
+	/* Add sanity for pool_id & index */
+	return &soc->rx_desc[pool].array[idx].rx_desc;
+}
+
+void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
+				union dp_rx_desc_list_elem_t **local_desc_list,
+				union dp_rx_desc_list_elem_t **tail,
+				uint16_t pool_id);
+
+uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
+				uint16_t num_descs,
+				union dp_rx_desc_list_elem_t **desc_list,
+				union dp_rx_desc_list_elem_t **tail);
+
+QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id);
+void dp_rx_desc_pool_free(struct dp_soc *soc, uint32_t pool_id);
+
+QDF_STATUS dp_rx_pdev_attach(struct dp_pdev *pdev);
+void dp_rx_pdev_detach(struct dp_pdev *pdev);
+
+QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
+				   uint32_t num_req_buffers,
+				   union dp_rx_desc_list_elem_t **desc_list,
+				   union dp_rx_desc_list_elem_t **tail,
+				   uint8_t owner);
+uint32_t
+dp_rx_process(struct dp_soc *soc, void *hal_ring, uint32_t quota);
+
+/**
+ * dp_rx_add_to_free_desc_list() - Adds to a local free descriptor list
+ *
+ * @head: pointer to the head of local free list
+ * @tail: pointer to the tail of local free list
+ * @new: new descriptor that is added to the free list
+ *
+ * Return: void:
+ */
+static inline
+void dp_rx_add_to_free_desc_list(union dp_rx_desc_list_elem_t **head,
+				 union dp_rx_desc_list_elem_t **tail,
+				 struct dp_rx_desc *new)
+{
+	union dp_rx_desc_list_elem_t *elem;
+
+	qdf_assert(head && new);
+
+	/* Drop the buffer reference; the desc returns to free state */
+	new->nbuf = NULL;
+
+	/* Push onto the head of the local list */
+	elem = (union dp_rx_desc_list_elem_t *)new;
+	elem->next = *head;
+	*head = elem;
+
+	/* First element pushed is also the tail */
+	if (*tail == NULL)
+		*tail = *head;
+}
+#endif /* _DP_RX_H */

+ 147 - 0
dp/wifi3.0/dp_rx_desc.c

@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "dp_types.h"
+#include "dp_rx.h"
+
+/*
+ * dp_rx_desc_pool_alloc() - create a pool of software rx_descs
+ *			     at the time of dp rx initialization
+ *
+ * @soc: core txrx main context
+ * @pool_id: pool_id which is one of 3 mac_ids
+ *
+ * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on
+ *	   allocation failure
+ */
+QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id)
+{
+	uint32_t i;
+	uint32_t pool_size;
+	struct dp_pdev *dp_pdev = soc->pdev_list[pool_id];
+	struct dp_srng *rxdma_srng = &dp_pdev->rxdma_buf_ring;
+
+	/*
+	 * Provision 3x the ring depth worth of sw descriptors so that
+	 * buffers can be outstanding in HW, SW and the stack at once.
+	 * Compute the size once instead of repeating the expression.
+	 */
+	pool_size = (rxdma_srng->alloc_size /
+		hal_srng_get_entrysize(soc->hal_soc, RXDMA_BUF)) * 3;
+
+	soc->rx_desc[pool_id].array = qdf_mem_malloc(
+		pool_size * sizeof(union dp_rx_desc_list_elem_t));
+
+	if (!(soc->rx_desc[pool_id].array)) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			"%s: RX Desc Pool[%d] allocation failed\n",
+			__func__, pool_id);
+		return QDF_STATUS_E_NOMEM;
+	}
+
+	qdf_spin_lock_bh(&soc->rx_desc_mutex[pool_id]);
+	soc->rx_desc[pool_id].pool_size = pool_size;
+
+	/* link SW rx descs into a freelist */
+	soc->rx_desc[pool_id].freelist = &soc->rx_desc[pool_id].array[0];
+	for (i = 0; i < pool_size - 1; i++) {
+		soc->rx_desc[pool_id].array[i].next =
+					&soc->rx_desc[pool_id].array[i+1];
+		/* cookie = index | pool id (named shift, not magic 18) */
+		soc->rx_desc[pool_id].array[i].rx_desc.cookie =
+				i | (pool_id << RX_DESC_COOKIE_POOL_ID_SHIFT);
+	}
+
+	soc->rx_desc[pool_id].array[i].next = NULL;
+	soc->rx_desc[pool_id].array[i].rx_desc.cookie =
+				i | (pool_id << RX_DESC_COOKIE_POOL_ID_SHIFT);
+	qdf_spin_unlock_bh(&soc->rx_desc_mutex[pool_id]);
+	return QDF_STATUS_SUCCESS;
+}
+
+/*
+ * dp_rx_desc_pool_free() - free the sw rx desc pool called during
+ *			    de-initialization of wifi module.
+ *
+ * @soc: core txrx main context
+ * @pool_id: pool_id which is one of 3 mac_ids
+ */
+void dp_rx_desc_pool_free(struct dp_soc *soc, uint32_t pool_id)
+{
+	qdf_spin_lock_bh(&soc->rx_desc_mutex[pool_id]);
+	qdf_mem_free(soc->rx_desc[pool_id].array);
+	/*
+	 * Clear the stale references so a use-after-free faults fast
+	 * instead of silently reading freed memory.
+	 */
+	soc->rx_desc[pool_id].array = NULL;
+	soc->rx_desc[pool_id].freelist = NULL;
+	soc->rx_desc[pool_id].pool_size = 0;
+	qdf_spin_unlock_bh(&soc->rx_desc_mutex[pool_id]);
+}
+
+/*
+ * dp_rx_get_free_desc_list() - provide a list of descriptors from
+ *				the free rx desc pool.
+ *
+ * @soc: core txrx main context
+ * @pool_id: pool_id which is one of 3 mac_ids
+ * @num_descs: number of descs requested from freelist
+ * @desc_list: attach the descs to this list (output parameter)
+ * @tail: tail of the allocated list (output parameter)
+ *
+ * Return: number of descs allocated from free list.
+ */
+uint16_t dp_rx_get_free_desc_list(struct dp_soc *soc, uint32_t pool_id,
+				uint16_t num_descs,
+				union dp_rx_desc_list_elem_t **desc_list,
+				union dp_rx_desc_list_elem_t **tail)
+{
+	uint16_t count = 0;
+
+	qdf_spin_lock_bh(&soc->rx_desc_mutex[pool_id]);
+	*desc_list = soc->rx_desc[pool_id].freelist;
+
+	if (!(*desc_list)) {
+		qdf_spin_unlock_bh(&soc->rx_desc_mutex[pool_id]);
+		return 0;
+	}
+
+	while (count < num_descs) {
+
+		*tail = soc->rx_desc[pool_id].freelist;
+		soc->rx_desc[pool_id].freelist =
+				soc->rx_desc[pool_id].freelist->next;
+		/*
+		 * Count the element just taken before checking for
+		 * exhaustion; the old code returned early here and
+		 * under-reported by one, leaking a descriptor.
+		 */
+		count++;
+		if (qdf_unlikely(!soc->rx_desc[pool_id].freelist))
+			break;
+	}
+
+	/* Detach the allocated chain from the pool freelist */
+	if (count)
+		(*tail)->next = NULL;
+
+	qdf_spin_unlock_bh(&soc->rx_desc_mutex[pool_id]);
+	return count;
+}
+
+/*
+ * dp_rx_add_desc_list_to_free_list() - append unused desc_list back to
+ *					freelist.
+ *
+ * @soc: core txrx main context
+ * @local_desc_list: local desc list provided by the caller
+ * @tail: tail of the local desc list; its next pointer is relinked to
+ *	  the old freelist head
+ * @pool_id: pool_id which is one of 3 mac_ids
+ */
+void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
+				union dp_rx_desc_list_elem_t **local_desc_list,
+				union dp_rx_desc_list_elem_t **tail,
+				uint16_t pool_id)
+{
+	union dp_rx_desc_list_elem_t *temp_list;
+
+	qdf_spin_lock_bh(&soc->rx_desc_mutex[pool_id]);
+
+	/*
+	 * Prepend the local list: its head becomes the new freelist
+	 * head and its tail is linked to the old freelist head.
+	 * (Removed the unconditional qdf_print debug that ran on every
+	 * call in the data path and dereferenced (*tail)->next solely
+	 * for logging.)
+	 */
+	temp_list = soc->rx_desc[pool_id].freelist;
+	soc->rx_desc[pool_id].freelist = *local_desc_list;
+	(*tail)->next = temp_list;
+
+	qdf_spin_unlock_bh(&soc->rx_desc_mutex[pool_id]);
+}