
qcacmn: add tx descriptor handle

Add memory-optimized tx descriptor alloc, free and handle code

Change-Id: Iabd499b7690bbf8bc09223ea7e72e8f81818482a
CRs-fixed: 1076601
Leo Chang, 8 years ago
parent commit c2a7b763d1
4 changed files with 522 additions and 1 deletion
  1. dp/wifi3.0/dp_tx_desc.c      (+281, -0)
  2. dp/wifi3.0/dp_tx_desc.h      (+176, -0)
  3. qdf/inc/qdf_mem.h            (+3, -1)
  4. qdf/linux/src/qdf_mem.c      (+62, -0)

+ 281 - 0
dp/wifi3.0/dp_tx_desc.c

@@ -0,0 +1,281 @@
+/*
+ * Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "dp_types.h"
+#include "dp_tx_desc.h"
+
+#ifndef DESC_PARTITION
+#define DP_TX_DESC_SIZE(a) qdf_get_pwr2(a)
+#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id)     \
+do {                                                                 \
+	uint8_t sig_bit;                                             \
+	soc->tx_desc[pool_id].offset_filter = num_desc_per_page - 1; \
+	/* Calculate page divider to find page number */             \
+	sig_bit = 0;                                                 \
+	while (num_desc_per_page) {                                  \
+		sig_bit++;                                           \
+		num_desc_per_page = num_desc_per_page >> 1;          \
+	}                                                            \
+	soc->tx_desc[pool_id].page_divider = (sig_bit - 1);          \
+} while (0)
+#else
+#define DP_TX_DESC_SIZE(a) a
+#define DP_TX_DESC_PAGE_DIVIDER(soc, num_desc_per_page, pool_id) {}
+#endif /* DESC_PARTITION */
+
+/**
+ * dp_tx_desc_pool_alloc() - Allocate Tx Descriptor pool(s)
+ * @soc: Handle to DP SoC structure
+ * @pool_id: Pool to allocate
+ * @num_elem: Number of descriptor elements per pool
+ *
+ * This function allocates memory for SW tx descriptors
+ * (used within host for tx data path).
+ * The number of tx descriptors required will be large:
+ * based on the number of clients (1024 clients x 3 radios), the number of
+ * outstanding MSDUs stored in TQM queues and LMAC queues will be
+ * significantly large.
+ *
+ * To avoid allocating a large contiguous memory block, the qdf
+ * multi_page_alloc function is used to allocate memory in multiple pages.
+ * The function then iterates through the memory allocated across pages and
+ * links each descriptor to the next descriptor, taking care of page
+ * boundaries.
+ *
+ * Since WiFi 3.0 HW supports multiple Tx rings, multiple pools are allocated,
+ * one for each ring. This minimizes lock contention when hard_start_xmit is
+ * called from multiple CPUs. Alternately, multiple pools can be used for
+ * multiple VDEVs for VDEV-level flow control.
+ *
+ * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAULT on failure
+ */
+QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
+		uint16_t num_elem)
+{
+	uint32_t id, count, page_id, offset, pool_id_32;
+	uint16_t num_page, num_desc_per_page;
+	struct dp_tx_desc_s *tx_desc_elem;
+	uint32_t desc_size;
+
+	desc_size = DP_TX_DESC_SIZE(sizeof(*tx_desc_elem));
+	soc->tx_desc[pool_id].elem_size = desc_size;
+	qdf_mem_multi_pages_alloc(soc->osdev,
+		&soc->tx_desc[pool_id].desc_pages, desc_size, num_elem,
+		0, true);
+	if (!soc->tx_desc[pool_id].desc_pages.num_pages) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			"Multi page alloc fail, tx desc");
+		goto fail_exit;
+	}
+
+	num_page = soc->tx_desc[pool_id].desc_pages.num_pages;
+	num_desc_per_page =
+		soc->tx_desc[pool_id].desc_pages.num_element_per_page;
+	soc->tx_desc[pool_id].freelist = (struct dp_tx_desc_s *)
+		*soc->tx_desc[pool_id].desc_pages.cacheable_pages;
+	if (qdf_mem_multi_page_link(soc->osdev,
+		&soc->tx_desc[pool_id].desc_pages, desc_size, num_elem, true)) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			"invalid tx desc allocation - overflow num link");
+		goto free_tx_desc;
+	}
+
+	/* Set unique IDs for each Tx descriptor */
+	tx_desc_elem = soc->tx_desc[pool_id].freelist;
+	count = 0;
+	pool_id_32 = (uint32_t)pool_id;
+	while (tx_desc_elem) {
+		page_id = count / num_desc_per_page;
+		offset = count % num_desc_per_page;
+		id = ((pool_id_32 << DP_TX_DESC_ID_POOL_OS) |
+			(page_id << DP_TX_DESC_ID_PAGE_OS) | offset);
+
+		tx_desc_elem->id = id;
+		tx_desc_elem->pool_id = pool_id;
+		tx_desc_elem = tx_desc_elem->next;
+		count++;
+	}
+
+	TX_DESC_LOCK_CREATE(&soc->tx_desc[pool_id].lock);
+	return QDF_STATUS_SUCCESS;
+
+free_tx_desc:
+	qdf_mem_multi_pages_free(soc->osdev,
+		&soc->tx_desc[pool_id].desc_pages, 0, true);
+
+fail_exit:
+	return QDF_STATUS_E_FAULT;
+}
+
+/**
+ * dp_tx_desc_pool_free() - Free the memory pool allocated for Tx Descriptors
+ *
+ * @soc: Handle to DP SoC structure
+ * @pool_id: Pool to free
+ *
+ * Return: QDF_STATUS_SUCCESS
+ */
+QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
+{
+	qdf_mem_multi_pages_free(soc->osdev,
+		&soc->tx_desc[pool_id].desc_pages, 0, true);
+	TX_DESC_LOCK_DESTROY(&soc->tx_desc[pool_id].lock);
+	return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * dp_tx_ext_desc_pool_alloc() - Allocate tx ext descriptor pool
+ * @soc: Handle to DP SoC structure
+ * @pool_id: Extension descriptor pool id
+ * @num_elem: Number of descriptor elements in the pool
+ *
+ * Return: QDF_STATUS_SUCCESS on success, error code on failure
+ */
+QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
+	uint16_t num_elem)
+{
+	uint16_t num_page;
+	uint32_t count;
+	struct dp_tx_ext_desc_elem_s *c_elem, *p_elem;
+	struct qdf_mem_dma_page_t *page_info;
+	struct qdf_mem_multi_page_t *pages;
+	QDF_STATUS status;
+
+	/* Coherent tx extension descriptor alloc */
+	soc->tx_ext_desc[pool_id].elem_size = HAL_TX_EXTENSION_DESC_LEN_BYTES;
+	soc->tx_ext_desc[pool_id].elem_count = num_elem;
+	qdf_mem_multi_pages_alloc(soc->osdev,
+		&soc->tx_ext_desc[pool_id].desc_pages,
+		soc->tx_ext_desc[pool_id].elem_size,
+		soc->tx_ext_desc[pool_id].elem_count,
+		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
+		false);
+	if (!soc->tx_ext_desc[pool_id].desc_pages.num_pages) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			"ext desc page alloc fail");
+		status = QDF_STATUS_E_NOMEM;
+		goto fail_exit;
+	}
+
+	num_page = soc->tx_ext_desc[pool_id].desc_pages.num_pages;
+	/*
+	 * Cacheable ext descriptor link alloc
+	 * This structure is also fairly large: a single element is
+	 * 24 bytes, so 2K elements take 48 KB.
+	 * Hence, multi-page cacheable memory must be allocated.
+	 */
+	soc->tx_ext_desc[pool_id].link_elem_size =
+		sizeof(struct dp_tx_ext_desc_elem_s);
+	qdf_mem_multi_pages_alloc(soc->osdev,
+		&soc->tx_ext_desc[pool_id].desc_link_pages,
+		soc->tx_ext_desc[pool_id].link_elem_size,
+		soc->tx_ext_desc[pool_id].elem_count, 0,
+		true);
+	if (!soc->tx_ext_desc[pool_id].desc_link_pages.num_pages) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			"ext link desc page alloc fail");
+		status = QDF_STATUS_E_NOMEM;
+		goto free_ext_desc_page;
+	}
+
+	/* link tx descriptors into a freelist */
+	soc->tx_ext_desc[pool_id].freelist = (struct dp_tx_ext_desc_elem_s *)
+		*soc->tx_ext_desc[pool_id].desc_link_pages.cacheable_pages;
+	if (qdf_mem_multi_page_link(soc->osdev,
+		&soc->tx_ext_desc[pool_id].desc_link_pages,
+		soc->tx_ext_desc[pool_id].link_elem_size,
+		soc->tx_ext_desc[pool_id].elem_count, true)) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+			"ext link desc page linking fail");
+		status = QDF_STATUS_E_FAULT;
+		goto free_ext_link_desc_page;
+	}
+
+	/* Assign coherent memory pointer into linked free list */
+	pages = &soc->tx_ext_desc[pool_id].desc_pages;
+	page_info = soc->tx_ext_desc[pool_id].desc_pages.dma_pages;
+	c_elem = soc->tx_ext_desc[pool_id].freelist;
+	p_elem = c_elem;
+	for (count = 0; count < soc->tx_ext_desc[pool_id].elem_count; count++) {
+		if (!(count % pages->num_element_per_page)) {
+			/*
+			 * The first element of a new page must take its
+			 * address from the start of the next DMA page,
+			 * not continue from the previous element.
+			 */
+			if (!pages->dma_pages->page_v_addr_start) {
+				QDF_TRACE(QDF_MODULE_ID_DP,
+					QDF_TRACE_LEVEL_ERROR,
+					"link over flow");
+				status = QDF_STATUS_E_FAULT;
+				goto free_ext_link_desc_page;
+			}
+			c_elem->vaddr = (void *)page_info->page_v_addr_start;
+			c_elem->paddr = page_info->page_p_addr;
+			page_info++;
+		} else {
+			c_elem->vaddr = (void *)(p_elem->vaddr +
+				soc->tx_ext_desc[pool_id].elem_size);
+			c_elem->paddr = (p_elem->paddr +
+				soc->tx_ext_desc[pool_id].elem_size);
+		}
+		p_elem = c_elem;
+		c_elem = c_elem->next;
+		if (!c_elem)
+			break;
+	}
+
+	TX_DESC_LOCK_CREATE(&soc->tx_ext_desc[pool_id].lock);
+	return QDF_STATUS_SUCCESS;
+
+free_ext_link_desc_page:
+	qdf_mem_multi_pages_free(soc->osdev,
+		&soc->tx_ext_desc[pool_id].desc_link_pages, 0, true);
+
+free_ext_desc_page:
+	qdf_mem_multi_pages_free(soc->osdev,
+		&soc->tx_ext_desc[pool_id].desc_pages,
+		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
+		false);
+
+fail_exit:
+	return status;
+
+}
+
+/**
+ * dp_tx_ext_desc_pool_free() - free tx ext descriptor pool
+ * @soc: Handle to DP SoC structure
+ * @pool_id: extension descriptor pool id
+ *
+ * Return: QDF_STATUS_SUCCESS
+ */
+QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id)
+{
+	qdf_mem_multi_pages_free(soc->osdev,
+		&soc->tx_ext_desc[pool_id].desc_link_pages, 0, true);
+
+	qdf_mem_multi_pages_free(soc->osdev,
+		&soc->tx_ext_desc[pool_id].desc_pages,
+		qdf_get_dma_mem_context((&soc->tx_ext_desc[pool_id]), memctx),
+		false);
+
+	TX_DESC_LOCK_DESTROY(&soc->tx_ext_desc[pool_id].lock);
+	return QDF_STATUS_SUCCESS;
+}
+

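The dp_tx_desc.c hunk above allocates one SW descriptor pool per Tx ring so that hard_start_xmit paths running on different CPUs do not contend on a single lock. A minimal sketch of how attach/detach code might drive these APIs follows; the helper name, the MAX_TXDESC_POOLS bound and the 1024-element pool depth are illustrative assumptions, not part of this change:

/* Hypothetical attach-time helper: one descriptor pool per Tx ring.
 * MAX_TXDESC_POOLS and the pool depth of 1024 are example values. */
static QDF_STATUS dp_tx_soc_attach_pools(struct dp_soc *soc)
{
	uint8_t pool_id;
	uint16_t num_elem = 1024;	/* example pool depth */

	for (pool_id = 0; pool_id < MAX_TXDESC_POOLS; pool_id++) {
		if (dp_tx_desc_pool_alloc(soc, pool_id, num_elem) !=
		    QDF_STATUS_SUCCESS)
			goto unwind;
	}
	return QDF_STATUS_SUCCESS;

unwind:
	/* Free only the pools that were allocated before the failure */
	while (pool_id--)
		dp_tx_desc_pool_free(soc, pool_id);
	return QDF_STATUS_E_NOMEM;
}

On failure the loop unwinds just the pools it already created, mirroring how dp_tx_ext_desc_pool_alloc() itself releases partial allocations through its error labels.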
+ 176 - 0
dp/wifi3.0/dp_tx_desc.h

@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef DP_TX_DESC_H
+#define DP_TX_DESC_H
+
+#include "dp_types.h"
+#include "dp_tx.h"
+
+/*
+ * 21-bit cookie layout:
+ * 3 bits pool (ring) id, 0 ~ 7,    mask 0x1C0000, offset 18
+ * 8 bits page id,        0 ~ 255,  mask 0x03FC00, offset 10
+ * 10 bits offset,        0 ~ 1023, mask 0x0003FF, offset 0
+ */
+/* ???Ring ID needed??? */
+#define DP_TX_DESC_ID_POOL_MASK    0x1C0000
+#define DP_TX_DESC_ID_POOL_OS      18
+#define DP_TX_DESC_ID_PAGE_MASK    0x03FC00
+#define DP_TX_DESC_ID_PAGE_OS      10
+#define DP_TX_DESC_ID_OFFSET_MASK  0x0003FF
+#define DP_TX_DESC_ID_OFFSET_OS    0
+
+/*
+ * When a TX descriptor pool is bound to a CPU core, the TX and TX completion
+ * contexts also run on that same core, so every operation on the pool is
+ * serialized by the core: TX and TX_COMP cannot race and no lock protection
+ * is required. This is the TX_CORE_ALIGNED_SEND case, most likely used on WIN.
+ * On MCL, a TX descriptor pool is tied to a VDEV instance, so lock protection
+ * is required.
+ */
+#ifdef TX_CORE_ALIGNED_SEND
+#define TX_DESC_LOCK_CREATE(lock)  /* NOOP */
+#define TX_DESC_LOCK_DESTROY(lock) /* NOOP */
+#define TX_DESC_LOCK_LOCK(lock)    /* NOOP */
+#define TX_DESC_LOCK_UNLOCK(lock)  /* NOOP */
+#else
+#define TX_DESC_LOCK_CREATE(lock)  qdf_spinlock_create(lock)
+#define TX_DESC_LOCK_DESTROY(lock) qdf_spinlock_destroy(lock)
+#define TX_DESC_LOCK_LOCK(lock)    qdf_spin_lock(lock)
+#define TX_DESC_LOCK_UNLOCK(lock)  qdf_spin_unlock(lock)
+#endif /* TX_CORE_ALIGNED_SEND */
+
+QDF_STATUS dp_tx_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
+		uint16_t num_elem);
+QDF_STATUS dp_tx_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
+QDF_STATUS dp_tx_ext_desc_pool_alloc(struct dp_soc *soc, uint8_t pool_id,
+		uint16_t num_elem);
+QDF_STATUS dp_tx_ext_desc_pool_free(struct dp_soc *soc, uint8_t pool_id);
+
+/**
+ * dp_tx_desc_alloc() - Allocate a Software Tx Descriptor from given pool
+ *
+ * @soc: Handle to DP SoC structure
+ * @desc_pool_id: Pool to allocate from
+ *
+ * Return: pointer to the allocated descriptor, or NULL if the pool is exhausted
+ */
+static inline struct dp_tx_desc_s *dp_tx_desc_alloc(struct dp_soc *soc,
+		uint8_t desc_pool_id)
+{
+	struct dp_tx_desc_s *tx_desc = NULL;
+
+	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);
+
+	tx_desc = soc->tx_desc[desc_pool_id].freelist;
+	/* Pool is exhausted */
+	if (!tx_desc) {
+		TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
+		return NULL;
+	}
+	if (soc->tx_desc[desc_pool_id].freelist) {
+		soc->tx_desc[desc_pool_id].freelist =
+			soc->tx_desc[desc_pool_id].freelist->next;
+		soc->tx_desc[desc_pool_id].num_allocated++;
+	}
+	DP_STATS_ADD(pdev, pub.tx.desc_in_use, 1);
+	tx_desc->flags |= DP_TX_DESC_FLAG_ALLOCATED;
+	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
+
+	return tx_desc;
+}
+
+
+/**
+ * dp_tx_desc_free() - Free a tx descriptor and attach it to the free list
+ *
+ * @soc: Handle to DP SoC structure
+ * @tx_desc: Descriptor to free
+ * @desc_pool_id: Pool the descriptor belongs to
+ *
+ * Return: None
+ */
+static inline void
+dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
+		uint8_t desc_pool_id)
+{
+	TX_DESC_LOCK_LOCK(&soc->tx_desc[desc_pool_id].lock);
+
+	tx_desc->flags &= ~DP_TX_DESC_FLAG_ALLOCATED;
+	tx_desc->next = soc->tx_desc[desc_pool_id].freelist;
+	soc->tx_desc[desc_pool_id].freelist = tx_desc;
+	DP_STATS_SUB(pdev, pub.tx.desc_in_use, 1);
+
+	TX_DESC_LOCK_UNLOCK(&soc->tx_desc[desc_pool_id].lock);
+}
+
+/**
+ * dp_tx_desc_find() - find a dp tx descriptor from its cookie fields
+ * @soc: handle for the device sending the data
+ * @pool_id: descriptor pool id
+ * @page_id: page the descriptor resides in
+ * @offset: descriptor offset within that page
+ *
+ * Use the pool, page and offset decoded from a tx descriptor ID (cookie)
+ * to find the corresponding descriptor object.
+ *
+ * Return: the descriptor object at the given location
+ */
+static inline struct dp_tx_desc_s *dp_tx_desc_find(struct dp_soc *soc,
+		uint8_t pool_id, uint16_t page_id, uint16_t offset)
+{
+	return soc->tx_desc[pool_id].desc_pages.cacheable_pages[page_id] +
+		soc->tx_desc[pool_id].elem_size * offset;
+}
+
+/**
+ * dp_tx_ext_desc_alloc() - Get tx extension descriptor from pool
+ * @soc: handle for the device sending the data
+ * @desc_pool_id: target pool id
+ *
+ * Return: pointer to a tx extension descriptor element from the free list
+ */
+static inline
+struct dp_tx_ext_desc_elem_s *dp_tx_ext_desc_alloc(struct dp_soc *soc,
+		uint8_t desc_pool_id)
+{
+	struct dp_tx_ext_desc_elem_s *c_elem;
+
+	TX_DESC_LOCK_LOCK(&soc->tx_ext_desc[desc_pool_id].lock);
+	c_elem = soc->tx_ext_desc[desc_pool_id].freelist;
+	soc->tx_ext_desc[desc_pool_id].freelist =
+		soc->tx_ext_desc[desc_pool_id].freelist->next;
+	TX_DESC_LOCK_UNLOCK(&soc->tx_ext_desc[desc_pool_id].lock);
+	return c_elem;
+}
+
+/**
+ * dp_tx_ext_desc_free() - Release tx extension descriptor to the pool
+ * @soc: handle for the device sending the data
+ * @desc_pool_id: target pool id
+ * @elem: extension descriptor to be released
+ *
+ * Return: None
+ */
+static inline void dp_tx_ext_desc_free(struct dp_soc *soc,
+	struct dp_tx_ext_desc_elem_s *elem, uint8_t desc_pool_id)
+{
+	TX_DESC_LOCK_LOCK(&soc->tx_ext_desc[desc_pool_id].lock);
+	elem->next = soc->tx_ext_desc[desc_pool_id].freelist;
+	soc->tx_ext_desc[desc_pool_id].freelist = elem;
+	TX_DESC_LOCK_UNLOCK(&soc->tx_ext_desc[desc_pool_id].lock);
+	return;
+}
+#endif /* DP_TX_DESC_H */

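dp_tx_desc.h above packs pool id, page id and offset into a 21-bit descriptor ID using the POOL/PAGE/OFFSET masks, and dp_tx_desc_find() turns those three fields back into a descriptor pointer. A short sketch of how a completion handler might decode a cookie; the helper name dp_tx_comp_find_desc and the example ID value are illustrative, not part of this change:

/* Hypothetical completion-path helper: recover the SW descriptor from the
 * 21-bit cookie. Field layout follows the masks in dp_tx_desc.h:
 * pool = bits 20..18, page = bits 17..10, offset = bits 9..0. */
static inline struct dp_tx_desc_s *
dp_tx_comp_find_desc(struct dp_soc *soc, uint32_t tx_desc_id)
{
	uint8_t pool_id = (tx_desc_id & DP_TX_DESC_ID_POOL_MASK) >>
				DP_TX_DESC_ID_POOL_OS;
	uint16_t page_id = (tx_desc_id & DP_TX_DESC_ID_PAGE_MASK) >>
				DP_TX_DESC_ID_PAGE_OS;
	uint16_t offset = (tx_desc_id & DP_TX_DESC_ID_OFFSET_MASK) >>
				DP_TX_DESC_ID_OFFSET_OS;

	return dp_tx_desc_find(soc, pool_id, page_id, offset);
}

/* Example: ID 0x080425 decodes to pool 2, page 1, offset 0x25 -- the same
 * encoding dp_tx_desc_pool_alloc() builds with the shifts above. */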
+ 3 - 1
qdf/inc/qdf_mem.h

@@ -298,6 +298,8 @@ void qdf_mem_multi_pages_alloc(qdf_device_t osdev,
 void qdf_mem_multi_pages_free(qdf_device_t osdev,
 			      struct qdf_mem_multi_page_t *pages,
 			      qdf_dma_context_t memctxt, bool cacheable);
-
+int qdf_mem_multi_page_link(qdf_device_t osdev,
+		struct qdf_mem_multi_page_t *pages,
+		uint32_t elem_size, uint32_t elem_count, uint8_t cacheable);
 
 #endif /* __QDF_MEMORY_H */

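The qdf_mem.c hunk below adds qdf_mem_multi_page_link(), which threads a free list through a multi-page allocation by writing each element's successor address into the first bytes of the element itself. A standalone illustration of that embedded-next-pointer technique on a single flat buffer; the element size and count are arbitrary example values, and this sketch ignores the page-boundary handling the real function performs:

#include <stdio.h>
#include <stdlib.h>

/* Link elem_count elements of elem_size bytes into a free list by storing
 * the address of the next element in the first word of each element. */
static void *link_elements(void *base, size_t elem_size, size_t elem_count)
{
	char *cur = base;
	size_t i;

	for (i = 0; i + 1 < elem_count; i++) {
		*(void **)cur = cur + elem_size;	/* embed next pointer */
		cur += elem_size;
	}
	*(void **)cur = NULL;				/* terminate the list */
	return base;					/* head of the free list */
}

int main(void)
{
	size_t elem_size = 64, elem_count = 8;		/* example values */
	void *pool = malloc(elem_size * elem_count);
	size_t n = 0;

	if (!pool)
		return 1;
	for (void *e = link_elements(pool, elem_size, elem_count); e;
	     e = *(void **)e)				/* walk the free list */
		n++;
	printf("linked %zu elements\n", n);		/* prints: linked 8 elements */
	free(pool);
	return 0;
}

The real qdf_mem_multi_page_link() does the same per page and, at each page boundary, points the last element of one page at the first element of the next.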
+ 62 - 0
qdf/linux/src/qdf_mem.c

@@ -850,6 +850,68 @@ void qdf_mem_multi_pages_free(qdf_device_t osdev,
 }
 EXPORT_SYMBOL(qdf_mem_multi_pages_free);
 
+/**
+ * qdf_mem_multi_page_link() - Make links for multi page elements
+ * @osdev: OS device handle pointer
+ * @pages: Multi page information storage
+ * @elem_size: Single element size
+ * @elem_count: number of elements to be linked
+ * @cacheable: true for cacheable memory, false for coherent (DMA) memory
+ *
+ * This function links the elements of a multi-page allocation into a single
+ * list by storing the next-element pointer at the start of each element.
+ *
+ * Return: 0 on success, -ENOMEM if a page pointer is missing
+ */
+int qdf_mem_multi_page_link(qdf_device_t osdev,
+		struct qdf_mem_multi_page_t *pages,
+		uint32_t elem_size, uint32_t elem_count, uint8_t cacheable)
+{
+	uint16_t i, i_int;
+	void *page_info;
+	void **c_elem = NULL;
+	uint32_t num_link = 0;
+
+	for (i = 0; i < pages->num_pages; i++) {
+		if (cacheable)
+			page_info = pages->cacheable_pages[i];
+		else
+			page_info = pages->dma_pages[i].page_v_addr_start;
+
+		if (!page_info)
+			return -ENOMEM;
+
+		c_elem = (void **)page_info;
+		for (i_int = 0; i_int < pages->num_element_per_page; i_int++) {
+			if (i_int == (pages->num_element_per_page - 1)) {
+				if (cacheable)
+					*c_elem = pages->
+						cacheable_pages[i + 1];
+				else
+					*c_elem = pages->
+						dma_pages[i + 1].
+							page_v_addr_start;
+				num_link++;
+				break;
+			} else {
+				*c_elem =
+					(void *)(((char *)c_elem) + elem_size);
+			}
+			num_link++;
+			c_elem = (void **)*c_elem;
+
+			/* Last link established exit */
+			if (num_link == (elem_count - 1))
+				break;
+		}
+	}
+
+	if (c_elem)
+		*c_elem = NULL;
+
+	return 0;
+}
+EXPORT_SYMBOL(qdf_mem_multi_page_link);
+
 /**
  * qdf_mem_copy() - copy memory
  * @dst_addr: Pointer to destination memory location (to copy to)