
qcacld-3.0: optimize data path memory allocation

When allocating HTT descriptors, allocate multiple small
chunks of memory instead of one large contiguous chunk.
This removes the need for order-5 allocations and reduces
the chance of memory allocation failures. Applies to the
HTT, fragment, OL, and TSO descriptor pools.

Change-Id: Ib9d4a3f10adbc0656e1418cf3a67429322bb7164
CRs-fixed: 845666
Leo Chang 9 years ago
parent
commit
376398b24d

+ 37 - 0
core/cdf/inc/cdf_memory.h

@@ -37,6 +37,32 @@
 /* Include Files */
 #include <cdf_types.h>

+/**
+ * struct cdf_mem_dma_page_t - Allocated dmaable page
+ * @page_v_addr_start: Page start virtual address
+ * @page_v_addr_end: Page end virtual address
+ * @page_p_addr: Page start physical address
+ */
+struct cdf_mem_dma_page_t {
+	char *page_v_addr_start;
+	char *page_v_addr_end;
+	cdf_dma_addr_t page_p_addr;
+};
+
+/**
+ * struct cdf_mem_multi_page_t - multiple page allocation information storage
+ * @num_element_per_page: Number of elements in a single page
+ * @num_pages: Number of pages needed for the allocation
+ * @dma_pages: page information storage in case of coherent memory
+ * @cacheable_pages: page information storage in case of cacheable memory
+ */
+struct cdf_mem_multi_page_t {
+	uint16_t num_element_per_page;
+	uint16_t num_pages;
+	struct cdf_mem_dma_page_t *dma_pages;
+	void **cacheable_pages;
+};
+
 /* Preprocessor definitions and constants */

 #ifdef MEMORY_DEBUG
@@ -222,4 +248,15 @@ static inline int32_t cdf_str_len(const char *str)
 	return strlen(str);
 }

+void cdf_mem_multi_pages_alloc(cdf_device_t osdev,
+				struct cdf_mem_multi_page_t *pages,
+				size_t element_size,
+				uint16_t element_num,
+				cdf_dma_context_t memctxt,
+				bool cacheable);
+
+void cdf_mem_multi_pages_free(cdf_device_t osdev,
+				struct cdf_mem_multi_page_t *pages,
+				cdf_dma_context_t memctxt,
+				bool cacheable);
 #endif /* __CDF_MEMORY_H */
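
For orientation, a minimal usage sketch of the new multi-page API (the osdev
handle, memory context, element size, and element count below are illustrative
assumptions, not values taken from this change):

	/* Allocate 1024 DMA-able elements of 64 bytes each, spread across
	 * order-0 pages, then release them.  On failure the allocator
	 * resets num_pages/dma_pages itself, which callers rely on. */
	struct cdf_mem_multi_page_t pages;

	cdf_mem_multi_pages_alloc(osdev, &pages, 64, 1024, memctxt, false);
	if (!pages.num_pages || !pages.dma_pages)
		return -ENOBUFS;                /* allocation failed */
	/* ... index elements via pages.dma_pages[i].page_v_addr_start ... */
	cdf_mem_multi_pages_free(osdev, &pages, memctxt, false);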

+ 30 - 0
core/cdf/inc/cdf_util.h

@@ -106,6 +106,14 @@ CDF_INLINE_FN int cdf_status_to_os_return(CDF_STATUS status)
 #define cdf_container_of(ptr, type, member) \
 	 __cdf_container_of(ptr, type, member)

+/**
+ * CDF_IS_PWR2() - test whether the input value is a power-of-2 integer
+ *
+ * @value: input integer
+ *
+ */
+#define CDF_IS_PWR2(value) (((value) ^ ((value)-1)) == ((value) << 1) - 1)
+
 /**
  * cdf_is_macaddr_equal() - compare two CDF MacAddress
  * @pMacAddr1: Pointer to one cdf MacAddress to compare
@@ -322,4 +330,26 @@ CDF_INLINE_FN uint8_t *cdf_get_u32(uint8_t *ptr, uint32_t *pValue)
 	return ptr + 4;
 }

+/**
+ * cdf_get_pwr2() - get next power of 2 integer from input value
+ * @value: input value to find next power of 2 integer
+ *
+ * Get next power of 2 integer from input value
+ *
+ * Return: Power of 2 integer
+ */
+CDF_INLINE_FN int cdf_get_pwr2(int value)
+{
+	int log2;
+	if (CDF_IS_PWR2(value))
+		return value;
+
+	log2 = 0;
+	while (value) {
+		value >>= 1;
+		log2++;
+	}
+	return 1 << log2;
+}
+
 #endif /*_CDF_UTIL_H*/
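
A quick sanity check of the two power-of-2 helpers above (values worked out by
hand from the definitions; this snippet is illustrative, not part of the commit):

	/* CDF_IS_PWR2: x ^ (x - 1) == (x << 1) - 1 holds only for powers of 2 */
	assert(CDF_IS_PWR2(64));         /* 64 ^ 63 = 127 == (64 << 1) - 1 */
	assert(!CDF_IS_PWR2(96));        /* 96 ^ 95 = 63  != 191           */
	/* cdf_get_pwr2 rounds up to the next power of 2 */
	assert(cdf_get_pwr2(64) == 64);  /* already a power of 2           */
	assert(cdf_get_pwr2(96) == 128); /* 96 needs 7 bits -> 1 << 7      */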

+ 149 - 0
core/cdf/src/cdf_memory.c

@@ -374,6 +374,154 @@ void cdf_mem_free(void *ptr)
 }
 #endif

+/**
+ * cdf_mem_multi_pages_alloc() - allocate kernel memory as multiple pages
+ * @osdev:		OS device handle pointer
+ * @pages:		Multi page information storage
+ * @element_size:	Each element size
+ * @element_num:	Total number of elements should be allocated
+ * @memctxt:		Memory context
+ * @cacheable:		Coherent memory or cacheable memory
+ *
+ * This function allocates a large amount of memory over multiple pages.
+ * Large contiguous allocations fail frequently, so instead of allocating
+ * one big chunk in a single shot, allocate multiple non-contiguous pages
+ * and combine them at the point of use.
+ *
+ * Return: None
+ */
+void cdf_mem_multi_pages_alloc(cdf_device_t osdev,
+				struct cdf_mem_multi_page_t *pages,
+				size_t element_size,
+				uint16_t element_num,
+				cdf_dma_context_t memctxt,
+				bool cacheable)
+{
+	uint16_t page_idx;
+	struct cdf_mem_dma_page_t *dma_pages;
+	void **cacheable_pages = NULL;
+	uint16_t i;
+
+	pages->num_element_per_page = PAGE_SIZE / element_size;
+	if (!pages->num_element_per_page) {
+		cdf_print("Invalid page %d or element size %d",
+			(int)PAGE_SIZE, (int)element_size);
+		goto out_fail;
+	}
+
+	pages->num_pages = element_num / pages->num_element_per_page;
+	if (element_num % pages->num_element_per_page)
+		pages->num_pages++;
+
+	if (cacheable) {
+		/* Pages information storage */
+		pages->cacheable_pages = cdf_mem_malloc(
+			pages->num_pages * sizeof(pages->cacheable_pages));
+		if (!pages->cacheable_pages) {
+			cdf_print("Cacheable page storage alloc fail");
+			goto out_fail;
+		}
+
+		cacheable_pages = pages->cacheable_pages;
+		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
+			cacheable_pages[page_idx] = cdf_mem_malloc(PAGE_SIZE);
+			if (!cacheable_pages[page_idx]) {
+				cdf_print("cacheable page alloc fail, pi %d",
+					page_idx);
+				goto page_alloc_fail;
+			}
+		}
+		pages->dma_pages = NULL;
+	} else {
+		pages->dma_pages = cdf_mem_malloc(
+			pages->num_pages * sizeof(struct cdf_mem_dma_page_t));
+		if (!pages->dma_pages) {
+			cdf_print("dmaable page storage alloc fail");
+			goto out_fail;
+		}
+
+		dma_pages = pages->dma_pages;
+		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
+			dma_pages->page_v_addr_start =
+				cdf_os_mem_alloc_consistent(osdev, PAGE_SIZE,
+					&dma_pages->page_p_addr, memctxt);
+			if (!dma_pages->page_v_addr_start) {
+				cdf_print("dmaable page alloc fail pi %d",
+					page_idx);
+				goto page_alloc_fail;
+			}
+			dma_pages->page_v_addr_end =
+				dma_pages->page_v_addr_start + PAGE_SIZE;
+			dma_pages++;
+		}
+		pages->cacheable_pages = NULL;
+	}
+	return;
+
+page_alloc_fail:
+	if (cacheable) {
+		for (i = 0; i < page_idx; i++)
+			cdf_mem_free(pages->cacheable_pages[i]);
+		cdf_mem_free(pages->cacheable_pages);
+	} else {
+		dma_pages = pages->dma_pages;
+		for (i = 0; i < page_idx; i++) {
+			cdf_os_mem_free_consistent(osdev, PAGE_SIZE,
+				dma_pages->page_v_addr_start,
+				dma_pages->page_p_addr, memctxt);
+			dma_pages++;
+		}
+		cdf_mem_free(pages->dma_pages);
+	}
+
+out_fail:
+	pages->cacheable_pages = NULL;
+	pages->dma_pages = NULL;
+	pages->num_pages = 0;
+	return;
+}
+
+/**
+ * cdf_mem_multi_pages_free() - free memory allocated as multiple pages
+ * @osdev:	OS device handle pointer
+ * @pages:	Multi page information storage
+ * @memctxt:	Memory context
+ * @cacheable:	Coherent memory or cacheable memory
+ *
+ * This function frees memory that was allocated over multiple pages.
+ *
+ * Return: None
+ */
+void cdf_mem_multi_pages_free(cdf_device_t osdev,
+				struct cdf_mem_multi_page_t *pages,
+				cdf_dma_context_t memctxt,
+				bool cacheable)
+{
+	unsigned int page_idx;
+	struct cdf_mem_dma_page_t *dma_pages;
+
+	if (cacheable) {
+		for (page_idx = 0; page_idx < pages->num_pages; page_idx++)
+			cdf_mem_free(pages->cacheable_pages[page_idx]);
+		cdf_mem_free(pages->cacheable_pages);
+	} else {
+		dma_pages = pages->dma_pages;
+		for (page_idx = 0; page_idx < pages->num_pages; page_idx++) {
+			cdf_os_mem_free_consistent(osdev, PAGE_SIZE,
+				dma_pages->page_v_addr_start,
+				dma_pages->page_p_addr, memctxt);
+			dma_pages++;
+		}
+		cdf_mem_free(pages->dma_pages);
+	}
+
+	pages->cacheable_pages = NULL;
+	pages->dma_pages = NULL;
+	pages->num_pages = 0;
+	return;
+}
+
+
 /**
  * cdf_mem_set() - set (fill) memory with a specified byte value.
  * @pMemory:    Pointer to memory that will be set
@@ -629,3 +777,4 @@ cdf_os_mem_dma_sync_single_for_device(cdf_device_t osdev,
 {
 	dma_sync_single_for_device(osdev->dev, bus_addr,  size, direction);
 }
+
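
To make the sizing arithmetic in cdf_mem_multi_pages_alloc() concrete, here is
a small illustrative helper (the 88-byte element size and 1024-element pool in
the comments are made-up numbers, assuming a 4 KB PAGE_SIZE):

	/* Mirrors the page-count math above: elements never straddle pages. */
	static uint16_t pages_needed(size_t element_size, uint16_t element_num)
	{
		uint16_t per_page = PAGE_SIZE / element_size; /* 4096/88 = 46 */
		uint16_t pages = element_num / per_page;      /* 1024/46 = 22 */

		if (element_num % per_page)                   /* 12 left over */
			pages++;                              /* -> 23 pages  */
		return pages;
	}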

+ 4 - 2
core/dp/htt/htt_h2t.c

@@ -154,10 +154,12 @@ A_STATUS htt_h2t_frag_desc_bank_cfg_msg(struct htt_pdev_t *pdev)

 	/** Bank specific data structure.*/
 #if HTT_PADDR64
-	bank_cfg->bank_base_address[0].lo = pdev->frag_descs.pool_paddr;
+	bank_cfg->bank_base_address[0].lo =
+		pdev->frag_descs.desc_pages.dma_pages->page_p_addr;
 	bank_cfg->bank_base_address[0].hi = 0;
 #else /* ! HTT_PADDR64 */
-	bank_cfg->bank_base_address[0] = pdev->frag_descs.pool_paddr;
+	bank_cfg->bank_base_address[0] =
+		pdev->frag_descs.desc_pages.dma_pages->page_p_addr;
 #endif /* HTT_PADDR64 */
 	/* Logical Min index */
 	HTT_H2T_FRAG_DESC_BANK_MIN_IDX_SET(bank_cfg->bank_info[0], 0);

+ 3 - 17
core/dp/htt/htt_rx.c

@@ -149,20 +149,6 @@ void htt_rx_hash_deinit(struct htt_pdev_t *pdev)
 	pdev->rx_ring.hash_table = NULL;
 }

-static int ceil_pwr2(int value)
-{
-	int log2;
-	if (IS_PWR2(value))
-		return value;
-
-	log2 = 0;
-	while (value) {
-		value >>= 1;
-		log2++;
-	}
-	return 1 << log2;
-}
-
 static bool
 htt_rx_msdu_first_msdu_flag_ll(htt_pdev_handle pdev, void *msdu_desc)
 {
@@ -206,7 +192,7 @@ static int htt_rx_ring_size(struct htt_pdev_t *pdev)
 	else if (size > HTT_RX_RING_SIZE_MAX)
 		size = HTT_RX_RING_SIZE_MAX;

-	size = ceil_pwr2(size);
+	size = cdf_get_pwr2(size);
 	return size;
 }

@@ -2064,7 +2050,7 @@ int htt_rx_hash_init(struct htt_pdev_t *pdev)
 {
 	int i, j;

-	HTT_ASSERT2(IS_PWR2(RX_NUM_HASH_BUCKETS));
+	HTT_ASSERT2(CDF_IS_PWR2(RX_NUM_HASH_BUCKETS));

 	pdev->rx_ring.hash_table =
 		cdf_mem_malloc(RX_NUM_HASH_BUCKETS *
@@ -2150,7 +2136,7 @@ int htt_rx_attach(struct htt_pdev_t *pdev)
 	uint32_t ring_elem_size = sizeof(uint32_t);
 #endif /* HTT_PADDR64 */
 	pdev->rx_ring.size = htt_rx_ring_size(pdev);
-	HTT_ASSERT2(IS_PWR2(pdev->rx_ring.size));
+	HTT_ASSERT2(CDF_IS_PWR2(pdev->rx_ring.size));
 	pdev->rx_ring.size_mask = pdev->rx_ring.size - 1;

 	/*

+ 316 - 128
core/dp/htt/htt_tx.c

@@ -48,6 +48,7 @@
 #include <ol_cfg.h>             /* ol_cfg_netbuf_frags_max, etc. */
 #include <ol_htt_tx_api.h>      /* HTT_TX_DESC_VADDR_OFFSET */
 #include <ol_txrx_htt_api.h>    /* ol_tx_msdu_id_storage */
+#include <ol_txrx_internal.h>
 #include <htt_internal.h>

 /* IPA Micro controler TX data packet HTT Header Preset */
@@ -65,25 +66,42 @@
  */
 #define HTT_IPA_UC_OFFLOAD_TX_HEADER_DEFAULT 0x07C04001

+#if HTT_PADDR64
+#define HTT_TX_DESC_FRAG_FIELD_HI_UPDATE(frag_field_ptr)                       \
+do {                                                                           \
+	frag_field_ptr++;                                                      \
+	/* frags_desc_ptr.hi */                                                \
+	*frag_field_ptr = 0;                                                   \
+} while (0)
+#else
+#define HTT_TX_DESC_FRAG_FIELD_HI_UPDATE(frag_field_ptr) {}
+#endif
+
 /*--- setup / tear-down functions -------------------------------------------*/

 #ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
 uint32_t *g_dbg_htt_desc_end_addr, *g_dbg_htt_desc_start_addr;
 #endif

-int htt_tx_attach(struct htt_pdev_t *pdev, int desc_pool_elems)
-{
-	int i, pool_size;
-	uint32_t **p;
-	cdf_dma_addr_t pool_paddr;
+static cdf_dma_addr_t htt_tx_get_paddr(htt_pdev_handle pdev,
+				char *target_vaddr);

-#if defined(HELIUMPLUS_PADDR64)
+#ifdef HELIUMPLUS
+/**
+ * htt_tx_desc_get_size() - get tx descriptor size
+ * @pdev:	htt device instance pointer
+ *
+ * This function will get HTT TX descriptor size and fragment descriptor size
+ *
+ * Return: None
+ */
+static void htt_tx_desc_get_size(struct htt_pdev_t *pdev)
+{
 	pdev->tx_descs.size = sizeof(struct htt_host_tx_desc_t);
-
 	if (HTT_WIFI_IP_VERSION(pdev->wifi_ip_ver.major, 0x2)) {
 		/*
-		* sizeof MSDU_EXT/Fragmentation descriptor.
-		*/
+		 * sizeof MSDU_EXT/Fragmentation descriptor.
+		 */
 		pdev->frag_descs.size = sizeof(struct msdu_ext_desc_t);
 	} else {
 		/*
@@ -96,7 +114,130 @@ int htt_tx_attach(struct htt_pdev_t *pdev, int desc_pool_elems)
 			(ol_cfg_netbuf_frags_max(pdev->ctrl_pdev)+1) * 8
 			+ 4;
 	}
-#else /* ! defined(HELIUMPLUS_PADDR64) */
+}
+
+/**
+ * htt_tx_frag_desc_field_update() - Update fragment descriptor field
+ * @pdev:	htt device instance pointer
+ * @fptr:	Fragment descriptor field pointer
+ * @index:	Descriptor index to find page and offset
+ * @desc_v_ptr:	descriptor virtual pointer to find offset
+ *
+ * This function will update the fragment descriptor field with the actual
+ * fragment descriptor's starting physical address
+ *
+ * Return: None
+ */
+static void htt_tx_frag_desc_field_update(struct htt_pdev_t *pdev,
+		uint32_t *fptr, unsigned int index,
+		struct htt_tx_msdu_desc_t *desc_v_ptr)
+{
+	unsigned int target_page;
+	unsigned int offset;
+	struct cdf_mem_dma_page_t *dma_page;
+
+	target_page = index / pdev->frag_descs.desc_pages.num_element_per_page;
+	offset = index % pdev->frag_descs.desc_pages.num_element_per_page;
+	dma_page = &pdev->frag_descs.desc_pages.dma_pages[target_page];
+	*fptr = (uint32_t)(dma_page->page_p_addr +
+		offset * pdev->frag_descs.size);
+	HTT_TX_DESC_FRAG_FIELD_HI_UPDATE(fptr);
+	return;
+}
+
+/**
+ * htt_tx_frag_desc_attach() - Attach fragment descriptor
+ * @pdev:		htt device instance pointer
+ * @desc_pool_elems:	Number of fragment descriptors
+ *
+ * This function will allocate fragment descriptors
+ *
+ * Return: 0 on success
+ */
+static int htt_tx_frag_desc_attach(struct htt_pdev_t *pdev,
+	uint16_t desc_pool_elems)
+{
+	pdev->frag_descs.pool_elems = desc_pool_elems;
+	cdf_mem_multi_pages_alloc(pdev->osdev, &pdev->frag_descs.desc_pages,
+		pdev->frag_descs.size, desc_pool_elems,
+		cdf_get_dma_mem_context((&pdev->frag_descs), memctx), false);
+	if ((0 == pdev->frag_descs.desc_pages.num_pages) ||
+		(NULL == pdev->frag_descs.desc_pages.dma_pages)) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			"FRAG descriptor alloc fail");
+		return -ENOBUFS;
+	}
+	return 0;
+}
+
+/**
+ * htt_tx_frag_desc_detach() - Detach fragment descriptor
+ * @pdev:		htt device instance pointer
+ *
+ * This function will free fragment descriptor
+ *
+ * Return: None
+ */
+static void htt_tx_frag_desc_detach(struct htt_pdev_t *pdev)
+{
+	cdf_mem_multi_pages_free(pdev->osdev, &pdev->frag_descs.desc_pages,
+		cdf_get_dma_mem_context((&pdev->frag_descs), memctx), false);
+}
+
+/**
+ * htt_tx_frag_alloc() - Allocate single fragment descriptor from the pool
+ * @pdev:		htt device instance pointer
+ * @index:		Descriptor index
+ * @frag_paddr_lo:	Fragment descriptor physical address
+ * @frag_ptr:		Fragment descriptor virtual address
+ *
+ * This function will look up a single fragment descriptor from the pool
+ *
+ * Return: 0 on success, 1 on error
+ */
+int htt_tx_frag_alloc(htt_pdev_handle pdev,
+	u_int16_t index, u_int32_t *frag_paddr_lo, void **frag_ptr)
+{
+	uint16_t frag_page_index;
+	uint16_t frag_elem_index;
+	struct cdf_mem_dma_page_t *dma_page;
+
+	/** Index should never be 0, since it's used by the hardware
+	    to terminate the link. */
+	if (index >= pdev->tx_descs.pool_elems) {
+		*frag_ptr = NULL;
+		return 1;
+	}
+
+	frag_page_index = index /
+		pdev->frag_descs.desc_pages.num_element_per_page;
+	frag_elem_index = index %
+		pdev->frag_descs.desc_pages.num_element_per_page;
+	dma_page = &pdev->frag_descs.desc_pages.dma_pages[frag_page_index];
+
+	*frag_ptr = dma_page->page_v_addr_start +
+		frag_elem_index * pdev->frag_descs.size;
+	if (((char *)(*frag_ptr) < dma_page->page_v_addr_start) ||
+		((char *)(*frag_ptr) > dma_page->page_v_addr_end)) {
+		*frag_ptr = NULL;
+		return 1;
+	}
+
+	*frag_paddr_lo = dma_page->page_p_addr +
+		frag_elem_index * pdev->frag_descs.size;
+	return 0;
+}
+#else
+/**
+ * htt_tx_desc_get_size() - get tx descriptor size
+ * @pdev:	htt device instance pointer
+ *
+ * This function will get HTT TX descriptor size and fragment descriptor size
+ *
+ * Return: None
+ */
+static inline void htt_tx_desc_get_size(struct htt_pdev_t *pdev)
+{
 	/*
 	 * Start with the size of the base struct
 	 * that actually gets downloaded.
@@ -111,10 +252,74 @@ int htt_tx_attach(struct htt_pdev_t *pdev, int desc_pool_elems)
 		+ (ol_cfg_netbuf_frags_max(pdev->ctrl_pdev) + 1) * 8
 		   /* 2x uint32_t */
 		+ 4; /* uint32_t fragmentation list terminator */
-
 	if (pdev->tx_descs.size < sizeof(uint32_t *))
 		pdev->tx_descs.size = sizeof(uint32_t *);
-#endif /* defined(HELIUMPLUS_PADDR64) */
+}
+
+/**
+ * htt_tx_frag_desc_field_update() - Update fragment descriptor field
+ * @pdev:	htt device instance pointer
+ * @fptr:	Fragment descriptor field pointer
+ * @index:	Descriptor index to find page and offset
+ * @desc_v_ptr:	descriptor virtual pointer to find offset
+ *
+ * This function will update the fragment descriptor field with the actual
+ * fragment descriptor's starting physical address
+ *
+ * Return: None
+ */
+static void htt_tx_frag_desc_field_update(struct htt_pdev_t *pdev,
+		uint32_t *fptr, unsigned int index,
+		struct htt_tx_msdu_desc_t *desc_v_ptr)
+{
+	*fptr = (uint32_t)htt_tx_get_paddr(pdev, (char *)desc_v_ptr) +
+		HTT_TX_DESC_LEN;
+}
+
+/**
+ * htt_tx_frag_desc_attach() - Attach fragment descriptor
+ * @pdev:	htt device instance pointer
+ * @desc_pool_elems:	Number of fragment descriptors
+ *
+ * This function will allocate fragment descriptors
+ *
+ * Return: 0 on success
+ */
+static inline int htt_tx_frag_desc_attach(struct htt_pdev_t *pdev,
+	int desc_pool_elems)
+{
+	return 0;
+}
+
+/**
+ * htt_tx_frag_desc_detach() - Detach fragment descriptor
+ * @pdev:		htt device instance pointer
+ *
+ * This function will free fragment descriptor
+ *
+ * Return: None
+ */
+static void htt_tx_frag_desc_detach(struct htt_pdev_t *pdev) {}
+#endif /* HELIUMPLUS */
+
+/**
+ * htt_tx_attach() - Attach HTT device instance
+ * @pdev:		htt device instance pointer
+ * @desc_pool_elems:	Number of TX descriptors
+ *
+ * This function will allocate HTT TX resources
+ *
+ * Return: 0 on success
+ */
+int htt_tx_attach(struct htt_pdev_t *pdev, int desc_pool_elems)
+{
+	int i, i_int, pool_size;
+	uint32_t **p;
+	struct cdf_mem_dma_page_t *page_info;
+	uint32_t num_link = 0;
+	uint16_t num_page, num_desc_per_page;
+
+	htt_tx_desc_get_size(pdev);
 	/*
 	 * Make sure tx_descs.size is a multiple of 4-bytes.
 	 * It should be, but round up just to be sure.
@@ -123,94 +328,120 @@ int htt_tx_attach(struct htt_pdev_t *pdev, int desc_pool_elems)

 	pdev->tx_descs.pool_elems = desc_pool_elems;
 	pdev->tx_descs.alloc_cnt = 0;
-
 	pool_size = pdev->tx_descs.pool_elems * pdev->tx_descs.size;
-
-	pdev->tx_descs.pool_vaddr =
-		cdf_os_mem_alloc_consistent(
-			pdev->osdev, pool_size,
-			&pool_paddr,
-			cdf_get_dma_mem_context((&pdev->tx_descs), memctx));
-
-	pdev->tx_descs.pool_paddr = pool_paddr;
-
-	if (!pdev->tx_descs.pool_vaddr)
-		return -ENOBUFS;       /* failure */
-
-	cdf_print("%s:htt_desc_start:0x%p htt_desc_end:0x%p\n", __func__,
-		  pdev->tx_descs.pool_vaddr,
-		  (uint32_t *) (pdev->tx_descs.pool_vaddr + pool_size));
-
-#if defined(HELIUMPLUS_PADDR64)
-	pdev->frag_descs.pool_elems = desc_pool_elems;
-	/*
-	 * Allocate space for MSDU extension descriptor
-	 * H/W expects this in contiguous memory
-	 */
-	pool_size = pdev->frag_descs.pool_elems * pdev->frag_descs.size;
-
-	pdev->frag_descs.pool_vaddr = cdf_os_mem_alloc_consistent(
-		pdev->osdev, pool_size, &pool_paddr,
-		cdf_get_dma_mem_context((&pdev->frag_descs), memctx));
-
-	if (!pdev->frag_descs.pool_vaddr)
-		return -ENOBUFS; /* failure */
-
-	pdev->frag_descs.pool_paddr = pool_paddr;
-
-	cdf_print("%s:MSDU Ext.Table Start:0x%p MSDU Ext.Table End:0x%p\n",
-		  __func__, pdev->frag_descs.pool_vaddr,
-		  (u_int32_t *) (pdev->frag_descs.pool_vaddr + pool_size));
-#endif /* defined(HELIUMPLUS_PADDR64) */
-
-#ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
-	g_dbg_htt_desc_end_addr = (uint32_t *)
-				  (pdev->tx_descs.pool_vaddr + pool_size);
-	g_dbg_htt_desc_start_addr = (uint32_t *) pdev->tx_descs.pool_vaddr;
-#endif
+	cdf_mem_multi_pages_alloc(pdev->osdev, &pdev->tx_descs.desc_pages,
+		pdev->tx_descs.size, pdev->tx_descs.pool_elems,
+		cdf_get_dma_mem_context((&pdev->tx_descs), memctx), false);
+	if ((0 == pdev->tx_descs.desc_pages.num_pages) ||
+		(NULL == pdev->tx_descs.desc_pages.dma_pages)) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			"HTT desc alloc fail");
+		goto out_fail;
+	}
+	num_page = pdev->tx_descs.desc_pages.num_pages;
+	num_desc_per_page = pdev->tx_descs.desc_pages.num_element_per_page;

 	/* link tx descriptors into a freelist */
-	pdev->tx_descs.freelist = (uint32_t *) pdev->tx_descs.pool_vaddr;
+	page_info = pdev->tx_descs.desc_pages.dma_pages;
+	pdev->tx_descs.freelist = (uint32_t *)page_info->page_v_addr_start;
 	p = (uint32_t **) pdev->tx_descs.freelist;
-	for (i = 0; i < desc_pool_elems - 1; i++) {
-		*p = (uint32_t *) (((char *)p) + pdev->tx_descs.size);
-		p = (uint32_t **) *p;
+	for (i = 0; i < num_page; i++) {
+		for (i_int = 0; i_int < num_desc_per_page; i_int++) {
+			if (i_int == (num_desc_per_page - 1)) {
+				/*
+				 * Last element on this page,
+				 * should point to the next page */
+				if (!page_info->page_v_addr_start) {
+					TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+						"over flow num link %d\n",
+						num_link);
+					goto free_htt_desc;
+				}
+				page_info++;
+				*p = (uint32_t *)page_info->page_v_addr_start;
+			} else {
+				*p = (uint32_t *)
+					(((char *) p) + pdev->tx_descs.size);
+			}
+			num_link++;
+			p = (uint32_t **) *p;
+			/* Last link established exit */
+			if (num_link == (pdev->tx_descs.pool_elems - 1))
+				break;
+		}
 	}
 	*p = NULL;

-	return 0;               /* success */
+	if (htt_tx_frag_desc_attach(pdev, desc_pool_elems)) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			"HTT Frag descriptor alloc fail");
+		goto free_htt_desc;
+	}
+
+	/* success */
+	return 0;
+
+free_htt_desc:
+	cdf_mem_multi_pages_free(pdev->osdev, &pdev->tx_descs.desc_pages,
+		cdf_get_dma_mem_context((&pdev->tx_descs), memctx), false);
+out_fail:
+	return -ENOBUFS;
 }
 }

 void htt_tx_detach(struct htt_pdev_t *pdev)
 {
-		cdf_os_mem_free_consistent(
-			pdev->osdev,
-			/* pool_size */
-			pdev->tx_descs.pool_elems * pdev->tx_descs.size,
-			pdev->tx_descs.pool_vaddr,
-			pdev->tx_descs.pool_paddr,
-			cdf_get_dma_mem_context((&pdev->tx_descs), memctx));
-#if defined(HELIUMPLUS_PADDR64)
-		cdf_os_mem_free_consistent(
-			pdev->osdev,
-			/* pool_size */
-			pdev->frag_descs.pool_elems *
-			pdev->frag_descs.size,
-			pdev->frag_descs.pool_vaddr,
-			pdev->frag_descs.pool_paddr,
-			cdf_get_dma_mem_context((&pdev->frag_descs), memctx));
-#endif /* defined(HELIUMPLUS_PADDR64) */
+	if (!pdev) {
+		cdf_print("htt tx detach invalid instance");
+		return;
+	}
+
+	htt_tx_frag_desc_detach(pdev);
+	cdf_mem_multi_pages_free(pdev->osdev, &pdev->tx_descs.desc_pages,
+		cdf_get_dma_mem_context((&pdev->tx_descs), memctx), false);
+}
+
+/**
+ * htt_tx_get_paddr() - get physical address for htt desc
+ *
+ * Get HTT descriptor physical address from virtual address.
+ * Find the page first, then the offset within it.
+ *
+ * Return: Physical address of descriptor
+ */
+static cdf_dma_addr_t htt_tx_get_paddr(htt_pdev_handle pdev,
+				char *target_vaddr)
+{
+	uint16_t i;
+	struct cdf_mem_dma_page_t *page_info = NULL;
+	uint64_t offset;
+
+	for (i = 0; i < pdev->tx_descs.desc_pages.num_pages; i++) {
+		page_info = pdev->tx_descs.desc_pages.dma_pages + i;
+		if (!page_info->page_v_addr_start) {
+			cdf_assert(0);
+			return 0;
+		}
+		if ((target_vaddr >= page_info->page_v_addr_start) &&
+			(target_vaddr <= page_info->page_v_addr_end))
+			break;
+	}
+
+	if (!page_info) {
+		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "invalid page_info");
+		return 0;
 	}
+
+	offset = (uint64_t)(target_vaddr - page_info->page_v_addr_start);
+	return page_info->page_p_addr + offset;
 }

 /*--- descriptor allocation functions ---------------------------------------*/

-void *htt_tx_desc_alloc(htt_pdev_handle pdev, uint32_t *paddr_lo)
+void *htt_tx_desc_alloc(htt_pdev_handle pdev, uint32_t *paddr_lo,
+			uint16_t index)
 {
 	struct htt_host_tx_desc_t *htt_host_tx_desc;    /* includes HTC hdr */
 	struct htt_tx_msdu_desc_t *htt_tx_desc; /* doesn't include  HTC hdr */
-	uint16_t index;
 	uint32_t *fragmentation_descr_field_ptr;

 	htt_host_tx_desc = (struct htt_host_tx_desc_t *)pdev->tx_descs.freelist;
@@ -234,44 +465,20 @@ void *htt_tx_desc_alloc(htt_pdev_handle pdev, uint32_t *paddr_lo)
 	fragmentation_descr_field_ptr = (uint32_t *)
 		((uint32_t *) htt_tx_desc) +
 		HTT_TX_DESC_FRAGS_DESC_PADDR_OFFSET_DWORD;
-
-	index = ((char *)htt_host_tx_desc -
-		 (char *)(((struct htt_host_tx_desc_t *)
-			   pdev->tx_descs.pool_vaddr))) /
-		pdev->tx_descs.size;
 	/*
 	 * The fragmentation descriptor is allocated from consistent
 	 * memory. Therefore, we can use the address directly rather
 	 * than having to map it from a virtual/CPU address to a
 	 * physical/bus address.
 	 */
-#if defined(HELIUMPLUS_PADDR64)
-#if HTT_PADDR64
-	/* this is: frags_desc_ptr.lo */
-	*fragmentation_descr_field_ptr = (uint32_t)
-		(pdev->frag_descs.pool_paddr +
-		 (pdev->frag_descs.size * index));
-	fragmentation_descr_field_ptr++;
-	/* frags_desc_ptr.hi */
-	*fragmentation_descr_field_ptr = 0;
-#else /* ! HTT_PADDR64 */
-	*fragmentation_descr_field_ptr = (uint32_t)
-		(pdev->frag_descs.pool_paddr +
-		 (pdev->frag_descs.size * index));
-	cdf_print("%s %d: i %d frag_paddr 0x%x\n",
-		  __func__, __LINE__, index,
-		  (*fragmentation_descr_field_ptr));
-#endif /* HTT_PADDR64 */
-#else /* !HELIUMPLUS_PADDR64 */
-	*fragmentation_descr_field_ptr =
-		HTT_TX_DESC_PADDR(pdev, htt_tx_desc) + HTT_TX_DESC_LEN;
-#endif /* HELIUMPLUS_PADDR64 */
+	htt_tx_frag_desc_field_update(pdev, fragmentation_descr_field_ptr,
+		index, htt_tx_desc);

 	/*
 	 * Include the headroom for the HTC frame header when specifying the
 	 * physical address for the HTT tx descriptor.
 	 */
-	*paddr_lo = (uint32_t) HTT_TX_DESC_PADDR(pdev, htt_host_tx_desc);
+	*paddr_lo = (uint32_t)htt_tx_get_paddr(pdev, (char *)htt_host_tx_desc);
 	/*
 	 * The allocated tx descriptor space includes headroom for a
 	 * HTC frame header.  Hide this headroom, so that we don't have
@@ -312,32 +519,13 @@ void htt_tx_desc_frags_table_set(htt_pdev_handle pdev,
 		*fragmentation_descr_field_ptr = frag_desc_paddr_lo;
 #else
 		*fragmentation_descr_field_ptr =
-			HTT_TX_DESC_PADDR(pdev, htt_tx_desc) + HTT_TX_DESC_LEN;
+			htt_tx_get_paddr(pdev, htt_tx_desc) + HTT_TX_DESC_LEN;
 #endif
 	} else {
 		*fragmentation_descr_field_ptr = paddr;
 	}
 }

-#if defined(HELIUMPLUS_PADDR64)
-void *
-htt_tx_frag_alloc(htt_pdev_handle pdev,
-		  u_int16_t index,
-		  u_int32_t *frag_paddr_lo)
-{
-	/** Index should never be 0, since its used by the hardware
-	    to terminate the link. */
-	if (index >= pdev->tx_descs.pool_elems)
-		return NULL;
-
-	*frag_paddr_lo = (uint32_t)
-		(pdev->frag_descs.pool_paddr + (pdev->frag_descs.size * index));
-
-	return ((char *) pdev->frag_descs.pool_vaddr) +
-		(pdev->frag_descs.size * index);
-}
-#endif /* defined(HELIUMPLUS_PADDR64) */
-
 /* PUT THESE AS INLINE IN ol_htt_tx_api.h */

 void htt_tx_desc_flag_postponed(htt_pdev_handle pdev, void *desc)
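
The freelist construction in htt_tx_attach() above is the subtle part of this
change: within a page each descriptor slot points to the next one, and the
last slot of a page points to the first slot of the following page (the real
code additionally stops after pool_elems - 1 links, since the final page may
be only partly populated).  A stripped-down sketch of the pattern, with
illustrative names that are not part of the driver:

	/* Chain fixed-size slots across non-contiguous pages into a freelist. */
	static void link_freelist(struct cdf_mem_dma_page_t *page,
				  uint16_t num_pages, uint16_t slots_per_page,
				  size_t slot_size)
	{
		uint32_t **p = (uint32_t **)page->page_v_addr_start;
		uint16_t i, j;

		for (i = 0; i < num_pages; i++) {
			for (j = 0; j < slots_per_page; j++) {
				if (i == num_pages - 1 &&
				    j == slots_per_page - 1)
					break;  /* very last slot */
				if (j == slots_per_page - 1) {
					/* hop to the next page */
					page++;
					*p = (uint32_t *)page->page_v_addr_start;
				} else {
					/* next slot in the same page */
					*p = (uint32_t *)((char *)p + slot_size);
				}
				p = (uint32_t **)*p;
			}
		}
		*p = NULL;      /* terminate the freelist */
	}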

+ 4 - 6
core/dp/htt/htt_types.h

@@ -324,10 +324,9 @@ struct htt_pdev_t {

 	struct {
 		int size;       /* of each HTT tx desc */
-		int pool_elems;
-		int alloc_cnt;
-		char *pool_vaddr;
-		uint32_t pool_paddr;
+		uint16_t pool_elems;
+		uint16_t alloc_cnt;
+		struct cdf_mem_multi_page_t desc_pages;
 		uint32_t *freelist;
 		cdf_dma_mem_context(memctx);
 	} tx_descs;
@@ -335,8 +334,7 @@ struct htt_pdev_t {
 	struct {
 		int size; /* of each Fragment/MSDU-Ext descriptor */
 		int pool_elems;
-		char *pool_vaddr;
-		uint32_t pool_paddr;
+		struct cdf_mem_multi_page_t desc_pages;
 		cdf_dma_mem_context(memctx);
 	} frag_descs;
 #endif /* defined(HELIUMPLUS_PADDR64) */

+ 19 - 9
core/dp/ol/inc/ol_htt_tx_api.h

@@ -357,7 +357,8 @@ uint16_t htt_tx_compl_desc_id(void *iterator, int num);
  * @param[OUT] paddr_lo - physical address of the HTT descriptor
  * @return success -> descriptor handle, -OR- failure -> NULL
  */
-void *htt_tx_desc_alloc(htt_pdev_handle htt_pdev, uint32_t *paddr_lo);
+void *htt_tx_desc_alloc(htt_pdev_handle pdev, uint32_t *paddr_lo,
+			uint16_t index);

 /**
  * @brief Free a HTT abstract tx descriptor.
@@ -368,17 +369,26 @@ void *htt_tx_desc_alloc(htt_pdev_handle htt_pdev, uint32_t *paddr_lo);
 void htt_tx_desc_free(htt_pdev_handle htt_pdev, void *htt_tx_desc);

 #if defined(HELIUMPLUS_PADDR64)
-/* TODO: oka: use kernel-doc format */
 /**
- * @brief Free a HTT abstract tx descriptor.
+ * @brief Allocate TX frag descriptor
+ * @details
+ *  Allocate TX frag descriptor
  *
- * @param htt_pdev - handle to the HTT instance that made the allocation
- * @param htt_tx_desc - the descriptor to free
+ * @param pdev - handle to the HTT instance that made the allocation
+ * @param index - tx descriptor index
+ * @param frag_paddr_lo - fragment descriptor physical address lower 32bits
+ * @param frag_ptr - fragment descriptor HLOS pointer
+ * @return 0 on success
  */
-void *
-htt_tx_frag_alloc(htt_pdev_handle pdev,
-		  u_int16_t index,
-		  u_int32_t *frag_paddr_lo);
+int htt_tx_frag_alloc(htt_pdev_handle pdev,
+	u_int16_t index, u_int32_t *frag_paddr_lo, void **frag_ptr);
+#else
+static inline int htt_tx_frag_alloc(htt_pdev_handle pdev,
+	u_int16_t index, u_int32_t *frag_paddr_lo, void **frag_ptr)
+{
+	*frag_ptr = NULL;
+	return 0;
+}
 #endif /* defined(HELIUMPLUS_PADDR64) */
 /**
  * @brief Discard all tx frames in the process of being downloaded.

+ 22 - 23
core/dp/txrx/ol_tx.c

@@ -1159,7 +1159,7 @@ void dump_frag_desc(char *msg, struct ol_tx_desc_t *tx_desc)
 	int                     i;

 	cdf_print("OL TX Descriptor 0x%p msdu_id %d\n",
-		 tx_desc, tx_desc->index);
+		 tx_desc, tx_desc->id);
 	cdf_print("HTT TX Descriptor vaddr: 0x%p paddr: 0x%x\n",
 	cdf_print("HTT TX Descriptor vaddr: 0x%p paddr: 0x%x\n",
 		 tx_desc->htt_tx_desc, tx_desc->htt_tx_desc_paddr);
 		 tx_desc->htt_tx_desc, tx_desc->htt_tx_desc_paddr);
 	cdf_print("%s %d: Fragment Descriptor 0x%p\n",
 	cdf_print("%s %d: Fragment Descriptor 0x%p\n",
@@ -1329,35 +1329,34 @@ cdf_nbuf_t ol_tx_reinject(struct ol_txrx_vdev_t *vdev,
 void ol_tso_seg_list_init(struct ol_txrx_pdev_t *pdev, uint32_t num_seg)
 {
 	int i;
-	pdev->tso_seg_pool.pool_size = num_seg;
-	pdev->tso_seg_pool.num_free = num_seg;
-
-	pdev->tso_seg_pool.array = NULL;
-	pdev->tso_seg_pool.array = cdf_mem_malloc(num_seg *
-		 sizeof(struct cdf_tso_seg_elem_t));
-	if (!pdev->tso_seg_pool.array) {
-		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
-			 "%s Could not allocate TSO array!\n", __func__);
-		return;
+	struct cdf_tso_seg_elem_t *c_element;
+
+	c_element = cdf_mem_malloc(sizeof(struct cdf_tso_seg_elem_t));
+	pdev->tso_seg_pool.freelist = c_element;
+	for (i = 0; i < (num_seg - 1); i++) {
+		c_element->next =
+			cdf_mem_malloc(sizeof(struct cdf_tso_seg_elem_t));
+		c_element = c_element->next;
+		c_element->next = NULL;
 	}
-
-	pdev->tso_seg_pool.freelist = &pdev->tso_seg_pool.array[0];
-
-	for (i = 0; i < (num_seg - 1); i++)
-		pdev->tso_seg_pool.array[i].next =
-			 &pdev->tso_seg_pool.array[i + 1];
-
-	pdev->tso_seg_pool.array[i].next = NULL;
-
+	pdev->tso_seg_pool.pool_size = num_seg;
 	cdf_spinlock_init(&pdev->tso_seg_pool.tso_mutex);
 }

 void ol_tso_seg_list_deinit(struct ol_txrx_pdev_t *pdev)
 {
+	int i;
+	struct cdf_tso_seg_elem_t *c_element;
+	struct cdf_tso_seg_elem_t *temp;
+
 	cdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
-	if (pdev->tso_seg_pool.array) {
-		cdf_mem_free(pdev->tso_seg_pool.array);
-		pdev->tso_seg_pool.array = NULL;
+	c_element = pdev->tso_seg_pool.freelist;
+	for (i = 0; i < pdev->tso_seg_pool.pool_size; i++) {
+		temp = c_element->next;
+		cdf_mem_free(c_element);
+		c_element = temp;
+		if (!c_element)
+			break;
 	}

 	pdev->tso_seg_pool.freelist = NULL;

+ 10 - 0
core/dp/txrx/ol_tx.h

@@ -78,5 +78,15 @@ void ol_txrx_mgmt_tx_complete(void *ctxt, cdf_nbuf_t netbuf, int err);
 #if defined(FEATURE_TSO)
 #if defined(FEATURE_TSO)
 void ol_tso_seg_list_init(struct ol_txrx_pdev_t *pdev, uint32_t num_seg);
 void ol_tso_seg_list_init(struct ol_txrx_pdev_t *pdev, uint32_t num_seg);
 void ol_tso_seg_list_deinit(struct ol_txrx_pdev_t *pdev);
 void ol_tso_seg_list_deinit(struct ol_txrx_pdev_t *pdev);
+#else
+static inline void ol_tso_seg_list_init(struct ol_txrx_pdev_t *pdev,
+	uint32_t num_seg)
+{
+	return;
+}
+static inline void ol_tso_seg_list_deinit(struct ol_txrx_pdev_t *pdev)
+{
+	return;
+}
 #endif
 #endif
 #endif /* _OL_TX__H_ */
 #endif /* _OL_TX__H_ */

+ 1 - 11
core/dp/txrx/ol_tx_desc.c

@@ -127,7 +127,6 @@ struct ol_tx_desc_t *ol_tx_desc_alloc(struct ol_txrx_pdev_t *pdev,
 		pdev->tx_desc.freelist = pdev->tx_desc.freelist->next;
 		ol_tx_desc_sanity_checks(pdev, tx_desc);
 		ol_tx_desc_compute_delay(tx_desc);
-
 	}
 	cdf_spin_unlock_bh(&pdev->tx_mutex);
 	if (!tx_desc)
@@ -231,13 +230,6 @@ ol_tx_desc_alloc_wrapper(struct ol_txrx_pdev_t *pdev,
 #endif
 #endif

-/* TBD: make this inline in the .h file? */
-struct ol_tx_desc_t *ol_tx_desc_find(struct ol_txrx_pdev_t *pdev,
-				     uint16_t tx_desc_id)
-{
-	return &pdev->tx_desc.array[tx_desc_id].tx_desc;
-}
-
 #ifndef QCA_LL_TX_FLOW_CONTROL_V2
 /**
  * ol_tx_desc_free() - put descriptor to freelist
@@ -537,7 +529,7 @@ void ol_tx_desc_frame_free_nonstd(struct ol_txrx_pdev_t *pdev,
 		 */
 #if defined(HELIUMPLUS_DEBUG)
 		cdf_print("%s %d: Frag Descriptor Reset [%d] to 0x%x\n",
-			  __func__, __LINE__, tx_desc->index,
+			  __func__, __LINE__, tx_desc->id,
 			  frag_desc_paddr_lo);
 #endif /* HELIUMPLUS_DEBUG */
 #endif /* HELIUMPLUS_PADDR64 */
@@ -591,8 +583,6 @@ struct cdf_tso_seg_elem_t *ol_tso_alloc_segment(struct ol_txrx_pdev_t *pdev)
 		pdev->tso_seg_pool.freelist = pdev->tso_seg_pool.freelist->next;
 	}
 	cdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
-	if (!tso_seg)
-		return NULL;

 	return tso_seg;
 }

+ 12 - 6
core/dp/txrx/ol_tx_desc.h

@@ -73,8 +73,16 @@ struct ol_tx_desc_t *ol_tx_desc_ll(struct ol_txrx_pdev_t *pdev,
  * @param tx_desc_id - the ID of the descriptor in question
  * @return the descriptor object that has the specified ID
  */
-struct ol_tx_desc_t *ol_tx_desc_find(struct ol_txrx_pdev_t *pdev,
-				     uint16_t tx_desc_id);
+static inline struct ol_tx_desc_t *ol_tx_desc_find(
+			struct ol_txrx_pdev_t *pdev, uint16_t tx_desc_id)
+{
+	void **td_base = (void **)pdev->tx_desc.desc_pages.cacheable_pages;
+
+	return &((union ol_tx_desc_list_elem_t *)
+		(td_base[tx_desc_id >> pdev->tx_desc.page_divider] +
+		(pdev->tx_desc.desc_reserved_size *
+		(tx_desc_id & pdev->tx_desc.offset_filter))))->tx_desc;
+}

 /**
  * @brief Free a list of tx descriptors and the tx frames they refer to.
@@ -125,10 +133,8 @@ void ol_tx_desc_frame_free_nonstd(struct ol_txrx_pdev_t *pdev,
 static inline uint16_t
 ol_tx_desc_id(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
 {
-	TXRX_ASSERT2(((union ol_tx_desc_list_elem_t *)tx_desc -
-		      pdev->tx_desc.array) < pdev->tx_desc.pool_size);
-	return (uint16_t)
-	       ((union ol_tx_desc_list_elem_t *)tx_desc - pdev->tx_desc.array);
+	TXRX_ASSERT2(tx_desc->id < pdev->tx_desc.pool_size);
+	return tx_desc->id;
 }

 /*
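
The new inline ol_tx_desc_find() above resolves a descriptor ID to a
(page, offset) pair with one shift and one mask, which is why
ol_txrx_pdev_attach() rounds the element size up to a power of 2.  A worked
example with illustrative numbers (not taken from the code):

	/* Assume desc_reserved_size = 128, so a 4 KB page holds 32 elements:
	 * page_divider = 5 (2^5 = 32) and offset_filter = 0x1F (31). */
	uint16_t tx_desc_id = 100;
	uint16_t page   = tx_desc_id >> 5;  /* 100 / 32 = 3, i.e. 4th page */
	uint16_t offset = tx_desc_id & 31;  /* 100 % 32 = 4, i.e. 5th slot */
	/* descriptor = cacheable_pages[page] + offset * 128 bytes         */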

+ 10 - 12
core/dp/txrx/ol_tx_send.c

@@ -442,8 +442,10 @@ ol_tx_delay_compute(struct ol_txrx_pdev_t *pdev,
 void ol_tx_discard_target_frms(ol_txrx_pdev_handle pdev)
 {
 	int i = 0;
-	for (i = 0; i < pdev->tx_desc.pool_size; i++) {
+	struct ol_tx_desc_t *tx_desc;

+	for (i = 0; i < pdev->tx_desc.pool_size; i++) {
+		tx_desc = ol_tx_desc_find(pdev, i);
 		/*
 		 * Confirm that each tx descriptor is "empty", i.e. it has
 		 * no tx frame attached.
@@ -451,12 +453,11 @@ void ol_tx_discard_target_frms(ol_txrx_pdev_handle pdev)
 		 * been given to the target to transmit, for which the
 		 * target has never provided a response.
 		 */
-		if (cdf_atomic_read(&pdev->tx_desc.array[i].tx_desc.ref_cnt)) {
+		if (cdf_atomic_read(&tx_desc->ref_cnt)) {
 			TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
 				   "Warning: freeing tx frame "
 				   "(no tx completion from the target)\n");
 			ol_tx_desc_frame_free_nonstd(pdev,
-						     &pdev->tx_desc.array[i].
 						     tx_desc, 1);
 		}
 	}
@@ -487,7 +488,6 @@ ol_tx_completion_handler(ol_txrx_pdev_handle pdev,
 	char *trace_str;

 	uint32_t byte_cnt = 0;
-	union ol_tx_desc_list_elem_t *td_array = pdev->tx_desc.array;
 	cdf_nbuf_t netbuf;

 	union ol_tx_desc_list_elem_t *lcl_freelist = NULL;
@@ -500,7 +500,7 @@ ol_tx_completion_handler(ol_txrx_pdev_handle pdev,
 	trace_str = (status) ? "OT:C:F:" : "OT:C:S:";
 	for (i = 0; i < num_msdus; i++) {
 		tx_desc_id = desc_ids[i];
-		tx_desc = &td_array[tx_desc_id].tx_desc;
+		tx_desc = ol_tx_desc_find(pdev, tx_desc_id);
 		tx_desc->status = status;
 		netbuf = tx_desc->netbuf;

@@ -560,10 +560,9 @@ ol_tx_single_completion_handler(ol_txrx_pdev_handle pdev,
 				enum htt_tx_status status, uint16_t tx_desc_id)
 {
 	struct ol_tx_desc_t *tx_desc;
-	union ol_tx_desc_list_elem_t *td_array = pdev->tx_desc.array;
 	cdf_nbuf_t netbuf;

-	tx_desc = &td_array[tx_desc_id].tx_desc;
+	tx_desc = ol_tx_desc_find(pdev, tx_desc_id);
 	tx_desc->status = status;
 	netbuf = tx_desc->netbuf;

@@ -597,7 +596,6 @@ ol_tx_inspect_handler(ol_txrx_pdev_handle pdev,
 	uint16_t *desc_ids = (uint16_t *) tx_desc_id_iterator;
 	uint16_t tx_desc_id;
 	struct ol_tx_desc_t *tx_desc;
-	union ol_tx_desc_list_elem_t *td_array = pdev->tx_desc.array;
 	union ol_tx_desc_list_elem_t *lcl_freelist = NULL;
 	union ol_tx_desc_list_elem_t *tx_desc_last = NULL;
 	cdf_nbuf_t netbuf;
@@ -606,7 +604,7 @@ ol_tx_inspect_handler(ol_txrx_pdev_handle pdev,
 
 
 	for (i = 0; i < num_msdus; i++) {
 		tx_desc_id = desc_ids[i];
-		tx_desc = &td_array[tx_desc_id].tx_desc;
+		tx_desc = ol_tx_desc_find(pdev, tx_desc_id);
 		netbuf = tx_desc->netbuf;

 		/* find the "vdev" this tx_desc belongs to */
@@ -848,7 +846,7 @@ ol_tx_delay_tid_from_l3_hdr(struct ol_txrx_pdev_t *pdev,
 static int ol_tx_delay_category(struct ol_txrx_pdev_t *pdev, uint16_t msdu_id)
 {
 #ifdef QCA_COMPUTE_TX_DELAY_PER_TID
-	struct ol_tx_desc_t *tx_desc = &pdev->tx_desc.array[msdu_id].tx_desc;
+	struct ol_tx_desc_t *tx_desc = ol_tx_desc_find(pdev, msdu_id);
 	uint8_t tid;

 	cdf_nbuf_t msdu = tx_desc->netbuf;
@@ -939,9 +937,9 @@ ol_tx_delay_compute(struct ol_txrx_pdev_t *pdev,
 	pdev->tx_delay.tx_compl_timestamp_ticks = now_ticks;

 	for (i = 0; i < num_msdus; i++) {
-		uint16_t id = desc_ids[i];
-		struct ol_tx_desc_t *tx_desc = &pdev->tx_desc.array[id].tx_desc;
 		int bin;
+		uint16_t id = desc_ids[i];
+		struct ol_tx_desc_t *tx_desc = ol_tx_desc_find(pdev, id);

 		tx_delay_queue_ticks =
 			now_ticks - tx_desc->entry_timestamp_ticks;

+ 103 - 105
core/dp/txrx/ol_txrx.c

@@ -258,29 +258,11 @@ setup_fastpath_ce_handles(struct ol_softc *osc, struct ol_txrx_pdev_t *pdev)

 }

-/**
- * init_txdesc_id() initialize Tx desc with id
- *
- * @pdev: handle OL pdev
- * @desc_num: Tx descriptor number
- *
- * Return: void
- */
-static inline void
-init_txdesc_id(struct ol_txrx_pdev_t *pdev, int desc_num)
-{
-	pdev->tx_desc.array[desc_num].tx_desc.id = desc_num;
-	cdf_atomic_init(&pdev->tx_desc.array[desc_num].tx_desc.ref_cnt);
-}
 #else  /* not WLAN_FEATURE_FASTPATH */
 static inline void
 setup_fastpath_ce_handles(struct ol_softc *osc, struct ol_txrx_pdev_t *pdev)
 {
 }
-static inline void
-init_txdesc_id(struct ol_txrx_pdev_t *pdev, int desc_num)
-{
-}
 #endif /* WLAN_FEATURE_FASTPATH */

 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
@@ -391,14 +373,20 @@ fail0:
 int
 ol_txrx_pdev_attach(ol_txrx_pdev_handle pdev)
 {
-	int i;
+	uint16_t i;
+	uint16_t fail_idx = 0;
 	int ret = 0;
 	uint16_t desc_pool_size;
 	struct ol_softc *osc =  cds_get_context(CDF_MODULE_ID_HIF);

+	uint16_t desc_element_size = sizeof(union ol_tx_desc_list_elem_t);
+	union ol_tx_desc_list_elem_t *c_element;
+	unsigned int sig_bit;
+	uint16_t desc_per_page;
+
 	if (!osc) {
 		ret = -EINVAL;
-		goto fail0;
+		goto ol_attach_fail;
 	}

 	/*
@@ -428,7 +416,7 @@ ol_txrx_pdev_attach(ol_txrx_pdev_handle pdev)

 	ret = htt_attach(pdev->htt_pdev, desc_pool_size);
 	if (ret)
-		goto fail0;
+		goto ol_attach_fail;

 	/* Update CE's pkt download length */
 	ce_pkt_dl_len_set((void *)osc, htt_pkt_dl_len_get(pdev->htt_pdev));
@@ -436,15 +424,32 @@ ol_txrx_pdev_attach(ol_txrx_pdev_handle pdev)
 	/* Attach micro controller data path offload resource */
 	if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
 		if (htt_ipa_uc_attach(pdev->htt_pdev))
-			goto fail1;
-
-	pdev->tx_desc.array =
-		cdf_mem_malloc(desc_pool_size *
-			       sizeof(union ol_tx_desc_list_elem_t));
-	if (!pdev->tx_desc.array)
-		goto fail2;
-	cdf_mem_set(pdev->tx_desc.array,
-		    desc_pool_size * sizeof(union ol_tx_desc_list_elem_t), 0);
+			goto uc_attach_fail;
+
+	/* Calculate single element reserved size power of 2 */
+	pdev->tx_desc.desc_reserved_size = cdf_get_pwr2(desc_element_size);
+	cdf_mem_multi_pages_alloc(pdev->osdev, &pdev->tx_desc.desc_pages,
+		pdev->tx_desc.desc_reserved_size, desc_pool_size, 0, true);
+	if ((0 == pdev->tx_desc.desc_pages.num_pages) ||
+		(NULL == pdev->tx_desc.desc_pages.cacheable_pages)) {
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+			"Page alloc fail");
+		goto page_alloc_fail;
+	}
+	desc_per_page = pdev->tx_desc.desc_pages.num_element_per_page;
+	pdev->tx_desc.offset_filter = desc_per_page - 1;
+	/* Calculate page divider to find page number */
+	sig_bit = 0;
+	while (desc_per_page) {
+		sig_bit++;
+		desc_per_page = desc_per_page >> 1;
+	}
+	pdev->tx_desc.page_divider = (sig_bit - 1);
+	CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+		"page_divider 0x%x, offset_filter 0x%x num elem %d, ol desc num page %d, ol desc per page %d",
+		pdev->tx_desc.page_divider, pdev->tx_desc.offset_filter,
+		desc_pool_size, pdev->tx_desc.desc_pages.num_pages,
+		pdev->tx_desc.desc_pages.num_element_per_page);

 	/*
 	 * Each SW tx desc (used only within the tx datapath SW) has a
@@ -453,79 +458,74 @@ ol_txrx_pdev_attach(ol_txrx_pdev_handle pdev)
 	 * desc now, to avoid doing it during time-critical transmit.
 	 */
 	pdev->tx_desc.pool_size = desc_pool_size;
+	pdev->tx_desc.freelist =
+		(union ol_tx_desc_list_elem_t *)
+		(*pdev->tx_desc.desc_pages.cacheable_pages);
+	c_element = pdev->tx_desc.freelist;
 	for (i = 0; i < desc_pool_size; i++) {
 		void *htt_tx_desc;
-#if defined(HELIUMPLUS_PADDR64)
-		void *htt_frag_desc;
-		uint32_t frag_paddr_lo;
-#endif /* defined(HELIUMPLUS_PADDR64) */
+		void *htt_frag_desc = NULL;
+		uint32_t frag_paddr_lo = 0;
 		uint32_t paddr_lo;

-		htt_tx_desc = htt_tx_desc_alloc(pdev->htt_pdev, &paddr_lo);
+		if (i == (desc_pool_size - 1))
+			c_element->next = NULL;
+		else
+			c_element->next = (union ol_tx_desc_list_elem_t *)
+				ol_tx_desc_find(pdev, i + 1);
+
+		htt_tx_desc = htt_tx_desc_alloc(pdev->htt_pdev, &paddr_lo, i);
 		if (!htt_tx_desc) {
 			CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_FATAL,
 				  "%s: failed to alloc HTT tx desc (%d of %d)",
 				__func__, i, desc_pool_size);
-			while (--i >= 0) {
-				htt_tx_desc_free(pdev->htt_pdev,
-						 pdev->tx_desc.array[i].
-						 tx_desc.htt_tx_desc);
-			}
-			goto fail3;
+			fail_idx = i;
+			goto desc_alloc_fail;
 		}
-		pdev->tx_desc.array[i].tx_desc.htt_tx_desc = htt_tx_desc;
-		pdev->tx_desc.array[i].tx_desc.htt_tx_desc_paddr = paddr_lo;
-
-#if defined(HELIUMPLUS_PADDR64)
-		htt_frag_desc = htt_tx_frag_alloc(pdev->htt_pdev, i,
-						  &frag_paddr_lo);
-		if (!htt_frag_desc) {
-			cdf_print("%s: failed to alloc HTT frag dsc (%d/%d)\n",
-				  __func__, i, desc_pool_size);
+
+		c_element->tx_desc.htt_tx_desc = htt_tx_desc;
+		c_element->tx_desc.htt_tx_desc_paddr = paddr_lo;
+		ret = htt_tx_frag_alloc(pdev->htt_pdev,
+			i, &frag_paddr_lo, &htt_frag_desc);
+		if (ret) {
+			CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
+				"%s: failed to alloc HTT frag dsc (%d/%d)",
+				__func__, i, desc_pool_size);
 			/* Is there a leak here, is this handling correct? */
-			goto fail3;
+			fail_idx = i;
+			goto desc_alloc_fail;
 		}
-		/* Initialize the first 6 words (TSO flags)
-		   of the frag descriptor */
-		memset(htt_frag_desc, 0, 6*sizeof(uint32_t));
-
-		pdev->tx_desc.array[i].tx_desc.htt_frag_desc = htt_frag_desc;
-#if defined(HELIUMPLUS_DEBUG)
-		cdf_print("%s:%d %d %p\n", __func__, __LINE__,
-			i, pdev->tx_desc.array[i].tx_desc.htt_frag_desc);
-#endif /* HELIUMPLUS_DEBUG */
-		pdev->tx_desc.array[i].tx_desc.htt_frag_desc_paddr =
-			frag_paddr_lo;
-#if defined(HELIUMPLUS_DEBUG)
-		cdf_print("%s:%d - %d FRAG VA 0x%p FRAG PA 0x%x\n",
+		if (!ret && htt_frag_desc) {
+			/* Initialize the first 6 words (TSO flags)
+			   of the frag descriptor */
+			memset(htt_frag_desc, 0, 6 * sizeof(uint32_t));
+			c_element->tx_desc.htt_frag_desc = htt_frag_desc;
+			c_element->tx_desc.htt_frag_desc_paddr = frag_paddr_lo;
+		}
+		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_INFO_LOW,
+			"%s:%d - %d FRAG VA 0x%p FRAG PA 0x%x",
 			__func__, __LINE__, i,
-			pdev->tx_desc.array[i].tx_desc.htt_frag_desc,
-			pdev->tx_desc.array[i].tx_desc.htt_frag_desc_paddr);
-#endif /* HELIUMPLUS_DEBUG */
-#endif /* defined(HELIUMPLUS_PADDR64) */
-
-		pdev->tx_desc.array[i].tx_desc.index = i;
+			c_element->tx_desc.htt_frag_desc,
+			c_element->tx_desc.htt_frag_desc_paddr);
 #ifdef QCA_SUPPORT_TXDESC_SANITY_CHECKS
-		pdev->tx_desc.array[i].tx_desc.pkt_type = 0xff;
+		c_element->tx_desc.pkt_type = 0xff;
 #ifdef QCA_COMPUTE_TX_DELAY
-		pdev->tx_desc.array[i].tx_desc.entry_timestamp_ticks =
+		c_element->tx_desc.entry_timestamp_ticks =
 			0xffffffff;
 #endif
 #endif
-		init_txdesc_id(pdev, i);
+		c_element->tx_desc.id = i;
+		cdf_atomic_init(&c_element->tx_desc.ref_cnt);
+		c_element = c_element->next;
+		fail_idx = i;
 	}

 	/* link SW tx descs into a freelist */
 	pdev->tx_desc.num_free = desc_pool_size;
-	pdev->tx_desc.freelist = &pdev->tx_desc.array[0];
 	TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
 		   "%s first tx_desc:0x%p Last tx desc:0x%p\n", __func__,
 		   (uint32_t *) pdev->tx_desc.freelist,
 		   (uint32_t *) (pdev->tx_desc.freelist + desc_pool_size));
-	for (i = 0; i < desc_pool_size - 1; i++)
-		pdev->tx_desc.array[i].next = &pdev->tx_desc.array[i + 1];
-
-	pdev->tx_desc.array[i].next = NULL;

 	/* check what format of frames are expected to be delivered by the OS */
 	pdev->frame_format = ol_cfg_frame_type(pdev->ctrl_pdev);
@@ -540,7 +540,7 @@ ol_txrx_pdev_attach(ol_txrx_pdev_handle pdev)
 		CDF_TRACE(CDF_MODULE_ID_TXRX, CDF_TRACE_LEVEL_ERROR,
 			  "%s Invalid standard frame type: %d",
 			  __func__, pdev->frame_format);
-		goto fail4;
+		goto control_init_fail;
 	}

 	/* setup the global rx defrag waitlist */
@@ -608,7 +608,7 @@ ol_txrx_pdev_attach(ol_txrx_pdev_handle pdev)
 			  "Invalid std frame type; [en/de]cap: f:%x t:%x r:%x",
 			  "Invalid std frame type; [en/de]cap: f:%x t:%x r:%x",
 			  pdev->frame_format,
 			  pdev->frame_format,
 			  pdev->target_tx_tran_caps, pdev->target_rx_tran_caps);
 			  pdev->target_tx_tran_caps, pdev->target_rx_tran_caps);
-		goto fail4;
+		goto control_init_fail;
 	}
 #endif

@@ -658,7 +658,7 @@ ol_txrx_pdev_attach(ol_txrx_pdev_handle pdev)
 					  CDF_TRACE_LEVEL_ERROR,
 					  "%s: %s", __func__, TRACESTR01);
 #undef TRACESTR01
-				goto fail4;
+				goto control_init_fail;
 			}
 		} else {
 			/* PN check done on target */
@@ -686,10 +686,10 @@ ol_txrx_pdev_attach(ol_txrx_pdev_handle pdev)
 	OL_TXRX_PEER_STATS_MUTEX_INIT(pdev);
 
 	if (OL_RX_REORDER_TRACE_ATTACH(pdev) != A_OK)
-		goto fail5;
+		goto reorder_trace_attach_fail;
 
 	if (OL_RX_PN_TRACE_ATTACH(pdev) != A_OK)
-		goto fail6;
+		goto pn_trace_attach_fail;
 
 #ifdef PERE_IP_HDR_ALIGNMENT_WAR
 	pdev->host_80211_enable = ol_scn_host_80211_enable_get(pdev->ctrl_pdev);
@@ -791,45 +791,39 @@ ol_txrx_pdev_attach(ol_txrx_pdev_handle pdev)
 	}
 #endif /* QCA_COMPUTE_TX_DELAY */
 
-#ifdef QCA_SUPPORT_TX_THROTTLE
 	/* Thermal Mitigation */
 	ol_tx_throttle_init(pdev);
-#endif
-
-
-#if defined(FEATURE_TSO)
 	ol_tso_seg_list_init(pdev, desc_pool_size);
-#endif
-
 	ol_tx_register_flow_control(pdev);
 
 	return 0;            /* success */
 
-fail6:
+pn_trace_attach_fail:
 	OL_RX_REORDER_TRACE_DETACH(pdev);
 
-fail5:
+reorder_trace_attach_fail:
 	cdf_spinlock_destroy(&pdev->tx_mutex);
 	cdf_spinlock_destroy(&pdev->peer_ref_mutex);
 	cdf_spinlock_destroy(&pdev->rx.mutex);
 	cdf_spinlock_destroy(&pdev->last_real_peer_mutex);
 	OL_TXRX_PEER_STATS_MUTEX_DESTROY(pdev);
 
-fail4:
-	for (i = 0; i < desc_pool_size; i++)
+control_init_fail:
+desc_alloc_fail:
+	for (i = 0; i < fail_idx; i++)
 		htt_tx_desc_free(pdev->htt_pdev,
-				 pdev->tx_desc.array[i].tx_desc.htt_tx_desc);
+			(ol_tx_desc_find(pdev, i))->htt_tx_desc);
 
-fail3:
-	cdf_mem_free(pdev->tx_desc.array);
+	cdf_mem_multi_pages_free(pdev->osdev,
+		&pdev->tx_desc.desc_pages, 0, true);
 
-fail2:
+page_alloc_fail:
 	if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
 		htt_ipa_uc_detach(pdev->htt_pdev);
-fail1:
+uc_attach_fail:
 	htt_detach(pdev->htt_pdev);
 
-fail0:
+ol_attach_fail:
 	return ret;            /* fail */
 }
 
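Note on the allocation side (earlier in ol_txrx_pdev_attach(), outside this excerpt): the unwind path above frees the pool with cdf_mem_multi_pages_free(), so the matching allocation is assumed to look roughly like the sketch below. The argument values (memctxt 0, cacheable true) are inferred from the free calls in this diff; the failure check and use of the page_alloc_fail label are assumptions.

	/* Sketch: allocate the descriptor pool as page-sized chunks rather
	 * than one large contiguous block; on failure, fall into the
	 * page_alloc_fail unwind label shown above. */
	cdf_mem_multi_pages_alloc(pdev->osdev, &pdev->tx_desc.desc_pages,
				  pdev->tx_desc.desc_reserved_size,
				  desc_pool_size, 0, true);
	if (!pdev->tx_desc.desc_pages.num_pages ||
	    !pdev->tx_desc.desc_pages.cacheable_pages)
		goto page_alloc_fail;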
@@ -841,6 +835,7 @@ A_STATUS ol_txrx_pdev_attach_target(ol_txrx_pdev_handle pdev)
 void ol_txrx_pdev_detach(ol_txrx_pdev_handle pdev, int force)
 {
 	int i;
+
 	/*checking to ensure txrx pdev structure is not NULL */
 	if (!pdev) {
 		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "NULL pdev passed to %s\n", __func__);
@@ -863,7 +858,7 @@ void ol_txrx_pdev_detach(ol_txrx_pdev_handle pdev, int force)
 	cdf_softirq_timer_free(&pdev->tx_throttle.tx_timer);
 #endif
 #endif
-
+	ol_tso_seg_list_deinit(pdev);
 	ol_tx_deregister_flow_control(pdev);
 
 	if (force) {
@@ -889,7 +884,9 @@ void ol_txrx_pdev_detach(ol_txrx_pdev_handle pdev, int force)
 
 	for (i = 0; i < pdev->tx_desc.pool_size; i++) {
 		void *htt_tx_desc;
+		struct ol_tx_desc_t *tx_desc;
 
+		tx_desc = ol_tx_desc_find(pdev, i);
 		/*
 		 * Confirm that each tx descriptor is "empty", i.e. it has
 		 * no tx frame attached.
@@ -897,18 +894,19 @@ void ol_txrx_pdev_detach(ol_txrx_pdev_handle pdev, int force)
 		 * been given to the target to transmit, for which the
 		 * target has never provided a response.
 		 */
-		if (cdf_atomic_read(&pdev->tx_desc.array[i].tx_desc.ref_cnt)) {
+		if (cdf_atomic_read(&tx_desc->ref_cnt)) {
 			TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
 				   "Warning: freeing tx frame (no compltn)\n");
 			ol_tx_desc_frame_free_nonstd(pdev,
-						     &pdev->tx_desc.array[i].
 						     tx_desc, 1);
 		}
-		htt_tx_desc = pdev->tx_desc.array[i].tx_desc.htt_tx_desc;
+		htt_tx_desc = tx_desc->htt_tx_desc;
 		htt_tx_desc_free(pdev->htt_pdev, htt_tx_desc);
 	}
 
-	cdf_mem_free(pdev->tx_desc.array);
+	cdf_mem_multi_pages_free(pdev->osdev,
+		&pdev->tx_desc.desc_pages, 0, true);
+	pdev->tx_desc.freelist = NULL;
 
 	/* Detach micro controller data path offload resource */
 	if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))

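For reference, ol_tx_desc_find(pdev, i) replaces the old pdev->tx_desc.array[i] indexing throughout this file. A hedged sketch of the page/offset lookup it is expected to perform, using the page_divider / offset_filter / desc_reserved_size fields added to ol_txrx_types.h below (the real helper lives in the txrx internals and may differ in detail):

	/* Sketch: split the descriptor id into a page index (high bits,
	 * shifted out by page_divider) and an element offset within that
	 * page (low bits, kept by offset_filter). */
	static inline struct ol_tx_desc_t *
	ol_tx_desc_find(struct ol_txrx_pdev_t *pdev, uint16_t tx_desc_id)
	{
		void **pages = pdev->tx_desc.desc_pages.cacheable_pages;
		char *page =
			(char *)pages[tx_desc_id >> pdev->tx_desc.page_divider];
		uint16_t offset = tx_desc_id & pdev->tx_desc.offset_filter;

		return &((union ol_tx_desc_list_elem_t *)
			 (page + offset *
			  pdev->tx_desc.desc_reserved_size))->tx_desc;
	}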
+ 6 - 7
core/dp/txrx/ol_txrx_types.h

@@ -33,6 +33,7 @@
 #define _OL_TXRX_TYPES__H_
 
 #include <cdf_nbuf.h>           /* cdf_nbuf_t */
+#include <cdf_memory.h>
 #include <cds_queue.h>          /* TAILQ */
 #include <a_types.h>            /* A_UINT8 */
 #include <htt.h>                /* htt_sec_type, htt_pkt_type, etc. */
@@ -131,15 +132,10 @@ enum ol_tx_frm_type {
 struct ol_tx_desc_t {
 	cdf_nbuf_t netbuf;
 	void *htt_tx_desc;
-#ifdef WLAN_FEATURE_FASTPATH
 	uint16_t id;
-#endif /* WLAN_FEATURE_FASTPATH */
 	uint32_t htt_tx_desc_paddr;
-#if defined(HELIUMPLUS_PADDR64)
 	void *htt_frag_desc; /* struct msdu_ext_desc_t * */
 	uint32_t htt_frag_desc_paddr;
-#endif /* defined(HELIUMPLUS_PADDR64) */
-	uint32_t index;
 	cdf_atomic_t ref_cnt;
 	enum htt_tx_status status;
 
@@ -565,13 +561,17 @@ struct ol_txrx_pdev_t {
 	struct {
 		uint16_t pool_size;
 		uint16_t num_free;
-		union ol_tx_desc_list_elem_t *array;
 		union ol_tx_desc_list_elem_t *freelist;
 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
 		uint8_t num_invalid_bin;
 		cdf_spinlock_t flow_pool_list_lock;
 		TAILQ_HEAD(flow_pool_list_t, ol_tx_flow_pool_t) flow_pool_list;
 #endif
+		uint32_t page_size;
+		uint16_t desc_reserved_size;
+		uint8_t page_divider;
+		uint32_t offset_filter;
+		struct cdf_mem_multi_page_t desc_pages;
 	} tx_desc;
 
 #if defined(QCA_LL_TX_FLOW_CONTROL_V2)
@@ -761,7 +761,6 @@ struct ol_txrx_pdev_t {
 	struct {
 		uint16_t pool_size;
 		uint16_t num_free;
-		struct cdf_tso_seg_elem_t *array;
 		struct cdf_tso_seg_elem_t *freelist;
 		/* tso mutex */
 		OL_TX_MUTEX_TYPE tso_mutex;
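The four new tx_desc fields only work if the number of elements per page is a power of two, so a descriptor id splits cleanly into a page index (shift by page_divider) and an in-page offset (mask with offset_filter). A sketch of how they could be derived at pool-attach time, using the cdf_get_pwr2() helper this change adds to cdf_util.h; the exact computation and the sizeof() operand are assumptions, not quoted from the commit:

	/* Sketch: round the per-element stride up to a power of 2, then
	 * derive the shift (page_divider) and mask (offset_filter) from
	 * the resulting power-of-2 element count per page. */
	uint16_t num_per_page;

	pdev->tx_desc.desc_reserved_size =
		cdf_get_pwr2(sizeof(union ol_tx_desc_list_elem_t));
	pdev->tx_desc.page_size = PAGE_SIZE;	/* power of 2 */
	num_per_page = pdev->tx_desc.page_size /
		pdev->tx_desc.desc_reserved_size;
	pdev->tx_desc.offset_filter = num_per_page - 1;	/* low-bit mask */
	pdev->tx_desc.page_divider = 0;		/* becomes log2(num_per_page) */
	while (num_per_page > 1) {
		num_per_page >>= 1;
		pdev->tx_desc.page_divider++;
	}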