Эх сурвалжийг харах

qcacmn: Changes in HW cookie conversion for MLO

Changes to have HW cookie conversion context per
desc pool.

This context will be used to program CMEM of the
other SOC in case multi-chip MLO.

Change-Id: I5ec68813e8fcb6d124698a52f5553acf9a7b1795
Chaithanya Garrepalli 3 жил өмнө
parent
commit
5be4508174

+ 144 - 113
dp/wifi3.0/be/dp_be.c

@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -118,16 +119,15 @@ void dp_cc_wbm_sw_en_cfg(struct hal_hw_cc_config *cc_cfg)
  * dp_cc_reg_cfg_init() - initialize and configure HW cookie
 			  conversion register
  * @soc: SOC handle
- * @cc_ctx: cookie conversion context pointer
  * @is_4k_align: page address 4k alignd
  *
  * Return: None
  */
 static void dp_cc_reg_cfg_init(struct dp_soc *soc,
-			       struct dp_hw_cookie_conversion_t *cc_ctx,
 			       bool is_4k_align)
 {
 	struct hal_hw_cc_config cc_cfg = { 0 };
+	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
 
 	if (soc->cdp_soc.ol_ops->get_con_mode &&
 	    soc->cdp_soc.ol_ops->get_con_mode() == QDF_GLOBAL_FTM_MODE)
@@ -138,7 +138,7 @@ static void dp_cc_reg_cfg_init(struct dp_soc *soc,
 		return;
 	}
 
-	cc_cfg.lut_base_addr_31_0 = cc_ctx->cmem_base;
+	cc_cfg.lut_base_addr_31_0 = be_soc->cc_cmem_base;
 	cc_cfg.cc_global_en = true;
 	cc_cfg.page_4k_align = is_4k_align;
 	cc_cfg.cookie_offset_msb = DP_CC_DESC_ID_SPT_VA_OS_MSB;
@@ -176,10 +176,10 @@ static inline void dp_hw_cc_cmem_write(hal_soc_handle_t hal_soc_hdl,
  *
  * Return: 0 in case of success, else error value
  */
-static inline QDF_STATUS dp_hw_cc_cmem_addr_init(
-				struct dp_soc *soc,
-				struct dp_hw_cookie_conversion_t *cc_ctx)
+static inline QDF_STATUS dp_hw_cc_cmem_addr_init(struct dp_soc *soc)
 {
+	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
+
 	dp_info("cmem base 0x%llx, size 0x%llx",
 		soc->cmem_base, soc->cmem_size);
 	/* get CMEM for cookie conversion */
@@ -187,8 +187,8 @@ static inline QDF_STATUS dp_hw_cc_cmem_addr_init(
 		dp_err("cmem_size %llu bytes < 4K", soc->cmem_size);
 		return QDF_STATUS_E_RESOURCES;
 	}
-	cc_ctx->cmem_base = (uint32_t)(soc->cmem_base +
-					DP_CC_MEM_OFFSET_IN_CMEM);
+	be_soc->cc_cmem_base = (uint32_t)(soc->cmem_base +
+					  DP_CC_MEM_OFFSET_IN_CMEM);
 
 	return QDF_STATUS_SUCCESS;
 }
@@ -196,7 +196,6 @@ static inline QDF_STATUS dp_hw_cc_cmem_addr_init(
 #else
 
 static inline void dp_cc_reg_cfg_init(struct dp_soc *soc,
-				      struct dp_hw_cookie_conversion_t *cc_ctx,
 				      bool is_4k_align) {}
 
 static inline void dp_hw_cc_cmem_write(hal_soc_handle_t hal_soc_hdl,
@@ -204,31 +203,26 @@ static inline void dp_hw_cc_cmem_write(hal_soc_handle_t hal_soc_hdl,
 				       uint32_t value)
 { }
 
-static inline QDF_STATUS dp_hw_cc_cmem_addr_init(
-				struct dp_soc *soc,
-				struct dp_hw_cookie_conversion_t *cc_ctx)
+static inline QDF_STATUS dp_hw_cc_cmem_addr_init(struct dp_soc *soc)
 {
 	return QDF_STATUS_SUCCESS;
 }
 #endif
 
-static QDF_STATUS dp_hw_cookie_conversion_attach(struct dp_soc_be *be_soc)
+QDF_STATUS
+dp_hw_cookie_conversion_attach(struct dp_soc_be *be_soc,
+			       struct dp_hw_cookie_conversion_t *cc_ctx,
+			       uint32_t num_descs,
+			       enum dp_desc_type desc_type,
+			       uint8_t desc_pool_id)
 {
 	struct dp_soc *soc = DP_SOC_BE_GET_SOC(be_soc);
-	struct dp_hw_cookie_conversion_t *cc_ctx = &be_soc->hw_cc_ctx;
-	uint32_t max_tx_rx_desc_num, num_spt_pages, i = 0;
+	uint32_t num_spt_pages, i = 0;
 	struct dp_spt_page_desc *spt_desc;
 	struct qdf_mem_dma_page_t *dma_page;
-	QDF_STATUS qdf_status;
-
-	qdf_status = dp_hw_cc_cmem_addr_init(soc, cc_ctx);
-	if (!QDF_IS_STATUS_SUCCESS(qdf_status))
-		return qdf_status;
 
 	/* estimate how many SPT DDR pages needed */
-	max_tx_rx_desc_num = WLAN_CFG_NUM_TX_DESC_MAX * MAX_TXDESC_POOLS +
-			WLAN_CFG_RX_SW_DESC_NUM_SIZE_MAX * MAX_RXDESC_POOLS;
-	num_spt_pages = max_tx_rx_desc_num / DP_CC_SPT_PAGE_MAX_ENTRIES;
+	num_spt_pages = num_descs / DP_CC_SPT_PAGE_MAX_ENTRIES;
 	num_spt_pages = num_spt_pages <= DP_CC_PPT_MAX_ENTRIES ?
 					num_spt_pages : DP_CC_PPT_MAX_ENTRIES;
 	dp_info("num_spt_pages needed %d", num_spt_pages);
@@ -247,6 +241,9 @@ static QDF_STATUS dp_hw_cookie_conversion_attach(struct dp_soc_be *be_soc)
 		goto fail_0;
 	}
 
+	cc_ctx->cmem_offset = dp_desc_pool_get_cmem_base(0, desc_pool_id,
+							 desc_type);
+
 	/* initial page desc */
 	spt_desc = cc_ctx->page_desc_base;
 	dma_page = cc_ctx->page_pool.dma_pages;
@@ -278,10 +275,11 @@ fail_0:
 	return QDF_STATUS_E_FAILURE;
 }
 
-static QDF_STATUS dp_hw_cookie_conversion_detach(struct dp_soc_be *be_soc)
+QDF_STATUS
+dp_hw_cookie_conversion_detach(struct dp_soc_be *be_soc,
+			       struct dp_hw_cookie_conversion_t *cc_ctx)
 {
 	struct dp_soc *soc = DP_SOC_BE_GET_SOC(be_soc);
-	struct dp_hw_cookie_conversion_t *cc_ctx = &be_soc->hw_cc_ctx;
 
 	qdf_mem_free(cc_ctx->page_desc_base);
 	dp_desc_multi_pages_mem_free(soc, DP_HW_CC_SPT_PAGE_TYPE,
@@ -291,160 +289,193 @@ static QDF_STATUS dp_hw_cookie_conversion_detach(struct dp_soc_be *be_soc)
 	return QDF_STATUS_SUCCESS;
 }
 
-static QDF_STATUS dp_hw_cookie_conversion_init(struct dp_soc_be *be_soc)
+QDF_STATUS
+dp_hw_cookie_conversion_init(struct dp_soc_be *be_soc,
+			     struct dp_hw_cookie_conversion_t *cc_ctx)
 {
 	struct dp_soc *soc = DP_SOC_BE_GET_SOC(be_soc);
-	struct dp_hw_cookie_conversion_t *cc_ctx = &be_soc->hw_cc_ctx;
 	uint32_t i = 0;
 	struct dp_spt_page_desc *spt_desc;
+	uint32_t ppt_index;
+	uint32_t ppt_id_start;
 
 	if (!cc_ctx->total_page_num) {
 		dp_err("total page num is 0");
 		return QDF_STATUS_E_INVAL;
 	}
 
+	ppt_id_start = DP_CMEM_OFFSET_TO_PPT_ID(cc_ctx->cmem_offset);
 	spt_desc = cc_ctx->page_desc_base;
 	while (i < cc_ctx->total_page_num) {
 		/* write page PA to CMEM */
 		dp_hw_cc_cmem_write(soc->hal_soc,
-				    (cc_ctx->cmem_base +
-				     i * DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED),
+				    (cc_ctx->cmem_offset + be_soc->cc_cmem_base
+				     + (i * DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)),
 				    (spt_desc[i].page_p_addr >>
 				     DP_CC_PPT_ENTRY_HW_APEND_BITS_4K_ALIGNED));
 
-		spt_desc[i].ppt_index = i;
-		spt_desc[i].avail_entry_index = 0;
-		/* link page desc */
-		if ((i + 1) != cc_ctx->total_page_num)
-			spt_desc[i].next = &spt_desc[i + 1];
-		else
-			spt_desc[i].next = NULL;
+		ppt_index = ppt_id_start + i;
+		spt_desc[i].ppt_index = ppt_index;
+
+		be_soc->page_desc_base[ppt_index].page_v_addr =
+				spt_desc[i].page_v_addr;
 		i++;
 	}
-
-	cc_ctx->page_desc_freelist = cc_ctx->page_desc_base;
-	cc_ctx->free_page_num = cc_ctx->total_page_num;
-
-	/* write WBM/REO cookie conversion CFG register */
-	dp_cc_reg_cfg_init(soc, cc_ctx, true);
-
 	return QDF_STATUS_SUCCESS;
 }
 
-static QDF_STATUS dp_hw_cookie_conversion_deinit(struct dp_soc_be *be_soc)
+QDF_STATUS
+dp_hw_cookie_conversion_deinit(struct dp_soc_be *be_soc,
+			       struct dp_hw_cookie_conversion_t *cc_ctx)
 {
-	struct dp_hw_cookie_conversion_t *cc_ctx = &be_soc->hw_cc_ctx;
+	struct dp_soc *soc = DP_SOC_BE_GET_SOC(be_soc);
+	uint32_t ppt_index;
+	struct dp_spt_page_desc *spt_desc;
+	int i = 0;
 
-	cc_ctx->page_desc_freelist = NULL;
-	cc_ctx->free_page_num = 0;
+	spt_desc = cc_ctx->page_desc_base;
+	while (i < cc_ctx->total_page_num) {
+		/* reset PA in CMEM to NULL */
+		dp_hw_cc_cmem_write(soc->hal_soc,
+				    (cc_ctx->cmem_offset + be_soc->cc_cmem_base
+				     + (i * DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)),
+				    0);
 
+		ppt_index = spt_desc[i].ppt_index;
+		be_soc->page_desc_base[ppt_index].page_v_addr = NULL;
+		i++;
+	}
 	return QDF_STATUS_SUCCESS;
 }
 
-uint16_t dp_cc_spt_page_desc_alloc(struct dp_soc_be *be_soc,
-				   struct dp_spt_page_desc **list_head,
-				   struct dp_spt_page_desc **list_tail,
-				   uint16_t num_desc)
+static QDF_STATUS dp_soc_detach_be(struct dp_soc *soc)
 {
-	uint16_t num_pages, count;
-	struct dp_hw_cookie_conversion_t *cc_ctx = &be_soc->hw_cc_ctx;
-
-	num_pages = (num_desc / DP_CC_SPT_PAGE_MAX_ENTRIES) +
-			(num_desc % DP_CC_SPT_PAGE_MAX_ENTRIES ? 1 : 0);
-
-	if (num_pages > cc_ctx->free_page_num) {
-		dp_err("fail: num_pages required %d > free_page_num %d",
-		       num_pages,
-		       cc_ctx->free_page_num);
-		return 0;
-	}
-
-	qdf_spin_lock_bh(&cc_ctx->cc_lock);
-
-	*list_head = *list_tail = cc_ctx->page_desc_freelist;
-	for (count = 0; count < num_pages; count++) {
-		if (qdf_unlikely(!cc_ctx->page_desc_freelist)) {
-			cc_ctx->page_desc_freelist = *list_head;
-			*list_head = *list_tail = NULL;
-			qdf_spin_unlock_bh(&cc_ctx->cc_lock);
-			return 0;
-		}
-		*list_tail = cc_ctx->page_desc_freelist;
-		cc_ctx->page_desc_freelist = cc_ctx->page_desc_freelist->next;
-	}
-	(*list_tail)->next = NULL;
-	cc_ctx->free_page_num -= count;
-
-	qdf_spin_unlock_bh(&cc_ctx->cc_lock);
+	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
+	int i = 0;
 
-	return count;
-}
+	dp_tx_deinit_bank_profiles(be_soc);
 
-void dp_cc_spt_page_desc_free(struct dp_soc_be *be_soc,
-			      struct dp_spt_page_desc **list_head,
-			      struct dp_spt_page_desc **list_tail,
-			      uint16_t page_nums)
-{
-	struct dp_hw_cookie_conversion_t *cc_ctx = &be_soc->hw_cc_ctx;
-	struct dp_spt_page_desc *temp_list = NULL;
+	for (i = 0; i < MAX_TXDESC_POOLS; i++)
+		dp_hw_cookie_conversion_detach(be_soc,
+					       &be_soc->tx_cc_ctx[i]);
 
-	qdf_spin_lock_bh(&cc_ctx->cc_lock);
+	for (i = 0; i < MAX_RXDESC_POOLS; i++)
+		dp_hw_cookie_conversion_detach(be_soc,
+					       &be_soc->rx_cc_ctx[i]);
 
-	temp_list = cc_ctx->page_desc_freelist;
-	cc_ctx->page_desc_freelist = *list_head;
-	(*list_tail)->next = temp_list;
-	cc_ctx->free_page_num += page_nums;
-	*list_tail = NULL;
-	*list_head = NULL;
+	qdf_mem_free(be_soc->page_desc_base);
+	be_soc->page_desc_base = NULL;
 
-	qdf_spin_unlock_bh(&cc_ctx->cc_lock);
+	return QDF_STATUS_SUCCESS;
 }
 
 static QDF_STATUS dp_soc_attach_be(struct dp_soc *soc)
 {
 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
+	uint32_t max_tx_rx_desc_num, num_spt_pages;
+	uint32_t num_entries;
+	int i = 0;
+
+	max_tx_rx_desc_num = WLAN_CFG_NUM_TX_DESC_MAX * MAX_TXDESC_POOLS +
+		WLAN_CFG_RX_SW_DESC_NUM_SIZE_MAX * MAX_RXDESC_POOLS;
+	/* estimate how many SPT DDR pages needed */
+	num_spt_pages = max_tx_rx_desc_num / DP_CC_SPT_PAGE_MAX_ENTRIES;
+	num_spt_pages = num_spt_pages <= DP_CC_PPT_MAX_ENTRIES ?
+					num_spt_pages : DP_CC_PPT_MAX_ENTRIES;
+
+	be_soc->page_desc_base = qdf_mem_malloc(
+		DP_CC_PPT_MAX_ENTRIES * sizeof(struct dp_spt_page_desc));
+	if (!be_soc->page_desc_base) {
+		dp_err("spt page descs allocation failed");
+		return QDF_STATUS_E_NOMEM;
+	}
 
 	soc->wbm_sw0_bm_id = hal_tx_get_wbm_sw0_bm_id();
 	qdf_status = dp_tx_init_bank_profiles(be_soc);
 
-	/* cookie conversion */
-	qdf_status = dp_hw_cookie_conversion_attach(be_soc);
+	qdf_status = dp_hw_cc_cmem_addr_init(soc);
+	if (!QDF_IS_STATUS_SUCCESS(qdf_status))
+		goto fail;
+
+	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
+		num_entries = wlan_cfg_get_num_tx_desc(soc->wlan_cfg_ctx);
+		qdf_status =
+			dp_hw_cookie_conversion_attach(be_soc,
+						       &be_soc->tx_cc_ctx[i],
+						       num_entries,
+						       DP_TX_DESC_TYPE, i);
+		if (!QDF_IS_STATUS_SUCCESS(qdf_status))
+			goto fail;
+	}
+
+	for (i = 0; i < MAX_RXDESC_POOLS; i++) {
+		num_entries =
+			wlan_cfg_get_dp_soc_rx_sw_desc_num(soc->wlan_cfg_ctx);
+		qdf_status =
+			dp_hw_cookie_conversion_attach(be_soc,
+						       &be_soc->rx_cc_ctx[i],
+						       num_entries,
+						       DP_RX_DESC_BUF_TYPE, i);
+		if (!QDF_IS_STATUS_SUCCESS(qdf_status))
+			goto fail;
+	}
 
+	return qdf_status;
+fail:
+	dp_soc_detach_be(soc);
 	return qdf_status;
 }
 
-static QDF_STATUS dp_soc_detach_be(struct dp_soc *soc)
+static QDF_STATUS dp_soc_deinit_be(struct dp_soc *soc)
 {
 	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
+	int i = 0;
 
-	dp_tx_deinit_bank_profiles(be_soc);
+	for (i = 0; i < MAX_TXDESC_POOLS; i++)
+		dp_hw_cookie_conversion_deinit(be_soc,
+					       &be_soc->tx_cc_ctx[i]);
 
-	dp_hw_cookie_conversion_detach(be_soc);
+	for (i = 0; i < MAX_RXDESC_POOLS; i++)
+		dp_hw_cookie_conversion_deinit(be_soc,
+					       &be_soc->rx_cc_ctx[i]);
 
 	return QDF_STATUS_SUCCESS;
 }
 
 static QDF_STATUS dp_soc_init_be(struct dp_soc *soc)
 {
-	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
 	QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
+	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
+	int i = 0;
 
-	qdf_status = dp_hw_cookie_conversion_init(be_soc);
+	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
+		qdf_status =
+			dp_hw_cookie_conversion_init(be_soc,
+						     &be_soc->tx_cc_ctx[i]);
+		if (!QDF_IS_STATUS_SUCCESS(qdf_status))
+			goto fail;
+	}
+
+	for (i = 0; i < MAX_RXDESC_POOLS; i++) {
+		qdf_status =
+			dp_hw_cookie_conversion_init(be_soc,
+						     &be_soc->rx_cc_ctx[i]);
+		if (!QDF_IS_STATUS_SUCCESS(qdf_status))
+			goto fail;
+	}
 
 	/* route vdev_id mismatch notification via FW completion */
 	hal_tx_vdev_mismatch_routing_set(soc->hal_soc,
 					 HAL_TX_VDEV_MISMATCH_FW_NOTIFY);
-	return qdf_status;
-}
 
-static QDF_STATUS dp_soc_deinit_be(struct dp_soc *soc)
-{
-	struct dp_soc_be *be_soc = dp_get_be_soc_from_dp_soc(soc);
-
-	dp_hw_cookie_conversion_deinit(be_soc);
+	/* write WBM/REO cookie conversion CFG register */
+	dp_cc_reg_cfg_init(soc, true);
 
-	return QDF_STATUS_SUCCESS;
+	return qdf_status;
+fail:
+	dp_soc_deinit_be(soc);
+	return qdf_status;
 }
 
 static QDF_STATUS dp_pdev_attach_be(struct dp_pdev *pdev)

+ 70 - 18
dp/wifi3.0/be/dp_be.h

@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -24,6 +25,9 @@
 /* maximum number of entries in one page of secondary page table */
 #define DP_CC_SPT_PAGE_MAX_ENTRIES 512
 
+/* mask to extract the entry offset within one secondary page table page */
+#define DP_CC_SPT_PAGE_MAX_ENTRIES_MASK (DP_CC_SPT_PAGE_MAX_ENTRIES - 1)
+
 /* maximum number of entries in primary page table */
 #define DP_CC_PPT_MAX_ENTRIES 1024
 
@@ -76,6 +80,27 @@
 /* WBM2SW ring id for rx release */
 #define WBM2SW_REL_ERR_RING_NUM 5
 #endif
+
+/* TX descriptors are programmed at the start of the CMEM region */
+#define DP_TX_DESC_CMEM_OFFSET	0
+
+/* size of CMEM needed for a TX desc pool */
+#define DP_TX_DESC_POOL_CMEM_SIZE \
+	((WLAN_CFG_NUM_TX_DESC_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
+	 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)
+
+/* offset of the RX descriptor pool in CMEM */
+#define DP_RX_DESC_CMEM_OFFSET \
+	DP_TX_DESC_CMEM_OFFSET + (MAX_TXDESC_POOLS * DP_TX_DESC_POOL_CMEM_SIZE)
+
+/* size of CMEM needed for a rx desc pool */
+#define DP_RX_DESC_POOL_CMEM_SIZE \
+	((WLAN_CFG_RX_SW_DESC_NUM_SIZE_MAX / DP_CC_SPT_PAGE_MAX_ENTRIES) * \
+	 DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)
+
+/* get ppt_id from CMEM_OFFSET */
+#define DP_CMEM_OFFSET_TO_PPT_ID(offset) \
+	((offset) / DP_CC_PPT_ENTRY_SIZE_4K_ALIGNED)
 /**
  * struct dp_spt_page_desc - secondary page table page descriptors
  * @next: pointer to next linked SPT page Desc
@@ -86,28 +111,23 @@
  * @avail_entry_index: index for available entry that store TX/RX Desc VA
  */
 struct dp_spt_page_desc {
-	struct dp_spt_page_desc *next;
 	uint8_t *page_v_addr;
 	qdf_dma_addr_t page_p_addr;
-	uint16_t ppt_index;
-	uint16_t avail_entry_index;
+	uint32_t ppt_index;
 };
 
 /**
  * struct dp_hw_cookie_conversion_t - main context for HW cookie conversion
- * @cmem_base: CMEM base address for primary page table setup
+ * @cmem_offset: CMEM offset from base address for primary page table setup
  * @total_page_num: total DDR page allocated
- * @free_page_num: available DDR page number for TX/RX Desc ID initialization
  * @page_desc_freelist: available page Desc list
  * @page_desc_base: page Desc buffer base address.
  * @page_pool: DDR pages pool
  * @cc_lock: locks for page acquiring/free
  */
 struct dp_hw_cookie_conversion_t {
-	uint32_t cmem_base;
+	uint32_t cmem_offset;
 	uint32_t total_page_num;
-	uint32_t free_page_num;
-	struct dp_spt_page_desc *page_desc_freelist;
 	struct dp_spt_page_desc *page_desc_base;
 	struct qdf_mem_multi_page_t page_pool;
 	qdf_spinlock_t cc_lock;
@@ -148,9 +168,9 @@ struct dp_tx_bank_profile {
  * @soc: dp soc structure
  * @num_bank_profiles: num TX bank profiles
  * @bank_profiles: bank profiles for various TX banks
- * @hw_cc_ctx: core context of HW cookie conversion
- * @tx_spt_page_desc: spt page desc allocated for TX desc pool
- * @rx_spt_page_desc: spt page desc allocated for RX desc pool
+ * @cc_cmem_base: cmem offset reserved for CC
+ * @tx_cc_ctx: Cookie conversion context for tx desc pools
+ * @rx_cc_ctx: Cookie conversion context for rx desc pools
  * @monitor_soc_be: BE specific monitor object
  */
 struct dp_soc_be {
@@ -158,9 +178,10 @@ struct dp_soc_be {
 	uint8_t num_bank_profiles;
 	qdf_mutex_t tx_bank_lock;
 	struct dp_tx_bank_profile *bank_profiles;
-	struct dp_hw_cookie_conversion_t hw_cc_ctx;
-	struct dp_spt_page_desc_list tx_spt_page_desc[MAX_TXDESC_POOLS];
-	struct dp_spt_page_desc_list rx_spt_page_desc[MAX_RXDESC_POOLS];
+	struct dp_spt_page_desc *page_desc_base;
+	uint32_t cc_cmem_base;
+	struct dp_hw_cookie_conversion_t tx_cc_ctx[MAX_TXDESC_POOLS];
+	struct dp_hw_cookie_conversion_t rx_cc_ctx[MAX_RXDESC_POOLS];
 #ifdef WLAN_SUPPORT_PPEDS
 	struct dp_srng reo2ppe_ring;
 	struct dp_srng ppe2tcl_ring;
@@ -276,6 +297,22 @@ struct dp_peer_be *dp_get_be_peer_from_dp_peer(struct dp_peer *peer)
 	return (struct dp_peer_be *)peer;
 }
 
+QDF_STATUS
+dp_hw_cookie_conversion_attach(struct dp_soc_be *be_soc,
+			       struct dp_hw_cookie_conversion_t *cc_ctx,
+			       uint32_t num_descs,
+			       enum dp_desc_type desc_type,
+			       uint8_t desc_pool_id);
+
+QDF_STATUS
+dp_hw_cookie_conversion_detach(struct dp_soc_be *be_soc,
+			       struct dp_hw_cookie_conversion_t *cc_ctx);
+QDF_STATUS
+dp_hw_cookie_conversion_init(struct dp_soc_be *be_soc,
+			     struct dp_hw_cookie_conversion_t *cc_ctx);
+QDF_STATUS
+dp_hw_cookie_conversion_deinit(struct dp_soc_be *be_soc,
+			       struct dp_hw_cookie_conversion_t *cc_ctx);
 /**
  * dp_cc_spt_page_desc_alloc() - allocate SPT DDR page descriptor from pool
  * @be_soc: beryllium soc handler
@@ -311,7 +348,7 @@ void dp_cc_spt_page_desc_free(struct dp_soc_be *be_soc,
  *
  * Return: cookie ID
  */
-static inline uint32_t dp_cc_desc_id_generate(uint16_t ppt_index,
+static inline uint32_t dp_cc_desc_id_generate(uint32_t ppt_index,
 					      uint16_t spt_index)
 {
 	/*
@@ -337,12 +374,10 @@ static inline uintptr_t dp_cc_desc_find(struct dp_soc *soc,
 					uint32_t desc_id)
 {
 	struct dp_soc_be *be_soc;
-	struct dp_hw_cookie_conversion_t *cc_ctx;
 	uint16_t ppt_page_id, spt_va_id;
 	uint8_t *spt_page_va;
 
 	be_soc = dp_get_be_soc_from_dp_soc(soc);
-	cc_ctx = &be_soc->hw_cc_ctx;
 	ppt_page_id = (desc_id & DP_CC_DESC_ID_PPT_PAGE_OS_MASK) >>
 			DP_CC_DESC_ID_PPT_PAGE_OS_SHIFT;
 
@@ -355,7 +390,7 @@ static inline uintptr_t dp_cc_desc_find(struct dp_soc *soc,
 	 * entry size in DDR page is 64 bits, for 32 bits system,
 	 * only lower 32 bits VA value is needed.
 	 */
-	spt_page_va = cc_ctx->page_desc_base[ppt_page_id].page_v_addr;
+	spt_page_va = be_soc->page_desc_base[ppt_page_id].page_v_addr;
 
 	return (*((uintptr_t *)(spt_page_va  +
 				spt_va_id * DP_CC_HW_READ_BYTES)));
@@ -471,4 +506,21 @@ _dp_srng_test_and_update_nf_params(struct dp_soc *soc,
 }
 #endif
 
+static inline
+uint32_t dp_desc_pool_get_cmem_base(uint8_t chip_id, uint8_t desc_pool_id,
+				    enum dp_desc_type desc_type)
+{
+	switch (desc_type) {
+	case DP_TX_DESC_TYPE:
+		return (DP_TX_DESC_CMEM_OFFSET +
+			(desc_pool_id * DP_TX_DESC_POOL_CMEM_SIZE));
+	case DP_RX_DESC_BUF_TYPE:
+		return (DP_RX_DESC_CMEM_OFFSET +
+			((chip_id * MAX_RXDESC_POOLS) + desc_pool_id) *
+			DP_RX_DESC_POOL_CMEM_SIZE);
+	default:
+			QDF_BUG(0);
+	}
+	return 0;
+}
 #endif

+ 57 - 69
dp/wifi3.0/be/dp_be_rx.c

@@ -866,45 +866,47 @@ dp_rx_desc_pool_init_be_cc(struct dp_soc *soc,
 			   struct rx_desc_pool *rx_desc_pool,
 			   uint32_t pool_id)
 {
+	struct dp_hw_cookie_conversion_t *cc_ctx;
 	struct dp_soc_be *be_soc;
 	union dp_rx_desc_list_elem_t *rx_desc_elem;
 	struct dp_spt_page_desc *page_desc;
-	struct dp_spt_page_desc_list *page_desc_list;
+	uint32_t ppt_idx = 0;
+	uint32_t avail_entry_index = 0;
 
-	be_soc = dp_get_be_soc_from_dp_soc(soc);
-	page_desc_list = &be_soc->rx_spt_page_desc[pool_id];
-
-	/* allocate SPT pages from page desc pool */
-	page_desc_list->num_spt_pages =
-		dp_cc_spt_page_desc_alloc(be_soc,
-					  &page_desc_list->spt_page_list_head,
-					  &page_desc_list->spt_page_list_tail,
-					  rx_desc_pool->pool_size);
-
-	if (!page_desc_list->num_spt_pages) {
-		dp_err("fail to allocate cookie conversion spt pages");
+	if (!rx_desc_pool->pool_size) {
+		dp_err("desc_num 0 !!");
 		return QDF_STATUS_E_FAILURE;
 	}
 
-	/* put each RX Desc VA to SPT pages and get corresponding ID */
-	page_desc = page_desc_list->spt_page_list_head;
+	be_soc = dp_get_be_soc_from_dp_soc(soc);
+	cc_ctx  = &be_soc->rx_cc_ctx[pool_id];
+
+	page_desc = &cc_ctx->page_desc_base[0];
 	rx_desc_elem = rx_desc_pool->freelist;
 	while (rx_desc_elem) {
+		if (avail_entry_index == 0) {
+			if (ppt_idx >= cc_ctx->total_page_num) {
+				dp_alert("insufficient secondary page tables");
+				qdf_assert_always(0);
+			}
+			/* put each RX Desc VA to SPT pages and
+			 * get corresponding ID
+			 */
+			page_desc = &cc_ctx->page_desc_base[ppt_idx++];
+		}
+
 		DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
-					 page_desc->avail_entry_index,
+					 avail_entry_index,
 					 &rx_desc_elem->rx_desc);
-
 		rx_desc_elem->rx_desc.cookie =
 			dp_cc_desc_id_generate(page_desc->ppt_index,
-					       page_desc->avail_entry_index);
+					       avail_entry_index);
 		rx_desc_elem->rx_desc.pool_id = pool_id;
 		rx_desc_elem->rx_desc.in_use = 0;
 		rx_desc_elem = rx_desc_elem->next;
 
-		page_desc->avail_entry_index++;
-		if (page_desc->avail_entry_index >=
-				DP_CC_SPT_PAGE_MAX_ENTRIES)
-			page_desc = page_desc->next;
+		avail_entry_index = (avail_entry_index + 1) &
+					DP_CC_SPT_PAGE_MAX_ENTRIES_MASK;
 	}
 
 	return QDF_STATUS_SUCCESS;
@@ -915,29 +917,22 @@ dp_rx_desc_pool_init_be_cc(struct dp_soc *soc,
 			   struct rx_desc_pool *rx_desc_pool,
 			   uint32_t pool_id)
 {
+	struct dp_hw_cookie_conversion_t *cc_ctx;
 	struct dp_soc_be *be_soc;
 	struct dp_spt_page_desc *page_desc;
-	struct dp_spt_page_desc_list *page_desc_list;
-	int i;
+	uint32_t ppt_idx = 0;
+	uint32_t avail_entry_index = 0;
+	int i = 0;
 
-	be_soc = dp_get_be_soc_from_dp_soc(soc);
-	page_desc_list = &be_soc->rx_spt_page_desc[pool_id];
-
-	/* allocate SPT pages from page desc pool */
-	page_desc_list->num_spt_pages =
-			dp_cc_spt_page_desc_alloc(
-					be_soc,
-					&page_desc_list->spt_page_list_head,
-					&page_desc_list->spt_page_list_tail,
-					rx_desc_pool->pool_size);
-
-	if (!page_desc_list->num_spt_pages) {
-		dp_err("fail to allocate cookie conversion spt pages");
+	if (!rx_desc_pool->pool_size) {
+		dp_err("desc_num 0 !!");
 		return QDF_STATUS_E_FAILURE;
 	}
 
-	/* put each RX Desc VA to SPT pages and get corresponding ID */
-	page_desc = page_desc_list->spt_page_list_head;
+	be_soc = dp_get_be_soc_from_dp_soc(soc);
+	cc_ctx  = &be_soc->rx_cc_ctx[pool_id];
+
+	page_desc = &cc_ctx->page_desc_base[0];
 	for (i = 0; i <= rx_desc_pool->pool_size - 1; i++) {
 		if (i == rx_desc_pool->pool_size - 1)
 			rx_desc_pool->array[i].next = NULL;
@@ -945,23 +940,29 @@ dp_rx_desc_pool_init_be_cc(struct dp_soc *soc,
 			rx_desc_pool->array[i].next =
 				&rx_desc_pool->array[i + 1];
 
+		if (avail_entry_index == 0) {
+			if (ppt_idx >= cc_ctx->total_page_num) {
+				dp_alert("insufficient secondary page tables");
+				qdf_assert_always(0);
+			}
+			/* put each RX Desc VA to SPT pages and
+			 * get corresponding ID
+			 */
+			page_desc = &cc_ctx->page_desc_base[ppt_idx++];
+		}
+
 		DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
-					 page_desc->avail_entry_index,
+					 avail_entry_index,
 					 &rx_desc_pool->array[i].rx_desc);
-
 		rx_desc_pool->array[i].rx_desc.cookie =
 			dp_cc_desc_id_generate(page_desc->ppt_index,
-					       page_desc->avail_entry_index);
-
+					       avail_entry_index);
 		rx_desc_pool->array[i].rx_desc.pool_id = pool_id;
 		rx_desc_pool->array[i].rx_desc.in_use = 0;
 
-		page_desc->avail_entry_index++;
-		if (page_desc->avail_entry_index >=
-				DP_CC_SPT_PAGE_MAX_ENTRIES)
-			page_desc = page_desc->next;
+		avail_entry_index = (avail_entry_index + 1) &
+					DP_CC_SPT_PAGE_MAX_ENTRIES_MASK;
 	}
-
 	return QDF_STATUS_SUCCESS;
 }
 #endif
@@ -971,32 +972,18 @@ dp_rx_desc_pool_deinit_be_cc(struct dp_soc *soc,
 			     struct rx_desc_pool *rx_desc_pool,
 			     uint32_t pool_id)
 {
-	struct dp_soc_be *be_soc;
 	struct dp_spt_page_desc *page_desc;
-	struct dp_spt_page_desc_list *page_desc_list;
+	struct dp_soc_be *be_soc;
+	int i = 0;
+	struct dp_hw_cookie_conversion_t *cc_ctx;
 
 	be_soc = dp_get_be_soc_from_dp_soc(soc);
-	page_desc_list = &be_soc->rx_spt_page_desc[pool_id];
-
-	if (!page_desc_list->num_spt_pages) {
-		dp_warn("page_desc_list is empty for pool_id %d", pool_id);
-		return;
-	}
+	cc_ctx  = &be_soc->rx_cc_ctx[pool_id];
 
-	/* cleanup for each page */
-	page_desc = page_desc_list->spt_page_list_head;
-	while (page_desc) {
-		page_desc->avail_entry_index = 0;
+	for (i = 0; i < cc_ctx->total_page_num; i++) {
+		page_desc = &cc_ctx->page_desc_base[i];
 		qdf_mem_zero(page_desc->page_v_addr, qdf_page_size);
-		page_desc = page_desc->next;
 	}
-
-	/* free pages desc back to pool */
-	dp_cc_spt_page_desc_free(be_soc,
-				 &page_desc_list->spt_page_list_head,
-				 &page_desc_list->spt_page_list_tail,
-				 page_desc_list->num_spt_pages);
-	page_desc_list->num_spt_pages = 0;
 }
 
 QDF_STATUS dp_rx_desc_pool_init_be(struct dp_soc *soc,
@@ -1013,7 +1000,8 @@ QDF_STATUS dp_rx_desc_pool_init_be(struct dp_soc *soc,
 						    pool_id);
 	} else {
 		dp_info("non_rx_desc_buf_pool init");
-		status = dp_rx_desc_pool_init_generic(soc, rx_desc_pool, pool_id);
+		status = dp_rx_desc_pool_init_generic(soc, rx_desc_pool,
+						      pool_id);
 	}
 
 	return status;

+ 25 - 43
dp/wifi3.0/be/dp_be_tx.c

@@ -453,10 +453,12 @@ QDF_STATUS dp_tx_desc_pool_init_be(struct dp_soc *soc,
 				   uint8_t pool_id)
 {
 	struct dp_tx_desc_pool_s *tx_desc_pool;
+	struct dp_hw_cookie_conversion_t *cc_ctx;
 	struct dp_soc_be *be_soc;
 	struct dp_spt_page_desc *page_desc;
-	struct dp_spt_page_desc_list *page_desc_list;
 	struct dp_tx_desc_s *tx_desc;
+	uint32_t ppt_idx = 0;
+	uint32_t avail_entry_index = 0;
 
 	if (!num_elem) {
 		dp_err("desc_num 0 !!");
@@ -465,37 +467,31 @@ QDF_STATUS dp_tx_desc_pool_init_be(struct dp_soc *soc,
 
 	be_soc = dp_get_be_soc_from_dp_soc(soc);
 	tx_desc_pool = &soc->tx_desc[pool_id];
-	page_desc_list = &be_soc->tx_spt_page_desc[pool_id];
+	cc_ctx  = &be_soc->tx_cc_ctx[pool_id];
 
-	/* allocate SPT pages from page desc pool */
-	page_desc_list->num_spt_pages =
-		dp_cc_spt_page_desc_alloc(be_soc,
-					  &page_desc_list->spt_page_list_head,
-					  &page_desc_list->spt_page_list_tail,
-					  num_elem);
-
-	if (!page_desc_list->num_spt_pages) {
-		dp_err("fail to allocate cookie conversion spt pages");
-		return QDF_STATUS_E_FAILURE;
-	}
-
-	/* put each TX Desc VA to SPT pages and get corresponding ID */
-	page_desc = page_desc_list->spt_page_list_head;
 	tx_desc = tx_desc_pool->freelist;
 	while (tx_desc) {
+		if (avail_entry_index == 0) {
+			if (ppt_idx >= cc_ctx->total_page_num) {
+				dp_alert("insufficient secondary page tables");
+				qdf_assert_always(0);
+			}
+			/* put each TX Desc VA to SPT pages and
+			 * get corresponding ID
+			 */
+			page_desc = &cc_ctx->page_desc_base[ppt_idx++];
+		}
 		DP_CC_SPT_PAGE_UPDATE_VA(page_desc->page_v_addr,
-					 page_desc->avail_entry_index,
+					 avail_entry_index,
 					 tx_desc);
 		tx_desc->id =
 			dp_cc_desc_id_generate(page_desc->ppt_index,
-					       page_desc->avail_entry_index);
+					       avail_entry_index);
 		tx_desc->pool_id = pool_id;
-		tx_desc = tx_desc->next;
 
-		page_desc->avail_entry_index++;
-		if (page_desc->avail_entry_index >=
-				DP_CC_SPT_PAGE_MAX_ENTRIES)
-			page_desc = page_desc->next;
+		tx_desc = tx_desc->next;
+		avail_entry_index = (avail_entry_index + 1) &
+					DP_CC_SPT_PAGE_MAX_ENTRIES_MASK;
 	}
 
 	return QDF_STATUS_SUCCESS;
@@ -505,32 +501,18 @@ void dp_tx_desc_pool_deinit_be(struct dp_soc *soc,
 			       struct dp_tx_desc_pool_s *tx_desc_pool,
 			       uint8_t pool_id)
 {
-	struct dp_soc_be *be_soc;
 	struct dp_spt_page_desc *page_desc;
-	struct dp_spt_page_desc_list *page_desc_list;
+	struct dp_soc_be *be_soc;
+	int i = 0;
+	struct dp_hw_cookie_conversion_t *cc_ctx;
 
 	be_soc = dp_get_be_soc_from_dp_soc(soc);
-	page_desc_list = &be_soc->tx_spt_page_desc[pool_id];
+	cc_ctx  = &be_soc->tx_cc_ctx[pool_id];
 
-	if (!page_desc_list->num_spt_pages) {
-		dp_warn("page_desc_list is empty for pool_id %d", pool_id);
-		return;
-	}
-
-	/* cleanup for each page */
-	page_desc = page_desc_list->spt_page_list_head;
-	while (page_desc) {
-		page_desc->avail_entry_index = 0;
+	for (i = 0; i < cc_ctx->total_page_num; i++) {
+		page_desc = &cc_ctx->page_desc_base[i];
 		qdf_mem_zero(page_desc->page_v_addr, qdf_page_size);
-		page_desc = page_desc->next;
 	}
-
-	/* free pages desc back to pool */
-	dp_cc_spt_page_desc_free(be_soc,
-				 &page_desc_list->spt_page_list_head,
-				 &page_desc_list->spt_page_list_tail,
-				 page_desc_list->num_spt_pages);
-	page_desc_list->num_spt_pages = 0;
 }
 
 #ifdef WLAN_FEATURE_NEAR_FULL_IRQ