
qcacld-3.0: Limit the IPA tx buffer for Genoa

To improve the Genoa DBS KPI, the IPA tx complete ring
size needs to be increased from 1024 to 2048, which would
accordingly raise the tx buffer count from 1023 (1024 - 1)
to 2047 (2048 - 1). However, the wlan fw only reserves
1500 tx descriptors (1100 for 5G, 400 for 2.4G) for
packets from IPA, so some of the tx buffers would never be
used and the memory backing them would be wasted. To save
that memory, export CONFIG_LIMIT_IPA_TX_BUFFER for
platform configuration to limit the maximum IPA tx buffer
count.

Change-Id: Id5d4c5a220b588747f3d1981627f6253d7c3305b
Chaoli Zhou, 4 years ago
Parent
Commit 075cb24e55
2 changed files with 48 additions and 30 deletions:
  1. Kbuild (+4, -0)
  2. core/dp/htt/htt_tx.c (+44, -30)

+ 4 - 0
Kbuild

@@ -3535,6 +3535,10 @@ ifdef CONFIG_BEACON_TX_OFFLOAD_MAX_VDEV
 ccflags-y += -DCFG_TGT_DEFAULT_BEACON_TX_OFFLOAD_MAX_VDEV=$(CONFIG_BEACON_TX_OFFLOAD_MAX_VDEV)
 endif
 
+ifdef CONFIG_LIMIT_IPA_TX_BUFFER
+ccflags-y += -DLIMIT_IPA_TX_BUFFER=$(CONFIG_LIMIT_IPA_TX_BUFFER)
+endif
+
 ifdef CONFIG_LOCK_STATS_ON
 ccflags-y += -DQDF_LOCK_STATS=1
 ccflags-y += -DQDF_LOCK_STATS_DESTROY_PRINT=0

+ 44 - 30
core/dp/htt/htt_tx.c

@@ -1076,6 +1076,40 @@ void htt_tx_desc_display(void *tx_desc)
 
 #ifdef IPA_OFFLOAD
 #ifdef QCA_WIFI_3_0
+
+#ifndef LIMIT_IPA_TX_BUFFER
+#define LIMIT_IPA_TX_BUFFER 2048
+#endif
+
+/**
+ * htt_tx_ipa_get_limit_tx_buf_count() - Limit the WDI TX buffer count
+ * @uc_tx_buf_cnt: requested TX buffer count
+ *
+ * Return: TX buffer count, capped at LIMIT_IPA_TX_BUFFER
+ */
+static int htt_tx_ipa_get_limit_tx_buf_count(unsigned int uc_tx_buf_cnt)
+{
+	/* To improve the Genoa IPA DBS KPI, IpaUcTxBufCount is set to
+	 * 2048, so the tx complete ring size is 2048 and the total tx
+	 * buffer count would be 2047.
+	 * However, the wlan fw only has 1100 tx descriptors for 5G plus
+	 * 400 for 2.4G, so it can only cover about 1500 packets from
+	 * the IPA side.
+	 * The remaining 2047 - 1500 buffers would never be used, so to
+	 * save memory LIMIT_IPA_TX_BUFFER limits the maximum tx buffer
+	 * count; the value can vary per platform.
+	 * As a result, the tx buffer count no longer has to equal the
+	 * tx complete ring size - 1.
+	 * Testing shows the same KPI is achieved with
+	 * LIMIT_IPA_TX_BUFFER=1500 or 2048.
+	 */
+	if (uc_tx_buf_cnt > LIMIT_IPA_TX_BUFFER)
+		return LIMIT_IPA_TX_BUFFER;
+	else
+		return uc_tx_buf_cnt;
+}
+
 /**
  * htt_tx_ipa_uc_wdi_tx_buf_alloc() - Alloc WDI TX buffers
  * @pdev: htt context
@@ -1094,11 +1128,9 @@ static int htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t *pdev,
 					  unsigned int uc_tx_partition_base)
 {
 	unsigned int tx_buffer_count;
-	unsigned int  tx_buffer_count_pwr2;
 	qdf_dma_addr_t buffer_paddr;
 	uint32_t *header_ptr;
 	target_paddr_t *ring_vaddr;
-	uint16_t idx;
 	qdf_mem_info_t *mem_map_table = NULL, *mem_info = NULL;
 	qdf_shared_mem_t *shared_tx_buffer;
 
@@ -1121,7 +1153,7 @@ static int htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t *pdev,
 		if (!shared_tx_buffer || !shared_tx_buffer->vaddr) {
 			qdf_print("IPA WDI TX buffer alloc fail %d allocated\n",
 				tx_buffer_count);
-			goto pwr2;
+			goto out;
 		}
 
 		header_ptr = shared_tx_buffer->vaddr;
@@ -1172,38 +1204,14 @@ static int htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t *pdev,
 		}
 	}
 
-pwr2:
-	/*
-	 * Tx complete ring buffer count should be power of 2.
-	 * So, allocated Tx buffer count should be one less than ring buffer
-	 * size.
-	 */
-	tx_buffer_count_pwr2 = qdf_rounddown_pow_of_two(tx_buffer_count + 1)
-			       - 1;
-	if (tx_buffer_count > tx_buffer_count_pwr2) {
-		QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO,
-			  "%s: Allocated Tx buffer count %d is rounded down to %d",
-			  __func__, tx_buffer_count, tx_buffer_count_pwr2);
-
-		/* Free over allocated buffers below power of 2 */
-		for (idx = tx_buffer_count_pwr2; idx < tx_buffer_count; idx++) {
-			if (pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[idx]) {
-				qdf_mem_shared_mem_free(pdev->osdev,
-					pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[
-								idx]);
-				 pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[idx] =
-									NULL;
-			}
-		}
-	}
-
+out:
 	if (qdf_mem_smmu_s1_enabled(pdev->osdev)) {
-		cds_smmu_map_unmap(true, tx_buffer_count_pwr2,
+		cds_smmu_map_unmap(true, tx_buffer_count,
 				   mem_map_table);
 		qdf_mem_free(mem_map_table);
 	}
 
-	return tx_buffer_count_pwr2;
+	return tx_buffer_count;
 }
 
 /**
@@ -1252,6 +1260,11 @@ static void htt_tx_buf_pool_free(struct htt_pdev_t *pdev)
 	}
 }
 #else
+static int htt_tx_ipa_get_limit_tx_buf_count(unsigned int uc_tx_buf_cnt)
+{
+	return uc_tx_buf_cnt;
+}
+
 static int htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t *pdev,
 					  unsigned int uc_tx_buf_sz,
 					  unsigned int uc_tx_buf_cnt,
@@ -1436,6 +1449,7 @@ int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
 		goto free_tx_ce_idx;
 	}
 
+	uc_tx_buf_cnt = htt_tx_ipa_get_limit_tx_buf_count(uc_tx_buf_cnt);
 	/* Allocate TX BUF vAddress Storage */
 	pdev->ipa_uc_tx_rsc.tx_buf_pool_strg =
 		qdf_mem_malloc(uc_tx_buf_cnt *
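To make the saving concrete, here is a small, self-contained sketch of the clamp performed by htt_tx_ipa_get_limit_tx_buf_count() and the memory it avoids allocating; LIMIT_IPA_TX_BUFFER=1500 and a per-buffer size of 2048 bytes are illustrative assumptions, not values taken from this change:

/* Standalone illustration only; LIMIT_IPA_TX_BUFFER=1500 and
 * uc_tx_buf_sz=2048 bytes are assumptions made for this example.
 */
#include <stdio.h>

#define LIMIT_IPA_TX_BUFFER 1500

int main(void)
{
	unsigned int ring_size = 2048;              /* IpaUcTxBufCount */
	unsigned int uc_tx_buf_cnt = ring_size - 1; /* 2047 buffers before the limit */
	unsigned int uc_tx_buf_sz = 2048;           /* bytes per shared tx buffer */
	unsigned int limited = uc_tx_buf_cnt > LIMIT_IPA_TX_BUFFER ?
				LIMIT_IPA_TX_BUFFER : uc_tx_buf_cnt;

	/* (2047 - 1500) * 2048 bytes ~= 1094 KB that is never allocated */
	printf("buffers: %u -> %u, saved %u KB\n",
	       uc_tx_buf_cnt, limited,
	       (uc_tx_buf_cnt - limited) * uc_tx_buf_sz / 1024);
	return 0;
}

With these assumed values the pool shrinks from 2047 to 1500 buffers, roughly 1 MB of shared tx buffer memory that no longer has to be allocated.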