qcacld-3.0: Map IPA tx buffers after IPA ready callback registration

Map the IPA TX buffers only after the IPA ready callback has
been registered with the IPA driver. Otherwise the ipa_is_ready
flag is still false when cds_smmu_map_unmap() is called from
htt_tx_ipa_uc_wdi_tx_buf_alloc(), so the TX buffer SMMU mapping
never actually takes effect.

Change-Id: Ib2fee8e5b68d5ba06c8079d39c0a5695087cbc2b
Author: Chaoli Zhou
Commit: 781829f879
4 changed files with 70 additions and 40 deletions:

  core/dp/htt/htt_tx.c         +0  -39
  core/dp/txrx/ol_txrx.c       +2   -1
  core/dp/txrx/ol_txrx_ipa.c  +57   -0
  core/dp/txrx/ol_txrx_ipa.h  +11   -0
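
For context, a minimal sketch of the corrected ordering, assuming a
hypothetical init path (wlan_ipa_ready_cb and wlan_ipa_init_sketch are
illustrative names; qdf_ipa_register_ipa_ready_cb is assumed to be the
QDF wrapper for ready-callback registration):

	/* Illustrative sketch only: the real registration plumbing
	 * lives in the IPA component, outside this diff.
	 */
	static void wlan_ipa_ready_cb(void *user_data)
	{
		struct cdp_soc_t *soc = user_data;

		/* IPA driver is up, so ipa_is_ready is true and the
		 * SMMU map issued here actually takes effect.
		 */
		soc->ops->ipa_ops->ipa_tx_buf_smmu_mapping(soc, 0 /* pdev_id */);
	}

	static int wlan_ipa_init_sketch(struct cdp_soc_t *soc)
	{
		/* Register with the IPA driver first, so the map
		 * request can be deferred until IPA is actually ready.
		 */
		if (qdf_ipa_register_ipa_ready_cb(wlan_ipa_ready_cb, soc))
			return -EFAULT;

		/* TX buffers may still be allocated earlier; only their
		 * SMMU mapping moves into the callback above.
		 */
		return 0;
	}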

+ 0 - 39
core/dp/htt/htt_tx.c

@@ -1131,18 +1131,9 @@ static int htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t *pdev,
 	qdf_dma_addr_t buffer_paddr;
 	uint32_t *header_ptr;
 	target_paddr_t *ring_vaddr;
-	qdf_mem_info_t *mem_map_table = NULL, *mem_info = NULL;
 	qdf_shared_mem_t *shared_tx_buffer;
 
 	ring_vaddr = (target_paddr_t *)pdev->ipa_uc_tx_rsc.tx_comp_ring->vaddr;
-	if (qdf_mem_smmu_s1_enabled(pdev->osdev)) {
-		mem_map_table = qdf_mem_map_table_alloc(uc_tx_buf_cnt);
-		if (!mem_map_table) {
-			qdf_print("Failed to allocate memory");
-			return 0;
-		}
-		mem_info = mem_map_table;
-	}
 
 	/* Allocate TX buffers as many as possible */
 	for (tx_buffer_count = 0;
@@ -1197,19 +1188,9 @@ static int htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t *pdev,
 		/* Memory barrier to ensure actual value updated */
 
 		ring_vaddr++;
-		if (qdf_mem_smmu_s1_enabled(pdev->osdev)) {
-			*mem_info = pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[
-						tx_buffer_count]->mem_info;
-			 mem_info++;
-		}
 	}
 
 out:
-	if (qdf_mem_smmu_s1_enabled(pdev->osdev)) {
-		cds_smmu_map_unmap(true, tx_buffer_count,
-				   mem_map_table);
-		qdf_mem_free(mem_map_table);
-	}
 
 	return tx_buffer_count;
 }
@@ -1276,18 +1257,9 @@ static int htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t *pdev,
 	uint32_t *header_ptr;
 	uint32_t *ring_vaddr;
 	uint16_t idx;
-	qdf_mem_info_t *mem_map_table = NULL, *mem_info = NULL;
 	qdf_shared_mem_t *shared_tx_buffer;
 
 	ring_vaddr = pdev->ipa_uc_tx_rsc.tx_comp_ring->vaddr;
-	if (qdf_mem_smmu_s1_enabled(pdev->osdev)) {
-		mem_map_table = qdf_mem_map_table_alloc(uc_tx_buf_cnt);
-		if (!mem_map_table) {
-			qdf_print("Failed to allocate memory");
-			return 0;
-		}
-		mem_info = mem_map_table;
-	}
 
 	/* Allocate TX buffers as many as possible */
 	for (tx_buffer_count = 0;
@@ -1331,11 +1303,6 @@ static int htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t *pdev,
 		/* Memory barrier to ensure actual value updated */
 
 		ring_vaddr++;
-		if (qdf_mem_smmu_s1_enabled(pdev->osdev)) {
-			*mem_info = pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[
-						tx_buffer_count]->mem_info;
-			 mem_info++;
-		}
 	}
 
 pwr2:
@@ -1363,12 +1330,6 @@ pwr2:
 		}
 	}
 
-	if (qdf_mem_smmu_s1_enabled(pdev->osdev)) {
-		cds_smmu_map_unmap(true, tx_buffer_count_pwr2,
-				   mem_map_table);
-		qdf_mem_free(mem_map_table);
-	}
-
 	return tx_buffer_count_pwr2;
 }
 

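The mapping calls removed above were silently ineffective at allocation
time. A minimal model of the gate described in the commit message (the
body below is illustrative; the real cds_smmu_map_unmap() lives in CDS
and performs the actual SMMU map):

	static bool ipa_is_ready;	/* set once the IPA ready cb fires */

	/* Illustrative model only, not the real CDS implementation. */
	QDF_STATUS cds_smmu_map_unmap(bool map, uint32_t num_buf,
				      qdf_mem_info_t *buf_arr)
	{
		if (!ipa_is_ready)
			return QDF_STATUS_SUCCESS;	/* request dropped */

		return __do_smmu_map_unmap(map, num_buf, buf_arr); /* hypothetical */
	}
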
+ 2 - 1
core/dp/txrx/ol_txrx.c

@@ -6183,8 +6183,9 @@ static struct cdp_ipa_ops ol_ops_ipa = {
 	.ipa_set_perf_level = ol_txrx_ipa_set_perf_level,
 #ifdef FEATURE_METERING
 	.ipa_uc_get_share_stats = ol_txrx_ipa_uc_get_share_stats,
-	.ipa_uc_set_quota = ol_txrx_ipa_uc_set_quota
+	.ipa_uc_set_quota = ol_txrx_ipa_uc_set_quota,
 #endif
+	.ipa_tx_buf_smmu_mapping = ol_txrx_ipa_tx_buf_smmu_mapping
 };
 #endif
 

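Callers reach the new op through the cdp ops table. A hypothetical
dispatch helper in the style of the existing cdp_ipa_* wrappers (not
part of this diff):

	static inline QDF_STATUS
	cdp_ipa_tx_buf_smmu_mapping(ol_txrx_soc_handle soc, uint8_t pdev_id)
	{
		if (!soc || !soc->ops || !soc->ops->ipa_ops ||
		    !soc->ops->ipa_ops->ipa_tx_buf_smmu_mapping)
			return QDF_STATUS_E_FAILURE;

		return soc->ops->ipa_ops->ipa_tx_buf_smmu_mapping(soc, pdev_id);
	}
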
+ 57 - 0
core/dp/txrx/ol_txrx_ipa.c

@@ -280,6 +280,63 @@ QDF_STATUS ol_txrx_ipa_disable_autonomy(struct cdp_soc_t *soc_hdl,
 	return QDF_STATUS_SUCCESS;
 }
 
+static QDF_STATUS __ol_txrx_ipa_tx_buf_smmu_mapping(
+	struct ol_txrx_soc_t *soc,
+	struct ol_txrx_pdev_t *pdev,
+	bool create)
+{
+	uint32_t index;
+	QDF_STATUS ret = QDF_STATUS_SUCCESS;
+	struct htt_pdev_t *htt_pdev = pdev->htt_pdev;
+	uint32_t tx_buffer_cnt = htt_pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt;
+	qdf_mem_info_t *mem_map_table = NULL, *mem_info = NULL;
+
+	if (qdf_mem_smmu_s1_enabled(htt_pdev->osdev)) {
+		mem_map_table = qdf_mem_map_table_alloc(tx_buffer_cnt);
+		if (!mem_map_table) {
+			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+				  "Failed to allocate memory");
+			return QDF_STATUS_E_FAILURE;
+		}
+		mem_info = mem_map_table;
+	}
+
+	for (index = 0; index < tx_buffer_cnt; index++) {
+		if (!htt_pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[index])
+			continue;
+		*mem_info = htt_pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[
+						index]->mem_info;
+		ret = cds_smmu_map_unmap(create, 1, mem_info);
+		mem_info++;
+	}
+	if (qdf_mem_smmu_s1_enabled(htt_pdev->osdev))
+		qdf_mem_free(mem_map_table);
+
+	return ret;
+}
+
+QDF_STATUS ol_txrx_ipa_tx_buf_smmu_mapping(
+	struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
+{
+	QDF_STATUS ret;
+	struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
+	ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
+
+	if (!pdev) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			  "invalid instance");
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	if (!qdf_mem_smmu_s1_enabled(pdev->htt_pdev->osdev)) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
+			  "SMMU S1 disabled");
+		return QDF_STATUS_SUCCESS;
+	}
+	ret = __ol_txrx_ipa_tx_buf_smmu_mapping(soc, pdev, true);
+	return ret;
+}
+
 #ifdef CONFIG_IPA_WDI_UNIFIED_API
 
 #ifndef QCA_LL_TX_FLOW_CONTROL_V2

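A hedged usage example of the new entry point (OL_TXRX_PDEV_ID is
assumed to be the usual default pdev id macro; the caller name is
illustrative):

	/* Hypothetical caller: map TX buffers once IPA reports ready. */
	static void wlan_ipa_map_tx_bufs(struct cdp_soc_t *soc_hdl)
	{
		QDF_STATUS status;

		status = ol_txrx_ipa_tx_buf_smmu_mapping(soc_hdl,
							 OL_TXRX_PDEV_ID);
		if (QDF_IS_STATUS_ERROR(status))
			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
				  "IPA TX buf SMMU mapping failed");
	}
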
+ 11 - 0
core/dp/txrx/ol_txrx_ipa.h

@@ -210,6 +210,17 @@ QDF_STATUS ol_txrx_ipa_enable_autonomy(struct cdp_soc_t *soc_hdl,
 QDF_STATUS ol_txrx_ipa_disable_autonomy(struct cdp_soc_t *soc_hdl,
 					uint8_t pdev_id);
 
+/**
+ * ol_txrx_ipa_tx_buf_smmu_mapping() - Create SMMU mappings for IPA
+ *				       allocated TX buffers
+ * @soc_hdl: handle to the soc
+ * @pdev_id: pdev id number, to get the handle
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS ol_txrx_ipa_tx_buf_smmu_mapping(
+	struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
+
 #ifdef CONFIG_IPA_WDI_UNIFIED_API
 /**
  * ol_txrx_ipa_setup() - Setup and connect IPA pipes