
qcacld-3.0: Map/Unmap IPA RX buffers at driver load/unload time

Map/Unmap IPA RX buffers at driver load/unload time when WLAN
SMMU is enabled.

Change-Id: Ibe2c5d234cc67a18979aed9af273d2340beb124a
CRs-Fixed: 2088439
Sravan Kumar Kairam, 7 years ago
parent
commit 4329c5fea8
7 changed files with 136 additions and 83 deletions
  1. core/cds/src/cds_api.c: +8 -0
  2. core/dp/htt/htt.c: +4 -1
  3. core/dp/htt/htt_h2t.c: +0 -10
  4. core/dp/htt/htt_internal.h: +0 -12
  5. core/dp/htt/htt_rx.c: +69 -45
  6. core/dp/htt/htt_tx.c: +54 -14
  7. core/dp/htt/htt_types.h: +1 -1

+ 8 - 0
core/cds/src/cds_api.c

@@ -2862,10 +2862,18 @@ void cds_smmu_mem_map_setup(qdf_device_t osdev)
 		osdev->smmu_s1_enabled = true;
 }
 
+#ifdef IPA_OFFLOAD
 int cds_smmu_map_unmap(bool map, uint32_t num_buf, qdf_mem_info_t *buf_arr)
 {
 	return hdd_ipa_uc_smmu_map(map, num_buf, buf_arr);
 }
+#else
+int cds_smmu_map_unmap(bool map, uint32_t num_buf, qdf_mem_info_t *buf_arr)
+{
+	return 0;
+}
+#endif
+
 #else
 void cds_smmu_mem_map_setup(qdf_device_t osdev)
 {
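The new stub keeps cds_smmu_map_unmap() linkable in builds without IPA_OFFLOAD: the HTT paths now reach it gated only by runtime checks, so returning 0 lets those callers proceed as a successful no-op.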

+ 4 - 1
core/dp/htt/htt.c

@@ -465,6 +465,10 @@ htt_attach(struct htt_pdev_t *pdev, int desc_pool_size)
 	int i;
 	int ret = 0;
 
+	pdev->is_ipa_uc_enabled = false;
+	if (ol_cfg_ipa_uc_offload_enabled(pdev->ctrl_pdev))
+		pdev->is_ipa_uc_enabled = true;
+
 	ret = htt_tx_attach(pdev, desc_pool_size);
 	if (ret)
 		goto fail1;
@@ -843,7 +847,6 @@ int htt_ipa_uc_attach(struct htt_pdev_t *pdev)
 	QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_DEBUG, "%s: enter",
 		  __func__);
 
-	pdev->uc_map_reqd = 0;
 	/* TX resource attach */
 	error = htt_tx_ipa_uc_attach(
 		pdev,

+ 0 - 10
core/dp/htt/htt_h2t.c

@@ -1267,16 +1267,6 @@ int htt_h2t_ipa_uc_set_active(struct htt_pdev_t *pdev,
 	pkt->msdu_id = HTT_TX_COMPL_INV_MSDU_ID;
 	pkt->pdev_ctxt = NULL;  /* not used during send-done callback */
 
-	if (qdf_mem_smmu_s1_enabled(pdev->osdev) && uc_active && !is_tx) {
-		if (htt_rx_ipa_uc_buf_pool_map(pdev)) {
-			qdf_print("%s: Unable to create mapping for IPA rx buffers\n",
-				  __func__);
-			htt_htc_pkt_free(pdev, pkt);
-			return -A_NO_MEMORY;
-		}
-		pdev->uc_map_reqd = 1;
-	}
-
 	/* reserve room for HTC header */
 	msg = qdf_nbuf_alloc(pdev->osdev,
 			     HTT_MSG_BUF_SIZE(HTT_WDI_IPA_OP_REQUEST_SZ),
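This removal is the point of the change: RX buffers are no longer mapped when the IPA pipes are activated. By the time htt_h2t_ipa_uc_set_active() runs, the pool has already been mapped as it was filled, so the -A_NO_MEMORY failure path and the uc_map_reqd latch both go away.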

+ 0 - 12
core/dp/htt/htt_internal.h

@@ -589,13 +589,6 @@ int htt_tx_ipa_uc_detach(struct htt_pdev_t *pdev);
 
 int htt_rx_ipa_uc_detach(struct htt_pdev_t *pdev);
 
-/**
- * htt_rx_ipa_uc_buf_pool_map() - create mappings for IPA rx buffers
- * @pdev: htt context
- *
- * Return: 0 success
- */
-int htt_rx_ipa_uc_buf_pool_map(struct htt_pdev_t *pdev);
 #else
 /**
  * htt_tx_ipa_uc_attach() - attach htt ipa uc tx resource
@@ -638,11 +631,6 @@ static inline int htt_rx_ipa_uc_detach(struct htt_pdev_t *pdev)
 	return 0;
 }
 
-static inline int htt_rx_ipa_uc_buf_pool_map(struct htt_pdev_t *pdev)
-{
-	return 0;
-}
-
 #endif /* IPA_OFFLOAD */
 
 /* Maximum Outstanding Bus Download */

+ 69 - 45
core/dp/htt/htt_rx.c

@@ -155,10 +155,23 @@ static void htt_rx_hash_deinit(struct htt_pdev_t *pdev)
 	struct htt_rx_hash_entry *hash_entry;
 	struct htt_rx_hash_bucket **hash_table;
 	struct htt_list_node *list_iter = NULL;
+	qdf_mem_info_t *mem_map_table = NULL, *mem_info = NULL;
+	uint32_t num_unmapped = 0;
 
 	if (NULL == pdev->rx_ring.hash_table)
 		return;
 
+	if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled) {
+		mem_map_table = qdf_mem_map_table_alloc(
+					pdev->rx_ring.fill_level);
+		if (!mem_map_table) {
+			qdf_print("%s: Failed to allocate memory for mem map table\n",
+				  __func__);
+			return;
+		}
+		mem_info = mem_map_table;
+	}
+
 	qdf_spin_lock_bh(&(pdev->rx_ring.rx_hash_lock));
 	hash_table = pdev->rx_ring.hash_table;
 	pdev->rx_ring.hash_table = NULL;
@@ -173,6 +186,16 @@ static void htt_rx_hash_deinit(struct htt_pdev_t *pdev)
 							     pdev->rx_ring.
 							     listnode_offset);
 			if (hash_entry->netbuf) {
+				if (qdf_mem_smmu_s1_enabled(pdev->osdev) &&
+						pdev->is_ipa_uc_enabled) {
+					qdf_update_mem_map_table(pdev->osdev,
+						mem_info,
+						QDF_NBUF_CB_PADDR(
+							hash_entry->netbuf),
+						HTT_RX_BUF_SIZE);
+					mem_info++;
+					num_unmapped++;
+				}
 #ifdef DEBUG_DMA_DONE
 				qdf_nbuf_unmap(pdev->osdev, hash_entry->netbuf,
 					       QDF_DMA_BIDIRECTIONAL);
@@ -196,6 +219,12 @@ static void htt_rx_hash_deinit(struct htt_pdev_t *pdev)
 
 	qdf_spinlock_destroy(&(pdev->rx_ring.rx_hash_lock));
 
+	if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled) {
+		if (num_unmapped)
+			cds_smmu_map_unmap(false, num_unmapped,
+					   mem_map_table);
+		qdf_mem_free(mem_map_table);
+	}
 }
 #endif
 
@@ -489,7 +518,7 @@ static int htt_rx_ring_fill_n(struct htt_pdev_t *pdev, int num)
 	int num_alloc = 0;
 
 	idx = *(pdev->rx_ring.alloc_idx.vaddr);
-	if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->uc_map_reqd) {
+	if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled) {
 		mem_map_table = qdf_mem_map_table_alloc(num);
 		if (!mem_map_table) {
 			qdf_print("%s: Failed to allocate memory for mem map table\n",
@@ -586,7 +615,8 @@ moretofill:
 			pdev->rx_ring.buf.netbufs_ring[idx] = rx_netbuf;
 		}
 
-		if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->uc_map_reqd) {
+		if (qdf_mem_smmu_s1_enabled(pdev->osdev) &&
+					pdev->is_ipa_uc_enabled) {
 			qdf_update_mem_map_table(pdev->osdev, mem_info,
 						 paddr, HTT_RX_BUF_SIZE);
 			mem_info++;
@@ -608,7 +638,7 @@ moretofill:
 	}
 
 free_mem_map_table:
-	if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->uc_map_reqd) {
+	if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled) {
 		cds_smmu_map_unmap(true, num_alloc, mem_map_table);
 		qdf_mem_free(mem_map_table);
 	}
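In the fill path above, only the gate changes: uc_map_reqd (set when the IPA pipes went active) becomes is_ipa_uc_enabled (latched once in htt_attach()), so newly posted RX buffers are batch-mapped immediately, independent of pipe state.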
@@ -746,8 +776,31 @@ void htt_rx_detach(struct htt_pdev_t *pdev)
 		htt_rx_hash_deinit(pdev);
 	} else {
 		int sw_rd_idx = pdev->rx_ring.sw_rd_idx.msdu_payld;
-
+		qdf_mem_info_t *mem_map_table = NULL, *mem_info = NULL;
+		uint32_t num_unmapped = 0;
+
+		if (qdf_mem_smmu_s1_enabled(pdev->osdev) &&
+					pdev->is_ipa_uc_enabled) {
+			mem_map_table = qdf_mem_map_table_alloc(
+						pdev->rx_ring.fill_level);
+			if (!mem_map_table) {
+				qdf_print("%s: Failed to allocate memory for mem map table\n",
+					  __func__);
+				return;
+			}
+			mem_info = mem_map_table;
+		}
 		while (sw_rd_idx != *(pdev->rx_ring.alloc_idx.vaddr)) {
+			if (qdf_mem_smmu_s1_enabled(pdev->osdev) &&
+						pdev->is_ipa_uc_enabled) {
+				qdf_update_mem_map_table(pdev->osdev, mem_info,
+					QDF_NBUF_CB_PADDR(
+						pdev->rx_ring.buf.netbufs_ring[
+								sw_rd_idx]),
+					HTT_RX_BUF_SIZE);
+				mem_info++;
+				num_unmapped++;
+			}
 #ifdef DEBUG_DMA_DONE
 			qdf_nbuf_unmap(pdev->osdev,
 				       pdev->rx_ring.buf.
@@ -765,6 +818,14 @@ void htt_rx_detach(struct htt_pdev_t *pdev)
 			sw_rd_idx &= pdev->rx_ring.size_mask;
 		}
 		qdf_mem_free(pdev->rx_ring.buf.netbufs_ring);
+
+		if (qdf_mem_smmu_s1_enabled(pdev->osdev) &&
+					pdev->is_ipa_uc_enabled) {
+			if (num_unmapped)
+				cds_smmu_map_unmap(false, num_unmapped,
+						   mem_map_table);
+			qdf_mem_free(mem_map_table);
+		}
 	}
 
 	qdf_mem_free_consistent(pdev->osdev, pdev->osdev->dev,
@@ -2375,7 +2436,7 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
 	/* Get the total number of MSDUs */
 	msdu_count = HTT_RX_IN_ORD_PADDR_IND_MSDU_CNT_GET(*(msg_word + 1));
 	HTT_RX_CHECK_MSDU_COUNT(msdu_count);
-	if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->uc_map_reqd) {
+	if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled) {
 		mem_map_table = qdf_mem_map_table_alloc(msdu_count);
 		if (!mem_map_table) {
 			qdf_print("%s: Failed to allocate memory for mem map table\n",
@@ -2409,7 +2470,8 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
 	}
 
 	while (msdu_count > 0) {
-		if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->uc_map_reqd) {
+		if (qdf_mem_smmu_s1_enabled(pdev->osdev) &&
+					pdev->is_ipa_uc_enabled) {
 			qdf_update_mem_map_table(pdev->osdev, mem_info,
 						 QDF_NBUF_CB_PADDR(msdu),
 						 HTT_RX_BUF_SIZE);
@@ -2553,7 +2615,7 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
 	}
 
 free_mem_map_table:
-	if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->uc_map_reqd) {
+	if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled) {
 		if (num_unmapped)
 			cds_smmu_map_unmap(false, num_unmapped,
 					   mem_map_table);
@@ -3975,44 +4037,6 @@ int htt_rx_ipa_uc_detach(struct htt_pdev_t *pdev)
 	htt_rx_ipa_uc_free_wdi2_rsc(pdev);
 	return 0;
 }
-
-int htt_rx_ipa_uc_buf_pool_map(struct htt_pdev_t *pdev)
-{
-	struct htt_rx_hash_entry *hash_entry;
-	struct htt_list_node *list_iter = NULL;
-	qdf_mem_info_t *mem_map_table = NULL, *mem_info = NULL;
-	uint32_t num_alloc = 0;
-	uint32_t i;
-
-	mem_map_table = qdf_mem_map_table_alloc(HTT_RX_RING_SIZE_MAX);
-	if (!mem_map_table) {
-		qdf_print("%s: Failed to allocate memory for mem map table\n",
-			  __func__);
-		return 1;
-	}
-	mem_info = mem_map_table;
-	for (i = 0; i < RX_NUM_HASH_BUCKETS; i++) {
-		list_iter = pdev->rx_ring.hash_table[i]->listhead.next;
-		while (list_iter != &pdev->rx_ring.hash_table[i]->listhead) {
-			hash_entry = (struct htt_rx_hash_entry *)(
-				(char *)list_iter -
-				pdev->rx_ring.listnode_offset);
-			if (hash_entry->netbuf) {
-				qdf_update_mem_map_table(pdev->osdev,
-					mem_info,
-					QDF_NBUF_CB_PADDR(hash_entry->netbuf),
-					HTT_RX_BUF_SIZE);
-				mem_info++;
-				num_alloc++;
-			}
-			list_iter = list_iter->next;
-		}
-	}
-	cds_smmu_map_unmap(true, num_alloc, mem_map_table);
-	qdf_mem_free(mem_map_table);
-
-	return 0;
-}
 #endif /* IPA_OFFLOAD */
 
 /**
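With mapping moved to fill time and unmapping folded into htt_rx_hash_deinit() and htt_rx_detach(), the activation-time pool walk htt_rx_ipa_uc_buf_pool_map() loses its only caller (see the htt_h2t.c hunk) and is deleted along with its declaration and inline stub in htt_internal.h.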

+ 54 - 14
core/dp/htt/htt_tx.c

@@ -1142,9 +1142,8 @@ static int htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t *pdev,
 
 		ring_vaddr++;
 		if (qdf_mem_smmu_s1_enabled(pdev->osdev)) {
-			qdf_update_mem_map_table(pdev->osdev, mem_info,
-					shared_tx_buffer->mem_info.iova,
-					uc_tx_buf_sz);
+			*mem_info = pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[
+						tx_buffer_count]->mem_info;
 			 mem_info++;
 		}
 	}
@@ -1194,19 +1193,40 @@ pwr2:
 static void htt_tx_buf_pool_free(struct htt_pdev_t *pdev)
 {
 	uint16_t idx;
+	qdf_mem_info_t *mem_map_table = NULL, *mem_info = NULL;
+	uint32_t num_unmapped = 0;
+
+	if (qdf_mem_smmu_s1_enabled(pdev->osdev)) {
+		mem_map_table = qdf_mem_map_table_alloc(
+					pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt);
+		if (!mem_map_table) {
+			qdf_print("%s: Failed to allocate memory for mem map table\n",
+				  __func__);
+			return;
+		}
+		mem_info = mem_map_table;
+	}
 
 	for (idx = 0; idx < pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
 		if (pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[idx]) {
-			if (qdf_mem_smmu_s1_enabled(pdev->osdev))
-				cds_smmu_map_unmap(false, 1,
-					&pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[
-						idx]->mem_info);
+			if (qdf_mem_smmu_s1_enabled(pdev->osdev)) {
+				*mem_info = pdev->ipa_uc_tx_rsc.
+					      tx_buf_pool_strg[idx]->mem_info;
+				mem_info++;
+				num_unmapped++;
+			}
 			qdf_mem_shared_mem_free(pdev->osdev,
 						pdev->ipa_uc_tx_rsc.
 							tx_buf_pool_strg[idx]);
 			pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[idx] = NULL;
 		}
 	}
+
+	if (qdf_mem_smmu_s1_enabled(pdev->osdev)) {
+		if (num_unmapped)
+			cds_smmu_map_unmap(false, num_unmapped, mem_map_table);
+		qdf_mem_free(mem_map_table);
+	}
 }
 #else
 static int htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t *pdev,
@@ -1277,9 +1297,8 @@ static int htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t *pdev,
 
 		ring_vaddr++;
 		if (qdf_mem_smmu_s1_enabled(pdev->osdev)) {
-			qdf_update_mem_map_table(pdev->osdev, mem_info,
-					shared_tx_buffer->mem_info.iova,
-					uc_tx_buf_sz);
+			*mem_info = pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[
+						tx_buffer_count]->mem_info;
 			 mem_info++;
 		}
 	}
@@ -1321,19 +1340,40 @@ pwr2:
 static void htt_tx_buf_pool_free(struct htt_pdev_t *pdev)
 {
 	uint16_t idx;
+	qdf_mem_info_t *mem_map_table = NULL, *mem_info = NULL;
+	uint32_t num_unmapped = 0;
+
+	if (qdf_mem_smmu_s1_enabled(pdev->osdev)) {
+		mem_map_table = qdf_mem_map_table_alloc(
+					pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt);
+		if (!mem_map_table) {
+			qdf_print("%s: Failed to allocate memory for mem map table\n",
+				  __func__);
+			return;
+		}
+		mem_info = mem_map_table;
+	}
 
 	for (idx = 0; idx < pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
 		if (pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[idx]) {
-			if (qdf_mem_smmu_s1_enabled(pdev->osdev))
-				cds_smmu_map_unmap(false, 1,
-					&pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[
-						idx]->mem_info);
+			if (qdf_mem_smmu_s1_enabled(pdev->osdev)) {
+				*mem_info = pdev->ipa_uc_tx_rsc.
+					      tx_buf_pool_strg[idx]->mem_info;
+				mem_info++;
+				num_unmapped++;
+			}
 			qdf_mem_shared_mem_free(pdev->osdev,
 						pdev->ipa_uc_tx_rsc.
 							tx_buf_pool_strg[idx]);
 			pdev->ipa_uc_tx_rsc.tx_buf_pool_strg[idx] = NULL;
 		}
 	}
+
+	if (qdf_mem_smmu_s1_enabled(pdev->osdev)) {
+		if (num_unmapped)
+			cds_smmu_map_unmap(false, num_unmapped, mem_map_table);
+		qdf_mem_free(mem_map_table);
+	}
 }
 #endif
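Two things change on the TX side. First, htt_tx_buf_pool_free() now batches: instead of one cds_smmu_map_unmap(false, 1, ...) call per buffer, it accumulates every pool entry's mem_info into a table and unmaps in a single call. Second, the alloc paths stop recomputing entries with qdf_update_mem_map_table() and instead struct-copy the qdf_mem_info_t already stored in the shared-memory object, presumably so the mapped record matches exactly what the free path later hands back for unmapping.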
 

+ 1 - 1
core/dp/htt/htt_types.h

@@ -412,7 +412,7 @@ struct htt_pdev_t {
 
 	struct htt_ipa_uc_tx_resource_t ipa_uc_tx_rsc;
 	struct htt_ipa_uc_rx_resource_t ipa_uc_rx_rsc;
-	int uc_map_reqd;
+	int is_ipa_uc_enabled;
 
 	struct htt_tx_credit_t htt_tx_credit;