
qcacld-3.0: Dynamic SMMU map/unmap only when IPA enabled

IPA SMMU mapping for RX buffers is needed only when IPA offload
and the IPA pipes are enabled. Currently, in the STA-only case
where IPA is not enabled, SMMU map/unmap is still done for RX
buffers. Do the SMMU mapping only when the IPA pipes are enabled.

Change-Id: I88db2cc8606bdf4586644a7ffccd0415f85c8241
CRs-Fixed: 2213795
Sravan Kumar Kairam 6 years ago
parent
commit
b5870bb811
4 files changed with 161 additions and 72 deletions
  1. core/dp/htt/htt_rx.c (+100 -61)
  2. core/dp/htt/htt_types.h (+1 -0)
  3. core/dp/ol/inc/ol_htt_rx_api.h (+8 -0)
  4. core/dp/txrx/ol_txrx_ipa.c (+52 -11)
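
The whole change hinges on one gating predicate, which the htt_rx.c hunks below open-code and cache in a local ipa_smmu flag in the fill, pop, and detach paths. A minimal sketch of that predicate, built only from identifiers that appear in the diff; the helper name itself is hypothetical and not part of the tree:

static inline bool htt_rx_ipa_smmu_needed(struct htt_pdev_t *pdev)
{
	/* SMMU stage-1 translation is active for the WLAN device */
	if (!qdf_mem_smmu_s1_enabled(pdev->osdev))
		return false;

	/* IPA uC offload is enabled for this pdev */
	if (!pdev->is_ipa_uc_enabled)
		return false;

	/* New flag from this change: true only while IPA pipes are enabled */
	return pdev->rx_ring.smmu_map;
}

Only when all three conditions hold does the RX path issue cds_smmu_map_unmap() calls: map on refill in htt_rx_ring_fill_n(), unmap on pop in htt_rx_amsdu_rx_in_order_pop_ll() and on teardown in htt_rx_detach()/htt_rx_hash_deinit().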

+ 100 - 61
core/dp/htt/htt_rx.c

@@ -135,10 +135,15 @@ static void htt_rx_hash_deinit(struct htt_pdev_t *pdev)
 	struct htt_rx_hash_bucket **hash_table;
 	struct htt_list_node *list_iter = NULL;
 	qdf_mem_info_t mem_map_table = {0};
+	bool ipa_smmu = false;
 
 	if (NULL == pdev->rx_ring.hash_table)
 		return;
 
+	if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled &&
+	    pdev->rx_ring.smmu_map)
+		ipa_smmu = true;
+
 	qdf_spin_lock_bh(&(pdev->rx_ring.rx_hash_lock));
 	hash_table = pdev->rx_ring.hash_table;
 	pdev->rx_ring.hash_table = NULL;
@@ -153,8 +158,7 @@ static void htt_rx_hash_deinit(struct htt_pdev_t *pdev)
 							     pdev->rx_ring.
 							     listnode_offset);
 			if (hash_entry->netbuf) {
-				if (qdf_mem_smmu_s1_enabled(pdev->osdev) &&
-						pdev->is_ipa_uc_enabled) {
+				if (ipa_smmu) {
 					qdf_update_mem_map_table(pdev->osdev,
 						&mem_map_table,
 						QDF_NBUF_CB_PADDR(
@@ -395,9 +399,14 @@ static int htt_rx_ring_fill_n(struct htt_pdev_t *pdev, int num)
 	int filled = 0;
 	int debt_served = 0;
 	qdf_mem_info_t mem_map_table = {0};
+	bool ipa_smmu = false;
 
 	idx = *(pdev->rx_ring.alloc_idx.vaddr);
 
+	if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled &&
+	    pdev->rx_ring.smmu_map)
+		ipa_smmu = true;
+
 	if ((idx < 0) || (idx > pdev->rx_ring.size_mask) ||
 	    (num > pdev->rx_ring.size))  {
 		QDF_TRACE(QDF_MODULE_ID_HTT,
@@ -494,8 +503,7 @@ moretofill:
 			pdev->rx_ring.buf.netbufs_ring[idx] = rx_netbuf;
 		}
 
-		if (qdf_mem_smmu_s1_enabled(pdev->osdev) &&
-					pdev->is_ipa_uc_enabled) {
+		if (ipa_smmu) {
 			qdf_update_mem_map_table(pdev->osdev, &mem_map_table,
 						 paddr, HTT_RX_BUF_SIZE);
 			cds_smmu_map_unmap(true, 1, &mem_map_table);
@@ -633,10 +641,15 @@ static inline unsigned int htt_rx_in_order_ring_elems(struct htt_pdev_t *pdev)
 
 void htt_rx_detach(struct htt_pdev_t *pdev)
 {
+	bool ipa_smmu = false;
 	qdf_timer_stop(&pdev->rx_ring.refill_retry_timer);
 	qdf_timer_free(&pdev->rx_ring.refill_retry_timer);
 	htt_rx_dbg_rxbuf_deinit(pdev);
 
+	if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled &&
+	    pdev->rx_ring.smmu_map)
+		ipa_smmu = true;
+
 	if (pdev->cfg.is_full_reorder_offload) {
 		qdf_mem_free_consistent(pdev->osdev, pdev->osdev->dev,
 					   sizeof(uint32_t),
@@ -649,30 +662,18 @@ void htt_rx_detach(struct htt_pdev_t *pdev)
 		htt_rx_hash_deinit(pdev);
 	} else {
 		int sw_rd_idx = pdev->rx_ring.sw_rd_idx.msdu_payld;
-		qdf_mem_info_t *mem_map_table = NULL, *mem_info = NULL;
-		uint32_t num_unmapped = 0;
-
-		if (qdf_mem_smmu_s1_enabled(pdev->osdev) &&
-					pdev->is_ipa_uc_enabled) {
-			mem_map_table = qdf_mem_map_table_alloc(
-						pdev->rx_ring.fill_level);
-			if (!mem_map_table) {
-				qdf_print("%s: Failed to allocate memory for mem map table\n",
-					  __func__);
-				return;
-			}
-			mem_info = mem_map_table;
-		}
+		qdf_mem_info_t mem_map_table = {0};
+
 		while (sw_rd_idx != *(pdev->rx_ring.alloc_idx.vaddr)) {
-			if (qdf_mem_smmu_s1_enabled(pdev->osdev) &&
-						pdev->is_ipa_uc_enabled) {
-				qdf_update_mem_map_table(pdev->osdev, mem_info,
+			if (ipa_smmu) {
+				qdf_update_mem_map_table(pdev->osdev,
+					&mem_map_table,
 					QDF_NBUF_CB_PADDR(
-						pdev->rx_ring.buf.netbufs_ring[
-								sw_rd_idx]),
+						pdev->rx_ring.buf.
+						netbufs_ring[sw_rd_idx]),
 					HTT_RX_BUF_SIZE);
-				mem_info++;
-				num_unmapped++;
+				cds_smmu_map_unmap(false, 1,
+						   &mem_map_table);
 			}
 #ifdef DEBUG_DMA_DONE
 			qdf_nbuf_unmap(pdev->osdev,
@@ -692,13 +693,6 @@ void htt_rx_detach(struct htt_pdev_t *pdev)
 		}
 		qdf_mem_free(pdev->rx_ring.buf.netbufs_ring);
 
-		if (qdf_mem_smmu_s1_enabled(pdev->osdev) &&
-					pdev->is_ipa_uc_enabled) {
-			if (num_unmapped)
-				cds_smmu_map_unmap(false, num_unmapped,
-						   mem_map_table);
-			qdf_mem_free(mem_map_table);
-		}
 	}
 
 	qdf_mem_free_consistent(pdev->osdev, pdev->osdev->dev,
@@ -1651,9 +1645,9 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
 	struct htt_host_rx_desc_base *rx_desc;
 	enum rx_pkt_fate status = RX_PKT_FATE_SUCCESS;
 	qdf_dma_addr_t paddr;
-	qdf_mem_info_t *mem_map_table = NULL, *mem_info = NULL;
-	uint32_t num_unmapped = 0;
+	qdf_mem_info_t mem_map_table = {0};
 	int ret = 1;
+	bool ipa_smmu = false;
 
 	HTT_ASSERT1(htt_rx_in_order_ring_elems(pdev) != 0);
 
@@ -1669,15 +1663,11 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
 	/* Get the total number of MSDUs */
 	msdu_count = HTT_RX_IN_ORD_PADDR_IND_MSDU_CNT_GET(*(msg_word + 1));
 	HTT_RX_CHECK_MSDU_COUNT(msdu_count);
-	if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled) {
-		mem_map_table = qdf_mem_map_table_alloc(msdu_count);
-		if (!mem_map_table) {
-			qdf_print("%s: Failed to allocate memory for mem map table\n",
-				  __func__);
-			return 0;
-		}
-		mem_info = mem_map_table;
-	}
+
+	if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled &&
+	    pdev->rx_ring.smmu_map)
+		ipa_smmu = true;
+
 	ol_rx_update_histogram_stats(msdu_count, frag_ind, offload_ind);
 	htt_rx_dbg_rxbuf_httrxind(pdev, msdu_count);
 
@@ -1688,7 +1678,7 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
 							msg_word);
 		*head_msdu = *tail_msdu = NULL;
 		ret = 0;
-		goto free_mem_map_table;
+		goto end;
 	}
 
 	paddr = htt_rx_in_ord_paddr_get(msg_word);
@@ -1699,17 +1689,15 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
 		*tail_msdu = NULL;
 		pdev->rx_ring.pop_fail_cnt++;
 		ret = 0;
-		goto free_mem_map_table;
+		goto end;
 	}
 
 	while (msdu_count > 0) {
-		if (qdf_mem_smmu_s1_enabled(pdev->osdev) &&
-					pdev->is_ipa_uc_enabled) {
-			qdf_update_mem_map_table(pdev->osdev, mem_info,
+		if (ipa_smmu) {
+			qdf_update_mem_map_table(pdev->osdev, &mem_map_table,
 						 QDF_NBUF_CB_PADDR(msdu),
 						 HTT_RX_BUF_SIZE);
-			mem_info++;
-			num_unmapped++;
+			cds_smmu_map_unmap(false, 1, &mem_map_table);
 		}
 
 		/*
@@ -1787,11 +1775,11 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
 				if (!prev) {
 					*head_msdu = *tail_msdu = NULL;
 					ret = 0;
-					goto free_mem_map_table;
+					goto end;
 				}
 				*tail_msdu = prev;
 				qdf_nbuf_set_next(prev, NULL);
-				goto free_mem_map_table;
+				goto end;
 			} else { /* if this is not the last msdu */
 				/* get the next msdu */
 				msg_word += HTT_RX_IN_ORD_PADDR_IND_MSDU_DWORDS;
@@ -1803,7 +1791,7 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
 					*tail_msdu = NULL;
 					pdev->rx_ring.pop_fail_cnt++;
 					ret = 0;
-					goto free_mem_map_table;
+					goto end;
 				}
 
 				/* if this is not the first msdu, update the
@@ -1836,7 +1824,7 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
 				*tail_msdu = NULL;
 				pdev->rx_ring.pop_fail_cnt++;
 				ret = 0;
-				goto free_mem_map_table;
+				goto end;
 			}
 			qdf_nbuf_set_next(msdu, next);
 			prev = msdu;
@@ -1847,13 +1835,7 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
 		}
 	}
 
-free_mem_map_table:
-	if (qdf_mem_smmu_s1_enabled(pdev->osdev) && pdev->is_ipa_uc_enabled) {
-		if (num_unmapped)
-			cds_smmu_map_unmap(false, num_unmapped,
-					   mem_map_table);
-		qdf_mem_free(mem_map_table);
-	}
+end:
 	return ret;
 }
 #endif
@@ -2945,4 +2927,61 @@ void htt_deregister_rx_pkt_dump_callback(struct htt_pdev_t *pdev)
 	}
 	pdev->rx_pkt_dump_cb = NULL;
 }
+
+static QDF_STATUS htt_rx_hash_smmu_map(bool map, struct htt_pdev_t *pdev)
+{
+	uint32_t i;
+	struct htt_rx_hash_entry *hash_entry;
+	struct htt_rx_hash_bucket **hash_table;
+	struct htt_list_node *list_iter = NULL;
+	qdf_mem_info_t mem_map_table = {0};
+	int ret;
+
+	qdf_spin_lock_bh(&pdev->rx_ring.rx_hash_lock);
+	hash_table = pdev->rx_ring.hash_table;
+
+	for (i = 0; i < RX_NUM_HASH_BUCKETS; i++) {
+		/* Map/unmap the RX buffers held in hash bucket i */
+		list_iter = hash_table[i]->listhead.next;
+		while (list_iter != &hash_table[i]->listhead) {
+			hash_entry =
+				(struct htt_rx_hash_entry *)((char *)list_iter -
+							     pdev->rx_ring.
+							     listnode_offset);
+			if (hash_entry->netbuf) {
+				qdf_update_mem_map_table(pdev->osdev,
+						&mem_map_table,
+						QDF_NBUF_CB_PADDR(
+							hash_entry->netbuf),
+						HTT_RX_BUF_SIZE);
+				ret = cds_smmu_map_unmap(map, 1,
+							 &mem_map_table);
+				if (ret)
+					return QDF_STATUS_E_FAILURE;
+			}
+			list_iter = list_iter->next;
+		}
+	}
+	qdf_spin_unlock_bh(&pdev->rx_ring.rx_hash_lock);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+QDF_STATUS htt_rx_update_smmu_map(struct htt_pdev_t *pdev, bool map)
+{
+	QDF_STATUS status;
+
+	if (NULL == pdev->rx_ring.hash_table)
+		return QDF_STATUS_SUCCESS;
+
+	if (!qdf_mem_smmu_s1_enabled(pdev->osdev) || !pdev->is_ipa_uc_enabled)
+		return QDF_STATUS_SUCCESS;
+
+	qdf_spin_lock_bh(&pdev->rx_ring.refill_lock);
+	pdev->rx_ring.smmu_map = map;
+	status = htt_rx_hash_smmu_map(map, pdev);
+	qdf_spin_unlock_bh(&pdev->rx_ring.refill_lock);
+
+	return status;
+}
 #endif
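
Beyond the gating flag, the pop and detach paths above also drop the batched unmap scheme: instead of allocating a qdf_mem_info_t array with qdf_mem_map_table_alloc() and flushing it with one cds_smmu_map_unmap() call at the end, each buffer is now mapped or unmapped immediately through a single stack-resident entry. A minimal sketch of that per-buffer pattern, assuming a hypothetical helper name:

static void example_smmu_unmap_one_rx_buf(struct htt_pdev_t *pdev,
					  qdf_nbuf_t netbuf)
{
	qdf_mem_info_t mem_map_table = {0};

	/* Fill device/physical-address/size info for this one RX buffer */
	qdf_update_mem_map_table(pdev->osdev, &mem_map_table,
				 QDF_NBUF_CB_PADDR(netbuf), HTT_RX_BUF_SIZE);

	/* false = unmap from the IPA SMMU context, one entry at a time */
	cds_smmu_map_unmap(false, 1, &mem_map_table);
}

The same two calls, with the first argument set to true, are used on the refill side in htt_rx_ring_fill_n().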

+ 1 - 0
core/dp/htt/htt_types.h

@@ -359,6 +359,7 @@ struct htt_pdev_t {
 		qdf_spinlock_t rx_hash_lock;
 		struct htt_rx_hash_bucket **hash_table;
 		uint32_t listnode_offset;
+		bool smmu_map;
 	} rx_ring;
 #ifdef CONFIG_HL_SUPPORT
 	int rx_desc_size_hl;

+ 8 - 0
core/dp/ol/inc/ol_htt_rx_api.h

@@ -896,4 +896,12 @@ htt_rx_offload_paddr_msdu_pop_ll(htt_pdev_handle pdev,
 
 uint32_t htt_rx_amsdu_rx_in_order_get_pktlog(qdf_nbuf_t rx_ind_msg);
 
+/**
+ * htt_rx_update_smmu_map() - set smmu map/unmap for rx buffers
+ * @pdev: htt pdev handle
+ * @map: true to SMMU map the RX buffers, false to unmap them
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS htt_rx_update_smmu_map(struct htt_pdev_t *pdev, bool map);
 #endif /* _OL_HTT_RX_API__H_ */

+ 52 - 11
core/dp/txrx/ol_txrx_ipa.c

@@ -728,8 +728,16 @@ QDF_STATUS ol_txrx_ipa_cleanup_iface(char *ifname, bool is_ipv6_enabled)
 QDF_STATUS ol_txrx_ipa_enable_pipes(struct cdp_pdev *ppdev)
 {
 	ol_txrx_pdev_handle pdev = (ol_txrx_pdev_handle)ppdev;
+	QDF_STATUS status;
 	int ret;
 
+	status = htt_rx_update_smmu_map(pdev->htt_pdev, true);
+	if (status != QDF_STATUS_SUCCESS) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			  "IPA SMMU map failed status:%d", status);
+		return status;
+	}
+
 	/* ACTIVATE TX PIPE */
 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
 		  "%s: Enable IPA pipes", __func__);
@@ -738,6 +746,10 @@ QDF_STATUS ol_txrx_ipa_enable_pipes(struct cdp_pdev *ppdev)
 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
 			  "%s: ipa_wdi_enable_pipes failed: ret=%d",
 				__func__, ret);
+		status = htt_rx_update_smmu_map(pdev->htt_pdev, false);
+		if (status != QDF_STATUS_SUCCESS)
+			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+				  "IPA SMMU unmap failed");
 		return QDF_STATUS_E_FAILURE;
 	}
 
@@ -764,10 +776,15 @@ QDF_STATUS ol_txrx_ipa_disable_pipes(struct cdp_pdev *ppdev)
 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
 			  "%s: ipa_wdi_disable_pipes failed: ret=%d",
 			  __func__, ret);
-		return QDF_STATUS_E_FAILURE;
 	}
 
-	return QDF_STATUS_SUCCESS;
+	if (htt_rx_update_smmu_map(pdev->htt_pdev, false) !=
+	    QDF_STATUS_SUCCESS) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			  "IPA SMMU unmap failed");
+	}
+
+	return ret ? QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS;
 }
 
 /**
@@ -1436,6 +1453,14 @@ QDF_STATUS ol_txrx_ipa_enable_pipes(struct cdp_pdev *ppdev)
 	ol_txrx_pdev_handle pdev = (ol_txrx_pdev_handle)ppdev;
 	struct ol_txrx_ipa_resources *ipa_res = &pdev->ipa_resource;
 	int result;
+	QDF_STATUS status;
+
+	status = htt_rx_update_smmu_map(pdev->htt_pdev, true);
+	if (status != QDF_STATUS_SUCCESS) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			  "IPA SMMU map failed status:%d", status);
+		return status;
+	}
 
 	/* ACTIVATE TX PIPE */
 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
@@ -1446,14 +1471,14 @@ QDF_STATUS ol_txrx_ipa_enable_pipes(struct cdp_pdev *ppdev)
 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
 				"%s: Enable TX PIPE fail, code %d",
 				__func__, result);
-		return QDF_STATUS_E_FAILURE;
+		goto smmu_unmap;
 	}
 	result = qdf_ipa_resume_wdi_pipe(ipa_res->tx_pipe_handle);
 	if (result) {
 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
 				"%s: Resume TX PIPE fail, code %d",
 				__func__, result);
-		return QDF_STATUS_E_FAILURE;
+		goto smmu_unmap;
 	}
 	ol_txrx_ipa_uc_set_active((struct cdp_pdev *)pdev, true, true);
 
@@ -1466,18 +1491,27 @@ QDF_STATUS ol_txrx_ipa_enable_pipes(struct cdp_pdev *ppdev)
 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
 				"%s: Enable RX PIPE fail, code %d",
 				__func__, result);
-		return QDF_STATUS_E_FAILURE;
+		goto smmu_unmap;
 	}
 	result = qdf_ipa_resume_wdi_pipe(ipa_res->rx_pipe_handle);
 	if (result) {
 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
 				"%s: Resume RX PIPE fail, code %d",
 				__func__, result);
-		return QDF_STATUS_E_FAILURE;
+		goto smmu_unmap;
 	}
 	ol_txrx_ipa_uc_set_active((struct cdp_pdev *)pdev, true, false);
 
 	return QDF_STATUS_SUCCESS;
+
+smmu_unmap:
+	if (htt_rx_update_smmu_map(pdev->htt_pdev, false) !=
+	    QDF_STATUS_SUCCESS) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			  "IPA SMMU unmap failed");
+	}
+
+	return QDF_STATUS_E_FAILURE;
 }
 
 /**
@@ -1499,7 +1533,7 @@ QDF_STATUS ol_txrx_ipa_disable_pipes(struct cdp_pdev *ppdev)
 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
 				"%s: Suspend RX PIPE fail, code %d",
 				__func__, result);
-		return QDF_STATUS_E_FAILURE;
+		goto smmu_unmap;
 	}
 
 	result = qdf_ipa_disable_wdi_pipe(ipa_res->rx_pipe_handle);
@@ -1507,7 +1541,7 @@ QDF_STATUS ol_txrx_ipa_disable_pipes(struct cdp_pdev *ppdev)
 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
 				"%s: Disable RX PIPE fail, code %d",
 				__func__, result);
-		return QDF_STATUS_E_FAILURE;
+		goto smmu_unmap;
 	}
 
 	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
@@ -1517,17 +1551,24 @@ QDF_STATUS ol_txrx_ipa_disable_pipes(struct cdp_pdev *ppdev)
 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
 				"%s: Suspend TX PIPE fail, code %d",
 				__func__, result);
-		return QDF_STATUS_E_FAILURE;
+		goto smmu_unmap;
 	}
 	result = qdf_ipa_disable_wdi_pipe(ipa_res->tx_pipe_handle);
 	if (result) {
 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
 				"%s: Disable TX PIPE fail, code %d",
 				__func__, result);
-		return QDF_STATUS_E_FAILURE;
+		goto smmu_unmap;
 	}
 
-	return QDF_STATUS_SUCCESS;
+smmu_unmap:
+	if (htt_rx_update_smmu_map(pdev->htt_pdev, false) !=
+	    QDF_STATUS_SUCCESS) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			  "IPA SMMU unmap failed");
+	}
+
+	return result ? QDF_STATUS_E_FAILURE : QDF_STATUS_SUCCESS;
 }
 
 /**