
qcacmn: Update the DB address pcie status to IPA

With PCIe slot 1 and PCIe slot 2, if IPA sets the 40th bit of an
address, the access intended for that PCIe memory region is redirected
into the pcie_0 memory region. IPA is therefore not allowed to set the
40th bit of any address given from the pcie_1 or pcie_2 memory regions.
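
As a rough illustration of the aliasing (the concrete doorbell address
and the use of bit 40 as the pcie_0 window selector are assumptions for
this sketch, not details taken from this change):

#include <stdint.h>

/* Assumption: bit 40 selects the pcie_0 decode window */
#define PCIE0_WINDOW_BIT (1ULL << 40)
/* Hypothetical doorbell physical address inside the pcie_1 region */
#define DB_PA            0x1234567000ULL

/* If IPA sets bit 40 on the DB address, the access no longer decodes
 * in pcie_1 but lands in pcie_0's window instead.
 */
static const uint64_t aliased_pa = DB_PA | PCIE0_WINDOW_BIT;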

The fix is to not report the PCIe status as true to IPA for the DB
address, so that IPA takes the long path of mapping the address in its
SMMU context and then accesses the PCIe region through that mapping.
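
A minimal sketch of the resulting IPA-side behavior (the function and
the ipa_smmu_map_doorbell() helper are hypothetical; the actual IPA
driver code is outside this change):

#include <stdbool.h>
#include <stdint.h>

/* hypothetical: maps a PA in IPA's SMMU context and returns the IOVA */
uint64_t ipa_smmu_map_doorbell(uint64_t pa);

static uint64_t ipa_resolve_db_addr(bool is_pcie_addr, uint64_t db_pa)
{
	if (is_pcie_addr)
		return db_pa;	/* short path: access the DB address directly */

	/* long path: map the address in IPA's SMMU context first, then
	 * access the PCIe region through that mapping
	 */
	return ipa_smmu_map_doorbell(db_pa);
}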

Change-Id: Ieeaf1262cef927ca89cded3714c6e37550b8f197
CRs-Fixed: 3448895
Devender Kumar, 2 years ago
commit 576ea0d8b2
1 changed file with 58 additions and 3 deletions

dp/wifi3.0/dp_ipa.c  +58 -3

@@ -39,6 +39,9 @@
 #ifdef FEATURE_WDS
 #include "dp_txrx_wds.h"
 #endif
+#ifdef QCA_IPA_LL_TX_FLOW_CONTROL
+#include <pld_common.h>
+#endif
 
 #ifdef IPA_OFFLOAD
 
@@ -2141,12 +2144,64 @@ bool dp_ipa_is_target_ready(struct dp_soc *soc)
 	else
 		return true;
 }
+
+/**
+ * dp_ipa_update_txr_db_status() - Set transfer ring DB PCIe address status
+ * @dev: Pointer to device
+ * @txrx_smmu: WDI TX/RX configuration
+ *
+ * Return: None
+ */
+static inline
+void dp_ipa_update_txr_db_status(struct device *dev,
+				 qdf_ipa_wdi_pipe_setup_info_smmu_t *txrx_smmu)
+{
+	int pcie_slot = pld_get_pci_slot(dev);
+
+	if (pcie_slot)
+		QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(txrx_smmu) = false;
+	else
+		QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(txrx_smmu) = true;
+}
+
+/**
+ * dp_ipa_update_evt_db_status() - Set event ring DB PCIe address status
+ * @dev: Pointer to device
+ * @txrx_smmu: WDI TX/RX configuration
+ *
+ * Return: None
+ */
+static inline
+void dp_ipa_update_evt_db_status(struct device *dev,
+				 qdf_ipa_wdi_pipe_setup_info_smmu_t *txrx_smmu)
+{
+	int pcie_slot = pld_get_pci_slot(dev);
+
+	if (pcie_slot)
+		QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(txrx_smmu) = false;
+	else
+		QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(txrx_smmu) = true;
+}
 #else
 static inline
 bool dp_ipa_is_target_ready(struct dp_soc *soc)
 {
 	return true;
 }
+
+static inline
+void dp_ipa_update_txr_db_status(struct device *dev,
+				 qdf_ipa_wdi_pipe_setup_info_smmu_t *txrx_smmu)
+{
+	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(txrx_smmu) = true;
+}
+
+static inline
+void dp_ipa_update_evt_db_status(struct device *dev,
+				 qdf_ipa_wdi_pipe_setup_info_smmu_t *txrx_smmu)
+{
+	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(txrx_smmu) = true;
+}
 #endif
 
 QDF_STATUS dp_ipa_enable_autonomy(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
@@ -2407,7 +2462,7 @@ dp_ipa_wdi_tx_smmu_params(struct dp_soc *soc,
 	/* WBM Tail Pointer Address */
 	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(tx_smmu) =
 		soc->ipa_uc_tx_rsc.ipa_wbm_tp_paddr;
-	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(tx_smmu) = true;
+	dp_ipa_update_txr_db_status(soc->osdev->dev, tx_smmu);
 
 	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(tx_smmu),
 		     &ipa_res->tx_ring.sgtable,
@@ -2418,7 +2473,7 @@ dp_ipa_wdi_tx_smmu_params(struct dp_soc *soc,
 	/* TCL Head Pointer Address */
 	QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_DOORBELL_PA(tx_smmu) =
 		soc->ipa_uc_tx_rsc.ipa_tcl_hp_paddr;
-	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_EVT_RN_DB_PCIE_ADDR(tx_smmu) = true;
+	dp_ipa_update_evt_db_status(soc->osdev->dev, tx_smmu);
 
 	QDF_IPA_WDI_SETUP_INFO_SMMU_NUM_PKT_BUFFERS(tx_smmu) =
 		ipa_res->tx_num_alloc_buffer;
@@ -2458,7 +2513,7 @@ dp_ipa_wdi_rx_smmu_params(struct dp_soc *soc,
 	/* REO Tail Pointer Address */
 	QDF_IPA_WDI_SETUP_INFO_SMMU_TRANSFER_RING_DOORBELL_PA(rx_smmu) =
 		soc->ipa_uc_rx_rsc.ipa_reo_tp_paddr;
-	QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(rx_smmu) = true;
+	dp_ipa_update_txr_db_status(soc->osdev->dev, rx_smmu);
 
 	qdf_mem_copy(&QDF_IPA_WDI_SETUP_INFO_SMMU_EVENT_RING_BASE(rx_smmu),
 		     &ipa_res->rx_refill_ring.sgtable,
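
For reference, each new helper boils down to a single comparison and
could equivalently be written as below (a sketch, assuming
pld_get_pci_slot() returns 0 for the primary PCIe slot and a non-zero
slot index otherwise, as the diff above implies):

/* Equivalent one-line form of dp_ipa_update_txr_db_status() */
QDF_IPA_WDI_SETUP_INFO_SMMU_IS_TXR_RN_DB_PCIE_ADDR(txrx_smmu) =
	(pld_get_pci_slot(dev) == 0);

The explicit if/else in the change matches the surrounding driver
style; the effect is that only DB addresses behind slot 0 are reported
to IPA as directly accessible PCIe addresses.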