Browse Source

qcacmn: fix unmapped REO queue address for MLO roaming

During MLO roaming, the first link peer has an unmapped REO queue
descriptor address because roaming is in progress, but the 2nd link
peer still sends a stale REO queue descriptor address to FW. An SMMU
fault will be captured if REO accesses this stale REO queue
descriptor address.

Add a roaming-in-progress check and a REO queue descriptor address
check before sending WMI PEER_REORDER_QUEUE_SETUP_CMDID.

Change-Id: Ic868685d98b802fa904ed440cdbb3cef908c6d95
CRs-Fixed: 3101658
Jinwei Chen 3 years ago
parent
commit
61a06721b0
1 changed files with 9 additions and 3 deletions
  1. 9 3
      dp/wifi3.0/dp_peer.c

+ 9 - 3
dp/wifi3.0/dp_peer.c

@@ -2899,6 +2899,10 @@ static QDF_STATUS dp_peer_rx_reorder_queue_setup(struct dp_soc *soc,
 	struct dp_rx_tid *rx_tid;
 	struct dp_rx_tid *rx_tid;
 	struct dp_soc *link_peer_soc;
 	struct dp_soc *link_peer_soc;
 
 
+	rx_tid = &peer->rx_tid[tid];
+	if (!rx_tid->hw_qdesc_paddr)
+		return QDF_STATUS_E_INVAL;
+
 	if (IS_MLO_DP_MLD_PEER(peer)) {
 	if (IS_MLO_DP_MLD_PEER(peer)) {
 		/* get link peers with reference */
 		/* get link peers with reference */
 		dp_get_link_peers_ref_from_mld_peer(soc, peer,
 		dp_get_link_peers_ref_from_mld_peer(soc, peer,
@@ -2907,7 +2911,6 @@ static QDF_STATUS dp_peer_rx_reorder_queue_setup(struct dp_soc *soc,
 		/* send WMI cmd to each link peers */
 		/* send WMI cmd to each link peers */
 		for (i = 0; i < link_peers_info.num_links; i++) {
 		for (i = 0; i < link_peers_info.num_links; i++) {
 			link_peer = link_peers_info.link_peers[i];
 			link_peer = link_peers_info.link_peers[i];
-			rx_tid = &link_peer->rx_tid[tid];
 			link_peer_soc = link_peer->vdev->pdev->soc;
 			link_peer_soc = link_peer->vdev->pdev->soc;
 			if (link_peer_soc->cdp_soc.ol_ops->
 			if (link_peer_soc->cdp_soc.ol_ops->
 					peer_rx_reorder_queue_setup) {
 					peer_rx_reorder_queue_setup) {
@@ -2929,7 +2932,6 @@ static QDF_STATUS dp_peer_rx_reorder_queue_setup(struct dp_soc *soc,
 		/* release link peers reference */
 		/* release link peers reference */
 		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
 		dp_release_link_peers_ref(&link_peers_info, DP_MOD_ID_CDP);
 	} else if (peer->peer_type == CDP_LINK_PEER_TYPE) {
 	} else if (peer->peer_type == CDP_LINK_PEER_TYPE) {
-			rx_tid = &peer->rx_tid[tid];
 			if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
 			if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
 				if (soc->cdp_soc.ol_ops->
 				if (soc->cdp_soc.ol_ops->
 					peer_rx_reorder_queue_setup(
 					peer_rx_reorder_queue_setup(
@@ -2972,6 +2974,9 @@ static QDF_STATUS dp_peer_rx_reorder_queue_setup(struct dp_soc *soc,
 {
 {
 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
 
 
+	if (!rx_tid->hw_qdesc_paddr)
+		return QDF_STATUS_E_INVAL;
+
 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
 	if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup) {
 		if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
 		if (soc->cdp_soc.ol_ops->peer_rx_reorder_queue_setup(
 		    soc->ctrl_psoc,
 		    soc->ctrl_psoc,
@@ -3337,12 +3342,12 @@ try_desc_alloc:
 		}
 		}
 	}
 	}
 
 
+send_wmi_reo_cmd:
 	if (dp_get_peer_vdev_roaming_in_progress(peer)) {
 	if (dp_get_peer_vdev_roaming_in_progress(peer)) {
 		status = QDF_STATUS_E_PERM;
 		status = QDF_STATUS_E_PERM;
 		goto error;
 		goto error;
 	}
 	}
 
 
-send_wmi_reo_cmd:
 	status = dp_peer_rx_reorder_queue_setup(soc, peer,
 	status = dp_peer_rx_reorder_queue_setup(soc, peer,
 						tid, ba_window_size);
 						tid, ba_window_size);
 	if (QDF_IS_STATUS_SUCCESS(status))
 	if (QDF_IS_STATUS_SUCCESS(status))
@@ -3359,6 +3364,7 @@ error:
 				rx_tid->hw_qdesc_alloc_size);
 				rx_tid->hw_qdesc_alloc_size);
 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
 		qdf_mem_free(rx_tid->hw_qdesc_vaddr_unaligned);
 		rx_tid->hw_qdesc_vaddr_unaligned = NULL;
 		rx_tid->hw_qdesc_vaddr_unaligned = NULL;
+		rx_tid->hw_qdesc_paddr = 0;
 	}
 	}
 	return status;
 	return status;
 }
 }