View source

qcacmn: Update vdev list and peer list access locking in below order

a. Acquire peer list lock
b. Acquire vdev list lock
c. Release vdev list lock
d. Release peer list lock

(i.e. the vdev list lock is always taken and released while holding the
peer list lock, so the two locks nest in a single consistent order.)

Change-Id: Ifc1e57fd1a243d909ca65421f269830d24d6b31b
CRs-Fixed: 2372947
Amir Patel, 6 years ago
Commit 17b91788b0
1 file changed, 40 insertions(+), 9 deletions(-)
      dp/wifi3.0/dp_main.c

+ 40 - 9
dp/wifi3.0/dp_main.c

@@ -3125,15 +3125,15 @@ void  dp_iterate_update_peer_list(void *pdev_hdl)
 	struct dp_vdev *vdev = NULL;
 	struct dp_peer *peer = NULL;
 
-	qdf_spin_lock_bh(&pdev->vdev_list_lock);
 	qdf_spin_lock_bh(&soc->peer_ref_mutex);
+	qdf_spin_lock_bh(&pdev->vdev_list_lock);
 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
 		DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
 			dp_cal_client_update_peer_stats(&peer->stats);
 		}
 	}
-	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
+	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
 }
 #else
 void  dp_iterate_update_peer_list(void *pdev_hdl)
@@ -6084,10 +6084,8 @@ void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
 
 	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
 
-	qdf_spin_lock_bh(&soc->peer_ref_mutex);
 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
 		dp_update_vdev_stats(vdev_stats, peer);
-	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
 
 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
@@ -6105,6 +6103,7 @@ void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
 static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
 {
 	struct dp_vdev *vdev = NULL;
+	struct dp_soc *soc;
 	struct cdp_vdev_stats *vdev_stats =
 			qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
 
@@ -6121,6 +6120,8 @@ static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
 	if (pdev->mcopy_mode)
 		DP_UPDATE_STATS(pdev, pdev->invalid_peer);
 
+	soc = pdev->soc;
+	qdf_spin_lock_bh(&soc->peer_ref_mutex);
 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
 
@@ -6129,6 +6130,7 @@ static inline void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
 		dp_update_pdev_ingress_stats(pdev, vdev);
 	}
 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
+	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
 	qdf_mem_free(vdev_stats);
 
 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
@@ -6148,8 +6150,20 @@ static void dp_vdev_getstats(void *vdev_handle,
 		struct cdp_dev_stats *stats)
 {
 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
-	struct cdp_vdev_stats *vdev_stats =
-			qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
+	struct dp_pdev *pdev;
+	struct dp_soc *soc;
+	struct cdp_vdev_stats *vdev_stats;
+
+	if (!vdev)
+		return;
+
+	pdev = vdev->pdev;
+	if (!pdev)
+		return;
+
+	soc = pdev->soc;
+
+	vdev_stats = qdf_mem_malloc(sizeof(struct cdp_vdev_stats));
 
 	if (!vdev_stats) {
 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
@@ -6157,7 +6171,9 @@ static void dp_vdev_getstats(void *vdev_handle,
 		return;
 	}
 
+	qdf_spin_lock_bh(&soc->peer_ref_mutex);
 	dp_aggregate_vdev_stats(vdev, vdev_stats);
+	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
 
 	stats->tx_packets = vdev_stats->tx_i.rcvd.num;
 	stats->tx_bytes = vdev_stats->tx_i.rcvd.bytes;
@@ -8097,12 +8113,27 @@ static int  dp_txrx_get_vdev_stats(struct cdp_vdev *vdev_handle, void *buf,
 				   bool is_aggregate)
 {
 	struct dp_vdev *vdev = (struct dp_vdev *)vdev_handle;
-	struct cdp_vdev_stats *vdev_stats = (struct cdp_vdev_stats *)buf;
+	struct cdp_vdev_stats *vdev_stats;
+	struct dp_pdev *pdev;
+	struct dp_soc *soc;
 
-	if (is_aggregate)
+	if (!vdev)
+		return 1;
+
+	pdev = vdev->pdev;
+	if (!pdev)
+		return 1;
+
+	soc = pdev->soc;
+	vdev_stats = (struct cdp_vdev_stats *)buf;
+
+	if (is_aggregate) {
+		qdf_spin_lock_bh(&soc->peer_ref_mutex);
 		dp_aggregate_vdev_stats(vdev, buf);
-	else
+		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
+	} else {
 		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
+	}
 
 	return 0;
 }