
qcacmn: use granular locks to protect peer structures

Remove the global soc->peer_ref_mutex lock that protects
peer access.

Instead, use granular locks to protect the individual
peer DBs:

1) soc->peer_hash_lock : protects the peer hash table
2) soc->peer_map_lock : protects the peer-id-to-object map array
3) vdev->peer_list_lock : protects the per-vdev peer list

Change-Id: I9f044c4d71fe1155817a2b22a5a050545ec37445
Chaithanya Garrepalli, 4 years ago
Commit: 86d3df0605
7 files changed, 260 insertions(+), 145 deletions(-)
  1. dp/wifi3.0/dp_internal.h (+9 -1)
  2. dp/wifi3.0/dp_main.c (+30 -71)
  3. dp/wifi3.0/dp_peer.c (+206 -57)
  4. dp/wifi3.0/dp_peer.h (+3 -3)
  5. dp/wifi3.0/dp_stats.c (+0 -4)
  6. dp/wifi3.0/dp_tx.c (+6 -2)
  7. dp/wifi3.0/dp_types.h (+6 -7)
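
Overview of the new locking pattern (a minimal sketch, assuming the QDF
spinlock API and the TAILQ peer list used throughout this change; not a
verbatim excerpt of any single hunk):

	/* Per-vdev peer list traversal: previously serialized by the
	 * global soc->peer_ref_mutex, now by the vdev-level lock.
	 */
	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		/* short, non-sleeping per-peer work only */
		dp_cal_client_update_peer_stats(&peer->stats);
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);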

+ 9 - 1
dp/wifi3.0/dp_internal.h

@@ -945,7 +945,15 @@ extern void dp_peer_find_detach(struct dp_soc *soc);
 extern void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer);
 extern void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer);
 extern void dp_peer_find_hash_erase(struct dp_soc *soc);
-
+void dp_peer_vdev_list_add(struct dp_soc *soc, struct dp_vdev *vdev,
+			   struct dp_peer *peer);
+uint8_t dp_peer_vdev_list_remove(struct dp_soc *soc, struct dp_vdev *vdev,
+				 struct dp_peer *peer);
+void dp_peer_find_id_to_obj_add(struct dp_soc *soc,
+				struct dp_peer *peer,
+				uint16_t peer_id);
+void dp_peer_find_id_to_obj_remove(struct dp_soc *soc,
+				   uint16_t peer_id);
 /*
  * dp_peer_ppdu_delayed_ba_init() Initialize ppdu in peer
  * @peer: Datapath peer

+ 30 - 71
dp/wifi3.0/dp_main.c

@@ -784,6 +784,7 @@ dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl,
 		pdev = soc->pdev_list[i];
 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
+			qdf_spin_lock_bh(&vdev->peer_list_lock);
 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
 					if ((ase->type ==
@@ -793,6 +794,7 @@ dp_wds_reset_ast_table_wifi3(struct cdp_soc_t  *soc_hdl,
 						dp_peer_del_ast(soc, ase);
 				}
 			}
+			qdf_spin_unlock_bh(&vdev->peer_list_lock);
 		}
 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
 	}
@@ -823,6 +825,7 @@ static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
 		pdev = soc->pdev_list[i];
 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
+			qdf_spin_lock_bh(&vdev->peer_list_lock);
 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
 				DP_PEER_ITERATE_ASE_LIST(peer, ase, temp_ase) {
 					if ((ase->type ==
@@ -835,6 +838,7 @@ static void dp_wds_flush_ast_table_wifi3(struct cdp_soc_t  *soc_hdl)
 					dp_peer_del_ast(soc, ase);
 				}
 			}
+			qdf_spin_unlock_bh(&vdev->peer_list_lock);
 		}
 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
 	}
@@ -1238,6 +1242,7 @@ void dp_print_ast_stats(struct dp_soc *soc)
 		pdev = soc->pdev_list[i];
 		qdf_spin_lock_bh(&pdev->vdev_list_lock);
 		DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
+			qdf_spin_lock_bh(&vdev->peer_list_lock);
 			DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
 				DP_PEER_ITERATE_ASE_LIST(peer, ase, tmp_ase) {
 				    DP_PRINT_STATS("%6d mac_addr = %pM"
@@ -1265,6 +1270,7 @@ void dp_print_ast_stats(struct dp_soc *soc)
 					    vdev->vdev_id);
 				}
 			}
+			qdf_spin_unlock_bh(&vdev->peer_list_lock);
 		}
 		qdf_spin_unlock_bh(&pdev->vdev_list_lock);
 	}
@@ -1289,11 +1295,8 @@ static void dp_print_peer_table(struct dp_vdev *vdev)
 	struct dp_peer *peer = NULL;
 
 	DP_PRINT_STATS("Dumping Peer Table  Stats:");
+	qdf_spin_lock_bh(&vdev->peer_list_lock);
 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
-		if (!peer) {
-			DP_PRINT_STATS("Invalid Peer");
-			return;
-		}
 		DP_PRINT_STATS("    peer_mac_addr = %pM"
 			       " nawds_enabled = %d"
 			       " bss_peer = %d"
@@ -1311,6 +1314,7 @@ static void dp_print_peer_table(struct dp_vdev *vdev)
 			       peer->delete_in_progress,
 			       peer->peer_id);
 	}
+	qdf_spin_unlock_bh(&vdev->peer_list_lock);
 }
 
 #ifdef WLAN_DP_PER_RING_TYPE_CONFIG
@@ -3791,19 +3795,18 @@ QDF_STATUS dp_mon_rings_alloc(struct dp_soc *soc, struct dp_pdev *pdev)
 void  dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
 {
 	struct dp_pdev *pdev = (struct dp_pdev *)pdev_hdl;
-	struct dp_soc *soc = pdev->soc;
 	struct dp_vdev *vdev = NULL;
 	struct dp_peer *peer = NULL;
 
-	qdf_spin_lock_bh(&soc->peer_ref_mutex);
 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
 	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
+		qdf_spin_lock_bh(&vdev->peer_list_lock);
 		DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
 			dp_cal_client_update_peer_stats(&peer->stats);
 		}
+		qdf_spin_unlock_bh(&vdev->peer_list_lock);
 	}
 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
-	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
 }
 #else
 void  dp_iterate_update_peer_list(struct cdp_pdev *pdev_hdl)
@@ -4348,7 +4351,6 @@ static void dp_soc_deinit(void *txrx_soc)
 	DEINIT_RX_HW_STATS_LOCK(soc);
 
 	qdf_spinlock_destroy(&soc->ast_lock);
-	qdf_spinlock_destroy(&soc->peer_ref_mutex);
 
 	qdf_nbuf_queue_free(&soc->htt_stats.msg);
 
@@ -4979,6 +4981,7 @@ static QDF_STATUS dp_vdev_attach_wifi3(struct cdp_soc_t *cdp_soc,
 	 * TCL descriptors for packets transmitted from this VDEV
 	 */
 
+	qdf_spinlock_create(&vdev->peer_list_lock);
 	TAILQ_INIT(&vdev->peer_list);
 	dp_peer_multipass_list_init(vdev);
 
@@ -5128,7 +5131,7 @@ static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle, bool unmap_only)
 		}
 	}
 
-	qdf_spin_lock_bh(&soc->peer_ref_mutex);
+	qdf_spin_lock_bh(&vdev->peer_list_lock);
 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
 		if (!unmap_only && n < soc->max_peers)
 			peer_array[n++] = peer;
@@ -5137,7 +5140,7 @@ static void dp_vdev_flush_peers(struct cdp_vdev *vdev_handle, bool unmap_only)
 			if (j < soc->max_peers)
 				peer_ids[j++] = peer->peer_id;
 	}
-	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
+	qdf_spin_unlock_bh(&vdev->peer_list_lock);
 
 	/*
 	 * If peer id is invalid, need to flush the peer if
@@ -5243,7 +5246,7 @@ static QDF_STATUS dp_vdev_detach_wifi3(struct cdp_soc_t *cdp_soc,
 	 * Use vdev->peer_list_lock while accessing peer_list, in case
 	 * a peer is in the process of being removed from the list.
 	 */
-	qdf_spin_lock_bh(&soc->peer_ref_mutex);
+	qdf_spin_lock_bh(&vdev->peer_list_lock);
 	/* check that the vdev has no peers allocated */
 	if (!TAILQ_EMPTY(&vdev->peer_list)) {
 		/* debug print - will be removed later */
@@ -5258,10 +5261,10 @@ static QDF_STATUS dp_vdev_detach_wifi3(struct cdp_soc_t *cdp_soc,
 		vdev->delete.pending = 1;
 		vdev->delete.callback = callback;
 		vdev->delete.context = cb_context;
-		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
+		qdf_spin_unlock_bh(&vdev->peer_list_lock);
 		return QDF_STATUS_E_FAILURE;
 	}
-	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
+	qdf_spin_unlock_bh(&vdev->peer_list_lock);
 
 	if (wlan_op_mode_monitor == vdev->opmode)
 		goto free_vdev;
@@ -5304,6 +5307,7 @@ free_vdev:
 		vdev->vdev_dp_ext_handle = NULL;
 	}
 
+	qdf_spinlock_destroy(&vdev->peer_list_lock);
 	dp_info("deleting vdev object %pK (%pM)", vdev, vdev->mac_addr.raw);
 
 	qdf_mem_free(vdev);
@@ -5539,20 +5543,13 @@ dp_peer_create_wifi3(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
 	/* reset the ast index to flowid table */
 	dp_peer_reset_flowq_map(peer);
 
-	qdf_spin_lock_bh(&soc->peer_ref_mutex);
 
 	qdf_atomic_init(&peer->ref_cnt);
 
 	/* keep one reference for attach */
 	qdf_atomic_inc(&peer->ref_cnt);
 
-	/* add this peer into the vdev's list */
-	if (wlan_op_mode_sta == vdev->opmode)
-		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
-	else
-		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
-
-	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
+	dp_peer_vdev_list_add(soc, vdev, peer);
 
 	/* TODO: See if hash based search is required */
 	dp_peer_find_hash_add(soc, peer);
@@ -6142,9 +6139,7 @@ dp_peer_authorize(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
 			  "%s: Peer is NULL!\n", __func__);
 		status = QDF_STATUS_E_FAILURE;
 	} else {
-		qdf_spin_lock_bh(&soc->peer_ref_mutex);
 		peer->authorize = authorize ? 1 : 0;
-		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
 	}
 
 	if (peer)
@@ -6240,8 +6235,6 @@ void dp_peer_unref_delete(struct dp_peer *peer)
 	struct dp_vdev *vdev = peer->vdev;
 	struct dp_pdev *pdev = vdev->pdev;
 	struct dp_soc *soc = pdev->soc;
-	struct dp_peer *tmppeer;
-	int found = 0;
 	uint16_t peer_id;
 	uint16_t vdev_id;
 	bool vdev_delete = false;
@@ -6259,7 +6252,6 @@ void dp_peer_unref_delete(struct dp_peer *peer)
 	 * vdev's list of peers is empty, to make sure that list is not modified
 	 * concurrently with the empty check.
 	 */
-	qdf_spin_lock_bh(&soc->peer_ref_mutex);
 	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
 		peer_id = peer->peer_id;
 		vdev_id = vdev->vdev_id;
@@ -6269,7 +6261,7 @@ void dp_peer_unref_delete(struct dp_peer *peer)
 		 * peer object map is removed
 		 */
 		if (peer_id != HTT_INVALID_PEER)
-			soc->peer_id_to_obj_map[peer_id] = NULL;
+			dp_peer_find_id_to_obj_remove(soc, peer_id);
 
 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
 			  "Deleting peer %pK (%pM)", peer, peer->mac_addr.raw);
@@ -6283,22 +6275,7 @@ void dp_peer_unref_delete(struct dp_peer *peer)
 		}
 		qdf_spin_unlock_bh(&soc->ast_lock);
 
-		TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
-			if (tmppeer == peer) {
-				found = 1;
-				break;
-			}
-		}
-
-		if (found) {
-			TAILQ_REMOVE(&peer->vdev->peer_list, peer,
-				peer_list_elem);
-		} else {
-			/*Ignoring the remove operation as peer not found*/
-			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
-				  "peer:%pK not found in vdev:%pK peerlist:%pK",
-				  peer, vdev, &peer->vdev->peer_list);
-		}
+		vdev_delete = dp_peer_vdev_list_remove(soc, vdev, peer);
 
 		/*
 		 * Deallocate the extended stats context
@@ -6329,17 +6306,6 @@ void dp_peer_unref_delete(struct dp_peer *peer)
 		vdev_opmode = vdev->opmode;
 		qdf_mem_copy(vdev_mac_addr, vdev->mac_addr.raw,
 			     QDF_MAC_ADDR_SIZE);
-		/*
-		 * check whether the parent vdev is pending for deleting
-		 * and no peers left.
-		 */
-		if (vdev->delete.pending && TAILQ_EMPTY(&vdev->peer_list))
-			vdev_delete = true;
-		/*
-		 * Now that there are no references to the peer, we can
-		 * release the peer reference lock.
-		 */
-		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
 
 		wlan_minidump_remove(peer);
 		/*
@@ -6356,8 +6322,6 @@ void dp_peer_unref_delete(struct dp_peer *peer)
 		if (vdev_delete)
 			dp_delete_pending_vdev(pdev, vdev, vdev_id);
 
-	} else {
-		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
 	}
 }
 
@@ -7062,8 +7026,10 @@ void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
 
 	qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
 
+	qdf_spin_lock_bh(&vdev->peer_list_lock);
 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem)
 		dp_update_vdev_stats(vdev_stats, peer);
+	qdf_spin_unlock_bh(&vdev->peer_list_lock);
 
 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
@@ -7093,7 +7059,6 @@ void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
 		DP_UPDATE_STATS(pdev, pdev->invalid_peer);
 
 	soc = pdev->soc;
-	qdf_spin_lock_bh(&soc->peer_ref_mutex);
 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
 
@@ -7102,7 +7067,6 @@ void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
 		dp_update_pdev_ingress_stats(pdev, vdev);
 	}
 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
-	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
 	qdf_mem_free(vdev_stats);
 
 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
@@ -7143,9 +7107,7 @@ static QDF_STATUS dp_vdev_getstats(struct cdp_vdev *vdev_handle,
 		return QDF_STATUS_E_FAILURE;
 	}
 
-	qdf_spin_lock_bh(&soc->peer_ref_mutex);
 	dp_aggregate_vdev_stats(vdev, vdev_stats);
-	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
 
 	stats->tx_packets = vdev_stats->tx_i.rcvd.num;
 	stats->tx_bytes = vdev_stats->tx_i.rcvd.bytes;
@@ -7332,6 +7294,7 @@ dp_txrx_host_stats_clr(struct dp_vdev *vdev, struct dp_soc *soc)
 
 	hif_clear_napi_stats(vdev->pdev->soc->hif_handle);
 
+	qdf_spin_lock_bh(&vdev->peer_list_lock);
 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
 		struct dp_rx_tid *rx_tid;
 		uint8_t tid;
@@ -7348,6 +7311,7 @@ dp_txrx_host_stats_clr(struct dp_vdev *vdev, struct dp_soc *soc)
 				     UPDATE_PEER_STATS, vdev->pdev->pdev_id);
 #endif
 	}
+	qdf_spin_unlock_bh(&vdev->peer_list_lock);
 
 #if defined(FEATURE_PERPKT_INFO) && WDI_EVENT_ENABLE
 	dp_wdi_event_handler(WDI_EVENT_UPDATE_DP_STATS, vdev->pdev->soc,
@@ -8789,9 +8753,7 @@ static int dp_txrx_get_vdev_stats(struct cdp_soc_t *soc, uint8_t vdev_id,
 	vdev_stats = (struct cdp_vdev_stats *)buf;
 
 	if (is_aggregate) {
-		qdf_spin_lock_bh(&((struct dp_soc *)soc)->peer_ref_mutex);
 		dp_aggregate_vdev_stats(vdev, buf);
-		qdf_spin_unlock_bh(&((struct dp_soc *)soc)->peer_ref_mutex);
 	} else {
 		qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
 	}
@@ -9794,9 +9756,9 @@ static QDF_STATUS dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
 	if (!pdev)
 		return QDF_STATUS_E_FAILURE;
 
-	qdf_spin_lock_bh(&soc->peer_ref_mutex);
 	qdf_spin_lock_bh(&pdev->vdev_list_lock);
 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
+		qdf_spin_lock_bh(&vdev->peer_list_lock);
 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
 			if (peer && !peer->bss_peer)
 				dp_wdi_event_handler(
@@ -9805,9 +9767,9 @@ static QDF_STATUS dp_flush_rate_stats_req(struct cdp_soc_t *soc_hdl,
 					peer->peer_id,
 					WDI_NO_VAL, pdev_id);
 		}
+		qdf_spin_unlock_bh(&vdev->peer_list_lock);
 	}
 	qdf_spin_unlock_bh(&pdev->vdev_list_lock);
-	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
 
 	return QDF_STATUS_SUCCESS;
 }
@@ -10397,9 +10359,7 @@ static uint32_t dp_tx_get_success_ack_stats(struct cdp_soc_t *soc_hdl,
 		return 0;
 	}
 
-	qdf_spin_lock_bh(&soc->peer_ref_mutex);
 	dp_aggregate_vdev_stats(vdev, vdev_stats);
-	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
 
 	tx_success = vdev_stats->tx.tx_success.num;
 	qdf_mem_free(vdev_stats);
@@ -11239,7 +11199,6 @@ void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
 
 	qdf_nbuf_queue_init(&soc->htt_stats.msg);
 
-	qdf_spinlock_create(&soc->peer_ref_mutex);
 	qdf_spinlock_create(&soc->ast_lock);
 
 	qdf_spinlock_create(&soc->reo_desc_freelist_lock);
@@ -11903,17 +11862,17 @@ uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id,
 			      u_int8_t newmac[][QDF_MAC_ADDR_SIZE],
 			      u_int16_t mac_cnt)
 {
+	struct dp_soc *dp_soc = (struct dp_soc *)soc;
 	struct dp_vdev *vdev =
-		dp_get_vdev_from_soc_vdev_id_wifi3((struct dp_soc *)soc,
+		dp_get_vdev_from_soc_vdev_id_wifi3(dp_soc,
 						   vdev_id);
-	struct dp_soc *dp_soc = (struct dp_soc *)soc;
 	struct dp_peer *peer;
 	uint16_t new_mac_cnt = 0;
 
 	if (!vdev)
 		return new_mac_cnt;
 
-	qdf_spin_lock_bh(&dp_soc->peer_ref_mutex);
+	qdf_spin_lock_bh(&vdev->peer_list_lock);
 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
 		if (peer->bss_peer)
 			continue;
@@ -11922,7 +11881,7 @@ uint16_t dp_get_peer_mac_list(ol_txrx_soc_handle soc, uint8_t vdev_id,
 			new_mac_cnt++;
 		}
 	}
-	qdf_spin_unlock_bh(&dp_soc->peer_ref_mutex);
+	qdf_spin_unlock_bh(&vdev->peer_list_lock);
 	return new_mac_cnt;
 }
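
Note the idiom in dp_vdev_flush_peers() above: the peer list is only
snapshotted into peer_array/peer_ids while vdev->peer_list_lock is held,
and the actual delete/unmap work runs with the spinlock dropped. A
condensed sketch of that pattern, using the names from the hunk above:

	qdf_spin_lock_bh(&vdev->peer_list_lock);
	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
		if (n < soc->max_peers)
			peer_array[n++] = peer;	/* snapshot only */
	}
	qdf_spin_unlock_bh(&vdev->peer_list_lock);

	/* heavyweight per-peer teardown proceeds outside the lock */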
 

+ 206 - 57
dp/wifi3.0/dp_peer.c

@@ -114,6 +114,12 @@ static int dp_peer_ast_table_attach(struct dp_soc *soc)
 	return 0; /* success */
 }
 
+/*
+ * dp_peer_find_map_attach() - allocate memory for peer_id_to_obj_map
+ * @soc: soc handle
+ *
+ * return: 0 on success, non-zero on failure
+ */
 static int dp_peer_find_map_attach(struct dp_soc *soc)
 {
 	uint32_t max_peers, peer_map_size;
@@ -138,6 +144,8 @@ static int dp_peer_find_map_attach(struct dp_soc *soc)
 	 * that are not in use set to 0.
 	 */
 	qdf_mem_zero(soc->peer_id_to_obj_map, peer_map_size);
+
+	qdf_spinlock_create(&soc->peer_map_lock);
 	return 0; /* success */
 }
 
@@ -155,24 +163,18 @@ static int dp_log2_ceil(unsigned int value)
 	return log2;
 }
 
-static int dp_peer_find_add_id_to_obj(
-	struct dp_peer *peer,
-	uint16_t peer_id)
-{
-
-	if (peer->peer_id == HTT_INVALID_PEER) {
-		peer->peer_id = peer_id;
-		return 0; /* success */
-	}
-	return QDF_STATUS_E_FAILURE; /* failure */
-}
-
 #define DP_PEER_HASH_LOAD_MULT  2
 #define DP_PEER_HASH_LOAD_SHIFT 0
 
 #define DP_AST_HASH_LOAD_MULT  2
 #define DP_AST_HASH_LOAD_SHIFT 0
 
+/*
+ * dp_peer_find_hash_attach() - allocate memory for peer_hash table
+ * @soc: soc handle
+ *
+ * return: 0 on success, non-zero on failure
+ */
 static int dp_peer_find_hash_attach(struct dp_soc *soc)
 {
 	int i, hash_elems, log2;
@@ -195,14 +197,22 @@ static int dp_peer_find_hash_attach(struct dp_soc *soc)
 	for (i = 0; i < hash_elems; i++)
 		TAILQ_INIT(&soc->peer_hash.bins[i]);
 
+	qdf_spinlock_create(&soc->peer_hash_lock);
 	return 0;
 }
 
+/*
+ * dp_peer_find_hash_detach() - cleanup memory for peer_hash table
+ * @soc: soc handle
+ *
+ * return: none
+ */
 static void dp_peer_find_hash_detach(struct dp_soc *soc)
 {
 	if (soc->peer_hash.bins) {
 		qdf_mem_free(soc->peer_hash.bins);
 		soc->peer_hash.bins = NULL;
+		qdf_spinlock_destroy(&soc->peer_hash_lock);
 	}
 }
 
@@ -220,13 +230,19 @@ static inline unsigned dp_peer_find_hash_index(struct dp_soc *soc,
 	return index;
 }
 
-
+/*
+ * dp_peer_find_hash_add() - add peer to peer_hash_table
+ * @soc: soc handle
+ * @peer: peer handle
+ *
+ * return: none
+ */
 void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
 {
 	unsigned index;
 
 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
-	qdf_spin_lock_bh(&soc->peer_ref_mutex);
+	qdf_spin_lock_bh(&soc->peer_hash_lock);
 	/*
 	 * It is important to add the new peer at the tail of the peer list
 	 * with the bin index.  Together with having the hash_find function
@@ -235,7 +251,118 @@ void dp_peer_find_hash_add(struct dp_soc *soc, struct dp_peer *peer)
 	 * found first.
 	 */
 	TAILQ_INSERT_TAIL(&soc->peer_hash.bins[index], peer, hash_list_elem);
-	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
+	qdf_spin_unlock_bh(&soc->peer_hash_lock);
+}
+
+/*
+ * dp_peer_vdev_list_add() - add peer into vdev list
+ * @soc: soc handle
+ * @vdev: vdev handle
+ * @peer: peer handle
+ *
+ * return: none
+ */
+void dp_peer_vdev_list_add(struct dp_soc *soc, struct dp_vdev *vdev,
+			   struct dp_peer *peer)
+{
+	qdf_spin_lock_bh(&vdev->peer_list_lock);
+
+	/* add this peer into the vdev's list */
+	if (wlan_op_mode_sta == vdev->opmode)
+		TAILQ_INSERT_HEAD(&vdev->peer_list, peer, peer_list_elem);
+	else
+		TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
+
+	qdf_spin_unlock_bh(&vdev->peer_list_lock);
+}
+
+/*
+ * dp_peer_vdev_list_remove() - remove peer from vdev list
+ * @soc: SoC handle
+ * @vdev: VDEV handle
+ * @peer: peer handle
+ *
+ * Return: true when the vdev needs to be deleted and the current peer
+ *         is the last one in its peer_list
+ */
+uint8_t dp_peer_vdev_list_remove(struct dp_soc *soc, struct dp_vdev *vdev,
+				 struct dp_peer *peer)
+{
+	uint8_t found = 0;
+	struct dp_peer *tmppeer = NULL;
+	uint8_t vdev_delete = false;
+
+	qdf_spin_lock_bh(&vdev->peer_list_lock);
+	TAILQ_FOREACH(tmppeer, &peer->vdev->peer_list, peer_list_elem) {
+		if (tmppeer == peer) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (found) {
+		TAILQ_REMOVE(&peer->vdev->peer_list, peer,
+			     peer_list_elem);
+	} else {
+		/*Ignoring the remove operation as peer not found*/
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
+			  "peer:%pK not found in vdev:%pK peerlist:%pK",
+			  peer, vdev, &peer->vdev->peer_list);
+	}
+
+	/*
+	 * check whether the parent vdev is pending for deleting
+	 * and no peers left.
+	 */
+	if (vdev->delete.pending && TAILQ_EMPTY(&vdev->peer_list))
+		vdev_delete = true;
+
+	qdf_spin_unlock_bh(&vdev->peer_list_lock);
+
+	return vdev_delete;
+}
+
+/*
+ * dp_peer_find_id_to_obj_add() - Add peer into peer_id table
+ * @soc: SoC handle
+ * @peer: peer handle
+ * @peer_id: peer_id
+ *
+ * Return: None
+ */
+void dp_peer_find_id_to_obj_add(struct dp_soc *soc,
+				struct dp_peer *peer,
+				uint16_t peer_id)
+{
+	QDF_ASSERT(peer_id <= soc->max_peers);
+
+	qdf_spin_lock_bh(&soc->peer_map_lock);
+	if (!soc->peer_id_to_obj_map[peer_id]) {
+		soc->peer_id_to_obj_map[peer_id] = peer;
+	} else {
+		/* Peer map event came for peer_id which
+		 * is already mapped, this is not expected
+		 */
+		QDF_ASSERT(0);
+	}
+	qdf_spin_unlock_bh(&soc->peer_map_lock);
+}
+
+/*
+ * dp_peer_find_id_to_obj_remove() - remove peer from peer_id table
+ * @soc: SoC handle
+ * @peer_id: peer_id
+ *
+ * Return: None
+ */
+void dp_peer_find_id_to_obj_remove(struct dp_soc *soc,
+				   uint16_t peer_id)
+{
+	QDF_ASSERT(peer_id <= soc->max_peers);
+
+	qdf_spin_lock_bh(&soc->peer_map_lock);
+	soc->peer_id_to_obj_map[peer_id] = NULL;
+	qdf_spin_unlock_bh(&soc->peer_map_lock);
 }
 
 /*
@@ -267,7 +394,7 @@ static bool dp_peer_exist_on_pdev(struct dp_soc *soc,
 		mac_addr = &local_mac_addr_aligned;
 	}
 	index = dp_peer_find_hash_index(soc, mac_addr);
-	qdf_spin_lock_bh(&soc->peer_ref_mutex);
+	qdf_spin_lock_bh(&soc->peer_hash_lock);
 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
 		    (peer->vdev->pdev == pdev)) {
@@ -275,7 +402,7 @@ static bool dp_peer_exist_on_pdev(struct dp_soc *soc,
 			break;
 		}
 	}
-	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
+	qdf_spin_unlock_bh(&soc->peer_hash_lock);
 	return found;
 }
 
@@ -1449,6 +1576,17 @@ static int dp_peer_ast_free_entry_by_mac(struct dp_soc *soc,
 	return QDF_STATUS_SUCCESS;
 }
 
+/*
+ * dp_peer_find_hash_find() - returns peer from peer_hash_table matching
+ *                            vdev_id and mac_address
+ * @soc: soc handle
+ * @peer_mac_addr: peer mac address
+ * @mac_addr_is_aligned: is mac addr aligned
+ * @vdev_id: vdev_id
+ *
+ * return: peer on success
+ *         NULL on failure
+ */
 struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
 	uint8_t *peer_mac_addr, int mac_addr_is_aligned, uint8_t vdev_id)
 {
@@ -1465,7 +1603,7 @@ struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
 		mac_addr = &local_mac_addr_aligned;
 	}
 	index = dp_peer_find_hash_index(soc, mac_addr);
-	qdf_spin_lock_bh(&soc->peer_ref_mutex);
+	qdf_spin_lock_bh(&soc->peer_hash_lock);
 	TAILQ_FOREACH(peer, &soc->peer_hash.bins[index], hash_list_elem) {
 		if (dp_peer_find_mac_addr_cmp(mac_addr, &peer->mac_addr) == 0 &&
 			((peer->vdev->vdev_id == vdev_id) ||
@@ -1474,14 +1612,21 @@ struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
 			 * the lock
 			 */
 			qdf_atomic_inc(&peer->ref_cnt);
-			qdf_spin_unlock_bh(&soc->peer_ref_mutex);
+			qdf_spin_unlock_bh(&soc->peer_hash_lock);
 			return peer;
 		}
 	}
-	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
+	qdf_spin_unlock_bh(&soc->peer_hash_lock);
 	return NULL; /* failure */
 }
 
+/*
+ * dp_peer_find_hash_remove() - remove peer from peer_hash_table
+ * @soc: soc handle
+ * @peer: peer handle
+ *
+ * return: none
+ */
 void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
 {
 	unsigned index;
@@ -1491,21 +1636,9 @@ void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
 	index = dp_peer_find_hash_index(soc, &peer->mac_addr);
 	/* Check if tail is not empty before delete*/
 	QDF_ASSERT(!TAILQ_EMPTY(&soc->peer_hash.bins[index]));
-	/*
-	 * DO NOT take the peer_ref_mutex lock here - it needs to be taken
-	 * by the caller.
-	 * The caller needs to hold the lock from the time the peer object's
-	 * reference count is decremented and tested up through the time the
-	 * reference to the peer object is removed from the hash table, by
-	 * this function.
-	 * Holding the lock only while removing the peer object reference
-	 * from the hash table keeps the hash table consistent, but does not
-	 * protect against a new HL tx context starting to use the peer object
-	 * if it looks up the peer object from its MAC address just after the
-	 * peer ref count is decremented to zero, but just before the peer
-	 * object reference is removed from the hash table.
-	 */
-	 TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
+
+	qdf_spin_lock_bh(&soc->peer_hash_lock);
+	TAILQ_FOREACH(tmppeer, &soc->peer_hash.bins[index], hash_list_elem) {
 		if (tmppeer == peer) {
 			found = 1;
 			break;
@@ -1513,6 +1646,7 @@ void dp_peer_find_hash_remove(struct dp_soc *soc, struct dp_peer *peer)
 	}
 	QDF_ASSERT(found);
 	TAILQ_REMOVE(&soc->peer_hash.bins[index], peer, hash_list_elem);
+	qdf_spin_unlock_bh(&soc->peer_hash_lock);
 }
 
 void dp_peer_find_hash_erase(struct dp_soc *soc)
@@ -1561,11 +1695,18 @@ static void dp_peer_ast_table_detach(struct dp_soc *soc)
 	}
 }
 
+/*
+ * dp_peer_find_map_detach() - cleanup memory for peer_id_to_obj_map
+ * @soc: soc handle
+ *
+ * return: none
+ */
 static void dp_peer_find_map_detach(struct dp_soc *soc)
 {
 	if (soc->peer_id_to_obj_map) {
 		qdf_mem_free(soc->peer_id_to_obj_map);
 		soc->peer_id_to_obj_map = NULL;
+		qdf_spinlock_destroy(&soc->peer_map_lock);
 	}
 }
 
@@ -1690,6 +1831,17 @@ void dp_rx_tid_stats_cb(struct dp_soc *soc, void *cb_ctxt,
 			rx_tid->pn_size);
 }
 
+/*
+ * dp_peer_find_add_id() - map peer_id with peer
+ * @soc: soc handle
+ * @peer_mac_addr: peer mac address
+ * @peer_id: peer id to be mapped
+ * @hw_peer_id: HW ast index
+ * @vdev_id: vdev_id
+ *
+ * return: peer on success
+ *         NULL on failure
+ */
 static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
 	uint8_t *peer_mac_addr, uint16_t peer_id, uint16_t hw_peer_id,
 	uint8_t vdev_id)
@@ -1711,20 +1863,13 @@ static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
 			  "%s: ref_cnt: %d", __func__,
 			   qdf_atomic_read(&peer->ref_cnt));
-		if (!soc->peer_id_to_obj_map[peer_id])
-			soc->peer_id_to_obj_map[peer_id] = peer;
-		else {
-			/* Peer map event came for peer_id which
-			 * is already mapped, this is not expected
-			 */
-			QDF_ASSERT(0);
-		}
 
-		if (dp_peer_find_add_id_to_obj(peer, peer_id)) {
-			/* TBDXXX: assert for now */
-			QDF_ASSERT(0);
-		} else {
+		dp_peer_find_id_to_obj_add(soc, peer, peer_id);
+		if (peer->peer_id == HTT_INVALID_PEER) {
+			peer->peer_id = peer_id;
 			dp_peer_tid_peer_id_update(peer, peer->peer_id);
+		} else {
+			QDF_ASSERT(0);
 		}
 
 		return peer;
@@ -1767,7 +1912,11 @@ dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
 	 * obj map
 	 */
 	if (is_wds) {
-		peer = soc->peer_id_to_obj_map[peer_id];
+		peer = dp_peer_find_by_id(soc, peer_id);
+
+		err = dp_peer_map_ast(soc, peer, peer_mac_addr, hw_peer_id,
+				      vdev_id, ast_hash, is_wds);
+
 	} else {
 		/*
 		 * It's the responsibility of the CP and FW to ensure
@@ -1817,9 +1966,9 @@ dp_rx_peer_map_handler(struct dp_soc *soc, uint16_t peer_id,
 			}
 
 		}
+		err = dp_peer_map_ast(soc, peer, peer_mac_addr, hw_peer_id,
+				      vdev_id, ast_hash, is_wds);
 	}
-	err = dp_peer_map_ast(soc, peer, peer_mac_addr,
-			      hw_peer_id, vdev_id, ast_hash, is_wds);
 
 	return err;
 }
@@ -1873,7 +2022,7 @@ dp_rx_peer_unmap_handler(struct dp_soc *soc, uint16_t peer_id,
 	dp_info("peer_unmap_event (soc:%pK) peer_id %d peer %pK",
 		soc, peer_id, peer);
 
-	soc->peer_id_to_obj_map[peer_id] = NULL;
+	dp_peer_find_id_to_obj_remove(soc, peer_id);
 	peer->peer_id = HTT_INVALID_PEER;
 
 	/*
@@ -4076,18 +4225,18 @@ struct dp_peer *dp_vdev_bss_peer_ref_n_get(struct dp_soc *soc,
 {
 	struct dp_peer *peer;
 
-	qdf_spin_lock_bh(&soc->peer_ref_mutex);
+	qdf_spin_lock_bh(&vdev->peer_list_lock);
 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
 		if (peer->bss_peer)
 			break;
 	}
 
 	if (!peer || !qdf_atomic_inc_not_zero(&peer->ref_cnt)) {
-		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
+		qdf_spin_unlock_bh(&vdev->peer_list_lock);
 		return NULL;
 	}
 
-	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
+	qdf_spin_unlock_bh(&vdev->peer_list_lock);
 	return peer;
 }
 
@@ -4106,17 +4255,17 @@ struct dp_peer *dp_sta_vdev_self_peer_ref_n_get(struct dp_soc *soc,
 	if (vdev->opmode != wlan_op_mode_sta)
 		return NULL;
 
-	qdf_spin_lock_bh(&soc->peer_ref_mutex);
+	qdf_spin_lock_bh(&vdev->peer_list_lock);
 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
 		if (peer->sta_self_peer)
 			break;
 	}
 
 	if (!peer || !qdf_atomic_inc_not_zero(&peer->ref_cnt)) {
-		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
+		qdf_spin_unlock_bh(&vdev->peer_list_lock);
 		return NULL;
 	}
 
-	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
+	qdf_spin_unlock_bh(&vdev->peer_list_lock);
 	return peer;
 }
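
With the helpers above, peer_id_to_obj_map is only ever touched under
soc->peer_map_lock. A sketch of the expected pairing on the HTT peer
map/unmap path (as wired up in dp_peer_find_add_id() and
dp_rx_peer_unmap_handler() in this change):

	/* peer map event: publish the peer object for this id */
	dp_peer_find_id_to_obj_add(soc, peer, peer_id);
	peer->peer_id = peer_id;

	/* ... */

	/* peer unmap event: unpublish before the id is reused */
	dp_peer_find_id_to_obj_remove(soc, peer_id);
	peer->peer_id = HTT_INVALID_PEER;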

+ 3 - 3
dp/wifi3.0/dp_peer.h

@@ -62,14 +62,14 @@ struct dp_peer *dp_peer_find_by_id(struct dp_soc *soc,
 {
 	struct dp_peer *peer;
 
-	qdf_spin_lock_bh(&soc->peer_ref_mutex);
+	qdf_spin_lock_bh(&soc->peer_map_lock);
 	peer = __dp_peer_find_by_id(soc, peer_id);
 	if (!peer || (peer && peer->delete_in_progress)) {
-		qdf_spin_unlock_bh(&soc->peer_ref_mutex);
+		qdf_spin_unlock_bh(&soc->peer_map_lock);
 		return NULL;
 	}
 	qdf_atomic_inc(&peer->ref_cnt);
-	qdf_spin_unlock_bh(&soc->peer_ref_mutex);
+	qdf_spin_unlock_bh(&soc->peer_map_lock);
 
 	return peer;
 }
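
dp_peer_find_by_id() now holds soc->peer_map_lock only across the lookup
and the ref_cnt increment; callers still balance the reference
themselves. A hypothetical call site, for illustration only:

	struct dp_peer *peer = dp_peer_find_by_id(soc, peer_id);

	if (!peer)
		return;	/* not mapped, or delete in progress */

	/* peer is kept alive here by the reference taken above */

	dp_peer_unref_delete(peer);	/* drop the lookup reference */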

+ 0 - 4
dp/wifi3.0/dp_stats.c

@@ -6052,10 +6052,6 @@ dp_aggregate_pdev_ctrl_frames_stats(struct dp_pdev *pdev)
 
 	TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
-			if (!peer) {
-				dp_err("DP Invalid Peer refernce");
-				return;
-			}
 
 			if (peer->delete_in_progress) {
 				dp_err("DP Peer deletion in progress");

+ 6 - 2
dp/wifi3.0/dp_tx.c

@@ -2396,7 +2396,7 @@ void dp_tx_nawds_handler(struct cdp_soc_t *soc, struct dp_vdev *vdev,
 		qdf_spin_unlock_bh(&dp_soc->ast_lock);
 	}
 
-	qdf_spin_lock_bh(&dp_soc->peer_ref_mutex);
+	qdf_spin_lock_bh(&vdev->peer_list_lock);
 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
 		if (!peer->bss_peer && peer->nawds_enabled) {
 			peer_id = peer->peer_id;
@@ -2436,7 +2436,7 @@ void dp_tx_nawds_handler(struct cdp_soc_t *soc, struct dp_vdev *vdev,
 		}
 	}
 
-	qdf_spin_unlock_bh(&dp_soc->peer_ref_mutex);
+	qdf_spin_unlock_bh(&vdev->peer_list_lock);
 }
 
 /**
@@ -2658,6 +2658,7 @@ void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
 	}
 	is_ucast = !is_mcast;
 
+	qdf_spin_lock_bh(&vdev->peer_list_lock);
 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
 		if (peer->bss_peer)
 			continue;
@@ -2673,11 +2674,13 @@ void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
 			break;
 		}
 	}
+	qdf_spin_unlock_bh(&vdev->peer_list_lock);
 #endif
 
 	if (qdf_unlikely(vdev->mesh_vdev)) {
 		DP_TX_FREE_SINGLE_BUF(vdev->pdev->soc, tx_desc->nbuf);
 	} else {
+		qdf_spin_lock_bh(&vdev->peer_list_lock);
 		TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
 			if ((peer->peer_id != HTT_INVALID_PEER) &&
 #ifdef WDS_VENDOR_EXTENSION
@@ -2726,6 +2729,7 @@ void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
 				}
 			}
 		}
+		qdf_spin_unlock_bh(&vdev->peer_list_lock);
 	}
 
 	qdf_nbuf_free(nbuf);

+ 6 - 7
dp/wifi3.0/dp_types.h

@@ -1285,13 +1285,10 @@ struct dp_soc {
 		qdf_dma_mem_context(memctx);
 	} me_buf;
 
-	/**
-	 * peer ref mutex:
-	 * 1. Protect peer object lookups until the returned peer object's
-	 *	reference count is incremented.
-	 * 2. Provide mutex when accessing peer object lookup structures.
-	 */
-	DP_MUTEX_TYPE peer_ref_mutex;
+	/* Protect peer hash table */
+	DP_MUTEX_TYPE peer_hash_lock;
+	/* Protect peer_id_to_objmap */
+	DP_MUTEX_TYPE peer_map_lock;
 
 	/* maximum value for peer_id */
 	uint32_t max_peers;
@@ -2120,6 +2117,8 @@ struct dp_vdev {
 
 	/* dp_peer list */
 	TAILQ_HEAD(, dp_peer) peer_list;
+	/* to protect peer_list */
+	DP_MUTEX_TYPE peer_list_lock;
 
 	/* RX call back function to flush GRO packets*/
 	ol_txrx_rx_gro_flush_ind_fp osif_gro_flush;
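
Lock ordering implied by this change wherever the traversals nest (see
dp_print_ast_stats() and dp_flush_rate_stats_req() above):
pdev->vdev_list_lock is taken first, then vdev->peer_list_lock. A
sketch:

	qdf_spin_lock_bh(&pdev->vdev_list_lock);
	DP_PDEV_ITERATE_VDEV_LIST(pdev, vdev) {
		qdf_spin_lock_bh(&vdev->peer_list_lock);
		DP_VDEV_ITERATE_PEER_LIST(vdev, peer) {
			/* per-peer work */
		}
		qdf_spin_unlock_bh(&vdev->peer_list_lock);
	}
	qdf_spin_unlock_bh(&pdev->vdev_list_lock);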