
qcacld-3.0: Add protections for accessing peer

Add the necessary protections for accessing the ol txrx peer (a minimal sketch of the locking pattern follows below):
1. Add protection when increasing the peer reference count;
2. Add protection when updating the vdev last peer.

Change-Id: I93628d8d2e34aac7ac041877310e0f430b8238bc
CRs-Fixed: 2166668
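
As a rough illustration of the two protections listed above (a sketch only, not code from this change: the ol_example_* wrapper names and simplified arguments are hypothetical, while qdf_spin_lock_bh(), pdev->peer_ref_mutex, pdev->last_real_peer_mutex, ol_txrx_peer_get_ref() and vdev->last_real_peer all appear in the diffs below):

	/*
	 * Sketch only: take the peer reference count increment and the
	 * vdev->last_real_peer update under their respective pdev locks,
	 * mirroring the pattern applied throughout this change.
	 */
	static void ol_example_get_peer_ref(struct ol_txrx_pdev_t *pdev,
					    struct ol_txrx_peer_t *peer)
	{
		/* 1. protect the reference count increment */
		qdf_spin_lock_bh(&pdev->peer_ref_mutex);
		ol_txrx_peer_get_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
		qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
	}

	static void ol_example_set_last_peer(struct ol_txrx_pdev_t *pdev,
					     struct ol_txrx_vdev_t *vdev,
					     struct ol_txrx_peer_t *peer)
	{
		/* 2. protect the vdev last peer update */
		qdf_spin_lock_bh(&pdev->last_real_peer_mutex);
		if (!vdev->last_real_peer)
			vdev->last_real_peer = peer;
		qdf_spin_unlock_bh(&pdev->last_real_peer_mutex);
	}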
Author: Frank Liu
Commit: 4362e46c32
3 changed files with 40 additions and 27 deletions:
  1. core/dp/txrx/ol_tx_classify.c (+8, -6)
  2. core/dp/txrx/ol_txrx.c (+30, -21)
  3. core/dp/txrx/ol_txrx_peer_find.c (+2, -0)

core/dp/txrx/ol_tx_classify.c (+8, -6)

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
  *
  * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  *
@@ -350,15 +350,17 @@ struct ol_txrx_peer_t *ol_tx_tdls_peer_find(struct ol_txrx_pdev_t *pdev,
 	struct ol_txrx_peer_t *peer = NULL;
 
 	if (vdev->hlTdlsFlag) {
-		peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev,
-						vdev->hl_tdls_ap_mac_addr.raw,
-						peer_id);
+		peer = ol_txrx_peer_find_hash_find_get_ref(pdev,
+					vdev->hl_tdls_ap_mac_addr.raw, 0, 1,
+					PEER_DEBUG_ID_OL_INTERNAL);
+
 		if (peer &&  (peer->peer_ids[0] == HTT_INVALID_PEER_ID)) {
+			ol_txrx_peer_release_ref(peer,
+						 PEER_DEBUG_ID_OL_INTERNAL);
 			peer = NULL;
 		} else {
 			if (peer)
-				ol_txrx_peer_get_ref(peer,
-						     PEER_DEBUG_ID_OL_INTERNAL);
+				*peer_id = peer->local_id;
 		}
 	}
 	if (!peer)
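
Caller-side sketch of the contract implied by this hunk (the wrapper function below is hypothetical; the find/release calls and their arguments are taken from the diff): a non-NULL peer returned by ol_txrx_peer_find_hash_find_get_ref() already holds a reference, so whoever consumes it must eventually drop it with ol_txrx_peer_release_ref().

	static void ol_example_tdls_ref_usage(struct ol_txrx_pdev_t *pdev,
					      struct ol_txrx_vdev_t *vdev)
	{
		struct ol_txrx_peer_t *peer;

		/* lookup takes a reference under the peer hash lock */
		peer = ol_txrx_peer_find_hash_find_get_ref(pdev,
					vdev->hl_tdls_ap_mac_addr.raw, 0, 1,
					PEER_DEBUG_ID_OL_INTERNAL);
		if (!peer)
			return;

		/* ... use peer while the reference is held ... */

		ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
	}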

core/dp/txrx/ol_txrx.c (+30, -21)

@@ -131,6 +131,7 @@ ol_txrx_copy_mac_addr_raw(struct cdp_vdev *pvdev, uint8_t *bss_addr)
 {
 	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t  *)pvdev;
 
+	qdf_spin_lock_bh(&vdev->pdev->last_real_peer_mutex);
 	if (bss_addr && vdev->last_real_peer &&
 	    !qdf_mem_cmp((u8 *)bss_addr,
 			     vdev->last_real_peer->mac_addr.raw,
@@ -138,6 +139,7 @@ ol_txrx_copy_mac_addr_raw(struct cdp_vdev *pvdev, uint8_t *bss_addr)
 		qdf_mem_copy(vdev->hl_tdls_ap_mac_addr.raw,
 			     vdev->last_real_peer->mac_addr.raw,
 			     OL_TXRX_MAC_ADDR_LEN);
+	qdf_spin_unlock_bh(&vdev->pdev->last_real_peer_mutex);
 }
 
 /**
@@ -156,16 +158,16 @@ ol_txrx_add_last_real_peer(struct cdp_pdev *ppdev,
 	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
 	ol_txrx_peer_handle peer;
 
-	if (vdev->last_real_peer == NULL) {
-		peer = NULL;
-		peer = ol_txrx_find_peer_by_addr(
-				(struct cdp_pdev *)pdev,
-				vdev->hl_tdls_ap_mac_addr.raw,
-				peer_id);
-		if (peer && (peer->peer_ids[0] !=
-					HTT_INVALID_PEER_ID))
-			vdev->last_real_peer = peer;
-	}
+	peer = ol_txrx_find_peer_by_addr(
+		(struct cdp_pdev *)pdev,
+		vdev->hl_tdls_ap_mac_addr.raw,
+		peer_id);
+
+	qdf_spin_lock_bh(&pdev->last_real_peer_mutex);
+	if (!vdev->last_real_peer && peer &&
+	    (peer->peer_ids[0] != HTT_INVALID_PEER_ID))
+		vdev->last_real_peer = peer;
+	qdf_spin_unlock_bh(&pdev->last_real_peer_mutex);
 }
 
 /**
@@ -201,14 +203,18 @@ ol_txrx_update_last_real_peer(struct cdp_pdev *ppdev, void *ppeer,
 	struct ol_txrx_peer_t *peer = ppeer;
 	struct ol_txrx_vdev_t *vdev;
 
+	if (!restore_last_peer)
+		return;
+
 	vdev = peer->vdev;
-	if (restore_last_peer && (vdev->last_real_peer == NULL)) {
-		peer = NULL;
-		peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev,
+	peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev,
 				vdev->hl_tdls_ap_mac_addr.raw, peer_id);
-		if (peer && (peer->peer_ids[0] != HTT_INVALID_PEER_ID))
-			vdev->last_real_peer = peer;
-	}
+
+	qdf_spin_lock_bh(&pdev->last_real_peer_mutex);
+	if (!vdev->last_real_peer && peer &&
+	    (peer->peer_ids[0] != HTT_INVALID_PEER_ID))
+		vdev->last_real_peer = peer;
+	qdf_spin_unlock_bh(&pdev->last_real_peer_mutex);
 }
 #endif
 
@@ -2697,8 +2703,11 @@ ol_txrx_peer_attach(struct cdp_vdev *pvdev, uint8_t *peer_mac_addr)
 	TAILQ_INSERT_TAIL(&vdev->peer_list, peer, peer_list_elem);
 	qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
 	/* check whether this is a real peer (peer mac addr != vdev mac addr) */
-	if (ol_txrx_peer_find_mac_addr_cmp(&vdev->mac_addr, &peer->mac_addr))
+	if (ol_txrx_peer_find_mac_addr_cmp(&vdev->mac_addr, &peer->mac_addr)) {
+		qdf_spin_lock_bh(&pdev->last_real_peer_mutex);
 		vdev->last_real_peer = peer;
+		qdf_spin_unlock_bh(&pdev->last_real_peer_mutex);
+	}
 
 	peer->rx_opt_proc = pdev->rx_opt_proc;
 
@@ -3722,9 +3731,6 @@ static void ol_txrx_peer_detach(void *ppeer, uint32_t bitmap)
 		   peer->mac_addr.raw[2], peer->mac_addr.raw[3],
 		   peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
 
-	if (peer->vdev->last_real_peer == peer)
-		peer->vdev->last_real_peer = NULL;
-
 	qdf_spin_lock_bh(&vdev->pdev->last_real_peer_mutex);
 	if (vdev->last_real_peer == peer)
 		vdev->last_real_peer = NULL;
@@ -4434,8 +4440,11 @@ static void ol_txrx_disp_peer_stats(ol_txrx_pdev_handle pdev)
 	for (i = 0; i < OL_TXRX_NUM_LOCAL_PEER_IDS; i++) {
 		qdf_spin_lock_bh(&pdev->local_peer_ids.lock);
 		peer = pdev->local_peer_ids.map[i];
-		if (peer)
+		if (peer) {
+			qdf_spin_lock_bh(&pdev->peer_ref_mutex);
 			ol_txrx_peer_get_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
+			qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
+		}
 		qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
 
 		if (peer) {
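
The ol_txrx.c hunks above share one shape: the address lookup runs outside last_real_peer_mutex to keep the critical section short, and only the check-and-assign of vdev->last_real_peer runs under the lock. A minimal sketch of that shape (the wrapper name and the peer_id parameter type are assumptions; the lock, fields, and lookup call are from the diff):

	static void ol_example_restore_last_peer(struct ol_txrx_pdev_t *pdev,
						 struct ol_txrx_vdev_t *vdev,
						 uint8_t *peer_id)
	{
		struct ol_txrx_peer_t *peer;

		/* lookup outside the lock to keep the critical section short */
		peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev,
						 vdev->hl_tdls_ap_mac_addr.raw,
						 peer_id);

		/* check-and-assign under last_real_peer_mutex */
		qdf_spin_lock_bh(&pdev->last_real_peer_mutex);
		if (!vdev->last_real_peer && peer &&
		    (peer->peer_ids[0] != HTT_INVALID_PEER_ID))
			vdev->last_real_peer = peer;
		qdf_spin_unlock_bh(&pdev->last_real_peer_mutex);
	}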

core/dp/txrx/ol_txrx_peer_find.c (+2, -0)

@@ -750,8 +750,10 @@ struct ol_txrx_peer_t *ol_txrx_assoc_peer_find(struct ol_txrx_vdev_t *vdev)
 	 */
 	if (vdev->last_real_peer
 	    && vdev->last_real_peer->peer_ids[0] != HTT_INVALID_PEER_ID) {
+		qdf_spin_lock_bh(&vdev->pdev->peer_ref_mutex);
 		ol_txrx_peer_get_ref(vdev->last_real_peer,
 				     PEER_DEBUG_ID_OL_INTERNAL);
+		qdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
 		peer = vdev->last_real_peer;
 	} else {
 		peer = NULL;
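
For completeness, a sketch of how a caller might consume the reference that ol_txrx_assoc_peer_find() now takes under peer_ref_mutex (the caller itself and the placement of the release are assumptions; the release pairs the PEER_DEBUG_ID_OL_INTERNAL id used above):

	static void ol_example_assoc_peer_usage(struct ol_txrx_vdev_t *vdev)
	{
		struct ol_txrx_peer_t *peer;

		peer = ol_txrx_assoc_peer_find(vdev);
		if (!peer)
			return;

		/* ... use peer while the reference taken above is held ... */

		ol_txrx_peer_release_ref(peer, PEER_DEBUG_ID_OL_INTERNAL);
	}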