qcacld-3.0: Fix NULL pointer dereferencing of peer due to race condition

The peer gets deleted in ol_txrx_peer_detach_force_delete when
WMA_ROAM_OFFLOAD_SYNCH_IND is received. Because that deletion happens
in one context while ol_rx_send_pktlog_event accesses the peer from
another, a race condition can occur that leads to a NULL pointer
dereference of the peer.

Skip freeing the peer in ol_txrx_peer_detach_force_delete while unmap
events are still outstanding; instead, mark it as stale and free it in
ol_rx_peer_unmap_handler once the last unmap event arrives.
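
Below is a minimal standalone sketch of the deferral scheme (hypothetical,
simplified names: a plain POSIX <sys/queue.h> list stands in for the
driver's roam_stale_peer_list, and pending_unmaps stands in for
del_peer_id_ref_cnt):

	#include <assert.h>
	#include <stdlib.h>
	#include <sys/queue.h>

	struct peer {
		int pending_unmaps;	/* models del_peer_id_ref_cnt */
	};

	struct stale_entry {
		struct peer *peer;
		TAILQ_ENTRY(stale_entry) link;
	};

	TAILQ_HEAD(stale_list, stale_entry);

	/* detach path: free immediately only if no unmap events are pending */
	static void peer_release(struct stale_list *list, struct peer *p)
	{
		struct stale_entry *e;

		if (p->pending_unmaps == 0) {
			free(p);
			return;
		}
		e = malloc(sizeof(*e));
		if (e) {
			e->peer = p;
			TAILQ_INSERT_TAIL(list, e, link);
		}
	}

	/* unmap handler: only called while unmaps are pending; the last
	 * outstanding unmap event frees the stale peer
	 */
	static void peer_unmap(struct stale_list *list, struct peer *p)
	{
		struct stale_entry *e;

		if (--p->pending_unmaps > 0)
			return;
		TAILQ_FOREACH(e, list, link) {
			if (e->peer == p) {
				TAILQ_REMOVE(list, e, link);
				free(e);
				break;
			}
		}
		free(p);
	}

	int main(void)
	{
		struct stale_list list = TAILQ_HEAD_INITIALIZER(list);
		struct peer *p = calloc(1, sizeof(*p));

		assert(p);
		p->pending_unmaps = 2;
		peer_release(&list, p);	/* deferred: parked on stale list */
		assert(!TAILQ_EMPTY(&list));
		peer_unmap(&list, p);	/* one unmap still pending */
		peer_unmap(&list, p);	/* last unmap frees entry and peer */
		assert(TAILQ_EMPTY(&list));
		return 0;
	}

Built on its own, main() walks one peer through the deferred path: the
release parks it on the stale list, and the final unmap event frees both
the list entry and the peer.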

Change-Id: Icf252612081a41f94db6df4684348f2962b2da9d
CRs-Fixed: 2238214
Author: Alok Kumar
Commit: bda73bb1f2

+ 56 - 1
core/dp/txrx/ol_txrx.c

@@ -1342,6 +1342,7 @@ ol_txrx_pdev_attach(ol_txrx_soc_handle soc, struct cdp_cfg *ctrl_pdev,
 	ol_txrx_tso_stats_init(pdev);
 
 	TAILQ_INIT(&pdev->vdev_list);
+	TAILQ_INIT(&pdev->roam_stale_peer_list);
 
 	TAILQ_INIT(&pdev->req_list);
 	pdev->req_list_depth = 0;
@@ -3460,6 +3461,36 @@ static inline void ol_txrx_peer_free_tids(ol_txrx_peer_handle peer)
 	}
 }
 
+bool ol_txrx_is_peer_eligible_for_deletion(ol_txrx_peer_handle peer)
+{
+	struct ol_txrx_vdev_t *vdev;
+	struct ol_txrx_pdev_t *pdev;
+	bool peerdel = true;
+	u_int16_t peer_id;
+	int i;
+
+	vdev = peer->vdev;
+	pdev = vdev->pdev;
+	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
+		peer_id = peer->peer_ids[i];
+
+		if (!pdev->peer_id_to_obj_map[peer_id].peer_ref)
+			continue;
+
+		if (pdev->peer_id_to_obj_map[peer_id].peer_ref != peer)
+			continue;
+
+		if (qdf_atomic_read(&pdev->peer_id_to_obj_map[peer_id].
+					del_peer_id_ref_cnt)) {
+			peerdel = false;
+			break;
+		}
+
+		pdev->peer_id_to_obj_map[peer_id].peer_ref = NULL;
+	}
+	return peerdel;
+}
+
 /**
  * ol_txrx_peer_release_ref() - release peer reference
  * @peer: peer handle
@@ -3657,7 +3688,31 @@ int ol_txrx_peer_release_ref(ol_txrx_peer_handle peer,
 
 		ol_txrx_dump_peer_access_list(peer);
 
-		qdf_mem_free(peer);
+		qdf_spin_lock_bh(&pdev->peer_map_unmap_lock);
+		if (ol_txrx_is_peer_eligible_for_deletion(peer)) {
+			qdf_mem_free(peer);
+		} else {
+			/*
+			 * Mark this PEER as a stale peer, to be deleted
+			 * during PEER UNMAP. Remove this peer from
+			 * roam_stale_peer_list during UNMAP.
+			 */
+			struct ol_txrx_roam_stale_peer_t *roam_stale_peer;
+
+			roam_stale_peer = qdf_mem_malloc(
+				sizeof(struct ol_txrx_roam_stale_peer_t));
+			if (roam_stale_peer) {
+				roam_stale_peer->peer = peer;
+				TAILQ_INSERT_TAIL(&pdev->roam_stale_peer_list,
+						  roam_stale_peer,
+						  next_stale_entry);
+			} else {
+				QDF_TRACE(QDF_MODULE_ID_TXRX,
+					  QDF_TRACE_LEVEL_ERROR,
+					  "No memory allocated");
+			}
+		}
+		qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);
 	} else {
 		access_list = qdf_atomic_read(
 						&peer->access_list[debug_id]);

+ 6 - 0
core/dp/txrx/ol_txrx.h

@@ -39,6 +39,12 @@ ol_txrx_peer_handle ol_txrx_peer_get_ref_by_addr(ol_txrx_pdev_handle pdev,
 
 int  ol_txrx_peer_release_ref(ol_txrx_peer_handle peer,
 			      enum peer_debug_id_type dbg_id);
+/* ol_txrx_is_peer_eligible_for_deletion() - check whether peer can be deleted
+ * @peer: peer handle
+ *
+ * Return: true if eligible for deletion, false otherwise
+ */
+bool ol_txrx_is_peer_eligible_for_deletion(ol_txrx_peer_handle peer);
 
 /**
  * ol_tx_desc_pool_size_hl() - allocate tx descriptor pool size for HL systems
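
Both call sites of ol_txrx_is_peer_eligible_for_deletion added by this
change take pdev->peer_map_unmap_lock before calling the helper, since it
inspects and clears the peer_id_to_obj_map[].peer_ref slots. A condensed
sketch of that locking contract (names from this patch; allocation of the
stale-list entry omitted):

	qdf_spin_lock_bh(&pdev->peer_map_unmap_lock);
	if (ol_txrx_is_peer_eligible_for_deletion(peer)) {
		qdf_mem_free(peer);	/* no unmap events outstanding */
	} else {
		/* defer: park the peer on pdev->roam_stale_peer_list so
		 * ol_rx_peer_unmap_handler frees it once the last
		 * outstanding unmap event drains del_peer_id_ref_cnt
		 */
	}
	qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);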

+ 65 - 0
core/dp/txrx/ol_txrx_peer_find.c

@@ -92,6 +92,41 @@ int ol_txrx_peer_get_ref(struct ol_txrx_peer_t *peer,
 	return refs_dbg_id;
 }
 
+/**
+ * ol_txrx_peer_delete_roam_stale_peer() - delete stale peers marked in roaming
+ * @pdev: pointer to pdev structure
+ *
+ * Return: none
+ */
+void ol_txrx_peer_delete_roam_stale_peer(struct ol_txrx_pdev_t *pdev)
+{
+	struct ol_txrx_peer_t *peer;
+	struct ol_txrx_roam_stale_peer_t *stale_peer;
+	struct ol_txrx_roam_stale_peer_t *stale_peer_next;
+	u_int16_t peer_id;
+	int i;
+
+	TAILQ_FOREACH_SAFE(stale_peer, &pdev->roam_stale_peer_list,
+			   next_stale_entry, stale_peer_next) {
+		peer = stale_peer->peer;
+		for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
+			peer_id = peer->peer_ids[i];
+
+			if (pdev->peer_id_to_obj_map[peer_id].peer_ref != peer)
+				continue;
+
+			pdev->peer_id_to_obj_map[peer_id].peer_ref = NULL;
+			qdf_atomic_set(&pdev->peer_id_to_obj_map[peer_id].
+				       del_peer_id_ref_cnt, 0);
+		}
+		qdf_mem_free(peer);
+		stale_peer->peer = NULL;
+		TAILQ_REMOVE(&pdev->roam_stale_peer_list, stale_peer,
+			     next_stale_entry);
+		qdf_mem_free(stale_peer);
+	}
+}
+
 /*=== function definitions for peer MAC addr --> peer object hash table =====*/
 
 /*
@@ -278,6 +313,7 @@ void ol_txrx_peer_find_hash_erase(struct ol_txrx_pdev_t *pdev)
 	 * Not really necessary to take peer_ref_mutex lock - by this point,
 	 * it's known that the pdev is no longer in use.
 	 */
+	ol_txrx_peer_delete_roam_stale_peer(pdev);
 
 	for (i = 0; i <= pdev->peer_hash.mask; i++) {
 		if (!TAILQ_EMPTY(&pdev->peer_hash.bins[i])) {
@@ -576,6 +612,8 @@ void ol_txrx_peer_tx_ready_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id)
 void ol_rx_peer_unmap_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id)
 {
 	struct ol_txrx_peer_t *peer;
+	struct ol_txrx_roam_stale_peer_t *stale_peer = NULL;
+	struct ol_txrx_roam_stale_peer_t *stale_peer_next = NULL;
 	int i = 0;
 	int32_t ref_cnt;
 
@@ -597,6 +635,26 @@ void ol_rx_peer_unmap_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id)
 					del_peer_id_ref_cnt);
 		ref_cnt = qdf_atomic_read(&pdev->peer_id_to_obj_map[peer_id].
 							del_peer_id_ref_cnt);
+
+		peer = pdev->peer_id_to_obj_map[peer_id].peer_ref;
+		if (peer && ol_txrx_is_peer_eligible_for_deletion(peer)) {
+			TAILQ_FOREACH_SAFE(stale_peer,
+					   &pdev->roam_stale_peer_list,
+					   next_stale_entry,
+					   stale_peer_next) {
+				if (stale_peer->peer == peer) {
+					stale_peer->peer = NULL;
+					break;
+				}
+			}
+			qdf_mem_free(peer);
+			if (stale_peer) {
+				TAILQ_REMOVE(&pdev->roam_stale_peer_list,
+					     stale_peer,
+					     next_stale_entry);
+				qdf_mem_free(stale_peer);
+			}
+		}
 		qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);
 		wlan_roam_debug_log(DEBUG_INVALID_VDEV_ID,
 				    DEBUG_PEER_UNMAP_EVENT,
@@ -714,7 +772,14 @@ void ol_txrx_peer_remove_obj_map_entries(ol_txrx_pdev_handle pdev,
 		num_deleted_maps += peer_id_ref_cnt;
 		pdev->peer_id_to_obj_map[peer_id].peer = NULL;
 		peer->peer_ids[i] = HTT_INVALID_PEER;
+
+		if (peer_id_ref_cnt)
+			pdev->peer_id_to_obj_map[peer_id].peer_ref = peer;
+		else
+			pdev->peer_id_to_obj_map[peer_id].peer_ref = NULL;
+
 	}
+
 	qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);
 
 	/* Debug print the information after releasing bh spinlock */
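
The stale-list walks in ol_txrx_peer_delete_roam_stale_peer and
ol_rx_peer_unmap_handler use TAILQ_FOREACH_SAFE because entries may be
freed mid-iteration; the _SAFE variant caches each successor before the
loop body runs. A standalone illustration of the same pattern built from
the basic <sys/queue.h> primitives (illustrative types, not driver code):

	#include <stdlib.h>
	#include <sys/queue.h>

	struct node {
		TAILQ_ENTRY(node) link;
	};

	TAILQ_HEAD(node_list, node);

	/* Remove and free every node: saving the successor before the free
	 * is exactly what TAILQ_FOREACH_SAFE's extra temporary does. */
	static void drain(struct node_list *list)
	{
		struct node *n = TAILQ_FIRST(list);

		while (n) {
			struct node *next = TAILQ_NEXT(n, link);

			TAILQ_REMOVE(list, n, link);
			free(n);
			n = next;
		}
	}

	int main(void)
	{
		struct node_list list = TAILQ_HEAD_INITIALIZER(list);
		struct node *n = malloc(sizeof(*n));

		if (n)
			TAILQ_INSERT_TAIL(&list, n, link);
		drain(&list);
		return 0;
	}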

+ 2 - 0
core/dp/txrx/ol_txrx_peer_find.h

@@ -115,6 +115,8 @@ ol_txrx_peer_find_hash_remove(struct ol_txrx_pdev_t *pdev,
 
 void ol_txrx_peer_find_hash_erase(struct ol_txrx_pdev_t *pdev);
 
+void ol_txrx_peer_delete_roam_stale_peer(struct ol_txrx_pdev_t *pdev);
+
 struct ol_txrx_peer_t *ol_txrx_assoc_peer_find(struct ol_txrx_vdev_t *vdev);
 void ol_txrx_peer_remove_obj_map_entries(ol_txrx_pdev_handle pdev,
 					struct ol_txrx_peer_t *peer);

+ 10 - 0
core/dp/txrx/ol_txrx_types.h

@@ -511,6 +511,7 @@ struct ol_tx_flow_pool_t {
 /*
  * struct ol_txrx_peer_id_map - Map of firmware peer_ids to peers on host
  * @peer: Pointer to peer object
+ * @peer_ref: Pointer to peer marked as stale
  * @peer_id_ref_cnt: No. of firmware references to the peer_id
  * @del_peer_id_ref_cnt: No. of outstanding unmap events for peer_id
  *                       after the peer object is deleted on the host.
@@ -519,6 +520,7 @@ struct ol_tx_flow_pool_t {
  */
 struct ol_txrx_peer_id_map {
 	struct ol_txrx_peer_t *peer;
+	struct ol_txrx_peer_t *peer_ref;
 	qdf_atomic_t peer_id_ref_cnt;
 	qdf_atomic_t del_peer_id_ref_cnt;
 };
@@ -656,6 +658,8 @@ struct ol_txrx_pdev_t {
 	int req_list_depth;
 	qdf_spinlock_t req_list_spinlock;
 
+	TAILQ_HEAD(, ol_txrx_roam_stale_peer_t) roam_stale_peer_list;
+
 	/* peer ID to peer object map (array of pointers to peer objects) */
 	struct ol_txrx_peer_id_map *peer_id_to_obj_map;
 
@@ -1206,6 +1210,12 @@ struct ol_txrx_cached_bufq_t {
 	uint32_t dropped;
 };
 
+struct ol_txrx_roam_stale_peer_t {
+	ol_txrx_peer_handle peer;
+
+	TAILQ_ENTRY(ol_txrx_roam_stale_peer_t) next_stale_entry;
+};
+
 struct ol_txrx_peer_t {
 	struct ol_txrx_vdev_t *vdev;