qcacld-3.0: Force delete peer entry during LFR3 roaming

While processing ROAM_OFFLOAD_SYNCH_IND, delete the peer entry
even if its reference count is non-zero, since firmware has
already deleted that peer. This allows a new peer to be added
without waiting for peer_unmap events. Also fix the logic error
in the timeout check in ol_txrx_peer_attach().

Change-Id: Ib028c29863d4e95ccac434f7d47bfedd59ef883f
CRs-Fixed: 1046754
Deepak Dhamdhere, 8 years ago
parent commit 363c6bc5bf
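
The core of the change is easiest to see in isolation: ol_txrx_peer_unref_delete() now frees the peer either when the last reference is dropped or when firmware no longer knows the peer (exists_in_fw cleared during LFR3 roam synch), even if other references are still outstanding. Below is a minimal, self-contained sketch of that decision, using C11 atomics in place of the qdf_atomic_* wrappers and a hypothetical peer_t stand-in for struct ol_txrx_peer_t:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for struct ol_txrx_peer_t, reduced to the
     * two fields the delete decision reads. */
    typedef struct {
    	atomic_int ref_cnt;
    	atomic_int exists_in_fw;  /* 0 once firmware has deleted the peer */
    } peer_t;

    /* Mirrors the new condition in ol_txrx_peer_unref_delete(): free on
     * the last unref, OR force-free when firmware has already deleted
     * the peer, regardless of the remaining reference count. */
    static bool peer_unref_delete(peer_t *peer)
    {
    	/* atomic_fetch_sub() returns the prior value; a prior value of 1
    	 * means this unref dropped the count to zero (dec_and_test). */
    	bool last_ref = (atomic_fetch_sub(&peer->ref_cnt, 1) == 1);

    	if (last_ref || !atomic_load(&peer->exists_in_fw)) {
    		printf("deleting peer, ref_cnt %d, exists_in_fw %d\n",
    		       atomic_load(&peer->ref_cnt),
    		       atomic_load(&peer->exists_in_fw));
    		free(peer);
    		return true;
    	}
    	return false;
    }

    int main(void)
    {
    	peer_t *peer = malloc(sizeof(*peer));

    	atomic_init(&peer->ref_cnt, 3);      /* still referenced locally */
    	atomic_init(&peer->exists_in_fw, 0); /* fw dropped it during roam */

    	/* Freed despite ref_cnt > 0, so a new peer with the same MAC can
    	 * be attached without waiting for peer_unmap events. */
    	peer_unref_delete(peer);
    	return 0;
    }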

+ 16 - 5
core/dp/txrx/ol_txrx.c

@@ -1730,6 +1730,7 @@ ol_txrx_vdev_attach(ol_txrx_pdev_handle pdev,
 		    uint8_t vdev_id, enum wlan_op_mode op_mode)
 {
 	struct ol_txrx_vdev_t *vdev;
+	QDF_STATUS qdf_status;
 
 	/* preconditions */
 	TXRX_ASSERT2(pdev);
@@ -1789,6 +1790,9 @@ ol_txrx_vdev_attach(ol_txrx_pdev_handle pdev,
 	/* Default MAX Q depth for every VDEV */
 	vdev->ll_pause.max_q_depth =
 		ol_tx_cfg_max_tx_queue_depth_ll(vdev->pdev->ctrl_pdev);
+
+	qdf_status = qdf_event_create(&vdev->wait_delete_comp);
+
 	/* add this vdev into the pdev's list */
 	TAILQ_INSERT_TAIL(&pdev->vdev_list, vdev, vdev_list_elem);
 
@@ -2092,7 +2096,7 @@ ol_txrx_peer_attach(ol_txrx_vdev_handle vdev, uint8_t *peer_mac_addr)
 				peer_mac_addr[4], peer_mac_addr[5]);
 			if (qdf_atomic_read(&temp_peer->delete_in_progress)) {
 				vdev->wait_on_peer_id = temp_peer->local_id;
-				qdf_event_create(&vdev->wait_delete_comp);
+				qdf_event_reset(&vdev->wait_delete_comp);
 				wait_on_deletion = true;
 			} else {
 				qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
@@ -2106,7 +2110,7 @@ ol_txrx_peer_attach(ol_txrx_vdev_handle vdev, uint8_t *peer_mac_addr)
 		/* wait for peer deletion */
 		rc = qdf_wait_single_event(&vdev->wait_delete_comp,
 					   PEER_DELETION_TIMEOUT);
-		if (!rc) {
+		if (QDF_STATUS_E_TIMEOUT == rc) {
 			TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
 				"timedout waiting for peer(%d) deletion\n",
 				vdev->wait_on_peer_id);
@@ -2149,6 +2153,7 @@ ol_txrx_peer_attach(ol_txrx_vdev_handle vdev, uint8_t *peer_mac_addr)
 
 	qdf_atomic_init(&peer->delete_in_progress);
 	qdf_atomic_init(&peer->flush_in_progress);
+	qdf_atomic_init(&peer->exists_in_fw);
 
 	qdf_atomic_init(&peer->ref_cnt);
 
@@ -2767,15 +2772,18 @@ void ol_txrx_peer_unref_delete(ol_txrx_peer_handle peer)
 	 * concurrently with the empty check.
 	 */
 	qdf_spin_lock_bh(&pdev->peer_ref_mutex);
-	if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
+	if (qdf_atomic_dec_and_test(&peer->ref_cnt) ||
+		!qdf_atomic_read(&peer->exists_in_fw)) {
 		u_int16_t peer_id;
 
 		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
-			   "Deleting peer %p (%02x:%02x:%02x:%02x:%02x:%02x)",
+			   "Deleting peer %p (%02x:%02x:%02x:%02x:%02x:%02x) ref_cnt %d, exists_in_fw %d\n",
 			   peer,
 			   peer->mac_addr.raw[0], peer->mac_addr.raw[1],
 			   peer->mac_addr.raw[2], peer->mac_addr.raw[3],
-			   peer->mac_addr.raw[4], peer->mac_addr.raw[5]);
+			   peer->mac_addr.raw[4], peer->mac_addr.raw[5],
+			   qdf_atomic_read(&peer->ref_cnt),
+			   qdf_atomic_read(&peer->exists_in_fw));
 
 		peer_id = peer->local_id;
 		/* remove the reference to the peer from the hash table */
@@ -2861,6 +2869,9 @@ void ol_txrx_peer_unref_delete(ol_txrx_peer_handle peer)
 			}
 		}
 
+		/* remove references to this peer in peer_id_to_obj_map */
+		ol_txrx_peer_remove_obj_map_entries(pdev, peer);
+
 		qdf_mem_free(peer);
 	} else {
 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,

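Two smaller fixes in this file are worth spelling out. First, the wait_delete_comp event is now created once per vdev in ol_txrx_vdev_attach() and only reset in ol_txrx_peer_attach(); the old code re-created the event on every colliding attach, re-initializing an object the completing side might already be signalling. Second, qdf_wait_single_event() returns QDF_STATUS_SUCCESS, which is 0, so the old "if (!rc)" branch was taken exactly when the wait succeeded, and a real timeout went unlogged. A minimal sketch of that inverted check, with hypothetical stand-ins for the QDF status codes and the wait call:

    #include <stdio.h>

    /* Hypothetical stand-ins for the QDF status codes; the point is
     * only that the success code is 0, as QDF_STATUS_SUCCESS is. */
    typedef enum {
    	STATUS_SUCCESS   = 0,
    	STATUS_E_TIMEOUT = 1,
    } status_t;

    /* Stand-in for qdf_wait_single_event(); pretend the wait timed out. */
    static status_t wait_single_event(void)
    {
    	return STATUS_E_TIMEOUT;
    }

    int main(void)
    {
    	status_t rc = wait_single_event();

    	/* Old check: true only when rc == STATUS_SUCCESS (0), i.e. it
    	 * reported "timedout" exactly when the event DID fire. */
    	if (!rc)
    		printf("old check: bogus timeout logged on success\n");

    	/* Fixed check: compare explicitly against the timeout code. */
    	if (rc == STATUS_E_TIMEOUT)
    		printf("fixed check: real timeout detected\n");

    	return 0;
    }
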
+ 27 - 0
core/dp/txrx/ol_txrx_peer_find.c

@@ -584,6 +584,33 @@ struct ol_txrx_peer_t *ol_txrx_assoc_peer_find(struct ol_txrx_vdev_t *vdev)
 	return peer;
 }
 
+/**
+ * ol_txrx_peer_remove_obj_map_entries() - Remove matching pdev peer map entries
+ *
+ * @pdev: pdev handle
+ * @peer: peer to be removed
+ *
+ * @Return: None
+ */
+void ol_txrx_peer_remove_obj_map_entries(ol_txrx_pdev_handle pdev,
+				     struct ol_txrx_peer_t *peer)
+{
+	int i;
+	uint16_t peer_id;
+
+	for (i = 0; i < MAX_NUM_PEER_ID_PER_PEER; i++) {
+		peer_id = peer->peer_ids[i];
+		if (peer_id != HTT_INVALID_PEER) {
+			QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_LOW,
+				  FL("remove map entry for peer_id = %d"),
+				  peer_id);
+			pdev->peer_id_to_obj_map[peer_id].peer = NULL;
+			qdf_atomic_init
+			  (&pdev->peer_id_to_obj_map[peer_id].peer_id_ref_cnt);
+		}
+	}
+}
+
 /*=== function definitions for debug ========================================*/
 
 #if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5

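A note on why this helper is needed: once a peer can be freed before its peer_unmap events arrive, the pdev->peer_id_to_obj_map slots recorded in peer->peer_ids[] would otherwise keep pointing at freed memory, and a later lookup by peer_id (e.g. on an rx indication or a late unmap) could dereference a dangling pointer. Clearing each still-valid slot and its peer_id_ref_cnt at free time closes that window.
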
+ 3 - 0
core/dp/txrx/ol_txrx_peer_find.h

@@ -107,6 +107,9 @@ void ol_txrx_peer_find_hash_erase(struct ol_txrx_pdev_t *pdev);
 
 struct ol_txrx_peer_t *ol_txrx_assoc_peer_find(struct ol_txrx_vdev_t *vdev);
 
+void ol_txrx_peer_remove_obj_map_entries(ol_txrx_pdev_handle pdev,
+				     struct ol_txrx_peer_t *peer);
+
 #if defined(TXRX_DEBUG_LEVEL) && TXRX_DEBUG_LEVEL > 5
 void ol_txrx_peer_find_display(ol_txrx_pdev_handle pdev, int indent);
 #else

+ 1 - 0
core/dp/txrx/ol_txrx_types.h

@@ -1206,6 +1206,7 @@ struct ol_txrx_peer_t {
 	qdf_time_t last_assoc_rcvd;
 	qdf_time_t last_disassoc_rcvd;
 	qdf_time_t last_deauth_rcvd;
+	qdf_atomic_t exists_in_fw;
 };
 
 enum ol_rx_err_type {

+ 5 - 1
core/wma/src/wma_dev_if.c

@@ -1058,6 +1058,9 @@ void wma_remove_peer(tp_wma_handle wma, uint8_t *bssid,
 			wma->interfaces[vdev_id].peer_count);
 		return;
 	}
+	if (roam_synch_in_progress)
+		qdf_atomic_set(&peer->exists_in_fw, 0);
+
 	if (peer)
 		ol_txrx_peer_detach(peer);
 
@@ -1126,7 +1129,7 @@ QDF_STATUS wma_create_peer(tp_wma_handle wma, ol_txrx_pdev_handle pdev,
 		goto err;
 	}
 	if (roam_synch_in_progress) {
-
+		qdf_atomic_set(&peer->exists_in_fw, 1);
 		WMA_LOGE("%s: LFR3: Created peer %p with peer_addr %pM vdev_id %d,"
 			 "peer_count - %d",
 			 __func__, peer, peer_addr, vdev_id,
@@ -1147,6 +1150,7 @@ QDF_STATUS wma_create_peer(tp_wma_handle wma, ol_txrx_pdev_handle pdev,
 		  peer_addr, vdev_id,
 		  wma->interfaces[vdev_id].peer_count);
 
+	qdf_atomic_set(&peer->exists_in_fw, 1);
 	mac_addr_raw = ol_txrx_get_vdev_mac_addr(vdev);
 	if (mac_addr_raw == NULL) {
 		WMA_LOGE("%s: peer mac addr is NULL", __func__);
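
Taken together, the WMA changes give exists_in_fw a simple lifecycle: it is set when wma_create_peer() registers the peer with firmware, and cleared in wma_remove_peer() when roam_synch_in_progress indicates firmware has already dropped the peer on its own. A compact sketch of that lifecycle, reusing the hypothetical peer_t from the first example:

    #include <stdatomic.h>
    #include <stdbool.h>

    typedef struct {
    	atomic_int ref_cnt;
    	atomic_int exists_in_fw;
    } peer_t;

    /* wma_create_peer() path: the peer is now known to firmware. */
    static void on_create_peer(peer_t *peer)
    {
    	atomic_store(&peer->exists_in_fw, 1);
    }

    /* wma_remove_peer() path: during LFR3 roam synch, firmware deleted
     * the peer on its own, so mark it gone before detaching. The later
     * unref-delete then frees it even with references outstanding. */
    static void on_remove_peer(peer_t *peer, bool roam_synch_in_progress)
    {
    	if (roam_synch_in_progress)
    		atomic_store(&peer->exists_in_fw, 0);
    }

    int main(void)
    {
    	peer_t peer;

    	atomic_init(&peer.ref_cnt, 1);
    	atomic_init(&peer.exists_in_fw, 0);
    	on_create_peer(&peer);        /* exists_in_fw = 1 */
    	on_remove_peer(&peer, true);  /* LFR3 roam: exists_in_fw = 0 */
    	return 0;
    }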