
qcacmn: Acquire lock while accessing rx tid in rx defrag store fragment

Currently, we flush fragments for a peer as part of rx defrag
cleanup. There is a possibility that the frag flush is called while
we are accessing frags in the rx defrag store fragment path in
parallel, which would cause a use-after-free.

Add a lock in the rx defrag store fragment path to avoid any
parallel flush while the frags are being accessed (see the sketch
below).

Change-Id: Ic27caab58429d776449f4b774eb7163ffa6215ac
CRs-Fixed: 2995156
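
For context, the sketch below models the race this change closes, in plain C
with pthreads. It is a sketch only: struct rx_tid, frag_flush() and
store_fragment() are hypothetical stand-ins for the driver's dp_rx_tid, the
flush done in dp_rx_defrag_cleanup(), and dp_rx_defrag_store_fragment(),
which the diff serializes on rx_tid->tid_lock with
qdf_spin_lock_bh()/qdf_spin_unlock_bh().

#include <pthread.h>
#include <stdlib.h>

/* Hypothetical stand-ins, not the driver's types. */
struct frag { struct frag *next; };

struct rx_tid {
	pthread_mutex_t tid_lock;	/* models rx_tid->tid_lock */
	struct frag *head;		/* models rx_reorder_array_elem->head */
};

/* Models the flush done during rx defrag cleanup: frees every queued frag. */
static void frag_flush(struct rx_tid *t)
{
	pthread_mutex_lock(&t->tid_lock);
	while (t->head) {
		struct frag *f = t->head;

		t->head = f->next;
		free(f);
	}
	pthread_mutex_unlock(&t->tid_lock);
}

/* Models the store fragment path: inserts a frag into the per-tid list.
 * Without the lock, a parallel frag_flush() could free the node this
 * function is about to link against -- the use-after-free described above. */
static void store_fragment(struct rx_tid *t, struct frag *f)
{
	pthread_mutex_lock(&t->tid_lock);
	f->next = t->head;
	t->head = f;
	pthread_mutex_unlock(&t->tid_lock);
}

int main(void)
{
	struct rx_tid t = { PTHREAD_MUTEX_INITIALIZER, NULL };
	struct frag *f = calloc(1, sizeof(*f));

	if (f)
		store_fragment(&t, f);
	frag_flush(&t);		/* safe: both paths hold tid_lock */
	return 0;
}

Both paths taking the same per-tid lock is the essence of the fix; the
actual patch uses qdf_spin_lock_bh(), a bottom-half-safe spinlock, since
these rx paths can run in bottom-half context.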
Amit Mehta, 3 years ago · commit e1bfd2dbdc
1 file changed, 8 insertions(+), 0 deletions(-):
  1. dp/wifi3.0/dp_rx_defrag.c (+8, -0)

dp/wifi3.0/dp_rx_defrag.c (+8, -0)

@@ -1457,6 +1457,9 @@ static QDF_STATUS dp_rx_defrag(struct dp_peer *peer, unsigned tid,
 	struct dp_soc *soc = vdev->pdev->soc;
 	uint8_t status = 0;
 
+	if (!cur)
+		return QDF_STATUS_E_DEFRAG_ERROR;
+
 	hdr_space = dp_rx_defrag_hdrsize(soc, cur);
 	index = hal_rx_msdu_is_wlan_mcast(soc->hal_soc, cur) ?
 		dp_sec_mcast : dp_sec_ucast;
@@ -1828,9 +1831,11 @@ dp_rx_defrag_store_fragment(struct dp_soc *soc,
 	 * If the earlier sequence was dropped, this will be the fresh start.
 	 * Else, continue with next fragment in a given sequence
 	 */
+	qdf_spin_lock_bh(&rx_tid->tid_lock);
 	status = dp_rx_defrag_fraglist_insert(peer, tid, &rx_reorder_array_elem->head,
 			&rx_reorder_array_elem->tail, frag,
 			&all_frag_present);
+	qdf_spin_unlock_bh(&rx_tid->tid_lock);
 
 	/*
 	 * Currently, we can have only 6 MSDUs per-MPDU, if the current
@@ -1884,6 +1889,7 @@ dp_rx_defrag_store_fragment(struct dp_soc *soc,
 		  "All fragments received for sequence: %d", rxseq);
 		  "All fragments received for sequence: %d", rxseq);
 
 
 	/* Process the fragments */
 	/* Process the fragments */
+	qdf_spin_lock_bh(&rx_tid->tid_lock);
 	status = dp_rx_defrag(peer, tid, rx_reorder_array_elem->head,
 	status = dp_rx_defrag(peer, tid, rx_reorder_array_elem->head,
 		rx_reorder_array_elem->tail);
 		rx_reorder_array_elem->tail);
 	if (QDF_IS_STATUS_ERROR(status)) {
 	if (QDF_IS_STATUS_ERROR(status)) {
@@ -1902,12 +1908,14 @@ dp_rx_defrag_store_fragment(struct dp_soc *soc,
 					"%s: Failed to return link desc",
 					"%s: Failed to return link desc",
 					__func__);
 					__func__);
 		dp_rx_defrag_cleanup(peer, tid);
 		dp_rx_defrag_cleanup(peer, tid);
+		qdf_spin_unlock_bh(&rx_tid->tid_lock);
 		goto end;
 		goto end;
 	}
 	}
 
 
 	/* Re-inject the fragments back to REO for further processing */
 	/* Re-inject the fragments back to REO for further processing */
 	status = dp_rx_defrag_reo_reinject(peer, tid,
 	status = dp_rx_defrag_reo_reinject(peer, tid,
 			rx_reorder_array_elem->head);
 			rx_reorder_array_elem->head);
+	qdf_spin_unlock_bh(&rx_tid->tid_lock);
 	if (QDF_IS_STATUS_SUCCESS(status)) {
 	if (QDF_IS_STATUS_SUCCESS(status)) {
 		rx_reorder_array_elem->head = NULL;
 		rx_reorder_array_elem->head = NULL;
 		rx_reorder_array_elem->tail = NULL;
 		rx_reorder_array_elem->tail = NULL;