
qcacld-3.0: fix qdf_nbuf_unmap_segment crash

Make the following changes to fix the issue:
- Wrap TSO seg unmap code inside a spinlock
- Add TSO descriptor DUP detection logic before unmapping TSO segs
- De-initialize TSO seg freepool after in-use Tx descriptors have been
  de-inited

Change-Id: I63b100879b302e4919c2952143509e76c14b36ec
CRs-Fixed: 2018317
Mohit Khanna, 8 years ago
parent / commit 54f3a38c61
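
As a quick illustration of the first bullet in the commit message ("Wrap TSO seg unmap code inside a spinlock"), the stand-alone sketch below shows the decrement-under-lock pattern in isolation: all segments carved out of one jumbo skb share a segment count, and the decrement, the "is this the last segment?" decision and the unmap have to happen as one atomic step. The struct tso_common type, the unmap_segment() helper and the pthread mutex are illustrative stand-ins, not the driver's qdf/ol APIs (the real code uses tso_seg_pool.tso_mutex and qdf_nbuf_unmap_tso_segment()).

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Shared by all TSO segments carved out of one jumbo skb. */
struct tso_common {
	int num_seg;          /* segments whose completion is still pending */
	pthread_mutex_t lock; /* stands in for tso_seg_pool.tso_mutex */
};

static void unmap_segment(int seg_id, bool last)
{
	/* frag[1] is unmapped per segment; the shared EIT header (frag[0])
	 * must be unmapped exactly once, together with the last segment. */
	printf("unmap seg %d%s\n", seg_id, last ? " + EIT header" : "");
}

/* May be called from concurrent TX-completion / flush contexts. */
static void tso_seg_done(struct tso_common *cmn, int seg_id)
{
	bool last;

	/* The decrement, the last-segment decision and the unmap form one
	 * critical section; otherwise two contexts can both (or neither)
	 * decide to unmap the shared header. */
	pthread_mutex_lock(&cmn->lock);
	cmn->num_seg--;
	last = (cmn->num_seg == 0);
	unmap_segment(seg_id, last);
	pthread_mutex_unlock(&cmn->lock);
}

int main(void)
{
	struct tso_common cmn = {
		.num_seg = 3,
		.lock = PTHREAD_MUTEX_INITIALIZER,
	};

	for (int i = 0; i < 3; i++)
		tso_seg_done(&cmn, i);
	return 0;
}

Built as a normal user-space program (gcc -pthread), the three calls print one unmap line per segment and only the last one touches the shared EIT header.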

+ 90 - 35
core/dp/txrx/ol_tx_desc.c

@@ -305,37 +305,75 @@ ol_tx_desc_vdev_rm(struct ol_tx_desc_t *tx_desc)
 #endif
 
 #ifdef FEATURE_TSO
-static void ol_tx_tso_desc_free(struct ol_txrx_pdev_t *pdev,
-				struct ol_tx_desc_t *tx_desc)
+/**
+ * ol_tso_unmap_tso_segment() - Unmap TSO segment
+ * @pdev: pointer to ol_txrx_pdev_t structure
+ * @tx_desc: pointer to ol_tx_desc_t containing the TSO segment
+ *
+ * Unmap TSO segment (frag[1]). If it is the last TSO segment corresponding to
+ * the nbuf, also unmap the EIT header (frag[0]).
+ *
+ * Return: None
+ */
+static void ol_tso_unmap_tso_segment(struct ol_txrx_pdev_t *pdev,
+						struct ol_tx_desc_t *tx_desc)
 {
+	bool is_last_seg = false;
+	struct qdf_tso_num_seg_elem_t *tso_num_desc = NULL;
+
 	if (qdf_unlikely(tx_desc->tso_desc == NULL)) {
 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
 			  "%s %d TSO desc is NULL!",
 			  __func__, __LINE__);
 		qdf_assert(0);
+		return;
 	} else if (qdf_unlikely(tx_desc->tso_num_desc == NULL)) {
 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
 			  "%s %d TSO common info is NULL!",
 			  __func__, __LINE__);
 		qdf_assert(0);
-	} else {
-		struct qdf_tso_num_seg_elem_t *tso_num_desc =
-			(struct qdf_tso_num_seg_elem_t *)tx_desc->tso_num_desc;
-		if (tso_num_desc->num_seg.tso_cmn_num_seg > 1) {
-			tso_num_desc->num_seg.tso_cmn_num_seg--;
-			qdf_nbuf_unmap_tso_segment(pdev->osdev,
-						   tx_desc->tso_desc, false);
-		} else {
-			tso_num_desc->num_seg.tso_cmn_num_seg--;
-			qdf_assert(tso_num_desc->num_seg.tso_cmn_num_seg == 0);
-			qdf_nbuf_unmap_tso_segment(pdev->osdev,
-						   tx_desc->tso_desc, true);
-			ol_tso_num_seg_free(pdev, tx_desc->tso_num_desc);
-			tx_desc->tso_num_desc = NULL;
-		}
-		ol_tso_free_segment(pdev, tx_desc->tso_desc);
-		tx_desc->tso_desc = NULL;
+		return;
+	}
+
+	tso_num_desc = tx_desc->tso_num_desc;
+
+	qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
+
+	tso_num_desc->num_seg.tso_cmn_num_seg--;
+	is_last_seg = (tso_num_desc->num_seg.tso_cmn_num_seg == 0) ?
+								true : false;
+	qdf_nbuf_unmap_tso_segment(pdev->osdev, tx_desc->tso_desc, is_last_seg);
+
+	qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
+
+}
+
+/**
+ * ol_tx_tso_desc_free() - Add TSO TX descs back to the freelist
+ * @pdev: pointer to ol_txrx_pdev_t structure
+ * @tx_desc: pointer to ol_tx_desc_t containing the TSO segment
+ *
+ * Add the qdf_tso_seg_elem_t corresponding to the TSO seg back to the
+ * freelist. If it is the last segment of the jumbo skb, also add the
+ * qdf_tso_num_seg_elem_t to the freelist.
+ *
+ * Return: None
+ */
+static void ol_tx_tso_desc_free(struct ol_txrx_pdev_t *pdev,
+				struct ol_tx_desc_t *tx_desc)
+{
+	bool is_last_seg;
+	struct qdf_tso_num_seg_elem_t *tso_num_desc = tx_desc->tso_num_desc;
+
+	is_last_seg = (tso_num_desc->num_seg.tso_cmn_num_seg == 0) ?
+								true : false;
+	if (is_last_seg) {
+		ol_tso_num_seg_free(pdev, tx_desc->tso_num_desc);
+		tx_desc->tso_num_desc = NULL;
 	}
+
+	ol_tso_free_segment(pdev, tx_desc->tso_desc);
+	tx_desc->tso_desc = NULL;
 }
 
 #else
@@ -344,20 +382,26 @@ static inline void ol_tx_tso_desc_free(struct ol_txrx_pdev_t *pdev,
 {
 }
 
+static inline void ol_tso_unmap_tso_segment(
+					struct ol_txrx_pdev_t *pdev,
+					struct ol_tx_desc_t *tx_desc)
+{
+}
 #endif
 
-#ifndef QCA_LL_TX_FLOW_CONTROL_V2
 /**
- * ol_tx_desc_free() - put descriptor to freelist
+ * ol_tx_desc_free_common() - common code to free tx_desc for all flow ctl vers
  * @pdev: pdev handle
  * @tx_desc: tx descriptor
  *
+ * Common code needed by both QCA_LL_TX_FLOW_CONTROL_V2 and older versions of
+ * flow control. Must be called with the caller's descriptor spinlock held.
+ *
  * Return: None
  */
-void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
+static void ol_tx_desc_free_common(struct ol_txrx_pdev_t *pdev,
+						struct ol_tx_desc_t *tx_desc)
 {
-	qdf_spin_lock_bh(&pdev->tx_mutex);
-
 	ol_tx_desc_dup_detect_reset(pdev, tx_desc);
 
 	if (tx_desc->pkt_type == OL_TX_FRM_TSO)
@@ -365,10 +409,25 @@ void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
 
 	ol_tx_desc_reset_pkt_type(tx_desc);
 	ol_tx_desc_reset_timestamp(tx_desc);
+	tx_desc->vdev_id = OL_TXRX_INVALID_VDEV_ID;
+}
+
+#ifndef QCA_LL_TX_FLOW_CONTROL_V2
+/**
+ * ol_tx_desc_free() - put descriptor to freelist
+ * @pdev: pdev handle
+ * @tx_desc: tx descriptor
+ *
+ * Return: None
+ */
+void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
+{
+	qdf_spin_lock_bh(&pdev->tx_mutex);
+
+	ol_tx_desc_free_common(pdev, tx_desc);
 
 	ol_tx_put_desc_global_pool(pdev, tx_desc);
 	ol_tx_desc_vdev_rm(tx_desc);
-	tx_desc->vdev_id = OL_TXRX_INVALID_VDEV_ID;
 
 	qdf_spin_unlock_bh(&pdev->tx_mutex);
 }
@@ -385,14 +444,9 @@ void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
 {
 	struct ol_tx_flow_pool_t *pool = tx_desc->pool;
 
-	if (tx_desc->pkt_type == OL_TX_FRM_TSO)
-		ol_tx_tso_desc_free(pdev, tx_desc);
-
-	ol_tx_desc_reset_pkt_type(tx_desc);
-	ol_tx_desc_reset_timestamp(tx_desc);
-
 	qdf_spin_lock_bh(&pool->flow_pool_lock);
-	ol_tx_desc_dup_detect_reset(pdev, tx_desc);
+
+	ol_tx_desc_free_common(pdev, tx_desc);
 	ol_tx_put_desc_flow_pool(pool, tx_desc);
 	switch (pool->status) {
 	case FLOW_POOL_ACTIVE_PAUSED:
@@ -420,8 +474,6 @@ void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
 		break;
 	};
 
-	tx_desc->vdev_id = OL_TXRX_INVALID_VDEV_ID;
-
 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
 
 }
@@ -704,7 +756,10 @@ void ol_tx_desc_frame_free_nonstd(struct ol_txrx_pdev_t *pdev,
 		}
 		/* let the code below unmap and free the frame */
 	}
-	qdf_nbuf_unmap(pdev->osdev, tx_desc->netbuf, QDF_DMA_TO_DEVICE);
+	if (tx_desc->pkt_type == OL_TX_FRM_TSO)
+		ol_tso_unmap_tso_segment(pdev, tx_desc);
+	else
+		qdf_nbuf_unmap(pdev->osdev, tx_desc->netbuf, QDF_DMA_TO_DEVICE);
 	/* check the frame type to see what kind of special steps are needed */
 	if ((tx_desc->pkt_type >= OL_TXRX_MGMT_TYPE_BASE) &&
 		   (tx_desc->pkt_type != ol_tx_frm_freed)) {
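The ol_tx_desc.c hunks above do two things: they dispatch the unmap in ol_tx_desc_frame_free_nonstd() by packet type (TSO frames go through ol_tso_unmap_tso_segment() instead of qdf_nbuf_unmap()), and they hoist the teardown shared by both flow-control builds into ol_tx_desc_free_common(), which each variant now calls under its own lock. A compressed sketch of that second shape follows; desc_free_common(), legacy_free(), pool_free() and the pthread mutexes are stand-ins for illustration, not the driver's symbols.

#include <pthread.h>

struct tx_desc { int pkt_type; int vdev_id; };
struct pdev    { pthread_mutex_t tx_mutex; };       /* legacy: one global lock */
struct pool    { pthread_mutex_t flow_pool_lock; }; /* v2: per-pool lock */

enum { INVALID_VDEV_ID = -1 };

/* Shared teardown; the caller must already hold its own descriptor lock. */
static void desc_free_common(struct tx_desc *d)
{
	/* dup-detect reset, TSO seg/num-seg free, pkt-type and timestamp
	 * resets would go here, mirroring ol_tx_desc_free_common() */
	d->pkt_type = 0;
	d->vdev_id = INVALID_VDEV_ID;
}

/* legacy flow control: protected by the global tx_mutex */
static void legacy_free(struct pdev *p, struct tx_desc *d)
{
	pthread_mutex_lock(&p->tx_mutex);
	desc_free_common(d);
	/* ... return the descriptor to the global pool ... */
	pthread_mutex_unlock(&p->tx_mutex);
}

/* flow-control v2: protected by the per-pool flow_pool_lock */
static void pool_free(struct pool *fp, struct tx_desc *d)
{
	pthread_mutex_lock(&fp->flow_pool_lock);
	desc_free_common(d);
	/* ... return the descriptor to the flow pool, adjust pool state ... */
	pthread_mutex_unlock(&fp->flow_pool_lock);
}

int main(void)
{
	struct pdev p = { .tx_mutex = PTHREAD_MUTEX_INITIALIZER };
	struct pool fp = { .flow_pool_lock = PTHREAD_MUTEX_INITIALIZER };
	struct tx_desc d = { .pkt_type = 1, .vdev_id = 5 };

	legacy_free(&p, &d);
	pool_free(&fp, &d);
	return 0;
}

The design point is simply that the shared steps run exactly once per descriptor and always under whichever lock the build variant uses.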

+ 53 - 33
core/dp/txrx/ol_txrx.c

@@ -1811,6 +1811,50 @@ static A_STATUS ol_txrx_pdev_attach_target(struct cdp_pdev *ppdev)
 	return htt_attach_target(pdev->htt_pdev) == QDF_STATUS_SUCCESS ? 0:1;
 }
 
+/**
+ * ol_tx_free_descs_inuse() - free tx descriptors which are in use
+ * @pdev: the physical device for which tx descs need to be freed
+ *
+ * Cycle through the pdev's TX descriptors which are still in use, i.e. for
+ * which no TX completion has been received, and free them. Should be called
+ * only when interrupts are off and all lower-layer RX is stopped; otherwise
+ * there may be a race condition with TX completions.
+ *
+ * Return: None
+ */
+static void ol_tx_free_descs_inuse(ol_txrx_pdev_handle pdev)
+{
+	int i;
+	void *htt_tx_desc;
+	struct ol_tx_desc_t *tx_desc;
+	int num_freed_tx_desc = 0;
+
+	for (i = 0; i < pdev->tx_desc.pool_size; i++) {
+		tx_desc = ol_tx_desc_find(pdev, i);
+		/*
+		 * Confirm that each tx descriptor is "empty", i.e. it has
+		 * no tx frame attached.
+		 * In particular, check that there are no frames that have
+		 * been given to the target to transmit, for which the
+		 * target has never provided a response.
+		 */
+		if (qdf_atomic_read(&tx_desc->ref_cnt)) {
+			ol_txrx_dbg("Warning: freeing tx frame (no compltn)");
+			ol_tx_desc_frame_free_nonstd(pdev,
+						     tx_desc, 1);
+			num_freed_tx_desc++;
+		}
+		htt_tx_desc = tx_desc->htt_tx_desc;
+		htt_tx_desc_free(pdev->htt_pdev, htt_tx_desc);
+	}
+
+	if (num_freed_tx_desc)
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
+		"freed %d tx frames for which no resp from target",
+		num_freed_tx_desc);
+
+}
+
 /**
  * ol_txrx_pdev_pre_detach() - detach the data SW state
  * @pdev - the data physical device object being removed
@@ -1826,8 +1870,6 @@ static A_STATUS ol_txrx_pdev_attach_target(struct cdp_pdev *ppdev)
 static void ol_txrx_pdev_pre_detach(struct cdp_pdev *ppdev, int force)
 {
 	struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
-	int i;
-	int num_freed_tx_desc = 0;
 
 	/* preconditions */
 	TXRX_ASSERT2(pdev);
@@ -1844,8 +1886,6 @@ static void ol_txrx_pdev_pre_detach(struct cdp_pdev *ppdev, int force)
 	qdf_timer_free(&pdev->tx_throttle.tx_timer);
 #endif
 #endif
-	ol_tso_seg_list_deinit(pdev);
-	ol_tso_num_seg_list_deinit(pdev);
 
 	if (force) {
 		/*
@@ -1867,37 +1907,17 @@ static void ol_txrx_pdev_pre_detach(struct cdp_pdev *ppdev, int force)
 
 	/* to get flow pool status before freeing descs */
 	ol_tx_dump_flow_pool_info((void *)pdev);
+	ol_tx_free_descs_inuse(pdev);
+	ol_tx_deregister_flow_control(pdev);
 
-	for (i = 0; i < pdev->tx_desc.pool_size; i++) {
-		void *htt_tx_desc;
-		struct ol_tx_desc_t *tx_desc;
-
-		tx_desc = ol_tx_desc_find(pdev, i);
-		/*
-		 * Confirm that each tx descriptor is "empty", i.e. it has
-		 * no tx frame attached.
-		 * In particular, check that there are no frames that have
-		 * been given to the target to transmit, for which the
-		 * target has never provided a response.
-		 */
-		if (qdf_atomic_read(&tx_desc->ref_cnt) &&
-				tx_desc->vdev_id != OL_TXRX_INVALID_VDEV_ID) {
-			ol_txrx_dbg(
-				   "Warning: freeing tx frame (no compltn)\n");
-			ol_tx_desc_frame_free_nonstd(pdev,
-						     tx_desc, 1);
-			num_freed_tx_desc++;
-		}
-		htt_tx_desc = tx_desc->htt_tx_desc;
-		htt_tx_desc_free(pdev->htt_pdev, htt_tx_desc);
-	}
-
-	if (num_freed_tx_desc)
-		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
-		"freed %d tx frames for which no resp from target",
-		num_freed_tx_desc);
+	/*
+	 * ol_tso_seg_list_deinit should happen after ol_tx_free_descs_inuse,
+	 * since the latter accesses the tso seg freelist that is
+	 * de-initialized by ol_tso_seg_list_deinit
+	 */
+	ol_tso_seg_list_deinit(pdev);
+	ol_tso_num_seg_list_deinit(pdev);
 
-	ol_tx_deregister_flow_control(pdev);
 	/* Stop the communication between HTT and target at first */
 	htt_detach_target(pdev->htt_pdev);
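
The ol_txrx.c change implements the third bullet of the commit message: in-flight descriptors may still hold TSO segments, so ol_tso_seg_list_deinit()/ol_tso_num_seg_list_deinit() must run only after ol_tx_free_descs_inuse() has returned those segments to the freelists. Below is a minimal stand-alone sketch of that ordering constraint; seg_pool, pool_init(), pool_put() and pool_deinit() are illustrative names, not driver APIs.

#include <stdio.h>
#include <stdlib.h>

struct seg_pool {
	int *slots;
	int  capacity;
	int  count;
};

static struct seg_pool *pool_init(int capacity)
{
	struct seg_pool *p = calloc(1, sizeof(*p));

	p->slots = calloc(capacity, sizeof(*p->slots));
	p->capacity = capacity;
	return p;
}

/* Freeing an in-use descriptor returns its segment to the pool, so the
 * pool must still be alive at that point. */
static void pool_put(struct seg_pool *p, int seg)
{
	p->slots[p->count++] = seg;
	printf("returned seg %d (%d/%d in pool)\n", seg, p->count, p->capacity);
}

static void pool_deinit(struct seg_pool *p)
{
	free(p->slots);
	free(p);
}

int main(void)
{
	struct seg_pool *tso_segs = pool_init(8);

	/* 1. free descriptors that never got a TX completion; each returns
	 *    its TSO segment to the freelist */
	pool_put(tso_segs, 3);

	/* 2. only then de-initialize the segment freelist; swapping these
	 *    two steps would make pool_put() touch freed memory */
	pool_deinit(tso_segs);
	return 0;
}

Reversing the two numbered steps in main() is exactly the use-after-free that the reordered pre-detach path avoids.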