Browse Source

qcacmn: Fix SSR nbuf unmap issue

During SSR testing, a TX skb sent to the target will not get a TX completion,
so the skb unmap and free is missed. Later, when the driver is unloaded,
an nbuf unmap leak is detected and a panic occurs.
Take care to unmap and free the skb corresponding to each pending TX
descriptor during vdev detach.

Change-Id: I9f4e6443682097ec76632c96a0188ffa2c1a5fcc
CRs-Fixed: 2347770
Jinwei Chen 6 years ago
parent
commit
b3f9d202db
1 changed file with 55 additions and 21 deletions
  1. 55 21
      dp/wifi3.0/dp_tx.c

+ 55 - 21
dp/wifi3.0/dp_tx.c

@@ -3334,11 +3334,6 @@ void dp_tx_vdev_update_search_flags(struct dp_vdev *vdev)
 }
 
 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
-static void dp_tx_desc_flush(struct dp_vdev *vdev)
-{
-}
-#else /* QCA_LL_TX_FLOW_CONTROL_V2! */
-
 /* dp_tx_desc_flush() - release resources associated
  *                      to tx_desc
  * @vdev: virtual device instance
@@ -3347,12 +3342,51 @@ static void dp_tx_desc_flush(struct dp_vdev *vdev)
  * including ME buffer for which either free during
  * completion didn't happened or completion is not
  * received.
-*/
+ */
+static void dp_tx_desc_flush(struct dp_vdev *vdev)
+{
+	uint8_t i;
+	uint32_t j;
+	uint32_t num_desc, page_id, offset;
+	uint16_t num_desc_per_page;
+	struct dp_soc *soc = vdev->pdev->soc;
+	struct dp_tx_desc_s *tx_desc = NULL;
+	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
+
+	for (i = 0; i < MAX_TXDESC_POOLS; i++) {
+		tx_desc_pool = &soc->tx_desc[i];
+		if (!(tx_desc_pool->pool_size) ||
+		    IS_TX_DESC_POOL_STATUS_INACTIVE(tx_desc_pool) ||
+		    !(tx_desc_pool->desc_pages.cacheable_pages))
+			continue;
+
+		num_desc = tx_desc_pool->pool_size;
+		num_desc_per_page =
+			tx_desc_pool->desc_pages.num_element_per_page;
+		for (j = 0; j < num_desc; j++) {
+			page_id = j / num_desc_per_page;
+			offset = j % num_desc_per_page;
+
+			if (qdf_unlikely(!(tx_desc_pool->
+					 desc_pages.cacheable_pages)))
+				break;
+
+			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
+			if (tx_desc && (tx_desc->vdev == vdev) &&
+			    (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)) {
+				dp_tx_comp_free_buf(soc, tx_desc);
+				dp_tx_desc_release(tx_desc, i);
+			}
+		}
+	}
+}
+#else /* QCA_LL_TX_FLOW_CONTROL_V2! */
 static void dp_tx_desc_flush(struct dp_vdev *vdev)
 {
 	uint8_t i, num_pool;
 	uint32_t j;
-	uint32_t num_desc;
+	uint32_t num_desc, page_id, offset;
+	uint16_t num_desc_per_page;
 	struct dp_soc *soc = vdev->pdev->soc;
 	struct dp_tx_desc_s *tx_desc = NULL;
 	struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
@@ -3361,21 +3395,21 @@ static void dp_tx_desc_flush(struct dp_vdev *vdev)
 	num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
 
 	for (i = 0; i < num_pool; i++) {
+		tx_desc_pool = &soc->tx_desc[i];
+		if (!tx_desc_pool->desc_pages.cacheable_pages)
+			continue;
+
+		num_desc_per_page =
+			tx_desc_pool->desc_pages.num_element_per_page;
 		for (j = 0; j < num_desc; j++) {
-			tx_desc_pool = &((soc)->tx_desc[(i)]);
-			if (tx_desc_pool &&
-				tx_desc_pool->desc_pages.cacheable_pages) {
-				tx_desc = dp_tx_desc_find(soc, i,
-					(j & DP_TX_DESC_ID_PAGE_MASK) >>
-					DP_TX_DESC_ID_PAGE_OS,
-					(j & DP_TX_DESC_ID_OFFSET_MASK) >>
-					DP_TX_DESC_ID_OFFSET_OS);
-
-				if (tx_desc && (tx_desc->vdev == vdev) &&
-					(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)) {
-					dp_tx_comp_free_buf(soc, tx_desc);
-					dp_tx_desc_release(tx_desc, i);
-				}
+			page_id = j / num_desc_per_page;
+			offset = j % num_desc_per_page;
+			tx_desc = dp_tx_desc_find(soc, i, page_id, offset);
+
+			if (tx_desc && (tx_desc->vdev == vdev) &&
+			    (tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED)) {
+				dp_tx_comp_free_buf(soc, tx_desc);
+				dp_tx_desc_release(tx_desc, i);
 			}
 		}
 	}