
qcacmn: add nbuf map result check when replenishing

Check the nbuf map result when replenishing RX nbufs to the
monitor status ring. This avoids posting an invalid DMA address
to HW when an nbuf map fails, and avoids unmapping an nbuf that
was never mapped.

Change-Id: I45cfc015f71a2d7295f7fcb4803bd6e68e2752d4
CRs-Fixed: 2182546
jinweic chen, 7 years ago
commit c3546321b6
4 changed files with 118 additions and 28 deletions:

  dp/wifi3.0/dp_rx.c             +60  -1
  dp/wifi3.0/dp_rx.h              +3  -0
  dp/wifi3.0/dp_rx_mon_status.c  +52  -27
  qdf/inc/qdf_nbuf.h              +3  -0
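
Taken together, the fix is one pattern applied to every status-ring replenish path: verify the DMA map result before the buffer address ever reaches hardware. A minimal sketch of that pattern, assembled from the QDF/DP calls used in this diff (the bail-out statuses are assumptions; each call site below handles failure its own way):

	qdf_nbuf_t nbuf;
	QDF_STATUS ret;

	nbuf = qdf_nbuf_alloc(soc->osdev, RX_BUFFER_SIZE,
			      RX_BUFFER_RESERVATION,
			      RX_BUFFER_ALIGNMENT, FALSE);
	if (!nbuf)
		return QDF_STATUS_E_NOMEM;	/* assumption: caller retries */

	ret = qdf_nbuf_map_single(soc->osdev, nbuf, QDF_DMA_BIDIRECTIONAL);
	if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
		/* Map failed: free the nbuf instead of posting its
		 * invalid DMA address; nothing was mapped, so there
		 * is nothing to unmap. */
		qdf_nbuf_free(nbuf);
		return ret;
	}
	/* Only a successfully mapped nbuf gets posted to the ring. */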

dp/wifi3.0/dp_rx.c (+60, -1)

@@ -170,7 +170,7 @@ QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
 
 		ret = qdf_nbuf_map_single(dp_soc->osdev, rx_netbuf,
 				    QDF_DMA_BIDIRECTIONAL);
-		if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
+		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
 			qdf_nbuf_free(rx_netbuf);
 			DP_STATS_INC(dp_pdev, replenish.map_err, 1);
 			continue;
@@ -1708,3 +1708,62 @@ dp_rx_pdev_attach(struct dp_pdev *pdev)
 
 	return QDF_STATUS_SUCCESS;
 }
+
+/*
+ * dp_rx_nbuf_prepare() - prepare RX nbuf
+ * @soc: core txrx main context
+ * @pdev: core txrx pdev context
+ *
+ * This function allocates and maps an nbuf for RX DMA use, retrying
+ * on failure until it succeeds or the retry count reaches
+ * QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD.
+ *
+ * Return: qdf_nbuf_t pointer on success, NULL on failure.
+ */
+qdf_nbuf_t
+dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev)
+{
+	uint8_t *buf;
+	int32_t nbuf_retry_count;
+	QDF_STATUS ret;
+	qdf_nbuf_t nbuf = NULL;
+
+	for (nbuf_retry_count = 0; nbuf_retry_count <
+		QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD;
+			nbuf_retry_count++) {
+		/* Allocate a new skb */
+		nbuf = qdf_nbuf_alloc(soc->osdev,
+					RX_BUFFER_SIZE,
+					RX_BUFFER_RESERVATION,
+					RX_BUFFER_ALIGNMENT,
+					FALSE);
+
+		if (nbuf == NULL) {
+			DP_STATS_INC(pdev,
+				replenish.nbuf_alloc_fail, 1);
+			continue;
+		}
+
+		buf = qdf_nbuf_data(nbuf);
+
+		memset(buf, 0, RX_BUFFER_SIZE);
+
+		ret = qdf_nbuf_map_single(soc->osdev, nbuf,
+				    QDF_DMA_BIDIRECTIONAL);
+
+		/* nbuf map failed */
+		if (qdf_unlikely(QDF_IS_STATUS_ERROR(ret))) {
+			qdf_nbuf_free(nbuf);
+			DP_STATS_INC(pdev, replenish.map_err, 1);
+			continue;
+		}
+		/* qdf_nbuf alloc and map succeeded */
+		break;
+	}
+
+	/* nbuf alloc or map still failing after all retries */
+	if (qdf_unlikely(nbuf_retry_count >=
+			QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD))
+		return NULL;
+
+	return nbuf;
+}
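
Two details are worth noting here. First, QDF_IS_STATUS_ERROR(ret) catches every non-success status (to our reading it is defined in qdf_status.h as a comparison against QDF_STATUS_SUCCESS), whereas the old `ret == QDF_STATUS_E_FAILURE` test missed errors such as QDF_STATUS_E_NOMEM. Second, the memset of the whole buffer inside dp_rx_nbuf_prepare() clears the RX status-done word as well, which is presumably why the old hal_clear_rx_status_done() calls could be dropped from the callers below. For callers, the function collapses alloc, clear, map, and check into one call; a hedged usage sketch (the early-return status is an assumption; the real call sites are in dp_rx_mon_status.c):

	qdf_nbuf_t nbuf = dp_rx_nbuf_prepare(soc, pdev);

	if (qdf_unlikely(!nbuf))
		return QDF_STATUS_E_NOMEM;	/* no mapped buffer available */

	/* The nbuf is already mapped, so its frag paddr is valid and
	 * safe to post to the ring via hal_rxdma_buff_addr_info_set(). */
	paddr = qdf_nbuf_get_frag_paddr(nbuf, 0);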

dp/wifi3.0/dp_rx.h (+3, -0)

@@ -723,4 +723,7 @@ QDF_STATUS dp_rx_filter_mesh_packets(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
 int dp_wds_rx_policy_check(uint8_t *rx_tlv_hdr, struct dp_vdev *vdev,
 				struct dp_peer *peer, int rx_mcast);
 
+qdf_nbuf_t
+dp_rx_nbuf_prepare(struct dp_soc *soc, struct dp_pdev *pdev);
+
 #endif /* _DP_RX_H */

dp/wifi3.0/dp_rx_mon_status.c (+52, -27)

@@ -500,16 +500,38 @@ dp_rx_mon_status_srng_process(struct dp_soc *soc, uint32_t mac_id,
 			rx_desc = &desc_list->rx_desc;
 		}
 
-		/* Allocate a new skb */
-		status_nbuf = qdf_nbuf_alloc(soc->osdev, RX_BUFFER_SIZE,
-			RX_BUFFER_RESERVATION, RX_BUFFER_ALIGNMENT, FALSE);
+		status_nbuf = dp_rx_nbuf_prepare(soc, pdev);
+
+		/*
+		 * The nbuf alloc or map failed: return the dp rx desc
+		 * to the free list, write a NULL DMA address at the
+		 * current HP entry, and leave the mon_status_ring HP
+		 * unchanged so the next dp_rx_mon_status_srng_process
+		 * call can fill in a buffer at the current HP.
+		 */
+		if (qdf_unlikely(status_nbuf == NULL)) {
+			union dp_rx_desc_list_elem_t *desc_list = NULL;
+			union dp_rx_desc_list_elem_t *tail = NULL;
+			struct rx_desc_pool *rx_desc_pool;
 
-		status_buf = qdf_nbuf_data(status_nbuf);
+			rx_desc_pool = &soc->rx_desc_status[mac_id];
 
-		hal_clear_rx_status_done(status_buf);
+			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+				"%s: fail to allocate or map qdf_nbuf",
+				__func__);
+			dp_rx_add_to_free_desc_list(&desc_list,
+						&tail, rx_desc);
+			dp_rx_add_desc_list_to_free_list(soc, &desc_list,
+						&tail, mac_id, rx_desc_pool);
+
+			hal_rxdma_buff_addr_info_set(
+						rxdma_mon_status_ring_entry,
+						0, 0, HAL_RX_BUF_RBM_SW3_BM);
+			work_done++;
+			break;
+		}
 
-		qdf_nbuf_map_single(soc->osdev, status_nbuf,
-			QDF_DMA_BIDIRECTIONAL);
 		paddr = qdf_nbuf_get_frag_paddr(status_nbuf, 0);
 
 		rx_desc->nbuf = status_nbuf;
@@ -620,14 +642,14 @@ QDF_STATUS dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc,
 	uint32_t num_alloc_desc;
 	uint16_t num_desc_to_free = 0;
 	uint32_t num_entries_avail;
-	uint32_t count;
+	uint32_t count = 0;
 	int sync_hw_ptr = 1;
 	qdf_dma_addr_t paddr;
 	qdf_nbuf_t rx_netbuf;
 	void *rxdma_ring_entry;
 	union dp_rx_desc_list_elem_t *next;
 	void *rxdma_srng;
-	uint8_t *status_buf;
+	struct dp_pdev *dp_pdev = dp_get_pdev_for_mac_id(dp_soc, mac_id);
 
 	rxdma_srng = dp_rxdma_srng->hal_srng;
 
@@ -675,23 +697,21 @@ QDF_STATUS dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc,
 		num_req_buffers = num_entries_avail;
 	}
 
-	for (count = 0; count < num_req_buffers; count++) {
-		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
-							 rxdma_srng);
-
-		rx_netbuf = qdf_nbuf_alloc(dp_soc->osdev,
-					RX_BUFFER_SIZE,
-					RX_BUFFER_RESERVATION,
-					RX_BUFFER_ALIGNMENT,
-					FALSE);
+	while (count < num_req_buffers) {
+		rx_netbuf = dp_rx_nbuf_prepare(dp_soc, dp_pdev);
 
-		status_buf = qdf_nbuf_data(rx_netbuf);
-		hal_clear_rx_status_done(status_buf);
-
-		memset(status_buf, 0, RX_BUFFER_SIZE);
-
-		qdf_nbuf_map_single(dp_soc->osdev, rx_netbuf,
-				    QDF_DMA_BIDIRECTIONAL);
+		/*
+		 * The nbuf alloc or map failed: leave the
+		 * mon_status_ring HP unchanged and let
+		 * dp_rx_mon_status_srng_process fill in a buffer
+		 * at the current HP.
+		 */
+		if (qdf_unlikely(rx_netbuf == NULL)) {
+			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+				"%s: qdf_nbuf allocate or map fail, count %d",
+				__func__, count);
+			break;
+		}
 
 		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);
 
@@ -699,15 +719,20 @@ QDF_STATUS dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc,
 
 		(*desc_list)->rx_desc.nbuf = rx_netbuf;
 		(*desc_list)->rx_desc.in_use = 1;
+
+		count++;
+		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
+							 rxdma_srng);
+
 		hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
 			(*desc_list)->rx_desc.cookie, owner);
 
 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
 			"[%s][%d] rx_desc=%pK, cookie=%d, nbuf=%pK, \
-			status_buf=%pK paddr=%pK\n",
+			paddr=%pK\n",
 			__func__, __LINE__, &(*desc_list)->rx_desc,
 			(*desc_list)->rx_desc.cookie, rx_netbuf,
-			status_buf, (void *)paddr);
+			(void *)paddr);
 
 		*desc_list = next;
 	}
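
The replenish loop also changes shape: the for loop becomes a while loop that claims a ring entry with hal_srng_src_get_next() only after dp_rx_nbuf_prepare() has returned a mapped nbuf, so a failure breaks out without HP ever advancing past an unfilled slot. Condensed from the hunk above (descriptor bookkeeping elided):

	while (count < num_req_buffers) {
		rx_netbuf = dp_rx_nbuf_prepare(dp_soc, dp_pdev);
		if (qdf_unlikely(!rx_netbuf))
			break;	/* HP untouched; slot filled on a later pass */

		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);
		count++;
		/* Claim the ring entry only with a mapped nbuf in hand. */
		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
							 rxdma_srng);
		hal_rxdma_buff_addr_info_set(rxdma_ring_entry, paddr,
					     (*desc_list)->rx_desc.cookie,
					     owner);
		*desc_list = next;
	}

In the old code, hal_srng_src_get_next() consumed the entry before the alloc and map were even attempted, so a failure would have left a consumed ring slot with no valid buffer behind it.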

qdf/inc/qdf_nbuf.h (+3, -0)

@@ -142,6 +142,9 @@
 #define QDF_NBUF_TX_PKT_STATE_MAX            10
 #define QDF_NBUF_TX_PKT_LI_DP                11
 
+/* Max retries for qdf_nbuf allocation and mapping on failure */
+#define QDF_NBUF_ALLOC_MAP_RETRY_THRESHOLD      20
+
 /* Enable flag to print TSO specific prints in datapath */
 #ifdef TSO_DEBUG_LOG_ENABLE
 #define TSO_DEBUG(fmt, args ...) \