
qcacmn: For fast recycle case invalidate TLV header

When fast_recycled is set, we can avoid invalidating
the complete buffer, as it was already invalidated
by the TX driver before handing the buffer to the recycler.

We still need to invalidate the first rx_pkt_tlv_size
bytes, as this area is not invalidated in the TX path.

Change-Id: I9e5202d8b251a5d59117609a705a311e499d05b0
CRs-Fixed: 3332713
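
The change is easiest to see as a minimal, self-contained sketch (not the driver code). dma_inv_range(), RX_PKT_TLV_SIZE and L3_PAD below are stand-ins for qdf_nbuf_dma_inv_range_no_dsb(), dp_soc->rx_pkt_tlv_size and L3_HEADER_PAD; the sizes are assumptions for illustration. A buffer handed back with fast_recycled set only needs its TLV header region invalidated before being re-posted for RX DMA, while a cold buffer still needs the full-length invalidate.

/*
 * Minimal sketch of the fast-recycle partial invalidate (placeholder names,
 * not the qcacmn implementation).
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define RX_PKT_TLV_SIZE 128u	/* assumed TLV header size */
#define L3_PAD          2u	/* matches L3_HEADER_PAD in the patch */

struct buf {
	uint8_t *data;
	size_t len;
	bool fast_recycled;	/* set by the TX path when recycling the buffer */
};

/* Stand-in for the cache-invalidate primitive: force the CPU to re-read
 * [start, end) from memory so it sees what RX hardware DMA writes there. */
static void dma_inv_range(void *start, void *end)
{
	printf("invalidate %zu bytes\n",
	       (size_t)((uint8_t *)end - (uint8_t *)start));
}

static void rx_prepare_for_dma(struct buf *b)
{
	if (!b->fast_recycled) {
		/* Cold buffer: any cache line of the payload may be stale. */
		dma_inv_range(b->data, b->data + b->len);
	} else {
		/* Fast-recycled buffer: TX already invalidated the payload,
		 * so only the TLV header area rewritten by RX hardware still
		 * needs an invalidate. */
		dma_inv_range(b->data, b->data + RX_PKT_TLV_SIZE + L3_PAD);
	}
	b->fast_recycled = false;
}

int main(void)
{
	uint8_t storage[2048];
	struct buf cold = { storage, sizeof(storage), false };
	struct buf warm = { storage, sizeof(storage), true };

	rx_prepare_for_dma(&cold);	/* invalidates 2048 bytes */
	rx_prepare_for_dma(&warm);	/* invalidates 130 bytes */
	return 0;
}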
Chaithanya Garrepalli, 2 years ago
parent commit 720568a65d

+ 13 - 0
dp/wifi3.0/dp_rx.h

@@ -2427,6 +2427,7 @@ qdf_dma_addr_t dp_rx_nbuf_sync_no_dsb(struct dp_soc *dp_soc,
 	return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
 }
 #else
+#define L3_HEADER_PAD 2
 static inline
 qdf_dma_addr_t dp_rx_nbuf_sync_no_dsb(struct dp_soc *dp_soc,
 				      qdf_nbuf_t nbuf,
@@ -2436,7 +2437,19 @@ qdf_dma_addr_t dp_rx_nbuf_sync_no_dsb(struct dp_soc *dp_soc,
 		qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
 					      (void *)(nbuf->data + buf_size));
 	} else {
+		/*
+		 * When fast_recycled is set we can avoid invalidating the
+		 * complete buffer, as it was already invalidated by the
+		 * TX driver before handing the buffer to the recycler.
+		 *
+		 * We still need to invalidate rx_pkt_tlv_size bytes, as
+		 * this area is not invalidated in the TX path.
+		 */
 		DP_STATS_INC(dp_soc, rx.fast_recycled, 1);
+		qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
+					      (void *)(nbuf->data +
+						       dp_soc->rx_pkt_tlv_size +
+						       L3_HEADER_PAD));
 	}
 
 	nbuf->fast_recycled = 0;

+ 22 - 0
dp/wifi3.0/dp_rx_err.c

@@ -2723,6 +2723,27 @@ dp_rx_is_sg_formation_required(struct hal_wbm_err_desc_info *info)
 	return false;
 }
 
+#ifdef QCA_DP_NBUF_FAST_RECYCLE_CHECK
+static inline void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
+					    qdf_nbuf_t nbuf)
+{
+	/*
+	 * On the fast recycle path the TX driver can skip invalidating
+	 * the buffer for SFE forwarding, so the TLV headers must be
+	 * invalidated here after writing to this location.
+	 */
+	qdf_nbuf_dma_inv_range_no_dsb((void *)nbuf->data,
+				      (void *)(nbuf->data +
+					       soc->rx_pkt_tlv_size +
+					       L3_HEADER_PAD));
+}
+#else
+static inline void dp_rx_err_tlv_invalidate(struct dp_soc *soc,
+					    qdf_nbuf_t nbuf)
+{
+}
+#endif
+
 uint32_t
 dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
 		      hal_ring_handle_t hal_ring_hdl, uint32_t quota)
@@ -2897,6 +2918,7 @@ dp_rx_wbm_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
 					    (uint8_t *)&wbm_err_info,
 					    sizeof(wbm_err_info));
 
+		dp_rx_err_tlv_invalidate(soc, nbuf);
 		rx_bufs_reaped[rx_desc->chip_id][rx_desc->pool_id]++;
 
 		if (qdf_nbuf_is_rx_chfrag_cont(nbuf) || process_sg_buf) {
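
The dp_rx_err_tlv_invalidate() helper follows the usual compile-time stub pattern: the call site in dp_rx_wbm_err_process() stays unconditional, and the empty inline under #else compiles away when QCA_DP_NBUF_FAST_RECYCLE_CHECK is not defined. A stand-alone illustration of that pattern is below; FAST_RECYCLE_CHECK, tlv_invalidate() and struct ctx are placeholders, not driver names.

#include <stdio.h>

struct ctx {
	unsigned int tlv_size;
};

#ifdef FAST_RECYCLE_CHECK
static inline void tlv_invalidate(struct ctx *c, void *buf)
{
	/* Feature enabled: the real helper would invalidate
	 * c->tlv_size (+ pad) bytes starting at buf. */
	printf("invalidate %u bytes at %p\n", c->tlv_size, buf);
}
#else
static inline void tlv_invalidate(struct ctx *c, void *buf)
{
	/* Feature disabled: compiles to nothing, call site unchanged. */
	(void)c;
	(void)buf;
}
#endif

int main(void)
{
	struct ctx c = { 130 };
	char buf[2048];

	tlv_invalidate(&c, buf);	/* no #ifdef clutter at the call site */
	return 0;
}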