Browse Source

qcacmn: RX nbuf allocation from phy addr 0x50000000 and above for x86

We should ensure that memory allocation for rx nbufs
always happens from physical address 0x50000000 and above;
otherwise, drop the nbuf and retry with a new allocation.

Change-Id: I675a0f1289e04f720949ad9b6917bbb733270a78
CRs-Fixed: 2003174
Tallapragada Kalyan 8 years ago
parent
commit
4e3341aa2a
2 changed files with 94 additions and 13 deletions
  1. 19 13
      dp/wifi3.0/dp_rx.c
  2. 75 0
      dp/wifi3.0/dp_rx.h

+ 19 - 13
dp/wifi3.0/dp_rx.c

@@ -24,15 +24,6 @@
 #include "qdf_nbuf.h"
 #include <ieee80211.h>
 
-#ifdef RXDMA_OPTIMIZATION
-#define RX_BUFFER_ALIGNMENT	128
-#else /* RXDMA_OPTIMIZATION */
-#define RX_BUFFER_ALIGNMENT	4
-#endif /* RXDMA_OPTIMIZATION */
-
-#define RX_BUFFER_SIZE		2048
-#define RX_BUFFER_RESERVATION	0
-
 /*
  * dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
  *			       called during dp rx initialization
@@ -64,6 +55,7 @@ QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
 	union dp_rx_desc_list_elem_t *next;
 	struct dp_srng *dp_rxdma_srng = &dp_pdev->rx_refill_buf_ring;
 	void *rxdma_srng = dp_rxdma_srng->hal_srng;
+	int32_t ret;
 
 	if (!rxdma_srng) {
 		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
@@ -108,10 +100,9 @@ QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
 		num_req_buffers = num_entries_avail;
 	}
 
-	for (count = 0; count < num_req_buffers; count++) {
-		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
-							 rxdma_srng);
+	count = 0;
 
+	while (count < num_req_buffers) {
 		rx_netbuf = qdf_nbuf_alloc(dp_pdev->osif_pdev,
 					RX_BUFFER_SIZE,
 					RX_BUFFER_RESERVATION,
@@ -119,13 +110,28 @@ QDF_STATUS dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
 					FALSE);
 
 		if (rx_netbuf == NULL)
-			break;
+			continue;
 
 		qdf_nbuf_map_single(dp_soc->osdev, rx_netbuf,
 				    QDF_DMA_BIDIRECTIONAL);
 
 		paddr = qdf_nbuf_get_frag_paddr(rx_netbuf, 0);
 
+		/*
+		 * Check if the physical address of nbuf->data is
+		 * less than 0x50000000; if so, free the nbuf and try
+		 * allocating a new one. We retry up to 100 times.
+		 * This is a temporary WAR until it is fixed properly.
+		 */
+		ret = check_x86_paddr(dp_soc, &rx_netbuf, &paddr, dp_pdev);
+		if (ret == QDF_STATUS_E_FAILURE)
+			break;
+
+		count++;
+
+		rxdma_ring_entry = hal_srng_src_get_next(dp_soc->hal_soc,
+								rxdma_srng);
+
 		next = (*desc_list)->next;
 
 		(*desc_list)->rx_desc.nbuf = rx_netbuf;

+ 75 - 0
dp/wifi3.0/dp_rx.h

@@ -22,6 +22,15 @@
 #include "hal_rx.h"
 #include "dp_tx.h"
 
+#ifdef RXDMA_OPTIMIZATION
+#define RX_BUFFER_ALIGNMENT     128
+#else /* RXDMA_OPTIMIZATION */
+#define RX_BUFFER_ALIGNMENT     4
+#endif /* RXDMA_OPTIMIZATION */
+
+#define RX_BUFFER_SIZE          2048
+#define RX_BUFFER_RESERVATION   0
+
 #define DP_PEER_METADATA_PEER_ID_MASK	0x0000ffff
 #define DP_PEER_METADATA_PEER_ID_SHIFT	0
 #define DP_PEER_METADATA_VDEV_ID_MASK	0x00070000
@@ -163,4 +172,70 @@ do {                                                \
 	qdf_nbuf_set_next((tail), NULL);            \
 } while (0)
 
+#ifndef BUILD_X86
+static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
+				qdf_dma_addr_t *paddr, struct dp_pdev *pdev)
+{
+	return QDF_STATUS_SUCCESS;
+}
+#else
+#define MAX_RETRY 100
+static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
+				qdf_dma_addr_t *paddr, struct dp_pdev *pdev)
+{
+	uint32_t nbuf_retry = 0;
+	int32_t ret;
+	const uint32_t x86_phy_addr = 0x50000000;
+	/*
+	 * in M2M emulation platforms (x86) the memory below 0x50000000
+	 * is reserved for target use, so any memory allocated in this
+	 * region should not be used by host
+	 */
+	do {
+		if (qdf_likely(*paddr > x86_phy_addr))
+			return QDF_STATUS_SUCCESS;
+		else {
+			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
+				"phy addr %p exceded 0x50000000 trying again\n",
+				paddr);
+
+			nbuf_retry++;
+			if ((*rx_netbuf)) {
+				qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
+							QDF_DMA_BIDIRECTIONAL);
+				qdf_nbuf_free(*rx_netbuf);
+			}
+
+			*rx_netbuf = qdf_nbuf_alloc(pdev->osif_pdev,
+							RX_BUFFER_SIZE,
+							RX_BUFFER_RESERVATION,
+							RX_BUFFER_ALIGNMENT,
+							FALSE);
+
+			if (qdf_unlikely(!(*rx_netbuf)))
+				return QDF_STATUS_E_FAILURE;
+
+			ret = qdf_nbuf_map_single(dp_soc->osdev, *rx_netbuf,
+							QDF_DMA_BIDIRECTIONAL);
+
+			if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
+				qdf_nbuf_free(*rx_netbuf);
+				*rx_netbuf = NULL;
+				continue;
+			}
+
+			*paddr = qdf_nbuf_get_frag_paddr(*rx_netbuf, 0);
+		}
+	} while (nbuf_retry < MAX_RETRY);
+
+	if ((*rx_netbuf)) {
+		qdf_nbuf_unmap_single(dp_soc->osdev, *rx_netbuf,
+					QDF_DMA_BIDIRECTIONAL);
+		qdf_nbuf_free(*rx_netbuf);
+	}
+
+	return QDF_STATUS_E_FAILURE;
+}
+#endif
+
 #endif /* _DP_RX_H */