Browse Source

qcacmn: Cache rx frames during dp peer register

Currently frames will be dropped if they arrive before
the peer is registered with the data path module by the
control path. So cache rx frames and flush them to the
upper layer when the peer is registered at the data path.

Change-Id: I086122fffdcf33e25ba57774ef944550cdd2fa20
CRs-Fixed: 2329308
Sravan Kumar Kairam 6 years ago
parent
commit
ebd627e195
5 changed files with 260 additions and 16 deletions
  1. 36 0
      dp/wifi3.0/dp_main.c
  2. 5 0
      dp/wifi3.0/dp_peer.c
  3. 170 13
      dp/wifi3.0/dp_rx.c
  4. 24 0
      dp/wifi3.0/dp_rx.h
  5. 25 3
      dp/wifi3.0/dp_types.h

+ 36 - 0
dp/wifi3.0/dp_main.c

@@ -153,6 +153,10 @@ bool is_dp_verbose_debug_enabled;
 				      DP_PPDU_TXLITE_STATS_BITMASK_CFG)
 
 #define RNG_ERR		"SRNG setup failed for"
+
+/* Threshold for peer's cached buf queue beyond which frames are dropped */
+#define DP_RX_CACHED_BUFQ_THRESH 64
+
 /**
  * default_dscp_tid_map - Default DSCP-TID mapping
  *
@@ -4862,6 +4866,19 @@ static inline void dp_peer_ast_handle_roam_del(struct dp_soc *soc,
 }
 #endif
 
+#ifdef PEER_CACHE_RX_PKTS
+static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer)
+{
+	qdf_spinlock_create(&peer->bufq_info.bufq_lock);
+	peer->bufq_info.thresh = DP_RX_CACHED_BUFQ_THRESH;
+	qdf_list_create(&peer->bufq_info.cached_bufq, DP_RX_CACHED_BUFQ_THRESH);
+}
+#else
+static inline void dp_peer_rx_bufq_resources_init(struct dp_peer *peer)
+{
+}
+#endif
+
 /*
  * dp_peer_create_wifi3() - attach txrx peer
  * @txrx_vdev: Datapath VDEV handle
@@ -4967,6 +4984,8 @@ static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
 
 	qdf_spinlock_create(&peer->peer_info_lock);
 
+	dp_peer_rx_bufq_resources_init(peer);
+
 	qdf_mem_copy(
 		&peer->mac_addr.raw[0], peer_mac_addr, QDF_MAC_ADDR_SIZE);
 
@@ -5012,6 +5031,7 @@ static void *dp_peer_create_wifi3(struct cdp_vdev *vdev_handle,
 	for (i = 0; i < DP_MAX_TIDS; i++)
 		qdf_spinlock_create(&peer->rx_tid[i].tid_lock);
 
+	peer->valid = 1;
 	dp_local_peer_id_alloc(pdev, peer);
 	DP_STATS_INIT(peer);
 
@@ -5671,6 +5691,18 @@ void dp_peer_unref_delete(void *peer_handle)
 	}
 }
 
+#ifdef PEER_CACHE_RX_PKTS
+static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer)
+{
+	qdf_list_destroy(&peer->bufq_info.cached_bufq);
+	qdf_spinlock_destroy(&peer->bufq_info.bufq_lock);
+}
+#else
+static inline void dp_peer_rx_bufq_resources_deinit(struct dp_peer *peer)
+{
+}
+#endif
+
 /*
 * dp_peer_detach_wifi3() - Detach txrx peer
  * @peer_handle: Datapath peer handle
@@ -5696,12 +5728,16 @@ static void dp_peer_delete_wifi3(void *peer_handle, uint32_t bitmap)
 	      !peer->bss_peer))
 		peer->ctrl_peer = NULL;
 
+	peer->valid = 0;
+
 	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
 		FL("peer %pK (%pM)"),  peer, peer->mac_addr.raw);
 
 	dp_local_peer_id_free(peer->vdev->pdev, peer);
 	qdf_spinlock_destroy(&peer->peer_info_lock);
 
+	dp_peer_rx_bufq_resources_deinit(peer);
+
 	/*
 	 * Remove the reference added during peer_attach.
 	 * The peer will still be left allocated until the

+ 5 - 0
dp/wifi3.0/dp_peer.c

@@ -24,6 +24,7 @@
 #include "dp_internal.h"
 #include "dp_peer.h"
 #include "dp_rx_defrag.h"
+#include "dp_rx.h"
 #include <hal_api.h>
 #include <hal_reo.h>
 #ifdef CONFIG_MCL
@@ -2760,6 +2761,8 @@ QDF_STATUS dp_register_peer(struct cdp_pdev *pdev_handle,
 	peer->state = OL_TXRX_PEER_STATE_CONN;
 	qdf_spin_unlock_bh(&peer->peer_info_lock);
 
+	dp_rx_flush_rx_cached(peer, false);
+
 	return QDF_STATUS_SUCCESS;
 }
 
@@ -2786,6 +2789,8 @@ QDF_STATUS dp_clear_peer(struct cdp_pdev *pdev_handle, uint8_t local_id)
 	peer->state = OL_TXRX_PEER_STATE_DISC;
 	qdf_spin_unlock_bh(&peer->peer_info_lock);
 
+	dp_rx_flush_rx_cached(peer, true);
+
 	return QDF_STATUS_SUCCESS;
 }
 

+ 170 - 13
dp/wifi3.0/dp_rx.c

@@ -1144,28 +1144,186 @@ void dp_rx_compute_delay(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
 	vdev->prev_rx_deliver_tstamp = current_ts;
 }
 
+/**
+ * dp_rx_drop_nbuf_list() - drop an nbuf list
+ * @pdev: dp pdev reference
+ * @buf_list: buffer list to be dropped
+ *
+ * Return: int (number of bufs dropped)
+ */
+static inline int dp_rx_drop_nbuf_list(struct dp_pdev *pdev,
+				       qdf_nbuf_t buf_list)
+{
+	struct cdp_tid_rx_stats *stats = NULL;
+	uint8_t tid = 0;
+	int num_dropped = 0;
+	qdf_nbuf_t buf, next_buf;
+
+	buf = buf_list;
+	while (buf) {
+		next_buf = qdf_nbuf_queue_next(buf);
+		tid = qdf_nbuf_get_priority(buf);
+		stats = &pdev->stats.tid_stats.tid_rx_stats[tid];
+		stats->fail_cnt[INVALID_PEER_VDEV]++;
+		stats->delivered_to_stack--;
+		qdf_nbuf_free(buf);
+		buf = next_buf;
+		num_dropped++;
+	}
+
+	return num_dropped;
+}
+
+#ifdef PEER_CACHE_RX_PKTS
+/**
+ * dp_rx_flush_rx_cached() - flush cached rx frames
+ * @peer: peer
+ * @drop: flag to drop frames or forward to net stack
+ *
+ * Return: None
+ */
+void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
+{
+	struct dp_peer_cached_bufq *bufqi;
+	struct dp_rx_cached_buf *cache_buf = NULL;
+	ol_txrx_rx_fp data_rx = NULL;
+	int num_buff_elem;
+	QDF_STATUS status;
+
+	if (qdf_atomic_inc_return(&peer->flush_in_progress) > 1) {
+		qdf_atomic_dec(&peer->flush_in_progress);
+		return;
+	}
+
+	qdf_spin_lock_bh(&peer->peer_info_lock);
+	if (peer->state >= OL_TXRX_PEER_STATE_CONN && peer->vdev->osif_rx)
+		data_rx = peer->vdev->osif_rx;
+	else
+		drop = true;
+	qdf_spin_unlock_bh(&peer->peer_info_lock);
+
+	bufqi = &peer->bufq_info;
+
+	qdf_spin_lock_bh(&bufqi->bufq_lock);
+	if (qdf_list_empty(&bufqi->cached_bufq)) {
+		qdf_spin_unlock_bh(&bufqi->bufq_lock);
+		return;
+	}
+	qdf_list_remove_front(&bufqi->cached_bufq,
+			      (qdf_list_node_t **)&cache_buf);
+	while (cache_buf) {
+		num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(
+								cache_buf->buf);
+		bufqi->entries -= num_buff_elem;
+		qdf_spin_unlock_bh(&bufqi->bufq_lock);
+		if (drop) {
+			bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
+							      cache_buf->buf);
+		} else {
+			/* Flush the cached frames to OSIF DEV */
+			status = data_rx(peer->vdev->osif_vdev, cache_buf->buf);
+			if (status != QDF_STATUS_SUCCESS)
+				bufqi->dropped = dp_rx_drop_nbuf_list(
+							peer->vdev->pdev,
+							cache_buf->buf);
+		}
+		qdf_mem_free(cache_buf);
+		cache_buf = NULL;
+		qdf_spin_lock_bh(&bufqi->bufq_lock);
+		qdf_list_remove_front(&bufqi->cached_bufq,
+				      (qdf_list_node_t **)&cache_buf);
+	}
+	qdf_spin_unlock_bh(&bufqi->bufq_lock);
+	qdf_atomic_dec(&peer->flush_in_progress);
+}
+
+/**
+ * dp_rx_enqueue_rx() - cache rx frames
+ * @peer: peer
+ * @rx_buf_list: buffer list to be cached
+ *
+ * Return: QDF_STATUS
+ */
+static QDF_STATUS
+dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list)
+{
+	struct dp_rx_cached_buf *cache_buf;
+	struct dp_peer_cached_bufq *bufqi = &peer->bufq_info;
+	int num_buff_elem;
+
+	QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_TXRX, "bufq->curr %d bufq->drops %d",
+			   bufqi->entries, bufqi->dropped);
+
+	if (!peer->valid) {
+		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
+						      rx_buf_list);
+		return QDF_STATUS_E_INVAL;
+	}
+
+	qdf_spin_lock_bh(&bufqi->bufq_lock);
+	if (bufqi->entries >= bufqi->thresh) {
+		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
+						      rx_buf_list);
+		qdf_spin_unlock_bh(&bufqi->bufq_lock);
+		return QDF_STATUS_E_RESOURCES;
+	}
+	qdf_spin_unlock_bh(&bufqi->bufq_lock);
+
+	num_buff_elem = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(rx_buf_list);
+
+	cache_buf = qdf_mem_malloc_atomic(sizeof(*cache_buf));
+	if (!cache_buf) {
+		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+			  "Failed to allocate buf to cache rx frames");
+		bufqi->dropped = dp_rx_drop_nbuf_list(peer->vdev->pdev,
+						      rx_buf_list);
+		return QDF_STATUS_E_NOMEM;
+	}
+
+	cache_buf->buf = rx_buf_list;
+
+	qdf_spin_lock_bh(&bufqi->bufq_lock);
+	qdf_list_insert_back(&bufqi->cached_bufq,
+			     &cache_buf->node);
+	bufqi->entries += num_buff_elem;
+	qdf_spin_unlock_bh(&bufqi->bufq_lock);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+static inline
+bool dp_rx_is_peer_cache_bufq_supported(void)
+{
+	return true;
+}
+#else
+static inline
+bool dp_rx_is_peer_cache_bufq_supported(void)
+{
+	return false;
+}
+
+static inline QDF_STATUS
+dp_rx_enqueue_rx(struct dp_peer *peer, qdf_nbuf_t rx_buf_list)
+{
+	return QDF_STATUS_SUCCESS;
+}
+#endif
+
 static inline void dp_rx_deliver_to_stack(struct dp_vdev *vdev,
 						struct dp_peer *peer,
 						qdf_nbuf_t nbuf_head,
 						qdf_nbuf_t nbuf_tail)
 {
-	struct cdp_tid_rx_stats *stats = NULL;
-	uint8_t tid = 0;
 	/*
 	 * highly unlikely to have a vdev without a registered rx
 	 * callback function. if so let us free the nbuf_list.
 	 */
 	if (qdf_unlikely(!vdev->osif_rx)) {
-		qdf_nbuf_t nbuf;
-		do {
-			nbuf = nbuf_head;
-			nbuf_head = nbuf_head->next;
-			tid = qdf_nbuf_get_priority(nbuf);
-			stats = &vdev->pdev->stats.tid_stats.tid_rx_stats[tid];
-			stats->fail_cnt[INVALID_PEER_VDEV]++;
-			stats->delivered_to_stack--;
-			qdf_nbuf_free(nbuf);
-		} while (nbuf_head);
+		if (dp_rx_is_peer_cache_bufq_supported())
+			dp_rx_enqueue_rx(peer, nbuf_head);
+		else
+			dp_rx_drop_nbuf_list(vdev->pdev, nbuf_head);
 
 		return;
 	}
@@ -1177,7 +1335,6 @@ static inline void dp_rx_deliver_to_stack(struct dp_vdev *vdev,
 	}
 
 	vdev->osif_rx(vdev->osif_vdev, nbuf_head);
-
 }
 
 /**

+ 24 - 0
dp/wifi3.0/dp_rx.h

@@ -128,6 +128,16 @@ struct dp_rx_desc {
  */
 #define DP_SKIP_VLAN		8
 
+/**
+ * struct dp_rx_cached_buf - rx cached buffer
+ * @list: linked list node
+ * @buf: skb buffer
+ */
+struct dp_rx_cached_buf {
+	qdf_list_node_t node;
+	qdf_nbuf_t buf;
+};
+
 /*
  *dp_rx_xor_block() - xor block of data
  *@b: destination data block
@@ -1225,4 +1235,18 @@ void dp_rx_process_rxdma_err(struct dp_soc *soc, qdf_nbuf_t nbuf,
 			     uint8_t *rx_tlv_hdr, struct dp_peer *peer,
 			     uint8_t err_code);
 
+#ifdef PEER_CACHE_RX_PKTS
+/**
+ * dp_rx_flush_rx_cached() - flush cached rx frames
+ * @peer: peer
+ * @drop: set flag to drop frames
+ *
+ * Return: None
+ */
+void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop);
+#else
+static inline void dp_rx_flush_rx_cached(struct dp_peer *peer, bool drop)
+{
+}
+#endif
 #endif /* _DP_RX_H */

+ 25 - 3
dp/wifi3.0/dp_types.h

@@ -1641,6 +1641,22 @@ typedef struct {
 } dp_ecm_policy;
 #endif
 
+/*
+ * struct dp_peer_cached_bufq - cached_bufq to enqueue rx packets
+ * @cached_bufq: nbuf list to enqueue rx packets
+ * @bufq_lock: spinlock for nbuf list access
+ * @thresh: maximum threshold for number of rx buffs to enqueue
+ * @entries: number of entries
+ * @dropped: number of packets dropped
+ */
+struct dp_peer_cached_bufq {
+	qdf_list_t cached_bufq;
+	qdf_spinlock_t bufq_lock;
+	uint32_t thresh;
+	uint32_t entries;
+	uint32_t dropped;
+};
+
 /* Peer structure for data path state */
 struct dp_peer {
 	/* VDEV to which this peer is associated */
@@ -1692,9 +1708,10 @@ struct dp_peer {
 
 	/* NAWDS Flag and Bss Peer bit */
 	uint8_t nawds_enabled:1,
-				bss_peer:1,
-				wapi:1,
-				wds_enabled:1;
+		bss_peer:1,
+		wapi:1,
+		wds_enabled:1,
+		valid:1;
 
 	/* MCL specific peer local id */
 	uint16_t local_id;
@@ -1736,6 +1753,11 @@ struct dp_peer {
 	struct cdp_peer_rate_stats_ctx *wlanstats_ctx;
 	/* average sojourn time */
 	qdf_ewma_tx_lag avg_sojourn_msdu[CDP_DATA_TID_MAX];
+
+#ifdef PEER_CACHE_RX_PKTS
+	qdf_atomic_t flush_in_progress;
+	struct dp_peer_cached_bufq bufq_info;
+#endif
 };
 
 #ifdef CONFIG_WIN