@@ -24,9 +24,11 @@
 #include <cdp_txrx_peer_ops.h>
 #include <cds_sched.h>
 
-/* Timeout in ms to wait for a DP rx thread */
 #ifdef HAL_CONFIG_SLUB_DEBUG_ON
+/* Timeout in ms to wait for a DP rx thread */
 #define DP_RX_THREAD_WAIT_TIMEOUT 4000
+/* Number of rx packets after which the rx thread should yield */
+#define DP_RX_THREAD_YIELD_PKT_CNT 20000
 #else
 #define DP_RX_THREAD_WAIT_TIMEOUT 2000
 #endif
@@ -390,6 +392,26 @@ static qdf_nbuf_t dp_rx_tm_thread_dequeue(struct dp_rx_thread *rx_thread)
 	return head;
 }
 
+#ifdef CONFIG_SLUB_DEBUG_ON
+/**
+ * dp_rx_thread_should_yield() - check whether rx loop should yield
+ * @iter - number of packets received so far in the loop
+ *
+ * Returns: true if the rx loop should yield, false otherwise
+ */
+static inline bool dp_rx_thread_should_yield(uint32_t iter)
+{
+	if (iter >= DP_RX_THREAD_YIELD_PKT_CNT)
+		return true;
+	return false;
+}
+#else
+static inline bool dp_rx_thread_should_yield(uint32_t iter)
+{
+	return false;
+}
+#endif
+
 /**
  * dp_rx_thread_process_nbufq() - process nbuf queue of a thread
  * @rx_thread - rx_thread whose nbuf queue needs to be processed
@@ -404,6 +426,7 @@ static int dp_rx_thread_process_nbufq(struct dp_rx_thread *rx_thread)
 	ol_osif_vdev_handle osif_vdev;
 	ol_txrx_soc_handle soc;
 	uint32_t num_list_elements = 0;
+	uint32_t iterates = 0;
 
 	struct dp_txrx_handle_cmn *txrx_handle_cmn;
 
@@ -427,6 +450,7 @@ static int dp_rx_thread_process_nbufq(struct dp_rx_thread *rx_thread)
 		/* count aggregated RX frame into stats */
 		num_list_elements += qdf_nbuf_get_gso_segs(nbuf_list);
 		rx_thread->stats.nbuf_dequeued += num_list_elements;
+		iterates += num_list_elements;
 
 		vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf_list);
 		cdp_get_os_rx_handles_from_vdev(soc, vdev_id, &stack_fn,
@@ -442,6 +466,10 @@ static int dp_rx_thread_process_nbufq(struct dp_rx_thread *rx_thread)
 			rx_thread->stats.nbuf_sent_to_stack +=
 							num_list_elements;
 		}
+		if (unlikely(dp_rx_thread_should_yield(iterates))) {
+			rx_thread->stats.rx_nbufq_loop_yield++;
+			break;
+		}
 		nbuf_list = dp_rx_tm_thread_dequeue(rx_thread);
 	}
 