qcacld-3.0: Support for DP RX Threads
Add support for DP RX Threads as a part of the FR. Multiple RX threads
can be enabled from the ini. The code is added in a new DP module
outside of the cmn project.

Change-Id: Ief6ee955f13c5e527986307371b8e45677cb9700
CRs-Fixed: 2256446
This commit is contained in:

committed by:
nshrivas

parent
cd430b6ebb
commit
7032200a77

17  Kbuild
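For orientation, here is a minimal, editor-added sketch (not part of the commit; the helper name and parameters are illustrative) of how the ini-driven settings reach the new module, mirroring the cds_dp_open() change below: the values are copied into a struct dp_txrx_config and passed to dp_txrx_init(), which creates the RX threads when enable_rx_threads is set.

#include <dp_txrx.h>

/* Illustrative only: mirrors the cds_dp_open() change in this patch. */
static QDF_STATUS example_dp_open(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
				  bool enable_dp_rx_threads,
				  uint8_t num_dp_rx_threads)
{
	struct dp_txrx_config dp_config;

	/* both values originate from the ini items added by this change:
	 * rx_mode (CFG_ENABLE_DP_RX_THREADS bit) and num_dp_rx_threads */
	dp_config.enable_rx_threads = enable_dp_rx_threads;
	dp_config.num_rx_threads = num_dp_rx_threads;

	/* dp_txrx_init() allocates the dp_txrx_handle and, when
	 * enable_rx_threads is true, calls dp_rx_tm_init() to create
	 * up to DP_MAX_RX_THREADS threads. */
	return dp_txrx_init(soc, pdev, &dp_config);
}

With the BIT() definitions this patch adds to hdd_cfg.h, the feature would be selected from the ini with, for example, rx_mode=20 (CFG_ENABLE_DP_RX_THREADS | CFG_ENABLE_NAPI = BIT(4) | BIT(2)) and num_dp_rx_threads between 1 and 4 (default 1).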
@@ -157,6 +157,10 @@ ifeq ($(CONFIG_LITHIUM), y)
HDD_OBJS += $(HDD_SRC_DIR)/wlan_hdd_rx_monitor.o
endif

ifeq ($(CONFIG_LITHIUM), y)
CONFIG_WLAN_FEATURE_DP_RX_THREADS := y
endif

ifeq ($(CONFIG_WLAN_NUD_TRACKING), y)
HDD_OBJS += $(HDD_SRC_DIR)/wlan_hdd_nud_tracking.o
endif
@@ -1077,6 +1081,11 @@ ifeq ($(CONFIG_QCA_SUPPORT_TX_THROTTLE), y)
TXRX_OBJS += $(TXRX_DIR)/ol_tx_throttle.o
endif

############ TXRX 3.0 ############
TXRX3.0_DIR := core/dp/txrx3.0
TXRX3.0_INC := -I$(WLAN_ROOT)/$(TXRX3.0_DIR)
TXRX3.0_OBJS := $(TXRX3.0_DIR)/dp_txrx.o \
$(TXRX3.0_DIR)/dp_rx_thread.o
ifeq ($(CONFIG_LITHIUM), y)
############ DP 3.0 ############
DP_INC := -I$(WLAN_COMMON_INC)/dp/inc \
@@ -1553,7 +1562,8 @@ INCS += $(WMA_INC) \
$(REGULATORY_INC) \
$(HTC_INC) \
$(DFS_INC) \
$(WCFG_INC)
$(WCFG_INC) \
$(TXRX3.0_INC)

INCS += $(HIF_INC) \
$(BMI_INC)
@@ -1721,6 +1731,10 @@ ifeq ($(CONFIG_LITHIUM), y)
OBJS += $(DP_OBJS)
endif

ifeq ($(CONFIG_WLAN_FEATURE_DP_RX_THREADS), y)
OBJS += $(TXRX3.0_OBJS)
endif

ccflags-y += $(INCS)

cppflags-y += -DANI_OS_TYPE_ANDROID=6 \
@@ -1773,6 +1787,7 @@ cppflags-$(CONFIG_FEATURE_FW_LOG_PARSING) += -DFEATURE_FW_LOG_PARSING
cppflags-$(CONFIG_PLD_SDIO_CNSS_FLAG) += -DCONFIG_PLD_SDIO_CNSS
cppflags-$(CONFIG_PLD_PCIE_CNSS_FLAG) += -DCONFIG_PLD_PCIE_CNSS
cppflags-$(CONFIG_PLD_PCIE_INIT_FLAG) += -DCONFIG_PLD_PCIE_INIT
cppflags-$(CONFIG_WLAN_FEATURE_DP_RX_THREADS) += -DFEATURE_WLAN_DP_RX_THREADS

#Enable NL80211 test mode
cppflags-$(CONFIG_NL80211_TESTMODE) += -DWLAN_NL80211_TESTMODE
@@ -91,6 +91,8 @@ enum active_apf_mode {
* @max_scan: Maximum number of parallel scans
* @tx_flow_stop_queue_th: Threshold to stop queue in percentage
* @tx_flow_start_queue_offset: Start queue offset in percentage
* @num_dp_rx_threads: number of dp rx threads to be configured
* @enable_dp_rx_threads: enable dp rx threads
* @is_lpass_enabled: Indicate whether LPASS is enabled or not
* @apf_packet_filter_enable: Indicate apf filter enabled or not
* @tx_chain_mask_cck: Tx chain mask enabled or not
@@ -146,6 +148,8 @@ struct cds_config_info {
uint32_t tx_flow_stop_queue_th;
uint32_t tx_flow_start_queue_offset;
#endif
uint8_t num_dp_rx_threads;
uint8_t enable_dp_rx_threads;
#ifdef WLAN_FEATURE_LPSS
bool is_lpass_enabled;
#endif
@@ -58,7 +58,7 @@
#include "target_type.h"
#include "wlan_ocb_ucfg_api.h"
#include "wlan_ipa_ucfg_api.h"

#include "dp_txrx.h"
#ifdef ENABLE_SMMU_S1_TRANSLATION
#include "pld_common.h"
#include <asm/dma-iommu.h>
@@ -728,6 +728,9 @@ err_wma_complete_event:

QDF_STATUS cds_dp_open(struct wlan_objmgr_psoc *psoc)
{
QDF_STATUS qdf_status;
struct dp_txrx_config dp_config;

if (cdp_txrx_intr_attach(gp_cds_context->dp_soc)
!= QDF_STATUS_SUCCESS) {
cds_alert("Failed to attach interrupts");
@@ -746,6 +749,16 @@ QDF_STATUS cds_dp_open(struct wlan_objmgr_psoc *psoc)
goto intr_close;
}

dp_config.num_rx_threads = gp_cds_context->cds_cfg->num_dp_rx_threads;
dp_config.enable_rx_threads =
gp_cds_context->cds_cfg->enable_dp_rx_threads;
qdf_status = dp_txrx_init(cds_get_context(QDF_MODULE_ID_SOC),
cds_get_context(QDF_MODULE_ID_TXRX),
&dp_config);

if (!QDF_IS_STATUS_SUCCESS(qdf_status))
goto pdev_detach;

pmo_ucfg_psoc_set_txrx_handle(psoc, gp_cds_context->pdev_txrx_ctx);
ucfg_ocb_set_txrx_handle(psoc, gp_cds_context->pdev_txrx_ctx);

@@ -753,6 +766,9 @@ QDF_STATUS cds_dp_open(struct wlan_objmgr_psoc *psoc)

return 0;

pdev_detach:
cdp_pdev_detach(gp_cds_context->dp_soc,
cds_get_context(QDF_MODULE_ID_TXRX), false);
intr_close:
cdp_txrx_intr_detach(gp_cds_context->dp_soc);
close:
@@ -1176,10 +1192,13 @@ QDF_STATUS cds_dp_close(struct wlan_objmgr_psoc *psoc)
void *ctx;

cdp_txrx_intr_detach(gp_cds_context->dp_soc);

ctx = cds_get_context(QDF_MODULE_ID_TXRX);

dp_txrx_deinit(cds_get_context(QDF_MODULE_ID_SOC));

cdp_pdev_detach(cds_get_context(QDF_MODULE_ID_SOC),
(struct cdp_pdev *)ctx, 1);

cds_set_context(QDF_MODULE_ID_TXRX, NULL);
pmo_ucfg_psoc_set_txrx_handle(psoc, NULL);
639  core/dp/txrx3.0/dp_rx_thread.c  Normal file
@@ -0,0 +1,639 @@
/*
* Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/

#include <dp_txrx.h>
#include <cdp_txrx_cmn_struct.h>
#include <cdp_txrx_peer_ops.h>
#include <cds_sched.h>

/* Timeout in ms to wait for a DP rx thread */
#define DP_RX_THREAD_WAIT_TIMEOUT 200

#define DP_RX_TM_DEBUG 0
#if DP_RX_TM_DEBUG
/**
* dp_rx_tm_walk_skb_list() - Walk skb list and print members
* @nbuf_list - nbuf list to print
*
* Returns: None
*/
static inline void dp_rx_tm_walk_skb_list(qdf_nbuf_t nbuf_list)
{
qdf_nbuf_t nbuf;
int i = 0;

nbuf = nbuf_list;
while (nbuf) {
dp_debug("%d nbuf:%pK nbuf->next:%pK nbuf->data:%pK ", i,
nbuf, qdf_nbuf_next(nbuf), qdf_nbuf_data(nbuf));
nbuf = qdf_nbuf_next(nbuf);
i++;
}
}
#else
static inline void dp_rx_tm_walk_skb_list(qdf_nbuf_t nbuf_list)
{ }
#endif /* DP_RX_TM_DEBUG */

/**
* dp_rx_tm_thread_dump_stats() - display stats for a rx_thread
* @rx_thread - rx_thread pointer for which the stats need to be
* displayed
*
* Returns: None
*/
static void dp_rx_tm_thread_dump_stats(struct dp_rx_thread *rx_thread)
{
uint8_t reo_ring_num;
uint32_t off = 0;
char nbuf_queued_string[100];
uint32_t total_queued = 0;
uint32_t temp = 0;

qdf_mem_set(nbuf_queued_string, 0, sizeof(nbuf_queued_string));

for (reo_ring_num = 0; reo_ring_num < DP_RX_TM_MAX_REO_RINGS;
reo_ring_num++) {
temp = rx_thread->stats.nbuf_queued[reo_ring_num];
if (!temp)
continue;
total_queued += temp;
if (off >= sizeof(nbuf_queued_string))
continue;
off += qdf_scnprintf(&nbuf_queued_string[off],
sizeof(nbuf_queued_string) - off,
"reo[%u]:%u ", reo_ring_num, temp);
}
dp_info("thread:%u - qlen:%u queued:(total:%u %s) dequeued:%u stack:%u max_len:%u invalid(peer:%u vdev:%u others:%u)",
rx_thread->id,
qdf_nbuf_queue_head_qlen(&rx_thread->nbuf_queue),
total_queued,
nbuf_queued_string,
rx_thread->stats.nbuf_dequeued,
rx_thread->stats.nbuf_sent_to_stack,
rx_thread->stats.nbufq_max_len,
rx_thread->stats.dropped_invalid_peer,
rx_thread->stats.dropped_invalid_vdev,
rx_thread->stats.dropped_others);
}

QDF_STATUS dp_rx_tm_dump_stats(struct dp_rx_tm_handle *rx_tm_hdl)
{
int i;

for (i = 0; i < DP_MAX_RX_THREADS; i++) {
if (!rx_tm_hdl->rx_thread[i])
continue;
dp_rx_tm_thread_dump_stats(rx_tm_hdl->rx_thread[i]);
}
return QDF_STATUS_SUCCESS;
}

/**
* dp_rx_tm_thread_enqueue() - enqueue nbuf list into rx_thread
* @rx_thread - rx_thread in which the nbuf needs to be queued
* @nbuf_list - list of packets to be queued into the thread
*
* Enqueue packet into rx_thread and wake it up. The function
* moves the next pointer of the nbuf_list into the ext list of
* the first nbuf for storage into the thread. Only the first
* nbuf is queued into the thread nbuf queue. The reverse is
* done at the time of dequeue.
*
* Returns: QDF_STATUS_SUCCESS on success or qdf error code on
* failure
*/
static QDF_STATUS dp_rx_tm_thread_enqueue(struct dp_rx_thread *rx_thread,
qdf_nbuf_t nbuf_list)
{
qdf_nbuf_t head_ptr, next_ptr_list;
uint32_t temp_qlen;
uint32_t num_elements_in_nbuf;
struct dp_rx_tm_handle_cmn *tm_handle_cmn;
uint8_t reo_ring_num = QDF_NBUF_CB_RX_CTX_ID(nbuf_list);
qdf_wait_queue_head_t *wait_q_ptr;

tm_handle_cmn = rx_thread->rtm_handle_cmn;

if (!tm_handle_cmn) {
dp_alert("tm_handle_cmn is null!");
QDF_BUG(0);
return QDF_STATUS_E_FAILURE;
}

wait_q_ptr = dp_rx_thread_get_wait_queue(tm_handle_cmn);

if (reo_ring_num >= DP_RX_TM_MAX_REO_RINGS) {
dp_alert("incorrect ring %u", reo_ring_num);
QDF_BUG(0);
return QDF_STATUS_E_FAILURE;
}

num_elements_in_nbuf = QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf_list);

dp_rx_tm_walk_skb_list(nbuf_list);

head_ptr = nbuf_list;
next_ptr_list = head_ptr->next;

if (next_ptr_list) {
/* move ->next pointer to ext list */
qdf_nbuf_append_ext_list(head_ptr, next_ptr_list, 0);
dp_debug("appended next_ptr_list %pK to nbuf %pK ext list %pK",
qdf_nbuf_next(nbuf_list), nbuf_list,
qdf_nbuf_get_ext_list(nbuf_list));
}
qdf_nbuf_set_next(head_ptr, NULL);

qdf_nbuf_queue_head_enqueue_tail(&rx_thread->nbuf_queue, head_ptr);
temp_qlen = qdf_nbuf_queue_head_qlen(&rx_thread->nbuf_queue);

rx_thread->stats.nbuf_queued[reo_ring_num] += num_elements_in_nbuf;

if (temp_qlen > rx_thread->stats.nbufq_max_len)
rx_thread->stats.nbufq_max_len = temp_qlen;

qdf_set_bit(RX_POST_EVENT, &rx_thread->event_flag);
qdf_wake_up_interruptible(wait_q_ptr);

return QDF_STATUS_SUCCESS;
}

/**
* dp_rx_tm_thread_dequeue() - dequeue nbuf list from rx_thread
* @rx_thread - rx_thread from which the nbuf needs to be dequeued
*
* Returns: nbuf or nbuf_list dequeued from rx_thread
*/
static qdf_nbuf_t dp_rx_tm_thread_dequeue(struct dp_rx_thread *rx_thread)
{
qdf_nbuf_t head, next_ptr_list, nbuf_list;

head = qdf_nbuf_queue_head_dequeue(&rx_thread->nbuf_queue);
nbuf_list = head;
if (head) {
/* move ext list to ->next pointer */
next_ptr_list = qdf_nbuf_get_ext_list(head);
qdf_nbuf_append_ext_list(head, NULL, 0);
qdf_nbuf_set_next(nbuf_list, next_ptr_list);
dp_rx_tm_walk_skb_list(nbuf_list);
}
return nbuf_list;
}

/**
* dp_rx_thread_process_nbufq() - process nbuf queue of a thread
* @rx_thread - rx_thread whose nbuf queue needs to be processed
*
* Returns: 0 on success, error code on failure
*/
static int dp_rx_thread_process_nbufq(struct dp_rx_thread *rx_thread)
{
qdf_nbuf_t nbuf_list;
uint32_t peer_local_id;
void *peer;
struct cdp_vdev *vdev;
ol_txrx_rx_fp stack_fn;
ol_osif_vdev_handle osif_vdev;
ol_txrx_soc_handle soc;
uint32_t num_list_elements = 0;
struct cdp_pdev *pdev;

struct dp_txrx_handle_cmn *txrx_handle_cmn;

txrx_handle_cmn =
dp_rx_thread_get_txrx_handle(rx_thread->rtm_handle_cmn);

soc = dp_txrx_get_soc_from_ext_handle(txrx_handle_cmn);
pdev = dp_txrx_get_pdev_from_ext_handle(txrx_handle_cmn);

if (!soc || !pdev) {
dp_err("invalid soc or pdev!");
QDF_BUG(0);
return -EFAULT;
}

nbuf_list = dp_rx_tm_thread_dequeue(rx_thread);
while (nbuf_list) {
num_list_elements =
QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(nbuf_list);
rx_thread->stats.nbuf_dequeued += num_list_elements;

peer_local_id = QDF_NBUF_CB_RX_PEER_LOCAL_ID(nbuf_list);
peer = cdp_peer_find_by_local_id(soc, pdev, peer_local_id);

if (!peer) {
rx_thread->stats.dropped_invalid_peer +=
num_list_elements;
dp_err("peer not found for local_id %u!",
peer_local_id);
qdf_nbuf_list_free(nbuf_list);
continue;
}

vdev = cdp_peer_get_vdev(soc, peer);
if (!vdev) {
rx_thread->stats.dropped_invalid_vdev +=
num_list_elements;
dp_err("vdev not found for local_id %u!, pkt dropped",
peer_local_id);
qdf_nbuf_list_free(nbuf_list);
continue;
}

cdp_get_os_rx_handles_from_vdev(soc, vdev, &stack_fn,
&osif_vdev);
if (!stack_fn || !osif_vdev) {
dp_alert("stack_fn or osif_vdev is null, pkt dropped!");
QDF_BUG(0);
rx_thread->stats.dropped_others +=
num_list_elements;
qdf_nbuf_list_free(nbuf_list);
continue;
}
stack_fn(osif_vdev, nbuf_list);
rx_thread->stats.nbuf_sent_to_stack += num_list_elements;

nbuf_list = dp_rx_tm_thread_dequeue(rx_thread);
}

return 0;
}

/**
* dp_rx_thread_sub_loop() - rx thread subloop
* @rx_thread - rx_thread to be processed
* @shutdown - pointer to shutdown variable
*
* The function handles shutdown and suspend events from other
* threads and processes nbuf queue of a rx thread. In case a
* shutdown event is received from some other wlan thread, the
* function sets the shutdown pointer to true and returns
*
* Returns: 0 on success, error code on failure
*/
static int dp_rx_thread_sub_loop(struct dp_rx_thread *rx_thread, bool *shutdown)
{
while (true) {
if (qdf_atomic_test_and_clear_bit(RX_SHUTDOWN_EVENT,
&rx_thread->event_flag)) {
if (qdf_atomic_test_and_clear_bit(RX_SUSPEND_EVENT,
&rx_thread->event_flag)) {
qdf_event_set(&rx_thread->suspend_event);
}
dp_debug("shutting down (%s) id %d pid %d",
qdf_get_current_comm(), rx_thread->id,
qdf_get_current_pid());
*shutdown = true;
break;
}

dp_rx_thread_process_nbufq(rx_thread);

if (qdf_atomic_test_and_clear_bit(RX_SUSPEND_EVENT,
&rx_thread->event_flag)) {
dp_debug("received suspend ind (%s) id %d pid %d",
qdf_get_current_comm(), rx_thread->id,
qdf_get_current_pid());
qdf_spin_lock(&rx_thread->lock);
qdf_event_reset(&rx_thread->resume_event);
qdf_event_set(&rx_thread->suspend_event);
qdf_spin_unlock(&rx_thread->lock);
dp_debug("waiting for resume (%s) id %d pid %d",
qdf_get_current_comm(), rx_thread->id,
qdf_get_current_pid());
qdf_wait_single_event(&rx_thread->resume_event, 0);
}
break;
}
return 0;
}

/**
* dp_rx_thread_loop() - main dp rx thread loop
* @arg: pointer to dp_rx_thread structure for the rx thread
*
* Return: thread exit code
*/
static int dp_rx_thread_loop(void *arg)
{
struct dp_rx_thread *rx_thread = arg;
bool shutdown = false;
int status;
struct dp_rx_tm_handle_cmn *tm_handle_cmn;

if (!arg) {
dp_err("bad Args passed");
return 0;
}

tm_handle_cmn = rx_thread->rtm_handle_cmn;

qdf_set_user_nice(qdf_get_current_task(), -1);
qdf_set_wake_up_idle(true);

qdf_event_set(&rx_thread->start_event);
dp_info("starting rx_thread (%s) id %d pid %d", qdf_get_current_comm(),
rx_thread->id, qdf_get_current_pid());
while (!shutdown) {
/* This implements the execution model algorithm */
dp_debug("sleeping");
status =
qdf_wait_queue_interruptible
(DP_RX_THREAD_GET_WAIT_QUEUE_OBJ(tm_handle_cmn),
qdf_atomic_test_bit(RX_POST_EVENT,
&rx_thread->event_flag) ||
qdf_atomic_test_bit(RX_SUSPEND_EVENT,
&rx_thread->event_flag));
dp_debug("woken up");

if (status == -ERESTARTSYS) {
dp_err("wait_event_interruptible returned -ERESTARTSYS");
QDF_DEBUG_PANIC();
break;
}
qdf_atomic_clear_bit(RX_POST_EVENT, &rx_thread->event_flag);
dp_rx_thread_sub_loop(rx_thread, &shutdown);
}

/* If we get here the rx thread must exit */
dp_info("exiting (%s) id %d pid %d", qdf_get_current_comm(),
rx_thread->id, qdf_get_current_pid());
qdf_event_set(&rx_thread->shutdown_event);
qdf_exit_thread(QDF_STATUS_SUCCESS);

return 0;
}

/*
* dp_rx_tm_thread_init() - Initialize dp_rx_thread structure and thread
*
* @rx_thread: dp_rx_thread structure to be initialized
* @id: id of the thread to be initialized
*
* Return: QDF_STATUS_SUCCESS on success, QDF error code on failure
*/
static QDF_STATUS dp_rx_tm_thread_init(struct dp_rx_thread *rx_thread,
uint8_t id)
{
char thread_name[15];
QDF_STATUS qdf_status;

qdf_mem_set(thread_name, 0, sizeof(thread_name));

if (!rx_thread) {
dp_err("rx_thread is null!");
return QDF_STATUS_E_FAULT;
}
rx_thread->id = id;
rx_thread->event_flag = 0;
qdf_nbuf_queue_head_init(&rx_thread->nbuf_queue);
qdf_event_create(&rx_thread->start_event);
qdf_event_create(&rx_thread->suspend_event);
qdf_event_create(&rx_thread->resume_event);
qdf_event_create(&rx_thread->shutdown_event);
qdf_scnprintf(thread_name, sizeof(thread_name), "dp_rx_thread_%u", id);
dp_info("%s %u", thread_name, id);
rx_thread->task = qdf_create_thread(dp_rx_thread_loop,
rx_thread, thread_name);
if (!rx_thread->task) {
dp_err("could not create dp_rx_thread %d", id);
return QDF_STATUS_E_FAILURE;
}

qdf_wake_up_process(rx_thread->task);
qdf_status = qdf_wait_single_event(&rx_thread->start_event, 0);

if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
dp_err("failed waiting for thread creation id %d", id);
return QDF_STATUS_E_FAILURE;
}
return QDF_STATUS_SUCCESS;
}

/*
* dp_rx_tm_thread_deinit() - De-Initialize dp_rx_thread structure and thread
* @rx_thread: dp_rx_thread structure to be de-initialized
*
* Return: QDF_STATUS_SUCCESS
*/
static QDF_STATUS dp_rx_tm_thread_deinit(struct dp_rx_thread *rx_thread)
{
qdf_event_destroy(&rx_thread->start_event);
qdf_event_destroy(&rx_thread->suspend_event);
qdf_event_destroy(&rx_thread->resume_event);
qdf_event_destroy(&rx_thread->shutdown_event);
return QDF_STATUS_SUCCESS;
}

QDF_STATUS dp_rx_tm_init(struct dp_rx_tm_handle *rx_tm_hdl,
uint8_t num_dp_rx_threads)
{
int i;
QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
/* ignoring num_dp_rx_threads for now */
qdf_init_waitqueue_head(&rx_tm_hdl->wait_q);

for (i = 0; i < DP_MAX_RX_THREADS; i++) {
rx_tm_hdl->rx_thread[i] =
(struct dp_rx_thread *)
qdf_mem_malloc(sizeof(struct dp_rx_thread));
if (qdf_unlikely(!rx_tm_hdl->rx_thread[i])) {
QDF_ASSERT(0);
qdf_status = QDF_STATUS_E_NOMEM;
dp_err("failed to allocate memory for dp_rx_thread");
goto ret;
}
rx_tm_hdl->rx_thread[i]->rtm_handle_cmn =
(struct dp_rx_tm_handle_cmn *)rx_tm_hdl;
qdf_status =
dp_rx_tm_thread_init(rx_tm_hdl->rx_thread[i], i);
if (!QDF_IS_STATUS_SUCCESS(qdf_status))
break;
}
ret:
if (!QDF_IS_STATUS_SUCCESS(qdf_status))
dp_rx_tm_deinit(rx_tm_hdl);

return qdf_status;
}

/**
* dp_rx_tm_suspend() - suspend DP RX threads
* @rx_tm_hdl: dp_rx_tm_handle containing the overall thread
* infrastructure
*
* Return: QDF_STATUS_SUCCESS
*/
QDF_STATUS dp_rx_tm_suspend(struct dp_rx_tm_handle *rx_tm_hdl)
{
int i;
QDF_STATUS qdf_status;
struct dp_rx_thread *rx_thread;

for (i = 0; i < DP_MAX_RX_THREADS; i++) {
if (!rx_tm_hdl->rx_thread[i])
continue;
qdf_set_bit(RX_SUSPEND_EVENT,
&rx_tm_hdl->rx_thread[i]->event_flag);
}

qdf_wake_up_interruptible(&rx_tm_hdl->wait_q);

for (i = 0; i < DP_MAX_RX_THREADS; i++) {
rx_thread = rx_tm_hdl->rx_thread[i];
if (!rx_thread)
continue;
dp_debug("thread %d", i);
qdf_status = qdf_wait_single_event(&rx_thread->suspend_event,
DP_RX_THREAD_WAIT_TIMEOUT);
if (QDF_IS_STATUS_SUCCESS(qdf_status))
dp_debug("thread:%d suspended", rx_thread->id);
else if (qdf_status == QDF_STATUS_E_TIMEOUT)
dp_err("thread:%d timed out waiting for suspend",
rx_thread->id);
else
dp_err("thread:%d failed while waiting for suspend",
rx_thread->id);
}
rx_tm_hdl->state = DP_RX_THREAD_SUSPENDED;

return QDF_STATUS_SUCCESS;
}

/**
* dp_rx_tm_resume() - resume DP RX threads
* @rx_tm_hdl: dp_rx_tm_handle containing the overall thread
* infrastructure
*
* Return: QDF_STATUS_SUCCESS
*/
QDF_STATUS dp_rx_tm_resume(struct dp_rx_tm_handle *rx_tm_hdl)
{
int i;

if (rx_tm_hdl->state != DP_RX_THREAD_SUSPENDED) {
dp_err("resume callback received without suspend");
return QDF_STATUS_E_FAULT;
}

for (i = 0; i < DP_MAX_RX_THREADS; i++) {
if (!rx_tm_hdl->rx_thread[i])
continue;
dp_debug("calling thread %d to resume", i);
qdf_event_set(&rx_tm_hdl->rx_thread[i]->resume_event);
}

return QDF_STATUS_SUCCESS;
}

/**
* dp_rx_tm_shutdown() - shutdown all DP RX threads
* @rx_tm_hdl: dp_rx_tm_handle containing the overall thread infrastructure
*
* Return: QDF_STATUS_SUCCESS
*/
static QDF_STATUS dp_rx_tm_shutdown(struct dp_rx_tm_handle *rx_tm_hdl)
{
int i;

for (i = 0; i < DP_MAX_RX_THREADS; i++) {
if (!rx_tm_hdl->rx_thread[i])
continue;
qdf_set_bit(RX_SHUTDOWN_EVENT,
&rx_tm_hdl->rx_thread[i]->event_flag);
qdf_set_bit(RX_POST_EVENT,
&rx_tm_hdl->rx_thread[i]->event_flag);
}

qdf_wake_up_interruptible(&rx_tm_hdl->wait_q);

for (i = 0; i < DP_MAX_RX_THREADS; i++) {
if (!rx_tm_hdl->rx_thread[i])
continue;
dp_debug("waiting for shutdown of thread %d", i);
qdf_wait_single_event(&rx_tm_hdl->rx_thread[i]->shutdown_event,
0);
}
return QDF_STATUS_SUCCESS;
}

/**
* dp_rx_tm_deinit() - de-initialize RX thread infrastructure
* @rx_tm_hdl: dp_rx_tm_handle containing the overall thread
* infrastructure
*
* Return: QDF_STATUS_SUCCESS
*/
QDF_STATUS dp_rx_tm_deinit(struct dp_rx_tm_handle *rx_tm_hdl)
{
int i = 0;

dp_rx_tm_shutdown(rx_tm_hdl);

for (i = 0; i < DP_MAX_RX_THREADS; i++) {
if (!rx_tm_hdl->rx_thread[i])
continue;
dp_rx_tm_thread_deinit(rx_tm_hdl->rx_thread[i]);
qdf_mem_free(rx_tm_hdl->rx_thread[i]);
}
return QDF_STATUS_SUCCESS;
}

/**
* dp_rx_tm_select_thread() - select a DP RX thread for a nbuf
* @rx_tm_hdl: dp_rx_tm_handle containing the overall thread
* infrastructure
* @nbuf_list: list of nbufs to be enqueued in to the thread
*
* The function relies on the presence of QDF_NBUF_CB_RX_CTX_ID
* in the nbuf list. Depending on the RX_CTX (copy engine or reo
* ring) on which the packet was received, the function selects
* a corresponding rx_thread.
* The function uses a simplistic mapping -
*
* RX_THREAD = RX_CTX % number of RX threads in the system.
*
* This also means that if the number of RX contexts exceeds the number of
* rx threads, more than one interrupt source may end up on the same rx_thread.
*
* Return: rx thread ID selected for the nbuf
*/
static uint8_t dp_rx_tm_select_thread(struct dp_rx_tm_handle *rx_tm_hdl,
qdf_nbuf_t nbuf_list)
{
uint8_t selected_rx_thread;
uint8_t reo_ring_num = QDF_NBUF_CB_RX_CTX_ID(nbuf_list);

selected_rx_thread = reo_ring_num % DP_MAX_RX_THREADS;

return selected_rx_thread;
}

QDF_STATUS dp_rx_tm_enqueue_pkt(struct dp_rx_tm_handle *rx_tm_hdl,
qdf_nbuf_t nbuf_list)
{
uint8_t selected_thread_id;

selected_thread_id = dp_rx_tm_select_thread(rx_tm_hdl, nbuf_list);

dp_rx_tm_thread_enqueue(rx_tm_hdl->rx_thread[selected_thread_id],
nbuf_list);
return QDF_STATUS_SUCCESS;
}
211  core/dp/txrx3.0/dp_rx_thread.h  Normal file
@@ -0,0 +1,211 @@
/*
* Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/

#if !defined(__DP_RX_THREAD_H)
#define __DP_RX_THREAD_H

#include <qdf_lock.h>
#include <qdf_event.h>
#include <qdf_threads.h>
#include <wlan_objmgr_vdev_obj.h>
/* Maximum number of REO rings supported (for stats tracking) */
#define DP_RX_TM_MAX_REO_RINGS 4

/* Number of DP RX threads supported */
#define DP_MAX_RX_THREADS 3

/*
* Macro to get to wait_queue structure. Needed since wait_q is an object.
* API qdf_wait_queue_interruptible needs the object be passed to it and not a
* pointer
*/
#define DP_RX_THREAD_GET_WAIT_QUEUE_OBJ(rx_tm_handle_cmn) \
(((struct dp_rx_tm_handle *)rx_tm_handle_cmn)->wait_q)
/*
* struct dp_rx_tm_handle_cmn - Opaque handle for rx_threads to store
* rx_tm_handle. This handle will be common for all the threads.
* Individual threads should not be accessing
* elements from dp_rx_tm_handle. It should be via an API.
*/
struct dp_rx_tm_handle_cmn;

/**
* struct dp_rx_thread_stats - structure holding stats for DP RX thread
* @nbuf_queued: packets queued into the thread per reo ring
* @nbuf_dequeued: packets de-queued from the thread
* @nbuf_sent_to_stack: packets sent to the stack. some dequeued packets may be
* dropped due to no peer or vdev, hence this stat.
* @nbufq_max_len: maximum number of nbuf_lists queued for the thread
* @dropped_invalid_vdev: packets(nbuf_list) dropped due to no vdev
* @dropped_invalid_peer: packets(nbuf_list) dropped due to no peer
* @dropped_others: packets dropped due to other reasons
*/
struct dp_rx_thread_stats {
unsigned int nbuf_queued[DP_RX_TM_MAX_REO_RINGS];
unsigned int nbuf_dequeued;
unsigned int nbuf_sent_to_stack;
unsigned int nbufq_max_len;
unsigned int dropped_invalid_vdev;
unsigned int dropped_invalid_peer;
unsigned int dropped_others;
};

/**
* struct dp_rx_thread - structure holding variables for a single DP RX thread
* @task: task structure corresponding to the thread
* @start_event: handle of Event for DP Rx thread to signal startup
* @suspend_event: handle of Event for DP Rx thread to signal suspend
* @resume_event: handle of Event for DP Rx thread to signal resume
* @shutdown_event: handle of Event for DP Rx thread to signal shutdown
* @event_flag: event flag to post events to DP Rx thread
* @nbuf_queue: nbuf queue used to store RX packets
* @lock: lock used to synchronize suspend/resume event handling
* @aff_mask: current affinity mask of the DP Rx thread
* @stats: per thread stats
* @id: id of the dp_rx_thread (0 or 1 or 2..DP_MAX_RX_THREADS - 1)
* @rtm_handle_cmn: abstract RX TM handle. This allows access to the dp_rx_tm
* structures via APIs.
*/
struct dp_rx_thread {
qdf_thread_t *task;
qdf_spinlock_t lock;
qdf_event_t start_event;
qdf_event_t suspend_event;
qdf_event_t resume_event;
qdf_event_t shutdown_event;
unsigned long event_flag;
qdf_nbuf_queue_head_t nbuf_queue;
unsigned long aff_mask;
struct dp_rx_thread_stats stats;
uint8_t id;
struct dp_rx_tm_handle_cmn *rtm_handle_cmn;
};

/**
* enum dp_rx_thread_state - enum to keep track of the state of the rx thread
* @DP_RX_THREAD_INVALID: initial invalid state
* @DP_RX_THREAD_INIT: state after being initialized
* @DP_RX_THREAD_RUNNING: rx thread is functional(NOT suspended, processing
* packets or waiting on a wait_queue)
* @DP_RX_THREAD_SUSPENDED: rx_thread operation is suspended from cfg80211 suspend
*/
enum dp_rx_thread_state {
DP_RX_THREAD_INVALID,
DP_RX_THREAD_INIT,
DP_RX_THREAD_RUNNING,
DP_RX_THREAD_SUSPENDED
};

/**
* struct dp_rx_tm_handle - DP RX thread infrastructure handle
* @txrx_handle_cmn: opaque txrx handle to get to pdev and soc
* @wait_q: wait_queue for the rx_threads to wait on and expect an event
* @state: state of the rx_threads. All of them should be in the same state.
* @rx_thread: array of pointers of type struct dp_rx_thread
*/
struct dp_rx_tm_handle {
struct dp_txrx_handle_cmn *txrx_handle_cmn;
qdf_wait_queue_head_t wait_q;
enum dp_rx_thread_state state;
struct dp_rx_thread *rx_thread[DP_MAX_RX_THREADS];
};

/**
* dp_rx_tm_init() - initialize DP Rx thread infrastructure
* @rx_tm_hdl: dp_rx_tm_handle containing the overall thread infrastructure
* @num_dp_rx_threads: number of DP Rx threads to be initialized
*
* Return: QDF_STATUS_SUCCESS
*/
QDF_STATUS dp_rx_tm_init(struct dp_rx_tm_handle *rx_tm_hdl,
uint8_t num_dp_rx_threads);

/**
* dp_rx_tm_deinit() - de-initialize DP Rx thread infrastructure
* @rx_tm_hdl: dp_rx_tm_handle containing the overall thread infrastructure
*
* Return: QDF_STATUS_SUCCESS on success, error qdf status on failure
*/
QDF_STATUS dp_rx_tm_deinit(struct dp_rx_tm_handle *rx_tm_hdl);

/**
* dp_rx_tm_enqueue_pkt() - enqueue RX packet into RXTI
* @rx_tm_hdl: dp_rx_tm_handle containing the overall thread infrastructure
* @nbuf_list: single or a list of nbufs to be enqueued into RXTI
*
* Return: QDF_STATUS_SUCCESS
*/
QDF_STATUS dp_rx_tm_enqueue_pkt(struct dp_rx_tm_handle *rx_tm_hdl,
qdf_nbuf_t nbuf_list);

/**
* dp_rx_tm_suspend() - suspend all threads in RXTI
* @rx_tm_handle: pointer to dp_rx_tm_handle object
*
* Return: QDF_STATUS_SUCCESS on success, error qdf status on failure
*/
QDF_STATUS dp_rx_tm_suspend(struct dp_rx_tm_handle *rx_tm_handle);

/**
* dp_rx_tm_resume() - resume all threads in RXTI
* @rx_tm_handle: pointer to dp_rx_tm_handle object
*
* Return: QDF_STATUS_SUCCESS on success, error qdf status on failure
*/
QDF_STATUS dp_rx_tm_resume(struct dp_rx_tm_handle *rx_tm_handle);

/**
* dp_rx_tm_dump_stats() - dump stats for all threads in RXTI
* @rx_tm_handle: pointer to dp_rx_tm_handle object
*
* Return: QDF_STATUS_SUCCESS on success, error qdf status on failure
*/
QDF_STATUS dp_rx_tm_dump_stats(struct dp_rx_tm_handle *rx_tm_handle);

/**
* dp_rx_thread_get_txrx_handle() - get txrx handle from rx_tm_handle_cmn
* @rx_tm_handle_cmn: opaque pointer to dp_rx_tm_handle_cmn struct
*
* Return: pointer to dp_txrx_handle_cmn handle
*/
static inline struct dp_txrx_handle_cmn*
dp_rx_thread_get_txrx_handle(struct dp_rx_tm_handle_cmn *rx_tm_handle_cmn)
{
return (((struct dp_rx_tm_handle *)rx_tm_handle_cmn)->txrx_handle_cmn);
}

/**
* dp_rx_thread_get_wait_queue() - get wait_q from dp_rx_tm_handle
* @rx_tm_handle_cmn: opaque pointer to dp_rx_tm_handle_cmn struct
*
* The function is needed since dp_rx_thread does not have access to the real
* dp_rx_tm_handle structure, but only an opaque dp_rx_tm_handle_cmn handle
*
* Return: pointer to the wait_q member of dp_rx_tm_handle
*/
static inline qdf_wait_queue_head_t*
dp_rx_thread_get_wait_queue(struct dp_rx_tm_handle_cmn *rx_tm_handle_cmn)
{
struct dp_rx_tm_handle *rx_tm_handle;

rx_tm_handle = (struct dp_rx_tm_handle *)rx_tm_handle_cmn;
return &rx_tm_handle->wait_q;
}

#endif /* __DP_RX_THREAD_H */
70  core/dp/txrx3.0/dp_txrx.c  Normal file
@@ -0,0 +1,70 @@
/*
* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/

#include <wlan_objmgr_pdev_obj.h>
#include <dp_txrx.h>
#include <cdp_txrx_cmn.h>

QDF_STATUS dp_txrx_init(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
struct dp_txrx_config *config)
{
struct dp_txrx_handle *dp_ext_hdl;
QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;

dp_ext_hdl = qdf_mem_malloc(sizeof(*dp_ext_hdl));
if (!dp_ext_hdl) {
dp_err("failed to alloc dp_txrx_handle");
QDF_ASSERT(0);
return QDF_STATUS_E_NOMEM;
}

dp_debug("dp_txrx_handle allocated");
dp_ext_hdl->soc = soc;
dp_ext_hdl->pdev = pdev;
cdp_soc_set_dp_txrx_handle(soc, dp_ext_hdl);
qdf_mem_copy(&dp_ext_hdl->config, config, sizeof(*config));
dp_ext_hdl->rx_tm_hdl.txrx_handle_cmn =
dp_txrx_get_cmn_hdl_frm_ext_hdl(dp_ext_hdl);

if (dp_ext_hdl->config.enable_rx_threads) {
qdf_status = dp_rx_tm_init(&dp_ext_hdl->rx_tm_hdl,
dp_ext_hdl->config.num_rx_threads);
}

return qdf_status;
}

QDF_STATUS dp_txrx_deinit(ol_txrx_soc_handle soc)
{
struct dp_txrx_handle *dp_ext_hdl;

if (!soc)
return QDF_STATUS_E_INVAL;

dp_ext_hdl = cdp_soc_get_dp_txrx_handle(soc);
if (!dp_ext_hdl)
return QDF_STATUS_E_FAULT;

dp_rx_tm_deinit(&dp_ext_hdl->rx_tm_hdl);
qdf_mem_free(dp_ext_hdl);
dp_info("dp_txrx_handle_t de-allocated");

cdp_soc_set_dp_txrx_handle(soc, NULL);

return QDF_STATUS_SUCCESS;
}
248  core/dp/txrx3.0/dp_txrx.h  Normal file
@@ -0,0 +1,248 @@
/*
* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/

#ifndef _DP_TXRX_H
#define _DP_TXRX_H

#include <wlan_objmgr_psoc_obj.h>
#include <dp_rx_thread.h>
#include <qdf_trace.h>
#include <cdp_txrx_cmn_struct.h>
#include <cdp_txrx_cmn.h>

/**
* struct dp_txrx_config - dp txrx configuration passed to dp txrx modules
* @enable_rx_threads: enable DP RX threads or not
* @num_rx_threads: number of DP RX threads
*/
struct dp_txrx_config {
bool enable_rx_threads;
uint8_t num_rx_threads;
};

struct dp_txrx_handle_cmn;
/**
* struct dp_txrx_handle - main dp txrx container handle
* @soc: ol_txrx_soc_handle soc handle
* @pdev: cdp_pdev handle
* @rx_tm_hdl: rx thread infrastructure handle
* @config: dp txrx configuration copied at init time
*/
struct dp_txrx_handle {
ol_txrx_soc_handle soc;
struct cdp_pdev *pdev;
struct dp_rx_tm_handle rx_tm_hdl;
struct dp_txrx_config config;
};

#ifdef FEATURE_WLAN_DP_RX_THREADS
/**
* dp_txrx_get_cmn_hdl_frm_ext_hdl() - conversion func ext_hdl->txrx_handle_cmn
* @dp_ext_hdl: pointer to dp_txrx_handle structure
*
* Return: typecasted pointer of type - struct dp_txrx_handle_cmn
*/
static inline struct dp_txrx_handle_cmn *
dp_txrx_get_cmn_hdl_frm_ext_hdl(struct dp_txrx_handle *dp_ext_hdl)
{
return (struct dp_txrx_handle_cmn *)dp_ext_hdl;
}

/**
* dp_txrx_get_ext_hdl_frm_cmn_hdl() - conversion func txrx_handle_cmn->ext_hdl
* @txrx_cmn_hdl: pointer to dp_txrx_handle_cmn structure
*
* Return: typecasted pointer of type - struct dp_txrx_handle
*/
static inline struct dp_txrx_handle *
dp_txrx_get_ext_hdl_frm_cmn_hdl(struct dp_txrx_handle_cmn *txrx_cmn_hdl)
{
return (struct dp_txrx_handle *)txrx_cmn_hdl;
}

static inline ol_txrx_soc_handle
dp_txrx_get_soc_from_ext_handle(struct dp_txrx_handle_cmn *txrx_cmn_hdl)
{
struct dp_txrx_handle *dp_ext_hdl;

dp_ext_hdl = dp_txrx_get_ext_hdl_frm_cmn_hdl(txrx_cmn_hdl);

return dp_ext_hdl->soc;
}

static inline struct cdp_pdev*
dp_txrx_get_pdev_from_ext_handle(struct dp_txrx_handle_cmn *txrx_cmn_hdl)
{
struct dp_txrx_handle *dp_ext_hdl;

dp_ext_hdl = dp_txrx_get_ext_hdl_frm_cmn_hdl(txrx_cmn_hdl);

return dp_ext_hdl->pdev;
}

/**
* dp_txrx_init() - initialize DP TXRX module
* @soc: ol_txrx_soc_handle
* @pdev: cdp_pdev handle
* @config: configuration for DP TXRX modules
*
* Return: QDF_STATUS_SUCCESS on success, error qdf status on failure
*/
QDF_STATUS dp_txrx_init(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
struct dp_txrx_config *config);

/**
* dp_txrx_deinit() - de-initialize DP TXRX module
* @soc: ol_txrx_soc_handle
*
* Return: QDF_STATUS_SUCCESS on success, error qdf status on failure
*/
QDF_STATUS dp_txrx_deinit(ol_txrx_soc_handle soc);

/**
* dp_txrx_resume() - resume all threads
* @soc: ol_txrx_soc_handle object
*
* Return: QDF_STATUS_SUCCESS on success, error qdf status on failure
*/
static inline QDF_STATUS dp_txrx_resume(ol_txrx_soc_handle soc)
{
struct dp_txrx_handle *dp_ext_hdl;
QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;

if (!soc) {
qdf_status = QDF_STATUS_E_INVAL;
goto ret;
}

dp_ext_hdl = cdp_soc_get_dp_txrx_handle(soc);
if (!dp_ext_hdl) {
qdf_status = QDF_STATUS_E_FAULT;
goto ret;
}

qdf_status = dp_rx_tm_resume(&dp_ext_hdl->rx_tm_hdl);
ret:
return qdf_status;
}

/**
* dp_txrx_suspend() - suspend all threads
* @soc: ol_txrx_soc_handle object
*
* Return: QDF_STATUS_SUCCESS on success, error qdf status on failure
*/
static inline QDF_STATUS dp_txrx_suspend(ol_txrx_soc_handle soc)
{
struct dp_txrx_handle *dp_ext_hdl;
QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;

if (!soc) {
qdf_status = QDF_STATUS_E_INVAL;
goto ret;
}

dp_ext_hdl = cdp_soc_get_dp_txrx_handle(soc);
if (!dp_ext_hdl) {
qdf_status = QDF_STATUS_E_FAULT;
goto ret;
}

qdf_status = dp_rx_tm_suspend(&dp_ext_hdl->rx_tm_hdl);

ret:
return qdf_status;
}

static inline
QDF_STATUS dp_rx_enqueue_pkt(ol_txrx_soc_handle soc, qdf_nbuf_t nbuf_list)
{
struct dp_txrx_handle *dp_ext_hdl;
QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;

if (!soc || !nbuf_list) {
qdf_status = QDF_STATUS_E_INVAL;
dp_err("invalid input params soc %pK nbuf %pK"
, soc, nbuf_list);
goto ret;
}

dp_ext_hdl = cdp_soc_get_dp_txrx_handle(soc);
if (!dp_ext_hdl) {
qdf_status = QDF_STATUS_E_FAULT;
goto ret;
}

qdf_status = dp_rx_tm_enqueue_pkt(&dp_ext_hdl->rx_tm_hdl, nbuf_list);
ret:
return qdf_status;
}

static inline QDF_STATUS dp_txrx_dump_stats(ol_txrx_soc_handle soc)
{
struct dp_txrx_handle *dp_ext_hdl;
QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;

if (!soc) {
qdf_status = QDF_STATUS_E_INVAL;
dp_err("invalid input params soc %pK", soc);
goto ret;
}

dp_ext_hdl = cdp_soc_get_dp_txrx_handle(soc);
if (!dp_ext_hdl) {
qdf_status = QDF_STATUS_E_FAULT;
goto ret;
}

qdf_status = dp_rx_tm_dump_stats(&dp_ext_hdl->rx_tm_hdl);
ret:
return qdf_status;
}
#else
static inline
QDF_STATUS dp_txrx_init(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
struct dp_txrx_config *config)
{
return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_txrx_deinit(ol_txrx_soc_handle soc)
{
return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_txrx_resume(ol_txrx_soc_handle soc)
{
return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_txrx_suspend(ol_txrx_soc_handle soc)
{
return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS dp_rx_enqueue_pkt(ol_txrx_soc_handle soc, qdf_nbuf_t nbuf_list)
{
return QDF_STATUS_SUCCESS;
}

static inline QDF_STATUS dp_txrx_dump_stats(ol_txrx_soc_handle soc)
{
return QDF_STATUS_SUCCESS;
}
#endif /* FEATURE_WLAN_DP_RX_THREADS */
#endif /* _DP_TXRX_H */
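
A usage illustration (editor-added sketch, assuming the usual cds_get_context() helper; the real hdd_rx_pkt_thread_enqueue_cbk() body is not part of this diff): an HDD receive callback hands its nbuf list to the DP RX threads through the dp_txrx API above.

static QDF_STATUS example_rx_thread_enqueue_cbk(void *adapter_context,
						qdf_nbuf_t nbuf_list)
{
	/* adapter_context is unused in this sketch */
	ol_txrx_soc_handle soc = cds_get_context(QDF_MODULE_ID_SOC);

	/* dp_rx_enqueue_pkt() resolves the dp_txrx_handle from the soc,
	 * picks a thread based on QDF_NBUF_CB_RX_CTX_ID(nbuf_list) and
	 * wakes it; the thread later delivers the list to the stack via
	 * the rx_stack callback registered in hdd_roam_register_sta(). */
	return dp_rx_enqueue_pkt(soc, nbuf_list);
}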
@@ -45,10 +45,11 @@ struct hdd_context;
#define FW_MODULE_LOG_LEVEL_STRING_LENGTH (512)
#define TX_SCHED_WRR_PARAM_STRING_LENGTH (50)
#define TX_SCHED_WRR_PARAMS_NUM (5)
#define CFG_ENABLE_RX_THREAD (1 << 0)
#define CFG_ENABLE_RPS (1 << 1)
#define CFG_ENABLE_NAPI (1 << 2)
#define CFG_ENABLE_DYNAMIC_RPS (1 << 3)
#define CFG_ENABLE_RX_THREAD BIT(0)
#define CFG_ENABLE_RPS BIT(1)
#define CFG_ENABLE_NAPI BIT(2)
#define CFG_ENABLE_DYNAMIC_RPS BIT(3)
#define CFG_ENABLE_DP_RX_THREADS BIT(4)

#ifdef DHCP_SERVER_OFFLOAD
#define IPADDR_NUM_ENTRIES (4)
@@ -9143,18 +9144,18 @@ enum dot11p_mode {

/*
* <ini>
* rx_mode - Control to decide rx mode
* rx_mode - Control to decide rx mode for packet processing
*
* @Min: 0
* @Max: (CFG_ENABLE_RX_THREAD | CFG_ENABLE_RPS | CFG_ENABLE_NAPI | \
* CFG_ENABLE_DYNAMIC_RPS)
* @Default: MDM_PLATFORM - 0
* HELIUMPLUS - CFG_ENABLE_NAPI
* Other cases - (CFG_ENABLE_RX_THREAD | CFG_ENABLE_NAPI)
*
* This ini is used to decide mode for the rx path
*
* Supported Feature: NAPI
* CFG_ENABLE_DYNAMIC_RPS)
* Some possible configurations:
* rx_mode=0 - Uses tasklets for bottom half
* CFG_ENABLE_NAPI (rx_mode=4) - Uses NAPI for bottom half
* CFG_ENABLE_RX_THREAD | CFG_ENABLE_NAPI (rx_mode=5) - NAPI for bottom half,
* rx_thread for stack. Single threaded.
* CFG_ENABLE_DP_RX_THREADS | CFG_ENABLE_NAPI (rx_mode=20) - NAPI for bottom
* half, dp_rx_thread for stack processing. Supports multiple rx threads.
*
* Usage: Internal
*
@@ -9163,15 +9164,35 @@ enum dot11p_mode {
#define CFG_RX_MODE_NAME "rx_mode"
#define CFG_RX_MODE_MIN (0)
#define CFG_RX_MODE_MAX (CFG_ENABLE_RX_THREAD | CFG_ENABLE_RPS | \
CFG_ENABLE_NAPI | CFG_ENABLE_DYNAMIC_RPS)
CFG_ENABLE_NAPI | CFG_ENABLE_DYNAMIC_RPS | \
CFG_ENABLE_DP_RX_THREADS)
#ifdef MDM_PLATFORM
#define CFG_RX_MODE_DEFAULT (0)
#elif defined(HELIUMPLUS)
#define CFG_RX_MODE_DEFAULT CFG_ENABLE_NAPI
#elif defined(QCA_WIFI_QCA6290_11AX)
#define CFG_RX_MODE_DEFAULT (CFG_ENABLE_DP_RX_THREADS | CFG_ENABLE_NAPI)
#else
#define CFG_RX_MODE_DEFAULT (CFG_ENABLE_RX_THREAD | CFG_ENABLE_NAPI)
#endif

/*
* <ini>
* num_dp_rx_threads - Control to set the number of dp rx threads
*
* @Min: 1
* @Max: 4
* @Default: 1
*
* Usage: Internal
*
* </ini>
*/
#define CFG_NUM_DP_RX_THREADS_NAME "num_dp_rx_threads"
#define CFG_NUM_DP_RX_THREADS_MIN (1)
#define CFG_NUM_DP_RX_THREADS_MAX (4)
#define CFG_NUM_DP_RX_THREADS_DEFAULT (1)

/*
* <ini>
* ce_service_max_yield_time - Control to set ce service max yield time (in us)
@@ -14458,6 +14479,7 @@ struct hdd_config {
uint8_t dot11p_mode;
bool etsi13_srd_chan_in_master_mode;
uint8_t rx_mode;
uint8_t num_dp_rx_threads;
uint32_t ce_service_max_yield_time;
uint8_t ce_service_max_rx_ind_flush;
uint32_t napi_cpu_affinity_mask;

@@ -1876,6 +1876,8 @@ struct hdd_context {
bool rps;
bool dynamic_rps;
bool enable_rxthread;
/* support for DP RX threads */
bool enable_dp_rx_threads;
bool napi_enable;
bool stop_modules_in_progress;
bool start_modules_in_progress;
@@ -2548,6 +2550,7 @@ int hdd_wlan_dump_stats(struct hdd_adapter *adapter, int value);
void wlan_hdd_deinit_tx_rx_histogram(struct hdd_context *hdd_ctx);
void wlan_hdd_display_tx_rx_histogram(struct hdd_context *hdd_ctx);
void wlan_hdd_clear_tx_rx_histogram(struct hdd_context *hdd_ctx);

void
wlan_hdd_display_netif_queue_history(struct hdd_context *hdd_ctx,
enum qdf_stats_verbosity_level verb_lvl);

@@ -98,7 +98,7 @@ QDF_STATUS hdd_softap_deinit_tx_rx_sta(struct hdd_adapter *adapter,

/**
* hdd_softap_rx_packet_cbk() - Receive packet handler
* @context: pointer to HDD context
* @adapter_context: pointer to HDD adapter
* @rx_buf: pointer to rx qdf_nbuf chain
*
* Receive callback registered with the Data Path. The Data Path will
@@ -108,7 +108,7 @@ QDF_STATUS hdd_softap_deinit_tx_rx_sta(struct hdd_adapter *adapter,
* Return: QDF_STATUS_E_FAILURE if any errors encountered,
* QDF_STATUS_SUCCESS otherwise
*/
QDF_STATUS hdd_softap_rx_packet_cbk(void *context, qdf_nbuf_t rx_buf);
QDF_STATUS hdd_softap_rx_packet_cbk(void *adapter_context, qdf_nbuf_t rx_buf);

/**
* hdd_softap_deregister_sta() - Deregister a STA with the Data Path

@@ -33,6 +33,27 @@

struct hdd_context;

#define hdd_dp_alert(params...) QDF_TRACE_FATAL(QDF_MODULE_ID_HDD_DATA, params)
#define hdd_dp_err(params...) QDF_TRACE_ERROR(QDF_MODULE_ID_HDD_DATA, params)
#define hdd_dp_warn(params...) QDF_TRACE_WARN(QDF_MODULE_ID_HDD_DATA, params)
#define hdd_dp_info(params...) QDF_TRACE_INFO(QDF_MODULE_ID_HDD_DATA, params)
#define hdd_dp_debug(params...) QDF_TRACE_DEBUG(QDF_MODULE_ID_HDD_DATA, params)

#define hdd_dp_alert_rl(params...) \
QDF_TRACE_FATAL_RL(QDF_MODULE_ID_HDD_DATA, params)
#define hdd_dp_err_rl(params...) \
QDF_TRACE_ERROR_RL(QDF_MODULE_ID_HDD_DATA, params)
#define hdd_dp_warn_rl(params...) \
QDF_TRACE_WARN_RL(QDF_MODULE_ID_HDD_DATA, params)
#define hdd_dp_info_rl(params...) \
QDF_TRACE_INFO_RL(QDF_MODULE_ID_HDD_DATA, params)
#define hdd_dp_debug_rl(params...) \
QDF_TRACE_DEBUG_RL(QDF_MODULE_ID_HDD_DATA, params)

#define hdd_dp_enter() hdd_dp_debug("enter")
#define hdd_dp_enter_dev(dev) hdd_dp_debug("enter(%s)", (dev)->name)
#define hdd_dp_exit() hdd_dp_debug("exit")

#define HDD_ETHERTYPE_802_1_X 0x888E
#define HDD_ETHERTYPE_802_1_X_FRAME_OFFSET 12
#ifdef FEATURE_WLAN_WAPI
@@ -55,7 +76,33 @@ void hdd_tx_timeout(struct net_device *dev);

QDF_STATUS hdd_init_tx_rx(struct hdd_adapter *adapter);
QDF_STATUS hdd_deinit_tx_rx(struct hdd_adapter *adapter);
QDF_STATUS hdd_rx_packet_cbk(void *context, qdf_nbuf_t rxBuf);

/**
* hdd_rx_packet_cbk() - Receive packet handler
* @adapter_context: pointer to HDD adapter context
* @rxBuf: pointer to rx qdf_nbuf
*
* Receive callback registered with data path. DP will call this to notify
* the HDD when one or more packets were received for a registered
* STA.
*
* Return: QDF_STATUS_E_FAILURE if any errors encountered,
* QDF_STATUS_SUCCESS otherwise
*/
QDF_STATUS hdd_rx_packet_cbk(void *adapter_context, qdf_nbuf_t rxBuf);

/**
* hdd_rx_pkt_thread_enqueue_cbk() - receive pkt handler to enqueue into thread
* @adapter_context: pointer to HDD adapter context
* @nbuf_list: pointer to the list of rx qdf_nbufs
*
|
||||
* Receive callback registered with DP layer which enqueues packets into dp rx
|
||||
* thread
|
||||
* Return: QDF_STATUS_E_FAILURE if any errors encountered,
|
||||
* QDF_STATUS_SUCCESS otherwise
|
||||
*/
|
||||
QDF_STATUS hdd_rx_pkt_thread_enqueue_cbk(void *adapter_context,
|
||||
qdf_nbuf_t nbuf_list);
|
||||
|
||||
/**
|
||||
* hdd_rx_ol_init() - Initialize Rx mode(LRO or GRO) method
|
||||
|
@@ -2066,7 +2066,15 @@ QDF_STATUS hdd_roam_register_sta(struct hdd_adapter *adapter,
|
||||
|
||||
/* Register the vdev transmit and receive functions */
|
||||
qdf_mem_zero(&txrx_ops, sizeof(txrx_ops));
|
||||
txrx_ops.rx.rx = hdd_rx_packet_cbk;
|
||||
|
||||
if (adapter->hdd_ctx->enable_dp_rx_threads) {
|
||||
txrx_ops.rx.rx = hdd_rx_pkt_thread_enqueue_cbk;
|
||||
txrx_ops.rx.rx_stack = hdd_rx_packet_cbk;
|
||||
} else {
|
||||
txrx_ops.rx.rx = hdd_rx_packet_cbk;
|
||||
txrx_ops.rx.rx_stack = NULL;
|
||||
}
|
||||
|
||||
txrx_ops.rx.stats_rx = hdd_tx_rx_collect_connectivity_stats_info;
|
||||
|
||||
adapter->txrx_vdev = (void *)cdp_get_vdev_from_vdev_id(soc,
|
||||
|
@@ -3887,6 +3887,13 @@ struct reg_table_entry g_registry_table[] = {
|
||||
CFG_RX_MODE_MIN,
|
||||
CFG_RX_MODE_MAX),
|
||||
|
||||
REG_VARIABLE(CFG_NUM_DP_RX_THREADS_NAME, WLAN_PARAM_Integer,
|
||||
struct hdd_config, num_dp_rx_threads,
|
||||
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
|
||||
CFG_NUM_DP_RX_THREADS_DEFAULT,
|
||||
CFG_NUM_DP_RX_THREADS_MIN,
|
||||
CFG_NUM_DP_RX_THREADS_MAX),
|
||||
|
||||
REG_VARIABLE(CFG_CE_SERVICE_MAX_YIELD_TIME_NAME, WLAN_PARAM_Integer,
|
||||
struct hdd_config, ce_service_max_yield_time,
|
||||
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
|
||||
@@ -7383,6 +7390,8 @@ static void hdd_set_rx_mode_value(struct hdd_context *hdd_ctx)
|
||||
|
||||
if (hdd_ctx->config->rx_mode & CFG_ENABLE_RX_THREAD)
|
||||
hdd_ctx->enable_rxthread = true;
|
||||
else if (hdd_ctx->config->rx_mode & CFG_ENABLE_DP_RX_THREADS)
|
||||
hdd_ctx->enable_dp_rx_threads = true;
|
||||
|
||||
if (hdd_ctx->config->rx_mode & CFG_ENABLE_RPS)
|
||||
hdd_ctx->rps = true;
|
||||
@@ -7392,6 +7401,11 @@ static void hdd_set_rx_mode_value(struct hdd_context *hdd_ctx)
|
||||
|
||||
if (hdd_ctx->config->rx_mode & CFG_ENABLE_DYNAMIC_RPS)
|
||||
hdd_ctx->dynamic_rps = true;
|
||||
|
||||
hdd_info("rx_mode:%u dp_rx_threads:%u rx_thread:%u napi:%u rps:%u dynamic rps %u",
|
||||
hdd_ctx->config->rx_mode, hdd_ctx->enable_dp_rx_threads,
|
||||
hdd_ctx->enable_rxthread, hdd_ctx->napi_enable,
|
||||
hdd_ctx->rps, hdd_ctx->dynamic_rps);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@@ -125,6 +125,7 @@
|
||||
#include "sme_power_save_api.h"
|
||||
#include "enet.h"
|
||||
#include <cdp_txrx_cmn_struct.h>
|
||||
#include <dp_txrx.h>
|
||||
#include "wlan_hdd_sysfs.h"
|
||||
#include "wlan_disa_ucfg_api.h"
|
||||
#include "wlan_disa_obj_mgmt_api.h"
|
||||
@@ -7355,11 +7356,10 @@ static int hdd_wiphy_init(struct hdd_context *hdd_ctx)
 *
 * Returns: None
 */
static inline
void hdd_display_periodic_stats(struct hdd_context *hdd_ctx,
                                bool data_in_interval)
static void hdd_display_periodic_stats(struct hdd_context *hdd_ctx,
                                       bool data_in_interval)
{
    static u32 counter;
    static uint32_t counter;
    static bool data_in_time_period;
    ol_txrx_pdev_handle pdev;

@@ -7379,6 +7379,11 @@ void hdd_display_periodic_stats(struct hdd_context *hdd_ctx,
    if (counter * hdd_ctx->config->busBandwidthComputeInterval >=
        hdd_ctx->config->periodic_stats_disp_time * 1000) {
        if (data_in_time_period) {
            wlan_hdd_display_txrx_stats(hdd_ctx);
            dp_txrx_dump_stats(cds_get_context(QDF_MODULE_ID_SOC));
            cdp_display_stats(cds_get_context(QDF_MODULE_ID_SOC),
                              CDP_RX_RING_STATS,
                              QDF_STATS_VERBOSITY_LEVEL_LOW);
            cdp_display_stats(cds_get_context(QDF_MODULE_ID_SOC),
                              CDP_TXRX_PATH_STATS,
                              QDF_STATS_VERBOSITY_LEVEL_LOW);
@@ -9350,6 +9355,9 @@ static inline void hdd_txrx_populate_cds_config(struct cds_config_info
        hdd_ctx->config->TxFlowStopQueueThreshold;
    cds_cfg->tx_flow_start_queue_offset =
        hdd_ctx->config->TxFlowStartQueueOffset;
    /* configuration for DP RX Threads */
    cds_cfg->enable_dp_rx_threads = hdd_ctx->enable_dp_rx_threads;
    cds_cfg->num_dp_rx_threads = hdd_ctx->config->num_dp_rx_threads;
}
#else
static inline void hdd_txrx_populate_cds_config(struct cds_config_info
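hdd_txrx_populate_cds_config() only copies the two knobs into cds_config_info; the new DP module is expected to pick them up when the DP path is opened. A hedged sketch of that hand-off is shown below: the dp_txrx_config field names and the exact dp_txrx_init() signature are assumptions for illustration, not something this hunk shows.

/*
 * Sketch under stated assumptions: how the CDS open path could translate
 * the cds config into the new DP RX thread module's config. Field names
 * and the dp_txrx_init() signature are illustrative.
 */
static QDF_STATUS example_dp_rx_threads_bringup(struct cds_config_info *cds_cfg)
{
    struct dp_txrx_config dp_config = {0};

    dp_config.enable_rx_threads = cds_cfg->enable_dp_rx_threads;
    dp_config.num_rx_threads = cds_cfg->num_dp_rx_threads;

    /* dp_txrx_init() is expected to spawn the RX threads only when
     * enable_rx_threads is set
     */
    return dp_txrx_init(cds_get_context(QDF_MODULE_ID_SOC),
                        cds_get_context(QDF_MODULE_ID_TXRX),
                        &dp_config);
}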
@@ -74,10 +74,10 @@
#include "cds_utils.h"
#include "wlan_hdd_packet_filter_api.h"
#include "wlan_cfg80211_scan.h"
#include <dp_txrx.h>
#include "wlan_ipa_ucfg_api.h"
#include <wlan_cfg80211_mc_cp_stats.h>
#include "wlan_p2p_ucfg_api.h"

/* Preprocessor definitions and constants */
#ifdef QCA_WIFI_NAPIER_EMULATION
#define HDD_SSR_BRING_UP_TIME 3000000
@@ -85,6 +85,9 @@
#define HDD_SSR_BRING_UP_TIME 30000
#endif

/* timeout in msec to wait for RX_THREAD to suspend */
#define HDD_RXTHREAD_SUSPEND_TIMEOUT 200

/* Type declarations */

#ifdef FEATURE_WLAN_DIAG_SUPPORT
@@ -1232,12 +1235,11 @@ QDF_STATUS hdd_wlan_shutdown(void)
        hdd_ctx->is_scheduler_suspended = false;
        hdd_ctx->is_wiphy_suspended = false;
    }
#ifdef QCA_CONFIG_SMP

    if (true == hdd_ctx->is_ol_rx_thread_suspended) {
        complete(&cds_sched_context->ol_resume_rx_event);
        hdd_ctx->is_ol_rx_thread_suspended = false;
    }
#endif

    hdd_wlan_stop_modules(hdd_ctx, false);
||||
@@ -1538,13 +1540,16 @@ static int __wlan_hdd_cfg80211_resume_wlan(struct wiphy *wiphy)
|
||||
scheduler_resume();
|
||||
hdd_ctx->is_scheduler_suspended = false;
|
||||
}
|
||||
#ifdef QCA_CONFIG_SMP
|
||||
|
||||
/* Resume tlshim Rx thread */
|
||||
if (hdd_ctx->is_ol_rx_thread_suspended) {
|
||||
if (hdd_ctx->enable_rxthread && hdd_ctx->is_ol_rx_thread_suspended) {
|
||||
complete(&cds_sched_context->ol_resume_rx_event);
|
||||
hdd_ctx->is_ol_rx_thread_suspended = false;
|
||||
}
|
||||
#endif
|
||||
|
||||
if (hdd_ctx->enable_dp_rx_threads)
|
||||
dp_txrx_resume(cds_get_context(QDF_MODULE_ID_SOC));
|
||||
|
||||
|
||||
MTRACE(qdf_trace(QDF_MODULE_ID_HDD,
|
||||
TRACE_CODE_HDD_CFG80211_RESUME_WLAN,
|
||||
@@ -1595,9 +1600,6 @@ static void hdd_suspend_cb(void)
|
||||
static int __wlan_hdd_cfg80211_suspend_wlan(struct wiphy *wiphy,
|
||||
struct cfg80211_wowlan *wow)
|
||||
{
|
||||
#ifdef QCA_CONFIG_SMP
|
||||
#define RX_TLSHIM_SUSPEND_TIMEOUT 200 /* msecs */
|
||||
#endif
|
||||
struct hdd_context *hdd_ctx = wiphy_priv(wiphy);
|
||||
p_cds_sched_context cds_sched_context = get_cds_sched_ctxt();
|
||||
struct hdd_adapter *adapter;
|
||||
@@ -1722,22 +1724,27 @@ static int __wlan_hdd_cfg80211_suspend_wlan(struct wiphy *wiphy,
|
||||
}
|
||||
hdd_ctx->is_scheduler_suspended = true;
|
||||
|
||||
#ifdef QCA_CONFIG_SMP
|
||||
/* Suspend tlshim rx thread */
|
||||
set_bit(RX_SUSPEND_EVENT, &cds_sched_context->ol_rx_event_flag);
|
||||
wake_up_interruptible(&cds_sched_context->ol_rx_wait_queue);
|
||||
rc = wait_for_completion_timeout(&cds_sched_context->
|
||||
ol_suspend_rx_event,
|
||||
msecs_to_jiffies
|
||||
(RX_TLSHIM_SUSPEND_TIMEOUT));
|
||||
if (!rc) {
|
||||
clear_bit(RX_SUSPEND_EVENT,
|
||||
&cds_sched_context->ol_rx_event_flag);
|
||||
hdd_err("Failed to stop tl_shim rx thread");
|
||||
goto resume_all;
|
||||
if (hdd_ctx->enable_rxthread) {
|
||||
/* Suspend tlshim rx thread */
|
||||
set_bit(RX_SUSPEND_EVENT, &cds_sched_context->ol_rx_event_flag);
|
||||
wake_up_interruptible(&cds_sched_context->ol_rx_wait_queue);
|
||||
rc = wait_for_completion_timeout(&cds_sched_context->
|
||||
ol_suspend_rx_event,
|
||||
msecs_to_jiffies
|
||||
(HDD_RXTHREAD_SUSPEND_TIMEOUT)
|
||||
);
|
||||
if (!rc) {
|
||||
clear_bit(RX_SUSPEND_EVENT,
|
||||
&cds_sched_context->ol_rx_event_flag);
|
||||
hdd_err("Failed to stop tl_shim rx thread");
|
||||
goto resume_all;
|
||||
}
|
||||
hdd_ctx->is_ol_rx_thread_suspended = true;
|
||||
}
|
||||
hdd_ctx->is_ol_rx_thread_suspended = true;
|
||||
#endif
|
||||
|
||||
if (hdd_ctx->enable_dp_rx_threads)
|
||||
dp_txrx_suspend(cds_get_context(QDF_MODULE_ID_SOC));
|
||||
|
||||
if (hdd_suspend_wlan() < 0)
|
||||
goto resume_all;
|
||||
|
||||
@@ -1751,15 +1758,10 @@ static int __wlan_hdd_cfg80211_suspend_wlan(struct wiphy *wiphy,
|
||||
hdd_exit();
|
||||
return 0;
|
||||
|
||||
#ifdef QCA_CONFIG_SMP
|
||||
resume_all:
|
||||
|
||||
scheduler_resume();
|
||||
hdd_ctx->is_scheduler_suspended = false;
|
||||
#endif
|
||||
|
||||
resume_tx:
|
||||
|
||||
hdd_resume_wlan();
|
||||
return -ETIME;
|
||||
|
||||
|
@@ -825,7 +825,7 @@ static void hdd_softap_notify_tx_compl_cbk(struct sk_buff *skb,
|
||||
}
|
||||
}
|
||||
|
||||
QDF_STATUS hdd_softap_rx_packet_cbk(void *context, qdf_nbuf_t rx_buf)
|
||||
QDF_STATUS hdd_softap_rx_packet_cbk(void *adapter_context, qdf_nbuf_t rx_buf)
|
||||
{
|
||||
struct hdd_adapter *adapter = NULL;
|
||||
int rxstat;
|
||||
@@ -837,13 +837,13 @@ QDF_STATUS hdd_softap_rx_packet_cbk(void *context, qdf_nbuf_t rx_buf)
|
||||
uint8_t staid;
|
||||
|
||||
/* Sanity check on inputs */
|
||||
if (unlikely((NULL == context) || (NULL == rx_buf))) {
|
||||
if (unlikely((!adapter_context) || (!rx_buf))) {
|
||||
QDF_TRACE(QDF_MODULE_ID_HDD_SAP_DATA, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: Null params being passed", __func__);
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
adapter = (struct hdd_adapter *)context;
|
||||
adapter = (struct hdd_adapter *)adapter_context;
|
||||
if (unlikely(WLAN_HDD_ADAPTER_MAGIC != adapter->magic)) {
|
||||
QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
|
||||
"Magic cookie(%x) for adapter sanity verification is invalid",
|
||||
@@ -1036,8 +1036,17 @@ QDF_STATUS hdd_softap_register_sta(struct hdd_adapter *adapter,

    /* Register the vdev transmit and receive functions */
    qdf_mem_zero(&txrx_ops, sizeof(txrx_ops));
    txrx_ops.rx.rx = hdd_softap_rx_packet_cbk;

    txrx_ops.tx.tx_comp = hdd_softap_notify_tx_compl_cbk;

    if (adapter->hdd_ctx->enable_dp_rx_threads) {
        txrx_ops.rx.rx = hdd_rx_pkt_thread_enqueue_cbk;
        txrx_ops.rx.rx_stack = hdd_softap_rx_packet_cbk;
    } else {
        txrx_ops.rx.rx = hdd_softap_rx_packet_cbk;
        txrx_ops.rx.rx_stack = NULL;
    }

    cdp_vdev_register(soc,
                      (struct cdp_vdev *)cdp_get_vdev_from_vdev_id(soc,
                      (struct cdp_pdev *)pdev, adapter->session_id),
@@ -6017,3 +6017,42 @@ int wlan_hdd_get_temperature(struct hdd_adapter *adapter, int *temperature)
    hdd_exit();
    return 0;
}

void wlan_hdd_display_txrx_stats(struct hdd_context *hdd_ctx)
{
    struct hdd_adapter *adapter = NULL;
    struct hdd_tx_rx_stats *stats;
    int i = 0;
    uint32_t total_rx_pkt, total_rx_dropped,
             total_rx_delv, total_rx_refused;

    hdd_for_each_adapter(hdd_ctx, adapter) {
        total_rx_pkt = 0;
        total_rx_dropped = 0;
        total_rx_delv = 0;
        total_rx_refused = 0;
        stats = &adapter->hdd_stats.tx_rx_stats;
        hdd_info("adapter: %u", adapter->session_id);
        for (i = 0; i < NUM_CPUS; i++) {
            total_rx_pkt += stats->rx_packets[i];
            total_rx_dropped += stats->rx_dropped[i];
            total_rx_delv += stats->rx_delivered[i];
            total_rx_refused += stats->rx_refused[i];
        }

        hdd_info("Total Transmit - called %u, dropped %u orphan %u",
                 stats->tx_called, stats->tx_dropped,
                 stats->tx_orphaned);

        for (i = 0; i < NUM_CPUS; i++) {
            if (stats->rx_packets[i] == 0)
                continue;
            hdd_info("Rx CPU[%d]: packets %u, dropped %u, delivered %u, refused %u",
                     i, stats->rx_packets[i], stats->rx_dropped[i],
                     stats->rx_delivered[i], stats->rx_refused[i]);
        }
        hdd_info("Total Receive - packets %u, dropped %u, delivered %u, refused %u",
                 total_rx_pkt, total_rx_dropped, total_rx_delv,
                 total_rx_refused);
    }
}
@@ -462,4 +462,14 @@ int wlan_hdd_get_temperature(struct hdd_adapter *adapter, int *temperature);
 * Return: QDF_STATUS_SUCCESS if adapter's statistics were updated
 */
int wlan_hdd_request_station_stats(struct hdd_adapter *adapter);

/**
 * wlan_hdd_display_txrx_stats() - display HDD txrx stats summary
 * @hdd_ctx: hdd context
 *
 * Display TXRX Stats for all adapters
 *
 * Return: none
 */
void wlan_hdd_display_txrx_stats(struct hdd_context *hdd_ctx);
#endif /* end #if !defined(WLAN_HDD_STATS_H) */
@@ -59,6 +59,7 @@
#include "wma_api.h"

#include "wlan_hdd_nud_tracking.h"
#include "dp_txrx.h"

#ifdef QCA_LL_TX_FLOW_CONTROL_V2
/*
@@ -1818,19 +1819,17 @@ static inline void hdd_tsf_timestamp_rx(struct hdd_context *hdd_ctx,
}
#endif

/**
 * hdd_rx_packet_cbk() - Receive packet handler
 * @adapter_context: pointer to HDD adapter
 * @rxBuf: pointer to rx qdf_nbuf
 *
 * Receive callback registered with TL. TL will call this to notify
 * the HDD when one or more packets were received for a registered
 * STA.
 *
 * Return: QDF_STATUS_E_FAILURE if any errors encountered,
 * QDF_STATUS_SUCCESS otherwise
 */
QDF_STATUS hdd_rx_packet_cbk(void *context, qdf_nbuf_t rxBuf)
QDF_STATUS hdd_rx_pkt_thread_enqueue_cbk(void *adapter_context,
                                         qdf_nbuf_t nbuf_list)
{
    if (unlikely((!adapter_context) || (!nbuf_list))) {
        hdd_err("Null params being passed");
        return QDF_STATUS_E_FAILURE;
    }
    return dp_rx_enqueue_pkt(cds_get_context(QDF_MODULE_ID_SOC), nbuf_list);
}

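/*
 * Editor's sketch, not part of this change: hdd_rx_pkt_thread_enqueue_cbk()
 * above defers everything to dp_rx_enqueue_pkt() from core/dp/txrx3.0.
 * Conceptually that helper must (1) select one of the configured DP RX
 * threads, (2) queue the nbuf list on it and (3) wake the thread so it can
 * deliver the packets through the rx_stack callback. The code below only
 * illustrates that contract; every type and helper name in it is assumed.
 */
struct example_rx_thread {
    qdf_nbuf_queue_t nbuf_q;    /* packets waiting for the thread */
    qdf_event_t post_event;     /* the thread blocks on this event */
};

static QDF_STATUS example_rx_enqueue(struct example_rx_thread *thread,
                                     qdf_nbuf_t nbuf_list)
{
    qdf_nbuf_queue_add(&thread->nbuf_q, nbuf_list); /* (2) stash the list */
    qdf_event_set(&thread->post_event);             /* (3) kick the thread */
    return QDF_STATUS_SUCCESS;
}
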
QDF_STATUS hdd_rx_packet_cbk(void *adapter_context,
                             qdf_nbuf_t rxBuf)
{
    struct hdd_adapter *adapter = NULL;
    struct hdd_context *hdd_ctx = NULL;
@@ -1846,13 +1845,13 @@ QDF_STATUS hdd_rx_packet_cbk(void *context, qdf_nbuf_t rxBuf)
    bool track_arp = false;

    /* Sanity check on inputs */
    if (unlikely((NULL == context) || (NULL == rxBuf))) {
    if (unlikely((!adapter_context) || (!rxBuf))) {
        QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
                  "%s: Null params being passed", __func__);
        return QDF_STATUS_E_FAILURE;
    }

    adapter = (struct hdd_adapter *)context;
    adapter = (struct hdd_adapter *)adapter_context;
    if (unlikely(WLAN_HDD_ADAPTER_MAGIC != adapter->magic)) {
        QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
                  "Magic cookie(%x) for adapter sanity verification is invalid",