qcacmn: Add SWLM support for WCN6450

Changes required to support the SWLM (software latency manager) feature on WCN6450.

Change-Id: I306cba8dcefa8f34a9546285b33b974987aec625
CRs-Fixed: 3540269
This commit is contained in:
Venkateswara Naralasetty
2023-06-26 10:52:53 +05:30
committed by Rahul Choudhary
parent 6ca74c40fd
commit fc93f83a78
11 changed files with 108 additions and 17 deletions

View File

@@ -2986,6 +2986,7 @@ void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops)
#if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM) #if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM)
arch_ops->dp_update_ring_hptp = dp_update_ring_hptp; arch_ops->dp_update_ring_hptp = dp_update_ring_hptp;
#endif #endif
arch_ops->dp_flush_tx_ring = dp_flush_tcl_ring;
dp_initialize_arch_ops_be_ipa(arch_ops); dp_initialize_arch_ops_be_ipa(arch_ops);
dp_initialize_arch_ops_be_single_dev(arch_ops); dp_initialize_arch_ops_be_single_dev(arch_ops);
dp_initialize_arch_ops_be_fisa(arch_ops); dp_initialize_arch_ops_be_fisa(arch_ops);

View File

@@ -396,6 +396,15 @@ void dp_drain_txrx(struct cdp_soc_t *soc_handle);
void dp_update_ring_hptp(struct dp_soc *soc, bool force_flush_tx); void dp_update_ring_hptp(struct dp_soc *soc, bool force_flush_tx);
#endif #endif
/**
* dp_flush_tcl_ring() - flush TCL ring hp
* @pdev: dp pdev
* @ring_id: TCL ring id
*
* Return: 0 on success and error code on failure
*/
int dp_flush_tcl_ring(struct dp_pdev *pdev, int ring_id);
#ifdef WLAN_FEATURE_STATS_EXT #ifdef WLAN_FEATURE_STATS_EXT
/** /**
* dp_request_rx_hw_stats - request rx hardware stats * dp_request_rx_hw_stats - request rx hardware stats

View File

@@ -3904,6 +3904,45 @@ void dp_update_ring_hptp(struct dp_soc *soc, bool force_flush_tx)
} }
#endif #endif
#ifdef WLAN_DP_FEATURE_SW_LATENCY_MGR
/**
 * dp_flush_tcl_ring() - flush TCL ring hp
 * @pdev: dp pdev
 * @ring_id: TCL ring id
 *
 * Commits the TCL data ring's cached head pointer to HW. If the async
 * runtime-PM get fails, the HW update is skipped and a
 * HAL_SRNG_FLUSH_EVENT is recorded on the SRNG so the pending flush
 * can be replayed later (presumably on resume - confirm against the
 * RTPM resume path).
 *
 * Return: 0 on success and error code on failure
 */
int dp_flush_tcl_ring(struct dp_pdev *pdev, int ring_id)
{
	struct dp_soc *soc = pdev->soc;
	hal_ring_handle_t hal_ring_hdl =
			soc->tcl_data_ring[ring_id].hal_srng;
	int ret;

	/* Bail out if ring access cannot be started right now */
	ret = hal_srng_try_access_start(soc->hal_soc, hal_ring_hdl);
	if (ret)
		return ret;

	ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP);
	if (ret) {
		/* RTPM get failed: end ring access without committing
		 * the head pointer, and mark the ring with a flush
		 * event so this flush is not lost.
		 */
		hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl);
		hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
		hal_srng_inc_flush_cnt(hal_ring_hdl);
		return ret;
	}

	/* Normal path: end access (commits hp) and drop the RTPM ref */
	hal_srng_access_end(soc->hal_soc, hal_ring_hdl);
	hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_DP);
	return ret;
}
#else
/* SWLM compiled out: nothing to flush.
 * NOTE(review): returns QDF_STATUS_SUCCESS (0) from an int-returning
 * function - value is correct but mixing QDF_STATUS with the plain
 * error-code contract above is inconsistent.
 */
int dp_flush_tcl_ring(struct dp_pdev *pdev, int ring_id)
{
	return QDF_STATUS_SUCCESS;
}
#endif
#ifdef WLAN_FEATURE_STATS_EXT #ifdef WLAN_FEATURE_STATS_EXT
/* rx hw stats event wait timeout in ms */ /* rx hw stats event wait timeout in ms */
#define DP_REO_STATUS_STATS_TIMEOUT 100 #define DP_REO_STATUS_STATS_TIMEOUT 100

View File

@@ -1993,7 +1993,7 @@ static inline uint32_t dp_tx_get_pkt_len(struct dp_tx_desc_s *tx_desc)
{ {
return tx_desc->frm_type == dp_tx_frm_tso ? return tx_desc->frm_type == dp_tx_frm_tso ?
tx_desc->msdu_ext_desc->tso_desc->seg.total_len : tx_desc->msdu_ext_desc->tso_desc->seg.total_len :
qdf_nbuf_len(tx_desc->nbuf); tx_desc->length;
} }
#ifdef FEATURE_RUNTIME_PM #ifdef FEATURE_RUNTIME_PM

View File

@@ -2242,6 +2242,7 @@ enum dp_context_type {
* @txrx_soc_ppeds_txdesc_pool_reset: * @txrx_soc_ppeds_txdesc_pool_reset:
* @dp_update_ring_hptp: Update rings hptp during suspend/resume * @dp_update_ring_hptp: Update rings hptp during suspend/resume
* @dp_get_fst_cmem_base: Get CMEM base address for FISA * @dp_get_fst_cmem_base: Get CMEM base address for FISA
* @dp_flush_tx_ring: Flush TCL ring HP
*/ */
struct dp_arch_ops { struct dp_arch_ops {
/* INIT/DEINIT Arch Ops */ /* INIT/DEINIT Arch Ops */
@@ -2504,6 +2505,7 @@ struct dp_arch_ops {
#endif #endif
void (*dp_update_ring_hptp)(struct dp_soc *soc, bool force_flush_tx); void (*dp_update_ring_hptp)(struct dp_soc *soc, bool force_flush_tx);
uint64_t (*dp_get_fst_cmem_base)(struct dp_soc *soc, uint64_t size); uint64_t (*dp_get_fst_cmem_base)(struct dp_soc *soc, uint64_t size);
int (*dp_flush_tx_ring)(struct dp_pdev *pdev, int ring_id);
}; };
/** /**

View File

@@ -722,6 +722,7 @@ void dp_initialize_arch_ops_li(struct dp_arch_ops *arch_ops)
#if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM) #if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM)
arch_ops->dp_update_ring_hptp = dp_update_ring_hptp; arch_ops->dp_update_ring_hptp = dp_update_ring_hptp;
#endif #endif
arch_ops->dp_flush_tx_ring = dp_flush_tcl_ring;
} }
#ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH #ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH

View File

@@ -836,4 +836,5 @@ void dp_initialize_arch_ops_rh(struct dp_arch_ops *arch_ops)
#if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM) #if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM)
arch_ops->dp_update_ring_hptp = dp_update_ring_hptp_rh; arch_ops->dp_update_ring_hptp = dp_update_ring_hptp_rh;
#endif #endif
arch_ops->dp_flush_tx_ring = dp_flush_tx_ring_rh;
} }

View File

@@ -186,33 +186,31 @@ dp_tx_record_hw_desc_rh(uint8_t *hal_tx_desc_cached, struct dp_soc *soc)
#if defined(FEATURE_RUNTIME_PM) #if defined(FEATURE_RUNTIME_PM)
static void dp_tx_update_write_index(struct dp_soc *soc, static void dp_tx_update_write_index(struct dp_soc *soc,
struct dp_tx_ep_info_rh *tx_ep_info) struct dp_tx_ep_info_rh *tx_ep_info,
int coalesce)
{ {
int ret; int ret;
/* Avoid runtime get and put APIs under high throughput scenarios */ /* Avoid runtime get and put APIs under high throughput scenarios */
if (dp_get_rtpm_tput_policy_requirement(soc)) { if (dp_get_rtpm_tput_policy_requirement(soc)) {
ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl, ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
true); coalesce);
return; return;
} }
ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP); ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP);
if (QDF_IS_STATUS_SUCCESS(ret)) { if (QDF_IS_STATUS_SUCCESS(ret)) {
if (hif_system_pm_state_check(soc->hif_handle)) { if (hif_system_pm_state_check(soc->hif_handle)) {
ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl, false);
ce_ring_set_event(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring, ce_ring_set_event(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring,
CE_RING_FLUSH_EVENT); CE_RING_FLUSH_EVENT);
ce_ring_inc_flush_cnt(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring); ce_ring_inc_flush_cnt(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring);
} else { } else {
ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl, ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
true); coalesce);
} }
hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_DP); hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_DP);
} else { } else {
dp_runtime_get(soc); dp_runtime_get(soc);
ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
false);
ce_ring_set_event(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring, ce_ring_set_event(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring,
CE_RING_FLUSH_EVENT); CE_RING_FLUSH_EVENT);
ce_ring_inc_flush_cnt(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring); ce_ring_inc_flush_cnt(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring);
@@ -225,14 +223,12 @@ static void dp_tx_update_write_index(struct dp_soc *soc,
struct dp_tx_ep_info_rh *tx_ep_info) struct dp_tx_ep_info_rh *tx_ep_info)
{ {
if (hif_system_pm_state_check(soc->hif_handle)) { if (hif_system_pm_state_check(soc->hif_handle)) {
ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
false);
ce_ring_set_event(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring, ce_ring_set_event(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring,
CE_RING_FLUSH_EVENT); CE_RING_FLUSH_EVENT);
ce_ring_inc_flush_cnt(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring); ce_ring_inc_flush_cnt(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring);
} else { } else {
ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl, ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
true); coalesce);
} }
} }
#else #else
@@ -240,10 +236,40 @@ static void dp_tx_update_write_index(struct dp_soc *soc,
struct dp_tx_ep_info_rh *tx_ep_info) struct dp_tx_ep_info_rh *tx_ep_info)
{ {
ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl, ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
true); coalesce);
} }
#endif #endif
/**
 * dp_flush_tx_ring_rh() - flush tx ring write index
 * @pdev: dp pdev handle
 * @ring_id: Tx ring id (not referenced by this implementation; the RH
 *           pdev exposes a single tx_ep_info)
 *
 * Forces the CE tx ring's HW write index update under the CE ring
 * lock. If the async runtime-PM get fails, a CE_RING_FLUSH_EVENT is
 * recorded on the source ring instead so the flush can happen later.
 *
 * Return: 0 on success and error code on failure
 */
int dp_flush_tx_ring_rh(struct dp_pdev *pdev, int ring_id)
{
	struct dp_pdev_rh *rh_pdev = dp_get_rh_pdev_from_dp_pdev(pdev);
	struct dp_tx_ep_info_rh *tx_ep_info = &rh_pdev->tx_ep_info;
	int ret;

	/* Wrapper below must be called under the CE index lock */
	ce_ring_aquire_lock(tx_ep_info->ce_tx_hdl);
	ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP);
	if (ret) {
		/* RTPM get failed: drop the lock, mark the source ring
		 * with a flush event and bump the flush counter so the
		 * pending write-index update is not lost.
		 */
		ce_ring_release_lock(tx_ep_info->ce_tx_hdl);
		ce_ring_set_event(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring,
				  CE_RING_FLUSH_EVENT);
		ce_ring_inc_flush_cnt(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring);
		return ret;
	}

	/* coalesce = false: commit the HW write index immediately */
	ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl, false);
	ce_ring_release_lock(tx_ep_info->ce_tx_hdl);
	hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_DP);
	return ret;
}
QDF_STATUS QDF_STATUS
dp_tx_hw_enqueue_rh(struct dp_soc *soc, struct dp_vdev *vdev, dp_tx_hw_enqueue_rh(struct dp_soc *soc, struct dp_vdev *vdev,
struct dp_tx_desc_s *tx_desc, uint16_t fw_metadata, struct dp_tx_desc_s *tx_desc, uint16_t fw_metadata,
@@ -256,6 +282,7 @@ dp_tx_hw_enqueue_rh(struct dp_soc *soc, struct dp_vdev *vdev,
qdf_nbuf_t nbuf = tx_desc->nbuf; qdf_nbuf_t nbuf = tx_desc->nbuf;
uint8_t tid = msdu_info->tid; uint8_t tid = msdu_info->tid;
uint32_t *hal_tx_desc_cached; uint32_t *hal_tx_desc_cached;
int coalesce = 0;
int ret; int ret;
/* /*
@@ -353,12 +380,16 @@ dp_tx_hw_enqueue_rh(struct dp_soc *soc, struct dp_vdev *vdev,
goto enqueue_fail; goto enqueue_fail;
} }
dp_tx_update_write_index(soc, tx_ep_info); coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid,
msdu_info, 0);
dp_tx_update_write_index(soc, tx_ep_info, coalesce);
ce_ring_release_lock(tx_ep_info->ce_tx_hdl); ce_ring_release_lock(tx_ep_info->ce_tx_hdl);
tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX; tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, nbuf); dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, nbuf);
DP_STATS_INC_PKT(vdev, tx_i.processed, 1, tx_desc->length); DP_STATS_INC_PKT(vdev, tx_i.processed, 1, tx_desc->length);
dp_tx_update_stats(soc, tx_desc, 0);
status = QDF_STATUS_SUCCESS; status = QDF_STATUS_SUCCESS;
dp_tx_record_hw_desc_rh((uint8_t *)hal_tx_desc_cached, soc); dp_tx_record_hw_desc_rh((uint8_t *)hal_tx_desc_cached, soc);

View File

@@ -164,4 +164,13 @@ void dp_tx_desc_pool_free_rh(struct dp_soc *soc, uint8_t pool_id);
* Return: none * Return: none
*/ */
void dp_tx_compl_handler_rh(struct dp_soc *soc, qdf_nbuf_t htt_msg); void dp_tx_compl_handler_rh(struct dp_soc *soc, qdf_nbuf_t htt_msg);
/**
* dp_flush_tx_ring_rh() - flush tx ring write index
* @pdev: dp pdev handle
* @ring_id: Tx ring id
*
* Return: 0 on success and error code on failure
*/
int dp_flush_tx_ring_rh(struct dp_pdev *pdev, int ring_id);
#endif #endif

View File

@@ -708,7 +708,7 @@ void ce_engine_service_reg(struct hif_softc *scn, int CE_id);
void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id); void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id);
void ce_tx_ring_write_idx_update_wrapper(struct CE_handle *ce_tx_hdl, void ce_tx_ring_write_idx_update_wrapper(struct CE_handle *ce_tx_hdl,
bool flush); int coalesce);
/* /*
* ce_ring_flush_write_idx() - CE handler to flush write index * ce_ring_flush_write_idx() - CE handler to flush write index

View File

@@ -423,17 +423,15 @@ void ce_flush_tx_ring_write_idx(struct CE_handle *ce_tx_hdl, bool force_flush)
/* Make sure this wrapper is called under ce_index_lock */ /* Make sure this wrapper is called under ce_index_lock */
void ce_tx_ring_write_idx_update_wrapper(struct CE_handle *ce_tx_hdl, void ce_tx_ring_write_idx_update_wrapper(struct CE_handle *ce_tx_hdl,
bool flush) int coalesce)
{ {
struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl; struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
struct CE_ring_state *src_ring = ce_state->src_ring; struct CE_ring_state *src_ring = ce_state->src_ring;
struct hif_softc *scn = ce_state->scn; struct hif_softc *scn = ce_state->scn;
if (flush) if (!coalesce)
CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr, CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
src_ring->write_index); src_ring->write_index);
else
ce_ring_set_event(src_ring, CE_RING_FLUSH_EVENT);
} }
/* /*