qcacmn: suspend/resume changes for WCN6450
Changes required for suspend/resume support for WCN6450.

Change-Id: I4610f6bdb8de92f03884af6c07a5141dd27174be
CRs-Fixed: 3447469

Committed by: Madan Koyyalamudi
Parent: 476546a791
Commit: 26099afe23
@@ -2887,6 +2887,9 @@ void dp_initialize_arch_ops_be(struct dp_arch_ops *arch_ops)
 	arch_ops->reo_remap_config = dp_reo_remap_config_be;
 	arch_ops->txrx_get_vdev_mcast_param = dp_txrx_get_vdev_mcast_param_be;
 	arch_ops->txrx_srng_init = dp_srng_init_be;
+#if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM)
+	arch_ops->dp_update_ring_hptp = dp_update_ring_hptp;
+#endif
 	dp_initialize_arch_ops_be_ipa(arch_ops);
 	dp_initialize_arch_ops_be_single_dev(arch_ops);
 }
@@ -10679,6 +10679,93 @@ inline void dp_find_missing_tx_comp(struct dp_soc *soc)
 }
 #endif

+#ifdef FEATURE_RUNTIME_PM
+/**
+ * dp_runtime_suspend() - ensure DP is ready to runtime suspend
+ * @soc_hdl: Datapath soc handle
+ * @pdev_id: id of data path pdev handle
+ *
+ * DP is ready to runtime suspend if there are no pending TX packets.
+ *
+ * Return: QDF_STATUS
+ */
+static QDF_STATUS dp_runtime_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
+{
+	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
+	struct dp_pdev *pdev;
+	int32_t tx_pending;
+
+	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
+	if (!pdev) {
+		dp_err("pdev is NULL");
+		return QDF_STATUS_E_INVAL;
+	}
+
+	/* Abort if there are any pending TX packets */
+	tx_pending = dp_get_tx_pending(dp_pdev_to_cdp_pdev(pdev));
+	if (tx_pending) {
+		dp_info_rl("%pK: Abort suspend due to pending TX packets %d",
+			   soc, tx_pending);
+		dp_find_missing_tx_comp(soc);
+		/* perform a force flush if tx is pending */
+		soc->arch_ops.dp_update_ring_hptp(soc, true);
+		qdf_atomic_set(&soc->tx_pending_rtpm, 0);
+
+		return QDF_STATUS_E_AGAIN;
+	}
+
+	if (dp_runtime_get_refcount(soc)) {
+		dp_init_info("refcount: %d", dp_runtime_get_refcount(soc));
+
+		return QDF_STATUS_E_AGAIN;
+	}
+
+	if (soc->intr_mode == DP_INTR_POLL)
+		qdf_timer_stop(&soc->int_timer);
+
+	dp_rx_fst_update_pm_suspend_status(soc, true);
+
+	return QDF_STATUS_SUCCESS;
+}
+
+#define DP_FLUSH_WAIT_CNT 10
+#define DP_RUNTIME_SUSPEND_WAIT_MS 10
+/**
+ * dp_runtime_resume() - ensure DP is ready to runtime resume
+ * @soc_hdl: Datapath soc handle
+ * @pdev_id: id of data path pdev handle
+ *
+ * Resume DP for runtime PM.
+ *
+ * Return: QDF_STATUS
+ */
+static QDF_STATUS dp_runtime_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
+{
+	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
+	int suspend_wait = 0;
+
+	if (soc->intr_mode == DP_INTR_POLL)
+		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
+
+	/*
+	 * Wait until dp runtime refcount becomes zero or time out, then flush
+	 * pending tx for runtime suspend.
+	 */
+	while (dp_runtime_get_refcount(soc) &&
+	       suspend_wait < DP_FLUSH_WAIT_CNT) {
+		qdf_sleep(DP_RUNTIME_SUSPEND_WAIT_MS);
+		suspend_wait++;
+	}
+
+	soc->arch_ops.dp_update_ring_hptp(soc, false);
+	qdf_atomic_set(&soc->tx_pending_rtpm, 0);
+
+	dp_rx_fst_update_pm_suspend_status(soc, false);
+
+	return QDF_STATUS_SUCCESS;
+}
+#endif /* FEATURE_RUNTIME_PM */
+
 /**
  * dp_tx_get_success_ack_stats() - get tx success completion count
  * @soc_hdl: Datapath soc handle
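The two entry points added above return QDF_STATUS_E_AGAIN while TX completions are still outstanding or the DP runtime refcount is held, which tells the caller to retry later. As a minimal sketch, assuming a hypothetical caller, here is the kind of retry loop a runtime-PM core might wrap around dp_runtime_suspend(); the name try_runtime_suspend and the retry bound are invented for illustration, and the real policy lives in the HIF/PLD layers, reached through the registered cdp ops rather than a direct call:

/* Illustrative only: try_runtime_suspend() and MAX_SUSPEND_RETRIES are
 * hypothetical, not part of this change.
 */
#define MAX_SUSPEND_RETRIES 3

static QDF_STATUS try_runtime_suspend(struct cdp_soc_t *soc_hdl,
				      uint8_t pdev_id)
{
	QDF_STATUS status = QDF_STATUS_E_AGAIN;
	int retry;

	for (retry = 0; retry < MAX_SUSPEND_RETRIES; retry++) {
		/* E_AGAIN means TX is still draining; the force flush in
		 * dp_runtime_suspend() has already kicked the hardware,
		 * so give completions a chance to land and try again.
		 */
		status = dp_runtime_suspend(soc_hdl, pdev_id);
		if (status != QDF_STATUS_E_AGAIN)
			break;
	}

	return status;
}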
@@ -11174,7 +11261,6 @@ static QDF_STATUS dp_bus_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
 {
 	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
 	struct dp_pdev *pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
-	uint8_t i;

 	if (qdf_unlikely(!pdev)) {
 		dp_err("pdev is NULL");
@@ -11189,10 +11275,8 @@ static QDF_STATUS dp_bus_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)

 	dp_resume_fse_cache_flush(soc);

-	for (i = 0; i < soc->num_tcl_data_rings; i++)
-		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
+	soc->arch_ops.dp_update_ring_hptp(soc, false);

-	dp_flush_ring_hptp(soc, soc->reo_cmd_ring.hal_srng);
 	dp_rx_fst_update_pm_suspend_status(soc, false);

 	dp_rx_fst_requeue_wq(soc);
@@ -369,42 +369,14 @@ dp_dump_wbm_idle_hptp(struct dp_soc *soc, struct dp_pdev *pdev);
 void dp_display_srng_info(struct cdp_soc_t *soc_hdl);

 #if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM)
-/**
- * dp_flush_ring_hptp() - Update ring shadow
- *			  register HP/TP address when runtime
- *			  resume
- * @soc: DP soc context
- * @hal_srng: HAL srng context
- *
- * Return: None
- */
-void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng);
-
 void dp_drain_txrx(struct cdp_soc_t *soc_handle);

-#ifdef FEATURE_RUNTIME_PM
-/**
- * dp_runtime_suspend() - ensure DP is ready to runtime suspend
- * @soc_hdl: Datapath soc handle
- * @pdev_id: id of data path pdev handle
- *
- * DP is ready to runtime suspend if there are no pending TX packets.
- *
- * Return: QDF_STATUS
+/*
+ * dp_update_ring_hptp() - update dp rings hptp
+ * @soc: dp soc handler
+ * @force_flush_tx: force flush the Tx ring hp
  */
-QDF_STATUS dp_runtime_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
-
-/**
- * dp_runtime_resume() - ensure DP is ready to runtime resume
- * @soc_hdl: Datapath soc handle
- * @pdev_id: id of data path pdev handle
- *
- * Resume DP for runtime PM.
- *
- * Return: QDF_STATUS
- */
-QDF_STATUS dp_runtime_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
-#endif
+void dp_update_ring_hptp(struct dp_soc *soc, bool force_flush_tx);
 #endif

 #ifdef WLAN_FEATURE_STATS_EXT
@@ -719,55 +691,9 @@ static inline void dp_display_srng_info(struct cdp_soc_t *soc_hdl)
 }

 #if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM)
-/**
- * dp_flush_ring_hptp() - Update ring shadow
- *			  register HP/TP address when runtime
- *			  resume
- * @soc: DP soc context
- * @hal_srng: HAL srng context
- *
- * Return: None
- */
-static inline void
-dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng)
-{
-}
-
 static inline void dp_drain_txrx(struct cdp_soc_t *soc_handle)
 {
 }
-
-#ifdef FEATURE_RUNTIME_PM
-/**
- * dp_runtime_suspend() - ensure DP is ready to runtime suspend
- * @soc_hdl: Datapath soc handle
- * @pdev_id: id of data path pdev handle
- *
- * DP is ready to runtime suspend if there are no pending TX packets.
- *
- * Return: QDF_STATUS
- */
-static inline
-QDF_STATUS dp_runtime_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
-{
-	return QDF_STATUS_SUCCESS;
-}
-
-/**
- * dp_runtime_resume() - ensure DP is ready to runtime resume
- * @soc_hdl: Datapath soc handle
- * @pdev_id: id of data path pdev handle
- *
- * Resume DP for runtime PM.
- *
- * Return: QDF_STATUS
- */
-static inline
-QDF_STATUS dp_runtime_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
-{
-	return QDF_STATUS_SUCCESS;
-}
-#endif
 #endif
 #endif /* WLAN_SOFTUMAC_SUPPORT */

@@ -4011,7 +4011,7 @@ void dp_drain_txrx(struct cdp_soc_t *soc_handle)
  *
  * Return: None
  */
-void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng)
+static void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng)
 {
 	if (hal_srng && hal_srng_get_clear_event(hal_srng,
 						 HAL_SRNG_FLUSH_EVENT)) {
@@ -4025,102 +4025,27 @@ void dp_flush_ring_hptp(struct dp_soc *soc, hal_ring_handle_t hal_srng)
 		dp_debug("flushed");
 	}
 }
 #endif

-#ifdef FEATURE_RUNTIME_PM
-/**
- * dp_runtime_suspend() - ensure DP is ready to runtime suspend
- * @soc_hdl: Datapath soc handle
- * @pdev_id: id of data path pdev handle
- *
- * DP is ready to runtime suspend if there are no pending TX packets.
- *
- * Return: QDF_STATUS
- */
-QDF_STATUS dp_runtime_suspend(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
+void dp_update_ring_hptp(struct dp_soc *soc, bool force_flush_tx)
 {
-	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
-	struct dp_pdev *pdev;
 	uint8_t i;
-	int32_t tx_pending;

-	pdev = dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
-	if (!pdev) {
-		dp_err("pdev is NULL");
-		return QDF_STATUS_E_INVAL;
-	}
-
-	/* Abort if there are any pending TX packets */
-	tx_pending = dp_get_tx_pending(dp_pdev_to_cdp_pdev(pdev));
-	if (tx_pending) {
-		dp_info_rl("%pK: Abort suspend due to pending TX packets %d",
-			   soc, tx_pending);
-		dp_find_missing_tx_comp(soc);
-		/* perform a force flush if tx is pending */
+	if (force_flush_tx) {
 		for (i = 0; i < soc->num_tcl_data_rings; i++) {
 			hal_srng_set_event(soc->tcl_data_ring[i].hal_srng,
					   HAL_SRNG_FLUSH_EVENT);
 			dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
 		}
-		qdf_atomic_set(&soc->tx_pending_rtpm, 0);
-
-		return QDF_STATUS_E_AGAIN;
+		return;
 	}

-	if (dp_runtime_get_refcount(soc)) {
-		dp_init_info("refcount: %d", dp_runtime_get_refcount(soc));
-
-		return QDF_STATUS_E_AGAIN;
-	}
-
-	if (soc->intr_mode == DP_INTR_POLL)
-		qdf_timer_stop(&soc->int_timer);
-
-	dp_rx_fst_update_pm_suspend_status(soc, true);
-
-	return QDF_STATUS_SUCCESS;
-}
-
-#define DP_FLUSH_WAIT_CNT 10
-#define DP_RUNTIME_SUSPEND_WAIT_MS 10
-/**
- * dp_runtime_resume() - ensure DP is ready to runtime resume
- * @soc_hdl: Datapath soc handle
- * @pdev_id: id of data path pdev handle
- *
- * Resume DP for runtime PM.
- *
- * Return: QDF_STATUS
- */
-QDF_STATUS dp_runtime_resume(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
-{
-	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
-	int i, suspend_wait = 0;
-
-	if (soc->intr_mode == DP_INTR_POLL)
-		qdf_timer_mod(&soc->int_timer, DP_INTR_POLL_TIMER_MS);
-
-	/*
-	 * Wait until dp runtime refcount becomes zero or time out, then flush
-	 * pending tx for runtime suspend.
-	 */
-	while (dp_runtime_get_refcount(soc) &&
-	       suspend_wait < DP_FLUSH_WAIT_CNT) {
-		qdf_sleep(DP_RUNTIME_SUSPEND_WAIT_MS);
-		suspend_wait++;
-	}
-
-	for (i = 0; i < MAX_TCL_DATA_RINGS; i++) {
+	for (i = 0; i < soc->num_tcl_data_rings; i++)
 		dp_flush_ring_hptp(soc, soc->tcl_data_ring[i].hal_srng);
-	}
-	qdf_atomic_set(&soc->tx_pending_rtpm, 0);

 	dp_flush_ring_hptp(soc, soc->reo_cmd_ring.hal_srng);
-	dp_rx_fst_update_pm_suspend_status(soc, false);
-
-	return QDF_STATUS_SUCCESS;
 }
-#endif /* FEATURE_RUNTIME_PM */
 #endif

 #ifdef WLAN_FEATURE_STATS_EXT
 /* rx hw stats event wait timeout in ms */
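With this hunk, what used to be a direct SRNG flush inside the generic suspend/resume path is routed through the new dp_update_ring_hptp arch op, so SRNG-based targets (BE/LI) and the WCN6450 copy-engine target can each register their own flush routine. A self-contained toy model of that dispatch pattern, with invented names (toy_soc, toy_update_srng, toy_update_ce), follows; it is an illustration of the function-pointer indirection, not qcacmn code:

#include <stdbool.h>
#include <stdio.h>

struct toy_soc;

struct toy_arch_ops {
	void (*update_ring_hptp)(struct toy_soc *soc, bool force_flush_tx);
};

struct toy_soc {
	struct toy_arch_ops ops;
};

static void toy_update_srng(struct toy_soc *soc, bool force_flush_tx)
{
	(void)soc;
	printf("SRNG target: flush TCL/REO HP/TP (force=%d)\n", force_flush_tx);
}

static void toy_update_ce(struct toy_soc *soc, bool force_flush_tx)
{
	(void)soc;
	printf("CE target: flush CE src ring write index (force=%d)\n",
	       force_flush_tx);
}

int main(void)
{
	/* at init, each target registers its own implementation */
	struct toy_soc soc = { .ops = { .update_ring_hptp = toy_update_ce } };

	/* the suspend path calls through the op, target-agnostic */
	soc.ops.update_ring_hptp(&soc, true);

	soc.ops.update_ring_hptp = toy_update_srng;
	soc.ops.update_ring_hptp(&soc, false);
	return 0;
}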
@@ -254,8 +254,6 @@ dp_tx_tso_history_add(struct dp_soc *soc, struct qdf_tso_info_t tso_info,
 }
 #endif /* WLAN_FEATURE_DP_TX_DESC_HISTORY */

-static int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc);
-
 /**
  * dp_is_tput_high() - Check if throughput is high
  *
@@ -1597,14 +1595,6 @@ dp_tx_check_and_flush_hp(struct dp_soc *soc,
 #endif

 #ifdef FEATURE_RUNTIME_PM
-static inline int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc)
-{
-	int ret;
-
-	ret = qdf_atomic_read(&soc->rtpm_high_tput_flag) &&
-	      (hif_rtpm_get_state() <= HIF_RTPM_STATE_ON);
-	return ret;
-}
 void
 dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
 			      hal_ring_handle_t hal_ring_hdl,
@@ -1656,11 +1646,6 @@ dp_tx_ring_access_end_wrapper(struct dp_soc *soc,
 	}
 }
 #endif

-static inline int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc)
-{
-	return 0;
-}
-#endif

 /**
@@ -1932,4 +1932,17 @@ static inline uint32_t dp_tx_get_pkt_len(struct dp_tx_desc_s *tx_desc)
 		tx_desc->msdu_ext_desc->tso_desc->seg.total_len :
 		qdf_nbuf_len(tx_desc->nbuf);
 }
+
+#ifdef FEATURE_RUNTIME_PM
+static inline int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc)
+{
+	return qdf_atomic_read(&soc->rtpm_high_tput_flag) &&
+	       (hif_rtpm_get_state() <= HIF_RTPM_STATE_ON);
+}
+#else
+static inline int dp_get_rtpm_tput_policy_requirement(struct dp_soc *soc)
+{
+	return 0;
+}
+#endif
 #endif
@@ -2236,6 +2236,7 @@ enum dp_context_type {
  * @txrx_soc_ppeds_service_status_update:
  * @txrx_soc_ppeds_enabled_check:
  * @txrx_soc_ppeds_txdesc_pool_reset:
+ * @dp_update_ring_hptp: Update rings hptp during suspend/resume
  */
 struct dp_arch_ops {
 	/* INIT/DEINIT Arch Ops */
@@ -2482,6 +2483,7 @@ struct dp_arch_ops {
 	void (*txrx_soc_ppeds_txdesc_pool_reset)(struct dp_soc *soc,
 						 qdf_nbuf_t *nbuf_list);
 #endif
+	void (*dp_update_ring_hptp)(struct dp_soc *soc, bool force_flush_tx);
 };

 /**
@@ -708,6 +708,9 @@ void dp_initialize_arch_ops_li(struct dp_arch_ops *arch_ops)
 	arch_ops->txrx_get_vdev_mcast_param = dp_txrx_get_vdev_mcast_param_li;
 	arch_ops->get_hw_link_id = dp_get_hw_link_id_li;
 	arch_ops->txrx_srng_init = dp_srng_init_li;
+#if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM)
+	arch_ops->dp_update_ring_hptp = dp_update_ring_hptp;
+#endif
 }

 #ifdef QCA_DP_TX_HW_SW_NBUF_DESC_PREFETCH
@@ -27,6 +27,8 @@
 #include "dp_peer.h"
 #include <wlan_utility.h>
 #include <dp_rings.h>
+#include <ce_api.h>
+#include <ce_internal.h>

 static QDF_STATUS
 dp_srng_init_rh(struct dp_soc *soc, struct dp_srng *srng, int ring_type,
@@ -774,6 +776,17 @@ static void dp_get_rx_hash_key_rh(struct dp_soc *soc,
 	dp_get_rx_hash_key_bytes(lro_hash);
 }

+#if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM)
+static void dp_update_ring_hptp_rh(struct dp_soc *soc, bool force_flush)
+{
+	struct dp_pdev_rh *rh_pdev =
+			dp_get_rh_pdev_from_dp_pdev(soc->pdev_list[0]);
+	struct dp_tx_ep_info_rh *tx_ep_info = &rh_pdev->tx_ep_info;
+
+	ce_flush_tx_ring_write_idx(tx_ep_info->ce_tx_hdl, force_flush);
+}
+#endif
+
 void dp_initialize_arch_ops_rh(struct dp_arch_ops *arch_ops)
 {
 	arch_ops->tx_hw_enqueue = dp_tx_hw_enqueue_rh;
@@ -826,4 +839,7 @@ void dp_initialize_arch_ops_rh(struct dp_arch_ops *arch_ops)
 	arch_ops->reo_remap_config = dp_reo_remap_config_rh;
 	arch_ops->txrx_peer_setup = dp_peer_setup_rh;
 	arch_ops->txrx_srng_init = dp_srng_init_rh;
+#if defined(DP_POWER_SAVE) || defined(FEATURE_RUNTIME_PM)
+	arch_ops->dp_update_ring_hptp = dp_update_ring_hptp_rh;
+#endif
 }
@@ -184,6 +184,62 @@ dp_tx_record_hw_desc_rh(uint8_t *hal_tx_desc_cached, struct dp_soc *soc)
 }
 #endif

+#if defined(FEATURE_RUNTIME_PM)
+static void dp_tx_update_write_index(struct dp_soc *soc,
+				     struct dp_tx_ep_info_rh *tx_ep_info)
+{
+	int ret;
+
+	/* Avoid runtime get and put APIs under high throughput scenarios */
+	if (dp_get_rtpm_tput_policy_requirement(soc)) {
+		ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
+						    true);
+		return;
+	}
+
+	ret = hif_rtpm_get(HIF_RTPM_GET_ASYNC, HIF_RTPM_ID_DP);
+	if (QDF_IS_STATUS_SUCCESS(ret)) {
+		if (hif_system_pm_state_check(soc->hif_handle)) {
+			ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl, false);
+			ce_ring_set_event(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring,
+					  CE_RING_FLUSH_EVENT);
+		} else {
+			ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
+							    true);
+		}
+	} else {
+		dp_runtime_get(soc);
+		ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
+						    false);
+		ce_ring_set_event(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring,
+				  CE_RING_FLUSH_EVENT);
+		qdf_atomic_inc(&soc->tx_pending_rtpm);
+		dp_runtime_put(soc);
+	}
+}
+#elif defined(DP_POWER_SAVE)
+static void dp_tx_update_write_index(struct dp_soc *soc,
+				     struct dp_tx_ep_info_rh *tx_ep_info)
+{
+	if (hif_system_pm_state_check(soc->hif_handle)) {
+		ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
+						    false);
+		ce_ring_set_event(((struct CE_state *)(tx_ep_info->ce_tx_hdl))->src_ring,
+				  CE_RING_FLUSH_EVENT);
+	} else {
+		ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
+						    true);
+	}
+}
+#else
+static void dp_tx_update_write_index(struct dp_soc *soc,
+				     struct dp_tx_ep_info_rh *tx_ep_info)
+{
+	ce_tx_ring_write_idx_update_wrapper(tx_ep_info->ce_tx_hdl,
+					    true);
+}
+#endif
+
 QDF_STATUS
 dp_tx_hw_enqueue_rh(struct dp_soc *soc, struct dp_vdev *vdev,
 		    struct dp_tx_desc_s *tx_desc, uint16_t fw_metadata,
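The three dp_tx_update_write_index() variants above encode one policy: write the CE source-ring write index to hardware immediately when the bus is known to be awake, otherwise record CE_RING_FLUSH_EVENT so the deferred write is performed later by ce_flush_tx_ring_write_idx(). A condensed, hypothetical restatement of that ladder follows; the toy_* names are stand-ins for illustration, not qcacmn APIs:

#include <stdbool.h>

static bool toy_bus_awake;     /* stands in for hif_system_pm_state_check() == 0 */
static bool toy_flush_pending; /* stands in for CE_RING_FLUSH_EVENT being set */

static void toy_write_hw_index(void)
{
	/* real code: CE_SRC_RING_WRITE_IDX_SET(scn, ctrl_addr, write_index) */
}

static void toy_update_write_index(void)
{
	if (toy_bus_awake)
		toy_write_hw_index();     /* safe to touch registers now */
	else
		toy_flush_pending = true; /* defer; flushed on resume */
}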
@@ -281,9 +337,11 @@ dp_tx_hw_enqueue_rh(struct dp_soc *soc, struct dp_vdev *vdev,

 	dp_tx_fill_nbuf_data_attr_rh(nbuf);

-	ret = ce_send_fast(tx_ep_info->ce_tx_hdl, nbuf,
+	ce_ring_aquire_lock(tx_ep_info->ce_tx_hdl);
+	ret = ce_enqueue_desc(tx_ep_info->ce_tx_hdl, nbuf,
 			      tx_ep_info->tx_endpoint, download_len);
-	if (!ret) {
+	if (ret) {
+		ce_ring_release_lock(tx_ep_info->ce_tx_hdl);
 		dp_verbose_debug("CE tx ring full");
 		/* TODO: Should this be a separate ce_ring_full stat? */
 		DP_STATS_INC(soc, tx.tcl_ring_full[0], 1);
@@ -291,6 +349,9 @@ dp_tx_hw_enqueue_rh(struct dp_soc *soc, struct dp_vdev *vdev,
 		goto enqueue_fail;
 	}

+	dp_tx_update_write_index(soc, tx_ep_info);
+	ce_ring_release_lock(tx_ep_info->ce_tx_hdl);
+
 	tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
 	dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, nbuf);
 	DP_STATS_INC_PKT(vdev, tx_i.processed, 1, tx_desc->length);
@@ -164,6 +164,17 @@ int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,

 #endif

+/*
+ * ce_enqueue_desc() - enqueue desc to CE ring.
+ * @copyeng: which copy engine to use
+ * @msdu: data buffer
+ * @transfer_id: arbitrary ID; reflected to destination
+ * @download_len: length of the packet download to FW.
+ *
+ */
+int ce_enqueue_desc(struct CE_handle *copyeng, qdf_nbuf_t msdu,
+		    unsigned int transfer_id, uint32_t download_len);
+
 void ce_update_tx_ring(struct CE_handle *ce_tx_hdl, uint32_t num_htt_cmpls);
 extern qdf_nbuf_t ce_batch_send(struct CE_handle *ce_tx_hdl,
 				qdf_nbuf_t msdu,
@@ -696,4 +707,15 @@ void ce_engine_service_reg(struct hif_softc *scn, int CE_id);
  */
 void ce_per_engine_service_fast(struct hif_softc *scn, int ce_id);

+void ce_tx_ring_write_idx_update_wrapper(struct CE_handle *ce_tx_hdl,
+					 bool flush);
+
+/*
+ * ce_flush_tx_ring_write_idx() - CE handler to flush write index
+ * @ce_tx_hdl: ce handle
+ * @force_flush: force flush the write idx if it is set to true.
+ *
+ * Returns void
+ */
+void ce_flush_tx_ring_write_idx(struct CE_handle *ce_tx_hdl, bool force_flush);
 #endif /* __COPY_ENGINE_API_H__ */
@@ -62,6 +62,11 @@ enum ol_ath_hif_ce_ecodes {

 struct CE_src_desc;

+/* CE ring BIT mask
+ * CE_RING_FLUSH_EVENT: flush ce ring index in case of link down
+ */
+#define CE_RING_FLUSH_EVENT BIT(0)
+
 /* Copy Engine Ring internal state */
 struct CE_ring_state {
@@ -123,6 +128,11 @@ struct CE_ring_state {
 	uint8_t is_ring_prealloc;

 	OS_DMA_MEM_CONTEXT(ce_dmacontext); /* OS Specific DMA context */
+
+	/* ce ring event */
+	unsigned long event;
+	/* last flushed time stamp */
+	uint64_t last_flush_ts;
 };

 /* Copy Engine internal state */
@@ -870,4 +880,51 @@ void hif_ce_desc_record_rx_paddr(struct hif_softc *scn,
 {
 }
 #endif /* HIF_RECORD_PADDR */
+
+static inline void ce_ring_aquire_lock(struct CE_handle *handle)
+{
+	struct CE_state *ce_state = (struct CE_state *)handle;
+
+	qdf_spin_lock_bh(&ce_state->ce_index_lock);
+}
+
+static inline void ce_ring_release_lock(struct CE_handle *handle)
+{
+	struct CE_state *ce_state = (struct CE_state *)handle;
+
+	qdf_spin_unlock_bh(&ce_state->ce_index_lock);
+}
+
+/*
+ * ce_ring_clear_event() - Clear ring event
+ * @ring: Ring pointer
+ * @event: ring event type
+ *
+ */
+static inline void ce_ring_clear_event(struct CE_ring_state *ring, int event)
+{
+	qdf_atomic_clear_bit(event, &ring->event);
+}
+
+/*
+ * ce_ring_set_event() - Set ring event
+ * @ring: Ring pointer
+ * @event: Ring event type
+ *
+ */
+static inline void ce_ring_set_event(struct CE_ring_state *ring, int event)
+{
+	qdf_atomic_set_bit(event, &ring->event);
+}
+
+/*
+ * ce_ring_get_clear_event() - Clear ring event and return old value
+ * @ring: Ring pointer
+ * @event: Ring event type
+ *
+ */
+static inline int ce_ring_get_clear_event(struct CE_ring_state *ring, int event)
+{
+	return qdf_atomic_test_and_clear_bit(event, &ring->event);
+}
 #endif /* __COPY_ENGINE_INTERNAL_H__ */
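The helpers above give the CE ring a lock-free "flush owed" flag: producers set the bit when they skip a register write, and whoever flushes first atomically claims it, so the deferred write-index update runs exactly once. For illustration only, the same pattern in portable C11 atomics (qcacmn itself uses the qdf_atomic_*_bit() wrappers shown above; the toy_* names are invented):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define TOY_RING_FLUSH_EVENT 0

static atomic_ulong toy_event;

static void toy_set_event(int event)
{
	atomic_fetch_or(&toy_event, 1UL << event);
}

static bool toy_get_clear_event(int event)
{
	/* atomically clear the bit and report whether it was set */
	unsigned long old = atomic_fetch_and(&toy_event, ~(1UL << event));

	return old & (1UL << event);
}

int main(void)
{
	toy_set_event(TOY_RING_FLUSH_EVENT);

	/* first consumer observes and clears the pending flush... */
	printf("%d\n", toy_get_clear_event(TOY_RING_FLUSH_EVENT)); /* 1 */
	/* ...a second consumer sees nothing, so the flush runs only once */
	printf("%d\n", toy_get_clear_event(TOY_RING_FLUSH_EVENT)); /* 0 */
	return 0;
}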
@@ -401,6 +401,41 @@ bool hif_ce_service_should_yield(struct hif_softc *scn,
 qdf_export_symbol(hif_ce_service_should_yield);
 #endif

+void ce_flush_tx_ring_write_idx(struct CE_handle *ce_tx_hdl, bool force_flush)
+{
+	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
+	struct CE_ring_state *src_ring = ce_state->src_ring;
+	struct hif_softc *scn = ce_state->scn;
+
+	if (force_flush)
+		ce_ring_set_event(src_ring, CE_RING_FLUSH_EVENT);
+
+	if (ce_ring_get_clear_event(src_ring, CE_RING_FLUSH_EVENT)) {
+		qdf_spin_lock_bh(&ce_state->ce_index_lock);
+		CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
+					  src_ring->write_index);
+		qdf_spin_unlock_bh(&ce_state->ce_index_lock);
+
+		src_ring->last_flush_ts = qdf_get_log_timestamp();
+		hif_debug("flushed");
+	}
+}
+
+/* Make sure this wrapper is called under ce_index_lock */
+void ce_tx_ring_write_idx_update_wrapper(struct CE_handle *ce_tx_hdl,
+					 bool flush)
+{
+	struct CE_state *ce_state = (struct CE_state *)ce_tx_hdl;
+	struct CE_ring_state *src_ring = ce_state->src_ring;
+	struct hif_softc *scn = ce_state->scn;
+
+	if (flush)
+		CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
+					  src_ring->write_index);
+	else
+		ce_ring_set_event(src_ring, CE_RING_FLUSH_EVENT);
+}
+
 /*
  * Guts of ce_send, used by both ce_send and ce_sendlist_send.
  * The caller takes responsibility for any needed locking.
@@ -1119,6 +1119,148 @@ ce_per_engine_handler_adjust_legacy(struct CE_state *CE_state,
 }

+#ifdef QCA_WIFI_WCN6450
+int ce_enqueue_desc(struct CE_handle *copyeng, qdf_nbuf_t msdu,
+		    unsigned int transfer_id, uint32_t download_len)
+{
+	struct CE_state *ce_state = (struct CE_state *)copyeng;
+	struct hif_softc *scn = ce_state->scn;
+	struct CE_ring_state *src_ring = ce_state->src_ring;
+	u_int32_t ctrl_addr = ce_state->ctrl_addr;
+	unsigned int nentries_mask = src_ring->nentries_mask;
+	unsigned int write_index;
+	unsigned int sw_index;
+	unsigned int frag_len;
+	uint64_t dma_addr;
+	uint32_t user_flags;
+	enum hif_ce_event_type type = FAST_TX_SOFTWARE_INDEX_UPDATE;
+
+	/*
+	 * Create a log assuming the call will go through, and if not, we would
+	 * add an error trace as well.
+	 * Please add the same failure log for any additional error paths.
+	 */
+	DPTRACE(qdf_dp_trace(msdu,
+			     QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD,
+			     QDF_TRACE_DEFAULT_PDEV_ID,
+			     qdf_nbuf_data_addr(msdu),
+			     sizeof(qdf_nbuf_data(msdu)), QDF_TX));
+
+	DATA_CE_UPDATE_SWINDEX(src_ring->sw_index, scn, ctrl_addr);
+
+	write_index = src_ring->write_index;
+	sw_index = src_ring->sw_index;
+	hif_record_ce_desc_event(scn, ce_state->id,
+				 FAST_TX_SOFTWARE_INDEX_UPDATE,
+				 NULL, NULL, sw_index, 0);
+
+	if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index, sw_index - 1)
+			 < SLOTS_PER_DATAPATH_TX)) {
+		hif_err_rl("Source ring full, required %d, available %d",
+			   SLOTS_PER_DATAPATH_TX,
+			   CE_RING_DELTA(nentries_mask, write_index,
+					 sw_index - 1));
+		OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
+
+		DPTRACE(qdf_dp_trace(NULL,
+				     QDF_DP_TRACE_CE_FAST_PACKET_ERR_RECORD,
+				     QDF_TRACE_DEFAULT_PDEV_ID,
+				     NULL, 0, QDF_TX));
+
+		return -ENOSPC;
+	}
+
+	{
+		struct CE_src_desc *src_ring_base =
+			(struct CE_src_desc *)src_ring->base_addr_owner_space;
+		struct CE_src_desc *shadow_base =
+			(struct CE_src_desc *)src_ring->shadow_base;
+		struct CE_src_desc *src_desc =
+			CE_SRC_RING_TO_DESC(src_ring_base, write_index);
+		struct CE_src_desc *shadow_src_desc =
+			CE_SRC_RING_TO_DESC(shadow_base, write_index);
+
+		/*
+		 * First fill out the ring descriptor for the HTC HTT frame
+		 * header. These are uncached writes. Should we use a local
+		 * structure instead?
+		 */
+		/* HTT/HTC header can be passed as an argument */
+		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 0);
+		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
+							  0xFFFFFFFF);
+		user_flags = qdf_nbuf_data_attr_get(msdu) & DESC_DATA_FLAG_MASK;
+		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
+		shadow_src_desc->meta_data = transfer_id;
+		shadow_src_desc->nbytes = qdf_nbuf_get_frag_len(msdu, 0);
+		ce_validate_nbytes(shadow_src_desc->nbytes, ce_state);
+		download_len -= shadow_src_desc->nbytes;
+		/*
+		 * HTC HTT header is a word stream, so byte swap if CE byte
+		 * swap enabled
+		 */
+		shadow_src_desc->byte_swap = ((ce_state->attr_flags &
+					       CE_ATTR_BYTE_SWAP_DATA) != 0);
+		/* For the first one, it still does not need to write */
+		shadow_src_desc->gather = 1;
+		*src_desc = *shadow_src_desc;
+		/* By default we could initialize the transfer context to this
+		 * value
+		 */
+		src_ring->per_transfer_context[write_index] =
+			CE_SENDLIST_ITEM_CTXT;
+		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+
+		src_desc = CE_SRC_RING_TO_DESC(src_ring_base, write_index);
+		shadow_src_desc = CE_SRC_RING_TO_DESC(shadow_base, write_index);
+		/*
+		 * Now fill out the ring descriptor for the actual data
+		 * packet
+		 */
+		dma_addr = qdf_nbuf_get_frag_paddr(msdu, 1);
+		shadow_src_desc->buffer_addr = (uint32_t)(dma_addr &
+							  0xFFFFFFFF);
+		/*
+		 * Clear packet offset for all but the first CE desc.
+		 */
+		user_flags &= ~CE_DESC_PKT_OFFSET_BIT_M;
+		ce_buffer_addr_hi_set(shadow_src_desc, dma_addr, user_flags);
+		shadow_src_desc->meta_data = transfer_id;
+
+		/* get actual packet length */
+		frag_len = qdf_nbuf_get_frag_len(msdu, 1);
+
+		/* download remaining bytes of payload */
+		shadow_src_desc->nbytes = download_len;
+		ce_validate_nbytes(shadow_src_desc->nbytes, ce_state);
+		if (shadow_src_desc->nbytes > frag_len)
+			shadow_src_desc->nbytes = frag_len;
+
+		/* Data packet is a byte stream, so disable byte swap */
+		shadow_src_desc->byte_swap = 0;
+		/* For the last one, gather is not set */
+		shadow_src_desc->gather = 0;
+		*src_desc = *shadow_src_desc;
+		src_ring->per_transfer_context[write_index] = msdu;
+
+		hif_record_ce_desc_event(scn, ce_state->id, type,
+					 (union ce_desc *)src_desc,
+					 src_ring->per_transfer_context[write_index],
+					 write_index, shadow_src_desc->nbytes);
+
+		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+
+		DPTRACE(qdf_dp_trace(msdu,
+				     QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD,
+				     QDF_TRACE_DEFAULT_PDEV_ID,
+				     qdf_nbuf_data_addr(msdu),
+				     sizeof(qdf_nbuf_data(msdu)), QDF_TX));
+	}
+
+	src_ring->write_index = write_index;
+
+	return 0;
+}
+
 static void ce_legacy_msi_param_setup(struct hif_softc *scn, uint32_t ctrl_addr,
 				      uint32_t ce_id, struct CE_attr *attr)
 {
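ce_enqueue_desc() above builds a two-descriptor gather pair per MSDU: fragment 0 carries the HTC/HTT header with gather = 1 (and byte swap per the CE attributes), fragment 1 carries the payload, capped to download_len, with gather = 0 to terminate the chain. A toy model of that invariant, with invented types (this is not the real CE_src_desc layout):

#include <assert.h>
#include <stdint.h>

struct toy_src_desc {
	uint16_t nbytes;
	uint8_t  gather;    /* 1 = more descriptors follow for this MSDU */
	uint8_t  byte_swap; /* header is a word stream, payload is not */
};

static void toy_build_pair(struct toy_src_desc pair[2],
			   uint16_t hdr_len, uint16_t payload_len)
{
	pair[0] = (struct toy_src_desc){ .nbytes = hdr_len,
					 .gather = 1, .byte_swap = 1 };
	pair[1] = (struct toy_src_desc){ .nbytes = payload_len,
					 .gather = 0, .byte_swap = 0 };
}

int main(void)
{
	struct toy_src_desc pair[2];

	toy_build_pair(pair, 32, 1400);
	/* the last descriptor of an MSDU must terminate the gather chain */
	assert(pair[1].gather == 0);
	return 0;
}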
@@ -86,8 +86,10 @@ QDF_STATUS hif_initialize_ipci_ops(struct hif_softc *hif_sc)
 	bus_ops->hif_config_irq_clear_cpu_affinity =
 		&hif_ipci_config_irq_clear_cpu_affinity;
 	bus_ops->hif_log_bus_info = &hif_dummy_log_bus_info;
+#ifndef QCA_WIFI_WCN6450
 	bus_ops->hif_enable_grp_irqs = hif_ipci_enable_grp_irqs;
 	bus_ops->hif_disable_grp_irqs = hif_ipci_disable_grp_irqs;
+#endif
 #ifdef FEATURE_IRQ_AFFINITY
 	bus_ops->hif_set_grp_intr_affinity = &hif_ipci_set_grp_intr_affinity;
 #endif
@@ -1052,6 +1052,7 @@ void hif_allow_link_low_power_states(struct hif_opaque_softc *hif)
 }
 #endif

+#ifndef QCA_WIFI_WCN6450
 int hif_ipci_enable_grp_irqs(struct hif_softc *scn)
 {
 	struct hif_ipci_softc *ipci_scn = HIF_GET_IPCI_SOFTC(scn);
@@ -1085,3 +1086,4 @@ int hif_ipci_disable_grp_irqs(struct hif_softc *scn)

 	return status;
 }
+#endif