qcacmn: Handle Umac post reset at host

Handle Umac post reset and post reset complete events from firmware.

Change-Id: I76ac1b96f01f026535d31edcbd245b3643ecf6ee
CRs-Fixed: 3267222

commit 67de6bcbfd
parent 1e28e45ce7
committed by Madan Koyyalamudi
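In outline, the change registers two additional UMAC-reset action callbacks: on the post-reset event the host re-initializes its SRNGs and reuses the Rx/Tx descriptors it still holds, and on the post-reset-complete event it restores the REO command path, interrupt masks and Tx hardstart before freeing the deferred nbuf list. A condensed, illustrative sketch of the post-reset handler follows (not the patch itself; names are taken from the hunks below):

    static QDF_STATUS dp_umac_reset_handle_post_reset(struct dp_soc *soc)
    {
        qdf_nbuf_t *nbuf_list = &soc->umac_reset_ctx.nbuf_list;

        dp_reinit_rings(soc);                    /* deinit + re-init host-managed SRNGs */
        dp_rx_desc_reuse(soc, nbuf_list);        /* refill Rx rings from in-use descriptors */
        dp_cleanup_reo_cmd_module(soc);          /* flush pending REO commands and freelist */
        dp_tx_desc_pool_cleanup(soc, nbuf_list); /* queue stale Tx nbufs for delayed free */
        dp_reset_tid_q_setup(soc);               /* reset per-peer REO TID queues */

        return dp_umac_reset_notify_action_completion(soc,
                UMAC_RESET_ACTION_DO_POST_RESET_START);
    }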
@@ -347,7 +347,7 @@ void dp_tx_process_htt_completion_be(struct dp_soc *soc,
         return;

 release_tx_desc:
-    dp_tx_comp_free_buf(soc, tx_desc);
+    dp_tx_comp_free_buf(soc, tx_desc, false);
     dp_tx_desc_release(tx_desc, tx_desc->pool_id);
     if (vdev)
         dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
@@ -2153,6 +2153,10 @@ extern QDF_STATUS dp_rx_tid_setup_wifi3(struct dp_peer *peer, int tid,
 void dp_pause_reo_send_cmd(struct dp_soc *soc);

 void dp_resume_reo_send_cmd(struct dp_soc *soc);
+void dp_cleanup_reo_cmd_module(struct dp_soc *soc);
+void dp_reo_desc_freelist_destroy(struct dp_soc *soc);
+void dp_reset_rx_reo_tid_queue(struct dp_soc *soc, void *hw_qdesc_vaddr,
+                               uint32_t size);
 #endif

 extern QDF_STATUS dp_reo_send_cmd(struct dp_soc *soc,
@@ -274,6 +274,8 @@ void dp_print_mlo_ast_stats(struct dp_soc *soc);

 #ifdef DP_UMAC_HW_RESET_SUPPORT
 static QDF_STATUS dp_umac_reset_handle_pre_reset(struct dp_soc *soc);
+static QDF_STATUS dp_umac_reset_handle_post_reset(struct dp_soc *soc);
+static QDF_STATUS dp_umac_reset_handle_post_reset_complete(struct dp_soc *soc);
 #endif

 #define DP_INTR_POLL_TIMER_MS 5
@@ -603,7 +605,7 @@ static void dp_service_lmac_rings(void *arg)
         dp_rx_buffers_replenish(soc, mac_for_pdev,
                     rx_refill_buf_ring,
                     &soc->rx_desc_buf[mac_for_pdev],
-                    0, &desc_list, &tail);
+                    0, &desc_list, &tail, false);
     }

     qdf_timer_mod(&soc->lmac_reap_timer, DP_INTR_POLL_TIMER_MS);
@@ -2201,6 +2203,18 @@ static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
 #endif /* DP_CON_MON_MSI_ENABLED */
 #endif /* DISABLE_MON_RING_MSI_CFG */

+#ifdef DP_UMAC_HW_RESET_SUPPORT
+static bool dp_check_umac_reset_in_progress(struct dp_soc *soc)
+{
+    return !!soc->umac_reset_ctx.intr_ctx_bkp;
+}
+#else
+static bool dp_check_umac_reset_in_progress(struct dp_soc *soc)
+{
+    return false;
+}
+#endif
+
 /*
  * dp_srng_init() - Initialize SRNG
  * @soc : Data path soc handle
@@ -2214,6 +2228,8 @@ static inline bool dp_skip_msi_cfg(struct dp_soc *soc, int ring_type)
 QDF_STATUS dp_srng_init(struct dp_soc *soc, struct dp_srng *srng,
             int ring_type, int ring_num, int mac_id)
 {
+    bool idle_check;
+
     hal_soc_handle_t hal_soc = soc->hal_soc;
     struct hal_srng_params ring_params;

@@ -2259,8 +2275,10 @@ QDF_STATUS dp_srng_init(struct dp_soc *soc, struct dp_srng *srng,
     if (srng->cached)
         ring_params.flags |= HAL_SRNG_CACHED_DESC;

+    idle_check = dp_check_umac_reset_in_progress(soc);
+
     srng->hal_srng = hal_srng_setup(hal_soc, ring_type, ring_num,
-                    mac_id, &ring_params);
+                    mac_id, &ring_params, idle_check);

     if (!srng->hal_srng) {
         dp_srng_free(soc, srng);
@@ -2575,6 +2593,16 @@ uint32_t dp_service_near_full_srngs(void *dp_ctx, uint32_t dp_budget, int cpu)

 #ifndef QCA_HOST_MODE_WIFI_DISABLED

+/*
+ * dp_srng_get_cpu() - Get the smp processor id for srng processing
+ *
+ * Return: smp processor id
+ */
+static inline int dp_srng_get_cpu(void)
+{
+    return smp_processor_id();
+}
+
 /*
  * dp_service_srngs() - Top level interrupt handler for DP Ring interrupts
  * @dp_ctx: DP SOC handle

@@ -2718,6 +2746,16 @@ budget_done:

 #else /* QCA_HOST_MODE_WIFI_DISABLED */

+/*
+ * dp_srng_get_cpu() - Get the smp processor id for srng processing
+ *
+ * Return: smp processor id
+ */
+static inline int dp_srng_get_cpu(void)
+{
+    return 0;
+}
+
 /*
  * dp_service_srngs() - Top level handler for DP Monitor Ring interrupts
  * @dp_ctx: DP SOC handle
@@ -2780,7 +2818,7 @@ static void dp_interrupt_timer(void *arg)
     uint32_t lmac_iter;
     int max_mac_rings = wlan_cfg_get_num_mac_rings(pdev->wlan_cfg_ctx);
     enum reg_wifi_band mon_band;
-    int cpu = smp_processor_id();
+    int cpu = dp_srng_get_cpu();

     /*
      * this logic makes all data path interfacing rings (UMAC/LMAC)
@@ -5946,7 +5984,10 @@ static QDF_STATUS dp_pdev_detach_wifi3(struct cdp_soc_t *psoc, uint8_t pdev_id,
  * dp_reo_desc_freelist_destroy() - Flush REO descriptors from deferred freelist
  * @soc: DP SOC handle
  */
-static inline void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
+#ifndef DP_UMAC_HW_RESET_SUPPORT
+static inline
+#endif
+void dp_reo_desc_freelist_destroy(struct dp_soc *soc)
 {
     struct reo_desc_list_node *desc;
     struct dp_rx_tid *rx_tid;
@@ -6527,6 +6568,15 @@ static void dp_register_umac_reset_handlers(struct dp_soc *soc)
 {
     dp_umac_reset_register_rx_action_callback(soc,
         dp_umac_reset_handle_pre_reset, UMAC_RESET_ACTION_DO_PRE_RESET);

+    dp_umac_reset_register_rx_action_callback(soc,
+                    dp_umac_reset_handle_post_reset,
+                    UMAC_RESET_ACTION_DO_POST_RESET_START);
+
+    dp_umac_reset_register_rx_action_callback(soc,
+                    dp_umac_reset_handle_post_reset_complete,
+                    UMAC_RESET_ACTION_DO_POST_RESET_COMPLETE);
+
 }
 #else
 static void dp_register_umac_reset_handlers(struct dp_soc *soc)
@@ -12774,7 +12824,7 @@ static void dp_drain_txrx(struct cdp_soc_t *soc_handle)
     uint32_t budget = 0xffff;
     uint32_t val;
     int i;
-    int cpu = smp_processor_id();
+    int cpu = dp_srng_get_cpu();

     cur_tx_limit = soc->wlan_cfg_ctx->tx_comp_loop_pkt_limit;
     cur_rx_limit = soc->wlan_cfg_ctx->rx_reap_loop_pkt_limit;
@@ -12847,11 +12897,48 @@ static void dp_reset_interrupt_ring_masks(struct dp_soc *soc)
         intr_ctx->host2rxdma_mon_ring_mask = 0;
         intr_ctx->tx_mon_ring_mask = 0;

-        intr_bkp = (struct dp_intr_bkp *)((char *)intr_bkp +
-                (sizeof(struct dp_intr_bkp)));
+        intr_bkp++;
     }
 }

+/**
+ * dp_restore_interrupt_ring_masks(): Restore rx interrupt masks
+ * @soc: dp soc handle
+ *
+ * Return: void
+ */
+static void dp_restore_interrupt_ring_masks(struct dp_soc *soc)
+{
+    struct dp_intr_bkp *intr_bkp = soc->umac_reset_ctx.intr_ctx_bkp;
+    struct dp_intr_bkp *intr_bkp_base = intr_bkp;
+    struct dp_intr *intr_ctx;
+    int num_ctxt = wlan_cfg_get_num_contexts(soc->wlan_cfg_ctx);
+    int i;
+
+    qdf_assert_always(intr_bkp);
+
+    for (i = 0; i < num_ctxt; i++) {
+        intr_ctx = &soc->intr_ctx[i];
+
+        intr_ctx->tx_ring_mask = intr_bkp->tx_ring_mask;
+        intr_ctx->rx_ring_mask = intr_bkp->rx_ring_mask;
+        intr_ctx->rx_mon_ring_mask = intr_bkp->rx_mon_ring_mask;
+        intr_ctx->rx_err_ring_mask = intr_bkp->rx_err_ring_mask;
+        intr_ctx->rx_wbm_rel_ring_mask = intr_bkp->rx_wbm_rel_ring_mask;
+        intr_ctx->reo_status_ring_mask = intr_bkp->reo_status_ring_mask;
+        intr_ctx->rxdma2host_ring_mask = intr_bkp->rxdma2host_ring_mask;
+        intr_ctx->host2rxdma_ring_mask = intr_bkp->host2rxdma_ring_mask;
+        intr_ctx->host2rxdma_mon_ring_mask =
+                    intr_bkp->host2rxdma_mon_ring_mask;
+        intr_ctx->tx_mon_ring_mask = intr_bkp->tx_mon_ring_mask;
+
+        intr_bkp++;
+    }
+
+    qdf_mem_free(intr_bkp_base);
+    soc->umac_reset_ctx.intr_ctx_bkp = NULL;
+}
+
 /**
  * dp_resume_tx_hardstart(): Restore the old Tx hardstart functions
  * @soc: dp soc handle
@@ -12959,6 +13046,31 @@ void dp_register_notify_umac_pre_reset_fw_callback(struct dp_soc *soc)
     soc->notify_fw_callback = dp_check_n_notify_umac_prereset_done;
 }

+/**
+ * dp_reinit_rings(): Reinitialize host managed rings
+ * @soc: dp soc handle
+ *
+ * Return: QDF_STATUS
+ */
+static void dp_reinit_rings(struct dp_soc *soc)
+{
+    unsigned long end;
+
+    dp_soc_srng_deinit(soc);
+    dp_hw_link_desc_ring_deinit(soc);
+
+    /* Busy wait for 2 ms to make sure the rings are in idle state
+     * before we enable them again
+     */
+    end = jiffies + msecs_to_jiffies(2);
+    while (time_before(jiffies, end))
+        ;
+
+    dp_hw_link_desc_ring_init(soc);
+    dp_link_desc_ring_replenish(soc, WLAN_INVALID_PDEV_ID);
+    dp_soc_srng_init(soc);
+}
+
 /**
  * dp_umac_reset_handle_pre_reset(): Handle Umac prereset interrupt from FW
  * @soc: dp soc handle
@@ -12974,10 +13086,68 @@ static QDF_STATUS dp_umac_reset_handle_pre_reset(struct dp_soc *soc)

     dp_check_n_notify_umac_prereset_done(soc);

+    soc->umac_reset_ctx.nbuf_list = NULL;
+
     return QDF_STATUS_SUCCESS;
 }
-#endif

+/**
+ * dp_umac_reset_handle_post_reset(): Handle Umac postreset interrupt from FW
+ * @soc: dp soc handle
+ *
+ * Return: QDF_STATUS
+ */
+static QDF_STATUS dp_umac_reset_handle_post_reset(struct dp_soc *soc)
+{
+    qdf_nbuf_t *nbuf_list = &soc->umac_reset_ctx.nbuf_list;
+
+    dp_reinit_rings(soc);
+
+    dp_rx_desc_reuse(soc, nbuf_list);
+
+    dp_cleanup_reo_cmd_module(soc);
+
+    dp_tx_desc_pool_cleanup(soc, nbuf_list);
+
+    dp_reset_tid_q_setup(soc);
+
+    return dp_umac_reset_notify_action_completion(soc,
+            UMAC_RESET_ACTION_DO_POST_RESET_START);
+}
+
+/**
+ * dp_umac_reset_handle_post_reset_complete(): Handle Umac postreset_complete
+ *            interrupt from FW
+ * @soc: dp soc handle
+ *
+ * Return: QDF_STATUS
+ */
+static QDF_STATUS dp_umac_reset_handle_post_reset_complete(struct dp_soc *soc)
+{
+    QDF_STATUS status;
+    qdf_nbuf_t nbuf_list = soc->umac_reset_ctx.nbuf_list;
+
+    soc->umac_reset_ctx.nbuf_list = NULL;
+
+    dp_resume_reo_send_cmd(soc);
+
+    dp_restore_interrupt_ring_masks(soc);
+
+    dp_resume_tx_hardstart(soc);
+
+    status = dp_umac_reset_notify_action_completion(soc,
+            UMAC_RESET_ACTION_DO_POST_RESET_COMPLETE);
+
+    while (nbuf_list) {
+        qdf_nbuf_t nbuf = nbuf_list->next;
+
+        qdf_nbuf_free(nbuf_list);
+        nbuf_list = nbuf;
+    }
+
+    return status;
+}
+#endif
 #ifdef WLAN_FEATURE_PKT_CAPTURE_V2
 static void
 dp_set_pkt_capture_mode(struct cdp_soc_t *soc_handle, bool val)
@@ -13527,7 +13697,9 @@ static void dp_find_missing_tx_comp(struct dp_soc *soc)
|
|||||||
tx_desc->id);
|
tx_desc->id);
|
||||||
if (tx_desc->vdev_id == DP_INVALID_VDEV_ID) {
|
if (tx_desc->vdev_id == DP_INVALID_VDEV_ID) {
|
||||||
tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
|
tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
|
||||||
dp_tx_comp_free_buf(soc, tx_desc);
|
dp_tx_comp_free_buf(soc,
|
||||||
|
tx_desc,
|
||||||
|
false);
|
||||||
dp_tx_desc_release(tx_desc, i);
|
dp_tx_desc_release(tx_desc, i);
|
||||||
DP_STATS_INC(soc,
|
DP_STATS_INC(soc,
|
||||||
tx.tx_comp_force_freed, 1);
|
tx.tx_comp_force_freed, 1);
|
||||||
@@ -14809,7 +14981,7 @@ void *dp_soc_init(struct dp_soc *soc, HTC_HANDLE htc_handle,
|
|||||||
if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
|
if (wlan_cfg_get_dp_soc_nss_cfg(soc->wlan_cfg_ctx))
|
||||||
reo_params.alt_dst_ind_0 = REO_REMAP_RELEASE;
|
reo_params.alt_dst_ind_0 = REO_REMAP_RELEASE;
|
||||||
|
|
||||||
hal_reo_setup(soc->hal_soc, &reo_params);
|
hal_reo_setup(soc->hal_soc, &reo_params, 1);
|
||||||
|
|
||||||
hal_reo_set_err_dst_remap(soc->hal_soc);
|
hal_reo_set_err_dst_remap(soc->hal_soc);
|
||||||
|
|
||||||
|
@@ -3766,6 +3766,27 @@ error:
     return status;
 }

+#ifdef DP_UMAC_HW_RESET_SUPPORT
+static
+void dp_peer_rst_tids(struct dp_soc *soc, struct dp_peer *peer, void *arg)
+{
+    int tid;
+
+    for (tid = 0; tid < (DP_MAX_TIDS - 1); tid++) {
+        struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
+        void *vaddr = rx_tid->hw_qdesc_vaddr_aligned;
+
+        if (vaddr)
+            dp_reset_rx_reo_tid_queue(soc, vaddr,
+                          rx_tid->hw_qdesc_alloc_size);
+    }
+}
+
+void dp_reset_tid_q_setup(struct dp_soc *soc)
+{
+    dp_soc_iterate_peer(soc, dp_peer_rst_tids, NULL, DP_MOD_ID_UMAC_RESET);
+}
+#endif
 #ifdef REO_DESC_DEFER_FREE
 /*
  * dp_reo_desc_clean_up() - If cmd to flush base desc fails add
@@ -77,6 +77,9 @@ struct dp_peer *dp_peer_find_hash_find(struct dp_soc *soc,
                        enum dp_mod_id id);
 bool dp_peer_find_by_id_valid(struct dp_soc *soc, uint16_t peer_id);

+#ifdef DP_UMAC_HW_RESET_SUPPORT
+void dp_reset_tid_q_setup(struct dp_soc *soc);
+#endif
 /**
  * dp_peer_get_ref() - Returns peer object given the peer id
  *
@@ -87,6 +87,21 @@ void dp_resume_reo_send_cmd(struct dp_soc *soc)
 {
     hal_register_reo_send_cmd(soc->hal_soc);
 }

+/**
+ * dp_reset_rx_reo_tid_queue() - Reset the reo tid queues
+ * @soc: dp soc
+ * @hw_qdesc_vaddr: starting address of the tid queues
+ * @size: size of the memory pointed to by hw_qdesc_vaddr
+ *
+ * Return: status
+ */
+void
+dp_reset_rx_reo_tid_queue(struct dp_soc *soc, void *hw_qdesc_vaddr,
+              uint32_t size)
+{
+    hal_reset_rx_reo_tid_queue(soc->hal_soc, hw_qdesc_vaddr, size);
+}
 #endif

 QDF_STATUS dp_reo_send_cmd(struct dp_soc *soc, enum hal_reo_cmd_type type,
@@ -204,3 +219,16 @@ void dp_reo_cmdlist_destroy(struct dp_soc *soc)
     }
     qdf_spin_unlock_bh(&soc->rx.reo_cmd_lock);
 }
+
+#ifdef DP_UMAC_HW_RESET_SUPPORT
+/**
+ * dp_cleanup_reo_cmd_module - Clean up the reo cmd module
+ * @soc: DP SoC hanle
+ *
+ */
+void dp_cleanup_reo_cmd_module(struct dp_soc *soc)
+{
+    dp_reo_cmdlist_destroy(soc);
+    dp_reo_desc_freelist_destroy(soc);
+}
+#endif
@@ -564,6 +564,131 @@ QDF_STATUS __dp_pdev_rx_buffers_no_map_attach(struct dp_soc *soc,
|
|||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
#ifdef DP_UMAC_HW_RESET_SUPPORT
|
||||||
|
#if defined(QCA_DP_RX_NBUF_NO_MAP_UNMAP) && !defined(BUILD_X86)
|
||||||
|
static inline
|
||||||
|
qdf_dma_addr_t dp_rx_rep_retrieve_paddr(struct dp_soc *dp_soc, qdf_nbuf_t nbuf,
|
||||||
|
uint32_t buf_size)
|
||||||
|
{
|
||||||
|
return dp_rx_nbuf_sync_no_dsb(soc, nbuf, rx_desc_pool->buf_size);
|
||||||
|
}
|
||||||
|
#else
|
||||||
|
static inline
|
||||||
|
qdf_dma_addr_t dp_rx_rep_retrieve_paddr(struct dp_soc *dp_soc, qdf_nbuf_t nbuf,
|
||||||
|
uint32_t buf_size)
|
||||||
|
{
|
||||||
|
return qdf_nbuf_get_frag_paddr(nbuf, 0);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/*
|
||||||
|
* dp_rx_desc_replenish() - Replenish the rx descriptors one at a time
|
||||||
|
*
|
||||||
|
* @soc: core txrx main context
|
||||||
|
* @dp_rxdma_srng: rxdma ring
|
||||||
|
* @rx_desc_pool: rx descriptor pool
|
||||||
|
* @rx_desc:rx descriptor
|
||||||
|
*
|
||||||
|
* Return: void
|
||||||
|
*/
|
||||||
|
static inline
|
||||||
|
void dp_rx_desc_replenish(struct dp_soc *soc, struct dp_srng *dp_rxdma_srng,
|
||||||
|
struct rx_desc_pool *rx_desc_pool,
|
||||||
|
struct dp_rx_desc *rx_desc)
|
||||||
|
{
|
||||||
|
void *rxdma_srng;
|
||||||
|
void *rxdma_ring_entry;
|
||||||
|
qdf_dma_addr_t paddr;
|
||||||
|
|
||||||
|
rxdma_srng = dp_rxdma_srng->hal_srng;
|
||||||
|
|
||||||
|
/* No one else should be accessing the srng at this point */
|
||||||
|
hal_srng_access_start_unlocked(soc->hal_soc, rxdma_srng);
|
||||||
|
|
||||||
|
rxdma_ring_entry = hal_srng_src_get_next(soc->hal_soc, rxdma_srng);
|
||||||
|
|
||||||
|
qdf_assert_always(rxdma_ring_entry);
|
||||||
|
rx_desc->in_err_state = 0;
|
||||||
|
|
||||||
|
paddr = dp_rx_rep_retrieve_paddr(soc, rx_desc->nbuf,
|
||||||
|
rx_desc_pool->buf_size);
|
||||||
|
hal_rxdma_buff_addr_info_set(soc->hal_soc, rxdma_ring_entry, paddr,
|
||||||
|
rx_desc->cookie, rx_desc_pool->owner);
|
||||||
|
|
||||||
|
hal_srng_access_end_unlocked(soc->hal_soc, rxdma_srng);
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* dp_rx_desc_reuse() - Reuse the rx descriptors to fill the rx buf ring
|
||||||
|
*
|
||||||
|
* @soc: core txrx main context
|
||||||
|
* @nbuf_list: nbuf list for delayed free
|
||||||
|
*
|
||||||
|
* Return: void
|
||||||
|
*/
|
||||||
|
void dp_rx_desc_reuse(struct dp_soc *soc, qdf_nbuf_t *nbuf_list)
|
||||||
|
{
|
||||||
|
int mac_id, i, j;
|
||||||
|
union dp_rx_desc_list_elem_t *head = NULL;
|
||||||
|
union dp_rx_desc_list_elem_t *tail = NULL;
|
||||||
|
|
||||||
|
for (mac_id = 0; mac_id < MAX_PDEV_CNT; mac_id++) {
|
||||||
|
struct dp_srng *dp_rxdma_srng =
|
||||||
|
&soc->rx_refill_buf_ring[mac_id];
|
||||||
|
struct rx_desc_pool *rx_desc_pool = &soc->rx_desc_buf[mac_id];
|
||||||
|
uint32_t rx_sw_desc_num = rx_desc_pool->pool_size;
|
||||||
|
/* Only fill up 1/3 of the ring size */
|
||||||
|
uint32_t num_req_decs;
|
||||||
|
|
||||||
|
if (!dp_rxdma_srng || !dp_rxdma_srng->hal_srng ||
|
||||||
|
!rx_desc_pool->array)
|
||||||
|
continue;
|
||||||
|
|
||||||
|
num_req_decs = dp_rxdma_srng->num_entries / 3;
|
||||||
|
|
||||||
|
for (i = 0, j = 0; i < rx_sw_desc_num; i++) {
|
||||||
|
struct dp_rx_desc *rx_desc =
|
||||||
|
(struct dp_rx_desc *)&rx_desc_pool->array[i];
|
||||||
|
|
||||||
|
if (rx_desc->in_use) {
|
||||||
|
if (j < dp_rxdma_srng->num_entries) {
|
||||||
|
dp_rx_desc_replenish(soc, dp_rxdma_srng,
|
||||||
|
rx_desc_pool,
|
||||||
|
rx_desc);
|
||||||
|
} else {
|
||||||
|
dp_rx_nbuf_unmap(soc, rx_desc, 0);
|
||||||
|
rx_desc->unmapped = 0;
|
||||||
|
|
||||||
|
rx_desc->nbuf->next = *nbuf_list;
|
||||||
|
*nbuf_list = rx_desc->nbuf;
|
||||||
|
|
||||||
|
dp_rx_add_to_free_desc_list(&head,
|
||||||
|
&tail,
|
||||||
|
rx_desc);
|
||||||
|
}
|
||||||
|
j++;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (head)
|
||||||
|
dp_rx_add_desc_list_to_free_list(soc, &head, &tail,
|
||||||
|
mac_id, rx_desc_pool);
|
||||||
|
|
||||||
|
/* If num of descs in use were less, then we need to replenish
|
||||||
|
* the ring with some buffers
|
||||||
|
*/
|
||||||
|
head = NULL;
|
||||||
|
tail = NULL;
|
||||||
|
|
||||||
|
if (j < (num_req_decs - 1))
|
||||||
|
dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
|
||||||
|
rx_desc_pool,
|
||||||
|
((num_req_decs - 1) - j),
|
||||||
|
&head, &tail, true);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
|
* dp_rx_buffers_replenish() - replenish rxdma ring with rx nbufs
|
||||||
* called during dp rx initialization
|
* called during dp rx initialization
|
||||||
@@ -578,6 +703,7 @@ QDF_STATUS __dp_pdev_rx_buffers_no_map_attach(struct dp_soc *soc,
|
|||||||
* or NULL during dp rx initialization or out of buffer
|
* or NULL during dp rx initialization or out of buffer
|
||||||
* interrupt.
|
* interrupt.
|
||||||
* @tail: tail of descs list
|
* @tail: tail of descs list
|
||||||
|
* @req_only: If true don't replenish more than req buffers
|
||||||
* @func_name: name of the caller function
|
* @func_name: name of the caller function
|
||||||
* Return: return success or failure
|
* Return: return success or failure
|
||||||
*/
|
*/
|
||||||
@@ -587,7 +713,7 @@ QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
|
|||||||
uint32_t num_req_buffers,
|
uint32_t num_req_buffers,
|
||||||
union dp_rx_desc_list_elem_t **desc_list,
|
union dp_rx_desc_list_elem_t **desc_list,
|
||||||
union dp_rx_desc_list_elem_t **tail,
|
union dp_rx_desc_list_elem_t **tail,
|
||||||
const char *func_name)
|
bool req_only, const char *func_name)
|
||||||
{
|
{
|
||||||
uint32_t num_alloc_desc;
|
uint32_t num_alloc_desc;
|
||||||
uint16_t num_desc_to_free = 0;
|
uint16_t num_desc_to_free = 0;
|
||||||
@@ -630,7 +756,7 @@ QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
|
|||||||
dp_rx_debug("%pK: no of available entries in rxdma ring: %d",
|
dp_rx_debug("%pK: no of available entries in rxdma ring: %d",
|
||||||
dp_soc, num_entries_avail);
|
dp_soc, num_entries_avail);
|
||||||
|
|
||||||
if (!(*desc_list) && (num_entries_avail >
|
if (!req_only && !(*desc_list) && (num_entries_avail >
|
||||||
((dp_rxdma_srng->num_entries * 3) / 4))) {
|
((dp_rxdma_srng->num_entries * 3) / 4))) {
|
||||||
num_req_buffers = num_entries_avail;
|
num_req_buffers = num_entries_avail;
|
||||||
} else if (num_entries_avail < num_req_buffers) {
|
} else if (num_entries_avail < num_req_buffers) {
|
||||||
|
@@ -194,9 +194,10 @@ struct dp_rx_desc {
     __dp_rx_add_to_free_desc_list(head, tail, new, __func__)

 #define dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, \
-                num_buffers, desc_list, tail) \
+                num_buffers, desc_list, tail, req_only) \
     __dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool, \
-                  num_buffers, desc_list, tail, __func__)
+                  num_buffers, desc_list, tail, req_only, \
+                  __func__)

 #ifdef WLAN_SUPPORT_RX_FISA
 /**
@@ -1521,6 +1522,7 @@ dp_rx_update_flow_tag(struct dp_soc *soc, struct dp_vdev *vdev,
|
|||||||
* or NULL during dp rx initialization or out of buffer
|
* or NULL during dp rx initialization or out of buffer
|
||||||
* interrupt.
|
* interrupt.
|
||||||
* @tail: tail of descs list
|
* @tail: tail of descs list
|
||||||
|
* @req_only: If true don't replenish more than req buffers
|
||||||
* @func_name: name of the caller function
|
* @func_name: name of the caller function
|
||||||
* Return: return success or failure
|
* Return: return success or failure
|
||||||
*/
|
*/
|
||||||
@@ -1530,6 +1532,7 @@ QDF_STATUS __dp_rx_buffers_replenish(struct dp_soc *dp_soc, uint32_t mac_id,
|
|||||||
uint32_t num_req_buffers,
|
uint32_t num_req_buffers,
|
||||||
union dp_rx_desc_list_elem_t **desc_list,
|
union dp_rx_desc_list_elem_t **desc_list,
|
||||||
union dp_rx_desc_list_elem_t **tail,
|
union dp_rx_desc_list_elem_t **tail,
|
||||||
|
bool req_only,
|
||||||
const char *func_name);
|
const char *func_name);
|
||||||
/*
|
/*
|
||||||
* __dp_rx_buffers_no_map_replenish() - replenish rxdma ring with rx nbufs
|
* __dp_rx_buffers_no_map_replenish() - replenish rxdma ring with rx nbufs
|
||||||
@@ -2491,7 +2494,7 @@ void dp_rx_buffers_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
|
|||||||
union dp_rx_desc_list_elem_t **tail)
|
union dp_rx_desc_list_elem_t **tail)
|
||||||
{
|
{
|
||||||
dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
|
dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
|
||||||
num_req_buffers, desc_list, tail);
|
num_req_buffers, desc_list, tail, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline
|
static inline
|
||||||
@@ -2503,7 +2506,7 @@ void dp_rx_buffers_lt_replenish_simple(struct dp_soc *soc, uint32_t mac_id,
|
|||||||
union dp_rx_desc_list_elem_t **tail)
|
union dp_rx_desc_list_elem_t **tail)
|
||||||
{
|
{
|
||||||
dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
|
dp_rx_buffers_replenish(soc, mac_id, rxdma_srng, rx_desc_pool,
|
||||||
num_req_buffers, desc_list, tail);
|
num_req_buffers, desc_list, tail, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline
|
static inline
|
||||||
@@ -2578,6 +2581,27 @@ void dp_rx_nbuf_free(qdf_nbuf_t nbuf)
 }
 #endif

+#ifdef DP_UMAC_HW_RESET_SUPPORT
+/*
+ * dp_rx_desc_reuse() - Reuse the rx descriptors to fill the rx buf ring
+ *
+ * @soc: core txrx main context
+ * @nbuf_list: nbuf list for delayed free
+ *
+ * Return: void
+ */
+void dp_rx_desc_reuse(struct dp_soc *soc, qdf_nbuf_t *nbuf_list);
+
+/*
+ * dp_rx_desc_delayed_free() - Delayed free of the rx descs
+ *
+ * @soc: core txrx main context
+ *
+ * Return: void
+ */
+void dp_rx_desc_delayed_free(struct dp_soc *soc);
+#endif
+
 /**
  * dp_rx_get_txrx_peer_and_vdev() - Get txrx peer and vdev from peer id
  * @nbuf : pointer to the first msdu of an amsdu.
@@ -128,7 +128,7 @@ static void dp_rx_return_head_frag_desc(struct dp_txrx_peer *txrx_peer,
|
|||||||
dp_rx_add_to_free_desc_list(&head, &tail,
|
dp_rx_add_to_free_desc_list(&head, &tail,
|
||||||
txrx_peer->rx_tid[tid].head_frag_desc);
|
txrx_peer->rx_tid[tid].head_frag_desc);
|
||||||
dp_rx_buffers_replenish(soc, 0, dp_rxdma_srng, rx_desc_pool,
|
dp_rx_buffers_replenish(soc, 0, dp_rxdma_srng, rx_desc_pool,
|
||||||
1, &head, &tail);
|
1, &head, &tail, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (txrx_peer->rx_tid[tid].dst_ring_desc) {
|
if (txrx_peer->rx_tid[tid].dst_ring_desc) {
|
||||||
|
@@ -2586,7 +2586,8 @@ done:
|
|||||||
rx_desc_pool,
|
rx_desc_pool,
|
||||||
rx_bufs_reaped[mac_id],
|
rx_bufs_reaped[mac_id],
|
||||||
&dp_pdev->free_list_head,
|
&dp_pdev->free_list_head,
|
||||||
&dp_pdev->free_list_tail);
|
&dp_pdev->free_list_tail,
|
||||||
|
false);
|
||||||
rx_bufs_used += rx_bufs_reaped[mac_id];
|
rx_bufs_used += rx_bufs_reaped[mac_id];
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -2904,7 +2905,7 @@ done:
|
|||||||
|
|
||||||
dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
|
dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
|
||||||
rx_desc_pool, rx_bufs_reaped[mac_id],
|
rx_desc_pool, rx_bufs_reaped[mac_id],
|
||||||
&head[mac_id], &tail[mac_id]);
|
&head[mac_id], &tail[mac_id], false);
|
||||||
rx_bufs_used += rx_bufs_reaped[mac_id];
|
rx_bufs_used += rx_bufs_reaped[mac_id];
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -3438,7 +3439,7 @@ dp_rxdma_err_process(struct dp_intr *int_ctx, struct dp_soc *soc,
|
|||||||
}
|
}
|
||||||
|
|
||||||
dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
|
dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
|
||||||
rx_desc_pool, rx_bufs_used, &head, &tail);
|
rx_desc_pool, rx_bufs_used, &head, &tail, false);
|
||||||
|
|
||||||
work_done += rx_bufs_used;
|
work_done += rx_bufs_used;
|
||||||
}
|
}
|
||||||
@@ -3627,7 +3628,7 @@ dp_handle_wbm_internal_error(struct dp_soc *soc, void *hal_desc,
|
|||||||
dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
|
dp_rx_buffers_replenish(soc, mac_id, dp_rxdma_srng,
|
||||||
rx_desc_pool,
|
rx_desc_pool,
|
||||||
rx_bufs_reaped[mac_id],
|
rx_bufs_reaped[mac_id],
|
||||||
&head, &tail);
|
&head, &tail, false);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -2068,30 +2068,41 @@ static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
|
|||||||
* dp_mesh_tx_comp_free_buff() - Free the mesh tx packet buffer
|
* dp_mesh_tx_comp_free_buff() - Free the mesh tx packet buffer
|
||||||
* @soc: dp_soc handle
|
* @soc: dp_soc handle
|
||||||
* @tx_desc: TX descriptor
|
* @tx_desc: TX descriptor
|
||||||
* @vdev: datapath vdev handle
|
* @delayed_free: delay the nbuf free
|
||||||
*
|
*
|
||||||
* Return: None
|
* Return: nbuf to be freed late
|
||||||
*/
|
*/
|
||||||
static inline void dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
|
static inline qdf_nbuf_t dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
|
||||||
struct dp_tx_desc_s *tx_desc)
|
struct dp_tx_desc_s *tx_desc,
|
||||||
|
bool delayed_free)
|
||||||
{
|
{
|
||||||
qdf_nbuf_t nbuf = tx_desc->nbuf;
|
qdf_nbuf_t nbuf = tx_desc->nbuf;
|
||||||
struct dp_vdev *vdev = NULL;
|
struct dp_vdev *vdev = NULL;
|
||||||
|
|
||||||
|
vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id, DP_MOD_ID_MESH);
|
||||||
if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) {
|
if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) {
|
||||||
qdf_nbuf_free(nbuf);
|
if (vdev)
|
||||||
DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
|
DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
|
||||||
} else {
|
|
||||||
vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id,
|
if (delayed_free)
|
||||||
DP_MOD_ID_MESH);
|
return nbuf;
|
||||||
if (vdev && vdev->osif_tx_free_ext)
|
|
||||||
vdev->osif_tx_free_ext((nbuf));
|
|
||||||
else
|
|
||||||
qdf_nbuf_free(nbuf);
|
qdf_nbuf_free(nbuf);
|
||||||
|
} else {
|
||||||
|
if (vdev && vdev->osif_tx_free_ext) {
|
||||||
|
vdev->osif_tx_free_ext((nbuf));
|
||||||
|
} else {
|
||||||
|
if (delayed_free)
|
||||||
|
return nbuf;
|
||||||
|
|
||||||
|
qdf_nbuf_free(nbuf);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if (vdev)
|
if (vdev)
|
||||||
dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
|
dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
|
||||||
}
|
|
||||||
|
return NULL;
|
||||||
}
|
}
|
||||||
#else
|
#else
|
||||||
static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
|
static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
|
||||||
@@ -2100,9 +2111,11 @@ static inline void dp_tx_update_mesh_flags(struct dp_soc *soc,
|
|||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
|
static inline qdf_nbuf_t dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
|
||||||
struct dp_tx_desc_s *tx_desc)
|
struct dp_tx_desc_s *tx_desc,
|
||||||
|
bool delayed_free)
|
||||||
{
|
{
|
||||||
|
return NULL;
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
@@ -2297,21 +2310,25 @@ fail_return:
|
|||||||
* dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
|
* dp_tx_comp_free_buf() - Free nbuf associated with the Tx Descriptor
|
||||||
* @soc: Soc handle
|
* @soc: Soc handle
|
||||||
* @desc: software Tx descriptor to be processed
|
* @desc: software Tx descriptor to be processed
|
||||||
|
* @delayed_free: defer freeing of nbuf
|
||||||
*
|
*
|
||||||
* Return: none
|
* Return: nbuf to be freed later
|
||||||
*/
|
*/
|
||||||
void dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc)
|
qdf_nbuf_t dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc,
|
||||||
|
bool delayed_free)
|
||||||
{
|
{
|
||||||
qdf_nbuf_t nbuf = desc->nbuf;
|
qdf_nbuf_t nbuf = desc->nbuf;
|
||||||
enum dp_tx_event_type type = dp_tx_get_event_type(desc->flags);
|
enum dp_tx_event_type type = dp_tx_get_event_type(desc->flags);
|
||||||
|
|
||||||
/* nbuf already freed in vdev detach path */
|
/* nbuf already freed in vdev detach path */
|
||||||
if (!nbuf)
|
if (!nbuf)
|
||||||
return;
|
return NULL;
|
||||||
|
|
||||||
/* If it is TDLS mgmt, don't unmap or free the frame */
|
/* If it is TDLS mgmt, don't unmap or free the frame */
|
||||||
if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME)
|
if (desc->flags & DP_TX_DESC_FLAG_TDLS_FRAME) {
|
||||||
return dp_non_std_htt_tx_comp_free_buff(soc, desc);
|
dp_non_std_htt_tx_comp_free_buff(soc, desc);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
/* 0 : MSDU buffer, 1 : MLE */
|
/* 0 : MSDU buffer, 1 : MLE */
|
||||||
if (desc->msdu_ext_desc) {
|
if (desc->msdu_ext_desc) {
|
||||||
@@ -2328,8 +2345,7 @@ void dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc)
|
|||||||
desc->msdu_ext_desc->tso_desc,
|
desc->msdu_ext_desc->tso_desc,
|
||||||
desc->msdu_ext_desc->
|
desc->msdu_ext_desc->
|
||||||
tso_num_desc);
|
tso_num_desc);
|
||||||
qdf_nbuf_free(nbuf);
|
goto nbuf_free;
|
||||||
return;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (qdf_unlikely(desc->frm_type == dp_tx_frm_sg)) {
|
if (qdf_unlikely(desc->frm_type == dp_tx_frm_sg)) {
|
||||||
@@ -2353,8 +2369,7 @@ void dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc)
|
|||||||
QDF_DMA_TO_DEVICE);
|
QDF_DMA_TO_DEVICE);
|
||||||
}
|
}
|
||||||
|
|
||||||
qdf_nbuf_free(nbuf);
|
goto nbuf_free;
|
||||||
return;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
/* If it's ME frame, dont unmap the cloned nbuf's */
|
/* If it's ME frame, dont unmap the cloned nbuf's */
|
||||||
@@ -2365,13 +2380,18 @@ void dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc)
|
|||||||
dp_tx_unmap(soc, desc);
|
dp_tx_unmap(soc, desc);
|
||||||
|
|
||||||
if (desc->flags & DP_TX_DESC_FLAG_MESH_MODE)
|
if (desc->flags & DP_TX_DESC_FLAG_MESH_MODE)
|
||||||
return dp_mesh_tx_comp_free_buff(soc, desc);
|
return dp_mesh_tx_comp_free_buff(soc, desc, delayed_free);
|
||||||
|
|
||||||
if (dp_tx_traffic_end_indication_enq_ind_pkt(soc, desc, nbuf))
|
if (dp_tx_traffic_end_indication_enq_ind_pkt(soc, desc, nbuf))
|
||||||
return;
|
return NULL;
|
||||||
|
|
||||||
nbuf_free:
|
nbuf_free:
|
||||||
|
if (delayed_free)
|
||||||
|
return nbuf;
|
||||||
|
|
||||||
qdf_nbuf_free(nbuf);
|
qdf_nbuf_free(nbuf);
|
||||||
|
|
||||||
|
return NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -2613,7 +2633,7 @@ qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
|
|||||||
* unmap and free current,
|
* unmap and free current,
|
||||||
* retransmit remaining segments
|
* retransmit remaining segments
|
||||||
*/
|
*/
|
||||||
dp_tx_comp_free_buf(soc, tx_desc);
|
dp_tx_comp_free_buf(soc, tx_desc, false);
|
||||||
i++;
|
i++;
|
||||||
dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
|
dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
|
||||||
continue;
|
continue;
|
||||||
@@ -4562,7 +4582,7 @@ dp_tx_comp_process_desc(struct dp_soc *soc,
|
|||||||
}
|
}
|
||||||
|
|
||||||
desc->flags |= DP_TX_DESC_FLAG_COMPLETED_TX;
|
desc->flags |= DP_TX_DESC_FLAG_COMPLETED_TX;
|
||||||
dp_tx_comp_free_buf(soc, desc);
|
dp_tx_comp_free_buf(soc, desc, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef DISABLE_DP_STATS
|
#ifdef DISABLE_DP_STATS
|
||||||
@@ -5329,7 +5349,7 @@ more_data:
|
|||||||
dp_tx_comp_info_rl("pdev in down state %d",
|
dp_tx_comp_info_rl("pdev in down state %d",
|
||||||
tx_desc->id);
|
tx_desc->id);
|
||||||
tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
|
tx_desc->flags |= DP_TX_DESC_FLAG_TX_COMP_ERR;
|
||||||
dp_tx_comp_free_buf(soc, tx_desc);
|
dp_tx_comp_free_buf(soc, tx_desc, false);
|
||||||
dp_tx_desc_release(tx_desc, tx_desc->pool_id);
|
dp_tx_desc_release(tx_desc, tx_desc->pool_id);
|
||||||
goto next_desc;
|
goto next_desc;
|
||||||
}
|
}
|
||||||
@@ -5606,7 +5626,8 @@ void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
|
|||||||
*/
|
*/
|
||||||
if (force_free) {
|
if (force_free) {
|
||||||
tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
|
tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
|
||||||
dp_tx_comp_free_buf(soc, tx_desc);
|
dp_tx_comp_free_buf(soc, tx_desc,
|
||||||
|
false);
|
||||||
dp_tx_desc_release(tx_desc, i);
|
dp_tx_desc_release(tx_desc, i);
|
||||||
} else {
|
} else {
|
||||||
tx_desc->vdev_id = DP_INVALID_VDEV_ID;
|
tx_desc->vdev_id = DP_INVALID_VDEV_ID;
|
||||||
@@ -5670,7 +5691,8 @@ void dp_tx_desc_flush(struct dp_pdev *pdev, struct dp_vdev *vdev,
|
|||||||
if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
|
if (dp_is_tx_desc_flush_match(pdev, vdev, tx_desc)) {
|
||||||
if (force_free) {
|
if (force_free) {
|
||||||
tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
|
tx_desc->flags |= DP_TX_DESC_FLAG_FLUSH;
|
||||||
dp_tx_comp_free_buf(soc, tx_desc);
|
dp_tx_comp_free_buf(soc, tx_desc,
|
||||||
|
false);
|
||||||
dp_tx_desc_release(tx_desc, i);
|
dp_tx_desc_release(tx_desc, i);
|
||||||
} else {
|
} else {
|
||||||
dp_tx_desc_reset_vdev(soc, tx_desc,
|
dp_tx_desc_reset_vdev(soc, tx_desc,
|
||||||
|
@@ -246,7 +246,8 @@ QDF_STATUS dp_tx_tso_cmn_desc_pool_alloc(struct dp_soc *soc,
 QDF_STATUS dp_tx_tso_cmn_desc_pool_init(struct dp_soc *soc,
                     uint8_t num_pool,
                     uint32_t num_desc);
-void dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc);
+qdf_nbuf_t dp_tx_comp_free_buf(struct dp_soc *soc, struct dp_tx_desc_s *desc,
+                   bool delayed_free);
 void dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id);
 void dp_tx_compute_delay(struct dp_vdev *vdev, struct dp_tx_desc_s *tx_desc,
              uint8_t tid, uint8_t ring_id);
@@ -63,6 +63,63 @@ dp_tx_desc_pool_counter_initialize(struct dp_tx_desc_pool_s *tx_desc_pool,
|
|||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
#ifdef DP_UMAC_HW_RESET_SUPPORT
|
||||||
|
/**
|
||||||
|
* dp_tx_desc_clean_up() - Clean up the tx dexcriptors
|
||||||
|
* @ctxt: context passed
|
||||||
|
* @elem: element to be cleaned up
|
||||||
|
* @elem_list: element list
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
void dp_tx_desc_clean_up(void *ctxt, void *elem, void *elem_list)
|
||||||
|
{
|
||||||
|
struct dp_soc *soc = (struct dp_soc *)ctxt;
|
||||||
|
struct dp_tx_desc_s *tx_desc = (struct dp_tx_desc_s *)elem;
|
||||||
|
qdf_nbuf_t *nbuf_list = (qdf_nbuf_t *)elem_list;
|
||||||
|
qdf_nbuf_t nbuf = NULL;
|
||||||
|
|
||||||
|
if (tx_desc->nbuf) {
|
||||||
|
nbuf = dp_tx_comp_free_buf(soc, tx_desc, true);
|
||||||
|
dp_tx_desc_release(tx_desc, tx_desc->pool_id);
|
||||||
|
|
||||||
|
if (nbuf) {
|
||||||
|
if (!nbuf_list) {
|
||||||
|
dp_err("potential memory leak");
|
||||||
|
qdf_assert_always(0);
|
||||||
|
}
|
||||||
|
|
||||||
|
nbuf->next = *nbuf_list;
|
||||||
|
*nbuf_list = nbuf;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* dp_tx_desc_pool_cleanup() - Clean up the tx dexcriptor pools
|
||||||
|
* @soc: Handle to DP SoC structure
|
||||||
|
* @nbuf_list: nbuf list for delayed free
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
void dp_tx_desc_pool_cleanup(struct dp_soc *soc, qdf_nbuf_t *nbuf_list)
|
||||||
|
{
|
||||||
|
int i;
|
||||||
|
struct dp_tx_desc_pool_s *tx_desc_pool = NULL;
|
||||||
|
uint32_t num_pool = wlan_cfg_get_num_tx_desc_pool(soc->wlan_cfg_ctx);
|
||||||
|
|
||||||
|
for (i = 0; i < num_pool; i++) {
|
||||||
|
tx_desc_pool = &soc->tx_desc[i];
|
||||||
|
|
||||||
|
if (tx_desc_pool)
|
||||||
|
qdf_tx_desc_pool_free_bufs(soc,
|
||||||
|
&tx_desc_pool->desc_pages,
|
||||||
|
tx_desc_pool->elem_size,
|
||||||
|
tx_desc_pool->elem_count,
|
||||||
|
true, &dp_tx_desc_clean_up,
|
||||||
|
nbuf_list);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* dp_tx_desc_pool_alloc() - Allocate Tx Descriptor pool(s)
|
* dp_tx_desc_pool_alloc() - Allocate Tx Descriptor pool(s)
|
||||||
* @soc Handle to DP SoC structure
|
* @soc Handle to DP SoC structure
|
||||||
|
@@ -135,6 +135,10 @@ QDF_STATUS dp_tx_tso_num_seg_pool_init(struct dp_soc *soc, uint8_t pool_id,
 void dp_tx_tso_num_seg_pool_free(struct dp_soc *soc, uint8_t pool_id);
 void dp_tx_tso_num_seg_pool_deinit(struct dp_soc *soc, uint8_t pool_id);

+#ifdef DP_UMAC_HW_RESET_SUPPORT
+void dp_tx_desc_pool_cleanup(struct dp_soc *soc, qdf_nbuf_t *nbuf_list);
+#endif
+
 #ifdef QCA_LL_TX_FLOW_CONTROL_V2
 void dp_tx_flow_control_init(struct dp_soc *);
 void dp_tx_flow_control_deinit(struct dp_soc *);
@@ -260,6 +260,7 @@ enum dp_mod_id {
     DP_MOD_ID_SAWF,
     DP_MOD_ID_REINJECT,
     DP_MOD_ID_SCS,
+    DP_MOD_ID_UMAC_RESET,
     DP_MOD_ID_MAX,
 };

@@ -135,6 +135,7 @@ struct umac_reset_rx_actions {
  * @shmem_exp_magic_num: Expected magic number in the shared memory
  * @rx_actions: callbacks for handling UMAC reset actions
  * @intr_ctx_bkp: DP Interrupts ring masks backup
+ * @nbuf_list: skb list for delayed free
  */
 struct dp_soc_umac_reset_ctx {
     qdf_dma_addr_t shmem_paddr_unaligned;

@@ -147,6 +148,7 @@ struct dp_soc_umac_reset_ctx {
     uint32_t shmem_exp_magic_num;
     struct umac_reset_rx_actions rx_actions;
     struct dp_intr_bkp *intr_ctx_bkp;
+    qdf_nbuf_t nbuf_list;
 };

 /**
@@ -245,7 +245,7 @@ void dp_tx_process_htt_completion_li(struct dp_soc *soc,
|
|||||||
return;
|
return;
|
||||||
|
|
||||||
release_tx_desc:
|
release_tx_desc:
|
||||||
dp_tx_comp_free_buf(soc, tx_desc);
|
dp_tx_comp_free_buf(soc, tx_desc, false);
|
||||||
dp_tx_desc_release(tx_desc, tx_desc->pool_id);
|
dp_tx_desc_release(tx_desc, tx_desc->pool_id);
|
||||||
if (vdev)
|
if (vdev)
|
||||||
dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
|
dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_HTT_COMP);
|
||||||
|
@@ -583,7 +583,7 @@ void dp_rx_mon_dest_process(struct dp_soc *soc, struct dp_intr *int_ctx,
|
|||||||
mac_for_pdev),
|
mac_for_pdev),
|
||||||
dp_rx_get_mon_desc_pool(soc, mac_id,
|
dp_rx_get_mon_desc_pool(soc, mac_id,
|
||||||
pdev_id),
|
pdev_id),
|
||||||
rx_bufs_used, &head, &tail);
|
rx_bufs_used, &head, &tail, false);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -628,7 +628,7 @@ dp_rx_pdev_mon_buf_buffers_alloc(struct dp_pdev *pdev, uint32_t mac_id,
|
|||||||
rx_desc_pool,
|
rx_desc_pool,
|
||||||
num_entries,
|
num_entries,
|
||||||
&desc_list,
|
&desc_list,
|
||||||
&tail);
|
&tail, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
return status;
|
return status;
|
||||||
@@ -893,7 +893,7 @@ next_entry:
|
|||||||
dp_rx_buffers_replenish(soc, mac_id,
|
dp_rx_buffers_replenish(soc, mac_id,
|
||||||
dp_rxdma_get_mon_buf_ring(pdev, mac_id),
|
dp_rxdma_get_mon_buf_ring(pdev, mac_id),
|
||||||
rx_desc_pool,
|
rx_desc_pool,
|
||||||
rx_bufs_used, &head, &tail);
|
rx_bufs_used, &head, &tail, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
return reap_cnt;
|
return reap_cnt;
|
||||||
|