qcacld-3.0: cdp: Convergence of cdp_l_flowctl_ops
Currently the CDP APIs are given a pdev/vdev/peer handle as argument, and that handle is dereferenced directly inside the APIs. This can race with a parallel deletion of the corresponding object, leaving the API operating on a freed handle. As part of CDP convergence, pass only the pdev/vdev id or the peer MAC address instead; the id is used to look up the handle inside the datapath, so a handle that has been deleted in parallel is never accessed.

Converged l_flowctl_ops:
- register_tx_flow_control
- set_vdev_tx_desc_limit
- set_vdev_os_queue_status
- deregister_tx_flow_control_cb
- flow_control_cb
- get_tx_resource
- ll_set_tx_pause_q_depth
- vdev_flush
- vdev_pause
- vdev_unpause

CRs-Fixed: 2539748
Change-Id: I8ad4a20914c654b8e8aaf629dda7f673fdb110e4
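As a sketch of the converged pattern (names taken from the diff that follows; error paths trimmed, so this is illustrative rather than the exact driver code): the caller hands the datapath only an id, and the callee re-resolves the handle at the point of use, so a vdev deleted in parallel simply fails the lookup instead of being dereferenced stale.

    /* Caller side, before: forwards a raw dp handle that may already
     * have been freed by a parallel vdev delete.
     */
    cdp_fc_vdev_unpause(soc, (struct cdp_vdev *)vdev, reason, 0);

    /* Caller side, after: only the vdev id crosses the CDP boundary. */
    cdp_fc_vdev_unpause(soc, vdev_id, reason, 0);

    /* Callee side, after: resolve and validate the handle locally. */
    void ol_txrx_vdev_unpause(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
                              uint32_t reason, uint32_t pause_type)
    {
            struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)
                            ol_txrx_get_vdev_from_vdev_id(vdev_id);

            if (qdf_unlikely(!vdev)) {
                    ol_txrx_err("vdev is NULL");
                    return;         /* vdev vanished; nothing to unpause */
            }
            /* ... pause/unpause the freshly looked-up queues ... */
    }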
@@ -643,18 +643,13 @@ static void pmo_unpause_all_vdev(struct wlan_objmgr_psoc *psoc,
                                 struct pmo_psoc_priv_obj *psoc_ctx)
 {
         uint8_t vdev_id;
-        struct cdp_vdev *vdev_dp;
 
         /* Iterate through VDEV list */
         for (vdev_id = 0; vdev_id < WLAN_UMAC_PSOC_MAX_VDEVS; vdev_id++) {
-                vdev_dp = pmo_core_vdev_get_dp_handle(psoc_ctx, vdev_id);
-                if (!vdev_dp)
-                        continue;
-
                 /* When host resumes, by default unpause all active vdev */
                 if (pmo_core_vdev_get_pause_bitmap(psoc_ctx, vdev_id)) {
                         cdp_fc_vdev_unpause(pmo_core_psoc_get_dp_handle(psoc),
-                                            vdev_dp,
+                                            vdev_id,
                                             0xffffffff, 0);
                         if (psoc_ctx->pause_bitmap_notifier)
                                 psoc_ctx->pause_bitmap_notifier(vdev_id, 0);
@@ -1922,17 +1922,12 @@ int ol_txrx_distribute_group_credits(struct ol_txrx_pdev_t *pdev,
  */
 
 #ifdef QCA_HL_NETDEV_FLOW_CONTROL
-/**
- * ol_txrx_register_hl_flow_control() -register hl netdev flow control callback
- * @vdev_id: vdev_id
- * @flowControl: flow control callback
- *
- * Return: 0 for success or error code
- */
-int ol_txrx_register_hl_flow_control(struct cdp_soc_t *soc,
+int ol_txrx_register_hl_flow_control(struct cdp_soc_t *soc_hdl,
                                      tx_pause_callback flowcontrol)
 {
-        struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
+        struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
+        uint8_t pdev_id = OL_TXRX_PDEV_ID;
+        ol_txrx_pdev_handle pdev = ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
         u32 desc_pool_size = ol_tx_desc_pool_size_hl(pdev->ctrl_pdev);
 
         /*
@@ -1953,7 +1948,7 @@ int ol_txrx_register_hl_flow_control(struct cdp_soc_t *soc,
         return 0;
 }
 
-int ol_txrx_set_vdev_os_queue_status(u8 vdev_id,
+int ol_txrx_set_vdev_os_queue_status(struct cdp_soc_t *soc_hdl, u8 vdev_id,
                                      enum netif_action_type action)
 {
         struct ol_txrx_vdev_t *vdev =
@@ -1982,12 +1977,8 @@ int ol_txrx_set_vdev_os_queue_status(u8 vdev_id,
         return 0;
 }
 
-/**
- * ol_txrx_set_vdev_tx_desc_limit() - Set TX descriptor limits for a vdev
- * @vdev_id: vdev id for the vdev under consideration.
- * @chan: Channel on which the vdev has been started.
- */
-int ol_txrx_set_vdev_tx_desc_limit(u8 vdev_id, u8 chan)
+int ol_txrx_set_vdev_tx_desc_limit(struct cdp_soc_t *soc_hdl, u8 vdev_id,
+                                   u8 chan)
 {
         struct ol_txrx_vdev_t *vdev =
             (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
@@ -583,16 +583,23 @@ ol_txrx_peer_tid_unpause(ol_txrx_peer_handle peer, int tid)
 }
 
 void
-ol_txrx_vdev_pause(struct cdp_vdev *pvdev, uint32_t reason,
-                   uint32_t pause_type)
+ol_txrx_vdev_pause(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
+                   uint32_t reason, uint32_t pause_type)
 {
-        struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
-        struct ol_txrx_pdev_t *pdev = vdev->pdev;
+        struct ol_txrx_vdev_t *vdev =
+                (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
+        struct ol_txrx_pdev_t *pdev;
         struct ol_txrx_peer_t *peer;
         /* TO DO: log the queue pause */
         /* acquire the mutex lock, since we'll be modifying the queues */
         TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
 
+        if (qdf_unlikely(!vdev)) {
+                ol_txrx_err("vdev is NULL");
+                return;
+        }
+
+        pdev = vdev->pdev;
+
         /* use peer_ref_mutex before accessing peer_list */
         qdf_spin_lock_bh(&pdev->peer_ref_mutex);
@@ -614,19 +621,24 @@ ol_txrx_vdev_pause(struct cdp_vdev *pvdev, uint32_t reason,
         TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
 }
 
 
-void ol_txrx_vdev_unpause(struct cdp_vdev *pvdev, uint32_t reason,
-                          uint32_t pause_type)
+void ol_txrx_vdev_unpause(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
+                          uint32_t reason, uint32_t pause_type)
 {
-        struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
-        struct ol_txrx_pdev_t *pdev = vdev->pdev;
+        struct ol_txrx_vdev_t *vdev =
+                (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
+        struct ol_txrx_pdev_t *pdev;
         struct ol_txrx_peer_t *peer;
 
         /* TO DO: log the queue unpause */
         /* acquire the mutex lock, since we'll be modifying the queues */
         TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
 
+        if (qdf_unlikely(!vdev)) {
+                ol_txrx_err("vdev is NULL");
+                return;
+        }
+
+        pdev = vdev->pdev;
+
         /* take peer_ref_mutex before accessing peer_list */
         qdf_spin_lock_bh(&pdev->peer_ref_mutex);
@@ -649,9 +661,15 @@ void ol_txrx_vdev_unpause(struct cdp_vdev *pvdev, uint32_t reason,
         TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
 }
 
-void ol_txrx_vdev_flush(struct cdp_vdev *pvdev)
+void ol_txrx_vdev_flush(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
 {
-        struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
+        struct ol_txrx_vdev_t *vdev =
+                (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
+
+        if (qdf_unlikely(!vdev)) {
+                ol_txrx_err("vdev is NULL");
+                return;
+        }
 
         ol_tx_queue_vdev_flush(vdev->pdev, vdev);
 }
@@ -1717,9 +1735,8 @@ void ol_txrx_pdev_pause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
         struct ol_txrx_vdev_t *vdev = NULL, *tmp;
 
         TAILQ_FOREACH_SAFE(vdev, &pdev->vdev_list, vdev_list_elem, tmp) {
-                cdp_fc_vdev_pause(
-                        cds_get_context(QDF_MODULE_ID_SOC),
-                        (struct cdp_vdev *)vdev, reason, 0);
+                cdp_fc_vdev_pause(cds_get_context(QDF_MODULE_ID_SOC),
+                                  vdev->vdev_id, reason, 0);
         }
 
 }
@@ -1737,7 +1754,7 @@ void ol_txrx_pdev_unpause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
 
         TAILQ_FOREACH_SAFE(vdev, &pdev->vdev_list, vdev_list_elem, tmp) {
                 cdp_fc_vdev_unpause(cds_get_context(QDF_MODULE_ID_SOC),
-                                    (struct cdp_vdev *)vdev, reason, 0);
+                                    vdev->vdev_id, reason, 0);
         }
 
 }
@@ -229,20 +229,65 @@ ol_tx_queue_discard(
 
 #if (!defined(QCA_LL_LEGACY_TX_FLOW_CONTROL)) && (!defined(CONFIG_HL_SUPPORT))
 static inline
-void ol_txrx_vdev_flush(struct cdp_vdev *data_vdev)
+void ol_txrx_vdev_flush(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
 {
 }
 #else
-void ol_txrx_vdev_flush(struct cdp_vdev *pvdev);
+/**
+ * ol_txrx_vdev_flush() - Drop all tx data for the specified virtual device
+ * @soc_hdl: soc handle
+ * @vdev_id: vdev id
+ *
+ * Returns: none
+ *
+ * This function applies primarily to HL systems, but also applies to
+ * LL systems that use per-vdev tx queues for MCC or thermal throttling.
+ * This function would typically be used by the ctrl SW after it parks
+ * a STA vdev and then resumes it, but to a new AP. In this case, though
+ * the same vdev can be used, any old tx frames queued inside it would be
+ * stale, and would need to be discarded.
+ */
+void ol_txrx_vdev_flush(struct cdp_soc_t *soc_hdl, uint8_t vdev_id);
 #endif
 
 #if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || \
    (defined(QCA_LL_TX_FLOW_CONTROL_V2)) || \
    defined(CONFIG_HL_SUPPORT)
-void ol_txrx_vdev_pause(struct cdp_vdev *pvdev, uint32_t reason,
-                        uint32_t pause_type);
-void ol_txrx_vdev_unpause(struct cdp_vdev *pvdev, uint32_t reason,
-                          uint32_t pause_type);
+/**
+ * ol_txrx_vdev_pause() - Suspend all tx data for the specified virtual device
+ * @soc_hdl: Datapath soc handle
+ * @vdev_id: id of vdev
+ * @reason: the reason for which vdev queue is getting paused
+ * @pause_type: type of pause
+ *
+ * Return: none
+ *
+ * This function applies primarily to HL systems, but also
+ * applies to LL systems that use per-vdev tx queues for MCC or
+ * thermal throttling. As an example, this function could be
+ * used when a single-channel physical device supports multiple
+ * channels by jumping back and forth between the channels in a
+ * time-shared manner. As the device is switched from channel A
+ * to channel B, the virtual devices that operate on channel A
+ * will be paused.
+ */
+void ol_txrx_vdev_pause(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
+                        uint32_t reason, uint32_t pause_type);
+
+/**
+ * ol_txrx_vdev_unpause() - Resume tx for the specified virtual device
+ * @soc_hdl: Datapath soc handle
+ * @vdev_id: id of vdev being unpaused
+ * @reason: the reason for which vdev queue is getting unpaused
+ * @pause_type: type of pause
+ *
+ * Return: none
+ *
+ * This function applies primarily to HL systems, but also applies to
+ * LL systems that use per-vdev tx queues for MCC or thermal throttling.
+ */
+void ol_txrx_vdev_unpause(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
+                          uint32_t reason, uint32_t pause_type);
 #endif /* QCA_LL_LEGACY_TX_FLOW_CONTROL */
 
 #if defined(CONFIG_HL_SUPPORT) && defined(QCA_BAD_PEER_TX_FLOW_CL)
@@ -147,12 +147,14 @@ ol_tx_delay_hist(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
 
 /**
  * ol_txrx_flow_control_cb() - call osif flow control callback
- * @vdev: vdev handle
+ * @soc_hdl: Datapath soc handle
+ * @vdev_id: id of vdev
  * @tx_resume: tx resume flag
  *
  * Return: none
  */
-void ol_txrx_flow_control_cb(struct cdp_vdev *vdev, bool tx_resume);
+void ol_txrx_flow_control_cb(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
+                             bool tx_resume);
 
 #if defined(QCA_LL_LEGACY_TX_FLOW_CONTROL) || (defined(CONFIG_HL_SUPPORT) && \
         defined(QCA_HL_NETDEV_FLOW_CONTROL))
@@ -225,7 +225,7 @@ ol_txrx_vdev_handle
 ol_txrx_get_vdev_by_peer_addr(struct cdp_pdev *ppdev,
                               struct qdf_mac_addr peer_addr)
 {
-        struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)ppdev;
+        struct ol_txrx_pdev_t *pdev = cdp_pdev_to_ol_txrx_pdev_t(ppdev);
         struct ol_txrx_peer_t *peer = NULL;
         ol_txrx_vdev_handle vdev;
         /* peer_id to be removed PEER_ID_CLEANUP */
@@ -226,20 +226,49 @@ void htt_pkt_log_init(struct cdp_pdev *pdev_handle, void *scn);
 void peer_unmap_timer_handler(void *data);
 
 #ifdef QCA_LL_LEGACY_TX_FLOW_CONTROL
-int ol_txrx_register_tx_flow_control(uint8_t vdev_id,
+/**
+ * ol_txrx_register_tx_flow_control() - register tx flow control callback
+ * @soc_hdl: soc handle
+ * @vdev_id: vdev_id
+ * @flowControl: flow control callback
+ * @osif_fc_ctx: callback context
+ * @flow_control_is_pause: is vdev paused by flow control
+ *
+ * Return: 0 for success or error code
+ */
+int ol_txrx_register_tx_flow_control(struct cdp_soc_t *soc_hdl,
+                                     uint8_t vdev_id,
                                      ol_txrx_tx_flow_control_fp flow_control,
                                      void *osif_fc_ctx,
                                      ol_txrx_tx_flow_control_is_pause_fp
                                      flow_control_is_pause);
 
-int ol_txrx_deregister_tx_flow_control_cb(uint8_t vdev_id);
+/**
+ * ol_txrx_deregister_tx_flow_control_cb() - deregister tx flow control
+ * callback
+ * @soc_hdl: soc handle
+ * @vdev_id: vdev_id
+ *
+ * Return: 0 for success or error code
+ */
+int ol_txrx_deregister_tx_flow_control_cb(struct cdp_soc_t *soc_hdl,
+                                          uint8_t vdev_id);
 
-bool ol_txrx_get_tx_resource(struct cdp_pdev *pdev,
+bool ol_txrx_get_tx_resource(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                              struct qdf_mac_addr peer_addr,
                              unsigned int low_watermark,
                              unsigned int high_watermark_offset);
 
-int ol_txrx_ll_set_tx_pause_q_depth(uint8_t vdev_id, int pause_q_depth);
+/**
+ * ol_txrx_ll_set_tx_pause_q_depth() - set pause queue depth
+ * @soc_hdl: soc handle
+ * @vdev_id: vdev id
+ * @pause_q_depth: pause queue depth
+ *
+ * Return: 0 for success or error code
+ */
+int ol_txrx_ll_set_tx_pause_q_depth(struct cdp_soc_t *soc_hdl,
+                                    uint8_t vdev_id, int pause_q_depth);
 #endif
 
 void ol_tx_init_pdev(ol_txrx_pdev_handle pdev);
@@ -517,10 +546,38 @@ struct ol_txrx_stats_req_internal
                               uint8_t desc_id);
 
 #ifdef QCA_HL_NETDEV_FLOW_CONTROL
-int ol_txrx_register_hl_flow_control(struct cdp_soc_t *soc,
+/**
+ * ol_txrx_register_hl_flow_control() - register hl netdev flow control callback
+ * @soc_hdl: soc handle
+ * @vdev_id: vdev_id
+ * @flowControl: flow control callback
+ *
+ * Return: 0 for success or error code
+ */
+int ol_txrx_register_hl_flow_control(struct cdp_soc_t *soc_hdl,
                                      tx_pause_callback flowcontrol);
-int ol_txrx_set_vdev_os_queue_status(u8 vdev_id, enum netif_action_type action);
-int ol_txrx_set_vdev_tx_desc_limit(u8 vdev_id, u8 chan);
+
+/**
+ * ol_txrx_set_vdev_os_queue_status() - Set OS queue status for a vdev
+ * @soc_hdl: soc handle
+ * @vdev_id: vdev id for the vdev under consideration.
+ * @action: action to be done on queue for vdev
+ *
+ * Return: 0 on success, -EINVAL on failure
+ */
+int ol_txrx_set_vdev_os_queue_status(struct cdp_soc_t *soc_hdl, u8 vdev_id,
                                      enum netif_action_type action);
+
+/**
+ * ol_txrx_set_vdev_tx_desc_limit() - Set TX descriptor limits for a vdev
+ * @soc_hdl: soc handle
+ * @vdev_id: vdev id for the vdev under consideration.
+ * @chan: Channel on which the vdev has been started.
+ *
+ * Return: 0 on success, -EINVAL on failure
+ */
+int ol_txrx_set_vdev_tx_desc_limit(struct cdp_soc_t *soc_hdl, u8 vdev_id,
+                                   u8 chan);
 #endif
 
 /**
@@ -1370,20 +1370,20 @@ ol_txrx_map_to_netif_reason_type(uint32_t reason)
         }
 }
 
 /*
  * ol_txrx_vdev_pause() - pause vdev network queues
  * @vdev: vdev handle
  * @reason: network queue pause reason
  * @pause_type: type of pause
  * Return: none
  */
-void ol_txrx_vdev_pause(struct cdp_vdev *pvdev, uint32_t reason,
-                        uint32_t pause_type)
+void ol_txrx_vdev_pause(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
+                        uint32_t reason, uint32_t pause_type)
 {
-        struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
-        struct ol_txrx_pdev_t *pdev = vdev->pdev;
+        struct ol_txrx_vdev_t *vdev =
+                (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
+        struct ol_txrx_pdev_t *pdev;
         enum netif_reason_type netif_reason;
 
+        if (qdf_unlikely(!vdev)) {
+                ol_txrx_err("vdev is NULL");
+                return;
+        }
+
+        pdev = vdev->pdev;
         if (qdf_unlikely((!pdev) || (!pdev->pause_cb))) {
                 ol_txrx_err("invalid pdev");
                 return;
@@ -1398,18 +1398,27 @@ void ol_txrx_vdev_pause(struct cdp_vdev *pvdev, uint32_t reason,
 
 /**
  * ol_txrx_vdev_unpause() - unpause vdev network queues
+ * @soc_hdl: datapath soc handle
  * @vdev: vdev handle
  * @reason: network queue pause reason
  * @pause_type: type of pause
  *
  * Return: none
  */
-void ol_txrx_vdev_unpause(struct cdp_vdev *pvdev, uint32_t reason,
-                          uint32_t pause_type)
+void ol_txrx_vdev_unpause(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
+                          uint32_t reason, uint32_t pause_type)
 {
-        struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
-        struct ol_txrx_pdev_t *pdev = vdev->pdev;
+        struct ol_txrx_vdev_t *vdev =
+                (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
+        struct ol_txrx_pdev_t *pdev;
         enum netif_reason_type netif_reason;
 
+        if (qdf_unlikely(!vdev)) {
+                ol_txrx_err("vdev is NULL");
+                return;
+        }
+
+        pdev = vdev->pdev;
         if (qdf_unlikely((!pdev) || (!pdev->pause_cb))) {
                 ol_txrx_err("invalid pdev");
                 return;
@@ -1432,10 +1441,12 @@ void ol_txrx_vdev_unpause(struct cdp_vdev *pvdev, uint32_t reason,
  */
 void ol_txrx_pdev_pause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
 {
+        struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
         struct ol_txrx_vdev_t *vdev = NULL, *tmp;
 
         TAILQ_FOREACH_SAFE(vdev, &pdev->vdev_list, vdev_list_elem, tmp) {
-                ol_txrx_vdev_pause((struct cdp_vdev *)vdev, reason, 0);
+                ol_txrx_vdev_pause(ol_txrx_soc_t_to_cdp_soc_t(soc),
+                                   vdev->vdev_id, reason, 0);
         }
 }
@@ -1448,9 +1459,11 @@ void ol_txrx_pdev_pause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
  */
 void ol_txrx_pdev_unpause(struct ol_txrx_pdev_t *pdev, uint32_t reason)
 {
+        struct ol_txrx_soc_t *soc = cds_get_context(QDF_MODULE_ID_SOC);
         struct ol_txrx_vdev_t *vdev = NULL, *tmp;
 
         TAILQ_FOREACH_SAFE(vdev, &pdev->vdev_list, vdev_list_elem, tmp) {
-                ol_txrx_vdev_unpause((struct cdp_vdev *)vdev, reason, 0);
+                ol_txrx_vdev_unpause(ol_txrx_soc_t_to_cdp_soc_t(soc),
+                                     vdev->vdev_id, reason, 0);
         }
 }
@@ -38,26 +38,16 @@
 #include <ol_cfg.h>
 #include <cdp_txrx_handle.h>
 
-/**
- * ol_txrx_vdev_pause- Suspend all tx data for the specified virtual device
- *
- * @data_vdev - the virtual device being paused
- * @reason - the reason for which vdev queue is getting paused
- *
- * This function applies primarily to HL systems, but also
- * applies to LL systems that use per-vdev tx queues for MCC or
- * thermal throttling. As an example, this function could be
- * used when a single-channel physical device supports multiple
- * channels by jumping back and forth between the channels in a
- * time-shared manner. As the device is switched from channel A
- * to channel B, the virtual devices that operate on channel A
- * will be paused.
- *
- */
-void ol_txrx_vdev_pause(struct cdp_vdev *pvdev, uint32_t reason,
-                        uint32_t pause_type)
+void ol_txrx_vdev_pause(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
+                        uint32_t reason, uint32_t pause_type)
 {
-        struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
+        struct ol_txrx_vdev_t *vdev =
+                (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
+
+        if (qdf_unlikely(!vdev)) {
+                ol_txrx_err("vdev is NULL");
+                return;
+        }
 
         /* TO DO: log the queue pause */
         /* acquire the mutex lock, since we'll be modifying the queues */
@@ -72,20 +62,17 @@ void ol_txrx_vdev_pause(struct cdp_vdev *pvdev, uint32_t reason,
         TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
 }
 
-/**
- * ol_txrx_vdev_unpause - Resume tx for the specified virtual device
- *
- * @data_vdev - the virtual device being unpaused
- * @reason - the reason for which vdev queue is getting unpaused
- *
- * This function applies primarily to HL systems, but also applies to
- * LL systems that use per-vdev tx queues for MCC or thermal throttling.
- *
- */
-void ol_txrx_vdev_unpause(struct cdp_vdev *pvdev, uint32_t reason,
-                          uint32_t pause_type)
+void ol_txrx_vdev_unpause(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
+                          uint32_t reason, uint32_t pause_type)
 {
-        struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
+        struct ol_txrx_vdev_t *vdev =
+                (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
+
+        if (qdf_unlikely(!vdev)) {
+                ol_txrx_err("vdev is NULL");
+                return;
+        }
 
         /* TO DO: log the queue unpause */
         /* acquire the mutex lock, since we'll be modifying the queues */
         TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
@@ -107,22 +94,15 @@ void ol_txrx_vdev_unpause(struct cdp_vdev *pvdev, uint32_t reason,
         TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
 }
 
-/**
- * ol_txrx_vdev_flush - Drop all tx data for the specified virtual device
- *
- * @data_vdev - the virtual device being flushed
- *
- * This function applies primarily to HL systems, but also applies to
- * LL systems that use per-vdev tx queues for MCC or thermal throttling.
- * This function would typically be used by the ctrl SW after it parks
- * a STA vdev and then resumes it, but to a new AP. In this case, though
- * the same vdev can be used, any old tx frames queued inside it would be
- * stale, and would need to be discarded.
- *
- */
-void ol_txrx_vdev_flush(struct cdp_vdev *pvdev)
+void ol_txrx_vdev_flush(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
 {
-        struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
+        struct ol_txrx_vdev_t *vdev =
+                (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
+
+        if (qdf_unlikely(!vdev)) {
+                ol_txrx_err("vdev is NULL");
+                return;
+        }
 
         qdf_spin_lock_bh(&vdev->ll_pause.mutex);
         qdf_timer_stop(&vdev->ll_pause.timer);
@@ -431,16 +411,8 @@ void ol_tx_vdev_ll_pause_queue_send(void *context)
         ol_tx_vdev_ll_pause_queue_send_base(vdev);
 }
 
-/**
- * ol_txrx_register_tx_flow_control() - register tx flow control callback
- * @vdev_id: vdev_id
- * @flowControl: flow control callback
- * @osif_fc_ctx: callback context
- * @flow_control_is_pause: is vdev paused by flow control
- *
- * Return: 0 for success or error code
- */
-int ol_txrx_register_tx_flow_control(uint8_t vdev_id,
+int ol_txrx_register_tx_flow_control(struct cdp_soc_t *soc_hdl,
+                                     uint8_t vdev_id,
                                      ol_txrx_tx_flow_control_fp flowControl,
                                      void *osif_fc_ctx,
                                      ol_txrx_tx_flow_control_is_pause_fp
@@ -463,14 +435,8 @@ int ol_txrx_register_tx_flow_control(uint8_t vdev_id,
         return 0;
 }
 
-/**
- * ol_txrx_de_register_tx_flow_control_cb() - deregister tx flow control
- * callback
- * @vdev_id: vdev_id
- *
- * Return: 0 for success or error code
- */
-int ol_txrx_deregister_tx_flow_control_cb(uint8_t vdev_id)
+int ol_txrx_deregister_tx_flow_control_cb(struct cdp_soc_t *soc_hdl,
+                                          uint8_t vdev_id)
 {
         struct ol_txrx_vdev_t *vdev =
                 (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
@@ -491,7 +457,8 @@ int ol_txrx_deregister_tx_flow_control_cb(uint8_t vdev_id)
 
 /**
  * ol_txrx_get_tx_resource() - if tx resource less than low_watermark
- * @pdev: datapath pdev instance
+ * @soc_hdl: soc handle
+ * @pdev_id: datapath pdev identifier
  * @peer_addr: peer mac address
 * @low_watermark: low watermark
 * @high_watermark_offset: high watermark offset value
@@ -499,14 +466,23 @@ int ol_txrx_deregister_tx_flow_control_cb(uint8_t vdev_id)
  * Return: true/false
  */
 bool
-ol_txrx_get_tx_resource(struct cdp_pdev *pdev,
+ol_txrx_get_tx_resource(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
                         struct qdf_mac_addr peer_addr,
                         unsigned int low_watermark,
                         unsigned int high_watermark_offset)
 {
-        ol_txrx_vdev_handle vdev = ol_txrx_get_vdev_by_peer_addr(pdev,
-                                                                 peer_addr);
+        struct ol_txrx_soc_t *soc = cdp_soc_t_to_ol_txrx_soc_t(soc_hdl);
+        ol_txrx_pdev_handle pdev =
+                ol_txrx_get_pdev_from_pdev_id(soc, pdev_id);
+        ol_txrx_vdev_handle vdev;
+
+        if (qdf_unlikely(!pdev)) {
+                ol_txrx_err("pdev is NULL");
+                return true;
+        }
+
+        vdev = ol_txrx_get_vdev_by_peer_addr(ol_txrx_pdev_t_to_cdp_pdev(pdev),
+                                             peer_addr);
         if (!vdev) {
                 QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
                           "%s: Invalid peer address: " QDF_MAC_ADDR_STR,
@@ -535,14 +511,8 @@ ol_txrx_get_tx_resource(struct cdp_pdev *pdev,
         return true;
 }
 
-/**
- * ol_txrx_ll_set_tx_pause_q_depth() - set pause queue depth
- * @vdev_id: vdev id
- * @pause_q_depth: pause queue depth
- *
- * Return: 0 for success or error code
- */
-int ol_txrx_ll_set_tx_pause_q_depth(uint8_t vdev_id, int pause_q_depth)
+int ol_txrx_ll_set_tx_pause_q_depth(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
+                                    int pause_q_depth)
 {
         struct ol_txrx_vdev_t *vdev =
                 (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
@@ -560,9 +530,16 @@ int ol_txrx_ll_set_tx_pause_q_depth(uint8_t vdev_id, int pause_q_depth)
         return 0;
 }
 
-void ol_txrx_flow_control_cb(struct cdp_vdev *pvdev, bool tx_resume)
+void ol_txrx_flow_control_cb(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
+                             bool tx_resume)
 {
-        struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
+        struct ol_txrx_vdev_t *vdev =
+                (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
+
+        if (qdf_unlikely(!vdev)) {
+                ol_txrx_err("vdev is NULL");
+                return;
+        }
 
         qdf_spin_lock_bh(&vdev->flow_control_lock);
         if ((vdev->osif_flow_control_cb) && (vdev->osif_fc_ctx))
@@ -596,6 +573,7 @@ static bool ol_txrx_flow_control_is_pause(ol_txrx_vdev_handle vdev)
 void ol_tx_flow_ct_unpause_os_q(ol_txrx_pdev_handle pdev)
 {
         struct ol_txrx_vdev_t *vdev;
+        struct cdp_soc_t *soc_hdl = ol_txrx_soc_t_to_cdp_soc_t(pdev->soc);
 
         TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
                 if ((qdf_atomic_read(&vdev->os_q_paused) &&
@@ -605,8 +583,8 @@ void ol_tx_flow_ct_unpause_os_q(ol_txrx_pdev_handle pdev)
                 if (pdev->tx_desc.num_free > vdev->tx_fl_hwm) {
                         qdf_atomic_set(&vdev->os_q_paused, 0);
                         qdf_spin_unlock(&pdev->tx_mutex);
-                        ol_txrx_flow_control_cb((struct cdp_vdev *)vdev,
-                                                true);
+                        ol_txrx_flow_control_cb(soc_hdl,
                                                 vdev->vdev_id, true);
                 } else {
                         qdf_spin_unlock(&pdev->tx_mutex);
                 }
@@ -1934,7 +1934,6 @@ static void hdd_set_peer_authorized_event(uint32_t vdev_id)
 static inline
 void hdd_set_unpause_queue(void *soc, struct hdd_adapter *adapter, void *peer)
 {
-        void *vdev;
         unsigned long rc;
         /* wait for event from firmware to set the event */
         rc = wait_for_completion_timeout(
@@ -1943,8 +1942,7 @@ void hdd_set_unpause_queue(void *soc, struct hdd_adapter *adapter, void *peer)
         if (!rc)
                 hdd_debug("timeout waiting for sta_authorized_event");
 
-        vdev = (void *)cdp_peer_get_vdev(soc, peer);
-        cdp_fc_vdev_unpause(soc, (struct cdp_vdev *)vdev,
+        cdp_fc_vdev_unpause(soc, adapter->vdev_id,
                             OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED,
                             0);
 }
@@ -2352,7 +2352,7 @@ bool hdd_dfs_indicate_radar(struct hdd_context *hdd_ctx)
                         if (adapter->txrx_vdev)
                                 cdp_fc_vdev_flush(
                                         cds_get_context(QDF_MODULE_ID_SOC),
-                                        adapter->txrx_vdev);
+                                        adapter->vdev_id);
                 }
         }
 
@@ -60,6 +60,7 @@
 
 #include "wlan_hdd_nud_tracking.h"
 #include "dp_txrx.h"
+#include <ol_defines.h>
 #include "cfg_ucfg_api.h"
 #include "target_type.h"
 #include "wlan_hdd_object_manager.h"
@@ -358,7 +359,7 @@ void hdd_get_tx_resource(struct hdd_adapter *adapter,
 {
         if (false ==
             cdp_fc_get_tx_resource(cds_get_context(QDF_MODULE_ID_SOC),
-                                   cds_get_context(QDF_MODULE_ID_TXRX),
+                                   OL_TXRX_PDEV_ID,
                                    *mac_addr,
                                    adapter->tx_flow_low_watermark,
                                    adapter->tx_flow_hi_watermark_offset)) {
@@ -1451,15 +1451,11 @@ static void wma_process_vdev_tx_pause_evt(void *soc,
                                           wmi_tx_pause_event_fixed_param *event,
                                           uint8_t vdev_id)
 {
-        struct cdp_vdev *dp_handle =
-                wlan_vdev_get_dp_handle(wma->interfaces[vdev_id].vdev);
-
         /* PAUSE action, add bitmap */
         if (event->action == ACTION_PAUSE) {
                 /* Exclude TDLS_OFFCHAN_CHOP from vdev based pauses */
                 if (event->pause_type == PAUSE_TYPE_CHOP_TDLS_OFFCHAN) {
-                        cdp_fc_vdev_pause(soc,
-                                          dp_handle,
+                        cdp_fc_vdev_pause(soc, vdev_id,
                                           OL_TXQ_PAUSE_REASON_FW,
                                           event->pause_type);
                 } else {
@@ -1468,8 +1464,7 @@ static void wma_process_vdev_tx_pause_evt(void *soc,
                          * necessary to pause a paused queue again.
                          */
                         if (!wma_vdev_get_pause_bitmap(vdev_id))
-                                cdp_fc_vdev_pause(soc,
-                                                  dp_handle,
+                                cdp_fc_vdev_pause(soc, vdev_id,
                                                   OL_TXQ_PAUSE_REASON_FW,
                                                   event->pause_type);
 
@@ -1481,8 +1476,7 @@ static void wma_process_vdev_tx_pause_evt(void *soc,
         else if (event->action == ACTION_UNPAUSE) {
                 /* Exclude TDLS_OFFCHAN_CHOP from vdev based pauses */
                 if (event->pause_type == PAUSE_TYPE_CHOP_TDLS_OFFCHAN) {
-                        cdp_fc_vdev_unpause(soc,
-                                            dp_handle,
+                        cdp_fc_vdev_unpause(soc, vdev_id,
                                             OL_TXQ_PAUSE_REASON_FW,
                                             event->pause_type);
                 } else {
@@ -1497,7 +1491,7 @@ static void wma_process_vdev_tx_pause_evt(void *soc,
                                 /* PAUSE BIT MAP is cleared
                                  * UNPAUSE VDEV
                                  */
-                                cdp_fc_vdev_unpause(soc, dp_handle,
+                                cdp_fc_vdev_unpause(soc, vdev_id,
                                                     OL_TXQ_PAUSE_REASON_FW,
                                                     event->pause_type);
                         }
@@ -3074,7 +3068,6 @@ void wma_tx_abort(uint8_t vdev_id)
         uint32_t peer_tid_bitmap = PEER_ALL_TID_BITMASK;
         struct wma_txrx_node *iface;
         uint8_t *bssid;
-        struct cdp_vdev *handle;
         struct peer_flush_params param = {0};
 
         wma = cds_get_context(QDF_MODULE_ID_WMA);
@@ -3086,11 +3079,7 @@ void wma_tx_abort(uint8_t vdev_id)
                 WMA_LOGE("%s: iface->vdev is NULL", __func__);
                 return;
         }
-        handle = wlan_vdev_get_dp_handle(iface->vdev);
-        if (!handle) {
-                WMA_LOGE("%s: Failed to get dp handle", __func__);
-                return;
-        }
+
         bssid = wma_get_vdev_bssid(iface->vdev);
         if (!bssid) {
                 WMA_LOGE("%s: Failed to get bssid for vdev_%d",
@@ -3100,10 +3089,8 @@ void wma_tx_abort(uint8_t vdev_id)
 
         WMA_LOGD("%s: vdevid %d bssid %pM", __func__, vdev_id, bssid);
         wma_vdev_set_pause_bit(vdev_id, PAUSE_TYPE_HOST);
-        cdp_fc_vdev_pause(cds_get_context(QDF_MODULE_ID_SOC),
-                          handle,
-                          OL_TXQ_PAUSE_REASON_TX_ABORT,
-                          0);
+        cdp_fc_vdev_pause(cds_get_context(QDF_MODULE_ID_SOC), vdev_id,
+                          OL_TXQ_PAUSE_REASON_TX_ABORT, 0);
 
         /* Flush all TIDs except MGMT TID for this peer in Target */
         peer_tid_bitmap &= ~(0x1 << WMI_MGMT_TID);
@@ -2150,7 +2150,6 @@ void wma_send_del_bss_response(tp_wma_handle wma, struct del_bss_resp *resp)
         struct wma_txrx_node *iface;
         struct beacon_info *bcn;
         uint8_t vdev_id;
-        struct cdp_vdev *handle;
         void *soc = cds_get_context(QDF_MODULE_ID_SOC);
 
         if (!resp) {
@@ -2168,19 +2167,11 @@ void wma_send_del_bss_response(tp_wma_handle wma, struct del_bss_resp *resp)
                 qdf_mem_free(resp);
                 return;
         }
-        handle = wlan_vdev_get_dp_handle(iface->vdev);
-        if (!handle) {
-                WMA_LOGE("%s: Failed to get dp handle for vdev id %d",
-                         __func__, vdev_id);
-                if (resp)
-                        qdf_mem_free(resp);
-                return;
-        }
 
-        cdp_fc_vdev_flush(soc, handle);
+        cdp_fc_vdev_flush(soc, vdev_id);
         WMA_LOGD("%s, vdev_id: %d, un-pausing tx_ll_queue for VDEV_STOP rsp",
                  __func__, vdev_id);
-        cdp_fc_vdev_unpause(soc, handle, OL_TXQ_PAUSE_REASON_VDEV_STOP, 0);
+        cdp_fc_vdev_unpause(soc, vdev_id, OL_TXQ_PAUSE_REASON_VDEV_STOP, 0);
         wma_vdev_clear_pause_bit(vdev_id, PAUSE_TYPE_HOST);
         qdf_atomic_set(&iface->bss_status, WMA_BSS_STATUS_STOPPED);
         WMA_LOGD("%s: (type %d subtype %d) BSS is stopped",
@@ -2869,7 +2860,6 @@ QDF_STATUS wma_vdev_pre_start(uint8_t vdev_id, bool restart)
         struct vdev_mlme_obj *mlme_obj;
         struct wlan_objmgr_vdev *vdev = intr[vdev_id].vdev;
         struct wlan_channel *des_chan;
-        void *dp_handle;
 
         mlme_obj = wlan_vdev_mlme_get_cmpt_obj(vdev);
         if (!mlme_obj) {
@@ -2935,10 +2925,8 @@ QDF_STATUS wma_vdev_pre_start(uint8_t vdev_id, bool restart)
                 WMA_LOGD("%s, vdev_id: %d, unpausing tx_ll_queue at VDEV_START",
                          __func__, vdev_id);
 
-                dp_handle =
-                        wlan_vdev_get_dp_handle(wma->interfaces[vdev_id].vdev);
                 cdp_fc_vdev_unpause(cds_get_context(QDF_MODULE_ID_SOC),
-                                    dp_handle, 0xffffffff, 0);
+                                    vdev_id, 0xffffffff, 0);
                 wma_vdev_update_pause_bitmap(vdev_id, 0);
         }
 
@@ -3708,15 +3696,10 @@ void wma_add_bss_lfr3(tp_wma_handle wma, struct bss_params *add_bss)
 static
 QDF_STATUS wma_set_cdp_vdev_pause_reason(tp_wma_handle wma, uint8_t vdev_id)
 {
-        struct cdp_vdev *vdev;
         void *soc = cds_get_context(QDF_MODULE_ID_SOC);
 
-        vdev = wma_find_vdev_by_id(wma, vdev_id);
-        if (!vdev) {
-                WMA_LOGE("%s Invalid txrx vdev", __func__);
-                return QDF_STATUS_E_INVAL;
-        }
-        cdp_fc_vdev_pause(soc, vdev, OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED, 0);
+        cdp_fc_vdev_pause(soc, vdev_id,
+                          OL_TXQ_PAUSE_REASON_PEER_UNAUTHORIZED, 0);
 
         return QDF_STATUS_SUCCESS;
 }
@@ -4959,7 +4942,6 @@ void wma_delete_bss_ho_fail(tp_wma_handle wma, uint8_t vdev_id)
 {
         struct cdp_pdev *pdev;
         QDF_STATUS status = QDF_STATUS_SUCCESS;
-        struct cdp_vdev *txrx_vdev = NULL, *handle;
         struct wma_txrx_node *iface;
         void *soc = cds_get_context(QDF_MODULE_ID_SOC);
         struct vdev_stop_response resp_event;
@@ -4974,13 +4956,7 @@ void wma_delete_bss_ho_fail(tp_wma_handle wma, uint8_t vdev_id)
         }
 
         iface = &wma->interfaces[vdev_id];
-
-        handle = wlan_vdev_get_dp_handle(iface->vdev);
-        if (!iface || !handle) {
-                if (!iface) {
-                        WMA_LOGE("%s vdev id %d is already deleted",
-                                 __func__, vdev_id);
-                        goto fail_del_bss_ho_fail;
+        if (!iface->vdev) {
+                WMA_LOGE("%s: vdev is NULL for vdev_%d", __func__, vdev_id);
+                goto fail_del_bss_ho_fail;
+        }
@@ -4993,13 +4969,6 @@ void wma_delete_bss_ho_fail(tp_wma_handle wma, uint8_t vdev_id)
         }
         qdf_mem_zero(bssid, QDF_MAC_ADDR_SIZE);
 
-        txrx_vdev = wma_find_vdev_by_id(wma, vdev_id);
-        if (!txrx_vdev) {
-                WMA_LOGE("%s:Invalid vdev handle", __func__);
-                status = QDF_STATUS_E_FAILURE;
-                goto fail_del_bss_ho_fail;
-        }
-
         if (iface->psnr_req) {
                 qdf_mem_free(iface->psnr_req);
                 iface->psnr_req = NULL;
@@ -5022,12 +4991,12 @@ void wma_delete_bss_ho_fail(tp_wma_handle wma, uint8_t vdev_id)
 
         WMA_LOGD("%s, vdev_id: %d, pausing tx_ll_queue for VDEV_STOP (del_bss)",
                  __func__, vdev_id);
-        cdp_fc_vdev_pause(soc, handle, OL_TXQ_PAUSE_REASON_VDEV_STOP, 0);
+        cdp_fc_vdev_pause(soc, vdev_id, OL_TXQ_PAUSE_REASON_VDEV_STOP, 0);
         wma_vdev_set_pause_bit(vdev_id, PAUSE_TYPE_HOST);
-        cdp_fc_vdev_flush(soc, handle);
+        cdp_fc_vdev_flush(soc, vdev_id);
         WMA_LOGD("%s, vdev_id: %d, un-pausing tx_ll_queue for VDEV_STOP rsp",
                  __func__, vdev_id);
-        cdp_fc_vdev_unpause(soc, handle, OL_TXQ_PAUSE_REASON_VDEV_STOP, 0);
+        cdp_fc_vdev_unpause(soc, vdev_id, OL_TXQ_PAUSE_REASON_VDEV_STOP, 0);
         wma_vdev_clear_pause_bit(vdev_id, PAUSE_TYPE_HOST);
         qdf_atomic_set(&iface->bss_status, WMA_BSS_STATUS_STOPPED);
         WMA_LOGD("%s: (type %d subtype %d) BSS is stopped",
@@ -5230,8 +5199,7 @@ void wma_delete_bss(tp_wma_handle wma, uint8_t vdev_id)
         WMA_LOGD("%s, vdev_id: %d, pausing tx_ll_queue for VDEV_STOP (del_bss)",
                  __func__, vdev_id);
         wma_vdev_set_pause_bit(vdev_id, PAUSE_TYPE_HOST);
-        cdp_fc_vdev_pause(soc,
-                          wlan_vdev_get_dp_handle(iface->vdev),
+        cdp_fc_vdev_pause(soc, vdev_id,
                           OL_TXQ_PAUSE_REASON_VDEV_STOP, 0);
 
         if (wma_send_vdev_stop_to_fw(wma, vdev_id)) {
@@ -5444,8 +5412,7 @@ QDF_STATUS wma_send_vdev_stop(uint8_t vdev_id)
 
         WMA_LOGD("%s, vdev_id: %d, pausing tx_ll_queue for VDEV_STOP",
                  __func__, vdev_id);
-        cdp_fc_vdev_pause
-                (soc, wlan_vdev_get_dp_handle(wma->interfaces[vdev_id].vdev),
+        cdp_fc_vdev_pause(soc, vdev_id,
                           OL_TXQ_PAUSE_REASON_VDEV_STOP, 0);
 
         status = mlme_set_vdev_stop_type(
@@ -4363,10 +4363,8 @@ QDF_STATUS wma_stop(void)
                 if (!vdev)
                         continue;
                 if (wlan_vdev_get_dp_handle(vdev) && wma_is_vdev_up(i)) {
-                        cdp_fc_vdev_flush
-                                (cds_get_context(QDF_MODULE_ID_SOC),
-                                 wlan_vdev_get_dp_handle
-                                        (wma_handle->interfaces[i].vdev));
+                        cdp_fc_vdev_flush(cds_get_context(QDF_MODULE_ID_SOC),
+                                          i);
                 }
         }