qcacmn: Restrict lock hold time in tx path

Restrict the lock hold time in the transmit path to cover only the
actual ring access. Also store lmac_id in the vdev to avoid multiple
levels of pointer indirection per packet, and rearrange data structure
fields for better cache alignment on data path accesses.

Change-Id: I1d32880ed88be486171be46281ec180d2a4906bf
Author:       Radha Krishna Simha Jiguru
Date:         2020-03-23 11:27:59 +05:30
Committed by: nshrivas
Parent:       2b0ff06748
Commit:       d78c96c67c
4 changed files with 129 additions and 141 deletions
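The central change is in dp_tx_hw_enqueue below: the HW descriptor is
now built completely in a stack-local cache before the SRNG lock is
taken, shrinking the critical section to slot reservation plus one
copy. A minimal, self-contained sketch of that pattern, using
hypothetical names rather than the actual qcacmn API:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DESC_WORDS   8
#define RING_ENTRIES 4

struct ring {
    pthread_mutex_t lock;   /* guards head/count only */
    uint32_t entries[RING_ENTRIES][DESC_WORDS];
    unsigned int head;
    unsigned int count;
};

/* Hot path: the lock covers only slot reservation plus one memcpy. */
static int ring_enqueue(struct ring *r, const uint32_t desc[DESC_WORDS])
{
    pthread_mutex_lock(&r->lock);
    if (r->count == RING_ENTRIES) {
        pthread_mutex_unlock(&r->lock);
        return -1;          /* ring full */
    }
    memcpy(r->entries[r->head], desc, DESC_WORDS * sizeof(uint32_t));
    r->head = (r->head + 1) % RING_ENTRIES;
    r->count++;
    pthread_mutex_unlock(&r->lock);
    return 0;
}

int main(void)
{
    struct ring r = { .lock = PTHREAD_MUTEX_INITIALIZER };
    uint32_t cached[DESC_WORDS] = { 0 };

    /* Descriptor preparation: many field writes, all done lock-free.
     * This is the work the commit moves out of the critical section. */
    cached[0] = 0xabcd0001; /* e.g. buffer address bits */
    cached[1] = 0x00000042; /* e.g. length/offset bits  */

    printf("enqueue: %s\n", ring_enqueue(&r, cached) ? "full" : "ok");
    return 0;
}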


@@ -1278,7 +1278,6 @@ dp_srng_configure_interrupt_thresholds(struct dp_soc *soc,
}
ring_params->low_threshold =
soc->wlan_srng_cfg[ring_type].low_threshold;
if (ring_params->low_threshold)
ring_params->flags |= HAL_SRNG_LOW_THRES_INTR_ENABLE;
}
@@ -5196,6 +5195,7 @@ static QDF_STATUS dp_vdev_attach_wifi3(struct cdp_soc_t *cdp_soc,
#ifdef notyet
vdev->filters_num = 0;
#endif
vdev->lmac_id = pdev->lmac_id;
qdf_mem_copy(
&vdev->mac_addr.raw[0], vdev_mac_addr, QDF_MAC_ADDR_SIZE);
@@ -9538,6 +9538,7 @@ dp_soc_handle_pdev_mode_change
TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
HTT_TX_TCL_METADATA_PDEV_ID_SET(vdev->htt_tcl_metadata,
hw_pdev_id);
vdev->lmac_id = pdev->lmac_id;
}
qdf_spin_unlock_bh(&pdev->vdev_list_lock);
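Caching lmac_id in the vdev trades one extra field for one fewer
pointer dereference on every packet; the dp_soc_handle_pdev_mode_change
hunk above shows the price: every cached copy must be refreshed when
the pdev value changes. A toy illustration with simplified stand-ins
for the dp_* structures:

#include <stdio.h>

struct pdev { int lmac_id; };

struct vdev {
    struct pdev *pdev;
    int lmac_id;            /* cached copy of pdev->lmac_id */
};

/* Cold path: cache once at attach time. */
static void vdev_attach(struct vdev *vdev, struct pdev *pdev)
{
    vdev->pdev = pdev;
    vdev->lmac_id = pdev->lmac_id;
}

/* Whenever pdev->lmac_id changes, all cached copies must follow. */
static void pdev_mode_change(struct pdev *pdev, struct vdev *vdev,
                             int new_lmac_id)
{
    pdev->lmac_id = new_lmac_id;
    vdev->lmac_id = new_lmac_id;
}

int main(void)
{
    struct pdev p = { .lmac_id = 1 };
    struct vdev v;

    vdev_attach(&v, &p);
    /* Hot path reads v.lmac_id: one load, no pointer chase. */
    printf("lmac_id = %d\n", v.lmac_id);
    pdev_mode_change(&p, &v, 2);
    printf("lmac_id = %d\n", v.lmac_id);
    return 0;
}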


@@ -1109,7 +1109,8 @@ static void dp_tx_raw_prepare_unset(struct dp_soc *soc,
* Gets the next free TCL HW DMA descriptor and sets up required parameters
* from software Tx descriptor
*
* Return:
* Return: QDF_STATUS_SUCCESS: success
* QDF_STATUS_E_RESOURCES: Error return
*/
static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
struct dp_tx_desc_s *tx_desc, uint8_t tid,
@@ -1119,9 +1120,15 @@ static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
{
uint8_t type;
uint16_t length;
void *hal_tx_desc, *hal_tx_desc_cached;
void *hal_tx_desc;
uint32_t *hal_tx_desc_cached;
qdf_dma_addr_t dma_addr;
uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES];
/*
 * Initialize the array statically here to avoid the cost of a
 * function call into qdf_mem_set
 */
uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
enum cdp_sec_type sec_type = ((tx_exc_metadata &&
tx_exc_metadata->sec_type != CDP_INVALID_SEC_TYPE) ?
@@ -1130,9 +1137,14 @@ static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
/* Return Buffer Manager ID */
uint8_t bm_id = ring_id;
hal_ring_handle_t hal_ring_hdl = soc->tcl_data_ring[ring_id].hal_srng;
QDF_STATUS status = QDF_STATUS_E_RESOURCES;
if (!dp_tx_is_desc_id_valid(soc, tx_desc->id)) {
dp_err_rl("Invalid tx desc id:%d", tx_desc->id);
return QDF_STATUS_E_RESOURCES;
}
hal_tx_desc_cached = (void *) cached_desc;
qdf_mem_zero(hal_tx_desc_cached, HAL_TX_DESC_LEN_BYTES);
if (tx_desc->flags & DP_TX_DESC_FLAG_FRAG) {
length = HAL_TX_EXT_DESC_WITH_META_DATA;
@@ -1146,40 +1158,33 @@ static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
qdf_assert_always(dma_addr);
hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
hal_tx_desc_set_buf_addr(hal_tx_desc_cached,
hal_tx_desc_set_buf_addr(soc->hal_soc, hal_tx_desc_cached,
dma_addr, bm_id, tx_desc->id,
type, soc->hal_soc);
if (!dp_tx_is_desc_id_valid(soc, tx_desc->id))
return QDF_STATUS_E_RESOURCES;
hal_tx_desc_set_buf_length(hal_tx_desc_cached, length);
hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
type);
hal_tx_desc_set_lmac_id(soc->hal_soc, hal_tx_desc_cached,
vdev->pdev->lmac_id);
vdev->lmac_id);
hal_tx_desc_set_search_type(soc->hal_soc, hal_tx_desc_cached,
vdev->search_type);
hal_tx_desc_set_search_index(soc->hal_soc, hal_tx_desc_cached,
vdev->bss_ast_idx);
hal_tx_desc_set_dscp_tid_table_id(soc->hal_soc, hal_tx_desc_cached,
vdev->dscp_tid_map_id);
hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
sec_type_map[sec_type]);
hal_tx_desc_set_cache_set_num(soc->hal_soc, hal_tx_desc_cached,
(vdev->bss_ast_hash & 0xF));
dp_verbose_debug("length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
length, type, (uint64_t)dma_addr,
tx_desc->pkt_offset, tx_desc->id);
hal_tx_desc_set_fw_metadata(hal_tx_desc_cached, fw_metadata);
hal_tx_desc_set_buf_length(hal_tx_desc_cached, length);
hal_tx_desc_set_buf_offset(hal_tx_desc_cached, tx_desc->pkt_offset);
hal_tx_desc_set_encap_type(hal_tx_desc_cached, tx_desc->tx_encap_type);
hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
vdev->hal_desc_addr_search_flags);
if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW)
hal_tx_desc_set_to_fw(hal_tx_desc_cached, 1);
hal_tx_desc_set_addr_search_flags(hal_tx_desc_cached,
vdev->hal_desc_addr_search_flags);
/* verify checksum offload configuration*/
if ((wlan_cfg_get_checksum_offload(soc->wlan_cfg_ctx)) &&
((qdf_nbuf_get_tx_cksum(tx_desc->nbuf) == QDF_NBUF_TX_CKSUM_TCP_UDP)
@@ -1194,25 +1199,47 @@ static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
if (tx_desc->flags & DP_TX_DESC_FLAG_MESH)
hal_tx_desc_set_mesh_en(soc->hal_soc, hal_tx_desc_cached, 1);
tx_desc->timestamp = qdf_ktime_to_ms(qdf_ktime_get());
/* Sync cached descriptor with HW */
hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
if (!hal_tx_desc) {
dp_verbose_debug("length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
length, type, (uint64_t)dma_addr,
tx_desc->pkt_offset, tx_desc->id);
hal_ring_hdl = soc->tcl_data_ring[ring_id].hal_srng;
if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_ring_hdl))) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
"%s %d : HAL RING Access Failed -- %pK",
__func__, __LINE__, hal_ring_hdl);
DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
return status;
}
/* Sync cached descriptor with HW */
hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
if (qdf_unlikely(!hal_tx_desc)) {
dp_verbose_debug("TCL ring full ring_id:%d", ring_id);
DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
return QDF_STATUS_E_RESOURCES;
goto ring_access_fail;
}
tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, tx_desc->nbuf);
hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
DP_STATS_INC_PKT(vdev, tx_i.processed, 1, length);
status = QDF_STATUS_SUCCESS;
return QDF_STATUS_SUCCESS;
ring_access_fail:
if (hif_pm_runtime_get(soc->hif_handle) == 0) {
hal_srng_access_end(soc->hal_soc, hal_ring_hdl);
hif_pm_runtime_put(soc->hif_handle);
} else {
hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl);
}
return status;
}
@@ -1590,8 +1617,6 @@ dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
struct dp_tx_desc_s *tx_desc;
QDF_STATUS status;
struct dp_tx_queue *tx_q = &(msdu_info->tx_queue);
hal_ring_handle_t hal_ring_hdl =
soc->tcl_data_ring[tx_q->ring_id].hal_srng;
uint16_t htt_tcl_metadata = 0;
uint8_t tid = msdu_info->tid;
struct cdp_tid_tx_stats *tid_stats = NULL;
@@ -1619,20 +1644,6 @@ dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
dp_tx_update_tdls_flags(tx_desc);
if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_ring_hdl))) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
"%s %d : HAL RING Access Failed -- %pK",
__func__, __LINE__, hal_ring_hdl);
dp_tx_get_tid(vdev, nbuf, msdu_info);
tid_stats = &pdev->stats.tid_stats.
tid_tx_stats[tx_q->ring_id][tid];
tid_stats->swdrop_cnt[TX_HAL_RING_ACCESS_ERR]++;
DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
qdf_nbuf_unmap(vdev->osdev, nbuf, QDF_DMA_TO_DEVICE);
goto fail_return;
}
if (qdf_unlikely(peer_id == DP_INVALID_PEER)) {
htt_tcl_metadata = vdev->htt_tcl_metadata;
HTT_TX_TCL_METADATA_HOST_INSPECTED_SET(htt_tcl_metadata, 1);
@@ -1663,21 +1674,10 @@ dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
tid_stats->swdrop_cnt[TX_HW_ENQUEUE]++;
dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
qdf_nbuf_unmap(vdev->osdev, nbuf, QDF_DMA_TO_DEVICE);
goto fail_return;
return nbuf;
}
nbuf = NULL;
fail_return:
if (hif_pm_runtime_get(soc->hif_handle) == 0) {
hal_srng_access_end(soc->hal_soc, hal_ring_hdl);
hif_pm_runtime_put(soc->hif_handle);
} else {
hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl);
hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
hal_srng_inc_flush_cnt(hal_ring_hdl);
}
return nbuf;
}
@@ -1707,22 +1707,7 @@ qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
QDF_STATUS status;
uint16_t htt_tcl_metadata = 0;
struct dp_tx_queue *tx_q = &msdu_info->tx_queue;
hal_ring_handle_t hal_ring_hdl =
soc->tcl_data_ring[tx_q->ring_id].hal_srng;
struct cdp_tid_tx_stats *tid_stats = NULL;
if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_ring_hdl))) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
"%s %d : HAL RING Access Failed -- %pK",
__func__, __LINE__, hal_ring_hdl);
dp_tx_get_tid(vdev, nbuf, msdu_info);
tid_stats = &pdev->stats.tid_stats.
tid_tx_stats[tx_q->ring_id][msdu_info->tid];
tid_stats->swdrop_cnt[TX_HAL_RING_ACCESS_ERR]++;
DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
return nbuf;
}
if (qdf_unlikely(soc->cce_disable)) {
is_cce_classified = dp_cce_classify(vdev, nbuf);
if (is_cce_classified) {
@@ -1845,15 +1830,6 @@ qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
nbuf = NULL;
done:
if (hif_pm_runtime_get(soc->hif_handle) == 0) {
hal_srng_access_end(soc->hal_soc, hal_ring_hdl);
hif_pm_runtime_put(soc->hif_handle);
} else {
hal_srng_access_end_reap(soc->hal_soc, hal_ring_hdl);
hal_srng_set_event(hal_ring_hdl, HAL_SRNG_FLUSH_EVENT);
hal_srng_inc_flush_cnt(hal_ring_hdl);
}
return nbuf;
}
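With the ring access now opened and closed entirely inside
dp_tx_hw_enqueue, both callers above lose their
access_start/access_end brackets, and one ring_access_fail label
handles the runtime-PM-aware release on success and failure alike. A
compilable sketch of that exit shape, using stubs in place of the
HAL/HIF calls (the real hif_pm_runtime_get returns 0 when the bus is
usable):

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the HAL/HIF API; bodies are illustrative. */
static int  pm_runtime_get(void)  { return 0; }   /* 0 = bus awake */
static void pm_runtime_put(void)  { }
static bool srng_access_start(void)   { return true; }
static void *srng_src_get_next(void)  { static int slot; return &slot; }
static void srng_access_end(void)     { puts("access_end: head -> HW"); }
static void srng_access_end_reap(void){ puts("end_reap: HW write deferred"); }

static int hw_enqueue(void)
{
    int status = -1;                /* QDF_STATUS_E_RESOURCES */
    void *hw_desc;

    if (!srng_access_start())       /* ring lock taken here */
        return status;

    hw_desc = srng_src_get_next();
    if (!hw_desc)
        goto ring_access_fail;      /* ring full */

    /* ... sync the cached descriptor into hw_desc ... */
    status = 0;                     /* QDF_STATUS_SUCCESS */

ring_access_fail:
    if (pm_runtime_get() == 0) {
        srng_access_end();          /* bus awake: commit head now */
        pm_runtime_put();
    } else {
        srng_access_end_reap();     /* asleep: defer the HW write */
    }
    return status;                  /* lock released in either case */
}

int main(void)
{
    printf("status = %d\n", hw_enqueue());
    return 0;
}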


@@ -370,20 +370,20 @@ struct dp_tx_ext_desc_pool_s {
struct dp_tx_desc_s {
struct dp_tx_desc_s *next;
qdf_nbuf_t nbuf;
struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
uint32_t id;
struct dp_vdev *vdev;
struct dp_pdev *pdev;
uint8_t pool_id;
struct dp_tx_ext_desc_elem_s *msdu_ext_desc;
uint16_t flags;
struct hal_tx_desc_comp_s comp;
uint16_t tx_encap_type;
uint8_t frm_type;
uint8_t pkt_offset;
uint8_t pool_id;
void *me_buffer;
void *tso_desc;
void *tso_num_desc;
uint64_t timestamp;
struct hal_tx_desc_comp_s comp;
};
/**
@@ -1820,14 +1820,75 @@ struct dp_peer;
struct dp_vdev {
/* OS device abstraction */
qdf_device_t osdev;
/* physical device that is the parent of this virtual device */
struct dp_pdev *pdev;
/* VDEV operating mode */
enum wlan_op_mode opmode;
/* VDEV subtype */
enum wlan_op_subtype subtype;
/* Tx encapsulation type for this VAP */
enum htt_cmn_pkt_type tx_encap_type;
/* Rx Decapsulation type for this VAP */
enum htt_cmn_pkt_type rx_decap_type;
/* BSS peer */
struct dp_peer *vap_bss_peer;
/* WDS enabled */
bool wds_enabled;
/* MEC enabled */
bool mec_enabled;
/* WDS Aging timer period */
uint32_t wds_aging_timer_val;
/* NAWDS enabled */
bool nawds_enabled;
/* Multicast enhancement enabled */
uint8_t mcast_enhancement_en;
/* vdev_id - ID used to specify a particular vdev to the target */
uint8_t vdev_id;
/* Default HTT meta data for this VDEV */
/* TBD: check alignment constraints */
uint16_t htt_tcl_metadata;
/* Mesh mode vdev */
uint32_t mesh_vdev;
/* Mesh mode rx filter setting */
uint32_t mesh_rx_filter;
/* DSCP-TID mapping table ID */
uint8_t dscp_tid_map_id;
/* Address search type to be set in TX descriptor */
uint8_t search_type;
/* AST hash value for BSS peer in HW valid for STA VAP*/
uint16_t bss_ast_hash;
/* vdev lmac_id */
int lmac_id;
bool multipass_en;
/* Address search flags to be configured in HAL descriptor */
uint8_t hal_desc_addr_search_flags;
/* Handle to the OS shim SW's virtual device */
ol_osif_vdev_handle osif_vdev;
/* vdev_id - ID used to specify a particular vdev to the target */
uint8_t vdev_id;
/* Handle to the UMAC handle */
struct cdp_ctrl_objmgr_vdev *ctrl_vdev;
/* MAC address */
union dp_align_mac_addr mac_addr;
@@ -1902,49 +1963,6 @@ struct dp_vdev {
bool tdls_link_connected;
bool is_tdls_frame;
/* VDEV operating mode */
enum wlan_op_mode opmode;
/* VDEV subtype */
enum wlan_op_subtype subtype;
/* Tx encapsulation type for this VAP */
enum htt_cmn_pkt_type tx_encap_type;
/* Rx Decapsulation type for this VAP */
enum htt_cmn_pkt_type rx_decap_type;
/* BSS peer */
struct dp_peer *vap_bss_peer;
/* WDS enabled */
bool wds_enabled;
/* MEC enabled */
bool mec_enabled;
/* WDS Aging timer period */
uint32_t wds_aging_timer_val;
/* NAWDS enabled */
bool nawds_enabled;
/* Default HTT meta data for this VDEV */
/* TBD: check alignment constraints */
uint16_t htt_tcl_metadata;
/* Mesh mode vdev */
uint32_t mesh_vdev;
/* Mesh mode rx filter setting */
uint32_t mesh_rx_filter;
/* DSCP-TID mapping table ID */
uint8_t dscp_tid_map_id;
/* Multicast enhancement enabled */
uint8_t mcast_enhancement_en;
/* per vdev rx nbuf queue */
qdf_nbuf_queue_t rxq;
@@ -1960,8 +1978,6 @@ struct dp_vdev {
/* Is isolation mode enabled */
bool isolation_vdev;
/* Address search flags to be configured in HAL descriptor */
uint8_t hal_desc_addr_search_flags;
#ifdef QCA_LL_TX_FLOW_CONTROL_V2
struct dp_tx_desc_pool_s *pool;
#endif
@@ -1973,11 +1989,6 @@ struct dp_vdev {
/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
bool raw_mode_war;
/* Address search type to be set in TX descriptor */
uint8_t search_type;
/* AST hash value for BSS peer in HW valid for STA VAP*/
uint16_t bss_ast_hash;
/* AST hash index for BSS peer in HW valid for STA VAP*/
uint16_t bss_ast_idx;
@@ -1999,7 +2010,6 @@ struct dp_vdev {
/* Self Peer in STA mode */
struct dp_peer *vap_self_peer;
bool multipass_en;
#ifdef QCA_MULTIPASS_SUPPORT
uint16_t *iv_vlan_map;
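The dp_tx_desc_s and dp_vdev reshuffles in this file change no field,
only the ordering: members the per-packet path reads are grouped near
the front so they share cache lines, while setup-time and rare-feature
fields sink toward the end. A toy struct (not the real dp_tx_desc_s
layout) showing one way to check such grouping:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct txdesc {
    /* Hot: touched on every enqueue/completion, kept adjacent. */
    struct txdesc *next;
    void    *nbuf;
    void    *msdu_ext_desc;
    uint32_t id;
    uint16_t flags;
    uint16_t tx_encap_type;
    uint8_t  frm_type;
    uint8_t  pkt_offset;
    uint8_t  pool_id;
    /* Cold: setup/teardown and rare features from here down. */
    void    *me_buffer;
    void    *tso_desc;
    uint64_t timestamp;
};

int main(void)
{
    /* All hot fields fit in the first 64-byte cache line when they
     * are adjacent; interleaving them with cold fields can spread
     * them over two or three lines. */
    printf("hot span: %zu bytes, struct: %zu bytes\n",
           offsetof(struct txdesc, me_buffer), sizeof(struct txdesc));
    return 0;
}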


@@ -1055,9 +1055,10 @@ static inline void hal_tx_comp_get_status(void *desc, void *ts,
* Return: void
*/
static inline
void hal_tx_desc_set_buf_addr(void *desc, dma_addr_t paddr,
void hal_tx_desc_set_buf_addr(hal_soc_handle_t hal_soc_hdl, void *desc,
dma_addr_t paddr,
uint8_t pool_id, uint32_t desc_id,
uint8_t type, hal_soc_handle_t hal_soc_hdl)
uint8_t type)
{
struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
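This last hunk moves hal_soc_hdl from the trailing parameter to the
leading one, matching the other hal_tx_desc_set_* helpers in this diff,
which resolve the soc handle first and use it to qualify per-target
behavior. A sketch of the convention; the field packing below is
invented for illustration and is not the real TCL descriptor layout:

#include <stdint.h>
#include <stdio.h>

typedef void *hal_soc_handle_t;

struct hal_soc {
    int chip_id;    /* per-target data the handle resolves to */
};

/* Soc handle first, like the other hal_tx_desc_set_* helpers. */
static inline void desc_set_buf_addr(hal_soc_handle_t hal_soc_hdl,
                                     void *desc, uint64_t paddr,
                                     uint8_t pool_id, uint32_t desc_id,
                                     uint8_t type)
{
    struct hal_soc *hal_soc = (struct hal_soc *)hal_soc_hdl;
    uint32_t *d = (uint32_t *)desc;

    d[0] = (uint32_t)paddr;                      /* invented layout */
    d[1] = ((uint32_t)pool_id << 24) | (desc_id & 0xffffff);
    d[2] = type;
    (void)hal_soc; /* real code would pick per-chip shifts from it */
}

int main(void)
{
    uint32_t desc[8] = { 0 };
    struct hal_soc soc = { .chip_id = 1 };

    desc_set_buf_addr(&soc, desc, 0x1000, 0, 42, 0);
    printf("desc[1] = 0x%08x\n", desc[1]);
    return 0;
}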