qcacmn: Update tx ingress stats with xmit field

Convert the per-vdev tx ingress stats (tx_i) into per-xmit-type buckets and
plumb the packet xmit type (Link/MLD) through the tx ingress paths, so that
every ingress counter is updated in the bucket matching the frame's
transmit type.

Change-Id: I86ad9c59f49213e2968751bfc488eb2bdd95baa5
CRs-Fixed: 3561679
Aman Mehta
2023-07-22 00:33:04 +05:30
committed by Rahul Choudhary
parent cfbdba9b7e
commit 3768768424
5 files changed, 172 insertions(+), 89 deletions(-)
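The core of the change is that the per-vdev ingress counters move from a single tx_i block to one block per packet xmit type. Below is a minimal, self-contained sketch of that indexing, assuming an enum with the Link bucket at 0 and the MLD bucket at 1; only DP_XMIT_LINK and DP_XMIT_TOTAL appear literally in this diff, and the MLD enumerator, the array bound, and all struct and field names in the sketch are stand-ins rather than the driver's real definitions.

```c
#include <stdint.h>

/* Stand-in enum; DP_XMIT_LINK = 0 matches the "Link (0)/MLD (1)" note in
 * the dp_tx.h hunk further down, DP_XMIT_TOTAL is the aggregate selector
 * used by the stats readout paths.
 */
enum dp_pkt_xmit_type {
	DP_XMIT_LINK = 0,
	DP_XMIT_MLD = 1,
	DP_XMIT_TOTAL = 2,
};

/* Minimal imitation of the ingress counters; the real structure is
 * struct cdp_tx_ingress_stats and has many more fields.
 */
struct tx_ingress_sketch {
	uint64_t rcvd_num;
	uint64_t rcvd_bytes;
	uint64_t dropped_enqueue_fail;
};

struct vdev_stats_sketch {
	/* before this change: a single tx_i; after: one bucket per type */
	struct tx_ingress_sketch tx_i[2];
};

static inline void sketch_count_rcvd(struct vdev_stats_sketch *st,
				     enum dp_pkt_xmit_type xmit_type,
				     uint32_t pkt_len)
{
	st->tx_i[xmit_type].rcvd_num += 1;
	st->tx_i[xmit_type].rcvd_bytes += pkt_len;
}
```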

View file

@@ -256,6 +256,7 @@ void dp_tx_process_htt_completion_be(struct dp_soc *soc,
struct htt_soc *htt_handle;
uint8_t vdev_id;
uint16_t peer_id;
uint8_t xmit_type;
tx_status = HTT_TX_WBM_COMPLETION_V3_TX_STATUS_GET(htt_desc[0]);
htt_handle = (struct htt_soc *)soc->htt_handle;
@@ -413,7 +414,10 @@ void dp_tx_process_htt_completion_be(struct dp_soc *soc,
}
case HTT_TX_FW2WBM_TX_STATUS_VDEVID_MISMATCH:
{
DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
xmit_type = qdf_nbuf_get_vdev_xmit_type(tx_desc->nbuf);
DP_STATS_INC(vdev,
tx_i[xmit_type].dropped.fail_per_pkt_vdev_id_check,
1);
goto release_tx_desc;
}
default:
@@ -618,6 +622,8 @@ dp_tx_mlo_mcast_multipass_send(struct dp_vdev_be *be_vdev,
qdf_mem_zero(&msdu_info, sizeof(msdu_info));
dp_tx_get_queue(ptnr_vdev, nbuf_clone, &msdu_info.tx_queue);
msdu_info.gsn = be_vdev->mlo_dev_ctxt->seq_num;
msdu_info.xmit_type = qdf_nbuf_get_vdev_xmit_type(ptr->nbuf);
if (ptr->vlan_id == MULTIPASS_WITH_VLAN_ID) {
msdu_info.tid = HTT_TX_EXT_TID_INVALID;
@@ -791,9 +797,12 @@ dp_tx_mlo_mcast_pkt_send(struct dp_vdev_be *be_vdev,
qdf_mem_zero(&msdu_info, sizeof(msdu_info));
dp_tx_get_queue(ptnr_vdev, nbuf_clone, &msdu_info.tx_queue);
msdu_info.gsn = be_vdev->mlo_dev_ctxt->seq_num;
DP_STATS_INC(ptnr_vdev, tx_i.mlo_mcast.send_pkt_count, 1);
msdu_info.gsn = be_vdev->mlo_dev_ctxt->seq_num;
msdu_info.xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf_clone);
DP_STATS_INC(ptnr_vdev,
tx_i[msdu_info.xmit_type].mlo_mcast.send_pkt_count, 1);
nbuf_clone = dp_tx_send_msdu_single(
ptnr_vdev,
nbuf_clone,
@@ -801,7 +810,9 @@ dp_tx_mlo_mcast_pkt_send(struct dp_vdev_be *be_vdev,
DP_MLO_MCAST_REINJECT_PEER_ID,
NULL);
if (qdf_unlikely(nbuf_clone)) {
DP_STATS_INC(ptnr_vdev, tx_i.mlo_mcast.fail_pkt_count, 1);
DP_STATS_INC(ptnr_vdev,
tx_i[msdu_info.xmit_type].mlo_mcast.fail_pkt_count,
1);
dp_info("pkt send failed");
qdf_nbuf_free(nbuf_clone);
return;
@@ -1039,7 +1050,7 @@ void dp_ppeds_stats(struct dp_soc *soc, uint16_t peer_id)
DP_MOD_ID_TX_COMP);
if (txrx_peer) {
vdev = txrx_peer->vdev;
DP_STATS_INC(vdev, tx_i.dropped.fw2wbm_tx_drop, 1);
DP_STATS_INC(vdev, tx_i[DP_XMIT_LINK].dropped.fw2wbm_tx_drop, 1);
dp_txrx_peer_unref_delete(txrx_ref_handle, DP_MOD_ID_TX_COMP);
}
}
@@ -1350,7 +1361,9 @@ dp_tx_hw_enqueue_be(struct dp_soc *soc, struct dp_vdev *vdev,
if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
DP_STATS_INC(vdev,
tx_i[msdu_info->xmit_type].dropped.enqueue_fail,
1);
dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc);
return status;
}
@@ -1359,7 +1372,9 @@ dp_tx_hw_enqueue_be(struct dp_soc *soc, struct dp_vdev *vdev,
if (qdf_unlikely(!hal_tx_desc)) {
dp_verbose_debug("TCL ring full ring_id:%d", ring_id);
DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
DP_STATS_INC(vdev,
tx_i[msdu_info->xmit_type].dropped.enqueue_fail,
1);
dp_sawf_tx_enqueue_fail_peer_stats(soc, tx_desc);
goto ring_access_fail;
}
@@ -1373,7 +1388,8 @@ dp_tx_hw_enqueue_be(struct dp_soc *soc, struct dp_vdev *vdev,
coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid,
msdu_info, ring_id);
DP_STATS_INC_PKT(vdev, tx_i.processed, 1, dp_tx_get_pkt_len(tx_desc));
DP_STATS_INC_PKT(vdev, tx_i[msdu_info->xmit_type].processed, 1,
dp_tx_get_pkt_len(tx_desc));
DP_STATS_INC(soc, tx.tcl_enq[ring_id], 1);
dp_tx_update_stats(soc, tx_desc, ring_id);
status = QDF_STATUS_SUCCESS;
@@ -1829,6 +1845,7 @@ qdf_nbuf_t dp_tx_fast_send_be(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
uint32_t *hal_tx_desc_cached;
void *hal_tx_desc;
uint8_t tid = HTT_TX_EXT_TID_INVALID;
uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf);
if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
return nbuf;
@@ -1840,9 +1857,9 @@ qdf_nbuf_t dp_tx_fast_send_be(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
desc_pool_id = qdf_nbuf_get_queue_mapping(nbuf) & DP_TX_QUEUE_MASK;
pkt_len = qdf_nbuf_headlen(nbuf);
DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, pkt_len);
DP_STATS_INC(vdev, tx_i.rcvd_in_fast_xmit_flow, 1);
DP_STATS_INC(vdev, tx_i.rcvd_per_core[desc_pool_id], 1);
DP_STATS_INC_PKT(vdev, tx_i[xmit_type].rcvd, 1, pkt_len);
DP_STATS_INC(vdev, tx_i[xmit_type].rcvd_in_fast_xmit_flow, 1);
DP_STATS_INC(vdev, tx_i[xmit_type].rcvd_per_core[desc_pool_id], 1);
pdev = vdev->pdev;
if (dp_tx_limit_check(vdev, nbuf))
@@ -1859,8 +1876,10 @@ qdf_nbuf_t dp_tx_fast_send_be(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
if (qdf_unlikely(!tx_desc)) {
DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_alloc_fail.num, 1);
DP_STATS_INC(vdev, tx_i[xmit_type].dropped.desc_na.num, 1);
DP_STATS_INC(vdev,
tx_i[xmit_type].dropped.desc_na_exc_alloc_fail.num,
1);
return nbuf;
}
@@ -1885,7 +1904,7 @@ qdf_nbuf_t dp_tx_fast_send_be(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
if (!paddr) {
/* Handle failure */
dp_err("qdf_nbuf_map failed");
DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
DP_STATS_INC(vdev, tx_i[xmit_type].dropped.dma_error, 1);
goto release_desc;
}
@@ -1924,7 +1943,7 @@ qdf_nbuf_t dp_tx_fast_send_be(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
DP_STATS_INC(soc, tx.tcl_ring_full[desc_pool_id], 1);
DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
DP_STATS_INC(vdev, tx_i[xmit_type].dropped.enqueue_fail, 1);
goto ring_access_fail2;
}
@@ -1932,7 +1951,7 @@ qdf_nbuf_t dp_tx_fast_send_be(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
if (qdf_unlikely(!hal_tx_desc)) {
dp_verbose_debug("TCL ring full ring_id:%d", desc_pool_id);
DP_STATS_INC(soc, tx.tcl_ring_full[desc_pool_id], 1);
DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
DP_STATS_INC(vdev, tx_i[xmit_type].dropped.enqueue_fail, 1);
goto ring_access_fail;
}
@@ -1942,7 +1961,7 @@ qdf_nbuf_t dp_tx_fast_send_be(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
qdf_mem_copy(hal_tx_desc, hal_tx_desc_cached, DP_TX_FAST_DESC_SIZE);
qdf_dsb();
DP_STATS_INC_PKT(vdev, tx_i.processed, 1, tx_desc->length);
DP_STATS_INC_PKT(vdev, tx_i[xmit_type].processed, 1, tx_desc->length);
DP_STATS_INC(soc, tx.tcl_enq[desc_pool_id], 1);
status = QDF_STATUS_SUCCESS;
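Two sourcing patterns show up in the hunks above: paths that still hold the nbuf call qdf_nbuf_get_vdev_xmit_type() directly (fast send, HTT completion), while paths that already carry a dp_tx_msdu_info_s reuse msdu_info->xmit_type so the per-frame lookup happens only once. A hedged, self-contained sketch of that split follows; the accessor below is a stand-in, and how the real qdf_nbuf_get_vdev_xmit_type() stores the type in the buffer is not shown in this diff.

```c
#include <stdint.h>
#include <string.h>

/* Stand-ins for qdf_nbuf_t and dp_tx_msdu_info_s. */
struct sketch_nbuf {
	uint8_t vdev_xmit_type;	/* assumed per-buffer marking */
};

struct sketch_msdu_info {
	uint8_t xmit_type;	/* cached bucket index */
	uint8_t tid;
};

static inline uint8_t sketch_get_vdev_xmit_type(const struct sketch_nbuf *nbuf)
{
	return nbuf->vdev_xmit_type;
}

/* Entry-point pattern: derive the bucket once, stash it in msdu_info. */
static void sketch_tx_entry(const struct sketch_nbuf *nbuf,
			    struct sketch_msdu_info *msdu_info)
{
	memset(msdu_info, 0, sizeof(*msdu_info));
	msdu_info->xmit_type = sketch_get_vdev_xmit_type(nbuf);
	/* later stages (enqueue, descriptor prepare) index tx_i[] with
	 * msdu_info->xmit_type instead of re-reading the nbuf
	 */
}
```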

View file

@@ -6848,15 +6848,16 @@ bool dp_check_pdev_exists(struct dp_soc *soc, struct dp_pdev *data)
}
void dp_aggregate_vdev_stats(struct dp_vdev *vdev,
struct cdp_vdev_stats *vdev_stats)
struct cdp_vdev_stats *vdev_stats,
enum dp_pkt_xmit_type xmit_type)
{
if (!vdev || !vdev->pdev)
return;
dp_update_vdev_ingress_stats(vdev);
qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
dp_copy_vdev_stats_to_tgt_buf(vdev_stats,
&vdev->stats, xmit_type);
dp_vdev_iterate_peer(vdev, dp_update_vdev_stats, vdev_stats,
DP_MOD_ID_GENERIC_STATS);
@@ -6895,7 +6896,7 @@ void dp_aggregate_pdev_stats(struct dp_pdev *pdev)
qdf_spin_lock_bh(&pdev->vdev_list_lock);
TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
dp_aggregate_vdev_stats(vdev, vdev_stats);
dp_aggregate_vdev_stats(vdev, vdev_stats, DP_XMIT_TOTAL);
dp_update_pdev_stats(pdev, vdev_stats);
dp_update_pdev_ingress_stats(pdev, vdev);
}
@@ -6940,7 +6941,7 @@ static QDF_STATUS dp_vdev_getstats(struct cdp_vdev *vdev_handle,
return QDF_STATUS_E_FAILURE;
}
dp_aggregate_vdev_stats(vdev, vdev_stats);
dp_aggregate_vdev_stats(vdev, vdev_stats, DP_XMIT_LINK);
stats->tx_packets = vdev_stats->tx.comp_pkt.num;
stats->tx_bytes = vdev_stats->tx.comp_pkt.bytes;
@@ -9252,11 +9253,12 @@ dp_txrx_get_pdev_stats(struct cdp_soc_t *soc, uint8_t pdev_id,
* dp_txrx_update_vdev_me_stats() - Update vdev ME stats sent from CDP
* @vdev: DP vdev handle
* @buf: buffer containing specific stats structure
* @xmit_type: xmit type of packet - MLD/Link
*
* Return: void
*/
static void dp_txrx_update_vdev_me_stats(struct dp_vdev *vdev,
void *buf)
void *buf, uint8_t xmit_type)
{
struct cdp_tx_ingress_stats *host_stats = NULL;
@@ -9266,20 +9268,20 @@ static void dp_txrx_update_vdev_me_stats(struct dp_vdev *vdev,
}
host_stats = (struct cdp_tx_ingress_stats *)buf;
DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt,
DP_STATS_INC_PKT(vdev, tx_i[xmit_type].mcast_en.mcast_pkt,
host_stats->mcast_en.mcast_pkt.num,
host_stats->mcast_en.mcast_pkt.bytes);
DP_STATS_INC(vdev, tx_i.mcast_en.dropped_map_error,
DP_STATS_INC(vdev, tx_i[xmit_type].mcast_en.dropped_map_error,
host_stats->mcast_en.dropped_map_error);
DP_STATS_INC(vdev, tx_i.mcast_en.dropped_self_mac,
DP_STATS_INC(vdev, tx_i[xmit_type].mcast_en.dropped_self_mac,
host_stats->mcast_en.dropped_self_mac);
DP_STATS_INC(vdev, tx_i.mcast_en.dropped_send_fail,
DP_STATS_INC(vdev, tx_i[xmit_type].mcast_en.dropped_send_fail,
host_stats->mcast_en.dropped_send_fail);
DP_STATS_INC(vdev, tx_i.mcast_en.ucast,
DP_STATS_INC(vdev, tx_i[xmit_type].mcast_en.ucast,
host_stats->mcast_en.ucast);
DP_STATS_INC(vdev, tx_i.mcast_en.fail_seg_alloc,
DP_STATS_INC(vdev, tx_i[xmit_type].mcast_en.fail_seg_alloc,
host_stats->mcast_en.fail_seg_alloc);
DP_STATS_INC(vdev, tx_i.mcast_en.clone_fail,
DP_STATS_INC(vdev, tx_i[xmit_type].mcast_en.clone_fail,
host_stats->mcast_en.clone_fail);
}
@@ -9287,11 +9289,12 @@ static void dp_txrx_update_vdev_me_stats(struct dp_vdev *vdev,
* dp_txrx_update_vdev_igmp_me_stats() - Update vdev IGMP ME stats sent from CDP
* @vdev: DP vdev handle
* @buf: buffer containing specific stats structure
* @xmit_type: xmit type of packet - MLD/Link
*
* Return: void
*/
static void dp_txrx_update_vdev_igmp_me_stats(struct dp_vdev *vdev,
void *buf)
void *buf, uint8_t xmit_type)
{
struct cdp_tx_ingress_stats *host_stats = NULL;
@@ -9301,9 +9304,9 @@ static void dp_txrx_update_vdev_igmp_me_stats(struct dp_vdev *vdev,
}
host_stats = (struct cdp_tx_ingress_stats *)buf;
DP_STATS_INC(vdev, tx_i.igmp_mcast_en.igmp_rcvd,
DP_STATS_INC(vdev, tx_i[xmit_type].igmp_mcast_en.igmp_rcvd,
host_stats->igmp_mcast_en.igmp_rcvd);
DP_STATS_INC(vdev, tx_i.igmp_mcast_en.igmp_ucast_converted,
DP_STATS_INC(vdev, tx_i[xmit_type].igmp_mcast_en.igmp_ucast_converted,
host_stats->igmp_mcast_en.igmp_ucast_converted);
}
@@ -9313,13 +9316,15 @@ static void dp_txrx_update_vdev_igmp_me_stats(struct dp_vdev *vdev,
* @vdev_id: id of DP vdev handle
* @buf: buffer containing specific stats structure
* @stats_id: stats type
* @xmit_type: xmit type of packet - MLD/Link
*
* Return: QDF_STATUS
*/
static QDF_STATUS dp_txrx_update_vdev_host_stats(struct cdp_soc_t *soc_hdl,
uint8_t vdev_id,
void *buf,
uint16_t stats_id)
uint16_t stats_id,
uint8_t xmit_type)
{
struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
@@ -9334,8 +9339,8 @@ static QDF_STATUS dp_txrx_update_vdev_host_stats(struct cdp_soc_t *soc_hdl,
case DP_VDEV_STATS_PKT_CNT_ONLY:
break;
case DP_VDEV_STATS_TX_ME:
dp_txrx_update_vdev_me_stats(vdev, buf);
dp_txrx_update_vdev_igmp_me_stats(vdev, buf);
dp_txrx_update_vdev_me_stats(vdev, buf, xmit_type);
dp_txrx_update_vdev_igmp_me_stats(vdev, buf, xmit_type);
break;
default:
qdf_info("Invalid stats_id %d", stats_id);
@@ -9671,9 +9676,10 @@ dp_txrx_get_vdev_stats(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
vdev_stats = (struct cdp_vdev_stats *)buf;
if (is_aggregate) {
dp_aggregate_vdev_stats(vdev, buf);
dp_aggregate_vdev_stats(vdev, buf, DP_XMIT_LINK);
} else {
qdf_mem_copy(vdev_stats, &vdev->stats, sizeof(vdev->stats));
dp_copy_vdev_stats_to_tgt_buf(vdev_stats,
&vdev->stats, DP_XMIT_LINK);
}
dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_CDP);
@@ -12746,7 +12752,7 @@ static uint32_t dp_tx_get_success_ack_stats(struct cdp_soc_t *soc_hdl,
return 0;
}
dp_aggregate_vdev_stats(vdev, vdev_stats);
dp_aggregate_vdev_stats(vdev, vdev_stats, DP_XMIT_TOTAL);
tx_success = vdev_stats->tx.tx_success.num;
qdf_mem_free(vdev_stats);
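In the hunks above, each aggregation entry point now states which bucket it wants: pdev aggregation and the success-ack query pass DP_XMIT_TOTAL, while per-vdev readouts pass DP_XMIT_LINK, and dp_copy_vdev_stats_to_tgt_buf() (not part of this diff) performs the actual copy into the flat cdp_vdev_stats buffer. A purely illustrative guess at the bucket-selection logic, with stand-in types:

```c
#include <stdint.h>

enum dp_pkt_xmit_type { DP_XMIT_LINK, DP_XMIT_MLD, DP_XMIT_TOTAL };

struct tx_i_sketch {
	uint64_t rcvd;
	uint64_t processed;
};

/* Hypothetical bucket selection; the real dp_copy_vdev_stats_to_tgt_buf()
 * works on struct cdp_tx_ingress_stats and is not shown in this diff.
 */
static void sketch_copy_tx_ingress(struct tx_i_sketch *dst,
				   const struct tx_i_sketch src[2],
				   enum dp_pkt_xmit_type xmit_type)
{
	if (xmit_type == DP_XMIT_TOTAL) {
		/* aggregate view: fold both buckets */
		dst->rcvd = src[DP_XMIT_LINK].rcvd + src[DP_XMIT_MLD].rcvd;
		dst->processed = src[DP_XMIT_LINK].processed +
				 src[DP_XMIT_MLD].processed;
	} else {
		/* single-bucket view (link or MLD) */
		*dst = src[xmit_type];
	}
}
```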

View file

@@ -479,7 +479,7 @@ static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
* needed here.
*/
DP_STATS_INC(vdev,
tx_i.dropped.headroom_insufficient,
tx_i[msdu_info->xmit_type].dropped.headroom_insufficient,
1);
qdf_print(" %s[%d] skb_realloc_headroom failed",
__func__, __LINE__);
@@ -811,7 +811,8 @@ struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
qdf_mem_zero(&cached_ext_desc[0], HAL_TX_EXT_DESC_WITH_META_DATA);
if (!msdu_ext_desc) {
DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
DP_STATS_INC(vdev,
tx_i[msdu_info->xmit_type].dropped.desc_na.num, 1);
return NULL;
}
@@ -997,6 +998,7 @@ dp_tx_send_traffic_end_indication_pkt(struct dp_vdev *vdev,
e_msdu_info.tx_queue = msdu_info->tx_queue;
e_msdu_info.tid = msdu_info->tid;
e_msdu_info.exception_fw = 1;
e_msdu_info.xmit_type = msdu_info->xmit_type;
desc_ext->host_tx_desc_pool = 1;
desc_ext->traffic_end_indication = 1;
nbuf = dp_tx_send_msdu_single(vdev, end_nbuf, &e_msdu_info,
@@ -1174,6 +1176,7 @@ struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
struct dp_tx_desc_s *tx_desc;
struct dp_pdev *pdev = vdev->pdev;
struct dp_soc *soc = pdev->soc;
uint8_t xmit_type = msdu_info->xmit_type;
if (dp_tx_limit_check(vdev, nbuf))
return NULL;
@@ -1185,8 +1188,11 @@ struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
if (qdf_unlikely(!tx_desc)) {
DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_alloc_fail.num, 1);
DP_STATS_INC(vdev,
tx_i[xmit_type].dropped.desc_na.num, 1);
DP_STATS_INC(vdev,
tx_i[xmit_type].dropped.desc_na_exc_alloc_fail.num,
1);
return NULL;
}
@@ -1261,7 +1267,8 @@ struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
if (qdf_unlikely(qdf_nbuf_headroom(nbuf) < align_pad)) {
DP_STATS_INC(vdev,
tx_i.dropped.headroom_insufficient, 1);
tx_i[xmit_type].dropped.headroom_insufficient,
1);
goto failure;
}
@@ -1334,7 +1341,8 @@ static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
if (!tx_desc) {
DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
DP_STATS_INC(vdev,
tx_i[msdu_info->xmit_type].dropped.desc_na.num, 1);
return NULL;
}
dp_tx_tso_seg_history_add(soc, msdu_info->u.tso_info.curr_seg,
@@ -1413,11 +1421,14 @@ static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
struct dp_tx_sg_info_s *sg_info = &msdu_info->u.sg_info;
qdf_dot3_qosframe_t *qos_wh = (qdf_dot3_qosframe_t *) nbuf->data;
DP_STATS_INC_PKT(vdev, tx_i.raw.raw_pkt, 1, qdf_nbuf_len(nbuf));
DP_STATS_INC_PKT(vdev, tx_i[msdu_info->xmit_type].raw.raw_pkt,
1, qdf_nbuf_len(nbuf));
/* Continue only if frames are of DATA type */
if (!DP_FRAME_IS_DATA(qos_wh)) {
DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1);
DP_STATS_INC(vdev,
tx_i[msdu_info->xmit_type].raw.invalid_raw_pkt_datatype,
1);
dp_tx_debug("Pkt. recd is of not data type");
goto error;
}
@@ -1435,7 +1446,9 @@ static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
*/
if (i >= DP_TX_MAX_NUM_FRAGS) {
dp_err_rl("nbuf cnt exceeds the max number of segs");
DP_STATS_INC(vdev, tx_i.raw.num_frags_overflow_err, 1);
DP_STATS_INC(vdev,
tx_i[msdu_info->xmit_type].raw.num_frags_overflow_err,
1);
goto error;
}
if (QDF_STATUS_SUCCESS !=
@@ -1444,7 +1457,9 @@ static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
QDF_DMA_TO_DEVICE,
curr_nbuf->len)) {
dp_tx_err("%s dma map error ", __func__);
DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
DP_STATS_INC(vdev,
tx_i[msdu_info->xmit_type].raw.dma_map_error,
1);
goto error;
}
/* Update the count of mapped nbuf's */
@@ -2190,11 +2205,13 @@ static inline qdf_nbuf_t dp_mesh_tx_comp_free_buff(struct dp_soc *soc,
{
qdf_nbuf_t nbuf = tx_desc->nbuf;
struct dp_vdev *vdev = NULL;
uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf);
vdev = dp_vdev_get_ref_by_id(soc, tx_desc->vdev_id, DP_MOD_ID_MESH);
if (tx_desc->flags & DP_TX_DESC_FLAG_TO_FW) {
if (vdev)
DP_STATS_INC(vdev, tx_i.mesh.completion_fw, 1);
DP_STATS_INC(vdev,
tx_i[xmit_type].mesh.completion_fw, 1);
if (delayed_free)
return nbuf;
@@ -3051,7 +3068,8 @@ dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
if (!paddr) {
/* Handle failure */
dp_err("qdf_nbuf_map failed");
DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
DP_STATS_INC(vdev,
tx_i[msdu_info->xmit_type].dropped.dma_error, 1);
drop_code = TX_DMA_MAP_ERR;
goto release_desc;
}
@@ -3487,6 +3505,7 @@ static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
uint32_t cur_frag, nr_frags, i;
qdf_dma_addr_t paddr;
struct dp_tx_sg_info_s *sg_info;
uint8_t xmit_type = msdu_info->xmit_type;
sg_info = &msdu_info->u.sg_info;
nr_frags = qdf_nbuf_get_nr_frags(nbuf);
@@ -3496,7 +3515,8 @@ static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
QDF_DMA_TO_DEVICE,
qdf_nbuf_headlen(nbuf))) {
dp_tx_err("dma map error");
DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
DP_STATS_INC(vdev, tx_i[xmit_type].sg.dma_map_error,
1);
qdf_nbuf_free(nbuf);
return NULL;
}
@@ -3513,7 +3533,9 @@ static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
QDF_DMA_TO_DEVICE,
cur_frag)) {
dp_tx_err("frag dma map error");
DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
DP_STATS_INC(vdev,
tx_i[xmit_type].sg.dma_map_error,
1);
goto map_err;
}
@@ -3712,7 +3734,7 @@ static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
qdf_ether_header_t *eh;
uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf);
/* Mcast to Ucast Conversion*/
if (qdf_likely(!vdev->mcast_enhancement_en))
return true;
@@ -3723,7 +3745,7 @@ bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
dp_verbose_debug("Mcast frm for ME %pK", vdev);
qdf_nbuf_set_next(nbuf, NULL);
DP_STATS_INC_PKT(vdev, tx_i.mcast_en.mcast_pkt, 1,
DP_STATS_INC_PKT(vdev, tx_i[xmit_type].mcast_en.mcast_pkt, 1,
qdf_nbuf_len(nbuf));
if (dp_tx_prepare_send_me(vdev, nbuf) ==
QDF_STATUS_SUCCESS) {
@@ -3757,12 +3779,15 @@ bool dp_tx_mcast_enhance(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
*/
static inline bool dp_tx_mcast_drop(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf);
/* Drop tx mcast and WDS Extended feature check */
if (qdf_unlikely((vdev->drop_tx_mcast) && (vdev->wds_ext_enabled))) {
qdf_ether_header_t *eh = (qdf_ether_header_t *)
qdf_nbuf_data(nbuf);
if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
DP_STATS_INC(vdev, tx_i.dropped.tx_mcast_drop, 1);
DP_STATS_INC(vdev,
tx_i[xmit_type].dropped.tx_mcast_drop, 1);
return true;
}
}
@@ -3930,6 +3955,7 @@ dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
struct dp_tx_msdu_info_s msdu_info;
struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
DP_MOD_ID_TX_EXCEPTION);
uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf);
if (qdf_unlikely(!vdev))
goto fail;
@@ -3940,10 +3966,11 @@ dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
goto fail;
msdu_info.tid = tx_exc_metadata->tid;
msdu_info.xmit_type = xmit_type;
dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
QDF_MAC_ADDR_REF(nbuf->data));
DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
DP_STATS_INC_PKT(vdev, tx_i[xmit_type].rcvd, 1, qdf_nbuf_len(nbuf));
if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
dp_tx_err("Invalid parameters in exception path");
@@ -3959,8 +3986,8 @@ dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
DP_MOD_ID_TX_EXCEPTION);
if (qdf_unlikely(!peer)) {
DP_STATS_INC(vdev,
tx_i.dropped.invalid_peer_id_in_exc_path,
1);
tx_i[xmit_type].dropped.invalid_peer_id_in_exc_path,
1);
goto fail;
}
dp_peer_unref_delete(peer, DP_MOD_ID_TX_EXCEPTION);
@@ -3991,7 +4018,8 @@ dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
goto fail;
}
DP_STATS_INC(vdev, tx_i.rcvd.num, msdu_info.num_seg - 1);
DP_STATS_INC(vdev,
tx_i[xmit_type].rcvd.num, msdu_info.num_seg - 1);
goto send_multiple;
}
@@ -4006,14 +4034,14 @@ dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
dp_verbose_debug("non-TSO SG frame %pK", vdev);
DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
DP_STATS_INC_PKT(vdev, tx_i[xmit_type].sg.sg_pkt, 1,
qdf_nbuf_len(nbuf));
goto send_multiple;
}
if (qdf_likely(tx_exc_metadata->is_tx_sniffer)) {
DP_STATS_INC_PKT(vdev, tx_i.sniffer_rcvd, 1,
DP_STATS_INC_PKT(vdev, tx_i[xmit_type].sniffer_rcvd, 1,
qdf_nbuf_len(nbuf));
dp_tx_add_tx_sniffer_meta_data(vdev, &msdu_info,
@@ -4029,7 +4057,8 @@ dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
* to minimize lock contention for these resources.
*/
dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
DP_STATS_INC(vdev, tx_i.rcvd_per_core[msdu_info.tx_queue.desc_pool_id],
DP_STATS_INC(vdev,
tx_i[xmit_type].rcvd_per_core[msdu_info.tx_queue.desc_pool_id],
1);
/*
@@ -4052,7 +4081,7 @@ dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
*/
dp_tx_nawds_handler(soc, vdev, &msdu_info, nbuf,
tx_exc_metadata->peer_id);
DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
DP_STATS_INC_PKT(vdev, tx_i[xmit_type].nawds_mcast,
1, qdf_nbuf_len(nbuf));
}
@@ -4062,7 +4091,7 @@ dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
/*
* Check exception descriptors
*/
if (dp_tx_exception_limit_check(vdev))
if (dp_tx_exception_limit_check(vdev, xmit_type))
goto fail;
/* Single linear frame */
@@ -4097,13 +4126,16 @@ dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc_hdl,
struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
DP_MOD_ID_TX_EXCEPTION);
uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf);
if (qdf_unlikely(!vdev))
goto fail;
if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
== QDF_STATUS_E_FAILURE)) {
DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
DP_STATS_INC(vdev,
tx_i[xmit_type].dropped.fail_per_pkt_vdev_id_check,
1);
goto fail;
}
@@ -4129,6 +4161,7 @@ qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
qdf_nbuf_t nbuf_clone = NULL;
struct dp_vdev *vdev;
uint8_t no_enc_frame = 0;
uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf);
nbuf_mesh = qdf_nbuf_unshare(nbuf);
if (!nbuf_mesh) {
@@ -4169,7 +4202,8 @@ qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
if (nbuf_clone) {
if (!dp_tx_send(soc_hdl, vdev_id, nbuf_clone)) {
DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
DP_STATS_INC(vdev, tx_i[xmit_type].mesh.exception_fw,
1);
} else {
qdf_nbuf_free(nbuf_clone);
}
@@ -4182,7 +4216,7 @@ qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
nbuf = dp_tx_send(soc_hdl, vdev_id, nbuf);
if ((!nbuf) && no_enc_frame) {
DP_STATS_INC(vdev, tx_i.mesh.exception_fw, 1);
DP_STATS_INC(vdev, tx_i[xmit_type].mesh.exception_fw, 1);
}
dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_MESH);
@@ -4221,12 +4255,13 @@ qdf_nbuf_t dp_tx_drop(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
{
struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
struct dp_vdev *vdev = NULL;
uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf);
vdev = soc->vdev_id_map[vdev_id];
if (qdf_unlikely(!vdev))
return nbuf;
DP_STATS_INC(vdev, tx_i.dropped.drop_ingress, 1);
DP_STATS_INC(vdev, tx_i[xmit_type].dropped.drop_ingress, 1);
return nbuf;
}
@@ -4269,6 +4304,7 @@ qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
struct dp_tx_msdu_info_s msdu_info = {0};
struct dp_vdev *vdev = NULL;
qdf_nbuf_t end_nbuf = NULL;
uint8_t xmit_type;
if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
return nbuf;
@@ -4293,7 +4329,9 @@ qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
* (TID override disabled)
*/
msdu_info.tid = HTT_TX_EXT_TID_INVALID;
DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf);
msdu_info.xmit_type = xmit_type;
DP_STATS_INC_PKT(vdev, tx_i[xmit_type].rcvd, 1, qdf_nbuf_len(nbuf));
if (qdf_unlikely(vdev->mesh_vdev)) {
qdf_nbuf_t nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
@@ -4314,7 +4352,8 @@ qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
* to minimize lock contention for these resources.
*/
dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);
DP_STATS_INC(vdev, tx_i.rcvd_per_core[msdu_info.tx_queue.desc_pool_id],
DP_STATS_INC(vdev,
tx_i[xmit_type].rcvd_per_core[msdu_info.tx_queue.desc_pool_id],
1);
/*
@@ -4347,7 +4386,8 @@ qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
return nbuf;
}
DP_STATS_INC(vdev, tx_i.rcvd.num, msdu_info.num_seg - 1);
DP_STATS_INC(vdev, tx_i[xmit_type].rcvd.num,
msdu_info.num_seg - 1);
goto send_multiple;
}
@@ -4370,7 +4410,7 @@ qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
dp_verbose_debug("non-TSO SG frame %pK", vdev);
DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
DP_STATS_INC_PKT(vdev, tx_i[xmit_type].sg.sg_pkt, 1,
qdf_nbuf_len(nbuf));
goto send_multiple;
@@ -4420,7 +4460,7 @@ qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
sa_peer_id);
}
peer_id = DP_INVALID_PEER;
DP_STATS_INC_PKT(vdev, tx_i.nawds_mcast,
DP_STATS_INC_PKT(vdev, tx_i[xmit_type].nawds_mcast,
1, qdf_nbuf_len(nbuf));
}
@@ -4451,6 +4491,7 @@ qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc_hdl,
{
struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
struct dp_vdev *vdev = NULL;
uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf);
if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
return nbuf;
@@ -4468,7 +4509,9 @@ qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc_hdl,
if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
== QDF_STATUS_E_FAILURE)) {
DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
DP_STATS_INC(vdev,
tx_i[xmit_type].dropped.fail_per_pkt_vdev_id_check,
1);
return nbuf;
}
@@ -4562,13 +4605,14 @@ void dp_tx_reinject_handler(struct dp_soc *soc,
struct ieee80211_frame_addr4 *wh = (struct ieee80211_frame_addr4 *)(qdf_nbuf_data(nbuf));
#endif
struct dp_txrx_peer *txrx_peer;
uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf);
qdf_assert(vdev);
dp_tx_debug("Tx reinject path");
DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
qdf_nbuf_len(tx_desc->nbuf));
DP_STATS_INC_PKT(vdev, tx_i[xmit_type].reinject_pkts, 1,
qdf_nbuf_len(tx_desc->nbuf));
if (dp_tx_reinject_mlo_hdl(soc, vdev, tx_desc, nbuf, reinject_reason))
return;
@@ -4641,7 +4685,8 @@ void dp_tx_reinject_handler(struct dp_soc *soc,
qdf_mem_zero(&msdu_info, sizeof(msdu_info));
dp_tx_get_queue(vdev, nbuf,
&msdu_info.tx_queue);
msdu_info.xmit_type =
qdf_nbuf_get_vdev_xmit_type(nbuf);
nbuf_copy = dp_tx_send_msdu_single(vdev,
nbuf_copy,
&msdu_info,
@@ -4669,12 +4714,12 @@ void dp_tx_inspect_handler(struct dp_soc *soc,
struct dp_tx_desc_s *tx_desc,
uint8_t *status)
{
uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(tx_desc->nbuf);
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"%s Tx inspect path",
__func__);
DP_STATS_INC_PKT(vdev, tx_i.inspect_pkts, 1,
DP_STATS_INC_PKT(vdev, tx_i[xmit_type].inspect_pkts, 1,
qdf_nbuf_len(tx_desc->nbuf));
DP_TX_FREE_SINGLE_BUF(soc, tx_desc->nbuf);
@@ -6173,6 +6218,7 @@ static inline bool
dp_tx_mcast_reinject_handler(struct dp_soc *soc, struct dp_tx_desc_s *desc)
{
struct dp_vdev *vdev = NULL;
uint8_t xmit_type;
if (desc->tx_status == HAL_TX_TQM_RR_MULTICAST_DROP) {
if (!soc->arch_ops.dp_tx_mcast_handler ||
@@ -6192,7 +6238,8 @@ dp_tx_mcast_reinject_handler(struct dp_soc *soc, struct dp_tx_desc_s *desc)
dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_REINJECT);
return false;
}
DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
xmit_type = qdf_nbuf_get_vdev_xmit_type(desc->nbuf);
DP_STATS_INC_PKT(vdev, tx_i[xmit_type].reinject_pkts, 1,
qdf_nbuf_len(desc->nbuf));
soc->arch_ops.dp_tx_mcast_handler(soc, vdev, desc->nbuf);
dp_tx_desc_release(soc, desc, desc->pool_id);
@@ -7841,6 +7888,8 @@ dp_tx_multipass_send_pkt_to_repeater(struct dp_soc *soc, struct dp_vdev *vdev,
qdf_mem_zero(&msdu_info_copy, sizeof(msdu_info_copy));
msdu_info_copy.tid = HTT_TX_EXT_TID_INVALID;
msdu_info_copy.xmit_type =
qdf_nbuf_get_vdev_xmit_type(nbuf);
HTT_TX_MSDU_EXT2_DESC_FLAG_VALID_KEY_FLAGS_SET(msdu_info_copy.meta_data[0], 1);
nbuf_copy = dp_tx_send_msdu_single(vdev, nbuf_copy,
&msdu_info_copy,
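A recurring detail in the hunks above is that clone, reinject, multipass, and traffic-end-indication paths each build a fresh dp_tx_msdu_info_s, so each one copies the xmit type in explicitly, either from the parent msdu_info or by re-reading the originating nbuf. A small sketch of why that copy matters, with stand-in types, assuming (per the dp_tx.h comment below) that 0 means the Link bucket:

```c
#include <stdint.h>
#include <string.h>

struct sketch_msdu_info {
	uint8_t xmit_type;
	uint8_t tid;
};

/* Clone path sketch: the new msdu_info starts zeroed, so without the
 * explicit copy every cloned frame would be charged to bucket 0 (Link)
 * even when the original frame came in on an MLD vdev.
 */
static void sketch_prepare_clone_info(const struct sketch_msdu_info *orig,
				      struct sketch_msdu_info *clone_info)
{
	memset(clone_info, 0, sizeof(*clone_info));
	clone_info->xmit_type = orig->xmit_type;
}
```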

View file

@@ -205,6 +205,7 @@ struct dp_tx_queue {
* @u.sg_info: Scatter Gather information for non-TSO SG frames
* @meta_data: Mesh meta header information
* @ppdu_cookie: 16-bit ppdu_cookie that has to be replayed back in completions
* @xmit_type: xmit type of packet Link (0)/MLD (1)
* @gsn: global sequence for reinjected mcast packets
* @vdev_id : vdev_id for reinjected mcast packets
* @skip_hp_update : Skip HP update for TSO segments and update in last segment
@@ -229,6 +230,7 @@ struct dp_tx_msdu_info_s {
} u;
uint32_t meta_data[DP_TX_MSDU_INFO_META_DATA_DWORDS];
uint16_t ppdu_cookie;
uint8_t xmit_type;
#if defined(WLAN_FEATURE_11BE_MLO) && defined(WLAN_MLO_MULTI_CHIP)
#ifdef WLAN_MCAST_MLO
uint16_t gsn;
@@ -1870,11 +1872,13 @@ dp_tx_limit_check(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
{
struct dp_pdev *pdev = vdev->pdev;
struct dp_soc *soc = pdev->soc;
uint8_t xmit_type = qdf_nbuf_get_vdev_xmit_type(nbuf);
if (__dp_tx_limit_check(soc)) {
if (is_dp_spl_tx_limit_reached(vdev, nbuf)) {
dp_tx_info("queued packets are more than max tx, drop the frame");
DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
DP_STATS_INC(vdev,
tx_i[xmit_type].dropped.desc_na.num, 1);
return true;
}
}
@@ -1883,9 +1887,11 @@ dp_tx_limit_check(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
pdev->num_reg_tx_allowed) {
if (is_dp_spl_tx_limit_reached(vdev, nbuf)) {
dp_tx_info("queued packets are more than max tx, drop the frame");
DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
DP_STATS_INC(vdev,
tx_i.dropped.desc_na_exc_outstand.num, 1);
tx_i[xmit_type].dropped.desc_na.num, 1);
DP_STATS_INC(vdev,
tx_i[xmit_type].dropped.desc_na_exc_outstand.num,
1);
return true;
}
}
@@ -1897,12 +1903,13 @@ dp_tx_limit_check(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
* dp_tx_exception_limit_check - Check if allocated tx exception descriptors
* reached soc max limit
* @vdev: DP vdev handle
* @xmit_type: xmit type of packet - MLD/Link
*
* Return: true if allocated tx descriptors reached max configured value, else
* false
*/
static inline bool
dp_tx_exception_limit_check(struct dp_vdev *vdev)
dp_tx_exception_limit_check(struct dp_vdev *vdev, uint8_t xmit_type)
{
struct dp_pdev *pdev = vdev->pdev;
struct dp_soc *soc = pdev->soc;
@@ -1910,7 +1917,7 @@ dp_tx_exception_limit_check(struct dp_vdev *vdev)
if (qdf_atomic_read(&soc->num_tx_exception) >=
soc->num_msdu_exception_desc) {
dp_info("exc packets are more than max drop the exc pkt");
DP_STATS_INC(vdev, tx_i.dropped.exc_desc_na.num, 1);
DP_STATS_INC(vdev, tx_i[xmit_type].dropped.exc_desc_na.num, 1);
return true;
}
@@ -2051,7 +2058,7 @@ dp_tx_limit_check(struct dp_vdev *vdev, qdf_nbuf_t nbuf)
}
static inline bool
dp_tx_exception_limit_check(struct dp_vdev *vdev)
dp_tx_exception_limit_check(struct dp_vdev *vdev, uint8_t xmit_type)
{
return false;
}
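The header hunk above changes both the real dp_tx_exception_limit_check() and its compiled-out stub, since call sites now pass xmit_type unconditionally and must compile against either variant. A compressed illustration of that build-time pattern, with stand-in names; the real check also compares soc->num_tx_exception against num_msdu_exception_desc, as shown in the hunk.

```c
#include <stdbool.h>
#include <stdint.h>

struct sketch_vdev;	/* opaque stand-in for struct dp_vdev */

#ifdef SKETCH_TX_LIMIT_CHECKS
/* "real" variant: would bump tx_i[xmit_type].dropped.exc_desc_na and
 * reject the frame when the exception descriptor pool is exhausted.
 */
static inline bool
sketch_exception_limit_check(struct sketch_vdev *vdev, uint8_t xmit_type)
{
	(void)vdev;
	(void)xmit_type;
	return false;	/* limit logic elided in this sketch */
}
#else
/* no-op variant: same signature so callers compile either way */
static inline bool
sketch_exception_limit_check(struct sketch_vdev *vdev, uint8_t xmit_type)
{
	(void)vdev;
	(void)xmit_type;
	return false;
}
#endif
```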

View file

@@ -376,7 +376,7 @@ dp_tx_hw_enqueue_rh(struct dp_soc *soc, struct dp_vdev *vdev,
dp_verbose_debug("CE tx ring full");
/* TODO: Should this be a separate ce_ring_full stat? */
DP_STATS_INC(soc, tx.tcl_ring_full[0], 1);
DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
DP_STATS_INC(vdev, tx_i[DP_XMIT_LINK].dropped.enqueue_fail, 1);
goto enqueue_fail;
}
@@ -388,8 +388,10 @@ dp_tx_hw_enqueue_rh(struct dp_soc *soc, struct dp_vdev *vdev,
tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, nbuf);
DP_STATS_INC_PKT(vdev, tx_i.processed, 1, tx_desc->length);
DP_STATS_INC_PKT(vdev, tx_i[DP_XMIT_LINK].processed, 1,
tx_desc->length);
DP_STATS_INC(soc, tx.tcl_enq[0], 1);
dp_tx_update_stats(soc, tx_desc, 0);
status = QDF_STATUS_SUCCESS;
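In the last file's hunks (the dp_tx_hw_enqueue_rh path), the index is the literal DP_XMIT_LINK rather than a value read from the frame, which pins all counters from this path to the link bucket; presumably this path never carries MLD-tagged traffic, though the diff itself does not say so. A tiny self-contained sketch of that constant-bucket form, with stand-in counter names:

```c
#include <stdint.h>

enum { DP_XMIT_LINK = 0 };	/* Link bucket, per the dp_tx.h comment */

struct counters_sketch {
	uint64_t processed_num;
	uint64_t processed_bytes;
};

/* Constant-bucket form used by paths with only one possible xmit type. */
static void sketch_rh_count_processed(struct counters_sketch tx_i[],
				      uint32_t pkt_len)
{
	tx_i[DP_XMIT_LINK].processed_num += 1;
	tx_i[DP_XMIT_LINK].processed_bytes += pkt_len;
}
```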