qcacmn: Modify logs for QDF_MODULE_ID_DP_TX and QDF_MODULE_ID_DP_TX_COMP

Add macros such as dp_tx_info, dp_tx_comp_info, dp_tx_debug,
dp_tx_comp_debug, dp_tx_err, dp_tx_comp_err, etc., to define logs
for these module IDs, replacing the open-coded QDF_TRACE calls.
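
For reference, a minimal sketch of how such wrappers can be layered
on top of the existing QDF_TRACE API; the actual definitions in the
headers touched by this change may differ (e.g. they may route
through other internal trace helpers):

	/* Hedged sketch: per-module log macros for the DP TX path.
	 * QDF_TRACE, the module IDs and the trace levels below all
	 * appear elsewhere in this change; the macro bodies are
	 * illustrative, not the committed definitions.
	 */
	#define dp_tx_info(fmt, args...) \
		QDF_TRACE(QDF_MODULE_ID_DP_TX, QDF_TRACE_LEVEL_INFO, \
			  fmt, ## args)
	#define dp_tx_debug(fmt, args...) \
		QDF_TRACE(QDF_MODULE_ID_DP_TX, QDF_TRACE_LEVEL_DEBUG, \
			  fmt, ## args)
	#define dp_tx_err(fmt, args...) \
		QDF_TRACE(QDF_MODULE_ID_DP_TX, QDF_TRACE_LEVEL_ERROR, \
			  fmt, ## args)
	#define dp_tx_comp_info(fmt, args...) \
		QDF_TRACE(QDF_MODULE_ID_DP_TX_COMP, QDF_TRACE_LEVEL_INFO, \
			  fmt, ## args)
	#define dp_tx_comp_alert(fmt, args...) \
		QDF_TRACE(QDF_MODULE_ID_DP_TX_COMP, QDF_TRACE_LEVEL_FATAL, \
			  fmt, ## args)

With wrappers of this shape, callers pass only printf-style
arguments; the module ID and severity are implied by the macro name,
e.g. dp_tx_err("dma map error") replaces the three-line QDF_TRACE
form seen in the removed lines below.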

Change-Id: If0eef8112db1f8e99fb95ee943d5ddbff94425b8
CRs-Fixed: 2830563
This commit is contained in:
Shikha Bakshi
2020-11-10 12:45:26 +05:30
committed by snandini
parent 763cef6b29
commit 7ae3ccdc47
3 changed files with 53 additions and 72 deletions

@@ -119,18 +119,14 @@ dp_tx_limit_check(struct dp_vdev *vdev)
 	if (qdf_atomic_read(&soc->num_tx_outstanding) >=
 			soc->num_tx_allowed) {
-		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
-			  "%s: queued packets are more than max tx, drop the frame",
-			  __func__);
+		dp_tx_info("queued packets are more than max tx, drop the frame");
 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
 		return true;
 	}
 	if (qdf_atomic_read(&pdev->num_tx_outstanding) >=
 			pdev->num_tx_allowed) {
-		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
-			  "%s: queued packets are more than max tx, drop the frame",
-			  __func__);
+		dp_tx_info("queued packets are more than max tx, drop the frame");
 		DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
 		return true;
 	}
@@ -269,14 +265,10 @@ static void dp_tx_tso_desc_release(struct dp_soc *soc,
 {
 	TSO_DEBUG("%s: Free the tso descriptor", __func__);
 	if (qdf_unlikely(!tx_desc->tso_desc)) {
-		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
-			  "%s %d TSO desc is NULL!",
-			  __func__, __LINE__);
+		dp_tx_err("TSO desc is NULL!");
 		qdf_assert(0);
 	} else if (qdf_unlikely(!tx_desc->tso_num_desc)) {
-		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
-			  "%s %d TSO num desc is NULL!",
-			  __func__, __LINE__);
+		dp_tx_err("TSO num desc is NULL!");
 		qdf_assert(0);
 	} else {
 		struct qdf_tso_num_seg_elem_t *tso_num_desc =
@@ -353,10 +345,9 @@ dp_tx_desc_release(struct dp_tx_desc_s *tx_desc, uint8_t desc_pool_id)
 	else
 		comp_status = HAL_TX_COMP_RELEASE_REASON_FW;
-	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
-		  "Tx Completion Release desc %d status %d outstanding %d",
-		  tx_desc->id, comp_status,
-		  qdf_atomic_read(&pdev->num_tx_outstanding));
+	dp_tx_debug("Tx Completion Release desc %d status %d outstanding %d",
+		    tx_desc->id, comp_status,
+		    qdf_atomic_read(&pdev->num_tx_outstanding));
 	dp_tx_desc_free(soc, tx_desc, desc_pool_id);
 	return;
@@ -421,8 +412,7 @@ static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
 	/* Fill and add HTT metaheader */
 	hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
 	if (!hdr) {
-		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
-			  "Error in filling HTT metadata");
+		dp_tx_err("Error in filling HTT metadata");
 		return 0;
 	}
@@ -953,8 +943,7 @@ struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
 	}
 	if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
-		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
-			  "qdf_nbuf_push_head failed");
+		dp_tx_err("qdf_nbuf_push_head failed");
 		goto failure;
 	}
@@ -1037,9 +1026,7 @@ static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
 	/* Allocate and prepare an extension descriptor for scattered frames */
 	msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
 	if (!msdu_ext_desc) {
-		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
-			  "%s Tx Extension Descriptor Alloc Fail",
-			  __func__);
+		dp_tx_info("Tx Extension Descriptor Alloc Fail");
 		goto failure;
 	}
@@ -1094,8 +1081,7 @@ static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
 	/* Continue only if frames are of DATA type */
 	if (!DP_FRAME_IS_DATA(qos_wh)) {
 		DP_STATS_INC(vdev, tx_i.raw.invalid_raw_pkt_datatype, 1);
-		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
-			  "Pkt. recd is of not data type");
+		dp_tx_debug("Pkt. recd is of not data type");
 		goto error;
 	}
 	/* SWAR for HW: Enable WEP bit in the AMSDU frames for RAW mode */
@@ -1120,8 +1106,7 @@ static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
 						     curr_nbuf,
 						     QDF_DMA_TO_DEVICE,
 						     curr_nbuf->len)) {
-			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
-				  "%s dma map error ", __func__);
+			dp_tx_err("%s dma map error ", __func__);
 			DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
 			goto error;
 		}
@@ -2016,9 +2001,8 @@ dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
 				 tx_exc_metadata, msdu_info);
 	if (status != QDF_STATUS_SUCCESS) {
-		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
-			  "%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
-			  __func__, tx_desc, tx_q->ring_id);
+		dp_tx_err("Tx_hw_enqueue Fail tx_desc %pK queue %d",
+			  tx_desc, tx_q->ring_id);
 		qdf_nbuf_unmap_nbytes_single(vdev->osdev, nbuf,
 					     QDF_DMA_TO_DEVICE,
 					     nbuf->len);
@@ -2349,8 +2333,7 @@ static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
 	    qdf_nbuf_map_nbytes_single(vdev->osdev, nbuf,
 				       QDF_DMA_TO_DEVICE,
 				       qdf_nbuf_headlen(nbuf))) {
-		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
-			  "dma map error");
+		dp_tx_err("dma map error");
 		DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
 		qdf_nbuf_free(nbuf);
@@ -2366,8 +2349,7 @@ static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
 	for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) {
 		if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
 				nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
-			QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
-				  "frag dma map error");
+			dp_tx_err("frag dma map error");
 			DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
 			goto map_err;
 		}
@@ -2504,24 +2486,22 @@ qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
 remove_meta_hdr:
 	if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
-		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
-			  "qdf_nbuf_pull_head failed");
+		dp_tx_err("qdf_nbuf_pull_head failed");
 		qdf_nbuf_free(nbuf);
 		return NULL;
 	}
 	msdu_info->tid = qdf_nbuf_get_priority(nbuf);
-	QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
-		  "%s , Meta hdr %0x %0x %0x %0x %0x %0x"
-		  " tid %d to_fw %d",
-		  __func__, msdu_info->meta_data[0],
-		  msdu_info->meta_data[1],
-		  msdu_info->meta_data[2],
-		  msdu_info->meta_data[3],
-		  msdu_info->meta_data[4],
-		  msdu_info->meta_data[5],
-		  msdu_info->tid, msdu_info->exception_fw);
+	dp_tx_info("Meta hdr %0x %0x %0x %0x %0x %0x"
+		   " tid %d to_fw %d",
+		   msdu_info->meta_data[0],
+		   msdu_info->meta_data[1],
+		   msdu_info->meta_data[2],
+		   msdu_info->meta_data[3],
+		   msdu_info->meta_data[4],
+		   msdu_info->meta_data[5],
+		   msdu_info->tid, msdu_info->exception_fw);
 	return nbuf;
 }
@@ -2684,8 +2664,7 @@ dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
 	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
 	if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
-		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
-			  "Invalid parameters in exception path");
+		dp_tx_err("Invalid parameters in exception path");
 		goto fail;
 	}
@@ -2693,8 +2672,7 @@ dp_tx_send_exception(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
 	/* MESH mode */
 	if (qdf_unlikely(vdev->mesh_vdev)) {
-		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
-			  "Mesh mode is not supported in exception path");
+		dp_tx_err("Mesh mode is not supported in exception path");
 		goto fail;
 	}
@@ -4463,18 +4441,14 @@ more_data:
 		if (qdf_unlikely
 			((tx_desc->vdev_id == DP_INVALID_VDEV_ID) &&
 			 !tx_desc->flags)) {
-			QDF_TRACE(QDF_MODULE_ID_DP,
-				  QDF_TRACE_LEVEL_INFO,
-				  "Descriptor freed in vdev_detach %d",
-				  tx_desc_id);
+			dp_tx_comp_info("Descriptor freed in vdev_detach %d",
+					tx_desc_id);
 			continue;
 		}
 		if (qdf_unlikely(tx_desc->pdev->is_pdev_down)) {
-			QDF_TRACE(QDF_MODULE_ID_DP,
-				  QDF_TRACE_LEVEL_INFO,
-				  "pdev in down state %d",
-				  tx_desc_id);
+			dp_tx_comp_info("pdev in down state %d",
+					tx_desc_id);
 			dp_tx_comp_free_buf(soc, tx_desc);
 			dp_tx_desc_release(tx_desc, tx_desc->pool_id);
@@ -4483,20 +4457,16 @@ more_data:
 		/* Pool id is not matching. Error */
 		if (tx_desc->pool_id != pool_id) {
-			QDF_TRACE(QDF_MODULE_ID_DP,
-				  QDF_TRACE_LEVEL_FATAL,
-				  "Tx Comp pool id %d not matched %d",
-				  pool_id, tx_desc->pool_id);
+			dp_tx_comp_alert("Tx Comp pool id %d not matched %d",
+					 pool_id, tx_desc->pool_id);
 			qdf_assert_always(0);
 		}
 		if (!(tx_desc->flags & DP_TX_DESC_FLAG_ALLOCATED) ||
 		    !(tx_desc->flags & DP_TX_DESC_FLAG_QUEUED_TX)) {
-			QDF_TRACE(QDF_MODULE_ID_DP,
-				  QDF_TRACE_LEVEL_FATAL,
-				  "Txdesc invalid, flgs = %x,id = %d",
-				  tx_desc->flags, tx_desc_id);
+			dp_tx_comp_alert("Txdesc invalid, flgs = %x,id = %d",
+					 tx_desc->flags, tx_desc_id);
 			qdf_assert_always(0);
 		}