qcacmn: Clean up dp component prints
Clean up datapath component prints by correcting trace levels for
regularly occurring prints and removing newlines from converged print
APIs, since qdf_trace_msg appends them by default.

Change-Id: Ie8fe319fcb737720f7400a165e134b6a18bd15b5
CRs-Fixed: 2243843
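The same pattern repeats throughout the diff below; a minimal sketch of it (the module ID, trace level, and the pkt_free_count variable here are illustrative, not copied from any single call site):

    /* Before: the trailing "\n" is redundant because qdf_trace_msg()
     * already terminates the message, and ERROR is too noisy for a
     * print that fires on every packet.
     */
    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
              "%s: Pkt free count %d\n", __func__, pkt_free_count);

    /* After: demoted to a low-frequency info level, newline dropped */
    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
              "%s: Pkt free count %d", __func__, pkt_free_count);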
@@ -299,7 +299,7 @@ htt_htc_misc_pkt_pool_free(struct htt_soc *soc)

soc->stats.htc_pkt_free++;
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
-"%s: Pkt free count %d\n",
+"%s: Pkt free count %d",
__func__, soc->stats.htc_pkt_free);

qdf_nbuf_free(netbuf);
@@ -416,7 +416,7 @@ static int htt_h2t_ver_req_msg(struct htt_soc *soc)
*/
if (qdf_nbuf_put_tail(msg, HTT_VER_REQ_BYTES) == NULL) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-"%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg\n",
+"%s: Failed to expand head for HTT_H2T_MSG_TYPE_VERSION_REQ msg",
__func__);
return QDF_STATUS_E_FAILURE;
}
@@ -514,19 +514,19 @@ int htt_srng_setup(void *htt_soc, int mac_id, void *hal_srng,
htt_ring_type = HTT_SW_TO_HW_RING;
} else {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-"%s: Ring %d currently not supported\n",
+"%s: Ring %d currently not supported",
__func__, srng_params.ring_id);
goto fail1;
}

-QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-"%s: ring_type %d ring_id %d\n",
+QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
+"%s: ring_type %d ring_id %d",
__func__, hal_ring_type, srng_params.ring_id);
-QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-"%s: hp_addr 0x%llx tp_addr 0x%llx\n",
+QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
+"%s: hp_addr 0x%llx tp_addr 0x%llx",
__func__, (uint64_t)hp_addr, (uint64_t)tp_addr);
-QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-"%s: htt_ring_id %d\n", __func__, htt_ring_id);
+QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
+"%s: htt_ring_id %d", __func__, htt_ring_id);
break;
case RXDMA_MONITOR_BUF:
htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
@@ -551,7 +551,7 @@ int htt_srng_setup(void *htt_soc, int mac_id, void *hal_srng,

default:
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-"%s: Ring currently not supported\n", __func__);
+"%s: Ring currently not supported", __func__);
goto fail1;
}

@@ -563,7 +563,7 @@ int htt_srng_setup(void *htt_soc, int mac_id, void *hal_srng,
*/
if (qdf_nbuf_put_tail(htt_msg, HTT_SRING_SETUP_SZ) == NULL) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-"%s: Failed to expand head for SRING_SETUP msg\n",
+"%s: Failed to expand head for SRING_SETUP msg",
__func__);
return QDF_STATUS_E_FAILURE;
}
@@ -584,8 +584,8 @@ int htt_srng_setup(void *htt_soc, int mac_id, void *hal_srng,
else
HTT_SRING_SETUP_PDEV_ID_SET(*msg_word, mac_id);

-QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-"%s: mac_id %d\n", __func__, mac_id);
+QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
+"%s: mac_id %d", __func__, mac_id);
HTT_SRING_SETUP_RING_TYPE_SET(*msg_word, htt_ring_type);
/* TODO: Discuss with FW on changing this to unique ID and using
* htt_ring_type to send the type of ring
@@ -610,14 +610,14 @@ int htt_srng_setup(void *htt_soc, int mac_id, void *hal_srng,
HTT_SRING_SETUP_ENTRY_SIZE_SET(*msg_word, ring_entry_size);
HTT_SRING_SETUP_RING_SIZE_SET(*msg_word,
(ring_entry_size * srng_params.num_entries));
-QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-"%s: entry_size %d\n", __func__,
+QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
+"%s: entry_size %d", __func__,
ring_entry_size);
-QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-"%s: num_entries %d\n", __func__,
+QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
+"%s: num_entries %d", __func__,
srng_params.num_entries);
-QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-"%s: ring_size %d\n", __func__,
+QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
+"%s: ring_size %d", __func__,
(ring_entry_size * srng_params.num_entries));
if (htt_ring_type == HTT_SW_TO_HW_RING)
HTT_SRING_SETUP_RING_MISC_CFG_FLAG_LOOPCOUNT_DISABLE_SET(
@@ -783,7 +783,7 @@ int htt_h2t_rx_ring_cfg(void *htt_soc, int pdev_id, void *hal_srng,

default:
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-"%s: Ring currently not supported\n", __func__);
+"%s: Ring currently not supported", __func__);
goto fail1;
}

@@ -795,7 +795,7 @@ int htt_h2t_rx_ring_cfg(void *htt_soc, int pdev_id, void *hal_srng,
*/
if (qdf_nbuf_put_tail(htt_msg, HTT_RX_RING_SELECTION_CFG_SZ) == NULL) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-"%s: Failed to expand head for RX Ring Cfg msg\n",
+"%s: Failed to expand head for RX Ring Cfg msg",
__func__);
goto fail1; /* failure */
}
@@ -2707,7 +2707,7 @@ dp_ppdu_stats_ind_handler(struct htt_soc *soc,
bool free_buf;
qdf_nbuf_set_pktlen(htt_t2h_msg, HTT_T2H_MAX_MSG_SIZE);
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
-"received HTT_T2H_MSG_TYPE_PPDU_STATS_IND\n");
+"received HTT_T2H_MSG_TYPE_PPDU_STATS_IND");
pdev_id = HTT_T2H_PPDU_STATS_PDEV_ID_GET(*msg_word);
pdev_id = DP_HW2SW_MACID(pdev_id);
free_buf = dp_txrx_ppdu_stats_handler(soc->dp_soc, pdev_id,
@@ -2743,7 +2743,7 @@ dp_pktlog_msg_handler(struct htt_soc *soc,
uint8_t pdev_id;
uint32_t *pl_hdr;
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
-"received HTT_T2H_MSG_TYPE_PKTLOG\n");
+"received HTT_T2H_MSG_TYPE_PKTLOG");
pdev_id = HTT_T2H_PKTLOG_PDEV_ID_GET(*msg_word);
pdev_id = DP_HW2SW_MACID(pdev_id);
pl_hdr = (msg_word + 1);
@@ -2854,14 +2854,14 @@ static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
soc->tgt_ver.major = HTT_VER_CONF_MAJOR_GET(*msg_word);
soc->tgt_ver.minor = HTT_VER_CONF_MINOR_GET(*msg_word);
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
-"target uses HTT version %d.%d; host uses %d.%d\n",
+"target uses HTT version %d.%d; host uses %d.%d",
soc->tgt_ver.major, soc->tgt_ver.minor,
HTT_CURRENT_VERSION_MAJOR,
HTT_CURRENT_VERSION_MINOR);
if (soc->tgt_ver.major != HTT_CURRENT_VERSION_MAJOR) {
QDF_TRACE(QDF_MODULE_ID_TXRX,
QDF_TRACE_LEVEL_ERROR,
-"*** Incompatible host/target HTT versions!\n");
+"*** Incompatible host/target HTT versions!");
}
/* abort if the target is incompatible with the host */
qdf_assert(soc->tgt_ver.major ==
@@ -2870,7 +2870,7 @@ static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
QDF_TRACE(QDF_MODULE_ID_TXRX,
QDF_TRACE_LEVEL_WARN,
"*** Warning: host/target HTT versions"
-" are different, though compatible!\n");
+" are different, though compatible!");
}
break;
}
@@ -2900,13 +2900,13 @@ static void dp_htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
0, tid, 0, win_sz + 1, 0xffff);
QDF_TRACE(QDF_MODULE_ID_TXRX,
QDF_TRACE_LEVEL_INFO,
-FL("PeerID %d BAW %d TID %d stat %d\n"),
+FL("PeerID %d BAW %d TID %d stat %d"),
peer_id, win_sz, tid, status);

} else {
QDF_TRACE(QDF_MODULE_ID_TXRX,
QDF_TRACE_LEVEL_ERROR,
-FL("Peer not found peer id %d\n"),
+FL("Peer not found peer id %d"),
peer_id);
}
break;
@@ -3156,7 +3156,7 @@ QDF_STATUS dp_h2t_ext_stats_msg_send(struct dp_pdev *pdev,
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"-----%s:%d----\n cookie <-> %d\n config_param_0 %u\n"
"config_param_1 %u\n config_param_2 %u\n"
-"config_param_4 %u\n -------------\n",
+"config_param_4 %u\n -------------",
__func__, __LINE__, cookie_val, config_param_0,
config_param_1, config_param_2, config_param_3);

@@ -3252,7 +3252,7 @@ QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,

if (!msg) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
-"Fail to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer\n");
+"Fail to allocate HTT_H2T_PPDU_STATS_CFG_MSG_SZ msg buffer");
qdf_assert(0);
return QDF_STATUS_E_NOMEM;
}
@@ -3273,7 +3273,7 @@ QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
*/
if (qdf_nbuf_put_tail(msg, HTT_H2T_PPDU_STATS_CFG_MSG_SZ) == NULL) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
-"Failed to expand head for HTT_CFG_STATS\n");
+"Failed to expand head for HTT_CFG_STATS");
qdf_nbuf_free(msg);
return QDF_STATUS_E_FAILURE;
}
@@ -3290,7 +3290,7 @@ QDF_STATUS dp_h2t_cfg_stats_msg_send(struct dp_pdev *pdev,
pkt = htt_htc_pkt_alloc(soc);
if (!pkt) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
-"Fail to allocate dp_htt_htc_pkt buffer\n");
+"Fail to allocate dp_htt_htc_pkt buffer");
qdf_assert(0);
qdf_nbuf_free(msg);
return QDF_STATUS_E_NOMEM;
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -184,7 +184,7 @@ static int dp_tx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
(void *)wbm_srng);
if (!ring_entry) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
-"%s: Failed to get WBM ring entry\n",
+"%s: Failed to get WBM ring entry",
__func__);
qdf_mem_free_consistent(soc->osdev, soc->osdev->dev,
alloc_size, buffer_vaddr_unaligned,
@@ -215,11 +215,11 @@ static int dp_tx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)

if (tx_buffer_count) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
-"%s: IPA WDI TX buffer: %d allocated\n",
+"%s: IPA WDI TX buffer: %d allocated",
__func__, tx_buffer_count);
} else {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
-"%s: No IPA WDI TX buffer allocated\n",
+"%s: No IPA WDI TX buffer allocated",
__func__);
qdf_mem_free(soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned);
soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned = NULL;
@@ -255,7 +255,7 @@ int dp_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
error = dp_tx_ipa_uc_attach(soc, pdev);
if (error) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
-"%s: DP IPA UC TX attach fail code %d\n",
+"%s: DP IPA UC TX attach fail code %d",
__func__, error);
return error;
}
@@ -264,7 +264,7 @@ int dp_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
error = dp_rx_ipa_uc_attach(soc, pdev);
if (error) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
-"%s: DP IPA UC RX attach fail code %d\n",
+"%s: DP IPA UC RX attach fail code %d",
__func__, error);
dp_tx_ipa_uc_detach(soc, pdev);
return error;
@@ -914,14 +914,14 @@ static int dp_srng_setup(struct dp_soc *soc, struct dp_srng *srng,
|
||||
|
||||
if (soc->intr_mode == DP_INTR_MSI) {
|
||||
dp_srng_msi_setup(soc, &ring_params, ring_type, ring_num);
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
|
||||
FL("Using MSI for ring_type: %d, ring_num %d"),
|
||||
ring_type, ring_num);
|
||||
|
||||
} else {
|
||||
ring_params.msi_data = 0;
|
||||
ring_params.msi_addr = 0;
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
|
||||
FL("Skipping MSI for ring_type: %d, ring_num %d"),
|
||||
ring_type, ring_num);
|
||||
}
|
||||
@@ -2303,8 +2303,8 @@ static int dp_soc_cmn_setup(struct dp_soc *soc)
|
||||
soc->num_reo_dest_rings =
|
||||
wlan_cfg_num_reo_dest_rings(soc_cfg_ctx);
|
||||
QDF_TRACE(QDF_MODULE_ID_DP,
|
||||
QDF_TRACE_LEVEL_ERROR,
|
||||
FL("num_reo_dest_rings %d\n"), soc->num_reo_dest_rings);
|
||||
QDF_TRACE_LEVEL_INFO,
|
||||
FL("num_reo_dest_rings %d"), soc->num_reo_dest_rings);
|
||||
for (i = 0; i < soc->num_reo_dest_rings; i++) {
|
||||
if (dp_srng_setup(soc, &soc->reo_dest_ring[i], REO_DST,
|
||||
i, 0, reo_dst_ring_size)) {
|
||||
@@ -2514,7 +2514,7 @@ static int dp_rxdma_ring_setup(struct dp_soc *soc,
|
||||
|
||||
for (i = 0; i < max_mac_rings; i++) {
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: pdev_id %d mac_id %d\n",
|
||||
"%s: pdev_id %d mac_id %d",
|
||||
__func__, pdev->pdev_id, i);
|
||||
if (dp_srng_setup(soc, &pdev->rx_mac_buf_ring[i],
|
||||
RXDMA_BUF, 1, i,
|
||||
@@ -2967,13 +2967,13 @@ static struct cdp_pdev *dp_pdev_attach_wifi3(struct cdp_soc_t *txrx_soc,
|
||||
/* Rx monitor mode specific init */
|
||||
if (dp_rx_pdev_mon_attach(pdev)) {
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"dp_rx_pdev_attach failed\n");
|
||||
"dp_rx_pdev_attach failed");
|
||||
goto fail1;
|
||||
}
|
||||
|
||||
if (dp_wdi_event_attach(pdev)) {
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"dp_wdi_evet_attach failed\n");
|
||||
"dp_wdi_evet_attach failed");
|
||||
goto fail1;
|
||||
}
|
||||
|
||||
@@ -3359,18 +3359,18 @@ static void dp_rxdma_ring_config(struct dp_soc *soc)
|
||||
if (dbs_enable) {
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX,
|
||||
QDF_TRACE_LEVEL_ERROR,
|
||||
FL("DBS enabled max_mac_rings %d\n"),
|
||||
FL("DBS enabled max_mac_rings %d"),
|
||||
max_mac_rings);
|
||||
} else {
|
||||
max_mac_rings = 1;
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX,
|
||||
QDF_TRACE_LEVEL_ERROR,
|
||||
FL("DBS disabled, max_mac_rings %d\n"),
|
||||
FL("DBS disabled, max_mac_rings %d"),
|
||||
max_mac_rings);
|
||||
}
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
FL("pdev_id %d max_mac_rings %d\n"),
|
||||
FL("pdev_id %d max_mac_rings %d"),
|
||||
pdev->pdev_id, max_mac_rings);
|
||||
|
||||
for (mac_id = 0; mac_id < max_mac_rings; mac_id++) {
|
||||
@@ -3379,7 +3379,7 @@ static void dp_rxdma_ring_config(struct dp_soc *soc)
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX,
|
||||
QDF_TRACE_LEVEL_ERROR,
|
||||
FL("mac_id %d\n"), mac_for_pdev);
|
||||
FL("mac_id %d"), mac_for_pdev);
|
||||
htt_srng_setup(soc->htt_handle, mac_for_pdev,
|
||||
pdev->rx_mac_buf_ring[mac_id]
|
||||
.hal_srng,
|
||||
@@ -3496,7 +3496,7 @@ static void dp_soc_set_nss_cfg_wifi3(struct cdp_soc_t *cdp_soc, int config)
|
||||
wlan_cfg_set_num_tx_ext_desc(wlan_cfg_ctx, 0);
|
||||
}
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
|
||||
FL("nss-wifi<0> nss config is enabled"));
|
||||
}
|
||||
/*
|
||||
@@ -4002,7 +4002,7 @@ static void dp_peer_setup_wifi3(struct cdp_vdev *vdev_hdl, void *peer_hdl)
|
||||
hash_based = wlan_cfg_is_rx_hash_enabled(soc->wlan_cfg_ctx);
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
|
||||
FL("hash based steering for pdev: %d is %d\n"),
|
||||
FL("hash based steering for pdev: %d is %d"),
|
||||
pdev->pdev_id, hash_based);
|
||||
|
||||
/*
|
||||
@@ -4483,7 +4483,7 @@ void dp_peer_unref_delete(void *peer_handle)
|
||||
*/
|
||||
qdf_spin_lock_bh(&soc->peer_ref_mutex);
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: peer %pK ref_cnt(before decrement): %d\n", __func__,
|
||||
"%s: peer %pK ref_cnt(before decrement): %d", __func__,
|
||||
peer, qdf_atomic_read(&peer->ref_cnt));
|
||||
if (qdf_atomic_dec_and_test(&peer->ref_cnt)) {
|
||||
peer_id = peer->peer_ids[0];
|
||||
@@ -4811,13 +4811,13 @@ static int dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle,
|
||||
pdev_id = pdev->pdev_id;
|
||||
soc = pdev->soc;
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
|
||||
"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
|
||||
"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
|
||||
pdev, pdev_id, soc, vdev);
|
||||
|
||||
/*Check if current pdev's monitor_vdev exists */
|
||||
if (pdev->monitor_vdev) {
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"vdev=%pK\n", vdev);
|
||||
"vdev=%pK", vdev);
|
||||
qdf_assert(vdev);
|
||||
}
|
||||
|
||||
@@ -4828,7 +4828,7 @@ static int dp_vdev_set_monitor_mode(struct cdp_vdev *vdev_handle,
|
||||
return QDF_STATUS_SUCCESS;
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
|
||||
"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]\n",
|
||||
"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
|
||||
pdev->mon_filter_mode, pdev->fp_mgmt_filter,
|
||||
pdev->fp_ctrl_filter, pdev->fp_data_filter,
|
||||
pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
|
||||
@@ -4930,13 +4930,13 @@ static int dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle,
|
||||
soc = pdev->soc;
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
|
||||
"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK\n",
|
||||
"pdev=%pK, pdev_id=%d, soc=%pK vdev=%pK",
|
||||
pdev, pdev_id, soc, vdev);
|
||||
|
||||
/*Check if current pdev's monitor_vdev exists */
|
||||
if (!pdev->monitor_vdev) {
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"vdev=%pK\n", vdev);
|
||||
"vdev=%pK", vdev);
|
||||
qdf_assert(vdev);
|
||||
}
|
||||
|
||||
@@ -4950,7 +4950,7 @@ static int dp_pdev_set_advance_monitor_filter(struct cdp_pdev *pdev_handle,
|
||||
pdev->mo_data_filter = filter_val->mo_data;
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
|
||||
"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]\n",
|
||||
"MODE[%x] FP[%02x|%02x|%02x] MO[%02x|%02x|%02x]",
|
||||
pdev->mon_filter_mode, pdev->fp_mgmt_filter,
|
||||
pdev->fp_ctrl_filter, pdev->fp_data_filter,
|
||||
pdev->mo_mgmt_filter, pdev->mo_ctrl_filter,
|
||||
@@ -5377,7 +5377,7 @@ static void dp_get_device_stats(void *handle,
|
||||
default:
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"apstats cannot be updated for this input "
|
||||
"type %d\n", type);
|
||||
"type %d", type);
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -6474,7 +6474,7 @@ dp_config_debug_sniffer(struct cdp_pdev *pdev_handle, int val)
|
||||
break;
|
||||
default:
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"Invalid value\n");
|
||||
"Invalid value");
|
||||
break;
|
||||
}
|
||||
}
|
||||
@@ -7225,7 +7225,7 @@ dp_txrx_peer_wds_tx_policy_update(struct cdp_peer *peer_handle,
|
||||
FL("Policy Update set to :\
|
||||
peer->wds_enabled %d\
|
||||
peer->wds_ecm.wds_tx_ucast_4addr %d\
|
||||
peer->wds_ecm.wds_tx_mcast_4addr %d\n"),
|
||||
peer->wds_ecm.wds_tx_mcast_4addr %d"),
|
||||
peer->wds_enabled, peer->wds_ecm.wds_tx_ucast_4addr,
|
||||
peer->wds_ecm.wds_tx_mcast_4addr);
|
||||
return;
|
||||
@@ -7976,7 +7976,7 @@ int dp_set_pktlog_wifi3(struct dp_pdev *pdev, uint32_t event,
|
||||
dp_is_hw_dbs_enable(soc, &max_mac_rings);
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
|
||||
FL("Max_mac_rings %d \n"),
|
||||
FL("Max_mac_rings %d "),
|
||||
max_mac_rings);
|
||||
|
||||
if (enable) {
|
||||
|
@@ -41,7 +41,7 @@ dp_set_ssn_valid_flag(struct hal_reo_cmd_params *params,
|
||||
params->u.upd_queue_params.update_svld = 1;
|
||||
params->u.upd_queue_params.svld = valid;
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
|
||||
"%s: Setting SSN valid bit to %d\n",
|
||||
"%s: Setting SSN valid bit to %d",
|
||||
__func__, valid);
|
||||
}
|
||||
#else
|
||||
@@ -74,12 +74,12 @@ static int dp_peer_find_map_attach(struct dp_soc *soc)
|
||||
max_peers = soc->max_peers;
|
||||
/* allocate the peer ID -> peer object map */
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
|
||||
"\n<=== cfg max peer id %d ====>\n", max_peers);
|
||||
"\n<=== cfg max peer id %d ====>", max_peers);
|
||||
peer_map_size = max_peers * sizeof(soc->peer_id_to_obj_map[0]);
|
||||
soc->peer_id_to_obj_map = qdf_mem_malloc(peer_map_size);
|
||||
if (!soc->peer_id_to_obj_map) {
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: peer map memory allocation failed\n", __func__);
|
||||
"%s: peer map memory allocation failed", __func__);
|
||||
return QDF_STATUS_E_NOMEM;
|
||||
}
|
||||
|
||||
@@ -358,7 +358,7 @@ static inline void dp_peer_map_ast(struct dp_soc *soc,
|
||||
}
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x\n",
|
||||
"%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x",
|
||||
__func__, peer, hw_peer_id, vdev_id, mac_addr[0],
|
||||
mac_addr[1], mac_addr[2], mac_addr[3],
|
||||
mac_addr[4], mac_addr[5]);
|
||||
@@ -384,7 +384,7 @@ static inline void dp_peer_map_ast(struct dp_soc *soc,
|
||||
}
|
||||
} else {
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"AST entry not found\n");
|
||||
"AST entry not found");
|
||||
}
|
||||
|
||||
qdf_spin_unlock_bh(&soc->ast_lock);
|
||||
@@ -423,7 +423,7 @@ int dp_peer_add_ast(struct dp_soc *soc,
|
||||
}
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: peer %pK mac %02x:%02x:%02x:%02x:%02x:%02x\n",
|
||||
"%s: peer %pK mac %02x:%02x:%02x:%02x:%02x:%02x",
|
||||
__func__, peer, mac_addr[0], mac_addr[1], mac_addr[2],
|
||||
mac_addr[3], mac_addr[4], mac_addr[5]);
|
||||
|
||||
@@ -902,7 +902,7 @@ static inline struct dp_peer *dp_peer_find_add_id(struct dp_soc *soc,
|
||||
peer = dp_peer_find_hash_find(soc, peer_mac_addr,
|
||||
0 /* is aligned */, vdev_id);
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x\n",
|
||||
"%s: peer %pK ID %d vid %d mac %02x:%02x:%02x:%02x:%02x:%02x",
|
||||
__func__, peer, peer_id, vdev_id, peer_mac_addr[0],
|
||||
peer_mac_addr[1], peer_mac_addr[2], peer_mac_addr[3],
|
||||
peer_mac_addr[4], peer_mac_addr[5]);
|
||||
@@ -950,7 +950,7 @@ dp_rx_peer_map_handler(void *soc_handle, uint16_t peer_id, uint16_t hw_peer_id,
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
|
||||
"peer_map_event (soc:%pK): peer_id %di, hw_peer_id %d, peer_mac "
|
||||
"%02x:%02x:%02x:%02x:%02x:%02x, vdev_id %d\n", soc, peer_id,
|
||||
"%02x:%02x:%02x:%02x:%02x:%02x, vdev_id %d", soc, peer_id,
|
||||
hw_peer_id, peer_mac_addr[0], peer_mac_addr[1],
|
||||
peer_mac_addr[2], peer_mac_addr[3], peer_mac_addr[4],
|
||||
peer_mac_addr[5], vdev_id);
|
||||
@@ -1002,7 +1002,7 @@ dp_rx_peer_unmap_handler(void *soc_handle, uint16_t peer_id)
|
||||
peer = __dp_peer_find_by_id(soc, peer_id);
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
|
||||
"peer_unmap_event (soc:%pK) peer_id %d peer %pK\n",
|
||||
"peer_unmap_event (soc:%pK) peer_id %d peer %pK",
|
||||
soc, peer_id, peer);
|
||||
|
||||
/*
|
||||
@@ -1013,7 +1013,7 @@ dp_rx_peer_unmap_handler(void *soc_handle, uint16_t peer_id)
|
||||
if (!peer) {
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: Received unmap event for invalid peer_id"
|
||||
" %u\n", __func__, peer_id);
|
||||
" %u", __func__, peer_id);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -1056,7 +1056,7 @@ static void dp_rx_tid_update_cb(struct dp_soc *soc, void *cb_ctxt,
|
||||
HAL_REO_CMD_DRAIN)) {
|
||||
/* Should not happen normally. Just print error for now */
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: Rx tid HW desc update failed(%d): tid %d\n",
|
||||
"%s: Rx tid HW desc update failed(%d): tid %d",
|
||||
__func__,
|
||||
reo_status->rx_queue_status.header.status,
|
||||
rx_tid->tid);
|
||||
@@ -1158,13 +1158,13 @@ static void dp_reo_desc_free(struct dp_soc *soc, void *cb_ctxt,
|
||||
(reo_status->fl_cache_status.header.status !=
|
||||
HAL_REO_CMD_DRAIN)) {
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: Rx tid HW desc flush failed(%d): tid %d\n",
|
||||
"%s: Rx tid HW desc flush failed(%d): tid %d",
|
||||
__func__,
|
||||
reo_status->rx_queue_status.header.status,
|
||||
freedesc->rx_tid.tid);
|
||||
}
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
|
||||
"%s: hw_qdesc_paddr: %pK, tid:%d\n", __func__,
|
||||
"%s: hw_qdesc_paddr: %pK, tid:%d", __func__,
|
||||
(void *)(rx_tid->hw_qdesc_paddr), rx_tid->tid);
|
||||
qdf_mem_unmap_nbytes_single(soc->osdev,
|
||||
rx_tid->hw_qdesc_paddr,
|
||||
@@ -1255,7 +1255,7 @@ try_desc_alloc:
|
||||
|
||||
if (!rx_tid->hw_qdesc_vaddr_unaligned) {
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: Rx tid HW desc alloc failed: tid %d\n",
|
||||
"%s: Rx tid HW desc alloc failed: tid %d",
|
||||
__func__, tid);
|
||||
return QDF_STATUS_E_NOMEM;
|
||||
}
|
||||
@@ -1272,7 +1272,7 @@ try_desc_alloc:
|
||||
|
||||
if (!rx_tid->hw_qdesc_vaddr_unaligned) {
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: Rx tid HW desc alloc failed: tid %d\n",
|
||||
"%s: Rx tid HW desc alloc failed: tid %d",
|
||||
__func__, tid);
|
||||
return QDF_STATUS_E_NOMEM;
|
||||
}
|
||||
@@ -1282,7 +1282,7 @@ try_desc_alloc:
|
||||
hw_qdesc_align);
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
|
||||
"%s: Total Size %d Aligned Addr %pK\n",
|
||||
"%s: Total Size %d Aligned Addr %pK",
|
||||
__func__, rx_tid->hw_qdesc_alloc_size,
|
||||
hw_qdesc_vaddr);
|
||||
|
||||
@@ -1326,7 +1326,7 @@ try_desc_alloc:
|
||||
goto try_desc_alloc;
|
||||
else {
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: Rx tid HW desc alloc failed (lowmem): tid %d\n",
|
||||
"%s: Rx tid HW desc alloc failed (lowmem): tid %d",
|
||||
__func__, tid);
|
||||
return QDF_STATUS_E_NOMEM;
|
||||
}
|
||||
@@ -1370,14 +1370,14 @@ static void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
|
||||
HAL_REO_CMD_SUCCESS) {
|
||||
/* Should not happen normally. Just print error for now */
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: Rx tid HW desc deletion failed(%d): tid %d\n",
|
||||
"%s: Rx tid HW desc deletion failed(%d): tid %d",
|
||||
__func__,
|
||||
reo_status->rx_queue_status.header.status,
|
||||
freedesc->rx_tid.tid);
|
||||
}
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
|
||||
"%s: rx_tid: %d status: %d\n", __func__,
|
||||
"%s: rx_tid: %d status: %d", __func__,
|
||||
freedesc->rx_tid.tid,
|
||||
reo_status->rx_queue_status.header.status);
|
||||
|
||||
@@ -1420,7 +1420,7 @@ static void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
|
||||
QDF_TRACE(QDF_MODULE_ID_DP,
|
||||
QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: fail to send CMD_CACHE_FLUSH:"
|
||||
"tid %d desc %pK\n", __func__,
|
||||
"tid %d desc %pK", __func__,
|
||||
rx_tid->tid,
|
||||
(void *)(rx_tid->hw_qdesc_paddr));
|
||||
}
|
||||
@@ -1447,7 +1447,7 @@ static void dp_rx_tid_delete_cb(struct dp_soc *soc, void *cb_ctxt,
|
||||
* Here invoke desc_free function directly to do clean up.
|
||||
*/
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: fail to send REO cmd to flush cache: tid %d\n",
|
||||
"%s: fail to send REO cmd to flush cache: tid %d",
|
||||
__func__, rx_tid->tid);
|
||||
qdf_mem_zero(&reo_status, sizeof(reo_status));
|
||||
reo_status.fl_cache_status.header.status = 0;
|
||||
@@ -1474,7 +1474,7 @@ static int dp_rx_tid_delete_wifi3(struct dp_peer *peer, int tid)
|
||||
|
||||
if (!freedesc) {
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: malloc failed for freedesc: tid %d\n",
|
||||
"%s: malloc failed for freedesc: tid %d",
|
||||
__func__, tid);
|
||||
return -ENOMEM;
|
||||
}
|
||||
@@ -1507,7 +1507,7 @@ static void dp_peer_setup_remaining_tids(struct dp_peer *peer)
|
||||
for (tid = 1; tid < DP_MAX_TIDS-1; tid++) {
|
||||
dp_rx_tid_setup_wifi3(peer, tid, 1, 0);
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
|
||||
"Setting up TID %d for peer %pK peer->local_id %d\n",
|
||||
"Setting up TID %d for peer %pK peer->local_id %d",
|
||||
tid, peer, peer->local_id);
|
||||
}
|
||||
}
|
||||
@@ -1800,7 +1800,7 @@ void dp_rx_discard(struct dp_vdev *vdev, struct dp_peer *peer, unsigned tid,
|
||||
msdu_list = qdf_nbuf_next(msdu_list);
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
|
||||
"discard rx %pK from partly-deleted peer %pK "
|
||||
"(%02x:%02x:%02x:%02x:%02x:%02x)\n",
|
||||
"(%02x:%02x:%02x:%02x:%02x:%02x)",
|
||||
msdu, peer,
|
||||
peer->mac_addr.raw[0], peer->mac_addr.raw[1],
|
||||
peer->mac_addr.raw[2], peer->mac_addr.raw[3],
|
||||
@@ -1907,7 +1907,7 @@ dp_set_pn_check_wifi3(struct cdp_vdev *vdev_handle, struct cdp_peer *peer_handle
|
||||
dp_rx_tid_update_cb, rx_tid);
|
||||
} else {
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
|
||||
"PN Check not setup for TID :%d \n", i);
|
||||
"PN Check not setup for TID :%d ", i);
|
||||
}
|
||||
qdf_spin_unlock_bh(&rx_tid->tid_lock);
|
||||
}
|
||||
@@ -1926,13 +1926,13 @@ dp_rx_sec_ind_handler(void *soc_handle, uint16_t peer_id,
|
||||
peer = dp_peer_find_by_id(soc, peer_id);
|
||||
if (!peer) {
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"Couldn't find peer from ID %d - skipping security inits\n",
|
||||
"Couldn't find peer from ID %d - skipping security inits",
|
||||
peer_id);
|
||||
return;
|
||||
}
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
|
||||
"sec spec for peer %pK (%02x:%02x:%02x:%02x:%02x:%02x): "
|
||||
"%s key of type %d\n",
|
||||
"%s key of type %d",
|
||||
peer,
|
||||
peer->mac_addr.raw[0], peer->mac_addr.raw[1],
|
||||
peer->mac_addr.raw[2], peer->mac_addr.raw[3],
|
||||
|
@@ -124,7 +124,7 @@ dp_get_vdev_from_soc_vdev_id_wifi3(struct dp_soc *soc,
|
||||
if (vdev->vdev_id == vdev_id) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP,
|
||||
QDF_TRACE_LEVEL_INFO,
|
||||
FL("Found vdev 0x%pK on pdev %d\n"),
|
||||
FL("Found vdev 0x%pK on pdev %d"),
|
||||
vdev, i);
|
||||
return vdev;
|
||||
}
|
||||
|
@@ -54,12 +54,12 @@ QDF_STATUS dp_reo_send_cmd(struct dp_soc *soc, enum hal_reo_cmd_type type,
|
||||
break;
|
||||
default:
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: Invalid REO command type\n", __func__);
|
||||
"%s: Invalid REO command type", __func__);
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
};
|
||||
|
||||
if (num < 0) {
|
||||
qdf_print("%s: Error with sending REO command\n", __func__);
|
||||
qdf_print("%s: Error with sending REO command", __func__);
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
@@ -67,7 +67,7 @@ QDF_STATUS dp_reo_send_cmd(struct dp_soc *soc, enum hal_reo_cmd_type type,
|
||||
reo_cmd = qdf_mem_malloc(sizeof(*reo_cmd));
|
||||
if (!reo_cmd) {
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: alloc failed for REO cmd:%d!!\n",
|
||||
"%s: alloc failed for REO cmd:%d!!",
|
||||
__func__, type);
|
||||
return QDF_STATUS_E_NOMEM;
|
||||
}
|
||||
@@ -140,7 +140,7 @@ void dp_reo_status_ring_handler(struct dp_soc *soc)
|
||||
break;
|
||||
default:
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
|
||||
"%s, no handler for TLV:%d\n", __func__, tlv);
|
||||
"%s, no handler for TLV:%d", __func__, tlv);
|
||||
goto next;
|
||||
} /* switch */
|
||||
|
||||
|
@@ -790,7 +790,7 @@ void dp_rx_process_invalid_peer_wrapper(struct dp_soc *soc,
|
||||
static void dp_rx_print_lro_info(uint8_t *rx_tlv)
|
||||
{
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
|
||||
FL("----------------------RX DESC LRO----------------------\n"));
|
||||
FL("----------------------RX DESC LRO----------------------"));
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
|
||||
FL("lro_eligible 0x%x"), HAL_RX_TLV_GET_LRO_ELIGIBLE(rx_tlv));
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
|
||||
@@ -810,7 +810,7 @@ static void dp_rx_print_lro_info(uint8_t *rx_tlv)
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
|
||||
FL("toeplitz 0x%x"), HAL_RX_TLV_GET_FLOW_ID_TOEPLITZ(rx_tlv));
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
|
||||
FL("---------------------------------------------------------\n"));
|
||||
FL("---------------------------------------------------------"));
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -1581,7 +1581,7 @@ done:
|
||||
* begins.
|
||||
*/
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
|
||||
FL("rxhash: flow id toeplitz: 0x%x\n"),
|
||||
FL("rxhash: flow id toeplitz: 0x%x"),
|
||||
hal_rx_msdu_start_toeplitz_get(rx_tlv_hdr));
|
||||
|
||||
dp_rx_msdu_stats_update(soc, nbuf, rx_tlv_hdr, peer, ring_id);
|
||||
@@ -1702,7 +1702,7 @@ dp_rx_pdev_attach(struct dp_pdev *pdev)
|
||||
struct rx_desc_pool *rx_desc_pool;
|
||||
|
||||
if (wlan_cfg_get_dp_pdev_nss_enabled(pdev->wlan_cfg_ctx)) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
|
||||
"nss-wifi<4> skip Rx refil %d", pdev_id);
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
@@ -540,7 +540,7 @@ static inline int check_x86_paddr(struct dp_soc *dp_soc, qdf_nbuf_t *rx_netbuf,
|
||||
return QDF_STATUS_SUCCESS;
|
||||
else {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
|
||||
"phy addr %pK exceeded 0x50000000 trying again\n",
|
||||
"phy addr %pK exceeded 0x50000000 trying again",
|
||||
paddr);
|
||||
|
||||
nbuf_retry++;
|
||||
|
@@ -126,7 +126,7 @@ void dp_rx_reorder_flush_frag(struct dp_peer *peer,
|
||||
|
||||
if (!peer) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: NULL peer\n", __func__);
|
||||
"%s: NULL peer", __func__);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -138,7 +138,7 @@ void dp_rx_reorder_flush_frag(struct dp_peer *peer,
|
||||
HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
|
||||
QDF_STATUS_SUCCESS)
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: Failed to return link desc\n",
|
||||
"%s: Failed to return link desc",
|
||||
__func__);
|
||||
}
|
||||
|
||||
@@ -735,7 +735,7 @@ static QDF_STATUS dp_rx_defrag_tkip_demic(const uint8_t *key,
|
||||
pktlen += (qdf_nbuf_len(next) - hdrlen);
|
||||
prev = next;
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
|
||||
"%s pktlen %ld\n", __func__,
|
||||
"%s pktlen %ld", __func__,
|
||||
qdf_nbuf_len(next) - hdrlen);
|
||||
next = qdf_nbuf_next(next);
|
||||
}
|
||||
@@ -778,7 +778,7 @@ static void dp_rx_frag_pull_hdr(qdf_nbuf_t nbuf, uint16_t hdrsize)
|
||||
RX_PKT_TLVS_LEN + hdrsize);
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
|
||||
"%s: final pktlen %d .11len %d\n",
|
||||
"%s: final pktlen %d .11len %d",
|
||||
__func__,
|
||||
(uint32_t)qdf_nbuf_len(nbuf), hdrsize);
|
||||
}
|
||||
@@ -811,7 +811,7 @@ dp_rx_construct_fraglist(struct dp_peer *peer,
|
||||
qdf_nbuf_set_next(head, NULL);
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
|
||||
"%s: head len %d ext len %d data len %d \n",
|
||||
"%s: head len %d ext len %d data len %d ",
|
||||
__func__,
|
||||
(uint32_t)qdf_nbuf_len(head),
|
||||
(uint32_t)qdf_nbuf_len(rx_nbuf),
|
||||
@@ -872,7 +872,7 @@ dp_rx_defrag_nwifi_to_8023(qdf_nbuf_t nbuf, uint16_t hdrsize)
|
||||
|
||||
if (rx_desc_info == NULL) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: Memory alloc failed ! \n", __func__);
|
||||
"%s: Memory alloc failed ! ", __func__);
|
||||
QDF_ASSERT(0);
|
||||
return;
|
||||
}
|
||||
@@ -1031,7 +1031,7 @@ dp_rx_defrag_nwifi_to_8023(qdf_nbuf_t nbuf, uint16_t hdrsize)
|
||||
|
||||
if (qdf_unlikely(ret == QDF_STATUS_E_FAILURE)) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: nbuf map failed !\n", __func__);
|
||||
"%s: nbuf map failed !", __func__);
|
||||
qdf_nbuf_free(head);
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
@@ -1042,7 +1042,7 @@ dp_rx_defrag_nwifi_to_8023(qdf_nbuf_t nbuf, uint16_t hdrsize)
|
||||
|
||||
if (ret == QDF_STATUS_E_FAILURE) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: x86 check failed !\n", __func__);
|
||||
"%s: x86 check failed !", __func__);
|
||||
return QDF_STATUS_E_FAILURE;
|
||||
}
|
||||
|
||||
@@ -1112,7 +1112,7 @@ dp_rx_defrag_nwifi_to_8023(qdf_nbuf_t nbuf, uint16_t hdrsize)
|
||||
hal_srng_access_end(soc->hal_soc, hal_srng);
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
|
||||
"%s: reinjection done !\n", __func__);
|
||||
"%s: reinjection done !", __func__);
|
||||
return QDF_STATUS_SUCCESS;
|
||||
}
|
||||
|
||||
@@ -1155,7 +1155,7 @@ static QDF_STATUS dp_rx_defrag(struct dp_peer *peer, unsigned tid,
|
||||
cur = frag_list_head;
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
|
||||
"%s: index %d Security type: %d\n", __func__,
|
||||
"%s: index %d Security type: %d", __func__,
|
||||
index, peer->security[index].sec_type);
|
||||
|
||||
switch (peer->security[index].sec_type) {
|
||||
@@ -1250,7 +1250,7 @@ static QDF_STATUS dp_rx_defrag(struct dp_peer *peer, unsigned tid,
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX,
|
||||
QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: TKIP demic failed status %d\n",
|
||||
"%s: TKIP demic failed status %d",
|
||||
__func__, status);
|
||||
|
||||
return QDF_STATUS_E_DEFRAG_ERROR;
|
||||
@@ -1317,7 +1317,7 @@ static QDF_STATUS dp_rx_defrag_save_info_from_ring_desc(void *ring_desc,
|
||||
|
||||
if (dst_ring_desc == NULL) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: Memory alloc failed !\n", __func__);
|
||||
"%s: Memory alloc failed !", __func__);
|
||||
QDF_ASSERT(0);
|
||||
return QDF_STATUS_E_NOMEM;
|
||||
}
|
||||
@@ -1499,7 +1499,7 @@ static QDF_STATUS dp_rx_defrag_store_fragment(struct dp_soc *soc,
|
||||
|
||||
if (status != QDF_STATUS_SUCCESS) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: Unable to store ring desc !\n", __func__);
|
||||
"%s: Unable to store ring desc !", __func__);
|
||||
goto end;
|
||||
}
|
||||
} else {
|
||||
@@ -1511,7 +1511,7 @@ static QDF_STATUS dp_rx_defrag_store_fragment(struct dp_soc *soc,
|
||||
HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
|
||||
QDF_STATUS_SUCCESS)
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: Failed to return link desc\n",
|
||||
"%s: Failed to return link desc",
|
||||
__func__);
|
||||
|
||||
}
|
||||
@@ -1551,7 +1551,7 @@ static QDF_STATUS dp_rx_defrag_store_fragment(struct dp_soc *soc,
|
||||
HAL_BM_ACTION_PUT_IN_IDLE_LIST) !=
|
||||
QDF_STATUS_SUCCESS)
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: Failed to return link desc\n",
|
||||
"%s: Failed to return link desc",
|
||||
__func__);
|
||||
dp_rx_defrag_cleanup(peer, tid);
|
||||
goto end;
|
||||
@@ -1698,7 +1698,7 @@ QDF_STATUS dp_rx_defrag_add_last_frag(struct dp_soc *soc,
|
||||
if (rx_reorder_array_elem->head &&
|
||||
rxseq != rx_tid->curr_seq_num) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: No list found for TID %d Seq# %d\n",
|
||||
"%s: No list found for TID %d Seq# %d",
|
||||
__func__, tid, rxseq);
|
||||
qdf_nbuf_free(nbuf);
|
||||
goto fail;
|
||||
@@ -1715,7 +1715,7 @@ QDF_STATUS dp_rx_defrag_add_last_frag(struct dp_soc *soc,
|
||||
|
||||
if (QDF_IS_STATUS_ERROR(status)) {
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s Fragment insert failed\n", __func__);
|
||||
"%s Fragment insert failed", __func__);
|
||||
|
||||
goto fail;
|
||||
}
|
||||
@@ -1740,7 +1740,7 @@ QDF_STATUS dp_rx_defrag_add_last_frag(struct dp_soc *soc,
|
||||
|
||||
if (QDF_IS_STATUS_ERROR(status)) {
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s Fragment processing failed\n", __func__);
|
||||
"%s Fragment processing failed", __func__);
|
||||
|
||||
dp_rx_return_head_frag_desc(peer, tid);
|
||||
dp_rx_defrag_cleanup(peer, tid);
|
||||
@@ -1755,11 +1755,11 @@ QDF_STATUS dp_rx_defrag_add_last_frag(struct dp_soc *soc,
|
||||
rx_reorder_array_elem->head = NULL;
|
||||
rx_reorder_array_elem->tail = NULL;
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
|
||||
"%s: Frag seq successfully reinjected\n",
|
||||
"%s: Frag seq successfully reinjected",
|
||||
__func__);
|
||||
} else {
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: Frag seq reinjection failed\n",
|
||||
"%s: Frag seq reinjection failed",
|
||||
__func__);
|
||||
dp_rx_return_head_frag_desc(peer, tid);
|
||||
}
|
||||
|
@@ -40,7 +40,7 @@ QDF_STATUS dp_rx_desc_pool_alloc(struct dp_soc *soc, uint32_t pool_id,
|
||||
|
||||
if (!(rx_desc_pool->array)) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
|
||||
"%s: RX Desc Pool[%d] allocation failed\n",
|
||||
"%s: RX Desc Pool[%d] allocation failed",
|
||||
__func__, pool_id);
|
||||
|
||||
return QDF_STATUS_E_NOMEM;
|
||||
@@ -158,7 +158,7 @@ void dp_rx_add_desc_list_to_free_list(struct dp_soc *soc,
|
||||
|
||||
temp_list = rx_desc_pool->freelist;
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
|
||||
"temp_list: %pK, *local_desc_list: %pK, *tail: %pK (*tail)->next: %pK\n",
|
||||
"temp_list: %pK, *local_desc_list: %pK, *tail: %pK (*tail)->next: %pK",
|
||||
temp_list, *local_desc_list, *tail, (*tail)->next);
|
||||
rx_desc_pool->freelist = *local_desc_list;
|
||||
(*tail)->next = temp_list;
|
||||
|
@@ -352,7 +352,7 @@ dp_rx_pn_error_handle(struct dp_soc *soc, void *ring_desc,
|
||||
*/
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"discard rx due to PN error for peer %pK "
|
||||
"(%02x:%02x:%02x:%02x:%02x:%02x)\n",
|
||||
"(%02x:%02x:%02x:%02x:%02x:%02x)",
|
||||
peer,
|
||||
peer->mac_addr.raw[0], peer->mac_addr.raw[1],
|
||||
peer->mac_addr.raw[2], peer->mac_addr.raw[3],
|
||||
@@ -850,7 +850,7 @@ dp_rx_process_mic_error(struct dp_soc *soc,
|
||||
tid, rx_seq, nbuf);
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: Frag pkt seq# %d frag# %d consumed status %d !\n",
|
||||
"%s: Frag pkt seq# %d frag# %d consumed status %d !",
|
||||
__func__, rx_seq, fragno, status);
|
||||
return;
|
||||
}
|
||||
@@ -1338,7 +1338,7 @@ dp_rx_err_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_DP,
|
||||
QDF_TRACE_LEVEL_DEBUG,
|
||||
"[%s][%d] msdu_nbuf=%pK \n",
|
||||
"[%s][%d] msdu_nbuf=%pK ",
|
||||
__func__, __LINE__, msdu);
|
||||
|
||||
qdf_nbuf_free(msdu);
|
||||
@@ -1403,7 +1403,7 @@ dp_rxdma_err_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota)
|
||||
if (!err_dst_srng) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s %d : HAL Monitor Destination Ring Init \
|
||||
Failed -- %pK\n",
|
||||
Failed -- %pK",
|
||||
__func__, __LINE__, err_dst_srng);
|
||||
return 0;
|
||||
}
|
||||
@@ -1415,7 +1415,7 @@ dp_rxdma_err_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota)
|
||||
if (qdf_unlikely(hal_srng_access_start(hal_soc, err_dst_srng))) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s %d : HAL Monitor Destination Ring Init \
|
||||
Failed -- %pK\n",
|
||||
Failed -- %pK",
|
||||
__func__, __LINE__, err_dst_srng);
|
||||
return 0;
|
||||
}
|
||||
|
@@ -66,7 +66,7 @@ dp_rx_mon_link_desc_return(struct dp_pdev *dp_pdev,
|
||||
*/
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s %d : \
|
||||
HAL RING Access For WBM Release SRNG Failed -- %pK\n",
|
||||
HAL RING Access For WBM Release SRNG Failed -- %pK",
|
||||
__func__, __LINE__, hal_srng);
|
||||
goto done;
|
||||
}
|
||||
@@ -80,7 +80,7 @@ dp_rx_mon_link_desc_return(struct dp_pdev *dp_pdev,
|
||||
status = QDF_STATUS_SUCCESS;
|
||||
} else {
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s %d -- Monitor Link Desc WBM Release Ring Full\n",
|
||||
"%s %d -- Monitor Link Desc WBM Release Ring Full",
|
||||
__func__, __LINE__);
|
||||
}
|
||||
done:
|
||||
@@ -211,7 +211,7 @@ dp_rx_mon_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
|
||||
QDF_TRACE(QDF_MODULE_ID_DP,
|
||||
QDF_TRACE_LEVEL_DEBUG,
|
||||
"[%s] i=%d, ppdu_id=%x, "
|
||||
"last_ppdu_id=%x num_msdus = %u\n",
|
||||
"last_ppdu_id=%x num_msdus = %u",
|
||||
__func__, i, *ppdu_id,
|
||||
last_ppdu_id, num_msdus);
|
||||
|
||||
@@ -222,14 +222,14 @@ dp_rx_mon_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_DP,
|
||||
QDF_TRACE_LEVEL_DEBUG,
|
||||
"[%s] msdu_ppdu_id=%x\n",
|
||||
"[%s] msdu_ppdu_id=%x",
|
||||
__func__, msdu_ppdu_id);
|
||||
|
||||
if (*ppdu_id > msdu_ppdu_id)
|
||||
QDF_TRACE(QDF_MODULE_ID_DP,
|
||||
QDF_TRACE_LEVEL_DEBUG,
|
||||
"[%s][%d] ppdu_id=%d "
|
||||
"msdu_ppdu_id=%d\n",
|
||||
"msdu_ppdu_id=%d",
|
||||
__func__, __LINE__, *ppdu_id,
|
||||
msdu_ppdu_id);
|
||||
|
||||
@@ -296,7 +296,7 @@ dp_rx_mon_mpdu_pop(struct dp_soc *soc, uint32_t mac_id,
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_DP,
|
||||
QDF_TRACE_LEVEL_ERROR,
|
||||
"%s:%d: Pkt Desc\n",
|
||||
"%s:%d: Pkt Desc",
|
||||
__func__, __LINE__);
|
||||
|
||||
QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP,
|
||||
@@ -333,7 +333,7 @@ next_msdu:
|
||||
if (dp_rx_mon_link_desc_return(dp_pdev, p_last_buf_addr_info,
|
||||
mac_id) != QDF_STATUS_SUCCESS)
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"dp_rx_mon_link_desc_return failed\n");
|
||||
"dp_rx_mon_link_desc_return failed");
|
||||
|
||||
p_last_buf_addr_info = p_buf_addr_info;
|
||||
|
||||
@@ -817,7 +817,7 @@ void dp_rx_mon_dest_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota)
|
||||
|
||||
if (!mon_dst_srng || !hal_srng_initialized(mon_dst_srng)) {
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s %d : HAL Monitor Destination Ring Init Failed -- %pK\n",
|
||||
"%s %d : HAL Monitor Destination Ring Init Failed -- %pK",
|
||||
__func__, __LINE__, mon_dst_srng);
|
||||
return;
|
||||
}
|
||||
@@ -834,7 +834,7 @@ void dp_rx_mon_dest_process(struct dp_soc *soc, uint32_t mac_id, uint32_t quota)
|
||||
|
||||
if (qdf_unlikely(hal_srng_access_start(hal_soc, mon_dst_srng))) {
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s %d : HAL Monitor Destination Ring access Failed -- %pK\n",
|
||||
"%s %d : HAL Monitor Destination Ring access Failed -- %pK",
|
||||
__func__, __LINE__, mon_dst_srng);
|
||||
return;
|
||||
}
|
||||
@@ -918,7 +918,7 @@ dp_rx_pdev_mon_buf_attach(struct dp_pdev *pdev, int mac_id) {
|
||||
rxdma_entries*3, rx_desc_pool);
|
||||
if (!QDF_IS_STATUS_SUCCESS(status)) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: dp_rx_desc_pool_alloc() failed \n", __func__);
|
||||
"%s: dp_rx_desc_pool_alloc() failed ", __func__);
|
||||
return status;
|
||||
}
|
||||
|
||||
@@ -987,7 +987,7 @@ QDF_STATUS dp_mon_link_desc_pool_setup(struct dp_soc *soc, uint32_t mac_id)
|
||||
total_link_descs <<= 1;
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
|
||||
"%s: total_link_descs: %u, link_desc_size: %d\n",
|
||||
"%s: total_link_descs: %u, link_desc_size: %d",
|
||||
__func__, total_link_descs, link_desc_size);
|
||||
|
||||
total_mem_size = total_link_descs * link_desc_size;
|
||||
@@ -1006,7 +1006,7 @@ QDF_STATUS dp_mon_link_desc_pool_setup(struct dp_soc *soc, uint32_t mac_id)
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
|
||||
"%s: total_mem_size: %d, num_link_desc_banks: %u, \
|
||||
max_alloc_size: %d last_bank_size: %d\n",
|
||||
max_alloc_size: %d last_bank_size: %d",
|
||||
__func__, total_mem_size, num_link_desc_banks, max_alloc_size,
|
||||
last_bank_size);
|
||||
|
||||
@@ -1020,7 +1020,7 @@ QDF_STATUS dp_mon_link_desc_pool_setup(struct dp_soc *soc, uint32_t mac_id)
|
||||
if (!dp_pdev->link_desc_banks[mac_for_pdev][i].
|
||||
base_vaddr_unaligned) {
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: Link desc memory allocation failed\n",
|
||||
"%s: Link desc memory allocation failed",
|
||||
__func__);
|
||||
goto fail;
|
||||
}
|
||||
@@ -1059,7 +1059,7 @@ QDF_STATUS dp_mon_link_desc_pool_setup(struct dp_soc *soc, uint32_t mac_id)
|
||||
if (dp_pdev->link_desc_banks[mac_for_pdev][i].
|
||||
base_vaddr_unaligned == NULL) {
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: allocation failed for mon link desc pool\n",
|
||||
"%s: allocation failed for mon link desc pool",
|
||||
__func__);
|
||||
goto fail;
|
||||
}
|
||||
@@ -1136,7 +1136,7 @@ QDF_STATUS dp_mon_link_desc_pool_setup(struct dp_soc *soc, uint32_t mac_id)
|
||||
}
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_WARN,
|
||||
"%s: successfully replenished %d buffer\n",
|
||||
"%s: successfully replenished %d buffer",
|
||||
__func__, num_replenish_buf);
|
||||
|
||||
return QDF_STATUS_SUCCESS;
|
||||
@@ -1202,7 +1202,7 @@ dp_rx_pdev_mon_attach(struct dp_pdev *pdev) {
|
||||
int mac_id;
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_WARN,
|
||||
"%s: pdev attach id=%d\n", __func__, pdev_id);
|
||||
"%s: pdev attach id=%d", __func__, pdev_id);
|
||||
|
||||
for (mac_id = 0; mac_id < NUM_RXDMA_RINGS_PER_PDEV; mac_id++) {
|
||||
int mac_for_pdev = dp_get_mac_id_for_pdev(mac_id, pdev_id);
|
||||
@@ -1210,7 +1210,7 @@ dp_rx_pdev_mon_attach(struct dp_pdev *pdev) {
|
||||
status = dp_rx_pdev_mon_buf_attach(pdev, mac_for_pdev);
|
||||
if (!QDF_IS_STATUS_SUCCESS(status)) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: dp_rx_pdev_mon_buf_attach() failed\n",
|
||||
"%s: dp_rx_pdev_mon_buf_attach() failed",
|
||||
__func__);
|
||||
return status;
|
||||
}
|
||||
@@ -1218,7 +1218,7 @@ dp_rx_pdev_mon_attach(struct dp_pdev *pdev) {
|
||||
status = dp_rx_pdev_mon_status_attach(pdev, mac_for_pdev);
|
||||
if (!QDF_IS_STATUS_SUCCESS(status)) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: dp_rx_pdev_mon_status_attach() failed\n",
|
||||
"%s: dp_rx_pdev_mon_status_attach() failed",
|
||||
__func__);
|
||||
return status;
|
||||
}
|
||||
@@ -1226,7 +1226,7 @@ dp_rx_pdev_mon_attach(struct dp_pdev *pdev) {
|
||||
status = dp_mon_link_desc_pool_setup(soc, mac_for_pdev);
|
||||
if (!QDF_IS_STATUS_SUCCESS(status)) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: dp_mon_link_desc_pool_setup() failed\n",
|
||||
"%s: dp_mon_link_desc_pool_setup() failed",
|
||||
__func__);
|
||||
return status;
|
||||
}
|
||||
|
@@ -456,7 +456,7 @@ dp_rx_mon_status_srng_process(struct dp_soc *soc, uint32_t mac_id,
|
||||
if (!mon_status_srng || !hal_srng_initialized(mon_status_srng)) {
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s %d : HAL Monitor Status Ring Init Failed -- %pK\n",
|
||||
"%s %d : HAL Monitor Status Ring Init Failed -- %pK",
|
||||
__func__, __LINE__, mon_status_srng);
|
||||
return work_done;
|
||||
}
|
||||
@@ -700,7 +700,7 @@ QDF_STATUS dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc,
|
||||
qdf_assert(rxdma_srng);
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
|
||||
"[%s][%d] requested %d buffers for replenish\n",
|
||||
"[%s][%d] requested %d buffers for replenish",
|
||||
__func__, __LINE__, num_req_buffers);
|
||||
|
||||
/*
|
||||
@@ -716,13 +716,13 @@ QDF_STATUS dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc,
|
||||
|
||||
if (!num_alloc_desc) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"[%s][%d] no free rx_descs in freelist\n",
|
||||
"[%s][%d] no free rx_descs in freelist",
|
||||
__func__, __LINE__);
|
||||
return QDF_STATUS_E_NOMEM;
|
||||
}
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
|
||||
"[%s][%d] %d rx desc allocated\n", __func__, __LINE__,
|
||||
"[%s][%d] %d rx desc allocated", __func__, __LINE__,
|
||||
num_alloc_desc);
|
||||
|
||||
num_req_buffers = num_alloc_desc;
|
||||
@@ -733,7 +733,7 @@ QDF_STATUS dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc,
|
||||
rxdma_srng, sync_hw_ptr);
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
|
||||
"[%s][%d] no of available entries in rxdma ring: %d\n",
|
||||
"[%s][%d] no of available entries in rxdma ring: %d",
|
||||
__func__, __LINE__, num_entries_avail);
|
||||
|
||||
if (num_entries_avail < num_req_buffers) {
|
||||
@@ -765,7 +765,7 @@ QDF_STATUS dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc,
|
||||
|
||||
if (qdf_unlikely(rxdma_ring_entry == NULL)) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"[%s][%d] rxdma_ring_entry is NULL, count - %d\n",
|
||||
"[%s][%d] rxdma_ring_entry is NULL, count - %d",
|
||||
__func__, __LINE__, count);
|
||||
qdf_nbuf_unmap_single(dp_soc->osdev, rx_netbuf,
|
||||
QDF_DMA_BIDIRECTIONAL);
|
||||
@@ -782,7 +782,7 @@ QDF_STATUS dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc,
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
|
||||
"[%s][%d] rx_desc=%pK, cookie=%d, nbuf=%pK, \
|
||||
paddr=%pK\n",
|
||||
paddr=%pK",
|
||||
__func__, __LINE__, &(*desc_list)->rx_desc,
|
||||
(*desc_list)->rx_desc.cookie, rx_netbuf,
|
||||
(void *)paddr);
|
||||
@@ -793,13 +793,13 @@ QDF_STATUS dp_rx_mon_status_buffers_replenish(struct dp_soc *dp_soc,
|
||||
hal_srng_access_end(dp_soc->hal_soc, rxdma_srng);
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
|
||||
"successfully replenished %d buffers\n", num_req_buffers);
|
||||
"successfully replenished %d buffers", num_req_buffers);
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
|
||||
"%d rx desc added back to free list\n", num_desc_to_free);
|
||||
"%d rx desc added back to free list", num_desc_to_free);
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
|
||||
"[%s][%d] desc_list=%pK, tail=%pK rx_desc=%pK, cookie=%d\n",
|
||||
"[%s][%d] desc_list=%pK, tail=%pK rx_desc=%pK, cookie=%d",
|
||||
__func__, __LINE__, desc_list, tail, &(*desc_list)->rx_desc,
|
||||
(*desc_list)->rx_desc.cookie);
|
||||
|
||||
@@ -843,19 +843,19 @@ dp_rx_pdev_mon_status_attach(struct dp_pdev *pdev, int ring_id) {
|
||||
rx_desc_pool = &soc->rx_desc_status[ring_id];
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
|
||||
"%s: Mon RX Status Pool[%d] allocation size=%d\n",
|
||||
"%s: Mon RX Status Pool[%d] allocation size=%d",
|
||||
__func__, ring_id, rxdma_entries);
|
||||
|
||||
status = dp_rx_desc_pool_alloc(soc, ring_id, rxdma_entries+1,
|
||||
rx_desc_pool);
|
||||
if (!QDF_IS_STATUS_SUCCESS(status)) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: dp_rx_desc_pool_alloc() failed \n", __func__);
|
||||
"%s: dp_rx_desc_pool_alloc() failed ", __func__);
|
||||
return status;
|
||||
}
|
||||
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
|
||||
"%s: Mon RX Status Buffers Replenish ring_id=%d\n",
|
||||
"%s: Mon RX Status Buffers Replenish ring_id=%d",
|
||||
__func__, ring_id);
|
||||
|
||||
status = dp_rx_mon_status_buffers_replenish(soc, ring_id, rxdma_srng,
|
||||
@@ -863,7 +863,7 @@ dp_rx_pdev_mon_status_attach(struct dp_pdev *pdev, int ring_id) {
|
||||
HAL_RX_BUF_RBM_SW3_BM);
|
||||
if (!QDF_IS_STATUS_SUCCESS(status)) {
|
||||
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
|
||||
"%s: dp_rx_buffers_replenish() failed \n", __func__);
|
||||
"%s: dp_rx_buffers_replenish() failed ", __func__);
|
||||
return status;
|
||||
}
|
||||
|
||||
|
@@ -66,7 +66,7 @@ static inline void dp_print_stats_string_tlv(uint32_t *tag_buf)

if (!data) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("Output buffer not allocated\n"));
FL("Output buffer not allocated"));
return;
}

@@ -191,7 +191,7 @@ static inline void dp_print_tx_pdev_stats_urrn_tlv_v(uint32_t *tag_buf)

if (!urrn_stats) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("Output buffer not allocated\n"));
FL("Output buffer not allocated"));
return;
}

@@ -223,7 +223,7 @@ static inline void dp_print_tx_pdev_stats_flush_tlv_v(uint32_t *tag_buf)

if (!flush_errs) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("Output buffer not allocated\n"));
FL("Output buffer not allocated"));
return;
}

@@ -257,7 +257,7 @@ static inline void dp_print_tx_pdev_stats_sifs_tlv_v(uint32_t *tag_buf)

if (!sifs_status) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("Output buffer not allocated\n"));
FL("Output buffer not allocated"));
return;
}

@@ -290,7 +290,7 @@ static inline void dp_print_tx_pdev_stats_phy_err_tlv_v(uint32_t *tag_buf)

if (!phy_errs) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("Output buffer not allocated\n"));
FL("Output buffer not allocated"));
return;
}

@@ -322,7 +322,7 @@ static inline void dp_print_hw_stats_intr_misc_tlv(uint32_t *tag_buf)

if (!hw_intr_name) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("Output buffer not allocated\n"));
FL("Output buffer not allocated"));
return;
}

@@ -356,7 +356,7 @@ static inline void dp_print_hw_stats_wd_timeout_tlv(uint32_t *tag_buf)

if (!hw_module_name) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("Output buffer not allocated\n"));
FL("Output buffer not allocated"));
return;
}

@@ -452,7 +452,7 @@ static inline void dp_print_tx_tid_stats_tlv(uint32_t *tag_buf)

if (!tid_name) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("Output buffer not allocated\n"));
FL("Output buffer not allocated"));
return;
}

@@ -509,7 +509,7 @@ static inline void dp_print_tx_tid_stats_v1_tlv(uint32_t *tag_buf)

if (!tid_name) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("Output buffer not allocated\n"));
FL("Output buffer not allocated"));
return;
}

@@ -566,7 +566,7 @@ static inline void dp_print_rx_tid_stats_tlv(uint32_t *tag_buf)

if (!tid_name) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("Output buffer not allocated\n"));
FL("Output buffer not allocated"));
return;
}

@@ -606,7 +606,7 @@ static inline void dp_print_counter_tlv(uint32_t *tag_buf)

if (!counter_name) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("Output buffer not allocated\n"));
FL("Output buffer not allocated"));
return;
}

@@ -693,7 +693,7 @@ static inline void dp_print_tx_peer_rate_stats_tlv(uint32_t *tag_buf)

if (!str_buf) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("Output buffer not allocated\n"));
FL("Output buffer not allocated"));
return;
}

@@ -811,7 +811,7 @@ static inline void dp_print_rx_peer_rate_stats_tlv(uint32_t *tag_buf)

if (!str_buf) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("Output buffer not allocated\n"));
FL("Output buffer not allocated"));
return;
}

@@ -1055,7 +1055,7 @@ static inline void dp_print_tx_hwq_difs_latency_stats_tlv_v(uint32_t *tag_buf)

if (!difs_latency_hist) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("Output buffer not allocated\n"));
FL("Output buffer not allocated"));
return;
}

@@ -1093,7 +1093,7 @@ static inline void dp_print_tx_hwq_cmd_result_stats_tlv_v(uint32_t *tag_buf)

if (!cmd_result) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("Output buffer not allocated\n"));
FL("Output buffer not allocated"));
return;
}

@@ -1126,7 +1126,7 @@ static inline void dp_print_tx_hwq_cmd_stall_stats_tlv_v(uint32_t *tag_buf)

if (!cmd_stall_status) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("Output buffer not allocated\n"));
FL("Output buffer not allocated"));
return;
}

@@ -1160,7 +1160,7 @@ static inline void dp_print_tx_hwq_fes_result_stats_tlv_v(uint32_t *tag_buf)

if (!fes_result) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("Output buffer not allocated\n"));
FL("Output buffer not allocated"));
return;
}

@@ -1424,7 +1424,7 @@ static inline void dp_print_sched_txq_cmd_posted_tlv_v(uint32_t *tag_buf)

if (!sched_cmd_posted) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("Output buffer not allocated\n"));
FL("Output buffer not allocated"));
return;
}

@@ -1458,7 +1458,7 @@ static inline void dp_print_sched_txq_cmd_reaped_tlv_v(uint32_t *tag_buf)

if (!sched_cmd_reaped) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("Output buffer not allocated\n"));
FL("Output buffer not allocated"));
return;
}

@@ -1565,7 +1565,7 @@ static inline void dp_print_tx_tqm_gen_mpdu_stats_tlv_v(uint32_t *tag_buf)

if (!gen_mpdu_end_reason) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("Output buffer not allocated\n"));
FL("Output buffer not allocated"));
return;
}

@@ -1600,7 +1600,7 @@ static inline void dp_print_tx_tqm_list_mpdu_stats_tlv_v(uint32_t *tag_buf)

if (!list_mpdu_end_reason) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("Output buffer not allocated\n"));
FL("Output buffer not allocated"));
return;
}

@@ -1636,7 +1636,7 @@ static inline void dp_print_tx_tqm_list_mpdu_cnt_tlv_v(uint32_t *tag_buf)

if (!list_mpdu_cnt_hist) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("Output buffer not allocated\n"));
FL("Output buffer not allocated"));
return;
}

@@ -2086,7 +2086,7 @@ static inline void dp_print_ring_if_stats_tlv(uint32_t *tag_buf)

if (!wm_hit_count) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("Output buffer not allocated\n"));
FL("Output buffer not allocated"));
return;
}

@@ -2169,7 +2169,7 @@ static inline void dp_print_sfm_client_user_tlv_v(uint32_t *tag_buf)

if (!dwords_used_by_user_n) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("Output buffer not allocated\n"));
FL("Output buffer not allocated"));
return;
}

@@ -2302,7 +2302,7 @@ static inline void dp_print_tx_pdev_rate_stats_tlv(uint32_t *tag_buf)

if (!str_buf) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("Output buffer not allocated\n"));
FL("Output buffer not allocated"));
return;
}

@@ -2431,7 +2431,7 @@ static inline void dp_print_rx_pdev_rate_stats_tlv(uint32_t *tag_buf)

if (!str_buf) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("Output buffer not allocated\n"));
FL("Output buffer not allocated"));
return;
}

@@ -2595,7 +2595,7 @@ static inline void dp_print_rx_soc_fw_refill_ring_empty_tlv_v(uint32_t *tag_buf)

if (!refill_ring_empty_cnt) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("Output buffer not allocated\n"));
FL("Output buffer not allocated"));
return;
}

@@ -2632,7 +2632,7 @@ static inline void dp_print_rx_soc_fw_refill_ring_num_refill_tlv_v(

if (!refill_ring_num_refill) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("Output buffer not allocated\n"));
FL("Output buffer not allocated"));
return;
}

@@ -2788,7 +2788,7 @@ static inline void dp_print_rx_pdev_fw_ring_mpdu_err_tlv_v(uint32_t *tag_buf)

if (!fw_ring_mpdu_err) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("Output buffer not allocated\n"));
FL("Output buffer not allocated"));
return;
}

@@ -2820,7 +2820,7 @@ static inline void dp_print_rx_pdev_fw_mpdu_drop_tlv_v(uint32_t *tag_buf)

if (!fw_mpdu_drop) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("Output buffer not allocated\n"));
FL("Output buffer not allocated"));
return;
}

@@ -279,7 +279,7 @@ static uint8_t dp_tx_prepare_htt_metadata(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
hdr = qdf_nbuf_push_head(nbuf, htt_desc_size_aligned);
if (hdr == NULL) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"Error in filling HTT metadata\n");
"Error in filling HTT metadata");

return 0;
}
@@ -664,7 +664,7 @@ struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
align_pad = ((unsigned long) qdf_nbuf_data(nbuf)) & 0x7;
if (qdf_nbuf_push_head(nbuf, align_pad) == NULL) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"qdf_nbuf_push_head failed\n");
"qdf_nbuf_push_head failed");
goto failure;
}

@@ -682,7 +682,7 @@ struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
QDF_DMA_TO_DEVICE))) {
/* Handle failure */
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"qdf_nbuf_map failed\n");
"qdf_nbuf_map failed");
DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
goto failure;
}
@@ -761,7 +761,7 @@ static struct dp_tx_desc_s *dp_tx_prepare_desc(struct dp_vdev *vdev,
msdu_ext_desc = dp_tx_prepare_ext_desc(vdev, msdu_info, desc_pool_id);
if (!msdu_ext_desc) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"%s Tx Extension Descriptor Alloc Fail\n",
"%s Tx Extension Descriptor Alloc Fail",
__func__);
goto failure;
}
@@ -817,7 +817,7 @@ static qdf_nbuf_t dp_tx_prepare_raw(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, curr_nbuf,
QDF_DMA_TO_DEVICE)) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"%s dma map error \n", __func__);
"%s dma map error ", __func__);
DP_STATS_INC(vdev, tx_i.raw.dma_map_error, 1);
mapped_buf_num = i;
goto error;
@@ -952,7 +952,7 @@ static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,

if (!hal_tx_desc) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"%s TCL ring full ring_id:%d\n", __func__, ring_id);
"%s TCL ring full ring_id:%d", __func__, ring_id);
DP_STATS_INC(soc, tx.tcl_ring_full[ring_id], 1);
DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
return QDF_STATUS_E_RESOURCES;
@@ -1288,7 +1288,7 @@ static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
msdu_info, tx_exc_metadata);
if (!tx_desc) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"%s Tx_desc prepare Fail vdev %pK queue %d\n",
"%s Tx_desc prepare Fail vdev %pK queue %d",
__func__, vdev, tx_q->desc_pool_id);
return nbuf;
}
@@ -1305,7 +1305,7 @@ static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,

if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
"%s %d : HAL RING Access Failed -- %pK\n",
"%s %d : HAL RING Access Failed -- %pK",
__func__, __LINE__, hal_srng);
DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
@@ -1334,7 +1334,7 @@ static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,

if (status != QDF_STATUS_SUCCESS) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"%s Tx_hw_enqueue Fail tx_desc %pK queue %d\n",
"%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
__func__, tx_desc, tx_q->ring_id);
dp_tx_desc_release(tx_desc, tx_q->desc_pool_id);
goto fail_return;
@@ -1385,7 +1385,7 @@ qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,

if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
"%s %d : HAL RING Access Failed -- %pK\n",
"%s %d : HAL RING Access Failed -- %pK",
__func__, __LINE__, hal_srng);
DP_STATS_INC(vdev, tx_i.dropped.ring_full, 1);
return nbuf;
@@ -1447,7 +1447,7 @@ qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,

if (status != QDF_STATUS_SUCCESS) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"%s Tx_hw_enqueue Fail tx_desc %pK queue %d\n",
"%s Tx_hw_enqueue Fail tx_desc %pK queue %d",
__func__, tx_desc, tx_q->ring_id);

if (tx_desc->flags & DP_TX_DESC_FLAG_ME)
@@ -1539,7 +1539,7 @@ static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
if (QDF_STATUS_SUCCESS != qdf_nbuf_map(vdev->osdev, nbuf,
QDF_DMA_TO_DEVICE)) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"dma map error\n");
"dma map error");
DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);

qdf_nbuf_free(nbuf);
@@ -1556,7 +1556,7 @@ static qdf_nbuf_t dp_tx_prepare_sg(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
if (QDF_STATUS_E_FAILURE == qdf_nbuf_frag_map(vdev->osdev,
nbuf, 0, QDF_DMA_TO_DEVICE, cur_frag)) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"frag dma map error\n");
"frag dma map error");
DP_STATS_INC(vdev, tx_i.sg.dma_map_error, 1);
qdf_nbuf_free(nbuf);
return NULL;
@@ -1647,7 +1647,7 @@ qdf_nbuf_t dp_tx_extract_mesh_meta_data(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
remove_meta_hdr:
if (qdf_nbuf_pull_head(nbuf, sizeof(struct meta_hdr_s)) == NULL) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"qdf_nbuf_pull_head failed\n");
"qdf_nbuf_pull_head failed");
qdf_nbuf_free(nbuf);
return NULL;
}
@@ -1659,7 +1659,7 @@ remove_meta_hdr:

QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_HIGH,
"%s , Meta hdr %0x %0x %0x %0x %0x %0x"
" tid %d to_fw %d\n",
" tid %d to_fw %d",
__func__, msdu_info->meta_data[0],
msdu_info->meta_data[1],
msdu_info->meta_data[2],
@@ -1833,7 +1833,7 @@ qdf_nbuf_t dp_tx_send_exception(void *vap_dev, qdf_nbuf_t nbuf,
if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"Ignoring mcast_enhancement_en which is set and sending the mcast packet to the FW\n");
"Ignoring mcast_enhancement_en which is set and sending the mcast packet to the FW");
}
}

@@ -1890,7 +1890,7 @@ qdf_nbuf_t dp_tx_send_mesh(void *vap_dev, qdf_nbuf_t nbuf)
nbuf_mesh = qdf_nbuf_unshare(nbuf);
if (nbuf_mesh == NULL) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"qdf_nbuf_unshare failed\n");
"qdf_nbuf_unshare failed");
return nbuf;
}
nbuf = nbuf_mesh;
@@ -1906,7 +1906,7 @@ qdf_nbuf_t dp_tx_send_mesh(void *vap_dev, qdf_nbuf_t nbuf)
nbuf_clone = qdf_nbuf_clone(nbuf);
if (nbuf_clone == NULL) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"qdf_nbuf_clone failed\n");
"qdf_nbuf_clone failed");
return nbuf;
}
qdf_nbuf_set_tx_ftype(nbuf_clone, CB_FTYPE_MESH_TX_INFO);
@@ -1984,7 +1984,7 @@ qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
&msdu_info);
if (nbuf_mesh == NULL) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"Extracting mesh metadata failed\n");
"Extracting mesh metadata failed");
return nbuf;
}
nbuf = nbuf_mesh;
@@ -2024,7 +2024,7 @@ qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
*/
if (qdf_nbuf_is_tso(nbuf)) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"%s TSO frame %pK\n", __func__, vdev);
"%s TSO frame %pK", __func__, vdev);
DP_STATS_INC_PKT(vdev, tx_i.tso.tso_pkt, 1,
qdf_nbuf_len(nbuf));

@@ -2042,7 +2042,7 @@ qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
nbuf = dp_tx_prepare_sg(vdev, nbuf, &seg_info, &msdu_info);

QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"%s non-TSO SG frame %pK\n", __func__, vdev);
"%s non-TSO SG frame %pK", __func__, vdev);

DP_STATS_INC_PKT(vdev, tx_i.sg.sg_pkt, 1,
qdf_nbuf_len(nbuf));
@@ -2056,7 +2056,7 @@ qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
eh = (struct ether_header *)qdf_nbuf_data(nbuf);
if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"%s Mcast frm for ME %pK\n", __func__, vdev);
"%s Mcast frm for ME %pK", __func__, vdev);

DP_STATS_INC_PKT(vdev,
tx_i.mcast_en.mcast_pkt, 1,
@@ -2076,7 +2076,7 @@ qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
return NULL;

QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"%s Raw frame %pK\n", __func__, vdev);
"%s Raw frame %pK", __func__, vdev);

goto send_multiple;

@@ -2138,7 +2138,7 @@ void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
dp_tx_get_queue(vdev, nbuf, &msdu_info.tx_queue);

QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"%s Tx reinject path\n", __func__);
"%s Tx reinject path", __func__);

DP_STATS_INC_PKT(vdev, tx_i.reinject_pkts, 1,
qdf_nbuf_len(tx_desc->nbuf));
@@ -2285,7 +2285,7 @@ static void dp_tx_inspect_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
struct dp_pdev *pdev = tx_desc->pdev;

QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"%s Tx inspect path\n",
"%s Tx inspect path",
__func__);

qdf_assert(pdev);
@@ -2485,7 +2485,7 @@ void dp_tx_mec_handler(struct dp_vdev *vdev, uint8_t *status)
}

QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"%s Tx MEC Handler\n",
"%s Tx MEC Handler",
__func__);

for (i = 0; i < DP_MAC_ADDR_LEN; i++)
@@ -2553,7 +2553,7 @@ void dp_tx_process_htt_completion(struct dp_tx_desc_s *tx_desc, uint8_t *status)
}
default:
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"%s Invalid HTT tx_status %d\n",
"%s Invalid HTT tx_status %d",
__func__, tx_status);
break;
}
@@ -2577,14 +2577,14 @@ void dp_tx_comp_fill_tx_completion_stats(struct dp_tx_desc_s *tx_desc,
if (!tx_desc->msdu_ext_desc) {
if (qdf_nbuf_pull_head(netbuf, tx_desc->pkt_offset) == NULL) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"netbuf %pK offset %d\n",
"netbuf %pK offset %d",
netbuf, tx_desc->pkt_offset);
return;
}
}
if (qdf_nbuf_push_head(netbuf, sizeof(struct meta_hdr_s)) == NULL) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"netbuf %pK offset %d\n", netbuf,
"netbuf %pK offset %d", netbuf,
sizeof(struct meta_hdr_s));
return;
}
@@ -2827,7 +2827,7 @@ static inline void dp_tx_comp_process_tx_status(struct dp_tx_desc_s *tx_desc,
"ppdu_id = %d \n"
"transmit_cnt = %d \n"
"tid = %d \n"
"peer_id = %d \n",
"peer_id = %d ",
ts.ack_frame_rssi, ts.first_msdu, ts.last_msdu,
ts.msdu_part_of_amsdu, ts.valid, ts.bw,
ts.pkt_type, ts.stbc, ts.ldpc, ts.sgi,
@@ -2964,7 +2964,7 @@ uint32_t dp_tx_comp_handler(struct dp_soc *soc, void *hal_srng, uint32_t quota)

if (qdf_unlikely(hal_srng_access_start(soc->hal_soc, hal_srng))) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
"%s %d : HAL RING Access Failed -- %pK\n",
"%s %d : HAL RING Access Failed -- %pK",
__func__, __LINE__, hal_srng);
return 0;
}
@@ -3288,7 +3288,7 @@ static int dp_tx_alloc_static_pools(struct dp_soc *soc, int num_pool,
for (i = 0; i < num_pool; i++) {
if (dp_tx_desc_pool_alloc(soc, i, num_desc)) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"%s Tx Desc Pool alloc %d failed %pK\n",
"%s Tx Desc Pool alloc %d failed %pK",
__func__, i, soc);
return ENOMEM;
}
@@ -3304,7 +3304,7 @@ static void dp_tx_delete_static_pools(struct dp_soc *soc, int num_pool)
qdf_assert_always(!soc->tx_desc[i].num_allocated);
if (dp_tx_desc_pool_free(soc, i)) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"%s Tx Desc Pool Free failed\n", __func__);
"%s Tx Desc Pool Free failed", __func__);
}
}
}
@@ -3336,20 +3336,20 @@ QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc)
dp_tx_delete_static_pools(soc, num_pool);

QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"%s Tx Desc Pool Free num_pool = %d, descs = %d\n",
"%s Tx Desc Pool Free num_pool = %d, descs = %d",
__func__, num_pool, num_desc);

for (i = 0; i < num_pool; i++) {
if (dp_tx_ext_desc_pool_free(soc, i)) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"%s Tx Ext Desc Pool Free failed\n",
"%s Tx Ext Desc Pool Free failed",
__func__);
return QDF_STATUS_E_RESOURCES;
}
}

QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"%s MSDU Ext Desc Pool %d Free descs = %d\n",
"%s MSDU Ext Desc Pool %d Free descs = %d",
__func__, num_pool, num_ext_desc);

for (i = 0; i < num_pool; i++) {
@@ -3357,7 +3357,7 @@ QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc)
}

QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"%s TSO Desc Pool %d Free descs = %d\n",
"%s TSO Desc Pool %d Free descs = %d",
__func__, num_pool, num_desc);

@@ -3366,7 +3366,7 @@ QDF_STATUS dp_tx_soc_detach(struct dp_soc *soc)

QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"%s TSO Num of seg Desc Pool %d Free descs = %d\n",
"%s TSO Num of seg Desc Pool %d Free descs = %d",
__func__, num_pool, num_desc);

return QDF_STATUS_SUCCESS;
@@ -3399,14 +3399,14 @@ QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc)
dp_tx_flow_control_init(soc);

QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"%s Tx Desc Alloc num_pool = %d, descs = %d\n",
"%s Tx Desc Alloc num_pool = %d, descs = %d",
__func__, num_pool, num_desc);

/* Allocate extension tx descriptor pools */
for (i = 0; i < num_pool; i++) {
if (dp_tx_ext_desc_pool_alloc(soc, i, num_ext_desc)) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"MSDU Ext Desc Pool alloc %d failed %pK\n",
"MSDU Ext Desc Pool alloc %d failed %pK",
i, soc);

goto fail;
@@ -3414,13 +3414,13 @@ QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc)
}

QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"%s MSDU Ext Desc Alloc %d, descs = %d\n",
"%s MSDU Ext Desc Alloc %d, descs = %d",
__func__, num_pool, num_ext_desc);

for (i = 0; i < num_pool; i++) {
if (dp_tx_tso_desc_pool_alloc(soc, i, num_desc)) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"TSO Desc Pool alloc %d failed %pK\n",
"TSO Desc Pool alloc %d failed %pK",
i, soc);

goto fail;
@@ -3428,13 +3428,13 @@ QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc)
}

QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"%s TSO Desc Alloc %d, descs = %d\n",
"%s TSO Desc Alloc %d, descs = %d",
__func__, num_pool, num_desc);

for (i = 0; i < num_pool; i++) {
if (dp_tx_tso_num_seg_pool_alloc(soc, i, num_desc)) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"TSO Num of seg Pool alloc %d failed %pK\n",
"TSO Num of seg Pool alloc %d failed %pK",
i, soc);

goto fail;
@@ -3442,7 +3442,7 @@ QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc)
}

QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"%s TSO Num of seg pool Alloc %d, descs = %d\n",
"%s TSO Num of seg pool Alloc %d, descs = %d",
__func__, num_pool, num_desc);

/* Initialize descriptors in TCL Rings */
@@ -3464,7 +3464,7 @@ QDF_STATUS dp_tx_soc_attach(struct dp_soc *soc)
soc->process_tx_status = CONFIG_PROCESS_TX_STATUS;

QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"%s HAL Tx init Success\n", __func__);
"%s HAL Tx init Success", __func__);

return QDF_STATUS_SUCCESS;

@@ -228,7 +228,7 @@ dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
if (pool->avail_desc == pool->pool_size) {
dp_tx_desc_pool_free(soc, desc_pool_id);
qdf_spin_unlock_bh(&pool->flow_pool_lock);
qdf_print("%s %d pool is freed!!\n",
qdf_print("%s %d pool is freed!!",
__func__, __LINE__);
return;
}
@@ -237,7 +237,7 @@ dp_tx_desc_free(struct dp_soc *soc, struct dp_tx_desc_s *tx_desc,
case FLOW_POOL_ACTIVE_UNPAUSED:
break;
default:
qdf_print("%s %d pool is INACTIVE State!!\n",
qdf_print("%s %d pool is INACTIVE State!!",
__func__, __LINE__);
break;
};

@@ -100,7 +100,7 @@ void dp_tx_clear_flow_pool_stats(struct dp_soc *soc)

if (!soc) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"%s: soc is null\n", __func__);
"%s: soc is null", __func__);
return;
}
qdf_mem_zero(&soc->pool_stats, sizeof(soc->pool_stats));
@@ -130,7 +130,7 @@ struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
qdf_spin_lock_bh(&pool->flow_pool_lock);
if ((pool->status != FLOW_POOL_INACTIVE) || pool->pool_create_cnt) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"%s: flow pool already allocated, attached %d times\n",
"%s: flow pool already allocated, attached %d times",
__func__, pool->pool_create_cnt);
if (pool->avail_desc > pool->start_th)
pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
@@ -181,7 +181,7 @@ int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool,
{
if (!soc || !pool) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"%s: pool or soc is NULL\n", __func__);
"%s: pool or soc is NULL", __func__);
QDF_ASSERT(0);
return ENOMEM;
}
@@ -196,7 +196,7 @@ int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool,
pool->pool_create_cnt--;
if (pool->pool_create_cnt) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"%s: pool is still attached, pending detach %d\n",
"%s: pool is still attached, pending detach %d",
__func__, pool->pool_create_cnt);
qdf_spin_unlock_bh(&pool->flow_pool_lock);
return -EAGAIN;
@@ -232,7 +232,7 @@ static void dp_tx_flow_pool_vdev_map(struct dp_pdev *pdev,
(struct cdp_pdev *)pdev, vdev_id);
if (!vdev) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"%s: invalid vdev_id %d\n",
"%s: invalid vdev_id %d",
__func__, vdev_id);
return;
}
@@ -262,7 +262,7 @@ static void dp_tx_flow_pool_vdev_unmap(struct dp_pdev *pdev,
(struct cdp_pdev *)pdev, vdev_id);
if (!vdev) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"%s: invalid vdev_id %d\n",
"%s: invalid vdev_id %d",
__func__, vdev_id);
return;
}
@@ -292,7 +292,7 @@ QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,

QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"%s: flow_id %d flow_type %d flow_pool_id %d flow_pool_size %d\n",
"%s: flow_id %d flow_type %d flow_pool_id %d flow_pool_size %d",
__func__, flow_id, flow_type, flow_pool_id, flow_pool_size);

if (qdf_unlikely(!soc)) {
@@ -306,7 +306,7 @@ QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
flow_pool_size);
if (pool == NULL) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"%s: creation of flow_pool %d size %d failed\n",
"%s: creation of flow_pool %d size %d failed",
__func__, flow_pool_id, flow_pool_size);
return QDF_STATUS_E_RESOURCES;
}
@@ -318,7 +318,7 @@ QDF_STATUS dp_tx_flow_pool_map_handler(struct dp_pdev *pdev, uint8_t flow_id,
break;
default:
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"%s: flow type %d not supported !!!\n",
"%s: flow type %d not supported !!!",
__func__, type);
break;
}
@@ -346,7 +346,7 @@ void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
enum htt_flow_type type = flow_type;

QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"%s: flow_id %d flow_type %d flow_pool_id %d\n",
"%s: flow_id %d flow_type %d flow_pool_id %d",
__func__, flow_id, flow_type, flow_pool_id);

if (qdf_unlikely(!pdev)) {
@@ -359,7 +359,7 @@ void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
pool = &soc->tx_desc[flow_pool_id];
if (!pool) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"%s: flow_pool not available flow_pool_id %d\n",
"%s: flow_pool not available flow_pool_id %d",
__func__, type);
return;
}
@@ -371,7 +371,7 @@ void dp_tx_flow_pool_unmap_handler(struct dp_pdev *pdev, uint8_t flow_id,
break;
default:
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"%s: flow type %d not supported !!!\n",
"%s: flow type %d not supported !!!",
__func__, type);
return;
}

@@ -142,7 +142,7 @@ dp_tx_me_exit(struct dp_pdev *pdev)
if (pdev->me_buf.buf_in_use > 0) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO,
"Tx-comp pending for %d "
"ME frames after waiting %ds!!\n",
"ME frames after waiting %ds!!",
pdev->me_buf.buf_in_use, wait_time);
qdf_assert_always(0);
}

@@ -39,7 +39,7 @@ dp_wdi_event_next_sub(wdi_event_subscribe *wdi_sub)
{
if (!wdi_sub) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"Invalid subscriber in %s\n", __func__);
"Invalid subscriber in %s", __func__);
return NULL;
}
return wdi_sub->priv.next;
@@ -123,12 +123,12 @@ dp_wdi_event_handler(

if (!event) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"Invalid WDI event in %s\n", __func__);
"Invalid WDI event in %s", __func__);
return;
}
if (!txrx_pdev) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"Invalid pdev in WDI event handler\n");
"Invalid pdev in WDI event handler");
return;
}

@@ -259,7 +259,7 @@ dp_wdi_event_attach(struct dp_pdev *txrx_pdev)
{
if (!txrx_pdev) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"Invalid device in %s\nWDI event attach failed\n",
"Invalid device in %s\nWDI event attach failed",
__func__);
return -EINVAL;
}
@@ -269,7 +269,7 @@ dp_wdi_event_attach(struct dp_pdev *txrx_pdev)
sizeof(wdi_event_subscribe *) * WDI_NUM_EVENTS);
if (!txrx_pdev->wdi_event_list) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"Insufficient memory for the WDI event lists\n");
"Insufficient memory for the WDI event lists");
return -EINVAL;
}
return 0;

@@ -704,7 +704,7 @@ hal_rx_print_pn(uint8_t *buf)
uint32_t pn_95_64 = HAL_RX_MPDU_PN_95_64_GET(mpdu_info);
uint32_t pn_127_96 = HAL_RX_MPDU_PN_127_96_GET(mpdu_info);
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"PN number pn_127_96 0x%x pn_95_64 0x%x pn_63_32 0x%x pn_31_0 0x%x \n",
"PN number pn_127_96 0x%x pn_95_64 0x%x pn_63_32 0x%x pn_31_0 0x%x ",
pn_127_96, pn_95_64, pn_63_32, pn_31_0);
}

@@ -1947,7 +1947,7 @@ static inline void hal_rx_msdu_list_get(void *msdu_link_desc,
msdu_details = HAL_RX_LINK_DESC_MSDU0_PTR(msdu_link);

QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"[%s][%d] msdu_link=%pK msdu_details=%pK\n",
"[%s][%d] msdu_link=%pK msdu_details=%pK",
__func__, __LINE__, msdu_link, msdu_details);

for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) {
@@ -1979,7 +1979,7 @@ static inline void hal_rx_msdu_list_get(void *msdu_link_desc,
msdu_list->rbm[i] = HAL_RX_BUF_RBM_GET(
&msdu_details[i].buffer_addr_info_details);
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"[%s][%d] i=%d sw_cookie=%d\n",
"[%s][%d] i=%d sw_cookie=%d",
__func__, __LINE__, i, msdu_list->sw_cookie[i]);
}
*num_msdus = i;
@@ -2769,7 +2769,7 @@ static inline void hal_rx_dump_pkt_hdr_tlv(struct rx_pkt_hdr_tlv *pkt_hdr_tlv,
"\n---------------\n"
"rx_pkt_hdr_tlv \n"
"---------------\n"
"phy_ppdu_id %d \n",
"phy_ppdu_id %d ",
pkt_hdr_tlv->phy_ppdu_id);

QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_DP, dbg_level,

@@ -297,7 +297,7 @@ void hal_rx_reo_ent_buf_paddr_get(void *rx_desc,
buf_info->sw_cookie = HAL_RX_BUF_COOKIE_GET(buf_addr_info);

QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"[%s][%d] ReoAddr=%pK, addrInfo=%pK, paddr=0x%llx, loopcnt=%d\n",
"[%s][%d] ReoAddr=%pK, addrInfo=%pK, paddr=0x%llx, loopcnt=%d",
__func__, __LINE__, reo_ent_ring, buf_addr_info,
(unsigned long long)buf_info->paddr, loop_cnt);

@@ -349,7 +349,7 @@ static inline void hal_rx_mon_msdu_link_desc_set(struct hal_soc *soc,
(HAL_RX_BUFFER_ADDR_39_32_GET(buf_addr_info)) << 32));

QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"[%s][%d] src_srng_desc=%pK, buf_addr=0x%llx, cookie=0x%llx\n",
"[%s][%d] src_srng_desc=%pK, buf_addr=0x%llx, cookie=0x%llx",
__func__, __LINE__, src_srng_desc, (unsigned long long)paddr,
(unsigned long long)p_buffer_addr_info->sw_buffer_cookie);

@@ -1239,43 +1239,43 @@ hal_rx_status_get_tlv_info(void *rx_tlv_hdr, struct hal_rx_ppdu_info *ppdu_info,
value = HAL_RX_GET(rssi_info_tlv,
RECEIVE_RSSI_INFO_0, RSSI_PRI20_CHAIN0);
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"RSSI_PRI20_CHAIN0: %d\n", value);
"RSSI_PRI20_CHAIN0: %d", value);

value = HAL_RX_GET(rssi_info_tlv,
RECEIVE_RSSI_INFO_0, RSSI_EXT20_CHAIN0);
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"RSSI_EXT20_CHAIN0: %d\n", value);
"RSSI_EXT20_CHAIN0: %d", value);

value = HAL_RX_GET(rssi_info_tlv,
RECEIVE_RSSI_INFO_0, RSSI_EXT40_LOW20_CHAIN0);
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"RSSI_EXT40_LOW20_CHAIN0: %d\n", value);
"RSSI_EXT40_LOW20_CHAIN0: %d", value);

value = HAL_RX_GET(rssi_info_tlv,
RECEIVE_RSSI_INFO_0, RSSI_EXT40_HIGH20_CHAIN0);
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"RSSI_EXT40_HIGH20_CHAIN0: %d\n", value);
"RSSI_EXT40_HIGH20_CHAIN0: %d", value);

value = HAL_RX_GET(rssi_info_tlv,
RECEIVE_RSSI_INFO_1, RSSI_EXT80_LOW20_CHAIN0);
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"RSSI_EXT80_LOW20_CHAIN0: %d\n", value);
"RSSI_EXT80_LOW20_CHAIN0: %d", value);

value = HAL_RX_GET(rssi_info_tlv,
RECEIVE_RSSI_INFO_1, RSSI_EXT80_LOW_HIGH20_CHAIN0);
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"RSSI_EXT80_LOW_HIGH20_CHAIN0: %d\n", value);
"RSSI_EXT80_LOW_HIGH20_CHAIN0: %d", value);

value = HAL_RX_GET(rssi_info_tlv,
RECEIVE_RSSI_INFO_1, RSSI_EXT80_HIGH_LOW20_CHAIN0);
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"RSSI_EXT80_HIGH_LOW20_CHAIN0: %d\n", value);
"RSSI_EXT80_HIGH_LOW20_CHAIN0: %d", value);

value = HAL_RX_GET(rssi_info_tlv,
RECEIVE_RSSI_INFO_1,
RSSI_EXT80_HIGH20_CHAIN0);
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"RSSI_EXT80_HIGH20_CHAIN0: %d\n", value);
"RSSI_EXT80_HIGH20_CHAIN0: %d", value);
break;
}
case WIFIPHYRX_OTHER_RECEIVE_INFO_E:

@@ -76,7 +76,7 @@ inline void hal_reo_cmd_set_descr_addr(uint32_t *reo_desc,
break;
default:
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
"%s: Invalid REO command type\n", __func__);
"%s: Invalid REO command type", __func__);
break;
}
}
@@ -91,7 +91,7 @@ inline int hal_reo_cmd_queue_stats(void *reo_ring, struct hal_soc *soc,
reo_desc = hal_srng_src_get_next(soc, reo_ring);
if (!reo_desc) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
"%s: Out of cmd ring entries\n", __func__);
"%s: Out of cmd ring entries", __func__);
hal_srng_access_end(soc, reo_ring);
return -EBUSY;
}
@@ -131,7 +131,7 @@ inline int hal_reo_cmd_flush_queue(void *reo_ring, struct hal_soc *soc,
reo_desc = hal_srng_src_get_next(soc, reo_ring);
if (!reo_desc) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
"%s: Out of cmd ring entries\n", __func__);
"%s: Out of cmd ring entries", __func__);
hal_srng_access_end(soc, reo_ring);
return -EBUSY;
}
@@ -184,7 +184,8 @@ inline int hal_reo_cmd_flush_cache(void *reo_ring, struct hal_soc *soc,
if (cp->block_use_after_flush) {
index = hal_find_zero_bit(soc->reo_res_bitmap);
if (index > 3) {
qdf_print("%s, No blocking resource available!\n", __func__);
qdf_print("%s, No blocking resource available!",
__func__);
hal_srng_access_end(soc, reo_ring);
return -EBUSY;
}
@@ -194,7 +195,7 @@ inline int hal_reo_cmd_flush_cache(void *reo_ring, struct hal_soc *soc,
reo_desc = hal_srng_src_get_next(soc, reo_ring);
if (!reo_desc) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
"%s: Out of cmd ring entries\n", __func__);
"%s: Out of cmd ring entries", __func__);
hal_srng_access_end(soc, reo_ring);
hal_srng_dump(reo_ring);
return -EBUSY;
@@ -256,7 +257,7 @@ inline int hal_reo_cmd_unblock_cache(void *reo_ring, struct hal_soc *soc,
index = hal_find_one_bit(soc->reo_res_bitmap);
if (index > 3) {
hal_srng_access_end(soc, reo_ring);
qdf_print("%s: No blocking resource to unblock!\n",
qdf_print("%s: No blocking resource to unblock!",
__func__);
return -EBUSY;
}
@@ -265,7 +266,7 @@ inline int hal_reo_cmd_unblock_cache(void *reo_ring, struct hal_soc *soc,
reo_desc = hal_srng_src_get_next(soc, reo_ring);
if (!reo_desc) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
"%s: Out of cmd ring entries\n", __func__);
"%s: Out of cmd ring entries", __func__);
hal_srng_access_end(soc, reo_ring);
return -EBUSY;
}
@@ -306,7 +307,7 @@ inline int hal_reo_cmd_flush_timeout_list(void *reo_ring, struct hal_soc *soc,
reo_desc = hal_srng_src_get_next(soc, reo_ring);
if (!reo_desc) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
"%s: Out of cmd ring entries\n", __func__);
"%s: Out of cmd ring entries", __func__);
hal_srng_access_end(soc, reo_ring);
return -EBUSY;
}
@@ -352,7 +353,7 @@ inline int hal_reo_cmd_update_rx_queue(void *reo_ring, struct hal_soc *soc,
reo_desc = hal_srng_src_get_next(soc, reo_ring);
if (!reo_desc) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
"%s: Out of cmd ring entries\n", __func__);
"%s: Out of cmd ring entries", __func__);
hal_srng_access_end(soc, reo_ring);
return -EBUSY;
}

@@ -308,7 +308,7 @@ void hal_reo_setup(void *hal_soc,
reo_params->remap1);

QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR 0x%x\n"),
FL("HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR 0x%x"),
HAL_REG_READ(soc,
HWIO_REO_R0_DESTINATION_RING_CTRL_IX_2_ADDR(
SEQ_WCSS_UMAC_REO_REG_OFFSET)));
@@ -319,7 +319,7 @@ void hal_reo_setup(void *hal_soc,
reo_params->remap2);

QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
FL("HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR 0x%x\n"),
FL("HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR 0x%x"),
HAL_REG_READ(soc,
HWIO_REO_R0_DESTINATION_RING_CTRL_IX_3_ADDR(
SEQ_WCSS_UMAC_REO_REG_OFFSET)));

@@ -58,8 +58,8 @@ static int hal_get_srng_ring_id(struct hal_soc *hal, int ring_type,
int ring_id;

if (ring_num >= ring_config->max_rings) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
"%s: ring_num exceeded maximum no. of supported rings\n",
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
"%s: ring_num exceeded maximum no. of supported rings",
__func__);
/* TODO: This is a programming error. Assert if this happens */
return -EINVAL;
@@ -137,7 +137,7 @@ QDF_STATUS hal_set_one_shadow_config(void *hal_soc,
ring_num);

QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
"%s: target_reg %x, shadow_index %x, ring_type %d, ring num %d\n",
"%s: target_reg %x, shadow_index %x, ring_type %d, ring num %d",
__func__, target_register, shadow_config_index,
ring_type, ring_num);

@@ -180,7 +180,7 @@ void hal_get_shadow_config(void *hal_soc,
hal->num_shadow_registers_configured;

QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
"%s\n", __func__);
"%s", __func__);
}

@@ -197,11 +197,11 @@ static void hal_validate_shadow_register(struct hal_soc *hal,

if (index >= MAX_SHADOW_REGISTERS) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
"%s: index %x out of bounds\n", __func__, index);
"%s: index %x out of bounds", __func__, index);
goto error;
} else if (hal->shadow_config[index].addr != destination_ba_offset) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
"%s: sanity check failure, expected %x, found %x\n",
"%s: sanity check failure, expected %x, found %x",
__func__, destination_ba_offset,
hal->shadow_config[index].addr);
goto error;
@@ -272,7 +272,7 @@ void *hal_attach(void *hif_handle, qdf_device_t qdf_dev)

if (!hal) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
"%s: hal_soc allocation failed\n", __func__);
"%s: hal_soc allocation failed", __func__);
goto fail0;
}
hal->hif_handle = hif_handle;
@@ -283,7 +283,7 @@ void *hal_attach(void *hif_handle, qdf_device_t qdf_dev)
HAL_SRNG_ID_MAX, &(hal->shadow_rdptr_mem_paddr));
if (!hal->shadow_rdptr_mem_paddr) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
"%s: hal->shadow_rdptr_mem_paddr allocation failed\n",
"%s: hal->shadow_rdptr_mem_paddr allocation failed",
__func__);
goto fail1;
}
@@ -294,7 +294,7 @@ void *hal_attach(void *hif_handle, qdf_device_t qdf_dev)
&(hal->shadow_wrptr_mem_paddr));
if (!hal->shadow_wrptr_mem_vaddr) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
"%s: hal->shadow_wrptr_mem_vaddr allocation failed\n",
"%s: hal->shadow_wrptr_mem_vaddr allocation failed",
__func__);
goto fail2;
}
@@ -549,7 +549,7 @@ void hal_srng_dst_init_hp(struct hal_srng *srng,
*(srng->u.dst_ring.hp_addr) = srng->u.dst_ring.cached_hp;

QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"hp_addr=%pK, cached_hp=%d, hp=%d\n",
"hp_addr=%pK, cached_hp=%d, hp=%d",
(void *)srng->u.dst_ring.hp_addr, srng->u.dst_ring.cached_hp,
*(srng->u.dst_ring.hp_addr));
}
@@ -697,15 +697,15 @@ void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
if (ring_id < 0)
return NULL;

QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
"%s: mac_id %d ring_id %d\n",
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO,
"%s: mac_id %d ring_id %d",
__func__, mac_id, ring_id);

srng = hal_get_srng(hal_soc, ring_id);

if (srng->initialized) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
"%s: Ring (ring_type, ring_num) already initialized\n",
"%s: Ring (ring_type, ring_num) already initialized",
__func__);
return NULL;
}
@@ -765,7 +765,7 @@ void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
if (CHECK_SHADOW_REGISTERS) {
QDF_TRACE(QDF_MODULE_ID_TXRX,
QDF_TRACE_LEVEL_ERROR,
"%s: Ring (%d, %d) missing shadow config\n",
"%s: Ring (%d, %d) missing shadow config",
__func__, ring_type, ring_num);
}
} else {
@@ -800,7 +800,7 @@ void *hal_srng_setup(void *hal_soc, int ring_type, int ring_num,
if (CHECK_SHADOW_REGISTERS) {
QDF_TRACE(QDF_MODULE_ID_TXRX,
QDF_TRACE_LEVEL_ERROR,
"%s: Ring (%d, %d) missing shadow config\n",
"%s: Ring (%d, %d) missing shadow config",
__func__, ring_type, ring_num);
}
} else {