qcacld-3.0: Remove trailing newline from the DP Logs

Remove trailing newline from the DP Logs.

Change-Id: I0a0f0952539443f9937591ebee1d8432b12ed506
CRs-Fixed: 3492704
This commit is contained in:
Srinivas Girigowda
2023-04-12 16:34:59 -07:00
committed by Rahul Choudhary
parent 3f6ad7ad97
commit 9325f4c7b8
13 changed files with 69 additions and 67 deletions

View File

@@ -1,6 +1,6 @@
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -155,7 +155,7 @@ static void dp_fisa_fse_cache_flush_timer(void *arg)
DP_HTT_FST_CACHE_INVALIDATE_FULL,
&rx_flow_tuple_info);
if (QDF_IS_STATUS_ERROR(status)) {
dp_err("Failed to send the cache invalidation\n");
dp_err("Failed to send the cache invalidation");
/*
* Not big impact cache entry gets updated later
*/
@@ -210,7 +210,7 @@ static QDF_STATUS dp_rx_fst_cmem_init(struct dp_rx_fst *fst)
fst->fst_update_wq =
qdf_alloc_high_prior_ordered_workqueue("dp_rx_fst_update_wq");
if (!fst->fst_update_wq) {
dp_err("failed to allocate fst update wq\n");
dp_err("failed to allocate fst update wq");
return QDF_STATUS_E_FAILURE;
}
@@ -456,7 +456,7 @@ QDF_STATUS dp_rx_flow_send_fst_fw_setup(struct dp_soc *soc,
/* Higher order bits are mostly 0, Always use 0x10 */
fisa_hw_fst_setup_cmd.base_addr_hi =
(soc->fst_cmem_base >> 32) | 0x10;
dp_info("cmem base address 0x%llx\n", soc->fst_cmem_base);
dp_info("cmem base address 0x%llx", soc->fst_cmem_base);
} else {
fisa_hw_fst_setup_cmd.base_addr_lo =
fst->hal_rx_fst_base_paddr & 0xffffffff;

View File

@@ -371,7 +371,7 @@ int htt_tx_attach(struct htt_pdev_t *pdev, int desc_pool_elems)
* should point next page
*/
if (!cacheable_pages[i + 1]) {
ol_txrx_err("over flow num link %d\n",
ol_txrx_err("over flow num link %d",
num_link);
goto free_htt_desc;
}
@@ -538,7 +538,7 @@ int htt_tx_attach(struct htt_pdev_t *pdev, int desc_pool_elems)
* should pint next page
*/
if (!page_info->page_v_addr_start) {
ol_txrx_err("over flow num link %d\n",
ol_txrx_err("over flow num link %d",
num_link);
goto free_htt_desc;
}
@@ -1150,7 +1150,7 @@ static int htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t *pdev,
shared_tx_buffer = qdf_mem_shared_mem_alloc(pdev->osdev,
uc_tx_buf_sz);
if (!shared_tx_buffer || !shared_tx_buffer->vaddr) {
qdf_print("IPA WDI TX buffer alloc fail %d allocated\n",
qdf_print("IPA WDI TX buffer alloc fail %d allocated",
tx_buffer_count);
goto out;
}
@@ -1446,12 +1446,12 @@ htt_tx_desc_fill_tso_info(htt_pdev_handle pdev, void *desc,
tso_seg->seg.tso_flags;
/* First 24 bytes (6*4) contain the TSO flags */
TSO_DEBUG("%s seq# %u l2 len %d, ip len %d\n",
TSO_DEBUG("%s seq# %u l2 len %d, ip len %d",
__func__,
tso_seg->seg.tso_flags.tcp_seq_num,
tso_seg->seg.tso_flags.l2_len,
tso_seg->seg.tso_flags.ip_len);
TSO_DEBUG("%s flags 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
TSO_DEBUG("%s flags 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
__func__,
*word,
*(word + 1),
@@ -1475,7 +1475,7 @@ htt_tx_desc_fill_tso_info(htt_pdev_handle pdev, void *desc,
/* [31:16] length of the first buffer */
*word = (tso_seg->seg.tso_frags[i].length << 16) | hi;
word++;
TSO_DEBUG("%s frag[%d] ptr_low 0x%x ptr_hi 0x%x len %u\n",
TSO_DEBUG("%s frag[%d] ptr_low 0x%x ptr_hi 0x%x len %u",
__func__, i,
msdu_ext_desc->frags[i].u.frag32.ptr_low,
msdu_ext_desc->frags[i].u.frag32.ptr_hi,
@@ -1772,7 +1772,7 @@ htt_tx_desc_init(htt_pdev_handle pdev,
uint32_t total_len = tso_info->curr_seg->seg.total_len;
HTT_TX_DESC_FRM_LEN_SET(local_word1, total_len);
TSO_DEBUG("%s setting HTT TX DESC Len = %d\n",
TSO_DEBUG("%s setting HTT TX DESC Len = %d",
__func__, total_len);
} else {
HTT_TX_DESC_FRM_LEN_SET(local_word1, qdf_nbuf_len(msdu));

View File

@@ -1,6 +1,6 @@
/*
* Copyright (c) 2011-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -186,7 +186,7 @@ ol_rx_frag_restructure(
int rx_desc_len)
{
if ((!ind_old_position) || (!rx_desc_old_position)) {
ol_txrx_err("ind_old_position,rx_desc_old_position is NULL\n");
ol_txrx_err("ind_old_position,rx_desc_old_position is NULL");
ASSERT(0);
return;
}
@@ -318,7 +318,7 @@ ol_rx_frag_indication_handler(ol_txrx_pdev_handle pdev,
void *rx_desc;
if (tid >= OL_TXRX_NUM_EXT_TIDS) {
ol_txrx_err("%s: invalid tid, %u\n", __FUNCTION__, tid);
ol_txrx_err("%s: invalid tid, %u", __FUNCTION__, tid);
return;
}
@@ -485,7 +485,7 @@ ol_rx_reorder_store_frag(ol_txrx_pdev_handle pdev,
rx_reorder_array_elem->head);
rx_reorder_array_elem->head = NULL;
rx_reorder_array_elem->tail = NULL;
ol_txrx_err("\n ol_rx_reorder_store:%s mismatch\n",
ol_txrx_err("ol_rx_reorder_store:%s mismatch",
(rxseq == frxseq)
? "address"
: "seq number");
@@ -625,7 +625,7 @@ void ol_rx_defrag_waitlist_remove(struct ol_txrx_peer_t *peer, unsigned int tid)
rx_reorder->defrag_waitlist_elem.tqe_next = NULL;
rx_reorder->defrag_waitlist_elem.tqe_prev = NULL;
} else if (rx_reorder->defrag_waitlist_elem.tqe_next) {
ol_txrx_alert("waitlist->tqe_prv = NULL\n");
ol_txrx_alert("waitlist->tqe_prv = NULL");
QDF_ASSERT(0);
rx_reorder->defrag_waitlist_elem.tqe_next = NULL;
}
@@ -655,7 +655,7 @@ void ol_rx_defrag_waitlist_flush(struct ol_txrx_pdev_t *pdev)
tid = rx_reorder->tid;
if (tid >= OL_TXRX_NUM_EXT_TIDS) {
ol_txrx_err("%s: invalid tid, %u\n", __FUNCTION__, tid);
ol_txrx_err("%s: invalid tid, %u", __FUNCTION__, tid);
WARN_ON(1);
continue;
}

View File

@@ -1,6 +1,6 @@
/*
* Copyright (c) 2011-2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -459,7 +459,7 @@ static void ol_rx_reorder_detect_hole(struct ol_txrx_peer_t *peer,
uint32_t win_sz_mask, next_rel_idx, hole_size;
if (tid >= OL_TXRX_NUM_EXT_TIDS) {
ol_txrx_err("%s: invalid tid, %u\n", __FUNCTION__, tid);
ol_txrx_err("%s: invalid tid, %u", __FUNCTION__, tid);
return;
}
@@ -632,7 +632,7 @@ ol_rx_flush_handler(ol_txrx_pdev_handle pdev,
htt_pdev_handle htt_pdev = pdev->htt_pdev;
if (tid >= OL_TXRX_NUM_EXT_TIDS) {
ol_txrx_err("%s: invalid tid, %u\n", __FUNCTION__, tid);
ol_txrx_err("%s: invalid tid, %u", __FUNCTION__, tid);
return;
}
@@ -693,7 +693,7 @@ ol_rx_pn_ind_handler(ol_txrx_pdev_handle pdev,
int i = 0;
if (tid >= OL_TXRX_NUM_EXT_TIDS) {
ol_txrx_err("%s: invalid tid, %u\n", __FUNCTION__, tid);
ol_txrx_err("%s: invalid tid, %u", __FUNCTION__, tid);
WARN_ON(1);
return;
}

View File

@@ -1,5 +1,6 @@
/*
* Copyright (c) 2011, 2014-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -207,7 +208,7 @@ static void ol_tx_do_pdev_flow_control_unpause(struct ol_txrx_pdev_t *pdev)
case FLOW_POOL_ACTIVE_UNPAUSED:
break;
default:
ol_txrx_err("pool is INACTIVE State!!\n");
ol_txrx_err("pool is INACTIVE State!!");
break;
};
}
@@ -802,7 +803,7 @@ struct ol_tx_desc_t *ol_tx_desc_ll(struct ol_txrx_pdev_t *pdev,
htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_frag_desc,
i - 1, frag_paddr, frag_len);
#if defined(HELIUMPLUS_DEBUG)
qdf_debug("htt_fdesc=%pK frag=%d frag_vaddr=0x%pK frag_paddr=0x%llx len=%zu\n",
qdf_debug("htt_fdesc=%pK frag=%d frag_vaddr=0x%pK frag_paddr=0x%llx len=%zu",
tx_desc->htt_frag_desc,
i-1, frag_vaddr, frag_paddr, frag_len);
ol_txrx_dump_pkt(netbuf, frag_paddr, 64);
@@ -945,7 +946,7 @@ void ol_tx_desc_frame_free_nonstd(struct ol_txrx_pdev_t *pdev,
* table pointer needs to be reset.
*/
#if defined(HELIUMPLUS_DEBUG)
qdf_print("Frag Descriptor Reset [%d] to 0x%x\n",
qdf_print("Frag Descriptor Reset [%d] to 0x%x",
tx_desc->id,
frag_desc_paddr);
#endif /* HELIUMPLUS_DEBUG */

View File

@@ -1,6 +1,6 @@
/*
* Copyright (c) 2011-2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -147,7 +147,7 @@ struct ol_tx_desc_t *ol_tx_hl_desc_alloc(struct ol_txrx_pdev_t *pdev,
(QDF_NBUF_CB_GET_PACKET_TYPE(msdu) ==
QDF_NBUF_CB_PACKET_TYPE_EAPOL)) {
tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
ol_txrx_info("Got tx desc from resv pool\n");
ol_txrx_info("Got tx desc from resv pool");
}
}
return tx_desc;
@@ -1434,7 +1434,7 @@ ol_tx_pdev_reset_bundle_require(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
vdev->bundling_required = false;
ol_txrx_info("vdev_id %d bundle_require %d\n",
ol_txrx_info("vdev_id %d bundle_require %d",
vdev->vdev_id, vdev->bundling_required);
}
}
@@ -1458,7 +1458,7 @@ ol_tx_vdev_set_bundle_require(uint8_t vdev_id, unsigned long tx_bytes,
vdev->bundling_required = false;
if (old_bundle_required != vdev->bundling_required)
ol_txrx_info("vdev_id %d bundle_require %d tx_bytes %ld time_in_ms %d high_th %d low_th %d\n",
ol_txrx_info("vdev_id %d bundle_require %d tx_bytes %ld time_in_ms %d high_th %d low_th %d",
vdev->vdev_id, vdev->bundling_required, tx_bytes,
time_in_ms, high_th, low_th);
}

View File

@@ -209,7 +209,7 @@ void ol_txrx_dump_frag_desc(char *msg, struct ol_tx_desc_t *tx_desc)
uint32_t *frag_ptr_i_p;
int i;
ol_txrx_err("OL TX Descriptor 0x%pK msdu_id %d\n",
ol_txrx_err("OL TX Descriptor 0x%pK msdu_id %d",
tx_desc, tx_desc->id);
ol_txrx_err("HTT TX Descriptor vaddr: 0x%pK paddr: %pad",
tx_desc->htt_tx_desc, &tx_desc->htt_tx_desc_paddr);
@@ -724,7 +724,7 @@ void ol_txrx_stats_display_tso(ol_txrx_pdev_handle pdev)
int seg_idx;
txrx_nofl_info("TSO Statistics:");
txrx_nofl_info("TSO pkts %lld, bytes %lld\n",
txrx_nofl_info("TSO pkts %lld, bytes %lld",
pdev->stats.pub.tx.tso.tso_pkts.pkts,
pdev->stats.pub.tx.tso.tso_pkts.bytes);

View File

@@ -83,7 +83,7 @@ ol_tx_ll_wrapper(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
hif_is_fastpath_mode_enabled(hif_device))) {
msdu_list = ol_tx_ll_fast(vdev, msdu_list);
} else {
qdf_print("Fast path is disabled\n");
qdf_print("Fast path is disabled");
QDF_BUG(0);
}
return msdu_list;
@@ -334,7 +334,7 @@ ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
msdu_info.peer = NULL;
if (qdf_unlikely(ol_tx_prepare_tso(vdev, msdu, &msdu_info))) {
ol_txrx_err("ol_tx_prepare_tso failed\n");
ol_txrx_err("ol_tx_prepare_tso failed");
TXRX_STATS_MSDU_LIST_INCR(vdev->pdev,
tx.dropped.host_reject,
msdu);

View File

@@ -1,6 +1,6 @@
/*
* Copyright (c) 2012-2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -2032,7 +2032,7 @@ void ol_tx_update_grp_frm_count(struct ol_tx_frms_queue_t *txq, int num_frms)
int i;
if (!num_frms || !txq) {
ol_txrx_dbg("Invalid params\n");
ol_txrx_dbg("Invalid params");
return;
}

View File

@@ -109,7 +109,7 @@ static void ol_tx_pdev_throttle_phase_timer(void *context)
if (pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_OFF) {
/* Traffic is stopped */
ol_txrx_dbg(
"throttle phase --> OFF\n");
"throttle phase --> OFF");
ol_txrx_throttle_pause(pdev);
ol_txrx_thermal_pause(pdev);
pdev->tx_throttle.prev_outstanding_num = 0;
@@ -119,14 +119,14 @@ static void ol_tx_pdev_throttle_phase_timer(void *context)
if (pdev->tx_throttle.current_throttle_level !=
THROTTLE_LEVEL_0) {
ol_txrx_dbg(
"start timer %d ms\n", ms);
"start timer %d ms", ms);
qdf_timer_start(&pdev->tx_throttle.
phase_timer, ms);
}
} else {
/* Traffic can go */
ol_txrx_dbg(
"throttle phase --> ON\n");
"throttle phase --> ON");
ol_txrx_throttle_unpause(pdev);
ol_txrx_thermal_unpause(pdev);
cur_level = pdev->tx_throttle.current_throttle_level;
@@ -134,7 +134,7 @@ static void ol_tx_pdev_throttle_phase_timer(void *context)
ms = pdev->tx_throttle.throttle_time_ms[cur_level][cur_phase];
if (pdev->tx_throttle.current_throttle_level !=
THROTTLE_LEVEL_0) {
ol_txrx_dbg("start timer %d ms\n", ms);
ol_txrx_dbg("start timer %d ms", ms);
qdf_timer_start(&pdev->tx_throttle.phase_timer, ms);
}
}
@@ -232,7 +232,7 @@ void ol_tx_throttle_set_level(struct cdp_soc_t *soc_hdl,
return;
}
ol_txrx_info("Setting throttle level %d\n", level);
ol_txrx_info("Setting throttle level %d", level);
/* Set the current throttle level */
pdev->tx_throttle.current_throttle_level = (enum throttle_level)level;
@@ -266,7 +266,7 @@ void ol_tx_throttle_init_period(struct cdp_soc_t *soc_hdl,
/* Set the current throttle level */
pdev->tx_throttle.throttle_period_ms = period;
ol_txrx_dbg("level OFF ON\n");
ol_txrx_dbg("level OFF ON");
for (i = 0; i < THROTTLE_LEVEL_MAX; i++) {
pdev->tx_throttle.throttle_time_ms[i][THROTTLE_PHASE_ON] =
pdev->tx_throttle.throttle_period_ms -
@@ -276,7 +276,7 @@ void ol_tx_throttle_init_period(struct cdp_soc_t *soc_hdl,
pdev->tx_throttle.throttle_period_ms -
pdev->tx_throttle.throttle_time_ms[
i][THROTTLE_PHASE_ON];
ol_txrx_dbg("%d %d %d\n", i,
ol_txrx_dbg("%d %d %d", i,
pdev->tx_throttle.
throttle_time_ms[i][THROTTLE_PHASE_OFF],
pdev->tx_throttle.

View File

@@ -1472,7 +1472,7 @@ ol_txrx_pdev_post_attach(struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
OL_RX_REORDER_TIMEOUT_INIT(pdev);
ol_txrx_dbg("Created pdev %pK\n", pdev);
ol_txrx_dbg("Created pdev %pK", pdev);
pdev->cfg.host_addba = ol_cfg_host_addba(pdev->ctrl_pdev);
@@ -1734,7 +1734,7 @@ static void ol_txrx_pdev_pre_detach(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
* As a side effect, this will complete the deletion of any
* vdevs that are waiting for their peers to finish deletion.
*/
ol_txrx_dbg("Force delete for pdev %pK\n",
ol_txrx_dbg("Force delete for pdev %pK",
pdev);
ol_txrx_peer_find_hash_erase(pdev);
ol_txrx_peer_free_inactive_list(pdev);
@@ -1838,14 +1838,14 @@ static QDF_STATUS ol_txrx_pdev_detach(struct cdp_soc_t *soc_hdl, uint8_t pdev_id
qdf_spin_lock_bh(&pdev->req_list_spinlock);
if (pdev->req_list_depth > 0)
ol_txrx_err(
"Warning: the txrx req list is not empty, depth=%d\n",
"Warning: the txrx req list is not empty, depth=%d",
pdev->req_list_depth
);
TAILQ_FOREACH_SAFE(req, &pdev->req_list, req_list_elem, temp_req) {
TAILQ_REMOVE(&pdev->req_list, req, req_list_elem);
pdev->req_list_depth--;
ol_txrx_err(
"%d: %pK,verbose(%d), concise(%d), up_m(0x%x), reset_m(0x%x)\n",
"%d: %pK,verbose(%d), concise(%d), up_m(0x%x), reset_m(0x%x)",
i++,
req,
req->base.print.verbose,
@@ -2032,7 +2032,7 @@ ol_txrx_vdev_attach(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
ol_txrx_hl_tdls_flag_reset(soc_hdl, vdev_id, false);
ol_txrx_dbg(
"Created vdev %pK ("QDF_MAC_ADDR_FMT")\n",
"Created vdev %pK ("QDF_MAC_ADDR_FMT")",
vdev,
QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
@@ -2245,7 +2245,7 @@ ol_txrx_vdev_detach(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
if (!TAILQ_EMPTY(&vdev->peer_list)) {
/* debug print - will be removed later */
ol_txrx_dbg(
"not deleting vdev object %pK ("QDF_MAC_ADDR_FMT") until deletion finishes for all its peers\n",
"not deleting vdev object %pK ("QDF_MAC_ADDR_FMT") until deletion finishes for all its peers",
vdev,
QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
/* indicate that the vdev needs to be deleted */
@@ -2259,7 +2259,7 @@ ol_txrx_vdev_detach(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
qdf_event_destroy(&vdev->wait_delete_comp);
ol_txrx_dbg(
"deleting vdev obj %pK ("QDF_MAC_ADDR_FMT")\n",
"deleting vdev obj %pK ("QDF_MAC_ADDR_FMT")",
vdev,
QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
@@ -2448,7 +2448,7 @@ ol_txrx_peer_attach(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
(union ol_txrx_align_mac_addr_t *)peer_mac_addr) &&
(check_valid == 0 || temp_peer->valid)) {
ol_txrx_info_high(
"vdev_id %d ("QDF_MAC_ADDR_FMT") already exists.\n",
"vdev_id %d ("QDF_MAC_ADDR_FMT") already exists",
vdev->vdev_id,
QDF_MAC_ADDR_REF(peer_mac_addr));
if (qdf_atomic_read(&temp_peer->delete_in_progress)) {
@@ -2467,7 +2467,7 @@ ol_txrx_peer_attach(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
(check_valid == 0 ||
temp_peer->valid)) {
ol_txrx_info_high(
"vdev_id %d ("QDF_MAC_ADDR_FMT") old peer exists.\n",
"vdev_id %d ("QDF_MAC_ADDR_FMT") old peer exists",
vdev->vdev_id,
QDF_MAC_ADDR_REF(vdev->last_peer_mac_addr.raw));
if (qdf_atomic_read(&temp_peer->delete_in_progress)) {
@@ -2491,7 +2491,7 @@ ol_txrx_peer_attach(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
rc = qdf_wait_for_event_completion(&vdev->wait_delete_comp,
PEER_DELETION_TIMEOUT);
if (QDF_STATUS_SUCCESS != rc) {
ol_txrx_err("error waiting for peer_id(%d) deletion, status %d\n",
ol_txrx_err("error waiting for peer_id(%d) deletion, status %d",
vdev->wait_on_peer_id, (int) rc);
/* Added for debugging only */
ol_txrx_dump_peer_access_list(temp_peer);
@@ -2560,7 +2560,7 @@ ol_txrx_peer_attach(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
ol_txrx_peer_find_hash_add(pdev, peer);
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
"vdev %pK created peer %pK ref_cnt %d ("QDF_MAC_ADDR_FMT")\n",
"vdev %pK created peer %pK ref_cnt %d ("QDF_MAC_ADDR_FMT")",
vdev, peer, qdf_atomic_read(&peer->ref_cnt),
QDF_MAC_ADDR_REF(peer->mac_addr.raw));
/*
@@ -2961,7 +2961,7 @@ QDF_STATUS ol_txrx_peer_state_update(struct cdp_soc_t *soc_hdl,
PEER_DEBUG_ID_OL_INTERNAL);
if (!peer) {
ol_txrx_err(
"peer is null for peer_mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
"peer is null for peer_mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
peer_mac[0], peer_mac[1], peer_mac[2], peer_mac[3],
peer_mac[4], peer_mac[5]);
return QDF_STATUS_E_INVAL;
@@ -3207,13 +3207,13 @@ int ol_txrx_peer_release_ref(ol_txrx_peer_handle peer,
vdev = peer->vdev;
if (!vdev) {
ol_txrx_err("The vdev is not present anymore\n");
ol_txrx_err("The vdev is not present anymore");
return -EINVAL;
}
pdev = vdev->pdev;
if (!pdev) {
ol_txrx_err("The pdev is not present anymore\n");
ol_txrx_err("The pdev is not present anymore");
err_code = 0xbad2;
goto ERR_STATE;
}
@@ -3256,7 +3256,7 @@ int ol_txrx_peer_release_ref(ol_txrx_peer_handle peer,
if (rc == 0) {
qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
ol_txrx_err("The Peer is not present anymore\n");
ol_txrx_err("The Peer is not present anymore");
qdf_assert(0);
return -EACCES;
}
@@ -4179,7 +4179,7 @@ ol_txrx_fw_stats_handler(ol_txrx_pdev_handle pdev,
if (!found) {
ol_txrx_err(
"req(%pK) from firmware can't be found in the list\n", req);
"req(%pK) from firmware can't be found in the list", req);
return;
}
@@ -6440,7 +6440,7 @@ ol_txrx_soc_handle ol_txrx_soc_attach(void *scn_handle,
bool ol_txrx_get_new_htt_msg_format(struct ol_txrx_pdev_t *pdev)
{
if (!pdev) {
qdf_print("%s: pdev is NULL\n", __func__);
qdf_print("%s: pdev is NULL", __func__);
return false;
}
return pdev->new_htt_msg_format;
@@ -6453,7 +6453,7 @@ void ol_txrx_set_new_htt_msg_format(uint8_t val)
pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
if (!pdev) {
qdf_print("%s: pdev is NULL\n", __func__);
qdf_print("%s: pdev is NULL", __func__);
return;
}
pdev->new_htt_msg_format = val;
@@ -6469,7 +6469,7 @@ bool ol_txrx_get_peer_unmap_conf_support(void)
pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
if (!pdev) {
qdf_print("%s: pdev is NULL\n", __func__);
qdf_print("%s: pdev is NULL", __func__);
return false;
}
return pdev->enable_peer_unmap_conf_support;
@@ -6485,7 +6485,7 @@ void ol_txrx_set_peer_unmap_conf_support(bool val)
pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
if (!pdev) {
qdf_print("%s: pdev is NULL\n", __func__);
qdf_print("%s: pdev is NULL", __func__);
return;
}
pdev->enable_peer_unmap_conf_support = val;
@@ -6502,7 +6502,7 @@ bool ol_txrx_get_tx_compl_tsf64(void)
pdev = ol_txrx_get_pdev_from_pdev_id(soc, OL_TXRX_PDEV_ID);
if (!pdev) {
qdf_print("%s: pdev is NULL\n", __func__);
qdf_print("%s: pdev is NULL", __func__);
return false;
}
return pdev->enable_tx_compl_tsf64;

View File

@@ -1,5 +1,6 @@
/*
* Copyright (c) 2012-2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -422,7 +423,7 @@ void ol_txrx_tso_stats_clear(ol_txrx_pdev_handle pdev);
static inline
void ol_txrx_stats_display_tso(ol_txrx_pdev_handle pdev)
{
ol_txrx_err("TSO is not supported\n");
ol_txrx_err("TSO is not supported");
}
static inline

View File

@@ -1,6 +1,6 @@
/*
* Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -59,7 +59,7 @@ ol_tx_register_global_mgmt_pool(struct ol_txrx_pdev_t *pdev)
pdev->mgmt_pool = ol_tx_create_flow_pool(TX_FLOW_MGMT_POOL_ID,
TX_FLOW_MGMT_POOL_SIZE);
if (!pdev->mgmt_pool)
ol_txrx_err("Management pool creation failed\n");
ol_txrx_err("Management pool creation failed");
}
/**
@@ -127,7 +127,7 @@ void ol_tx_set_desc_global_pool_size(uint32_t num_msdu_desc)
pdev->num_msdu_desc = num_msdu_desc;
if (!ol_tx_get_is_mgmt_over_wmi_enabled())
pdev->num_msdu_desc += TX_FLOW_MGMT_POOL_SIZE;
ol_txrx_info_high("Global pool size: %d\n", pdev->num_msdu_desc);
ol_txrx_info_high("Global pool size: %d", pdev->num_msdu_desc);
}
/**
@@ -192,7 +192,7 @@ void ol_tx_deregister_flow_control(struct ol_txrx_pdev_t *pdev)
if (!pool)
break;
qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
ol_txrx_info("flow pool list is not empty %d!!!\n", i++);
ol_txrx_info("flow pool list is not empty %d!!!", i++);
if (i == 1)
ol_tx_dump_flow_pool_info(soc);