qcacld-3.0: dp: Remove redundant __func__ from the logs

The logging macros implicitly take care of embedding the function name
in the log, hence there is no need to include __func__ again.
Getting rid of the redundant __func__ reduces the driver memory footprint.
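For context, a minimal sketch of how a logging macro can embed the
caller's function name by itself (illustrative only; the real QDF
trace macros take module and level arguments, and the back-end here
is plain printf rather than the driver's logging path):

  /* Variadic macro that prepends __func__ at expansion time, so the
   * call site never passes it explicitly. ##__VA_ARGS__ is the GNU C
   * extension used in kernel-style code to swallow the comma when no
   * arguments follow the format string.
   */
  #include <stdio.h>

  #define TX_SCHED_DEBUG_PRINT(fmt, ...) \
          printf("%s: " fmt "\n", __func__, ##__VA_ARGS__)

  static void ol_tx_example(void)        /* hypothetical caller */
  {
          TX_SCHED_DEBUG_PRINT("Enter"); /* logs "ol_tx_example: Enter" */
  }

With the function name supplied by the macro, every call site's format
string gets shorter and drops one argument, which is where the memory
saving in this change comes from.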

Change-Id: I878671f03be0727bca3bca5dfd82eae4e353c6e1
CRs-Fixed: 2768575
Author: Srinivas Girigowda
Date:   2020-09-02 11:48:01 -07:00
Committed by: snandini
Parent: 87b638fadb
Commit: 8fb7e79124
4 changed files with 58 additions and 62 deletions

@@ -431,7 +431,7 @@ ol_tx_classify(
A_UINT8 tid;
u_int8_t peer_id;
TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
TX_SCHED_DEBUG_PRINT("Enter");
dest_addr = ol_tx_dest_addr_find(pdev, tx_nbuf);
if (unlikely(!dest_addr)) {
QDF_TRACE(QDF_MODULE_ID_TXRX,
@@ -570,7 +570,7 @@ ol_tx_classify(
QDF_MAC_ADDR_REF(vdev->mac_addr.raw));
return NULL; /* error */
}
TX_SCHED_DEBUG_PRINT("Peer found\n");
TX_SCHED_DEBUG_PRINT("Peer found");
if (!peer->qos_capable) {
tid = OL_TX_NON_QOS_TID;
} else if ((peer->security[
@@ -644,7 +644,7 @@ ol_tx_classify(
/* Update Tx Queue info */
tx_desc->txq = txq;
TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
TX_SCHED_DEBUG_PRINT("Leave");
return txq;
}
@@ -661,7 +661,7 @@ ol_tx_classify_mgmt(
A_UINT8 *dest_addr;
union ol_txrx_align_mac_addr_t local_mac_addr_aligned, *mac_addr;
TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
TX_SCHED_DEBUG_PRINT("Enter");
dest_addr = ol_tx_dest_addr_find(pdev, tx_nbuf);
if (unlikely(!dest_addr)) {
QDF_TRACE(QDF_MODULE_ID_TXRX,
@@ -759,7 +759,7 @@ ol_tx_classify_mgmt(
/* Update Tx Queue info */
tx_desc->txq = txq;
TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
TX_SCHED_DEBUG_PRINT("Leave");
return txq;
}

@@ -166,8 +166,7 @@ ol_tx_queue_discard(
num = pdev->tx_queue.rsrc_threshold_hi -
pdev->tx_queue.rsrc_threshold_lo;
TX_SCHED_DEBUG_PRINT("+%s : %u\n,", __func__,
qdf_atomic_read(&pdev->tx_queue.rsrc_cnt));
TX_SCHED_DEBUG_PRINT("+%u", qdf_atomic_read(&pdev->tx_queue.rsrc_cnt));
while (num > 0) {
discarded = ol_tx_sched_discard_select(
pdev, (u_int16_t)num, tx_descs, flush_all);
@@ -182,7 +181,7 @@ ol_tx_queue_discard(
actual_discarded += discarded;
}
qdf_atomic_add(actual_discarded, &pdev->tx_queue.rsrc_cnt);
TX_SCHED_DEBUG_PRINT("-%s\n", __func__);
TX_SCHED_DEBUG_PRINT("-");
qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
@@ -259,7 +258,7 @@ ol_tx_enqueue(
int bytes;
struct ol_tx_sched_notify_ctx_t notify_ctx;
TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
TX_SCHED_DEBUG_PRINT("Enter");
/*
* If too few tx descriptors are available, drop some currently-queued
@@ -298,7 +297,7 @@ ol_tx_enqueue(
OL_TX_QUEUE_ADDBA_CHECK(pdev, txq, tx_msdu_info);
qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
TX_SCHED_DEBUG_PRINT("Leave");
}
u_int16_t
@@ -315,7 +314,7 @@ ol_tx_dequeue(
unsigned int credit_sum;
TXRX_ASSERT2(txq->flag != ol_tx_queue_paused);
TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
TX_SCHED_DEBUG_PRINT("Enter");
if (txq->frms < max_frames)
max_frames = txq->frms;
@@ -346,7 +345,7 @@ ol_tx_dequeue(
txq->flag = ol_tx_queue_empty;
ol_tx_queue_log_dequeue(pdev, txq, num_frames, bytes_sum);
TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
TX_SCHED_DEBUG_PRINT("Leave");
*bytes = bytes_sum;
*credit = credit_sum;
@@ -365,7 +364,7 @@ ol_tx_queue_free(
ol_tx_desc_list tx_tmp_list;
TAILQ_INIT(&tx_tmp_list);
TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
TX_SCHED_DEBUG_PRINT("Enter");
qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
notify_ctx.event = OL_TX_DELETE_QUEUE;
@@ -396,7 +395,7 @@ ol_tx_queue_free(
ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 0);
frms--;
}
TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
TX_SCHED_DEBUG_PRINT("Leave");
}
@@ -567,7 +566,7 @@ ol_txrx_peer_tid_unpause(ol_txrx_peer_handle peer, int tid)
/* TO DO: log the queue unpause */
/* acquire the mutex lock, since we'll be modifying the queues */
TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
TX_SCHED_DEBUG_PRINT("Enter");
qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
if (tid == -1) {
@@ -581,7 +580,7 @@ ol_txrx_peer_tid_unpause(ol_txrx_peer_handle peer, int tid)
}
qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
TX_SCHED_DEBUG_PRINT("Leave");
}
void
@@ -594,7 +593,7 @@ ol_txrx_vdev_pause(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
struct ol_txrx_peer_t *peer;
/* TO DO: log the queue pause */
/* acquire the mutex lock, since we'll be modifying the queues */
TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
TX_SCHED_DEBUG_PRINT("Enter");
if (qdf_unlikely(!vdev)) {
ol_txrx_err("vdev is NULL");
@@ -620,7 +619,7 @@ ol_txrx_vdev_pause(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
TX_SCHED_DEBUG_PRINT("Leave");
}
void ol_txrx_vdev_unpause(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
@@ -633,7 +632,7 @@ void ol_txrx_vdev_unpause(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
/* TO DO: log the queue unpause */
/* acquire the mutex lock, since we'll be modifying the queues */
TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
TX_SCHED_DEBUG_PRINT("Enter");
if (qdf_unlikely(!vdev)) {
ol_txrx_err("vdev is NULL");
@@ -660,7 +659,7 @@ void ol_txrx_vdev_unpause(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
TX_SCHED_DEBUG_PRINT("Leave");
}
void ol_txrx_vdev_flush(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)
@@ -712,8 +711,7 @@ ol_txrx_peer_bal_add_limit_peer(struct ol_txrx_pdev_t *pdev,
/* Check if peer_num has reached the capabilit */
if (peer_num >= MAX_NO_PEERS_IN_LIMIT) {
TX_SCHED_DEBUG_PRINT_ALWAYS(
"reach the maxinum peer num %d\n",
peer_num);
"reach the maxinum peer num %d", peer_num);
return;
}
pdev->tx_peer_bal.limit_list[peer_num].peer_id = peer_id;
@@ -728,7 +726,7 @@ ol_txrx_peer_bal_add_limit_peer(struct ol_txrx_pdev_t *pdev,
}
TX_SCHED_DEBUG_PRINT_ALWAYS(
"Add one peer into limit queue, peer_id %d, cur peer num %d\n",
"Add one peer into limit queue, peer_id %d, cur peer num %d",
peer_id,
pdev->tx_peer_bal.peer_num);
}
@@ -773,7 +771,7 @@ ol_txrx_peer_bal_remove_limit_peer(struct ol_txrx_pdev_t *pdev,
TX_SCHED_DEBUG_PRINT(
"Remove one peer from limitq, peer_id %d, cur peer num %d\n",
"Remove one peer from limitq, peer_id %d, cur peer num %d",
peer_id,
pdev->tx_peer_bal.peer_num);
break;
@@ -796,13 +794,13 @@ ol_txrx_peer_pause_but_no_mgmt_q(ol_txrx_peer_handle peer)
/* TO DO: log the queue pause */
/* acquire the mutex lock, since we'll be modifying the queues */
TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
TX_SCHED_DEBUG_PRINT("Enter");
qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
ol_txrx_peer_pause_but_no_mgmt_q_base(pdev, peer);
qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
TX_SCHED_DEBUG_PRINT("Leave");
}
void
@@ -813,13 +811,13 @@ ol_txrx_peer_unpause_but_no_mgmt_q(ol_txrx_peer_handle peer)
/* TO DO: log the queue pause */
/* acquire the mutex lock, since we'll be modifying the queues */
TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
TX_SCHED_DEBUG_PRINT("Enter");
qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
ol_txrx_peer_unpause_but_no_mgmt_q_base(pdev, peer);
qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
TX_SCHED_DEBUG_PRINT("Leave");
}
u_int16_t
@@ -830,7 +828,7 @@ ol_tx_bad_peer_dequeue_check(struct ol_tx_frms_queue_t *txq,
if (txq && (txq->peer) && (txq->peer->tx_limit_flag) &&
(txq->peer->tx_limit < max_frames)) {
TX_SCHED_DEBUG_PRINT(
"Peer ID %d goes to limit, threshold is %d\n",
"Peer ID %d goes to limit, threshold is %d",
txq->peer->peer_ids[0], txq->peer->tx_limit);
*tx_limit_flag = 1;
return txq->peer->tx_limit;
@@ -846,12 +844,12 @@ ol_tx_bad_peer_update_tx_limit(struct ol_txrx_pdev_t *pdev,
u_int16_t tx_limit_flag)
{
if (unlikely(!pdev)) {
TX_SCHED_DEBUG_PRINT_ALWAYS("Error: NULL pdev handler\n");
TX_SCHED_DEBUG_PRINT_ALWAYS("Error: NULL pdev handler");
return;
}
if (unlikely(!txq)) {
TX_SCHED_DEBUG_PRINT_ALWAYS("Error: NULL txq\n");
TX_SCHED_DEBUG_PRINT_ALWAYS("Error: NULL txq");
return;
}
@@ -864,10 +862,10 @@ ol_tx_bad_peer_update_tx_limit(struct ol_txrx_pdev_t *pdev,
txq->peer->tx_limit -= frames;
TX_SCHED_DEBUG_PRINT_ALWAYS(
"Peer ID %d in limit, deque %d frms\n",
"Peer ID %d in limit, deque %d frms",
txq->peer->peer_ids[0], frames);
} else if (txq->peer) {
TX_SCHED_DEBUG_PRINT("Download peer_id %d, num_frames %d\n",
TX_SCHED_DEBUG_PRINT("Download peer_id %d, num_frames %d",
txq->peer->peer_ids[0], frames);
}
qdf_spin_unlock_bh(&pdev->tx_peer_bal.mutex);
@@ -930,8 +928,8 @@ ol_tx_pdev_peer_bal_timer(void *context)
peer = ol_txrx_peer_find_by_id(pdev, peer_id);
TX_SCHED_DEBUG_PRINT(
"%s peer_id %d peer = 0x%x tx limit %d\n",
__func__, peer_id,
"peer_id %d peer = 0x%x tx limit %d",
peer_id,
(int)peer, tx_limit);
/*
@@ -944,7 +942,7 @@ ol_tx_pdev_peer_bal_timer(void *context)
ol_txrx_peer_bal_remove_limit_peer(pdev,
peer_id);
TX_SCHED_DEBUG_PRINT_ALWAYS(
"No such a peer, peer id = %d\n",
"No such a peer, peer id = %d",
peer_id);
}
}
@@ -1006,13 +1004,13 @@ ol_txrx_peer_link_status_handler(
struct ol_txrx_peer_t *peer = NULL;
if (!pdev) {
TX_SCHED_DEBUG_PRINT_ALWAYS("Error: NULL pdev handler\n");
TX_SCHED_DEBUG_PRINT_ALWAYS("Error: NULL pdev handler");
return;
}
if (!peer_link_status) {
TX_SCHED_DEBUG_PRINT_ALWAYS(
"Error:NULL link report message. peer num %d\n",
"Error:NULL link report message. peer num %d",
peer_num);
return;
}
@@ -1020,18 +1018,17 @@ ol_txrx_peer_link_status_handler(
/* Check if bad peer tx flow CL is enabled */
if (pdev->tx_peer_bal.enabled != ol_tx_peer_bal_enable) {
TX_SCHED_DEBUG_PRINT_ALWAYS(
"Bad peer tx flow CL is not enabled, ignore it\n");
"Bad peer tx flow CL is not enabled, ignore it");
return;
}
/* Check peer_num is reasonable */
if (peer_num > MAX_NO_PEERS_IN_LIMIT) {
-TX_SCHED_DEBUG_PRINT_ALWAYS(
-"%s: Bad peer_num %d\n", __func__, peer_num);
+TX_SCHED_DEBUG_PRINT_ALWAYS("Bad peer_num %d", peer_num);
return;
}
TX_SCHED_DEBUG_PRINT_ALWAYS("%s: peer_num %d\n", __func__, peer_num);
TX_SCHED_DEBUG_PRINT_ALWAYS("peer_num %d", peer_num);
for (i = 0; i < peer_num; i++) {
u_int16_t peer_limit, peer_id;
@@ -1042,14 +1039,14 @@ ol_txrx_peer_link_status_handler(
peer_phy = peer_link_status->phy;
peer_tput = peer_link_status->rate;
TX_SCHED_DEBUG_PRINT("%s: peer id %d tput %d phy %d\n",
__func__, peer_id, peer_tput, peer_phy);
TX_SCHED_DEBUG_PRINT("peer id %d tput %d phy %d",
peer_id, peer_tput, peer_phy);
/* Sanity check for the PHY mode value */
if (peer_phy > TXRX_IEEE11_AC) {
TX_SCHED_DEBUG_PRINT_ALWAYS(
"%s: PHY value is illegal: %d, and the peer_id %d\n",
__func__, peer_link_status->phy, peer_id);
"PHY value is illegal: %d, and the peer_id %d",
peer_link_status->phy, peer_id);
continue;
}
pause_flag = false;
@@ -1077,8 +1074,8 @@ ol_txrx_peer_link_status_handler(
peer_limit);
} else if (pdev->tx_peer_bal.peer_num) {
TX_SCHED_DEBUG_PRINT(
"%s: Check if peer_id %d exit limit\n",
__func__, peer_id);
"Check if peer_id %d exit limit",
peer_id);
ol_txrx_peer_bal_remove_limit_peer(pdev,
peer_id);
}
@@ -1092,8 +1089,7 @@ ol_txrx_peer_link_status_handler(
}
} else {
TX_SCHED_DEBUG_PRINT(
"%s: Remove peer_id %d from limit list\n",
__func__, peer_id);
"Remove peer_id %d from limit list", peer_id);
ol_txrx_peer_bal_remove_limit_peer(pdev, peer_id);
}

@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2012-2019 The Linux Foundation. All rights reserved.
+* Copyright (c) 2012-2020 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -193,7 +193,7 @@ ol_tx_sched_select_batch_rr(
u_int16_t frames, used_credits = 0, tx_limit, tx_limit_flag = 0;
int bytes;
TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
TX_SCHED_DEBUG_PRINT("Enter");
if (TAILQ_EMPTY(&scheduler->tx_active_tids_list))
return used_credits;
@@ -238,7 +238,7 @@ ol_tx_sched_select_batch_rr(
}
sctx->frms += frames;
TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
TX_SCHED_DEBUG_PRINT("Leave");
return used_credits;
}
@@ -1289,7 +1289,7 @@ ol_tx_sched_discard_select(
notify_ctx.info.ext_tid = cat;
ol_tx_sched_notify(pdev, &notify_ctx);
TX_SCHED_DEBUG_PRINT("%s Tx Drop : %d\n", __func__, frms);
TX_SCHED_DEBUG_PRINT("Tx Drop : %d", frms);
return frms;
}
@@ -1362,7 +1362,7 @@ ol_tx_sched_dispatch(
u_int16_t msdu_id;
int num_msdus = 0;
TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
TX_SCHED_DEBUG_PRINT("Enter");
while (sctx->frms) {
tx_desc = TAILQ_FIRST(&sctx->head);
if (!tx_desc) {
@@ -1438,7 +1438,7 @@ ol_tx_sched_dispatch(
/*Send Batch Of Frames*/
if (head_msdu)
ol_tx_send_batch(pdev, head_msdu, num_msdus);
TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
TX_SCHED_DEBUG_PRINT("Leave");
}
#ifdef QCA_TX_PADDING_CREDIT_SUPPORT
@@ -1490,7 +1490,7 @@ ol_tx_sched(struct ol_txrx_pdev_t *pdev)
struct ol_tx_sched_ctx sctx;
u_int32_t credit;
TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
TX_SCHED_DEBUG_PRINT("Enter");
qdf_spin_lock_bh(&pdev->tx_queue_spinlock);
if (pdev->tx_sched.tx_sched_status != ol_tx_scheduler_idle) {
qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
@@ -1550,7 +1550,7 @@ ol_tx_sched(struct ol_txrx_pdev_t *pdev)
pdev->tx_sched.tx_sched_status = ol_tx_scheduler_idle;
qdf_spin_unlock_bh(&pdev->tx_queue_spinlock);
TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
TX_SCHED_DEBUG_PRINT("Leave");
}
void *

@@ -51,7 +51,7 @@ void ol_txrx_vdev_pause(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
/* TO DO: log the queue pause */
/* acquire the mutex lock, since we'll be modifying the queues */
TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
TX_SCHED_DEBUG_PRINT("Enter");
qdf_spin_lock_bh(&vdev->ll_pause.mutex);
vdev->ll_pause.paused_reason |= reason;
@@ -59,7 +59,7 @@ void ol_txrx_vdev_pause(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
vdev->ll_pause.is_q_paused = true;
qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
TX_SCHED_DEBUG_PRINT("Leave");
}
void ol_txrx_vdev_unpause(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
@@ -75,7 +75,7 @@ void ol_txrx_vdev_unpause(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
/* TO DO: log the queue unpause */
/* acquire the mutex lock, since we'll be modifying the queues */
TX_SCHED_DEBUG_PRINT("Enter %s\n", __func__);
TX_SCHED_DEBUG_PRINT("Enter");
qdf_spin_lock_bh(&vdev->ll_pause.mutex);
if (vdev->ll_pause.paused_reason & reason) {
@@ -91,7 +91,7 @@ void ol_txrx_vdev_unpause(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
} else {
qdf_spin_unlock_bh(&vdev->ll_pause.mutex);
}
TX_SCHED_DEBUG_PRINT("Leave %s\n", __func__);
TX_SCHED_DEBUG_PRINT("Leave");
}
void ol_txrx_vdev_flush(struct cdp_soc_t *soc_hdl, uint8_t vdev_id)