浏览代码

qcacld-3.0: Remove unnecessary prints from data path

1. Remove error prints for qdf_mem_malloc APIs.
2. Remove unnecessary __func__ from data path prints.

Change-Id: I6c4b110f626d84da055821c5f210a3d000b6ff15
CRs-Fixed: 2317315
Nirav Shah 6 年之前
父节点
当前提交
7c8c171e05

+ 3 - 4
core/dp/htt/htt.c

@@ -72,10 +72,9 @@ struct htt_htc_pkt *htt_htc_pkt_alloc(struct htt_pdev_t *pdev)
 	if (pkt == NULL)
 		pkt = qdf_mem_malloc(sizeof(*pkt));
 
-	if (!pkt) {
-		qdf_print("%s: HTC packet allocation failed\n", __func__);
+	if (!pkt)
 		return NULL;
-	}
+
 	htc_packet_set_magic_cookie(&(pkt->u.pkt.htc_pkt), 0);
 	return &pkt->u.pkt;     /* not actually a dereference */
 }
@@ -85,7 +84,7 @@ void htt_htc_pkt_free(struct htt_pdev_t *pdev, struct htt_htc_pkt *pkt)
 	struct htt_htc_pkt_union *u_pkt = (struct htt_htc_pkt_union *)pkt;
 
 	if (!u_pkt) {
-		qdf_print("%s: HTC packet is NULL\n", __func__);
+		qdf_print("HTC packet is NULL");
 		return;
 	}
 

+ 2 - 2
core/dp/htt/htt_internal.h

@@ -968,8 +968,8 @@ htt_rx_paddr_unmark_high_bits(qdf_dma_addr_t paddr)
 		 * padded (with 0b0) to 8 bits
 		 */
 		if ((markings & 0xFFFF0000) != RX_PADDR_MAGIC_PATTERN) {
-			qdf_print("%s: paddr not marked correctly: 0x%pK!\n",
-				  __func__, (void *)paddr);
+			qdf_print("paddr not marked correctly: 0x%pK!\n",
+				  (void *)paddr);
 			HTT_ASSERT_ALWAYS(0);
 		}
 

+ 6 - 9
core/dp/htt/htt_monitor_rx.c

@@ -97,7 +97,7 @@ int htt_mon_rx_handle_amsdu_packet(qdf_nbuf_t msdu, htt_pdev_handle pdev,
 	paddr = htt_rx_in_ord_paddr_get(*msg_word);
 	frag_nbuf = htt_rx_in_order_netbuf_pop(pdev, paddr);
 	if (qdf_unlikely(!frag_nbuf)) {
-		qdf_print("%s: netbuf pop failed!\n", __func__);
+		qdf_print("netbuf pop failed!");
 		return 0;
 	}
 	*frag_cnt = *frag_cnt + 1;
@@ -124,7 +124,7 @@ int htt_mon_rx_handle_amsdu_packet(qdf_nbuf_t msdu, htt_pdev_handle pdev,
 			     *msg_word)->msdu_info;
 
 		if (qdf_unlikely(!frag_nbuf)) {
-			qdf_print("%s: netbuf pop failed!\n", __func__);
+			qdf_print("netbuf pop failed!");
 			prev_frag_nbuf->next = NULL;
 			return 0;
 		}
@@ -548,7 +548,7 @@ int htt_rx_mon_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
 	msdu = htt_rx_in_order_netbuf_pop(pdev, paddr);
 
 	if (qdf_unlikely(!msdu)) {
-		qdf_print("%s: netbuf pop failed!\n", __func__);
+		qdf_print("netbuf pop failed!");
 		*tail_msdu = NULL;
 		return 0;
 	}
@@ -581,8 +581,7 @@ int htt_rx_mon_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
 					htt_rx_in_ord_paddr_ind_msdu_t *)
 					msg_word)->msdu_info;
 				if (qdf_unlikely(!msdu)) {
-					qdf_print("%s: netbuf pop failed!\n",
-						  __func__);
+					qdf_print("netbuf pop failed!");
 					return 0;
 				}
 				*replenish_cnt = *replenish_cnt + 1;
@@ -698,8 +697,7 @@ int htt_rx_mon_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
 							    &msg_word,
 							    amsdu_len,
 							    replenish_cnt)) {
-				qdf_print("%s: failed to handle amsdu packet\n",
-					  __func__);
+				qdf_print("failed to handle amsdu packet");
 				return 0;
 			}
 		}
@@ -711,8 +709,7 @@ next_pop:
 			paddr = htt_rx_in_ord_paddr_get(msg_word);
 			next = htt_rx_in_order_netbuf_pop(pdev, paddr);
 			if (qdf_unlikely(!next)) {
-				qdf_print("%s: netbuf pop failed!\n",
-					  __func__);
+				qdf_print("netbuf pop failed!");
 				*tail_msdu = NULL;
 				return 0;
 			}

+ 2 - 8
core/dp/htt/htt_rx.c

@@ -396,10 +396,7 @@ void htt_register_rx_pkt_dump_callback(struct htt_pdev_t *pdev,
 				tp_rx_pkt_dump_cb callback)
 {
 	if (!pdev) {
-		qdf_print("%s: %s, %s",
-			__func__,
-			"htt pdev is NULL",
-			"rx packet status callback register unsuccessful\n");
+		qdf_print("pdev is NULL");
 		return;
 	}
 	pdev->rx_pkt_dump_cb = callback;
@@ -420,10 +417,7 @@ void htt_register_rx_pkt_dump_callback(struct htt_pdev_t *pdev,
 void htt_deregister_rx_pkt_dump_callback(struct htt_pdev_t *pdev)
 {
 	if (!pdev) {
-		qdf_print("%s: %s, %s",
-			__func__,
-			"htt pdev is NULL",
-			"rx packet status callback deregister unsuccessful\n");
+		qdf_print("pdev is NULL");
 		return;
 	}
 	pdev->rx_pkt_dump_cb = NULL;

+ 13 - 18
core/dp/htt/htt_rx_ll.c

@@ -922,7 +922,7 @@ htt_rx_offload_msdu_pop_ll(htt_pdev_handle pdev,
 	*head_buf = *tail_buf = buf = htt_rx_netbuf_pop(pdev);
 
 	if (qdf_unlikely(!buf)) {
-		qdf_print("%s: netbuf pop failed!\n", __func__);
+		qdf_print("netbuf pop failed!");
 		return 1;
 	}
 
@@ -972,7 +972,7 @@ htt_rx_offload_paddr_msdu_pop_ll(htt_pdev_handle pdev,
 	*head_buf = *tail_buf = buf = htt_rx_in_order_netbuf_pop(pdev, paddr);
 
 	if (qdf_unlikely(!buf)) {
-		qdf_print("%s: netbuf pop failed!\n", __func__);
+		qdf_print("netbuf pop failed!");
 		return 1;
 	}
 	qdf_nbuf_set_pktlen(buf, HTT_RX_BUF_SIZE);
@@ -1149,8 +1149,8 @@ htt_rx_hash_list_insert(struct htt_pdev_t *pdev,
 	htt_list_add_tail(&pdev->rx_ring.hash_table[i]->listhead,
 			  &hash_element->listnode);
 
-	RX_HASH_LOG(qdf_print("rx hash: %s: paddr 0x%x netbuf %pK bucket %d\n",
-			      __func__, paddr, netbuf, (int)i));
+	RX_HASH_LOG(qdf_print("rx hash: paddr 0x%x netbuf %pK bucket %d\n",
+			      paddr, netbuf, (int)i));
 
 	HTT_RX_HASH_COUNT_INCR(pdev->rx_ring.hash_table[i]);
 	HTT_RX_HASH_COUNT_PRINT(pdev->rx_ring.hash_table[i]);
@@ -1214,15 +1214,15 @@ qdf_nbuf_t htt_rx_hash_list_lookup(struct htt_pdev_t *pdev,
 		}
 	}
 
-	RX_HASH_LOG(qdf_print("rx hash: %s: paddr 0x%x, netbuf %pK, bucket %d\n",
-			      __func__, paddr, netbuf, (int)i));
+	RX_HASH_LOG(qdf_print("rx hash: paddr 0x%x, netbuf %pK, bucket %d\n",
+			      paddr, netbuf, (int)i));
 	HTT_RX_HASH_COUNT_PRINT(pdev->rx_ring.hash_table[i]);
 
 	qdf_spin_unlock_bh(&pdev->rx_ring.rx_hash_lock);
 
 	if (!netbuf) {
-		qdf_print("rx hash: %s: no entry found for %pK!\n",
-			  __func__, (void *)paddr);
+		qdf_print("rx hash: no entry found for %pK!\n",
+			  (void *)paddr);
 		cds_trigger_recovery(QDF_RX_HASH_NO_ENTRY_FOUND);
 	}
 
@@ -1247,10 +1247,8 @@ static int htt_rx_hash_init(struct htt_pdev_t *pdev)
 		qdf_mem_malloc(RX_NUM_HASH_BUCKETS *
 			       sizeof(struct htt_rx_hash_bucket *));
 
-	if (!pdev->rx_ring.hash_table) {
-		qdf_print("rx hash table allocation failed!\n");
+	if (!pdev->rx_ring.hash_table)
 		return 1;
-	}
 
 	qdf_spinlock_create(&pdev->rx_ring.rx_hash_lock);
 	qdf_spin_lock_bh(&pdev->rx_ring.rx_hash_lock);
@@ -1465,7 +1463,7 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
 	(*head_msdu) = msdu = htt_rx_in_order_netbuf_pop(pdev, paddr);
 
 	if (qdf_unlikely(!msdu)) {
-		qdf_print("%s: netbuf pop failed!\n", __func__);
+		qdf_print("netbuf pop failed!");
 		*tail_msdu = NULL;
 		pdev->rx_ring.pop_fail_cnt++;
 		ret = 0;
@@ -1510,8 +1508,7 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
 				paddr = htt_rx_in_ord_paddr_get(msg_word);
 				next = htt_rx_in_order_netbuf_pop(pdev, paddr);
 				if (qdf_unlikely(!next)) {
-					qdf_print("%s: netbuf pop failed!\n",
-						  __func__);
+					qdf_print("netbuf pop failed!");
 					*tail_msdu = NULL;
 					pdev->rx_ring.pop_fail_cnt++;
 					ret = 0;
@@ -1606,8 +1603,7 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
 				paddr = htt_rx_in_ord_paddr_get(msg_word);
 				next = htt_rx_in_order_netbuf_pop(pdev, paddr);
 				if (qdf_unlikely(!next)) {
-					qdf_print("%s: netbuf pop failed!\n",
-						  __func__);
+					qdf_print("netbuf pop failed!");
 					*tail_msdu = NULL;
 					pdev->rx_ring.pop_fail_cnt++;
 					ret = 0;
@@ -1635,8 +1631,7 @@ htt_rx_amsdu_rx_in_order_pop_ll(htt_pdev_handle pdev,
 			paddr = htt_rx_in_ord_paddr_get(msg_word);
 			next = htt_rx_in_order_netbuf_pop(pdev, paddr);
 			if (qdf_unlikely(!next)) {
-				qdf_print("%s: netbuf pop failed!\n",
-					  __func__);
+				qdf_print("netbuf pop failed!");
 				*tail_msdu = NULL;
 				pdev->rx_ring.pop_fail_cnt++;
 				ret = 0;

+ 15 - 29
core/dp/htt/htt_t2h.c

@@ -168,10 +168,9 @@ static void htt_ipa_op_response(struct htt_pdev_t *pdev, uint32_t *msg_word)
 		qdf_mem_malloc(sizeof
 				(struct htt_wdi_ipa_op_response_t) +
 				len);
-	if (!op_msg_buffer) {
-		qdf_print("OPCODE message buffer alloc fail");
+	if (!op_msg_buffer)
 		return;
-	}
+
 	qdf_mem_copy(op_msg_buffer,
 			msg_start_ptr,
 			sizeof(struct htt_wdi_ipa_op_response_t) +
@@ -287,9 +286,7 @@ static void htt_t2h_lp_msg_handler(void *context, qdf_nbuf_t htt_t2h_msg,
 				sizeof(struct hl_htt_rx_ind_base)+
 				sizeof(struct ieee80211_frame))) {
 
-				qdf_print("%s: invalid packet len, %u\n",
-						__func__,
-						rx_pkt_len);
+				qdf_print("invalid packet len, %u", rx_pkt_len);
 				/*
 				 * This buf will be freed before
 				 * exiting this function.
@@ -446,7 +443,7 @@ static void htt_t2h_lp_msg_handler(void *context, qdf_nbuf_t htt_t2h_msg,
 		uint32_t len = qdf_nbuf_len(htt_t2h_msg);
 
 		if (len < sizeof(*msg_word) + sizeof(uint32_t)) {
-			qdf_print("%s: invalid nbuff len \n", __func__);
+			qdf_print("invalid nbuff len");
 			WARN_ON(1);
 			break;
 		}
@@ -470,11 +467,8 @@ static void htt_t2h_lp_msg_handler(void *context, qdf_nbuf_t htt_t2h_msg,
 		old_credit = qdf_atomic_read(&pdev->htt_tx_credit.target_delta);
 		if (((old_credit + htt_credit_delta) > MAX_TARGET_TX_CREDIT) ||
 			((old_credit + htt_credit_delta) < -MAX_TARGET_TX_CREDIT)) {
-			qdf_print("%s: invalid credit update,old_credit=%d,"
-				"htt_credit_delta=%d\n",
-				__func__,
-				old_credit,
-				htt_credit_delta);
+			qdf_err("invalid update,old_credit=%d, htt_credit_delta=%d",
+				old_credit, htt_credit_delta);
 			break;
 		}
 
@@ -594,8 +588,7 @@ static void htt_t2h_lp_msg_handler(void *context, qdf_nbuf_t htt_t2h_msg,
 			peer = ol_txrx_peer_find_by_id(pdev->txrx_pdev,
 				 peer_id);
 			if (!peer) {
-				qdf_print("%s: invalid peer id %d\n",
-					 __func__, peer_id);
+				qdf_print("invalid peer id %d", peer_id);
 				qdf_assert(0);
 				break;
 			}
@@ -620,9 +613,8 @@ static void htt_t2h_lp_msg_handler(void *context, qdf_nbuf_t htt_t2h_msg,
 		}
 		default:
 		{
-			qdf_print("%s: unhandled error type %d\n",
-			 __func__,
-			 HTT_RX_OFLD_PKT_ERR_MSG_SUB_TYPE_GET(*msg_word));
+			qdf_print("unhandled error type %d",
+			    HTT_RX_OFLD_PKT_ERR_MSG_SUB_TYPE_GET(*msg_word));
 		break;
 		}
 		}
@@ -820,11 +812,8 @@ void htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
 						&pdev->htt_tx_credit.target_delta);
 			if (((old_credit + num_msdus) > MAX_TARGET_TX_CREDIT) ||
 				((old_credit + num_msdus) < -MAX_TARGET_TX_CREDIT)) {
-				qdf_print("%s: invalid credit update,old_credit=%d,"
-					"num_msdus=%d\n",
-					__func__,
-					old_credit,
-					num_msdus);
+				qdf_err("invalid update,old_credit=%d, num_msdus=%d",
+					old_credit, num_msdus);
 			} else {
 				if (!pdev->cfg.default_tx_comp_req) {
 					int credit_delta;
@@ -931,15 +920,12 @@ void htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
 		uint8_t offload_ind, frag_ind;
 
 		if (qdf_unlikely(!pdev->cfg.is_full_reorder_offload)) {
-			qdf_print("HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND not ");
-			qdf_print("supported when full reorder offload is ");
-			qdf_print("disabled in the configuration.\n");
+			qdf_print("full reorder offload is disable");
 			break;
 		}
 
 		if (qdf_unlikely(pdev->cfg.is_high_latency)) {
-			qdf_print("HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND ");
-			qdf_print("not supported on high latency.\n");
+			qdf_print("full reorder offload not support in HL");
 			break;
 		}
 
@@ -949,8 +935,8 @@ void htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
 		frag_ind = HTT_RX_IN_ORD_PADDR_IND_FRAG_GET(*msg_word);
 
 #if defined(HELIUMPLUS_DEBUG)
-		qdf_print("%s %d: peerid %d tid %d offloadind %d fragind %d\n",
-			  __func__, __LINE__, peer_id, tid, offload_ind,
+		qdf_print("peerid %d tid %d offloadind %d fragind %d",
+			  peer_id, tid, offload_ind,
 			  frag_ind);
 #endif
 		if (qdf_unlikely(frag_ind)) {

+ 9 - 15
core/dp/htt/htt_tx.c

@@ -838,7 +838,7 @@ htt_tx_resume_handler(void *context) { }
 qdf_nbuf_t
 htt_tx_send_batch(htt_pdev_handle pdev, qdf_nbuf_t head_msdu, int num_msdus)
 {
-	qdf_print("*** %s currently only applies for HL systems\n", __func__);
+	qdf_print("Not apply to LL");
 	qdf_assert(0);
 	return head_msdu;
 
@@ -1072,8 +1072,7 @@ static int htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t *pdev,
 	if (qdf_mem_smmu_s1_enabled(pdev->osdev)) {
 		mem_map_table = qdf_mem_map_table_alloc(uc_tx_buf_cnt);
 		if (!mem_map_table) {
-			qdf_print("%s: Failed to allocate memory for mem map table\n",
-				  __func__);
+			qdf_print("Failed to allocate memory");
 			return 0;
 		}
 		mem_info = mem_map_table;
@@ -1191,8 +1190,7 @@ static void htt_tx_buf_pool_free(struct htt_pdev_t *pdev)
 		mem_map_table = qdf_mem_map_table_alloc(
 					pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt);
 		if (!mem_map_table) {
-			qdf_print("%s: Failed to allocate memory for mem map table\n",
-				  __func__);
+			qdf_print("Failed to allocate memory");
 			return;
 		}
 		mem_info = mem_map_table;
@@ -1238,8 +1236,7 @@ static int htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t *pdev,
 	if (qdf_mem_smmu_s1_enabled(pdev->osdev)) {
 		mem_map_table = qdf_mem_map_table_alloc(uc_tx_buf_cnt);
 		if (!mem_map_table) {
-			qdf_print("%s: Failed to allocate memory for mem map table\n",
-				  __func__);
+			qdf_print("Failed to allocate memory");
 			return 0;
 		}
 		mem_info = mem_map_table;
@@ -1251,8 +1248,8 @@ static int htt_tx_ipa_uc_wdi_tx_buf_alloc(struct htt_pdev_t *pdev,
 		shared_tx_buffer = qdf_mem_shared_mem_alloc(pdev->osdev,
 							    uc_tx_buf_sz);
 		if (!shared_tx_buffer || !shared_tx_buffer->vaddr) {
-			qdf_print("%s: TX BUF alloc fail, loop index: %d",
-				  __func__, tx_buffer_count);
+			qdf_print("TX BUF alloc fail, loop index: %d",
+				  tx_buffer_count);
 			goto pwr2;
 		}
 
@@ -1338,8 +1335,7 @@ static void htt_tx_buf_pool_free(struct htt_pdev_t *pdev)
 		mem_map_table = qdf_mem_map_table_alloc(
 					pdev->ipa_uc_tx_rsc.alloc_tx_buf_cnt);
 		if (!mem_map_table) {
-			qdf_print("%s: Failed to allocate memory for mem map table\n",
-				  __func__);
+			qdf_print("Failed to allocate memory");
 			return;
 		}
 		mem_info = mem_map_table;
@@ -1390,8 +1386,7 @@ int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
 	pdev->ipa_uc_tx_rsc.tx_ce_idx =
 		qdf_mem_shared_mem_alloc(pdev->osdev, 4);
 	if (!pdev->ipa_uc_tx_rsc.tx_ce_idx) {
-		qdf_print("%s: Unable to allocate memory for IPA tx ce idx\n",
-			  __func__);
+		qdf_print("Unable to allocate memory for IPA tx ce idx");
 		return -ENOBUFS;
 	}
 
@@ -1402,7 +1397,7 @@ int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
 					 tx_comp_ring_size);
 	if (!pdev->ipa_uc_tx_rsc.tx_comp_ring ||
 	    !pdev->ipa_uc_tx_rsc.tx_comp_ring->vaddr) {
-		qdf_print("%s: TX COMP ring alloc fail", __func__);
+		qdf_print("TX COMP ring alloc fail");
 		return_code = -ENOBUFS;
 		goto free_tx_ce_idx;
 	}
@@ -1412,7 +1407,6 @@ int htt_tx_ipa_uc_attach(struct htt_pdev_t *pdev,
 		qdf_mem_malloc(uc_tx_buf_cnt *
 			sizeof(*pdev->ipa_uc_tx_rsc.tx_buf_pool_strg));
 	if (!pdev->ipa_uc_tx_rsc.tx_buf_pool_strg) {
-		qdf_print("%s: TX BUF POOL vaddr storage alloc fail", __func__);
 		return_code = -ENOBUFS;
 		goto free_tx_comp_base;
 	}

+ 1 - 3
core/dp/txrx/ol_cfg.c

@@ -115,10 +115,8 @@ struct cdp_cfg *ol_pdev_cfg_attach(qdf_device_t osdev, void *pcfg_param)
 	int i;
 
 	cfg_ctx = qdf_mem_malloc(sizeof(*cfg_ctx));
-	if (!cfg_ctx) {
-		printk(KERN_ERR "cfg ctx allocation failed\n");
+	if (!cfg_ctx)
 		return NULL;
-	}
 
 	ol_pdev_cfg_param_update(cfg_ctx);
 

+ 11 - 17
core/dp/txrx/ol_rx.c

@@ -230,7 +230,7 @@ void ol_rx_update_histogram_stats(uint32_t msdu_count, uint8_t frag_ind,
 	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
 
 	if (!pdev) {
-		ol_txrx_err("%s pdev is NULL\n", __func__);
+		ol_txrx_err("pdev is NULL");
 		return;
 	}
 
@@ -555,8 +555,7 @@ ol_rx_indication_handler(ol_txrx_pdev_handle pdev,
 			ol_rx_reorder_peer_cleanup(vdev, peer);
 		} else {
 			if (tid >= OL_TXRX_NUM_EXT_TIDS) {
-				ol_txrx_err("%s:  invalid tid, %u\n",
-					    __func__, tid);
+				ol_txrx_err("invalid tid, %u", tid);
 				WARN_ON(1);
 				return;
 			}
@@ -970,9 +969,7 @@ ol_rx_offload_deliver_ind_handler(ol_txrx_pdev_handle pdev,
 	htt_pdev_handle htt_pdev = pdev->htt_pdev;
 
 	if (msdu_cnt > htt_rx_offload_msdu_cnt(htt_pdev)) {
-		ol_txrx_err("%s: invalid msdu_cnt=%u\n",
-			__func__,
-			msdu_cnt);
+		ol_txrx_err("invalid msdu_cnt=%u", msdu_cnt);
 
 		if (pdev->cfg.is_high_latency)
 			htt_rx_desc_frame_free(htt_pdev, msg);
@@ -1503,7 +1500,7 @@ ol_rx_in_order_indication_handler(ol_txrx_pdev_handle pdev,
 	uint32_t filled = 0;
 
 	if (tid >= OL_TXRX_NUM_EXT_TIDS) {
-		ol_txrx_err("%s:  invalid tid, %u\n", __FUNCTION__, tid);
+		ol_txrx_err("invalid tid, %u", tid);
 		WARN_ON(1);
 		return;
 	}
@@ -1515,14 +1512,14 @@ ol_rx_in_order_indication_handler(ol_txrx_pdev_handle pdev,
 			peer = ol_txrx_peer_find_by_id(pdev, peer_id);
 		htt_pdev = pdev->htt_pdev;
 	} else {
-		ol_txrx_err("%s: Invalid pdev passed!\n", __func__);
+		ol_txrx_err("Invalid pdev passed!");
 		qdf_assert_always(pdev);
 		return;
 	}
 
 #if defined(HELIUMPLUS_DEBUG)
-	qdf_print("%s %d: rx_ind_msg 0x%pK peer_id %d tid %d is_offload %d\n",
-		  __func__, __LINE__, rx_ind_msg, peer_id, tid, is_offload);
+	qdf_print("rx_ind_msg 0x%pK peer_id %d tid %d is_offload %d",
+		  rx_ind_msg, peer_id, tid, is_offload);
 #endif
 
 	pktlog_bit = (htt_rx_amsdu_rx_in_order_get_pktlog(rx_ind_msg) == 0x01);
@@ -1543,7 +1540,7 @@ ol_rx_in_order_indication_handler(ol_txrx_pdev_handle pdev,
 	ol_rx_ind_record_event(status, OL_RX_INDICATION_POP_END);
 
 	if (qdf_unlikely(0 == status)) {
-		ol_txrx_warn("%s: Pop status is 0, returning here", __func__);
+		ol_txrx_warn("pop failed");
 		return;
 	}
 
@@ -1574,9 +1571,7 @@ ol_rx_in_order_indication_handler(ol_txrx_pdev_handle pdev,
 	if (peer) {
 		vdev = peer->vdev;
 	} else {
-		ol_txrx_dbg(
-			   "%s: Couldn't find peer from ID 0x%x\n",
-			   __func__, peer_id);
+		ol_txrx_dbg("Couldn't find peer from ID 0x%x", peer_id);
 		while (head_msdu) {
 			qdf_nbuf_t msdu = head_msdu;
 
@@ -1619,14 +1614,13 @@ void ol_rx_pkt_dump_call(
 	pdev = cds_get_context(QDF_MODULE_ID_TXRX);
 
 	if (!pdev) {
-		ol_txrx_err("%s: pdev is NULL", __func__);
+		ol_txrx_err("pdev is NULL");
 		return;
 	}
 
 	peer = ol_txrx_peer_find_by_id(pdev, peer_id);
 	if (!peer) {
-		ol_txrx_dbg("%s: peer with peer id %d is NULL", __func__,
-			peer_id);
+		ol_txrx_dbg("peer with peer id %d is NULL", peer_id);
 		return;
 	}
 

+ 2 - 3
core/dp/txrx/ol_rx_fwd.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2014-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011, 2014-2018 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -70,8 +70,7 @@ static inline void ol_ap_fwd_check(struct ol_txrx_vdev_t *vdev, qdf_nbuf_t msdu)
 	    qdf_mem_cmp
 		     (mac_header->i_addr3, vdev->mac_addr.raw,
 		     IEEE80211_ADDR_LEN)) {
-		ol_txrx_dbg("Exit: %s | Unnecessary to adjust mac header\n",
-			   __func__);
+		ol_txrx_dbg("Exit | Unnecessary to adjust mac header");
 	} else {
 		/* Flip the ToDs bit to FromDs */
 		mac_header->i_fc[1] &= 0xfe;

+ 3 - 3
core/dp/txrx/ol_tx.c

@@ -77,7 +77,7 @@ qdf_nbuf_t ol_tx_data(void *data_vdev, qdf_nbuf_t skb)
 	qdf_nbuf_set_next(skb, NULL);
 	ret = OL_TX_SEND(vdev, skb);
 	if (ret) {
-		ol_txrx_dbg("%s: Failed to tx", __func__);
+		ol_txrx_dbg("Failed to tx");
 		return ret;
 	}
 
@@ -93,7 +93,7 @@ qdf_nbuf_t ol_tx_send_ipa_data_frame(struct cdp_vdev *vdev, qdf_nbuf_t skb)
 	if (qdf_unlikely(!pdev)) {
 		qdf_net_buf_debug_acquire_skb(skb, __FILE__, __LINE__);
 
-		ol_txrx_err("%s: pdev is NULL", __func__);
+		ol_txrx_err("pdev is NULL");
 		return skb;
 	}
 
@@ -113,7 +113,7 @@ qdf_nbuf_t ol_tx_send_ipa_data_frame(struct cdp_vdev *vdev, qdf_nbuf_t skb)
 
 	ret = OL_TX_SEND((struct ol_txrx_vdev_t *)vdev, skb);
 	if (ret) {
-		ol_txrx_dbg("%s: Failed to tx", __func__);
+		ol_txrx_dbg("Failed to tx");
 		return ret;
 	}
 

+ 3 - 6
core/dp/txrx/ol_tx_classify.c

@@ -572,9 +572,8 @@ ol_tx_classify(
 		 */
 		if (tx_msdu_info->htt.info.peer_id == HTT_INVALID_PEER_ID) {
 			if (peer) {
-				ol_txrx_info(
-					   "%s: remove the peer for invalid peer_id %pK\n",
-					   __func__, peer);
+				ol_txrx_info("remove the peer for invalid peer_id %pK",
+					     peer);
 				/* remove the peer reference added above */
 				ol_txrx_peer_release_ref
 						(peer,
@@ -597,9 +596,7 @@ ol_tx_classify(
 	if (IEEE80211_IS_MULTICAST(dest_addr) && vdev->opmode !=
 				wlan_op_mode_sta && tx_msdu_info->peer !=
 								NULL) {
-		ol_txrx_dbg(
-			   "%s: remove the peer reference %pK\n",
-			   __func__, peer);
+		ol_txrx_dbg("remove the peer reference %pK", peer);
 		/* remove the peer reference added above */
 		ol_txrx_peer_release_ref(tx_msdu_info->peer,
 					 PEER_DEBUG_ID_OL_INTERNAL);

+ 9 - 13
core/dp/txrx/ol_tx_desc.c

@@ -39,9 +39,8 @@ static inline void ol_tx_desc_sanity_checks(struct ol_txrx_pdev_t *pdev,
 					struct ol_tx_desc_t *tx_desc)
 {
 	if (tx_desc->pkt_type != ol_tx_frm_freed) {
-		ol_txrx_err(
-				   "%s Potential tx_desc corruption pkt_type:0x%x pdev:0x%pK",
-				   __func__, tx_desc->pkt_type, pdev);
+		ol_txrx_err("Potential tx_desc corruption pkt_type:0x%x pdev:0x%pK",
+			    tx_desc->pkt_type, pdev);
 		qdf_assert(0);
 	}
 }
@@ -53,8 +52,7 @@ static inline void ol_tx_desc_reset_pkt_type(struct ol_tx_desc_t *tx_desc)
 static inline void ol_tx_desc_compute_delay(struct ol_tx_desc_t *tx_desc)
 {
 	if (tx_desc->entry_timestamp_ticks != 0xffffffff) {
-		ol_txrx_err("%s Timestamp:0x%x\n",
-				   __func__, tx_desc->entry_timestamp_ticks);
+		ol_txrx_err("Timestamp:0x%x", tx_desc->entry_timestamp_ticks);
 		qdf_assert(0);
 	}
 	tx_desc->entry_timestamp_ticks = qdf_system_ticks();
@@ -526,16 +524,14 @@ void ol_tx_desc_free(struct ol_txrx_pdev_t *pdev, struct ol_tx_desc_t *tx_desc)
 		if (pool->avail_desc == pool->flow_pool_size) {
 			qdf_spin_unlock_bh(&pool->flow_pool_lock);
 			ol_tx_free_invalid_flow_pool(pool);
-			qdf_print("%s %d pool is INVALID State!!\n",
-				 __func__, __LINE__);
+			qdf_print("pool is INVALID State!!");
 			return;
 		}
 		break;
 	case FLOW_POOL_ACTIVE_UNPAUSED:
 		break;
 	default:
-		qdf_print("%s %d pool is INACTIVE State!!\n",
-				 __func__, __LINE__);
+		qdf_print("pool is INACTIVE State!!");
 		break;
 	};
 
@@ -712,8 +708,8 @@ struct ol_tx_desc_t *ol_tx_desc_ll(struct ol_txrx_pdev_t *pdev,
 			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_frag_desc,
 					 i - 1, frag_paddr, frag_len);
 #if defined(HELIUMPLUS_DEBUG)
-			qdf_print("%s:%d: htt_fdesc=%pK frag=%d frag_vaddr=0x%pK frag_paddr=0x%llx len=%zu\n",
-				  __func__, __LINE__, tx_desc->htt_frag_desc,
+			qdf_debug("htt_fdesc=%pK frag=%d frag_vaddr=0x%pK frag_paddr=0x%llx len=%zu\n",
+				  tx_desc->htt_frag_desc,
 				  i-1, frag_vaddr, frag_paddr, frag_len);
 			ol_txrx_dump_pkt(netbuf, frag_paddr, 64);
 #endif /* HELIUMPLUS_DEBUG */
@@ -855,8 +851,8 @@ void ol_tx_desc_frame_free_nonstd(struct ol_txrx_pdev_t *pdev,
 		 * table pointer needs to be reset.
 		 */
 #if defined(HELIUMPLUS_DEBUG)
-		qdf_print("%s %d: Frag Descriptor Reset [%d] to 0x%x\n",
-			  __func__, __LINE__, tx_desc->id,
+		qdf_print("Frag Descriptor Reset [%d] to 0x%x\n",
+			  tx_desc->id,
 			  frag_desc_paddr);
 #endif /* HELIUMPLUS_DEBUG */
 #endif /* HELIUMPLUS */

+ 5 - 7
core/dp/txrx/ol_tx_desc.h

@@ -359,8 +359,6 @@ void ol_tx_desc_dup_detect_init(struct ol_txrx_pdev_t *pdev, uint16_t pool_size)
 	uint16_t size = (pool_size >> DIV_BY_8) +
 		sizeof(*pdev->tx_desc.free_list_bitmap);
 	pdev->tx_desc.free_list_bitmap = qdf_mem_malloc(size);
-	if (!pdev->tx_desc.free_list_bitmap)
-		qdf_print("%s: malloc failed", __func__);
 }
 
 /**
@@ -397,8 +395,8 @@ void ol_tx_desc_dup_detect_set(struct ol_txrx_pdev_t *pdev,
 		return;
 
 	if (qdf_unlikely(msdu_id > pdev->tx_desc.pool_size)) {
-		qdf_print("%s: msdu_id %d > pool_size %d",
-			  __func__, msdu_id, pdev->tx_desc.pool_size);
+		qdf_print("msdu_id %d > pool_size %d",
+			  msdu_id, pdev->tx_desc.pool_size);
 		QDF_BUG(0);
 	}
 
@@ -406,7 +404,7 @@ void ol_tx_desc_dup_detect_set(struct ol_txrx_pdev_t *pdev,
 	if (qdf_unlikely(test)) {
 		uint16_t size = (pdev->tx_desc.pool_size >> DIV_BY_8) +
 			((pdev->tx_desc.pool_size & MOD_BY_8) ? 1 : 0);
-		qdf_print("duplicate msdu_id %d detected !!\n", msdu_id);
+		qdf_print("duplicate msdu_id %d detected!!", msdu_id);
 		qdf_trace_hex_dump(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
 		(void *)pdev->tx_desc.free_list_bitmap, size);
 		QDF_BUG(0);
@@ -431,8 +429,8 @@ void ol_tx_desc_dup_detect_reset(struct ol_txrx_pdev_t *pdev,
 		return;
 
 	if (qdf_unlikely(msdu_id > pdev->tx_desc.pool_size)) {
-		qdf_print("%s: msdu_id %d > pool_size %d",
-			  __func__, msdu_id, pdev->tx_desc.pool_size);
+		qdf_print("msdu_id %d > pool_size %d",
+			  msdu_id, pdev->tx_desc.pool_size);
 		QDF_BUG(0);
 	}
 

+ 1 - 3
core/dp/txrx/ol_tx_hl.c

@@ -1041,9 +1041,7 @@ void ol_txrx_update_tx_queue_groups(
 	struct ol_txrx_vdev_t *vdev;
 
 	if (group_id >= OL_TX_MAX_TXQ_GROUPS) {
-		ol_txrx_warn("%s: invalid group_id=%u, ignore update.\n",
-			__func__,
-			group_id);
+		ol_txrx_warn("invalid group_id=%u, ignore update", group_id);
 		return;
 	}
 

+ 2 - 2
core/dp/txrx/ol_tx_ll_fastpath.c

@@ -234,8 +234,8 @@ ol_tx_prepare_ll_fast(struct ol_txrx_pdev_t *pdev,
 			htt_tx_desc_frag(pdev->htt_pdev, tx_desc->htt_frag_desc,
 					 i - 1, frag_paddr, frag_len);
 #if defined(HELIUMPLUS_DEBUG)
-			qdf_print("%s:%d: htt_fdesc=%pK frag=%d frag_paddr=0x%0llx len=%zu",
-				  __func__, __LINE__, tx_desc->htt_frag_desc,
+			qdf_debug("htt_fdesc=%pK frag=%d frag_paddr=0x%0llx len=%zu",
+				  tx_desc->htt_frag_desc,
 				  i - 1, frag_paddr, frag_len);
 			ol_txrx_dump_pkt(netbuf, frag_paddr, 64);
 #endif /* HELIUMPLUS_DEBUG */

+ 1 - 2
core/dp/txrx/ol_tx_queue.c

@@ -120,8 +120,7 @@ ol_tx_queue_vdev_flush(struct ol_txrx_pdev_t *pdev, struct ol_txrx_vdev_t *vdev)
 				if (txq->frms)
 					ol_tx_queue_free(pdev, txq, j, true);
 			}
-			ol_txrx_info(
-				   "%s: Delete Peer %pK\n", __func__, peer);
+			ol_txrx_info("Delete Peer %pK", peer);
 			ol_txrx_peer_release_ref(peers[i],
 						 PEER_DEBUG_ID_OL_TXQ_VDEV_FL);
 		}

+ 6 - 9
core/dp/txrx/ol_tx_send.c

@@ -897,7 +897,7 @@ void ol_tx_desc_update_group_credit(ol_txrx_pdev_handle pdev,
 	struct ol_tx_desc_t *tx_desc;
 
 	if (tx_desc_id >= pdev->tx_desc.pool_size) {
-		qdf_print("%s: Invalid desc id", __func__);
+		qdf_print("Invalid desc id");
 		return;
 	}
 
@@ -1036,8 +1036,7 @@ ol_tx_single_completion_handler(ol_txrx_pdev_handle pdev,
 
 	tx_desc = ol_tx_desc_find_check(pdev, tx_desc_id);
 	if (tx_desc == NULL) {
-		ol_txrx_err("%s: invalid desc_id(%u), ignore it.\n",
-			    __func__, tx_desc_id);
+		ol_txrx_err("invalid desc_id(%u), ignore it", tx_desc_id);
 		return;
 	}
 
@@ -1508,8 +1507,7 @@ void ol_register_packetdump_callback(tp_ol_packetdump_cb ol_tx_packetdump_cb,
 	ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
 
 	if (!pdev) {
-		ol_txrx_err(
-				"%s: pdev is NULL", __func__);
+		ol_txrx_err("pdev is NULL");
 		return;
 	}
 
@@ -1533,8 +1531,7 @@ void ol_deregister_packetdump_callback(void)
 	ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
 
 	if (!pdev) {
-		ol_txrx_err(
-				"%s: pdev is NULL", __func__);
+		ol_txrx_err("pdev is NULL");
 		return;
 	}
 
@@ -1548,7 +1545,7 @@ void ol_register_timestamp_callback(tp_ol_timestamp_cb ol_tx_timestamp_cb)
 	ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
 
 	if (!pdev) {
-		ol_txrx_err("%s: pdev is NULL", __func__);
+		ol_txrx_err("pdev is NULL");
 		return;
 	}
 	pdev->ol_tx_timestamp_cb = ol_tx_timestamp_cb;
@@ -1559,7 +1556,7 @@ void ol_deregister_timestamp_callback(void)
 	ol_txrx_pdev_handle pdev = cds_get_context(QDF_MODULE_ID_TXRX);
 
 	if (!pdev) {
-		ol_txrx_err("%s: pdev is NULL", __func__);
+		ol_txrx_err("pdev is NULL");
 		return;
 	}
 	pdev->ol_tx_timestamp_cb = NULL;

+ 1 - 3
core/dp/txrx/ol_tx_throttle.c

@@ -202,9 +202,7 @@ void ol_tx_throttle_set_level(struct cdp_pdev *ppdev, int level)
 	int ms = 0;
 
 	if (level >= THROTTLE_LEVEL_MAX) {
-		ol_txrx_dbg(
-			   "%s invalid throttle level set %d, ignoring\n",
-			   __func__, level);
+		ol_txrx_dbg("invalid throttle level set %d, ignoring", level);
 		return;
 	}
 

+ 51 - 80
core/dp/txrx/ol_txrx.c

@@ -129,8 +129,7 @@ static void ol_tx_mark_first_wakeup_packet(uint8_t value)
 	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
 
 	if (!pdev) {
-		ol_txrx_err(
-			"%s: pdev is NULL\n", __func__);
+		ol_txrx_err("pdev is NULL");
 		return;
 	}
 
@@ -149,7 +148,7 @@ void ol_tx_set_is_mgmt_over_wmi_enabled(uint8_t value)
 	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
 
 	if (!pdev) {
-		qdf_print("%s: pdev is NULL\n", __func__);
+		qdf_print("pdev is NULL");
 		return;
 	}
 	pdev->is_mgmt_over_wmi_enabled = value;
@@ -165,7 +164,7 @@ uint8_t ol_tx_get_is_mgmt_over_wmi_enabled(void)
 	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
 
 	if (!pdev) {
-		qdf_print("%s: pdev is NULL\n", __func__);
+		qdf_print("pdev is NULL");
 		return 0;
 	}
 	return pdev->is_mgmt_over_wmi_enabled;
@@ -604,12 +603,9 @@ static QDF_STATUS ol_txrx_write_dpt_buff_debugfs(void *priv,
 	}
 
 	buf1 = (char *)qdf_mem_malloc(len);
-	if (!buf1) {
-		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-			  "%s: qdf_mem_malloc failure",
-				__func__);
+	if (!buf1)
 		return QDF_STATUS_E_FAULT;
-	}
+
 	qdf_mem_copy(buf1, buf, len);
 	ret = ol_txrx_conv_str_to_int_debugfs(buf1, len, &proto_bitmap,
 					      &nr_records, &verbosity);
@@ -808,7 +804,7 @@ void htt_pkt_log_init(struct cdp_pdev *ppdev, void *scn)
 		pktlog_sethandle(&handle->pl_dev, scn);
 		pktlog_set_callback_regtype(PKTLOG_DEFAULT_CALLBACK_REGISTRATION);
 		if (pktlogmod_init(scn))
-			qdf_print("%s: pktlogmod_init failed", __func__);
+			qdf_print(" pktlogmod_init failed");
 		else
 			handle->pkt_log_init = true;
 	}
@@ -990,10 +986,9 @@ ol_txrx_pdev_post_attach(struct cdp_pdev *ppdev)
 
 	/* link SW tx descs into a freelist */
 	pdev->tx_desc.num_free = desc_pool_size;
-	ol_txrx_dbg(
-		   "%s first tx_desc:0x%pK Last tx desc:0x%pK\n", __func__,
-		   (uint32_t *) pdev->tx_desc.freelist,
-		   (uint32_t *) (pdev->tx_desc.freelist + desc_pool_size));
+	ol_txrx_dbg("first tx_desc:0x%pK Last tx desc:0x%pK",
+		    (uint32_t *)pdev->tx_desc.freelist,
+		    (uint32_t *)(pdev->tx_desc.freelist + desc_pool_size));
 
 	/* check what format of frames are expected to be delivered by the OS */
 	pdev->frame_format = ol_cfg_frame_type(pdev->ctrl_pdev);
@@ -1507,8 +1502,7 @@ static void ol_txrx_pdev_detach(struct cdp_pdev *ppdev, int force)
 
 	/*checking to ensure txrx pdev structure is not NULL */
 	if (!pdev) {
-		ol_txrx_err(
-			   "NULL pdev passed to %s\n", __func__);
+		ol_txrx_err("pdev is NULL");
 		return;
 	}
 
@@ -1718,7 +1712,7 @@ static void ol_txrx_vdev_register(struct cdp_vdev *pvdev, void *osif_vdev,
 	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
 
 	if (qdf_unlikely(!vdev) || qdf_unlikely(!txrx_ops)) {
-		qdf_print("%s: vdev/txrx_ops is NULL!\n", __func__);
+		qdf_print("vdev/txrx_ops is NULL!");
 		qdf_assert(0);
 		return;
 	}
@@ -1888,8 +1882,8 @@ ol_txrx_vdev_detach(struct cdp_vdev *pvdev,
 	if (!TAILQ_EMPTY(&vdev->peer_list)) {
 		/* debug print - will be removed later */
 		ol_txrx_dbg(
-			   "%s: not deleting vdev object %pK (%02x:%02x:%02x:%02x:%02x:%02x) until deletion finishes for all its peers\n",
-			   __func__, vdev,
+			   "not deleting vdev object %pK (%02x:%02x:%02x:%02x:%02x:%02x) until deletion finishes for all its peers\n",
+			   vdev,
 			   vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
 			   vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
 			   vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
@@ -1904,8 +1898,8 @@ ol_txrx_vdev_detach(struct cdp_vdev *pvdev,
 	qdf_event_destroy(&vdev->wait_delete_comp);
 
 	ol_txrx_dbg(
-		   "%s: deleting vdev obj %pK (%02x:%02x:%02x:%02x:%02x:%02x)\n",
-		   __func__, vdev,
+		   "deleting vdev obj %pK (%02x:%02x:%02x:%02x:%02x:%02x)",
+		   vdev,
 		   vdev->mac_addr.raw[0], vdev->mac_addr.raw[1],
 		   vdev->mac_addr.raw[2], vdev->mac_addr.raw[3],
 		   vdev->mac_addr.raw[4], vdev->mac_addr.raw[5]);
@@ -2521,8 +2515,7 @@ ol_txrx_remove_peers_for_vdev(struct cdp_vdev *pvdev,
 	qdf_spin_unlock_bh(&vdev->pdev->peer_ref_mutex);
 
 	if (self_removed)
-		ol_txrx_info("%s: self peer removed by caller ",
-				   __func__);
+		ol_txrx_info("self peer removed by caller");
 
 	if (remove_last_peer) {
 		/* remove IBSS bss peer last */
@@ -2551,8 +2544,8 @@ ol_txrx_remove_peers_for_vdev_no_lock(struct cdp_vdev *pvdev,
 
 	TAILQ_FOREACH_SAFE(peer, &vdev->peer_list, peer_list_elem, tmp_peer) {
 		ol_txrx_info_high(
-			   "%s: peer found for vdev id %d. deleting the peer",
-			   __func__, vdev->vdev_id);
+			   "peer found for vdev id %d. deleting the peer",
+			   vdev->vdev_id);
 		callback(callback_context, (uint8_t *)&vdev->mac_addr,
 				vdev->vdev_id, peer);
 	}
@@ -2624,8 +2617,7 @@ QDF_STATUS ol_txrx_peer_state_update(struct cdp_pdev *ppdev,
 						    PEER_DEBUG_ID_OL_INTERNAL);
 	if (NULL == peer) {
 		ol_txrx_err(
-			   "%s: peer is null for peer_mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
-			   __func__,
+			   "peer is null for peer_mac 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
 			   peer_mac[0], peer_mac[1], peer_mac[2], peer_mac[3],
 			   peer_mac[4], peer_mac[5]);
 		return QDF_STATUS_E_INVAL;
@@ -2635,9 +2627,7 @@ QDF_STATUS ol_txrx_peer_state_update(struct cdp_pdev *ppdev,
 	/* avoid multiple auth state change. */
 	if (peer->state == state) {
 #ifdef TXRX_PRINT_VERBOSE_ENABLE
-		ol_txrx_dbg(
-			   "%s: no state change, returns directly\n",
-			   __func__);
+		ol_txrx_dbg("no state change, returns directly");
 #endif
 		peer_ref_cnt = ol_txrx_peer_release_ref
 						(peer,
@@ -2645,8 +2635,8 @@ QDF_STATUS ol_txrx_peer_state_update(struct cdp_pdev *ppdev,
 		return QDF_STATUS_SUCCESS;
 	}
 
-	ol_txrx_dbg("%s: change from %d to %d\n",
-		   __func__, peer->state, state);
+	ol_txrx_dbg("change from %d to %d",
+		    peer->state, state);
 
 	peer->tx_filter = (state == OL_TXRX_PEER_STATE_AUTH)
 		? ol_tx_filter_pass_thru
@@ -2661,9 +2651,7 @@ QDF_STATUS ol_txrx_peer_state_update(struct cdp_pdev *ppdev,
 			 * Pause all regular (non-extended) TID tx queues until
 			 * data arrives and ADDBA negotiation has completed.
 			 */
-			ol_txrx_dbg(
-				   "%s: pause peer and unpause mgmt/non-qos\n",
-				   __func__);
+			ol_txrx_dbg("pause peer and unpause mgmt/non-qos");
 			ol_txrx_peer_pause(peer); /* pause all tx queues */
 			/* unpause mgmt and non-QoS tx queues */
 			for (tid = OL_TX_NUM_QOS_TIDS;
@@ -2703,8 +2691,7 @@ ol_txrx_peer_update(ol_txrx_vdev_handle vdev,
 	peer = ol_txrx_peer_find_hash_find_get_ref(vdev->pdev, peer_mac, 0, 1,
 						   PEER_DEBUG_ID_OL_INTERNAL);
 	if (!peer) {
-		ol_txrx_dbg("%s: peer is null",
-			   __func__);
+		ol_txrx_dbg("peer is null");
 		return;
 	}
 
@@ -2828,9 +2815,7 @@ static inline void ol_txrx_peer_free_tids(ol_txrx_peer_handle peer)
 	for (i = 0; i < OL_TXRX_NUM_EXT_TIDS; i++) {
 		if (peer->tids_rx_reorder[i].array !=
 		    &peer->tids_rx_reorder[i].base) {
-			ol_txrx_dbg(
-				   "%s, delete reorder arr, tid:%d\n",
-				   __func__, i);
+			ol_txrx_dbg("delete reorder arr, tid:%d", i);
 			qdf_mem_free(peer->tids_rx_reorder[i].array);
 			ol_rx_reorder_init(&peer->tids_rx_reorder[i],
 					   (uint8_t)i);
@@ -2997,8 +2982,8 @@ int ol_txrx_peer_release_ref(ol_txrx_peer_handle peer,
 				 */
 				ol_txrx_tx_desc_reset_vdev(vdev);
 				ol_txrx_dbg(
-					"%s: deleting vdev object %pK (%02x:%02x:%02x:%02x:%02x:%02x) - its last peer is done",
-					__func__, vdev,
+					"deleting vdev object %pK (%02x:%02x:%02x:%02x:%02x:%02x) - its last peer is done",
+					vdev,
 					vdev->mac_addr.raw[0],
 					vdev->mac_addr.raw[1],
 					vdev->mac_addr.raw[2],
@@ -3245,8 +3230,8 @@ static void ol_txrx_peer_detach_force_delete(void *ppeer)
 	ol_txrx_peer_handle peer = ppeer;
 	ol_txrx_pdev_handle pdev = peer->vdev->pdev;
 
-	ol_txrx_info_high("%s peer %pK, peer->ref_cnt %d",
-		__func__, peer, qdf_atomic_read(&peer->ref_cnt));
+	ol_txrx_info_high("peer %pK, peer->ref_cnt %d",
+			  peer, qdf_atomic_read(&peer->ref_cnt));
 
 	/* Clear the peer_id_to_obj map entries */
 	ol_txrx_peer_remove_obj_map_entries(pdev, peer);
@@ -3294,16 +3279,14 @@ static QDF_STATUS ol_txrx_wait_for_pending_tx(int timeout)
 	struct ol_txrx_pdev_t *txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
 
 	if (txrx_pdev == NULL) {
-		ol_txrx_err(
-			   "%s: txrx context is null", __func__);
+		ol_txrx_err("txrx context is null");
 		return QDF_STATUS_E_FAULT;
 	}
 
 	while (ol_txrx_get_tx_pending((struct cdp_pdev *)txrx_pdev)) {
 		qdf_sleep(OL_ATH_TX_DRAIN_WAIT_DELAY);
 		if (timeout <= 0) {
-			ol_txrx_err(
-				   "%s: tx frames are pending", __func__);
+			ol_txrx_err("tx frames are pending");
 			ol_txrx_dump_tx_desc(txrx_pdev);
 			return QDF_STATUS_E_TIMEOUT;
 		}
@@ -3456,15 +3439,14 @@ int ol_txrx_fw_stats_desc_pool_init(struct ol_txrx_pdev_t *pdev,
 	int i;
 
 	if (!pdev) {
-		ol_txrx_err("%s: pdev is NULL", __func__);
+		ol_txrx_err("pdev is NULL");
 		return -EINVAL;
 	}
 	pdev->ol_txrx_fw_stats_desc_pool.pool = qdf_mem_malloc(pool_size *
 		sizeof(struct ol_txrx_fw_stats_desc_elem_t));
-	if (!pdev->ol_txrx_fw_stats_desc_pool.pool) {
-		ol_txrx_err("%s: failed to allocate desc pool", __func__);
+	if (!pdev->ol_txrx_fw_stats_desc_pool.pool)
 		return -ENOMEM;
-	}
+
 	pdev->ol_txrx_fw_stats_desc_pool.freelist =
 		&pdev->ol_txrx_fw_stats_desc_pool.pool[0];
 	pdev->ol_txrx_fw_stats_desc_pool.pool_size = pool_size;
@@ -3494,15 +3476,15 @@ int ol_txrx_fw_stats_desc_pool_init(struct ol_txrx_pdev_t *pdev,
 void ol_txrx_fw_stats_desc_pool_deinit(struct ol_txrx_pdev_t *pdev)
 {
 	if (!pdev) {
-		ol_txrx_err("%s: pdev is NULL", __func__);
+		ol_txrx_err("pdev is NULL");
 		return;
 	}
 	if (!qdf_atomic_read(&pdev->ol_txrx_fw_stats_desc_pool.initialized)) {
-		ol_txrx_err("%s: Pool is not initialized", __func__);
+		ol_txrx_err("Pool is not initialized");
 		return;
 	}
 	if (!pdev->ol_txrx_fw_stats_desc_pool.pool) {
-		ol_txrx_err("%s: Pool is not allocated", __func__);
+		ol_txrx_err("Pool is not allocated");
 		return;
 	}
 	qdf_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
@@ -3531,7 +3513,7 @@ struct ol_txrx_fw_stats_desc_t
 	if (!qdf_atomic_read(&pdev->ol_txrx_fw_stats_desc_pool.initialized)) {
 		qdf_spin_unlock_bh(&pdev->
 				   ol_txrx_fw_stats_desc_pool.pool_lock);
-		ol_txrx_err("%s: Pool deinitialized", __func__);
+		ol_txrx_err("Pool deinitialized");
 		return NULL;
 	}
 	if (pdev->ol_txrx_fw_stats_desc_pool.freelist) {
@@ -3542,10 +3524,9 @@ struct ol_txrx_fw_stats_desc_t
 	qdf_spin_unlock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
 
 	if (desc)
-		ol_txrx_dbg("%s: desc_id %d allocated",
-			    __func__, desc->desc_id);
+		ol_txrx_dbg("desc_id %d allocated", desc->desc_id);
 	else
-		ol_txrx_err("%s: fw stats descriptors are exhausted", __func__);
+		ol_txrx_err("fw stats descriptors are exhausted");
 
 	return desc;
 }
@@ -3569,8 +3550,7 @@ struct ol_txrx_stats_req_internal
 	if (!qdf_atomic_read(&pdev->ol_txrx_fw_stats_desc_pool.initialized)) {
 		qdf_spin_unlock_bh(&pdev->
 				   ol_txrx_fw_stats_desc_pool.pool_lock);
-		ol_txrx_err("%s: Desc ID %u Pool deinitialized",
-			    __func__, desc_id);
+		ol_txrx_err("Desc ID %u Pool deinitialized", desc_id);
 		return NULL;
 	}
 	desc_elem = &pdev->ol_txrx_fw_stats_desc_pool.pool[desc_id];
@@ -3679,7 +3659,7 @@ ol_txrx_fw_stats_handler(ol_txrx_pdev_handle pdev,
 	int found = 0;
 
 	if (cookie >= FW_STATS_DESC_POOL_SIZE) {
-		ol_txrx_err("%s: Cookie is not valid", __func__);
+		ol_txrx_err("Cookie is not valid");
 		return;
 	}
 	req = ol_txrx_fw_stats_desc_get_req(pdev, (uint8_t)cookie);
@@ -4510,7 +4490,7 @@ static void ol_rx_data_cb(struct ol_txrx_pdev_t *pdev,
 
 free_buf:
 	drop_count = ol_txrx_drop_nbuf_list(buf_list);
-	ol_txrx_warn("%s:Dropped frames %u", __func__, drop_count);
+	ol_txrx_warn("Dropped frames %u", drop_count);
 }
 
 /* print for every 16th packet */
@@ -4562,8 +4542,6 @@ static QDF_STATUS ol_txrx_enqueue_rx_frames(
 		next_buf = qdf_nbuf_queue_next(buf);
 		cache_buf = qdf_mem_malloc(sizeof(*cache_buf));
 		if (!cache_buf) {
-			ol_txrx_err(
-				"Failed to allocate buf to cache the rx frames");
 			qdf_nbuf_free(buf);
 		} else {
 			/* Add NULL terminator */
@@ -4733,16 +4711,14 @@ static QDF_STATUS ol_txrx_register_ocb_peer(uint8_t *mac_addr,
 
 	pdev = cds_get_context(QDF_MODULE_ID_TXRX);
 	if (!pdev) {
-		ol_txrx_err("%s: Unable to find pdev!",
-			   __func__);
+		ol_txrx_err("Unable to find pdev!");
 		return QDF_STATUS_E_FAILURE;
 	}
 
 	peer = ol_txrx_find_peer_by_addr((struct cdp_pdev *)pdev,
 					 mac_addr, peer_id);
 	if (!peer) {
-		ol_txrx_err("%s: Unable to find OCB peer!",
-			   __func__);
+		ol_txrx_err("Unable to find OCB peer!");
 		return QDF_STATUS_E_FAILURE;
 	}
 
@@ -4956,7 +4932,7 @@ static QDF_STATUS ol_register_data_stall_detect_cb(
 	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
 
 	if (pdev == NULL) {
-		ol_txrx_err("%s: pdev NULL!", __func__);
+		ol_txrx_err("pdev NULL!");
 		return QDF_STATUS_E_INVAL;
 	}
 	pdev->data_stall_detect_callback = data_stall_detect_callback;
@@ -4976,7 +4952,7 @@ static QDF_STATUS ol_deregister_data_stall_detect_cb(
 	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
 
 	if (pdev == NULL) {
-		ol_txrx_err("%s: pdev NULL!", __func__);
+		ol_txrx_err("pdev NULL!");
 		return QDF_STATUS_E_INVAL;
 	}
 	pdev->data_stall_detect_callback = NULL;
@@ -5011,11 +4987,9 @@ static void ol_txrx_post_data_stall_event(
 		return;
 	}
 	data_stall_info = qdf_mem_malloc(sizeof(*data_stall_info));
-	if (!data_stall_info) {
-		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-			"%s: data_stall_info is NULL.", __func__);
+	if (!data_stall_info)
 		return;
-	}
+
 	data_stall_info->indicator = indicator;
 	data_stall_info->data_stall_type = data_stall_type;
 	data_stall_info->vdev_id_bitmap = vdev_id_bitmap;
@@ -5046,7 +5020,7 @@ static void ol_txrx_post_data_stall_event(
 void
 ol_txrx_dump_pkt(qdf_nbuf_t nbuf, uint32_t nbuf_paddr, int len)
 {
-	qdf_print("%s: Pkt: VA 0x%pK PA 0x%llx len %d\n", __func__,
+	qdf_print("Pkt: VA 0x%pK PA 0x%llx len %d",
 		  qdf_nbuf_data(nbuf), (unsigned long long int)nbuf_paddr, len);
 	print_hex_dump(KERN_DEBUG, "Pkt:   ", DUMP_PREFIX_ADDRESS, 16, 4,
 		       qdf_nbuf_data(nbuf), len, true);
@@ -5664,11 +5638,8 @@ struct cdp_soc_t *ol_txrx_soc_attach(void *scn_handle,
 {
 	struct cdp_soc_t *soc = qdf_mem_malloc(sizeof(struct cdp_soc_t));
 
-	if (!soc) {
-		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-			"%s: OL SOC memory allocation failed\n", __func__);
+	if (!soc)
 		return NULL;
-	}
 
 	soc->ops = &ol_txrx_ops;
 	return soc;

+ 2 - 4
core/dp/txrx/ol_txrx_event.c

@@ -194,11 +194,9 @@ A_STATUS wdi_event_attach(struct ol_txrx_pdev_t *txrx_pdev)
 				    qdf_mem_malloc(
 					    sizeof(wdi_event_subscribe *) *
 					    WDI_NUM_EVENTS);
-	if (!txrx_pdev->wdi_event_list) {
-		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-			  "Insufficient memory for the WDI event lists\n");
+	if (!txrx_pdev->wdi_event_list)
 		return A_NO_MEMORY;
-	}
+
 	return A_OK;
 }
 

+ 35 - 64
core/dp/txrx/ol_txrx_flow_control.c

@@ -135,7 +135,7 @@ void ol_tx_set_desc_global_pool_size(uint32_t num_msdu_desc)
 	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
 
 	if (!pdev) {
-		qdf_print("%s: pdev is NULL\n", __func__);
+		qdf_print("pdev is NULL");
 		return;
 	}
 	pdev->num_msdu_desc = num_msdu_desc;
@@ -235,14 +235,12 @@ static int ol_tx_delete_flow_pool(struct ol_tx_flow_pool_t *pool, bool force)
 	struct ol_tx_desc_t *tx_desc = NULL;
 
 	if (!pool) {
-		ol_txrx_err(
-		   "%s: pool is NULL\n", __func__);
+		ol_txrx_err("pool is NULL");
 		QDF_ASSERT(0);
 		return -ENOMEM;
 	}
 	if (!pdev) {
-		ol_txrx_err(
-		   "%s: pdev is NULL\n", __func__);
+		ol_txrx_err("pdev is NULL");
 		QDF_ASSERT(0);
 		return -ENOMEM;
 	}
@@ -271,9 +269,8 @@ static int ol_tx_delete_flow_pool(struct ol_tx_flow_pool_t *pool, bool force)
 		ol_tx_inc_pool_ref(pool);
 
 		pdev->tx_desc.num_invalid_bin++;
-		ol_txrx_info(
-			"%s: invalid pool created %d\n",
-			 __func__, pdev->tx_desc.num_invalid_bin);
+		ol_txrx_info("invalid pool created %d",
+			     pdev->tx_desc.num_invalid_bin);
 		if (pdev->tx_desc.num_invalid_bin > MAX_INVALID_BIN)
 			ASSERT(0);
 
@@ -451,8 +448,7 @@ void ol_tx_clear_flow_pool_stats(void)
 	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
 
 	if (!pdev) {
-		ol_txrx_err("%s: pdev is null\n",
-						 __func__);
+		ol_txrx_err("pdev is null");
 		return;
 	}
 	qdf_mem_zero(&pdev->pool_stats, sizeof(pdev->pool_stats));
@@ -529,8 +525,7 @@ ol_tx_distribute_descs_to_deficient_pools(struct ol_tx_flow_pool_t *src_pool)
 	uint16_t desc_move_count = 0;
 
 	if (!pdev) {
-		ol_txrx_err(
-		   "%s: pdev is NULL\n", __func__);
+		ol_txrx_err("pdev is NULL");
 		return -EINVAL;
 	}
 	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
@@ -590,19 +585,16 @@ struct ol_tx_flow_pool_t *ol_tx_create_flow_pool(uint8_t flow_pool_id,
 	uint32_t start_threshold;
 
 	if (!pdev) {
-		ol_txrx_err(
-		   "%s: pdev is NULL\n", __func__);
+		ol_txrx_err("pdev is NULL");
 		return NULL;
 	}
 	stop_threshold = ol_cfg_get_tx_flow_stop_queue_th(pdev->ctrl_pdev);
 	start_threshold = stop_threshold +
 		ol_cfg_get_tx_flow_start_queue_offset(pdev->ctrl_pdev);
 	pool = qdf_mem_malloc(sizeof(*pool));
-	if (!pool) {
-		ol_txrx_err(
-		   "%s: malloc failed\n", __func__);
+	if (!pool)
 		return NULL;
-	}
+
 	pool->flow_pool_id = flow_pool_id;
 	pool->flow_pool_size = flow_pool_size;
 	pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
@@ -663,8 +655,7 @@ int ol_tx_free_invalid_flow_pool(struct ol_tx_flow_pool_t *pool)
 	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
 
 	if ((!pdev) || (!pool) || (pool->status != FLOW_POOL_INVALID)) {
-		ol_txrx_err(
-		   "%s: Invalid pool/pdev\n", __func__);
+		ol_txrx_err("Invalid pool/pdev");
 		return -EINVAL;
 	}
 
@@ -676,9 +667,8 @@ int ol_tx_free_invalid_flow_pool(struct ol_tx_flow_pool_t *pool)
 	qdf_spin_unlock_bh(&pool->flow_pool_lock);
 
 	pdev->tx_desc.num_invalid_bin--;
-	ol_txrx_info(
-		"%s: invalid pool deleted %d\n",
-		 __func__, pdev->tx_desc.num_invalid_bin);
+	ol_txrx_info("invalid pool deleted %d",
+		     pdev->tx_desc.num_invalid_bin);
 
 	return ol_tx_dec_pool_ref(pool, false);
 }
@@ -734,9 +724,7 @@ static void ol_tx_flow_pool_vdev_map(struct ol_tx_flow_pool_t *pool,
 
 	vdev = (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
 	if (!vdev) {
-		ol_txrx_err(
-		   "%s: invalid vdev_id %d\n",
-		   __func__, vdev_id);
+		ol_txrx_err("invalid vdev_id %d", vdev_id);
 		return;
 	}
 
@@ -760,9 +748,7 @@ static void ol_tx_flow_pool_vdev_unmap(struct ol_tx_flow_pool_t *pool,
 
 	vdev = (struct ol_txrx_vdev_t *)ol_txrx_get_vdev_from_vdev_id(vdev_id);
 	if (!vdev) {
-		ol_txrx_err(
-		   "%s: invalid vdev_id %d\n",
-		   __func__, vdev_id);
+		ol_txrx_err("invalid vdev_id %d", vdev_id);
 		return;
 	}
 
@@ -792,13 +778,11 @@ void ol_tx_flow_pool_map_handler(uint8_t flow_id, uint8_t flow_type,
 	uint8_t pool_create = 0;
 	enum htt_flow_type type = flow_type;
 
-	ol_txrx_dbg(
-		"%s: flow_id %d flow_type %d flow_pool_id %d flow_pool_size %d\n",
-		__func__, flow_id, flow_type, flow_pool_id, flow_pool_size);
+	ol_txrx_dbg("flow_id %d flow_type %d flow_pool_id %d flow_pool_size %d",
+		    flow_id, flow_type, flow_pool_id, flow_pool_size);
 
 	if (qdf_unlikely(!pdev)) {
-		ol_txrx_err(
-			"%s: pdev is NULL", __func__);
+		ol_txrx_err("pdev is NULL");
 		return;
 	}
 	pdev->pool_stats.pool_map_count++;
@@ -807,9 +791,8 @@ void ol_tx_flow_pool_map_handler(uint8_t flow_id, uint8_t flow_type,
 	if (!pool) {
 		pool = ol_tx_create_flow_pool(flow_pool_id, flow_pool_size);
 		if (pool == NULL) {
-			ol_txrx_err(
-				   "%s: creation of flow_pool %d size %d failed\n",
-				   __func__, flow_pool_id, flow_pool_size);
+			ol_txrx_err("creation of flow_pool %d size %d failed",
+				    flow_pool_id, flow_pool_size);
 			return;
 		}
 		pool_create = 1;
@@ -826,9 +809,7 @@ void ol_tx_flow_pool_map_handler(uint8_t flow_id, uint8_t flow_type,
 	default:
 		if (pool_create)
 			ol_tx_dec_pool_ref(pool, false);
-		ol_txrx_err(
-		   "%s: flow type %d not supported !!!\n",
-		   __func__, type);
+		ol_txrx_err("flow type %d not supported", type);
 		break;
 	}
 }
@@ -851,22 +832,18 @@ void ol_tx_flow_pool_unmap_handler(uint8_t flow_id, uint8_t flow_type,
 	struct ol_tx_flow_pool_t *pool;
 	enum htt_flow_type type = flow_type;
 
-	ol_txrx_dbg(
-		"%s: flow_id %d flow_type %d flow_pool_id %d\n",
-		__func__, flow_id, flow_type, flow_pool_id);
+	ol_txrx_dbg("flow_id %d flow_type %d flow_pool_id %d",
+		    flow_id, flow_type, flow_pool_id);
 
 	if (qdf_unlikely(!pdev)) {
-		ol_txrx_err(
-			"%s: pdev is NULL", __func__);
+		ol_txrx_err("pdev is NULL");
 		return;
 	}
 	pdev->pool_stats.pool_unmap_count++;
 
 	pool = ol_tx_get_flow_pool(flow_pool_id);
 	if (!pool) {
-		ol_txrx_info(
-		   "%s: flow_pool not available flow_pool_id %d\n",
-		   __func__, type);
+		ol_txrx_info("flow_pool not available flow_pool_id %d",
			     flow_pool_id);
 		return;
 	}
 
@@ -876,9 +853,7 @@ void ol_tx_flow_pool_unmap_handler(uint8_t flow_id, uint8_t flow_type,
 		ol_tx_flow_pool_vdev_unmap(pool, flow_id);
 		break;
 	default:
-		ol_txrx_info(
-		   "%s: flow type %d not supported !!!\n",
-		   __func__, type);
+		ol_txrx_info("flow type %d not supported", type);
 		return;
 	}
 
@@ -911,8 +886,7 @@ int ol_tx_distribute_descs_to_deficient_pools_from_global_pool(void)
 	uint8_t free_invalid_pool = 0;
 
 	if (!pdev) {
-		ol_txrx_err(
-		   "%s: pdev is NULL\n", __func__);
+		ol_txrx_err("pdev is NULL");
 		return -EINVAL;
 	}
 
@@ -1206,20 +1180,19 @@ void ol_tx_flow_pool_resize_handler(uint8_t flow_pool_id,
 	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
 	struct ol_tx_flow_pool_t *pool;
 
-	ol_txrx_dbg("%s: flow_pool_id %d flow_pool_size %d\n",
-		    __func__, flow_pool_id, flow_pool_size);
+	ol_txrx_dbg("flow_pool_id %d flow_pool_size %d",
+		    flow_pool_id, flow_pool_size);
 
 	if (qdf_unlikely(!pdev)) {
-		ol_txrx_err(
-			"%s: pdev is NULL", __func__);
+		ol_txrx_err("pdev is NULL");
 		return;
 	}
 	pdev->pool_stats.pool_resize_count++;
 
 	pool = ol_tx_get_flow_pool(flow_pool_id);
 	if (!pool) {
-		ol_txrx_err("%s: resize for flow_pool %d size %d failed\n",
-			    __func__, flow_pool_id, flow_pool_size);
+		ol_txrx_err("resize for flow_pool %d size %d failed",
+			    flow_pool_id, flow_pool_size);
 		return;
 	}
 
@@ -1250,9 +1223,7 @@ ol_txrx_map_to_netif_reason_type(uint32_t reason)
 	case OL_TXQ_PAUSE_REASON_THERMAL_MITIGATION:
 		return WLAN_THERMAL_MITIGATION;
 	default:
-		ol_txrx_err(
-			   "%s: reason not supported %d\n",
-			   __func__, reason);
+		ol_txrx_err("reason not supported %d", reason);
 		return WLAN_REASON_TYPE_MAX;
 	}
 }
@@ -1271,7 +1242,7 @@ void ol_txrx_vdev_pause(struct cdp_vdev *pvdev, uint32_t reason)
 	enum netif_reason_type netif_reason;
 
 	if (qdf_unlikely((!pdev) || (!pdev->pause_cb))) {
-		ol_txrx_err("%s: invalid pdev\n", __func__);
+		ol_txrx_err("invalid pdev");
 		return;
 	}
 
@@ -1296,7 +1267,7 @@ void ol_txrx_vdev_unpause(struct cdp_vdev *pvdev, uint32_t reason)
 	enum netif_reason_type netif_reason;
 
 	if (qdf_unlikely((!pdev) || (!pdev->pause_cb))) {
-		ol_txrx_err("%s: invalid pdev\n", __func__);
+		ol_txrx_err("invalid pdev");
 		return;
 	}
 

+ 4 - 14
core/dp/txrx/ol_txrx_ipa.c

@@ -1131,11 +1131,9 @@ static QDF_STATUS ol_txrx_ipa_remove_header(char *name)
 		  hdrlookup.hdl);
 	len = sizeof(qdf_ipa_ioc_del_hdr_t) + sizeof(qdf_ipa_hdr_del_t) * 1;
 	ipa_hdr = (qdf_ipa_ioc_del_hdr_t *)qdf_mem_malloc(len);
-	if (ipa_hdr == NULL) {
-		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-			  "ipa_hdr allocation failed");
+	if (!ipa_hdr)
 		return QDF_STATUS_E_FAILURE;
-	}
+
 	QDF_IPA_IOC_DEL_HDR_NUM_HDRS(ipa_hdr) = 1;
 	QDF_IPA_IOC_DEL_HDR_COMMIT(ipa_hdr) = 0;
 	QDF_IPA_IOC_DEL_HDR_HDL(ipa_hdr) = QDF_IPA_IOC_GET_HDR_HDL(&hdrlookup);
@@ -1174,8 +1172,6 @@ static int ol_txrx_ipa_add_header_info(char *ifname, uint8_t *mac_addr,
 	ipa_hdr = qdf_mem_malloc(sizeof(qdf_ipa_ioc_add_hdr_t)
 				 + sizeof(qdf_ipa_hdr_add_t));
 	if (!ipa_hdr) {
-		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-			    "%s: ipa_hdr allocation failed", ifname);
 		ret = -ENOMEM;
 		goto end;
 	}
@@ -1283,20 +1279,14 @@ static int ol_txrx_ipa_register_interface(char *ifname,
 	/* Allocate TX properties for TOS categories, 1 each for IPv4 & IPv6 */
 	tx_prop =
 		qdf_mem_malloc(sizeof(qdf_ipa_ioc_tx_intf_prop_t) * num_prop);
-	if (!tx_prop) {
-		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-			  "tx_prop allocation failed");
+	if (!tx_prop)
 		goto register_interface_fail;
-	}
 
 	/* Allocate RX properties, 1 each for IPv4 & IPv6 */
 	rx_prop =
 		qdf_mem_malloc(sizeof(qdf_ipa_ioc_rx_intf_prop_t) * num_prop);
-	if (!rx_prop) {
-		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
-			  "rx_prop allocation failed");
+	if (!rx_prop)
 		goto register_interface_fail;
-	}
 
 	qdf_mem_zero(&tx_intf, sizeof(tx_intf));
 	qdf_mem_zero(&rx_intf, sizeof(rx_intf));

+ 6 - 10
core/dp/txrx/ol_txrx_peer_find.c

@@ -388,9 +388,8 @@ static inline void ol_txrx_peer_find_add_id(struct ol_txrx_pdev_t *pdev,
 		 * If the peer ID is for a vdev, then we will fail to find a
 		 * peer with a matching MAC address.
 		 */
-		ol_txrx_err(
-			  "%s: peer not found or peer ID is %d invalid",
-			  __func__, peer_id);
+		ol_txrx_err("peer not found or peer ID is %d invalid",
+			    peer_id);
 		wlan_roam_debug_log(DEBUG_INVALID_VDEV_ID,
 				    DEBUG_PEER_MAP_EVENT,
 				    peer_id, peer_mac_addr,
@@ -572,7 +571,7 @@ void ol_rx_peer_unmap_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id)
 
 	if (peer_id == HTT_INVALID_PEER) {
 		ol_txrx_err(
-		   "%s: invalid peer ID %d\n", __func__, peer_id);
+		   "invalid peer ID %d", peer_id);
 		wlan_roam_debug_log(DEBUG_INVALID_VDEV_ID,
 				    DEBUG_PEER_UNMAP_EVENT,
 				    peer_id, NULL, NULL, 0, 0x100);
@@ -592,9 +591,8 @@ void ol_rx_peer_unmap_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id)
 		wlan_roam_debug_log(DEBUG_INVALID_VDEV_ID,
 				    DEBUG_PEER_UNMAP_EVENT,
 				    peer_id, NULL, NULL, ref_cnt, 0x101);
-		ol_txrx_dbg(
-			   "%s: peer already deleted, peer_id %d del_peer_id_ref_cnt %d",
-			   __func__, peer_id, ref_cnt);
+		ol_txrx_dbg("peer already deleted, peer_id %d del_peer_id_ref_cnt %d",
+			    peer_id, ref_cnt);
 		return;
 	}
 	peer = pdev->peer_id_to_obj_map[peer_id].peer;
@@ -606,9 +604,7 @@ void ol_rx_peer_unmap_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id)
 		 * in peer_id_to_obj_map will be NULL.
 		 */
 		qdf_spin_unlock_bh(&pdev->peer_map_unmap_lock);
-		ol_txrx_info(
-			   "%s: peer not found for peer_id %d",
-			   __func__, peer_id);
+		ol_txrx_info("peer not found for peer_id %d", peer_id);
 		wlan_roam_debug_log(DEBUG_INVALID_VDEV_ID,
 				    DEBUG_PEER_UNMAP_EVENT,
 				    peer_id, NULL, NULL, 0, 0x102);

+ 0 - 1
core/dp/txrx3.0/dp_rx_thread.c

@@ -475,7 +475,6 @@ QDF_STATUS dp_rx_tm_init(struct dp_rx_tm_handle *rx_tm_hdl,
 		if (qdf_unlikely(!rx_tm_hdl->rx_thread[i])) {
 			QDF_ASSERT(0);
 			qdf_status = QDF_STATUS_E_NOMEM;
-			dp_err("failed to allocate memory for dp_rx_thread");
 			goto ret;
 		}
 		rx_tm_hdl->rx_thread[i]->rtm_handle_cmn =

+ 0 - 1
core/dp/txrx3.0/dp_txrx.c

@@ -28,7 +28,6 @@ QDF_STATUS dp_txrx_init(ol_txrx_soc_handle soc, struct cdp_pdev *pdev,
 
 	dp_ext_hdl = qdf_mem_malloc(sizeof(*dp_ext_hdl));
 	if (!dp_ext_hdl) {
-		dp_err("failed to alloc dp_txrx_handle");
 		QDF_ASSERT(0);
 		return QDF_STATUS_E_NOMEM;
 	}