
qcacld-3.0: TXRX: Reduce the log spam in kmsg

Move the logs to appropriate log levels to reduce
the log spam in kmsg.

Change-Id: Ic22302a2f042f693d4bcc1f0efe647d580be33ed
CRs-Fixed: 2014745
Srinivas Girigowda, 8 years ago · commit b8ecec2fb3
3 changed files with 27 additions and 26 deletions
  1. core/dp/txrx/ol_tx_desc.h (+2, -1)
  2. core/dp/txrx/ol_txrx.c (+12, -12)
  3. core/dp/txrx/ol_txrx_flow_control.c (+13, -13)
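
The pattern applied in the hunks below is a straight log-level demotion: informational and per-packet messages move off the ERROR level, which always lands in kmsg, onto DEBUG/INFO levels that are normally filtered out. A minimal sketch of the before/after shape, using the same QDF_TRACE and TXRX_PRINT macros that appear in the diff; the surrounding function name and header paths are illustrative assumptions, not part of this change:

/* Illustrative sketch only -- header paths are assumptions based on the driver tree. */
#include "qdf_trace.h"          /* QDF_TRACE, QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_* */
#include "ol_txrx_internal.h"   /* TXRX_PRINT, TXRX_PRINT_LEVEL_*                   */

/* Hypothetical helper showing the demotion applied throughout this commit. */
static void example_log_demotion(struct ol_txrx_pdev_t *pdev)
{
	/*
	 * Before: unconditional print -- every call lands in kmsg.
	 *
	 *   qdf_print("%s: pool_size %d num_free %d\n", __func__,
	 *             pdev->tx_desc.pool_size, pdev->tx_desc.num_free);
	 *
	 * After: routed through the TXRX trace module at DEBUG level,
	 * so it is emitted only when that level is enabled.
	 */
	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
		  "%s: pool_size %d num_free %d", __func__,
		  pdev->tx_desc.pool_size, pdev->tx_desc.num_free);

	/*
	 * Likewise, informational TXRX_PRINT messages drop from
	 * TXRX_PRINT_LEVEL_ERR to INFO1/INFO2; genuine error paths keep ERR.
	 */
	TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
		   "%s: tx descriptors free %d", __func__,
		   pdev->tx_desc.num_free);
}
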

+ 2 - 1
core/dp/txrx/ol_tx_desc.h

@@ -353,7 +353,8 @@ void ol_tx_desc_dup_detect_init(struct ol_txrx_pdev_t *pdev, uint16_t pool_size)
 static inline
 void ol_tx_desc_dup_detect_deinit(struct ol_txrx_pdev_t *pdev)
 {
-	qdf_print("%s: pool_size %d num_free %d\n", __func__,
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
+		  "%s: pool_size %d num_free %d\n", __func__,
 		pdev->tx_desc.pool_size, pdev->tx_desc.num_free);
 	if (pdev->tx_desc.free_list_bitmap)
 		qdf_mem_free(pdev->tx_desc.free_list_bitmap);

+ 12 - 12
core/dp/txrx/ol_txrx.c

@@ -620,7 +620,7 @@ static void ol_tx_set_desc_global_pool_size(uint32_t num_msdu_desc)
 	pdev->num_msdu_desc = num_msdu_desc;
 	if (!ol_tx_get_is_mgmt_over_wmi_enabled())
 		pdev->num_msdu_desc += TX_FLOW_MGMT_POOL_SIZE;
-	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Global pool size: %d\n",
+	TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2, "Global pool size: %d\n",
 		pdev->num_msdu_desc);
 	return;
 }
@@ -1311,7 +1311,7 @@ ol_txrx_pdev_post_attach(struct cdp_pdev *ppdev)
 		desc_per_page = desc_per_page >> 1;
 	}
 	pdev->tx_desc.page_divider = (sig_bit - 1);
-	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+	QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
 		"page_divider 0x%x, offset_filter 0x%x num elem %d, ol desc num page %d, ol desc per page %d",
 		pdev->tx_desc.page_divider, pdev->tx_desc.offset_filter,
 		desc_pool_size, pdev->tx_desc.desc_pages.num_pages,
@@ -2349,7 +2349,7 @@ ol_txrx_peer_attach(struct cdp_vdev *pvdev, uint8_t *peer_mac_addr)
 	TAILQ_FOREACH(temp_peer, &vdev->peer_list, peer_list_elem) {
 		if (!ol_txrx_peer_find_mac_addr_cmp(&temp_peer->mac_addr,
 			(union ol_txrx_align_mac_addr_t *)peer_mac_addr)) {
-			TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
 				"vdev_id %d (%02x:%02x:%02x:%02x:%02x:%02x) already exsist.\n",
 				vdev->vdev_id,
 				peer_mac_addr[0], peer_mac_addr[1],
@@ -2737,7 +2737,7 @@ ol_txrx_remove_peers_for_vdev(struct cdp_vdev *pvdev,
 		}
 		/* self peer is deleted last */
 		if (peer == TAILQ_FIRST(&vdev->peer_list)) {
-			TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+			TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
 				   "%s: self peer removed by caller ",
 				   __func__);
 			break;
@@ -2772,7 +2772,7 @@ ol_txrx_remove_peers_for_vdev_no_lock(struct cdp_vdev *pvdev,
 	ol_txrx_peer_handle peer = NULL;
 
 	TAILQ_FOREACH(peer, &vdev->peer_list, peer_list_elem) {
-		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+		TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
 			   "%s: peer found for vdev id %d. deleting the peer",
 			   __func__, vdev->vdev_id);
 		callback(callback_context, (uint8_t *)&vdev->mac_addr,
@@ -3314,7 +3314,7 @@ static void ol_txrx_peer_detach_force_delete(void *ppeer)
 	ol_txrx_peer_handle peer = ppeer;
 	ol_txrx_pdev_handle pdev = peer->vdev->pdev;
 
-	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "%s peer %p, peer->ref_cnt %d",
+	TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1, "%s peer %p, peer->ref_cnt %d",
 		__func__, peer, qdf_atomic_read(&peer->ref_cnt));
 
 	/* Clear the peer_id_to_obj map entries */
@@ -3365,7 +3365,7 @@ static void ol_txrx_dump_tx_desc(ol_txrx_pdev_handle pdev_handle)
 
 	num_free = ol_tx_get_total_free_desc(pdev);
 
-	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+	TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
 		   "total tx credit %d num_free %d",
 		   total, num_free);
 
@@ -4611,7 +4611,7 @@ void ol_rx_data_process(struct ol_txrx_peer_t *peer,
 	if (!data_rx) {
 		struct ol_rx_cached_buf *cache_buf;
 
-		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+		TXRX_PRINT(TXRX_PRINT_LEVEL_WARN,
 			   "Data on the peer before it is registered!!!");
 		buf = rx_buf_list;
 		while (buf) {
@@ -4650,7 +4650,7 @@ void ol_rx_data_process(struct ol_txrx_peer_t *peer,
 
 			pkt = cds_alloc_ol_rx_pkt(sched_ctx);
 			if (!pkt) {
-				TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+				TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
 					   "No available Rx message buffer");
 				goto drop_rx_buf;
 			}
@@ -4669,7 +4669,7 @@ void ol_rx_data_process(struct ol_txrx_peer_t *peer,
 	return;
 
 drop_rx_buf:
-	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Dropping rx packets");
+	TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1, "Dropping rx packets");
 	buf = rx_buf_list;
 	while (buf) {
 		next_buf = qdf_nbuf_queue_next(buf);
@@ -4928,7 +4928,7 @@ static void ol_register_lro_flush_cb(void (lro_flush_cb)(void *),
 		goto out;
 	}
 	if (pdev->lro_info.lro_flush_cb != NULL) {
-		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+		TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
 			   "%s: LRO already initialised\n", __func__);
 		if (pdev->lro_info.lro_flush_cb != lro_flush_cb) {
 			TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
@@ -4976,7 +4976,7 @@ static void ol_deregister_lro_flush_cb(void (lro_deinit_cb)(void *))
 		return;
 	}
 	if (qdf_atomic_dec_and_test(&pdev->lro_info.lro_dev_cnt) == 0) {
-		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+		TXRX_PRINT(TXRX_PRINT_LEVEL_INFO1,
 			   "%s: Other LRO enabled modules still exist, do not unregister the lro_flush_cb\n", __func__);
 		return;
 	}

+ 13 - 13
core/dp/txrx/ol_txrx_flow_control.c

@@ -156,21 +156,21 @@ void ol_tx_dump_flow_pool_info(void)
 	struct ol_tx_flow_pool_t tmp_pool;
 
 
-	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Global Pool");
+	TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2, "Global Pool");
 	if (!pdev) {
-		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "ERROR: pdev NULL");
+		TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2, "ERROR: pdev NULL");
 		QDF_ASSERT(0); /* traceback */
 		return;
 	}
-	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Total %d :: Available %d",
+	TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2, "Total %d :: Available %d",
 		pdev->tx_desc.pool_size, pdev->tx_desc.num_free);
-	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "Invalid flow_pool %d",
+	TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2, "Invalid flow_pool %d",
 		pdev->tx_desc.num_invalid_bin);
-	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "No of pool map received %d",
+	TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2, "No of pool map received %d",
 		pdev->pool_stats.pool_map_count);
-	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "No of pool unmap received %d",
+	TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2, "No of pool unmap received %d",
 		pdev->pool_stats.pool_unmap_count);
-	TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+	TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2,
 		"Pkt dropped due to unavailablity of pool %d",
 		pdev->pool_stats.pkt_drop_no_pool);
 
@@ -186,21 +186,21 @@ void ol_tx_dump_flow_pool_info(void)
 		qdf_mem_copy(&tmp_pool, pool, sizeof(tmp_pool));
 		qdf_spin_unlock_bh(&pool->flow_pool_lock);
 		qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
-		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR, "\n");
-		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+		TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2, "\n");
+		TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2,
 			"Flow_pool_id %d :: status %d",
 			tmp_pool.flow_pool_id, tmp_pool.status);
-		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+		TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2,
 			"Total %d :: Available %d :: Deficient %d",
 			tmp_pool.flow_pool_size, tmp_pool.avail_desc,
 			tmp_pool.deficient_desc);
-		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+		TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2,
 			"Start threshold %d :: Stop threshold %d",
 			 tmp_pool.start_th, tmp_pool.stop_th);
-		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+		TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2,
 			"Member flow_id  %d :: flow_type %d",
 			tmp_pool.member_flow_id, tmp_pool.flow_type);
-		TXRX_PRINT(TXRX_PRINT_LEVEL_ERR,
+		TXRX_PRINT(TXRX_PRINT_LEVEL_INFO2,
 			"Pkt dropped due to unavailablity of descriptors %d",
 			tmp_pool.pkt_drop_no_desc);
 		qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);