qcacld-3.0: Implement descriptor pool for fw stats

The kernel address is used as the cookie to keep
track of stats requests. This address can be
disclosed to the target, leading to a security
vulnerability.

Implement a FW stats descriptor pool and use a
descriptor ID, instead of the kernel address, to
keep track of stats requests and prevent the
kernel address leak.
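
For orientation, the scheme reduces to a fixed array of descriptors
chained on a freelist, where the 8-bit descriptor ID, not a kernel
pointer, is what crosses the host/target boundary. The following is a
minimal standalone sketch of that idea; the names, the bare freelist,
and main() are illustrative, and the real pool (see ol_txrx.c below)
is additionally guarded by a spinlock and an initialized flag.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define POOL_SIZE 10			/* mirrors FW_STATS_DESC_POOL_SIZE */

struct desc {
	uint8_t id;			/* value sent to firmware as the cookie */
	void *req;			/* pending stats request, if any */
	struct desc *next;		/* freelist linkage */
};

static struct desc pool[POOL_SIZE];
static struct desc *freelist;

static void pool_init(void)
{
	int i;

	for (i = 0; i < POOL_SIZE; i++) {
		pool[i].id = i;
		pool[i].req = NULL;
		pool[i].next = (i + 1 < POOL_SIZE) ? &pool[i + 1] : NULL;
	}
	freelist = &pool[0];
}

static struct desc *desc_alloc(void)
{
	struct desc *d = freelist;

	if (d)				/* NULL when descriptors are exhausted */
		freelist = d->next;
	return d;
}

static void *desc_get_req(uint8_t cookie)
{
	struct desc *d;
	void *req;

	if (cookie >= POOL_SIZE)	/* reject out-of-range cookies */
		return NULL;
	d = &pool[cookie];
	req = d->req;
	d->req = NULL;
	d->next = freelist;		/* return descriptor to the freelist */
	freelist = d;
	return req;
}

int main(void)
{
	pool_init();
	struct desc *d = desc_alloc();

	d->req = malloc(16);		/* stand-in for a stats request */
	printf("cookie sent to fw: %u\n", d->id);
	free(desc_get_req(d->id));	/* response-path lookup frees the desc */
	return 0;
}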

Change-Id: Ib49150da899c0b9314f614868a90867f4aa92d3d
CRs-Fixed: 2246110
jitiphil · 6 years ago · commit 335d24162d

core/dp/htt/htt_h2t.c  (+3, -3)

@@ -758,7 +758,7 @@ int
 htt_h2t_dbg_stats_get(struct htt_pdev_t *pdev,
 		      uint32_t stats_type_upload_mask,
 		      uint32_t stats_type_reset_mask,
-		      uint8_t cfg_stat_type, uint32_t cfg_val, uint64_t cookie)
+		      uint8_t cfg_stat_type, uint32_t cfg_val, uint8_t cookie)
 {
 	struct htt_htc_pkt *pkt;
 	qdf_nbuf_t msg;
@@ -822,11 +822,11 @@ htt_h2t_dbg_stats_get(struct htt_pdev_t *pdev,
 
 	/* cookie LSBs */
 	msg_word++;
-	*msg_word = cookie & 0xffffffff;
+	*msg_word = cookie;
 
 	/* cookie MSBs */
 	msg_word++;
-	*msg_word = cookie >> 32;
+	*msg_word = 0;
 
 	SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
 			       htt_h2t_send_complete_free_netbuf,
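
In the H2T request above, the cookie spans two 32-bit message words.
A pointer cookie spread kernel-address bits across both words; the
8-bit descriptor ID fits entirely in the LSB word, and the MSB word
is now simply zeroed. A compilable illustration (the pointer value
and word layout are invented for the example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t msg[2];

	/* before: a 64-bit kernel address split across both words */
	uint64_t ptr_cookie = 0xffffffc012345678ULL;	/* example address */
	msg[0] = ptr_cookie & 0xffffffff;	/* LSBs leak low address bits */
	msg[1] = ptr_cookie >> 32;		/* MSBs leak high address bits */

	/* after: an 8-bit descriptor ID; the MSB word carries nothing */
	uint8_t id_cookie = 7;
	msg[0] = id_cookie;
	msg[1] = 0;

	printf("lsb=0x%08x msb=0x%08x\n", (unsigned)msg[0], (unsigned)msg[1]);
	return 0;
}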

core/dp/htt/htt_t2h.c  (+1, -2)

@@ -429,11 +429,10 @@ static void htt_t2h_lp_msg_handler(void *context, qdf_nbuf_t htt_t2h_msg,
 	}
 	case HTT_T2H_MSG_TYPE_STATS_CONF:
 	{
-		uint64_t cookie;
+		uint8_t cookie;
 		uint8_t *stats_info_list;
 
 		cookie = *(msg_word + 1);
-		cookie |= ((uint64_t) (*(msg_word + 2))) << 32;
 
 		stats_info_list = (uint8_t *) (msg_word + 3);
 		htc_pm_runtime_put(pdev->htc_pdev);
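
On the T2H side, only the cookie LSB word is consumed now, and
ol_txrx_fw_stats_handler (below) rejects cookies outside the pool
before using them as an index. A standalone sketch of that
truncate-then-validate step, with invented message contents:

#include <stdint.h>
#include <stdio.h>

#define FW_STATS_DESC_POOL_SIZE 10

int main(void)
{
	/* first words of a hypothetical STATS_CONF message */
	uint32_t msg_word[3] = { 0x1f, 7 /* cookie LSBs */, 0 };

	/* the uint8_t assignment keeps only the low byte; the handler
	 * must still range-check before indexing the descriptor pool */
	uint8_t cookie = (uint8_t)*(msg_word + 1);

	if (cookie >= FW_STATS_DESC_POOL_SIZE) {
		printf("invalid cookie %u from target\n", cookie);
		return 1;
	}
	printf("valid cookie %u\n", cookie);
	return 0;
}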

core/dp/ol/inc/ol_htt_api.h  (+1, -1)

@@ -156,7 +156,7 @@ htt_h2t_dbg_stats_get(struct htt_pdev_t *pdev,
 		      uint32_t stats_type_upload_mask,
 		      uint32_t stats_type_reset_mask,
 		      uint8_t cfg_stats_type,
-		      uint32_t cfg_val, uint64_t cookie);
+		      uint32_t cfg_val, uint8_t cookie);
 
 /**
  * @brief Get the fields from HTT T2H stats upload message's stats info header

core/dp/ol/inc/ol_txrx_htt_api.h  (+1, -1)

@@ -596,7 +596,7 @@ ol_rx_pn_ind_handler(ol_txrx_pdev_handle pdev,
  */
 void
 ol_txrx_fw_stats_handler(ol_txrx_pdev_handle pdev,
-			 uint64_t cookie, uint8_t *stats_info_list);
+			 uint8_t cookie, uint8_t *stats_info_list);
 
 /**
  * @brief Process a tx inspect message sent by the target.

core/dp/txrx/ol_txrx.c  (+191, -10)

@@ -1443,6 +1443,7 @@ ol_txrx_pdev_attach(ol_txrx_soc_handle soc, struct cdp_cfg *ctrl_pdev,
 
 	TXRX_STATS_INIT(pdev);
 	ol_txrx_tso_stats_init(pdev);
+	ol_txrx_fw_stats_desc_pool_init(pdev, FW_STATS_DESC_POOL_SIZE);
 
 	TAILQ_INIT(&pdev->vdev_list);
 
@@ -1507,6 +1508,7 @@ fail2:
 
 fail1:
 	ol_txrx_tso_stats_deinit(pdev);
+	ol_txrx_fw_stats_desc_pool_deinit(pdev);
 	qdf_mem_free(pdev);
 
 fail0:
@@ -2295,6 +2297,7 @@ static void ol_txrx_pdev_detach(struct cdp_pdev *ppdev, int force)
 	htt_pdev_free(pdev->htt_pdev);
 	ol_txrx_peer_find_detach(pdev);
 	ol_txrx_tso_stats_deinit(pdev);
+	ol_txrx_fw_stats_desc_pool_deinit(pdev);
 
 	ol_txrx_pdev_txq_log_destroy(pdev);
 	ol_txrx_pdev_grp_stat_destroy(pdev);
@@ -4185,7 +4188,7 @@ void
 ol_txrx_fw_stats_cfg(ol_txrx_vdev_handle vdev,
 		     uint8_t cfg_stats_type, uint32_t cfg_val)
 {
-	uint64_t dummy_cookie = 0;
+	uint8_t dummy_cookie = 0;
 
 	htt_h2t_dbg_stats_get(vdev->pdev->htt_pdev, 0 /* upload mask */,
 			      0 /* reset mask */,
@@ -4193,14 +4196,164 @@ ol_txrx_fw_stats_cfg(ol_txrx_vdev_handle vdev,
 }
 #endif
 
+/**
+ * ol_txrx_fw_stats_desc_pool_init() - Initialize the fw stats descriptor pool
+ * @pdev: handle to ol txrx pdev
+ * @pool_size: Size of fw stats descriptor pool
+ *
+ * Return: 0 for success, error code on failure.
+ */
+int ol_txrx_fw_stats_desc_pool_init(struct ol_txrx_pdev_t *pdev,
+				    uint8_t pool_size)
+{
+	int i;
+
+	if (!pdev) {
+		ol_txrx_err("%s: pdev is NULL", __func__);
+		return -EINVAL;
+	}
+	pdev->ol_txrx_fw_stats_desc_pool.pool = qdf_mem_malloc(pool_size *
+		sizeof(struct ol_txrx_fw_stats_desc_elem_t));
+	if (!pdev->ol_txrx_fw_stats_desc_pool.pool) {
+		ol_txrx_err("%s: failed to allocate desc pool", __func__);
+		return -ENOMEM;
+	}
+	pdev->ol_txrx_fw_stats_desc_pool.freelist =
+		&pdev->ol_txrx_fw_stats_desc_pool.pool[0];
+	pdev->ol_txrx_fw_stats_desc_pool.pool_size = pool_size;
+
+	for (i = 0; i < (pool_size - 1); i++) {
+		pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.desc_id = i;
+		pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.req = NULL;
+		pdev->ol_txrx_fw_stats_desc_pool.pool[i].next =
+			&pdev->ol_txrx_fw_stats_desc_pool.pool[i + 1];
+	}
+	pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.desc_id = i;
+	pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.req = NULL;
+	pdev->ol_txrx_fw_stats_desc_pool.pool[i].next = NULL;
+	qdf_spinlock_create(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
+	qdf_atomic_init(&pdev->ol_txrx_fw_stats_desc_pool.initialized);
+	qdf_atomic_set(&pdev->ol_txrx_fw_stats_desc_pool.initialized, 1);
+	return 0;
+}
+
+/**
+ * ol_txrx_fw_stats_desc_pool_deinit() - Deinitialize the
+ * fw stats descriptor pool
+ * @pdev: handle to ol txrx pdev
+ *
+ * Return: None
+ */
+void ol_txrx_fw_stats_desc_pool_deinit(struct ol_txrx_pdev_t *pdev)
+{
+	struct ol_txrx_fw_stats_desc_elem_t *desc;
+	uint8_t i;
+
+	if (!pdev) {
+		ol_txrx_err("%s: pdev is NULL", __func__);
+		return;
+	}
+	if (!qdf_atomic_read(&pdev->ol_txrx_fw_stats_desc_pool.initialized)) {
+		ol_txrx_err("%s: Pool is not initialized", __func__);
+		return;
+	}
+	if (!pdev->ol_txrx_fw_stats_desc_pool.pool) {
+		ol_txrx_err("%s: Pool is not allocated", __func__);
+		return;
+	}
+	qdf_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
+	qdf_atomic_set(&pdev->ol_txrx_fw_stats_desc_pool.initialized, 0);
+	for (i = 0; i < pdev->ol_txrx_fw_stats_desc_pool.pool_size; i++) {
+		desc = &pdev->ol_txrx_fw_stats_desc_pool.pool[i];
+		if (desc && desc->desc.req)
+			qdf_mem_free(desc->desc.req);
+	}
+	qdf_mem_free(pdev->ol_txrx_fw_stats_desc_pool.pool);
+	pdev->ol_txrx_fw_stats_desc_pool.pool = NULL;
+
+	pdev->ol_txrx_fw_stats_desc_pool.freelist = NULL;
+	pdev->ol_txrx_fw_stats_desc_pool.pool_size = 0;
+	qdf_spin_unlock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
+}
+
+/**
+ * ol_txrx_fw_stats_desc_alloc() - Get a fw stats descriptor from the
+ * free descriptor pool
+ * @pdev: handle to ol txrx pdev
+ *
+ * Return: pointer to fw stats descriptor, NULL on failure
+ */
+struct ol_txrx_fw_stats_desc_t
+	*ol_txrx_fw_stats_desc_alloc(struct ol_txrx_pdev_t *pdev)
+{
+	struct ol_txrx_fw_stats_desc_t *desc = NULL;
+
+	qdf_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
+	if (!qdf_atomic_read(&pdev->ol_txrx_fw_stats_desc_pool.initialized)) {
+		qdf_spin_unlock_bh(&pdev->
+				   ol_txrx_fw_stats_desc_pool.pool_lock);
+		ol_txrx_err("%s: Pool deinitialized", __func__);
+		return NULL;
+	}
+	if (pdev->ol_txrx_fw_stats_desc_pool.freelist) {
+		desc = &pdev->ol_txrx_fw_stats_desc_pool.freelist->desc;
+		pdev->ol_txrx_fw_stats_desc_pool.freelist =
+			pdev->ol_txrx_fw_stats_desc_pool.freelist->next;
+	}
+	qdf_spin_unlock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
+
+	if (desc)
+		ol_txrx_dbg("%s: desc_id %d allocated",
+			    __func__, desc->desc_id);
+	else
+		ol_txrx_err("%s: fw stats descriptors are exhausted", __func__);
+
+	return desc;
+}
+
+/**
+ * ol_txrx_fw_stats_desc_get_req() - Get the request tracked by a
+ * descriptor and return the descriptor to the free pool
+ * @pdev: handle to ol txrx pdev
+ * @desc_id: descriptor ID of the fw stats descriptor
+ *
+ * Return: pointer to request, NULL on failure
+ */
+struct ol_txrx_stats_req_internal
+	*ol_txrx_fw_stats_desc_get_req(struct ol_txrx_pdev_t *pdev,
+				       uint8_t desc_id)
+{
+	struct ol_txrx_fw_stats_desc_elem_t *desc_elem;
+	struct ol_txrx_stats_req_internal *req;
+
+	qdf_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
+	if (!qdf_atomic_read(&pdev->ol_txrx_fw_stats_desc_pool.initialized)) {
+		qdf_spin_unlock_bh(&pdev->
+				   ol_txrx_fw_stats_desc_pool.pool_lock);
+		ol_txrx_err("%s: Desc ID %u Pool deinitialized",
+			    __func__, desc_id);
+		return NULL;
+	}
+	desc_elem = &pdev->ol_txrx_fw_stats_desc_pool.pool[desc_id];
+	req = desc_elem->desc.req;
+	desc_elem->desc.req = NULL;
+	desc_elem->next =
+		pdev->ol_txrx_fw_stats_desc_pool.freelist;
+	pdev->ol_txrx_fw_stats_desc_pool.freelist = desc_elem;
+	qdf_spin_unlock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
+	return req;
+}
+
 static A_STATUS
 ol_txrx_fw_stats_get(struct cdp_vdev *pvdev, struct ol_txrx_stats_req *req,
 			bool per_vdev, bool response_expected)
 {
 	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
-	uint64_t cookie;
+	uint8_t cookie = FW_STATS_DESC_POOL_SIZE;
 	struct ol_txrx_stats_req_internal *non_volatile_req;
+	struct ol_txrx_fw_stats_desc_t *desc = NULL;
+	struct ol_txrx_fw_stats_desc_elem_t *elem = NULL;
 
 	if (!pdev ||
 	    req->stats_type_upload_mask >= 1 << HTT_DBG_NUM_STATS ||
@@ -4220,11 +4373,16 @@ ol_txrx_fw_stats_get(struct cdp_vdev *pvdev, struct ol_txrx_stats_req *req,
 	non_volatile_req->base = *req;
 	non_volatile_req->serviced = 0;
 	non_volatile_req->offset = 0;
-
-	/* use the non-volatile request object's address as the cookie */
-	cookie = ol_txrx_stats_ptr_to_u64(non_volatile_req);
-
 	if (response_expected) {
+		desc = ol_txrx_fw_stats_desc_alloc(pdev);
+		if (!desc) {
+			qdf_mem_free(non_volatile_req);
+			return A_ERROR;
+		}
+
+		/* use the desc id as the cookie */
+		cookie = desc->desc_id;
+		desc->req = non_volatile_req;
 		qdf_spin_lock_bh(&pdev->req_list_spinlock);
 		TAILQ_INSERT_TAIL(&pdev->req_list, non_volatile_req, req_list_elem);
 		pdev->req_list_depth++;
@@ -4238,9 +4396,24 @@ ol_txrx_fw_stats_get(struct cdp_vdev *pvdev, struct ol_txrx_stats_req *req,
 				  cookie)) {
 		if (response_expected) {
 			qdf_spin_lock_bh(&pdev->req_list_spinlock);
-			TAILQ_REMOVE(&pdev->req_list, non_volatile_req, req_list_elem);
+			TAILQ_REMOVE(&pdev->req_list, non_volatile_req,
+				     req_list_elem);
 			pdev->req_list_depth--;
 			qdf_spin_unlock_bh(&pdev->req_list_spinlock);
+			if (desc) {
+				qdf_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.
+						 pool_lock);
+				desc->req = NULL;
+				elem = container_of(desc,
+						    struct ol_txrx_fw_stats_desc_elem_t,
+						    desc);
+				elem->next =
+					pdev->ol_txrx_fw_stats_desc_pool.freelist;
+				pdev->ol_txrx_fw_stats_desc_pool.freelist = elem;
+				qdf_spin_unlock_bh(&pdev->
+						   ol_txrx_fw_stats_desc_pool.
+						   pool_lock);
+			}
 		}
 
 		qdf_mem_free(non_volatile_req);
@@ -4255,7 +4428,7 @@ ol_txrx_fw_stats_get(struct cdp_vdev *pvdev, struct ol_txrx_stats_req *req,
 
 void
 ol_txrx_fw_stats_handler(ol_txrx_pdev_handle pdev,
-			 uint64_t cookie, uint8_t *stats_info_list)
+			 uint8_t cookie, uint8_t *stats_info_list)
 {
 	enum htt_dbg_stats_type type;
 	enum htt_cmn_dbg_stats_type cmn_type = HTT_DBG_CMN_NUM_STATS_INVALID;
@@ -4266,8 +4439,16 @@ ol_txrx_fw_stats_handler(ol_txrx_pdev_handle pdev,
 	int more = 0;
 	int found = 0;
 
-	req = ol_txrx_u64_to_stats_ptr(cookie);
-
+	if (cookie >= FW_STATS_DESC_POOL_SIZE) {
+		ol_txrx_err("%s: Cookie is not valid", __func__);
+		return;
+	}
+	req = ol_txrx_fw_stats_desc_get_req(pdev, (uint8_t)cookie);
+	if (!req) {
+		ol_txrx_err("%s: Request not retrieved for cookie %u", __func__,
+			    (uint8_t)cookie);
+		return;
+	}
 	qdf_spin_lock_bh(&pdev->req_list_spinlock);
 	TAILQ_FOREACH(tmp, &pdev->req_list, req_list_elem) {
 		if (req == tmp) {
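
One detail in the error path above: ol_txrx_fw_stats_get holds a
pointer to the embedded ol_txrx_fw_stats_desc_t, so it must use
container_of to recover the enclosing pool element before pushing it
back on the freelist. A self-contained illustration of that pointer
arithmetic, with the driver's types reduced to minimal stand-ins:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct desc {
	void *req;
	unsigned char desc_id;
};

struct desc_elem {
	struct desc_elem *next;
	struct desc desc;
};

int main(void)
{
	struct desc_elem elem = { .desc = { .req = NULL, .desc_id = 3 } };
	struct desc *d = &elem.desc;

	/* walk back from the member to its enclosing element */
	struct desc_elem *e = container_of(d, struct desc_elem, desc);

	printf("recovered elem for desc_id %u: %s\n", d->desc_id,
	       e == &elem ? "ok" : "mismatch");
	return 0;
}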

core/dp/txrx/ol_txrx.h  (+12, -0)

@@ -89,6 +89,9 @@ ol_tx_desc_pool_size_hl(struct cdp_cfg *ctrl_pdev);
 #define OL_TX_DESC_POOL_SIZE_MAX_HL 5000
 #endif
 
+#ifndef FW_STATS_DESC_POOL_SIZE
+#define FW_STATS_DESC_POOL_SIZE 10
+#endif
 
 #ifdef CONFIG_PER_VDEV_TX_DESC_POOL
 #define TXRX_HL_TX_FLOW_CTRL_VDEV_LOW_WATER_MARK 400
@@ -247,4 +250,13 @@ ol_txrx_init_txq_group_limit_lend(struct ol_txrx_pdev_t *pdev)
 {}
 #endif
 
+int ol_txrx_fw_stats_desc_pool_init(struct ol_txrx_pdev_t *pdev,
+				    uint8_t pool_size);
+void ol_txrx_fw_stats_desc_pool_deinit(struct ol_txrx_pdev_t *pdev);
+struct ol_txrx_fw_stats_desc_t
+	*ol_txrx_fw_stats_desc_alloc(struct ol_txrx_pdev_t *pdev);
+struct ol_txrx_stats_req_internal
+	*ol_txrx_fw_stats_desc_get_req(struct ol_txrx_pdev_t *pdev,
+				       uint8_t desc_id);
+
 #endif /* _OL_TXRX__H_ */

core/dp/txrx/ol_txrx_types.h  (+17, -0)

@@ -535,6 +535,15 @@ struct ol_txrx_stats_req_internal {
     int offset;
 };
 
+struct ol_txrx_fw_stats_desc_t {
+	struct ol_txrx_stats_req_internal *req;
+	uint8_t desc_id;
+};
+
+struct ol_txrx_fw_stats_desc_elem_t {
+	struct ol_txrx_fw_stats_desc_elem_t *next;
+	struct ol_txrx_fw_stats_desc_t desc;
+};
 
 /*
  * As depicted in the diagram below, the pdev contains an array of
@@ -649,6 +658,14 @@ struct ol_txrx_pdev_t {
 	qdf_atomic_t target_tx_credit;
 	qdf_atomic_t orig_target_tx_credit;
 
+	struct {
+		uint16_t pool_size;
+		struct ol_txrx_fw_stats_desc_elem_t *pool;
+		struct ol_txrx_fw_stats_desc_elem_t *freelist;
+		qdf_spinlock_t pool_lock;
+		qdf_atomic_t initialized;
+	} ol_txrx_fw_stats_desc_pool;
+
 	/* Peer mac address to staid mapping */
 	struct ol_mac_addr mac_to_staid[WLAN_MAX_STA_COUNT + 3];
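
The pool bookkeeping added here pairs a spinlock with an atomic
initialized flag so that a stats response arriving during or after
pdev teardown cannot touch freed pool memory: deinit clears the flag
under the lock, and the alloc and lookup paths re-check it under the
same lock before dereferencing the pool. A rough pthreads analogue of
that guard, with the qdf primitives and the actual pool replaced by
stand-ins:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static bool initialized;

static void *lookup(unsigned char cookie)
{
	void *req = NULL;

	pthread_mutex_lock(&pool_lock);
	if (initialized && cookie < 10)	/* bounds check as in the driver */
		req = NULL;		/* actual pool lookup elided */
	pthread_mutex_unlock(&pool_lock);
	return req;
}

static void deinit(void)
{
	pthread_mutex_lock(&pool_lock);
	initialized = false;	/* late responses now bail out in lookup() */
	/* free the pool memory while still holding the lock */
	pthread_mutex_unlock(&pool_lock);
}

int main(void)
{
	initialized = true;
	lookup(3);
	deinit();
	printf("lookup after deinit: %p\n", lookup(3));
	return 0;
}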