@@ -1443,6 +1443,7 @@ ol_txrx_pdev_attach(ol_txrx_soc_handle soc, struct cdp_cfg *ctrl_pdev,
 
 	TXRX_STATS_INIT(pdev);
 	ol_txrx_tso_stats_init(pdev);
+	ol_txrx_fw_stats_desc_pool_init(pdev, FW_STATS_DESC_POOL_SIZE);
 
 	TAILQ_INIT(&pdev->vdev_list);
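
FW_STATS_DESC_POOL_SIZE comes from the header half of this change, which is
not among the hunks shown here. Because the cookie that carries a descriptor
id is a uint8_t, the pool size must fit in 8 bits; a minimal sketch of the
assumed definition (the value is an assumption, not taken from these hunks):

	#define FW_STATS_DESC_POOL_SIZE 10	/* assumed; must be <= 255 */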

@@ -1507,6 +1508,7 @@ fail2:
 
 fail1:
 	ol_txrx_tso_stats_deinit(pdev);
+	ol_txrx_fw_stats_desc_pool_deinit(pdev);
 	qdf_mem_free(pdev);
 
 fail0:

@@ -2295,6 +2297,7 @@ static void ol_txrx_pdev_detach(struct cdp_pdev *ppdev, int force)
 	htt_pdev_free(pdev->htt_pdev);
 	ol_txrx_peer_find_detach(pdev);
 	ol_txrx_tso_stats_deinit(pdev);
+	ol_txrx_fw_stats_desc_pool_deinit(pdev);
 
 	ol_txrx_pdev_txq_log_destroy(pdev);
 	ol_txrx_pdev_grp_stat_destroy(pdev);

@@ -4185,7 +4188,7 @@ void
 ol_txrx_fw_stats_cfg(ol_txrx_vdev_handle vdev,
 		     uint8_t cfg_stats_type, uint32_t cfg_val)
 {
-	uint64_t dummy_cookie = 0;
+	uint8_t dummy_cookie = 0;
 
 	htt_h2t_dbg_stats_get(vdev->pdev->htt_pdev, 0 /* upload mask */,
			       0 /* reset mask */,

@@ -4193,14 +4196,164 @@ ol_txrx_fw_stats_cfg(ol_txrx_vdev_handle vdev,
 }
 #endif
 
+/**
+ * ol_txrx_fw_stats_desc_pool_init() - Initialize the fw stats descriptor pool
+ * @pdev: handle to ol txrx pdev
+ * @pool_size: Size of fw stats descriptor pool
+ *
+ * Return: 0 for success, error code on failure.
+ */
+int ol_txrx_fw_stats_desc_pool_init(struct ol_txrx_pdev_t *pdev,
+				    uint8_t pool_size)
+{
+	int i;
+
+	if (!pdev) {
+		ol_txrx_err("%s: pdev is NULL", __func__);
+		return -EINVAL;
+	}
+	pdev->ol_txrx_fw_stats_desc_pool.pool = qdf_mem_malloc(pool_size *
+		sizeof(struct ol_txrx_fw_stats_desc_elem_t));
+	if (!pdev->ol_txrx_fw_stats_desc_pool.pool) {
+		ol_txrx_err("%s: failed to allocate desc pool", __func__);
+		return -ENOMEM;
+	}
+	pdev->ol_txrx_fw_stats_desc_pool.freelist =
+		&pdev->ol_txrx_fw_stats_desc_pool.pool[0];
+	pdev->ol_txrx_fw_stats_desc_pool.pool_size = pool_size;
+
+	for (i = 0; i < (pool_size - 1); i++) {
+		pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.desc_id = i;
+		pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.req = NULL;
+		pdev->ol_txrx_fw_stats_desc_pool.pool[i].next =
+			&pdev->ol_txrx_fw_stats_desc_pool.pool[i + 1];
+	}
+	pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.desc_id = i;
+	pdev->ol_txrx_fw_stats_desc_pool.pool[i].desc.req = NULL;
+	pdev->ol_txrx_fw_stats_desc_pool.pool[i].next = NULL;
+	qdf_spinlock_create(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
+	qdf_atomic_init(&pdev->ol_txrx_fw_stats_desc_pool.initialized);
+	qdf_atomic_set(&pdev->ol_txrx_fw_stats_desc_pool.initialized, 1);
+	return 0;
+}
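
The pool built above is an array whose elements are chained into a singly
linked freelist. The supporting types belong to the header half of this
change and are not shown in these hunks; a minimal sketch of what the code
assumes, with field names taken from the usage above and the layout
otherwise hypothetical:

	struct ol_txrx_fw_stats_desc_t {
		struct ol_txrx_stats_req_internal *req;	/* in-flight request */
		unsigned char desc_id;			/* index, used as cookie */
	};

	struct ol_txrx_fw_stats_desc_elem_t {
		struct ol_txrx_fw_stats_desc_elem_t *next;	/* freelist link */
		struct ol_txrx_fw_stats_desc_t desc;
	};

	/* embedded in struct ol_txrx_pdev_t as ol_txrx_fw_stats_desc_pool */
	struct {
		uint8_t pool_size;
		struct ol_txrx_fw_stats_desc_elem_t *pool;	/* backing array */
		struct ol_txrx_fw_stats_desc_elem_t *freelist;	/* next free elem */
		qdf_spinlock_t pool_lock;
		qdf_atomic_t initialized;
	} ol_txrx_fw_stats_desc_pool;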
+
+/**
+ * ol_txrx_fw_stats_desc_pool_deinit() - Deinitialize the
+ * fw stats descriptor pool
+ * @pdev: handle to ol txrx pdev
+ *
+ * Return: None
+ */
+void ol_txrx_fw_stats_desc_pool_deinit(struct ol_txrx_pdev_t *pdev)
+{
+	struct ol_txrx_fw_stats_desc_elem_t *desc;
+	uint8_t i;
+
+	if (!pdev) {
+		ol_txrx_err("%s: pdev is NULL", __func__);
+		return;
+	}
+	if (!qdf_atomic_read(&pdev->ol_txrx_fw_stats_desc_pool.initialized)) {
+		ol_txrx_err("%s: Pool is not initialized", __func__);
+		return;
+	}
+	if (!pdev->ol_txrx_fw_stats_desc_pool.pool) {
+		ol_txrx_err("%s: Pool is not allocated", __func__);
+		return;
+	}
+	qdf_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
+	qdf_atomic_set(&pdev->ol_txrx_fw_stats_desc_pool.initialized, 0);
+	for (i = 0; i < pdev->ol_txrx_fw_stats_desc_pool.pool_size; i++) {
+		desc = &pdev->ol_txrx_fw_stats_desc_pool.pool[i];
+		if (desc && desc->desc.req)
+			qdf_mem_free(desc->desc.req);
+	}
+	qdf_mem_free(pdev->ol_txrx_fw_stats_desc_pool.pool);
+	pdev->ol_txrx_fw_stats_desc_pool.pool = NULL;
+
+	pdev->ol_txrx_fw_stats_desc_pool.freelist = NULL;
+	pdev->ol_txrx_fw_stats_desc_pool.pool_size = 0;
+	qdf_spin_unlock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
+}
+
+/**
+ * ol_txrx_fw_stats_desc_alloc() - Get a fw stats descriptor from the
+ * free descriptor pool
+ * @pdev: handle to ol txrx pdev
+ *
+ * Return: pointer to fw stats descriptor, NULL on failure
+ */
+struct ol_txrx_fw_stats_desc_t
+	*ol_txrx_fw_stats_desc_alloc(struct ol_txrx_pdev_t *pdev)
+{
+	struct ol_txrx_fw_stats_desc_t *desc = NULL;
+
+	qdf_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
+	if (!qdf_atomic_read(&pdev->ol_txrx_fw_stats_desc_pool.initialized)) {
+		qdf_spin_unlock_bh(&pdev->
+				   ol_txrx_fw_stats_desc_pool.pool_lock);
+		ol_txrx_err("%s: Pool deinitialized", __func__);
+		return NULL;
+	}
+	if (pdev->ol_txrx_fw_stats_desc_pool.freelist) {
+		desc = &pdev->ol_txrx_fw_stats_desc_pool.freelist->desc;
+		pdev->ol_txrx_fw_stats_desc_pool.freelist =
+			pdev->ol_txrx_fw_stats_desc_pool.freelist->next;
+	}
+	qdf_spin_unlock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
+
+	if (desc)
+		ol_txrx_dbg("%s: desc_id %d allocated",
+			    __func__, desc->desc_id);
+	else
+		ol_txrx_err("%s: fw stats descriptors are exhausted", __func__);
+
+	return desc;
+}
+
+/**
+ * ol_txrx_fw_stats_desc_get_req() - Return the request tied to a descriptor
+ * and put the descriptor back into the free pool
+ * @pdev: handle to ol txrx pdev
+ * @desc_id: descriptor ID, i.e. the cookie received from firmware
+ *
+ * Return: pointer to the request, NULL if the pool is deinitialized
+ */
+struct ol_txrx_stats_req_internal
+	*ol_txrx_fw_stats_desc_get_req(struct ol_txrx_pdev_t *pdev,
+				       unsigned char desc_id)
+{
+	struct ol_txrx_fw_stats_desc_elem_t *desc_elem;
+	struct ol_txrx_stats_req_internal *req;
+
+	qdf_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
+	if (!qdf_atomic_read(&pdev->ol_txrx_fw_stats_desc_pool.initialized)) {
+		qdf_spin_unlock_bh(&pdev->
+				   ol_txrx_fw_stats_desc_pool.pool_lock);
+		ol_txrx_err("%s: Desc ID %u Pool deinitialized",
+			    __func__, desc_id);
+		return NULL;
+	}
+	desc_elem = &pdev->ol_txrx_fw_stats_desc_pool.pool[desc_id];
+	req = desc_elem->desc.req;
+	desc_elem->desc.req = NULL;
+	desc_elem->next =
+		pdev->ol_txrx_fw_stats_desc_pool.freelist;
+	pdev->ol_txrx_fw_stats_desc_pool.freelist = desc_elem;
+	qdf_spin_unlock_bh(&pdev->ol_txrx_fw_stats_desc_pool.pool_lock);
+	return req;
+}
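
Taken together with ol_txrx_fw_stats_get() and ol_txrx_fw_stats_handler()
below, the descriptor round-trip works as follows (a condensed sketch of the
code in this patch, not additional logic):

	/* request path: pop a descriptor, stash the request, send its id */
	desc = ol_txrx_fw_stats_desc_alloc(pdev);
	desc->req = non_volatile_req;
	cookie = desc->desc_id;		/* 8-bit index, no longer a pointer */

	/* response path: bound the firmware-supplied cookie, then look up;
	 * ol_txrx_fw_stats_desc_get_req() also returns the descriptor to
	 * the freelist.
	 */
	if (cookie >= FW_STATS_DESC_POOL_SIZE)
		return;
	req = ol_txrx_fw_stats_desc_get_req(pdev, (uint8_t)cookie);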
+
 static A_STATUS
 ol_txrx_fw_stats_get(struct cdp_vdev *pvdev, struct ol_txrx_stats_req *req,
 		     bool per_vdev, bool response_expected)
 {
 	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
-	uint64_t cookie;
+	uint8_t cookie = FW_STATS_DESC_POOL_SIZE;
 	struct ol_txrx_stats_req_internal *non_volatile_req;
+	struct ol_txrx_fw_stats_desc_t *desc = NULL;
+	struct ol_txrx_fw_stats_desc_elem_t *elem = NULL;
 
 	if (!pdev ||
 	    req->stats_type_upload_mask >= 1 << HTT_DBG_NUM_STATS ||

@@ -4220,11 +4373,16 @@ ol_txrx_fw_stats_get(struct cdp_vdev *pvdev, struct ol_txrx_stats_req *req,
 	non_volatile_req->base = *req;
 	non_volatile_req->serviced = 0;
 	non_volatile_req->offset = 0;
-
-	/* use the non-volatile request object's address as the cookie */
-	cookie = ol_txrx_stats_ptr_to_u64(non_volatile_req);
-
 	if (response_expected) {
+		desc = ol_txrx_fw_stats_desc_alloc(pdev);
+		if (!desc) {
+			qdf_mem_free(non_volatile_req);
+			return A_ERROR;
+		}
+
+		/* use the desc id as the cookie */
+		cookie = desc->desc_id;
+		desc->req = non_volatile_req;
 		qdf_spin_lock_bh(&pdev->req_list_spinlock);
 		TAILQ_INSERT_TAIL(&pdev->req_list, non_volatile_req, req_list_elem);
 		pdev->req_list_depth++;

@@ -4238,9 +4396,24 @@ ol_txrx_fw_stats_get(struct cdp_vdev *pvdev, struct ol_txrx_stats_req *req,
 			  cookie)) {
 		if (response_expected) {
 			qdf_spin_lock_bh(&pdev->req_list_spinlock);
-			TAILQ_REMOVE(&pdev->req_list, non_volatile_req, req_list_elem);
+			TAILQ_REMOVE(&pdev->req_list, non_volatile_req,
+				     req_list_elem);
 			pdev->req_list_depth--;
 			qdf_spin_unlock_bh(&pdev->req_list_spinlock);
+			if (desc) {
+				qdf_spin_lock_bh(&pdev->ol_txrx_fw_stats_desc_pool.
+						 pool_lock);
+				desc->req = NULL;
+				elem = container_of(desc,
+						    struct ol_txrx_fw_stats_desc_elem_t,
+						    desc);
+				elem->next =
+					pdev->ol_txrx_fw_stats_desc_pool.freelist;
+				pdev->ol_txrx_fw_stats_desc_pool.freelist = elem;
+				qdf_spin_unlock_bh(&pdev->
+						   ol_txrx_fw_stats_desc_pool.
+						   pool_lock);
+			}
 		}
 
 		qdf_mem_free(non_volatile_req);

@@ -4255,7 +4428,7 @@ ol_txrx_fw_stats_get(struct cdp_vdev *pvdev, struct ol_txrx_stats_req *req,
 
 void
 ol_txrx_fw_stats_handler(ol_txrx_pdev_handle pdev,
-			 uint64_t cookie, uint8_t *stats_info_list)
+			 uint8_t cookie, uint8_t *stats_info_list)
 {
 	enum htt_dbg_stats_type type;
 	enum htt_cmn_dbg_stats_type cmn_type = HTT_DBG_CMN_NUM_STATS_INVALID;

@@ -4266,8 +4439,16 @@ ol_txrx_fw_stats_handler(ol_txrx_pdev_handle pdev,
 	int more = 0;
 	int found = 0;
 
-	req = ol_txrx_u64_to_stats_ptr(cookie);
-
+	if (cookie >= FW_STATS_DESC_POOL_SIZE) {
+		ol_txrx_err("%s: Cookie is not valid", __func__);
+		return;
+	}
+	req = ol_txrx_fw_stats_desc_get_req(pdev, (uint8_t)cookie);
+	if (!req) {
+		ol_txrx_err("%s: Request not retrieved for cookie %u", __func__,
+			    (uint8_t)cookie);
+		return;
+	}
 	qdf_spin_lock_bh(&pdev->req_list_spinlock);
 	TAILQ_FOREACH(tmp, &pdev->req_list, req_list_elem) {
 		if (req == tmp) {
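
Net effect of the cookie changes: the old path cast a 64-bit,
firmware-controlled cookie straight back into a host pointer via
ol_txrx_u64_to_stats_ptr(), so a corrupted or malicious stats response could
make the host dereference an arbitrary address. With this patch the cookie
is an 8-bit pool index, bounds-checked against FW_STATS_DESC_POOL_SIZE
before any lookup, and the request pointer itself never leaves the host.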