@@ -208,6 +208,8 @@ static int ol_tx_delete_flow_pool(struct ol_tx_flow_pool_t *pool, bool force)
 	}
 
 	qdf_spin_unlock_bh(&pdev->tx_mutex);
+	ol_tx_distribute_descs_to_deficient_pools_from_global_pool();
+
 	return 0;
 }
 
@@ -301,14 +303,18 @@ void ol_tx_dump_flow_pool_info(void *ctx)
 		QDF_ASSERT(0); /* traceback */
 		return;
 	}
+
 	ol_txrx_log(QDF_TRACE_LEVEL_INFO_LOW,
-		    "Global total %d :: avail %d invalid flow_pool %d "
-		    "maps %d pool unmaps %d pkt drops %d",
+		    "Global total %d :: avail %d invalid flow_pool %d ",
 		    pdev->tx_desc.pool_size,
 		    pdev->tx_desc.num_free,
-		    pdev->tx_desc.num_invalid_bin,
+		    pdev->tx_desc.num_invalid_bin);
+
+	ol_txrx_log(QDF_TRACE_LEVEL_INFO_LOW,
+		    "maps %d pool unmaps %d pool resize %d pkt drops %d",
 		    pdev->pool_stats.pool_map_count,
 		    pdev->pool_stats.pool_unmap_count,
+		    pdev->pool_stats.pool_resize_count,
 		    pdev->pool_stats.pkt_drop_no_pool);
 	/*
 	 * Nested spin lock.
@@ -334,10 +340,10 @@ void ol_tx_dump_flow_pool_info(void *ctx)
 			    ol_tx_flow_pool_status_to_str(tmp_pool.status),
 			    tmp_pool.member_flow_id, tmp_pool.flow_type);
 		ol_txrx_log(QDF_TRACE_LEVEL_INFO_LOW,
-			    "total %d :: available %d :: deficient %d :: "
-			    "pkt dropped (no desc) %d",
+			    "total %d :: available %d :: deficient %d :: overflow %d :: pkt dropped (no desc) %d",
 			    tmp_pool.flow_pool_size, tmp_pool.avail_desc,
 			    tmp_pool.deficient_desc,
+			    tmp_pool.overflow_desc,
 			    tmp_pool.pkt_drop_no_desc);
 		ol_txrx_log(QDF_TRACE_LEVEL_INFO_LOW,
 			    "thresh: start %d stop %d prio start %d prio stop %d",
@@ -479,7 +485,6 @@ ol_tx_distribute_descs_to_deficient_pools(struct ol_tx_flow_pool_t *src_pool)
 	return 0;
 }
 
-
 /**
  * ol_tx_create_flow_pool() - create flow pool
  * @flow_pool_id: flow pool id
@@ -549,6 +554,8 @@ struct ol_tx_flow_pool_t *ol_tx_create_flow_pool(uint8_t flow_pool_id,
 	pool->freelist = temp_list;
 	pool->avail_desc = size;
 	pool->deficient_desc = pool->flow_pool_size - pool->avail_desc;
+	/* used for pool resize */
+	pool->overflow_desc = 0;
 
 	/* Add flow_pool to flow_pool_list */
 	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
@@ -627,7 +634,6 @@ static struct ol_tx_flow_pool_t *ol_tx_get_flow_pool(uint8_t flow_pool_id)
 	return pool;
 }
 
-
 /**
  * ol_tx_flow_pool_vdev_map() - Map flow_pool with vdev
  * @pool: flow_pool
@@ -797,4 +803,349 @@ void ol_tx_flow_pool_unmap_handler(uint8_t flow_id, uint8_t flow_type,
 	ol_tx_dec_pool_ref(pool, false);
 }
 
+#ifdef QCA_LL_TX_FLOW_CONTROL_RESIZE
+/**
+ * ol_tx_distribute_descs_to_deficient_pools_from_global_pool()
+ *
+ * Distribute descriptors from the global pool to all deficient
+ * pools, as needed.
+ *
+ * Return: 0 for success
+ */
+int ol_tx_distribute_descs_to_deficient_pools_from_global_pool(void)
+{
+	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
+	struct ol_tx_flow_pool_t *dst_pool = NULL;
+	struct ol_tx_flow_pool_t *tmp_pool = NULL;
+	uint16_t total_desc_req = 0;
+	uint16_t desc_move_count = 0;
+	uint16_t temp_count = 0, i;
+	union ol_tx_desc_list_elem_t *temp_list = NULL;
+	struct ol_tx_desc_t *tx_desc;
+	uint8_t free_invalid_pool = 0;
+
+	if (!pdev) {
+		ol_txrx_err("%s: pdev is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Nested locks: maintain flow_pool_list_lock->flow_pool_lock */
+	/* find out total deficient desc required */
+	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
+	TAILQ_FOREACH(dst_pool, &pdev->tx_desc.flow_pool_list,
+		      flow_pool_list_elem) {
+		qdf_spin_lock_bh(&dst_pool->flow_pool_lock);
+		total_desc_req += dst_pool->deficient_desc;
+		qdf_spin_unlock_bh(&dst_pool->flow_pool_lock);
+	}
+	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
+
+	qdf_spin_lock_bh(&pdev->tx_mutex);
+	desc_move_count = (pdev->tx_desc.num_free >= total_desc_req) ?
+			   total_desc_req : pdev->tx_desc.num_free;
+
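+	/* Detach up to desc_move_count descriptors from the global free list */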
+	for (i = 0; i < desc_move_count; i++) {
+		tx_desc = ol_tx_get_desc_global_pool(pdev);
+		((union ol_tx_desc_list_elem_t *)tx_desc)->next = temp_list;
+		temp_list = (union ol_tx_desc_list_elem_t *)tx_desc;
+	}
+	qdf_spin_unlock_bh(&pdev->tx_mutex);
+
+	if (!desc_move_count)
+		return 0;
+
+	/* distribute desc to deficient pools */
+	qdf_spin_lock_bh(&pdev->tx_desc.flow_pool_list_lock);
+	TAILQ_FOREACH(dst_pool, &pdev->tx_desc.flow_pool_list,
+		      flow_pool_list_elem) {
+		qdf_spin_lock_bh(&dst_pool->flow_pool_lock);
+		if (dst_pool->deficient_desc) {
+			temp_count =
+				(dst_pool->deficient_desc > desc_move_count) ?
+				desc_move_count : dst_pool->deficient_desc;
+
+			desc_move_count -= temp_count;
+			for (i = 0; i < temp_count; i++) {
+				tx_desc = &temp_list->tx_desc;
+				temp_list = temp_list->next;
+				ol_tx_put_desc_flow_pool(dst_pool, tx_desc);
+			}
+
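+			/* Refilled past start_th: wake queues and unpause */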
+			if (dst_pool->status == FLOW_POOL_ACTIVE_PAUSED) {
+				if (dst_pool->avail_desc > dst_pool->start_th) {
+					pdev->pause_cb(dst_pool->member_flow_id,
+						       WLAN_WAKE_ALL_NETIF_QUEUE,
+						       WLAN_DATA_FLOW_CONTROL);
+					dst_pool->status =
+						FLOW_POOL_ACTIVE_UNPAUSED;
+				}
+			} else if ((dst_pool->status == FLOW_POOL_INVALID) &&
+				   (dst_pool->avail_desc ==
+				    dst_pool->flow_pool_size)) {
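+				/* Fully refilled invalid pool: free it after locks drop */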
+				free_invalid_pool = 1;
+				tmp_pool = dst_pool;
+			}
+		}
+		qdf_spin_unlock_bh(&dst_pool->flow_pool_lock);
+		if (desc_move_count == 0)
+			break;
+	}
+	qdf_spin_unlock_bh(&pdev->tx_desc.flow_pool_list_lock);
+
+	if (free_invalid_pool && tmp_pool)
+		ol_tx_free_invalid_flow_pool(tmp_pool);
+
+	return 0;
+}
+
+/**
+ * ol_tx_flow_pool_update_queue_state() - update network queue state
+ * for the pool, based on the new available descriptor count.
+ * @pdev: pdev handle
+ * @pool: pool handle
+ *
+ * Return: none
+ */
+static void ol_tx_flow_pool_update_queue_state(struct ol_txrx_pdev_t *pdev,
+					       struct ol_tx_flow_pool_t *pool)
+{
+	qdf_spin_lock_bh(&pool->flow_pool_lock);
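+	/* Pick the netif queue action from avail_desc vs. the thresholds */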
+	if (pool->avail_desc > pool->start_th) {
+		pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
+		qdf_spin_unlock_bh(&pool->flow_pool_lock);
+		pdev->pause_cb(pool->member_flow_id,
+			       WLAN_WAKE_ALL_NETIF_QUEUE,
+			       WLAN_DATA_FLOW_CONTROL);
+	} else if (pool->avail_desc < pool->stop_th &&
+		   pool->avail_desc >= pool->stop_priority_th) {
+		pool->status = FLOW_POOL_NON_PRIO_PAUSED;
+		qdf_spin_unlock_bh(&pool->flow_pool_lock);
+		pdev->pause_cb(pool->member_flow_id,
+			       WLAN_STOP_NON_PRIORITY_QUEUE,
+			       WLAN_DATA_FLOW_CONTROL);
+		pdev->pause_cb(pool->member_flow_id,
+			       WLAN_NETIF_PRIORITY_QUEUE_ON,
+			       WLAN_DATA_FLOW_CONTROL);
+	} else if (pool->avail_desc < pool->stop_priority_th) {
+		pool->status = FLOW_POOL_ACTIVE_PAUSED;
+		qdf_spin_unlock_bh(&pool->flow_pool_lock);
+		pdev->pause_cb(pool->member_flow_id,
+			       WLAN_STOP_ALL_NETIF_QUEUE,
+			       WLAN_DATA_FLOW_CONTROL);
+	} else {
+		qdf_spin_unlock_bh(&pool->flow_pool_lock);
+	}
+}
+
+/**
+ * ol_tx_flow_pool_update() - update pool parameters with new size
+ * @pool: pool handle
+ * @new_pool_size: new pool size
+ * @deficient_count: deficient count
+ * @overflow_count: overflow count
+ *
+ * Return: none
+ */
+static void ol_tx_flow_pool_update(struct ol_tx_flow_pool_t *pool,
+				   uint16_t new_pool_size,
+				   uint16_t deficient_count,
+				   uint16_t overflow_count)
+{
+	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
+	uint32_t stop_threshold =
+			ol_cfg_get_tx_flow_stop_queue_th(pdev->ctrl_pdev);
+	uint32_t start_threshold = stop_threshold +
+			ol_cfg_get_tx_flow_start_queue_offset(pdev->ctrl_pdev);
+
+	pool->flow_pool_size = new_pool_size;
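+	/* start/stop thresholds are percentages of the new pool size */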
+	pool->start_th = (start_threshold * new_pool_size) / 100;
+	pool->stop_th = (stop_threshold * new_pool_size) / 100;
+	pool->stop_priority_th = (TX_PRIORITY_TH * pool->stop_th) / 100;
+	if (pool->stop_priority_th >= MAX_TSO_SEGMENT_DESC)
+		pool->stop_priority_th -= MAX_TSO_SEGMENT_DESC;
+
+	pool->start_priority_th = (TX_PRIORITY_TH * pool->start_th) / 100;
+	if (pool->start_priority_th >= MAX_TSO_SEGMENT_DESC)
+		pool->start_priority_th -= MAX_TSO_SEGMENT_DESC;
+
+	if (deficient_count)
+		pool->deficient_desc = deficient_count;
+
+	if (overflow_count)
+		pool->overflow_desc = overflow_count;
+}
+
+/**
+ * ol_tx_flow_pool_resize() - resize pool with new size
+ * @pool: pool pointer
+ * @new_pool_size: new pool size
+ *
+ * Return: none
+ */
+static void ol_tx_flow_pool_resize(struct ol_tx_flow_pool_t *pool,
+				   uint16_t new_pool_size)
+{
+	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
+	uint16_t diff = 0, overflow_count = 0, deficient_count = 0;
+	uint16_t move_desc_to_global = 0, move_desc_from_global = 0;
+	union ol_tx_desc_list_elem_t *temp_list = NULL;
+	int i = 0, update_done = 0;
+	struct ol_tx_desc_t *tx_desc = NULL;
+	uint16_t temp = 0;
+
+	qdf_spin_lock_bh(&pool->flow_pool_lock);
+	if (pool->flow_pool_size == new_pool_size) {
+		qdf_spin_unlock_bh(&pool->flow_pool_lock);
+		ol_txrx_info("pool resize received with same size");
+		return;
+	}
+	qdf_spin_unlock_bh(&pool->flow_pool_lock);
+
+	/* Reduce pool size */
+	/* start_priority_th descriptors should remain available after reduction */
+	qdf_spin_lock_bh(&pool->flow_pool_lock);
+	if (pool->flow_pool_size > new_pool_size) {
+		diff = pool->flow_pool_size - new_pool_size;
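+		/* Fold recorded overflow into diff, then cancel against deficit */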
+		diff += pool->overflow_desc;
+		pool->overflow_desc = 0;
+		temp = QDF_MIN(pool->deficient_desc, diff);
+		pool->deficient_desc -= temp;
+		diff -= temp;
+
+		if (diff) {
+			/* Have enough descriptors */
+			if (pool->avail_desc >=
+			    (diff + pool->start_priority_th)) {
+				move_desc_to_global = diff;
+			}
+			/* Do not have enough descriptors */
+			else if (pool->avail_desc > pool->start_priority_th) {
+				move_desc_to_global = pool->avail_desc -
+						      pool->start_priority_th;
+				overflow_count = diff - move_desc_to_global;
+			}
+
+			/* Move desc to temp_list */
+			for (i = 0; i < move_desc_to_global; i++) {
+				tx_desc = ol_tx_get_desc_flow_pool(pool);
+				((union ol_tx_desc_list_elem_t *)tx_desc)->next
+					= temp_list;
+				temp_list =
+					(union ol_tx_desc_list_elem_t *)tx_desc;
+			}
+		}
+
+		/* update pool size and threshold */
+		ol_tx_flow_pool_update(pool, new_pool_size, 0, overflow_count);
+		update_done = 1;
+	}
+	qdf_spin_unlock_bh(&pool->flow_pool_lock);
+
+	if (move_desc_to_global && temp_list) {
+		/* put free descriptors to global pool */
+		qdf_spin_lock_bh(&pdev->tx_mutex);
+		for (i = 0; i < move_desc_to_global; i++) {
+			tx_desc = &temp_list->tx_desc;
+			temp_list = temp_list->next;
+			ol_tx_put_desc_global_pool(pdev, tx_desc);
+		}
+		qdf_spin_unlock_bh(&pdev->tx_mutex);
+	}
+
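+	/* Shrink path already updated the pool; skip the grow path below */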
+	if (update_done)
+		goto update_done;
+
+	/* Increase pool size */
+	qdf_spin_lock_bh(&pool->flow_pool_lock);
+	if (pool->flow_pool_size < new_pool_size) {
+		diff = new_pool_size - pool->flow_pool_size;
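+		/* Add outstanding deficit to diff, then cancel against overflow */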
+		diff += pool->deficient_desc;
+		pool->deficient_desc = 0;
+		temp = QDF_MIN(pool->overflow_desc, diff);
+		pool->overflow_desc -= temp;
+		diff -= temp;
+	}
+	qdf_spin_unlock_bh(&pool->flow_pool_lock);
+
+	if (diff) {
+		/* take descriptors from the global pool */
+		qdf_spin_lock_bh(&pdev->tx_mutex);
+
+		if (pdev->tx_desc.num_free >= diff) {
+			move_desc_from_global = diff;
+		} else {
+			move_desc_from_global = pdev->tx_desc.num_free;
+			deficient_count = diff - move_desc_from_global;
+		}
+
+		for (i = 0; i < move_desc_from_global; i++) {
+			tx_desc = ol_tx_get_desc_global_pool(pdev);
+			((union ol_tx_desc_list_elem_t *)tx_desc)->next =
+				temp_list;
+			temp_list = (union ol_tx_desc_list_elem_t *)tx_desc;
+		}
+		qdf_spin_unlock_bh(&pdev->tx_mutex);
+	}
+	/* update desc to pool */
+	qdf_spin_lock_bh(&pool->flow_pool_lock);
+	if (move_desc_from_global && temp_list) {
+		for (i = 0; i < move_desc_from_global; i++) {
+			tx_desc = &temp_list->tx_desc;
+			temp_list = temp_list->next;
+			ol_tx_put_desc_flow_pool(pool, tx_desc);
+		}
+	}
+	/* update pool size and threshold */
+	ol_tx_flow_pool_update(pool, new_pool_size, deficient_count, 0);
+	qdf_spin_unlock_bh(&pool->flow_pool_lock);
+
+update_done:
+
+	ol_tx_flow_pool_update_queue_state(pdev, pool);
+}
+
+/**
+ * ol_tx_flow_pool_resize_handler() - resize the pool with the new size
+ * @flow_pool_id: pool id
+ * @flow_pool_size: pool size
+ *
+ * Process the target-to-host message
+ * HTT_T2H_MSG_TYPE_FLOW_POOL_RESIZE.
+ *
+ * Return: none
+ */
+void ol_tx_flow_pool_resize_handler(uint8_t flow_pool_id,
+				    uint16_t flow_pool_size)
+{
+	struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
+	struct ol_tx_flow_pool_t *pool;
+
+	ol_txrx_dbg("%s: flow_pool_id %d flow_pool_size %d\n",
+		    __func__, flow_pool_id, flow_pool_size);
+
+	if (qdf_unlikely(!pdev)) {
+		ol_txrx_err("%s: pdev is NULL", __func__);
+		return;
+	}
+	pdev->pool_stats.pool_resize_count++;
+
+	pool = ol_tx_get_flow_pool(flow_pool_id);
+	if (!pool) {
+		ol_txrx_err("%s: resize for flow_pool %d size %d failed\n",
+			    __func__, flow_pool_id, flow_pool_size);
+		return;
+	}
+
+	ol_tx_inc_pool_ref(pool);
+	ol_tx_flow_pool_resize(pool, flow_pool_size);
+	ol_tx_dec_pool_ref(pool, false);
+}
+#endif
 