qcacmn: Add DP callbacks for flow_pool create and delete

Currently the lithium_dp flow pool is created in wma_vdev_start, and
wma_vdev_start is called multiple times for the same vdev. This leads
to the creation of multiple flow pools per vdev. Instead, create the
flow pool from the policy manager when the session becomes active.

Register flow pool create/delete callbacks with the policy manager,
so that lithium_dp vdev flow pools are created/deleted when the vdev
becomes active/inactive.

Change-Id: Iaf6aaece47c79c7e6f7745feaee35a6bc5cd1297
CRs-Fixed: 2231601
This commit is contained in:
Manjunathappa Prakash
2018-05-08 19:55:25 -07:00
committed by nshrivas
parent 92d87f5161
commit e6aba4f65f
5 changed files with 36 additions and 5 deletions

View File

@@ -121,16 +121,17 @@ struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
uint32_t stop_threshold; uint32_t stop_threshold;
uint32_t start_threshold; uint32_t start_threshold;
if (!soc) { if (flow_pool_id >= MAX_TXDESC_POOLS) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"%s: soc is NULL\n", __func__); "%s: invalid flow_pool_id %d", __func__, flow_pool_id);
return NULL; return NULL;
} }
pool = &soc->tx_desc[flow_pool_id]; pool = &soc->tx_desc[flow_pool_id];
qdf_spin_lock_bh(&pool->flow_pool_lock); qdf_spin_lock_bh(&pool->flow_pool_lock);
if (pool->status == FLOW_POOL_INVALID) { if ((pool->status != FLOW_POOL_INACTIVE) || pool->pool_create_cnt) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR, QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"%s: flow pool already allocated\n", __func__); "%s: flow pool already allocated, attached %d times\n",
__func__, pool->pool_create_cnt);
if (pool->avail_desc > pool->start_th) if (pool->avail_desc > pool->start_th)
pool->status = FLOW_POOL_ACTIVE_UNPAUSED; pool->status = FLOW_POOL_ACTIVE_UNPAUSED;
else else
@@ -155,6 +156,7 @@ struct dp_tx_desc_pool_s *dp_tx_create_flow_pool(struct dp_soc *soc,
/* INI is in percentage so divide by 100 */ /* INI is in percentage so divide by 100 */
pool->start_th = (start_threshold * flow_pool_size)/100; pool->start_th = (start_threshold * flow_pool_size)/100;
pool->stop_th = (stop_threshold * flow_pool_size)/100; pool->stop_th = (stop_threshold * flow_pool_size)/100;
pool->pool_create_cnt++;
qdf_spin_unlock_bh(&pool->flow_pool_lock); qdf_spin_unlock_bh(&pool->flow_pool_lock);
@@ -185,10 +187,25 @@ int dp_tx_delete_flow_pool(struct dp_soc *soc, struct dp_tx_desc_pool_s *pool,
} }
qdf_spin_lock_bh(&pool->flow_pool_lock); qdf_spin_lock_bh(&pool->flow_pool_lock);
if (!pool->pool_create_cnt) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"flow pool either not created or alread deleted");
qdf_spin_unlock_bh(&pool->flow_pool_lock);
return -ENOENT;
}
pool->pool_create_cnt--;
if (pool->pool_create_cnt) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"%s: pool is still attached, pending detach %d\n",
__func__, pool->pool_create_cnt);
qdf_spin_unlock_bh(&pool->flow_pool_lock);
return -EAGAIN;
}
if (pool->avail_desc < pool->pool_size) { if (pool->avail_desc < pool->pool_size) {
pool->status = FLOW_POOL_INVALID; pool->status = FLOW_POOL_INVALID;
qdf_spin_unlock_bh(&pool->flow_pool_lock); qdf_spin_unlock_bh(&pool->flow_pool_lock);
return EAGAIN; return -EAGAIN;
} }
/* We have all the descriptors for the pool, we can delete the pool */ /* We have all the descriptors for the pool, we can delete the pool */

View File

@@ -372,6 +372,7 @@ struct dp_tx_desc_pool_s {
uint16_t start_th; uint16_t start_th;
uint16_t pkt_drop_no_desc; uint16_t pkt_drop_no_desc;
qdf_spinlock_t flow_pool_lock; qdf_spinlock_t flow_pool_lock;
uint8_t pool_create_cnt;
void *pool_owner_ctx; void *pool_owner_ctx;
#else #else
uint16_t elem_count; uint16_t elem_count;

View File

@@ -858,11 +858,15 @@ struct policy_mgr_cdp_cbacks {
* @hdd_disable_rx_ol_in_concurrency: Callback to disable LRO/GRO offloads * @hdd_disable_rx_ol_in_concurrency: Callback to disable LRO/GRO offloads
* @hdd_set_rx_mode_rps_cb: Callback to set RPS * @hdd_set_rx_mode_rps_cb: Callback to set RPS
* @hdd_ipa_set_mcc_mode_cb: Callback to set mcc mode for ipa module * @hdd_ipa_set_mcc_mode_cb: Callback to set mcc mode for ipa module
* @hdd_v2_flow_pool_map: Callback to create vdev flow pool
* @hdd_v2_flow_pool_unmap: Callback to delete vdev flow pool
*/ */
struct policy_mgr_dp_cbacks { struct policy_mgr_dp_cbacks {
void (*hdd_disable_rx_ol_in_concurrency)(bool); void (*hdd_disable_rx_ol_in_concurrency)(bool);
void (*hdd_set_rx_mode_rps_cb)(bool); void (*hdd_set_rx_mode_rps_cb)(bool);
void (*hdd_ipa_set_mcc_mode_cb)(bool); void (*hdd_ipa_set_mcc_mode_cb)(bool);
void (*hdd_v2_flow_pool_map)(int);
void (*hdd_v2_flow_pool_unmap)(int);
}; };
/** /**

View File

@@ -1122,6 +1122,8 @@ void policy_mgr_incr_active_session(struct wlan_objmgr_psoc *psoc,
break; break;
} }
if (pm_ctx->dp_cbacks.hdd_v2_flow_pool_map)
pm_ctx->dp_cbacks.hdd_v2_flow_pool_map(session_id);
policy_mgr_debug("No.# of active sessions for mode %d = %d", policy_mgr_debug("No.# of active sessions for mode %d = %d",
mode, pm_ctx->no_of_active_sessions[mode]); mode, pm_ctx->no_of_active_sessions[mode]);
@@ -1212,6 +1214,9 @@ QDF_STATUS policy_mgr_decr_active_session(struct wlan_objmgr_psoc *psoc,
break; break;
} }
if (pm_ctx->dp_cbacks.hdd_v2_flow_pool_unmap)
pm_ctx->dp_cbacks.hdd_v2_flow_pool_unmap(session_id);
policy_mgr_debug("No.# of active sessions for mode %d = %d", policy_mgr_debug("No.# of active sessions for mode %d = %d",
mode, pm_ctx->no_of_active_sessions[mode]); mode, pm_ctx->no_of_active_sessions[mode]);

View File

@@ -642,6 +642,10 @@ QDF_STATUS policy_mgr_register_dp_cb(struct wlan_objmgr_psoc *psoc,
dp_cbacks->hdd_set_rx_mode_rps_cb; dp_cbacks->hdd_set_rx_mode_rps_cb;
pm_ctx->dp_cbacks.hdd_ipa_set_mcc_mode_cb = pm_ctx->dp_cbacks.hdd_ipa_set_mcc_mode_cb =
dp_cbacks->hdd_ipa_set_mcc_mode_cb; dp_cbacks->hdd_ipa_set_mcc_mode_cb;
pm_ctx->dp_cbacks.hdd_v2_flow_pool_map =
dp_cbacks->hdd_v2_flow_pool_map;
pm_ctx->dp_cbacks.hdd_v2_flow_pool_unmap =
dp_cbacks->hdd_v2_flow_pool_unmap;
return QDF_STATUS_SUCCESS; return QDF_STATUS_SUCCESS;
} }