diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 59d24b68ab..0d96a11817 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -9521,6 +9521,11 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
 		result = -ENOMEM;
 		goto fail_gsi_map;
 	}
+	mutex_init(&ipa3_ctx->recycle_stats_collection_lock);
+	memset(&ipa3_ctx->recycle_stats, 0, sizeof(struct ipa_lnx_pipe_page_recycling_stats));
+	memset(&ipa3_ctx->prev_coal_recycle_stats, 0, sizeof(struct ipa3_page_recycle_stats));
+	memset(&ipa3_ctx->prev_default_recycle_stats, 0, sizeof(struct ipa3_page_recycle_stats));
+	memset(&ipa3_ctx->prev_low_lat_data_recycle_stats, 0, sizeof(struct ipa3_page_recycle_stats));
 
 	ipa3_ctx->transport_power_mgmt_wq =
 		create_singlethread_workqueue("transport_power_mgmt");
@@ -9530,6 +9535,17 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
 		goto fail_create_transport_wq;
 	}
 
+	/* Create workqueue for recycle stats collection */
+	ipa3_ctx->collect_recycle_stats_wq =
+		create_singlethread_workqueue("page_recycle_stats_collection");
+	if (!ipa3_ctx->collect_recycle_stats_wq) {
+		IPAERR("failed to create page recycling stats collection wq\n");
+		result = -ENOMEM;
+		goto fail_create_recycle_stats_wq;
+	}
+	memset(&ipa3_ctx->recycle_stats, 0,
+		sizeof(ipa3_ctx->recycle_stats));
+
 	mutex_init(&ipa3_ctx->transport_pm.transport_pm_mutex);
 
 	/* init the lookaside cache */
@@ -9864,6 +9880,8 @@ fail_hdr_cache:
 fail_rt_rule_cache:
 	kmem_cache_destroy(ipa3_ctx->flt_rule_cache);
 fail_flt_rule_cache:
+	destroy_workqueue(ipa3_ctx->collect_recycle_stats_wq);
+fail_create_recycle_stats_wq:
 	destroy_workqueue(ipa3_ctx->transport_power_mgmt_wq);
 fail_create_transport_wq:
 	destroy_workqueue(ipa3_ctx->power_mgmt_wq);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index baf3c7b448..df30080974 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -26,6 +26,7 @@
 #include "ipa_trace.h"
 #include "ipahal.h"
 #include "ipahal_fltrt.h"
+#include "ipa_stats.h"
 
 #define IPA_GSI_EVENT_RP_SIZE 8
 #define IPA_WAN_NAPI_MAX_FRAMES (NAPI_WEIGHT / IPA_WAN_AGGR_PKT_CNT)
@@ -155,6 +156,156 @@ static int ipa3_rmnet_ll_rx_poll(struct napi_struct *napi_rx, int budget);
 
 struct gsi_chan_xfer_notify g_lan_rx_notify[IPA_LAN_NAPI_MAX_FRAMES];
 
+static void ipa3_collect_default_coal_recycle_stats_wq(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa3_collect_default_coal_recycle_stats_wq_work,
+	ipa3_collect_default_coal_recycle_stats_wq);
+
+static void ipa3_collect_low_lat_data_recycle_stats_wq(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa3_collect_low_lat_data_recycle_stats_wq_work,
+	ipa3_collect_low_lat_data_recycle_stats_wq);
+
+static void ipa3_collect_default_coal_recycle_stats_wq(struct work_struct *work)
+{
+	struct ipa3_sys_context *sys;
+	int stat_interval_index;
+	int ep_idx = -1;
+
+	/* For targets which don't require coalescing pipe */
+	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+	if (ep_idx == -1)
+		ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
+
+	if (ep_idx == -1)
+		sys = NULL;
+	else
+		sys = ipa3_ctx->ep[ep_idx].sys;
+
+	mutex_lock(&ipa3_ctx->recycle_stats_collection_lock);
+	stat_interval_index = ipa3_ctx->recycle_stats.default_coal_stats_index;
+	ipa3_ctx->recycle_stats.interval_time_in_ms = IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_TIME;
+
+	/* Coalescing pipe page recycling stats */
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].total_cumulative
+		= ipa3_ctx->stats.page_recycle_stats[0].total_replenished;
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].recycle_cumulative
+		= ipa3_ctx->stats.page_recycle_stats[0].page_recycled;
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].temp_cumulative
+		= ipa3_ctx->stats.page_recycle_stats[0].tmp_alloc;
+
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].total_diff
+		= ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].total_cumulative
+		- ipa3_ctx->prev_coal_recycle_stats.total_replenished;
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].recycle_diff
+		= ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].recycle_cumulative
+		- ipa3_ctx->prev_coal_recycle_stats.page_recycled;
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].temp_diff
+		= ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].temp_cumulative
+		- ipa3_ctx->prev_coal_recycle_stats.tmp_alloc;
+
+	ipa3_ctx->prev_coal_recycle_stats.total_replenished
+		= ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].total_cumulative;
+	ipa3_ctx->prev_coal_recycle_stats.page_recycled
+		= ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].recycle_cumulative;
+	ipa3_ctx->prev_coal_recycle_stats.tmp_alloc
+		= ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].temp_cumulative;
+
+	/* Default pipe page recycling stats */
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].total_cumulative
+		= ipa3_ctx->stats.page_recycle_stats[1].total_replenished;
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].recycle_cumulative
+		= ipa3_ctx->stats.page_recycle_stats[1].page_recycled;
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].temp_cumulative
+		= ipa3_ctx->stats.page_recycle_stats[1].tmp_alloc;
+
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].total_diff
+		= ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].total_cumulative
+		- ipa3_ctx->prev_default_recycle_stats.total_replenished;
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].recycle_diff
+		= ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].recycle_cumulative
+		- ipa3_ctx->prev_default_recycle_stats.page_recycled;
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].temp_diff
+		= ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].temp_cumulative
+		- ipa3_ctx->prev_default_recycle_stats.tmp_alloc;
+
+	ipa3_ctx->prev_default_recycle_stats.total_replenished
+		= ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].total_cumulative;
+	ipa3_ctx->prev_default_recycle_stats.page_recycled
+		= ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].recycle_cumulative;
+	ipa3_ctx->prev_default_recycle_stats.tmp_alloc
+		= ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].temp_cumulative;
+
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_COALESCING][stat_interval_index].valid = 1;
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_DEFAULT][stat_interval_index].valid = 1;
+
+	/* Single Indexing for coalescing and default pipe */
+	ipa3_ctx->recycle_stats.default_coal_stats_index =
+		(ipa3_ctx->recycle_stats.default_coal_stats_index + 1) % IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_COUNT;
+
+	if (sys && atomic_read(&sys->curr_polling_state))
+		queue_delayed_work(ipa3_ctx->collect_recycle_stats_wq,
+			&ipa3_collect_default_coal_recycle_stats_wq_work, msecs_to_jiffies(10));
+
+	mutex_unlock(&ipa3_ctx->recycle_stats_collection_lock);
+
+	return;
+
+}
+
+static void ipa3_collect_low_lat_data_recycle_stats_wq(struct work_struct *work)
+{
+	struct ipa3_sys_context *sys;
+	int stat_interval_index;
+	int ep_idx;
+
+	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS);
+	if (ep_idx == -1)
+		sys = NULL;
+	else
+		sys = ipa3_ctx->ep[ep_idx].sys;
+
+	mutex_lock(&ipa3_ctx->recycle_stats_collection_lock);
+	stat_interval_index = ipa3_ctx->recycle_stats.low_lat_stats_index;
+
+	/* Low latency data pipe page recycling stats */
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].total_cumulative
+		= ipa3_ctx->stats.page_recycle_stats[2].total_replenished;
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].recycle_cumulative
+		= ipa3_ctx->stats.page_recycle_stats[2].page_recycled;
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].temp_cumulative
+		= ipa3_ctx->stats.page_recycle_stats[2].tmp_alloc;
+
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].total_diff
+		= ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].total_cumulative
+		- ipa3_ctx->prev_low_lat_data_recycle_stats.total_replenished;
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].recycle_diff
+		= ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].recycle_cumulative
+		- ipa3_ctx->prev_low_lat_data_recycle_stats.page_recycled;
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].temp_diff
+		= ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].temp_cumulative
+		- ipa3_ctx->prev_low_lat_data_recycle_stats.tmp_alloc;
+
+	ipa3_ctx->prev_low_lat_data_recycle_stats.total_replenished
+		= ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].total_cumulative;
+	ipa3_ctx->prev_low_lat_data_recycle_stats.page_recycled
+		= ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].recycle_cumulative;
+	ipa3_ctx->prev_low_lat_data_recycle_stats.tmp_alloc
+		= ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].temp_cumulative;
+
+	ipa3_ctx->recycle_stats.rx_channel[RX_WAN_LOW_LAT_DATA][stat_interval_index].valid = 1;
+
+	/* Indexing for low lat data stats pipe */
+	ipa3_ctx->recycle_stats.low_lat_stats_index =
+		(ipa3_ctx->recycle_stats.low_lat_stats_index + 1) % IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_COUNT;
+
+	if (sys && atomic_read(&sys->curr_polling_state))
+		queue_delayed_work(ipa3_ctx->collect_recycle_stats_wq,
+			&ipa3_collect_low_lat_data_recycle_stats_wq_work, msecs_to_jiffies(10));
+
+	mutex_unlock(&ipa3_ctx->recycle_stats_collection_lock);
+
+	return;
+}
+
 /**
  * ipa3_write_done_common() - this function is responsible on freeing
  * all tx_pkt_wrappers related to a skb
@@ -7003,6 +7154,9 @@ start_poll:
 	/* call repl_hdlr before napi_reschedule / napi_complete */
 	ep->sys->repl_hdlr(ep->sys);
 	wan_def_sys->repl_hdlr(wan_def_sys);
+	/* Scheduling WAN and COAL collect stats work queue */
+	queue_delayed_work(ipa3_ctx->collect_recycle_stats_wq,
+		&ipa3_collect_default_coal_recycle_stats_wq_work, msecs_to_jiffies(10));
 	/* When not able to replenish enough descriptors, keep in polling
 	 * mode, wait for napi-poll and replenish again.
 	 */
@@ -7191,7 +7345,6 @@ static int ipa3_rmnet_ll_rx_poll(struct napi_struct *napi_rx, int budget)
 
 	IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log, "NAPI_LL");
 
-
 	remain_aggr_weight = budget / ipa3_ctx->ipa_wan_aggr_pkt_cnt;
 	if (remain_aggr_weight > IPA_WAN_NAPI_MAX_FRAMES) {
 		IPAERR("NAPI weight is higher than expected\n");
@@ -7231,6 +7384,9 @@ start_poll:
 	cnt += budget - remain_aggr_weight * ipa3_ctx->ipa_wan_aggr_pkt_cnt;
 	/* call repl_hdlr before napi_reschedule / napi_complete */
 	sys->repl_hdlr(sys);
+	/* Scheduling RMNET LOW LAT DATA collect stats work queue */
+	queue_delayed_work(ipa3_ctx->collect_recycle_stats_wq,
+		&ipa3_collect_low_lat_data_recycle_stats_wq_work, msecs_to_jiffies(10));
 	/* When not able to replenish enough descriptors, keep in polling
 	 * mode, wait for napi-poll and replenish again.
 	 */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 6afc5a07f0..1922d0fa43 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -2585,6 +2585,13 @@ struct ipa3_context {
 	phys_addr_t per_stats_smem_pa;
 	void *per_stats_smem_va;
 	u32 ipa_smem_size;
+	bool is_dual_pine_config;
+	struct workqueue_struct *collect_recycle_stats_wq;
+	struct ipa_lnx_pipe_page_recycling_stats recycle_stats;
+	struct ipa3_page_recycle_stats prev_coal_recycle_stats;
+	struct ipa3_page_recycle_stats prev_default_recycle_stats;
+	struct ipa3_page_recycle_stats prev_low_lat_data_recycle_stats;
+	struct mutex recycle_stats_collection_lock;
 };
 
 struct ipa3_plat_drv_res {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_stats.c b/drivers/platform/msm/ipa/ipa_v3/ipa_stats.c
index a771ecd246..43f48a9eab 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_stats.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_stats.c
@@ -1472,6 +1472,42 @@ success:
 }
 #endif
 
+static int ipa_get_page_recycle_stats(unsigned long arg)
+{
+	struct ipa_lnx_pipe_page_recycling_stats *page_recycle_stats;
+	int alloc_size;
+
+	alloc_size = sizeof(struct ipa_lnx_pipe_page_recycling_stats);
+
+	page_recycle_stats = (struct ipa_lnx_pipe_page_recycling_stats *) memdup_user((
+		const void __user *)arg, alloc_size);
+	if (IS_ERR(page_recycle_stats)) {
+		IPA_STATS_ERR("copy from user failed");
+		return -ENOMEM;
+	}
+
+	mutex_lock(&ipa3_ctx->recycle_stats_collection_lock);
+	memcpy(page_recycle_stats, &ipa3_ctx->recycle_stats,
+		sizeof(struct ipa_lnx_pipe_page_recycling_stats));
+
+	/* Clear all the data and valid bits */
+	memset(&ipa3_ctx->recycle_stats, 0,
+		sizeof(struct ipa_lnx_pipe_page_recycling_stats));
+
+	mutex_unlock(&ipa3_ctx->recycle_stats_collection_lock);
+
+	if(copy_to_user((void __user *)arg,
+		(u8 *)page_recycle_stats,
+		alloc_size)) {
+		IPA_STATS_ERR("copy to user failed");
+		kfree(page_recycle_stats);
+		return -EFAULT;
+	}
+
+	kfree(page_recycle_stats);
+	return 0;
+}
+
 static int ipa_stats_get_alloc_info(unsigned long arg)
 {
 	int i = 0;
@@ -1665,41 +1701,44 @@ static int ipa_stats_get_alloc_info(unsigned long arg)
 
 #if IS_ENABLED(CONFIG_IPA3_MHI_PRIME_MANAGER)
 	if (!ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio) {
 		ipa_lnx_agent_ctx.alloc_info.num_mhip_instances = 0;
-		goto success;
+	} else {
+		if (ipa_usb_is_teth_prot_connected(IPA_USB_RNDIS))
+			ipa_lnx_agent_ctx.usb_teth_prot[0] = IPA_USB_RNDIS;
+		else if(ipa_usb_is_teth_prot_connected(IPA_USB_RMNET))
+			ipa_lnx_agent_ctx.usb_teth_prot[0] = IPA_USB_RMNET;
+		else ipa_lnx_agent_ctx.usb_teth_prot[0] = IPA_USB_MAX_TETH_PROT_SIZE;
+		ipa_lnx_agent_ctx.alloc_info.num_mhip_instances = 1;
+		ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].num_pipes = 4;
+		ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].num_tx_instances = 2;
+		ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].num_rx_instances = 2;
+		ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].pipes_client_type[0] =
+			IPA_CLIENT_MHI_PRIME_TETH_CONS;
+		ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].pipes_client_type[1] =
+			IPA_CLIENT_MHI_PRIME_TETH_PROD;
+		ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].pipes_client_type[2] =
+			IPA_CLIENT_MHI_PRIME_RMNET_CONS;
+		ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].pipes_client_type[3] =
+			IPA_CLIENT_MHI_PRIME_RMNET_PROD;
+		ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].tx_inst_client_type[0]
+			= IPA_CLIENT_MHI_PRIME_TETH_CONS;
+		ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].tx_inst_client_type[1]
+			= IPA_CLIENT_MHI_PRIME_RMNET_CONS;
+		ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].rx_inst_client_type[0]
+			= IPA_CLIENT_MHI_PRIME_TETH_PROD;
+		ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].rx_inst_client_type[1]
+			= IPA_CLIENT_MHI_PRIME_RMNET_PROD;
+	}
-	if (ipa_usb_is_teth_prot_connected(IPA_USB_RNDIS))
-		ipa_lnx_agent_ctx.usb_teth_prot[0] = IPA_USB_RNDIS;
-	else if(ipa_usb_is_teth_prot_connected(IPA_USB_RMNET))
-		ipa_lnx_agent_ctx.usb_teth_prot[0] = IPA_USB_RMNET;
-	else ipa_lnx_agent_ctx.usb_teth_prot[0] = IPA_USB_MAX_TETH_PROT_SIZE;
-	ipa_lnx_agent_ctx.alloc_info.num_mhip_instances = 1;
-	ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].num_pipes = 4;
-	ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].num_tx_instances = 2;
-	ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].num_rx_instances = 2;
-	ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].pipes_client_type[0] =
-		IPA_CLIENT_MHI_PRIME_TETH_CONS;
-	ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].pipes_client_type[1] =
-		IPA_CLIENT_MHI_PRIME_TETH_PROD;
-	ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].pipes_client_type[2] =
-		IPA_CLIENT_MHI_PRIME_RMNET_CONS;
-	ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].pipes_client_type[3] =
-		IPA_CLIENT_MHI_PRIME_RMNET_PROD;
-	ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].tx_inst_client_type[0]
-		= IPA_CLIENT_MHI_PRIME_TETH_CONS;
-	ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].tx_inst_client_type[1]
-		= IPA_CLIENT_MHI_PRIME_RMNET_CONS;
-	ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].rx_inst_client_type[0]
-		= IPA_CLIENT_MHI_PRIME_TETH_PROD;
-	ipa_lnx_agent_ctx.alloc_info.mhip_inst_info[0].rx_inst_client_type[1]
-		= IPA_CLIENT_MHI_PRIME_RMNET_PROD;
-
-success:
 #else /* MHI Prime is not enabled */
 	ipa_lnx_agent_ctx.alloc_info.num_mhip_instances = 0;
 #endif
 	}
 
+	/* For Page recycling stats for default, coal and Low lat pipes */
+	if (ipa_lnx_agent_ctx.log_type_mask & SPRHD_IPA_LOG_TYPE_RECYCLE_STATS)
+		ipa_lnx_agent_ctx.alloc_info.num_page_rec_interval =
+			IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_COUNT;
+
 	if(copy_to_user((u8 *)arg, &ipa_lnx_agent_ctx,
 		sizeof(struct ipa_lnx_stats_spearhead_ctx))) {
@@ -1818,6 +1857,13 @@ static long ipa_lnx_stats_ioctl(struct file *filp,
 			}
 #endif
 		}
+		if (consolidated_stats->log_type_mask & SPRHD_IPA_LOG_TYPE_RECYCLE_STATS) {
+			retval = ipa_get_page_recycle_stats((unsigned long) consolidated_stats->recycle_stats);
+			if (retval) {
+				IPA_STATS_ERR("ipa get page recycle stats fail\n");
+				break;
+			}
+		}
 		break;
 	default:
 		retval = -ENOTTY;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_stats.h b/drivers/platform/msm/ipa/ipa_v3/ipa_stats.h
index 45ee926f1e..8e0ddfd684 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_stats.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_stats.h
@@ -56,6 +56,9 @@
 
 #define SPEARHEAD_NUM_MAX_INSTANCES 2
 
+#define IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_COUNT 5
+#define IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_TIME 10 /* In milliseconds */
+
 /**
  * This is used to indicate which set of logs is enabled from IPA
  * These bitmapped macros are copied from
@@ -67,6 +70,7 @@
 #define SPRHD_IPA_LOG_TYPE_ETH_STATS 0x00008
 #define SPRHD_IPA_LOG_TYPE_USB_STATS 0x00010
 #define SPRHD_IPA_LOG_TYPE_MHIP_STATS 0x00020
+#define SPRHD_IPA_LOG_TYPE_RECYCLE_STATS 0x00040
 
 
 /**
@@ -340,7 +344,6 @@ struct ipa_lnx_mhip_inst_stats {
 };
 #define IPA_LNX_MHIP_INST_STATS_STRUCT_LEN_INT (8 + 248)
 
-
 struct ipa_lnx_consolidated_stats {
 	uint64_t log_type_mask;
 	struct ipa_lnx_generic_stats *generic_stats;
@@ -349,9 +352,43 @@ struct ipa_lnx_consolidated_stats {
 	struct ipa_lnx_eth_inst_stats *eth_stats;
 	struct ipa_lnx_usb_inst_stats *usb_stats;
 	struct ipa_lnx_mhip_inst_stats *mhip_stats;
+	struct ipa_lnx_pipe_page_recycling_stats *recycle_stats;
 };
 #define IPA_LNX_CONSOLIDATED_STATS_STRUCT_LEN_INT (8 + 48)
 
+enum rx_channel_type {
+	RX_WAN_COALESCING,
+	RX_WAN_DEFAULT,
+	RX_WAN_LOW_LAT_DATA,
+	RX_CHANNEL_MAX,
+};
+
+struct ipa_lnx_recycling_stats {
+	uint64_t total_cumulative;
+	uint64_t recycle_cumulative;
+	uint64_t temp_cumulative;
+	uint64_t total_diff;
+	uint64_t recycle_diff;
+	uint64_t temp_diff;
+	uint64_t valid;
+};
+
+/**
+ * The consolidated stats will be in the 0th index.
+ * Diffs between each interval's values will be in
+ * indices 1 to (IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_COUNT - 1).
+ * @interval_time_in_ms: Interval time in milliseconds
+ */
+struct ipa_lnx_pipe_page_recycling_stats {
+	uint32_t interval_time_in_ms;
+	uint32_t default_coal_stats_index;
+	uint32_t low_lat_stats_index;
+	uint32_t sequence_id;
+	uint64_t reserved;
+	struct ipa_lnx_recycling_stats rx_channel[RX_CHANNEL_MAX][IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_COUNT];
+};
+
 /* Explain below structures */
 struct ipa_lnx_each_inst_alloc_info {
 	uint32_t pipes_client_type[SPEARHEAD_NUM_MAX_PIPES];
@@ -372,7 +409,7 @@ struct ipa_lnx_stats_alloc_info {
 	uint32_t num_eth_instances;
 	uint32_t num_usb_instances;
 	uint32_t num_mhip_instances;
-	uint32_t reserved;
+	uint32_t num_page_rec_interval;
 	struct ipa_lnx_each_inst_alloc_info wlan_inst_info[SPEARHEAD_NUM_MAX_INSTANCES];
 	struct ipa_lnx_each_inst_alloc_info eth_inst_info[SPEARHEAD_NUM_MAX_INSTANCES];
 	struct ipa_lnx_each_inst_alloc_info usb_inst_info[SPEARHEAD_NUM_MAX_INSTANCES];
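
Reviewer note: below is a minimal, illustrative userspace-style sketch of how a spearhead stats client might walk the snapshot that ipa_get_page_recycle_stats() copies back through consolidated_stats->recycle_stats. Only the SPRHD_IPA_LOG_TYPE_RECYCLE_STATS bit, enum rx_channel_type and the two recycling-stats structs come from this patch; the dump helper, the channel-name table, and the assumption that the ipa_stats.h definitions are visible to the caller are mine, and the ioctl plumbing that fills the buffer is deliberately omitted.

/* Illustrative only -- assumes the enum/struct definitions from ipa_stats.h
 * are visible, and that "snap" already holds the buffer returned through
 * consolidated_stats->recycle_stats after a consolidated-stats request with
 * SPRHD_IPA_LOG_TYPE_RECYCLE_STATS set in log_type_mask.
 */
#include <stdio.h>
#include <stdint.h>

static const char * const rx_ch_name[RX_CHANNEL_MAX] = {
	[RX_WAN_COALESCING]   = "wan_coal",
	[RX_WAN_DEFAULT]      = "wan_default",
	[RX_WAN_LOW_LAT_DATA] = "wan_low_lat_data",
};

static void dump_page_recycle_snapshot(
	const struct ipa_lnx_pipe_page_recycling_stats *snap)
{
	int ch, i;

	printf("interval=%ums coal/def_idx=%u low_lat_idx=%u\n",
	       (unsigned)snap->interval_time_in_ms,
	       (unsigned)snap->default_coal_stats_index,
	       (unsigned)snap->low_lat_stats_index);

	for (ch = 0; ch < RX_CHANNEL_MAX; ch++) {
		for (i = 0; i < IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_COUNT; i++) {
			const struct ipa_lnx_recycling_stats *s =
				&snap->rx_channel[ch][i];

			/* the collection workers only set valid on slots they filled */
			if (!s->valid)
				continue;
			printf("%s[%d]: repl=%llu recycled=%llu tmp=%llu diff=%llu/%llu/%llu\n",
			       rx_ch_name[ch], i,
			       (unsigned long long)s->total_cumulative,
			       (unsigned long long)s->recycle_cumulative,
			       (unsigned long long)s->temp_cumulative,
			       (unsigned long long)s->total_diff,
			       (unsigned long long)s->recycle_diff,
			       (unsigned long long)s->temp_diff);
		}
	}
}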
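
Reviewer note on cadence: assuming the constants are left as posted, each collection worker re-queues itself every IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_TIME (10 ms) while the corresponding pipe stays in polling state, and each rx_channel[ch][] row holds IPA_LNX_PIPE_PAGE_RECYCLING_INTERVAL_COUNT (5) slots, so a row covers roughly the last 5 x 10 ms = 50 ms of polling activity before the oldest slot is overwritten. ipa_get_page_recycle_stats() clears the whole snapshot after copying it out, so on the next read the valid flags also indicate which slots were refilled since the previous query.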