Merge "msm: ipa3: LAN Coalescing Feature Addition"

This commit is contained in:
qctecmdr
2022-04-25 10:49:44 -07:00
committed by Gerrit - the friendly Code Review server
Current commit 300dd21e01
9 files changed, 2092 insertions and 302 deletions

查看文件

@@ -170,6 +170,19 @@ do {\
(x < IPA_CLIENT_MAX && (x & 0x1) == 0)
#define IPA_CLIENT_IS_CONS(x) \
(x < IPA_CLIENT_MAX && (x & 0x1) == 1)
/*
 * IPA_CLIENT_IS_MAPPED() - check whether client x has an allocated
 * endpoint, and capture that endpoint's index.
 *
 * Evaluates to true when x maps to an allocated endpoint.  NOTE: this
 * macro has a deliberate side effect: idx is ALWAYS assigned the
 * result of ipa3_get_ep_mapping(x), even on failure (in which case
 * idx == IPA_EP_NOT_ALLOCATED).  Callers must pass an lvalue for idx.
 */
#define IPA_CLIENT_IS_MAPPED(x, idx) \
((idx = ipa3_get_ep_mapping(x)) != IPA_EP_NOT_ALLOCATED)
/*
 * IPA_CLIENT_IS_MAPPED_VALID() - like IPA_CLIENT_IS_MAPPED(), but in
 * addition requires the mapped endpoint context to be marked valid
 * (ipa3_ctx->ep[idx].valid == 1).  Carries the same idx assignment
 * side effect as IPA_CLIENT_IS_MAPPED().
 */
#define IPA_CLIENT_IS_MAPPED_VALID(x, idx) \
(IPA_CLIENT_IS_MAPPED(x, idx) && ipa3_ctx->ep[idx].valid == 1)
#define IPA_CLIENT_IS_ETH_PROD(x) \
((x == ipa3_get_ep_mapping(IPA_CLIENT_ETHERNET_PROD)) || \
(x == ipa3_get_ep_mapping(IPA_CLIENT_ETHERNET2_PROD)) || \

查看文件

@@ -4345,62 +4345,65 @@ int ipa3_setup_dflt_rt_tables(void)
static int ipa3_setup_exception_path(void)
{
struct ipa_ioc_add_hdr *hdr;
struct ipa_hdr_add *hdr_entry;
struct ipahal_reg_route route = { 0 };
struct ipa3_hdr_entry *hdr_entry_internal;
int ret;
struct ipa_ioc_add_hdr *hdr = NULL;
int ret = 0;
/* install the basic exception header */
hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) + 1 *
sizeof(struct ipa_hdr_add), GFP_KERNEL);
if (!hdr)
return -ENOMEM;
if ( ! lan_coal_enabled() ) {
hdr->num_hdrs = 1;
hdr->commit = 1;
hdr_entry = &hdr->hdr[0];
struct ipa_hdr_add *hdr_entry;
struct ipahal_reg_route route = { 0 };
struct ipa3_hdr_entry *hdr_entry_internal;
strlcpy(hdr_entry->name, IPA_LAN_RX_HDR_NAME, IPA_RESOURCE_NAME_MAX);
hdr_entry->hdr_len = IPA_LAN_RX_HEADER_LENGTH;
/* install the basic exception header */
hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) + 1 *
sizeof(struct ipa_hdr_add), GFP_KERNEL);
if (!hdr)
return -ENOMEM;
if (ipa3_add_hdr(hdr)) {
IPAERR("fail to add exception hdr\n");
ret = -EPERM;
goto bail;
hdr->num_hdrs = 1;
hdr->commit = 1;
hdr_entry = &hdr->hdr[0];
strlcpy(hdr_entry->name, IPA_LAN_RX_HDR_NAME, IPA_RESOURCE_NAME_MAX);
hdr_entry->hdr_len = IPA_LAN_RX_HEADER_LENGTH;
if (ipa3_add_hdr(hdr)) {
IPAERR("fail to add exception hdr\n");
ret = -EPERM;
goto bail;
}
if (hdr_entry->status) {
IPAERR("fail to add exception hdr\n");
ret = -EPERM;
goto bail;
}
hdr_entry_internal = ipa3_id_find(hdr_entry->hdr_hdl);
if (unlikely(!hdr_entry_internal)) {
IPAERR("fail to find internal hdr structure\n");
ret = -EPERM;
goto bail;
}
ipa3_ctx->excp_hdr_hdl = hdr_entry->hdr_hdl;
/* set the route register to pass exception packets to Apps */
route.route_def_pipe = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
route.route_frag_def_pipe = ipa3_get_ep_mapping(
IPA_CLIENT_APPS_LAN_CONS);
route.route_def_hdr_table = !hdr_entry_internal->is_lcl;
route.route_def_retain_hdr = 1;
if (ipa3_cfg_route(&route)) {
IPAERR("fail to add exception hdr\n");
ret = -EPERM;
goto bail;
}
}
if (hdr_entry->status) {
IPAERR("fail to add exception hdr\n");
ret = -EPERM;
goto bail;
}
hdr_entry_internal = ipa3_id_find(hdr_entry->hdr_hdl);
if (unlikely(!hdr_entry_internal)) {
IPAERR("fail to find internal hdr structure\n");
ret = -EPERM;
goto bail;
}
ipa3_ctx->excp_hdr_hdl = hdr_entry->hdr_hdl;
/* set the route register to pass exception packets to Apps */
route.route_def_pipe = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
route.route_frag_def_pipe = ipa3_get_ep_mapping(
IPA_CLIENT_APPS_LAN_CONS);
route.route_def_hdr_table = !hdr_entry_internal->is_lcl;
route.route_def_retain_hdr = 1;
if (ipa3_cfg_route(&route)) {
IPAERR("fail to add exception hdr\n");
ret = -EPERM;
goto bail;
}
ret = 0;
bail:
kfree(hdr);
if ( hdr ) kfree(hdr);
return ret;
}
@@ -6115,35 +6118,75 @@ static int ipa3_setup_apps_pipes(void)
}
IPADBG("default routing was set\n");
/* LAN IN (IPA->AP) */
memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
sys_in.client = IPA_CLIENT_APPS_LAN_CONS;
sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
sys_in.notify = ipa3_lan_rx_cb;
sys_in.priv = NULL;
if (ipa3_ctx->lan_rx_napi_enable)
sys_in.napi_obj = &ipa3_ctx->napi_lan_rx;
sys_in.ipa_ep_cfg.hdr.hdr_len = IPA_LAN_RX_HEADER_LENGTH;
sys_in.ipa_ep_cfg.hdr_ext.hdr_little_endian = false;
sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = IPA_HDR_PAD;
sys_in.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = false;
sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
sys_in.ipa_ep_cfg.hdr_ext.hdr_pad_to_alignment = 2;
sys_in.ipa_ep_cfg.cfg.cs_offload_en = IPA_DISABLE_CS_OFFLOAD;
ipa3_ctx->clnt_hdl_data_in = 0;
/**
* ipa_lan_rx_cb() intended to notify the source EP about packet
* being received on the LAN_CONS via calling the source EP call-back.
* There could be a race condition with calling this call-back. Other
* thread may nullify it - e.g. on EP disconnect.
* This lock intended to protect the access to the source EP call-back
*/
spin_lock_init(&ipa3_ctx->disconnect_lock);
if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_data_in)) {
IPAERR(":setup sys pipe (LAN_CONS) failed.\n");
result = -EPERM;
goto fail_flt_hash_tuple;
if ( ipa3_ctx->ipa_hw_type >= IPA_HW_v5_5 ) {
/*
* LAN_COAL IN (IPA->AP)
*/
memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
sys_in.client = IPA_CLIENT_APPS_LAN_COAL_CONS;
sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
sys_in.notify = ipa3_lan_coal_rx_cb;
sys_in.priv = NULL;
if (ipa3_ctx->lan_rx_napi_enable)
sys_in.napi_obj = &ipa3_ctx->napi_lan_rx;
sys_in.ipa_ep_cfg.hdr_ext.hdr_little_endian = false;
sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = IPA_HDR_PAD;
sys_in.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = false;
sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
sys_in.ipa_ep_cfg.hdr_ext.hdr_pad_to_alignment = 2;
sys_in.ipa_ep_cfg.cfg.cs_offload_en = IPA_DISABLE_CS_OFFLOAD;
/**
* ipa3_lan_coal_rx_cb() intended to notify the source EP about
* packet being received on the LAN_COAL_CONS via calling the
* source EP call-back. There could be a race condition with
* calling this call-back. Other thread may nullify it - e.g. on
* EP disconnect. This lock intended to protect the access to the
* source EP call-back
*/
spin_lock_init(&ipa3_ctx->disconnect_lock);
if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_data_in)) {
IPAERR(":setup sys pipe (LAN_COAL_CONS) failed.\n");
result = -EPERM;
goto fail_flt_hash_tuple;
}
} else { /* ipa3_ctx->ipa_hw_type < IPA_HW_v5_5 */
/*
* LAN IN (IPA->AP)
*/
memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
sys_in.client = IPA_CLIENT_APPS_LAN_CONS;
sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
sys_in.notify = ipa3_lan_rx_cb;
sys_in.priv = NULL;
if (ipa3_ctx->lan_rx_napi_enable)
sys_in.napi_obj = &ipa3_ctx->napi_lan_rx;
sys_in.ipa_ep_cfg.hdr.hdr_len = IPA_LAN_RX_HEADER_LENGTH;
sys_in.ipa_ep_cfg.hdr_ext.hdr_little_endian = false;
sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = IPA_HDR_PAD;
sys_in.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = false;
sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
sys_in.ipa_ep_cfg.hdr_ext.hdr_pad_to_alignment = 2;
sys_in.ipa_ep_cfg.cfg.cs_offload_en = IPA_DISABLE_CS_OFFLOAD;
/**
* ipa_lan_rx_cb() intended to notify the source EP about packet
* being received on the LAN_CONS via calling the source EP call-back.
* There could be a race condition with calling this call-back. Other
* thread may nullify it - e.g. on EP disconnect.
* This lock intended to protect the access to the source EP call-back
*/
spin_lock_init(&ipa3_ctx->disconnect_lock);
if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_data_in)) {
IPAERR(":setup sys pipe (LAN_CONS) failed.\n");
result = -EPERM;
goto fail_flt_hash_tuple;
}
}
/* LAN OUT (AP->IPA) */
@@ -6172,7 +6215,8 @@ static int ipa3_setup_apps_pipes(void)
return 0;
fail_lan_data_out:
ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
if ( ipa3_ctx->clnt_hdl_data_in )
ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
fail_flt_hash_tuple:
if (ipa3_ctx->dflt_v6_rt_rule_hdl)
__ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl);
@@ -6189,7 +6233,8 @@ static void ipa3_teardown_apps_pipes(void)
{
if (!ipa3_ctx->ipa_config_is_mhi)
ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_out);
ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
if ( ipa3_ctx->clnt_hdl_data_in )
ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
__ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl);
__ipa3_del_rt_rule(ipa3_ctx->dflt_v4_rt_rule_hdl);
__ipa3_del_hdr(ipa3_ctx->excp_hdr_hdl, false);
@@ -6798,7 +6843,7 @@ static void __ipa3_dec_client_disable_clks(void)
*/
if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) == 1 &&
!ipa3_ctx->tag_process_before_gating) {
ipa3_force_close_coal();
ipa3_force_close_coal(true, true);
/* While sending force close command setting
* tag process as true to make configure to
* original state
@@ -8789,8 +8834,11 @@ static inline void ipa3_enable_napi_netdev(void)
if (ipa3_ctx->lan_rx_napi_enable || ipa3_ctx->tx_napi_enable) {
init_dummy_netdev(&ipa3_ctx->generic_ndev);
if(ipa3_ctx->lan_rx_napi_enable) {
netif_napi_add(&ipa3_ctx->generic_ndev, &ipa3_ctx->napi_lan_rx,
ipa3_lan_poll, NAPI_WEIGHT);
netif_napi_add(
&ipa3_ctx->generic_ndev,
&ipa3_ctx->napi_lan_rx,
ipa3_lan_poll,
NAPI_WEIGHT);
}
}
}
@@ -8909,10 +8957,18 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
ipa3_ctx->uc_ctx.holb_monitor.max_cnt_11ad =
resource_p->ipa_holb_monitor_max_cnt_11ad;
ipa3_ctx->ipa_wan_aggr_pkt_cnt = resource_p->ipa_wan_aggr_pkt_cnt;
ipa3_ctx->stats.page_recycle_stats[0].total_replenished = 0;
ipa3_ctx->stats.page_recycle_stats[0].tmp_alloc = 0;
ipa3_ctx->stats.page_recycle_stats[1].total_replenished = 0;
ipa3_ctx->stats.page_recycle_stats[1].tmp_alloc = 0;
memset(
ipa3_ctx->stats.page_recycle_stats,
0,
sizeof(ipa3_ctx->stats.page_recycle_stats));
memset(
ipa3_ctx->stats.cache_recycle_stats,
0,
sizeof(ipa3_ctx->stats.cache_recycle_stats));
memset(
&ipa3_ctx->stats.coal,
0,
sizeof(ipa3_ctx->stats.coal));
memset(ipa3_ctx->stats.page_recycle_cnt, 0,
sizeof(ipa3_ctx->stats.page_recycle_cnt));
ipa3_ctx->stats.num_sort_tasklet_sched[0] = 0;

查看文件

@@ -1621,33 +1621,40 @@ static ssize_t ipa3_read_page_recycle_stats(struct file *file,
int nbytes;
int cnt = 0, i = 0, k = 0;
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
"COAL : Total number of packets replenished =%llu\n"
"COAL : Number of page recycled packets =%llu\n"
"COAL : Number of tmp alloc packets =%llu\n"
"COAL : Number of times tasklet scheduled =%llu\n"
"DEF : Total number of packets replenished =%llu\n"
"DEF : Number of page recycled packets =%llu\n"
"DEF : Number of tmp alloc packets =%llu\n"
"DEF : Number of times tasklet scheduled =%llu\n"
"COMMON : Number of page recycled in tasklet =%llu\n"
"COMMON : Number of times free pages not found in tasklet =%llu\n",
ipa3_ctx->stats.page_recycle_stats[0].total_replenished,
ipa3_ctx->stats.page_recycle_stats[0].page_recycled,
ipa3_ctx->stats.page_recycle_stats[0].tmp_alloc,
ipa3_ctx->stats.num_sort_tasklet_sched[0],
ipa3_ctx->stats.page_recycle_stats[1].total_replenished,
ipa3_ctx->stats.page_recycle_stats[1].page_recycled,
ipa3_ctx->stats.page_recycle_stats[1].tmp_alloc,
ipa3_ctx->stats.num_sort_tasklet_sched[1],
ipa3_ctx->stats.page_recycle_cnt_in_tasklet,
ipa3_ctx->stats.num_of_times_wq_reschd);
nbytes = scnprintf(
dbg_buff, IPA_MAX_MSG_LEN,
"COAL : Total number of packets replenished =%llu\n"
"COAL : Number of page recycled packets =%llu\n"
"COAL : Number of tmp alloc packets =%llu\n"
"COAL : Number of times tasklet scheduled =%llu\n"
"DEF : Total number of packets replenished =%llu\n"
"DEF : Number of page recycled packets =%llu\n"
"DEF : Number of tmp alloc packets =%llu\n"
"DEF : Number of times tasklet scheduled =%llu\n"
"COMMON : Number of page recycled in tasklet =%llu\n"
"COMMON : Number of times free pages not found in tasklet =%llu\n",
ipa3_ctx->stats.page_recycle_stats[0].total_replenished,
ipa3_ctx->stats.page_recycle_stats[0].page_recycled,
ipa3_ctx->stats.page_recycle_stats[0].tmp_alloc,
ipa3_ctx->stats.num_sort_tasklet_sched[0],
ipa3_ctx->stats.page_recycle_stats[1].total_replenished,
ipa3_ctx->stats.page_recycle_stats[1].page_recycled,
ipa3_ctx->stats.page_recycle_stats[1].tmp_alloc,
ipa3_ctx->stats.num_sort_tasklet_sched[1],
ipa3_ctx->stats.page_recycle_cnt_in_tasklet,
ipa3_ctx->stats.num_of_times_wq_reschd);
cnt += nbytes;
for (k = 0; k < 2; k++) {
for (i = 0; i < ipa3_ctx->page_poll_threshold; i++) {
nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN,
nbytes = scnprintf(
dbg_buff + cnt, IPA_MAX_MSG_LEN,
"COMMON : Page replenish efficiency[%d][%d] =%llu\n",
k, i, ipa3_ctx->stats.page_recycle_cnt[k][i]);
cnt += nbytes;
@@ -1656,6 +1663,111 @@ static ssize_t ipa3_read_page_recycle_stats(struct file *file,
return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
}
/*
 * ipa3_read_lan_coal_stats() - debugfs read handler that renders the
 * LAN coalescing counters (ipa3_ctx->stats.coal) into dbg_buff and
 * copies the text to user space.
 */
static ssize_t ipa3_read_lan_coal_stats(
	struct file *file,
	char __user *ubuf,
	size_t count,
	loff_t *ppos)
{
	char veid_list[4096];
	int len = 0, total = 0;
	u32 vid;
	const u32 num_veid =
		sizeof(ipa3_ctx->stats.coal.coal_veid) /
		sizeof(ipa3_ctx->stats.coal.coal_veid[0]);

	veid_list[0] = '\0';

	/* Render the per-VEID counters as "(veid/cnt) " pairs. */
	for (vid = 0; vid < num_veid; vid++) {
		len += scnprintf(
			veid_list + len,
			sizeof(veid_list) - len,
			"(%u/%llu) ",
			vid,
			ipa3_ctx->stats.coal.coal_veid[vid]);
	}

	len = scnprintf(
		dbg_buff, IPA_MAX_MSG_LEN,
		"LAN COAL rx = %llu\n"
		"LAN COAL pkts = %llu\n"
		"LAN COAL left as is = %llu\n"
		"LAN COAL reconstructed = %llu\n"
		"LAN COAL hdr qmap err = %llu\n"
		"LAN COAL hdr nlo err = %llu\n"
		"LAN COAL hdr pkt err = %llu\n"
		"LAN COAL csum err = %llu\n"
		"LAN COAL ip invalid = %llu\n"
		"LAN COAL trans invalid = %llu\n"
		"LAN COAL tcp = %llu\n"
		"LAN COAL tcp bytes = %llu\n"
		"LAN COAL udp = %llu\n"
		"LAN COAL udp bytes = %llu\n"
		"LAN COAL (veid/cnt)...(veid/cnt) = %s\n",
		ipa3_ctx->stats.coal.coal_rx,
		ipa3_ctx->stats.coal.coal_pkts,
		ipa3_ctx->stats.coal.coal_left_as_is,
		ipa3_ctx->stats.coal.coal_reconstructed,
		ipa3_ctx->stats.coal.coal_hdr_qmap_err,
		ipa3_ctx->stats.coal.coal_hdr_nlo_err,
		ipa3_ctx->stats.coal.coal_hdr_pkt_err,
		ipa3_ctx->stats.coal.coal_csum_err,
		ipa3_ctx->stats.coal.coal_ip_invalid,
		ipa3_ctx->stats.coal.coal_trans_invalid,
		ipa3_ctx->stats.coal.coal_tcp,
		ipa3_ctx->stats.coal.coal_tcp_bytes,
		ipa3_ctx->stats.coal.coal_udp,
		ipa3_ctx->stats.coal.coal_udp_bytes,
		veid_list);
	total += len;

	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, total);
}
/*
 * ipa3_read_cache_recycle_stats() - debugfs read handler for the skb
 * cache recycling counters of the COAL, DEF and OTHER pipes.
 */
static ssize_t ipa3_read_cache_recycle_stats(
	struct file *file,
	char __user *ubuf,
	size_t count,
	loff_t *ppos)
{
	const struct ipa3_cache_recycle_stats *crs =
		ipa3_ctx->stats.cache_recycle_stats;
	int len;

	len = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
		"COAL (cache) : Total number of pkts replenished =%llu\n"
		"COAL (cache) : Number of pkts alloced =%llu\n"
		"COAL (cache) : Number of pkts not alloced =%llu\n"
		"DEF (cache) : Total number of pkts replenished =%llu\n"
		"DEF (cache) : Number of pkts alloced =%llu\n"
		"DEF (cache) : Number of pkts not alloced =%llu\n"
		"OTHER (cache) : Total number of packets replenished =%llu\n"
		"OTHER (cache) : Number of pkts alloced =%llu\n"
		"OTHER (cache) : Number of pkts not alloced =%llu\n",
		crs[0].tot_pkt_replenished,
		crs[0].pkt_allocd,
		crs[0].pkt_found,
		crs[1].tot_pkt_replenished,
		crs[1].pkt_allocd,
		crs[1].pkt_found,
		crs[2].tot_pkt_replenished,
		crs[2].pkt_allocd,
		crs[2].pkt_found);

	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, len);
}
static ssize_t ipa3_read_wstats(struct file *file, char __user *ubuf,
size_t count, loff_t *ppos)
{
@@ -3320,6 +3432,14 @@ static const struct ipa3_debugfs_file debugfs_files[] = {
"page_recycle_stats", IPA_READ_ONLY_MODE, NULL, {
.read = ipa3_read_page_recycle_stats,
}
}, {
"lan_coal_stats", IPA_READ_ONLY_MODE, NULL, {
.read = ipa3_read_lan_coal_stats,
}
}, {
"cache_recycle_stats", IPA_READ_ONLY_MODE, NULL, {
.read = ipa3_read_cache_recycle_stats,
}
}, {
"wdi", IPA_READ_ONLY_MODE, NULL, {
.read = ipa3_read_wdi,

File diff too large to display — Load Diff

查看文件

@@ -1546,6 +1546,30 @@ struct ipa3_page_recycle_stats {
u64 tmp_alloc;
};
/*
 * struct ipa3_cache_recycle_stats - skb cache recycling counters.
 * One instance per pipe class; the debugfs reader indexes the array
 * as [0] = COAL, [1] = DEF, [2] = OTHER.
 */
struct ipa3_cache_recycle_stats {
u64 pkt_allocd; /* pkts satisfied by a fresh allocation */
u64 pkt_found; /* NOTE(review): printed as "pkts not alloced" in debugfs — confirm naming */
u64 tot_pkt_replenished; /* total pkts replenished */
};
/*
 * struct lan_coal_stats - counters for traffic received on the LAN
 * coalescing pipe (IPA_CLIENT_APPS_LAN_COAL_CONS).  Dumped via the
 * "lan_coal_stats" debugfs node.
 */
struct lan_coal_stats {
u64 coal_rx; /* coalesced buffers received */
u64 coal_left_as_is; /* pkts delivered without reconstruction */
u64 coal_reconstructed; /* pkts reconstructed from a coalesced frame */
u64 coal_pkts; /* total packets extracted */
u64 coal_hdr_qmap_err; /* qmap header errors */
u64 coal_hdr_nlo_err; /* NLO count errors in header */
u64 coal_hdr_pkt_err; /* packet count errors in header */
u64 coal_csum_err; /* checksum errors */
u64 coal_ip_invalid; /* invalid IP headers */
u64 coal_trans_invalid; /* invalid transport headers */
u64 coal_veid[GSI_VEID_MAX]; /* per-VEID packet counts */
u64 coal_tcp; /* TCP packets */
u64 coal_tcp_bytes; /* TCP payload bytes */
u64 coal_udp; /* UDP packets */
u64 coal_udp_bytes; /* UDP payload bytes */
};
struct ipa3_stats {
u32 tx_sw_pkts;
u32 tx_hw_pkts;
@@ -1565,6 +1589,7 @@ struct ipa3_stats {
u32 rmnet_ll_rx_empty;
u32 rmnet_ll_repl_rx_empty;
u32 lan_rx_empty;
u32 lan_rx_empty_coal;
u32 lan_repl_rx_empty;
u32 low_lat_rx_empty;
u32 low_lat_repl_rx_empty;
@@ -1575,11 +1600,13 @@ struct ipa3_stats {
u64 lower_order;
u32 pipe_setup_fail_cnt;
struct ipa3_page_recycle_stats page_recycle_stats[3];
struct ipa3_cache_recycle_stats cache_recycle_stats[3];
u64 page_recycle_cnt[3][IPA_PAGE_POLL_THRESHOLD_MAX];
atomic_t num_buff_above_thresh_for_def_pipe_notified;
atomic_t num_buff_above_thresh_for_coal_pipe_notified;
atomic_t num_buff_below_thresh_for_def_pipe_notified;
atomic_t num_buff_below_thresh_for_coal_pipe_notified;
struct lan_coal_stats coal;
u64 num_sort_tasklet_sched[3];
u64 num_of_times_wq_reschd;
u64 page_recycle_cnt_in_tasklet;
@@ -2174,6 +2201,7 @@ struct ipa_ntn3_client_stats {
* mhi_ctrl_state: state of mhi ctrl pipes
*/
struct ipa3_context {
bool coal_stopped;
struct ipa3_char_device_context cdev;
struct ipa3_ep_context ep[IPA5_MAX_NUM_PIPES];
bool skip_ep_cfg_shadow[IPA5_MAX_NUM_PIPES];
@@ -2364,7 +2392,11 @@ struct ipa3_context {
u32 icc_num_cases;
u32 icc_num_paths;
u32 icc_clk[IPA_ICC_LVL_MAX][IPA_ICC_PATH_MAX][IPA_ICC_TYPE_MAX];
struct ipahal_imm_cmd_pyld *coal_cmd_pyld[2];
#define WAN_COAL_SUB 0
#define LAN_COAL_SUB 1
#define ULSO_COAL_SUB 2
#define MAX_CCP_SUB (ULSO_COAL_SUB + 1)
struct ipahal_imm_cmd_pyld *coal_cmd_pyld[MAX_CCP_SUB];
struct ipa_mem_buffer ulso_wa_cmd;
u32 tx_wrapper_cache_max_size;
struct ipa3_app_clock_vote app_clock_vote;
@@ -2745,6 +2777,36 @@ struct ipa3_controller {
struct icc_path *icc_path[IPA_ICC_PATH_MAX];
};
/*
 * When data arrives on IPA_CLIENT_APPS_LAN_COAL_CONS, said data will
 * contain a qmap header followed by an array of the following. The
 * number of them in the array is always MAX_COAL_PACKET_STATUS_INFO
 * (see below); however, only "num_nlos" (a field in the qmap header)
 * will be valid. The rest are to be ignored.
 */
/*
 * struct coal_packet_status_info - per-NLO status entry that follows
 * the qmap header in a LAN_COAL_CONS buffer.
 *
 * NOTE(review): __aligned(1) relaxes the struct's alignment but does
 * not pack it; with u16 + u8 + u8 there is no internal padding anyway,
 * so the wire layout is 4 bytes — confirm __packed is not required.
 */
struct coal_packet_status_info {
u16 pkt_len; /* total length of the packets in this NLO */
u8 pkt_cksum_errs; /* per-packet checksum-error bitmap */
u8 num_pkts; /* number of packets in this NLO */
} __aligned(1);
/*
* This is the number of the struct coal_packet_status_info that
* follow the qmap header. As above, only "num_nlos" are valid. The
* rest are to be ignored.
*/
#define MAX_COAL_PACKET_STATUS_INFO (6)
#define VALID_NLS(nls) \
((nls) > 0 && (nls) <= MAX_COAL_PACKET_STATUS_INFO)
/*
* The following is the total number of bits in all the pkt_cksum_errs
* in each of the struct coal_packet_status_info(s) that follow the
* qmap header. Each bit is meant to tell us if a packet is good or
* bad, relative to a checksum. Given this, the max number of bits
* dictates the max number of packets that can be in a buffer from the
* IPA.
*/
#define MAX_COAL_PACKETS (48)
extern struct ipa3_context *ipa3_ctx;
extern bool ipa_net_initialized;
@@ -2937,6 +2999,9 @@ void ipa3_get_default_evict_values(
void ipa3_default_evict_register( void );
int ipa3_set_evict_policy(
struct ipa_ioc_coal_evict_policy *evict_pol);
void start_coalescing( void );
void stop_coalescing( void );
bool lan_coal_enabled( void );
/*
* Messaging
@@ -3238,6 +3303,10 @@ void wwan_cleanup(void);
int ipa3_teth_bridge_driver_init(void);
void ipa3_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data);
void ipa3_lan_coal_rx_cb(
void *priv,
enum ipa_dp_evt_type evt,
unsigned long data);
int _ipa_init_sram_v3(void);
int _ipa_init_hdr_v3_0(void);
@@ -3410,7 +3479,9 @@ int ipa3_set_rt_tuple_mask(int tbl_idx, struct ipahal_reg_hash_tuple *tuple);
void ipa3_set_resorce_groups_min_max_limits(void);
void ipa3_set_resorce_groups_config(void);
int ipa3_suspend_apps_pipes(bool suspend);
void ipa3_force_close_coal(void);
void ipa3_force_close_coal(
bool close_wan,
bool close_lan );
int ipa3_flt_read_tbl_from_hw(u32 pipe_idx,
enum ipa_ip_type ip_type,
bool hashable,
@@ -3619,7 +3690,7 @@ static inline void *alloc_and_init(u32 size, u32 init_val)
*/
#define IPA_COAL_VP_LRU_THRSHLD 0
#define IPA_COAL_EVICTION_EN true
#define IPA_COAL_VP_LRU_GRAN_SEL IPA_EVICT_TIME_GRAN_10_USEC
#define IPA_COAL_VP_LRU_GRAN_SEL 0
#define IPA_COAL_VP_LRU_UDP_THRSHLD 0
#define IPA_COAL_VP_LRU_TCP_THRSHLD 0
#define IPA_COAL_VP_LRU_UDP_THRSHLD_EN 1
@@ -3631,15 +3702,10 @@ static inline void *alloc_and_init(u32 size, u32 init_val)
* eviction timers.
*/
enum ipa_evict_time_gran_type {
IPA_EVICT_TIME_GRAN_10_USEC,
IPA_EVICT_TIME_GRAN_20_USEC,
IPA_EVICT_TIME_GRAN_50_USEC,
IPA_EVICT_TIME_GRAN_100_USEC,
IPA_EVICT_TIME_GRAN_1_MSEC,
IPA_EVICT_TIME_GRAN_10_MSEC,
IPA_EVICT_TIME_GRAN_100_MSEC,
IPA_EVICT_TIME_GRAN_NEAR_HALF_SEC, /* 0.65536s */
IPA_EVICT_TIME_GRAN_MAX,
IPA_EVICT_TIME_GRAN_0,
IPA_EVICT_TIME_GRAN_1,
IPA_EVICT_TIME_GRAN_2,
IPA_EVICT_TIME_GRAN_3,
};
/* query ipa APQ mode*/

查看文件

@@ -1077,9 +1077,18 @@ static int __ipa_create_rt_entry(struct ipa3_rt_entry **entry,
(*(entry))->ipacm_installed = user;
if ((*(entry))->rule.coalesce &&
(*(entry))->rule.dst == IPA_CLIENT_APPS_WAN_CONS &&
ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS) != -1)
(*(entry))->rule.dst = IPA_CLIENT_APPS_WAN_COAL_CONS;
IPA_CLIENT_IS_LAN_or_WAN_CONS((*(entry))->rule.dst)) {
int unused;
if ((*(entry))->rule.dst == IPA_CLIENT_APPS_LAN_CONS) {
if (IPA_CLIENT_IS_MAPPED(IPA_CLIENT_APPS_LAN_COAL_CONS, unused)) {
(*(entry))->rule.dst = IPA_CLIENT_APPS_LAN_COAL_CONS;
}
} else { /* == IPA_CLIENT_APPS_WAN_CONS */
if (IPA_CLIENT_IS_MAPPED(IPA_CLIENT_APPS_WAN_COAL_CONS, unused)) {
(*(entry))->rule.dst = IPA_CLIENT_APPS_WAN_COAL_CONS;
}
}
}
if (rule->enable_stats)
(*entry)->cnt_idx = rule->cnt_idx;

查看文件

@@ -6531,9 +6531,57 @@ const char *ipa_clients_strings[IPA_CLIENT_MAX] = {
__stringify(RESERVERD_CONS_123),
__stringify(RESERVERD_PROD_124),
__stringify(IPA_CLIENT_TPUT_CONS),
__stringify(RESERVERD_PROD_126),
__stringify(IPA_CLIENT_APPS_LAN_COAL_CONS),
};
EXPORT_SYMBOL(ipa_clients_strings);
/*
 * _set_coalescing_disposition() - update the coal_force_to_default
 * field of the COAL master config register via a read-modify-write.
 * No-op before IPA initialization completes or on HW older than v5.5.
 */
static void _set_coalescing_disposition(
	bool force_to_default )
{
	struct ipahal_reg_coal_master_cfg master_cfg;

	if (!ipa3_ctx->ipa_initialization_complete ||
	    ipa3_ctx->ipa_hw_type < IPA_HW_v5_5)
		return;

	memset(&master_cfg, 0, sizeof(master_cfg));
	ipahal_read_reg_fields(IPA_COAL_MASTER_CFG, &master_cfg);
	master_cfg.coal_force_to_default = force_to_default;
	ipahal_write_reg_fields(IPA_COAL_MASTER_CFG, &master_cfg);
}
/*
 * start_coalescing() - re-enable HW coalescing if it was previously
 * stopped via stop_coalescing().  Clears the coal_stopped flag.
 */
void start_coalescing(void)
{
	/* Only undo a prior stop; avoid redundant register writes. */
	if (ipa3_ctx->coal_stopped) {
		_set_coalescing_disposition(false);
		ipa3_ctx->coal_stopped = false;
	}
}
/*
 * stop_coalescing() - force the coalescing HW to its default (pass
 * through) disposition and latch the coal_stopped flag, so that a
 * later start_coalescing() can restore normal operation.
 */
void stop_coalescing(void)
{
	/* Idempotent: skip the register write if already stopped. */
	if (!ipa3_ctx->coal_stopped) {
		_set_coalescing_disposition(true);
		ipa3_ctx->coal_stopped = true;
	}
}
/*
 * lan_coal_enabled() - report whether the LAN coalescing consumer
 * pipe is usable.
 *
 * Return: true when IPA initialization has completed and
 * IPA_CLIENT_APPS_LAN_COAL_CONS is mapped to a valid endpoint;
 * false otherwise.
 */
bool lan_coal_enabled(void)
{
	if (ipa3_ctx->ipa_initialization_complete) {
		int ep_idx;

		/* Macro assigns ep_idx as a side effect; value unused here. */
		if (IPA_CLIENT_IS_MAPPED_VALID(
			IPA_CLIENT_APPS_LAN_COAL_CONS, ep_idx))
			return true;
	}
	return false;
}
int ipa3_set_evict_policy(
struct ipa_ioc_coal_evict_policy *evict_pol)
{
@@ -6638,6 +6686,8 @@ const char *ipa_get_version_string(enum ipa_hw_type ver)
break;
case IPA_HW_v5_1:
str = "5.1";
case IPA_HW_v5_5:
str = "5.5";
default:
str = "Invalid version";
break;
@@ -7486,12 +7536,13 @@ int ipa3_init_hw(void)
master_cfg.coal_ipv4_id_ignore = ipa3_ctx->coal_ipv4_id_ignore;
ipahal_write_reg_fields(IPA_COAL_MASTER_CFG, &master_cfg);
IPADBG(": coal-ipv4-id-ignore = %s\n",
master_cfg.coal_ipv4_id_ignore
? "True" : "False");
IPADBG(
": coal-ipv4-id-ignore = %s\n",
master_cfg.coal_ipv4_id_ignore ?
"True" : "False");
ipa_comp_cfg();
/*
* In IPA 4.2 filter and routing hashing not supported
* disabling hash enable register.
@@ -11801,7 +11852,7 @@ EXPORT_SYMBOL(ipa3_stop_gsi_channel);
static int _ipa_suspend_resume_pipe(enum ipa_client_type client, bool suspend)
{
struct ipa_ep_cfg_ctrl cfg;
int ipa_ep_idx, coal_ep_idx;
int ipa_ep_idx, wan_coal_ep_idx, lan_coal_ep_idx;
struct ipa3_ep_context *ep;
int res;
@@ -11832,8 +11883,6 @@ static int _ipa_suspend_resume_pipe(enum ipa_client_type client, bool suspend)
return 0;
}
coal_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
/*
* Configure the callback mode only one time after starting the channel
* otherwise observing IEOB interrupt received before configure callmode
@@ -11858,7 +11907,7 @@ static int _ipa_suspend_resume_pipe(enum ipa_client_type client, bool suspend)
/* Apps prod pipes use common event ring so cannot configure mode*/
/*
* Skipping to configure mode for default wan pipe,
* Skipping to configure mode for default [w|l]an pipe,
* as both pipes use a common event ring. If both pipes
* configure same event ring observing race condition in
* updating current polling state.
@@ -11866,7 +11915,9 @@ static int _ipa_suspend_resume_pipe(enum ipa_client_type client, bool suspend)
if (IPA_CLIENT_IS_APPS_PROD(client) ||
(client == IPA_CLIENT_APPS_WAN_CONS &&
coal_ep_idx != IPA_EP_NOT_ALLOCATED))
IPA_CLIENT_IS_MAPPED(IPA_CLIENT_APPS_WAN_COAL_CONS, wan_coal_ep_idx)) ||
(client == IPA_CLIENT_APPS_LAN_CONS &&
IPA_CLIENT_IS_MAPPED(IPA_CLIENT_APPS_LAN_COAL_CONS, lan_coal_ep_idx)))
return 0;
if (suspend) {
@@ -11883,24 +11934,57 @@ static int _ipa_suspend_resume_pipe(enum ipa_client_type client, bool suspend)
return 0;
}
void ipa3_force_close_coal(void)
void ipa3_force_close_coal(
bool close_wan,
bool close_lan )
{
struct ipa3_desc desc[2];
struct ipa3_desc desc[ MAX_CCP_SUB ];
int ep_idx, num_desc = 0;
ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
if (ep_idx == IPA_EP_NOT_ALLOCATED || (!ipa3_ctx->ep[ep_idx].valid))
return;
if ( close_wan
&&
IPA_CLIENT_IS_MAPPED_VALID(IPA_CLIENT_APPS_WAN_COAL_CONS, ep_idx)
&&
ipa3_ctx->coal_cmd_pyld[WAN_COAL_SUB] ) {
ipa3_init_imm_cmd_desc(
&desc[num_desc],
ipa3_ctx->coal_cmd_pyld[WAN_COAL_SUB]);
ipa3_init_imm_cmd_desc(&desc[0], ipa3_ctx->coal_cmd_pyld[0]);
num_desc++;
if (ipa3_ctx->ulso_wa) {
ipa3_init_imm_cmd_desc(&desc[1], ipa3_ctx->coal_cmd_pyld[1]);
num_desc++;
}
IPADBG("Sending %d descriptor for coal force close\n", num_desc);
if (ipa3_send_cmd(num_desc, desc))
IPADBG("ipa3_send_cmd timedout\n");
if ( close_lan
&&
IPA_CLIENT_IS_MAPPED_VALID(IPA_CLIENT_APPS_LAN_COAL_CONS, ep_idx)
&&
ipa3_ctx->coal_cmd_pyld[LAN_COAL_SUB] ) {
ipa3_init_imm_cmd_desc(
&desc[num_desc],
ipa3_ctx->coal_cmd_pyld[LAN_COAL_SUB]);
num_desc++;
}
if (ipa3_ctx->ulso_wa && ipa3_ctx->coal_cmd_pyld[ULSO_COAL_SUB] ) {
ipa3_init_imm_cmd_desc(
&desc[num_desc],
ipa3_ctx->coal_cmd_pyld[ULSO_COAL_SUB]);
num_desc++;
}
if ( num_desc ) {
IPADBG("Sending %d descriptor(s) for coal force close\n", num_desc);
if ( ipa3_send_cmd_timeout(
num_desc,
desc,
IPA_COAL_CLOSE_FRAME_CMD_TIMEOUT_MSEC) ) {
IPADBG("ipa3_send_cmd_timeout timedout\n");
}
}
}
int ipa3_suspend_apps_pipes(bool suspend)
@@ -11909,25 +11993,45 @@ int ipa3_suspend_apps_pipes(bool suspend)
struct ipa_ep_cfg_holb holb_cfg;
int odl_ep_idx;
if (suspend) {
stop_coalescing();
ipa3_force_close_coal(true, true);
}
/* As per HPG first need start/stop coalescing channel
* then default one. Coalescing client number was greater then
* default one so starting the last client.
*/
res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_COAL_CONS, suspend);
if (res == -EAGAIN)
if (res == -EAGAIN) {
if (suspend) start_coalescing();
goto undo_coal_cons;
}
res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_CONS, suspend);
if (res == -EAGAIN)
if (res == -EAGAIN) {
if (suspend) start_coalescing();
goto undo_wan_cons;
}
res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_LAN_COAL_CONS, suspend);
if (res == -EAGAIN) {
if (suspend) start_coalescing();
goto undo_lan_coal_cons;
}
res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_LAN_CONS, suspend);
if (res == -EAGAIN)
if (res == -EAGAIN) {
if (suspend) start_coalescing();
goto undo_lan_cons;
}
if (suspend) start_coalescing();
res = _ipa_suspend_resume_pipe(IPA_CLIENT_ODL_DPL_CONS, suspend);
if (res == -EAGAIN)
if (res == -EAGAIN) {
goto undo_odl_cons;
}
odl_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_ODL_DPL_CONS);
if (odl_ep_idx != IPA_EP_NOT_ALLOCATED && ipa3_ctx->ep[odl_ep_idx].valid) {
@@ -11949,13 +12053,15 @@ int ipa3_suspend_apps_pipes(bool suspend)
res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_LOW_LAT_CONS,
suspend);
if (res == -EAGAIN)
if (res == -EAGAIN) {
goto undo_qmap_cons;
}
res = _ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_LOW_LAT_DATA_CONS,
suspend);
if (res == -EAGAIN)
if (res == -EAGAIN) {
goto undo_low_lat_data_cons;
}
if (suspend) {
struct ipahal_reg_tx_wrapper tx;
@@ -12033,6 +12139,8 @@ undo_odl_cons:
_ipa_suspend_resume_pipe(IPA_CLIENT_ODL_DPL_CONS, !suspend);
undo_lan_cons:
_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_LAN_CONS, !suspend);
undo_lan_coal_cons:
_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_LAN_COAL_CONS, !suspend);
undo_wan_cons:
_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_COAL_CONS, !suspend);
_ipa_suspend_resume_pipe(IPA_CLIENT_APPS_WAN_CONS, !suspend);
@@ -12094,57 +12202,98 @@ int ipa3_allocate_coal_close_frame(void)
struct ipahal_imm_cmd_register_write reg_write_cmd = { 0 };
struct ipahal_imm_cmd_register_read dummy_reg_read = { 0 };
struct ipahal_reg_valmask valmask;
int ep_idx;
u32 offset = 0;
int ep_idx, num_desc = 0;
ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
if (ep_idx == IPA_EP_NOT_ALLOCATED)
return 0;
IPADBG("Allocate coal close frame cmd\n");
reg_write_cmd.skip_pipeline_clear = false;
if (ipa3_ctx->ulso_wa) {
reg_write_cmd.pipeline_clear_options = IPAHAL_SRC_GRP_CLEAR;
} else {
reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
}
if (ipa3_ctx->ipa_hw_type < IPA_HW_v5_0)
offset = ipahal_get_reg_ofst(
IPA_AGGR_FORCE_CLOSE);
else
offset = ipahal_get_ep_reg_offset(
IPA_AGGR_FORCE_CLOSE_n, ep_idx);
reg_write_cmd.offset = offset;
ipahal_get_aggr_force_close_valmask(ep_idx, &valmask);
reg_write_cmd.value = valmask.val;
reg_write_cmd.value_mask = valmask.mask;
ipa3_ctx->coal_cmd_pyld[0] =
ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
&reg_write_cmd, false);
if (!ipa3_ctx->coal_cmd_pyld[0]) {
IPAERR("fail construct register_write imm cmd\n");
ipa_assert();
return 0;
if ( IPA_CLIENT_IS_MAPPED(IPA_CLIENT_APPS_WAN_COAL_CONS, ep_idx) ) {
IPADBG("Allocate wan coal close frame cmd\n");
reg_write_cmd.skip_pipeline_clear = false;
if (ipa3_ctx->ulso_wa) {
reg_write_cmd.pipeline_clear_options = IPAHAL_SRC_GRP_CLEAR;
} else {
reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
}
if (ipa3_ctx->ipa_hw_type < IPA_HW_v5_0)
offset = ipahal_get_reg_ofst(
IPA_AGGR_FORCE_CLOSE);
else
offset = ipahal_get_ep_reg_offset(
IPA_AGGR_FORCE_CLOSE_n, ep_idx);
reg_write_cmd.offset = offset;
ipahal_get_aggr_force_close_valmask(ep_idx, &valmask);
reg_write_cmd.value = valmask.val;
reg_write_cmd.value_mask = valmask.mask;
ipa3_ctx->coal_cmd_pyld[WAN_COAL_SUB] =
ipahal_construct_imm_cmd(
IPA_IMM_CMD_REGISTER_WRITE,
&reg_write_cmd, false);
if (!ipa3_ctx->coal_cmd_pyld[WAN_COAL_SUB]) {
IPAERR("fail construct register_write imm cmd\n");
ipa_assert();
return 0;
}
num_desc++;
}
if (ipa3_ctx->ulso_wa) {
/* dummy register read IC with HPS clear */
if ( IPA_CLIENT_IS_MAPPED(IPA_CLIENT_APPS_LAN_COAL_CONS, ep_idx) ) {
IPADBG("Allocate lan coal close frame cmd\n");
reg_write_cmd.skip_pipeline_clear = false;
if (ipa3_ctx->ulso_wa) {
reg_write_cmd.pipeline_clear_options = IPAHAL_SRC_GRP_CLEAR;
} else {
reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
}
if (ipa3_ctx->ipa_hw_type < IPA_HW_v5_0)
offset = ipahal_get_reg_ofst(
IPA_AGGR_FORCE_CLOSE);
else
offset = ipahal_get_ep_reg_offset(
IPA_AGGR_FORCE_CLOSE_n, ep_idx);
reg_write_cmd.offset = offset;
ipahal_get_aggr_force_close_valmask(ep_idx, &valmask);
reg_write_cmd.value = valmask.val;
reg_write_cmd.value_mask = valmask.mask;
ipa3_ctx->coal_cmd_pyld[LAN_COAL_SUB] =
ipahal_construct_imm_cmd(
IPA_IMM_CMD_REGISTER_WRITE,
&reg_write_cmd, false);
if (!ipa3_ctx->coal_cmd_pyld[LAN_COAL_SUB]) {
IPAERR("fail construct register_write imm cmd\n");
ipa_assert();
return 0;
}
num_desc++;
}
if ( ipa3_ctx->ulso_wa ) {
/*
 * Dummy register read IC with HPS clear
 */
ipa3_ctx->ulso_wa_cmd.size = 4;
ipa3_ctx->ulso_wa_cmd.base = dma_alloc_coherent(ipa3_ctx->pdev,
ipa3_ctx->ulso_wa_cmd.size,
&ipa3_ctx->ulso_wa_cmd.phys_base, GFP_KERNEL);
ipa3_ctx->ulso_wa_cmd.base =
dma_alloc_coherent(
ipa3_ctx->pdev,
ipa3_ctx->ulso_wa_cmd.size,
&ipa3_ctx->ulso_wa_cmd.phys_base, GFP_KERNEL);
if (ipa3_ctx->ulso_wa_cmd.base == NULL) {
ipa_assert();
}
offset = ipahal_get_reg_n_ofst(IPA_STAT_QUOTA_BASE_n,
offset = ipahal_get_reg_n_ofst(
IPA_STAT_QUOTA_BASE_n,
ipa3_ctx->ee);
dummy_reg_read.skip_pipeline_clear = false;
dummy_reg_read.pipeline_clear_options = IPAHAL_HPS_CLEAR;
dummy_reg_read.offset = offset;
dummy_reg_read.sys_addr = ipa3_ctx->ulso_wa_cmd.phys_base;
ipa3_ctx->coal_cmd_pyld[1] = ipahal_construct_imm_cmd(
IPA_IMM_CMD_REGISTER_READ,
&dummy_reg_read, false);
if (!ipa3_ctx->coal_cmd_pyld[1]) {
ipa3_ctx->coal_cmd_pyld[ULSO_COAL_SUB] =
ipahal_construct_imm_cmd(
IPA_IMM_CMD_REGISTER_READ,
&dummy_reg_read, false);
if (!ipa3_ctx->coal_cmd_pyld[ULSO_COAL_SUB]) {
IPAERR("failed to construct DUMMY READ IC\n");
ipa_assert();
}
@@ -12155,15 +12304,27 @@ int ipa3_allocate_coal_close_frame(void)
/**
 * ipa3_free_coal_close_frame() - Release coalescing close-frame resources
 *
 * Destroys the immediate command payloads built for the WAN, LAN and
 * ULSO coalescing sub-commands (a slot may legitimately be NULL when the
 * corresponding pipe/feature was never brought up), then frees the DMA
 * buffer used by the ULSO dummy register-read workaround, if it was
 * allocated.
 *
 * NOTE: the legacy hard-coded indices [0]/[1] were replaced by the
 * WAN_COAL_SUB/LAN_COAL_SUB/ULSO_COAL_SUB enum indices, and the DMA
 * buffer is now freed only when its base pointer is non-NULL.
 */
void ipa3_free_coal_close_frame(void)
{
	if (ipa3_ctx->coal_cmd_pyld[WAN_COAL_SUB])
		ipahal_destroy_imm_cmd(ipa3_ctx->coal_cmd_pyld[WAN_COAL_SUB]);

	if (ipa3_ctx->coal_cmd_pyld[LAN_COAL_SUB])
		ipahal_destroy_imm_cmd(ipa3_ctx->coal_cmd_pyld[LAN_COAL_SUB]);

	if (ipa3_ctx->coal_cmd_pyld[ULSO_COAL_SUB])
		ipahal_destroy_imm_cmd(ipa3_ctx->coal_cmd_pyld[ULSO_COAL_SUB]);

	if (ipa3_ctx->ulso_wa_cmd.base) {
		dma_free_coherent(
			ipa3_ctx->pdev,
			ipa3_ctx->ulso_wa_cmd.size,
			ipa3_ctx->ulso_wa_cmd.base,
			ipa3_ctx->ulso_wa_cmd.phys_base);
	}
}
/**
* ipa3_inject_dma_task_for_gsi()- Send DMA_TASK to IPA for GSI stop channel
*
@@ -12715,6 +12876,7 @@ bool ipa3_is_msm_device(void)
case IPA_HW_v4_9:
case IPA_HW_v4_11:
case IPA_HW_v5_1:
case IPA_HW_v5_5:
return true;
default:
IPAERR("unknown HW type %d\n", ipa3_ctx->ipa_hw_type);

查看文件

@@ -57,8 +57,11 @@ static const char *ipahal_pkt_status_exception_to_str
__stringify(IPAHAL_PKT_STATUS_EXCEPTION_CSUM),
};
/*
* Forward declarations.
*/
static u16 ipahal_imm_cmd_get_opcode(enum ipahal_imm_cmd_name cmd);
static int ipahal_qmap_init(enum ipa_hw_type ipa_hw_type);
static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dma_task_32b_addr(
enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
@@ -2576,6 +2579,12 @@ int ipahal_init(enum ipa_hw_type ipa_hw_type, void __iomem *base,
goto bail_free_ctx;
}
if (ipahal_qmap_init(ipa_hw_type)) {
IPAHAL_ERR("failed to init ipahal qmap\n");
result = -EFAULT;
goto bail_free_ctx;
}
ipahal_hdr_init(ipa_hw_type);
if (ipahal_fltrt_init(ipa_hw_type)) {
@@ -2636,3 +2645,184 @@ void ipahal_free_dma_mem(struct ipa_mem_buffer *mem)
mem->phys_base = 0;
}
}
/*
* ***************************************************************
*
* To follow, a generalized qmap header manipulation API.
*
* The functions immediately following this comment are version
 * specific qmap parsing functions. They are referred to in the
* ipahal_qmap_parse_tbl below.
*
* ***************************************************************
*/
/*
 * Parse a v4.5 format qmap/coalescing header into the version
 * independent result structure.
 *
 * FIX: this must read through the qmap4_5 view of the union, not
 * qmap5_0. In the v4.5 layout zero_checksum immediately follows
 * coal_next_hdr, whereas the v5.0 layout inserts ip_id_cfg between
 * them; reading qmap5_0.zero_checksum on a v4.5 header therefore
 * fetches the wrong bit.
 */
void ipa_qmap_hdr_parse_v4_5(
	union qmap_hdr_u* qmap_hdr,
	struct qmap_hdr_data* qmap_data_rslt )
{
	qmap_data_rslt->cd = qmap_hdr->qmap4_5.cd;
	qmap_data_rslt->qmap_next_hdr = qmap_hdr->qmap4_5.qmap_next_hdr;
	qmap_data_rslt->pad = qmap_hdr->qmap4_5.pad;
	qmap_data_rslt->mux_id = qmap_hdr->qmap4_5.mux_id;
	qmap_data_rslt->packet_len_with_pad = qmap_hdr->qmap4_5.packet_len_with_pad;

	qmap_data_rslt->hdr_type = qmap_hdr->qmap4_5.hdr_type;
	qmap_data_rslt->coal_next_hdr = qmap_hdr->qmap4_5.coal_next_hdr;
	qmap_data_rslt->zero_checksum = qmap_hdr->qmap4_5.zero_checksum;
}
void ipa_qmap_hdr_parse_v5_0(
union qmap_hdr_u* qmap_hdr,
struct qmap_hdr_data* qmap_data_rslt )
{
qmap_data_rslt->cd = qmap_hdr->qmap5_0.cd;
qmap_data_rslt->qmap_next_hdr = qmap_hdr->qmap5_0.qmap_next_hdr;
qmap_data_rslt->pad = qmap_hdr->qmap5_0.pad;
qmap_data_rslt->mux_id = qmap_hdr->qmap5_0.mux_id;
qmap_data_rslt->packet_len_with_pad = qmap_hdr->qmap5_0.packet_len_with_pad;
qmap_data_rslt->hdr_type = qmap_hdr->qmap5_0.hdr_type;
qmap_data_rslt->coal_next_hdr = qmap_hdr->qmap5_0.coal_next_hdr;
qmap_data_rslt->ip_id_cfg = qmap_hdr->qmap5_0.ip_id_cfg;
qmap_data_rslt->zero_checksum = qmap_hdr->qmap5_0.zero_checksum;
qmap_data_rslt->additional_hdr_size = qmap_hdr->qmap5_0.additional_hdr_size;
qmap_data_rslt->segment_size = qmap_hdr->qmap5_0.segment_size;
}
void ipa_qmap_hdr_parse_v5_5(
union qmap_hdr_u* qmap_hdr,
struct qmap_hdr_data* qmap_data_rslt )
{
qmap_data_rslt->cd = qmap_hdr->qmap5_5.cd;
qmap_data_rslt->qmap_next_hdr = qmap_hdr->qmap5_5.qmap_next_hdr;
qmap_data_rslt->pad = qmap_hdr->qmap5_5.pad;
qmap_data_rslt->mux_id = qmap_hdr->qmap5_5.mux_id;
qmap_data_rslt->packet_len_with_pad = ntohs(qmap_hdr->qmap5_5.packet_len_with_pad);
qmap_data_rslt->hdr_type = qmap_hdr->qmap5_5.hdr_type;
qmap_data_rslt->coal_next_hdr = qmap_hdr->qmap5_5.coal_next_hdr;
qmap_data_rslt->chksum_valid = qmap_hdr->qmap5_5.chksum_valid;
qmap_data_rslt->num_nlos = qmap_hdr->qmap5_5.num_nlos;
qmap_data_rslt->inc_ip_id = qmap_hdr->qmap5_5.inc_ip_id;
qmap_data_rslt->rnd_ip_id = qmap_hdr->qmap5_5.rnd_ip_id;
qmap_data_rslt->close_value = qmap_hdr->qmap5_5.close_value;
qmap_data_rslt->close_type = qmap_hdr->qmap5_5.close_type;
qmap_data_rslt->vcid = qmap_hdr->qmap5_5.vcid;
}
/*
 * Structure used to describe a version specific qmap parsing table.
 * One instance per IPA HW version lives in ipahal_qmap_parse_tbl.
 */
struct ipahal_qmap_parse_s {
	/*
	 * Version specific qmap parsing function: extracts the fields
	 * of @qmap_hdr into the version independent @qmap_data_rslt.
	 */
	void (*parse)(
		union qmap_hdr_u* qmap_hdr,
		struct qmap_hdr_data* qmap_data_rslt );
};
/*
 * Table used to contain and drive version specific qmap parsing
 * functions.
 *
 * Only versions that introduced a new header layout are listed
 * explicitly; ipahal_qmap_init() back-fills each empty slot with the
 * nearest earlier version's parser, so every version from IPA v4.5 up
 * resolves to a valid entry.
 */
static struct ipahal_qmap_parse_s ipahal_qmap_parse_tbl[IPA_HW_MAX] = {
	/* IPAv4.5 */
	[IPA_HW_v4_5] = {
		ipa_qmap_hdr_parse_v4_5
	},
	/* IPAv5.0 */
	[IPA_HW_v5_0] = {
		ipa_qmap_hdr_parse_v5_0
	},
	/* IPAv5.5 */
	[IPA_HW_v5_5] = {
		ipa_qmap_hdr_parse_v5_5
	},
};
/*
 * Validate the requested IPA HW version and back-fill any empty
 * entries of ipahal_qmap_parse_tbl: a version that defines no parser
 * of its own inherits the parser of the closest earlier version.
 *
 * Returns 0 on success, -EINVAL on an out-of-range HW version.
 */
static int ipahal_qmap_init(
	enum ipa_hw_type ipa_hw_type)
{
	struct ipahal_qmap_parse_s zero_obj;
	int ver;

	IPAHAL_DBG_LOW("Entry - HW_TYPE=%d\n", ipa_hw_type);

	if (ipa_hw_type < 0 || ipa_hw_type >= IPA_HW_MAX) {
		IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type);
		return -EINVAL;
	}

	memset(&zero_obj, 0, sizeof(zero_obj));

	for (ver = IPA_HW_v4_5 + 1; ver <= ipa_hw_type; ver++) {
		if (memcmp(&ipahal_qmap_parse_tbl[ver],
			   &zero_obj,
			   sizeof(struct ipahal_qmap_parse_s)) == 0) {
			/* no entry for this version: inherit previous */
			memcpy(&ipahal_qmap_parse_tbl[ver],
			       &ipahal_qmap_parse_tbl[ver - 1],
			       sizeof(struct ipahal_qmap_parse_s));
			continue;
		}
		if (ipahal_qmap_parse_tbl[ver].parse == 0) {
			IPAHAL_ERR(
				"QMAP parse table missing parse function ipa_ver=%d\n",
				ver);
			WARN_ON(1);
		}
	}

	return 0;
}
/*
 * FUNCTION: ipahal_qmap_parse()
 *
 * The following function is to be called when version specific qmap
 * parsing is required.
 *
 * ARGUMENTS:
 *
 *   unparsed_qmap
 *
 *     The QMAP header off of a freshly received data packet. As per
 *     the architecture documentation, the data contained herein will
 *     be in network order.
 *
 *   qmap_data_rslt
 *
 *     A location to store the parsed data from unparsed_qmap above.
 *
 * RETURNS: 0 on success; -EINVAL on NULL arguments, an unsupported HW
 * version, or a parse table hole for the current version.
 */
int ipahal_qmap_parse(
	const void* unparsed_qmap,
	struct qmap_hdr_data* qmap_data_rslt )
{
	union qmap_hdr_u qmap_hdr;

	IPAHAL_DBG_LOW("Parse qmap/coal header\n");

	if (!unparsed_qmap || !qmap_data_rslt) {
		IPAHAL_ERR(
			"Input Error: unparsed_qmap=%pK qmap_data_rslt=%pK\n",
			unparsed_qmap, qmap_data_rslt);
		return -EINVAL;
	}

	if (ipahal_ctx->hw_type < IPA_HW_v4_5) {
		IPAHAL_ERR(
			"Unsupported qmap parse for IPA HW type (%d)\n",
			ipahal_ctx->hw_type);
		return -EINVAL;
	}

	/*
	 * Defend against a hole in the parse table: ipahal_qmap_init()
	 * only WARNs when it finds one, so fail cleanly here rather
	 * than dereferencing a NULL function pointer.
	 */
	if (!ipahal_qmap_parse_tbl[ipahal_ctx->hw_type].parse) {
		IPAHAL_ERR(
			"No qmap parse function for IPA HW type (%d)\n",
			ipahal_ctx->hw_type);
		return -EINVAL;
	}

	/* copy into local storage (no byte swap; see ipahal_qmap_ntoh) */
	ipahal_qmap_ntoh(unparsed_qmap, &qmap_hdr);

	ipahal_qmap_parse_tbl[ipahal_ctx->hw_type].parse(
		&qmap_hdr, qmap_data_rslt);

	return 0;
}

查看文件

@@ -843,4 +843,295 @@ u32 ipahal_get_ep_bit(u32 ep_num);
*/
u32 ipahal_get_ep_reg_idx(u32 ep_num);
/*
* ***************************************************************
*
* To follow, a generalized qmap header manipulation API.
*
* ***************************************************************
*/
/**
 * qmap_hdr_v4_5 - QMAP + coalescing frame header layout for IPA v4.5
 *
 * @cd -                  1 bit
 * @qmap_next_hdr -       1 bit
 * @pad -                 6 bits
 * @mux_id -              8 bits
 * @packet_len_with_pad - 16 bits
 * @hdr_type -            7 bits
 * @coal_next_hdr -       1 bit
 * @zero_checksum -       1 bit
 *
 * The first 32 bits form the qmap header proper; the second 32 bits
 * form the coalescing frame header (rsrvd1/rsrvd2 fill it out).
 *
 * The following bit layout is when the data are in host order.
 *
 * FIXME FINDME Need to be reordered properly to reflect network
 * ordering as seen by little endian host (qmap_hdr_v5_5
 * below properly done).
 */
struct qmap_hdr_v4_5 {
	/*
	 * 32 bits of qmap header to follow
	 */
	u64 cd: 1;
	u64 qmap_next_hdr: 1;
	u64 pad: 6;
	u64 mux_id: 8;
	u64 packet_len_with_pad: 16;

	/*
	 * 32 bits of coalescing frame header to follow
	 */
	u64 hdr_type: 7;
	u64 coal_next_hdr: 1;
	u64 zero_checksum: 1;
	u64 rsrvd1: 7;
	u64 rsrvd2: 16;
} __packed;
/**
 * qmap_hdr_v5_0 - QMAP + coalescing frame header layout for IPA v5.0
 *
 * @cd -                  1 bit
 * @qmap_next_hdr -       1 bit
 * @pad -                 6 bits
 * @mux_id -              8 bits
 * @packet_len_with_pad - 16 bits
 * @hdr_type -            7 bits
 * @coal_next_hdr -       1 bit
 * @ip_id_cfg -           1 bit
 * @zero_checksum -       1 bit
 * @additional_hdr_size - 5 bits
 * @segment_size -        16 bits
 *
 * Same overall shape as v4.5, but the coalescing frame half adds
 * ip_id_cfg (shifting zero_checksum by one bit) plus the additional
 * header size and segment size fields.
 *
 * The following bit layout is when the data are in host order.
 *
 * FIXME FINDME Need to be reordered properly to reflect network
 * ordering as seen by little endian host (qmap_hdr_v5_5
 * below properly done).
 */
struct qmap_hdr_v5_0 {
	/*
	 * 32 bits of qmap header to follow
	 */
	u64 cd: 1;
	u64 qmap_next_hdr: 1;
	u64 pad: 6;
	u64 mux_id: 8;
	u64 packet_len_with_pad: 16;

	/*
	 * 32 bits of coalescing frame header to follow
	 */
	u64 hdr_type: 7;
	u64 coal_next_hdr: 1;
	u64 ip_id_cfg: 1;
	u64 zero_checksum: 1;
	u64 rsrvd: 1;
	u64 additional_hdr_size: 5;
	u64 segment_size: 16;
} __packed;
/**
 * qmap_hdr_v5_5 - QMAP + coalescing frame header layout for IPA v5.5
 *
 * @cd -                  1 bit
 * @qmap_next_hdr -       1 bit
 * @pad -                 6 bits
 * @mux_id -              8 bits
 * @packet_len_with_pad - 16 bits (kept in network order; the parser
 *                        applies ntohs() when extracting it)
 * @hdr_type -            7 bits
 * @coal_next_hdr -       1 bit
 * @chksum_valid -        1 bit
 * @num_nlos -            3 bits
 * @inc_ip_id -           1 bit
 * @rnd_ip_id -           1 bit
 * @close_value -         4 bits
 * @close_type -          4 bits
 * @vcid -                4 bits
 *
 * NOTE:
 *
 * The layout below is different when compared against
 * documentation, which shows the fields as they are in network byte
 * order - and network byte order is how we receive the data from
 * the IPA. To avoid using cycles converting from network to host
 * order, we've defined the structure below such that we can access
 * the correct fields while the data are still in network order.
 */
struct qmap_hdr_v5_5 {
	/*
	 * 32 bits of qmap header to follow
	 */
	u8 pad: 6;
	u8 qmap_next_hdr: 1;
	u8 cd: 1;
	u8 mux_id;
	u16 packet_len_with_pad;

	/*
	 * 32 bits of coalescing frame header to follow
	 */
	u8 coal_next_hdr: 1;
	u8 hdr_type: 7;
	u8 rsrvd1: 2;
	u8 rnd_ip_id: 1;
	u8 inc_ip_id: 1;
	u8 num_nlos: 3;
	u8 chksum_valid: 1;
	u8 close_type: 4;
	u8 close_value: 4;
	u8 rsrvd2: 4;
	u8 vcid: 4;
} __packed;
/**
 * qmap_hdr_u -
 *
 * The following is a union of all of the qmap versions above.
 *
 * NOTE WELL: REMEMBER to keep it in sync with the bit structure
 * definitions above.
 */
union qmap_hdr_u {
	struct qmap_hdr_v4_5 qmap4_5;
	struct qmap_hdr_v5_0 qmap5_0;
	struct qmap_hdr_v5_5 qmap5_5;
	u32 words[2]; /* word view used by ipahal_qmap_hton()/ntoh() swaps */
} __packed;
/**
 * qmap_hdr_data -
 *
 * The following is an aggregation of the qmap header bit structures
 * above, with each bit-field widened to an addressable type.
 *
 * Not every field is populated for every IPA HW version: each
 * version's parser fills only the fields its layout defines (e.g.
 * ip_id_cfg/additional_hdr_size/segment_size come from v5.0,
 * chksum_valid through vcid come from v5.5).
 *
 * NOTE WELL: REMEMBER to keep it in sync with the bit structure
 * definitions above.
 */
struct qmap_hdr_data {
	/*
	 * Data from qmap header to follow
	 */
	u8 cd;
	u8 qmap_next_hdr;
	u8 pad;
	u8 mux_id;
	u16 packet_len_with_pad;

	/*
	 * Data from coalescing frame header to follow
	 */
	u8 hdr_type;
	u8 coal_next_hdr;
	u8 ip_id_cfg;
	u8 zero_checksum;
	u8 additional_hdr_size;
	u16 segment_size;
	u8 chksum_valid;
	u8 num_nlos;
	u8 inc_ip_id;
	u8 rnd_ip_id;
	u8 close_value;
	u8 close_type;
	u8 vcid;
};
/**
 * FUNCTION: ipahal_qmap_parse()
 *
 * The following function is to be called when version specific qmap
 * parsing is required.
 *
 * ARGUMENTS:
 *
 *   unparsed_qmap
 *
 *     The QMAP header off of a freshly received data packet. As per
 *     the architecture documentation, the data contained herein will
 *     be in network order.
 *
 *   qmap_data_rslt
 *
 *     A location to store the parsed data from unparsed_qmap above.
 */
int ipahal_qmap_parse(
	const void* unparsed_qmap,
	struct qmap_hdr_data* qmap_data_rslt);
/**
 * FUNCTION: ipahal_qmap_ntoh()
 *
 * Copy a QMAP header, known to be in network order, into @dst_result
 * so that it can be read via the bit structures above.
 *
 * NOTE WELL: no byte swap is performed. The bit-field layouts are
 * defined so the fields can be accessed correctly while the data
 * remain in network order, so a straight copy is all that is needed.
 *
 * ARGUMENTS:
 *
 *   src_data_from_packet
 *
 *     The QMAP header off of a freshly received data packet. As per
 *     the architecture documentation, the data contained herein will
 *     be in network order.
 *
 *   dst_result
 *
 *     A location to where the original data will be copied.
 */
static inline void ipahal_qmap_ntoh(
	const void* src_data_from_packet,
	union qmap_hdr_u* dst_result)
{
	if (!src_data_from_packet || !dst_result)
		return;

	memcpy(
		dst_result,
		src_data_from_packet,
		sizeof(union qmap_hdr_u));
}
/**
 * FUNCTION: ipahal_qmap_hton()
 *
 * Take QMAP data assembled in host order (i.e. via the bit structure
 * definitions above) and emit a network-order copy, suitable for
 * network transmission.
 *
 * ARGUMENTS:
 *
 *   src_data_from_host
 *
 *     QMAP data in host order.
 *
 *   dst_result
 *
 *     A location to where the host ordered data above will be copied,
 *     then converted to network order.
 */
static inline void ipahal_qmap_hton(
	union qmap_hdr_u* src_data_from_host,
	void* dst_result)
{
	union qmap_hdr_u* out = (union qmap_hdr_u*) dst_result;

	if (!src_data_from_host || !out)
		return;

	memcpy(out, src_data_from_host, sizeof(union qmap_hdr_u));

	/* byte-swap the two 32-bit words in place at the destination */
	out->words[0] = htonl(out->words[0]);
	out->words[1] = htonl(out->words[1]);
}
#endif /* _IPAHAL_H_ */