Merge "ipa: Distribute header table on SRAM and DDR"

qctecmdr
2021-05-04 09:14:06 -07:00
committed by Gerrit - the friendly Code Review server
7 changed files with 301 additions and 277 deletions
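In outline, the change replaces the single apps header table with one table per storage type, indexed by a new hdr_tbl_storage enum: partial headers and headers flagged with IPA_HDR_TO_DDR_PATTERN are placed in the DDR (system) table, everything else in SRAM, and each ipa3_hdr_entry records its placement in a new is_lcl flag. A minimal illustrative sketch of the resulting layout and iteration pattern, simplified from the hunks below (illustration only, not part of the committed code):

enum hdr_tbl_storage {
	HDR_TBL_LCL,	/* header table held in IPA SRAM (local) */
	HDR_TBL_SYS,	/* header table held in DDR (system memory) */
	HDR_TBLS_TOTAL,
};

/* ipa3_ctx->hdr_tbl becomes an array, so callers walk both tables */
enum hdr_tbl_storage loc;

for (loc = HDR_TBL_LCL; loc < HDR_TBLS_TOTAL; loc++)
	INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl[loc].head_hdr_entry_list);

/* placement decision, as made in __ipa_add_hdr() */
entry->is_lcl = !(entry->is_partial ||
		  hdr->status == IPA_HDR_TO_DDR_PATTERN);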

View File

@@ -3516,6 +3516,7 @@ static int ipa3_setup_exception_path(void)
struct ipa_ioc_add_hdr *hdr;
struct ipa_hdr_add *hdr_entry;
struct ipahal_reg_route route = { 0 };
struct ipa3_hdr_entry *hdr_entry_internal;
int ret;
/* install the basic exception header */
@@ -3543,13 +3544,20 @@ static int ipa3_setup_exception_path(void)
goto bail;
}
hdr_entry_internal = ipa3_id_find(hdr_entry->hdr_hdl);
if (unlikely(!hdr_entry_internal)) {
IPAERR("fail to find internal hdr structure\n");
ret = -EPERM;
goto bail;
}
ipa3_ctx->excp_hdr_hdl = hdr_entry->hdr_hdl;
/* set the route register to pass exception packets to Apps */
route.route_def_pipe = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
route.route_frag_def_pipe = ipa3_get_ep_mapping(
IPA_CLIENT_APPS_LAN_CONS);
route.route_def_hdr_table = !ipa3_ctx->hdr_tbl_lcl;
route.route_def_hdr_table = !hdr_entry_internal->is_lcl;
route.route_def_retain_hdr = 1;
if (ipa3_cfg_route(&route)) {
@@ -6762,9 +6770,8 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
IPADBG("SRAM, size: 0x%x, restricted bytes: 0x%x\n",
ipa3_ctx->smem_sz, ipa3_ctx->smem_restricted_bytes);
IPADBG("hdr_lcl=%u ip4_rt_hash=%u ip4_rt_nonhash=%u\n",
ipa3_ctx->hdr_tbl_lcl, ipa3_ctx->ip4_rt_tbl_hash_lcl,
ipa3_ctx->ip4_rt_tbl_nhash_lcl);
IPADBG("ip4_rt_hash=%u ip4_rt_nonhash=%u\n",
ipa3_ctx->ip4_rt_tbl_hash_lcl, ipa3_ctx->ip4_rt_tbl_nhash_lcl);
IPADBG("ip6_rt_hash=%u ip6_rt_nonhash=%u\n",
ipa3_ctx->ip6_rt_tbl_hash_lcl, ipa3_ctx->ip6_rt_tbl_nhash_lcl);
@@ -7783,6 +7790,7 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
struct ipa3_rt_tbl_set *rset;
struct ipa_active_client_logging_info log_info;
struct cdev *cdev;
enum hdr_tbl_storage hdr_tbl;
IPADBG("IPA Driver initialization started\n");
@@ -8192,11 +8200,13 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
goto fail_rx_pkt_wrapper_cache;
}
/* init the various list heads */
INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_hdr_entry_list);
for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_offset_list[i]);
INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_free_offset_list[i]);
/* Init the various list heads for both SRAM/DDR */
for (hdr_tbl = HDR_TBL_LCL; hdr_tbl < HDR_TBLS_TOTAL; hdr_tbl++) {
INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl[hdr_tbl].head_hdr_entry_list);
for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl[hdr_tbl].head_offset_list[i]);
INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl[hdr_tbl].head_free_offset_list[i]);
}
}
INIT_LIST_HEAD(&ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list);
for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {

View File

@@ -609,50 +609,53 @@ static ssize_t ipa3_read_hdr(struct file *file, char __user *ubuf, size_t count,
int nbytes = 0;
int i = 0;
struct ipa3_hdr_entry *entry;
enum hdr_tbl_storage hdr_tbl;
mutex_lock(&ipa3_ctx->lock);
if (ipa3_ctx->hdr_tbl_lcl)
pr_err("Table resides on local memory\n");
else
pr_err("Table resides on system (ddr) memory\n");
for (hdr_tbl = HDR_TBL_LCL; hdr_tbl < HDR_TBLS_TOTAL; hdr_tbl++) {
if (hdr_tbl == HDR_TBL_LCL)
pr_err("Table on local memory:\n");
else
pr_err("Table on system (ddr) memory:\n");
list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list,
link) {
if (entry->cookie != IPA_HDR_COOKIE)
continue;
nbytes = scnprintf(
dbg_buff,
IPA_MAX_MSG_LEN,
"name:%s len=%d ref=%d partial=%d type=%s ",
entry->name,
entry->hdr_len,
entry->ref_cnt,
entry->is_partial,
ipa3_hdr_l2_type_name[entry->type]);
list_for_each_entry(entry, &ipa3_ctx->hdr_tbl[hdr_tbl].head_hdr_entry_list,
link) {
if (entry->cookie != IPA_HDR_COOKIE)
continue;
nbytes = scnprintf(
dbg_buff,
IPA_MAX_MSG_LEN,
"name:%s len=%d ref=%d partial=%d type=%s ",
entry->name,
entry->hdr_len,
entry->ref_cnt,
entry->is_partial,
ipa3_hdr_l2_type_name[entry->type]);
if (entry->is_hdr_proc_ctx) {
nbytes += scnprintf(
dbg_buff + nbytes,
IPA_MAX_MSG_LEN - nbytes,
"phys_base=0x%pa ",
&entry->phys_base);
} else {
nbytes += scnprintf(
dbg_buff + nbytes,
IPA_MAX_MSG_LEN - nbytes,
"ofst=%u ",
entry->offset_entry->offset >> 2);
if (entry->is_hdr_proc_ctx) {
nbytes += scnprintf(
dbg_buff + nbytes,
IPA_MAX_MSG_LEN - nbytes,
"phys_base=0x%pa ",
&entry->phys_base);
} else {
nbytes += scnprintf(
dbg_buff + nbytes,
IPA_MAX_MSG_LEN - nbytes,
"ofst=%u ",
entry->offset_entry->offset >> 2);
}
for (i = 0; i < entry->hdr_len; i++) {
scnprintf(dbg_buff + nbytes + i * 2,
IPA_MAX_MSG_LEN - nbytes - i * 2,
"%02x", entry->hdr[i]);
}
scnprintf(dbg_buff + nbytes + entry->hdr_len * 2,
IPA_MAX_MSG_LEN - nbytes - entry->hdr_len * 2,
"\n");
pr_err("%s", dbg_buff);
}
for (i = 0; i < entry->hdr_len; i++) {
scnprintf(dbg_buff + nbytes + i * 2,
IPA_MAX_MSG_LEN - nbytes - i * 2,
"%02x", entry->hdr[i]);
}
scnprintf(dbg_buff + nbytes + entry->hdr_len * 2,
IPA_MAX_MSG_LEN - nbytes - entry->hdr_len * 2,
"\n");
pr_err("%s", dbg_buff);
}
mutex_unlock(&ipa3_ctx->lock);
@@ -980,7 +983,7 @@ static ssize_t ipa3_read_rt(struct file *file, char __user *ubuf, size_t count,
pr_err("rule_idx:%d dst:%d ep:%d S:%u ",
i, entry->rule.dst,
ipa3_get_ep_mapping(entry->rule.dst),
!ipa3_ctx->hdr_tbl_lcl);
!(entry->hdr && entry->hdr->is_lcl));
pr_err("hdr_ofst[words]:%u attrib_mask:%08x ",
ofst >> 2,
entry->rule.attrib.attrib_mask);

View File

@@ -17,22 +17,23 @@ static const u32 ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN_MAX] = { 32, 64};
/**
* ipa3_generate_hdr_hw_tbl() - generates the headers table
* @loc: [in] storage type of the header table buffer (local or system)
* @mem: [out] buffer to put the header table
*
* Returns: 0 on success, negative on failure
*/
static int ipa3_generate_hdr_hw_tbl(struct ipa_mem_buffer *mem)
static int ipa3_generate_hdr_hw_tbl(enum hdr_tbl_storage loc, struct ipa_mem_buffer *mem)
{
struct ipa3_hdr_entry *entry;
gfp_t flag = GFP_KERNEL;
mem->size = ipa3_ctx->hdr_tbl.end;
mem->size = (ipa3_ctx->hdr_tbl[loc].end) ? ipa3_ctx->hdr_tbl[loc].end : ipa_hdr_bin_sz[0];
if (mem->size == 0) {
IPAERR("hdr tbl empty\n");
IPAERR("%s hdr tbl empty\n", loc == HDR_TBL_LCL ? "SRAM" : "DDR");
return -EPERM;
}
IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_tbl.end);
IPADBG_LOW("tbl_sz=%d\n", mem->size);
alloc:
mem->base = dma_alloc_coherent(ipa3_ctx->pdev, mem->size,
@@ -46,7 +47,7 @@ alloc:
return -ENOMEM;
}
list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list,
list_for_each_entry(entry, &ipa3_ctx->hdr_tbl[loc].head_hdr_entry_list,
link) {
if (entry->is_hdr_proc_ctx)
continue;
@@ -60,7 +61,7 @@ alloc:
}
static int ipa3_hdr_proc_ctx_to_hw_format(struct ipa_mem_buffer *mem,
u64 hdr_base_addr)
u64 hdr_sys_addr)
{
struct ipa3_hdr_proc_ctx_entry *entry;
int ret;
@@ -96,7 +97,7 @@ static int ipa3_hdr_proc_ctx_to_hw_format(struct ipa_mem_buffer *mem,
entry->hdr->hdr_len,
entry->hdr->is_hdr_proc_ctx,
entry->hdr->phys_base,
hdr_base_addr,
(entry->hdr->is_lcl) ? IPA_MEM_PART(apps_hdr_ofst) : hdr_sys_addr,
entry->hdr->offset_entry,
&entry->l2tp_params,
&entry->generic_params,
@@ -120,7 +121,6 @@ static int ipa3_hdr_proc_ctx_to_hw_format(struct ipa_mem_buffer *mem,
static int ipa3_generate_hdr_proc_ctx_hw_tbl(u64 hdr_sys_addr,
struct ipa_mem_buffer *mem, struct ipa_mem_buffer *aligned_mem)
{
u64 hdr_base_addr;
gfp_t flag = GFP_KERNEL;
mem->size = (ipa3_ctx->hdr_proc_ctx_tbl.end) ? : 4;
@@ -148,9 +148,7 @@ alloc:
(aligned_mem->phys_base - mem->phys_base);
aligned_mem->size = mem->size - IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE;
memset(aligned_mem->base, 0, aligned_mem->size);
hdr_base_addr = (ipa3_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_ofst) :
hdr_sys_addr;
return ipa3_hdr_proc_ctx_to_hw_format(aligned_mem, hdr_base_addr);
return ipa3_hdr_proc_ctx_to_hw_format(aligned_mem, hdr_sys_addr);
}
/**
@@ -160,37 +158,46 @@ alloc:
*/
int __ipa_commit_hdr_v3_0(void)
{
struct ipa3_desc desc[3];
struct ipa_mem_buffer hdr_mem;
struct ipa3_desc desc[4];
struct ipa_mem_buffer hdr_mem[HDR_TBLS_TOTAL] = {0};
struct ipa_mem_buffer ctx_mem;
struct ipa_mem_buffer aligned_ctx_mem;
struct ipahal_imm_cmd_dma_shared_mem dma_cmd_hdr = {0};
struct ipahal_imm_cmd_dma_shared_mem dma_cmd_ctx = {0};
struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
struct ipahal_imm_cmd_hdr_init_system hdr_init_cmd = {0};
struct ipahal_imm_cmd_pyld *hdr_cmd_pyld = NULL;
struct ipahal_imm_cmd_pyld *hdr_cmd_pyld[HDR_TBLS_TOTAL] = {0};
struct ipahal_imm_cmd_pyld *ctx_cmd_pyld = NULL;
struct ipahal_imm_cmd_pyld *coal_cmd_pyld = NULL;
int rc = -EFAULT;
int i;
int num_cmd = 0;
u32 proc_ctx_size;
u32 hdr_tbl_size, proc_ctx_size;
u32 proc_ctx_ofst;
u32 proc_ctx_size_ddr;
struct ipahal_imm_cmd_register_write reg_write_coal_close;
struct ipahal_reg_valmask valmask;
enum hdr_tbl_storage loc;
memset(desc, 0, 3 * sizeof(struct ipa3_desc));
if (ipa3_generate_hdr_hw_tbl(&hdr_mem)) {
IPAERR("fail to generate HDR HW TBL\n");
goto end;
}
/* Generate structures for both SRAM and DDR header tables */
for (loc = HDR_TBL_LCL; loc < HDR_TBLS_TOTAL; loc++) {
if (ipa3_generate_hdr_hw_tbl(loc, &hdr_mem[loc])) {
IPAERR("fail to generate %s HDR HW TBL\n",
loc == HDR_TBL_LCL ? "SRAM" : "DDR");
goto end;
}
if (ipa3_generate_hdr_proc_ctx_hw_tbl(hdr_mem.phys_base, &ctx_mem,
&aligned_ctx_mem)) {
IPAERR("fail to generate HDR PROC CTX HW TBL\n");
goto end;
hdr_tbl_size = (loc == HDR_TBL_LCL) ?
IPA_MEM_PART(apps_hdr_size) : IPA_MEM_PART(apps_hdr_size_ddr);
if (hdr_mem[loc].size > hdr_tbl_size) {
IPAERR("%s HDR tbl too big needed %d avail %d\n",
loc == HDR_TBL_LCL ? "SRAM" : "DDR",
hdr_mem[loc].size, hdr_tbl_size);
goto free_dma;
}
}
/* IC to close the coal frame before HPS Clear if coal is enabled */
@@ -221,47 +228,51 @@ int __ipa_commit_hdr_v3_0(void)
++num_cmd;
}
if (ipa3_ctx->hdr_tbl_lcl) {
if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size)) {
IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size,
IPA_MEM_PART(apps_hdr_size));
goto end;
} else {
dma_cmd_hdr.is_read = false; /* write operation */
dma_cmd_hdr.skip_pipeline_clear = false;
dma_cmd_hdr.pipeline_clear_options = IPAHAL_HPS_CLEAR;
dma_cmd_hdr.system_addr = hdr_mem.phys_base;
dma_cmd_hdr.size = hdr_mem.size;
dma_cmd_hdr.local_addr =
ipa3_ctx->smem_restricted_bytes +
IPA_MEM_PART(apps_hdr_ofst);
hdr_cmd_pyld = ipahal_construct_imm_cmd(
IPA_IMM_CMD_DMA_SHARED_MEM,
&dma_cmd_hdr, false);
if (!hdr_cmd_pyld) {
IPAERR("fail construct dma_shared_mem cmd\n");
goto end;
}
}
} else {
if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size_ddr)) {
IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size,
IPA_MEM_PART(apps_hdr_size_ddr));
goto end;
} else {
hdr_init_cmd.hdr_table_addr = hdr_mem.phys_base;
hdr_cmd_pyld = ipahal_construct_imm_cmd(
IPA_IMM_CMD_HDR_INIT_SYSTEM,
&hdr_init_cmd, false);
if (!hdr_cmd_pyld) {
IPAERR("fail construct hdr_init_system cmd\n");
goto end;
}
}
/* Local (SRAM) header table configuration */
dma_cmd_hdr.is_read = false; /* write operation */
dma_cmd_hdr.skip_pipeline_clear = false;
dma_cmd_hdr.pipeline_clear_options = IPAHAL_HPS_CLEAR;
dma_cmd_hdr.system_addr = hdr_mem[HDR_TBL_LCL].phys_base;
dma_cmd_hdr.size = hdr_mem[HDR_TBL_LCL].size;
dma_cmd_hdr.local_addr =
ipa3_ctx->smem_restricted_bytes +
IPA_MEM_PART(apps_hdr_ofst);
hdr_cmd_pyld[HDR_TBL_LCL] = ipahal_construct_imm_cmd(IPA_IMM_CMD_DMA_SHARED_MEM,
&dma_cmd_hdr, false);
if (!hdr_cmd_pyld[HDR_TBL_LCL]) {
IPAERR("fail construct dma_shared_mem cmd\n");
goto end;
}
ipa3_init_imm_cmd_desc(&desc[num_cmd], hdr_cmd_pyld);
ipa3_init_imm_cmd_desc(&desc[num_cmd], hdr_cmd_pyld[HDR_TBL_LCL]);
++num_cmd;
IPA_DUMP_BUFF(hdr_mem.base, hdr_mem.phys_base, hdr_mem.size);
IPA_DUMP_BUFF(hdr_mem[HDR_TBL_LCL].base,
hdr_mem[HDR_TBL_LCL].phys_base,
hdr_mem[HDR_TBL_LCL].size);
/* System (DDR) header table configuration */
hdr_init_cmd.hdr_table_addr = hdr_mem[HDR_TBL_SYS].phys_base;
hdr_cmd_pyld[HDR_TBL_SYS] = ipahal_construct_imm_cmd(IPA_IMM_CMD_HDR_INIT_SYSTEM,
&hdr_init_cmd, false);
if (!hdr_cmd_pyld[HDR_TBL_SYS]) {
IPAERR("fail construct hdr_init_system cmd\n");
goto free_dma;
}
ipa3_init_imm_cmd_desc(&desc[num_cmd], hdr_cmd_pyld[HDR_TBL_SYS]);
++num_cmd;
IPA_DUMP_BUFF(hdr_mem[HDR_TBL_SYS].base,
hdr_mem[HDR_TBL_SYS].phys_base,
hdr_mem[HDR_TBL_SYS].size);
/* The header memory passed to the HPC here is DDR (system),
but the actual header base will be determined later for each header */
if (ipa3_generate_hdr_proc_ctx_hw_tbl(hdr_mem[HDR_TBL_SYS].phys_base,
&ctx_mem,
&aligned_ctx_mem)) {
IPAERR("fail to generate HDR PROC CTX HW TBL\n");
goto end;
}
proc_ctx_size = IPA_MEM_PART(apps_hdr_proc_ctx_size);
proc_ctx_ofst = IPA_MEM_PART(apps_hdr_proc_ctx_ofst);
@@ -323,18 +334,14 @@ int __ipa_commit_hdr_v3_0(void)
else
rc = 0;
if (ipa3_ctx->hdr_tbl_lcl) {
dma_free_coherent(ipa3_ctx->pdev, hdr_mem.size, hdr_mem.base,
hdr_mem.phys_base);
} else {
if (!rc) {
if (ipa3_ctx->hdr_mem.phys_base)
dma_free_coherent(ipa3_ctx->pdev,
ipa3_ctx->hdr_mem.size,
ipa3_ctx->hdr_mem.base,
ipa3_ctx->hdr_mem.phys_base);
ipa3_ctx->hdr_mem = hdr_mem;
if (!rc && hdr_mem[HDR_TBL_SYS].base) {
if (ipa3_ctx->hdr_sys_mem.phys_base) {
dma_free_coherent(ipa3_ctx->pdev,
ipa3_ctx->hdr_sys_mem.size,
ipa3_ctx->hdr_sys_mem.base,
ipa3_ctx->hdr_sys_mem.phys_base);
}
ipa3_ctx->hdr_sys_mem = hdr_mem[HDR_TBL_SYS];
}
if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) {
@@ -350,16 +357,35 @@ int __ipa_commit_hdr_v3_0(void)
ipa3_ctx->hdr_proc_ctx_mem = ctx_mem;
}
}
goto end;
free_dma:
if (hdr_mem[HDR_TBL_SYS].base) {
dma_free_coherent(ipa3_ctx->pdev,
hdr_mem[HDR_TBL_SYS].size,
hdr_mem[HDR_TBL_SYS].base,
hdr_mem[HDR_TBL_SYS].phys_base);
}
end:
if (hdr_mem[HDR_TBL_LCL].base) {
dma_free_coherent(ipa3_ctx->pdev,
hdr_mem[HDR_TBL_LCL].size,
hdr_mem[HDR_TBL_LCL].base,
hdr_mem[HDR_TBL_LCL].phys_base);
}
if (coal_cmd_pyld)
ipahal_destroy_imm_cmd(coal_cmd_pyld);
if (ctx_cmd_pyld)
ipahal_destroy_imm_cmd(ctx_cmd_pyld);
if (hdr_cmd_pyld)
ipahal_destroy_imm_cmd(hdr_cmd_pyld);
if (hdr_cmd_pyld[HDR_TBL_SYS])
ipahal_destroy_imm_cmd(hdr_cmd_pyld[HDR_TBL_SYS]);
if (hdr_cmd_pyld[HDR_TBL_LCL])
ipahal_destroy_imm_cmd(hdr_cmd_pyld[HDR_TBL_LCL]);
return rc;
}
@@ -504,7 +530,7 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr, bool user)
struct ipa3_hdr_entry *entry;
struct ipa_hdr_offset_entry *offset = NULL;
u32 bin;
struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;
struct ipa3_hdr_tbl *htbl;
int id;
int mem_size;
@@ -533,6 +559,8 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr, bool user)
entry->eth2_ofst = hdr->eth2_ofst;
entry->cookie = IPA_HDR_COOKIE;
entry->ipacm_installed = user;
entry->is_hdr_proc_ctx = false;
entry->is_lcl = (entry->is_partial || (hdr->status == IPA_HDR_TO_DDR_PATTERN)) ? false : true;
if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0])
bin = IPA_HDR_BIN0;
@@ -553,45 +581,41 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr, bool user)
goto bad_hdr_len;
}
mem_size = (ipa3_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_size) :
IPA_MEM_PART(apps_hdr_size_ddr);
htbl = entry->is_lcl ? &ipa3_ctx->hdr_tbl[HDR_TBL_LCL] : &ipa3_ctx->hdr_tbl[HDR_TBL_SYS];
mem_size = entry->is_lcl ? IPA_MEM_PART(apps_hdr_size) : IPA_MEM_PART(apps_hdr_size_ddr);
if (list_empty(&htbl->head_free_offset_list[bin])) {
/* if header does not fit to table, place it in DDR */
if (htbl->end + ipa_hdr_bin_sz[bin] > mem_size) {
entry->is_hdr_proc_ctx = true;
entry->phys_base = dma_map_single(ipa3_ctx->pdev,
entry->hdr,
entry->hdr_len,
DMA_TO_DEVICE);
if (dma_mapping_error(ipa3_ctx->pdev,
entry->phys_base)) {
IPAERR("dma_map_single failure for entry\n");
goto fail_dma_mapping;
}
} else {
entry->is_hdr_proc_ctx = false;
offset = kmem_cache_zalloc(ipa3_ctx->hdr_offset_cache,
GFP_KERNEL);
if (!offset) {
IPAERR("failed to alloc hdr offset object\n");
if (entry->is_lcl) {
htbl = &ipa3_ctx->hdr_tbl[HDR_TBL_SYS];
mem_size = IPA_MEM_PART(apps_hdr_size_ddr);
entry->is_lcl = false;
} else {
IPAERR("No space in DDR header buffer! Requested: %d Left: %d\n",
ipa_hdr_bin_sz[bin], mem_size - htbl->end);
goto bad_hdr_len;
}
INIT_LIST_HEAD(&offset->link);
/*
* for a first item grow, set the bin and offset which
* are set in stone
*/
offset->offset = htbl->end;
offset->bin = bin;
htbl->end += ipa_hdr_bin_sz[bin];
list_add(&offset->link,
&htbl->head_offset_list[bin]);
entry->offset_entry = offset;
offset->ipacm_installed = user;
}
offset = kmem_cache_zalloc(ipa3_ctx->hdr_offset_cache,
GFP_KERNEL);
if (!offset) {
IPAERR("failed to alloc hdr offset object\n");
goto bad_hdr_len;
}
INIT_LIST_HEAD(&offset->link);
/*
* for a first item grow, set the bin and offset which
* are set in stone
*/
offset->offset = htbl->end;
offset->bin = bin;
htbl->end += ipa_hdr_bin_sz[bin];
list_add(&offset->link,
&htbl->head_offset_list[bin]);
entry->offset_entry = offset;
offset->ipacm_installed = user;
} else {
entry->is_hdr_proc_ctx = false;
/* get the first free slot */
offset = list_first_entry(&htbl->head_free_offset_list[bin],
struct ipa_hdr_offset_entry, link);
@@ -602,16 +626,11 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr, bool user)
list_add(&entry->link, &htbl->head_hdr_entry_list);
htbl->hdr_cnt++;
if (entry->is_hdr_proc_ctx)
IPADBG("add hdr of sz=%d hdr_cnt=%d phys_base=%pa\n",
IPADBG("add hdr of sz=%d hdr_cnt=%d ofst=%d to %s table\n",
hdr->hdr_len,
htbl->hdr_cnt,
&entry->phys_base);
else
IPADBG("add hdr of sz=%d hdr_cnt=%d ofst=%d\n",
hdr->hdr_len,
htbl->hdr_cnt,
entry->offset_entry->offset);
entry->offset_entry->offset,
entry->is_lcl ? "SRAM" : "DDR");
id = ipa3_id_alloc(entry);
if (id < 0) {
@@ -623,41 +642,16 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr, bool user)
hdr->hdr_hdl = id;
entry->ref_cnt++;
if (entry->is_hdr_proc_ctx) {
struct ipa_hdr_proc_ctx_add proc_ctx;
IPADBG("adding processing context for header %s\n", hdr->name);
proc_ctx.type = IPA_HDR_PROC_NONE;
proc_ctx.hdr_hdl = id;
if (__ipa_add_hdr_proc_ctx(&proc_ctx, false, user)) {
IPAERR("failed to add hdr proc ctx\n");
goto fail_add_proc_ctx;
}
entry->proc_ctx = ipa3_id_find(proc_ctx.proc_ctx_hdl);
}
return 0;
fail_add_proc_ctx:
entry->ref_cnt--;
hdr->hdr_hdl = 0;
ipa3_id_remove(id);
ipa_insert_failed:
if (entry->is_hdr_proc_ctx) {
dma_unmap_single(ipa3_ctx->pdev, entry->phys_base,
entry->hdr_len, DMA_TO_DEVICE);
} else {
if (offset)
list_move(&offset->link,
&htbl->head_free_offset_list[offset->bin]);
entry->offset_entry = NULL;
}
if (offset)
list_move(&offset->link,
&htbl->head_free_offset_list[offset->bin]);
entry->offset_entry = NULL;
htbl->hdr_cnt--;
list_del(&entry->link);
fail_dma_mapping:
entry->is_hdr_proc_ctx = false;
bad_hdr_len:
entry->cookie = 0;
kmem_cache_free(ipa3_ctx->hdr_cache, entry);
@@ -714,7 +708,7 @@ static int __ipa3_del_hdr_proc_ctx(u32 proc_ctx_hdl,
int __ipa3_del_hdr(u32 hdr_hdl, bool by_user)
{
struct ipa3_hdr_entry *entry;
struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;
struct ipa3_hdr_tbl *htbl;
entry = ipa3_id_find(hdr_hdl);
if (entry == NULL) {
@@ -727,6 +721,8 @@ int __ipa3_del_hdr(u32 hdr_hdl, bool by_user)
return -EINVAL;
}
htbl = entry->is_lcl ? &ipa3_ctx->hdr_tbl[HDR_TBL_LCL] : &ipa3_ctx->hdr_tbl[HDR_TBL_SYS];
if (entry->is_hdr_proc_ctx)
IPADBG("del hdr of len=%d hdr_cnt=%d phys_base=%pa\n",
entry->hdr_len, htbl->hdr_cnt, &entry->phys_base);
@@ -1053,8 +1049,8 @@ int ipa3_reset_hdr(bool user_only)
struct ipa_hdr_offset_entry *off_next;
struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_entry;
struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_next;
struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;
struct ipa3_hdr_proc_ctx_tbl *htbl_proc = &ipa3_ctx->hdr_proc_ctx_tbl;
enum hdr_tbl_storage hdr_tbl_loc;
int i;
/*
@@ -1068,83 +1064,85 @@ int ipa3_reset_hdr(bool user_only)
mutex_lock(&ipa3_ctx->lock);
IPADBG("reset hdr\n");
list_for_each_entry_safe(entry, next,
&ipa3_ctx->hdr_tbl.head_hdr_entry_list, link) {
for (hdr_tbl_loc = HDR_TBL_LCL; hdr_tbl_loc < HDR_TBLS_TOTAL; hdr_tbl_loc++) {
list_for_each_entry_safe(entry, next,
&ipa3_ctx->hdr_tbl[hdr_tbl_loc].head_hdr_entry_list, link) {
/* do not remove the default header */
if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) {
IPADBG("Trying to remove hdr %s offset=%u\n",
entry->name, entry->offset_entry->offset);
if (!entry->offset_entry->offset) {
if (entry->is_hdr_proc_ctx) {
IPAERR("default header is proc ctx\n");
mutex_unlock(&ipa3_ctx->lock);
WARN_ON_RATELIMIT_IPA(1);
return -EFAULT;
}
IPADBG("skip default header\n");
continue;
}
}
if (ipa3_id_find(entry->id) == NULL) {
mutex_unlock(&ipa3_ctx->lock);
WARN_ON_RATELIMIT_IPA(1);
return -EFAULT;
}
if (!user_only || entry->ipacm_installed) {
if (entry->is_hdr_proc_ctx) {
dma_unmap_single(ipa3_ctx->pdev,
entry->phys_base,
entry->hdr_len,
DMA_TO_DEVICE);
entry->proc_ctx = NULL;
} else {
/* move the offset entry to free list */
entry->offset_entry->ipacm_installed = false;
list_move(&entry->offset_entry->link,
&htbl->head_free_offset_list[
entry->offset_entry->bin]);
}
list_del(&entry->link);
htbl->hdr_cnt--;
entry->ref_cnt = 0;
entry->cookie = 0;
/* remove the handle from the database */
ipa3_id_remove(entry->id);
kmem_cache_free(ipa3_ctx->hdr_cache, entry);
}
}
/* only clean up offset_list and free_offset_list on global reset */
if (!user_only) {
for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
list_for_each_entry_safe(off_entry, off_next,
&ipa3_ctx->hdr_tbl.head_offset_list[i],
link) {
/**
* do not remove the default exception
* header which is at offset 0
*/
if (off_entry->offset == 0)
/* do not remove the default header */
if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) {
IPADBG("Trying to remove hdr %s offset=%u\n",
entry->name, entry->offset_entry->offset);
if (!entry->offset_entry->offset) {
if (entry->is_hdr_proc_ctx) {
IPAERR("default header is proc ctx\n");
mutex_unlock(&ipa3_ctx->lock);
WARN_ON_RATELIMIT_IPA(1);
return -EFAULT;
}
IPADBG("skip default header\n");
continue;
list_del(&off_entry->link);
kmem_cache_free(ipa3_ctx->hdr_offset_cache,
off_entry);
}
}
list_for_each_entry_safe(off_entry, off_next,
&ipa3_ctx->hdr_tbl.head_free_offset_list[i],
link) {
list_del(&off_entry->link);
kmem_cache_free(ipa3_ctx->hdr_offset_cache,
off_entry);
if (ipa3_id_find(entry->id) == NULL) {
mutex_unlock(&ipa3_ctx->lock);
WARN_ON_RATELIMIT_IPA(1);
return -EFAULT;
}
if (!user_only || entry->ipacm_installed) {
if (entry->is_hdr_proc_ctx) {
dma_unmap_single(ipa3_ctx->pdev,
entry->phys_base,
entry->hdr_len,
DMA_TO_DEVICE);
entry->proc_ctx = NULL;
} else {
/* move the offset entry to free list */
entry->offset_entry->ipacm_installed = false;
list_move(&entry->offset_entry->link,
&ipa3_ctx->hdr_tbl[hdr_tbl_loc].head_free_offset_list[
entry->offset_entry->bin]);
}
list_del(&entry->link);
ipa3_ctx->hdr_tbl[hdr_tbl_loc].hdr_cnt--;
entry->ref_cnt = 0;
entry->cookie = 0;
/* remove the handle from the database */
ipa3_id_remove(entry->id);
kmem_cache_free(ipa3_ctx->hdr_cache, entry);
}
}
/* there is one header of size 8 */
ipa3_ctx->hdr_tbl.end = 8;
ipa3_ctx->hdr_tbl.hdr_cnt = 1;
/* only clean up offset_list and free_offset_list on global reset */
if (!user_only) {
for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
list_for_each_entry_safe(off_entry, off_next,
&ipa3_ctx->hdr_tbl[hdr_tbl_loc].head_offset_list[i],
link) {
/**
* do not remove the default exception
* header which is at offset 0
*/
if (off_entry->offset == 0)
continue;
list_del(&off_entry->link);
kmem_cache_free(ipa3_ctx->hdr_offset_cache,
off_entry);
}
list_for_each_entry_safe(off_entry, off_next,
&ipa3_ctx->hdr_tbl[hdr_tbl_loc].head_free_offset_list[i],
link) {
list_del(&off_entry->link);
kmem_cache_free(ipa3_ctx->hdr_offset_cache,
off_entry);
}
}
/* there is one header of size 8 */
ipa3_ctx->hdr_tbl[hdr_tbl_loc].end = 8;
ipa3_ctx->hdr_tbl[hdr_tbl_loc].hdr_cnt = 1;
}
}
IPADBG("reset hdr proc ctx\n");
@@ -1214,16 +1212,19 @@ int ipa3_reset_hdr(bool user_only)
static struct ipa3_hdr_entry *__ipa_find_hdr(const char *name)
{
struct ipa3_hdr_entry *entry;
enum hdr_tbl_storage hdr_tbl_loc;
if (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
IPAERR_RL("Header name too long: %s\n", name);
return NULL;
}
list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list,
link) {
if (!strcmp(name, entry->name))
return entry;
for (hdr_tbl_loc = HDR_TBL_LCL; hdr_tbl_loc < HDR_TBLS_TOTAL; hdr_tbl_loc++) {
list_for_each_entry(entry,
&ipa3_ctx->hdr_tbl[hdr_tbl_loc].head_hdr_entry_list,
link) {
if (!strcmp(name, entry->name))
return entry;
}
}
return NULL;

View File

@@ -209,6 +209,14 @@ enum {
#define IPA_HDR_BIN5 5
#define IPA_HDR_BIN_MAX 6
enum hdr_tbl_storage {
HDR_TBL_LCL,
HDR_TBL_SYS,
HDR_TBLS_TOTAL,
};
#define IPA_HDR_TO_DDR_PATTERN 0x2DDA
#define IPA_HDR_PROC_CTX_BIN0 0
#define IPA_HDR_PROC_CTX_BIN1 1
#define IPA_HDR_PROC_CTX_BIN_MAX 2
@@ -743,6 +751,7 @@ struct ipa3_rt_tbl {
* @eth2_ofst: offset to start of Ethernet-II/802.3 header
* @user_deleted: is the header deleted by the user?
* @ipacm_installed: indicate if installed by ipacm
* @is_lcl: is the entry in the SRAM?
*/
struct ipa3_hdr_entry {
struct list_head link;
@@ -762,6 +771,7 @@ struct ipa3_hdr_entry {
u16 eth2_ofst;
bool user_deleted;
bool ipacm_installed;
bool is_lcl;
};
/**
@@ -1969,7 +1979,6 @@ struct ipa3_eth_error_stats {
* @aggregation_type: aggregation type used on USB client endpoint
* @aggregation_byte_limit: aggregation byte limit used on USB client endpoint
* @aggregation_time_limit: aggregation time limit used on USB client endpoint
* @hdr_tbl_lcl: where hdr tbl resides 1-local, 0-system
* @hdr_proc_ctx_tbl_lcl: where proc_ctx tbl resides true-local, false-system
* @hdr_mem: header memory
* @hdr_proc_ctx_mem: processing context memory
@@ -2053,7 +2062,7 @@ struct ipa3_context {
u32 ipa_wrapper_base;
u32 ipa_wrapper_size;
u32 ipa_cfg_offset;
struct ipa3_hdr_tbl hdr_tbl;
struct ipa3_hdr_tbl hdr_tbl[HDR_TBLS_TOTAL];
struct ipa3_hdr_proc_ctx_tbl hdr_proc_ctx_tbl;
struct ipa3_rt_tbl_set rt_tbl_set[IPA_IP_MAX];
struct ipa3_rt_tbl_set reap_rt_tbl_set[IPA_IP_MAX];
@@ -2079,9 +2088,8 @@ struct ipa3_context {
uint aggregation_type;
uint aggregation_byte_limit;
uint aggregation_time_limit;
bool hdr_tbl_lcl;
bool hdr_proc_ctx_tbl_lcl;
struct ipa_mem_buffer hdr_mem;
struct ipa_mem_buffer hdr_sys_mem;
struct ipa_mem_buffer hdr_proc_ctx_mem;
bool ip4_rt_tbl_hash_lcl;
bool ip4_rt_tbl_nhash_lcl;

View File

@@ -111,9 +111,10 @@ static int ipa_generate_rt_hw_rule(enum ipa_ip_type ip,
}
} else if ((entry->hdr != NULL) &&
(entry->hdr->cookie == IPA_HDR_COOKIE)) {
gen_params.hdr_lcl = ipa3_ctx->hdr_tbl_lcl;
gen_params.hdr_type = IPAHAL_RT_RULE_HDR_RAW;
gen_params.hdr_ofst = entry->hdr->offset_entry->offset;
gen_params.hdr_ofst += entry->hdr->is_lcl ? IPA_MEM_PART(modem_hdr_size) : 0;
gen_params.hdr_lcl = entry->hdr->is_lcl;
} else {
gen_params.hdr_type = IPAHAL_RT_RULE_HDR_NONE;
gen_params.hdr_ofst = 0;

View File

@@ -6448,7 +6448,6 @@ void _ipa_sram_settings_read_v3_0(void)
ipa3_ctx->smem_restricted_bytes *= 8;
ipa3_ctx->smem_sz *= 8;
ipa3_ctx->smem_reqd_sz = IPA_MEM_PART(end_ofst);
ipa3_ctx->hdr_tbl_lcl = false;
ipa3_ctx->hdr_proc_ctx_tbl_lcl = true;
/*

View File

@@ -236,6 +236,7 @@ static int ipa3_setup_a7_qmap_hdr(void)
hdr->num_hdrs = 1;
hdr->commit = 1;
hdr_entry = &hdr->hdr[0];
hdr_entry->status = IPA_HDR_TO_DDR_PATTERN;
strlcpy(hdr_entry->name, IPA_A7_QMAP_HDR_NAME,
IPA_RESOURCE_NAME_MAX);
@@ -369,6 +370,7 @@ static int ipa3_add_qmap_hdr(uint32_t mux_id, uint32_t *hdr_hdl)
hdr->num_hdrs = 1;
hdr->commit = 1;
hdr_entry = &hdr->hdr[0];
hdr_entry->status = IPA_HDR_TO_DDR_PATTERN;
snprintf(hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
A2_MUX_HDR_NAME_V4_PREF,