Merge "msm: ipa3: update ipa smmu cb mappings based on EP configurations"

Author: qctecmdr
Date: 2024-04-18 21:20:54 -07:00
Committed by: Gerrit - the friendly Code Review server

2 changed files with 29 additions and 12 deletions


@@ -8275,10 +8275,12 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
 	/* uC is getting loaded through XBL here */
 	ipa3_ctx->uc_ctx.uc_inited = true;
 	ipa3_ctx->uc_ctx.uc_loaded = true;
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
 	result = ipa3_alloc_temp_buffs_to_uc(TEMP_BUFF_SIZE, NO_OF_BUFFS);
 	if (result) {
 		IPAERR("Temp buffer allocations for uC failed %d\n", result);
 		result = -ENODEV;
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 		goto fail_teth_bridge_driver_init;
 	}
@@ -8287,6 +8289,7 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
 		IPAERR("ER and TR allocations for uC pipes failed %d\n", result);
 		ipa3_free_uc_temp_buffs(NO_OF_BUFFS);
 		result = -ENODEV;
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 		goto fail_teth_bridge_driver_init;
 	}
@@ -8296,8 +8299,10 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
 		ipa3_free_uc_temp_buffs(NO_OF_BUFFS);
 		ipa3_free_uc_pipes_er_tr();
 		result = -ENODEV;
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 		goto fail_teth_bridge_driver_init;
 	}
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 }
 #endif
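
Review note: across the three hunks above, the change takes an IPA active-clients vote before the uC temp-buffer and ring allocations and drops it on every error path as well as on success, so the votes stay balanced. Below is a minimal standalone sketch of that balanced inc/dec pattern; the counter, the macros, and the alloc_step() helper are illustrative stand-ins, not the driver's implementation.

#include <stdio.h>

static int vote_count;

/* Stand-ins modelling IPA_ACTIVE_CLIENTS_INC_SIMPLE()/DEC_SIMPLE() */
#define ACTIVE_CLIENTS_INC() (vote_count++)
#define ACTIVE_CLIENTS_DEC() (vote_count--)

/* Hypothetical allocation step that can be forced to fail */
static int alloc_step(int fail) { return fail ? -1 : 0; }

static int post_init_sketch(int fail_at)
{
	ACTIVE_CLIENTS_INC();          /* vote before touching uC resources */

	if (alloc_step(fail_at == 1)) {
		ACTIVE_CLIENTS_DEC();  /* every error path must unvote */
		return -1;
	}
	if (alloc_step(fail_at == 2)) {
		/* free whatever step 1 allocated here, then unvote */
		ACTIVE_CLIENTS_DEC();
		return -1;
	}

	ACTIVE_CLIENTS_DEC();          /* success path unvotes too */
	return 0;
}

int main(void)
{
	printf("ok:   rc=%d votes=%d\n", post_init_sketch(0), vote_count);
	printf("fail: rc=%d votes=%d\n", post_init_sketch(2), vote_count);
	return 0;
}

Whichever path runs, vote_count returns to zero; that is the invariant the added DEC_SIMPLE() calls preserve.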


@@ -483,7 +483,7 @@ static int ipa3_uc_setup_prod_pipe_transfer_ring(
 	}
 	ring.size = sizeof(struct prod_pipe_tre) * IPA_UC_PROD_TRANSFER_RING_SIZE;
-	ring.base = dma_alloc_coherent(ipa3_ctx->pdev, ring.size,
+	ring.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, ring.size,
 		&ring.phys_base, GFP_KERNEL);
 	if (ring.base == NULL) {
 		IPAERR("dma alloc coherent failed.\n");
@@ -521,7 +521,7 @@ static int ipa3_uc_setup_prod_pipe_event_ring(
 	}
 	ring.size = sizeof(struct prod_pipe_tre) * IPA_UC_PROD_EVENT_RING_SIZE;
-	ring.base = dma_alloc_coherent(ipa3_ctx->pdev, ring.size,
+	ring.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, ring.size,
 		&ring.phys_base, GFP_KERNEL);
 	if (ring.base == NULL) {
 		IPAERR("dma alloc coherent failed.\n");
@@ -546,7 +546,7 @@ static int ipa3_uc_setup_con_pipe_transfer_ring(
 	}
 	ring.size = sizeof(struct con_pipe_tre) * IPA_UC_CON_TRANSFER_RING_SIZE;
-	ring.base = dma_alloc_coherent(ipa3_ctx->pdev, ring.size,
+	ring.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, ring.size,
 		&ring.phys_base, GFP_KERNEL);
 	if (ring.base == NULL) {
 		IPAERR("dma alloc coherent failed.\n");
@@ -566,20 +566,32 @@ void ipa3_free_uc_pipes_er_tr(void)
 	for (index = 0; index < er_tr_cpu_addresses.no_buffs; index++) {
 		if (index < MAX_UC_PROD_PIPES_TR_INDEX) {
-			dma_free_coherent(ipa3_ctx->pdev,
+			dma_free_coherent(ipa3_ctx->uc_pdev,
 			er_tr_cpu_addresses.rtp_tr_er.uc_prod_tr[index].temp_buff_size,
 			er_tr_cpu_addresses.cpu_address[index],
 			er_tr_cpu_addresses.rtp_tr_er.uc_prod_tr[index].temp_buff_pa);
-		} else if (index < MAX_UC_PROD_PIPES_ER_INDEX) {
-			dma_free_coherent(ipa3_ctx->pdev,
-			er_tr_cpu_addresses.rtp_tr_er.uc_prod_er[index].temp_buff_size,
+		} else if (index >= MAX_UC_PROD_PIPES_TR_INDEX &&
+			index < MAX_UC_PROD_PIPES_ER_INDEX) {
+			/* subtracting MAX_UC_PROD_TR_INDEX here because,
+			 * uc_prod_er[] is of size MAX_UC_PROD_PIPES only
+			 */
+			dma_free_coherent(ipa3_ctx->uc_pdev,
+			er_tr_cpu_addresses.rtp_tr_er.uc_prod_er[index
+			-MAX_UC_PROD_PIPES_TR_INDEX].temp_buff_size,
 			er_tr_cpu_addresses.cpu_address[index],
-			er_tr_cpu_addresses.rtp_tr_er.uc_prod_er[index].temp_buff_pa);
-		} else if (index < MAX_UC_CONS_PIPES_TR_INDEX) {
-			dma_free_coherent(ipa3_ctx->pdev,
-			er_tr_cpu_addresses.rtp_tr_er.uc_cons_tr[index].temp_buff_size,
+			er_tr_cpu_addresses.rtp_tr_er.uc_prod_er[index
+			-MAX_UC_PROD_PIPES_TR_INDEX].temp_buff_pa);
+		} else if (index >= MAX_UC_PROD_PIPES_ER_INDEX &&
+			index < MAX_UC_CONS_PIPES_TR_INDEX) {
+			/* subtracting MAX_UC_PROD_TR_INDEX here because,
+			 * uc_cons_tr[] is of size MAX_UC_CONS_PIPES only
+			 */
+			dma_free_coherent(ipa3_ctx->uc_pdev,
+			er_tr_cpu_addresses.rtp_tr_er.uc_cons_tr[index
+			-MAX_UC_PROD_PIPES_ER_INDEX].temp_buff_size,
 			er_tr_cpu_addresses.cpu_address[index],
-			er_tr_cpu_addresses.rtp_tr_er.uc_cons_tr[index].temp_buff_pa);
+			er_tr_cpu_addresses.rtp_tr_er.uc_cons_tr[index
+			-MAX_UC_PROD_PIPES_ER_INDEX].temp_buff_pa);
 		}
 	}
 }
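
Review note: besides the pdev to uc_pdev switch, this hunk fixes the indexing. The loop's flat index runs over three back-to-back segments (producer TRs, producer ERs, consumer TRs), but uc_prod_er[] and uc_cons_tr[] are only sized for their own segment, so the flat index must be rebased to the segment start before use; the old code indexed them with the flat value and could read past the array. A standalone sketch of the rebasing arithmetic; the *_INDEX values here are made up for illustration.

#include <stdio.h>

/* Segment boundaries of the flat index space (illustrative sizes) */
#define MAX_UC_PROD_PIPES_TR_INDEX 2   /* end of producer TR segment */
#define MAX_UC_PROD_PIPES_ER_INDEX 4   /* end of producer ER segment */
#define MAX_UC_CONS_PIPES_TR_INDEX 6   /* end of consumer TR segment */

int main(void)
{
	int prod_tr[MAX_UC_PROD_PIPES_TR_INDEX];
	int prod_er[MAX_UC_PROD_PIPES_ER_INDEX - MAX_UC_PROD_PIPES_TR_INDEX];
	int cons_tr[MAX_UC_CONS_PIPES_TR_INDEX - MAX_UC_PROD_PIPES_ER_INDEX];
	int index;

	for (index = 0; index < MAX_UC_CONS_PIPES_TR_INDEX; index++) {
		if (index < MAX_UC_PROD_PIPES_TR_INDEX) {
			prod_tr[index] = index;  /* flat index is fine here */
		} else if (index < MAX_UC_PROD_PIPES_ER_INDEX) {
			/* rebase: prod_er[] starts at the ER segment */
			prod_er[index - MAX_UC_PROD_PIPES_TR_INDEX] = index;
		} else {
			/* rebase: cons_tr[] starts at the cons TR segment */
			cons_tr[index - MAX_UC_PROD_PIPES_ER_INDEX] = index;
		}
	}
	printf("prod_er[0]=%d cons_tr[0]=%d\n", prod_er[0], cons_tr[0]);
	return 0;
}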