diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 6273dce0f2..79be1bbee8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -8275,10 +8275,12 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
 		/* uC is getting loaded through XBL here */
 		ipa3_ctx->uc_ctx.uc_inited = true;
 		ipa3_ctx->uc_ctx.uc_loaded = true;
+		IPA_ACTIVE_CLIENTS_INC_SIMPLE();
 		result = ipa3_alloc_temp_buffs_to_uc(TEMP_BUFF_SIZE, NO_OF_BUFFS);
 		if (result) {
 			IPAERR("Temp buffer allocations for uC failed %d\n", result);
 			result = -ENODEV;
+			IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 			goto fail_teth_bridge_driver_init;
 		}
 
@@ -8287,6 +8289,7 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
 			IPAERR("ER and TR allocations for uC pipes failed %d\n", result);
 			ipa3_free_uc_temp_buffs(NO_OF_BUFFS);
 			result = -ENODEV;
+			IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 			goto fail_teth_bridge_driver_init;
 		}
 
@@ -8296,8 +8299,10 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
 			ipa3_free_uc_temp_buffs(NO_OF_BUFFS);
 			ipa3_free_uc_pipes_er_tr();
 			result = -ENODEV;
+			IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 			goto fail_teth_bridge_driver_init;
 		}
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 	}
 #endif
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_rtp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_rtp.c
index 608d3a0e5a..1dcdbe5932 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_rtp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_rtp.c
@@ -483,7 +483,7 @@ static int ipa3_uc_setup_prod_pipe_transfer_ring(
 	}
 
 	ring.size = sizeof(struct prod_pipe_tre) * IPA_UC_PROD_TRANSFER_RING_SIZE;
-	ring.base = dma_alloc_coherent(ipa3_ctx->pdev, ring.size,
+	ring.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, ring.size,
 			&ring.phys_base, GFP_KERNEL);
 	if (ring.base == NULL) {
 		IPAERR("dma alloc coherent failed.\n");
@@ -521,7 +521,7 @@ static int ipa3_uc_setup_prod_pipe_event_ring(
 	}
 
 	ring.size = sizeof(struct prod_pipe_tre) * IPA_UC_PROD_EVENT_RING_SIZE;
-	ring.base = dma_alloc_coherent(ipa3_ctx->pdev, ring.size,
+	ring.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, ring.size,
 			&ring.phys_base, GFP_KERNEL);
 	if (ring.base == NULL) {
 		IPAERR("dma alloc coherent failed.\n");
@@ -546,7 +546,7 @@ static int ipa3_uc_setup_con_pipe_transfer_ring(
 	}
 
 	ring.size = sizeof(struct con_pipe_tre) * IPA_UC_CON_TRANSFER_RING_SIZE;
-	ring.base = dma_alloc_coherent(ipa3_ctx->pdev, ring.size,
+	ring.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, ring.size,
 			&ring.phys_base, GFP_KERNEL);
 	if (ring.base == NULL) {
 		IPAERR("dma alloc coherent failed.\n");
@@ -566,20 +566,32 @@ void ipa3_free_uc_pipes_er_tr(void)
 
 	for (index = 0; index < er_tr_cpu_addresses.no_buffs; index++) {
 		if (index < MAX_UC_PROD_PIPES_TR_INDEX) {
-			dma_free_coherent(ipa3_ctx->pdev,
+			dma_free_coherent(ipa3_ctx->uc_pdev,
 				er_tr_cpu_addresses.rtp_tr_er.uc_prod_tr[index].temp_buff_size,
 				er_tr_cpu_addresses.cpu_address[index],
 				er_tr_cpu_addresses.rtp_tr_er.uc_prod_tr[index].temp_buff_pa);
-		} else if (index < MAX_UC_PROD_PIPES_ER_INDEX) {
-			dma_free_coherent(ipa3_ctx->pdev,
-				er_tr_cpu_addresses.rtp_tr_er.uc_prod_er[index].temp_buff_size,
+		} else if (index >= MAX_UC_PROD_PIPES_TR_INDEX &&
+			index < MAX_UC_PROD_PIPES_ER_INDEX) {
+			/* subtracting MAX_UC_PROD_PIPES_TR_INDEX here because
+			 * uc_prod_er[] is of size MAX_UC_PROD_PIPES only
+			 */
+			dma_free_coherent(ipa3_ctx->uc_pdev,
+				er_tr_cpu_addresses.rtp_tr_er.uc_prod_er[index
+				-MAX_UC_PROD_PIPES_TR_INDEX].temp_buff_size,
 				er_tr_cpu_addresses.cpu_address[index],
-				er_tr_cpu_addresses.rtp_tr_er.uc_prod_er[index].temp_buff_pa);
-		} else if (index < MAX_UC_CONS_PIPES_TR_INDEX) {
-			dma_free_coherent(ipa3_ctx->pdev,
-				er_tr_cpu_addresses.rtp_tr_er.uc_cons_tr[index].temp_buff_size,
+				er_tr_cpu_addresses.rtp_tr_er.uc_prod_er[index
+				-MAX_UC_PROD_PIPES_TR_INDEX].temp_buff_pa);
+		} else if (index >= MAX_UC_PROD_PIPES_ER_INDEX &&
+			index < MAX_UC_CONS_PIPES_TR_INDEX) {
+			/* subtracting MAX_UC_PROD_PIPES_ER_INDEX here because
+			 * uc_cons_tr[] is of size MAX_UC_CONS_PIPES only
+			 */
+			dma_free_coherent(ipa3_ctx->uc_pdev,
+				er_tr_cpu_addresses.rtp_tr_er.uc_cons_tr[index
+				-MAX_UC_PROD_PIPES_ER_INDEX].temp_buff_size,
 				er_tr_cpu_addresses.cpu_address[index],
-				er_tr_cpu_addresses.rtp_tr_er.uc_cons_tr[index].temp_buff_pa);
+				er_tr_cpu_addresses.rtp_tr_er.uc_cons_tr[index
+				-MAX_UC_PROD_PIPES_ER_INDEX].temp_buff_pa);
 		}
 	}
 }
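The ipa.c hunk brackets the uC temp-buffer and ER/TR setup with IPA_ACTIVE_CLIENTS_INC_SIMPLE()/IPA_ACTIVE_CLIENTS_DEC_SIMPLE(), so the IPA clocks stay voted while the uC is programmed, and the vote is dropped on every error path as well as after a successful setup. The following is a minimal, self-contained sketch of that vote/unvote bracketing only; vote(), unvote() and the setup/teardown stubs are hypothetical stand-ins, not the driver's APIs.

/* Sketch of the clock-vote bracketing pattern used in ipa3_post_init().
 * All helpers below are hypothetical stand-ins for
 * IPA_ACTIVE_CLIENTS_INC_SIMPLE()/IPA_ACTIVE_CLIENTS_DEC_SIMPLE(),
 * ipa3_alloc_temp_buffs_to_uc(), ipa3_uc_setup_er_tr() and their cleanup.
 */
#include <errno.h>
#include <stdio.h>

static void vote(void)        { printf("clock vote++\n"); }
static void unvote(void)      { printf("clock vote--\n"); }
static int  setup_bufs(void)  { return 0; }	/* pretend success */
static int  setup_er_tr(void) { return 0; }
static void free_bufs(void)   { }

static int uc_rtp_setup(void)
{
	int ret;

	vote();				/* hold the clocks across uC setup */

	ret = setup_bufs();
	if (ret)
		goto fail;		/* drop the vote on the error path */

	ret = setup_er_tr();
	if (ret) {
		free_bufs();
		goto fail;
	}

	unvote();			/* setup done, release the vote */
	return 0;

fail:
	unvote();
	return -ENODEV;
}

int main(void)
{
	return uc_rtp_setup();
}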
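The corrected free loop in ipa3_free_uc_pipes_er_tr() walks one flat list of DMA buffers but frees them out of three smaller per-type arrays, so the running index must be rebased at each range boundary instead of being used directly. The short illustrative program below shows that index-to-array mapping; the pipe counts and the way the *_INDEX bounds are derived here are assumptions made for the example, not values taken from the driver headers.

/* Illustrative only: maps a flat buffer index onto the three per-type
 * arrays the way the fixed loop does. Sizes and *_INDEX derivations are
 * hypothetical stand-ins for MAX_UC_PROD_PIPES / MAX_UC_CONS_PIPES.
 */
#include <stdio.h>

#define MAX_UC_PROD_PIPES		2
#define MAX_UC_CONS_PIPES		2
#define MAX_UC_PROD_PIPES_TR_INDEX	(MAX_UC_PROD_PIPES)		/* end of prod TR slots */
#define MAX_UC_PROD_PIPES_ER_INDEX	(2 * MAX_UC_PROD_PIPES)		/* end of prod ER slots */
#define MAX_UC_CONS_PIPES_TR_INDEX	(2 * MAX_UC_PROD_PIPES + MAX_UC_CONS_PIPES)

int main(void)
{
	int index;

	for (index = 0; index < MAX_UC_CONS_PIPES_TR_INDEX; index++) {
		if (index < MAX_UC_PROD_PIPES_TR_INDEX)
			printf("buff %d -> uc_prod_tr[%d]\n", index, index);
		else if (index < MAX_UC_PROD_PIPES_ER_INDEX)
			/* uc_prod_er[] only has MAX_UC_PROD_PIPES entries */
			printf("buff %d -> uc_prod_er[%d]\n", index,
			       index - MAX_UC_PROD_PIPES_TR_INDEX);
		else
			/* uc_cons_tr[] only has MAX_UC_CONS_PIPES entries */
			printf("buff %d -> uc_cons_tr[%d]\n", index,
			       index - MAX_UC_PROD_PIPES_ER_INDEX);
	}
	return 0;
}

Without the rebasing, indices past MAX_UC_PROD_PIPES_TR_INDEX would read past the end of the smaller arrays, which is what the original code did.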