diff --git a/crypto-qti/qce.h b/crypto-qti/qce.h
index a499cdd5de..6d6f6ffaf7 100644
--- a/crypto-qti/qce.h
+++ b/crypto-qti/qce.h
@@ -218,4 +218,5 @@ void qce_clear_driver_stats(void *handle);
 void qce_dump_req(void *handle);
 void qce_get_crypto_status(void *handle, struct qce_error *error);
 int qce_manage_timeout(void *handle, int req_info);
+int qce_set_irqs(void *handle, bool enable);
 #endif /* __CRYPTO_MSM_QCE_H */
diff --git a/crypto-qti/qce50.c b/crypto-qti/qce50.c
index a22a2799e0..a66d471bf9 100644
--- a/crypto-qti/qce50.c
+++ b/crypto-qti/qce50.c
@@ -2343,6 +2343,20 @@ static int _qce_unlock_other_pipes(struct qce_device *pce_dev, int req_info)
 	return rc;
 }
 
+static int qce_sps_set_irqs(struct qce_device *pce_dev, bool enable)
+{
+	if (enable)
+		return sps_bam_enable_irqs(pce_dev->ce_bam_info.bam_handle);
+	else
+		return sps_bam_disable_irqs(pce_dev->ce_bam_info.bam_handle);
+}
+
+int qce_set_irqs(void *handle, bool enable)
+{
+	return qce_sps_set_irqs(handle, enable);
+}
+EXPORT_SYMBOL(qce_set_irqs);
+
 static inline void qce_free_req_info(struct qce_device *pce_dev, int req_info,
 				bool is_complete);
 
@@ -3558,7 +3572,7 @@ static void _sps_producer_callback(struct sps_event_notify *notify)
 		preq_info->xfer_type == QCE_XFER_AEAD) &&
 		pce_sps_data->producer_state == QCE_PIPE_STATE_IDLE) {
 		pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
-		if (!is_offload_op(op)) {
+		if (!is_offload_op(op) && (op < QCE_OFFLOAD_OPER_LAST)) {
 			pce_sps_data->out_transfer.iovec_count = 0;
 			_qce_sps_add_data(GET_PHYS_ADDR(
 					pce_sps_data->result_dump),
@@ -5361,8 +5375,9 @@ static int _qce_resume(void *handle)
 	struct sps_connect *sps_connect_info;
 	int rc, i;
 
+	rc = -ENODEV;
 	if (handle == NULL)
-		return -ENODEV;
+		return rc;
 
 	for (i = 0; i < QCE_OFFLOAD_OPER_LAST; i++) {
 		if (i == QCE_OFFLOAD_NONE && !(pce_dev->kernel_pipes_support))
diff --git a/crypto-qti/qcedev.c b/crypto-qti/qcedev.c
index 59c7b1ede4..b2cca25d79 100644
--- a/crypto-qti/qcedev.c
+++ b/crypto-qti/qcedev.c
@@ -180,16 +180,31 @@ static void qcedev_ce_high_bw_req(struct qcedev_control *podev,
 {
 	int ret = 0;
 
+	if (podev == NULL) return;
+
 	mutex_lock(&qcedev_sent_bw_req);
 	if (high_bw_req) {
 		if (podev->high_bw_req_count == 0) {
 			ret = qcedev_control_clocks(podev, true);
 			if (ret)
 				goto exit_unlock_mutex;
+			ret = qce_set_irqs(podev->qce, true);
+			if (ret) {
+				pr_err("%s: could not enable bam irqs, ret = %d",
+						__func__, ret);
+				qcedev_control_clocks(podev, false);
+				goto exit_unlock_mutex;
+			}
 		}
 		podev->high_bw_req_count++;
 	} else {
 		if (podev->high_bw_req_count == 1) {
+			ret = qce_set_irqs(podev->qce, false);
+			if (ret) {
+				pr_err("%s: could not disable bam irqs, ret = %d",
+						__func__, ret);
+				goto exit_unlock_mutex;
+			}
 			ret = qcedev_control_clocks(podev, false);
 			if (ret)
 				goto exit_unlock_mutex;
@@ -295,7 +310,9 @@ static int qcedev_release(struct inode *inode, struct file *file)
 							__func__, podev);
 	}
 
-	qcedev_ce_high_bw_req(podev, false);
+	if (podev)
+		qcedev_ce_high_bw_req(podev, false);
+
 	if (qcedev_unmap_all_buffers(handle))
 		pr_err("%s: failed to unmap all ion buffers\n", __func__);
 
@@ -714,6 +731,7 @@ static int submit_req(struct qcedev_async_req *qcedev_areq,
 	int wait = MAX_CRYPTO_WAIT_TIME;
 	struct qcedev_async_req *new_req = NULL;
 	int retries = 0;
+	int req_wait = MAX_REQUEST_TIME;
 
 	qcedev_areq->err = 0;
 	podev = handle->cntl;
@@ -750,12 +768,16 @@ static int submit_req(struct qcedev_async_req *qcedev_areq,
 
 		list_add_tail(&qcedev_areq->list, &podev->ready_commands);
 		qcedev_areq->state = QCEDEV_REQ_WAITING;
-		if (wait_event_interruptible_lock_irq_timeout(
+		req_wait = wait_event_interruptible_lock_irq_timeout(
 				qcedev_areq->wait_q,
 				(qcedev_areq->state == QCEDEV_REQ_CURRENT),
 				podev->lock,
-				msecs_to_jiffies(MAX_REQUEST_TIME)) == 0) {
-			pr_err("%s: request timed out\n", __func__);
+				msecs_to_jiffies(MAX_REQUEST_TIME));
+		if ((req_wait == 0) || (req_wait == -ERESTARTSYS)) {
+			pr_err("%s: request timed out, req_wait = %d\n",
+						__func__, req_wait);
+			list_del(&qcedev_areq->list);
+			podev->active_command = NULL;
 			spin_unlock_irqrestore(&podev->lock, flags);
 			return qcedev_areq->err;
 		}
@@ -1734,6 +1756,8 @@ static int qcedev_smmu_ablk_offload_cipher(struct qcedev_async_req *areq,
 		}
 	}
 exit:
+	areq->cipher_req.creq.src = NULL;
+	areq->cipher_req.creq.dst = NULL;
 	return err;
 }
 
@@ -2561,13 +2585,21 @@ static int qcedev_probe_device(struct platform_device *pdev)
 		rc = -ENODEV;
 		goto exit_scale_busbandwidth;
 	}
+	podev->qce = handle;
+
+	rc = qce_set_irqs(podev->qce, false);
+	if (rc) {
+		pr_err("%s: could not disable bam irqs, ret = %d",
+				__func__, rc);
+		goto exit_scale_busbandwidth;
+	}
+
 	rc = icc_set_bw(podev->icc_path, 0, 0);
 	if (rc) {
 		pr_err("%s Unable to set to low bandwidth\n", __func__);
 		goto exit_qce_close;
 	}
 
-	podev->qce = handle;
 	podev->pdev = pdev;
 	platform_set_drvdata(pdev, podev);
 
@@ -2682,6 +2714,12 @@ static int qcedev_suspend(struct platform_device *pdev, pm_message_t state)
 
 	mutex_lock(&qcedev_sent_bw_req);
 	if (podev->high_bw_req_count) {
+		ret = qce_set_irqs(podev->qce, false);
+		if (ret) {
+			pr_err("%s: could not disable bam irqs, ret = %d",
+					__func__, ret);
+			goto suspend_exit;
+		}
 		ret = qcedev_control_clocks(podev, false);
 		if (ret)
 			goto suspend_exit;
@@ -2707,6 +2745,12 @@ static int qcedev_resume(struct platform_device *pdev)
 		ret = qcedev_control_clocks(podev, true);
 		if (ret)
 			goto resume_exit;
+		ret = qce_set_irqs(podev->qce, true);
+		if (ret) {
+			pr_err("%s: could not enable bam irqs, ret = %d",
+					__func__, ret);
+			qcedev_control_clocks(podev, false);
+		}
 	}
 
 resume_exit:
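
Note (not part of the patch): the qcedev hunks above pair the newly exported qce_set_irqs() with the existing clock/bandwidth voting so BAM interrupts are only left enabled while the clocks are voted on, and are dropped again before the clocks go off. The stand-alone C sketch below models just that ordering; the one-argument signatures, stub bodies, and main() are hypothetical scaffolding so the example compiles on its own, and only the call ordering mirrors the patch.

/*
 * Illustrative sketch only. qcedev_control_clocks() and qce_set_irqs()
 * stand in for the real driver helpers; everything else is hypothetical
 * scaffolding for a self-contained example.
 */
#include <stdbool.h>
#include <stdio.h>

/* Stub: models the clock/bandwidth vote toggled by the driver. */
static int qcedev_control_clocks(bool enable)
{
	printf("clocks %s\n", enable ? "on" : "off");
	return 0;
}

/* Stub: models the new BAM IRQ enable/disable entry point. */
static int qce_set_irqs(bool enable)
{
	printf("BAM IRQs %s\n", enable ? "enabled" : "disabled");
	return 0;
}

/*
 * Mirrors the ordering in qcedev_ce_high_bw_req(): IRQs are enabled only
 * after the clocks come up, and disabled before the clocks go down. On an
 * IRQ-enable failure the clock vote is rolled back.
 */
static int high_bw_req(bool high)
{
	int ret;

	if (high) {
		ret = qcedev_control_clocks(true);
		if (ret)
			return ret;
		ret = qce_set_irqs(true);
		if (ret)
			qcedev_control_clocks(false);	/* roll back the vote */
		return ret;
	}

	ret = qce_set_irqs(false);
	if (ret)
		return ret;
	return qcedev_control_clocks(false);
}

int main(void)
{
	high_bw_req(true);	/* clocks on, then IRQs on */
	high_bw_req(false);	/* IRQs off, then clocks off */
	return 0;
}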