crypto: brcm - Explicitly ACK mailbox message
Add support to explicitly ACK a mailbox message because, after
sending the message, we can learn the send status via the error
attribute of brcm_message. This is needed to support the
"txdone_ack" method used by the mailbox controller driver.
Fixes: 9d12ba86f8 ("crypto: brcm - Add Broadcom SPU driver")
Signed-off-by: Raveendra Padasalagi <raveendra.padasalagi@broadcom.com>
Reviewed-by: Anup Patel <anup.patel@broadcom.com>
Reviewed-by: Scott Branden <scott.branden@broadcom.com>
Cc: stable@vger.kernel.org
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 52872f5288
commit f0e2ce58f8
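
Background on the mechanism (not part of the patch itself): a mailbox client that sets knows_txdone = true tells the framework it will report transmission completion on its own, by calling mbox_client_txdone() once it knows the outcome of each mbox_send_message(). A minimal sketch of that client-side pattern is below; the demo_mbox_ctx structure and demo_* function names are hypothetical illustrations of the generic <linux/mailbox_client.h> API, not SPU driver code.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mailbox_client.h>

/* Hypothetical client state for illustration only; not SPU driver code. */
struct demo_mbox_ctx {
	struct mbox_client client;
	struct mbox_chan *chan;
};

/* Request channel 0 with explicit-ACK (knows_txdone) semantics. */
static int demo_mbox_setup(struct demo_mbox_ctx *ctx, struct device *dev)
{
	ctx->client.dev = dev;
	ctx->client.tx_block = false;		/* do not block in mbox_send_message() */
	ctx->client.tx_tout = 0;
	ctx->client.knows_txdone = true;	/* client ACKs each message itself */
	ctx->client.rx_callback = NULL;
	ctx->client.tx_done = NULL;

	ctx->chan = mbox_request_channel(&ctx->client, 0);
	return PTR_ERR_OR_ZERO(ctx->chan);
}

/* Send one message and explicitly signal tx-done with its status. */
static int demo_mbox_send(struct demo_mbox_ctx *ctx, void *msg)
{
	int err;

	err = mbox_send_message(ctx->chan, msg);
	if (err < 0)
		return err;

	/*
	 * With knows_txdone set, the framework waits for this ACK before
	 * advancing the channel's software queue.
	 */
	mbox_client_txdone(ctx->chan, 0);
	return 0;
}
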
@@ -256,6 +256,44 @@ spu_ablkcipher_tx_sg_create(struct brcm_message *mssg,
 	return 0;
 }
 
+static int mailbox_send_message(struct brcm_message *mssg, u32 flags,
+				u8 chan_idx)
+{
+	int err;
+	int retry_cnt = 0;
+	struct device *dev = &(iproc_priv.pdev->dev);
+
+	err = mbox_send_message(iproc_priv.mbox[chan_idx], mssg);
+	if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
+		while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
+			/*
+			 * Mailbox queue is full. Since MAY_SLEEP is set, assume
+			 * not in atomic context and we can wait and try again.
+			 */
+			retry_cnt++;
+			usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
+			err = mbox_send_message(iproc_priv.mbox[chan_idx],
+						mssg);
+			atomic_inc(&iproc_priv.mb_no_spc);
+		}
+	}
+	if (err < 0) {
+		atomic_inc(&iproc_priv.mb_send_fail);
+		return err;
+	}
+
+	/* Check error returned by mailbox controller */
+	err = mssg->error;
+	if (unlikely(err < 0)) {
+		dev_err(dev, "message error %d", err);
+		/* Signal txdone for mailbox channel */
+	}
+
+	/* Signal txdone for mailbox channel */
+	mbox_client_txdone(iproc_priv.mbox[chan_idx], err);
+	return err;
+}
+
 /**
  * handle_ablkcipher_req() - Submit as much of a block cipher request as fits in
  * a single SPU request message, starting at the current position in the request
@@ -293,7 +331,6 @@ static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
 	u32 pad_len;	/* total length of all padding */
 	bool update_key = false;
 	struct brcm_message *mssg;	/* mailbox message */
-	int retry_cnt = 0;
 
 	/* number of entries in src and dst sg in mailbox message. */
 	u8 rx_frag_num = 2;	/* response header and STATUS */
@@ -462,24 +499,9 @@ static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
 	if (err)
 		return err;
 
-	err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg);
-	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
-		while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
-			/*
-			 * Mailbox queue is full. Since MAY_SLEEP is set, assume
-			 * not in atomic context and we can wait and try again.
-			 */
-			retry_cnt++;
-			usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
-			err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx],
-						mssg);
-			atomic_inc(&iproc_priv.mb_no_spc);
-		}
-	}
-	if (unlikely(err < 0)) {
-		atomic_inc(&iproc_priv.mb_send_fail);
+	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
+	if (unlikely(err < 0))
 		return err;
-	}
 
 	return -EINPROGRESS;
 }
@@ -710,7 +732,6 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx)
 	u32 spu_hdr_len;
 	unsigned int digestsize;
 	u16 rem = 0;
-	int retry_cnt = 0;
 
 	/*
 	 * number of entries in src and dst sg. Always includes SPU msg header.
@@ -904,24 +925,10 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx)
 	if (err)
 		return err;
 
-	err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg);
-	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
-		while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
-			/*
-			 * Mailbox queue is full. Since MAY_SLEEP is set, assume
-			 * not in atomic context and we can wait and try again.
-			 */
-			retry_cnt++;
-			usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
-			err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx],
-						mssg);
-			atomic_inc(&iproc_priv.mb_no_spc);
-		}
-	}
-	if (err < 0) {
-		atomic_inc(&iproc_priv.mb_send_fail);
+	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
+	if (unlikely(err < 0))
 		return err;
-	}
+
 	return -EINPROGRESS;
 }
 
@@ -1320,7 +1327,6 @@ static int handle_aead_req(struct iproc_reqctx_s *rctx)
 	int assoc_nents = 0;
 	bool incl_icv = false;
 	unsigned int digestsize = ctx->digestsize;
-	int retry_cnt = 0;
 
 	/* number of entries in src and dst sg. Always includes SPU msg header.
 	 */
@@ -1558,24 +1564,9 @@ static int handle_aead_req(struct iproc_reqctx_s *rctx)
 	if (err)
 		return err;
 
-	err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg);
-	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
-		while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
-			/*
-			 * Mailbox queue is full. Since MAY_SLEEP is set, assume
-			 * not in atomic context and we can wait and try again.
-			 */
-			retry_cnt++;
-			usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
-			err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx],
-						mssg);
-			atomic_inc(&iproc_priv.mb_no_spc);
-		}
-	}
-	if (err < 0) {
-		atomic_inc(&iproc_priv.mb_send_fail);
+	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
+	if (unlikely(err < 0))
 		return err;
-	}
 
 	return -EINPROGRESS;
 }
@@ -4537,7 +4528,7 @@ static int spu_mb_init(struct device *dev)
 		mcl->dev = dev;
 		mcl->tx_block = false;
 		mcl->tx_tout = 0;
-		mcl->knows_txdone = false;
+		mcl->knows_txdone = true;
 		mcl->rx_callback = spu_rx_callback;
 		mcl->tx_done = NULL;
 