@@ -131,6 +131,11 @@ static inline int is_ofld_imm(const struct sk_buff *skb)
 	return (skb->len <= SGE_MAX_WR_LEN);
 }
 
+static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
+{
+	memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
+}
+
 static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
 			 unsigned int entlen,
 			 unsigned int skip)
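The helper added above is the reset point for all of the new per-work-request bookkeeping: each hash entry point is expected to call it before building its first WR, so no stale DMA address or progress counter survives from an earlier request. A minimal user-space sketch of the idiom follows; the struct here is a simplified stand-in for the driver's struct chcr_hctx_per_wr, not its real layout:

#include <stdio.h>
#include <string.h>

/* Simplified stand-in for the driver's struct chcr_hctx_per_wr. */
struct hctx_per_wr {
	unsigned long dma_addr;  /* bounce-buffer DMA address, if mapped */
	unsigned int dma_len;
	unsigned int processed;  /* payload bytes already sent to hardware */
	unsigned int result;     /* final digest expected in the response? */
	unsigned int isfinal;
	unsigned int is_sg_map;
};

struct hash_req_ctx {
	struct hctx_per_wr hctx_wr;
	unsigned int reqlen;     /* bytes parked in the partial-block buffer */
};

/* Mirrors chcr_init_hctx_per_wr(): one memset resets every per-WR field. */
static void init_hctx_per_wr(struct hash_req_ctx *reqctx)
{
	memset(&reqctx->hctx_wr, 0, sizeof(reqctx->hctx_wr));
}

int main(void)
{
	struct hash_req_ctx ctx = { .hctx_wr = { .processed = 4096 } };

	init_hctx_per_wr(&ctx);
	printf("processed after reset: %u\n", ctx.hctx_wr.processed); /* 0 */
	return 0;
}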
@@ -165,6 +170,7 @@ static inline void chcr_handle_ahash_resp(struct ahash_request *req,
 					  int err)
 {
 	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
+	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
 	int digestsize, updated_digestsize;
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
@@ -172,25 +178,43 @@ static inline void chcr_handle_ahash_resp(struct ahash_request *req,
 	if (input == NULL)
 		goto out;
 	digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
-	if (reqctx->is_sg_map)
-		chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
-	if (reqctx->dma_addr)
-		dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->dma_addr,
-				 reqctx->dma_len, DMA_TO_DEVICE);
-	reqctx->dma_addr = 0;
 	updated_digestsize = digestsize;
 	if (digestsize == SHA224_DIGEST_SIZE)
 		updated_digestsize = SHA256_DIGEST_SIZE;
 	else if (digestsize == SHA384_DIGEST_SIZE)
 		updated_digestsize = SHA512_DIGEST_SIZE;
-	if (reqctx->result == 1) {
-		reqctx->result = 0;
-		memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
-		       digestsize);
-	} else {
-		memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
-		       updated_digestsize);
+
+	if (hctx_wr->dma_addr) {
+		dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
+				 hctx_wr->dma_len, DMA_TO_DEVICE);
+		hctx_wr->dma_addr = 0;
+	}
+	if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
+				 req->nbytes)) {
+		if (hctx_wr->result == 1) {
+			hctx_wr->result = 0;
+			memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
+			       digestsize);
+		} else {
+			memcpy(reqctx->partial_hash,
+			       input + sizeof(struct cpl_fw6_pld),
+			       updated_digestsize);
+
+		}
+		goto unmap;
 	}
+	memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
+	       updated_digestsize);
+
+	err = chcr_ahash_continue(req);
+	if (err)
+		goto unmap;
+	return;
+unmap:
+	if (hctx_wr->is_sg_map)
+		chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+
+
 out:
 	req->base.complete(&req->base, err);
 }
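The reworked completion handler above now has two exits: when the response belongs to the last WR of the request (isfinal is set, or processed plus buffered bytes equals req->nbytes) it copies the digest out and unmaps; otherwise it stashes the partial hash and calls chcr_ahash_continue() to issue the next WR. A hedged stand-alone sketch of just that decision, with invented field and function names:

#include <stdio.h>

/* Hypothetical, simplified model of the completion-path decision in
 * chcr_handle_ahash_resp(): either finish the request or kick off the
 * next work request for the remaining bytes.
 */
struct fake_req {
	unsigned int nbytes;     /* total payload of the ahash request */
	unsigned int reqlen;     /* bytes still parked in the driver buffer */
	unsigned int processed;  /* bytes covered by completed WRs */
	int isfinal;
};

static void send_next_wr(struct fake_req *r, unsigned int chunk)
{
	r->processed += chunk;
	printf("sent follow-up WR, processed=%u/%u\n", r->processed, r->nbytes);
}

static void handle_resp(struct fake_req *r)
{
	if (r->isfinal || (r->processed + r->reqlen) == r->nbytes) {
		printf("last WR done: copy digest to req->result\n");
		return;
	}
	/* Not done yet: save the partial hash, resubmit for the rest. */
	send_next_wr(r, r->nbytes - r->processed - r->reqlen);
}

int main(void)
{
	struct fake_req r = { .nbytes = 1 << 20, .processed = 1 << 19 };

	handle_resp(&r);           /* resubmits */
	r.processed = r.nbytes;
	handle_resp(&r);           /* finishes */
	return 0;
}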
@@ -563,7 +587,6 @@ static void ulptx_walk_add_sg(struct ulptx_walk *walk,
 
 	if (!len)
 		return;
-
 	while (sg && skip) {
 		if (sg_dma_len(sg) <= skip) {
 			skip -= sg_dma_len(sg);
@@ -653,6 +676,35 @@ static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
 	}
 	return 0;
 }
+
+static int chcr_hash_ent_in_wr(struct scatterlist *src,
+			     unsigned int minsg,
+			     unsigned int space,
+			     unsigned int srcskip)
+{
+	int srclen = 0;
+	int srcsg = minsg;
+	int soffset = 0, sless;
+
+	if (sg_dma_len(src) == srcskip) {
+		src = sg_next(src);
+		srcskip = 0;
+	}
+	while (src && space > (sgl_ent_len[srcsg + 1])) {
+		sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
+			      CHCR_SRC_SG_SIZE);
+		srclen += sless;
+		soffset += sless;
+		srcsg++;
+		if (sg_dma_len(src) == (soffset + srcskip)) {
+			src = sg_next(src);
+			soffset = 0;
+			srcskip = 0;
+		}
+	}
+	return srclen;
+}
+
 static int chcr_sg_ent_in_wr(struct scatterlist *src,
 			     struct scatterlist *dst,
 			     unsigned int minsg,
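chcr_hash_ent_in_wr() answers one question: how many source bytes can the next WR carry before the ULPTX scatter-list overhead (the driver's sgl_ent_len[] table) exceeds the space budget, given that each SG entry is capped at CHCR_SRC_SG_SIZE and the first srcskip bytes are already consumed. The following user-space re-implementation over a plain array mirrors that loop; the overhead function and all sizes are made up for illustration:

#include <stdio.h>

#define SRC_SG_CAP (1U << 15)   /* stand-in for CHCR_SRC_SG_SIZE */

/* Toy stand-in for the driver's sgl_ent_len[] lookup: work-request bytes
 * consumed once n source entries sit in the ULPTX SGL (hypothetical costs).
 */
static unsigned int sgl_space(unsigned int n)
{
	return n ? 16 + 8 * n : 0;
}

/* Mirrors the shape of chcr_hash_ent_in_wr(): count payload bytes that fit
 * in `space`, capping each scatter entry at SRC_SG_CAP and skipping `skip`
 * bytes at the front of the list.
 */
static unsigned int bytes_that_fit(const unsigned int *sg_len, int nents,
				   unsigned int minsg, unsigned int space,
				   unsigned int skip)
{
	unsigned int srclen = 0, soffset = 0, srcsg = minsg;
	int i = 0;

	if (i < nents && sg_len[i] == skip) {
		i++;
		skip = 0;
	}
	while (i < nents && space > sgl_space(srcsg + 1)) {
		unsigned int sless = sg_len[i] - soffset - skip;

		if (sless > SRC_SG_CAP)
			sless = SRC_SG_CAP;
		srclen += sless;
		soffset += sless;
		srcsg++;
		if (sg_len[i] == soffset + skip) {
			i++;
			soffset = 0;
			skip = 0;
		}
	}
	return srclen;
}

int main(void)
{
	unsigned int sg[] = { 65536, 65536, 4096 };

	printf("fits: %u bytes\n", bytes_that_fit(sg, 3, 0, 128, 0));
	return 0;
}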
@@ -1153,7 +1205,7 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
 	}
 	if (!reqctx->imm) {
 		bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 1,
-					  SPACE_LEFT(ablkctx->enckey_len),
+					  CIP_SPACE_LEFT(ablkctx->enckey_len),
 					  reqctx->src_ofst, reqctx->dst_ofst);
 		if ((bytes + reqctx->processed) >= req->nbytes)
 			bytes = req->nbytes - reqctx->processed;
@@ -1257,7 +1309,7 @@ static int process_cipher(struct ablkcipher_request *req,
 	if (!reqctx->imm) {
 		bytes = chcr_sg_ent_in_wr(req->src, req->dst,
 					  MIN_CIPHER_SG,
-					  SPACE_LEFT(ablkctx->enckey_len),
+					  CIP_SPACE_LEFT(ablkctx->enckey_len),
 					  0, 0);
 		if ((bytes + reqctx->processed) >= req->nbytes)
 			bytes = req->nbytes - reqctx->processed;
@@ -1513,35 +1565,24 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
 	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
 	struct chcr_wr *chcr_req;
 	struct ulptx_sgl *ulptx;
-	unsigned int nents = 0, transhdr_len, iopad_alignment = 0;
-	unsigned int digestsize = crypto_ahash_digestsize(tfm);
-	unsigned int kctx_len = 0, temp = 0;
-	u8 hash_size_in_response = 0;
+	unsigned int nents = 0, transhdr_len;
+	unsigned int temp = 0;
 	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
 		GFP_ATOMIC;
 	struct adapter *adap = padap(h_ctx(tfm)->dev);
 	int error = 0;
 
-	iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
-	kctx_len = param->alg_prm.result_size + iopad_alignment;
-	if (param->opad_needed)
-		kctx_len += param->alg_prm.result_size + iopad_alignment;
-
-	if (req_ctx->result)
-		hash_size_in_response = digestsize;
-	else
-		hash_size_in_response = param->alg_prm.result_size;
-	transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
-	req_ctx->imm = (transhdr_len + param->bfr_len + param->sg_len) <=
-		SGE_MAX_WR_LEN;
-	nents = sg_nents_xlen(req->src, param->sg_len, CHCR_SRC_SG_SIZE, 0);
+	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
+	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
+				param->sg_len) <= SGE_MAX_WR_LEN;
+	nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
+			      CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
 	nents += param->bfr_len ? 1 : 0;
-	transhdr_len += req_ctx->imm ? roundup((param->bfr_len +
-				param->sg_len), 16) :
-				(sgl_len(nents) * 8);
+	transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
+				param->sg_len, 16) : (sgl_len(nents) * 8);
 	transhdr_len = roundup(transhdr_len, 16);
 
-	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
+	skb = alloc_skb(transhdr_len, flags);
 	if (!skb)
 		return ERR_PTR(-ENOMEM);
 	chcr_req = __skb_put_zero(skb, transhdr_len);
@@ -1573,33 +1614,33 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
 	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
 					    param->alg_prm.mk_size, 0,
 					    param->opad_needed,
-					    ((kctx_len +
+					    ((param->kctx_len +
 					     sizeof(chcr_req->key_ctx)) >> 4));
 	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
-	ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + kctx_len +
+	ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
 				     DUMMY_BYTES);
 	if (param->bfr_len != 0) {
-		req_ctx->dma_addr = dma_map_single(&u_ctx->lldi.pdev->dev,
-					req_ctx->reqbfr, param->bfr_len,
-					DMA_TO_DEVICE);
+		req_ctx->hctx_wr.dma_addr =
+			dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
+				       param->bfr_len, DMA_TO_DEVICE);
 		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
-				      req_ctx->dma_addr)) {
+				      req_ctx->hctx_wr.dma_addr)) {
 			error = -ENOMEM;
 			goto err;
 		}
-		req_ctx->dma_len = param->bfr_len;
+		req_ctx->hctx_wr.dma_len = param->bfr_len;
 	} else {
-		req_ctx->dma_addr = 0;
+		req_ctx->hctx_wr.dma_addr = 0;
 	}
 	chcr_add_hash_src_ent(req, ulptx, param);
 	/* Request upto max wr size */
-	temp = kctx_len + DUMMY_BYTES + (req_ctx->imm ? (param->sg_len
-	+ param->bfr_len) : 0);
+	temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
+			(param->sg_len + param->bfr_len) : 0);
 	atomic_inc(&adap->chcr_stats.digest_rqst);
-	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->imm,
-		    hash_size_in_response, transhdr_len,
+	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
+		    param->hash_size, transhdr_len,
 		    temp, 0);
-	req_ctx->skb = skb;
+	req_ctx->hctx_wr.skb = skb;
 	return skb;
 err:
 	kfree_skb(skb);
@@ -1618,7 +1659,6 @@ static int chcr_ahash_update(struct ahash_request *req)
 	int error;
 
 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
-
 	u_ctx = ULD_CTX(h_ctx(rtfm));
 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
 					    h_ctx(rtfm)->tx_qidx))) {
@@ -1635,17 +1675,26 @@ static int chcr_ahash_update(struct ahash_request *req)
 		req_ctx->reqlen += nbytes;
 		return 0;
 	}
+	chcr_init_hctx_per_wr(req_ctx);
 	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
 	if (error)
 		return -ENOMEM;
+	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
+	params.kctx_len = roundup(params.alg_prm.result_size, 16);
+	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
+				     HASH_SPACE_LEFT(params.kctx_len), 0);
+	if (params.sg_len > req->nbytes)
+		params.sg_len = req->nbytes;
+	params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
+			req_ctx->reqlen;
 	params.opad_needed = 0;
 	params.more = 1;
 	params.last = 0;
-	params.sg_len = nbytes - req_ctx->reqlen;
 	params.bfr_len = req_ctx->reqlen;
 	params.scmd1 = 0;
-	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
-	req_ctx->result = 0;
+	req_ctx->hctx_wr.srcsg = req->src;
+
+	params.hash_size = params.alg_prm.result_size;
 	req_ctx->data_len += params.sg_len + params.bfr_len;
 	skb = create_hash_wr(req, &params);
 	if (IS_ERR(skb)) {
@@ -1653,6 +1702,7 @@ static int chcr_ahash_update(struct ahash_request *req)
 		goto unmap;
 	}
 
+	req_ctx->hctx_wr.processed += params.sg_len;
 	if (remainder) {
 		/* Swap buffers */
 		swap(req_ctx->reqbfr, req_ctx->skbfr);
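With the split in place, chcr_ahash_update() no longer assumes the whole update fits in one WR: it sends at most what chcr_hash_ent_in_wr() reports, rounded down so that buffered-plus-sent data is a whole number of hash blocks, and parks the remainder in reqbfr for the next call. A small sketch of that sizing rule, with hypothetical values:

#include <stdio.h>

/* Sketch of how chcr_ahash_update() sizes one WR. `fits` is what
 * chcr_hash_ent_in_wr() said the WR can carry, `buffered` is
 * req_ctx->reqlen, `bs` the hash block size. The amount sent is rounded
 * down so (buffered + sent) is a whole number of blocks; the rest waits
 * in the bounce buffer. Assumes at least one full block fits.
 */
static unsigned int wr_payload(unsigned int fits, unsigned int nbytes,
			       unsigned int buffered, unsigned int bs)
{
	unsigned int sg_len = fits < nbytes ? fits : nbytes;

	return (sg_len + buffered) / bs * bs - buffered; /* rounddown() */
}

int main(void)
{
	/* 64-byte SHA-256 blocks, 10 bytes already buffered */
	printf("send %u bytes\n", wr_payload(100000, 150000, 10, 64));
	return 0;
}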
@@ -1690,16 +1740,27 @@ static int chcr_ahash_final(struct ahash_request *req)
 	struct uld_ctx *u_ctx = NULL;
 	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
 
+	chcr_init_hctx_per_wr(req_ctx);
 	u_ctx = ULD_CTX(h_ctx(rtfm));
 	if (is_hmac(crypto_ahash_tfm(rtfm)))
 		params.opad_needed = 1;
 	else
 		params.opad_needed = 0;
 	params.sg_len = 0;
+	req_ctx->hctx_wr.isfinal = 1;
 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
-	req_ctx->result = 1;
+	params.kctx_len = roundup(params.alg_prm.result_size, 16);
+	if (is_hmac(crypto_ahash_tfm(rtfm))) {
+		params.opad_needed = 1;
+		params.kctx_len *= 2;
+	} else {
+		params.opad_needed = 0;
+	}
+
+	req_ctx->hctx_wr.result = 1;
 	params.bfr_len = req_ctx->reqlen;
 	req_ctx->data_len += params.bfr_len + params.sg_len;
+	req_ctx->hctx_wr.srcsg = req->src;
 	if (req_ctx->reqlen == 0) {
 		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
 		params.last = 0;
@@ -1712,16 +1773,81 @@ static int chcr_ahash_final(struct ahash_request *req)
 		params.last = 1;
 		params.more = 0;
 	}
+	params.hash_size = crypto_ahash_digestsize(rtfm);
 	skb = create_hash_wr(req, &params);
 	if (IS_ERR(skb))
 		return PTR_ERR(skb);
-
+	req_ctx->reqlen = 0;
 	skb->dev = u_ctx->lldi.ports[0];
 	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
 	chcr_send_wr(skb);
 	return -EINPROGRESS;
 }
 
+static int chcr_ahash_continue(struct ahash_request *req)
+{
+	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
+	struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
+	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+	struct uld_ctx *u_ctx = NULL;
+	struct sk_buff *skb;
+	struct hash_wr_param params;
+	u8 bs;
+	int error;
+
+	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+	u_ctx = ULD_CTX(h_ctx(rtfm));
+	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+					    h_ctx(rtfm)->tx_qidx))) {
+		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+			return -EBUSY;
+	}
+	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
+	params.kctx_len = roundup(params.alg_prm.result_size, 16);
+	if (is_hmac(crypto_ahash_tfm(rtfm))) {
+		params.kctx_len *= 2;
+		params.opad_needed = 1;
+	} else {
+		params.opad_needed = 0;
+	}
+	params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
+					    HASH_SPACE_LEFT(params.kctx_len),
+					    hctx_wr->src_ofst);
+	if ((params.sg_len + hctx_wr->processed) > req->nbytes)
+		params.sg_len = req->nbytes - hctx_wr->processed;
+	if (!hctx_wr->result ||
+	    ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
+		if (is_hmac(crypto_ahash_tfm(rtfm))) {
+			params.kctx_len /= 2;
+			params.opad_needed = 0;
+		}
+		params.last = 0;
+		params.more = 1;
+		params.sg_len = rounddown(params.sg_len, bs);
+		params.hash_size = params.alg_prm.result_size;
+		params.scmd1 = 0;
+	} else {
+		params.last = 1;
+		params.more = 0;
+		params.hash_size = crypto_ahash_digestsize(rtfm);
+		params.scmd1 = reqctx->data_len + params.sg_len;
+	}
+	params.bfr_len = 0;
+	reqctx->data_len += params.sg_len;
+	skb = create_hash_wr(req, &params);
+	if (IS_ERR(skb)) {
+		error = PTR_ERR(skb);
+		goto err;
+	}
+	hctx_wr->processed += params.sg_len;
+	skb->dev = u_ctx->lldi.ports[0];
+	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
+	chcr_send_wr(skb);
+	return 0;
+err:
+	return error;
+}
+
 static int chcr_ahash_finup(struct ahash_request *req)
 {
 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
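chcr_ahash_continue() is the engine of the split: each completion re-runs the sizing for the region remembered in hctx_wr (srcsg, src_ofst, processed), and only the WR that reaches req->nbytes is marked last, so scmd1 can carry the true total length. The loop below models the sequence of WRs that results; the window size is invented:

#include <stdio.h>

/* A sketch (hypothetical sizes) of the multi-WR driving logic that
 * chcr_ahash_continue() implements via completions: keep issuing work
 * requests for the next window until processed == nbytes, and only mark
 * the last one "final".
 */
int main(void)
{
	unsigned int nbytes = 300000, processed = 0;
	const unsigned int window = 1 << 17;   /* what one WR can carry */

	while (processed < nbytes) {
		unsigned int chunk = nbytes - processed;
		int last = chunk <= window;

		if (!last)
			chunk = window;
		processed += chunk;
		printf("WR: %6u bytes, more=%d, last=%d\n", chunk, !last, last);
	}
	return 0;
}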
@@ -1740,37 +1866,59 @@ static int chcr_ahash_finup(struct ahash_request *req)
 		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
 			return -EBUSY;
 	}
+	chcr_init_hctx_per_wr(req_ctx);
+	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
+	if (error)
+		return -ENOMEM;
 
-	if (is_hmac(crypto_ahash_tfm(rtfm)))
-		params.opad_needed = 1;
-	else
-		params.opad_needed = 0;
-
-	params.sg_len = req->nbytes;
-	params.bfr_len = req_ctx->reqlen;
 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
+	params.kctx_len = roundup(params.alg_prm.result_size, 16);
+	if (is_hmac(crypto_ahash_tfm(rtfm))) {
+		params.kctx_len *= 2;
+		params.opad_needed = 1;
+	} else {
+		params.opad_needed = 0;
+	}
+
+	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
+				    HASH_SPACE_LEFT(params.kctx_len), 0);
+	if (params.sg_len < req->nbytes) {
+		if (is_hmac(crypto_ahash_tfm(rtfm))) {
+			params.kctx_len /= 2;
+			params.opad_needed = 0;
+		}
+		params.last = 0;
+		params.more = 1;
+		params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
+					- req_ctx->reqlen;
+		params.hash_size = params.alg_prm.result_size;
+		params.scmd1 = 0;
+	} else {
+		params.last = 1;
+		params.more = 0;
+		params.sg_len = req->nbytes;
+		params.hash_size = crypto_ahash_digestsize(rtfm);
+		params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
+				params.sg_len;
+	}
+	params.bfr_len = req_ctx->reqlen;
 	req_ctx->data_len += params.bfr_len + params.sg_len;
-	req_ctx->result = 1;
+	req_ctx->hctx_wr.result = 1;
+	req_ctx->hctx_wr.srcsg = req->src;
 	if ((req_ctx->reqlen + req->nbytes) == 0) {
 		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
 		params.last = 0;
 		params.more = 1;
 		params.scmd1 = 0;
 		params.bfr_len = bs;
-	} else {
-		params.scmd1 = req_ctx->data_len;
-		params.last = 1;
-		params.more = 0;
 	}
-	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
-	if (error)
-		return -ENOMEM;
-
 	skb = create_hash_wr(req, &params);
 	if (IS_ERR(skb)) {
 		error = PTR_ERR(skb);
 		goto unmap;
 	}
+	req_ctx->reqlen = 0;
+	req_ctx->hctx_wr.processed += params.sg_len;
 	skb->dev = u_ctx->lldi.ports[0];
 	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
 	chcr_send_wr(skb);
@@ -1801,21 +1949,42 @@ static int chcr_ahash_digest(struct ahash_request *req)
 			return -EBUSY;
 	}
 
-	if (is_hmac(crypto_ahash_tfm(rtfm)))
-		params.opad_needed = 1;
-	else
-		params.opad_needed = 0;
+	chcr_init_hctx_per_wr(req_ctx);
 	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
 	if (error)
 		return -ENOMEM;
 
-	params.last = 0;
-	params.more = 0;
-	params.sg_len = req->nbytes;
-	params.bfr_len = 0;
-	params.scmd1 = 0;
 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
-	req_ctx->result = 1;
+	params.kctx_len = roundup(params.alg_prm.result_size, 16);
+	if (is_hmac(crypto_ahash_tfm(rtfm))) {
+		params.kctx_len *= 2;
+		params.opad_needed = 1;
+	} else {
+		params.opad_needed = 0;
+	}
+	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
+				    HASH_SPACE_LEFT(params.kctx_len), 0);
+	if (params.sg_len < req->nbytes) {
+		if (is_hmac(crypto_ahash_tfm(rtfm))) {
+			params.kctx_len /= 2;
+			params.opad_needed = 0;
+		}
+		params.last = 0;
+		params.more = 1;
+		params.scmd1 = 0;
+		params.sg_len = rounddown(params.sg_len, bs);
+		params.hash_size = params.alg_prm.result_size;
+	} else {
+		params.sg_len = req->nbytes;
+		params.hash_size = crypto_ahash_digestsize(rtfm);
+		params.last = 1;
+		params.more = 0;
+		params.scmd1 = req->nbytes + req_ctx->data_len;
+
+	}
+	params.bfr_len = 0;
+	req_ctx->hctx_wr.result = 1;
+	req_ctx->hctx_wr.srcsg = req->src;
 	req_ctx->data_len += params.bfr_len + params.sg_len;
 
 	if (req->nbytes == 0) {
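One subtlety worth noting across the update/finup/digest hunks above: for HMAC the key context is first sized for both ipad and opad (kctx_len *= 2), but when the request must be split, every intermediate WR carries only the ipad state, so the code halves kctx_len and clears opad_needed, deferring opad to whichever WR sees the end of the data. A compact sketch of that rule, with assumed sizes:

#include <stdio.h>

/* Sketch of the HMAC key-context sizing rule used in the update/finup/
 * digest paths (result_size and the split decision are hypothetical).
 */
int main(void)
{
	unsigned int result_size = 32;                        /* e.g. SHA-256 state */
	unsigned int kctx_len = (result_size + 15) / 16 * 16; /* roundup(..., 16) */
	int is_hmac = 1, split = 1;                           /* request needs 2+ WRs */
	int opad_needed = 0;

	if (is_hmac) {
		kctx_len *= 2;           /* room for ipad + opad */
		opad_needed = 1;
	}
	if (split && is_hmac) {
		kctx_len /= 2;           /* intermediate WR: ipad state only */
		opad_needed = 0;         /* opad waits for the last WR */
	}
	printf("kctx_len=%u opad_needed=%d\n", kctx_len, opad_needed);
	return 0;
}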
@@ -1829,6 +1998,7 @@ static int chcr_ahash_digest(struct ahash_request *req)
 		error = PTR_ERR(skb);
 		goto unmap;
 	}
+	req_ctx->hctx_wr.processed += params.sg_len;
 	skb->dev = u_ctx->lldi.ports[0];
 	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
 	chcr_send_wr(skb);
@@ -1845,11 +2015,10 @@ static int chcr_ahash_export(struct ahash_request *areq, void *out)
 
 	state->reqlen = req_ctx->reqlen;
 	state->data_len = req_ctx->data_len;
-	state->is_sg_map = 0;
-	state->result = 0;
 	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
 	memcpy(state->partial_hash, req_ctx->partial_hash,
 	       CHCR_HASH_MAX_DIGEST_SIZE);
+	chcr_init_hctx_per_wr(state);
 	return 0;
 }
@@ -1862,11 +2031,10 @@ static int chcr_ahash_import(struct ahash_request *areq, const void *in)
 	req_ctx->data_len = state->data_len;
 	req_ctx->reqbfr = req_ctx->bfr1;
 	req_ctx->skbfr = req_ctx->bfr2;
-	req_ctx->is_sg_map = 0;
-	req_ctx->result = 0;
 	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
 	memcpy(req_ctx->partial_hash, state->partial_hash,
 	       CHCR_HASH_MAX_DIGEST_SIZE);
+	chcr_init_hctx_per_wr(req_ctx);
 	return 0;
 }
@@ -1963,10 +2131,8 @@ static int chcr_sha_init(struct ahash_request *areq)
 	req_ctx->reqlen = 0;
 	req_ctx->reqbfr = req_ctx->bfr1;
 	req_ctx->skbfr = req_ctx->bfr2;
-	req_ctx->skb = NULL;
-	req_ctx->result = 0;
 	req_ctx->is_sg_map = 0;
 	copy_hash_init_values(req_ctx->partial_hash, digestsize);
 
 	return 0;
 }
@@ -2407,22 +2573,26 @@ void chcr_add_hash_src_ent(struct ahash_request *req,
 	struct ulptx_walk ulp_walk;
 	struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
 
-	if (reqctx->imm) {
+	if (reqctx->hctx_wr.imm) {
 		u8 *buf = (u8 *)ulptx;
 
 		if (param->bfr_len) {
 			memcpy(buf, reqctx->reqbfr, param->bfr_len);
 			buf += param->bfr_len;
 		}
-		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
-				   buf, param->sg_len, 0);
+
+		sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
+				   sg_nents(reqctx->hctx_wr.srcsg), buf,
+				   param->sg_len, 0);
 	} else {
 		ulptx_walk_init(&ulp_walk, ulptx);
 		if (param->bfr_len)
 			ulptx_walk_add_page(&ulp_walk, param->bfr_len,
-					    &reqctx->dma_addr);
-		ulptx_walk_add_sg(&ulp_walk, req->src, param->sg_len,
-				  0);
+					    &reqctx->hctx_wr.dma_addr);
+		ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
+				  param->sg_len, reqctx->hctx_wr.src_ofst);
+		reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
+		reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
 		ulptx_walk_end(&ulp_walk);
 	}
 }
@@ -2439,7 +2609,7 @@ int chcr_hash_dma_map(struct device *dev,
 			   DMA_TO_DEVICE);
 	if (!error)
 		return -ENOMEM;
-	req_ctx->is_sg_map = 1;
+	req_ctx->hctx_wr.is_sg_map = 1;
 	return 0;
 }
@@ -2453,7 +2623,7 @@ void chcr_hash_dma_unmap(struct device *dev,
 
 	dma_unmap_sg(dev, req->src, sg_nents(req->src),
 		     DMA_TO_DEVICE);
-	req_ctx->is_sg_map = 0;
+	req_ctx->hctx_wr.is_sg_map = 0;
 
 }