crypto: qat - use pre-allocated buffers in datapath
[ Upstream commit e0831e7af4e03f2715de102e18e9179ec0a81562 ]
In order to do DMAs, the QAT device requires that the scatterlist
structures are mapped and translated into a format that the firmware can
understand. This is defined as the composition of a scatter gather list
(SGL) descriptor header, the struct qat_alg_buf_list, plus a variable
number of flat buffer descriptors, the struct qat_alg_buf.
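For reference, the firmware-visible layout described above is given by the two
structures this patch moves into qat_crypto.h (shown here with illustrative
field comments added):

  struct qat_alg_buf {
  	u32 len;	/* length of one flat buffer */
  	u32 resrvd;
  	u64 addr;	/* DMA address of that buffer */
  } __packed;

  struct qat_alg_buf_list {
  	u64 resrvd;
  	u32 num_bufs;		/* flat buffer descriptors that follow */
  	u32 num_mapped_bufs;
  	struct qat_alg_buf bufers[];
  } __packed;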
The allocation and mapping of these data structures is done each time a
request is received from the skcipher and aead APIs.
In an OOM situation, this behaviour might lead to a dead-lock if an
allocation fails.

Based on the conversation in [1], increase the size of the aead and
skcipher request contexts to include an SGL descriptor that can handle
a maximum of 4 flat buffers.
If a request requires more than 4 entries, buffers are allocated dynamically.
[1] https://lore.kernel.org/linux-crypto/20200722072932.GA27544@gondor.apana.org.au/
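In outline, the source-buffer setup in qat_alg_sgl_to_bufl() now picks between
the descriptor embedded in the request and a dynamic allocation (condensed from
the patch below; the destination path is analogous):

  if (n > QAT_MAX_BUFF_DESC) {
  	bufl = kzalloc_node(sz, GFP_ATOMIC, node);
  	if (unlikely(!bufl))
  		return -ENOMEM;
  } else {
  	bufl = &qat_req->buf.sgl_src.sgl_hdr;
  	memset(bufl, 0, sizeof(struct qat_alg_buf_list));
  	qat_req->buf.sgl_src_valid = true;
  }

The sgl_src_valid/sgl_dst_valid flags are then consulted on the free and error
paths so that only dynamically allocated lists are passed to kfree().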
Cc: stable@vger.kernel.org
Fixes: d370cec321 ("crypto: qat - Intel(R) QAT crypto interface")
Reported-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Reviewed-by: Marco Chiappero <marco.chiappero@intel.com>
Reviewed-by: Wojciech Ziemba <wojciech.ziemba@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Stable-dep-of: cf5bb835b7c8 ("crypto: qat - fix DMA transfer direction")
Signed-off-by: Sasha Levin <sashal@kernel.org>
committed by Greg Kroah-Hartman (commit a43babc059, parent a91af50850)
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -34,19 +34,6 @@
 static DEFINE_MUTEX(algs_lock);
 static unsigned int active_devs;
 
-struct qat_alg_buf {
-	u32 len;
-	u32 resrvd;
-	u64 addr;
-} __packed;
-
-struct qat_alg_buf_list {
-	u64 resrvd;
-	u32 num_bufs;
-	u32 num_mapped_bufs;
-	struct qat_alg_buf bufers[];
-} __packed __aligned(64);
-
 /* Common content descriptor */
 struct qat_alg_cd {
 	union {
@@ -644,7 +631,10 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
 				 bl->bufers[i].len, DMA_BIDIRECTIONAL);
 
 	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
-	kfree(bl);
+
+	if (!qat_req->buf.sgl_src_valid)
+		kfree(bl);
+
 	if (blp != blpout) {
 		/* If out of place operation dma unmap only data */
 		int bufless = blout->num_bufs - blout->num_mapped_bufs;
@@ -655,6 +645,8 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
 					 DMA_BIDIRECTIONAL);
 		}
 		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
-		kfree(blout);
+
+		if (!qat_req->buf.sgl_dst_valid)
+			kfree(blout);
 	}
 }
@@ -672,15 +664,24 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 	dma_addr_t blp = DMA_MAPPING_ERROR;
 	dma_addr_t bloutp = DMA_MAPPING_ERROR;
 	struct scatterlist *sg;
-	size_t sz_out, sz = struct_size(bufl, bufers, n + 1);
+	size_t sz_out, sz = struct_size(bufl, bufers, n);
+	int node = dev_to_node(&GET_DEV(inst->accel_dev));
 
 	if (unlikely(!n))
 		return -EINVAL;
 
-	bufl = kzalloc_node(sz, GFP_ATOMIC,
-			    dev_to_node(&GET_DEV(inst->accel_dev)));
-	if (unlikely(!bufl))
-		return -ENOMEM;
+	qat_req->buf.sgl_src_valid = false;
+	qat_req->buf.sgl_dst_valid = false;
+
+	if (n > QAT_MAX_BUFF_DESC) {
+		bufl = kzalloc_node(sz, GFP_ATOMIC, node);
+		if (unlikely(!bufl))
+			return -ENOMEM;
+	} else {
+		bufl = &qat_req->buf.sgl_src.sgl_hdr;
+		memset(bufl, 0, sizeof(struct qat_alg_buf_list));
+		qat_req->buf.sgl_src_valid = true;
+	}
 
 	for_each_sg(sgl, sg, n, i)
 		bufl->bufers[i].addr = DMA_MAPPING_ERROR;
@@ -711,12 +712,18 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 		struct qat_alg_buf *bufers;
 
 		n = sg_nents(sglout);
-		sz_out = struct_size(buflout, bufers, n + 1);
+		sz_out = struct_size(buflout, bufers, n);
 		sg_nctr = 0;
-		buflout = kzalloc_node(sz_out, GFP_ATOMIC,
-				       dev_to_node(&GET_DEV(inst->accel_dev)));
-		if (unlikely(!buflout))
-			goto err_in;
+
+		if (n > QAT_MAX_BUFF_DESC) {
+			buflout = kzalloc_node(sz_out, GFP_ATOMIC, node);
+			if (unlikely(!buflout))
+				goto err_in;
+		} else {
+			buflout = &qat_req->buf.sgl_dst.sgl_hdr;
+			memset(buflout, 0, sizeof(struct qat_alg_buf_list));
+			qat_req->buf.sgl_dst_valid = true;
+		}
 
 		bufers = buflout->bufers;
 		for_each_sg(sglout, sg, n, i)
@@ -761,6 +768,8 @@ err_out:
 			dma_unmap_single(dev, buflout->bufers[i].addr,
 					 buflout->bufers[i].len,
 					 DMA_BIDIRECTIONAL);
-	kfree(buflout);
+
+	if (!qat_req->buf.sgl_dst_valid)
+		kfree(buflout);
 
 err_in:
@@ -774,6 +783,7 @@ err_in:
 				 bufl->bufers[i].len,
 				 DMA_BIDIRECTIONAL);
 
-	kfree(bufl);
+	if (!qat_req->buf.sgl_src_valid)
+		kfree(bufl);
 
 	dev_err(dev, "Failed to map buf for dma\n");
--- a/drivers/crypto/qat/qat_common/qat_crypto.h
+++ b/drivers/crypto/qat/qat_common/qat_crypto.h
@@ -20,6 +20,26 @@ struct qat_crypto_instance {
 	atomic_t refctr;
 };
 
+#define QAT_MAX_BUFF_DESC 4
+
+struct qat_alg_buf {
+	u32 len;
+	u32 resrvd;
+	u64 addr;
+} __packed;
+
+struct qat_alg_buf_list {
+	u64 resrvd;
+	u32 num_bufs;
+	u32 num_mapped_bufs;
+	struct qat_alg_buf bufers[];
+} __packed;
+
+struct qat_alg_fixed_buf_list {
+	struct qat_alg_buf_list sgl_hdr;
+	struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC];
+} __packed __aligned(64);
+
 struct qat_crypto_request_buffs {
 	struct qat_alg_buf_list *bl;
 	dma_addr_t blp;
@@ -27,6 +47,10 @@ struct qat_crypto_request_buffs {
 	dma_addr_t bloutp;
 	size_t sz;
 	size_t sz_out;
+	bool sgl_src_valid;
+	bool sgl_dst_valid;
+	struct qat_alg_fixed_buf_list sgl_src;
+	struct qat_alg_fixed_buf_list sgl_dst;
 };
 
 struct qat_crypto_request;
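With these definitions each flat buffer descriptor is 16 bytes and the SGL
header is 16 bytes, so a pre-allocated list carries 16 + 4 * 16 = 80 bytes of
descriptor data per direction, padded up by the 64-byte alignment and embedded
twice (sgl_src and sgl_dst) in every request context. A hypothetical
compile-time check of that sizing assumption, not part of the patch, could
look like:

  /* Hypothetical sanity checks, not in the patch: packed descriptor sizes. */
  static_assert(sizeof(struct qat_alg_buf) == 16);
  static_assert(sizeof(struct qat_alg_buf_list) == 16);
  static_assert(sizeof(struct qat_alg_fixed_buf_list) % 64 == 0);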
|