zip_crypto.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <crypto/internal/acompress.h>
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include "zip.h"

/* hisi_zip_sqe dw3 */
#define HZIP_BD_STATUS_M		GENMASK(7, 0)
/* hisi_zip_sqe dw7 */
#define HZIP_IN_SGE_DATA_OFFSET_M	GENMASK(23, 0)
#define HZIP_SQE_TYPE_M			GENMASK(31, 28)
/* hisi_zip_sqe dw8 */
#define HZIP_OUT_SGE_DATA_OFFSET_M	GENMASK(23, 0)
/* hisi_zip_sqe dw9 */
#define HZIP_REQ_TYPE_M			GENMASK(7, 0)
#define HZIP_ALG_TYPE_ZLIB		0x02
#define HZIP_ALG_TYPE_GZIP		0x03
#define HZIP_BUF_TYPE_M			GENMASK(11, 8)
#define HZIP_PBUFFER			0x0
#define HZIP_SGL			0x1

#define HZIP_ZLIB_HEAD_SIZE		2
#define HZIP_GZIP_HEAD_SIZE		10

#define GZIP_HEAD_FHCRC_BIT		BIT(1)
#define GZIP_HEAD_FEXTRA_BIT		BIT(2)
#define GZIP_HEAD_FNAME_BIT		BIT(3)
#define GZIP_HEAD_FCOMMENT_BIT		BIT(4)

#define GZIP_HEAD_FLG_SHIFT		3
#define GZIP_HEAD_FEXTRA_SHIFT		10
#define GZIP_HEAD_FEXTRA_XLEN		2UL
#define GZIP_HEAD_FHCRC_SIZE		2

#define HZIP_GZIP_HEAD_BUF		256
#define HZIP_ALG_PRIORITY		300
#define HZIP_SGL_SGE_NR			10

#define HZIP_ALG_ZLIB			GENMASK(1, 0)
#define HZIP_ALG_GZIP			GENMASK(3, 2)

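/*
 * Fixed stream headers written by software: an RFC 1950 zlib header
 * (deflate, 32 KB window, default compression level) and an RFC 1952
 * gzip header (deflate, no optional fields, MTIME = 0, OS = Unix).
 */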
static const u8 zlib_head[HZIP_ZLIB_HEAD_SIZE] = {0x78, 0x9c};
static const u8 gzip_head[HZIP_GZIP_HEAD_SIZE] = {
	0x1f, 0x8b, 0x08, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x03
};

enum hisi_zip_alg_type {
	HZIP_ALG_TYPE_COMP = 0,
	HZIP_ALG_TYPE_DECOMP = 1,
};

enum {
	HZIP_QPC_COMP,
	HZIP_QPC_DECOMP,
	HZIP_CTX_Q_NUM
};

#define COMP_NAME_TO_TYPE(alg_name)					\
	(!strcmp((alg_name), "zlib-deflate") ? HZIP_ALG_TYPE_ZLIB :	\
	 !strcmp((alg_name), "gzip") ? HZIP_ALG_TYPE_GZIP : 0)

#define TO_HEAD_SIZE(req_type)						\
	(((req_type) == HZIP_ALG_TYPE_ZLIB) ? sizeof(zlib_head) :	\
	 ((req_type) == HZIP_ALG_TYPE_GZIP) ? sizeof(gzip_head) : 0)

#define TO_HEAD(req_type)						\
	(((req_type) == HZIP_ALG_TYPE_ZLIB) ? zlib_head :		\
	 ((req_type) == HZIP_ALG_TYPE_GZIP) ? gzip_head : NULL)

struct hisi_zip_req {
	struct acomp_req *req;
	u32 sskip;
	u32 dskip;
	struct hisi_acc_hw_sgl *hw_src;
	struct hisi_acc_hw_sgl *hw_dst;
	dma_addr_t dma_src;
	dma_addr_t dma_dst;
	u16 req_id;
};

struct hisi_zip_req_q {
	struct hisi_zip_req *q;
	unsigned long *req_bitmap;
	rwlock_t req_lock;
	u16 size;
};

struct hisi_zip_qp_ctx {
	struct hisi_qp *qp;
	struct hisi_zip_req_q req_q;
	struct hisi_acc_sgl_pool *sgl_pool;
	struct hisi_zip *zip_dev;
	struct hisi_zip_ctx *ctx;
};

struct hisi_zip_sqe_ops {
	u8 sqe_type;
	void (*fill_addr)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
	void (*fill_buf_size)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
	void (*fill_buf_type)(struct hisi_zip_sqe *sqe, u8 buf_type);
	void (*fill_req_type)(struct hisi_zip_sqe *sqe, u8 req_type);
	void (*fill_tag)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
	void (*fill_sqe_type)(struct hisi_zip_sqe *sqe, u8 sqe_type);
	u32 (*get_tag)(struct hisi_zip_sqe *sqe);
	u32 (*get_status)(struct hisi_zip_sqe *sqe);
	u32 (*get_dstlen)(struct hisi_zip_sqe *sqe);
};

struct hisi_zip_ctx {
	struct hisi_zip_qp_ctx qp_ctx[HZIP_CTX_Q_NUM];
	const struct hisi_zip_sqe_ops *ops;
};

static int sgl_sge_nr_set(const char *val, const struct kernel_param *kp)
{
	int ret;
	u16 n;

	if (!val)
		return -EINVAL;

	ret = kstrtou16(val, 10, &n);
	if (ret || n == 0 || n > HISI_ACC_SGL_SGE_NR_MAX)
		return -EINVAL;

	return param_set_ushort(val, kp);
}

static const struct kernel_param_ops sgl_sge_nr_ops = {
	.set = sgl_sge_nr_set,
	.get = param_get_ushort,
};

static u16 sgl_sge_nr = HZIP_SGL_SGE_NR;
module_param_cb(sgl_sge_nr, &sgl_sge_nr_ops, &sgl_sge_nr, 0444);
MODULE_PARM_DESC(sgl_sge_nr, "Number of sge in sgl(1-255)");

static u32 get_extra_field_size(const u8 *start)
{
	return *((u16 *)start) + GZIP_HEAD_FEXTRA_XLEN;
}

static u32 get_name_field_size(const u8 *start)
{
	return strlen(start) + 1;
}

static u32 get_comment_field_size(const u8 *start)
{
	return strlen(start) + 1;
}

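/*
 * Walk the optional gzip header fields (FEXTRA, FNAME, FCOMMENT, FHCRC)
 * announced by the FLG byte at offset 3 and return the total header size,
 * starting from the 10-byte fixed part.
 */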
static u32 __get_gzip_head_size(const u8 *src)
{
	u8 head_flg = *(src + GZIP_HEAD_FLG_SHIFT);
	u32 size = GZIP_HEAD_FEXTRA_SHIFT;

	if (head_flg & GZIP_HEAD_FEXTRA_BIT)
		size += get_extra_field_size(src + size);
	if (head_flg & GZIP_HEAD_FNAME_BIT)
		size += get_name_field_size(src + size);
	if (head_flg & GZIP_HEAD_FCOMMENT_BIT)
		size += get_comment_field_size(src + size);
	if (head_flg & GZIP_HEAD_FHCRC_BIT)
		size += GZIP_HEAD_FHCRC_SIZE;

	return size;
}

static u32 __maybe_unused get_gzip_head_size(struct scatterlist *sgl)
{
	char buf[HZIP_GZIP_HEAD_BUF];

	sg_copy_to_buffer(sgl, sg_nents(sgl), buf, sizeof(buf));

	return __get_gzip_head_size(buf);
}

static int add_comp_head(struct scatterlist *dst, u8 req_type)
{
	int head_size = TO_HEAD_SIZE(req_type);
	const u8 *head = TO_HEAD(req_type);
	int ret;

	ret = sg_copy_from_buffer(dst, sg_nents(dst), head, head_size);
	if (unlikely(ret != head_size)) {
		pr_err("the head size of the buffer is wrong (%d)!\n", ret);
		return -ENOMEM;
	}

	return head_size;
}

static int get_comp_head_size(struct acomp_req *acomp_req, u8 req_type)
{
	if (unlikely(!acomp_req->src || !acomp_req->slen))
		return -EINVAL;

	if (unlikely(req_type == HZIP_ALG_TYPE_GZIP &&
		     acomp_req->slen < GZIP_HEAD_FEXTRA_SHIFT))
		return -EINVAL;

	switch (req_type) {
	case HZIP_ALG_TYPE_ZLIB:
		return TO_HEAD_SIZE(HZIP_ALG_TYPE_ZLIB);
	case HZIP_ALG_TYPE_GZIP:
		return TO_HEAD_SIZE(HZIP_ALG_TYPE_GZIP);
	default:
		pr_err("request type is not supported!\n");
		return -EINVAL;
	}
}

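/*
 * Reserve a slot in the per-QP request cache. For compression, dskip
 * reserves room in the output for the software-written header; for
 * decompression, sskip tells the hardware how many input header bytes
 * to skip.
 */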
static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
						struct hisi_zip_qp_ctx *qp_ctx,
						size_t head_size, bool is_comp)
{
	struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
	struct hisi_zip_req *q = req_q->q;
	struct hisi_zip_req *req_cache;
	int req_id;

	write_lock(&req_q->req_lock);

	req_id = find_first_zero_bit(req_q->req_bitmap, req_q->size);
	if (req_id >= req_q->size) {
		write_unlock(&req_q->req_lock);
		dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n");
		return ERR_PTR(-EAGAIN);
	}
	set_bit(req_id, req_q->req_bitmap);

	write_unlock(&req_q->req_lock);

	req_cache = q + req_id;
	req_cache->req_id = req_id;
	req_cache->req = req;

	if (is_comp) {
		req_cache->sskip = 0;
		req_cache->dskip = head_size;
	} else {
		req_cache->sskip = head_size;
		req_cache->dskip = 0;
	}

	return req_cache;
}

static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx,
				struct hisi_zip_req *req)
{
	struct hisi_zip_req_q *req_q = &qp_ctx->req_q;

	write_lock(&req_q->req_lock);
	clear_bit(req->req_id, req_q->req_bitmap);
	write_unlock(&req_q->req_lock);
}

static void hisi_zip_fill_addr(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
{
	sqe->source_addr_l = lower_32_bits(req->dma_src);
	sqe->source_addr_h = upper_32_bits(req->dma_src);
	sqe->dest_addr_l = lower_32_bits(req->dma_dst);
	sqe->dest_addr_h = upper_32_bits(req->dma_dst);
}

static void hisi_zip_fill_buf_size(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
{
	struct acomp_req *a_req = req->req;

	sqe->input_data_length = a_req->slen - req->sskip;
	sqe->dest_avail_out = a_req->dlen - req->dskip;
	sqe->dw7 = FIELD_PREP(HZIP_IN_SGE_DATA_OFFSET_M, req->sskip);
	sqe->dw8 = FIELD_PREP(HZIP_OUT_SGE_DATA_OFFSET_M, req->dskip);
}

static void hisi_zip_fill_buf_type(struct hisi_zip_sqe *sqe, u8 buf_type)
{
	u32 val;

	val = sqe->dw9 & ~HZIP_BUF_TYPE_M;
	val |= FIELD_PREP(HZIP_BUF_TYPE_M, buf_type);
	sqe->dw9 = val;
}

static void hisi_zip_fill_req_type(struct hisi_zip_sqe *sqe, u8 req_type)
{
	u32 val;

	val = sqe->dw9 & ~HZIP_REQ_TYPE_M;
	val |= FIELD_PREP(HZIP_REQ_TYPE_M, req_type);
	sqe->dw9 = val;
}

static void hisi_zip_fill_tag_v1(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
{
	sqe->dw13 = req->req_id;
}

static void hisi_zip_fill_tag_v2(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
{
	sqe->dw26 = req->req_id;
}

static void hisi_zip_fill_sqe_type(struct hisi_zip_sqe *sqe, u8 sqe_type)
{
	u32 val;

	val = sqe->dw7 & ~HZIP_SQE_TYPE_M;
	val |= FIELD_PREP(HZIP_SQE_TYPE_M, sqe_type);
	sqe->dw7 = val;
}

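/* Build the submission queue element via the hardware-version specific ops. */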
static void hisi_zip_fill_sqe(struct hisi_zip_ctx *ctx, struct hisi_zip_sqe *sqe,
			      u8 req_type, struct hisi_zip_req *req)
{
	const struct hisi_zip_sqe_ops *ops = ctx->ops;

	memset(sqe, 0, sizeof(struct hisi_zip_sqe));

	ops->fill_addr(sqe, req);
	ops->fill_buf_size(sqe, req);
	ops->fill_buf_type(sqe, HZIP_SGL);
	ops->fill_req_type(sqe, req_type);
	ops->fill_tag(sqe, req);
	ops->fill_sqe_type(sqe, ops->sqe_type);
}

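/*
 * Map the source and destination scatterlists to hardware SGLs (two pool
 * slots per request), fill the SQE and send it on the queue pair. Returns
 * -EINPROGRESS once the request has been queued to hardware.
 */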
static int hisi_zip_do_work(struct hisi_zip_req *req,
			    struct hisi_zip_qp_ctx *qp_ctx)
{
	struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
	struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
	struct acomp_req *a_req = req->req;
	struct hisi_qp *qp = qp_ctx->qp;
	struct device *dev = &qp->qm->pdev->dev;
	struct hisi_zip_sqe zip_sqe;
	int ret;

	if (unlikely(!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen))
		return -EINVAL;

	req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool,
						    req->req_id << 1, &req->dma_src);
	if (IS_ERR(req->hw_src)) {
		dev_err(dev, "failed to map the src buffer to hw sgl (%ld)!\n",
			PTR_ERR(req->hw_src));
		return PTR_ERR(req->hw_src);
	}

	req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->dst, pool,
						    (req->req_id << 1) + 1,
						    &req->dma_dst);
	if (IS_ERR(req->hw_dst)) {
		ret = PTR_ERR(req->hw_dst);
		dev_err(dev, "failed to map the dst buffer to hw sgl (%d)!\n",
			ret);
		goto err_unmap_input;
	}

	hisi_zip_fill_sqe(qp_ctx->ctx, &zip_sqe, qp->req_type, req);

	/* send command to start a task */
	atomic64_inc(&dfx->send_cnt);
	ret = hisi_qp_send(qp, &zip_sqe);
	if (unlikely(ret < 0)) {
		atomic64_inc(&dfx->send_busy_cnt);
		ret = -EAGAIN;
		dev_dbg_ratelimited(dev, "failed to send request!\n");
		goto err_unmap_output;
	}

	return -EINPROGRESS;

err_unmap_output:
	hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst);
err_unmap_input:
	hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src);
	return ret;
}

static u32 hisi_zip_get_tag_v1(struct hisi_zip_sqe *sqe)
{
	return sqe->dw13;
}

static u32 hisi_zip_get_tag_v2(struct hisi_zip_sqe *sqe)
{
	return sqe->dw26;
}

static u32 hisi_zip_get_status(struct hisi_zip_sqe *sqe)
{
	return sqe->dw3 & HZIP_BD_STATUS_M;
}

static u32 hisi_zip_get_dstlen(struct hisi_zip_sqe *sqe)
{
	return sqe->produced;
}

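/*
 * Completion callback invoked by the QM for each finished descriptor:
 * look up the request from the SQE tag, check the status, unmap the
 * hardware SGLs, fix up the output length and complete the acomp request.
 */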
static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
{
	struct hisi_zip_qp_ctx *qp_ctx = qp->qp_ctx;
	const struct hisi_zip_sqe_ops *ops = qp_ctx->ctx->ops;
	struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
	struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
	struct device *dev = &qp->qm->pdev->dev;
	struct hisi_zip_sqe *sqe = data;
	u32 tag = ops->get_tag(sqe);
	struct hisi_zip_req *req = req_q->q + tag;
	struct acomp_req *acomp_req = req->req;
	u32 status, dlen, head_size;
	int err = 0;

	atomic64_inc(&dfx->recv_cnt);
	status = ops->get_status(sqe);
	if (unlikely(status != 0 && status != HZIP_NC_ERR)) {
		dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n",
			(qp->alg_type == 0) ? "" : "de", qp->qp_id, status,
			sqe->produced);
		atomic64_inc(&dfx->err_bd_cnt);
		err = -EIO;
	}

	dlen = ops->get_dstlen(sqe);

	hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src);
	hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst);

	head_size = (qp->alg_type == 0) ? TO_HEAD_SIZE(qp->req_type) : 0;
	acomp_req->dlen = dlen + head_size;

	if (acomp_req->base.complete)
		acomp_request_complete(acomp_req, err);

	hisi_zip_remove_req(qp_ctx, req);
}

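/*
 * Compression entry point: write the zlib/gzip header into the destination
 * buffer first, then hand the payload to the hardware with the output
 * offset by the header size.
 */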
static int hisi_zip_acompress(struct acomp_req *acomp_req)
{
	struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
	struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_COMP];
	struct device *dev = &qp_ctx->qp->qm->pdev->dev;
	struct hisi_zip_req *req;
	int head_size;
	int ret;

	/* let's output compression head now */
	head_size = add_comp_head(acomp_req->dst, qp_ctx->qp->req_type);
	if (unlikely(head_size < 0)) {
		dev_err_ratelimited(dev, "failed to add comp head (%d)!\n",
				    head_size);
		return head_size;
	}

	req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, true);
	if (IS_ERR(req))
		return PTR_ERR(req);

	ret = hisi_zip_do_work(req, qp_ctx);
	if (unlikely(ret != -EINPROGRESS)) {
		dev_info_ratelimited(dev, "failed to do compress (%d)!\n", ret);
		hisi_zip_remove_req(qp_ctx, req);
	}

	return ret;
}

static int hisi_zip_adecompress(struct acomp_req *acomp_req)
{
	struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
	struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_DECOMP];
	struct device *dev = &qp_ctx->qp->qm->pdev->dev;
	struct hisi_zip_req *req;
	int head_size, ret;

	head_size = get_comp_head_size(acomp_req, qp_ctx->qp->req_type);
	if (unlikely(head_size < 0)) {
		dev_err_ratelimited(dev, "failed to get comp head size (%d)!\n",
				    head_size);
		return head_size;
	}

	req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, false);
	if (IS_ERR(req))
		return PTR_ERR(req);

	ret = hisi_zip_do_work(req, qp_ctx);
	if (unlikely(ret != -EINPROGRESS)) {
		dev_info_ratelimited(dev, "failed to do decompress (%d)!\n",
				     ret);
		hisi_zip_remove_req(qp_ctx, req);
	}

	return ret;
}

static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *qp_ctx,
			     int alg_type, int req_type)
{
	struct device *dev = &qp->qm->pdev->dev;
	int ret;

	qp->req_type = req_type;
	qp->alg_type = alg_type;
	qp->qp_ctx = qp_ctx;

	ret = hisi_qm_start_qp(qp, 0);
	if (ret < 0) {
		dev_err(dev, "failed to start qp (%d)!\n", ret);
		return ret;
	}

	qp_ctx->qp = qp;

	return 0;
}

static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *qp_ctx)
{
	hisi_qm_stop_qp(qp_ctx->qp);
	hisi_qm_free_qps(&qp_ctx->qp, 1);
}

static const struct hisi_zip_sqe_ops hisi_zip_ops_v1 = {
	.sqe_type = 0,
	.fill_addr = hisi_zip_fill_addr,
	.fill_buf_size = hisi_zip_fill_buf_size,
	.fill_buf_type = hisi_zip_fill_buf_type,
	.fill_req_type = hisi_zip_fill_req_type,
	.fill_tag = hisi_zip_fill_tag_v1,
	.fill_sqe_type = hisi_zip_fill_sqe_type,
	.get_tag = hisi_zip_get_tag_v1,
	.get_status = hisi_zip_get_status,
	.get_dstlen = hisi_zip_get_dstlen,
};

static const struct hisi_zip_sqe_ops hisi_zip_ops_v2 = {
	.sqe_type = 0x3,
	.fill_addr = hisi_zip_fill_addr,
	.fill_buf_size = hisi_zip_fill_buf_size,
	.fill_buf_type = hisi_zip_fill_buf_type,
	.fill_req_type = hisi_zip_fill_req_type,
	.fill_tag = hisi_zip_fill_tag_v2,
	.fill_sqe_type = hisi_zip_fill_sqe_type,
	.get_tag = hisi_zip_get_tag_v2,
	.get_status = hisi_zip_get_status,
	.get_dstlen = hisi_zip_get_dstlen,
};

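/*
 * Create one queue pair for compression and one for decompression on the
 * requested NUMA node, and select the v1 or v2 SQE ops according to the
 * hardware version.
 */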
static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int node)
{
	struct hisi_qp *qps[HZIP_CTX_Q_NUM] = { NULL };
	struct hisi_zip_qp_ctx *qp_ctx;
	struct hisi_zip *hisi_zip;
	int ret, i, j;

	ret = zip_create_qps(qps, HZIP_CTX_Q_NUM, node);
	if (ret) {
		pr_err("failed to create zip qps (%d)!\n", ret);
		return -ENODEV;
	}

	hisi_zip = container_of(qps[0]->qm, struct hisi_zip, qm);

	for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
		/* alg_type = 0 for compress, 1 for decompress in hw sqe */
		qp_ctx = &hisi_zip_ctx->qp_ctx[i];
		qp_ctx->ctx = hisi_zip_ctx;
		ret = hisi_zip_start_qp(qps[i], qp_ctx, i, req_type);
		if (ret) {
			for (j = i - 1; j >= 0; j--)
				hisi_qm_stop_qp(hisi_zip_ctx->qp_ctx[j].qp);

			hisi_qm_free_qps(qps, HZIP_CTX_Q_NUM);
			return ret;
		}

		qp_ctx->zip_dev = hisi_zip;
	}

	if (hisi_zip->qm.ver < QM_HW_V3)
		hisi_zip_ctx->ops = &hisi_zip_ops_v1;
	else
		hisi_zip_ctx->ops = &hisi_zip_ops_v2;

	return 0;
}

static void hisi_zip_ctx_exit(struct hisi_zip_ctx *hisi_zip_ctx)
{
	int i;

	for (i = 0; i < HZIP_CTX_Q_NUM; i++)
		hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[i]);
}

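/*
 * Allocate a request bitmap and request array per queue pair, both sized
 * to the submission queue depth, so every in-flight descriptor has a slot.
 */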
static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
{
	u16 q_depth = ctx->qp_ctx[0].qp->sq_depth;
	struct hisi_zip_req_q *req_q;
	int i, ret;

	for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
		req_q = &ctx->qp_ctx[i].req_q;
		req_q->size = q_depth;

		req_q->req_bitmap = bitmap_zalloc(req_q->size, GFP_KERNEL);
		if (!req_q->req_bitmap) {
			ret = -ENOMEM;
			if (i == 0)
				return ret;

			goto err_free_comp_q;
		}
		rwlock_init(&req_q->req_lock);

		req_q->q = kcalloc(req_q->size, sizeof(struct hisi_zip_req),
				   GFP_KERNEL);
		if (!req_q->q) {
			ret = -ENOMEM;
			if (i == 0)
				goto err_free_comp_bitmap;
			else
				goto err_free_decomp_bitmap;
		}
	}

	return 0;

err_free_decomp_bitmap:
	bitmap_free(ctx->qp_ctx[HZIP_QPC_DECOMP].req_q.req_bitmap);
err_free_comp_q:
	kfree(ctx->qp_ctx[HZIP_QPC_COMP].req_q.q);
err_free_comp_bitmap:
	bitmap_free(ctx->qp_ctx[HZIP_QPC_COMP].req_q.req_bitmap);
	return ret;
}

static void hisi_zip_release_req_q(struct hisi_zip_ctx *ctx)
{
	int i;

	for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
		kfree(ctx->qp_ctx[i].req_q.q);
		bitmap_free(ctx->qp_ctx[i].req_q.req_bitmap);
	}
}

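/*
 * Each request uses two hardware SGLs, one for the source and one for the
 * destination (indices req_id << 1 and (req_id << 1) + 1), so the pool
 * holds twice the queue depth.
 */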
static int hisi_zip_create_sgl_pool(struct hisi_zip_ctx *ctx)
{
	u16 q_depth = ctx->qp_ctx[0].qp->sq_depth;
	struct hisi_zip_qp_ctx *tmp;
	struct device *dev;
	int i;

	for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
		tmp = &ctx->qp_ctx[i];
		dev = &tmp->qp->qm->pdev->dev;
		tmp->sgl_pool = hisi_acc_create_sgl_pool(dev, q_depth << 1,
							 sgl_sge_nr);
		if (IS_ERR(tmp->sgl_pool)) {
			if (i == 1)
				goto err_free_sgl_pool0;
			return -ENOMEM;
		}
	}

	return 0;

err_free_sgl_pool0:
	hisi_acc_free_sgl_pool(&ctx->qp_ctx[HZIP_QPC_COMP].qp->qm->pdev->dev,
			       ctx->qp_ctx[HZIP_QPC_COMP].sgl_pool);
	return -ENOMEM;
}

static void hisi_zip_release_sgl_pool(struct hisi_zip_ctx *ctx)
{
	int i;

	for (i = 0; i < HZIP_CTX_Q_NUM; i++)
		hisi_acc_free_sgl_pool(&ctx->qp_ctx[i].qp->qm->pdev->dev,
				       ctx->qp_ctx[i].sgl_pool);
}

static void hisi_zip_set_acomp_cb(struct hisi_zip_ctx *ctx,
				  void (*fn)(struct hisi_qp *, void *))
{
	int i;

	for (i = 0; i < HZIP_CTX_Q_NUM; i++)
		ctx->qp_ctx[i].qp->req_cb = fn;
}

static int hisi_zip_acomp_init(struct crypto_acomp *tfm)
{
	const char *alg_name = crypto_tfm_alg_name(&tfm->base);
	struct hisi_zip_ctx *ctx = crypto_tfm_ctx(&tfm->base);
	struct device *dev;
	int ret;

	ret = hisi_zip_ctx_init(ctx, COMP_NAME_TO_TYPE(alg_name), tfm->base.node);
	if (ret) {
		pr_err("failed to init ctx (%d)!\n", ret);
		return ret;
	}

	dev = &ctx->qp_ctx[0].qp->qm->pdev->dev;

	ret = hisi_zip_create_req_q(ctx);
	if (ret) {
		dev_err(dev, "failed to create request queue (%d)!\n", ret);
		goto err_ctx_exit;
	}

	ret = hisi_zip_create_sgl_pool(ctx);
	if (ret) {
		dev_err(dev, "failed to create sgl pool (%d)!\n", ret);
		goto err_release_req_q;
	}

	hisi_zip_set_acomp_cb(ctx, hisi_zip_acomp_cb);

	return 0;

err_release_req_q:
	hisi_zip_release_req_q(ctx);
err_ctx_exit:
	hisi_zip_ctx_exit(ctx);
	return ret;
}

static void hisi_zip_acomp_exit(struct crypto_acomp *tfm)
{
	struct hisi_zip_ctx *ctx = crypto_tfm_ctx(&tfm->base);

	hisi_zip_set_acomp_cb(ctx, NULL);
	hisi_zip_release_sgl_pool(ctx);
	hisi_zip_release_req_q(ctx);
	hisi_zip_ctx_exit(ctx);
}

static struct acomp_alg hisi_zip_acomp_zlib = {
	.init = hisi_zip_acomp_init,
	.exit = hisi_zip_acomp_exit,
	.compress = hisi_zip_acompress,
	.decompress = hisi_zip_adecompress,
	.base = {
		.cra_name = "zlib-deflate",
		.cra_driver_name = "hisi-zlib-acomp",
		.cra_module = THIS_MODULE,
		.cra_priority = HZIP_ALG_PRIORITY,
		.cra_ctxsize = sizeof(struct hisi_zip_ctx),
	}
};

static int hisi_zip_register_zlib(struct hisi_qm *qm)
{
	int ret;

	if (!hisi_zip_alg_support(qm, HZIP_ALG_ZLIB))
		return 0;

	ret = crypto_register_acomp(&hisi_zip_acomp_zlib);
	if (ret)
		dev_err(&qm->pdev->dev, "failed to register to zlib (%d)!\n", ret);

	return ret;
}

static void hisi_zip_unregister_zlib(struct hisi_qm *qm)
{
	if (!hisi_zip_alg_support(qm, HZIP_ALG_ZLIB))
		return;

	crypto_unregister_acomp(&hisi_zip_acomp_zlib);
}

static struct acomp_alg hisi_zip_acomp_gzip = {
	.init = hisi_zip_acomp_init,
	.exit = hisi_zip_acomp_exit,
	.compress = hisi_zip_acompress,
	.decompress = hisi_zip_adecompress,
	.base = {
		.cra_name = "gzip",
		.cra_driver_name = "hisi-gzip-acomp",
		.cra_module = THIS_MODULE,
		.cra_priority = HZIP_ALG_PRIORITY,
		.cra_ctxsize = sizeof(struct hisi_zip_ctx),
	}
};

static int hisi_zip_register_gzip(struct hisi_qm *qm)
{
	int ret;

	if (!hisi_zip_alg_support(qm, HZIP_ALG_GZIP))
		return 0;

	ret = crypto_register_acomp(&hisi_zip_acomp_gzip);
	if (ret)
		dev_err(&qm->pdev->dev, "failed to register to gzip (%d)!\n", ret);

	return ret;
}

static void hisi_zip_unregister_gzip(struct hisi_qm *qm)
{
	if (!hisi_zip_alg_support(qm, HZIP_ALG_GZIP))
		return;

	crypto_unregister_acomp(&hisi_zip_acomp_gzip);
}

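/* Register both algorithms; roll back zlib if gzip registration fails. */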
int hisi_zip_register_to_crypto(struct hisi_qm *qm)
{
	int ret = 0;

	ret = hisi_zip_register_zlib(qm);
	if (ret)
		return ret;

	ret = hisi_zip_register_gzip(qm);
	if (ret)
		hisi_zip_unregister_zlib(qm);

	return ret;
}

void hisi_zip_unregister_from_crypto(struct hisi_qm *qm)
{
	hisi_zip_unregister_zlib(qm);
	hisi_zip_unregister_gzip(qm);
}