/* cptvf_algs.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (C) 2016 Cavium, Inc.
  4. */
  5. #include <crypto/aes.h>
  6. #include <crypto/algapi.h>
  7. #include <crypto/authenc.h>
  8. #include <crypto/internal/des.h>
  9. #include <crypto/xts.h>
  10. #include <linux/crypto.h>
  11. #include <linux/err.h>
  12. #include <linux/list.h>
  13. #include <linux/scatterlist.h>
  14. #include "cptvf.h"
  15. #include "cptvf_algs.h"
/*
 * Registry of CPT virtual-function devices handled by this module.
 * Requests are dispatched to the slot indexed by the submitting CPU
 * (see cvm_enc_dec()), so slot order follows registration order.
 */
struct cpt_device_handle {
	void *cdev[MAX_DEVICES];	/* opaque struct cpt_vf pointers */
	u32 dev_count;			/* number of populated slots */
};

/* Module-global device table, filled by cvm_crypto_init(). */
static struct cpt_device_handle dev_handle;
  21. static void cvm_callback(u32 status, void *arg)
  22. {
  23. struct crypto_async_request *req = (struct crypto_async_request *)arg;
  24. req->complete(req, !status);
  25. }
  26. static inline void update_input_iv(struct cpt_request_info *req_info,
  27. u8 *iv, u32 enc_iv_len,
  28. u32 *argcnt)
  29. {
  30. /* Setting the iv information */
  31. req_info->in[*argcnt].vptr = (void *)iv;
  32. req_info->in[*argcnt].size = enc_iv_len;
  33. req_info->req.dlen += enc_iv_len;
  34. ++(*argcnt);
  35. }
  36. static inline void update_output_iv(struct cpt_request_info *req_info,
  37. u8 *iv, u32 enc_iv_len,
  38. u32 *argcnt)
  39. {
  40. /* Setting the iv information */
  41. req_info->out[*argcnt].vptr = (void *)iv;
  42. req_info->out[*argcnt].size = enc_iv_len;
  43. req_info->rlen += enc_iv_len;
  44. ++(*argcnt);
  45. }
  46. static inline void update_input_data(struct cpt_request_info *req_info,
  47. struct scatterlist *inp_sg,
  48. u32 nbytes, u32 *argcnt)
  49. {
  50. req_info->req.dlen += nbytes;
  51. while (nbytes) {
  52. u32 len = min(nbytes, inp_sg->length);
  53. u8 *ptr = sg_virt(inp_sg);
  54. req_info->in[*argcnt].vptr = (void *)ptr;
  55. req_info->in[*argcnt].size = len;
  56. nbytes -= len;
  57. ++(*argcnt);
  58. ++inp_sg;
  59. }
  60. }
  61. static inline void update_output_data(struct cpt_request_info *req_info,
  62. struct scatterlist *outp_sg,
  63. u32 nbytes, u32 *argcnt)
  64. {
  65. req_info->rlen += nbytes;
  66. while (nbytes) {
  67. u32 len = min(nbytes, outp_sg->length);
  68. u8 *ptr = sg_virt(outp_sg);
  69. req_info->out[*argcnt].vptr = (void *)ptr;
  70. req_info->out[*argcnt].size = len;
  71. nbytes -= len;
  72. ++(*argcnt);
  73. ++outp_sg;
  74. }
  75. }
/*
 * Build the fixed header of the gather list for a cipher request: the
 * 8-byte offset control word followed by the flexi-crypto context
 * (cipher selection, key material, IV source).  Advances *argcnt past
 * the two entries it adds.  Always returns 0.
 */
static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc,
				 u32 *argcnt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct cvm_enc_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct cvm_req_ctx *rctx = skcipher_request_ctx(req);
	struct fc_context *fctx = &rctx->fctx;
	u32 enc_iv_len = crypto_skcipher_ivsize(tfm);
	struct cpt_request_info *req_info = &rctx->cpt_req;
	__be64 *ctrl_flags = NULL;
	__be64 *offset_control;

	/* Group 0, symmetric-engine request, gather/scatter DMA mode. */
	req_info->ctrl.s.grp = 0;
	req_info->ctrl.s.dma_mode = DMA_GATHER_SCATTER;
	req_info->ctrl.s.se_req = SE_CORE_REQ;
	req_info->req.opcode.s.major = MAJOR_OP_FC |
				       DMA_MODE_FLAG(DMA_GATHER_SCATTER);
	/* Minor opcode selects direction: 2 = encrypt, 3 = decrypt. */
	if (enc)
		req_info->req.opcode.s.minor = 2;
	else
		req_info->req.opcode.s.minor = 3;

	req_info->req.param1 = req->cryptlen; /* Encryption Data length */
	req_info->req.param2 = 0; /* Auth data length */

	fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
	fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
	fctx->enc.enc_ctrl.e.iv_source = FROM_DPTR; /* IV travels in DPTR */
	/* XTS keeps two keys back to back, hence twice the key length. */
	if (ctx->cipher_type == AES_XTS)
		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
	else
		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);
	/*
	 * Convert the control flags to big-endian in place: read the CPU
	 * value, then store it back through an aliasing __be64 pointer.
	 * The read must happen before the store, so keep this order.
	 */
	ctrl_flags = (__be64 *)&fctx->enc.enc_ctrl.flags;
	*ctrl_flags = cpu_to_be64(fctx->enc.enc_ctrl.flags);

	/* Offset control word: IV length in bits 16..31, big-endian. */
	offset_control = (__be64 *)&rctx->control_word;
	*offset_control = cpu_to_be64(((u64)(enc_iv_len) << 16));
	/* Storing Packet Data Information in offset
	 * Control Word First 8 bytes
	 */
	req_info->in[*argcnt].vptr = (u8 *)offset_control;
	req_info->in[*argcnt].size = CONTROL_WORD_LEN;
	req_info->req.dlen += CONTROL_WORD_LEN;
	++(*argcnt);

	req_info->in[*argcnt].vptr = (u8 *)fctx;
	req_info->in[*argcnt].size = sizeof(struct fc_context);
	req_info->req.dlen += sizeof(struct fc_context);
	++(*argcnt);

	return 0;
}
  122. static inline u32 create_input_list(struct skcipher_request *req, u32 enc,
  123. u32 enc_iv_len)
  124. {
  125. struct cvm_req_ctx *rctx = skcipher_request_ctx(req);
  126. struct cpt_request_info *req_info = &rctx->cpt_req;
  127. u32 argcnt = 0;
  128. create_ctx_hdr(req, enc, &argcnt);
  129. update_input_iv(req_info, req->iv, enc_iv_len, &argcnt);
  130. update_input_data(req_info, req->src, req->cryptlen, &argcnt);
  131. req_info->incnt = argcnt;
  132. return 0;
  133. }
  134. static inline void store_cb_info(struct skcipher_request *req,
  135. struct cpt_request_info *req_info)
  136. {
  137. req_info->callback = (void *)cvm_callback;
  138. req_info->callback_arg = (void *)&req->base;
  139. }
  140. static inline void create_output_list(struct skcipher_request *req,
  141. u32 enc_iv_len)
  142. {
  143. struct cvm_req_ctx *rctx = skcipher_request_ctx(req);
  144. struct cpt_request_info *req_info = &rctx->cpt_req;
  145. u32 argcnt = 0;
  146. /* OUTPUT Buffer Processing
  147. * AES encryption/decryption output would be
  148. * received in the following format
  149. *
  150. * ------IV--------|------ENCRYPTED/DECRYPTED DATA-----|
  151. * [ 16 Bytes/ [ Request Enc/Dec/ DATA Len AES CBC ]
  152. */
  153. /* Reading IV information */
  154. update_output_iv(req_info, req->iv, enc_iv_len, &argcnt);
  155. update_output_data(req_info, req->dst, req->cryptlen, &argcnt);
  156. req_info->outcnt = argcnt;
  157. }
  158. static inline int cvm_enc_dec(struct skcipher_request *req, u32 enc)
  159. {
  160. struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
  161. struct cvm_req_ctx *rctx = skcipher_request_ctx(req);
  162. u32 enc_iv_len = crypto_skcipher_ivsize(tfm);
  163. struct fc_context *fctx = &rctx->fctx;
  164. struct cpt_request_info *req_info = &rctx->cpt_req;
  165. void *cdev = NULL;
  166. int status;
  167. memset(req_info, 0, sizeof(struct cpt_request_info));
  168. req_info->may_sleep = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) != 0;
  169. memset(fctx, 0, sizeof(struct fc_context));
  170. create_input_list(req, enc, enc_iv_len);
  171. create_output_list(req, enc_iv_len);
  172. store_cb_info(req, req_info);
  173. cdev = dev_handle.cdev[smp_processor_id()];
  174. status = cptvf_do_request(cdev, req_info);
  175. /* We perform an asynchronous send and once
  176. * the request is completed the driver would
  177. * intimate through registered call back functions
  178. */
  179. if (status)
  180. return status;
  181. else
  182. return -EINPROGRESS;
  183. }
  184. static int cvm_encrypt(struct skcipher_request *req)
  185. {
  186. return cvm_enc_dec(req, true);
  187. }
  188. static int cvm_decrypt(struct skcipher_request *req)
  189. {
  190. return cvm_enc_dec(req, false);
  191. }
  192. static int cvm_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
  193. u32 keylen)
  194. {
  195. struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
  196. struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm);
  197. int err;
  198. const u8 *key1 = key;
  199. const u8 *key2 = key + (keylen / 2);
  200. err = xts_check_key(tfm, key, keylen);
  201. if (err)
  202. return err;
  203. ctx->key_len = keylen;
  204. memcpy(ctx->enc_key, key1, keylen / 2);
  205. memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
  206. ctx->cipher_type = AES_XTS;
  207. switch (ctx->key_len) {
  208. case 32:
  209. ctx->key_type = AES_128_BIT;
  210. break;
  211. case 64:
  212. ctx->key_type = AES_256_BIT;
  213. break;
  214. default:
  215. return -EINVAL;
  216. }
  217. return 0;
  218. }
  219. static int cvm_validate_keylen(struct cvm_enc_ctx *ctx, u32 keylen)
  220. {
  221. if ((keylen == 16) || (keylen == 24) || (keylen == 32)) {
  222. ctx->key_len = keylen;
  223. switch (ctx->key_len) {
  224. case 16:
  225. ctx->key_type = AES_128_BIT;
  226. break;
  227. case 24:
  228. ctx->key_type = AES_192_BIT;
  229. break;
  230. case 32:
  231. ctx->key_type = AES_256_BIT;
  232. break;
  233. default:
  234. return -EINVAL;
  235. }
  236. if (ctx->cipher_type == DES3_CBC)
  237. ctx->key_type = 0;
  238. return 0;
  239. }
  240. return -EINVAL;
  241. }
  242. static int cvm_setkey(struct crypto_skcipher *cipher, const u8 *key,
  243. u32 keylen, u8 cipher_type)
  244. {
  245. struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
  246. struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm);
  247. ctx->cipher_type = cipher_type;
  248. if (!cvm_validate_keylen(ctx, keylen)) {
  249. memcpy(ctx->enc_key, key, keylen);
  250. return 0;
  251. } else {
  252. return -EINVAL;
  253. }
  254. }
  255. static int cvm_cbc_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
  256. u32 keylen)
  257. {
  258. return cvm_setkey(cipher, key, keylen, AES_CBC);
  259. }
  260. static int cvm_ecb_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
  261. u32 keylen)
  262. {
  263. return cvm_setkey(cipher, key, keylen, AES_ECB);
  264. }
  265. static int cvm_cfb_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
  266. u32 keylen)
  267. {
  268. return cvm_setkey(cipher, key, keylen, AES_CFB);
  269. }
  270. static int cvm_cbc_des3_setkey(struct crypto_skcipher *cipher, const u8 *key,
  271. u32 keylen)
  272. {
  273. return verify_skcipher_des3_key(cipher, key) ?:
  274. cvm_setkey(cipher, key, keylen, DES3_CBC);
  275. }
  276. static int cvm_ecb_des3_setkey(struct crypto_skcipher *cipher, const u8 *key,
  277. u32 keylen)
  278. {
  279. return verify_skcipher_des3_key(cipher, key) ?:
  280. cvm_setkey(cipher, key, keylen, DES3_ECB);
  281. }
  282. static int cvm_enc_dec_init(struct crypto_skcipher *tfm)
  283. {
  284. crypto_skcipher_set_reqsize(tfm, sizeof(struct cvm_req_ctx));
  285. return 0;
  286. }
/*
 * Algorithm table registered with the crypto API.  All entries are
 * asynchronous, may allocate memory, share the same encrypt/decrypt/init
 * entry points, and use priority 4001.  ECB entries have no ivsize
 * (defaults to 0).
 */
static struct skcipher_alg algs[] = { {
	/* xts(aes): two concatenated AES keys, hence 2x min/max keysize. */
	.base.cra_flags = CRYPTO_ALG_ASYNC |
			  CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct cvm_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_name = "xts(aes)",
	.base.cra_driver_name = "cavium-xts-aes",
	.base.cra_module = THIS_MODULE,
	.ivsize = AES_BLOCK_SIZE,
	.min_keysize = 2 * AES_MIN_KEY_SIZE,
	.max_keysize = 2 * AES_MAX_KEY_SIZE,
	.setkey = cvm_xts_setkey,
	.encrypt = cvm_encrypt,
	.decrypt = cvm_decrypt,
	.init = cvm_enc_dec_init,
}, {
	/* cbc(aes) */
	.base.cra_flags = CRYPTO_ALG_ASYNC |
			  CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct cvm_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_name = "cbc(aes)",
	.base.cra_driver_name = "cavium-cbc-aes",
	.base.cra_module = THIS_MODULE,
	.ivsize = AES_BLOCK_SIZE,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.setkey = cvm_cbc_aes_setkey,
	.encrypt = cvm_encrypt,
	.decrypt = cvm_decrypt,
	.init = cvm_enc_dec_init,
}, {
	/* ecb(aes): no IV. */
	.base.cra_flags = CRYPTO_ALG_ASYNC |
			  CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct cvm_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_name = "ecb(aes)",
	.base.cra_driver_name = "cavium-ecb-aes",
	.base.cra_module = THIS_MODULE,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.setkey = cvm_ecb_aes_setkey,
	.encrypt = cvm_encrypt,
	.decrypt = cvm_decrypt,
	.init = cvm_enc_dec_init,
}, {
	/* cfb(aes) */
	.base.cra_flags = CRYPTO_ALG_ASYNC |
			  CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct cvm_enc_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_name = "cfb(aes)",
	.base.cra_driver_name = "cavium-cfb-aes",
	.base.cra_module = THIS_MODULE,
	.ivsize = AES_BLOCK_SIZE,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.setkey = cvm_cfb_aes_setkey,
	.encrypt = cvm_encrypt,
	.decrypt = cvm_decrypt,
	.init = cvm_enc_dec_init,
}, {
	/* cbc(des3_ede): fixed 24-byte key, separate context type. */
	.base.cra_flags = CRYPTO_ALG_ASYNC |
			  CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct cvm_des3_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_name = "cbc(des3_ede)",
	.base.cra_driver_name = "cavium-cbc-des3_ede",
	.base.cra_module = THIS_MODULE,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.ivsize = DES_BLOCK_SIZE,
	.setkey = cvm_cbc_des3_setkey,
	.encrypt = cvm_encrypt,
	.decrypt = cvm_decrypt,
	.init = cvm_enc_dec_init,
}, {
	/* ecb(des3_ede): ivsize set but unused by ECB mode. */
	.base.cra_flags = CRYPTO_ALG_ASYNC |
			  CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct cvm_des3_ctx),
	.base.cra_alignmask = 7,
	.base.cra_priority = 4001,
	.base.cra_name = "ecb(des3_ede)",
	.base.cra_driver_name = "cavium-ecb-des3_ede",
	.base.cra_module = THIS_MODULE,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.ivsize = DES_BLOCK_SIZE,
	.setkey = cvm_ecb_des3_setkey,
	.encrypt = cvm_encrypt,
	.decrypt = cvm_decrypt,
	.init = cvm_enc_dec_init,
} };
  389. static inline int cav_register_algs(void)
  390. {
  391. return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
  392. }
  393. static inline void cav_unregister_algs(void)
  394. {
  395. crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
  396. }
  397. int cvm_crypto_init(struct cpt_vf *cptvf)
  398. {
  399. struct pci_dev *pdev = cptvf->pdev;
  400. u32 dev_count;
  401. dev_count = dev_handle.dev_count;
  402. dev_handle.cdev[dev_count] = cptvf;
  403. dev_handle.dev_count++;
  404. if (dev_count == 3) {
  405. if (cav_register_algs()) {
  406. dev_err(&pdev->dev, "Error in registering crypto algorithms\n");
  407. return -EINVAL;
  408. }
  409. }
  410. return 0;
  411. }
  412. void cvm_crypto_exit(void)
  413. {
  414. u32 dev_count;
  415. dev_count = --dev_handle.dev_count;
  416. if (!dev_count)
  417. cav_unregister_algs();
  418. }