rk3288_crypto_skcipher.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Crypto acceleration support for Rockchip RK3288
 *
 * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
 *
 * Author: Zain Wang <[email protected]>
 *
 * Some ideas are from marvell-cesa.c and s5p-sss.c driver.
 */
#include <linux/device.h>
#include <crypto/scatterwalk.h>
#include "rk3288_crypto.h"

#define RK_CRYPTO_DEC			BIT(0)

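/*
 * Check whether the hardware can process this request: every source and
 * destination scatterlist entry must be 32-bit aligned, a multiple of the
 * cipher block size and of equal length on both sides. Otherwise (and for
 * zero-length requests) the software fallback is used.
 */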
static int rk_cipher_need_fallback(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	unsigned int bs = crypto_skcipher_blocksize(tfm);
	struct scatterlist *sgs, *sgd;
	unsigned int stodo, dtodo, len;

	if (!req->cryptlen)
		return true;

	len = req->cryptlen;
	sgs = req->src;
	sgd = req->dst;
	while (sgs && sgd) {
		if (!IS_ALIGNED(sgs->offset, sizeof(u32))) {
			return true;
		}
		if (!IS_ALIGNED(sgd->offset, sizeof(u32))) {
			return true;
		}
		stodo = min(len, sgs->length);
		if (stodo % bs) {
			return true;
		}
		dtodo = min(len, sgd->length);
		if (dtodo % bs) {
			return true;
		}
		if (stodo != dtodo) {
			return true;
		}
		len -= stodo;
		sgs = sg_next(sgs);
		sgd = sg_next(sgd);
	}
	return false;
}

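/*
 * Run the request through the software fallback tfm, preserving the original
 * callback, IV and direction.
 */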
static int rk_cipher_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct rk_cipher_ctx *op = crypto_skcipher_ctx(tfm);
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq);
	int err;

	skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (rctx->mode & RK_CRYPTO_DEC)
		err = crypto_skcipher_decrypt(&rctx->fallback_req);
	else
		err = crypto_skcipher_encrypt(&rctx->fallback_req);
	return err;
}

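/* Queue the request on the crypto engine, unless it needs the fallback. */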
static int rk_handle_req(struct rk_crypto_info *dev,
			 struct skcipher_request *req)
{
	struct crypto_engine *engine = dev->engine;

	if (rk_cipher_need_fallback(req))
		return rk_cipher_fallback(req);

	return crypto_transfer_skcipher_request_to_engine(engine, req);
}

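/*
 * The setkey helpers keep a copy of the key for the hardware path and also
 * program the fallback tfm, so either path can serve a given request.
 */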
static int rk_aes_setkey(struct crypto_skcipher *cipher,
			 const u8 *key, unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;
	ctx->keylen = keylen;
	memcpy(ctx->key, key, keylen);

	return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
}

static int rk_des_setkey(struct crypto_skcipher *cipher,
			 const u8 *key, unsigned int keylen)
{
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
	int err;

	err = verify_skcipher_des_key(cipher, key);
	if (err)
		return err;

	ctx->keylen = keylen;
	memcpy(ctx->key, key, keylen);

	return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
}

static int rk_tdes_setkey(struct crypto_skcipher *cipher,
			  const u8 *key, unsigned int keylen)
{
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
	int err;

	err = verify_skcipher_des3_key(cipher, key);
	if (err)
		return err;

	ctx->keylen = keylen;
	memcpy(ctx->key, key, keylen);

	return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
}

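/*
 * Per-mode entry points: each records the hardware control bits for the
 * requested algorithm, chaining mode and direction in the request context,
 * then defers to rk_handle_req().
 */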
static int rk_aes_ecb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
	struct rk_crypto_info *dev = ctx->dev;

	rctx->mode = RK_CRYPTO_AES_ECB_MODE;
	return rk_handle_req(dev, req);
}

static int rk_aes_ecb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
	struct rk_crypto_info *dev = ctx->dev;

	rctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

static int rk_aes_cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
	struct rk_crypto_info *dev = ctx->dev;

	rctx->mode = RK_CRYPTO_AES_CBC_MODE;
	return rk_handle_req(dev, req);
}

static int rk_aes_cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
	struct rk_crypto_info *dev = ctx->dev;

	rctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

static int rk_des_ecb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
	struct rk_crypto_info *dev = ctx->dev;

	rctx->mode = 0;
	return rk_handle_req(dev, req);
}

static int rk_des_ecb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
	struct rk_crypto_info *dev = ctx->dev;

	rctx->mode = RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

static int rk_des_cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
	struct rk_crypto_info *dev = ctx->dev;

	rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
	return rk_handle_req(dev, req);
}

static int rk_des_cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
	struct rk_crypto_info *dev = ctx->dev;

	rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

static int rk_des3_ede_ecb_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
	struct rk_crypto_info *dev = ctx->dev;

	rctx->mode = RK_CRYPTO_TDES_SELECT;
	return rk_handle_req(dev, req);
}

static int rk_des3_ede_ecb_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
	struct rk_crypto_info *dev = ctx->dev;

	rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

static int rk_des3_ede_cbc_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
	struct rk_crypto_info *dev = ctx->dev;

	rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
	return rk_handle_req(dev, req);
}

static int rk_des3_ede_cbc_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
	struct rk_crypto_info *dev = ctx->dev;

	rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
		     RK_CRYPTO_DEC;
	return rk_handle_req(dev, req);
}

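/*
 * Program the TDES or AES control register for the current request, load the
 * key into the key registers and enable the block-cipher DMA interrupts.
 */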
static void rk_ablk_hw_init(struct rk_crypto_info *dev, struct skcipher_request *req)
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
	u32 block, conf_reg = 0;

	block = crypto_tfm_alg_blocksize(tfm);

	if (block == DES_BLOCK_SIZE) {
		rctx->mode |= RK_CRYPTO_TDES_FIFO_MODE |
			      RK_CRYPTO_TDES_BYTESWAP_KEY |
			      RK_CRYPTO_TDES_BYTESWAP_IV;
		CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, rctx->mode);
		memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_KEY1_0, ctx->key, ctx->keylen);
		conf_reg = RK_CRYPTO_DESSEL;
	} else {
		rctx->mode |= RK_CRYPTO_AES_FIFO_MODE |
			      RK_CRYPTO_AES_KEY_CHANGE |
			      RK_CRYPTO_AES_BYTESWAP_KEY |
			      RK_CRYPTO_AES_BYTESWAP_IV;
		if (ctx->keylen == AES_KEYSIZE_192)
			rctx->mode |= RK_CRYPTO_AES_192BIT_key;
		else if (ctx->keylen == AES_KEYSIZE_256)
			rctx->mode |= RK_CRYPTO_AES_256BIT_key;
		CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, rctx->mode);
		memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_KEY_0, ctx->key, ctx->keylen);
	}
	conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO |
		    RK_CRYPTO_BYTESWAP_BRFIFO;
	CRYPTO_WRITE(dev, RK_CRYPTO_CONF, conf_reg);
	CRYPTO_WRITE(dev, RK_CRYPTO_INTENA,
		     RK_CRYPTO_BCDMA_ERR_ENA | RK_CRYPTO_BCDMA_DONE_ENA);
}

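/*
 * Load the receive/transmit DMA addresses and the transfer length (the caller
 * passes it in 32-bit words) and start the block operation.
 */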
static void crypto_dma_start(struct rk_crypto_info *dev,
			     struct scatterlist *sgs,
			     struct scatterlist *sgd, unsigned int todo)
{
	CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, sg_dma_address(sgs));
	CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, todo);
	CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, sg_dma_address(sgd));
	CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_BLOCK_START |
		     _SBF(RK_CRYPTO_BLOCK_START, 16));
}

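/*
 * Engine callback: walk the source/destination scatterlists one entry at a
 * time, DMA-map each pair, program the hardware and wait for completion.
 * The IV is chained manually across entries: on decryption the last
 * ciphertext block of the current source entry is saved (biv) before it can
 * be overwritten, on encryption the last output block of the destination
 * entry becomes the next IV.
 */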
static int rk_cipher_run(struct crypto_engine *engine, void *async_req)
{
	struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq);
	struct scatterlist *sgs, *sgd;
	int err = 0;
	int ivsize = crypto_skcipher_ivsize(tfm);
	int offset;
	u8 iv[AES_BLOCK_SIZE];
	u8 biv[AES_BLOCK_SIZE];
	u8 *ivtouse = areq->iv;
	unsigned int len = areq->cryptlen;
	unsigned int todo;

	ivsize = crypto_skcipher_ivsize(tfm);
	if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
		if (rctx->mode & RK_CRYPTO_DEC) {
			offset = areq->cryptlen - ivsize;
			scatterwalk_map_and_copy(rctx->backup_iv, areq->src,
						 offset, ivsize, 0);
		}
	}

	sgs = areq->src;
	sgd = areq->dst;

	while (sgs && sgd && len) {
		if (!sgs->length) {
			sgs = sg_next(sgs);
			sgd = sg_next(sgd);
			continue;
		}
		if (rctx->mode & RK_CRYPTO_DEC) {
			/* we backup last block of source to be used as IV at next step */
			offset = sgs->length - ivsize;
			scatterwalk_map_and_copy(biv, sgs, offset, ivsize, 0);
		}
		if (sgs == sgd) {
			err = dma_map_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL);
			if (err <= 0) {
				err = -EINVAL;
				goto theend_iv;
			}
		} else {
			err = dma_map_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE);
			if (err <= 0) {
				err = -EINVAL;
				goto theend_iv;
			}
			err = dma_map_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE);
			if (err <= 0) {
				err = -EINVAL;
				goto theend_sgs;
			}
		}
		err = 0;
		rk_ablk_hw_init(ctx->dev, areq);
		if (ivsize) {
			if (ivsize == DES_BLOCK_SIZE)
				memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_IV_0, ivtouse, ivsize);
			else
				memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_IV_0, ivtouse, ivsize);
		}
		reinit_completion(&ctx->dev->complete);
		ctx->dev->status = 0;

		todo = min(sg_dma_len(sgs), len);
		len -= todo;
		crypto_dma_start(ctx->dev, sgs, sgd, todo / 4);
		wait_for_completion_interruptible_timeout(&ctx->dev->complete,
							  msecs_to_jiffies(2000));
		if (!ctx->dev->status) {
			dev_err(ctx->dev->dev, "DMA timeout\n");
			err = -EFAULT;
			goto theend;
		}
		if (sgs == sgd) {
			dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL);
		} else {
			dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE);
			dma_unmap_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE);
		}
		if (rctx->mode & RK_CRYPTO_DEC) {
			memcpy(iv, biv, ivsize);
			ivtouse = iv;
		} else {
			offset = sgd->length - ivsize;
			scatterwalk_map_and_copy(iv, sgd, offset, ivsize, 0);
			ivtouse = iv;
		}
		sgs = sg_next(sgs);
		sgd = sg_next(sgd);
	}

	if (areq->iv && ivsize > 0) {
		offset = areq->cryptlen - ivsize;
		if (rctx->mode & RK_CRYPTO_DEC) {
			memcpy(areq->iv, rctx->backup_iv, ivsize);
			memzero_explicit(rctx->backup_iv, ivsize);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
						 ivsize, 0);
		}
	}

theend:
	local_bh_disable();
	crypto_finalize_skcipher_request(engine, areq, err);
	local_bh_enable();
	return 0;

theend_sgs:
	if (sgs == sgd) {
		dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE);
		dma_unmap_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE);
	}
theend_iv:
	return err;
}

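/*
 * Allocate the software fallback tfm and size the request context so it can
 * also hold the fallback request.
 */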
static int rk_ablk_init_tfm(struct crypto_skcipher *tfm)
{
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct rk_crypto_tmp *algt;

	algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);

	ctx->dev = algt->dev;

	ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback_tfm)) {
		dev_err(ctx->dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(ctx->fallback_tfm));
		return PTR_ERR(ctx->fallback_tfm);
	}

	tfm->reqsize = sizeof(struct rk_cipher_rctx) +
		crypto_skcipher_reqsize(ctx->fallback_tfm);

	ctx->enginectx.op.do_one_request = rk_cipher_run;

	return 0;
}

static void rk_ablk_exit_tfm(struct crypto_skcipher *tfm)
{
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	memzero_explicit(ctx->key, ctx->keylen);
	crypto_free_skcipher(ctx->fallback_tfm);
}

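/* skcipher algorithm templates exported for registration by the core driver. */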
struct rk_crypto_tmp rk_ecb_aes_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.skcipher = {
		.base.cra_name = "ecb(aes)",
		.base.cra_driver_name = "ecb-aes-rk",
		.base.cra_priority = 300,
		.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize = AES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
		.base.cra_alignmask = 0x0f,
		.base.cra_module = THIS_MODULE,
		.init = rk_ablk_init_tfm,
		.exit = rk_ablk_exit_tfm,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = rk_aes_setkey,
		.encrypt = rk_aes_ecb_encrypt,
		.decrypt = rk_aes_ecb_decrypt,
	}
};

struct rk_crypto_tmp rk_cbc_aes_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.skcipher = {
		.base.cra_name = "cbc(aes)",
		.base.cra_driver_name = "cbc-aes-rk",
		.base.cra_priority = 300,
		.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize = AES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
		.base.cra_alignmask = 0x0f,
		.base.cra_module = THIS_MODULE,
		.init = rk_ablk_init_tfm,
		.exit = rk_ablk_exit_tfm,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = rk_aes_setkey,
		.encrypt = rk_aes_cbc_encrypt,
		.decrypt = rk_aes_cbc_decrypt,
	}
};

struct rk_crypto_tmp rk_ecb_des_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.skcipher = {
		.base.cra_name = "ecb(des)",
		.base.cra_driver_name = "ecb-des-rk",
		.base.cra_priority = 300,
		.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize = DES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
		.base.cra_alignmask = 0x07,
		.base.cra_module = THIS_MODULE,
		.init = rk_ablk_init_tfm,
		.exit = rk_ablk_exit_tfm,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
		.setkey = rk_des_setkey,
		.encrypt = rk_des_ecb_encrypt,
		.decrypt = rk_des_ecb_decrypt,
	}
};

struct rk_crypto_tmp rk_cbc_des_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.skcipher = {
		.base.cra_name = "cbc(des)",
		.base.cra_driver_name = "cbc-des-rk",
		.base.cra_priority = 300,
		.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize = DES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
		.base.cra_alignmask = 0x07,
		.base.cra_module = THIS_MODULE,
		.init = rk_ablk_init_tfm,
		.exit = rk_ablk_exit_tfm,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
		.ivsize = DES_BLOCK_SIZE,
		.setkey = rk_des_setkey,
		.encrypt = rk_des_cbc_encrypt,
		.decrypt = rk_des_cbc_decrypt,
	}
};

struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.skcipher = {
		.base.cra_name = "ecb(des3_ede)",
		.base.cra_driver_name = "ecb-des3-ede-rk",
		.base.cra_priority = 300,
		.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize = DES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
		.base.cra_alignmask = 0x07,
		.base.cra_module = THIS_MODULE,
		.init = rk_ablk_init_tfm,
		.exit = rk_ablk_exit_tfm,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.setkey = rk_tdes_setkey,
		.encrypt = rk_des3_ede_ecb_encrypt,
		.decrypt = rk_des3_ede_ecb_decrypt,
	}
};

struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
	.type = ALG_TYPE_CIPHER,
	.alg.skcipher = {
		.base.cra_name = "cbc(des3_ede)",
		.base.cra_driver_name = "cbc-des3-ede-rk",
		.base.cra_priority = 300,
		.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize = DES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
		.base.cra_alignmask = 0x07,
		.base.cra_module = THIS_MODULE,
		.init = rk_ablk_init_tfm,
		.exit = rk_ablk_exit_tfm,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.ivsize = DES_BLOCK_SIZE,
		.setkey = rk_tdes_setkey,
		.encrypt = rk_des3_ede_cbc_encrypt,
		.decrypt = rk_des3_ede_cbc_decrypt,
	}
};