// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
 *
 * Copyright (C) 2013-2015 Corentin LABBE <[email protected]>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * keysizes in CBC and ECB mode.
 * It also adds support for DES and 3DES in CBC and ECB mode.
 *
 * You can find the datasheet in Documentation/arm/sunxi.rst
 */
#include "sun4i-ss.h"
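
/*
 * sun4i_ss_opti_poll() is the optimized PIO path. It assumes that every
 * source and destination scatterlist entry has a length and an offset that
 * are multiples of 4, so data can be pushed to and pulled from the FIFOs
 * word by word without going through an intermediate buffer.
 */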
static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_ss_ctx *ss = op->ss;
        unsigned int ivsize = crypto_skcipher_ivsize(tfm);
        struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
        u32 mode = ctx->mode;
        /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
        u32 rx_cnt = SS_RX_DEFAULT;
        u32 tx_cnt = 0;
        u32 spaces;
        u32 v;
        int err = 0;
        unsigned int i;
        unsigned int ileft = areq->cryptlen;
        unsigned int oleft = areq->cryptlen;
        unsigned int todo;
        unsigned long pi = 0, po = 0; /* progress for in and out */
        bool miter_err;
        struct sg_mapping_iter mi, mo;
        unsigned int oi, oo; /* offset for in and out */
        unsigned long flags;
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
        struct sun4i_ss_alg_template *algt;

        if (!areq->cryptlen)
                return 0;

        if (!areq->src || !areq->dst) {
                dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
                return -EINVAL;
        }

        if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) {
                scatterwalk_map_and_copy(ctx->backup_iv, areq->src,
                                         areq->cryptlen - ivsize, ivsize, 0);
        }

        if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
                algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
                algt->stat_opti++;
                algt->stat_bytes += areq->cryptlen;
        }

        spin_lock_irqsave(&ss->slock, flags);

        for (i = 0; i < op->keylen / 4; i++)
                writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);

        if (areq->iv) {
                for (i = 0; i < 4 && i < ivsize / 4; i++) {
                        v = *(u32 *)(areq->iv + i * 4);
                        writesl(ss->base + SS_IV0 + i * 4, &v, 1);
                }
        }
        writel(mode, ss->base + SS_CTL);

        ileft = areq->cryptlen / 4;
        oleft = areq->cryptlen / 4;
        oi = 0;
        oo = 0;
        do {
                if (ileft) {
                        sg_miter_start(&mi, areq->src, sg_nents(areq->src),
                                       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
                        if (pi)
                                sg_miter_skip(&mi, pi);
                        miter_err = sg_miter_next(&mi);
                        if (!miter_err || !mi.addr) {
                                dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
                                err = -EINVAL;
                                goto release_ss;
                        }
                        todo = min(rx_cnt, ileft);
                        todo = min_t(size_t, todo, (mi.length - oi) / 4);
                        if (todo) {
                                ileft -= todo;
                                writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
                                oi += todo * 4;
                        }
                        if (oi == mi.length) {
                                pi += mi.length;
                                oi = 0;
                        }
                        sg_miter_stop(&mi);
                }

                spaces = readl(ss->base + SS_FCSR);
                rx_cnt = SS_RXFIFO_SPACES(spaces);
                tx_cnt = SS_TXFIFO_SPACES(spaces);

                sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
                               SG_MITER_TO_SG | SG_MITER_ATOMIC);
                if (po)
                        sg_miter_skip(&mo, po);
                miter_err = sg_miter_next(&mo);
                if (!miter_err || !mo.addr) {
                        dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
                        err = -EINVAL;
                        goto release_ss;
                }
                todo = min(tx_cnt, oleft);
                todo = min_t(size_t, todo, (mo.length - oo) / 4);
                if (todo) {
                        oleft -= todo;
                        readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
                        oo += todo * 4;
                }
                if (oo == mo.length) {
                        oo = 0;
                        po += mo.length;
                }
                sg_miter_stop(&mo);
        } while (oleft);

        if (areq->iv) {
                if (mode & SS_DECRYPTION) {
                        memcpy(areq->iv, ctx->backup_iv, ivsize);
                        memzero_explicit(ctx->backup_iv, ivsize);
                } else {
                        scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize,
                                                 ivsize, 0);
                }
        }

release_ss:
        writel(0, ss->base + SS_CTL);
        spin_unlock_irqrestore(&ss->slock, flags);
        return err;
}
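
/*
 * sun4i_ss_cipher_poll_fallback() hands the whole request over to the
 * software fallback skcipher allocated in sun4i_ss_cipher_init(), reusing
 * the fallback request embedded in the request context.
 */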
static int noinline_for_stack sun4i_ss_cipher_poll_fallback(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
        int err;
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
        struct sun4i_ss_alg_template *algt;

        if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
                algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
                algt->stat_fb++;
        }

        skcipher_request_set_tfm(&ctx->fallback_req, op->fallback_tfm);
        skcipher_request_set_callback(&ctx->fallback_req, areq->base.flags,
                                      areq->base.complete, areq->base.data);
        skcipher_request_set_crypt(&ctx->fallback_req, areq->src, areq->dst,
                                   areq->cryptlen, areq->iv);
        if (ctx->mode & SS_DECRYPTION)
                err = crypto_skcipher_decrypt(&ctx->fallback_req);
        else
                err = crypto_skcipher_encrypt(&ctx->fallback_req);
        return err;
}

/* Generic function that supports SGs with a size that is not a multiple of 4 */
static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_ss_ctx *ss = op->ss;
        int no_chunk = 1;
        struct scatterlist *in_sg = areq->src;
        struct scatterlist *out_sg = areq->dst;
        unsigned int ivsize = crypto_skcipher_ivsize(tfm);
        struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
        struct sun4i_ss_alg_template *algt;
        u32 mode = ctx->mode;
        /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
        u32 rx_cnt = SS_RX_DEFAULT;
        u32 tx_cnt = 0;
        u32 v;
        u32 spaces;
        int err = 0;
        unsigned int i;
        unsigned int ileft = areq->cryptlen;
        unsigned int oleft = areq->cryptlen;
        unsigned int todo;
        struct sg_mapping_iter mi, mo;
        unsigned long pi = 0, po = 0; /* progress for in and out */
        bool miter_err;
        unsigned int oi, oo;    /* offset for in and out */
        unsigned int ob = 0;    /* offset in buf */
        unsigned int obo = 0;   /* offset in bufo */
        unsigned int obl = 0;   /* length of data in bufo */
        unsigned long flags;
        bool need_fallback = false;

        if (!areq->cryptlen)
                return 0;

        if (!areq->src || !areq->dst) {
                dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
                return -EINVAL;
        }

        algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
        if (areq->cryptlen % algt->alg.crypto.base.cra_blocksize)
                need_fallback = true;

        /*
         * If we only have SGs with a size that is a multiple of 4,
         * we can use the SS optimized function.
         */
        while (in_sg && no_chunk == 1) {
                if ((in_sg->length | in_sg->offset) & 3u)
                        no_chunk = 0;
                in_sg = sg_next(in_sg);
        }
        while (out_sg && no_chunk == 1) {
                if ((out_sg->length | out_sg->offset) & 3u)
                        no_chunk = 0;
                out_sg = sg_next(out_sg);
        }

        if (no_chunk == 1 && !need_fallback)
                return sun4i_ss_opti_poll(areq);

        if (need_fallback)
                return sun4i_ss_cipher_poll_fallback(areq);

        if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) {
                scatterwalk_map_and_copy(ctx->backup_iv, areq->src,
                                         areq->cryptlen - ivsize, ivsize, 0);
        }

        if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) {
                algt->stat_req++;
                algt->stat_bytes += areq->cryptlen;
        }

        spin_lock_irqsave(&ss->slock, flags);

        for (i = 0; i < op->keylen / 4; i++)
                writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);

        if (areq->iv) {
                for (i = 0; i < 4 && i < ivsize / 4; i++) {
                        v = *(u32 *)(areq->iv + i * 4);
                        writesl(ss->base + SS_IV0 + i * 4, &v, 1);
                }
        }
        writel(mode, ss->base + SS_CTL);

        ileft = areq->cryptlen;
        oleft = areq->cryptlen;
        oi = 0;
        oo = 0;

        while (oleft) {
                if (ileft) {
                        sg_miter_start(&mi, areq->src, sg_nents(areq->src),
                                       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
                        if (pi)
                                sg_miter_skip(&mi, pi);
                        miter_err = sg_miter_next(&mi);
                        if (!miter_err || !mi.addr) {
                                dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
                                err = -EINVAL;
                                goto release_ss;
                        }
                        /*
                         * todo is the number of consecutive 4-byte words that
                         * we can read from the current SG.
                         */
                        todo = min(rx_cnt, ileft / 4);
                        todo = min_t(size_t, todo, (mi.length - oi) / 4);
                        if (todo && !ob) {
                                writesl(ss->base + SS_RXFIFO, mi.addr + oi,
                                        todo);
                                ileft -= todo * 4;
                                oi += todo * 4;
                        } else {
                                /*
                                 * Not enough consecutive bytes, so linearize
                                 * them in buf; here todo is in bytes. After
                                 * the copy, once buf holds a multiple of 4
                                 * bytes, we must be able to write all of it
                                 * in one pass, which is why we min() with
                                 * rx_cnt.
                                 */
                                todo = min(rx_cnt * 4 - ob, ileft);
                                todo = min_t(size_t, todo, mi.length - oi);
                                memcpy(ss->buf + ob, mi.addr + oi, todo);
                                ileft -= todo;
                                oi += todo;
                                ob += todo;
                                if (!(ob % 4)) {
                                        writesl(ss->base + SS_RXFIFO, ss->buf,
                                                ob / 4);
                                        ob = 0;
                                }
                        }
                        if (oi == mi.length) {
                                pi += mi.length;
                                oi = 0;
                        }
                        sg_miter_stop(&mi);
                }

                spaces = readl(ss->base + SS_FCSR);
                rx_cnt = SS_RXFIFO_SPACES(spaces);
                tx_cnt = SS_TXFIFO_SPACES(spaces);

                if (!tx_cnt)
                        continue;
                sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
                               SG_MITER_TO_SG | SG_MITER_ATOMIC);
                if (po)
                        sg_miter_skip(&mo, po);
                miter_err = sg_miter_next(&mo);
                if (!miter_err || !mo.addr) {
                        dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
                        err = -EINVAL;
                        goto release_ss;
                }
                /* todo in 4-byte words */
                todo = min(tx_cnt, oleft / 4);
                todo = min_t(size_t, todo, (mo.length - oo) / 4);
                if (todo) {
                        readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
                        oleft -= todo * 4;
                        oo += todo * 4;
                        if (oo == mo.length) {
                                po += mo.length;
                                oo = 0;
                        }
                } else {
                        /*
                         * Read obl bytes into bufo; we read as much as
                         * possible in order to empty the device FIFO.
                         */
                        readsl(ss->base + SS_TXFIFO, ss->bufo, tx_cnt);
                        obl = tx_cnt * 4;
                        obo = 0;
                        do {
                                /*
                                 * How many bytes can we copy?
                                 * No more than the remaining SG size and no
                                 * more than the remaining buffer; there is
                                 * no need to test against oleft.
                                 */
                                todo = min_t(size_t,
                                             mo.length - oo, obl - obo);
                                memcpy(mo.addr + oo, ss->bufo + obo, todo);
                                oleft -= todo;
                                obo += todo;
                                oo += todo;
                                if (oo == mo.length) {
                                        po += mo.length;
                                        sg_miter_next(&mo);
                                        oo = 0;
                                }
                        } while (obo < obl);
                        /* bufo must be fully used here */
                }
                sg_miter_stop(&mo);
        }

        if (areq->iv) {
                if (mode & SS_DECRYPTION) {
                        memcpy(areq->iv, ctx->backup_iv, ivsize);
                        memzero_explicit(ctx->backup_iv, ivsize);
                } else {
                        scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize,
                                                 ivsize, 0);
                }
        }

release_ss:
        writel(0, ss->base + SS_CTL);
        spin_unlock_irqrestore(&ss->slock, flags);
        return err;
}
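
/*
 * The mode-specific entry points below only build the SS_CTL mode word
 * (algorithm, chaining mode, direction and keysize) in the request context
 * and delegate the actual transfer to sun4i_ss_cipher_poll().
 */
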
/* CBC AES */
int sun4i_ss_cbc_aes_encrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

        rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
                     op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_aes_decrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

        rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
                     op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

/* ECB AES */
int sun4i_ss_ecb_aes_encrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

        rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
                     op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_aes_decrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

        rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
                     op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

/* CBC DES */
int sun4i_ss_cbc_des_encrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

        rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
                     op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des_decrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

        rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
                     op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

/* ECB DES */
int sun4i_ss_ecb_des_encrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

        rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
                     op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des_decrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

        rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
                     op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

/* CBC 3DES */
int sun4i_ss_cbc_des3_encrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

        rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
                     op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des3_decrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

        rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
                     op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

/* ECB 3DES */
int sun4i_ss_ecb_des3_encrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

        rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
                     op->keymode;
        return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des3_decrypt(struct skcipher_request *areq)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

        rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
                     op->keymode;
        return sun4i_ss_cipher_poll(areq);
}
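
/*
 * sun4i_ss_cipher_init() runs once per tfm: it allocates the software
 * fallback skcipher, reserves room for the fallback request in our request
 * context, and takes a runtime PM reference on the device.
 */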
int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
{
        struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
        struct sun4i_ss_alg_template *algt;
        const char *name = crypto_tfm_alg_name(tfm);
        int err;

        memset(op, 0, sizeof(struct sun4i_tfm_ctx));

        algt = container_of(tfm->__crt_alg, struct sun4i_ss_alg_template,
                            alg.crypto.base);
        op->ss = algt->ss;

        op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(op->fallback_tfm)) {
                dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
                        name, PTR_ERR(op->fallback_tfm));
                return PTR_ERR(op->fallback_tfm);
        }

        crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
                                    sizeof(struct sun4i_cipher_req_ctx) +
                                    crypto_skcipher_reqsize(op->fallback_tfm));

        err = pm_runtime_resume_and_get(op->ss->dev);
        if (err < 0)
                goto error_pm;

        return 0;
error_pm:
        crypto_free_skcipher(op->fallback_tfm);
        return err;
}

void sun4i_ss_cipher_exit(struct crypto_tfm *tfm)
{
        struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);

        crypto_free_skcipher(op->fallback_tfm);
        pm_runtime_put(op->ss->dev);
}
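
/*
 * The setkey helpers below also propagate the key and the request flags to
 * the fallback tfm, so the software fallback can be used transparently
 * whenever the hardware path cannot handle a request.
 */
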
/* check and set the AES key, prepare the mode to be used */
int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
                        unsigned int keylen)
{
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        struct sun4i_ss_ctx *ss = op->ss;

        switch (keylen) {
        case 128 / 8:
                op->keymode = SS_AES_128BITS;
                break;
        case 192 / 8:
                op->keymode = SS_AES_192BITS;
                break;
        case 256 / 8:
                op->keymode = SS_AES_256BITS;
                break;
        default:
                dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
                return -EINVAL;
        }
        op->keylen = keylen;
        memcpy(op->key, key, keylen);

        crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

        return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}

/* check and set the DES key, prepare the mode to be used */
int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
                        unsigned int keylen)
{
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        int err;

        err = verify_skcipher_des_key(tfm, key);
        if (err)
                return err;

        op->keylen = keylen;
        memcpy(op->key, key, keylen);

        crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

        return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}

/* check and set the 3DES key, prepare the mode to be used */
int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
                         unsigned int keylen)
{
        struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
        int err;

        err = verify_skcipher_des3_key(tfm, key);
        if (err)
                return err;

        op->keylen = keylen;
        memcpy(op->key, key, keylen);

        crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

        return crypto_skcipher_setkey(op->fallback_tfm, key, keylen);
}