// SPDX-License-Identifier: GPL-2.0
/*
 * sun8i-ss-hash.c - hardware cryptographic offloader for
 * Allwinner A80/A83T SoC
 *
 * Copyright (C) 2015-2020 Corentin Labbe <[email protected]>
 *
 * This file adds support for MD5 and SHA1/SHA224/SHA256.
 *
 * You can find the datasheet in Documentation/arm/sunxi.rst
 */

#include <linux/bottom_half.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>
#include <crypto/hmac.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/md5.h>
#include "sun8i-ss.h"
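
/*
 * sun8i_ss_hashkey() - condense an over-long HMAC key.
 * Hash @key with a software "sha1" shash into tfmctx->key so that keys longer
 * than the block size are reduced before the ipad/opad blocks are built.
 */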
static int sun8i_ss_hashkey(struct sun8i_ss_hash_tfm_ctx *tfmctx, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_shash *xtfm;
	struct shash_desc *sdesc;
	size_t len;
	int ret = 0;

	xtfm = crypto_alloc_shash("sha1", 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(xtfm))
		return PTR_ERR(xtfm);

	len = sizeof(*sdesc) + crypto_shash_descsize(xtfm);
	sdesc = kmalloc(len, GFP_KERNEL);
	if (!sdesc) {
		ret = -ENOMEM;
		goto err_hashkey_sdesc;
	}
	sdesc->tfm = xtfm;

	ret = crypto_shash_init(sdesc);
	if (ret) {
		dev_err(tfmctx->ss->dev, "shash init error ret=%d\n", ret);
		goto err_hashkey;
	}
	ret = crypto_shash_finup(sdesc, key, keylen, tfmctx->key);
	if (ret)
		dev_err(tfmctx->ss->dev, "shash finup error\n");
err_hashkey:
	kfree(sdesc);
err_hashkey_sdesc:
	crypto_free_shash(xtfm);
	return ret;
}
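
/*
 * sun8i_ss_hmac_setkey() - prepare the HMAC key material.
 * Keys longer than the block size are first hashed down to the digest size,
 * then the ipad/opad blocks (key XOR 0x36 / key XOR 0x5c) are precomputed for
 * the two hardware HMAC passes. The raw key is also handed to the fallback.
 */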
int sun8i_ss_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
			 unsigned int keylen)
{
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(ahash);
	struct ahash_alg *alg = __crypto_ahash_alg(ahash->base.__crt_alg);
	struct sun8i_ss_alg_template *algt;
	int digestsize, i;
	int bs = crypto_ahash_blocksize(ahash);
	int ret;

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
	digestsize = algt->alg.hash.halg.digestsize;

	if (keylen > bs) {
		ret = sun8i_ss_hashkey(tfmctx, key, keylen);
		if (ret)
			return ret;
		tfmctx->keylen = digestsize;
	} else {
		tfmctx->keylen = keylen;
		memcpy(tfmctx->key, key, keylen);
	}

	tfmctx->ipad = kzalloc(bs, GFP_KERNEL | GFP_DMA);
	if (!tfmctx->ipad)
		return -ENOMEM;
	tfmctx->opad = kzalloc(bs, GFP_KERNEL | GFP_DMA);
	if (!tfmctx->opad) {
		ret = -ENOMEM;
		goto err_opad;
	}

	memset(tfmctx->key + tfmctx->keylen, 0, bs - tfmctx->keylen);
	memcpy(tfmctx->ipad, tfmctx->key, tfmctx->keylen);
	memcpy(tfmctx->opad, tfmctx->key, tfmctx->keylen);
	for (i = 0; i < bs; i++) {
		tfmctx->ipad[i] ^= HMAC_IPAD_VALUE;
		tfmctx->opad[i] ^= HMAC_OPAD_VALUE;
	}

	ret = crypto_ahash_setkey(tfmctx->fallback_tfm, key, keylen);
	if (!ret)
		return 0;

	memzero_explicit(tfmctx->key, keylen);
	kfree_sensitive(tfmctx->opad);
err_opad:
	kfree_sensitive(tfmctx->ipad);
	return ret;
}
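
/*
 * sun8i_ss_hash_crainit() - allocate the software fallback ahash, size the
 * request context to also hold the fallback request, and take a runtime PM
 * reference on the device; sun8i_ss_hash_craexit() releases all of this.
 */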
int sun8i_ss_hash_crainit(struct crypto_tfm *tfm)
{
	struct sun8i_ss_hash_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
	struct sun8i_ss_alg_template *algt;
	int err;

	memset(op, 0, sizeof(struct sun8i_ss_hash_tfm_ctx));

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
	op->ss = algt->ss;

	op->enginectx.op.do_one_request = sun8i_ss_hash_run;
	op->enginectx.op.prepare_request = NULL;
	op->enginectx.op.unprepare_request = NULL;

	/* FALLBACK */
	op->fallback_tfm = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0,
					      CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(algt->ss->dev, "Fallback driver could not be loaded\n");
		return PTR_ERR(op->fallback_tfm);
	}

	if (algt->alg.hash.halg.statesize < crypto_ahash_statesize(op->fallback_tfm))
		algt->alg.hash.halg.statesize = crypto_ahash_statesize(op->fallback_tfm);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct sun8i_ss_hash_reqctx) +
				 crypto_ahash_reqsize(op->fallback_tfm));

	memcpy(algt->fbname, crypto_tfm_alg_driver_name(&op->fallback_tfm->base), CRYPTO_MAX_ALG_NAME);

	err = pm_runtime_get_sync(op->ss->dev);
	if (err < 0)
		goto error_pm;
	return 0;
error_pm:
	pm_runtime_put_noidle(op->ss->dev);
	crypto_free_ahash(op->fallback_tfm);
	return err;
}

void sun8i_ss_hash_craexit(struct crypto_tfm *tfm)
{
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_tfm_ctx(tfm);

	kfree_sensitive(tfmctx->ipad);
	kfree_sensitive(tfmctx->opad);

	crypto_free_ahash(tfmctx->fallback_tfm);
	pm_runtime_put_sync_suspend(tfmctx->ss->dev);
}
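
/*
 * init/update/final/finup/export/import are always delegated to the software
 * fallback ahash; only digest() may be run on the hardware.
 */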
int sun8i_ss_hash_init(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	memset(rctx, 0, sizeof(struct sun8i_ss_hash_reqctx));

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

int sun8i_ss_hash_export(struct ahash_request *areq, void *out)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_export(&rctx->fallback_req, out);
}

int sun8i_ss_hash_import(struct ahash_request *areq, const void *in)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_import(&rctx->fallback_req, in);
}

int sun8i_ss_hash_final(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ss_alg_template *algt;
#endif

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = areq->result;

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
	algt->stat_fb++;
#endif

	return crypto_ahash_final(&rctx->fallback_req);
}

int sun8i_ss_hash_update(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

int sun8i_ss_hash_finup(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ss_alg_template *algt;
#endif

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;
	rctx->fallback_req.result = areq->result;

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
	algt->stat_fb++;
#endif

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int sun8i_ss_hash_digest_fb(struct ahash_request *areq)
{
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ss_alg_template *algt;
#endif

	ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
	rctx->fallback_req.base.flags = areq->base.flags &
					CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = areq->nbytes;
	rctx->fallback_req.src = areq->src;
	rctx->fallback_req.result = areq->result;

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
	algt->stat_fb++;
#endif

	return crypto_ahash_digest(&rctx->fallback_req);
}
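
/*
 * sun8i_ss_run_hash_task() - program the Security System with each prepared
 * src/dst chunk of the request and wait for the flow's completion interrupt.
 * For every chunk after the first, BIT(17) is set and the KEY/IV address
 * registers point at the previous destination, so the engine resumes from the
 * partial digest computed so far.
 */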
static int sun8i_ss_run_hash_task(struct sun8i_ss_dev *ss,
				  struct sun8i_ss_hash_reqctx *rctx,
				  const char *name)
{
	int flow = rctx->flow;
	u32 v = SS_START;
	int i;

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	ss->flows[flow].stat_req++;
#endif

	/* choose between stream0/stream1 */
	if (flow)
		v |= SS_FLOW1;
	else
		v |= SS_FLOW0;

	v |= rctx->method;

	for (i = 0; i < MAX_SG; i++) {
		if (!rctx->t_dst[i].addr)
			break;

		mutex_lock(&ss->mlock);
		if (i > 0) {
			v |= BIT(17);
			writel(rctx->t_dst[i - 1].addr, ss->base + SS_KEY_ADR_REG);
			writel(rctx->t_dst[i - 1].addr, ss->base + SS_IV_ADR_REG);
		}

		dev_dbg(ss->dev,
			"Processing SG %d on flow %d %s ctl=%x %d to %d method=%x src=%x dst=%x\n",
			i, flow, name, v,
			rctx->t_src[i].len, rctx->t_dst[i].len,
			rctx->method, rctx->t_src[i].addr, rctx->t_dst[i].addr);

		writel(rctx->t_src[i].addr, ss->base + SS_SRC_ADR_REG);
		writel(rctx->t_dst[i].addr, ss->base + SS_DST_ADR_REG);
		writel(rctx->t_src[i].len, ss->base + SS_LEN_ADR_REG);
		writel(BIT(0) | BIT(1), ss->base + SS_INT_CTL_REG);

		reinit_completion(&ss->flows[flow].complete);
		ss->flows[flow].status = 0;
		wmb();

		writel(v, ss->base + SS_CTL_REG);
		mutex_unlock(&ss->mlock);
		wait_for_completion_interruptible_timeout(&ss->flows[flow].complete,
							  msecs_to_jiffies(2000));
		if (ss->flows[flow].status == 0) {
			dev_err(ss->dev, "DMA timeout for %s\n", name);
			return -EFAULT;
		}
	}

	return 0;
}
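
/*
 * sun8i_ss_hash_need_fallback() - decide whether the request must go to the
 * software fallback: empty or over-long requests, too many SG entries, or SG
 * entries whose length/offset do not fit the hardware DMA constraints.
 */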
static bool sun8i_ss_hash_need_fallback(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ss_alg_template *algt;
	struct scatterlist *sg;

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);

	if (areq->nbytes == 0) {
		algt->stat_fb_len++;
		return true;
	}

	if (areq->nbytes >= MAX_PAD_SIZE - 64) {
		algt->stat_fb_len++;
		return true;
	}

	/* we need to reserve one SG for the padding one */
	if (sg_nents(areq->src) > MAX_SG - 1) {
		algt->stat_fb_sgnum++;
		return true;
	}

	sg = areq->src;
	while (sg) {
		/*
		 * The SS can only hash full blocks; since it supports only
		 * MD5/SHA1/SHA224/SHA256, the block size is always 64 bytes.
		 * Only the last block may be bounced to the pad buffer.
		 */
		if (sg->length % 64 && sg_next(sg)) {
			algt->stat_fb_sglen++;
			return true;
		}
		if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
			algt->stat_fb_align++;
			return true;
		}
		if (sg->length % 4) {
			algt->stat_fb_sglen++;
			return true;
		}
		sg = sg_next(sg);
	}
	return false;
}
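
/*
 * sun8i_ss_hash_digest() - entry point for digest(): fall back to software
 * when the request cannot be handled by the hardware, otherwise pick a flow
 * and queue the request on that flow's crypto engine.
 */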
int sun8i_ss_hash_digest(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct sun8i_ss_alg_template *algt;
	struct sun8i_ss_dev *ss;
	struct crypto_engine *engine;
	int e;

	if (sun8i_ss_hash_need_fallback(areq))
		return sun8i_ss_hash_digest_fb(areq);

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
	ss = algt->ss;

	e = sun8i_ss_get_engine_number(ss);
	rctx->flow = e;
	engine = ss->flows[e].engine;

	return crypto_transfer_hash_request_to_engine(engine, areq);
}
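
/*
 * hash_pad() - append MD5/SHA padding to the pad buffer, starting at 32-bit
 * word @padi: a 0x80 byte, zeroes up to the length field, then the bit length
 * of the message (little-endian for MD5, big-endian for SHA). Returns the new
 * number of 32-bit words used in @buf, or 0 if @bufsize would be overflowed.
 */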
static u64 hash_pad(__le32 *buf, unsigned int bufsize, u64 padi, u64 byte_count, bool le, int bs)
{
	u64 fill, min_fill, j, k;
	__be64 *bebits;
	__le64 *lebits;

	j = padi;
	buf[j++] = cpu_to_le32(0x80);

	if (bs == 64) {
		fill = 64 - (byte_count % 64);
		min_fill = 2 * sizeof(u32) + sizeof(u32);
	} else {
		fill = 128 - (byte_count % 128);
		min_fill = 4 * sizeof(u32) + sizeof(u32);
	}

	if (fill < min_fill)
		fill += bs;

	k = j;
	j += (fill - min_fill) / sizeof(u32);
	if (j * 4 > bufsize) {
		pr_err("%s OVERFLOW %llu\n", __func__, j);
		return 0;
	}
	for (; k < j; k++)
		buf[k] = 0;

	if (le) {
		/* MD5 */
		lebits = (__le64 *)&buf[j];
		*lebits = cpu_to_le64(byte_count << 3);
		j += 2;
	} else {
		if (bs == 64) {
			/* sha1 sha224 sha256 */
			bebits = (__be64 *)&buf[j];
			*bebits = cpu_to_be64(byte_count << 3);
			j += 2;
		} else {
			/* sha384 sha512 */
			bebits = (__be64 *)&buf[j];
			*bebits = cpu_to_be64(byte_count >> 61);
			j += 2;
			bebits = (__be64 *)&buf[j];
			*bebits = cpu_to_be64(byte_count << 3);
			j += 2;
		}
	}
	if (j * 4 > bufsize) {
		pr_err("%s OVERFLOW %llu\n", __func__, j);
		return 0;
	}

	return j;
}

/*
 * sun8i_ss_hash_run - run an ahash request
 * Send the data of the request to the SS along with an extra SG with padding
 */
int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
{
	struct ahash_request *areq = container_of(breq, struct ahash_request, base);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
	struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct sun8i_ss_alg_template *algt;
	struct sun8i_ss_dev *ss;
	struct scatterlist *sg;
	int bs = crypto_ahash_blocksize(tfm);
	int nr_sgs, err, digestsize;
	unsigned int len;
	u64 byte_count;
	void *pad, *result;
	int j, i, k, todo;
	dma_addr_t addr_res, addr_pad, addr_xpad;
	__le32 *bf;
	/* HMAC step:
	 * 0: normal hashing
	 * 1: IPAD
	 * 2: OPAD
	 */
	int hmac = 0;

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash);
	ss = algt->ss;

	digestsize = algt->alg.hash.halg.digestsize;
	if (digestsize == SHA224_DIGEST_SIZE)
		digestsize = SHA256_DIGEST_SIZE;

	result = ss->flows[rctx->flow].result;
	pad = ss->flows[rctx->flow].pad;
	bf = (__le32 *)pad;

	for (i = 0; i < MAX_SG; i++) {
		rctx->t_dst[i].addr = 0;
		rctx->t_dst[i].len = 0;
	}

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	algt->stat_req++;
#endif

	rctx->method = ss->variant->alg_hash[algt->ss_algo_id];

	nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
	if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
		dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs);
		err = -EINVAL;
		goto theend;
	}

	addr_res = dma_map_single(ss->dev, result, digestsize, DMA_FROM_DEVICE);
	if (dma_mapping_error(ss->dev, addr_res)) {
		dev_err(ss->dev, "DMA map dest\n");
		err = -EINVAL;
		goto err_dma_result;
	}

	j = 0;
	len = areq->nbytes;
	sg = areq->src;
	i = 0;
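	/*
	 * Walk the source SG list: chunks that are a multiple of 64 bytes are
	 * handed to the hardware by DMA address; a trailing partial block is
	 * copied into the pad buffer and will be sent together with the
	 * padding SG.
	 */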
	while (len > 0 && sg) {
		if (sg_dma_len(sg) == 0) {
			sg = sg_next(sg);
			continue;
		}
		todo = min(len, sg_dma_len(sg));
		/* only the last SG may have a length that is not a multiple of 64 */
		if (todo % 64 == 0) {
			rctx->t_src[i].addr = sg_dma_address(sg);
			rctx->t_src[i].len = todo / 4;
			rctx->t_dst[i].addr = addr_res;
			rctx->t_dst[i].len = digestsize / 4;
			len -= todo;
		} else {
			scatterwalk_map_and_copy(bf, sg, 0, todo, 0);
			j += todo / 4;
			len -= todo;
		}
		sg = sg_next(sg);
		i++;
	}
	if (len > 0) {
		dev_err(ss->dev, "remaining len %d\n", len);
		err = -EINVAL;
		goto theend;
	}

	if (j > 0)
		i--;

retry:
	byte_count = areq->nbytes;
	if (tfmctx->keylen && hmac == 0) {
		hmac = 1;
		/* shift all SG one slot up, to free slot 0 for IPAD */
		for (k = 6; k >= 0; k--) {
			rctx->t_src[k + 1].addr = rctx->t_src[k].addr;
			rctx->t_src[k + 1].len = rctx->t_src[k].len;
			rctx->t_dst[k + 1].addr = rctx->t_dst[k].addr;
			rctx->t_dst[k + 1].len = rctx->t_dst[k].len;
		}
		addr_xpad = dma_map_single(ss->dev, tfmctx->ipad, bs, DMA_TO_DEVICE);
		err = dma_mapping_error(ss->dev, addr_xpad);
		if (err) {
			dev_err(ss->dev, "Fail to create DMA mapping of ipad\n");
			goto err_dma_xpad;
		}
		rctx->t_src[0].addr = addr_xpad;
		rctx->t_src[0].len = bs / 4;
		rctx->t_dst[0].addr = addr_res;
		rctx->t_dst[0].len = digestsize / 4;
		i++;
		byte_count = areq->nbytes + bs;
	}
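
	/*
	 * Second HMAC pass: hash OPAD followed by the digest of the first
	 * pass (copied into the pad buffer) to produce the final HMAC value.
	 */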
	if (tfmctx->keylen && hmac == 2) {
		for (i = 0; i < MAX_SG; i++) {
			rctx->t_src[i].addr = 0;
			rctx->t_src[i].len = 0;
			rctx->t_dst[i].addr = 0;
			rctx->t_dst[i].len = 0;
		}

		addr_res = dma_map_single(ss->dev, result, digestsize, DMA_FROM_DEVICE);
		if (dma_mapping_error(ss->dev, addr_res)) {
			dev_err(ss->dev, "Fail to create DMA mapping of result\n");
			err = -EINVAL;
			goto err_dma_result;
		}
		addr_xpad = dma_map_single(ss->dev, tfmctx->opad, bs, DMA_TO_DEVICE);
		err = dma_mapping_error(ss->dev, addr_xpad);
		if (err) {
			dev_err(ss->dev, "Fail to create DMA mapping of opad\n");
			goto err_dma_xpad;
		}
		rctx->t_src[0].addr = addr_xpad;
		rctx->t_src[0].len = bs / 4;

		memcpy(bf, result, digestsize);
		j = digestsize / 4;
		i = 1;
		byte_count = digestsize + bs;

		rctx->t_dst[0].addr = addr_res;
		rctx->t_dst[0].len = digestsize / 4;
	}

	switch (algt->ss_algo_id) {
	case SS_ID_HASH_MD5:
		j = hash_pad(bf, 4096, j, byte_count, true, bs);
		break;
	case SS_ID_HASH_SHA1:
	case SS_ID_HASH_SHA224:
	case SS_ID_HASH_SHA256:
		j = hash_pad(bf, 4096, j, byte_count, false, bs);
		break;
	}
	if (!j) {
		err = -EINVAL;
		goto theend;
	}

	addr_pad = dma_map_single(ss->dev, pad, j * 4, DMA_TO_DEVICE);
	if (dma_mapping_error(ss->dev, addr_pad)) {
		dev_err(ss->dev, "DMA error on padding SG\n");
		err = -EINVAL;
		goto err_dma_pad;
	}
	rctx->t_src[i].addr = addr_pad;
	rctx->t_src[i].len = j;
	rctx->t_dst[i].addr = addr_res;
	rctx->t_dst[i].len = digestsize / 4;

	err = sun8i_ss_run_hash_task(ss, rctx, crypto_tfm_alg_name(areq->base.tfm));

	/*
	 * mini helper for checking dma map/unmap
	 * flow start for hmac = 0 (and HMAC = 1)
	 * HMAC = 0
	 * MAP src
	 * MAP res
	 *
	 * retry:
	 * if hmac then hmac = 1
	 *	MAP xpad (ipad)
	 * if hmac == 2
	 *	MAP res
	 *	MAP xpad (opad)
	 * MAP pad
	 * ACTION!
	 * UNMAP pad
	 * if hmac
	 *	UNMAP xpad
	 * UNMAP res
	 * if hmac < 2
	 *	UNMAP SRC
	 *
	 * if hmac = 1 then hmac = 2 goto retry
	 */

	dma_unmap_single(ss->dev, addr_pad, j * 4, DMA_TO_DEVICE);

err_dma_pad:
	if (hmac > 0)
		dma_unmap_single(ss->dev, addr_xpad, bs, DMA_TO_DEVICE);
err_dma_xpad:
	dma_unmap_single(ss->dev, addr_res, digestsize, DMA_FROM_DEVICE);
err_dma_result:
	if (hmac < 2)
		dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
			     DMA_TO_DEVICE);
	if (hmac == 1 && !err) {
		hmac = 2;
		goto retry;
	}

	if (!err)
		memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
theend:
	local_bh_disable();
	crypto_finalize_hash_request(engine, breq, err);
	local_bh_enable();
	return 0;
}