// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <[email protected]>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk ([email protected])
 *             Adrian Hoban <[email protected]>
 *             Gabriele Paoloni <[email protected]>
 *             Aidan O'Mahony ([email protected])
 *    Copyright (c) 2010, Intel Corporation.
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/cryptd.h>
#include <linux/refcount.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static unsigned int cryptd_max_cpu_qlen = 1000;
module_param(cryptd_max_cpu_qlen, uint, 0);
MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");

static struct workqueue_struct *cryptd_wq;
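
/*
 * Each CPU owns its own request queue and work item, so requests are queued
 * on the submitting CPU and later processed there by the cryptd workqueue.
 */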
struct cryptd_cpu_queue {
        struct crypto_queue queue;
        struct work_struct work;
};

struct cryptd_queue {
        /*
         * Protected by disabling BH to allow enqueueing from softirq and
         * dequeuing from kworker (cryptd_queue_worker()).
         */
        struct cryptd_cpu_queue __percpu *cpu_queue;
};

struct cryptd_instance_ctx {
        struct crypto_spawn spawn;
        struct cryptd_queue *queue;
};

struct skcipherd_instance_ctx {
        struct crypto_skcipher_spawn spawn;
        struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
        struct crypto_shash_spawn spawn;
        struct cryptd_queue *queue;
};

struct aead_instance_ctx {
        struct crypto_aead_spawn aead_spawn;
        struct cryptd_queue *queue;
};

struct cryptd_skcipher_ctx {
        refcount_t refcnt;
        struct crypto_skcipher *child;
};

struct cryptd_skcipher_request_ctx {
        crypto_completion_t complete;
        struct skcipher_request req;
};

struct cryptd_hash_ctx {
        refcount_t refcnt;
        struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
        crypto_completion_t complete;
        struct shash_desc desc;
};

struct cryptd_aead_ctx {
        refcount_t refcnt;
        struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
        crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);
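
/*
 * Allocate the per-CPU queues and initialise one crypto_queue plus one work
 * item for every possible CPU.
 */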
static int cryptd_init_queue(struct cryptd_queue *queue,
                             unsigned int max_cpu_qlen)
{
        int cpu;
        struct cryptd_cpu_queue *cpu_queue;

        queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
        if (!queue->cpu_queue)
                return -ENOMEM;
        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
                INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
        }
        pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
        return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
        int cpu;
        struct cryptd_cpu_queue *cpu_queue;

        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                BUG_ON(cpu_queue->queue.qlen);
        }
        free_percpu(queue->cpu_queue);
}
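
/*
 * Queue a request on the submitting CPU's queue and kick that CPU's work
 * item.  If the tfm was obtained via cryptd_alloc_*() (its refcount is
 * already non-zero), take an extra reference so the tfm stays alive until
 * the request completes.
 */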
static int cryptd_enqueue_request(struct cryptd_queue *queue,
                                  struct crypto_async_request *request)
{
        int err;
        struct cryptd_cpu_queue *cpu_queue;
        refcount_t *refcnt;

        local_bh_disable();
        cpu_queue = this_cpu_ptr(queue->cpu_queue);
        err = crypto_enqueue_request(&cpu_queue->queue, request);

        refcnt = crypto_tfm_ctx(request->tfm);

        if (err == -ENOSPC)
                goto out;

        queue_work_on(smp_processor_id(), cryptd_wq, &cpu_queue->work);

        if (!refcount_read(refcnt))
                goto out;

        refcount_inc(refcnt);

out:
        local_bh_enable();
        return err;
}

/*
 * Called in workqueue context: perform one real crypto operation (via
 * req->complete) and reschedule the work item if there is more work to do.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
        struct cryptd_cpu_queue *cpu_queue;
        struct crypto_async_request *req, *backlog;

        cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
        /*
         * Only handle one request at a time to avoid hogging crypto workqueue.
         */
        local_bh_disable();
        backlog = crypto_get_backlog(&cpu_queue->queue);
        req = crypto_dequeue_request(&cpu_queue->queue);
        local_bh_enable();

        if (!req)
                return;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);
        req->complete(req, 0);

        if (cpu_queue->queue.qlen)
                queue_work(cryptd_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

        return ictx->queue;
}

static void cryptd_type_and_mask(struct crypto_attr_type *algt,
                                 u32 *type, u32 *mask)
{
        /*
         * cryptd is allowed to wrap internal algorithms, but in that case the
         * resulting cryptd instance will be marked as internal as well.
         */
        *type = algt->type & CRYPTO_ALG_INTERNAL;
        *mask = algt->mask & CRYPTO_ALG_INTERNAL;

        /* No point in cryptd wrapping an algorithm that's already async. */
        *mask |= CRYPTO_ALG_ASYNC;

        *mask |= crypto_algt_inherited_mask(algt);
}

static int cryptd_init_instance(struct crypto_instance *inst,
                                struct crypto_alg *alg)
{
        if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)",
                     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
                return -ENAMETOOLONG;

        memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

        inst->alg.cra_priority = alg->cra_priority + 50;
        inst->alg.cra_blocksize = alg->cra_blocksize;
        inst->alg.cra_alignmask = alg->cra_alignmask;

        return 0;
}
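
/*
 * Key handling is delegated to the synchronous child transform; only the
 * CRYPTO_TFM_REQ_* flags of the cryptd parent are propagated to it.
 */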
static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
                                  const u8 *key, unsigned int keylen)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
        struct crypto_skcipher *child = ctx->child;

        crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(child,
                                  crypto_skcipher_get_flags(parent) &
                                  CRYPTO_TFM_REQ_MASK);
        return crypto_skcipher_setkey(child, key, keylen);
}
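
/*
 * Run the caller's original completion with BHs disabled, then drop the
 * reference taken at enqueue time; the parent tfm is freed once the last
 * reference (including the one dropped by cryptd_free_skcipher()) is gone.
 */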
static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
        int refcnt = refcount_read(&ctx->refcnt);

        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();

        if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
                crypto_free_skcipher(tfm);
}
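
/*
 * Worker-side encrypt/decrypt: rebuild the operation on the subrequest
 * embedded in the request context, bound to the synchronous child cipher,
 * run it, then restore the caller's completion before reporting the result.
 */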
static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
                                    int err)
{
        struct skcipher_request *req = skcipher_request_cast(base);
        struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_request *subreq = &rctx->req;
        struct crypto_skcipher *child = ctx->child;

        if (unlikely(err == -EINPROGRESS))
                goto out;

        skcipher_request_set_tfm(subreq, child);
        skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
                                      NULL, NULL);
        skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
                                   req->iv);

        err = crypto_skcipher_encrypt(subreq);
        skcipher_request_zero(subreq);

        req->base.complete = rctx->complete;

out:
        cryptd_skcipher_complete(req, err);
}

static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
                                    int err)
{
        struct skcipher_request *req = skcipher_request_cast(base);
        struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct skcipher_request *subreq = &rctx->req;
        struct crypto_skcipher *child = ctx->child;

        if (unlikely(err == -EINPROGRESS))
                goto out;

        skcipher_request_set_tfm(subreq, child);
        skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
                                      NULL, NULL);
        skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
                                   req->iv);

        err = crypto_skcipher_decrypt(subreq);
        skcipher_request_zero(subreq);

        req->base.complete = rctx->complete;

out:
        cryptd_skcipher_complete(req, err);
}
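
/*
 * Called from the caller's (possibly atomic) context: stash the original
 * completion, substitute the worker callback and push the request onto the
 * per-CPU cryptd queue.
 */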
static int cryptd_skcipher_enqueue(struct skcipher_request *req,
                                   crypto_completion_t compl)
{
        struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cryptd_queue *queue;

        queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
        rctx->complete = req->base.complete;
        req->base.complete = compl;

        return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
{
        return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
}

static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
{
        return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
}

static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
        struct skcipher_instance *inst = skcipher_alg_instance(tfm);
        struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
        struct crypto_skcipher_spawn *spawn = &ictx->spawn;
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_skcipher *cipher;

        cipher = crypto_spawn_skcipher(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = cipher;
        crypto_skcipher_set_reqsize(
                tfm, sizeof(struct cryptd_skcipher_request_ctx) +
                     crypto_skcipher_reqsize(cipher));
        return 0;
}

static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

        crypto_free_skcipher(ctx->child);
}

static void cryptd_skcipher_free(struct skcipher_instance *inst)
{
        struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);

        crypto_drop_skcipher(&ctx->spawn);
        kfree(inst);
}
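
/*
 * Build a "cryptd(xyz)" skcipher instance around the requested algorithm:
 * grab the child spawn, copy its geometry and advertise the new instance as
 * asynchronous.
 */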
static int cryptd_create_skcipher(struct crypto_template *tmpl,
                                  struct rtattr **tb,
                                  struct crypto_attr_type *algt,
                                  struct cryptd_queue *queue)
{
        struct skcipherd_instance_ctx *ctx;
        struct skcipher_instance *inst;
        struct skcipher_alg *alg;
        u32 type;
        u32 mask;
        int err;

        cryptd_type_and_mask(algt, &type, &mask);

        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;

        ctx = skcipher_instance_ctx(inst);
        ctx->queue = queue;

        err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
                                   crypto_attr_alg_name(tb[1]), type, mask);
        if (err)
                goto err_free_inst;

        alg = crypto_spawn_skcipher_alg(&ctx->spawn);
        err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
        if (err)
                goto err_free_inst;

        inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
                (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
        inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
        inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
        inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
        inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);

        inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);

        inst->alg.init = cryptd_skcipher_init_tfm;
        inst->alg.exit = cryptd_skcipher_exit_tfm;

        inst->alg.setkey = cryptd_skcipher_setkey;
        inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
        inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;

        inst->free = cryptd_skcipher_free;

        err = skcipher_register_instance(tmpl, inst);
        if (err) {
err_free_inst:
                cryptd_skcipher_free(inst);
        }
        return err;
}
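
/*
 * The hash side wraps a synchronous shash as an asynchronous ahash; the
 * shash_desc used by the worker lives in the ahash request context.
 */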
static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
        struct crypto_shash_spawn *spawn = &ictx->spawn;
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_shash *hash;

        hash = crypto_spawn_shash(spawn);
        if (IS_ERR(hash))
                return PTR_ERR(hash);

        ctx->child = hash;
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct cryptd_hash_request_ctx) +
                                 crypto_shash_descsize(hash));
        return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
                              const u8 *key, unsigned int keylen)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
        struct crypto_shash *child = ctx->child;

        crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
                               CRYPTO_TFM_REQ_MASK);
        return crypto_shash_setkey(child, key, keylen);
}

static int cryptd_hash_enqueue(struct ahash_request *req,
                               crypto_completion_t compl)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct cryptd_queue *queue =
                cryptd_get_queue(crypto_ahash_tfm(tfm));

        rctx->complete = req->base.complete;
        req->base.complete = compl;

        return cryptd_enqueue_request(queue, &req->base);
}

static void cryptd_hash_complete(struct ahash_request *req, int err)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        int refcnt = refcount_read(&ctx->refcnt);

        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();

        if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
                crypto_free_ahash(tfm);
}
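
/*
 * Worker-side hash operations: each one maps the ahash request onto the
 * shash_desc stored in the request context and calls the corresponding
 * shash or shash_ahash helper on the child transform.
 */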
static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
        struct crypto_shash *child = ctx->child;
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct shash_desc *desc = &rctx->desc;

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc->tfm = child;

        err = crypto_shash_init(desc);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx;

        rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        err = shash_ahash_update(req, &rctx->desc);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        err = crypto_shash_final(&rctx->desc, req->result);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        err = shash_ahash_finup(req, &rctx->desc);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
        struct crypto_shash *child = ctx->child;
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct shash_desc *desc = &rctx->desc;

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc->tfm = child;

        err = shash_ahash_digest(req, desc);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
        struct shash_desc *desc = cryptd_shash_desc(req);

        desc->tfm = ctx->child;

        return crypto_shash_import(desc, in);
}

static void cryptd_hash_free(struct ahash_instance *inst)
{
        struct hashd_instance_ctx *ctx = ahash_instance_ctx(inst);

        crypto_drop_shash(&ctx->spawn);
        kfree(inst);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
                              struct crypto_attr_type *algt,
                              struct cryptd_queue *queue)
{
        struct hashd_instance_ctx *ctx;
        struct ahash_instance *inst;
        struct shash_alg *alg;
        u32 type;
        u32 mask;
        int err;

        cryptd_type_and_mask(algt, &type, &mask);

        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;

        ctx = ahash_instance_ctx(inst);
        ctx->queue = queue;

        err = crypto_grab_shash(&ctx->spawn, ahash_crypto_instance(inst),
                                crypto_attr_alg_name(tb[1]), type, mask);
        if (err)
                goto err_free_inst;
        alg = crypto_spawn_shash_alg(&ctx->spawn);

        err = cryptd_init_instance(ahash_crypto_instance(inst), &alg->base);
        if (err)
                goto err_free_inst;

        inst->alg.halg.base.cra_flags |= CRYPTO_ALG_ASYNC |
                (alg->base.cra_flags & (CRYPTO_ALG_INTERNAL |
                                        CRYPTO_ALG_OPTIONAL_KEY));
        inst->alg.halg.digestsize = alg->digestsize;
        inst->alg.halg.statesize = alg->statesize;
        inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

        inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
        inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

        inst->alg.init   = cryptd_hash_init_enqueue;
        inst->alg.update = cryptd_hash_update_enqueue;
        inst->alg.final  = cryptd_hash_final_enqueue;
        inst->alg.finup  = cryptd_hash_finup_enqueue;
        inst->alg.export = cryptd_hash_export;
        inst->alg.import = cryptd_hash_import;
        if (crypto_shash_alg_has_setkey(alg))
                inst->alg.setkey = cryptd_hash_setkey;
        inst->alg.digest = cryptd_hash_digest_enqueue;

        inst->free = cryptd_hash_free;

        err = ahash_register_instance(tmpl, inst);
        if (err) {
err_free_inst:
                cryptd_hash_free(inst);
        }
        return err;
}
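
/*
 * AEAD setkey and setauthsize simply forward to the synchronous child
 * transform.
 */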
static int cryptd_aead_setkey(struct crypto_aead *parent,
                              const u8 *key, unsigned int keylen)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
        struct crypto_aead *child = ctx->child;

        return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
                                   unsigned int authsize)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
        struct crypto_aead *child = ctx->child;

        return crypto_aead_setauthsize(child, authsize);
}
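
/*
 * Common worker-side AEAD path: retarget the request at the child tfm, run
 * the supplied encrypt or decrypt operation, then complete the request and
 * drop the per-request tfm reference.
 */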
static void cryptd_aead_crypt(struct aead_request *req,
                              struct crypto_aead *child,
                              int err,
                              int (*crypt)(struct aead_request *req))
{
        struct cryptd_aead_request_ctx *rctx;
        struct cryptd_aead_ctx *ctx;
        crypto_completion_t compl;
        struct crypto_aead *tfm;
        int refcnt;

        rctx = aead_request_ctx(req);
        compl = rctx->complete;
        tfm = crypto_aead_reqtfm(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        aead_request_set_tfm(req, child);
        err = crypt(req);

out:
        ctx = crypto_aead_ctx(tfm);
        refcnt = refcount_read(&ctx->refcnt);

        local_bh_disable();
        compl(&req->base, err);
        local_bh_enable();

        if (err != -EINPROGRESS && refcnt && refcount_dec_and_test(&ctx->refcnt))
                crypto_free_aead(tfm);
}

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
        struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
        struct crypto_aead *child = ctx->child;
        struct aead_request *req;

        req = container_of(areq, struct aead_request, base);
        cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
        struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
        struct crypto_aead *child = ctx->child;
        struct aead_request *req;

        req = container_of(areq, struct aead_request, base);
        cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
                               crypto_completion_t compl)
{
        struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

        rctx->complete = req->base.complete;
        req->base.complete = compl;

        return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
        return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
        return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
        struct aead_instance *inst = aead_alg_instance(tfm);
        struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
        struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct crypto_aead *cipher;

        cipher = crypto_spawn_aead(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = cipher;
        crypto_aead_set_reqsize(
                tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
                         crypto_aead_reqsize(cipher)));
        return 0;
}

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);

        crypto_free_aead(ctx->child);
}

static void cryptd_aead_free(struct aead_instance *inst)
{
        struct aead_instance_ctx *ctx = aead_instance_ctx(inst);

        crypto_drop_aead(&ctx->aead_spawn);
        kfree(inst);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
                              struct rtattr **tb,
                              struct crypto_attr_type *algt,
                              struct cryptd_queue *queue)
{
        struct aead_instance_ctx *ctx;
        struct aead_instance *inst;
        struct aead_alg *alg;
        u32 type;
        u32 mask;
        int err;

        cryptd_type_and_mask(algt, &type, &mask);

        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;

        ctx = aead_instance_ctx(inst);
        ctx->queue = queue;

        err = crypto_grab_aead(&ctx->aead_spawn, aead_crypto_instance(inst),
                               crypto_attr_alg_name(tb[1]), type, mask);
        if (err)
                goto err_free_inst;

        alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
        err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
        if (err)
                goto err_free_inst;

        inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
                (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
        inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

        inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
        inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

        inst->alg.init = cryptd_aead_init_tfm;
        inst->alg.exit = cryptd_aead_exit_tfm;
        inst->alg.setkey = cryptd_aead_setkey;
        inst->alg.setauthsize = cryptd_aead_setauthsize;
        inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
        inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

        inst->free = cryptd_aead_free;

        err = aead_register_instance(tmpl, inst);
        if (err) {
err_free_inst:
                cryptd_aead_free(inst);
        }
        return err;
}

static struct cryptd_queue queue;

static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
        struct crypto_attr_type *algt;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return PTR_ERR(algt);

        switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_SKCIPHER:
                return cryptd_create_skcipher(tmpl, tb, algt, &queue);
        case CRYPTO_ALG_TYPE_HASH:
                return cryptd_create_hash(tmpl, tb, algt, &queue);
        case CRYPTO_ALG_TYPE_AEAD:
                return cryptd_create_aead(tmpl, tb, algt, &queue);
        }

        return -EINVAL;
}

static struct crypto_template cryptd_tmpl = {
        .name = "cryptd",
        .create = cryptd_create,
        .module = THIS_MODULE,
};
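
/*
 * Exported helpers for drivers that use cryptd directly: allocate a
 * "cryptd(...)" handle, access the synchronous child, check whether requests
 * are still queued, and drop the caller's reference when done.
 */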
struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
                                              u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct cryptd_skcipher_ctx *ctx;
        struct crypto_skcipher *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_skcipher(tfm);
                return ERR_PTR(-EINVAL);
        }

        ctx = crypto_skcipher_ctx(tfm);
        refcount_set(&ctx->refcnt, 1);

        return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);

struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);

bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

        return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);

void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

        if (refcount_dec_and_test(&ctx->refcnt))
                crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
                                        u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct cryptd_hash_ctx *ctx;
        struct crypto_ahash *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_ahash(tfm);
                return ERR_PTR(-EINVAL);
        }

        ctx = crypto_ahash_ctx(tfm);
        refcount_set(&ctx->refcnt, 1);

        return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

        return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

        if (refcount_dec_and_test(&ctx->refcnt))
                crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
                                      u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct cryptd_aead_ctx *ctx;
        struct crypto_aead *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_aead(tfm);
                return ERR_PTR(-EINVAL);
        }

        ctx = crypto_aead_ctx(tfm);
        refcount_set(&ctx->refcnt, 1);

        return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
        struct cryptd_aead_ctx *ctx;

        ctx = crypto_aead_ctx(&tfm->base);
        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

        return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

        if (refcount_dec_and_test(&ctx->refcnt))
                crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);
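
/*
 * Module init/exit: bring up the "cryptd" workqueue and the per-CPU request
 * queues, then register the template; tear them down again on unload.
 */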
static int __init cryptd_init(void)
{
        int err;

        cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
                                    1);
        if (!cryptd_wq)
                return -ENOMEM;

        err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
        if (err)
                goto err_destroy_wq;

        err = crypto_register_template(&cryptd_tmpl);
        if (err)
                goto err_fini_queue;

        return 0;

err_fini_queue:
        cryptd_fini_queue(&queue);
err_destroy_wq:
        destroy_workqueue(cryptd_wq);
        return err;
}

static void __exit cryptd_exit(void)
{
        destroy_workqueue(cryptd_wq);
        cryptd_fini_queue(&queue);
        crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");