blk-crypto-fallback.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto-fallback: " fmt

#include <crypto/skcipher.h>
#include <linux/blk-crypto.h>
#include <linux/blk-crypto-profile.h>
#include <linux/blkdev.h>
#include <linux/crypto.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/scatterlist.h>

#include "blk-cgroup.h"
#include "blk-crypto-internal.h"

static unsigned int num_prealloc_bounce_pg = 32;
module_param(num_prealloc_bounce_pg, uint, 0);
MODULE_PARM_DESC(num_prealloc_bounce_pg,
		 "Number of preallocated bounce pages for the blk-crypto crypto API fallback");

static unsigned int blk_crypto_num_keyslots = 100;
module_param_named(num_keyslots, blk_crypto_num_keyslots, uint, 0);
MODULE_PARM_DESC(num_keyslots,
		 "Number of keyslots for the blk-crypto crypto API fallback");

static unsigned int num_prealloc_fallback_crypt_ctxs = 128;
module_param(num_prealloc_fallback_crypt_ctxs, uint, 0);
MODULE_PARM_DESC(num_prealloc_fallback_crypt_ctxs,
		 "Number of preallocated bio fallback crypto contexts for blk-crypto to use during crypto API fallback");

struct bio_fallback_crypt_ctx {
	struct bio_crypt_ctx crypt_ctx;
	/*
	 * Copy of the bvec_iter when this bio was submitted.
	 * We only want to en/decrypt the part of the bio as described by the
	 * bvec_iter upon submission because bio might be split before being
	 * resubmitted
	 */
	struct bvec_iter crypt_iter;
	union {
		struct {
			struct work_struct work;
			struct bio *bio;
		};
		struct {
			void *bi_private_orig;
			bio_end_io_t *bi_end_io_orig;
		};
	};
};

static struct kmem_cache *bio_fallback_crypt_ctx_cache;
static mempool_t *bio_fallback_crypt_ctx_pool;

/*
 * Allocating a crypto tfm during I/O can deadlock, so we have to preallocate
 * all of a mode's tfms when that mode starts being used. Since each mode may
 * need all the keyslots at some point, each mode needs its own tfm for each
 * keyslot; thus, a keyslot may contain tfms for multiple modes. However, to
 * match the behavior of real inline encryption hardware (which only supports a
 * single encryption context per keyslot), we only allow one tfm per keyslot to
 * be used at a time - the rest of the unused tfms have their keys cleared.
 */
static DEFINE_MUTEX(tfms_init_lock);
static bool tfms_inited[BLK_ENCRYPTION_MODE_MAX];

static struct blk_crypto_fallback_keyslot {
	enum blk_crypto_mode_num crypto_mode;
	struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX];
} *blk_crypto_keyslots;

static struct blk_crypto_profile *blk_crypto_fallback_profile;
static struct workqueue_struct *blk_crypto_wq;
static mempool_t *blk_crypto_bounce_page_pool;
static struct bio_set crypto_bio_split;

/*
 * This is the key we set when evicting a keyslot. This *should* be the all 0's
 * key, but AES-XTS rejects that key, so we use some random bytes instead.
 */
static u8 blank_key[BLK_CRYPTO_MAX_STANDARD_KEY_SIZE];

static void blk_crypto_fallback_evict_keyslot(unsigned int slot)
{
	struct blk_crypto_fallback_keyslot *slotp = &blk_crypto_keyslots[slot];
	enum blk_crypto_mode_num crypto_mode = slotp->crypto_mode;
	int err;

	WARN_ON(slotp->crypto_mode == BLK_ENCRYPTION_MODE_INVALID);

	/* Clear the key in the skcipher */
	err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], blank_key,
				     blk_crypto_modes[crypto_mode].keysize);
	WARN_ON(err);
	slotp->crypto_mode = BLK_ENCRYPTION_MODE_INVALID;
}
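
/*
 * Program @key into keyslot @slot by setting it on the slot's preallocated
 * skcipher tfm for the key's crypto mode.  If a key for a different mode was
 * previously programmed into this slot, it is evicted first.
 */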
static int
blk_crypto_fallback_keyslot_program(struct blk_crypto_profile *profile,
				    const struct blk_crypto_key *key,
				    unsigned int slot)
{
	struct blk_crypto_fallback_keyslot *slotp = &blk_crypto_keyslots[slot];
	const enum blk_crypto_mode_num crypto_mode =
		key->crypto_cfg.crypto_mode;
	int err;

	if (crypto_mode != slotp->crypto_mode &&
	    slotp->crypto_mode != BLK_ENCRYPTION_MODE_INVALID)
		blk_crypto_fallback_evict_keyslot(slot);

	slotp->crypto_mode = crypto_mode;
	err = crypto_skcipher_setkey(slotp->tfms[crypto_mode], key->raw,
				     key->size);
	if (err) {
		blk_crypto_fallback_evict_keyslot(slot);
		return err;
	}
	return 0;
}
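
/* Keyslot eviction callback for the fallback profile: clear the slot's key. */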
static int blk_crypto_fallback_keyslot_evict(struct blk_crypto_profile *profile,
					     const struct blk_crypto_key *key,
					     unsigned int slot)
{
	blk_crypto_fallback_evict_keyslot(slot);
	return 0;
}

static const struct blk_crypto_ll_ops blk_crypto_fallback_ll_ops = {
	.keyslot_program	= blk_crypto_fallback_keyslot_program,
	.keyslot_evict		= blk_crypto_fallback_keyslot_evict,
};
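
/*
 * Completion handler for the bounce (encrypted) bio: free the bounce pages,
 * copy the I/O status over to the original bio, and end the original bio.
 */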
static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)
{
	struct bio *src_bio = enc_bio->bi_private;
	int i;

	for (i = 0; i < enc_bio->bi_vcnt; i++)
		mempool_free(enc_bio->bi_io_vec[i].bv_page,
			     blk_crypto_bounce_page_pool);

	src_bio->bi_status = enc_bio->bi_status;

	bio_uninit(enc_bio);
	kfree(enc_bio);
	bio_endio(src_bio);
}
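
/*
 * Allocate a bio that mirrors @bio_src's segments.  The clone's bvecs initially
 * point at the source pages; blk_crypto_fallback_encrypt_bio() later replaces
 * each page with an encrypted bounce page.
 */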
static struct bio *blk_crypto_fallback_clone_bio(struct bio *bio_src)
{
	unsigned int nr_segs = bio_segments(bio_src);
	struct bvec_iter iter;
	struct bio_vec bv;
	struct bio *bio;

	bio = bio_kmalloc(nr_segs, GFP_NOIO);
	if (!bio)
		return NULL;
	bio_init(bio, bio_src->bi_bdev, bio->bi_inline_vecs, nr_segs,
		 bio_src->bi_opf);
	if (bio_flagged(bio_src, BIO_REMAPPED))
		bio_set_flag(bio, BIO_REMAPPED);
	bio->bi_ioprio = bio_src->bi_ioprio;
	bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
	bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;

	bio_for_each_segment(bv, bio_src, iter)
		bio->bi_io_vec[bio->bi_vcnt++] = bv;

	bio_clone_blkg_association(bio, bio_src);
	bio_clone_skip_dm_default_key(bio, bio_src);

	return bio;
}
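
/*
 * Allocate an skcipher_request for the tfm currently programmed into @slot,
 * and set it up so the caller can wait synchronously for completion via @wait.
 */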
  156. static bool
  157. blk_crypto_fallback_alloc_cipher_req(struct blk_crypto_keyslot *slot,
  158. struct skcipher_request **ciph_req_ret,
  159. struct crypto_wait *wait)
  160. {
  161. struct skcipher_request *ciph_req;
  162. const struct blk_crypto_fallback_keyslot *slotp;
  163. int keyslot_idx = blk_crypto_keyslot_index(slot);
  164. slotp = &blk_crypto_keyslots[keyslot_idx];
  165. ciph_req = skcipher_request_alloc(slotp->tfms[slotp->crypto_mode],
  166. GFP_NOIO);
  167. if (!ciph_req)
  168. return false;
  169. skcipher_request_set_callback(ciph_req,
  170. CRYPTO_TFM_REQ_MAY_BACKLOG |
  171. CRYPTO_TFM_REQ_MAY_SLEEP,
  172. crypto_req_done, wait);
  173. *ciph_req_ret = ciph_req;
  174. return true;
  175. }
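
/*
 * If the bio spans more than BIO_MAX_VECS segments, split off a front piece
 * that fits (so that a bounce bio with one bounce page per segment can be
 * allocated for it), resubmit the remainder, and update *bio_ptr to point to
 * the piece that will be encrypted now.
 */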
static bool blk_crypto_fallback_split_bio_if_needed(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	unsigned int i = 0;
	unsigned int num_sectors = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter) {
		num_sectors += bv.bv_len >> SECTOR_SHIFT;
		if (++i == BIO_MAX_VECS)
			break;
	}
	if (num_sectors < bio_sectors(bio)) {
		struct bio *split_bio;

		split_bio = bio_split(bio, num_sectors, GFP_NOIO,
				      &crypto_bio_split);
		if (!split_bio) {
			bio->bi_status = BLK_STS_RESOURCE;
			return false;
		}
		bio_chain(split_bio, bio);
		submit_bio_noacct(bio);
		*bio_ptr = split_bio;
	}

	return true;
}
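
/*
 * The IV passed to the skcipher is the data unit number (DUN), laid out as an
 * array of little-endian 64-bit words.
 */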
union blk_crypto_iv {
	__le64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
	u8 bytes[BLK_CRYPTO_MAX_IV_SIZE];
};

static void blk_crypto_dun_to_iv(const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
				 union blk_crypto_iv *iv)
{
	int i;

	for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++)
		iv->dun[i] = cpu_to_le64(dun[i]);
}

/*
 * The crypto API fallback's encryption routine.
 * Allocate a bounce bio for encryption, encrypt the input bio using crypto API,
 * and replace *bio_ptr with the bounce bio. May split input bio if it's too
 * large. Returns true on success. Returns false and sets bio->bi_status on
 * error.
 */
static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
{
	struct bio *src_bio, *enc_bio;
	struct bio_crypt_ctx *bc;
	struct blk_crypto_keyslot *slot;
	int data_unit_size;
	struct skcipher_request *ciph_req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
	struct scatterlist src, dst;
	union blk_crypto_iv iv;
	unsigned int i, j;
	bool ret = false;
	blk_status_t blk_st;

	/* Split the bio if it's too big for single page bvec */
	if (!blk_crypto_fallback_split_bio_if_needed(bio_ptr))
		return false;

	src_bio = *bio_ptr;
	bc = src_bio->bi_crypt_context;
	data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;

	/* Allocate bounce bio for encryption */
	enc_bio = blk_crypto_fallback_clone_bio(src_bio);
	if (!enc_bio) {
		src_bio->bi_status = BLK_STS_RESOURCE;
		return false;
	}

	/*
	 * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for
	 * this bio's algorithm and key.
	 */
	blk_st = blk_crypto_get_keyslot(blk_crypto_fallback_profile,
					bc->bc_key, &slot);
	if (blk_st != BLK_STS_OK) {
		src_bio->bi_status = blk_st;
		goto out_put_enc_bio;
	}

	/* and then allocate an skcipher_request for it */
	if (!blk_crypto_fallback_alloc_cipher_req(slot, &ciph_req, &wait)) {
		src_bio->bi_status = BLK_STS_RESOURCE;
		goto out_release_keyslot;
	}

	memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
	sg_init_table(&src, 1);
	sg_init_table(&dst, 1);

	skcipher_request_set_crypt(ciph_req, &src, &dst, data_unit_size,
				   iv.bytes);

	/* Encrypt each page in the bounce bio */
	for (i = 0; i < enc_bio->bi_vcnt; i++) {
		struct bio_vec *enc_bvec = &enc_bio->bi_io_vec[i];
		struct page *plaintext_page = enc_bvec->bv_page;
		struct page *ciphertext_page =
			mempool_alloc(blk_crypto_bounce_page_pool, GFP_NOIO);

		enc_bvec->bv_page = ciphertext_page;

		if (!ciphertext_page) {
			src_bio->bi_status = BLK_STS_RESOURCE;
			goto out_free_bounce_pages;
		}

		sg_set_page(&src, plaintext_page, data_unit_size,
			    enc_bvec->bv_offset);
		sg_set_page(&dst, ciphertext_page, data_unit_size,
			    enc_bvec->bv_offset);

		/* Encrypt each data unit in this page */
		for (j = 0; j < enc_bvec->bv_len; j += data_unit_size) {
			blk_crypto_dun_to_iv(curr_dun, &iv);
			if (crypto_wait_req(crypto_skcipher_encrypt(ciph_req),
					    &wait)) {
				i++;
				src_bio->bi_status = BLK_STS_IOERR;
				goto out_free_bounce_pages;
			}
			bio_crypt_dun_increment(curr_dun, 1);
			src.offset += data_unit_size;
			dst.offset += data_unit_size;
		}
	}

	enc_bio->bi_private = src_bio;
	enc_bio->bi_end_io = blk_crypto_fallback_encrypt_endio;
	*bio_ptr = enc_bio;

	ret = true;
	enc_bio = NULL;
	goto out_free_ciph_req;

out_free_bounce_pages:
	while (i > 0)
		mempool_free(enc_bio->bi_io_vec[--i].bv_page,
			     blk_crypto_bounce_page_pool);
out_free_ciph_req:
	skcipher_request_free(ciph_req);
out_release_keyslot:
	blk_crypto_put_keyslot(slot);
out_put_enc_bio:
	if (enc_bio)
		bio_uninit(enc_bio);
	kfree(enc_bio);
	return ret;
}

/*
 * The crypto API fallback's main decryption routine.
 * Decrypts input bio in place, and calls bio_endio on the bio.
 */
static void blk_crypto_fallback_decrypt_bio(struct work_struct *work)
{
	struct bio_fallback_crypt_ctx *f_ctx =
		container_of(work, struct bio_fallback_crypt_ctx, work);
	struct bio *bio = f_ctx->bio;
	struct bio_crypt_ctx *bc = &f_ctx->crypt_ctx;
	struct blk_crypto_keyslot *slot;
	struct skcipher_request *ciph_req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
	union blk_crypto_iv iv;
	struct scatterlist sg;
	struct bio_vec bv;
	struct bvec_iter iter;
	const int data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;
	unsigned int i;
	blk_status_t blk_st;

	/*
	 * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for
	 * this bio's algorithm and key.
	 */
	blk_st = blk_crypto_get_keyslot(blk_crypto_fallback_profile,
					bc->bc_key, &slot);
	if (blk_st != BLK_STS_OK) {
		bio->bi_status = blk_st;
		goto out_no_keyslot;
	}

	/* and then allocate an skcipher_request for it */
	if (!blk_crypto_fallback_alloc_cipher_req(slot, &ciph_req, &wait)) {
		bio->bi_status = BLK_STS_RESOURCE;
		goto out;
	}

	memcpy(curr_dun, bc->bc_dun, sizeof(curr_dun));
	sg_init_table(&sg, 1);
	skcipher_request_set_crypt(ciph_req, &sg, &sg, data_unit_size,
				   iv.bytes);

	/* Decrypt each segment in the bio */
	__bio_for_each_segment(bv, bio, iter, f_ctx->crypt_iter) {
		struct page *page = bv.bv_page;

		sg_set_page(&sg, page, data_unit_size, bv.bv_offset);

		/* Decrypt each data unit in the segment */
		for (i = 0; i < bv.bv_len; i += data_unit_size) {
			blk_crypto_dun_to_iv(curr_dun, &iv);
			if (crypto_wait_req(crypto_skcipher_decrypt(ciph_req),
					    &wait)) {
				bio->bi_status = BLK_STS_IOERR;
				goto out;
			}
			bio_crypt_dun_increment(curr_dun, 1);
			sg.offset += data_unit_size;
		}
	}

out:
	skcipher_request_free(ciph_req);
	blk_crypto_put_keyslot(slot);
out_no_keyslot:
	mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
	bio_endio(bio);
}

/**
 * blk_crypto_fallback_decrypt_endio - queue bio for fallback decryption
 *
 * @bio: the bio to queue
 *
 * Restore bi_private and bi_end_io, and queue the bio for decryption into a
 * workqueue, since this function will be called from an atomic context.
 */
static void blk_crypto_fallback_decrypt_endio(struct bio *bio)
{
	struct bio_fallback_crypt_ctx *f_ctx = bio->bi_private;

	bio->bi_private = f_ctx->bi_private_orig;
	bio->bi_end_io = f_ctx->bi_end_io_orig;

	/* If there was an IO error, don't queue for decrypt. */
	if (bio->bi_status) {
		mempool_free(f_ctx, bio_fallback_crypt_ctx_pool);
		bio_endio(bio);
		return;
	}

	INIT_WORK(&f_ctx->work, blk_crypto_fallback_decrypt_bio);
	f_ctx->bio = bio;
	queue_work(blk_crypto_wq, &f_ctx->work);
}

/**
 * blk_crypto_fallback_bio_prep - Prepare a bio to use fallback en/decryption
 *
 * @bio_ptr: pointer to the bio to prepare
 *
 * If bio is doing a WRITE operation, this splits the bio into two parts if it's
 * too big (see blk_crypto_fallback_split_bio_if_needed()). It then allocates a
 * bounce bio for the first part, encrypts it, and updates bio_ptr to point to
 * the bounce bio.
 *
 * For a READ operation, we mark the bio for decryption by using bi_private and
 * bi_end_io.
 *
 * In either case, this function will make the bio look like a regular bio (i.e.
 * as if no encryption context was ever specified) for the purposes of the rest
 * of the stack except for blk-integrity (blk-integrity and blk-crypto are not
 * currently supported together).
 *
 * Return: true on success. Sets bio->bi_status and returns false on error.
 */
bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
{
	struct bio *bio = *bio_ptr;
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;
	struct bio_fallback_crypt_ctx *f_ctx;

	if (WARN_ON_ONCE(!tfms_inited[bc->bc_key->crypto_cfg.crypto_mode])) {
		/* User didn't call blk_crypto_start_using_key() first */
		bio->bi_status = BLK_STS_IOERR;
		return false;
	}

	if (!__blk_crypto_cfg_supported(blk_crypto_fallback_profile,
					&bc->bc_key->crypto_cfg)) {
		bio->bi_status = BLK_STS_NOTSUPP;
		return false;
	}

	if (bio_data_dir(bio) == WRITE)
		return blk_crypto_fallback_encrypt_bio(bio_ptr);

	/*
	 * bio READ case: Set up a f_ctx in the bio's bi_private and set the
	 * bi_end_io appropriately to trigger decryption when the bio is ended.
	 */
	f_ctx = mempool_alloc(bio_fallback_crypt_ctx_pool, GFP_NOIO);
	f_ctx->crypt_ctx = *bc;
	f_ctx->crypt_iter = bio->bi_iter;
	f_ctx->bi_private_orig = bio->bi_private;
	f_ctx->bi_end_io_orig = bio->bi_end_io;
	bio->bi_private = (void *)f_ctx;
	bio->bi_end_io = blk_crypto_fallback_decrypt_endio;
	bio_crypt_free_ctx(bio);

	return true;
}
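
/* Evict @key from the fallback's blk_crypto_profile. */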
int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
	return __blk_crypto_evict_key(blk_crypto_fallback_profile, key);
}

static bool blk_crypto_fallback_inited;
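
/*
 * Lazily allocate the fallback's global resources (bio set, crypto profile,
 * workqueue, keyslots, bounce page pool, and crypt ctx mempool) the first time
 * any crypto mode starts being used.  Called with tfms_init_lock held.
 */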
static int blk_crypto_fallback_init(void)
{
	int i;
	int err;

	if (blk_crypto_fallback_inited)
		return 0;

	get_random_bytes(blank_key, sizeof(blank_key));

	err = bioset_init(&crypto_bio_split, 64, 0, 0);
	if (err)
		goto out;

	/* Dynamic allocation is needed because of lockdep_register_key(). */
	blk_crypto_fallback_profile =
		kzalloc(sizeof(*blk_crypto_fallback_profile), GFP_KERNEL);
	if (!blk_crypto_fallback_profile) {
		err = -ENOMEM;
		goto fail_free_bioset;
	}

	err = blk_crypto_profile_init(blk_crypto_fallback_profile,
				      blk_crypto_num_keyslots);
	if (err)
		goto fail_free_profile;
	err = -ENOMEM;

	blk_crypto_fallback_profile->ll_ops = blk_crypto_fallback_ll_ops;
	blk_crypto_fallback_profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;
	blk_crypto_fallback_profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_STANDARD;

	/* All blk-crypto modes have a crypto API fallback. */
	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
		blk_crypto_fallback_profile->modes_supported[i] = 0xFFFFFFFF;
	blk_crypto_fallback_profile->modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;

	blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
					WQ_UNBOUND | WQ_HIGHPRI |
					WQ_MEM_RECLAIM, num_online_cpus());
	if (!blk_crypto_wq)
		goto fail_destroy_profile;

	blk_crypto_keyslots = kcalloc(blk_crypto_num_keyslots,
				      sizeof(blk_crypto_keyslots[0]),
				      GFP_KERNEL);
	if (!blk_crypto_keyslots)
		goto fail_free_wq;

	blk_crypto_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_bounce_pg, 0);
	if (!blk_crypto_bounce_page_pool)
		goto fail_free_keyslots;

	bio_fallback_crypt_ctx_cache = KMEM_CACHE(bio_fallback_crypt_ctx, 0);
	if (!bio_fallback_crypt_ctx_cache)
		goto fail_free_bounce_page_pool;

	bio_fallback_crypt_ctx_pool =
		mempool_create_slab_pool(num_prealloc_fallback_crypt_ctxs,
					 bio_fallback_crypt_ctx_cache);
	if (!bio_fallback_crypt_ctx_pool)
		goto fail_free_crypt_ctx_cache;

	blk_crypto_fallback_inited = true;

	return 0;
fail_free_crypt_ctx_cache:
	kmem_cache_destroy(bio_fallback_crypt_ctx_cache);
fail_free_bounce_page_pool:
	mempool_destroy(blk_crypto_bounce_page_pool);
fail_free_keyslots:
	kfree(blk_crypto_keyslots);
fail_free_wq:
	destroy_workqueue(blk_crypto_wq);
fail_destroy_profile:
	blk_crypto_profile_destroy(blk_crypto_fallback_profile);
fail_free_profile:
	kfree(blk_crypto_fallback_profile);
fail_free_bioset:
	bioset_exit(&crypto_bio_split);
out:
	return err;
}

/*
 * Prepare blk-crypto-fallback for the specified crypto mode.
 * Returns -ENOPKG if the needed crypto API support is missing.
 */
int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
{
	const char *cipher_str = blk_crypto_modes[mode_num].cipher_str;
	struct blk_crypto_fallback_keyslot *slotp;
	unsigned int i;
	int err = 0;

	/*
	 * Fast path
	 * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
	 * for each i are visible before we try to access them.
	 */
	if (likely(smp_load_acquire(&tfms_inited[mode_num])))
		return 0;

	mutex_lock(&tfms_init_lock);
	if (tfms_inited[mode_num])
		goto out;

	err = blk_crypto_fallback_init();
	if (err)
		goto out;

	for (i = 0; i < blk_crypto_num_keyslots; i++) {
		slotp = &blk_crypto_keyslots[i];
		slotp->tfms[mode_num] = crypto_alloc_skcipher(cipher_str, 0, 0);
		if (IS_ERR(slotp->tfms[mode_num])) {
			err = PTR_ERR(slotp->tfms[mode_num]);
			if (err == -ENOENT) {
				pr_warn_once("Missing crypto API support for \"%s\"\n",
					     cipher_str);
				err = -ENOPKG;
			}
			slotp->tfms[mode_num] = NULL;
			goto out_free_tfms;
		}

		crypto_skcipher_set_flags(slotp->tfms[mode_num],
					  CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
	}

	/*
	 * Ensure that updates to blk_crypto_keyslots[i].tfms[mode_num]
	 * for each i are visible before we set tfms_inited[mode_num].
	 */
	smp_store_release(&tfms_inited[mode_num], true);
	goto out;

out_free_tfms:
	for (i = 0; i < blk_crypto_num_keyslots; i++) {
		slotp = &blk_crypto_keyslots[i];
		crypto_free_skcipher(slotp->tfms[mode_num]);
		slotp->tfms[mode_num] = NULL;
	}
out:
	mutex_unlock(&tfms_init_lock);
	return err;
}