inline_crypt.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Inline encryption support for fscrypt
 *
 * Copyright 2019 Google LLC
 */

/*
 * With "inline encryption", the block layer handles the decryption/encryption
 * as part of the bio, instead of the filesystem doing the crypto itself via
 * crypto API. See Documentation/block/inline-encryption.rst. fscrypt still
 * provides the key and IV to use.
 */
#include <linux/blk-crypto.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/uio.h>

#include "fscrypt_private.h"

static struct block_device **fscrypt_get_devices(struct super_block *sb,
                                                 unsigned int *num_devs)
{
        struct block_device **devs;

        if (sb->s_cop->get_devices) {
                devs = sb->s_cop->get_devices(sb, num_devs);
                if (devs)
                        return devs;
        }
        devs = kmalloc(sizeof(*devs), GFP_KERNEL);
        if (!devs)
                return ERR_PTR(-ENOMEM);
        devs[0] = sb->s_bdev;
        *num_devs = 1;
        return devs;
}

static unsigned int fscrypt_get_dun_bytes(const struct fscrypt_info *ci)
{
        struct super_block *sb = ci->ci_inode->i_sb;
        unsigned int flags = fscrypt_policy_flags(&ci->ci_policy);
        int dun_bits;

        if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY)
                return offsetofend(union fscrypt_iv, nonce);

        if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64)
                return sizeof(__le64);

        if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)
                return sizeof(__le32);

        /* Default case: IVs are just the file data unit index */
        dun_bits = fscrypt_max_file_dun_bits(sb, ci->ci_data_unit_bits);
        return DIV_ROUND_UP(dun_bits, 8);
}
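
/*
 * Worked examples for the DUN sizes returned above (numbers are illustrative,
 * not taken from a specific configuration): IV_INO_LBLK_64 always yields
 * sizeof(__le64) == 8 bytes and IV_INO_LBLK_32 yields sizeof(__le32) == 4
 * bytes.  In the default case, if fscrypt_max_file_dun_bits() reports that
 * the largest possible data unit index needs, say, 34 bits, then
 * DIV_ROUND_UP(34, 8) == 5 bytes of DUN are requested from blk-crypto.
 */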

/*
 * Log a message when starting to use blk-crypto (native) or blk-crypto-fallback
 * for an encryption mode for the first time. This is the blk-crypto
 * counterpart to the message logged when starting to use the crypto API for the
 * first time. A limitation is that these messages don't convey which specific
 * filesystems or files are using each implementation. However, *usually*
 * systems use just one implementation per mode, which makes these messages
 * helpful for debugging problems where the "wrong" implementation is used.
 */
static void fscrypt_log_blk_crypto_impl(struct fscrypt_mode *mode,
                                        struct block_device **devs,
                                        unsigned int num_devs,
                                        const struct blk_crypto_config *cfg)
{
        unsigned int i;

        for (i = 0; i < num_devs; i++) {
                if (!IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) ||
                    blk_crypto_config_supported_natively(devs[i], cfg)) {
                        if (!xchg(&mode->logged_blk_crypto_native, 1))
                                pr_info("fscrypt: %s using blk-crypto (native)\n",
                                        mode->friendly_name);
                } else if (!xchg(&mode->logged_blk_crypto_fallback, 1)) {
                        pr_info("fscrypt: %s using blk-crypto-fallback\n",
                                mode->friendly_name);
                }
        }
}

/* Enable inline encryption for this file if supported. */
int fscrypt_select_encryption_impl(struct fscrypt_info *ci,
                                   bool is_hw_wrapped_key)
{
        const struct inode *inode = ci->ci_inode;
        struct super_block *sb = inode->i_sb;
        struct blk_crypto_config crypto_cfg;
        struct block_device **devs;
        unsigned int num_devs;
        unsigned int i;

        /* The file must need contents encryption, not filenames encryption */
        if (!S_ISREG(inode->i_mode))
                return 0;

        /* The crypto mode must have a blk-crypto counterpart */
        if (ci->ci_mode->blk_crypto_mode == BLK_ENCRYPTION_MODE_INVALID)
                return 0;

        /* The filesystem must be mounted with -o inlinecrypt */
        if (!(sb->s_flags & SB_INLINECRYPT))
                return 0;

        /*
         * When a page contains multiple logically contiguous filesystem blocks,
         * some filesystem code only calls fscrypt_mergeable_bio() for the first
         * block in the page. This is fine for most of fscrypt's IV generation
         * strategies, where contiguous blocks imply contiguous IVs. But it
         * doesn't work with IV_INO_LBLK_32. For now, simply exclude
         * IV_INO_LBLK_32 with blocksize != PAGE_SIZE from inline encryption.
         */
        if ((fscrypt_policy_flags(&ci->ci_policy) &
             FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) &&
            sb->s_blocksize != PAGE_SIZE)
                return 0;

        /*
         * On all the filesystem's block devices, blk-crypto must support the
         * crypto configuration that the file would use.
         */
        crypto_cfg.crypto_mode = ci->ci_mode->blk_crypto_mode;
        crypto_cfg.data_unit_size = 1U << ci->ci_data_unit_bits;
        crypto_cfg.dun_bytes = fscrypt_get_dun_bytes(ci);
        crypto_cfg.key_type =
                is_hw_wrapped_key ? BLK_CRYPTO_KEY_TYPE_HW_WRAPPED :
                                    BLK_CRYPTO_KEY_TYPE_STANDARD;

        devs = fscrypt_get_devices(sb, &num_devs);
        if (IS_ERR(devs))
                return PTR_ERR(devs);

        for (i = 0; i < num_devs; i++) {
                if (!blk_crypto_config_supported(devs[i], &crypto_cfg))
                        goto out_free_devs;
        }

        fscrypt_log_blk_crypto_impl(ci->ci_mode, devs, num_devs, &crypto_cfg);

        ci->ci_inlinecrypt = true;
out_free_devs:
        kfree(devs);

        return 0;
}

int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
                                     const u8 *raw_key, size_t raw_key_size,
                                     bool is_hw_wrapped,
                                     const struct fscrypt_info *ci)
{
        const struct inode *inode = ci->ci_inode;
        struct super_block *sb = inode->i_sb;
        enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode;
        enum blk_crypto_key_type key_type = is_hw_wrapped ?
                BLK_CRYPTO_KEY_TYPE_HW_WRAPPED : BLK_CRYPTO_KEY_TYPE_STANDARD;
        struct blk_crypto_key *blk_key;
        struct block_device **devs;
        unsigned int num_devs;
        unsigned int i;
        int err;

        blk_key = kmalloc(sizeof(*blk_key), GFP_KERNEL);
        if (!blk_key)
                return -ENOMEM;

        err = blk_crypto_init_key(blk_key, raw_key, raw_key_size, key_type,
                                  crypto_mode, fscrypt_get_dun_bytes(ci),
                                  1U << ci->ci_data_unit_bits);
        if (err) {
                fscrypt_err(inode, "error %d initializing blk-crypto key", err);
                goto fail;
        }

        /* Start using blk-crypto on all the filesystem's block devices. */
        devs = fscrypt_get_devices(sb, &num_devs);
        if (IS_ERR(devs)) {
                err = PTR_ERR(devs);
                goto fail;
        }
        for (i = 0; i < num_devs; i++) {
                err = blk_crypto_start_using_key(devs[i], blk_key);
                if (err)
                        break;
        }
        kfree(devs);
        if (err) {
                fscrypt_err(inode, "error %d starting to use blk-crypto", err);
                goto fail;
        }

        /*
         * Pairs with the smp_load_acquire() in fscrypt_is_key_prepared().
         * I.e., here we publish ->blk_key with a RELEASE barrier so that
         * concurrent tasks can ACQUIRE it. Note that this concurrency is only
         * possible for per-mode keys, not for per-file keys.
         */
        smp_store_release(&prep_key->blk_key, blk_key);
        return 0;

fail:
        kfree_sensitive(blk_key);
        return err;
}
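
/*
 * Illustrative sketch of the ACQUIRE side that the comment above refers to.
 * The real helper, fscrypt_is_key_prepared(), lives in fscrypt_private.h;
 * this simplified approximation (example_key_is_prepared is a hypothetical
 * name) only shows the memory-ordering pairing: a reader must load ->blk_key
 * with acquire semantics so it observes the fully initialized key published
 * by the smp_store_release() above.
 *
 *	// Simplified: has an inline-crypt key been published yet?
 *	static inline bool
 *	example_key_is_prepared(struct fscrypt_prepared_key *prep_key)
 *	{
 *		// Pairs with smp_store_release(&prep_key->blk_key, blk_key).
 *		return smp_load_acquire(&prep_key->blk_key) != NULL;
 *	}
 */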

void fscrypt_destroy_inline_crypt_key(struct super_block *sb,
                                      struct fscrypt_prepared_key *prep_key)
{
        struct blk_crypto_key *blk_key = prep_key->blk_key;
        struct block_device **devs;
        unsigned int num_devs;
        unsigned int i;

        if (!blk_key)
                return;

        /* Evict the key from all the filesystem's block devices. */
        devs = fscrypt_get_devices(sb, &num_devs);
        if (!IS_ERR(devs)) {
                for (i = 0; i < num_devs; i++)
                        blk_crypto_evict_key(devs[i], blk_key);
                kfree(devs);
        }
        kfree_sensitive(blk_key);
}

/*
 * Ask the inline encryption hardware to derive the software secret from a
 * hardware-wrapped key. Returns -EOPNOTSUPP if hardware-wrapped keys aren't
 * supported on this filesystem or hardware.
 */
int fscrypt_derive_sw_secret(struct super_block *sb,
                             const u8 *wrapped_key, size_t wrapped_key_size,
                             u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE])
{
        int err;

        /* The filesystem must be mounted with -o inlinecrypt. */
        if (!(sb->s_flags & SB_INLINECRYPT)) {
                fscrypt_warn(NULL,
                             "%s: filesystem not mounted with inlinecrypt\n",
                             sb->s_id);
                return -EOPNOTSUPP;
        }

        err = blk_crypto_derive_sw_secret(sb->s_bdev, wrapped_key,
                                          wrapped_key_size, sw_secret);
        if (err == -EOPNOTSUPP)
                fscrypt_warn(NULL,
                             "%s: block device doesn't support hardware-wrapped keys\n",
                             sb->s_id);
        return err;
}

bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode)
{
        return inode->i_crypt_info->ci_inlinecrypt;
}
EXPORT_SYMBOL_GPL(__fscrypt_inode_uses_inline_crypto);

static void fscrypt_generate_dun(const struct fscrypt_info *ci, u64 lblk_num,
                                 u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
        u64 index = lblk_num << ci->ci_data_units_per_block_bits;
        union fscrypt_iv iv;
        int i;

        fscrypt_generate_iv(&iv, index, ci);

        BUILD_BUG_ON(FSCRYPT_MAX_IV_SIZE > BLK_CRYPTO_MAX_IV_SIZE);
        memset(dun, 0, BLK_CRYPTO_MAX_IV_SIZE);
        for (i = 0; i < ci->ci_mode->ivsize / sizeof(dun[0]); i++)
                dun[i] = le64_to_cpu(iv.dun[i]);
}
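
/*
 * Worked example for the index computation above (illustrative numbers): with
 * 4096-byte blocks and 512-byte data units, ci_data_units_per_block_bits is 3,
 * so logical block 10 starts at data unit index 10 << 3 == 80.  That index is
 * then expanded into the array of 64-bit DUN words via fscrypt_generate_iv().
 */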

/**
 * fscrypt_set_bio_crypt_ctx() - prepare a file contents bio for inline crypto
 * @bio: a bio which will eventually be submitted to the file
 * @inode: the file's inode
 * @first_lblk: the first file logical block number in the I/O
 * @gfp_mask: memory allocation flags - these must be a waiting mask so that
 *            bio_crypt_set_ctx can't fail.
 *
 * If the contents of the file should be encrypted (or decrypted) with inline
 * encryption, then assign the appropriate encryption context to the bio.
 *
 * Normally the bio should be newly allocated (i.e. no pages added yet), as
 * otherwise fscrypt_mergeable_bio() won't work as intended.
 *
 * The encryption context will be freed automatically when the bio is freed.
 *
 * This function also handles setting bi_skip_dm_default_key when needed.
 */
void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
                               u64 first_lblk, gfp_t gfp_mask)
{
        const struct fscrypt_info *ci;
        u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];

        if (fscrypt_inode_should_skip_dm_default_key(inode))
                bio_set_skip_dm_default_key(bio);

        if (!fscrypt_inode_uses_inline_crypto(inode))
                return;

        ci = inode->i_crypt_info;

        fscrypt_generate_dun(ci, first_lblk, dun);
        bio_crypt_set_ctx(bio, ci->ci_enc_key.blk_key, dun, gfp_mask);
}
EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx);
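
/*
 * Illustrative sketch (the example_* helper is hypothetical, not from this
 * file): per the kernel-doc above, the crypt context is attached right after
 * the bio is allocated, before any pages are added, and a waiting gfp mask is
 * used so that bio_crypt_set_ctx() cannot fail.
 *
 *	static struct bio *example_alloc_read_bio(struct block_device *bdev,
 *						  const struct inode *inode,
 *						  u64 first_lblk,
 *						  struct page *page)
 *	{
 *		struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_NOFS);
 *
 *		// Attach key + starting DUN while the bio is still empty.
 *		fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, GFP_NOFS);
 *		bio_add_page(bio, page, PAGE_SIZE, 0);
 *		return bio;
 *	}
 */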

/* Extract the inode and logical block number from a buffer_head. */
static bool bh_get_inode_and_lblk_num(const struct buffer_head *bh,
                                      const struct inode **inode_ret,
                                      u64 *lblk_num_ret)
{
        struct page *page = bh->b_page;
        const struct address_space *mapping;
        const struct inode *inode;

        /*
         * The ext4 journal (jbd2) can submit a buffer_head it directly created
         * for a non-pagecache page. fscrypt doesn't care about these.
         */
        mapping = page_mapping(page);
        if (!mapping)
                return false;
        inode = mapping->host;

        *inode_ret = inode;
        *lblk_num_ret = ((u64)page->index << (PAGE_SHIFT - inode->i_blkbits)) +
                        (bh_offset(bh) >> inode->i_blkbits);
        return true;
}

/**
 * fscrypt_set_bio_crypt_ctx_bh() - prepare a file contents bio for inline
 *				    crypto
 * @bio: a bio which will eventually be submitted to the file
 * @first_bh: the first buffer_head for which I/O will be submitted
 * @gfp_mask: memory allocation flags
 *
 * Same as fscrypt_set_bio_crypt_ctx(), except this takes a buffer_head instead
 * of an inode and block number directly.
 */
void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio,
                                  const struct buffer_head *first_bh,
                                  gfp_t gfp_mask)
{
        const struct inode *inode;
        u64 first_lblk;

        if (bh_get_inode_and_lblk_num(first_bh, &inode, &first_lblk))
                fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, gfp_mask);
}
EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx_bh);

/**
 * fscrypt_mergeable_bio() - test whether data can be added to a bio
 * @bio: the bio being built up
 * @inode: the inode for the next part of the I/O
 * @next_lblk: the next file logical block number in the I/O
 *
 * When building a bio which may contain data which should undergo inline
 * encryption (or decryption) via fscrypt, filesystems should call this function
 * to ensure that the resulting bio contains only contiguous data unit numbers.
 * This will return false if the next part of the I/O cannot be merged with the
 * bio because either the encryption key would be different or the encryption
 * data unit numbers would be discontiguous.
 *
 * fscrypt_set_bio_crypt_ctx() must have already been called on the bio.
 *
 * This function isn't required in cases where crypto-mergeability is ensured in
 * another way, such as I/O targeting only a single file (and thus a single key)
 * combined with fscrypt_limit_io_blocks() to ensure DUN contiguity.
 *
 * This function also returns false if the next part of the I/O would need to
 * have a different value for the bi_skip_dm_default_key flag.
 *
 * Return: true iff the I/O is mergeable
 */
bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
                           u64 next_lblk)
{
        const struct bio_crypt_ctx *bc = bio->bi_crypt_context;
        u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];

        if (!!bc != fscrypt_inode_uses_inline_crypto(inode))
                return false;
        if (bio_should_skip_dm_default_key(bio) !=
            fscrypt_inode_should_skip_dm_default_key(inode))
                return false;
        if (!bc)
                return true;

        /*
         * Comparing the key pointers is good enough, as all I/O for each key
         * uses the same pointer. I.e., there's currently no need to support
         * merging requests where the keys are the same but the pointers differ.
         */
        if (bc->bc_key != inode->i_crypt_info->ci_enc_key.blk_key)
                return false;

        fscrypt_generate_dun(inode->i_crypt_info, next_lblk, next_dun);
        return bio_crypt_dun_is_contiguous(bc, bio->bi_iter.bi_size, next_dun);
}
EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio);
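
/*
 * Illustrative sketch (the example_* helper is hypothetical; assumes
 * blocksize == PAGE_SIZE): when appending a block to an in-flight bio, a
 * filesystem checks crypto-mergeability first and submits the current bio if
 * the new block can't share it.
 *
 *	// Returns true if @page at @lblk was added to @bio; false means the
 *	// caller must submit @bio and start a new one.
 *	static bool example_try_add_block(struct bio *bio, struct inode *inode,
 *					  u64 lblk, struct page *page)
 *	{
 *		if (!fscrypt_mergeable_bio(bio, inode, lblk))
 *			return false;
 *		return bio_add_page(bio, page, PAGE_SIZE, 0) != 0;
 *	}
 */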

/**
 * fscrypt_mergeable_bio_bh() - test whether data can be added to a bio
 * @bio: the bio being built up
 * @next_bh: the next buffer_head for which I/O will be submitted
 *
 * Same as fscrypt_mergeable_bio(), except this takes a buffer_head instead of
 * an inode and block number directly.
 *
 * Return: true iff the I/O is mergeable
 */
bool fscrypt_mergeable_bio_bh(struct bio *bio,
                              const struct buffer_head *next_bh)
{
        const struct inode *inode;
        u64 next_lblk;

        if (!bh_get_inode_and_lblk_num(next_bh, &inode, &next_lblk))
                return !bio->bi_crypt_context &&
                       !bio_should_skip_dm_default_key(bio);

        return fscrypt_mergeable_bio(bio, inode, next_lblk);
}
EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio_bh);

/**
 * fscrypt_dio_supported() - check whether DIO (direct I/O) is supported on an
 *			     inode, as far as encryption is concerned
 * @inode: the inode in question
 *
 * Return: %true if there are no encryption constraints that prevent DIO from
 *	   being supported; %false if DIO is unsupported. (Note that in the
 *	   %true case, the filesystem might have other, non-encryption-related
 *	   constraints that prevent DIO from actually being supported. Also, on
 *	   encrypted files the filesystem is still responsible for only allowing
 *	   DIO when requests are filesystem-block-aligned.)
 */
bool fscrypt_dio_supported(struct inode *inode)
{
        int err;

        /* If the file is unencrypted, no veto from us. */
        if (!fscrypt_needs_contents_encryption(inode))
                return true;

        /*
         * We only support DIO with inline crypto, not fs-layer crypto.
         *
         * To determine whether the inode is using inline crypto, we have to set
         * up the key if it wasn't already done. This is because in the current
         * design of fscrypt, the decision of whether to use inline crypto or
         * not isn't made until the inode's encryption key is being set up. In
         * the DIO read/write case, the key will always be set up already, since
         * the file will be open. But in the case of statx(), the key might not
         * be set up yet, as the file might not have been opened yet.
         */
        err = fscrypt_require_key(inode);
        if (err) {
                /*
                 * Key unavailable or couldn't be set up. This edge case isn't
                 * worth worrying about; just report that DIO is unsupported.
                 */
                return false;
        }
        return fscrypt_inode_uses_inline_crypto(inode);
}
EXPORT_SYMBOL_GPL(fscrypt_dio_supported);
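
/*
 * Illustrative sketch (the example_* helper is hypothetical): a filesystem's
 * DIO path would typically combine this check with its own alignment rule,
 * since fscrypt only vetoes DIO here and does not enforce block alignment
 * itself.
 *
 *	static bool example_dio_allowed(struct inode *inode, loff_t pos,
 *					size_t len)
 *	{
 *		if (!fscrypt_dio_supported(inode))
 *			return false;
 *		// Encrypted DIO must be filesystem-block-aligned.
 *		return IS_ALIGNED(pos | len, i_blocksize(inode));
 *	}
 */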

/**
 * fscrypt_limit_io_blocks() - limit I/O blocks to avoid discontiguous DUNs
 * @inode: the file on which I/O is being done
 * @lblk: the block at which the I/O is being started from
 * @nr_blocks: the number of blocks we want to submit starting at @lblk
 *
 * Determine the limit to the number of blocks that can be submitted in a bio
 * targeting @lblk without causing a data unit number (DUN) discontiguity.
 *
 * This is normally just @nr_blocks, as normally the DUNs just increment along
 * with the logical blocks. (Or the file is not encrypted.)
 *
 * In rare cases, fscrypt can be using an IV generation method that allows the
 * DUN to wrap around within logically contiguous blocks, and that wraparound
 * will occur. If this happens, a value less than @nr_blocks will be returned
 * so that the wraparound doesn't occur in the middle of a bio, which would
 * cause encryption/decryption to produce wrong results.
 *
 * Return: the actual number of blocks that can be submitted
 */
u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, u64 nr_blocks)
{
        const struct fscrypt_info *ci;
        u32 dun;

        if (!fscrypt_inode_uses_inline_crypto(inode))
                return nr_blocks;

        if (nr_blocks <= 1)
                return nr_blocks;

        ci = inode->i_crypt_info;
        if (!(fscrypt_policy_flags(&ci->ci_policy) &
              FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32))
                return nr_blocks;

        /* With IV_INO_LBLK_32, the DUN can wrap around from U32_MAX to 0. */
        dun = ci->ci_hashed_ino + lblk;

        return min_t(u64, nr_blocks, (u64)U32_MAX + 1 - dun);
}
EXPORT_SYMBOL_GPL(fscrypt_limit_io_blocks);
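
/*
 * Worked example of the IV_INO_LBLK_32 wraparound case above (numbers are
 * illustrative): if the 32-bit DUN for @lblk ends up two units below the
 * 32-bit limit, a request for 8 blocks is limited to 2, so the wrap to DUN 0
 * happens at a bio boundary rather than mid-bio.
 *
 *	dun       = 0xFFFFFFFE;			// ci_hashed_ino + lblk (mod 2^32)
 *	limit     = (u64)U32_MAX + 1 - dun;	// == 2
 *	nr_blocks = min_t(u64, 8, limit);	// == 2, not 8
 */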