- // SPDX-License-Identifier: GPL-2.0
- /*
- * Inline encryption support for fscrypt
- *
- * Copyright 2019 Google LLC
- */
- /*
- * With "inline encryption", the block layer handles the decryption/encryption
- * as part of the bio, instead of the filesystem doing the crypto itself via
- * crypto API. See Documentation/block/inline-encryption.rst. fscrypt still
- * provides the key and IV to use.
- */
- #include <linux/blk-crypto.h>
- #include <linux/blkdev.h>
- #include <linux/buffer_head.h>
- #include <linux/sched/mm.h>
- #include <linux/slab.h>
- #include <linux/uio.h>
- #include "fscrypt_private.h"
- static struct block_device **fscrypt_get_devices(struct super_block *sb,
- unsigned int *num_devs)
- {
- struct block_device **devs;
- if (sb->s_cop->get_devices) {
- devs = sb->s_cop->get_devices(sb, num_devs);
- if (devs)
- return devs;
- }
- devs = kmalloc(sizeof(*devs), GFP_KERNEL);
- if (!devs)
- return ERR_PTR(-ENOMEM);
- devs[0] = sb->s_bdev;
- *num_devs = 1;
- return devs;
- }
- static unsigned int fscrypt_get_dun_bytes(const struct fscrypt_info *ci)
- {
- struct super_block *sb = ci->ci_inode->i_sb;
- unsigned int flags = fscrypt_policy_flags(&ci->ci_policy);
- int dun_bits;
- if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY)
- return offsetofend(union fscrypt_iv, nonce);
- if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64)
- return sizeof(__le64);
- if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32)
- return sizeof(__le32);
- /* Default case: IVs are just the file data unit index */
- dun_bits = fscrypt_max_file_dun_bits(sb, ci->ci_data_unit_bits);
- return DIV_ROUND_UP(dun_bits, 8);
- }
- /*
- * Log a message when starting to use blk-crypto (native) or blk-crypto-fallback
- * for an encryption mode for the first time. This is the blk-crypto
- * counterpart to the message logged when starting to use the crypto API for the
- * first time. A limitation is that these messages don't convey which specific
- * filesystems or files are using each implementation. However, *usually*
- * systems use just one implementation per mode, which makes these messages
- * helpful for debugging problems where the "wrong" implementation is used.
- */
- static void fscrypt_log_blk_crypto_impl(struct fscrypt_mode *mode,
- struct block_device **devs,
- unsigned int num_devs,
- const struct blk_crypto_config *cfg)
- {
- unsigned int i;
- for (i = 0; i < num_devs; i++) {
- if (!IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) ||
- blk_crypto_config_supported_natively(devs[i], cfg)) {
- if (!xchg(&mode->logged_blk_crypto_native, 1))
- pr_info("fscrypt: %s using blk-crypto (native)\n",
- mode->friendly_name);
- } else if (!xchg(&mode->logged_blk_crypto_fallback, 1)) {
- pr_info("fscrypt: %s using blk-crypto-fallback\n",
- mode->friendly_name);
- }
- }
- }
- /* Enable inline encryption for this file if supported. */
- int fscrypt_select_encryption_impl(struct fscrypt_info *ci,
- bool is_hw_wrapped_key)
- {
- const struct inode *inode = ci->ci_inode;
- struct super_block *sb = inode->i_sb;
- struct blk_crypto_config crypto_cfg;
- struct block_device **devs;
- unsigned int num_devs;
- unsigned int i;
- /* The file must need contents encryption, not filenames encryption */
- if (!S_ISREG(inode->i_mode))
- return 0;
- /* The crypto mode must have a blk-crypto counterpart */
- if (ci->ci_mode->blk_crypto_mode == BLK_ENCRYPTION_MODE_INVALID)
- return 0;
- /* The filesystem must be mounted with -o inlinecrypt */
- if (!(sb->s_flags & SB_INLINECRYPT))
- return 0;
- /*
- * When a page contains multiple logically contiguous filesystem blocks,
- * some filesystem code only calls fscrypt_mergeable_bio() for the first
- * block in the page. This is fine for most of fscrypt's IV generation
- * strategies, where contiguous blocks imply contiguous IVs. But it
- * doesn't work with IV_INO_LBLK_32. For now, simply exclude
- * IV_INO_LBLK_32 with blocksize != PAGE_SIZE from inline encryption.
- */
- if ((fscrypt_policy_flags(&ci->ci_policy) &
- FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) &&
- sb->s_blocksize != PAGE_SIZE)
- return 0;
- /*
- * On all the filesystem's block devices, blk-crypto must support the
- * crypto configuration that the file would use.
- */
- crypto_cfg.crypto_mode = ci->ci_mode->blk_crypto_mode;
- crypto_cfg.data_unit_size = 1U << ci->ci_data_unit_bits;
- crypto_cfg.dun_bytes = fscrypt_get_dun_bytes(ci);
- crypto_cfg.key_type =
- is_hw_wrapped_key ? BLK_CRYPTO_KEY_TYPE_HW_WRAPPED :
- BLK_CRYPTO_KEY_TYPE_STANDARD;
- devs = fscrypt_get_devices(sb, &num_devs);
- if (IS_ERR(devs))
- return PTR_ERR(devs);
- for (i = 0; i < num_devs; i++) {
- if (!blk_crypto_config_supported(devs[i], &crypto_cfg))
- goto out_free_devs;
- }
- fscrypt_log_blk_crypto_impl(ci->ci_mode, devs, num_devs, &crypto_cfg);
- ci->ci_inlinecrypt = true;
- out_free_devs:
- kfree(devs);
- return 0;
- }
- int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
- const u8 *raw_key, size_t raw_key_size,
- bool is_hw_wrapped,
- const struct fscrypt_info *ci)
- {
- const struct inode *inode = ci->ci_inode;
- struct super_block *sb = inode->i_sb;
- enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode;
- enum blk_crypto_key_type key_type = is_hw_wrapped ?
- BLK_CRYPTO_KEY_TYPE_HW_WRAPPED : BLK_CRYPTO_KEY_TYPE_STANDARD;
- struct blk_crypto_key *blk_key;
- struct block_device **devs;
- unsigned int num_devs;
- unsigned int i;
- int err;
- blk_key = kmalloc(sizeof(*blk_key), GFP_KERNEL);
- if (!blk_key)
- return -ENOMEM;
- err = blk_crypto_init_key(blk_key, raw_key, raw_key_size, key_type,
- crypto_mode, fscrypt_get_dun_bytes(ci),
- 1U << ci->ci_data_unit_bits);
- if (err) {
- fscrypt_err(inode, "error %d initializing blk-crypto key", err);
- goto fail;
- }
- /* Start using blk-crypto on all the filesystem's block devices. */
- devs = fscrypt_get_devices(sb, &num_devs);
- if (IS_ERR(devs)) {
- err = PTR_ERR(devs);
- goto fail;
- }
- for (i = 0; i < num_devs; i++) {
- err = blk_crypto_start_using_key(devs[i], blk_key);
- if (err)
- break;
- }
- kfree(devs);
- if (err) {
- fscrypt_err(inode, "error %d starting to use blk-crypto", err);
- goto fail;
- }
- /*
- * Pairs with the smp_load_acquire() in fscrypt_is_key_prepared().
- * I.e., here we publish ->blk_key with a RELEASE barrier so that
- * concurrent tasks can ACQUIRE it. Note that this concurrency is only
- * possible for per-mode keys, not for per-file keys.
- */
- smp_store_release(&prep_key->blk_key, blk_key);
- return 0;
- fail:
- kfree_sensitive(blk_key);
- return err;
- }
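- /*
- * Illustrative sketch (editorial addition, not part of the original file):
- * the reader side that pairs with the smp_store_release() above. The real
- * counterpart is fscrypt_is_key_prepared() in fscrypt_private.h; the
- * hypothetical example_key_is_prepared() below only shows the ordering idea:
- * if the ACQUIRE load observes a non-NULL ->blk_key, all writes done while
- * preparing the key are guaranteed to be visible as well. The #if 0 keeps
- * this sketch out of any build.
- */
- #if 0
- static bool example_key_is_prepared(struct fscrypt_prepared_key *prep_key)
- {
- /* Pairs with smp_store_release() in fscrypt_prepare_inline_crypt_key() */
- return smp_load_acquire(&prep_key->blk_key) != NULL;
- }
- #endif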
- void fscrypt_destroy_inline_crypt_key(struct super_block *sb,
- struct fscrypt_prepared_key *prep_key)
- {
- struct blk_crypto_key *blk_key = prep_key->blk_key;
- struct block_device **devs;
- unsigned int num_devs;
- unsigned int i;
- if (!blk_key)
- return;
- /* Evict the key from all the filesystem's block devices. */
- devs = fscrypt_get_devices(sb, &num_devs);
- if (!IS_ERR(devs)) {
- for (i = 0; i < num_devs; i++)
- blk_crypto_evict_key(devs[i], blk_key);
- kfree(devs);
- }
- kfree_sensitive(blk_key);
- }
- /*
- * Ask the inline encryption hardware to derive the software secret from a
- * hardware-wrapped key. Returns -EOPNOTSUPP if hardware-wrapped keys aren't
- * supported on this filesystem or hardware.
- */
- int fscrypt_derive_sw_secret(struct super_block *sb,
- const u8 *wrapped_key, size_t wrapped_key_size,
- u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE])
- {
- int err;
- /* The filesystem must be mounted with -o inlinecrypt. */
- if (!(sb->s_flags & SB_INLINECRYPT)) {
- fscrypt_warn(NULL,
- "%s: filesystem not mounted with inlinecrypt\n",
- sb->s_id);
- return -EOPNOTSUPP;
- }
- err = blk_crypto_derive_sw_secret(sb->s_bdev, wrapped_key,
- wrapped_key_size, sw_secret);
- if (err == -EOPNOTSUPP)
- fscrypt_warn(NULL,
- "%s: block device doesn't support hardware-wrapped keys\n",
- sb->s_id);
- return err;
- }
- bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode)
- {
- return inode->i_crypt_info->ci_inlinecrypt;
- }
- EXPORT_SYMBOL_GPL(__fscrypt_inode_uses_inline_crypto);
- static void fscrypt_generate_dun(const struct fscrypt_info *ci, u64 lblk_num,
- u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
- {
- u64 index = lblk_num << ci->ci_data_units_per_block_bits;
- union fscrypt_iv iv;
- int i;
- fscrypt_generate_iv(&iv, index, ci);
- BUILD_BUG_ON(FSCRYPT_MAX_IV_SIZE > BLK_CRYPTO_MAX_IV_SIZE);
- memset(dun, 0, BLK_CRYPTO_MAX_IV_SIZE);
- for (i = 0; i < ci->ci_mode->ivsize / sizeof(dun[0]); i++)
- dun[i] = le64_to_cpu(iv.dun[i]);
- }
- /**
- * fscrypt_set_bio_crypt_ctx() - prepare a file contents bio for inline crypto
- * @bio: a bio which will eventually be submitted to the file
- * @inode: the file's inode
- * @first_lblk: the first file logical block number in the I/O
- * @gfp_mask: memory allocation flags - these must be a waiting mask so that
- * bio_crypt_set_ctx can't fail.
- *
- * If the contents of the file should be encrypted (or decrypted) with inline
- * encryption, then assign the appropriate encryption context to the bio.
- *
- * Normally the bio should be newly allocated (i.e. no pages added yet), as
- * otherwise fscrypt_mergeable_bio() won't work as intended.
- *
- * The encryption context will be freed automatically when the bio is freed.
- *
- * This function also handles setting bi_skip_dm_default_key when needed.
- */
- void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
- u64 first_lblk, gfp_t gfp_mask)
- {
- const struct fscrypt_info *ci;
- u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
- if (fscrypt_inode_should_skip_dm_default_key(inode))
- bio_set_skip_dm_default_key(bio);
- if (!fscrypt_inode_uses_inline_crypto(inode))
- return;
- ci = inode->i_crypt_info;
- fscrypt_generate_dun(ci, first_lblk, dun);
- bio_crypt_set_ctx(bio, ci->ci_enc_key.blk_key, dun, gfp_mask);
- }
- EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx);
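- /*
- * Illustrative sketch (editorial addition, not part of the original file):
- * roughly how a filesystem's read path might use fscrypt_set_bio_crypt_ctx()
- * when building a bio. Names prefixed "example_" are hypothetical; the bio
- * and fscrypt calls are the real kernel APIs, and the #if 0 keeps this
- * sketch out of any build.
- */
- #if 0
- static void example_fs_read_block(struct inode *inode, struct page *page,
- u64 lblk, sector_t sector)
- {
- struct bio *bio;
- bio = bio_alloc(inode->i_sb->s_bdev, 1, REQ_OP_READ, GFP_NOFS);
- /* Set the crypt context before adding pages, with a waiting gfp_mask */
- fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS);
- bio->bi_iter.bi_sector = sector;
- __bio_add_page(bio, page, PAGE_SIZE, 0);
- submit_bio(bio);
- }
- #endif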
- /* Extract the inode and logical block number from a buffer_head. */
- static bool bh_get_inode_and_lblk_num(const struct buffer_head *bh,
- const struct inode **inode_ret,
- u64 *lblk_num_ret)
- {
- struct page *page = bh->b_page;
- const struct address_space *mapping;
- const struct inode *inode;
- /*
- * The ext4 journal (jbd2) can submit a buffer_head it directly created
- * for a non-pagecache page. fscrypt doesn't care about these.
- */
- mapping = page_mapping(page);
- if (!mapping)
- return false;
- inode = mapping->host;
- *inode_ret = inode;
- *lblk_num_ret = ((u64)page->index << (PAGE_SHIFT - inode->i_blkbits)) +
- (bh_offset(bh) >> inode->i_blkbits);
- return true;
- }
- /**
- * fscrypt_set_bio_crypt_ctx_bh() - prepare a file contents bio for inline
- * crypto
- * @bio: a bio which will eventually be submitted to the file
- * @first_bh: the first buffer_head for which I/O will be submitted
- * @gfp_mask: memory allocation flags
- *
- * Same as fscrypt_set_bio_crypt_ctx(), except this takes a buffer_head instead
- * of an inode and block number directly.
- */
- void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio,
- const struct buffer_head *first_bh,
- gfp_t gfp_mask)
- {
- const struct inode *inode;
- u64 first_lblk;
- if (bh_get_inode_and_lblk_num(first_bh, &inode, &first_lblk))
- fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, gfp_mask);
- }
- EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx_bh);
- /**
- * fscrypt_mergeable_bio() - test whether data can be added to a bio
- * @bio: the bio being built up
- * @inode: the inode for the next part of the I/O
- * @next_lblk: the next file logical block number in the I/O
- *
- * When building a bio which may contain data which should undergo inline
- * encryption (or decryption) via fscrypt, filesystems should call this function
- * to ensure that the resulting bio contains only contiguous data unit numbers.
- * This will return false if the next part of the I/O cannot be merged with the
- * bio because either the encryption key would be different or the encryption
- * data unit numbers would be discontiguous.
- *
- * fscrypt_set_bio_crypt_ctx() must have already been called on the bio.
- *
- * This function isn't required in cases where crypto-mergeability is ensured in
- * another way, such as I/O targeting only a single file (and thus a single key)
- * combined with fscrypt_limit_io_blocks() to ensure DUN contiguity.
- *
- * This function also returns false if the next part of the I/O would need to
- * have a different value for the bi_skip_dm_default_key flag.
- *
- * Return: true iff the I/O is mergeable
- */
- bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
- u64 next_lblk)
- {
- const struct bio_crypt_ctx *bc = bio->bi_crypt_context;
- u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
- if (!!bc != fscrypt_inode_uses_inline_crypto(inode))
- return false;
- if (bio_should_skip_dm_default_key(bio) !=
- fscrypt_inode_should_skip_dm_default_key(inode))
- return false;
- if (!bc)
- return true;
- /*
- * Comparing the key pointers is good enough, as all I/O for each key
- * uses the same pointer. I.e., there's currently no need to support
- * merging requests where the keys are the same but the pointers differ.
- */
- if (bc->bc_key != inode->i_crypt_info->ci_enc_key.blk_key)
- return false;
- fscrypt_generate_dun(inode->i_crypt_info, next_lblk, next_dun);
- return bio_crypt_dun_is_contiguous(bc, bio->bi_iter.bi_size, next_dun);
- }
- EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio);
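- /*
- * Illustrative sketch (editorial addition, not part of the original file):
- * how a filesystem might consult fscrypt_mergeable_bio() while growing a bio
- * one block at a time. The helper name "example_fs_try_add_block" is
- * hypothetical; a real caller would submit the current bio and start a new
- * one whenever this returns false.
- */
- #if 0
- static bool example_fs_try_add_block(struct bio *bio, struct inode *inode,
- u64 lblk, struct page *page,
- unsigned int len, unsigned int offset)
- {
- if (!fscrypt_mergeable_bio(bio, inode, lblk))
- return false;
- return bio_add_page(bio, page, len, offset) == len;
- }
- #endif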
- /**
- * fscrypt_mergeable_bio_bh() - test whether data can be added to a bio
- * @bio: the bio being built up
- * @next_bh: the next buffer_head for which I/O will be submitted
- *
- * Same as fscrypt_mergeable_bio(), except this takes a buffer_head instead of
- * an inode and block number directly.
- *
- * Return: true iff the I/O is mergeable
- */
- bool fscrypt_mergeable_bio_bh(struct bio *bio,
- const struct buffer_head *next_bh)
- {
- const struct inode *inode;
- u64 next_lblk;
- if (!bh_get_inode_and_lblk_num(next_bh, &inode, &next_lblk))
- return !bio->bi_crypt_context &&
- !bio_should_skip_dm_default_key(bio);
- return fscrypt_mergeable_bio(bio, inode, next_lblk);
- }
- EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio_bh);
- /**
- * fscrypt_dio_supported() - check whether DIO (direct I/O) is supported on an
- * inode, as far as encryption is concerned
- * @inode: the inode in question
- *
- * Return: %true if there are no encryption constraints that prevent DIO from
- * being supported; %false if DIO is unsupported. (Note that in the
- * %true case, the filesystem might have other, non-encryption-related
- * constraints that prevent DIO from actually being supported. Also, on
- * encrypted files the filesystem is still responsible for only allowing
- * DIO when requests are filesystem-block-aligned.)
- */
- bool fscrypt_dio_supported(struct inode *inode)
- {
- int err;
- /* If the file is unencrypted, no veto from us. */
- if (!fscrypt_needs_contents_encryption(inode))
- return true;
- /*
- * We only support DIO with inline crypto, not fs-layer crypto.
- *
- * To determine whether the inode is using inline crypto, we have to set
- * up the key if it wasn't already done. This is because in the current
- * design of fscrypt, the decision of whether to use inline crypto or
- * not isn't made until the inode's encryption key is being set up. In
- * the DIO read/write case, the key will always be set up already, since
- * the file will be open. But in the case of statx(), the key might not
- * be set up yet, as the file might not have been opened yet.
- */
- err = fscrypt_require_key(inode);
- if (err) {
- /*
- * Key unavailable or couldn't be set up. This edge case isn't
- * worth worrying about; just report that DIO is unsupported.
- */
- return false;
- }
- return fscrypt_inode_uses_inline_crypto(inode);
- }
- EXPORT_SYMBOL_GPL(fscrypt_dio_supported);
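- /*
- * Illustrative sketch (editorial addition, not part of the original file): a
- * filesystem would typically call fscrypt_dio_supported() early in its direct
- * I/O path and fall back to buffered I/O when it returns false. The helper
- * name "example_fs_dio_allowed" is hypothetical.
- */
- #if 0
- static bool example_fs_dio_allowed(struct kiocb *iocb, struct iov_iter *iter)
- {
- struct inode *inode = file_inode(iocb->ki_filp);
- if (!fscrypt_dio_supported(inode))
- return false;
- /* filesystem-block alignment and other fs-specific checks would go here */
- return true;
- }
- #endif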
- /**
- * fscrypt_limit_io_blocks() - limit I/O blocks to avoid discontiguous DUNs
- * @inode: the file on which I/O is being done
- * @lblk: the block at which the I/O is being started from
- * @nr_blocks: the number of blocks we want to submit starting at @lblk
- *
- * Determine the limit to the number of blocks that can be submitted in a bio
- * targeting @lblk without causing a data unit number (DUN) discontiguity.
- *
- * This is normally just @nr_blocks, as normally the DUNs just increment along
- * with the logical blocks. (Or the file is not encrypted.)
- *
- * In rare cases, fscrypt can be using an IV generation method that allows the
- * DUN to wrap around within logically contiguous blocks, and that wraparound
- * will occur. If this happens, a value less than @nr_blocks will be returned
- * so that the wraparound doesn't occur in the middle of a bio, which would
- * cause encryption/decryption to produce wrong results.
- *
- * Return: the actual number of blocks that can be submitted
- */
- u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, u64 nr_blocks)
- {
- const struct fscrypt_info *ci;
- u32 dun;
- if (!fscrypt_inode_uses_inline_crypto(inode))
- return nr_blocks;
- if (nr_blocks <= 1)
- return nr_blocks;
- ci = inode->i_crypt_info;
- if (!(fscrypt_policy_flags(&ci->ci_policy) &
- FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32))
- return nr_blocks;
- /* With IV_INO_LBLK_32, the DUN can wrap around from U32_MAX to 0. */
- dun = ci->ci_hashed_ino + lblk;
- return min_t(u64, nr_blocks, (u64)U32_MAX + 1 - dun);
- }
- EXPORT_SYMBOL_GPL(fscrypt_limit_io_blocks);
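- /*
- * Illustrative example (editorial addition, not part of the original file):
- * with IV_INO_LBLK_32, suppose ci_hashed_ino + lblk == 0xfffffffe. The DUNs
- * of the following blocks are 0xfffffffe, 0xffffffff, 0x00000000, ..., so
- * only U32_MAX + 1 - 0xfffffffe == 2 blocks may go in one bio, and
- * fscrypt_limit_io_blocks() returns min(nr_blocks, 2). A filesystem would
- * typically clamp its block mapping with this value before building bios;
- * the mapping structure below is hypothetical.
- */
- #if 0
- map->m_len = fscrypt_limit_io_blocks(inode, map->m_lblk, map->m_len);
- #endif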