ANDROID: fips140: remove in-place updating of live algorithms

The lab has confirmed that it is actually fine for users to keep using
non-FIPS code after the module has loaded if they were already using it
beforehand.  So remove the code that tried to prevent this by updating
live algorithms in-place.  Similarly, remove the call to
synchronize_rcu_tasks(), which no longer has any purpose.

We still need to move the live algorithms to a private list, so keep
doing that.  Keep appending "+orig" to cra_name as well, and start doing
the same for cra_driver_name too.
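
As a rough illustration, the sequestering step now amounts to the sketch
below.  It assumes the fips140 module context (the existing_live_algos list
and the usual crypto headers), and the helper name is made up; the real
change open-codes this in unregister_existing_fips140_algos():

  static void __init fips140_sequester_live_algo(struct crypto_alg *alg)
  {
          /* Lowest priority, so new users prefer the FIPS module's algos. */
          alg->cra_priority = 0;

          /* Append "+orig" to both names to distinguish the original from
           * the FIPS-checked versions.
           */
          strlcat(alg->cra_name, "+orig", CRYPTO_MAX_ALG_NAME);
          strlcat(alg->cra_driver_name, "+orig", CRYPTO_MAX_ALG_NAME);

          /* Keep it off crypto_alg_list so algorithm lookups (including
           * template instantiations like "hmac(sha256)") no longer find it.
           */
          list_move(&alg->cra_list, &existing_live_algos);
  }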

Bug: 188620248
Change-Id: I29c9faec7d7314484a03f9729924b2f892552c7c
Signed-off-by: Eric Biggers <ebiggers@google.com>
(cherry picked from commit 54aecb72dba94d1bb68844af6e4790b5823fb6ce)
@@ -130,7 +130,7 @@ static bool __init is_fips140_algo(struct crypto_alg *alg)
return false;
}
static LIST_HEAD(unchecked_fips140_algos);
static LIST_HEAD(existing_live_algos);
/*
* Release a list of algorithms which have been removed from crypto_alg_list.
@@ -173,38 +173,53 @@ static void __init unregister_existing_fips140_algos(void)
down_write(&crypto_alg_sem);
/*
* Find all registered algorithms that we care about, and move them to
* a private list so that they are no longer exposed via the algo
* lookup API. Subsequently, we will unregister them if they are not in
* active use. If they are, we cannot simply remove them but we can
* adapt them later to use our integrity checked backing code.
* Find all registered algorithms that we care about, and move them to a
* private list so that they are no longer exposed via the algo lookup
* API. Subsequently, we will unregister them if they are not in active
* use. If they are, we can't fully unregister them but we can ensure
* that new users won't use them.
*/
list_for_each_entry_safe(alg, tmp, &crypto_alg_list, cra_list) {
if (is_fips140_algo(alg)) {
if (refcount_read(&alg->cra_refcnt) == 1) {
/*
* This algorithm is not currently in use, but
* there may be template instances holding
* references to it via spawns. So let's tear
* it down like crypto_unregister_alg() would,
* but without releasing the lock, to prevent
* races with concurrent TFM allocations.
*/
alg->cra_flags |= CRYPTO_ALG_DEAD;
list_move(&alg->cra_list, &remove_list);
crypto_remove_spawns(alg, &spawns, NULL);
} else {
/*
* This algorithm is live, i.e., there are TFMs
* allocated that rely on it for its crypto
* transformations. We will swap these out
* later with integrity checked versions.
*/
pr_info("found already-live algorithm '%s' ('%s')\n",
alg->cra_name, alg->cra_driver_name);
list_move(&alg->cra_list,
&unchecked_fips140_algos);
}
if (!is_fips140_algo(alg))
continue;
if (refcount_read(&alg->cra_refcnt) == 1) {
/*
* This algorithm is not currently in use, but there may
* be template instances holding references to it via
* spawns. So let's tear it down like
* crypto_unregister_alg() would, but without releasing
* the lock, to prevent races with concurrent TFM
* allocations.
*/
alg->cra_flags |= CRYPTO_ALG_DEAD;
list_move(&alg->cra_list, &remove_list);
crypto_remove_spawns(alg, &spawns, NULL);
} else {
/*
* This algorithm is live, i.e. it has TFMs allocated,
* so we can't fully unregister it. It's not necessary
* to dynamically redirect existing users to the FIPS
* code, given that they can't be relying on FIPS
* certified crypto in the first place. However, we do
* need to ensure that new users will get the FIPS code.
*
* In most cases, setting alg->cra_priority to 0
* achieves this. However, that isn't enough for
* algorithms like "hmac(sha256)" that need to be
* instantiated from a template, since existing
* algorithms always take priority over a template being
* instantiated. Therefore, we move the algorithm to
* a private list so that algorithm lookups won't find
* it anymore. To further distinguish it from the FIPS
* algorithms, we also append "+orig" to its name.
*/
pr_info("found already-live algorithm '%s' ('%s')\n",
alg->cra_name, alg->cra_driver_name);
alg->cra_priority = 0;
strlcat(alg->cra_name, "+orig", CRYPTO_MAX_ALG_NAME);
strlcat(alg->cra_driver_name, "+orig",
CRYPTO_MAX_ALG_NAME);
list_move(&alg->cra_list, &existing_live_algos);
}
}
up_write(&crypto_alg_sem);
@@ -355,166 +370,6 @@ static bool __init check_fips140_module_hmac(void)
return true;
}
static bool __init update_live_fips140_algos(void)
{
struct crypto_alg *alg, *new_alg, *tmp;
/*
* Find all algorithms that we could not unregister the last time
* around, due to the fact that they were already in use.
*/
down_write(&crypto_alg_sem);
list_for_each_entry_safe(alg, tmp, &unchecked_fips140_algos, cra_list) {
/*
* Take this algo off the list before releasing the lock. This
* ensures that a concurrent invocation of
* crypto_unregister_alg() observes a consistent state, i.e.,
* the algo is still on the list, and crypto_unregister_alg()
* will release it, or it is not, and crypto_unregister_alg()
* will issue a warning but ignore this condition otherwise.
*/
list_del_init(&alg->cra_list);
up_write(&crypto_alg_sem);
/*
* Grab the algo that will replace the live one.
* Note that this will instantiate template based instances as
* well, as long as their driver name uses the conventional
* pattern of "template(algo)". In this case, we are relying on
* the fact that the templates carried by this module will
* supersede the builtin ones, due to the fact that they were
* registered later, and therefore appear first in the linked
* list. For example, "hmac(sha1-ce)" constructed using the
* builtin hmac template and the builtin SHA1 driver will be
* superseded by the integrity checked versions of HMAC and
* SHA1-ce carried in this module.
*
* Note that this takes a reference to the new algorithm which
* will never get released. This is intentional: once we copy
* the function pointers from the new algo into the old one, we
* cannot drop the new algo unless we are sure that the old one
has been released, and this is something we don't keep track
* of at the moment.
*/
new_alg = crypto_alg_mod_lookup(alg->cra_driver_name,
alg->cra_flags & CRYPTO_ALG_TYPE_MASK,
CRYPTO_ALG_TYPE_MASK | CRYPTO_NOLOAD);
if (IS_ERR(new_alg)) {
pr_crit("Failed to allocate '%s' for updating live algo (%ld)\n",
alg->cra_driver_name, PTR_ERR(new_alg));
return false;
}
/*
* The FIPS module's algorithms are expected to be built from
* the same source code as the in-kernel ones so that they are
* fully compatible. In general, there's no way to verify full
* compatibility at runtime, but we can at least verify that
* the algorithm properties match.
*/
if (alg->cra_ctxsize != new_alg->cra_ctxsize ||
alg->cra_alignmask != new_alg->cra_alignmask) {
pr_crit("Failed to update live algo '%s' due to mismatch:\n"
"cra_ctxsize : %u vs %u\n"
"cra_alignmask : 0x%x vs 0x%x\n",
alg->cra_driver_name,
alg->cra_ctxsize, new_alg->cra_ctxsize,
alg->cra_alignmask, new_alg->cra_alignmask);
return false;
}
/*
* Update the name and priority so the algorithm stands out as
* one that was updated in order to comply with FIPS140, and
* that it is not the preferred version for further use.
*/
strlcat(alg->cra_name, "+orig", CRYPTO_MAX_ALG_NAME);
alg->cra_priority = 0;
switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
struct aead_alg *old_aead, *new_aead;
struct skcipher_alg *old_skcipher, *new_skcipher;
struct shash_alg *old_shash, *new_shash;
struct rng_alg *old_rng, *new_rng;
case CRYPTO_ALG_TYPE_CIPHER:
alg->cra_u.cipher = new_alg->cra_u.cipher;
break;
case CRYPTO_ALG_TYPE_AEAD:
old_aead = container_of(alg, struct aead_alg, base);
new_aead = container_of(new_alg, struct aead_alg, base);
old_aead->setkey = new_aead->setkey;
old_aead->setauthsize = new_aead->setauthsize;
old_aead->encrypt = new_aead->encrypt;
old_aead->decrypt = new_aead->decrypt;
old_aead->init = new_aead->init;
old_aead->exit = new_aead->exit;
break;
case CRYPTO_ALG_TYPE_SKCIPHER:
old_skcipher = container_of(alg, struct skcipher_alg, base);
new_skcipher = container_of(new_alg, struct skcipher_alg, base);
old_skcipher->setkey = new_skcipher->setkey;
old_skcipher->encrypt = new_skcipher->encrypt;
old_skcipher->decrypt = new_skcipher->decrypt;
old_skcipher->init = new_skcipher->init;
old_skcipher->exit = new_skcipher->exit;
break;
case CRYPTO_ALG_TYPE_SHASH:
old_shash = container_of(alg, struct shash_alg, base);
new_shash = container_of(new_alg, struct shash_alg, base);
old_shash->init = new_shash->init;
old_shash->update = new_shash->update;
old_shash->final = new_shash->final;
old_shash->finup = new_shash->finup;
old_shash->digest = new_shash->digest;
old_shash->export = new_shash->export;
old_shash->import = new_shash->import;
old_shash->setkey = new_shash->setkey;
old_shash->init_tfm = new_shash->init_tfm;
old_shash->exit_tfm = new_shash->exit_tfm;
break;
case CRYPTO_ALG_TYPE_RNG:
old_rng = container_of(alg, struct rng_alg, base);
new_rng = container_of(new_alg, struct rng_alg, base);
old_rng->generate = new_rng->generate;
old_rng->seed = new_rng->seed;
old_rng->set_ent = new_rng->set_ent;
break;
default:
/*
* This should never happen: every item on the
* fips140_algorithms list should match one of the
* cases above, so if we end up here, something is
* definitely wrong.
*/
pr_crit("Unexpected type %u for algo %s, giving up ...\n",
alg->cra_flags & CRYPTO_ALG_TYPE_MASK,
alg->cra_driver_name);
return false;
}
/*
* Move the algorithm back to the algorithm list, so it is
* visible in /proc/crypto et al.
*/
down_write(&crypto_alg_sem);
list_add_tail(&alg->cra_list, &crypto_alg_list);
}
up_write(&crypto_alg_sem);
return true;
}
static void fips140_sha256(void *p, const u8 *data, unsigned int len, u8 *out,
int *hook_inuse)
{
@@ -613,19 +468,9 @@ fips140_init(void)
complete_all(&fips140_tests_done);
if (!update_live_fips140_algos())
goto panic;
if (!update_fips140_library_routines())
goto panic;
/*
* Wait until all tasks have at least been scheduled once and preempted
* voluntarily. This ensures that none of the superseded algorithms that
* were already in use will still be live.
*/
synchronize_rcu_tasks();
pr_info("module successfully loaded\n");
return 0;
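
Illustrative follow-up, not part of this patch: after the module loads, a new
allocation by algorithm name, e.g.

  /* Resolves to an instance built from the FIPS module's code, since any
   * pre-existing "hmac(sha256)" has been renamed to "hmac(sha256)+orig"
   * and moved off the lookup list.
   */
  struct crypto_shash *tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);

while transforms that were allocated before the module loaded keep running
the original code until they are freed, which the lab has confirmed is
acceptable.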