diff --git a/android/gki_aarch64_fips140_modules b/android/gki_aarch64_fips140_modules new file mode 100644 index 000000000000..01d4fcc5c4ac --- /dev/null +++ b/android/gki_aarch64_fips140_modules @@ -0,0 +1 @@ +crypto/fips140.ko diff --git a/arch/arm64/configs/fips140_gki.fragment b/arch/arm64/configs/fips140_gki.fragment new file mode 100644 index 000000000000..68292520be10 --- /dev/null +++ b/arch/arm64/configs/fips140_gki.fragment @@ -0,0 +1 @@ +CONFIG_CRYPTO_FIPS140_MOD=y diff --git a/arch/arm64/crypto/Kbuild.fips140 b/arch/arm64/crypto/Kbuild.fips140 new file mode 100644 index 000000000000..986bdad2f50d --- /dev/null +++ b/arch/arm64/crypto/Kbuild.fips140 @@ -0,0 +1,44 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Create a separate FIPS archive that duplicates the modules that are relevant +# for FIPS 140 certification as builtin objects +# + +sha1-ce-y := sha1-ce-glue.o sha1-ce-core.o +sha2-ce-y := sha2-ce-glue.o sha2-ce-core.o +sha512-ce-y := sha512-ce-glue.o sha512-ce-core.o +ghash-ce-y := ghash-ce-glue.o ghash-ce-core.o +aes-ce-cipher-y := aes-ce-core.o aes-ce-glue.o +aes-ce-blk-y := aes-glue-ce.o aes-ce.o +aes-neon-blk-y := aes-glue-neon.o aes-neon.o +sha256-arm64-y := sha256-glue.o sha256-core.o +sha512-arm64-y := sha512-glue.o sha512-core.o +aes-arm64-y := aes-cipher-core.o aes-cipher-glue.o +aes-neon-bs-y := aes-neonbs-core.o aes-neonbs-glue.o + +crypto-arm64-fips-src := $(srctree)/arch/arm64/crypto/ +crypto-arm64-fips-modules := sha1-ce.o sha2-ce.o sha512-ce.o ghash-ce.o \ + aes-ce-cipher.o aes-ce-blk.o aes-neon-blk.o \ + sha256-arm64.o sha512-arm64.o aes-arm64.o \ + aes-neon-bs.o + +crypto-fips-objs += $(foreach o,$(crypto-arm64-fips-modules),$($(o:.o=-y):.o=-fips-arch.o)) + +CFLAGS_aes-glue-ce-fips-arch.o := -DUSE_V8_CRYPTO_EXTENSIONS + +$(obj)/aes-glue-%-fips-arch.o: KBUILD_CFLAGS += $(FIPS140_CFLAGS) +$(obj)/aes-glue-%-fips-arch.o: $(crypto-arm64-fips-src)/aes-glue.c FORCE + $(call if_changed_rule,cc_o_c) + +$(obj)/%-fips-arch.o: KBUILD_CFLAGS += $(FIPS140_CFLAGS) +$(obj)/%-fips-arch.o: $(crypto-arm64-fips-src)/%.c FORCE + $(call if_changed_rule,cc_o_c) + +$(obj)/%-fips-arch.o: $(crypto-arm64-fips-src)/%.S FORCE + $(call if_changed_rule,as_o_S) + +$(obj)/%: $(crypto-arm64-fips-src)/%_shipped + $(call cmd,shipped) + +$(obj)/%-fips-arch.o: $(obj)/%.S FORCE + $(call if_changed_rule,as_o_S) diff --git a/arch/arm64/include/asm/module.lds.h b/arch/arm64/include/asm/module.lds.h index 810045628c66..0b736bff0bdf 100644 --- a/arch/arm64/include/asm/module.lds.h +++ b/arch/arm64/include/asm/module.lds.h @@ -3,5 +3,34 @@ SECTIONS { .plt 0 (NOLOAD) : { BYTE(0) } .init.plt 0 (NOLOAD) : { BYTE(0) } .text.ftrace_trampoline 0 (NOLOAD) : { BYTE(0) } + +#ifdef CONFIG_CRYPTO_FIPS140 + /* + * The FIPS140 module incorporates copies of builtin code, which gets + * integrity checked at module load time, and registered in a way that + * ensures that the integrity checked versions supersede the builtin + * ones. These objects are compiled as builtin code, and so their init + * hooks will be exported from the binary in the same way as builtin + * initcalls are, i.e., annotated with a level that defines the order + * in which the hooks are expected to be invoked. 
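+ * + * For reference: module_init() hooks in code built as builtin are emitted + * at initcall level 6 (device level), so the INIT_CALLS_LEVEL(6) expansion + * below (KEEP(*(.initcall6.init*)) and KEEP(*(.initcall6s.init*))) is + * typically the one that collects the hooks of the duplicated crypto code.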
+ */ +#define INIT_CALLS_LEVEL(level) \ + KEEP(*(.initcall##level##.init*)) \ + KEEP(*(.initcall##level##s.init*)) + + .initcalls : { + *(.initcalls._start) + INIT_CALLS_LEVEL(0) + INIT_CALLS_LEVEL(1) + INIT_CALLS_LEVEL(2) + INIT_CALLS_LEVEL(3) + INIT_CALLS_LEVEL(4) + INIT_CALLS_LEVEL(5) + INIT_CALLS_LEVEL(rootfs) + INIT_CALLS_LEVEL(6) + INIT_CALLS_LEVEL(7) + *(.initcalls._end) + } +#endif } #endif diff --git a/build.config.gki.aarch64.fips140 b/build.config.gki.aarch64.fips140 new file mode 100644 index 000000000000..040d73af3d2a --- /dev/null +++ b/build.config.gki.aarch64.fips140 @@ -0,0 +1,17 @@ +. ${ROOT_DIR}/${KERNEL_DIR}/build.config.gki.aarch64 + +FILES="${FILES} +crypto/fips140.ko +" + +if [ "${LTO}" = "none" ]; then + echo "The FIPS140 module needs LTO to be enabled." + exit 1 +fi + +MODULES_ORDER=android/gki_aarch64_fips140_modules +KERNEL_DIR=common + +DEFCONFIG=fips140_gki_defconfig +PRE_DEFCONFIG_CMDS="cat ${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/gki_defconfig ${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/fips140_gki.fragment > ${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/${DEFCONFIG};" +POST_DEFCONFIG_CMDS="rm ${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/${DEFCONFIG}" diff --git a/crypto/Kconfig b/crypto/Kconfig index 774adc9846fa..ca514e82431a 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -32,6 +32,14 @@ config CRYPTO_FIPS certification. You should say no unless you know what this is. +config CRYPTO_FIPS140 + def_bool y + depends on MODULES && ARM64 && ARM64_MODULE_PLTS + +config CRYPTO_FIPS140_MOD + bool "Enable FIPS140 integrity self-checked loadable module" + depends on LTO_CLANG && CRYPTO_FIPS140 + config CRYPTO_ALGAPI tristate select CRYPTO_ALGAPI2 diff --git a/crypto/Makefile b/crypto/Makefile index b279483fba50..6c117defa554 100644 --- a/crypto/Makefile +++ b/crypto/Makefile @@ -197,3 +197,40 @@ obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys/ obj-$(CONFIG_CRYPTO_HASH_INFO) += hash_info.o crypto_simd-y := simd.o obj-$(CONFIG_CRYPTO_SIMD) += crypto_simd.o + +ifneq ($(CONFIG_CRYPTO_FIPS140_MOD),) + +FIPS140_CFLAGS := -D__DISABLE_EXPORTS -DBUILD_FIPS140_KO + +# +# Create a separate FIPS archive containing a duplicate of each builtin generic +# module that is in scope for FIPS 140-2 certification +# +crypto-fips-objs := drbg.o ecb.o cbc.o ctr.o gcm.o xts.o hmac.o memneq.o \ + gf128mul.o aes_generic.o lib-crypto-aes.o \ + sha1_generic.o sha256_generic.o sha512_generic.o \ + lib-sha1.o lib-crypto-sha256.o +crypto-fips-objs := $(foreach o,$(crypto-fips-objs),$(o:.o=-fips.o)) + +# get the arch to add its objects to $(crypto-fips-objs) +include $(srctree)/arch/$(ARCH)/crypto/Kbuild.fips140 + +extra-$(CONFIG_CRYPTO_FIPS140_MOD) += crypto-fips.a + +$(obj)/%-fips.o: KBUILD_CFLAGS += $(FIPS140_CFLAGS) +$(obj)/%-fips.o: $(src)/%.c FORCE + $(call if_changed_rule,cc_o_c) +$(obj)/lib-%-fips.o: $(srctree)/lib/%.c FORCE + $(call if_changed_rule,cc_o_c) +$(obj)/lib-crypto-%-fips.o: $(srctree)/lib/crypto/%.c FORCE + $(call if_changed_rule,cc_o_c) + +$(obj)/crypto-fips.a: $(addprefix $(obj)/,$(crypto-fips-objs)) FORCE + $(call if_changed,ar_and_symver) + +fips140-objs := fips140-module.o crypto-fips.a +obj-m += fips140.o + +CFLAGS_fips140-module.o += $(FIPS140_CFLAGS) + +endif diff --git a/crypto/fips140-module.c b/crypto/fips140-module.c new file mode 100644 index 000000000000..2b3ecd4bea5a --- /dev/null +++ b/crypto/fips140-module.c @@ -0,0 +1,630 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2021 Google LLC + * Author: Ard Biesheuvel + * + * This file is 
the core of fips140.ko, which carries a number of crypto + * algorithms and chaining mode templates that are also built into vmlinux. + * This module performs a load-time integrity check, as mandated by FIPS 140, + * and replaces registered crypto algorithms that appear on the FIPS 140 list + * with ones provided by this module. This meets the FIPS 140 requirements for + * a cryptographic software module. + */ + +#define pr_fmt(fmt) "fips140: " fmt + +#include <linux/ctype.h> +#include <linux/module.h> +#include <crypto/aead.h> +#include <crypto/aes.h> +#include <crypto/hash.h> +#include <crypto/rng.h> +#include <crypto/sha.h> +#include <crypto/skcipher.h> +#include <trace/hooks/fips140.h> + +#include "internal.h" + +/* + * FIPS 140-2 prefers the use of HMAC with a public key over a plain hash. + */ +u8 __initdata fips140_integ_hmac_key[] = "The quick brown fox jumps over the lazy dog"; + +/* this is populated by the build tool */ +u8 __initdata fips140_integ_hmac_digest[SHA256_DIGEST_SIZE]; + +const u32 __initcall_start_marker __section(".initcalls._start"); +const u32 __initcall_end_marker __section(".initcalls._end"); + +const u8 __fips140_text_start __section(".text.._start"); +const u8 __fips140_text_end __section(".text.._end"); + +const u8 __fips140_rodata_start __section(".rodata.._start"); +const u8 __fips140_rodata_end __section(".rodata.._end"); + +/* + * We need this little detour to prevent Clang from detecting out of bounds + * accesses to __fips140_text_start and __fips140_rodata_start, which only exist + * to delineate the sections, and so their sizes are not relevant to us. + */ +const u32 *__initcall_start = &__initcall_start_marker; + +const u8 *__text_start = &__fips140_text_start; +const u8 *__rodata_start = &__fips140_rodata_start; + +static const char fips140_algorithms[][22] __initconst = { + "aes", + + "gcm(aes)", + + "ecb(aes)", + "cbc(aes)", + "ctr(aes)", + "xts(aes)", + + "hmac(sha1)", + "hmac(sha224)", + "hmac(sha256)", + "hmac(sha384)", + "hmac(sha512)", + "sha1", + "sha224", + "sha256", + "sha384", + "sha512", + + "drbg_nopr_ctr_aes256", + "drbg_nopr_ctr_aes192", + "drbg_nopr_ctr_aes128", + "drbg_nopr_hmac_sha512", + "drbg_nopr_hmac_sha384", + "drbg_nopr_hmac_sha256", + "drbg_nopr_hmac_sha1", + "drbg_nopr_sha512", + "drbg_nopr_sha384", + "drbg_nopr_sha256", + "drbg_nopr_sha1", + "drbg_pr_ctr_aes256", + "drbg_pr_ctr_aes192", + "drbg_pr_ctr_aes128", + "drbg_pr_hmac_sha512", + "drbg_pr_hmac_sha384", + "drbg_pr_hmac_sha256", + "drbg_pr_hmac_sha1", + "drbg_pr_sha512", + "drbg_pr_sha384", + "drbg_pr_sha256", + "drbg_pr_sha1", +}; + +static bool __init is_fips140_algo(struct crypto_alg *alg) +{ + int i; + + /* + * All software algorithms are synchronous; hardware algorithms must + * be covered by their own FIPS 140 certification. + */ + if (alg->cra_flags & CRYPTO_ALG_ASYNC) + return false; + + for (i = 0; i < ARRAY_SIZE(fips140_algorithms); i++) + if (!strcmp(alg->cra_name, fips140_algorithms[i])) + return true; + return false; +} + +static LIST_HEAD(unchecked_fips140_algos); + +static void __init unregister_existing_fips140_algos(void) +{ + struct crypto_alg *alg, *tmp; + LIST_HEAD(remove_list); + LIST_HEAD(spawns); + + down_write(&crypto_alg_sem); + + /* + * Find all registered algorithms that we care about, and move them to + * a private list so that they are no longer exposed via the algo + * lookup API. Subsequently, we will unregister them if they are not in + * active use. If they are, we cannot simply remove them but we can + * adapt them later to use our integrity checked backing code.
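+ * + * Note that is_fips140_algo() matches on cra_name, so the generic and the + * accelerated implementations of an algorithm are caught alike: "sha256" + * covers both sha256-generic and sha256-ce, and "hmac(sha256)" covers + * template instances built from either.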
+ */ + list_for_each_entry_safe(alg, tmp, &crypto_alg_list, cra_list) { + if (is_fips140_algo(alg)) { + if (refcount_read(&alg->cra_refcnt) == 1) { + /* + * This algorithm is not currently in use, but + * there may be template instances holding + * references to it via spawns. So let's tear + * it down like crypto_unregister_alg() would, + * but without releasing the lock, to prevent + * races with concurrent TFM allocations. + */ + alg->cra_flags |= CRYPTO_ALG_DEAD; + list_move(&alg->cra_list, &remove_list); + crypto_remove_spawns(alg, &spawns, NULL); + } else { + /* + * This algorithm is live, i.e., there are TFMs + * allocated that rely on it for its crypto + * transformations. We will swap these out + * later with integrity checked versions. + */ + list_move(&alg->cra_list, + &unchecked_fips140_algos); + } + } + } + + /* + * We haven't taken a reference to the algorithms on the remove_list, + * so technically, we may be competing with a concurrent invocation of + * crypto_unregister_alg() here. Fortunately, crypto_unregister_alg() + * just gives up with a warning if the algo that is being unregistered + * has already disappeared, so this happens to be safe. That does mean + * we need to hold on to the lock, to ensure that the algo is either on + * the list or it is not, and not in some limbo state. + */ + crypto_remove_final(&remove_list); + crypto_remove_final(&spawns); + + up_write(&crypto_alg_sem); +} + +static void __init unapply_text_relocations(void *section, int section_size, + const Elf64_Rela *rela, int numrels) +{ + while (numrels--) { + u32 *place = (u32 *)(section + rela->r_offset); + + BUG_ON(rela->r_offset >= section_size); + + switch (ELF64_R_TYPE(rela->r_info)) { +#ifdef CONFIG_ARM64 + case R_AARCH64_JUMP26: + case R_AARCH64_CALL26: + *place &= ~GENMASK(25, 0); + break; + + case R_AARCH64_ADR_PREL_LO21: + case R_AARCH64_ADR_PREL_PG_HI21: + case R_AARCH64_ADR_PREL_PG_HI21_NC: + *place &= ~(GENMASK(30, 29) | GENMASK(23, 5)); + break; + + case R_AARCH64_ADD_ABS_LO12_NC: + case R_AARCH64_LDST8_ABS_LO12_NC: + case R_AARCH64_LDST16_ABS_LO12_NC: + case R_AARCH64_LDST32_ABS_LO12_NC: + case R_AARCH64_LDST64_ABS_LO12_NC: + case R_AARCH64_LDST128_ABS_LO12_NC: + *place &= ~GENMASK(21, 10); + break; + default: + pr_err("unhandled relocation type %llu\n", + ELF64_R_TYPE(rela->r_info)); + BUG(); +#else +#error +#endif + } + rela++; + } +} + +static void __init unapply_rodata_relocations(void *section, int section_size, + const Elf64_Rela *rela, int numrels) +{ + while (numrels--) { + void *place = section + rela->r_offset; + + BUG_ON(rela->r_offset >= section_size); + + switch (ELF64_R_TYPE(rela->r_info)) { +#ifdef CONFIG_ARM64 + case R_AARCH64_ABS64: + *(u64 *)place = 0; + break; + default: + pr_err("unhandled relocation type %llu\n", + ELF64_R_TYPE(rela->r_info)); + BUG(); +#else +#error +#endif + } + rela++; + } +} + +static bool __init check_fips140_module_hmac(void) +{ + SHASH_DESC_ON_STACK(desc, dontcare); + u8 digest[SHA256_DIGEST_SIZE]; + void *textcopy, *rodatacopy; + int textsize, rodatasize; + int err; + + textsize = &__fips140_text_end - &__fips140_text_start; + rodatasize = &__fips140_rodata_end - &__fips140_rodata_start; + + pr_warn("text size : 0x%x\n", textsize); + pr_warn("rodata size: 0x%x\n", rodatasize); + + textcopy = kmalloc(textsize + rodatasize, GFP_KERNEL); + if (!textcopy) { + pr_err("Failed to allocate memory for copy of .text\n"); + return false; + } + + rodatacopy = textcopy + textsize; + + memcpy(textcopy, __text_start, textsize); + memcpy(rodatacopy, 
__rodata_start, rodatasize); + + // apply the relocations in reverse on the copies of .text and .rodata + unapply_text_relocations(textcopy, textsize, + __this_module.arch.text_relocations, + __this_module.arch.num_text_relocations); + + unapply_rodata_relocations(rodatacopy, rodatasize, + __this_module.arch.rodata_relocations, + __this_module.arch.num_rodata_relocations); + + kfree(__this_module.arch.text_relocations); + kfree(__this_module.arch.rodata_relocations); + + desc->tfm = crypto_alloc_shash("hmac(sha256)", 0, 0); + if (IS_ERR(desc->tfm)) { + pr_err("failed to allocate hmac tfm (%ld)\n", PTR_ERR(desc->tfm)); + kfree(textcopy); + return false; + } + + pr_warn("using '%s' for integrity check\n", + crypto_shash_driver_name(desc->tfm)); + + err = crypto_shash_setkey(desc->tfm, fips140_integ_hmac_key, + strlen(fips140_integ_hmac_key)) ?: + crypto_shash_init(desc) ?: + crypto_shash_update(desc, textcopy, textsize) ?: + crypto_shash_finup(desc, rodatacopy, rodatasize, digest); + + crypto_free_shash(desc->tfm); + kfree(textcopy); + + if (err) { + pr_err("failed to calculate hmac shash (%d)\n", err); + return false; + } + + if (memcmp(digest, fips140_integ_hmac_digest, sizeof(digest))) { + pr_err("provided_digest : %*phN\n", (int)sizeof(digest), + fips140_integ_hmac_digest); + + pr_err("calculated digest: %*phN\n", (int)sizeof(digest), + digest); + + return false; + } + + return true; +} + +static bool __init update_live_fips140_algos(void) +{ + struct crypto_alg *alg, *new_alg, *tmp; + + /* + * Find all algorithms that we could not unregister the last time + * around, because they were already in use. + */ + down_write(&crypto_alg_sem); + list_for_each_entry_safe(alg, tmp, &unchecked_fips140_algos, cra_list) { + + /* + * Take this algo off the list before releasing the lock. This + * ensures that a concurrent invocation of + * crypto_unregister_alg() observes a consistent state, i.e., + * the algo is either still on the list, and crypto_unregister_alg() + * will release it, or it is not, and crypto_unregister_alg() + * will issue a warning but ignore this condition otherwise. + */ + list_del_init(&alg->cra_list); + up_write(&crypto_alg_sem); + + /* + * Grab the algo that will replace the live one. + * Note that this will instantiate template-based instances as + * well, as long as their driver name uses the conventional + * pattern of "template(algo)". In this case, we are relying on + * the fact that the templates carried by this module will + * supersede the builtin ones, because they were registered + * later and therefore appear first in the linked + * list. For example, "hmac(sha1-ce)" constructed using the + * builtin hmac template and the builtin SHA1 driver will be + * superseded by the integrity checked versions of HMAC and + * SHA1-ce carried in this module. + * + * Note that this takes a reference to the new algorithm, which + * will never get released. This is intentional: once we copy + * the function pointers from the new algo into the old one, we + * cannot drop the new algo unless we are sure that the old one + * has been released, and this is something we don't keep track + * of at the moment.
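+ * + * (CRYPTO_NOLOAD in the lookup below keeps crypto_alg_mod_lookup() from + * attempting to load a module to satisfy the request; everything we need + * was already registered by this module's init hooks.)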
+ */ + new_alg = crypto_alg_mod_lookup(alg->cra_driver_name, + alg->cra_flags & CRYPTO_ALG_TYPE_MASK, + CRYPTO_ALG_TYPE_MASK | CRYPTO_NOLOAD); + + if (IS_ERR(new_alg)) { + pr_crit("Failed to allocate '%s' for updating live algo (%ld)\n", + alg->cra_driver_name, PTR_ERR(new_alg)); + return false; + } + + /* + * The FIPS module's algorithms are expected to be built from + * the same source code as the in-kernel ones so that they are + * fully compatible. In general, there's no way to verify full + * compatibility at runtime, but we can at least verify that + * the algorithm properties match. + */ + if (alg->cra_ctxsize != new_alg->cra_ctxsize || + alg->cra_alignmask != new_alg->cra_alignmask) { + pr_crit("Failed to update live algo '%s' due to mismatch:\n" + "cra_ctxsize : %u vs %u\n" + "cra_alignmask : 0x%x vs 0x%x\n", + alg->cra_driver_name, + alg->cra_ctxsize, new_alg->cra_ctxsize, + alg->cra_alignmask, new_alg->cra_alignmask); + return false; + } + + /* + * Update the name and priority so the algorithm stands out as + * one that was updated in order to comply with FIPS 140, and + * so that it is not the preferred version for further use. + */ + strlcat(alg->cra_name, "+orig", CRYPTO_MAX_ALG_NAME); + alg->cra_priority = 0; + + switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { + struct aead_alg *old_aead, *new_aead; + struct skcipher_alg *old_skcipher, *new_skcipher; + struct shash_alg *old_shash, *new_shash; + struct rng_alg *old_rng, *new_rng; + + case CRYPTO_ALG_TYPE_CIPHER: + alg->cra_u.cipher = new_alg->cra_u.cipher; + break; + + case CRYPTO_ALG_TYPE_AEAD: + old_aead = container_of(alg, struct aead_alg, base); + new_aead = container_of(new_alg, struct aead_alg, base); + + old_aead->setkey = new_aead->setkey; + old_aead->setauthsize = new_aead->setauthsize; + old_aead->encrypt = new_aead->encrypt; + old_aead->decrypt = new_aead->decrypt; + old_aead->init = new_aead->init; + old_aead->exit = new_aead->exit; + break; + + case CRYPTO_ALG_TYPE_SKCIPHER: + old_skcipher = container_of(alg, struct skcipher_alg, base); + new_skcipher = container_of(new_alg, struct skcipher_alg, base); + + old_skcipher->setkey = new_skcipher->setkey; + old_skcipher->encrypt = new_skcipher->encrypt; + old_skcipher->decrypt = new_skcipher->decrypt; + old_skcipher->init = new_skcipher->init; + old_skcipher->exit = new_skcipher->exit; + break; + + case CRYPTO_ALG_TYPE_SHASH: + old_shash = container_of(alg, struct shash_alg, base); + new_shash = container_of(new_alg, struct shash_alg, base); + + old_shash->init = new_shash->init; + old_shash->update = new_shash->update; + old_shash->final = new_shash->final; + old_shash->finup = new_shash->finup; + old_shash->digest = new_shash->digest; + old_shash->export = new_shash->export; + old_shash->import = new_shash->import; + old_shash->setkey = new_shash->setkey; + old_shash->init_tfm = new_shash->init_tfm; + old_shash->exit_tfm = new_shash->exit_tfm; + break; + + case CRYPTO_ALG_TYPE_RNG: + old_rng = container_of(alg, struct rng_alg, base); + new_rng = container_of(new_alg, struct rng_alg, base); + + old_rng->generate = new_rng->generate; + old_rng->seed = new_rng->seed; + old_rng->set_ent = new_rng->set_ent; + break; + default: + /* + * This should never happen: every item on the + * fips140_algorithms list should match one of the + * cases above, so if we end up here, something is + * definitely wrong.
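+ * + * (A false return here ends in a panic in fips140_init(), so no + * attempt is made to undo the renaming above.)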
+ */ + pr_crit("Unexpected type %u for algo %s, giving up ...\n", + alg->cra_flags & CRYPTO_ALG_TYPE_MASK, + alg->cra_driver_name); + return false; + } + + /* + * Move the algorithm back to the algorithm list, so it is + * visible in /proc/crypto et al. + */ + down_write(&crypto_alg_sem); + list_add_tail(&alg->cra_list, &crypto_alg_list); + } + up_write(&crypto_alg_sem); + + return true; +} + +static void fips140_sha256(void *p, const u8 *data, unsigned int len, u8 *out, + int *hook_inuse) +{ + sha256(data, len, out); + *hook_inuse = 1; +} + +static void fips140_aes_expandkey(void *p, struct crypto_aes_ctx *ctx, + const u8 *in_key, unsigned int key_len, + int *err) +{ + *err = aes_expandkey(ctx, in_key, key_len); +} + +static void fips140_aes_encrypt(void *priv, const struct crypto_aes_ctx *ctx, + u8 *out, const u8 *in, int *hook_inuse) +{ + aes_encrypt(ctx, out, in); + *hook_inuse = 1; +} + +static void fips140_aes_decrypt(void *priv, const struct crypto_aes_ctx *ctx, + u8 *out, const u8 *in, int *hook_inuse) +{ + aes_decrypt(ctx, out, in); + *hook_inuse = 1; +} + +static bool update_fips140_library_routines(void) +{ + int ret; + + ret = register_trace_android_vh_sha256(fips140_sha256, NULL) ?: + register_trace_android_vh_aes_expandkey(fips140_aes_expandkey, NULL) ?: + register_trace_android_vh_aes_encrypt(fips140_aes_encrypt, NULL) ?: + register_trace_android_vh_aes_decrypt(fips140_aes_decrypt, NULL); + + return ret == 0; +} + +/* + * Initialize the FIPS 140 module. + * + * Note: this routine iterates over the contents of the initcall section, which + * consists of an array of function pointers that was emitted by the linker + * rather than the compiler. This means that these function pointers lack the + * usual CFI stubs that the compiler emits when CFI codegen is enabled. So + * let's disable CFI locally when handling the initcall array, to avoid + * surpises. + */ +int __init __attribute__((__no_sanitize__("cfi"))) fips140_init(void) +{ + const u32 *initcall; + + pr_info("Loading FIPS 140 module\n"); + + unregister_existing_fips140_algos(); + + /* iterate over all init routines present in this module and call them */ + for (initcall = __initcall_start + 1; + initcall < &__initcall_end_marker; + initcall++) { + int (*init)(void) = offset_to_ptr(initcall); + + init(); + } + + if (!update_live_fips140_algos()) + goto panic; + + if (!update_fips140_library_routines()) + goto panic; + + /* + * Wait until all tasks have at least been scheduled once and preempted + * voluntarily. This ensures that none of the superseded algorithms that + * were already in use will still be live. + */ + synchronize_rcu_tasks(); + + /* insert self tests here */ + + /* + * It may seem backward to perform the integrity check last, but this + * is intentional: the check itself uses hmac(sha256) which is one of + * the algorithms that are replaced with versions from this module, and + * the integrity check must use the replacement version. + */ + + if (!check_fips140_module_hmac()) { + pr_crit("FIPS 140 integrity check failed -- giving up!\n"); + goto panic; + } + + pr_info("FIPS 140 integrity check successful\n"); + pr_info("FIPS 140 module successfully loaded\n"); + return 0; + +panic: + panic("FIPS 140 module load failure"); +} + +module_init(fips140_init); + +MODULE_IMPORT_NS(CRYPTO_INTERNAL); +MODULE_LICENSE("GPL v2"); + +/* + * Crypto-related helper functions, reproduced here so that they will be + * covered by the FIPS 140 integrity check. 
+ * + * Non-cryptographic helper functions such as memcpy() can be excluded from the + * FIPS module, but there is ambiguity about other helper functions like + * __crypto_xor() and crypto_inc() which aren't cryptographic by themselves, + * but are more closely associated with cryptography than e.g. memcpy(). To + * err on the side of caution, we include copies of these in the FIPS module. + */ +void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len) +{ + while (len >= 8) { + *(u64 *)dst = *(u64 *)src1 ^ *(u64 *)src2; + dst += 8; + src1 += 8; + src2 += 8; + len -= 8; + } + + while (len >= 4) { + *(u32 *)dst = *(u32 *)src1 ^ *(u32 *)src2; + dst += 4; + src1 += 4; + src2 += 4; + len -= 4; + } + + while (len >= 2) { + *(u16 *)dst = *(u16 *)src1 ^ *(u16 *)src2; + dst += 2; + src1 += 2; + src2 += 2; + len -= 2; + } + + while (len--) + *dst++ = *src1++ ^ *src2++; +} + +void crypto_inc(u8 *a, unsigned int size) +{ + a += size; + + while (size--) + if (++*--a) + break; +} diff --git a/scripts/module.lds.S b/scripts/module.lds.S index ea11e2146183..1713fc5383bb 100644 --- a/scripts/module.lds.S +++ b/scripts/module.lds.S @@ -50,8 +50,10 @@ SECTIONS { } .rodata : { + *(.rodata.._start) *(.rodata .rodata.[0-9a-zA-Z_]*) *(.rodata..L*) + *(.rodata.._end) } #ifdef CONFIG_CFI_CLANG @@ -60,11 +62,13 @@ SECTIONS { * .text section, and that the section is aligned to page size. */ .text : ALIGN(PAGE_SIZE) { + *(.text.._start) *(.text.__cfi_check) *(.text .text.[0-9a-zA-Z_]*) __cfi_jt_start = .; *(.text..L.cfi.jumptable .text..L.cfi.jumptable.*) __cfi_jt_end = .; + *(.text.._end) } #endif #endif
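The fips140_integ_hmac_digest[] placeholder above is described as "populated by the build tool", and that tool is not part of this diff. As a rough sketch of what the host-side step has to do, under assumed names (fips140_hmac_sections() is a hypothetical helper, and section extraction via libelf or objcopy is elided): compute HMAC-SHA256 over the module's .text and .rodata, in the same order as check_fips140_module_hmac(), using the fixed key, then patch the result over the placeholder symbol in fips140.ko. Using OpenSSL, for instance:

#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <string.h>

static const char key[] = "The quick brown fox jumps over the lazy dog";

/* hypothetical helper: digest the sections in the order the kernel-side
 * check hashes them (.text first, then .rodata) */
static void fips140_hmac_sections(const unsigned char *text, size_t text_len,
				  const unsigned char *rodata, size_t rodata_len,
				  unsigned char digest[32])
{
	unsigned int dlen = 32;
	HMAC_CTX *ctx = HMAC_CTX_new();

	HMAC_Init_ex(ctx, key, strlen(key), EVP_sha256(), NULL);
	HMAC_Update(ctx, text, text_len);
	HMAC_Update(ctx, rodata, rodata_len);
	HMAC_Final(ctx, digest, &dlen);
	HMAC_CTX_free(ctx);
}

Since the kernel side blanks each relocation target before hashing, the host tool must apply the same masking to its copy of the sections so that both ends hash identical bytes.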