Snap for 7449546 from 28c189ec5c to android12-5.10-keystone-qcom-release

Change-Id: Id4784aaaa1ee072b8f7aa5c108645035fcb8867c
Committed by Android Build Coastguard Worker on 2021-06-11 20:00:37 +00:00
75 changed files with 69557 additions and 63630 deletions


@@ -63,7 +63,7 @@ Required properties (DMA function blocks):
- larb: Should contain a phandle pointing to the local arbiter device as defined
in Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt
- iommus: Should point to the respective IOMMU block with master port as
argument, see Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
argument, see Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml
for details.
Examples:


@@ -19,7 +19,7 @@ Required properties:
Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt
for details.
- iommus: should point to the respective IOMMU block with master port as
argument, see Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
argument, see Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml
for details.
Example:


@@ -17,7 +17,7 @@ Required properties:
Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt
for details.
- iommus: should point to the respective IOMMU block with master port as
argument, see Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
argument, see Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml
for details.
Example:


@@ -25,7 +25,7 @@ Required properties (DMA function blocks, child node):
"mediatek,mt8173-mdp-wdma"
"mediatek,mt8173-mdp-wrot"
- iommus: should point to the respective IOMMU block with master port as
argument, see Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
argument, see Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml
for details.
- mediatek,larb: must contain the local arbiters in the current Socs, see
Documentation/devicetree/bindings/memory-controllers/mediatek,smi-larb.txt


@@ -18,7 +18,7 @@ Required properties:
"univpll_d2", "clk_cci400_sel", "vdec_sel", "vdecpll", "vencpll",
"venc_lt_sel", "vdec_bus_clk_src".
- iommus : should point to the respective IOMMU block with master port as
argument, see Documentation/devicetree/bindings/iommu/mediatek,iommu.txt
argument, see Documentation/devicetree/bindings/iommu/mediatek,iommu.yaml
for details.
One of the two following nodes:
- mediatek,vpu : the node of the video processor unit, if using VPU.


@@ -917,10 +917,10 @@ endif
ifdef CONFIG_LTO_CLANG
ifdef CONFIG_LTO_CLANG_THIN
CC_FLAGS_LTO += -flto=thin -fsplit-lto-unit
CC_FLAGS_LTO := -flto=thin -fsplit-lto-unit
KBUILD_LDFLAGS += --thinlto-cache-dir=$(extmod-prefix).thinlto-cache
else
CC_FLAGS_LTO += -flto
CC_FLAGS_LTO := -flto
endif
ifeq ($(SRCARCH),x86)
@@ -1568,7 +1568,7 @@ endif # CONFIG_MODULES
# Directories & files removed with 'make clean'
CLEAN_FILES += include/ksym vmlinux.symvers modules-only.symvers \
modules.builtin modules.builtin.modinfo modules.nsdeps \
compile_commands.json
compile_commands.json .thinlto-cache
# Directories & files removed with 'make mrproper'
MRPROPER_FILES += include/config include/generated \
@@ -1582,7 +1582,7 @@ MRPROPER_FILES += include/config include/generated \
*.spec
# Directories & files removed with 'make distclean'
DISTCLEAN_FILES += tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS .thinlto-cache
DISTCLEAN_FILES += tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
# clean - Delete most, but leave enough to build external modules
#

File diff suppressed because it is too large.


@@ -0,0 +1,149 @@
[abi_symbol_list]
# required by fips140.ko
add_random_ready_callback
aead_register_instance
bcmp
cancel_work_sync
__cfi_slowpath
cpu_have_feature
crypto_aead_decrypt
crypto_aead_encrypt
crypto_aead_setauthsize
crypto_aead_setkey
crypto_ahash_finup
crypto_ahash_setkey
crypto_alg_list
crypto_alg_mod_lookup
crypto_alg_sem
crypto_alloc_base
crypto_alloc_rng
crypto_alloc_shash
crypto_attr_alg_name
crypto_check_attr_type
crypto_cipher_encrypt_one
crypto_cipher_setkey
crypto_destroy_tfm
crypto_drop_spawn
crypto_get_default_null_skcipher
crypto_grab_aead
crypto_grab_ahash
crypto_grab_shash
crypto_grab_skcipher
crypto_inst_setname
crypto_put_default_null_skcipher
crypto_register_aead
crypto_register_alg
crypto_register_rngs
crypto_register_shash
crypto_register_shashes
crypto_register_skciphers
crypto_register_template
crypto_register_templates
crypto_remove_final
crypto_remove_spawns
crypto_req_done
crypto_shash_alg_has_setkey
crypto_shash_digest
crypto_shash_final
crypto_shash_finup
crypto_shash_setkey
crypto_shash_tfm_digest
crypto_shash_update
crypto_skcipher_decrypt
crypto_skcipher_encrypt
crypto_skcipher_setkey
crypto_spawn_tfm2
crypto_unregister_aead
crypto_unregister_alg
crypto_unregister_rngs
crypto_unregister_shash
crypto_unregister_shashes
crypto_unregister_skciphers
crypto_unregister_template
crypto_unregister_templates
del_random_ready_callback
down_write
fpsimd_context_busy
get_random_bytes
__init_swait_queue_head
irq_stat
kasan_flag_enabled
kernel_neon_begin
kernel_neon_end
kfree
kfree_sensitive
__kmalloc
kmalloc_caches
kmalloc_order_trace
kmem_cache_alloc_trace
__list_add_valid
__list_del_entry_valid
memcpy
memset
__mutex_init
mutex_lock
mutex_unlock
panic
preempt_schedule
preempt_schedule_notrace
printk
queue_work_on
scatterwalk_ffwd
scatterwalk_map_and_copy
sg_init_one
sg_init_table
sg_next
shash_free_singlespawn_instance
shash_register_instance
skcipher_alloc_instance_simple
skcipher_register_instance
skcipher_walk_aead_decrypt
skcipher_walk_aead_encrypt
skcipher_walk_done
skcipher_walk_virt
snprintf
__stack_chk_fail
__stack_chk_guard
strcmp
strlcat
strlcpy
strlen
strncmp
synchronize_rcu_tasks
system_wq
__traceiter_android_vh_aes_decrypt
__traceiter_android_vh_aes_encrypt
__traceiter_android_vh_aes_expandkey
__traceiter_android_vh_sha256
__tracepoint_android_vh_aes_decrypt
__tracepoint_android_vh_aes_encrypt
__tracepoint_android_vh_aes_expandkey
__tracepoint_android_vh_sha256
tracepoint_probe_register
up_write
wait_for_completion
# needed by fips140.ko but not identified by the tooling
# TODO(b/189327973): [GKI: ABI] Build of fips140.ko module fails to identify some symbols
__crypto_memneq
__crypto_xor
aes_decrypt
aes_encrypt
aes_expandkey
ce_aes_expandkey
crypto_aes_inv_sbox
crypto_aes_sbox
crypto_aes_set_key
crypto_ft_tab
crypto_inc
crypto_it_tab
crypto_sha1_finup
crypto_sha1_update
gf128mul_lle
sha1_transform
sha224_final
sha256
sha256_block_data_order
sha256_final
sha256_update


@@ -985,6 +985,7 @@
get_zeroed_page
gfp_zone
gic_nonsecure_priorities
gic_resume
gov_attr_set_init
gov_attr_set_put
governor_sysfs_ops
@@ -2128,6 +2129,7 @@
sg_pcopy_from_buffer
sg_pcopy_to_buffer
sg_scsi_ioctl
shmem_mark_page_lazyfree
shmem_truncate_range
show_rcu_gp_kthreads
show_regs
@@ -2468,6 +2470,8 @@
__traceiter_android_vh_binder_wakeup_ilocked
__traceiter_android_vh_cpu_idle_enter
__traceiter_android_vh_cpu_idle_exit
__traceiter_android_vh_cpuidle_psci_enter
__traceiter_android_vh_cpuidle_psci_exit
__traceiter_android_vh_dump_throttled_rt_tasks
__traceiter_android_vh_force_compatible_post
__traceiter_android_vh_force_compatible_pre
@@ -2477,6 +2481,7 @@
__traceiter_android_vh_ftrace_oops_enter
__traceiter_android_vh_ftrace_oops_exit
__traceiter_android_vh_ftrace_size_check
__traceiter_android_vh_gic_resume
__traceiter_android_vh_gpio_block_read
__traceiter_android_vh_iommu_setup_dma_ops
__traceiter_android_vh_ipi_stop
@@ -2560,6 +2565,8 @@
__tracepoint_android_vh_check_uninterruptible_tasks_dn
__tracepoint_android_vh_cpu_idle_enter
__tracepoint_android_vh_cpu_idle_exit
__tracepoint_android_vh_cpuidle_psci_enter
__tracepoint_android_vh_cpuidle_psci_exit
__tracepoint_android_vh_dump_throttled_rt_tasks
__tracepoint_android_vh_force_compatible_post
__tracepoint_android_vh_force_compatible_pre
@@ -2569,6 +2576,7 @@
__tracepoint_android_vh_ftrace_oops_enter
__tracepoint_android_vh_ftrace_oops_exit
__tracepoint_android_vh_ftrace_size_check
__tracepoint_android_vh_gic_resume
__tracepoint_android_vh_gpio_block_read
__tracepoint_android_vh_iommu_setup_dma_ops
__tracepoint_android_vh_ipi_stop


@@ -0,0 +1 @@
crypto/fips140.ko


@@ -0,0 +1,38 @@
# SPDX-License-Identifier: GPL-2.0
#
# This file is included by the generic Kbuild makefile to permit the
# architecture to perform postlink actions on vmlinux and any .ko module file.
# In this case, we only need it for fips140.ko, which needs a HMAC digest to be
# injected into it. All other targets are NOPs.
#
PHONY := __archpost
__archpost:
-include include/config/auto.conf
include scripts/Kbuild.include
CMD_FIPS140_GEN_HMAC = crypto/fips140_gen_hmac
quiet_cmd_gen_hmac = HMAC $@
cmd_gen_hmac = $(CMD_FIPS140_GEN_HMAC) $@
# `@true` prevents complaints when there is nothing to be done
vmlinux: FORCE
@true
$(objtree)/crypto/fips140.ko: FORCE
$(call cmd,gen_hmac)
%.ko: FORCE
@true
clean:
@true
PHONY += FORCE clean
FORCE:
.PHONY: $(PHONY)


@@ -0,0 +1 @@
CONFIG_CRYPTO_FIPS140_MOD=y


@@ -0,0 +1,44 @@
# SPDX-License-Identifier: GPL-2.0-only
#
# Create a separate FIPS archive that duplicates the modules that are relevant
# for FIPS 140 certification as builtin objects
#
sha1-ce-y := sha1-ce-glue.o sha1-ce-core.o
sha2-ce-y := sha2-ce-glue.o sha2-ce-core.o
sha512-ce-y := sha512-ce-glue.o sha512-ce-core.o
ghash-ce-y := ghash-ce-glue.o ghash-ce-core.o
aes-ce-cipher-y := aes-ce-core.o aes-ce-glue.o
aes-ce-blk-y := aes-glue-ce.o aes-ce.o
aes-neon-blk-y := aes-glue-neon.o aes-neon.o
sha256-arm64-y := sha256-glue.o sha256-core.o
sha512-arm64-y := sha512-glue.o sha512-core.o
aes-arm64-y := aes-cipher-core.o aes-cipher-glue.o
aes-neon-bs-y := aes-neonbs-core.o aes-neonbs-glue.o
crypto-arm64-fips-src := $(srctree)/arch/arm64/crypto/
crypto-arm64-fips-modules := sha1-ce.o sha2-ce.o sha512-ce.o ghash-ce.o \
aes-ce-cipher.o aes-ce-blk.o aes-neon-blk.o \
sha256-arm64.o sha512-arm64.o aes-arm64.o \
aes-neon-bs.o
crypto-fips-objs += $(foreach o,$(crypto-arm64-fips-modules),$($(o:.o=-y):.o=-fips-arch.o))
CFLAGS_aes-glue-ce-fips-arch.o := -DUSE_V8_CRYPTO_EXTENSIONS
$(obj)/aes-glue-%-fips-arch.o: KBUILD_CFLAGS += $(FIPS140_CFLAGS)
$(obj)/aes-glue-%-fips-arch.o: $(crypto-arm64-fips-src)/aes-glue.c FORCE
$(call if_changed_rule,cc_o_c)
$(obj)/%-fips-arch.o: KBUILD_CFLAGS += $(FIPS140_CFLAGS)
$(obj)/%-fips-arch.o: $(crypto-arm64-fips-src)/%.c FORCE
$(call if_changed_rule,cc_o_c)
$(obj)/%-fips-arch.o: $(crypto-arm64-fips-src)/%.S FORCE
$(call if_changed_rule,as_o_S)
$(obj)/%: $(crypto-arm64-fips-src)/%_shipped
$(call cmd,shipped)
$(obj)/%-fips-arch.o: $(obj)/%.S FORCE
$(call if_changed_rule,as_o_S)


@@ -9,6 +9,7 @@
/* A64 instructions are always 32 bits. */
#define AARCH64_INSN_SIZE 4
#ifndef BUILD_FIPS140_KO
#ifndef __ASSEMBLY__
#include <linux/stringify.h>
@@ -214,4 +215,33 @@ alternative_endif
#define ALTERNATIVE(oldinstr, newinstr, ...) \
_ALTERNATIVE_CFG(oldinstr, newinstr, __VA_ARGS__, 1)
#else
/*
* The FIPS140 module does not support alternatives patching, as this
* invalidates the HMAC digest of the .text section. However, some alternatives
* are known to be irrelevant so we can tolerate them in the FIPS140 module, as
* they will never be applied in the first place in the use cases that the
* FIPS140 module targets (Android running on a production phone). Any other
* uses of alternatives should be avoided, as it is not safe in the general
* case to simply use the default sequence in one place (the fips module) and
* the alternative sequence everywhere else.
*
* Below is an allowlist of features that we can ignore, by simply taking the
* safe default instruction sequence. Note that this implies that the FIPS140
* module is not compatible with VHE, or with pseudo-NMI support.
*/
#define __ALT_ARM64_HAS_LDAPR 0,
#define __ALT_ARM64_HAS_VIRT_HOST_EXTN 0,
#define __ALT_ARM64_HAS_IRQ_PRIO_MASKING 0,
#define ALTERNATIVE(oldinstr, newinstr, feature, ...) \
_ALTERNATIVE(oldinstr, __ALT_ ## feature, #feature)
#define _ALTERNATIVE(oldinstr, feature, feature_str) \
__take_second_arg(feature oldinstr, \
".err Feature " feature_str " not supported in fips140 module")
#endif /* BUILD_FIPS140_KO */
#endif /* __ASM_ALTERNATIVE_MACROS_H */
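For illustration, here is a minimal, self-contained sketch of the select-second-argument trick these macros rely on. All identifiers except __take_second_arg() are invented for the example; the kernel macro additionally takes a newinstr argument, which is simply discarded in this configuration.

#include <stdio.h>

/* Same idea as the kernel's __take_second_arg(): drop the first argument. */
#define __take_second_arg(ignored, val, ...) val

/*
 * An "allowlisted" feature expands to "0," so the default sequence lands in
 * the second argument slot of __take_second_arg().
 */
#define __ALT_FEATURE_OK 0,

#define PICK(oldseq, feature) _PICK(oldseq, __ALT_ ## feature, #feature)
#define _PICK(oldseq, feature, feature_str) \
	__take_second_arg(feature oldseq, feature_str " not supported")

int main(void)
{
	/* Allowlisted feature: resolves to the safe default sequence. */
	printf("%s\n", PICK("default instruction sequence", FEATURE_OK));
	/*
	 * A feature without an __ALT_ definition would select the error
	 * string instead; the kernel macro emits an .err assembler directive
	 * there, turning any disallowed alternative into a build failure.
	 */
	return 0;
}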


@@ -3,5 +3,34 @@ SECTIONS {
.plt 0 (NOLOAD) : { BYTE(0) }
.init.plt 0 (NOLOAD) : { BYTE(0) }
.text.ftrace_trampoline 0 (NOLOAD) : { BYTE(0) }
#ifdef CONFIG_CRYPTO_FIPS140
/*
* The FIPS140 module incorporates copies of builtin code, which gets
* integrity checked at module load time, and registered in a way that
* ensures that the integrity checked versions supersede the builtin
* ones. These objects are compiled as builtin code, and so their init
* hooks will be exported from the binary in the same way as builtin
* initcalls are, i.e., annotated with a level that defines the order
* in which the hooks are expected to be invoked.
*/
#define INIT_CALLS_LEVEL(level) \
KEEP(*(.initcall##level##.init*)) \
KEEP(*(.initcall##level##s.init*))
.initcalls : {
*(.initcalls._start)
INIT_CALLS_LEVEL(0)
INIT_CALLS_LEVEL(1)
INIT_CALLS_LEVEL(2)
INIT_CALLS_LEVEL(3)
INIT_CALLS_LEVEL(4)
INIT_CALLS_LEVEL(5)
INIT_CALLS_LEVEL(rootfs)
INIT_CALLS_LEVEL(6)
INIT_CALLS_LEVEL(7)
*(.initcalls._end)
}
#endif
}
#endif


@@ -35,9 +35,7 @@ static __must_check inline bool may_use_simd(void)
* migrated, and if it's clear we cannot be migrated to a CPU
* where it is set.
*/
return !WARN_ON(!system_capabilities_finalized()) &&
system_supports_fpsimd() &&
!in_irq() && !irqs_disabled() && !in_nmi() &&
return !in_irq() && !irqs_disabled() && !in_nmi() &&
!this_cpu_read(fpsimd_context_busy);
}


@@ -153,6 +153,7 @@ SYM_CODE_START_LOCAL(enter_vhe)
// Invalidate TLBs before enabling the MMU
tlbi vmalle1
dsb nsh
isb
// Enable the EL2 S1 MMU, as set up from EL1
mrs_s x0, SYS_SCTLR_EL12


@@ -7,6 +7,7 @@
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sort.h>
static struct plt_entry __get_adrp_add_pair(u64 dst, u64 pc,
@@ -290,6 +291,7 @@ static int partition_branch_plt_relas(Elf64_Sym *syms, Elf64_Rela *rela,
int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
char *secstrings, struct module *mod)
{
bool copy_rela_for_fips140 = false;
unsigned long core_plts = 0;
unsigned long init_plts = 0;
Elf64_Sym *syms = NULL;
@@ -321,6 +323,10 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
return -ENOEXEC;
}
if (IS_ENABLED(CONFIG_CRYPTO_FIPS140) &&
!strcmp(mod->name, "fips140"))
copy_rela_for_fips140 = true;
for (i = 0; i < ehdr->e_shnum; i++) {
Elf64_Rela *rels = (void *)ehdr + sechdrs[i].sh_offset;
int nents, numrels = sechdrs[i].sh_size / sizeof(Elf64_Rela);
@@ -329,10 +335,38 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
if (sechdrs[i].sh_type != SHT_RELA)
continue;
#ifdef CONFIG_CRYPTO_FIPS140
if (copy_rela_for_fips140 &&
!strcmp(secstrings + dstsec->sh_name, ".rodata")) {
void *p = kmemdup(rels, numrels * sizeof(Elf64_Rela),
GFP_KERNEL);
if (!p) {
pr_err("fips140: failed to allocate .rodata RELA buffer\n");
return -ENOMEM;
}
mod->arch.rodata_relocations = p;
mod->arch.num_rodata_relocations = numrels;
}
#endif
/* ignore relocations that operate on non-exec sections */
if (!(dstsec->sh_flags & SHF_EXECINSTR))
continue;
#ifdef CONFIG_CRYPTO_FIPS140
if (copy_rela_for_fips140 &&
!strcmp(secstrings + dstsec->sh_name, ".text")) {
void *p = kmemdup(rels, numrels * sizeof(Elf64_Rela),
GFP_KERNEL);
if (!p) {
pr_err("fips140: failed to allocate .text RELA buffer\n");
return -ENOMEM;
}
mod->arch.text_relocations = p;
mod->arch.num_text_relocations = numrels;
}
#endif
/*
* sort branch relocations requiring a PLT by type, symbol index
* and addend


@@ -1888,8 +1888,10 @@ static int init_hyp_mode(void)
if (is_protected_kvm_enabled()) {
init_cpu_logical_map();
if (!init_psci_relay())
if (!init_psci_relay()) {
err = -ENODEV;
goto out_err;
}
}
if (is_protected_kvm_enabled()) {


@@ -4,7 +4,7 @@
#
asflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS
ccflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS
ccflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS -D__DISABLE_TRACE_MMIO__
hostprogs := gen-hyprel
HOST_EXTRACFLAGS += -I$(objtree)/include


@@ -50,6 +50,18 @@
#ifndef R_AARCH64_ABS64
#define R_AARCH64_ABS64 257
#endif
#ifndef R_AARCH64_PREL64
#define R_AARCH64_PREL64 260
#endif
#ifndef R_AARCH64_PREL32
#define R_AARCH64_PREL32 261
#endif
#ifndef R_AARCH64_PREL16
#define R_AARCH64_PREL16 262
#endif
#ifndef R_AARCH64_PLT32
#define R_AARCH64_PLT32 314
#endif
#ifndef R_AARCH64_LD_PREL_LO19
#define R_AARCH64_LD_PREL_LO19 273
#endif
@@ -371,6 +383,12 @@ static void emit_rela_section(Elf64_Shdr *sh_rela)
case R_AARCH64_ABS64:
emit_rela_abs64(rela, sh_orig_name);
break;
/* Allow position-relative data relocations. */
case R_AARCH64_PREL64:
case R_AARCH64_PREL32:
case R_AARCH64_PREL16:
case R_AARCH64_PLT32:
break;
/* Allow relocations to generate PC-relative addressing. */
case R_AARCH64_LD_PREL_LO19:
case R_AARCH64_ADR_PREL_LO21:


@@ -23,8 +23,8 @@
extern unsigned long hyp_nr_cpus;
struct host_kvm host_kvm;
struct hyp_pool host_s2_mem;
struct hyp_pool host_s2_dev;
static struct hyp_pool host_s2_mem;
static struct hyp_pool host_s2_dev;
/*
* Copies of the host's CPU features registers holding sanitized values.


@@ -477,7 +477,8 @@ static void __init map_mem(pgd_t *pgdp)
int flags = 0;
u64 i;
if (rodata_full || debug_pagealloc_enabled())
if (rodata_full || debug_pagealloc_enabled() ||
IS_ENABLED(CONFIG_KFENCE))
flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
/*


@@ -61,6 +61,7 @@ config PARISC
select HAVE_KRETPROBES
select HAVE_DYNAMIC_FTRACE if $(cc-option,-fpatchable-function-entry=1,1)
select HAVE_FTRACE_MCOUNT_RECORD if HAVE_DYNAMIC_FTRACE
select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY if DYNAMIC_FTRACE
select HAVE_KPROBES_ON_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
select SET_FS


@@ -21,8 +21,6 @@ typedef struct {
unsigned long sig[_NSIG_WORDS];
} sigset_t;
#define __ARCH_UAPI_SA_FLAGS _SA_SIGGFAULT
#include <asm/sigcontext.h>
#endif /* !__ASSEMBLY */


@@ -177,11 +177,6 @@ ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1)
KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,)
endif
ifdef CONFIG_LTO_CLANG
KBUILD_LDFLAGS += -plugin-opt=-code-model=kernel \
-plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
endif
# Workaround for a gcc prelease that unfortunately was shipped in a suse release
KBUILD_CFLAGS += -Wno-sign-compare
#
@@ -201,7 +196,12 @@ ifdef CONFIG_RETPOLINE
endif
endif
KBUILD_LDFLAGS := -m elf_$(UTS_MACHINE)
KBUILD_LDFLAGS += -m elf_$(UTS_MACHINE)
ifdef CONFIG_LTO_CLANG
KBUILD_LDFLAGS += -plugin-opt=-code-model=kernel \
-plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
endif
ifdef CONFIG_X86_NEED_RELOCS
LDFLAGS_vmlinux := --emit-relocs --discard-none


@@ -18,6 +18,7 @@ android/abi_gki_aarch64_generic
android/abi_gki_aarch64_exynos
android/abi_gki_aarch64_mtk
android/abi_gki_aarch64_xiaomi
android/abi_gki_aarch64_fips140
"
FILES="${FILES}


@@ -0,0 +1,17 @@
. ${ROOT_DIR}/${KERNEL_DIR}/build.config.gki.aarch64
FILES="${FILES}
crypto/fips140.ko
"
if [ "${LTO}" = "none" ]; then
echo "The FIPS140 module needs LTO to be enabled."
exit 1
fi
MODULES_ORDER=android/gki_aarch64_fips140_modules
DEFCONFIG=fips140_gki_defconfig
KMI_SYMBOL_LIST=android/abi_gki_aarch64_fips140
PRE_DEFCONFIG_CMDS="cat ${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/gki_defconfig ${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/fips140_gki.fragment > ${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/${DEFCONFIG};"
POST_DEFCONFIG_CMDS="rm ${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/${DEFCONFIG}"


@@ -32,6 +32,14 @@ config CRYPTO_FIPS
certification. You should say no unless you know what
this is.
config CRYPTO_FIPS140
def_bool y
depends on MODULES && ARM64 && ARM64_MODULE_PLTS
config CRYPTO_FIPS140_MOD
bool "Enable FIPS140 integrity self-checked loadable module"
depends on LTO_CLANG && CRYPTO_FIPS140
config CRYPTO_ALGAPI
tristate
select CRYPTO_ALGAPI2


@@ -197,3 +197,43 @@ obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += asymmetric_keys/
obj-$(CONFIG_CRYPTO_HASH_INFO) += hash_info.o
crypto_simd-y := simd.o
obj-$(CONFIG_CRYPTO_SIMD) += crypto_simd.o
ifneq ($(CONFIG_CRYPTO_FIPS140_MOD),)
FIPS140_CFLAGS := -D__DISABLE_EXPORTS -DBUILD_FIPS140_KO
#
# Create a separate FIPS archive containing a duplicate of each builtin generic
# module that is in scope for FIPS 140-2 certification
#
crypto-fips-objs := drbg.o ecb.o cbc.o ctr.o gcm.o xts.o hmac.o memneq.o \
gf128mul.o aes_generic.o lib-crypto-aes.o \
sha1_generic.o sha256_generic.o sha512_generic.o \
lib-sha1.o lib-crypto-sha256.o
crypto-fips-objs := $(foreach o,$(crypto-fips-objs),$(o:.o=-fips.o))
# get the arch to add its objects to $(crypto-fips-objs)
include $(srctree)/arch/$(ARCH)/crypto/Kbuild.fips140
extra-$(CONFIG_CRYPTO_FIPS140_MOD) += crypto-fips.a
$(obj)/%-fips.o: KBUILD_CFLAGS += $(FIPS140_CFLAGS)
$(obj)/%-fips.o: $(src)/%.c FORCE
$(call if_changed_rule,cc_o_c)
$(obj)/lib-%-fips.o: $(srctree)/lib/%.c FORCE
$(call if_changed_rule,cc_o_c)
$(obj)/lib-crypto-%-fips.o: $(srctree)/lib/crypto/%.c FORCE
$(call if_changed_rule,cc_o_c)
$(obj)/crypto-fips.a: $(addprefix $(obj)/,$(crypto-fips-objs)) FORCE
$(call if_changed,ar_and_symver)
fips140-objs := fips140-module.o crypto-fips.a
obj-m += fips140.o
CFLAGS_fips140-module.o += $(FIPS140_CFLAGS)
hostprogs-always-y := fips140_gen_hmac
HOSTLDLIBS_fips140_gen_hmac := -lcrypto -lelf
endif

crypto/fips140-module.c (new file, 630 lines)

@@ -0,0 +1,630 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2021 Google LLC
* Author: Ard Biesheuvel <ardb@google.com>
*
* This file is the core of the fips140.ko, which carries a number of crypto
* algorithms and chaining mode templates that are also built into vmlinux.
* This module performs a load-time integrity check, as mandated by FIPS 140,
* and replaces registered crypto algorithms that appear on the FIPS 140 list
* with ones provided by this module. This meets the FIPS 140 requirements for
* a cryptographic software module.
*/
#define pr_fmt(fmt) "fips140: " fmt
#include <linux/ctype.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <crypto/skcipher.h>
#include <crypto/rng.h>
#include <trace/hooks/fips140.h>
#include "internal.h"
/*
* FIPS 140-2 prefers the use of HMAC with a public key over a plain hash.
*/
u8 __initdata fips140_integ_hmac_key[] = "The quick brown fox jumps over the lazy dog";
/* this is populated by the build tool */
u8 __initdata fips140_integ_hmac_digest[SHA256_DIGEST_SIZE];
const u32 __initcall_start_marker __section(".initcalls._start");
const u32 __initcall_end_marker __section(".initcalls._end");
const u8 __fips140_text_start __section(".text.._start");
const u8 __fips140_text_end __section(".text.._end");
const u8 __fips140_rodata_start __section(".rodata.._start");
const u8 __fips140_rodata_end __section(".rodata.._end");
/*
* We need this little detour to prevent Clang from detecting out of bounds
* accesses to __fips140_text_start and __fips140_rodata_start, which only exist
* to delineate the section, and so their sizes are not relevant to us.
*/
const u32 *__initcall_start = &__initcall_start_marker;
const u8 *__text_start = &__fips140_text_start;
const u8 *__rodata_start = &__fips140_rodata_start;
static const char fips140_algorithms[][22] __initconst = {
"aes",
"gcm(aes)",
"ecb(aes)",
"cbc(aes)",
"ctr(aes)",
"xts(aes)",
"hmac(sha1)",
"hmac(sha224)",
"hmac(sha256)",
"hmac(sha384)",
"hmac(sha512)",
"sha1",
"sha224",
"sha256",
"sha384",
"sha512",
"drbg_nopr_ctr_aes256",
"drbg_nopr_ctr_aes192",
"drbg_nopr_ctr_aes128",
"drbg_nopr_hmac_sha512",
"drbg_nopr_hmac_sha384",
"drbg_nopr_hmac_sha256",
"drbg_nopr_hmac_sha1",
"drbg_nopr_sha512",
"drbg_nopr_sha384",
"drbg_nopr_sha256",
"drbg_nopr_sha1",
"drbg_pr_ctr_aes256",
"drbg_pr_ctr_aes192",
"drbg_pr_ctr_aes128",
"drbg_pr_hmac_sha512",
"drbg_pr_hmac_sha384",
"drbg_pr_hmac_sha256",
"drbg_pr_hmac_sha1",
"drbg_pr_sha512",
"drbg_pr_sha384",
"drbg_pr_sha256",
"drbg_pr_sha1",
};
static bool __init is_fips140_algo(struct crypto_alg *alg)
{
int i;
/*
* All software algorithms are synchronous, hardware algorithms must
* be covered by their own FIPS 140 certification.
*/
if (alg->cra_flags & CRYPTO_ALG_ASYNC)
return false;
for (i = 0; i < ARRAY_SIZE(fips140_algorithms); i++)
if (!strcmp(alg->cra_name, fips140_algorithms[i]))
return true;
return false;
}
static LIST_HEAD(unchecked_fips140_algos);
static void __init unregister_existing_fips140_algos(void)
{
struct crypto_alg *alg, *tmp;
LIST_HEAD(remove_list);
LIST_HEAD(spawns);
down_write(&crypto_alg_sem);
/*
* Find all registered algorithms that we care about, and move them to
* a private list so that they are no longer exposed via the algo
* lookup API. Subsequently, we will unregister them if they are not in
* active use. If they are, we cannot simply remove them but we can
* adapt them later to use our integrity checked backing code.
*/
list_for_each_entry_safe(alg, tmp, &crypto_alg_list, cra_list) {
if (is_fips140_algo(alg)) {
if (refcount_read(&alg->cra_refcnt) == 1) {
/*
* This algorithm is not currently in use, but
* there may be template instances holding
* references to it via spawns. So let's tear
* it down like crypto_unregister_alg() would,
* but without releasing the lock, to prevent
* races with concurrent TFM allocations.
*/
alg->cra_flags |= CRYPTO_ALG_DEAD;
list_move(&alg->cra_list, &remove_list);
crypto_remove_spawns(alg, &spawns, NULL);
} else {
/*
* This algorithm is live, i.e., there are TFMs
* allocated that rely on it for its crypto
* transformations. We will swap these out
* later with integrity checked versions.
*/
list_move(&alg->cra_list,
&unchecked_fips140_algos);
}
}
}
/*
* We haven't taken a reference to the algorithms on the remove_list,
* so technically, we may be competing with a concurrent invocation of
* crypto_unregister_alg() here. Fortunately, crypto_unregister_alg()
* just gives up with a warning if the algo that is being unregistered
* has already disappeared, so this happens to be safe. That does mean
* we need to hold on to the lock, to ensure that the algo is either on
* the list or it is not, and not in some limbo state.
*/
crypto_remove_final(&remove_list);
crypto_remove_final(&spawns);
up_write(&crypto_alg_sem);
}
static void __init unapply_text_relocations(void *section, int section_size,
const Elf64_Rela *rela, int numrels)
{
while (numrels--) {
u32 *place = (u32 *)(section + rela->r_offset);
BUG_ON(rela->r_offset >= section_size);
switch (ELF64_R_TYPE(rela->r_info)) {
#ifdef CONFIG_ARM64
case R_AARCH64_JUMP26:
case R_AARCH64_CALL26:
*place &= ~GENMASK(25, 0);
break;
case R_AARCH64_ADR_PREL_LO21:
case R_AARCH64_ADR_PREL_PG_HI21:
case R_AARCH64_ADR_PREL_PG_HI21_NC:
*place &= ~(GENMASK(30, 29) | GENMASK(23, 5));
break;
case R_AARCH64_ADD_ABS_LO12_NC:
case R_AARCH64_LDST8_ABS_LO12_NC:
case R_AARCH64_LDST16_ABS_LO12_NC:
case R_AARCH64_LDST32_ABS_LO12_NC:
case R_AARCH64_LDST64_ABS_LO12_NC:
case R_AARCH64_LDST128_ABS_LO12_NC:
*place &= ~GENMASK(21, 10);
break;
default:
pr_err("unhandled relocation type %llu\n",
ELF64_R_TYPE(rela->r_info));
BUG();
#else
#error
#endif
}
rela++;
}
}
static void __init unapply_rodata_relocations(void *section, int section_size,
const Elf64_Rela *rela, int numrels)
{
while (numrels--) {
void *place = section + rela->r_offset;
BUG_ON(rela->r_offset >= section_size);
switch (ELF64_R_TYPE(rela->r_info)) {
#ifdef CONFIG_ARM64
case R_AARCH64_ABS64:
*(u64 *)place = 0;
break;
default:
pr_err("unhandled relocation type %llu\n",
ELF64_R_TYPE(rela->r_info));
BUG();
#else
#error
#endif
}
rela++;
}
}
static bool __init check_fips140_module_hmac(void)
{
SHASH_DESC_ON_STACK(desc, dontcare);
u8 digest[SHA256_DIGEST_SIZE];
void *textcopy, *rodatacopy;
int textsize, rodatasize;
int err;
textsize = &__fips140_text_end - &__fips140_text_start;
rodatasize = &__fips140_rodata_end - &__fips140_rodata_start;
pr_warn("text size : 0x%x\n", textsize);
pr_warn("rodata size: 0x%x\n", rodatasize);
textcopy = kmalloc(textsize + rodatasize, GFP_KERNEL);
if (!textcopy) {
pr_err("Failed to allocate memory for copy of .text\n");
return false;
}
rodatacopy = textcopy + textsize;
memcpy(textcopy, __text_start, textsize);
memcpy(rodatacopy, __rodata_start, rodatasize);
// apply the relocations in reverse on the copies of .text and .rodata
unapply_text_relocations(textcopy, textsize,
__this_module.arch.text_relocations,
__this_module.arch.num_text_relocations);
unapply_rodata_relocations(rodatacopy, rodatasize,
__this_module.arch.rodata_relocations,
__this_module.arch.num_rodata_relocations);
kfree(__this_module.arch.text_relocations);
kfree(__this_module.arch.rodata_relocations);
desc->tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
if (IS_ERR(desc->tfm)) {
pr_err("failed to allocate hmac tfm (%ld)\n", PTR_ERR(desc->tfm));
kfree(textcopy);
return false;
}
pr_warn("using '%s' for integrity check\n",
crypto_shash_driver_name(desc->tfm));
err = crypto_shash_setkey(desc->tfm, fips140_integ_hmac_key,
strlen(fips140_integ_hmac_key)) ?:
crypto_shash_init(desc) ?:
crypto_shash_update(desc, textcopy, textsize) ?:
crypto_shash_finup(desc, rodatacopy, rodatasize, digest);
crypto_free_shash(desc->tfm);
kfree(textcopy);
if (err) {
pr_err("failed to calculate hmac shash (%d)\n", err);
return false;
}
if (memcmp(digest, fips140_integ_hmac_digest, sizeof(digest))) {
pr_err("provided_digest : %*phN\n", (int)sizeof(digest),
fips140_integ_hmac_digest);
pr_err("calculated digest: %*phN\n", (int)sizeof(digest),
digest);
return false;
}
return true;
}
static bool __init update_live_fips140_algos(void)
{
struct crypto_alg *alg, *new_alg, *tmp;
/*
* Find all algorithms that we could not unregister the last time
* around, due to the fact that they were already in use.
*/
down_write(&crypto_alg_sem);
list_for_each_entry_safe(alg, tmp, &unchecked_fips140_algos, cra_list) {
/*
* Take this algo off the list before releasing the lock. This
* ensures that a concurrent invocation of
* crypto_unregister_alg() observes a consistent state, i.e.,
* the algo is still on the list, and crypto_unregister_alg()
* will release it, or it is not, and crypto_unregister_alg()
* will issue a warning but ignore this condition otherwise.
*/
list_del_init(&alg->cra_list);
up_write(&crypto_alg_sem);
/*
* Grab the algo that will replace the live one.
* Note that this will instantiate template based instances as
* well, as long as their driver name uses the conventional
* pattern of "template(algo)". In this case, we are relying on
* the fact that the templates carried by this module will
* supersede the builtin ones, due to the fact that they were
* registered later, and therefore appear first in the linked
* list. For example, "hmac(sha1-ce)" constructed using the
* builtin hmac template and the builtin SHA1 driver will be
* superseded by the integrity checked versions of HMAC and
* SHA1-ce carried in this module.
*
* Note that this takes a reference to the new algorithm which
* will never get released. This is intentional: once we copy
* the function pointers from the new algo into the old one, we
* cannot drop the new algo unless we are sure that the old one
* has been released, and this is something we don't keep track
* of at the moment.
*/
new_alg = crypto_alg_mod_lookup(alg->cra_driver_name,
alg->cra_flags & CRYPTO_ALG_TYPE_MASK,
CRYPTO_ALG_TYPE_MASK | CRYPTO_NOLOAD);
if (IS_ERR(new_alg)) {
pr_crit("Failed to allocate '%s' for updating live algo (%ld)\n",
alg->cra_driver_name, PTR_ERR(new_alg));
return false;
}
/*
* The FIPS module's algorithms are expected to be built from
* the same source code as the in-kernel ones so that they are
* fully compatible. In general, there's no way to verify full
* compatibility at runtime, but we can at least verify that
* the algorithm properties match.
*/
if (alg->cra_ctxsize != new_alg->cra_ctxsize ||
alg->cra_alignmask != new_alg->cra_alignmask) {
pr_crit("Failed to update live algo '%s' due to mismatch:\n"
"cra_ctxsize : %u vs %u\n"
"cra_alignmask : 0x%x vs 0x%x\n",
alg->cra_driver_name,
alg->cra_ctxsize, new_alg->cra_ctxsize,
alg->cra_alignmask, new_alg->cra_alignmask);
return false;
}
/*
* Update the name and priority so the algorithm stands out as
* one that was updated in order to comply with FIPS140, and
* that it is not the preferred version for further use.
*/
strlcat(alg->cra_name, "+orig", CRYPTO_MAX_ALG_NAME);
alg->cra_priority = 0;
switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
struct aead_alg *old_aead, *new_aead;
struct skcipher_alg *old_skcipher, *new_skcipher;
struct shash_alg *old_shash, *new_shash;
struct rng_alg *old_rng, *new_rng;
case CRYPTO_ALG_TYPE_CIPHER:
alg->cra_u.cipher = new_alg->cra_u.cipher;
break;
case CRYPTO_ALG_TYPE_AEAD:
old_aead = container_of(alg, struct aead_alg, base);
new_aead = container_of(new_alg, struct aead_alg, base);
old_aead->setkey = new_aead->setkey;
old_aead->setauthsize = new_aead->setauthsize;
old_aead->encrypt = new_aead->encrypt;
old_aead->decrypt = new_aead->decrypt;
old_aead->init = new_aead->init;
old_aead->exit = new_aead->exit;
break;
case CRYPTO_ALG_TYPE_SKCIPHER:
old_skcipher = container_of(alg, struct skcipher_alg, base);
new_skcipher = container_of(new_alg, struct skcipher_alg, base);
old_skcipher->setkey = new_skcipher->setkey;
old_skcipher->encrypt = new_skcipher->encrypt;
old_skcipher->decrypt = new_skcipher->decrypt;
old_skcipher->init = new_skcipher->init;
old_skcipher->exit = new_skcipher->exit;
break;
case CRYPTO_ALG_TYPE_SHASH:
old_shash = container_of(alg, struct shash_alg, base);
new_shash = container_of(new_alg, struct shash_alg, base);
old_shash->init = new_shash->init;
old_shash->update = new_shash->update;
old_shash->final = new_shash->final;
old_shash->finup = new_shash->finup;
old_shash->digest = new_shash->digest;
old_shash->export = new_shash->export;
old_shash->import = new_shash->import;
old_shash->setkey = new_shash->setkey;
old_shash->init_tfm = new_shash->init_tfm;
old_shash->exit_tfm = new_shash->exit_tfm;
break;
case CRYPTO_ALG_TYPE_RNG:
old_rng = container_of(alg, struct rng_alg, base);
new_rng = container_of(new_alg, struct rng_alg, base);
old_rng->generate = new_rng->generate;
old_rng->seed = new_rng->seed;
old_rng->set_ent = new_rng->set_ent;
break;
default:
/*
* This should never happen: every item on the
* fips140_algorithms list should match one of the
* cases above, so if we end up here, something is
* definitely wrong.
*/
pr_crit("Unexpected type %u for algo %s, giving up ...\n",
alg->cra_flags & CRYPTO_ALG_TYPE_MASK,
alg->cra_driver_name);
return false;
}
/*
* Move the algorithm back to the algorithm list, so it is
* visible in /proc/crypto et al.
*/
down_write(&crypto_alg_sem);
list_add_tail(&alg->cra_list, &crypto_alg_list);
}
up_write(&crypto_alg_sem);
return true;
}
static void fips140_sha256(void *p, const u8 *data, unsigned int len, u8 *out,
int *hook_inuse)
{
sha256(data, len, out);
*hook_inuse = 1;
}
static void fips140_aes_expandkey(void *p, struct crypto_aes_ctx *ctx,
const u8 *in_key, unsigned int key_len,
int *err)
{
*err = aes_expandkey(ctx, in_key, key_len);
}
static void fips140_aes_encrypt(void *priv, const struct crypto_aes_ctx *ctx,
u8 *out, const u8 *in, int *hook_inuse)
{
aes_encrypt(ctx, out, in);
*hook_inuse = 1;
}
static void fips140_aes_decrypt(void *priv, const struct crypto_aes_ctx *ctx,
u8 *out, const u8 *in, int *hook_inuse)
{
aes_decrypt(ctx, out, in);
*hook_inuse = 1;
}
static bool update_fips140_library_routines(void)
{
int ret;
ret = register_trace_android_vh_sha256(fips140_sha256, NULL) ?:
register_trace_android_vh_aes_expandkey(fips140_aes_expandkey, NULL) ?:
register_trace_android_vh_aes_encrypt(fips140_aes_encrypt, NULL) ?:
register_trace_android_vh_aes_decrypt(fips140_aes_decrypt, NULL);
return ret == 0;
}
/*
* Initialize the FIPS 140 module.
*
* Note: this routine iterates over the contents of the initcall section, which
* consists of an array of function pointers that was emitted by the linker
* rather than the compiler. This means that these function pointers lack the
* usual CFI stubs that the compiler emits when CFI codegen is enabled. So
* let's disable CFI locally when handling the initcall array, to avoid
* surprises.
*/
int __init __attribute__((__no_sanitize__("cfi"))) fips140_init(void)
{
const u32 *initcall;
pr_info("Loading FIPS 140 module\n");
unregister_existing_fips140_algos();
/* iterate over all init routines present in this module and call them */
for (initcall = __initcall_start + 1;
initcall < &__initcall_end_marker;
initcall++) {
int (*init)(void) = offset_to_ptr(initcall);
init();
}
if (!update_live_fips140_algos())
goto panic;
if (!update_fips140_library_routines())
goto panic;
/*
* Wait until all tasks have at least been scheduled once and preempted
* voluntarily. This ensures that none of the superseded algorithms that
* were already in use will still be live.
*/
synchronize_rcu_tasks();
/* insert self tests here */
/*
* It may seem backward to perform the integrity check last, but this
* is intentional: the check itself uses hmac(sha256) which is one of
* the algorithms that are replaced with versions from this module, and
* the integrity check must use the replacement version.
*/
if (!check_fips140_module_hmac()) {
pr_crit("FIPS 140 integrity check failed -- giving up!\n");
goto panic;
}
pr_info("FIPS 140 integrity check successful\n");
pr_info("FIPS 140 module successfully loaded\n");
return 0;
panic:
panic("FIPS 140 module load failure");
}
module_init(fips140_init);
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
MODULE_LICENSE("GPL v2");
/*
* Crypto-related helper functions, reproduced here so that they will be
* covered by the FIPS 140 integrity check.
*
* Non-cryptographic helper functions such as memcpy() can be excluded from the
* FIPS module, but there is ambiguity about other helper functions like
* __crypto_xor() and crypto_inc() which aren't cryptographic by themselves,
* but are more closely associated with cryptography than e.g. memcpy(). To
* err on the side of caution, we include copies of these in the FIPS module.
*/
void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len)
{
while (len >= 8) {
*(u64 *)dst = *(u64 *)src1 ^ *(u64 *)src2;
dst += 8;
src1 += 8;
src2 += 8;
len -= 8;
}
while (len >= 4) {
*(u32 *)dst = *(u32 *)src1 ^ *(u32 *)src2;
dst += 4;
src1 += 4;
src2 += 4;
len -= 4;
}
while (len >= 2) {
*(u16 *)dst = *(u16 *)src1 ^ *(u16 *)src2;
dst += 2;
src1 += 2;
src2 += 2;
len -= 2;
}
while (len--)
*dst++ = *src1++ ^ *src2++;
}
void crypto_inc(u8 *a, unsigned int size)
{
a += size;
while (size--)
if (++*--a)
break;
}
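As a quick sanity check of the carry behaviour of crypto_inc() above: the byte string is treated as a big-endian counter, with the carry propagating from the last byte toward the first. A minimal sketch, assuming kernel types:

	u8 ctr[4] = { 0x00, 0x00, 0x00, 0xff };

	crypto_inc(ctr, sizeof(ctr));	/* ctr is now { 0x00, 0x00, 0x01, 0x00 } */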

crypto/fips140_gen_hmac.c (new file, 129 lines)

@@ -0,0 +1,129 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2021 - Google LLC
* Author: Ard Biesheuvel <ardb@google.com>
*
* This is a host tool that is intended to be used to take the HMAC digest of
* the .text and .rodata sections of the fips140.ko module, and store it inside
* the module. The module will perform an integrity selfcheck at module_init()
* time, by recalculating the digest and comparing it with the value calculated
* here.
*
* Note that the peculiar way an HMAC is being used as a digest with a public
* key rather than as a symmetric key signature is mandated by FIPS 140-2.
*/
#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <openssl/hmac.h>
static Elf64_Ehdr *ehdr;
static Elf64_Shdr *shdr;
static int num_shdr;
static const char *strtab;
static Elf64_Sym *syms;
static int num_syms;
static Elf64_Shdr *find_symtab_section(void)
{
int i;
for (i = 0; i < num_shdr; i++)
if (shdr[i].sh_type == SHT_SYMTAB)
return &shdr[i];
return NULL;
}
static void *get_sym_addr(const char *sym_name)
{
int i;
for (i = 0; i < num_syms; i++)
if (!strcmp(strtab + syms[i].st_name, sym_name))
return (void *)ehdr + shdr[syms[i].st_shndx].sh_offset +
syms[i].st_value;
return NULL;
}
static void hmac_section(HMAC_CTX *hmac, const char *start, const char *end)
{
void *start_addr = get_sym_addr(start);
void *end_addr = get_sym_addr(end);
HMAC_Update(hmac, start_addr, end_addr - start_addr);
}
int main(int argc, char **argv)
{
Elf64_Shdr *symtab_shdr;
const char *hmac_key;
unsigned char *dg;
unsigned int dglen;
struct stat stat;
HMAC_CTX *hmac;
int fd, ret;
if (argc < 2) {
fprintf(stderr, "file argument missing\n");
exit(EXIT_FAILURE);
}
fd = open(argv[1], O_RDWR);
if (fd < 0) {
fprintf(stderr, "failed to open %s\n", argv[1]);
exit(EXIT_FAILURE);
}
ret = fstat(fd, &stat);
if (ret < 0) {
fprintf(stderr, "failed to stat() %s\n", argv[1]);
exit(EXIT_FAILURE);
}
ehdr = mmap(0, stat.st_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
if (ehdr == MAP_FAILED) {
fprintf(stderr, "failed to mmap() %s\n", argv[1]);
exit(EXIT_FAILURE);
}
shdr = (void *)ehdr + ehdr->e_shoff;
num_shdr = ehdr->e_shnum;
symtab_shdr = find_symtab_section();
syms = (void *)ehdr + symtab_shdr->sh_offset;
num_syms = symtab_shdr->sh_size / sizeof(Elf64_Sym);
strtab = (void *)ehdr + shdr[symtab_shdr->sh_link].sh_offset;
hmac_key = get_sym_addr("fips140_integ_hmac_key");
if (!hmac_key) {
fprintf(stderr, "failed to locate HMAC key in binary\n");
exit(EXIT_FAILURE);
}
dg = get_sym_addr("fips140_integ_hmac_digest");
if (!dg) {
fprintf(stderr, "failed to locate HMAC digest in binary\n");
exit(EXIT_FAILURE);
}
hmac = HMAC_CTX_new();
HMAC_Init_ex(hmac, hmac_key, strlen(hmac_key), EVP_sha256(), NULL);
hmac_section(hmac, "__fips140_text_start", "__fips140_text_end");
hmac_section(hmac, "__fips140_rodata_start", "__fips140_rodata_end");
HMAC_Final(hmac, dg, &dglen);
close(fd);
return 0;
}


@@ -20,12 +20,24 @@
static const struct crypto_type crypto_shash_type;
int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)
static int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)
{
return -ENOSYS;
}
EXPORT_SYMBOL_GPL(shash_no_setkey);
/*
* Check whether an shash algorithm has a setkey function.
*
* For CFI compatibility, this must not be an inline function. This is because
* when CFI is enabled, modules won't get the same address for shash_no_setkey
* (if it were exported, which inlining would require) as the core kernel will.
*/
bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
{
return alg->setkey != shash_no_setkey;
}
EXPORT_SYMBOL_GPL(crypto_shash_alg_has_setkey);
static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)


@@ -5046,7 +5046,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
uint32_t enable;
if (copy_from_user(&enable, ubuf, sizeof(enable))) {
ret = -EINVAL;
ret = -EFAULT;
goto err;
}
binder_inner_proc_lock(proc);


@@ -60,6 +60,9 @@
#include <trace/hooks/v4l2core.h>
#include <trace/hooks/v4l2mc.h>
#include <trace/hooks/scmi.h>
#include <trace/hooks/user.h>
#include <trace/hooks/cpuidle_psci.h>
#include <trace/hooks/fips140.h>
/*
* Export tracepoints that act as a bare tracehook (ie: have no trace event
@@ -314,4 +317,12 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_scmi_timeout_sync);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_find_new_ilb);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_force_compatible_pre);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_force_compatible_post);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_uid);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_user);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_balance_anon_file_reclaim);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpuidle_psci_enter);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpuidle_psci_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sha256);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_aes_expandkey);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_aes_encrypt);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_aes_decrypt);


@@ -866,25 +866,34 @@ EXPORT_SYMBOL_GPL(fwnode_remove_software_node);
/**
* device_add_software_node - Assign software node to a device
* @dev: The device the software node is meant for.
* @swnode: The software node.
* @node: The software node.
*
* This function will register @swnode and make it the secondary firmware node
* pointer of @dev. If @dev has no primary node, then @swnode will become the primary
* node.
* This function will make @node the secondary firmware node pointer of @dev. If
* @dev has no primary node, then @node will become the primary node. The
* function will register @node automatically if it wasn't already registered.
*/
int device_add_software_node(struct device *dev, const struct software_node *swnode)
int device_add_software_node(struct device *dev, const struct software_node *node)
{
struct swnode *swnode;
int ret;
/* Only one software node per device. */
if (dev_to_swnode(dev))
return -EBUSY;
ret = software_node_register(swnode);
if (ret)
return ret;
swnode = software_node_to_swnode(node);
if (swnode) {
kobject_get(&swnode->kobj);
} else {
ret = software_node_register(node);
if (ret)
return ret;
set_secondary_fwnode(dev, software_node_fwnode(swnode));
swnode = software_node_to_swnode(node);
}
set_secondary_fwnode(dev, &swnode->fwnode);
software_node_notify(dev, KOBJ_ADD);
return 0;
}
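The reworked kernel-doc above implies a caller no longer has to register the node itself before attaching it. A hypothetical usage sketch (identifiers are invented for the example):

#include <linux/property.h>

static const struct property_entry demo_props[] = {
	PROPERTY_ENTRY_U32("example-prop", 1),
	{ }
};

static const struct software_node demo_swnode = {
	.properties = demo_props,
};

static int demo_attach_swnode(struct device *dev)
{
	/*
	 * Registers demo_swnode on first use and makes it dev's secondary
	 * firmware node (or primary, if dev has none).
	 */
	return device_add_software_node(dev, &demo_swnode);
}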
@@ -921,8 +930,8 @@ int software_node_notify(struct device *dev, unsigned long action)
switch (action) {
case KOBJ_ADD:
ret = sysfs_create_link(&dev->kobj, &swnode->kobj,
"software_node");
ret = sysfs_create_link_nowarn(&dev->kobj, &swnode->kobj,
"software_node");
if (ret)
break;


@@ -25,6 +25,7 @@
#include <linux/string.h>
#include <asm/cpuidle.h>
#include <trace/hooks/cpuidle_psci.h>
#include "cpuidle-psci.h"
#include "dt_idle_states.h"
@@ -67,6 +68,8 @@ static int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
if (ret)
return -1;
trace_android_vh_cpuidle_psci_enter(dev, s2idle);
/* Do runtime PM to manage a hierarchical CPU topology. */
rcu_irq_enter_irqson();
if (s2idle)
@@ -88,6 +91,8 @@ static int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
pm_runtime_get_sync(pd_dev);
rcu_irq_exit_irqson();
trace_android_vh_cpuidle_psci_exit(dev, s2idle);
cpu_pm_exit();
/* Clear the domain state to start fresh when back from idle. */


@@ -20,6 +20,7 @@
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
struct cma_heap {


@@ -471,7 +471,7 @@ EXPORT_SYMBOL_GPL(gpiochip_line_is_valid);
static void gpiodevice_release(struct device *dev)
{
struct gpio_device *gdev = dev_get_drvdata(dev);
struct gpio_device *gdev = container_of(dev, struct gpio_device, dev);
unsigned long flags;
spin_lock_irqsave(&gpio_lock, flags);
@@ -614,7 +614,6 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
goto err_free_ida;
device_initialize(&gdev->dev);
dev_set_drvdata(&gdev->dev, gdev);
if (gc->parent && gc->parent->driver)
gdev->owner = gc->parent->driver->owner;
else if (gc->owner)
@@ -4397,7 +4396,8 @@ static int __init gpiolib_dev_init(void)
return ret;
}
if (driver_register(&gpio_stub_drv) < 0) {
ret = driver_register(&gpio_stub_drv);
if (ret < 0) {
pr_err("gpiolib: could not register GPIO stub driver\n");
bus_unregister(&gpio_bus_type);
return ret;


@@ -71,7 +71,7 @@ static ssize_t format_attr_contextid_show(struct device *dev,
{
int pid_fmt = ETM_OPT_CTXTID;
#if defined(CONFIG_CORESIGHT_SOURCE_ETM4X)
#if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM4X)
pid_fmt = is_kernel_in_hyp_mode() ? ETM_OPT_CTXTID2 : ETM_OPT_CTXTID;
#endif
return sprintf(page, "config:%d\n", pid_fmt);


@@ -688,13 +688,6 @@ static const struct iommu_ops mtk_iommu_ops = {
static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
{
u32 regval;
int ret;
ret = clk_prepare_enable(data->bclk);
if (ret) {
dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
return ret;
}
if (data->plat_data->m4u_plat == M4U_MT8173) {
regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
@@ -760,7 +753,6 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
dev_name(data->dev), (void *)data)) {
writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
clk_disable_unprepare(data->bclk);
dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
return -ENODEV;
}
@@ -977,14 +969,19 @@ static int __maybe_unused mtk_iommu_runtime_resume(struct device *dev)
void __iomem *base = data->base;
int ret;
/* Avoid first resume to affect the default value of registers below. */
if (!m4u_dom)
return 0;
ret = clk_prepare_enable(data->bclk);
if (ret) {
dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret);
return ret;
}
/*
* Upon first resume, only enable the clk and return, since the values of the
* registers are not yet set.
*/
if (!m4u_dom)
return 0;
writel_relaxed(reg->wr_len_ctrl, base + REG_MMU_WR_LEN_CTRL);
writel_relaxed(reg->misc_ctrl, base + REG_MMU_MISC_CTRL);
writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);


@@ -1267,10 +1267,11 @@ static inline void gic_cpu_pm_init(void) { }
#endif /* CONFIG_CPU_PM */
#ifdef CONFIG_PM
static void gic_resume(void)
void gic_resume(void)
{
trace_android_vh_gic_resume(gic_data.domain, gic_data.dist_base);
}
EXPORT_SYMBOL_GPL(gic_resume);
static struct syscore_ops gic_syscore_ops = {
.resume = gic_resume,
@@ -1283,6 +1284,7 @@ static void gic_syscore_init(void)
#else
static inline void gic_syscore_init(void) { }
void gic_resume(void) { }
#endif


@@ -211,6 +211,7 @@ config VIDEO_MEDIATEK_JPEG
depends on MTK_IOMMU_V1 || MTK_IOMMU || COMPILE_TEST
depends on VIDEO_DEV && VIDEO_V4L2
depends on ARCH_MEDIATEK || COMPILE_TEST
depends on MTK_SMI || (COMPILE_TEST && MTK_SMI=n)
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
help
@@ -238,6 +239,7 @@ config VIDEO_MEDIATEK_MDP
depends on MTK_IOMMU || COMPILE_TEST
depends on VIDEO_DEV && VIDEO_V4L2
depends on ARCH_MEDIATEK || COMPILE_TEST
depends on MTK_SMI || (COMPILE_TEST && MTK_SMI=n)
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
select VIDEO_MEDIATEK_VPU
@@ -258,6 +260,7 @@ config VIDEO_MEDIATEK_VCODEC
# our dependencies, to avoid missing symbols during link.
depends on VIDEO_MEDIATEK_VPU || !VIDEO_MEDIATEK_VPU
depends on MTK_SCP || !MTK_SCP
depends on MTK_SMI || (COMPILE_TEST && MTK_SMI=n)
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
select VIDEO_MEDIATEK_VCODEC_VPU if VIDEO_MEDIATEK_VPU


@@ -853,6 +853,7 @@ config REGULATOR_QCOM_RPM
config REGULATOR_QCOM_RPMH
tristate "Qualcomm Technologies, Inc. RPMh regulator driver"
depends on QCOM_RPMH || COMPILE_TEST
depends on QCOM_COMMAND_DB || !QCOM_COMMAND_DB
help
This driver supports control of PMIC regulators via the RPMh hardware
block found on Qualcomm Technologies Inc. SoCs. RPMh regulator


@@ -858,6 +858,7 @@ struct dwc3_trb {
* @hwparams6: GHWPARAMS6
* @hwparams7: GHWPARAMS7
* @hwparams8: GHWPARAMS8
* @hwparams9: GHWPARAMS9
*/
struct dwc3_hwparams {
u32 hwparams0;


@@ -167,6 +167,7 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev)
dwc3_np = of_get_child_by_name(node, "dwc3");
if (!dwc3_np) {
err = -ENODEV;
dev_err(dev, "failed to find dwc3 core child\n");
goto disable_rpm;
}


@@ -239,7 +239,7 @@ find_mux:
dev = class_find_device(&typec_mux_class, NULL, fwnode,
mux_fwnode_match);
return dev ? to_typec_switch(dev) : ERR_PTR(-EPROBE_DEFER);
return dev ? to_typec_mux(dev) : ERR_PTR(-EPROBE_DEFER);
}
/**


@@ -1253,6 +1253,7 @@ err_unregister:
}
err_reset:
memset(&ucsi->cap, 0, sizeof(ucsi->cap));
ucsi_reset_ppm(ucsi);
err:
return ret;


@@ -75,13 +75,7 @@ void crypto_unregister_ahashes(struct ahash_alg *algs, int count);
int ahash_register_instance(struct crypto_template *tmpl,
struct ahash_instance *inst);
int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen);
static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
{
return alg->setkey != shash_no_setkey;
}
bool crypto_shash_alg_has_setkey(struct shash_alg *alg);
static inline bool crypto_shash_alg_needs_key(struct shash_alg *alg)
{


@@ -708,6 +708,8 @@ static inline bool gic_enable_sre(void)
return !!(val & ICC_SRE_EL1_SRE);
}
void gic_resume(void);
#endif
#endif


@@ -13,9 +13,9 @@
* TODO - io.h is included in NVHE files and these tracepoints are getting
* enabled for NVHE too. To avoid enabling these tracepoints in NVHE, the
* condition below is introduced.
* !(defined(__KVM_NVHE_HYPERVISOR__))
* !(defined(__DISABLE_TRACE_MMIO__))
*/
#if IS_ENABLED(CONFIG_TRACE_MMIO_ACCESS) && !(defined(__KVM_NVHE_HYPERVISOR__))
#if IS_ENABLED(CONFIG_TRACE_MMIO_ACCESS) && !(defined(__DISABLE_TRACE_MMIO__))
DECLARE_TRACEPOINT(rwmmio_write);
DECLARE_TRACEPOINT(rwmmio_read);
DECLARE_TRACEPOINT(rwmmio_post_read);


@@ -460,7 +460,7 @@ static inline void memblock_free_late(phys_addr_t base, phys_addr_t size)
/*
* Set the allocation direction to bottom-up or top-down.
*/
static inline void memblock_set_bottom_up(bool enable)
static inline __init void memblock_set_bottom_up(bool enable)
{
memblock.bottom_up = enable;
}
@@ -470,7 +470,7 @@ static inline void memblock_set_bottom_up(bool enable)
* if this is true, that said, memblock will allocate memory
* in bottom-up direction.
*/
static inline bool memblock_bottom_up(void)
static inline __init bool memblock_bottom_up(void)
{
return memblock.bottom_up;
}


@@ -488,7 +488,7 @@ fwnode_create_software_node(const struct property_entry *properties,
const struct fwnode_handle *parent);
void fwnode_remove_software_node(struct fwnode_handle *fwnode);
int device_add_software_node(struct device *dev, const struct software_node *swnode);
int device_add_software_node(struct device *dev, const struct software_node *node);
void device_remove_software_node(struct device *dev);
#endif /* _LINUX_PROPERTY_H_ */


@@ -85,6 +85,7 @@ extern bool shmem_huge_enabled(struct vm_area_struct *vma);
extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
pgoff_t start, pgoff_t end);
extern void shmem_mark_page_lazyfree(struct page *page);
/* Flag allocation requirements to shmem_getpage */
enum sgp_type {


@@ -363,6 +363,7 @@ extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_file_page(struct page *page);
extern void deactivate_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
extern void mark_page_lazyfree_movetail(struct page *page);
extern void swap_setup(void);
extern void __lru_cache_add_inactive_or_unevictable(struct page *page,


@@ -1008,14 +1008,21 @@ struct survey_info {
* @sae_pwd: password for SAE authentication (for devices supporting SAE
* offload)
* @sae_pwd_len: length of SAE password (for devices supporting SAE offload)
* @sae_pwe: The mechanisms allowed for SAE PWE derivation
* NL80211_SAE_PWE_UNSPECIFIED: Not-specified, used to indicate userspace
* did not specify any preference. The driver should follow its
* internal policy in such a scenario.
* NL80211_SAE_PWE_HUNT_AND_PECK: Allow hunting-and-pecking loop only
* NL80211_SAE_PWE_HASH_TO_ELEMENT: Allow hash-to-element only
* NL80211_SAE_PWE_BOTH: Allow either hunting-and-pecking loop
* or hash-to-element
* @sae_pwe: The mechanisms allowed for SAE PWE derivation:
*
* NL80211_SAE_PWE_UNSPECIFIED
* Not-specified, used to indicate userspace did not specify any
* preference. The driver should follow its internal policy in
* such a scenario.
*
* NL80211_SAE_PWE_HUNT_AND_PECK
* Allow hunting-and-pecking loop only
*
* NL80211_SAE_PWE_HASH_TO_ELEMENT
* Allow hash-to-element only
*
* NL80211_SAE_PWE_BOTH
* Allow either hunting-and-pecking loop or hash-to-element
*/
struct cfg80211_crypto_settings {
u32 wpa_versions;


@@ -0,0 +1,25 @@
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM cpuidle_psci
#define TRACE_INCLUDE_PATH trace/hooks
#if !defined(_TRACE_HOOK_CPUIDLE_PSCI_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_HOOK_CPUIDLE_PSCI_H
#include <linux/tracepoint.h>
#include <trace/hooks/vendor_hooks.h>
/*
* The following tracepoints are not exported in tracefs and provide a
* mechanism for vendor modules to hook and extend functionality
*/
struct cpuidle_device;
DECLARE_HOOK(android_vh_cpuidle_psci_enter,
TP_PROTO(struct cpuidle_device *dev, bool s2idle),
TP_ARGS(dev, s2idle));
DECLARE_HOOK(android_vh_cpuidle_psci_exit,
TP_PROTO(struct cpuidle_device *dev, bool s2idle),
TP_ARGS(dev, s2idle));
#endif /* _TRACE_HOOK_CPUIDLE_PSCI_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

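For context, vendor modules attach to hooks like these through the register_trace_android_vh_*() helpers generated by DECLARE_HOOK(); a minimal sketch, assuming a hypothetical module and handler name:

/* assumes #include <linux/module.h> and <trace/hooks/cpuidle_psci.h> */
static void vh_psci_enter(void *unused, struct cpuidle_device *dev, bool s2idle)
{
	/* e.g. record the entry time for vendor-specific idle accounting */
}

static int __init my_vendor_mod_init(void)
{
	/* the second argument is opaque data passed back as the handler's first arg */
	return register_trace_android_vh_cpuidle_psci_enter(vh_psci_enter, NULL);
}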
View File

@@ -0,0 +1,51 @@
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM fips140
#define TRACE_INCLUDE_PATH trace/hooks
#if !defined(_TRACE_HOOK_FIPS140_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_HOOK_FIPS140_H
#include <linux/tracepoint.h>
#include <trace/hooks/vendor_hooks.h>
struct crypto_aes_ctx;
/*
* These hooks exist only for the benefit of the FIPS140 crypto module, which
* uses them to replace the underlying implementation with one that is
* integrity-checked as required by FIPS 140. No other uses are allowed or
* supported.
*/
DECLARE_HOOK(android_vh_sha256,
TP_PROTO(const u8 *data,
unsigned int len,
u8 *out,
int *hook_inuse),
TP_ARGS(data, len, out, hook_inuse));
DECLARE_HOOK(android_vh_aes_expandkey,
TP_PROTO(struct crypto_aes_ctx *ctx,
const u8 *in_key,
unsigned int key_len,
int *err),
TP_ARGS(ctx, in_key, key_len, err));
DECLARE_HOOK(android_vh_aes_encrypt,
TP_PROTO(const struct crypto_aes_ctx *ctx,
u8 *out,
const u8 *in,
int *hook_inuse),
TP_ARGS(ctx, out, in, hook_inuse));
DECLARE_HOOK(android_vh_aes_decrypt,
TP_PROTO(const struct crypto_aes_ctx *ctx,
u8 *out,
const u8 *in,
int *hook_inuse),
TP_ARGS(ctx, out, in, hook_inuse));
#endif /* _TRACE_HOOK_FIPS140_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

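To illustrate the contract these hooks imply: a handler services the call and reports that back through *hook_inuse (or *err for the expandkey hook), which is exactly what the lib/crypto call sites later in this change test for. A minimal, hypothetical sketch:

/* assumes #include <trace/hooks/fips140.h> inside the FIPS140 module */
static void fips140_sha256_hook(void *unused, const u8 *data, unsigned int len,
				u8 *out, int *hook_inuse)
{
	/* ... compute the digest with the integrity-checked implementation ... */
	*hook_inuse = 1;	/* tell lib/crypto/sha256.c the call was handled */
}

/* registered once at module init, e.g.:
 *	register_trace_android_vh_sha256(fips140_sha256_hook, NULL);
 */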
View File

@@ -0,0 +1,23 @@
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM user
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH trace/hooks
#if !defined(_TRACE_HOOK_USER_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_HOOK_USER_H
#include <linux/tracepoint.h>
#include <trace/hooks/vendor_hooks.h>
struct user_struct;
DECLARE_HOOK(android_vh_alloc_uid,
TP_PROTO(struct user_struct *user),
TP_ARGS(user));
DECLARE_HOOK(android_vh_free_user,
TP_PROTO(struct user_struct *up),
TP_ARGS(up));
#endif /* _TRACE_HOOK_USER_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

View File

@@ -48,7 +48,7 @@ struct idletimer_tg_info_v1 {
char label[MAX_IDLETIMER_LABEL_SIZE];
__u8 send_nl_msg; /* unused: for compatibility with Android */
__u8 send_nl_msg;
__u8 timer_type;
/* for kernel module internal use only */

View File

@@ -2352,7 +2352,6 @@ config TRIM_UNUSED_KSYMS
config UNUSED_KSYMS_WHITELIST
string "Whitelist of symbols to keep in ksymtab"
depends on TRIM_UNUSED_KSYMS
default "scripts/lto-used-symbols.txt" if LTO_CLANG
help
By default, all unused exported symbols will be un-exported from the
build when TRIM_UNUSED_KSYMS is selected.

View File

@@ -3336,7 +3336,7 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
rcu_read_unlock();
spin_unlock_irqrestore(&callback_lock, flags);
}
EXPORT_SYMBOL_GPL(cpuset_cpus_allowed);
/**
* cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
* @tsk: pointer to task_struct with which the scheduler is struggling

View File

@@ -45,7 +45,9 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_switch);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_waking);
#ifdef CONFIG_SCHEDSTATS
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_stat_sleep);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_stat_wait);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_stat_iowait);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_stat_blocked);

View File

@@ -20,6 +20,8 @@
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>
#include <trace/hooks/user.h>
/*
* userns count is 1 for root user, 1 for init_uts_ns,
* and 1 for... ?
@@ -139,6 +141,7 @@ static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
static void free_user(struct user_struct *up, unsigned long flags)
__releases(&uidhash_lock)
{
trace_android_vh_free_user(up);
uid_hash_remove(up);
spin_unlock_irqrestore(&uidhash_lock, flags);
kmem_cache_free(uid_cachep, up);
@@ -190,6 +193,7 @@ struct user_struct *alloc_uid(kuid_t uid)
new->uid = uid;
refcount_set(&new->__count, 1);
trace_android_vh_alloc_uid(new);
ratelimit_state_init(&new->ratelimit, HZ, 100);
ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE);

View File

@@ -7,6 +7,7 @@
#include <linux/crypto.h>
#include <linux/module.h>
#include <asm/unaligned.h>
#include <trace/hooks/fips140.h>
/*
* Emit the sbox as volatile const to prevent the compiler from doing
@@ -189,6 +190,13 @@ int aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
u32 rc, i, j;
int err;
#if defined(CONFIG_CRYPTO_FIPS140) && !defined(BUILD_FIPS140_KO)
err = -(MAX_ERRNO + 1);
trace_android_vh_aes_expandkey(ctx, in_key, key_len, &err);
if (err != -(MAX_ERRNO + 1))
return err;
#endif
err = aes_check_keylen(key_len);
if (err)
return err;
@@ -261,6 +269,13 @@ void aes_encrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in)
int rounds = 6 + ctx->key_length / 4;
u32 st0[4], st1[4];
int round;
#if defined(CONFIG_CRYPTO_FIPS140) && !defined(BUILD_FIPS140_KO)
int hook_inuse = 0;
trace_android_vh_aes_encrypt(ctx, out, in, &hook_inuse);
if (hook_inuse)
return;
#endif
st0[0] = ctx->key_enc[0] ^ get_unaligned_le32(in);
st0[1] = ctx->key_enc[1] ^ get_unaligned_le32(in + 4);
@@ -312,6 +327,13 @@ void aes_decrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in)
int rounds = 6 + ctx->key_length / 4;
u32 st0[4], st1[4];
int round;
#if defined(CONFIG_CRYPTO_FIPS140) && !defined(BUILD_FIPS140_KO)
int hook_inuse = 0;
trace_android_vh_aes_decrypt(ctx, out, in, &hook_inuse);
if (hook_inuse)
return;
#endif
st0[0] = ctx->key_dec[0] ^ get_unaligned_le32(in);
st0[1] = ctx->key_dec[1] ^ get_unaligned_le32(in + 4);

View File

@@ -17,6 +17,7 @@
#include <linux/string.h>
#include <crypto/sha.h>
#include <asm/unaligned.h>
#include <trace/hooks/fips140.h>
static inline u32 Ch(u32 x, u32 y, u32 z)
{
@@ -284,6 +285,14 @@ void sha256(const u8 *data, unsigned int len, u8 *out)
{
struct sha256_state sctx;
#if defined(CONFIG_CRYPTO_FIPS140) && !defined(BUILD_FIPS140_KO)
int hook_inuse = 0;
trace_android_vh_sha256(data, len, out, &hook_inuse);
if (hook_inuse)
return;
#endif
sha256_init(&sctx);
sha256_update(&sctx, data, len);
sha256_final(&sctx, out);

View File

@@ -817,20 +817,7 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
*/
void init_mem_debugging_and_hardening(void)
{
if (_init_on_alloc_enabled_early) {
if (page_poisoning_enabled())
pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
"will take precedence over init_on_alloc\n");
else
static_branch_enable(&init_on_alloc);
}
if (_init_on_free_enabled_early) {
if (page_poisoning_enabled())
pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
"will take precedence over init_on_free\n");
else
static_branch_enable(&init_on_free);
}
bool page_poisoning_requested = false;
#ifdef CONFIG_PAGE_POISONING
/*
@@ -839,10 +826,27 @@ void init_mem_debugging_and_hardening(void)
*/
if (page_poisoning_enabled() ||
(!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
debug_pagealloc_enabled()))
debug_pagealloc_enabled())) {
static_branch_enable(&_page_poisoning_enabled);
page_poisoning_requested = true;
}
#endif
if (_init_on_alloc_enabled_early) {
if (page_poisoning_requested)
pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
"will take precedence over init_on_alloc\n");
else
static_branch_enable(&init_on_alloc);
}
if (_init_on_free_enabled_early) {
if (page_poisoning_requested)
pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
"will take precedence over init_on_free\n");
else
static_branch_enable(&init_on_free);
}
#ifdef CONFIG_DEBUG_PAGEALLOC
if (!debug_pagealloc_enabled())
return;

View File

@@ -4284,3 +4284,9 @@ struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
#endif
}
EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
void shmem_mark_page_lazyfree(struct page *page)
{
mark_page_lazyfree_movetail(page);
}
EXPORT_SYMBOL_GPL(shmem_mark_page_lazyfree);

View File

@@ -65,6 +65,7 @@ struct lru_pvecs {
struct pagevec lru_deactivate_file;
struct pagevec lru_deactivate;
struct pagevec lru_lazyfree;
struct pagevec lru_lazyfree_movetail;
#ifdef CONFIG_SMP
struct pagevec activate_page;
#endif
@@ -630,6 +631,21 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
}
}
static void lru_lazyfree_movetail_fn(struct page *page, struct lruvec *lruvec,
void *arg)
{
if (PageLRU(page) && !PageUnevictable(page) && PageSwapBacked(page) &&
!PageSwapCache(page)) {
bool active = PageActive(page);
del_page_from_lru_list(page, lruvec,
LRU_INACTIVE_ANON + active);
ClearPageActive(page);
ClearPageReferenced(page);
add_page_to_lru_list_tail(page, lruvec, LRU_INACTIVE_FILE);
}
}
/*
* Drain pages out of the cpu's pagevecs.
* Either "cpu" is the current CPU, and preemption has already been
@@ -665,6 +681,10 @@ void lru_add_drain_cpu(int cpu)
if (pagevec_count(pvec))
pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
pvec = &per_cpu(lru_pvecs.lru_lazyfree_movetail, cpu);
if (pagevec_count(pvec))
pagevec_lru_move_fn(pvec, lru_lazyfree_movetail_fn, NULL);
activate_page_drain(cpu);
invalidate_bh_lrus_cpu(cpu);
}
@@ -742,6 +762,29 @@ void mark_page_lazyfree(struct page *page)
}
}
/**
* mark_page_lazyfree_movetail - make a swapbacked page lazyfree
* @page: page to deactivate
*
* mark_page_lazyfree_movetail() moves @page to the tail of the inactive file
* list, which accelerates the reclaim of @page.
*/
void mark_page_lazyfree_movetail(struct page *page)
{
if (PageLRU(page) && !PageUnevictable(page) && PageSwapBacked(page) &&
!PageSwapCache(page)) {
struct pagevec *pvec;
local_lock(&lru_pvecs.lock);
pvec = this_cpu_ptr(&lru_pvecs.lru_lazyfree_movetail);
get_page(page);
if (pagevec_add_and_need_flush(pvec, page))
pagevec_lru_move_fn(pvec,
lru_lazyfree_movetail_fn, NULL);
local_unlock(&lru_pvecs.lock);
}
}
void lru_add_drain(void)
{
local_lock(&lru_pvecs.lock);
@@ -854,6 +897,7 @@ inline void __lru_add_drain_all(bool force_all_cpus)
pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) ||
pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree_movetail, cpu)) ||
need_activate_page_drain(cpu) ||
has_bh_in_lru(cpu, NULL)) {
INIT_WORK(work, lru_add_drain_per_cpu);

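A hedged usage sketch: the helper only acts on evictable, swap-backed LRU pages that are not in swap cache, so a hypothetical caller that wants to hint early reclaim would simply do:

/* Hypothetical hint path; caller is assumed to hold a reference on @page. */
static void hint_reclaim_soon(struct page *page)
{
	if (PageSwapBacked(page) && !PageSwapCache(page))
		mark_page_lazyfree_movetail(page);
}

The in-kernel user added by this change is shmem_mark_page_lazyfree() in mm/shmem.c, shown earlier.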
View File

@@ -28,6 +28,11 @@
#include <linux/kobject.h>
#include <linux/workqueue.h>
#include <linux/sysfs.h>
#include <linux/suspend.h>
#include <net/sock.h>
#include <net/inet_sock.h>
#define NLMSG_MAX_SIZE 64
struct idletimer_tg {
struct list_head entry;
@@ -38,15 +43,112 @@ struct idletimer_tg {
struct kobject *kobj;
struct device_attribute attr;
struct timespec64 delayed_timer_trigger;
struct timespec64 last_modified_timer;
struct timespec64 last_suspend_time;
struct notifier_block pm_nb;
int timeout;
unsigned int refcnt;
u8 timer_type;
bool work_pending;
bool send_nl_msg;
bool active;
uid_t uid;
bool suspend_time_valid;
};
static LIST_HEAD(idletimer_tg_list);
static DEFINE_MUTEX(list_mutex);
static DEFINE_SPINLOCK(timestamp_lock);
static struct kobject *idletimer_tg_kobj;
static bool check_for_delayed_trigger(struct idletimer_tg *timer,
struct timespec64 *ts)
{
bool state;
struct timespec64 temp;
spin_lock_bh(&timestamp_lock);
timer->work_pending = false;
if ((ts->tv_sec - timer->last_modified_timer.tv_sec) > timer->timeout ||
timer->delayed_timer_trigger.tv_sec != 0) {
state = false;
temp.tv_sec = timer->timeout;
temp.tv_nsec = 0;
if (timer->delayed_timer_trigger.tv_sec != 0) {
temp = timespec64_add(timer->delayed_timer_trigger,
temp);
ts->tv_sec = temp.tv_sec;
ts->tv_nsec = temp.tv_nsec;
timer->delayed_timer_trigger.tv_sec = 0;
timer->work_pending = true;
schedule_work(&timer->work);
} else {
temp = timespec64_add(timer->last_modified_timer, temp);
ts->tv_sec = temp.tv_sec;
ts->tv_nsec = temp.tv_nsec;
}
} else {
state = timer->active;
}
spin_unlock_bh(&timestamp_lock);
return state;
}
static void notify_netlink_uevent(const char *iface, struct idletimer_tg *timer)
{
char iface_msg[NLMSG_MAX_SIZE];
char state_msg[NLMSG_MAX_SIZE];
char timestamp_msg[NLMSG_MAX_SIZE];
char uid_msg[NLMSG_MAX_SIZE];
char *envp[] = { iface_msg, state_msg, timestamp_msg, uid_msg, NULL };
int res;
struct timespec64 ts;
u64 time_ns;
bool state;
res = snprintf(iface_msg, NLMSG_MAX_SIZE, "INTERFACE=%s",
iface);
if (NLMSG_MAX_SIZE <= res) {
pr_err("message too long (%d)", res);
return;
}
ts = ktime_to_timespec64(ktime_get_boottime());
state = check_for_delayed_trigger(timer, &ts);
res = snprintf(state_msg, NLMSG_MAX_SIZE, "STATE=%s",
state ? "active" : "inactive");
if (NLMSG_MAX_SIZE <= res) {
pr_err("message too long (%d)", res);
return;
}
if (state) {
res = snprintf(uid_msg, NLMSG_MAX_SIZE, "UID=%u", timer->uid);
if (NLMSG_MAX_SIZE <= res)
pr_err("message too long (%d)", res);
} else {
res = snprintf(uid_msg, NLMSG_MAX_SIZE, "UID=");
if (NLMSG_MAX_SIZE <= res)
pr_err("message too long (%d)", res);
}
time_ns = timespec64_to_ns(&ts);
res = snprintf(timestamp_msg, NLMSG_MAX_SIZE, "TIME_NS=%llu", time_ns);
if (NLMSG_MAX_SIZE <= res) {
timestamp_msg[0] = '\0';
pr_err("message too long (%d)", res);
}
pr_debug("putting nlmsg: <%s> <%s> <%s> <%s>\n", iface_msg, state_msg,
timestamp_msg, uid_msg);
kobject_uevent_env(idletimer_tg_kobj, KOBJ_CHANGE, envp);
return;
}
static
struct idletimer_tg *__idletimer_tg_find_by_label(const char *label)
{
@@ -67,6 +169,7 @@ static ssize_t idletimer_tg_show(struct device *dev,
unsigned long expires = 0;
struct timespec64 ktimespec = {};
long time_diff = 0;
unsigned long now = jiffies;
mutex_lock(&list_mutex);
@@ -78,16 +181,20 @@ static ssize_t idletimer_tg_show(struct device *dev,
time_diff = ktimespec.tv_sec;
} else {
expires = timer->timer.expires;
time_diff = jiffies_to_msecs(expires - jiffies) / 1000;
time_diff = jiffies_to_msecs(expires - now) / 1000;
}
}
mutex_unlock(&list_mutex);
if (time_after(expires, jiffies) || ktimespec.tv_sec > 0)
return snprintf(buf, PAGE_SIZE, "%ld\n", time_diff);
if (time_after(expires, now) || ktimespec.tv_sec > 0)
return scnprintf(buf, PAGE_SIZE, "%ld\n", time_diff);
return snprintf(buf, PAGE_SIZE, "0\n");
if (timer->send_nl_msg)
return scnprintf(buf, PAGE_SIZE, "0 %d\n",
jiffies_to_msecs(now - expires) / 1000);
return scnprintf(buf, PAGE_SIZE, "0\n");
}
static void idletimer_tg_work(struct work_struct *work)
@@ -96,6 +203,9 @@ static void idletimer_tg_work(struct work_struct *work)
work);
sysfs_notify(idletimer_tg_kobj, NULL, timer->attr.attr.name);
if (timer->send_nl_msg)
notify_netlink_uevent(timer->attr.attr.name, timer);
}
static void idletimer_tg_expired(struct timer_list *t)
@@ -104,7 +214,62 @@ static void idletimer_tg_expired(struct timer_list *t)
pr_debug("timer %s expired\n", timer->attr.attr.name);
spin_lock_bh(&timestamp_lock);
timer->active = false;
timer->work_pending = true;
schedule_work(&timer->work);
spin_unlock_bh(&timestamp_lock);
}
static int idletimer_resume(struct notifier_block *notifier,
unsigned long pm_event, void *unused)
{
struct timespec64 ts;
unsigned long time_diff, now = jiffies;
struct idletimer_tg *timer = container_of(notifier,
struct idletimer_tg, pm_nb);
if (!timer)
return NOTIFY_DONE;
switch (pm_event) {
case PM_SUSPEND_PREPARE:
timer->last_suspend_time =
ktime_to_timespec64(ktime_get_boottime());
timer->suspend_time_valid = true;
break;
case PM_POST_SUSPEND:
if (!timer->suspend_time_valid)
break;
timer->suspend_time_valid = false;
spin_lock_bh(&timestamp_lock);
if (!timer->active) {
spin_unlock_bh(&timestamp_lock);
break;
}
/* Since jiffies are not updated while suspended, 'now' still
* reflects the time at which the system suspended. */
if (time_after(timer->timer.expires, now)) {
ts = ktime_to_timespec64(ktime_get_boottime());
ts = timespec64_sub(ts, timer->last_suspend_time);
time_diff = timespec64_to_jiffies(&ts);
if (timer->timer.expires > (time_diff + now)) {
mod_timer_pending(&timer->timer,
(timer->timer.expires - time_diff));
} else {
del_timer(&timer->timer);
timer->timer.expires = 0;
timer->active = false;
timer->work_pending = true;
schedule_work(&timer->work);
}
}
spin_unlock_bh(&timestamp_lock);
break;
default:
break;
}
return NOTIFY_DONE;
}
static enum alarmtimer_restart idletimer_tg_alarmproc(struct alarm *alarm,
@@ -137,7 +302,7 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
{
int ret;
info->timer = kmalloc(sizeof(*info->timer), GFP_KERNEL);
info->timer = kzalloc(sizeof(*info->timer), GFP_KERNEL);
if (!info->timer) {
ret = -ENOMEM;
goto out;
@@ -163,12 +328,29 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
}
list_add(&info->timer->entry, &idletimer_tg_list);
timer_setup(&info->timer->timer, idletimer_tg_expired, 0);
pr_debug("timer type value is 0.");
info->timer->timer_type = 0;
info->timer->refcnt = 1;
info->timer->send_nl_msg = false;
info->timer->active = true;
info->timer->timeout = info->timeout;
info->timer->delayed_timer_trigger.tv_sec = 0;
info->timer->delayed_timer_trigger.tv_nsec = 0;
info->timer->work_pending = false;
info->timer->uid = 0;
info->timer->last_modified_timer =
ktime_to_timespec64(ktime_get_boottime());
info->timer->pm_nb.notifier_call = idletimer_resume;
ret = register_pm_notifier(&info->timer->pm_nb);
if (ret)
printk(KERN_WARNING "[%s] Failed to register pm notifier %d\n",
__func__, ret);
INIT_WORK(&info->timer->work, idletimer_tg_work);
timer_setup(&info->timer->timer, idletimer_tg_expired, 0);
mod_timer(&info->timer->timer,
msecs_to_jiffies(info->timeout * 1000) + jiffies);
@@ -186,7 +368,7 @@ static int idletimer_tg_create_v1(struct idletimer_tg_info_v1 *info)
{
int ret;
info->timer = kmalloc(sizeof(*info->timer), GFP_KERNEL);
info->timer = kzalloc(sizeof(*info->timer), GFP_KERNEL);
if (!info->timer) {
ret = -ENOMEM;
goto out;
@@ -218,6 +400,22 @@ static int idletimer_tg_create_v1(struct idletimer_tg_info_v1 *info)
pr_debug("timer type value is %u", info->timer_type);
info->timer->timer_type = info->timer_type;
info->timer->refcnt = 1;
info->timer->send_nl_msg = (info->send_nl_msg != 0);
info->timer->active = true;
info->timer->timeout = info->timeout;
info->timer->delayed_timer_trigger.tv_sec = 0;
info->timer->delayed_timer_trigger.tv_nsec = 0;
info->timer->work_pending = false;
info->timer->uid = 0;
info->timer->last_modified_timer =
ktime_to_timespec64(ktime_get_boottime());
info->timer->pm_nb.notifier_call = idletimer_resume;
ret = register_pm_notifier(&info->timer->pm_nb);
if (ret)
printk(KERN_WARNING "[%s] Failed to register pm notifier %d\n",
__func__, ret);
INIT_WORK(&info->timer->work, idletimer_tg_work);
@@ -231,7 +429,7 @@ static int idletimer_tg_create_v1(struct idletimer_tg_info_v1 *info)
} else {
timer_setup(&info->timer->timer, idletimer_tg_expired, 0);
mod_timer(&info->timer->timer,
msecs_to_jiffies(info->timeout * 1000) + jiffies);
msecs_to_jiffies(info->timeout * 1000) + jiffies);
}
return 0;
@@ -244,6 +442,41 @@ out:
return ret;
}
static void reset_timer(struct idletimer_tg * const info_timer,
const __u32 info_timeout,
struct sk_buff *skb)
{
unsigned long now = jiffies;
bool timer_prev;
spin_lock_bh(&timestamp_lock);
timer_prev = info_timer->active;
info_timer->active = true;
/* timer_prev is used to guard against jiffies overflow in time_before() */
if (!timer_prev || time_before(info_timer->timer.expires, now)) {
pr_debug("Starting Checkentry timer (Expired, Jiffies): %lu, %lu\n",
info_timer->timer.expires, now);
/* Store the uid responsible for waking up the radio */
if (skb && (skb->sk)) {
info_timer->uid = from_kuid_munged(current_user_ns(),
sock_i_uid(skb_to_full_sk(skb)));
}
/* Check if there is a pending inactive notification */
if (info_timer->work_pending)
info_timer->delayed_timer_trigger = info_timer->last_modified_timer;
else {
info_timer->work_pending = true;
schedule_work(&info_timer->work);
}
}
info_timer->last_modified_timer = ktime_to_timespec64(ktime_get_boottime());
mod_timer(&info_timer->timer, msecs_to_jiffies(info_timeout * 1000) + now);
spin_unlock_bh(&timestamp_lock);
}
/*
* The actual xt_tables plugin.
*/
@@ -251,12 +484,21 @@ static unsigned int idletimer_tg_target(struct sk_buff *skb,
const struct xt_action_param *par)
{
const struct idletimer_tg_info *info = par->targinfo;
unsigned long now = jiffies;
pr_debug("resetting timer %s, timeout period %u\n",
info->label, info->timeout);
mod_timer(&info->timer->timer,
msecs_to_jiffies(info->timeout * 1000) + jiffies);
info->timer->active = true;
if (time_before(info->timer->timer.expires, now)) {
schedule_work(&info->timer->work);
pr_debug("Starting timer %s (Expired, Jiffies): %lu, %lu\n",
info->label, info->timer->timer.expires, now);
}
/* TODO: Avoid modifying timers on each packet */
reset_timer(info->timer, info->timeout, skb);
return XT_CONTINUE;
}
@@ -268,6 +510,7 @@ static unsigned int idletimer_tg_target_v1(struct sk_buff *skb,
const struct xt_action_param *par)
{
const struct idletimer_tg_info_v1 *info = par->targinfo;
unsigned long now = jiffies;
pr_debug("resetting timer %s, timeout period %u\n",
info->label, info->timeout);
@@ -276,8 +519,16 @@ static unsigned int idletimer_tg_target_v1(struct sk_buff *skb,
ktime_t tout = ktime_set(info->timeout, 0);
alarm_start_relative(&info->timer->alarm, tout);
} else {
mod_timer(&info->timer->timer,
msecs_to_jiffies(info->timeout * 1000) + jiffies);
info->timer->active = true;
if (time_before(info->timer->timer.expires, now)) {
schedule_work(&info->timer->work);
pr_debug("Starting timer %s (Expired, Jiffies): %lu, %lu\n",
info->label, info->timer->timer.expires, now);
}
/* TODO: Avoid modifying timers on each packet */
reset_timer(info->timer, info->timeout, skb);
}
return XT_CONTINUE;
@@ -321,9 +572,7 @@ static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
info->timer = __idletimer_tg_find_by_label(info->label);
if (info->timer) {
info->timer->refcnt++;
mod_timer(&info->timer->timer,
msecs_to_jiffies(info->timeout * 1000) + jiffies);
reset_timer(info->timer, info->timeout, NULL);
pr_debug("increased refcnt of timer %s to %u\n",
info->label, info->timer->refcnt);
} else {
@@ -346,9 +595,6 @@ static int idletimer_tg_checkentry_v1(const struct xt_tgchk_param *par)
pr_debug("checkentry targinfo%s\n", info->label);
if (info->send_nl_msg)
return -EOPNOTSUPP;
ret = idletimer_tg_helper((struct idletimer_tg_info *)info);
if(ret < 0)
{
@@ -361,6 +607,11 @@ static int idletimer_tg_checkentry_v1(const struct xt_tgchk_param *par)
return -EINVAL;
}
if (info->send_nl_msg > 1) {
pr_debug("invalid value for send_nl_msg\n");
return -EINVAL;
}
mutex_lock(&list_mutex);
info->timer = __idletimer_tg_find_by_label(info->label);
@@ -383,8 +634,7 @@ static int idletimer_tg_checkentry_v1(const struct xt_tgchk_param *par)
alarm_start_relative(&info->timer->alarm, tout);
}
} else {
mod_timer(&info->timer->timer,
msecs_to_jiffies(info->timeout * 1000) + jiffies);
reset_timer(info->timer, info->timeout, NULL);
}
pr_debug("increased refcnt of timer %s to %u\n",
info->label, info->timer->refcnt);
@@ -414,8 +664,9 @@ static void idletimer_tg_destroy(const struct xt_tgdtor_param *par)
list_del(&info->timer->entry);
del_timer_sync(&info->timer->timer);
cancel_work_sync(&info->timer->work);
sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
unregister_pm_notifier(&info->timer->pm_nb);
cancel_work_sync(&info->timer->work);
kfree(info->timer->attr.attr.name);
kfree(info->timer);
} else {
@@ -443,8 +694,9 @@ static void idletimer_tg_destroy_v1(const struct xt_tgdtor_param *par)
} else {
del_timer_sync(&info->timer->timer);
}
cancel_work_sync(&info->timer->work);
sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
unregister_pm_notifier(&info->timer->pm_nb);
cancel_work_sync(&info->timer->work);
kfree(info->timer->attr.attr.name);
kfree(info->timer);
} else {
@@ -540,3 +792,4 @@ MODULE_DESCRIPTION("Xtables: idle time monitor");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("ipt_IDLETIMER");
MODULE_ALIAS("ip6t_IDLETIMER");
MODULE_ALIAS("arpt_IDLETIMER");

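For reference, the strings assembled in notify_netlink_uevent() above reach userspace as a KOBJ_CHANGE uevent on the IDLETIMER sysfs kobject; with purely illustrative values, a timer labelled "wlan0_timer" going active would carry:

/* Illustrative envp contents (values are hypothetical): */
char *envp_example[] = {
	"INTERFACE=wlan0_timer",
	"STATE=active",
	"TIME_NS=123456789012345",	/* CLOCK_BOOTTIME of the trigger, in ns */
	"UID=10057",			/* uid of the socket that reset the timer */
	NULL,
};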
View File

@@ -439,7 +439,7 @@ quiet_cmd_link_multi-m = AR [M] $@
cmd_link_multi-m = \
$(cmd_update_lto_symversions); \
rm -f $@; \
$(AR) cDPrsT $@ $(filter %.o,$^)
$(AR) cDPrsT $@ $(filter %.o %.a,$^)
else
quiet_cmd_link_multi-m = LD [M] $@
cmd_link_multi-m = $(LD) $(ld_flags) -r -o $@ $(filter %.o,$^)

View File

@@ -19,7 +19,26 @@ esac
# We need access to CONFIG_ symbols
. include/config/auto.conf
ksym_wl=/dev/null
needed_symbols=
# Special case for modversions (see modpost.c)
if [ -n "$CONFIG_MODVERSIONS" ]; then
needed_symbols="$needed_symbols module_layout"
fi
# With CONFIG_LTO_CLANG, LLVM bitcode has not yet been compiled into a binary
# when the .mod files are generated, which means they don't yet contain
# references to certain symbols that will be present in the final binaries.
if [ -n "$CONFIG_LTO_CLANG" ]; then
# intrinsic functions
needed_symbols="$needed_symbols memcpy memmove memset"
# ftrace
needed_symbols="$needed_symbols _mcount"
# stack protector symbols
needed_symbols="$needed_symbols __stack_chk_fail __stack_chk_guard"
fi
ksym_wl=
if [ -n "$CONFIG_UNUSED_KSYMS_WHITELIST" ]; then
# Use 'eval' to expand the whitelist path and check if it is relative
eval ksym_wl="$CONFIG_UNUSED_KSYMS_WHITELIST"
@@ -40,16 +59,14 @@ cat > "$output_file" << EOT
EOT
[ -f modules.order ] && modlist=modules.order || modlist=/dev/null
sed 's/ko$/mod/' $modlist |
xargs -n1 sed -n -e '2{s/ /\n/g;/^$/!p;}' -- |
cat - "$ksym_wl" |
{
sed 's/ko$/mod/' $modlist | xargs -n1 sed -n -e '2p'
echo "$needed_symbols"
[ -n "$ksym_wl" ] && cat "$ksym_wl"
} | sed -e 's/ /\n/g' | sed -n -e '/^$/!p' |
# Remove the dot prefix for ppc64; symbol names with a dot (.) hold entry
# point addresses.
sed -e 's/^\.//' |
sort -u |
sed -e 's/\(.*\)/#define __KSYM_\1 1/' >> "$output_file"
# Special case for modversions (see modpost.c)
if [ -n "$CONFIG_MODVERSIONS" ]; then
echo "#define __KSYM_module_layout 1" >> "$output_file"
fi

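The net effect is that every symbol gathered above ends up as a __KSYM_ define in the generated header (include/generated/autoksyms.h); an illustrative excerpt for an LTO_CLANG build with MODVERSIONS enabled might look like:

/* illustrative excerpt only */
#define __KSYM___stack_chk_fail 1
#define __KSYM___stack_chk_guard 1
#define __KSYM__mcount 1
#define __KSYM_memcpy 1
#define __KSYM_memmove 1
#define __KSYM_memset 1
#define __KSYM_module_layout 1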
View File

@@ -1,5 +0,0 @@
memcpy
memmove
memset
__stack_chk_fail
__stack_chk_guard

View File

@@ -33,6 +33,7 @@ SECTIONS {
__patchable_function_entries : { *(__patchable_function_entries) }
#ifdef CONFIG_LTO_CLANG
/*
* With CONFIG_LTO_CLANG, LLD always enables -fdata-sections and
* -ffunction-sections, which increases the size of the final module.
@@ -49,8 +50,10 @@ SECTIONS {
}
.rodata : {
*(.rodata.._start)
*(.rodata .rodata.[0-9a-zA-Z_]*)
*(.rodata..L*)
*(.rodata.._end)
}
#ifdef CONFIG_CFI_CLANG
@@ -59,13 +62,16 @@ SECTIONS {
* .text section, and that the section is aligned to page size.
*/
.text : ALIGN(PAGE_SIZE) {
*(.text.._start)
*(.text.__cfi_check)
*(.text .text.[0-9a-zA-Z_]*)
__cfi_jt_start = .;
*(.text..L.cfi.jumptable .text..L.cfi.jumptable.*)
__cfi_jt_end = .;
*(.text.._end)
}
#endif
#endif
}
/* bring in arch-specific sections */

View File

@@ -40,3 +40,5 @@
# CONFIG_RESET_BRCMSTB_RESCAL is not set
# CONFIG_RESET_INTEL_GW is not set
# CONFIG_ADI_AXI_ADC is not set
# CONFIG_DEBUG_PAGEALLOC is not set
# CONFIG_PAGE_POISONING is not set