Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Will Deacon:
 "A sizeable pile of arm64 updates for 5.8. Summary below, but the two big features are support for Branch Target Identification and Clang's Shadow Call Stack. The latter is currently arm64-only, but the high-level parts are all in core code so it could easily be adopted by other architectures pending toolchain support.

  Branch Target Identification (BTI):
   - Support for ARMv8.5-BTI in both user- and kernel-space. This allows branch targets to limit the types of branch from which they can be called and additionally prevents branching to arbitrary code, although kernel support requires a very recent toolchain.
   - Function annotation via SYM_FUNC_START() so that assembly functions are wrapped with the relevant "landing pad" instructions.
   - BPF and vDSO updates to use the new instructions.
   - Addition of a new HWCAP and exposure of BTI capability to userspace via ID register emulation, along with ELF loader support for the BTI feature in .note.gnu.property.
   - Non-critical fixes to CFI unwind annotations in the sigreturn trampoline.

  Shadow Call Stack (SCS):
   - Support for Clang's Shadow Call Stack feature, which reserves platform register x18 to point at a separate stack for each task that holds only return addresses. This protects function return control flow from buffer overruns on the main stack.
   - Save/restore of x18 across problematic boundaries (user-mode, hypervisor, EFI, suspend, etc).
   - Core support for SCS, should other architectures want to use it too.
   - SCS overflow checking on context-switch as part of the existing stack limit check if CONFIG_SCHED_STACK_END_CHECK=y (a minimal userspace sketch of this end-marker check follows the commit list below).

  CPU feature detection:
   - Removed numerous "SANITY CHECK" errors when running on a system with mismatched AArch32 support at EL1. This is primarily a concern for KVM, which disabled support for 32-bit guests on such a system.
   - Addition of new ID registers and fields as the architecture has been extended.

  Perf and PMU drivers:
   - Minor fixes and cleanups to system PMU drivers.

  Hardware errata:
   - Unify KVM workarounds for VHE and nVHE configurations.
   - Sort vendor errata entries in Kconfig.

  Secure Monitor Call Calling Convention (SMCCC):
   - Update to the latest specification from Arm (v1.2).
   - Allow PSCI code to query the SMCCC version.

  Software Delegated Exception Interface (SDEI):
   - Unexport a bunch of unused symbols.
   - Minor fixes to handling of firmware data.

  Pointer authentication:
   - Add support for dumping the kernel PAC mask in vmcoreinfo so that the stack can be unwound by tools such as kdump.
   - Simplification of key initialisation during CPU bringup.

  BPF backend:
   - Improve immediate generation for logical and add/sub instructions.

  vDSO:
   - Minor fixes to the linker flags for consistency with other architectures and support for LLVM's unwinder.
   - Clean up logic to initialise and map the vDSO into userspace.

  ACPI:
   - Workaround for an ambiguity in the IORT specification relating to the "num_ids" field.
   - Support the _DMA method for all named components rather than only PCIe root complexes.
   - Minor other IORT-related fixes.

  Miscellaneous:
   - Initialise debug traps early for KGDB and fix KDB cacheflushing deadlock.
   - Minor tweaks to early boot state (documentation update, set TEXT_OFFSET to 0x0, increase alignment of PE/COFF sections).
   - Refactoring and cleanup"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (148 commits)
  KVM: arm64: Move __load_guest_stage2 to kvm_mmu.h
  KVM: arm64: Check advertised Stage-2 page size capability
  arm64/cpufeature: Add get_arm64_ftr_reg_nowarn()
  ACPI/IORT: Remove the unused __get_pci_rid()
  arm64/cpuinfo: Add ID_MMFR4_EL1 into the cpuinfo_arm64 context
  arm64/cpufeature: Add remaining feature bits in ID_AA64PFR1 register
  arm64/cpufeature: Add remaining feature bits in ID_AA64PFR0 register
  arm64/cpufeature: Add remaining feature bits in ID_AA64ISAR0 register
  arm64/cpufeature: Add remaining feature bits in ID_MMFR4 register
  arm64/cpufeature: Add remaining feature bits in ID_PFR0 register
  arm64/cpufeature: Introduce ID_MMFR5 CPU register
  arm64/cpufeature: Introduce ID_DFR1 CPU register
  arm64/cpufeature: Introduce ID_PFR2 CPU register
  arm64/cpufeature: Make doublelock a signed feature in ID_AA64DFR0
  arm64/cpufeature: Drop TraceFilt feature exposure from ID_DFR0 register
  arm64/cpufeature: Add explicit ftr_id_isar0[] for ID_ISAR0 register
  arm64: mm: Add asid_gen_match() helper
  firmware: smccc: Fix missing prototype warning for arm_smccc_version_init
  arm64: vdso: Fix CFI directives in sigreturn trampoline
  arm64: vdso: Don't prefix sigreturn trampoline with a BTI C instruction
  ...
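To make the SCS end-marker idea concrete, here is a minimal userspace model of the overflow check (illustration only: the TOY_* names, size and sentinel value are made up and do not match the kernel's SCS_SIZE or SCS_END_MAGIC). The allocator plants a sentinel in the last slot of the shadow stack; an overflow is reported once that sentinel has been overwritten, which is the same pattern the scheduler and kernel/scs.c hunks below rely on via task_scs_end_corrupted().

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy parameters; the kernel's SCS_SIZE and SCS_END_MAGIC differ. */
#define TOY_SCS_SIZE   128UL          /* bytes per shadow stack */
#define TOY_END_MAGIC  0x5c5c5c5cUL   /* made-up sentinel value */

/* Last slot of the shadow stack, reserved for the sentinel. */
static uint64_t *toy_scs_magic(void *base)
{
	return (uint64_t *)((char *)base + TOY_SCS_SIZE) - 1;
}

/* Overflow is detected once the sentinel has been overwritten. */
static int toy_scs_corrupted(void *base)
{
	return *toy_scs_magic(base) != TOY_END_MAGIC;
}

int main(void)
{
	uint64_t *scs = calloc(1, TOY_SCS_SIZE);
	uint64_t *sp = scs;	/* userspace stand-in for the x18 register */
	unsigned long i;

	if (!scs)
		return 1;

	*toy_scs_magic(scs) = TOY_END_MAGIC;

	/* Simulate deeply nested calls: push one return address per call. */
	for (i = 0; i < TOY_SCS_SIZE / sizeof(*sp); i++)
		*sp++ = 0x1000 + i;	/* fake return address */

	printf("shadow stack %s\n",
	       toy_scs_corrupted(scs) ? "overflowed" : "intact");

	free(scs);
	return 0;
}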
@@ -103,6 +103,7 @@ obj-$(CONFIG_TRACEPOINTS) += trace/
 obj-$(CONFIG_IRQ_WORK) += irq_work.o
 obj-$(CONFIG_CPU_PM) += cpu_pm.o
 obj-$(CONFIG_BPF) += bpf/
+obj-$(CONFIG_SHADOW_CALL_STACK) += scs.o
 
 obj-$(CONFIG_PERF_EVENTS) += events/
 
@@ -94,6 +94,7 @@
 #include <linux/thread_info.h>
 #include <linux/stackleak.h>
 #include <linux/kasan.h>
+#include <linux/scs.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -456,6 +457,8 @@ void put_task_stack(struct task_struct *tsk)
 
 void free_task(struct task_struct *tsk)
 {
+	scs_release(tsk);
+
 #ifndef CONFIG_THREAD_INFO_IN_TASK
 	/*
 	 * The task is finally done with both the stack and thread_info,
@@ -840,6 +843,8 @@ void __init fork_init(void)
 			  NULL, free_vm_stack_cache);
 #endif
 
+	scs_init();
+
 	lockdep_init_task(&init_task);
 	uprobes_init();
 }
@@ -899,6 +904,10 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 	if (err)
 		goto free_stack;
 
+	err = scs_prepare(tsk, node);
+	if (err)
+		goto free_stack;
+
 #ifdef CONFIG_SECCOMP
 	/*
 	 * We must handle setting up seccomp filters once we're under
@@ -11,6 +11,7 @@
 #include <linux/nospec.h>
 
 #include <linux/kcov.h>
+#include <linux/scs.h>
 
 #include <asm/switch_to.h>
 #include <asm/tlb.h>
@@ -3925,6 +3926,9 @@ static inline void schedule_debug(struct task_struct *prev, bool preempt)
 #ifdef CONFIG_SCHED_STACK_END_CHECK
 	if (task_stack_end_corrupted(prev))
 		panic("corrupted stack end detected inside scheduler\n");
+
+	if (task_scs_end_corrupted(prev))
+		panic("corrupted shadow stack detected inside scheduler\n");
 #endif
 
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
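For reference, the pre-existing check that this hunk extends compares the word at the very end of the task's regular kernel stack against STACK_END_MAGIC; roughly (a paraphrase of include/linux/sched/task_stack.h, which is not part of this diff):

/* Paraphrased sketch, not taken from this series. */
#define task_stack_end_corrupted(task) \
		(*(end_of_stack(task)) != STACK_END_MAGIC)

The new task_scs_end_corrupted() check applies the same end-marker pattern to the shadow call stack.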
@@ -6088,6 +6092,7 @@ void init_idle(struct task_struct *idle, int cpu)
 	idle->se.exec_start = sched_clock();
 	idle->flags |= PF_IDLE;
 
+	scs_task_reset(idle);
 	kasan_unpoison_task_stack(idle);
 
 #ifdef CONFIG_SMP
kernel/scs.c (new file, 104 lines)
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Shadow Call Stack support.
+ *
+ * Copyright (C) 2019 Google LLC
+ */
+
+#include <linux/kasan.h>
+#include <linux/mm.h>
+#include <linux/scs.h>
+#include <linux/slab.h>
+#include <linux/vmstat.h>
+
+static struct kmem_cache *scs_cache;
+
+static void __scs_account(void *s, int account)
+{
+	struct page *scs_page = virt_to_page(s);
+
+	mod_zone_page_state(page_zone(scs_page), NR_KERNEL_SCS_KB,
+			    account * (SCS_SIZE / SZ_1K));
+}
+
+static void *scs_alloc(int node)
+{
+	void *s = kmem_cache_alloc_node(scs_cache, GFP_SCS, node);
+
+	if (!s)
+		return NULL;
+
+	*__scs_magic(s) = SCS_END_MAGIC;
+
+	/*
+	 * Poison the allocation to catch unintentional accesses to
+	 * the shadow stack when KASAN is enabled.
+	 */
+	kasan_poison_object_data(scs_cache, s);
+	__scs_account(s, 1);
+	return s;
+}
+
+static void scs_free(void *s)
+{
+	__scs_account(s, -1);
+	kasan_unpoison_object_data(scs_cache, s);
+	kmem_cache_free(scs_cache, s);
+}
+
+void __init scs_init(void)
+{
+	scs_cache = kmem_cache_create("scs_cache", SCS_SIZE, 0, 0, NULL);
+}
+
+int scs_prepare(struct task_struct *tsk, int node)
+{
+	void *s = scs_alloc(node);
+
+	if (!s)
+		return -ENOMEM;
+
+	task_scs(tsk) = task_scs_sp(tsk) = s;
+	return 0;
+}
+
+static void scs_check_usage(struct task_struct *tsk)
+{
+	static unsigned long highest;
+
+	unsigned long *p, prev, curr = highest, used = 0;
+
+	if (!IS_ENABLED(CONFIG_DEBUG_STACK_USAGE))
+		return;
+
+	for (p = task_scs(tsk); p < __scs_magic(tsk); ++p) {
+		if (!READ_ONCE_NOCHECK(*p))
+			break;
+		used++;
+	}
+
+	while (used > curr) {
+		prev = cmpxchg_relaxed(&highest, curr, used);
+
+		if (prev == curr) {
+			pr_info("%s (%d): highest shadow stack usage: %lu bytes\n",
+				tsk->comm, task_pid_nr(tsk), used);
+			break;
+		}
+
+		curr = prev;
+	}
+}
+
+void scs_release(struct task_struct *tsk)
+{
+	void *s = task_scs(tsk);
+
+	if (!s)
+		return;
+
+	WARN(task_scs_end_corrupted(tsk),
+	     "corrupted shadow stack detected when freeing task\n");
+	scs_check_usage(tsk);
+	scs_free(s);
+}
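The new file relies on helpers and constants (SCS_SIZE, GFP_SCS, SCS_END_MAGIC, task_scs(), task_scs_sp(), __scs_magic(), task_scs_end_corrupted()) that live in include/linux/scs.h and are not shown in this diff. A rough sketch of their shape, inferred from how they are used above rather than quoted from the header, might look like:

/* Inferred sketch only -- see include/linux/scs.h for the real definitions. */
#define SCS_SIZE		SZ_1K			/* size of each shadow stack */
#define GFP_SCS			(GFP_KERNEL | __GFP_ZERO)
#define SCS_END_MAGIC		0x5c5UL			/* illustrative sentinel value */

#define task_scs(tsk)		(task_thread_info(tsk)->scs_base)
#define task_scs_sp(tsk)	(task_thread_info(tsk)->scs_sp)

/* Last slot of a shadow stack, reserved for the end marker. */
static inline unsigned long *__scs_magic(void *s)
{
	return (unsigned long *)(s + SCS_SIZE) - 1;
}

/* True once the end marker has been overwritten. */
static inline bool task_scs_end_corrupted(struct task_struct *tsk)
{
	return READ_ONCE_NOCHECK(*__scs_magic(task_scs(tsk))) != SCS_END_MAGIC;
}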