Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 festive updates from Will Deacon: "In the end, we ended up with quite a lot more than I expected: - Support for ARMv8.3 Pointer Authentication in userspace (CRIU and kernel-side support to come later) - Support for per-thread stack canaries, pending an update to GCC that is currently undergoing review - Support for kexec_file_load(), which permits secure boot of a kexec payload but also happens to improve the performance of kexec dramatically because we can avoid the sucky purgatory code from userspace. Kdump will come later (requires updates to libfdt). - Optimisation of our dynamic CPU feature framework, so that all detected features are enabled via a single stop_machine() invocation - KPTI whitelisting of Cortex-A CPUs unaffected by Meltdown, so that they can benefit from global TLB entries when KASLR is not in use - 52-bit virtual addressing for userspace (kernel remains 48-bit) - Patch in LSE atomics for per-cpu atomic operations - Custom preempt.h implementation to avoid unconditional calls to preempt_schedule() from preempt_enable() - Support for the new 'SB' Speculation Barrier instruction - Vectorised implementation of XOR checksumming and CRC32 optimisations - Workaround for Cortex-A76 erratum #1165522 - Improved compatibility with Clang/LLD - Support for TX2 system PMUS for profiling the L3 cache and DMC - Reflect read-only permissions in the linear map by default - Ensure MMIO reads are ordered with subsequent calls to Xdelay() - Initial support for memory hotplug - Tweak the threshold when we invalidate the TLB by-ASID, so that mremap() performance is improved for ranges spanning multiple PMDs. - Minor refactoring and cleanups" * tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (125 commits) arm64: kaslr: print PHYS_OFFSET in dump_kernel_offset() arm64: sysreg: Use _BITUL() when defining register bits arm64: cpufeature: Rework ptr auth hwcaps using multi_entry_cap_matches arm64: cpufeature: Reduce number of pointer auth CPU caps from 6 to 4 arm64: docs: document pointer authentication arm64: ptr auth: Move per-thread keys from thread_info to thread_struct arm64: enable pointer authentication arm64: add prctl control for resetting ptrauth keys arm64: perf: strip PAC when unwinding userspace arm64: expose user PAC bit positions via ptrace arm64: add basic pointer authentication support arm64/cpufeature: detect pointer authentication arm64: Don't trap host pointer auth use to EL2 arm64/kvm: hide ptrauth from guests arm64/kvm: consistently handle host HCR_EL2 flags arm64: add pointer authentication register bits arm64: add comments about EC exception levels arm64: perf: Treat EXCLUDE_EL* bit definitions as unsigned arm64: kpti: Whitelist Cortex-A CPUs that don't implement the CSV3 field arm64: enable per-task stack canaries ...
This commit is contained in:
@@ -30,7 +30,7 @@ $(obj)/%.stub.o: $(obj)/%.o FORCE
|
||||
arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
|
||||
sys_compat.o
|
||||
arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
|
||||
arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
|
||||
arm64-obj-$(CONFIG_MODULES) += module.o
|
||||
arm64-obj-$(CONFIG_ARM64_MODULE_PLTS) += module-plts.o
|
||||
arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
|
||||
arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
|
||||
@@ -49,14 +49,16 @@ arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL) += acpi_parking_protocol.o
|
||||
arm64-obj-$(CONFIG_PARAVIRT) += paravirt.o
|
||||
arm64-obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
|
||||
arm64-obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o
|
||||
arm64-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o \
|
||||
arm64-obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o \
|
||||
cpu-reset.o
|
||||
arm64-obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o
|
||||
arm64-obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o
|
||||
arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
|
||||
arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
|
||||
arm64-obj-$(CONFIG_CRASH_CORE) += crash_core.o
|
||||
arm64-obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o
|
||||
arm64-obj-$(CONFIG_ARM64_SSBD) += ssbd.o
|
||||
arm64-obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o
|
||||
|
||||
obj-y += $(arm64-obj-y) vdso/ probes/
|
||||
obj-m += $(arm64-obj-m)
|
||||
|
@@ -1,88 +0,0 @@
|
||||
/*
|
||||
* Based on arch/arm/kernel/armksyms.c
|
||||
*
|
||||
* Copyright (C) 2000 Russell King
|
||||
* Copyright (C) 2012 ARM Ltd.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <linux/export.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/cryptohash.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/in6.h>
|
||||
#include <linux/syscalls.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/arm-smccc.h>
|
||||
#include <linux/kprobes.h>
|
||||
|
||||
#include <asm/checksum.h>
|
||||
|
||||
EXPORT_SYMBOL(copy_page);
|
||||
EXPORT_SYMBOL(clear_page);
|
||||
|
||||
/* user mem (segment) */
|
||||
EXPORT_SYMBOL(__arch_copy_from_user);
|
||||
EXPORT_SYMBOL(__arch_copy_to_user);
|
||||
EXPORT_SYMBOL(__arch_clear_user);
|
||||
EXPORT_SYMBOL(__arch_copy_in_user);
|
||||
|
||||
/* physical memory */
|
||||
EXPORT_SYMBOL(memstart_addr);
|
||||
|
||||
/* string / mem functions */
|
||||
#ifndef CONFIG_KASAN
|
||||
EXPORT_SYMBOL(strchr);
|
||||
EXPORT_SYMBOL(strrchr);
|
||||
EXPORT_SYMBOL(strcmp);
|
||||
EXPORT_SYMBOL(strncmp);
|
||||
EXPORT_SYMBOL(strlen);
|
||||
EXPORT_SYMBOL(strnlen);
|
||||
EXPORT_SYMBOL(memcmp);
|
||||
EXPORT_SYMBOL(memchr);
|
||||
#endif
|
||||
|
||||
EXPORT_SYMBOL(memset);
|
||||
EXPORT_SYMBOL(memcpy);
|
||||
EXPORT_SYMBOL(memmove);
|
||||
EXPORT_SYMBOL(__memset);
|
||||
EXPORT_SYMBOL(__memcpy);
|
||||
EXPORT_SYMBOL(__memmove);
|
||||
|
||||
/* atomic bitops */
|
||||
EXPORT_SYMBOL(set_bit);
|
||||
EXPORT_SYMBOL(test_and_set_bit);
|
||||
EXPORT_SYMBOL(clear_bit);
|
||||
EXPORT_SYMBOL(test_and_clear_bit);
|
||||
EXPORT_SYMBOL(change_bit);
|
||||
EXPORT_SYMBOL(test_and_change_bit);
|
||||
|
||||
#ifdef CONFIG_FUNCTION_TRACER
|
||||
EXPORT_SYMBOL(_mcount);
|
||||
NOKPROBE_SYMBOL(_mcount);
|
||||
#endif
|
||||
|
||||
/* arm-smccc */
|
||||
EXPORT_SYMBOL(__arm_smccc_smc);
|
||||
EXPORT_SYMBOL(__arm_smccc_hvc);
|
||||
|
||||
/* tishift.S */
|
||||
extern long long __ashlti3(long long a, int b);
|
||||
EXPORT_SYMBOL(__ashlti3);
|
||||
extern long long __ashrti3(long long a, int b);
|
||||
EXPORT_SYMBOL(__ashrti3);
|
||||
extern long long __lshrti3(long long a, int b);
|
||||
EXPORT_SYMBOL(__lshrti3);
|
@@ -46,6 +46,9 @@ int main(void)
|
||||
DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0));
|
||||
#endif
|
||||
DEFINE(TSK_STACK, offsetof(struct task_struct, stack));
|
||||
#ifdef CONFIG_STACKPROTECTOR
|
||||
DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
|
||||
#endif
|
||||
BLANK();
|
||||
DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context));
|
||||
BLANK();
|
||||
|
@@ -22,11 +22,11 @@
|
||||
* __cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2) - Helper for
|
||||
* cpu_soft_restart.
|
||||
*
|
||||
* @el2_switch: Flag to indicate a swich to EL2 is needed.
|
||||
* @el2_switch: Flag to indicate a switch to EL2 is needed.
|
||||
* @entry: Location to jump to for soft reset.
|
||||
* arg0: First argument passed to @entry.
|
||||
* arg1: Second argument passed to @entry.
|
||||
* arg2: Third argument passed to @entry.
|
||||
* arg0: First argument passed to @entry. (relocation list)
|
||||
* arg1: Second argument passed to @entry.(physical kernel entry)
|
||||
* arg2: Third argument passed to @entry. (physical dtb address)
|
||||
*
|
||||
* Put the CPU into the same state as it would be if it had been reset, and
|
||||
* branch to what would be the reset vector. It must be executed with the
|
||||
|
@@ -135,7 +135,7 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
|
||||
const char *hyp_vecs_start,
|
||||
const char *hyp_vecs_end)
|
||||
{
|
||||
static DEFINE_SPINLOCK(bp_lock);
|
||||
static DEFINE_RAW_SPINLOCK(bp_lock);
|
||||
int cpu, slot = -1;
|
||||
|
||||
/*
|
||||
@@ -147,7 +147,7 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
|
||||
return;
|
||||
}
|
||||
|
||||
spin_lock(&bp_lock);
|
||||
raw_spin_lock(&bp_lock);
|
||||
for_each_possible_cpu(cpu) {
|
||||
if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
|
||||
slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
|
||||
@@ -163,7 +163,7 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
|
||||
|
||||
__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
|
||||
__this_cpu_write(bp_hardening_data.fn, fn);
|
||||
spin_unlock(&bp_lock);
|
||||
raw_spin_unlock(&bp_lock);
|
||||
}
|
||||
#else
|
||||
#define __smccc_workaround_1_smc_start NULL
|
||||
@@ -507,38 +507,6 @@ cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
|
||||
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
|
||||
CAP_MIDR_RANGE_LIST(midr_list)
|
||||
|
||||
/*
|
||||
* Generic helper for handling capabilties with multiple (match,enable) pairs
|
||||
* of call backs, sharing the same capability bit.
|
||||
* Iterate over each entry to see if at least one matches.
|
||||
*/
|
||||
static bool __maybe_unused
|
||||
multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry, int scope)
|
||||
{
|
||||
const struct arm64_cpu_capabilities *caps;
|
||||
|
||||
for (caps = entry->match_list; caps->matches; caps++)
|
||||
if (caps->matches(caps, scope))
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Take appropriate action for all matching entries in the shared capability
|
||||
* entry.
|
||||
*/
|
||||
static void __maybe_unused
|
||||
multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry)
|
||||
{
|
||||
const struct arm64_cpu_capabilities *caps;
|
||||
|
||||
for (caps = entry->match_list; caps->matches; caps++)
|
||||
if (caps->matches(caps, SCOPE_LOCAL_CPU) &&
|
||||
caps->cpu_enable)
|
||||
caps->cpu_enable(caps);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
|
||||
|
||||
/*
|
||||
@@ -584,24 +552,63 @@ static const struct midr_range arm64_repeat_tlbi_cpus[] = {
|
||||
|
||||
#endif
|
||||
|
||||
const struct arm64_cpu_capabilities arm64_errata[] = {
|
||||
#ifdef CONFIG_CAVIUM_ERRATUM_27456
|
||||
static const struct midr_range cavium_erratum_27456_cpus[] = {
|
||||
/* Cavium ThunderX, T88 pass 1.x - 2.1 */
|
||||
MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
|
||||
/* Cavium ThunderX, T81 pass 1.0 */
|
||||
MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
|
||||
{},
|
||||
};
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_CAVIUM_ERRATUM_30115
|
||||
static const struct midr_range cavium_erratum_30115_cpus[] = {
|
||||
/* Cavium ThunderX, T88 pass 1.x - 2.2 */
|
||||
MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
|
||||
/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
|
||||
MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
|
||||
/* Cavium ThunderX, T83 pass 1.0 */
|
||||
MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
|
||||
{},
|
||||
};
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
|
||||
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
|
||||
{
|
||||
ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
|
||||
},
|
||||
{
|
||||
.midr_range.model = MIDR_QCOM_KRYO,
|
||||
.matches = is_kryo_midr,
|
||||
},
|
||||
{},
|
||||
};
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
|
||||
static const struct midr_range workaround_clean_cache[] = {
|
||||
#if defined(CONFIG_ARM64_ERRATUM_826319) || \
|
||||
defined(CONFIG_ARM64_ERRATUM_827319) || \
|
||||
defined(CONFIG_ARM64_ERRATUM_824069)
|
||||
{
|
||||
/* Cortex-A53 r0p[012] */
|
||||
.desc = "ARM errata 826319, 827319, 824069",
|
||||
.capability = ARM64_WORKAROUND_CLEAN_CACHE,
|
||||
ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
|
||||
.cpu_enable = cpu_enable_cache_maint_trap,
|
||||
},
|
||||
/* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
|
||||
MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
|
||||
#endif
|
||||
#ifdef CONFIG_ARM64_ERRATUM_819472
|
||||
#ifdef CONFIG_ARM64_ERRATUM_819472
|
||||
/* Cortex-A53 r0p[01] : ARM errata 819472 */
|
||||
MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
|
||||
#endif
|
||||
{},
|
||||
};
|
||||
#endif
|
||||
|
||||
const struct arm64_cpu_capabilities arm64_errata[] = {
|
||||
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
|
||||
{
|
||||
/* Cortex-A53 r0p[01] */
|
||||
.desc = "ARM errata 819472",
|
||||
.desc = "ARM errata 826319, 827319, 824069, 819472",
|
||||
.capability = ARM64_WORKAROUND_CLEAN_CACHE,
|
||||
ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
|
||||
ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
|
||||
.cpu_enable = cpu_enable_cache_maint_trap,
|
||||
},
|
||||
#endif
|
||||
@@ -652,40 +659,16 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
|
||||
#endif
|
||||
#ifdef CONFIG_CAVIUM_ERRATUM_27456
|
||||
{
|
||||
/* Cavium ThunderX, T88 pass 1.x - 2.1 */
|
||||
.desc = "Cavium erratum 27456",
|
||||
.capability = ARM64_WORKAROUND_CAVIUM_27456,
|
||||
ERRATA_MIDR_RANGE(MIDR_THUNDERX,
|
||||
0, 0,
|
||||
1, 1),
|
||||
},
|
||||
{
|
||||
/* Cavium ThunderX, T81 pass 1.0 */
|
||||
.desc = "Cavium erratum 27456",
|
||||
.capability = ARM64_WORKAROUND_CAVIUM_27456,
|
||||
ERRATA_MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
|
||||
ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
|
||||
},
|
||||
#endif
|
||||
#ifdef CONFIG_CAVIUM_ERRATUM_30115
|
||||
{
|
||||
/* Cavium ThunderX, T88 pass 1.x - 2.2 */
|
||||
.desc = "Cavium erratum 30115",
|
||||
.capability = ARM64_WORKAROUND_CAVIUM_30115,
|
||||
ERRATA_MIDR_RANGE(MIDR_THUNDERX,
|
||||
0, 0,
|
||||
1, 2),
|
||||
},
|
||||
{
|
||||
/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
|
||||
.desc = "Cavium erratum 30115",
|
||||
.capability = ARM64_WORKAROUND_CAVIUM_30115,
|
||||
ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
|
||||
},
|
||||
{
|
||||
/* Cavium ThunderX, T83 pass 1.0 */
|
||||
.desc = "Cavium erratum 30115",
|
||||
.capability = ARM64_WORKAROUND_CAVIUM_30115,
|
||||
ERRATA_MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
|
||||
ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
|
||||
},
|
||||
#endif
|
||||
{
|
||||
@@ -697,16 +680,10 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
|
||||
},
|
||||
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
|
||||
{
|
||||
.desc = "Qualcomm Technologies Falkor erratum 1003",
|
||||
.desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
|
||||
.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
|
||||
ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
|
||||
},
|
||||
{
|
||||
.desc = "Qualcomm Technologies Kryo erratum 1003",
|
||||
.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
|
||||
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
|
||||
.midr_range.model = MIDR_QCOM_KRYO,
|
||||
.matches = is_kryo_midr,
|
||||
.matches = cpucap_multi_entry_cap_matches,
|
||||
.match_list = qcom_erratum_1003_list,
|
||||
},
|
||||
#endif
|
||||
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
|
||||
@@ -753,6 +730,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
|
||||
.capability = ARM64_WORKAROUND_1188873,
|
||||
ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
|
||||
},
|
||||
#endif
|
||||
#ifdef CONFIG_ARM64_ERRATUM_1165522
|
||||
{
|
||||
/* Cortex-A76 r0p0 to r2p0 */
|
||||
.desc = "ARM erratum 1165522",
|
||||
.capability = ARM64_WORKAROUND_1165522,
|
||||
ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
|
||||
},
|
||||
#endif
|
||||
{
|
||||
}
|
||||
|
@@ -52,6 +52,7 @@ unsigned int compat_elf_hwcap2 __read_mostly;
|
||||
|
||||
DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
|
||||
EXPORT_SYMBOL(cpu_hwcaps);
|
||||
static struct arm64_cpu_capabilities const __ro_after_init *cpu_hwcaps_ptrs[ARM64_NCAPS];
|
||||
|
||||
/*
|
||||
* Flag to indicate if we have computed the system wide
|
||||
@@ -141,9 +142,18 @@ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
|
||||
};
|
||||
|
||||
static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
|
||||
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SB_SHIFT, 4, 0),
|
||||
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
|
||||
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPI_SHIFT, 4, 0),
|
||||
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
|
||||
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPA_SHIFT, 4, 0),
|
||||
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
|
||||
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
|
||||
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
|
||||
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
|
||||
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_API_SHIFT, 4, 0),
|
||||
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
|
||||
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_APA_SHIFT, 4, 0),
|
||||
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
|
||||
ARM64_FTR_END,
|
||||
};
|
||||
@@ -518,6 +528,29 @@ static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
|
||||
}
|
||||
|
||||
extern const struct arm64_cpu_capabilities arm64_errata[];
|
||||
static const struct arm64_cpu_capabilities arm64_features[];
|
||||
|
||||
static void __init
|
||||
init_cpu_hwcaps_indirect_list_from_array(const struct arm64_cpu_capabilities *caps)
|
||||
{
|
||||
for (; caps->matches; caps++) {
|
||||
if (WARN(caps->capability >= ARM64_NCAPS,
|
||||
"Invalid capability %d\n", caps->capability))
|
||||
continue;
|
||||
if (WARN(cpu_hwcaps_ptrs[caps->capability],
|
||||
"Duplicate entry for capability %d\n",
|
||||
caps->capability))
|
||||
continue;
|
||||
cpu_hwcaps_ptrs[caps->capability] = caps;
|
||||
}
|
||||
}
|
||||
|
||||
static void __init init_cpu_hwcaps_indirect_list(void)
|
||||
{
|
||||
init_cpu_hwcaps_indirect_list_from_array(arm64_features);
|
||||
init_cpu_hwcaps_indirect_list_from_array(arm64_errata);
|
||||
}
|
||||
|
||||
static void __init setup_boot_cpu_capabilities(void);
|
||||
|
||||
void __init init_cpu_features(struct cpuinfo_arm64 *info)
|
||||
@@ -563,6 +596,12 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
|
||||
sve_init_vq_map();
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialize the indirect array of CPU hwcaps capabilities pointers
|
||||
* before we handle the boot CPU below.
|
||||
*/
|
||||
init_cpu_hwcaps_indirect_list();
|
||||
|
||||
/*
|
||||
* Detect and enable early CPU capabilities based on the boot CPU,
|
||||
* after we have initialised the CPU feature infrastructure.
|
||||
@@ -915,6 +954,12 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
|
||||
static const struct midr_range kpti_safe_list[] = {
|
||||
MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
|
||||
MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
|
||||
MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
|
||||
MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
|
||||
MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
|
||||
MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
|
||||
MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
|
||||
MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
|
||||
{ /* sentinel */ }
|
||||
};
|
||||
char const *str = "command line option";
|
||||
@@ -1145,6 +1190,14 @@ static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
|
||||
}
|
||||
#endif /* CONFIG_ARM64_RAS_EXTN */
|
||||
|
||||
#ifdef CONFIG_ARM64_PTR_AUTH
|
||||
static void cpu_enable_address_auth(struct arm64_cpu_capabilities const *cap)
|
||||
{
|
||||
sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ENIA | SCTLR_ELx_ENIB |
|
||||
SCTLR_ELx_ENDA | SCTLR_ELx_ENDB);
|
||||
}
|
||||
#endif /* CONFIG_ARM64_PTR_AUTH */
|
||||
|
||||
static const struct arm64_cpu_capabilities arm64_features[] = {
|
||||
{
|
||||
.desc = "GIC system register CPU interface",
|
||||
@@ -1368,22 +1421,115 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
|
||||
.cpu_enable = cpu_enable_cnp,
|
||||
},
|
||||
#endif
|
||||
{
|
||||
.desc = "Speculation barrier (SB)",
|
||||
.capability = ARM64_HAS_SB,
|
||||
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
|
||||
.matches = has_cpuid_feature,
|
||||
.sys_reg = SYS_ID_AA64ISAR1_EL1,
|
||||
.field_pos = ID_AA64ISAR1_SB_SHIFT,
|
||||
.sign = FTR_UNSIGNED,
|
||||
.min_field_value = 1,
|
||||
},
|
||||
#ifdef CONFIG_ARM64_PTR_AUTH
|
||||
{
|
||||
.desc = "Address authentication (architected algorithm)",
|
||||
.capability = ARM64_HAS_ADDRESS_AUTH_ARCH,
|
||||
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
|
||||
.sys_reg = SYS_ID_AA64ISAR1_EL1,
|
||||
.sign = FTR_UNSIGNED,
|
||||
.field_pos = ID_AA64ISAR1_APA_SHIFT,
|
||||
.min_field_value = ID_AA64ISAR1_APA_ARCHITECTED,
|
||||
.matches = has_cpuid_feature,
|
||||
.cpu_enable = cpu_enable_address_auth,
|
||||
},
|
||||
{
|
||||
.desc = "Address authentication (IMP DEF algorithm)",
|
||||
.capability = ARM64_HAS_ADDRESS_AUTH_IMP_DEF,
|
||||
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
|
||||
.sys_reg = SYS_ID_AA64ISAR1_EL1,
|
||||
.sign = FTR_UNSIGNED,
|
||||
.field_pos = ID_AA64ISAR1_API_SHIFT,
|
||||
.min_field_value = ID_AA64ISAR1_API_IMP_DEF,
|
||||
.matches = has_cpuid_feature,
|
||||
.cpu_enable = cpu_enable_address_auth,
|
||||
},
|
||||
{
|
||||
.desc = "Generic authentication (architected algorithm)",
|
||||
.capability = ARM64_HAS_GENERIC_AUTH_ARCH,
|
||||
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
|
||||
.sys_reg = SYS_ID_AA64ISAR1_EL1,
|
||||
.sign = FTR_UNSIGNED,
|
||||
.field_pos = ID_AA64ISAR1_GPA_SHIFT,
|
||||
.min_field_value = ID_AA64ISAR1_GPA_ARCHITECTED,
|
||||
.matches = has_cpuid_feature,
|
||||
},
|
||||
{
|
||||
.desc = "Generic authentication (IMP DEF algorithm)",
|
||||
.capability = ARM64_HAS_GENERIC_AUTH_IMP_DEF,
|
||||
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
|
||||
.sys_reg = SYS_ID_AA64ISAR1_EL1,
|
||||
.sign = FTR_UNSIGNED,
|
||||
.field_pos = ID_AA64ISAR1_GPI_SHIFT,
|
||||
.min_field_value = ID_AA64ISAR1_GPI_IMP_DEF,
|
||||
.matches = has_cpuid_feature,
|
||||
},
|
||||
#endif /* CONFIG_ARM64_PTR_AUTH */
|
||||
{},
|
||||
};
|
||||
|
||||
#define HWCAP_CAP(reg, field, s, min_value, cap_type, cap) \
|
||||
{ \
|
||||
.desc = #cap, \
|
||||
.type = ARM64_CPUCAP_SYSTEM_FEATURE, \
|
||||
.matches = has_cpuid_feature, \
|
||||
.sys_reg = reg, \
|
||||
.field_pos = field, \
|
||||
.sign = s, \
|
||||
.min_field_value = min_value, \
|
||||
.hwcap_type = cap_type, \
|
||||
.hwcap = cap, \
|
||||
#define HWCAP_CPUID_MATCH(reg, field, s, min_value) \
|
||||
.matches = has_cpuid_feature, \
|
||||
.sys_reg = reg, \
|
||||
.field_pos = field, \
|
||||
.sign = s, \
|
||||
.min_field_value = min_value,
|
||||
|
||||
#define __HWCAP_CAP(name, cap_type, cap) \
|
||||
.desc = name, \
|
||||
.type = ARM64_CPUCAP_SYSTEM_FEATURE, \
|
||||
.hwcap_type = cap_type, \
|
||||
.hwcap = cap, \
|
||||
|
||||
#define HWCAP_CAP(reg, field, s, min_value, cap_type, cap) \
|
||||
{ \
|
||||
__HWCAP_CAP(#cap, cap_type, cap) \
|
||||
HWCAP_CPUID_MATCH(reg, field, s, min_value) \
|
||||
}
|
||||
|
||||
#define HWCAP_MULTI_CAP(list, cap_type, cap) \
|
||||
{ \
|
||||
__HWCAP_CAP(#cap, cap_type, cap) \
|
||||
.matches = cpucap_multi_entry_cap_matches, \
|
||||
.match_list = list, \
|
||||
}
|
||||
|
||||
#ifdef CONFIG_ARM64_PTR_AUTH
|
||||
static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = {
|
||||
{
|
||||
HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_APA_SHIFT,
|
||||
FTR_UNSIGNED, ID_AA64ISAR1_APA_ARCHITECTED)
|
||||
},
|
||||
{
|
||||
HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_API_SHIFT,
|
||||
FTR_UNSIGNED, ID_AA64ISAR1_API_IMP_DEF)
|
||||
},
|
||||
{},
|
||||
};
|
||||
|
||||
static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = {
|
||||
{
|
||||
HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_GPA_SHIFT,
|
||||
FTR_UNSIGNED, ID_AA64ISAR1_GPA_ARCHITECTED)
|
||||
},
|
||||
{
|
||||
HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_GPI_SHIFT,
|
||||
FTR_UNSIGNED, ID_AA64ISAR1_GPI_IMP_DEF)
|
||||
},
|
||||
{},
|
||||
};
|
||||
#endif
|
||||
|
||||
static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
|
||||
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_PMULL),
|
||||
HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES),
|
||||
@@ -1409,11 +1555,16 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
|
||||
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA),
|
||||
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC),
|
||||
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ILRCPC),
|
||||
HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_SB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SB),
|
||||
HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_USCAT),
|
||||
#ifdef CONFIG_ARM64_SVE
|
||||
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE),
|
||||
#endif
|
||||
HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, HWCAP_SSBS),
|
||||
#ifdef CONFIG_ARM64_PTR_AUTH
|
||||
HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, HWCAP_PACA),
|
||||
HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, HWCAP_PACG),
|
||||
#endif
|
||||
{},
|
||||
};
|
||||
|
||||
@@ -1482,52 +1633,46 @@ static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
|
||||
cap_set_elf_hwcap(hwcaps);
|
||||
}
|
||||
|
||||
/*
|
||||
* Check if the current CPU has a given feature capability.
|
||||
* Should be called from non-preemptible context.
|
||||
*/
|
||||
static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
|
||||
unsigned int cap)
|
||||
static void update_cpu_capabilities(u16 scope_mask)
|
||||
{
|
||||
int i;
|
||||
const struct arm64_cpu_capabilities *caps;
|
||||
|
||||
if (WARN_ON(preemptible()))
|
||||
return false;
|
||||
|
||||
for (caps = cap_array; caps->matches; caps++)
|
||||
if (caps->capability == cap)
|
||||
return caps->matches(caps, SCOPE_LOCAL_CPU);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static void __update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
|
||||
u16 scope_mask, const char *info)
|
||||
{
|
||||
scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
|
||||
for (; caps->matches; caps++) {
|
||||
if (!(caps->type & scope_mask) ||
|
||||
for (i = 0; i < ARM64_NCAPS; i++) {
|
||||
caps = cpu_hwcaps_ptrs[i];
|
||||
if (!caps || !(caps->type & scope_mask) ||
|
||||
cpus_have_cap(caps->capability) ||
|
||||
!caps->matches(caps, cpucap_default_scope(caps)))
|
||||
continue;
|
||||
|
||||
if (!cpus_have_cap(caps->capability) && caps->desc)
|
||||
pr_info("%s %s\n", info, caps->desc);
|
||||
if (caps->desc)
|
||||
pr_info("detected: %s\n", caps->desc);
|
||||
cpus_set_cap(caps->capability);
|
||||
}
|
||||
}
|
||||
|
||||
static void update_cpu_capabilities(u16 scope_mask)
|
||||
/*
|
||||
* Enable all the available capabilities on this CPU. The capabilities
|
||||
* with BOOT_CPU scope are handled separately and hence skipped here.
|
||||
*/
|
||||
static int cpu_enable_non_boot_scope_capabilities(void *__unused)
|
||||
{
|
||||
__update_cpu_capabilities(arm64_errata, scope_mask,
|
||||
"enabling workaround for");
|
||||
__update_cpu_capabilities(arm64_features, scope_mask, "detected:");
|
||||
}
|
||||
int i;
|
||||
u16 non_boot_scope = SCOPE_ALL & ~SCOPE_BOOT_CPU;
|
||||
|
||||
static int __enable_cpu_capability(void *arg)
|
||||
{
|
||||
const struct arm64_cpu_capabilities *cap = arg;
|
||||
for_each_available_cap(i) {
|
||||
const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[i];
|
||||
|
||||
cap->cpu_enable(cap);
|
||||
if (WARN_ON(!cap))
|
||||
continue;
|
||||
|
||||
if (!(cap->type & non_boot_scope))
|
||||
continue;
|
||||
|
||||
if (cap->cpu_enable)
|
||||
cap->cpu_enable(cap);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -1535,21 +1680,29 @@ static int __enable_cpu_capability(void *arg)
|
||||
* Run through the enabled capabilities and enable() it on all active
|
||||
* CPUs
|
||||
*/
|
||||
static void __init
|
||||
__enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
|
||||
u16 scope_mask)
|
||||
static void __init enable_cpu_capabilities(u16 scope_mask)
|
||||
{
|
||||
scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
|
||||
for (; caps->matches; caps++) {
|
||||
unsigned int num = caps->capability;
|
||||
int i;
|
||||
const struct arm64_cpu_capabilities *caps;
|
||||
bool boot_scope;
|
||||
|
||||
if (!(caps->type & scope_mask) || !cpus_have_cap(num))
|
||||
scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
|
||||
boot_scope = !!(scope_mask & SCOPE_BOOT_CPU);
|
||||
|
||||
for (i = 0; i < ARM64_NCAPS; i++) {
|
||||
unsigned int num;
|
||||
|
||||
caps = cpu_hwcaps_ptrs[i];
|
||||
if (!caps || !(caps->type & scope_mask))
|
||||
continue;
|
||||
num = caps->capability;
|
||||
if (!cpus_have_cap(num))
|
||||
continue;
|
||||
|
||||
/* Ensure cpus_have_const_cap(num) works */
|
||||
static_branch_enable(&cpu_hwcap_keys[num]);
|
||||
|
||||
if (caps->cpu_enable) {
|
||||
if (boot_scope && caps->cpu_enable)
|
||||
/*
|
||||
* Capabilities with SCOPE_BOOT_CPU scope are finalised
|
||||
* before any secondary CPU boots. Thus, each secondary
|
||||
@@ -1558,25 +1711,19 @@ __enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
|
||||
* the boot CPU, for which the capability must be
|
||||
* enabled here. This approach avoids costly
|
||||
* stop_machine() calls for this case.
|
||||
*
|
||||
* Otherwise, use stop_machine() as it schedules the
|
||||
* work allowing us to modify PSTATE, instead of
|
||||
* on_each_cpu() which uses an IPI, giving us a PSTATE
|
||||
* that disappears when we return.
|
||||
*/
|
||||
if (scope_mask & SCOPE_BOOT_CPU)
|
||||
caps->cpu_enable(caps);
|
||||
else
|
||||
stop_machine(__enable_cpu_capability,
|
||||
(void *)caps, cpu_online_mask);
|
||||
}
|
||||
caps->cpu_enable(caps);
|
||||
}
|
||||
}
|
||||
|
||||
static void __init enable_cpu_capabilities(u16 scope_mask)
|
||||
{
|
||||
__enable_cpu_capabilities(arm64_errata, scope_mask);
|
||||
__enable_cpu_capabilities(arm64_features, scope_mask);
|
||||
/*
|
||||
* For all non-boot scope capabilities, use stop_machine()
|
||||
* as it schedules the work allowing us to modify PSTATE,
|
||||
* instead of on_each_cpu() which uses an IPI, giving us a
|
||||
* PSTATE that disappears when we return.
|
||||
*/
|
||||
if (!boot_scope)
|
||||
stop_machine(cpu_enable_non_boot_scope_capabilities,
|
||||
NULL, cpu_online_mask);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -1586,16 +1733,17 @@ static void __init enable_cpu_capabilities(u16 scope_mask)
|
||||
*
|
||||
* Returns "false" on conflicts.
|
||||
*/
|
||||
static bool
|
||||
__verify_local_cpu_caps(const struct arm64_cpu_capabilities *caps,
|
||||
u16 scope_mask)
|
||||
static bool verify_local_cpu_caps(u16 scope_mask)
|
||||
{
|
||||
int i;
|
||||
bool cpu_has_cap, system_has_cap;
|
||||
const struct arm64_cpu_capabilities *caps;
|
||||
|
||||
scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
|
||||
|
||||
for (; caps->matches; caps++) {
|
||||
if (!(caps->type & scope_mask))
|
||||
for (i = 0; i < ARM64_NCAPS; i++) {
|
||||
caps = cpu_hwcaps_ptrs[i];
|
||||
if (!caps || !(caps->type & scope_mask))
|
||||
continue;
|
||||
|
||||
cpu_has_cap = caps->matches(caps, SCOPE_LOCAL_CPU);
|
||||
@@ -1626,7 +1774,7 @@ __verify_local_cpu_caps(const struct arm64_cpu_capabilities *caps,
|
||||
}
|
||||
}
|
||||
|
||||
if (caps->matches) {
|
||||
if (i < ARM64_NCAPS) {
|
||||
pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n",
|
||||
smp_processor_id(), caps->capability,
|
||||
caps->desc, system_has_cap, cpu_has_cap);
|
||||
@@ -1636,12 +1784,6 @@ __verify_local_cpu_caps(const struct arm64_cpu_capabilities *caps,
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool verify_local_cpu_caps(u16 scope_mask)
|
||||
{
|
||||
return __verify_local_cpu_caps(arm64_errata, scope_mask) &&
|
||||
__verify_local_cpu_caps(arm64_features, scope_mask);
|
||||
}
|
||||
|
||||
/*
|
||||
* Check for CPU features that are used in early boot
|
||||
* based on the Boot CPU value.
|
||||
@@ -1750,12 +1892,16 @@ static void __init mark_const_caps_ready(void)
|
||||
static_branch_enable(&arm64_const_caps_ready);
|
||||
}
|
||||
|
||||
extern const struct arm64_cpu_capabilities arm64_errata[];
|
||||
|
||||
bool this_cpu_has_cap(unsigned int cap)
|
||||
bool this_cpu_has_cap(unsigned int n)
|
||||
{
|
||||
return (__this_cpu_has_cap(arm64_features, cap) ||
|
||||
__this_cpu_has_cap(arm64_errata, cap));
|
||||
if (!WARN_ON(preemptible()) && n < ARM64_NCAPS) {
|
||||
const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n];
|
||||
|
||||
if (cap)
|
||||
return cap->matches(cap, SCOPE_LOCAL_CPU);
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static void __init setup_system_capabilities(void)
|
||||
|
@@ -82,6 +82,9 @@ static const char *const hwcap_str[] = {
|
||||
"ilrcpc",
|
||||
"flagm",
|
||||
"ssbs",
|
||||
"sb",
|
||||
"paca",
|
||||
"pacg",
|
||||
NULL
|
||||
};
|
||||
|
||||
|
@@ -79,7 +79,6 @@
|
||||
.macro mcount_get_lr reg
|
||||
ldr \reg, [x29]
|
||||
ldr \reg, [\reg, #8]
|
||||
mcount_adjust_addr \reg, \reg
|
||||
.endm
|
||||
|
||||
.macro mcount_get_lr_addr reg
|
||||
@@ -121,6 +120,8 @@ skip_ftrace_call: // }
|
||||
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
|
||||
mcount_exit
|
||||
ENDPROC(_mcount)
|
||||
EXPORT_SYMBOL(_mcount)
|
||||
NOKPROBE(_mcount)
|
||||
|
||||
#else /* CONFIG_DYNAMIC_FTRACE */
|
||||
/*
|
||||
@@ -132,6 +133,8 @@ ENDPROC(_mcount)
|
||||
ENTRY(_mcount)
|
||||
ret
|
||||
ENDPROC(_mcount)
|
||||
EXPORT_SYMBOL(_mcount)
|
||||
NOKPROBE(_mcount)
|
||||
|
||||
/*
|
||||
* void ftrace_caller(unsigned long return_address)
|
||||
@@ -148,14 +151,12 @@ ENTRY(ftrace_caller)
|
||||
mcount_get_pc0 x0 // function's pc
|
||||
mcount_get_lr x1 // function's lr
|
||||
|
||||
.global ftrace_call
|
||||
ftrace_call: // tracer(pc, lr);
|
||||
GLOBAL(ftrace_call) // tracer(pc, lr);
|
||||
nop // This will be replaced with "bl xxx"
|
||||
// where xxx can be any kind of tracer.
|
||||
|
||||
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
||||
.global ftrace_graph_call
|
||||
ftrace_graph_call: // ftrace_graph_caller();
|
||||
GLOBAL(ftrace_graph_call) // ftrace_graph_caller();
|
||||
nop // If enabled, this will be replaced
|
||||
// "b ftrace_graph_caller"
|
||||
#endif
|
||||
@@ -169,24 +170,6 @@ ENTRY(ftrace_stub)
|
||||
ENDPROC(ftrace_stub)
|
||||
|
||||
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
||||
/* save return value regs*/
|
||||
.macro save_return_regs
|
||||
sub sp, sp, #64
|
||||
stp x0, x1, [sp]
|
||||
stp x2, x3, [sp, #16]
|
||||
stp x4, x5, [sp, #32]
|
||||
stp x6, x7, [sp, #48]
|
||||
.endm
|
||||
|
||||
/* restore return value regs*/
|
||||
.macro restore_return_regs
|
||||
ldp x0, x1, [sp]
|
||||
ldp x2, x3, [sp, #16]
|
||||
ldp x4, x5, [sp, #32]
|
||||
ldp x6, x7, [sp, #48]
|
||||
add sp, sp, #64
|
||||
.endm
|
||||
|
||||
/*
|
||||
* void ftrace_graph_caller(void)
|
||||
*
|
||||
@@ -197,10 +180,10 @@ ENDPROC(ftrace_stub)
|
||||
* and run return_to_handler() later on its exit.
|
||||
*/
|
||||
ENTRY(ftrace_graph_caller)
|
||||
mcount_get_lr_addr x0 // pointer to function's saved lr
|
||||
mcount_get_pc x1 // function's pc
|
||||
mcount_get_pc x0 // function's pc
|
||||
mcount_get_lr_addr x1 // pointer to function's saved lr
|
||||
mcount_get_parent_fp x2 // parent's fp
|
||||
bl prepare_ftrace_return // prepare_ftrace_return(&lr, pc, fp)
|
||||
bl prepare_ftrace_return // prepare_ftrace_return(pc, &lr, fp)
|
||||
|
||||
mcount_exit
|
||||
ENDPROC(ftrace_graph_caller)
|
||||
@@ -209,15 +192,27 @@ ENDPROC(ftrace_graph_caller)
|
||||
* void return_to_handler(void)
|
||||
*
|
||||
* Run ftrace_return_to_handler() before going back to parent.
|
||||
* @fp is checked against the value passed by ftrace_graph_caller()
|
||||
* only when HAVE_FUNCTION_GRAPH_FP_TEST is enabled.
|
||||
* @fp is checked against the value passed by ftrace_graph_caller().
|
||||
*/
|
||||
ENTRY(return_to_handler)
|
||||
save_return_regs
|
||||
/* save return value regs */
|
||||
sub sp, sp, #64
|
||||
stp x0, x1, [sp]
|
||||
stp x2, x3, [sp, #16]
|
||||
stp x4, x5, [sp, #32]
|
||||
stp x6, x7, [sp, #48]
|
||||
|
||||
mov x0, x29 // parent's fp
|
||||
bl ftrace_return_to_handler// addr = ftrace_return_to_hander(fp);
|
||||
mov x30, x0 // restore the original return address
|
||||
restore_return_regs
|
||||
|
||||
/* restore return value regs */
|
||||
ldp x0, x1, [sp]
|
||||
ldp x2, x3, [sp, #16]
|
||||
ldp x4, x5, [sp, #32]
|
||||
ldp x6, x7, [sp, #48]
|
||||
add sp, sp, #64
|
||||
|
||||
ret
|
||||
END(return_to_handler)
|
||||
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
|
||||
|
@@ -344,10 +344,6 @@ alternative_else_nop_endif
|
||||
ldp x28, x29, [sp, #16 * 14]
|
||||
ldr lr, [sp, #S_LR]
|
||||
add sp, sp, #S_FRAME_SIZE // restore sp
|
||||
/*
|
||||
* ARCH_HAS_MEMBARRIER_SYNC_CORE rely on eret context synchronization
|
||||
* when returning from IPI handler, and when returning to user-space.
|
||||
*/
|
||||
|
||||
.if \el == 0
|
||||
alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
|
||||
@@ -363,6 +359,7 @@ alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
|
||||
.else
|
||||
eret
|
||||
.endif
|
||||
sb
|
||||
.endm
|
||||
|
||||
.macro irq_stack_entry
|
||||
@@ -622,10 +619,8 @@ el1_irq:
|
||||
irq_handler
|
||||
|
||||
#ifdef CONFIG_PREEMPT
|
||||
ldr w24, [tsk, #TSK_TI_PREEMPT] // get preempt count
|
||||
cbnz w24, 1f // preempt count != 0
|
||||
ldr x0, [tsk, #TSK_TI_FLAGS] // get flags
|
||||
tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
|
||||
ldr x24, [tsk, #TSK_TI_PREEMPT] // get preempt count
|
||||
cbnz x24, 1f // preempt count != 0
|
||||
bl el1_preempt
|
||||
1:
|
||||
#endif
|
||||
@@ -1006,6 +1001,7 @@ alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
|
||||
mrs x30, far_el1
|
||||
.endif
|
||||
eret
|
||||
sb
|
||||
.endm
|
||||
|
||||
.align 11
|
||||
|
@@ -104,7 +104,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
|
||||
* is added in the future, but for now, the pr_err() below
|
||||
* deals with a theoretical issue only.
|
||||
*/
|
||||
trampoline = get_plt_entry(addr);
|
||||
trampoline = get_plt_entry(addr, mod->arch.ftrace_trampoline);
|
||||
if (!plt_entries_equal(mod->arch.ftrace_trampoline,
|
||||
&trampoline)) {
|
||||
if (!plt_entries_equal(mod->arch.ftrace_trampoline,
|
||||
@@ -211,7 +211,7 @@ int __init ftrace_dyn_arch_init(void)
|
||||
*
|
||||
* Note that @frame_pointer is used only for sanity check later.
|
||||
*/
|
||||
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
|
||||
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
|
||||
unsigned long frame_pointer)
|
||||
{
|
||||
unsigned long return_hooker = (unsigned long)&return_to_handler;
|
||||
|
@@ -31,6 +31,7 @@
|
||||
#include <asm/cache.h>
|
||||
#include <asm/cputype.h>
|
||||
#include <asm/elf.h>
|
||||
#include <asm/image.h>
|
||||
#include <asm/kernel-pgtable.h>
|
||||
#include <asm/kvm_arm.h>
|
||||
#include <asm/memory.h>
|
||||
@@ -91,7 +92,7 @@ _head:
|
||||
.quad 0 // reserved
|
||||
.quad 0 // reserved
|
||||
.quad 0 // reserved
|
||||
.ascii "ARM\x64" // Magic number
|
||||
.ascii ARM64_IMAGE_MAGIC // Magic number
|
||||
#ifdef CONFIG_EFI
|
||||
.long pe_header - _head // Offset to the PE header.
|
||||
|
||||
@@ -318,6 +319,19 @@ __create_page_tables:
|
||||
adrp x0, idmap_pg_dir
|
||||
adrp x3, __idmap_text_start // __pa(__idmap_text_start)
|
||||
|
||||
#ifdef CONFIG_ARM64_USER_VA_BITS_52
|
||||
mrs_s x6, SYS_ID_AA64MMFR2_EL1
|
||||
and x6, x6, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
|
||||
mov x5, #52
|
||||
cbnz x6, 1f
|
||||
#endif
|
||||
mov x5, #VA_BITS
|
||||
1:
|
||||
adr_l x6, vabits_user
|
||||
str x5, [x6]
|
||||
dmb sy
|
||||
dc ivac, x6 // Invalidate potentially stale cache line
|
||||
|
||||
/*
|
||||
* VA_BITS may be too small to allow for an ID mapping to be created
|
||||
* that covers system RAM if that is located sufficiently high in the
|
||||
@@ -496,10 +510,9 @@ ENTRY(el2_setup)
|
||||
#endif
|
||||
|
||||
/* Hyp configuration. */
|
||||
mov x0, #HCR_RW // 64-bit EL1
|
||||
mov_q x0, HCR_HOST_NVHE_FLAGS
|
||||
cbz x2, set_hcr
|
||||
orr x0, x0, #HCR_TGE // Enable Host Extensions
|
||||
orr x0, x0, #HCR_E2H
|
||||
mov_q x0, HCR_HOST_VHE_FLAGS
|
||||
set_hcr:
|
||||
msr hcr_el2, x0
|
||||
isb
|
||||
@@ -707,6 +720,7 @@ secondary_startup:
|
||||
/*
|
||||
* Common entry point for secondary CPUs.
|
||||
*/
|
||||
bl __cpu_secondary_check52bitva
|
||||
bl __cpu_setup // initialise processor
|
||||
adrp x1, swapper_pg_dir
|
||||
bl __enable_mmu
|
||||
@@ -769,6 +783,7 @@ ENTRY(__enable_mmu)
|
||||
phys_to_ttbr x1, x1
|
||||
phys_to_ttbr x2, x2
|
||||
msr ttbr0_el1, x2 // load TTBR0
|
||||
offset_ttbr1 x1
|
||||
msr ttbr1_el1, x1 // load TTBR1
|
||||
isb
|
||||
msr sctlr_el1, x0
|
||||
@@ -784,9 +799,30 @@ ENTRY(__enable_mmu)
|
||||
ret
|
||||
ENDPROC(__enable_mmu)
|
||||
|
||||
ENTRY(__cpu_secondary_check52bitva)
|
||||
#ifdef CONFIG_ARM64_USER_VA_BITS_52
|
||||
ldr_l x0, vabits_user
|
||||
cmp x0, #52
|
||||
b.ne 2f
|
||||
|
||||
mrs_s x0, SYS_ID_AA64MMFR2_EL1
|
||||
and x0, x0, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
|
||||
cbnz x0, 2f
|
||||
|
||||
update_early_cpu_boot_status \
|
||||
CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_52_BIT_VA, x0, x1
|
||||
1: wfe
|
||||
wfi
|
||||
b 1b
|
||||
|
||||
#endif
|
||||
2: ret
|
||||
ENDPROC(__cpu_secondary_check52bitva)
|
||||
|
||||
__no_granule_support:
|
||||
/* Indicate that this CPU can't boot and is stuck in the kernel */
|
||||
update_early_cpu_boot_status CPU_STUCK_IN_KERNEL, x1, x2
|
||||
update_early_cpu_boot_status \
|
||||
CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_NO_GRAN, x1, x2
|
||||
1:
|
||||
wfe
|
||||
wfi
|
||||
|
@@ -40,6 +40,7 @@
|
||||
tlbi vmalle1
|
||||
dsb nsh
|
||||
phys_to_ttbr \tmp, \page_table
|
||||
offset_ttbr1 \tmp
|
||||
msr ttbr1_el1, \tmp
|
||||
isb
|
||||
.endm
|
||||
|
@@ -15,13 +15,15 @@
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#ifndef __ASM_IMAGE_H
|
||||
#define __ASM_IMAGE_H
|
||||
#ifndef __ARM64_KERNEL_IMAGE_H
|
||||
#define __ARM64_KERNEL_IMAGE_H
|
||||
|
||||
#ifndef LINKER_SCRIPT
|
||||
#error This file should only be included in vmlinux.lds.S
|
||||
#endif
|
||||
|
||||
#include <asm/image.h>
|
||||
|
||||
/*
|
||||
* There aren't any ELF relocations we can use to endian-swap values known only
|
||||
* at link time (e.g. the subtraction of two symbol addresses), so we must get
|
||||
@@ -47,19 +49,22 @@
|
||||
sym##_lo32 = DATA_LE32((data) & 0xffffffff); \
|
||||
sym##_hi32 = DATA_LE32((data) >> 32)
|
||||
|
||||
#define __HEAD_FLAG(field) (__HEAD_FLAG_##field << \
|
||||
ARM64_IMAGE_FLAG_##field##_SHIFT)
|
||||
|
||||
#ifdef CONFIG_CPU_BIG_ENDIAN
|
||||
#define __HEAD_FLAG_BE 1
|
||||
#define __HEAD_FLAG_BE ARM64_IMAGE_FLAG_BE
|
||||
#else
|
||||
#define __HEAD_FLAG_BE 0
|
||||
#define __HEAD_FLAG_BE ARM64_IMAGE_FLAG_LE
|
||||
#endif
|
||||
|
||||
#define __HEAD_FLAG_PAGE_SIZE ((PAGE_SHIFT - 10) / 2)
|
||||
|
||||
#define __HEAD_FLAG_PHYS_BASE 1
|
||||
|
||||
#define __HEAD_FLAGS ((__HEAD_FLAG_BE << 0) | \
|
||||
(__HEAD_FLAG_PAGE_SIZE << 1) | \
|
||||
(__HEAD_FLAG_PHYS_BASE << 3))
|
||||
#define __HEAD_FLAGS (__HEAD_FLAG(BE) | \
|
||||
__HEAD_FLAG(PAGE_SIZE) | \
|
||||
__HEAD_FLAG(PHYS_BASE))
|
||||
|
||||
/*
|
||||
* These will output as part of the Image header, which should be little-endian
|
||||
@@ -75,16 +80,6 @@
|
||||
|
||||
__efistub_stext_offset = stext - _text;
|
||||
|
||||
/*
|
||||
* Prevent the symbol aliases below from being emitted into the kallsyms
|
||||
* table, by forcing them to be absolute symbols (which are conveniently
|
||||
* ignored by scripts/kallsyms) rather than section relative symbols.
|
||||
* The distinction is only relevant for partial linking, and only for symbols
|
||||
* that are defined within a section declaration (which is not the case for
|
||||
* the definitions below) so the resulting values will be identical.
|
||||
*/
|
||||
#define KALLSYMS_HIDE(sym) ABSOLUTE(sym)
|
||||
|
||||
/*
|
||||
* The EFI stub has its own symbol namespace prefixed by __efistub_, to
|
||||
* isolate it from the kernel proper. The following symbols are legally
|
||||
@@ -94,29 +89,29 @@ __efistub_stext_offset = stext - _text;
|
||||
* linked at. The routines below are all implemented in assembler in a
|
||||
* position independent manner
|
||||
*/
|
||||
__efistub_memcmp = KALLSYMS_HIDE(__pi_memcmp);
|
||||
__efistub_memchr = KALLSYMS_HIDE(__pi_memchr);
|
||||
__efistub_memcpy = KALLSYMS_HIDE(__pi_memcpy);
|
||||
__efistub_memmove = KALLSYMS_HIDE(__pi_memmove);
|
||||
__efistub_memset = KALLSYMS_HIDE(__pi_memset);
|
||||
__efistub_strlen = KALLSYMS_HIDE(__pi_strlen);
|
||||
__efistub_strnlen = KALLSYMS_HIDE(__pi_strnlen);
|
||||
__efistub_strcmp = KALLSYMS_HIDE(__pi_strcmp);
|
||||
__efistub_strncmp = KALLSYMS_HIDE(__pi_strncmp);
|
||||
__efistub_strrchr = KALLSYMS_HIDE(__pi_strrchr);
|
||||
__efistub___flush_dcache_area = KALLSYMS_HIDE(__pi___flush_dcache_area);
|
||||
__efistub_memcmp = __pi_memcmp;
|
||||
__efistub_memchr = __pi_memchr;
|
||||
__efistub_memcpy = __pi_memcpy;
|
||||
__efistub_memmove = __pi_memmove;
|
||||
__efistub_memset = __pi_memset;
|
||||
__efistub_strlen = __pi_strlen;
|
||||
__efistub_strnlen = __pi_strnlen;
|
||||
__efistub_strcmp = __pi_strcmp;
|
||||
__efistub_strncmp = __pi_strncmp;
|
||||
__efistub_strrchr = __pi_strrchr;
|
||||
__efistub___flush_dcache_area = __pi___flush_dcache_area;
|
||||
|
||||
#ifdef CONFIG_KASAN
|
||||
__efistub___memcpy = KALLSYMS_HIDE(__pi_memcpy);
|
||||
__efistub___memmove = KALLSYMS_HIDE(__pi_memmove);
|
||||
__efistub___memset = KALLSYMS_HIDE(__pi_memset);
|
||||
__efistub___memcpy = __pi_memcpy;
|
||||
__efistub___memmove = __pi_memmove;
|
||||
__efistub___memset = __pi_memset;
|
||||
#endif
|
||||
|
||||
__efistub__text = KALLSYMS_HIDE(_text);
|
||||
__efistub__end = KALLSYMS_HIDE(_end);
|
||||
__efistub__edata = KALLSYMS_HIDE(_edata);
|
||||
__efistub_screen_info = KALLSYMS_HIDE(screen_info);
|
||||
__efistub__text = _text;
|
||||
__efistub__end = _end;
|
||||
__efistub__edata = _edata;
|
||||
__efistub_screen_info = screen_info;
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* __ASM_IMAGE_H */
|
||||
#endif /* __ARM64_KERNEL_IMAGE_H */
|
||||
|
@@ -1239,6 +1239,35 @@ u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
|
||||
return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
|
||||
}
|
||||
|
||||
u32 aarch64_insn_gen_adr(unsigned long pc, unsigned long addr,
|
||||
enum aarch64_insn_register reg,
|
||||
enum aarch64_insn_adr_type type)
|
||||
{
|
||||
u32 insn;
|
||||
s32 offset;
|
||||
|
||||
switch (type) {
|
||||
case AARCH64_INSN_ADR_TYPE_ADR:
|
||||
insn = aarch64_insn_get_adr_value();
|
||||
offset = addr - pc;
|
||||
break;
|
||||
case AARCH64_INSN_ADR_TYPE_ADRP:
|
||||
insn = aarch64_insn_get_adrp_value();
|
||||
offset = (addr - ALIGN_DOWN(pc, SZ_4K)) >> 12;
|
||||
break;
|
||||
default:
|
||||
pr_err("%s: unknown adr encoding %d\n", __func__, type);
|
||||
return AARCH64_BREAK_FAULT;
|
||||
}
|
||||
|
||||
if (offset < -SZ_1M || offset >= SZ_1M)
|
||||
return AARCH64_BREAK_FAULT;
|
||||
|
||||
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, reg);
|
||||
|
||||
return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn, offset);
|
||||
}
|
||||
|
||||
/*
|
||||
* Decode the imm field of a branch, and return the byte offset as a
|
||||
* signed value (so it can be used when computing a new branch
|
||||
|
130
arch/arm64/kernel/kexec_image.c
Normal file
130
arch/arm64/kernel/kexec_image.c
Normal file
@@ -0,0 +1,130 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Kexec image loader
|
||||
|
||||
* Copyright (C) 2018 Linaro Limited
|
||||
* Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) "kexec_file(Image): " fmt
|
||||
|
||||
#include <linux/err.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/kexec.h>
|
||||
#include <linux/pe.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/verification.h>
|
||||
#include <asm/byteorder.h>
|
||||
#include <asm/cpufeature.h>
|
||||
#include <asm/image.h>
|
||||
#include <asm/memory.h>
|
||||
|
||||
static int image_probe(const char *kernel_buf, unsigned long kernel_len)
|
||||
{
|
||||
const struct arm64_image_header *h =
|
||||
(const struct arm64_image_header *)(kernel_buf);
|
||||
|
||||
if (!h || (kernel_len < sizeof(*h)))
|
||||
return -EINVAL;
|
||||
|
||||
if (memcmp(&h->magic, ARM64_IMAGE_MAGIC, sizeof(h->magic)))
|
||||
return -EINVAL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void *image_load(struct kimage *image,
|
||||
char *kernel, unsigned long kernel_len,
|
||||
char *initrd, unsigned long initrd_len,
|
||||
char *cmdline, unsigned long cmdline_len)
|
||||
{
|
||||
struct arm64_image_header *h;
|
||||
u64 flags, value;
|
||||
bool be_image, be_kernel;
|
||||
struct kexec_buf kbuf;
|
||||
unsigned long text_offset;
|
||||
struct kexec_segment *kernel_segment;
|
||||
int ret;
|
||||
|
||||
/* We don't support crash kernels yet. */
|
||||
if (image->type == KEXEC_TYPE_CRASH)
|
||||
return ERR_PTR(-EOPNOTSUPP);
|
||||
|
||||
/*
|
||||
* We require a kernel with an unambiguous Image header. Per
|
||||
* Documentation/booting.txt, this is the case when image_size
|
||||
* is non-zero (practically speaking, since v3.17).
|
||||
*/
|
||||
h = (struct arm64_image_header *)kernel;
|
||||
if (!h->image_size)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
/* Check cpu features */
|
||||
flags = le64_to_cpu(h->flags);
|
||||
be_image = arm64_image_flag_field(flags, ARM64_IMAGE_FLAG_BE);
|
||||
be_kernel = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
|
||||
if ((be_image != be_kernel) && !system_supports_mixed_endian())
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
value = arm64_image_flag_field(flags, ARM64_IMAGE_FLAG_PAGE_SIZE);
|
||||
if (((value == ARM64_IMAGE_FLAG_PAGE_SIZE_4K) &&
|
||||
!system_supports_4kb_granule()) ||
|
||||
((value == ARM64_IMAGE_FLAG_PAGE_SIZE_64K) &&
|
||||
!system_supports_64kb_granule()) ||
|
||||
((value == ARM64_IMAGE_FLAG_PAGE_SIZE_16K) &&
|
||||
!system_supports_16kb_granule()))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
/* Load the kernel */
|
||||
kbuf.image = image;
|
||||
kbuf.buf_min = 0;
|
||||
kbuf.buf_max = ULONG_MAX;
|
||||
kbuf.top_down = false;
|
||||
|
||||
kbuf.buffer = kernel;
|
||||
kbuf.bufsz = kernel_len;
|
||||
kbuf.mem = 0;
|
||||
kbuf.memsz = le64_to_cpu(h->image_size);
|
||||
text_offset = le64_to_cpu(h->text_offset);
|
||||
kbuf.buf_align = MIN_KIMG_ALIGN;
|
||||
|
||||
/* Adjust kernel segment with TEXT_OFFSET */
|
||||
kbuf.memsz += text_offset;
|
||||
|
||||
ret = kexec_add_buffer(&kbuf);
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
|
||||
kernel_segment = &image->segment[image->nr_segments - 1];
|
||||
kernel_segment->mem += text_offset;
|
||||
kernel_segment->memsz -= text_offset;
|
||||
image->start = kernel_segment->mem;
|
||||
|
||||
pr_debug("Loaded kernel at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
|
||||
kernel_segment->mem, kbuf.bufsz,
|
||||
kernel_segment->memsz);
|
||||
|
||||
/* Load additional data */
|
||||
ret = load_other_segments(image,
|
||||
kernel_segment->mem, kernel_segment->memsz,
|
||||
initrd, initrd_len, cmdline);
|
||||
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG
|
||||
static int image_verify_sig(const char *kernel, unsigned long kernel_len)
|
||||
{
|
||||
return verify_pefile_signature(kernel, kernel_len, NULL,
|
||||
VERIFYING_KEXEC_PE_SIGNATURE);
|
||||
}
|
||||
#endif
|
||||
|
||||
const struct kexec_file_ops kexec_image_ops = {
|
||||
.probe = image_probe,
|
||||
.load = image_load,
|
||||
#ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG
|
||||
.verify_sig = image_verify_sig,
|
||||
#endif
|
||||
};
|
@@ -212,9 +212,17 @@ void machine_kexec(struct kimage *kimage)
|
||||
* uses physical addressing to relocate the new image to its final
|
||||
* position and transfers control to the image entry point when the
|
||||
* relocation is complete.
|
||||
* In kexec case, kimage->start points to purgatory assuming that
|
||||
* kernel entry and dtb address are embedded in purgatory by
|
||||
* userspace (kexec-tools).
|
||||
* In kexec_file case, the kernel starts directly without purgatory.
|
||||
*/
|
||||
|
||||
cpu_soft_restart(reboot_code_buffer_phys, kimage->head, kimage->start, 0);
|
||||
cpu_soft_restart(reboot_code_buffer_phys, kimage->head, kimage->start,
|
||||
#ifdef CONFIG_KEXEC_FILE
|
||||
kimage->arch.dtb_mem);
|
||||
#else
|
||||
0);
|
||||
#endif
|
||||
|
||||
BUG(); /* Should never get here. */
|
||||
}
|
||||
|
224
arch/arm64/kernel/machine_kexec_file.c
Normal file
224
arch/arm64/kernel/machine_kexec_file.c
Normal file
@@ -0,0 +1,224 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * kexec_file for arm64
 *
 * Copyright (C) 2018 Linaro Limited
 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 *
 * Most code is derived from arm64 port of kexec-tools
 */

#define pr_fmt(fmt) "kexec_file: " fmt

#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <asm/byteorder.h>

/* relevant device tree properties */
#define FDT_PROP_INITRD_START "linux,initrd-start"
#define FDT_PROP_INITRD_END "linux,initrd-end"
#define FDT_PROP_BOOTARGS "bootargs"
#define FDT_PROP_KASLR_SEED "kaslr-seed"

const struct kexec_file_ops * const kexec_file_loaders[] = {
	&kexec_image_ops,
	NULL
};

int arch_kimage_file_post_load_cleanup(struct kimage *image)
{
	vfree(image->arch.dtb);
	image->arch.dtb = NULL;

	return kexec_image_post_load_cleanup_default(image);
}

static int setup_dtb(struct kimage *image,
		     unsigned long initrd_load_addr, unsigned long initrd_len,
		     char *cmdline, void *dtb)
{
	int off, ret;

	ret = fdt_path_offset(dtb, "/chosen");
	if (ret < 0)
		goto out;

	off = ret;

	/* add bootargs */
	if (cmdline) {
		ret = fdt_setprop_string(dtb, off, FDT_PROP_BOOTARGS, cmdline);
		if (ret)
			goto out;
	} else {
		ret = fdt_delprop(dtb, off, FDT_PROP_BOOTARGS);
		if (ret && (ret != -FDT_ERR_NOTFOUND))
			goto out;
	}

	/* add initrd-* */
	if (initrd_load_addr) {
		ret = fdt_setprop_u64(dtb, off, FDT_PROP_INITRD_START,
				      initrd_load_addr);
		if (ret)
			goto out;

		ret = fdt_setprop_u64(dtb, off, FDT_PROP_INITRD_END,
				      initrd_load_addr + initrd_len);
		if (ret)
			goto out;
	} else {
		ret = fdt_delprop(dtb, off, FDT_PROP_INITRD_START);
		if (ret && (ret != -FDT_ERR_NOTFOUND))
			goto out;

		ret = fdt_delprop(dtb, off, FDT_PROP_INITRD_END);
		if (ret && (ret != -FDT_ERR_NOTFOUND))
			goto out;
	}

	/* add kaslr-seed */
	ret = fdt_delprop(dtb, off, FDT_PROP_KASLR_SEED);
	if (ret && (ret != -FDT_ERR_NOTFOUND))
		goto out;

	if (rng_is_initialized()) {
		u64 seed = get_random_u64();
		ret = fdt_setprop_u64(dtb, off, FDT_PROP_KASLR_SEED, seed);
		if (ret)
			goto out;
	} else {
		pr_notice("RNG is not initialised: omitting \"%s\" property\n",
			  FDT_PROP_KASLR_SEED);
	}

out:
	if (ret)
		return (ret == -FDT_ERR_NOSPACE) ? -ENOMEM : -EINVAL;

	return 0;
}
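setup_dtb() only writes into the duplicated blob; nothing in this patch reads the values back. As a purely illustrative, hedged sketch (the helper name is invented; only libfdt calls of the same family the file already uses are assumed), the initrd range it records under /chosen could be fetched again like this:

#include <linux/types.h>
#include <linux/libfdt.h>

/* Hypothetical helper: read back the initrd range that setup_dtb() wrote. */
static int read_initrd_range(const void *dtb, u64 *start, u64 *end)
{
	const fdt64_t *prop;
	int off, len;

	off = fdt_path_offset(dtb, "/chosen");
	if (off < 0)
		return off;

	prop = fdt_getprop(dtb, off, "linux,initrd-start", &len);
	if (!prop || len != sizeof(*prop))
		return -FDT_ERR_NOTFOUND;
	*start = fdt64_to_cpu(*prop);

	prop = fdt_getprop(dtb, off, "linux,initrd-end", &len);
	if (!prop || len != sizeof(*prop))
		return -FDT_ERR_NOTFOUND;
	*end = fdt64_to_cpu(*prop);

	return 0;
}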

/*
 * More space needed so that we can add initrd, bootargs and kaslr-seed.
 */
#define DTB_EXTRA_SPACE 0x1000

static int create_dtb(struct kimage *image,
		      unsigned long initrd_load_addr, unsigned long initrd_len,
		      char *cmdline, void **dtb)
{
	void *buf;
	size_t buf_size;
	int ret;

	buf_size = fdt_totalsize(initial_boot_params)
			+ strlen(cmdline) + DTB_EXTRA_SPACE;

	for (;;) {
		buf = vmalloc(buf_size);
		if (!buf)
			return -ENOMEM;

		/* duplicate a device tree blob */
		ret = fdt_open_into(initial_boot_params, buf, buf_size);
		if (ret)
			return -EINVAL;

		ret = setup_dtb(image, initrd_load_addr, initrd_len,
				cmdline, buf);
		if (ret) {
			vfree(buf);
			if (ret == -ENOMEM) {
				/* unlikely, but just in case */
				buf_size += DTB_EXTRA_SPACE;
				continue;
			} else {
				return ret;
			}
		}

		/* trim it */
		fdt_pack(buf);
		*dtb = buf;

		return 0;
	}
}

int load_other_segments(struct kimage *image,
			unsigned long kernel_load_addr,
			unsigned long kernel_size,
			char *initrd, unsigned long initrd_len,
			char *cmdline)
{
	struct kexec_buf kbuf;
	void *dtb = NULL;
	unsigned long initrd_load_addr = 0, dtb_len;
	int ret = 0;

	kbuf.image = image;
	/* not allocate anything below the kernel */
	kbuf.buf_min = kernel_load_addr + kernel_size;

	/* load initrd */
	if (initrd) {
		kbuf.buffer = initrd;
		kbuf.bufsz = initrd_len;
		kbuf.mem = 0;
		kbuf.memsz = initrd_len;
		kbuf.buf_align = 0;
		/* within 1GB-aligned window of up to 32GB in size */
		kbuf.buf_max = round_down(kernel_load_addr, SZ_1G)
						+ (unsigned long)SZ_1G * 32;
		kbuf.top_down = false;

		ret = kexec_add_buffer(&kbuf);
		if (ret)
			goto out_err;
		initrd_load_addr = kbuf.mem;

		pr_debug("Loaded initrd at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
				initrd_load_addr, initrd_len, initrd_len);
	}

	/* load dtb */
	ret = create_dtb(image, initrd_load_addr, initrd_len, cmdline, &dtb);
	if (ret) {
		pr_err("Preparing for new dtb failed\n");
		goto out_err;
	}

	dtb_len = fdt_totalsize(dtb);
	kbuf.buffer = dtb;
	kbuf.bufsz = dtb_len;
	kbuf.mem = 0;
	kbuf.memsz = dtb_len;
	/* not across 2MB boundary */
	kbuf.buf_align = SZ_2M;
	kbuf.buf_max = ULONG_MAX;
	kbuf.top_down = true;

	ret = kexec_add_buffer(&kbuf);
	if (ret)
		goto out_err;
	image->arch.dtb = dtb;
	image->arch.dtb_mem = kbuf.mem;

	pr_debug("Loaded dtb at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
			kbuf.mem, dtb_len, dtb_len);

	return 0;

out_err:
	vfree(dtb);
	return ret;
}
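To make the placement constraints in load_other_segments() concrete, here is a small self-contained arithmetic sketch; the kernel load address and size are invented for illustration. The initrd must sit above the kernel but inside a 32GB window that starts at the containing 1GB boundary:

#include <stdint.h>
#include <stdio.h>

#define SZ_1G			0x40000000ULL
#define round_down(x, a)	((x) & ~((a) - 1))

int main(void)
{
	uint64_t kernel_load_addr = 0x40480000ULL;	/* hypothetical */
	uint64_t kernel_size      = 0x02000000ULL;	/* hypothetical 32M */

	/* kbuf.buf_min / kbuf.buf_max as computed above */
	uint64_t buf_min = kernel_load_addr + kernel_size;
	uint64_t buf_max = round_down(kernel_load_addr, SZ_1G) + SZ_1G * 32;

	/* prints: initrd window: [0x42480000, 0x840000000) */
	printf("initrd window: [0x%llx, 0x%llx)\n",
	       (unsigned long long)buf_min, (unsigned long long)buf_max);
	return 0;
}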
@@ -11,31 +11,91 @@
|
||||
#include <linux/module.h>
|
||||
#include <linux/sort.h>
|
||||
|
||||
static struct plt_entry __get_adrp_add_pair(u64 dst, u64 pc,
|
||||
enum aarch64_insn_register reg)
|
||||
{
|
||||
u32 adrp, add;
|
||||
|
||||
adrp = aarch64_insn_gen_adr(pc, dst, reg, AARCH64_INSN_ADR_TYPE_ADRP);
|
||||
add = aarch64_insn_gen_add_sub_imm(reg, reg, dst % SZ_4K,
|
||||
AARCH64_INSN_VARIANT_64BIT,
|
||||
AARCH64_INSN_ADSB_ADD);
|
||||
|
||||
return (struct plt_entry){ cpu_to_le32(adrp), cpu_to_le32(add) };
|
||||
}
|
||||
|
||||
struct plt_entry get_plt_entry(u64 dst, void *pc)
|
||||
{
|
||||
struct plt_entry plt;
|
||||
static u32 br;
|
||||
|
||||
if (!br)
|
||||
br = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_16,
|
||||
AARCH64_INSN_BRANCH_NOLINK);
|
||||
|
||||
plt = __get_adrp_add_pair(dst, (u64)pc, AARCH64_INSN_REG_16);
|
||||
plt.br = cpu_to_le32(br);
|
||||
|
||||
return plt;
|
||||
}
|
||||
|
||||
bool plt_entries_equal(const struct plt_entry *a, const struct plt_entry *b)
|
||||
{
|
||||
u64 p, q;
|
||||
|
||||
/*
|
||||
* Check whether both entries refer to the same target:
|
||||
* do the cheapest checks first.
|
||||
* If the 'add' or 'br' opcodes are different, then the target
|
||||
* cannot be the same.
|
||||
*/
|
||||
if (a->add != b->add || a->br != b->br)
|
||||
return false;
|
||||
|
||||
p = ALIGN_DOWN((u64)a, SZ_4K);
|
||||
q = ALIGN_DOWN((u64)b, SZ_4K);
|
||||
|
||||
/*
|
||||
* If the 'adrp' opcodes are the same then we just need to check
|
||||
* that they refer to the same 4k region.
|
||||
*/
|
||||
if (a->adrp == b->adrp && p == q)
|
||||
return true;
|
||||
|
||||
return (p + aarch64_insn_adrp_get_offset(le32_to_cpu(a->adrp))) ==
|
||||
(q + aarch64_insn_adrp_get_offset(le32_to_cpu(b->adrp)));
|
||||
}
|
||||
|
||||
static bool in_init(const struct module *mod, void *loc)
|
||||
{
|
||||
return (u64)loc - (u64)mod->init_layout.base < mod->init_layout.size;
|
||||
}
|
||||
|
||||
u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
|
||||
u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
|
||||
void *loc, const Elf64_Rela *rela,
|
||||
Elf64_Sym *sym)
|
||||
{
|
||||
struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
|
||||
&mod->arch.init;
|
||||
struct plt_entry *plt = (struct plt_entry *)pltsec->plt->sh_addr;
|
||||
struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr;
|
||||
int i = pltsec->plt_num_entries;
|
||||
int j = i - 1;
|
||||
u64 val = sym->st_value + rela->r_addend;
|
||||
|
||||
plt[i] = get_plt_entry(val);
|
||||
if (is_forbidden_offset_for_adrp(&plt[i].adrp))
|
||||
i++;
|
||||
|
||||
plt[i] = get_plt_entry(val, &plt[i]);
|
||||
|
||||
/*
|
||||
* Check if the entry we just created is a duplicate. Given that the
|
||||
* relocations are sorted, this will be the last entry we allocated.
|
||||
* (if one exists).
|
||||
*/
|
||||
if (i > 0 && plt_entries_equal(plt + i, plt + i - 1))
|
||||
return (u64)&plt[i - 1];
|
||||
if (j >= 0 && plt_entries_equal(plt + i, plt + j))
|
||||
return (u64)&plt[j];
|
||||
|
||||
pltsec->plt_num_entries++;
|
||||
pltsec->plt_num_entries += i - j;
|
||||
if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
|
||||
return 0;
|
||||
|
||||
@@ -43,41 +103,31 @@ u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
|
||||
}
|
||||
|
||||
#ifdef CONFIG_ARM64_ERRATUM_843419
|
||||
u64 module_emit_veneer_for_adrp(struct module *mod, void *loc, u64 val)
|
||||
u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs,
|
||||
void *loc, u64 val)
|
||||
{
|
||||
struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
|
||||
&mod->arch.init;
|
||||
struct plt_entry *plt = (struct plt_entry *)pltsec->plt->sh_addr;
|
||||
struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr;
|
||||
int i = pltsec->plt_num_entries++;
|
||||
u32 mov0, mov1, mov2, br;
|
||||
u32 br;
|
||||
int rd;
|
||||
|
||||
if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
|
||||
return 0;
|
||||
|
||||
if (is_forbidden_offset_for_adrp(&plt[i].adrp))
|
||||
i = pltsec->plt_num_entries++;
|
||||
|
||||
/* get the destination register of the ADRP instruction */
|
||||
rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD,
|
||||
le32_to_cpup((__le32 *)loc));
|
||||
|
||||
/* generate the veneer instructions */
|
||||
mov0 = aarch64_insn_gen_movewide(rd, (u16)~val, 0,
|
||||
AARCH64_INSN_VARIANT_64BIT,
|
||||
AARCH64_INSN_MOVEWIDE_INVERSE);
|
||||
mov1 = aarch64_insn_gen_movewide(rd, (u16)(val >> 16), 16,
|
||||
AARCH64_INSN_VARIANT_64BIT,
|
||||
AARCH64_INSN_MOVEWIDE_KEEP);
|
||||
mov2 = aarch64_insn_gen_movewide(rd, (u16)(val >> 32), 32,
|
||||
AARCH64_INSN_VARIANT_64BIT,
|
||||
AARCH64_INSN_MOVEWIDE_KEEP);
|
||||
br = aarch64_insn_gen_branch_imm((u64)&plt[i].br, (u64)loc + 4,
|
||||
AARCH64_INSN_BRANCH_NOLINK);
|
||||
|
||||
plt[i] = (struct plt_entry){
|
||||
cpu_to_le32(mov0),
|
||||
cpu_to_le32(mov1),
|
||||
cpu_to_le32(mov2),
|
||||
cpu_to_le32(br)
|
||||
};
|
||||
plt[i] = __get_adrp_add_pair(val, (u64)&plt[i], rd);
|
||||
plt[i].br = cpu_to_le32(br);
|
||||
|
||||
return (u64)&plt[i];
|
||||
}
|
||||
@@ -193,6 +243,15 @@ static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) &&
|
||||
cpus_have_const_cap(ARM64_WORKAROUND_843419))
|
||||
/*
|
||||
* Add some slack so we can skip PLT slots that may trigger
|
||||
* the erratum due to the placement of the ADRP instruction.
|
||||
*/
|
||||
ret += DIV_ROUND_UP(ret, (SZ_4K / sizeof(struct plt_entry)));
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -202,7 +261,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
|
||||
unsigned long core_plts = 0;
|
||||
unsigned long init_plts = 0;
|
||||
Elf64_Sym *syms = NULL;
|
||||
Elf_Shdr *tramp = NULL;
|
||||
Elf_Shdr *pltsec, *tramp = NULL;
|
||||
int i;
|
||||
|
||||
/*
|
||||
@@ -211,9 +270,9 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
|
||||
*/
|
||||
for (i = 0; i < ehdr->e_shnum; i++) {
|
||||
if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt"))
|
||||
mod->arch.core.plt = sechdrs + i;
|
||||
mod->arch.core.plt_shndx = i;
|
||||
else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt"))
|
||||
mod->arch.init.plt = sechdrs + i;
|
||||
mod->arch.init.plt_shndx = i;
|
||||
else if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
|
||||
!strcmp(secstrings + sechdrs[i].sh_name,
|
||||
".text.ftrace_trampoline"))
|
||||
@@ -222,7 +281,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
|
||||
syms = (Elf64_Sym *)sechdrs[i].sh_addr;
|
||||
}
|
||||
|
||||
if (!mod->arch.core.plt || !mod->arch.init.plt) {
|
||||
if (!mod->arch.core.plt_shndx || !mod->arch.init.plt_shndx) {
|
||||
pr_err("%s: module PLT section(s) missing\n", mod->name);
|
||||
return -ENOEXEC;
|
||||
}
|
||||
@@ -254,17 +313,19 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
|
||||
sechdrs[i].sh_info, dstsec);
|
||||
}
|
||||
|
||||
mod->arch.core.plt->sh_type = SHT_NOBITS;
|
||||
mod->arch.core.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
|
||||
mod->arch.core.plt->sh_addralign = L1_CACHE_BYTES;
|
||||
mod->arch.core.plt->sh_size = (core_plts + 1) * sizeof(struct plt_entry);
|
||||
pltsec = sechdrs + mod->arch.core.plt_shndx;
|
||||
pltsec->sh_type = SHT_NOBITS;
|
||||
pltsec->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
|
||||
pltsec->sh_addralign = L1_CACHE_BYTES;
|
||||
pltsec->sh_size = (core_plts + 1) * sizeof(struct plt_entry);
|
||||
mod->arch.core.plt_num_entries = 0;
|
||||
mod->arch.core.plt_max_entries = core_plts;
|
||||
|
||||
mod->arch.init.plt->sh_type = SHT_NOBITS;
|
||||
mod->arch.init.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
|
||||
mod->arch.init.plt->sh_addralign = L1_CACHE_BYTES;
|
||||
mod->arch.init.plt->sh_size = (init_plts + 1) * sizeof(struct plt_entry);
|
||||
pltsec = sechdrs + mod->arch.init.plt_shndx;
|
||||
pltsec->sh_type = SHT_NOBITS;
|
||||
pltsec->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
|
||||
pltsec->sh_addralign = L1_CACHE_BYTES;
|
||||
pltsec->sh_size = (init_plts + 1) * sizeof(struct plt_entry);
|
||||
mod->arch.init.plt_num_entries = 0;
|
||||
mod->arch.init.plt_max_entries = init_plts;
|
||||
|
||||
|
arch/arm64/kernel/module.c
@@ -198,13 +198,12 @@ static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
	return 0;
}

static int reloc_insn_adrp(struct module *mod, __le32 *place, u64 val)
static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs,
			   __le32 *place, u64 val)
{
	u32 insn;

	if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) ||
	    !cpus_have_const_cap(ARM64_WORKAROUND_843419) ||
	    ((u64)place & 0xfff) < 0xff8)
	if (!is_forbidden_offset_for_adrp(place))
		return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
				      AARCH64_INSN_IMM_ADR);

@@ -215,7 +214,7 @@ static int reloc_insn_adrp(struct module *mod, __le32 *place, u64 val)
		insn &= ~BIT(31);
	} else {
		/* out of range for ADR -> emit a veneer */
		val = module_emit_veneer_for_adrp(mod, place, val & ~0xfff);
		val = module_emit_veneer_for_adrp(mod, sechdrs, place, val & ~0xfff);
		if (!val)
			return -ENOEXEC;
		insn = aarch64_insn_gen_branch_imm((u64)place, val,
@@ -368,7 +367,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			overflow_check = false;
		case R_AARCH64_ADR_PREL_PG_HI21:
			ovf = reloc_insn_adrp(me, loc, val);
			ovf = reloc_insn_adrp(me, sechdrs, loc, val);
			if (ovf && ovf != -ERANGE)
				return ovf;
			break;
@@ -413,7 +412,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,

		if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
		    ovf == -ERANGE) {
			val = module_emit_plt_entry(me, loc, &rel[i], sym);
			val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym);
			if (!val)
				return -ENOEXEC;
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
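The new is_forbidden_offset_for_adrp() helper referenced above is not shown in this hunk; its behaviour can be inferred from the open-coded test it replaces: Cortex-A53 erratum 843419 can affect an ADRP that lands in the last two instruction slots of a 4KiB page. Roughly (a sketch derived from the deleted condition, not a verbatim copy of the header):

/* Sketch derived from the deleted check above: an ADRP placed at offset
 * 0xff8 or 0xffc within a 4KiB page is subject to erratum 843419. */
static inline bool is_forbidden_offset_for_adrp(void *place)
{
	return IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) &&
	       cpus_have_const_cap(ARM64_WORKAROUND_843419) &&
	       ((u64)place & 0xfff) >= 0xff8;
}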
arch/arm64/kernel/perf_callchain.c
@@ -18,6 +18,7 @@
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>

struct frame_tail {
@@ -35,6 +36,7 @@ user_backtrace(struct frame_tail __user *tail,
{
	struct frame_tail buftail;
	unsigned long err;
	unsigned long lr;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
@@ -47,7 +49,9 @@ user_backtrace(struct frame_tail __user *tail,
	if (err)
		return NULL;

	perf_callchain_store(entry, buftail.lr);
	lr = ptrauth_strip_insn_pac(buftail.lr);

	perf_callchain_store(entry, lr);

	/*
	 * Frame pointers should strictly progress back up the stack
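ptrauth_strip_insn_pac() is defined elsewhere in the series; conceptually it masks out the PAC bits that sit between bit 54 and the top of the userspace VA range before the saved LR is handed to perf. A simplified sketch (the helper name and the explicit va_bits parameter are illustrative, not the kernel's exact definition):

/* Illustrative only: clear the user-space PAC bits from a link-register
 * value. Bits 63:56 (the TBI tag byte) and bit 55 (TTBR select) are left
 * alone; the PAC itself lives in bits 54 down to the configured VA width. */
static inline u64 strip_user_pac(u64 lr, unsigned int va_bits)
{
	u64 pac_mask = GENMASK_ULL(54, va_bits);

	return lr & ~pac_mask;
}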
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* PMU support
|
||||
* ARMv8 PMUv3 Performance Events handling code.
|
||||
*
|
||||
* Copyright (C) 2012 ARM Limited
|
||||
* Author: Will Deacon <will.deacon@arm.com>
|
||||
@@ -30,149 +30,6 @@
|
||||
#include <linux/perf/arm_pmu.h>
|
||||
#include <linux/platform_device.h>
|
||||
|
||||
/*
|
||||
* ARMv8 PMUv3 Performance Events handling code.
|
||||
* Common event types (some are defined in asm/perf_event.h).
|
||||
*/
|
||||
|
||||
/* At least one of the following is required. */
|
||||
#define ARMV8_PMUV3_PERFCTR_INST_RETIRED 0x08
|
||||
#define ARMV8_PMUV3_PERFCTR_INST_SPEC 0x1B
|
||||
|
||||
/* Common architectural events. */
|
||||
#define ARMV8_PMUV3_PERFCTR_LD_RETIRED 0x06
|
||||
#define ARMV8_PMUV3_PERFCTR_ST_RETIRED 0x07
|
||||
#define ARMV8_PMUV3_PERFCTR_EXC_TAKEN 0x09
|
||||
#define ARMV8_PMUV3_PERFCTR_EXC_RETURN 0x0A
|
||||
#define ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED 0x0B
|
||||
#define ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED 0x0C
|
||||
#define ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED 0x0D
|
||||
#define ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED 0x0E
|
||||
#define ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED 0x0F
|
||||
#define ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED 0x1C
|
||||
#define ARMV8_PMUV3_PERFCTR_CHAIN 0x1E
|
||||
#define ARMV8_PMUV3_PERFCTR_BR_RETIRED 0x21
|
||||
|
||||
/* Common microarchitectural events. */
|
||||
#define ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL 0x01
|
||||
#define ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL 0x02
|
||||
#define ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL 0x05
|
||||
#define ARMV8_PMUV3_PERFCTR_MEM_ACCESS 0x13
|
||||
#define ARMV8_PMUV3_PERFCTR_L1I_CACHE 0x14
|
||||
#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB 0x15
|
||||
#define ARMV8_PMUV3_PERFCTR_L2D_CACHE 0x16
|
||||
#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL 0x17
|
||||
#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB 0x18
|
||||
#define ARMV8_PMUV3_PERFCTR_BUS_ACCESS 0x19
|
||||
#define ARMV8_PMUV3_PERFCTR_MEMORY_ERROR 0x1A
|
||||
#define ARMV8_PMUV3_PERFCTR_BUS_CYCLES 0x1D
|
||||
#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE 0x1F
|
||||
#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE 0x20
|
||||
#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED 0x22
|
||||
#define ARMV8_PMUV3_PERFCTR_STALL_FRONTEND 0x23
|
||||
#define ARMV8_PMUV3_PERFCTR_STALL_BACKEND 0x24
|
||||
#define ARMV8_PMUV3_PERFCTR_L1D_TLB 0x25
|
||||
#define ARMV8_PMUV3_PERFCTR_L1I_TLB 0x26
|
||||
#define ARMV8_PMUV3_PERFCTR_L2I_CACHE 0x27
|
||||
#define ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL 0x28
|
||||
#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE 0x29
|
||||
#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL 0x2A
|
||||
#define ARMV8_PMUV3_PERFCTR_L3D_CACHE 0x2B
|
||||
#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB 0x2C
|
||||
#define ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL 0x2D
|
||||
#define ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL 0x2E
|
||||
#define ARMV8_PMUV3_PERFCTR_L2D_TLB 0x2F
|
||||
#define ARMV8_PMUV3_PERFCTR_L2I_TLB 0x30
|
||||
|
||||
/* ARMv8 recommended implementation defined event types */
|
||||
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD 0x40
|
||||
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR 0x41
|
||||
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD 0x42
|
||||
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR 0x43
|
||||
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_INNER 0x44
|
||||
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_OUTER 0x45
|
||||
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_VICTIM 0x46
|
||||
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_CLEAN 0x47
|
||||
#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_INVAL 0x48
|
||||
|
||||
#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD 0x4C
|
||||
#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR 0x4D
|
||||
#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD 0x4E
|
||||
#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR 0x4F
|
||||
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_RD 0x50
|
||||
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WR 0x51
|
||||
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_RD 0x52
|
||||
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_WR 0x53
|
||||
|
||||
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_VICTIM 0x56
|
||||
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_CLEAN 0x57
|
||||
#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_INVAL 0x58
|
||||
|
||||
#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_RD 0x5C
|
||||
#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_WR 0x5D
|
||||
#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_RD 0x5E
|
||||
#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_WR 0x5F
|
||||
|
||||
#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD 0x60
|
||||
#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR 0x61
|
||||
#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_SHARED 0x62
|
||||
#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NOT_SHARED 0x63
|
||||
#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NORMAL 0x64
|
||||
#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_PERIPH 0x65
|
||||
|
||||
#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_RD 0x66
|
||||
#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_WR 0x67
|
||||
#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LD_SPEC 0x68
|
||||
#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_ST_SPEC 0x69
|
||||
#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LDST_SPEC 0x6A
|
||||
|
||||
#define ARMV8_IMPDEF_PERFCTR_LDREX_SPEC 0x6C
|
||||
#define ARMV8_IMPDEF_PERFCTR_STREX_PASS_SPEC 0x6D
|
||||
#define ARMV8_IMPDEF_PERFCTR_STREX_FAIL_SPEC 0x6E
|
||||
#define ARMV8_IMPDEF_PERFCTR_STREX_SPEC 0x6F
|
||||
#define ARMV8_IMPDEF_PERFCTR_LD_SPEC 0x70
|
||||
#define ARMV8_IMPDEF_PERFCTR_ST_SPEC 0x71
|
||||
#define ARMV8_IMPDEF_PERFCTR_LDST_SPEC 0x72
|
||||
#define ARMV8_IMPDEF_PERFCTR_DP_SPEC 0x73
|
||||
#define ARMV8_IMPDEF_PERFCTR_ASE_SPEC 0x74
|
||||
#define ARMV8_IMPDEF_PERFCTR_VFP_SPEC 0x75
|
||||
#define ARMV8_IMPDEF_PERFCTR_PC_WRITE_SPEC 0x76
|
||||
#define ARMV8_IMPDEF_PERFCTR_CRYPTO_SPEC 0x77
|
||||
#define ARMV8_IMPDEF_PERFCTR_BR_IMMED_SPEC 0x78
|
||||
#define ARMV8_IMPDEF_PERFCTR_BR_RETURN_SPEC 0x79
|
||||
#define ARMV8_IMPDEF_PERFCTR_BR_INDIRECT_SPEC 0x7A
|
||||
|
||||
#define ARMV8_IMPDEF_PERFCTR_ISB_SPEC 0x7C
|
||||
#define ARMV8_IMPDEF_PERFCTR_DSB_SPEC 0x7D
|
||||
#define ARMV8_IMPDEF_PERFCTR_DMB_SPEC 0x7E
|
||||
|
||||
#define ARMV8_IMPDEF_PERFCTR_EXC_UNDEF 0x81
|
||||
#define ARMV8_IMPDEF_PERFCTR_EXC_SVC 0x82
|
||||
#define ARMV8_IMPDEF_PERFCTR_EXC_PABORT 0x83
|
||||
#define ARMV8_IMPDEF_PERFCTR_EXC_DABORT 0x84
|
||||
|
||||
#define ARMV8_IMPDEF_PERFCTR_EXC_IRQ 0x86
|
||||
#define ARMV8_IMPDEF_PERFCTR_EXC_FIQ 0x87
|
||||
#define ARMV8_IMPDEF_PERFCTR_EXC_SMC 0x88
|
||||
|
||||
#define ARMV8_IMPDEF_PERFCTR_EXC_HVC 0x8A
|
||||
#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_PABORT 0x8B
|
||||
#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_DABORT 0x8C
|
||||
#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_OTHER 0x8D
|
||||
#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_IRQ 0x8E
|
||||
#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_FIQ 0x8F
|
||||
#define ARMV8_IMPDEF_PERFCTR_RC_LD_SPEC 0x90
|
||||
#define ARMV8_IMPDEF_PERFCTR_RC_ST_SPEC 0x91
|
||||
|
||||
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_RD 0xA0
|
||||
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WR 0xA1
|
||||
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_RD 0xA2
|
||||
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_WR 0xA3
|
||||
|
||||
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_VICTIM 0xA6
|
||||
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_CLEAN 0xA7
|
||||
#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_INVAL 0xA8
|
||||
|
||||
/* ARMv8 Cortex-A53 specific event types. */
|
||||
#define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2
|
||||
|
||||
@@ -183,12 +40,10 @@
|
||||
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS 0xEC
|
||||
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS 0xED
|
||||
|
||||
/* PMUv3 HW events mapping. */
|
||||
|
||||
/*
|
||||
* ARMv8 Architectural defined events, not all of these may
|
||||
* be supported on any given implementation. Undefined events will
|
||||
* be disabled at run-time.
|
||||
* be supported on any given implementation. Unsupported events will
|
||||
* be disabled at run-time based on the PMCEID registers.
|
||||
*/
|
||||
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
|
||||
PERF_MAP_ALL_UNSUPPORTED,
|
||||
@@ -210,8 +65,6 @@ static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
|
||||
|
||||
[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
|
||||
[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
|
||||
[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
|
||||
[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
|
||||
|
||||
[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE,
|
||||
[C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL,
|
||||
@@ -224,8 +77,6 @@ static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
|
||||
|
||||
[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED,
|
||||
[C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
|
||||
[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED,
|
||||
[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
|
||||
};
|
||||
|
||||
static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
|
||||
@@ -370,6 +221,18 @@ ARMV8_EVENT_ATTR(l2d_tlb_refill, ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL);
|
||||
ARMV8_EVENT_ATTR(l2i_tlb_refill, ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL);
|
||||
ARMV8_EVENT_ATTR(l2d_tlb, ARMV8_PMUV3_PERFCTR_L2D_TLB);
|
||||
ARMV8_EVENT_ATTR(l2i_tlb, ARMV8_PMUV3_PERFCTR_L2I_TLB);
|
||||
ARMV8_EVENT_ATTR(remote_access, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS);
|
||||
ARMV8_EVENT_ATTR(ll_cache, ARMV8_PMUV3_PERFCTR_LL_CACHE);
|
||||
ARMV8_EVENT_ATTR(ll_cache_miss, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS);
|
||||
ARMV8_EVENT_ATTR(dtlb_walk, ARMV8_PMUV3_PERFCTR_DTLB_WALK);
|
||||
ARMV8_EVENT_ATTR(itlb_walk, ARMV8_PMUV3_PERFCTR_ITLB_WALK);
|
||||
ARMV8_EVENT_ATTR(ll_cache_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_RD);
|
||||
ARMV8_EVENT_ATTR(ll_cache_miss_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD);
|
||||
ARMV8_EVENT_ATTR(remote_access_rd, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD);
|
||||
ARMV8_EVENT_ATTR(sample_pop, ARMV8_SPE_PERFCTR_SAMPLE_POP);
|
||||
ARMV8_EVENT_ATTR(sample_feed, ARMV8_SPE_PERFCTR_SAMPLE_FEED);
|
||||
ARMV8_EVENT_ATTR(sample_filtrate, ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE);
|
||||
ARMV8_EVENT_ATTR(sample_collision, ARMV8_SPE_PERFCTR_SAMPLE_COLLISION);
|
||||
|
||||
static struct attribute *armv8_pmuv3_event_attrs[] = {
|
||||
&armv8_event_attr_sw_incr.attr.attr,
|
||||
@@ -420,6 +283,18 @@ static struct attribute *armv8_pmuv3_event_attrs[] = {
|
||||
&armv8_event_attr_l2i_tlb_refill.attr.attr,
|
||||
&armv8_event_attr_l2d_tlb.attr.attr,
|
||||
&armv8_event_attr_l2i_tlb.attr.attr,
|
||||
&armv8_event_attr_remote_access.attr.attr,
|
||||
&armv8_event_attr_ll_cache.attr.attr,
|
||||
&armv8_event_attr_ll_cache_miss.attr.attr,
|
||||
&armv8_event_attr_dtlb_walk.attr.attr,
|
||||
&armv8_event_attr_itlb_walk.attr.attr,
|
||||
&armv8_event_attr_ll_cache_rd.attr.attr,
|
||||
&armv8_event_attr_ll_cache_miss_rd.attr.attr,
|
||||
&armv8_event_attr_remote_access_rd.attr.attr,
|
||||
&armv8_event_attr_sample_pop.attr.attr,
|
||||
&armv8_event_attr_sample_feed.attr.attr,
|
||||
&armv8_event_attr_sample_filtrate.attr.attr,
|
||||
&armv8_event_attr_sample_collision.attr.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
@@ -434,7 +309,13 @@ armv8pmu_event_attr_is_visible(struct kobject *kobj,
|
||||
|
||||
pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);
|
||||
|
||||
if (test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap))
|
||||
if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
|
||||
test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap))
|
||||
return attr->mode;
|
||||
|
||||
pmu_attr->id -= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE;
|
||||
if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
|
||||
test_bit(pmu_attr->id, cpu_pmu->pmceid_ext_bitmap))
|
||||
return attr->mode;
|
||||
|
||||
return 0;
|
||||
@@ -1009,7 +890,7 @@ static int __armv8_pmuv3_map_event(struct perf_event *event,
|
||||
if (armv8pmu_event_is_64bit(event))
|
||||
event->hw.flags |= ARMPMU_EVT_64BIT;
|
||||
|
||||
/* Onl expose micro/arch events supported by this PMU */
|
||||
/* Only expose micro/arch events supported by this PMU */
|
||||
if ((hw_event_id > 0) && (hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS)
|
||||
&& test_bit(hw_event_id, armpmu->pmceid_bitmap)) {
|
||||
return hw_event_id;
|
||||
@@ -1061,6 +942,7 @@ static void __armv8pmu_probe_pmu(void *info)
|
||||
struct armv8pmu_probe_info *probe = info;
|
||||
struct arm_pmu *cpu_pmu = probe->pmu;
|
||||
u64 dfr0;
|
||||
u64 pmceid_raw[2];
|
||||
u32 pmceid[2];
|
||||
int pmuver;
|
||||
|
||||
@@ -1079,11 +961,17 @@ static void __armv8pmu_probe_pmu(void *info)
|
||||
/* Add the CPU cycles counter */
|
||||
cpu_pmu->num_events += 1;
|
||||
|
||||
pmceid[0] = read_sysreg(pmceid0_el0);
|
||||
pmceid[1] = read_sysreg(pmceid1_el0);
|
||||
pmceid[0] = pmceid_raw[0] = read_sysreg(pmceid0_el0);
|
||||
pmceid[1] = pmceid_raw[1] = read_sysreg(pmceid1_el0);
|
||||
|
||||
bitmap_from_arr32(cpu_pmu->pmceid_bitmap,
|
||||
pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
|
||||
|
||||
pmceid[0] = pmceid_raw[0] >> 32;
|
||||
pmceid[1] = pmceid_raw[1] >> 32;
|
||||
|
||||
bitmap_from_arr32(cpu_pmu->pmceid_ext_bitmap,
|
||||
pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
|
||||
}
|
||||
|
||||
static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
|
||||
@@ -1109,16 +997,16 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
cpu_pmu->handle_irq = armv8pmu_handle_irq,
|
||||
cpu_pmu->enable = armv8pmu_enable_event,
|
||||
cpu_pmu->disable = armv8pmu_disable_event,
|
||||
cpu_pmu->read_counter = armv8pmu_read_counter,
|
||||
cpu_pmu->write_counter = armv8pmu_write_counter,
|
||||
cpu_pmu->get_event_idx = armv8pmu_get_event_idx,
|
||||
cpu_pmu->clear_event_idx = armv8pmu_clear_event_idx,
|
||||
cpu_pmu->start = armv8pmu_start,
|
||||
cpu_pmu->stop = armv8pmu_stop,
|
||||
cpu_pmu->reset = armv8pmu_reset,
|
||||
cpu_pmu->handle_irq = armv8pmu_handle_irq;
|
||||
cpu_pmu->enable = armv8pmu_enable_event;
|
||||
cpu_pmu->disable = armv8pmu_disable_event;
|
||||
cpu_pmu->read_counter = armv8pmu_read_counter;
|
||||
cpu_pmu->write_counter = armv8pmu_write_counter;
|
||||
cpu_pmu->get_event_idx = armv8pmu_get_event_idx;
|
||||
cpu_pmu->clear_event_idx = armv8pmu_clear_event_idx;
|
||||
cpu_pmu->start = armv8pmu_start;
|
||||
cpu_pmu->stop = armv8pmu_stop;
|
||||
cpu_pmu->reset = armv8pmu_reset;
|
||||
cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
|
||||
cpu_pmu->filter_match = armv8pmu_filter_match;
|
||||
|
||||
@@ -1274,6 +1162,7 @@ static struct platform_driver armv8_pmu_driver = {
|
||||
.driver = {
|
||||
.name = ARMV8_PMU_PDEV_NAME,
|
||||
.of_match_table = armv8_pmu_of_device_ids,
|
||||
.suppress_bind_attrs = true,
|
||||
},
|
||||
.probe = armv8_pmu_device_probe,
|
||||
};
|
||||
|
arch/arm64/kernel/pointer_auth.c (new file, 47 lines)
@@ -0,0 +1,47 @@
// SPDX-License-Identifier: GPL-2.0

#include <linux/errno.h>
#include <linux/prctl.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <asm/cpufeature.h>
#include <asm/pointer_auth.h>

int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg)
{
	struct ptrauth_keys *keys = &tsk->thread.keys_user;
	unsigned long addr_key_mask = PR_PAC_APIAKEY | PR_PAC_APIBKEY |
				      PR_PAC_APDAKEY | PR_PAC_APDBKEY;
	unsigned long key_mask = addr_key_mask | PR_PAC_APGAKEY;

	if (!system_supports_address_auth() && !system_supports_generic_auth())
		return -EINVAL;

	if (!arg) {
		ptrauth_keys_init(keys);
		ptrauth_keys_switch(keys);
		return 0;
	}

	if (arg & ~key_mask)
		return -EINVAL;

	if (((arg & addr_key_mask) && !system_supports_address_auth()) ||
	    ((arg & PR_PAC_APGAKEY) && !system_supports_generic_auth()))
		return -EINVAL;

	if (arg & PR_PAC_APIAKEY)
		get_random_bytes(&keys->apia, sizeof(keys->apia));
	if (arg & PR_PAC_APIBKEY)
		get_random_bytes(&keys->apib, sizeof(keys->apib));
	if (arg & PR_PAC_APDAKEY)
		get_random_bytes(&keys->apda, sizeof(keys->apda));
	if (arg & PR_PAC_APDBKEY)
		get_random_bytes(&keys->apdb, sizeof(keys->apdb));
	if (arg & PR_PAC_APGAKEY)
		get_random_bytes(&keys->apga, sizeof(keys->apga));

	ptrauth_keys_switch(keys);

	return 0;
}
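For completeness, this is how the control is expected to be exercised from userspace, through the PR_PAC_RESET_KEYS prctl() added elsewhere in this series. A hedged sketch, assuming headers new enough to define the PR_PAC_* constants:

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	/* Regenerate only the instruction A key for this thread... */
	if (prctl(PR_PAC_RESET_KEYS, PR_PAC_APIAKEY, 0, 0, 0))
		perror("PR_PAC_RESET_KEYS (APIA)");

	/* ...or pass 0 to have the kernel reinitialise all supported keys. */
	if (prctl(PR_PAC_RESET_KEYS, 0, 0, 0, 0))
		perror("PR_PAC_RESET_KEYS (all)");

	return 0;
}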
arch/arm64/kernel/process.c
@@ -57,9 +57,10 @@
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>

#ifdef CONFIG_STACKPROTECTOR
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
@@ -429,6 +430,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
	contextidr_thread_switch(next);
	entry_task_switch(next);
	uao_thread_switch(next);
	ptrauth_thread_switch(next);

	/*
	 * Complete any pending TLB or cache maintenance on this CPU in case
@@ -496,4 +498,6 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
void arch_setup_new_exec(void)
{
	current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;

	ptrauth_thread_init_user(current);
}
arch/arm64/kernel/ptrace.c
@@ -46,6 +46,7 @@
#include <asm/debug-monitors.h>
#include <asm/fpsimd.h>
#include <asm/pgtable.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
#include <asm/syscall.h>
#include <asm/traps.h>
@@ -956,6 +957,30 @@ out:

#endif /* CONFIG_ARM64_SVE */

#ifdef CONFIG_ARM64_PTR_AUTH
static int pac_mask_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	/*
	 * The PAC bits can differ across data and instruction pointers
	 * depending on TCR_EL1.TBID*, which we may make use of in future, so
	 * we expose separate masks.
	 */
	unsigned long mask = ptrauth_user_pac_mask();
	struct user_pac_mask uregs = {
		.data_mask = mask,
		.insn_mask = mask,
	};

	if (!system_supports_address_auth())
		return -EINVAL;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &uregs, 0, -1);
}
#endif /* CONFIG_ARM64_PTR_AUTH */

enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
@@ -968,6 +993,9 @@ enum aarch64_regset {
#ifdef CONFIG_ARM64_SVE
	REGSET_SVE,
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
	REGSET_PAC_MASK,
#endif
};

static const struct user_regset aarch64_regsets[] = {
@@ -1037,6 +1065,16 @@ static const struct user_regset aarch64_regsets[] = {
		.get_size = sve_get_size,
	},
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
	[REGSET_PAC_MASK] = {
		.core_note_type = NT_ARM_PAC_MASK,
		.n = sizeof(struct user_pac_mask) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.get = pac_mask_get,
		/* this cannot be set dynamically */
	},
#endif
};

static const struct user_regset_view user_aarch64_view = {
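A debugger would consume the new regset through PTRACE_GETREGSET with the NT_ARM_PAC_MASK note type. A hedged userspace sketch follows; the struct layout mirrors user_pac_mask above, while the fallback define and the helper name are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <elf.h>

#ifndef NT_ARM_PAC_MASK
#define NT_ARM_PAC_MASK 0x406	/* regset note type added by this series */
#endif

/* Mirrors the kernel's struct user_pac_mask from the patch above. */
struct user_pac_mask {
	uint64_t data_mask;
	uint64_t insn_mask;
};

/* Hypothetical helper: remove the PAC from a code pointer of a stopped tracee. */
static uint64_t strip_pac(pid_t pid, uint64_t code_ptr)
{
	struct user_pac_mask masks;
	struct iovec iov = { .iov_base = &masks, .iov_len = sizeof(masks) };

	if (ptrace(PTRACE_GETREGSET, pid,
		   (void *)(uintptr_t)NT_ARM_PAC_MASK, &iov) != 0) {
		perror("PTRACE_GETREGSET(NT_ARM_PAC_MASK)");
		return code_ptr;
	}

	return code_ptr & ~masks.insn_mask;
}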
arch/arm64/kernel/relocate_kernel.S
@@ -32,6 +32,7 @@
ENTRY(arm64_relocate_new_kernel)

	/* Setup the list loop variables. */
	mov	x18, x2			/* x18 = dtb address */
	mov	x17, x1			/* x17 = kimage_start */
	mov	x16, x0			/* x16 = kimage_head */
	raw_dcache_line_size x15, x0	/* x15 = dcache line size */
@@ -107,7 +108,7 @@ ENTRY(arm64_relocate_new_kernel)
	isb

	/* Start new image. */
	mov	x0, xzr
	mov	x0, x18
	mov	x1, xzr
	mov	x2, xzr
	mov	x3, xzr
arch/arm64/kernel/setup.c
@@ -388,6 +388,7 @@ static int dump_kernel_offset(struct notifier_block *self, unsigned long v,
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && offset > 0) {
		pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
			 offset, KIMAGE_VADDR);
		pr_emerg("PHYS_OFFSET: 0x%llx\n", PHYS_OFFSET);
	} else {
		pr_emerg("Kernel Offset: disabled\n");
	}
arch/arm64/kernel/smccc-call.S
@@ -13,7 +13,9 @@
 */
#include <linux/linkage.h>
#include <linux/arm-smccc.h>

#include <asm/asm-offsets.h>
#include <asm/assembler.h>

	.macro SMCCC instr
	.cfi_startproc
@@ -40,6 +42,7 @@
ENTRY(__arm_smccc_smc)
	SMCCC	smc
ENDPROC(__arm_smccc_smc)
EXPORT_SYMBOL(__arm_smccc_smc)

/*
 * void arm_smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
@@ -50,3 +53,4 @@ ENDPROC(__arm_smccc_smc)
ENTRY(__arm_smccc_hvc)
	SMCCC	hvc
ENDPROC(__arm_smccc_hvc)
EXPORT_SYMBOL(__arm_smccc_hvc)
arch/arm64/kernel/smp.c
@@ -141,6 +141,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
		return ret;
	}

	secondary_data.task = NULL;
@@ -151,7 +152,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
	if (status == CPU_MMU_OFF)
		status = READ_ONCE(__early_cpu_boot_status);

	switch (status) {
	switch (status & CPU_BOOT_STATUS_MASK) {
	default:
		pr_err("CPU%u: failed in unknown state : 0x%lx\n",
		       cpu, status);
@@ -165,6 +166,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
		pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
	case CPU_STUCK_IN_KERNEL:
		pr_crit("CPU%u: is stuck in kernel\n", cpu);
		if (status & CPU_STUCK_REASON_52_BIT_VA)
			pr_crit("CPU%u: does not support 52-bit VAs\n", cpu);
		if (status & CPU_STUCK_REASON_NO_GRAN)
			pr_crit("CPU%u: does not support %luK granule \n", cpu, PAGE_SIZE / SZ_1K);
		cpus_stuck_in_kernel++;
		break;
	case CPU_PANIC_KERNEL:
arch/arm64/kernel/vmlinux.lds.S
@@ -99,7 +99,8 @@ SECTIONS
		*(.discard)
		*(.discard.*)
		*(.interp .dynamic)
		*(.dynsym .dynstr .hash)
		*(.dynsym .dynstr .hash .gnu.hash)
		*(.eh_frame)
	}

	. = KIMAGE_VADDR + TEXT_OFFSET;
@@ -192,12 +193,12 @@ SECTIONS

	PERCPU_SECTION(L1_CACHE_BYTES)

	.rela : ALIGN(8) {
	.rela.dyn : ALIGN(8) {
		*(.rela .rela*)
	}

	__rela_offset	= ABSOLUTE(ADDR(.rela) - KIMAGE_VADDR);
	__rela_size	= SIZEOF(.rela);
	__rela_offset	= ABSOLUTE(ADDR(.rela.dyn) - KIMAGE_VADDR);
	__rela_size	= SIZEOF(.rela.dyn);

	. = ALIGN(SEGMENT_ALIGN);
	__initdata_end = .;