Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 updates from Will Deacon:

 - Support for new architectural features introduced in ARMv8.1:
   * Privileged Access Never (PAN) to catch user pointer dereferences in
     the kernel
   * Large System Extension (LSE) for building scalable atomics and locks
     (depends on locking/arch-atomic from tip, which is included here);
     a rough LL/SC-vs-LSE sketch follows this list
   * Hardware Dirty Bit Management (DBM) for updating clean PTEs
     automatically
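
   As a concrete illustration of what LSE buys (ours, not from the series):
   the same atomic add written first as a pre-ARMv8.1 load/store-exclusive
   retry loop, then as a single LSE instruction. The LSE variant needs an
   assembler that accepts -march=armv8.1-a, which is what CONFIG_AS_LSE
   gates on:

	/* Pre-ARMv8.1: a load/store-exclusive retry loop. */
	static inline void atomic_add_llsc(int i, int *v)
	{
		int result;
		unsigned long tmp;

		asm volatile(
		"1:	ldxr	%w0, %2\n"
		"	add	%w0, %w0, %w3\n"
		"	stxr	%w1, %w0, %2\n"
		"	cbnz	%w1, 1b"		/* retry on contention */
		: "=&r" (result), "=&r" (tmp), "+Q" (*v)
		: "Ir" (i));
	}

	/* ARMv8.1 LSE: one far atomic, no retry loop. */
	static inline void atomic_add_lse(int i, int *v)
	{
		asm volatile("stadd	%w1, %0"
		: "+Q" (*v)
		: "r" (i));
	}

   Under heavy contention the LL/SC loop keeps retrying, while the LSE form
   is a single instruction the interconnect can complete near the data,
   which is where the scalability claim comes from.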

 - Move our PSCI implementation out into drivers/firmware/, where it can
   be shared with arch/arm/. RMK has also pulled this component branch
   and has additional patches moving arch/arm/ over. MAINTAINERS is
   updated accordingly.

 - Better BUG implementation based on the BRK instruction for trapping

 - Leaf TLB invalidation for unmapping user pages

 - Support for PROBE_ONLY PCI configurations

 - Various cleanups and non-critical fixes, including:
   * Always flush FP/SIMD state over exec()
   * Restrict memblock additions based on range of linear mapping
   * Ensure *(LIST_POISON) generates a fatal fault
   * Context-tracking syscall return no longer corrupts the return
     value when context tracking is not forced on
   * Alternatives patching synchronisation/stability improvements
   * Signed sub-word cmpxchg compare fix (tickled by HAVE_CMPXCHG_LOCAL)
   * Force SMP=y
   * Hide direct DCC access from userspace
   * Fix EFI stub memory allocation when DRAM starts at 0x0

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (92 commits)
  arm64: flush FP/SIMD state correctly after execve()
  arm64: makefile: fix perf_callchain.o kconfig dependency
  arm64: set MAX_MEMBLOCK_ADDR according to linear region size
  of/fdt: make memblock maximum physical address arch configurable
  arm64: Fix source code file path in comments
  arm64: entry: always restore x0 from the stack on syscall return
  arm64: mdscr_el1: avoid exposing DCC to userspace
  arm64: kconfig: Move LIST_POISON to a safe value
  arm64: Add __exception_irq_entry definition for function graph
  arm64: mm: ensure patched kernel text is fetched from PoU
  arm64: alternatives: ensure secondary CPUs execute ISB after patching
  arm64: make ll/sc __cmpxchg_case_##name asm consistent
  arm64: dma-mapping: Simplify pgprot handling
  arm64: restore cpu suspend/resume functionality
  ARM64: PCI: do not enable resources on PROBE_ONLY systems
  arm64: cmpxchg: truncate sub-word signed types before comparison
  arm64: alternative: put secondary CPUs into polling loop during patch
  arm64/Documentation: clarify wording regarding memory below the Image
  arm64: lse: fix lse cmpxchg code indentation
  arm64: remove redundant object file list
  ...
Author: Linus Torvalds
Date:   2015-09-04 07:18:09 -07:00

86 changed files with 2331 additions and 1155 deletions

diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile

@@ -17,15 +17,15 @@ arm64-obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
sys.o stacktrace.o time.o traps.o io.o vdso.o \
hyp-stub.o psci.o psci-call.o cpu_ops.o insn.o \
return_address.o cpuinfo.o cpu_errata.o \
-cpufeature.o alternative.o cacheinfo.o
+cpufeature.o alternative.o cacheinfo.o \
+smp.o smp_spin_table.o topology.o
arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
sys_compat.o entry32.o \
../../arm/kernel/opcodes.o
arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
-arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o topology.o
-arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o
+arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
arm64-obj-$(CONFIG_CPU_PM) += sleep.o suspend.o

diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c

@@ -85,7 +85,7 @@ static u32 get_alt_insn(struct alt_instr *alt, u32 *insnptr, u32 *altinsnptr)
return insn;
}
-static int __apply_alternatives(void *alt_region)
+static void __apply_alternatives(void *alt_region)
{
struct alt_instr *alt;
struct alt_region *region = alt_region;
@@ -114,19 +114,39 @@ static int __apply_alternatives(void *alt_region)
flush_icache_range((uintptr_t)origptr,
(uintptr_t)(origptr + nr_inst));
}
-return 0;
}
-void apply_alternatives_all(void)
+/*
+* We might be patching the stop_machine state machine, so implement a
+* really simple polling protocol here.
+*/
+static int __apply_alternatives_multi_stop(void *unused)
{
+static int patched = 0;
struct alt_region region = {
.begin = __alt_instructions,
.end = __alt_instructions_end,
};
+/* We always have a CPU 0 at this point (__init) */
+if (smp_processor_id()) {
+while (!READ_ONCE(patched))
+cpu_relax();
+isb();
+} else {
+BUG_ON(patched);
+__apply_alternatives(&region);
+/* Barriers provided by the cache flushing */
+WRITE_ONCE(patched, 1);
+}
+return 0;
+}
+void __init apply_alternatives_all(void)
{
/* better not try code patching on a live SMP system */
-stop_machine(__apply_alternatives, &region, NULL);
+stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
}
void apply_alternatives(void *start, size_t length)
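
The comment in the hunk explains why stop_machine()'s own state machine
cannot be trusted while it is being patched. A rough userspace model of the
protocol (ours; C11 atomics and pthreads stand in for CPUs, and the real
code gets its ordering from the cache flushing rather than these atomics):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	#define NR_CPUS	4

	static atomic_int patched;	/* mirrors the 'patched' flag above */

	static void *cpu_thread(void *arg)
	{
		long id = (long)arg;

		if (id != 0) {
			/* "Secondary CPUs": spin on one flag, touching
			 * nothing the patcher might be rewriting. */
			while (!atomic_load(&patched))
				;	/* cpu_relax() stand-in */
		} else {
			/* "CPU 0": patch, then release everyone else. */
			printf("thread 0: patching text\n");
			atomic_store(&patched, 1);
		}
		return NULL;
	}

	int main(void)
	{
		pthread_t t[NR_CPUS];

		for (long i = 0; i < NR_CPUS; i++)
			pthread_create(&t[i], NULL, cpu_thread, (void *)i);
		for (int i = 0; i < NR_CPUS; i++)
			pthread_join(t[i], NULL);
		return 0;
	}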

diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c

@@ -14,8 +14,11 @@
#include <linux/slab.h>
#include <linux/sysctl.h>
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
#include <asm/insn.h>
#include <asm/opcodes.h>
+#include <asm/sysreg.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
@@ -279,6 +282,8 @@ static void register_insn_emulation_sysctl(struct ctl_table *table)
*/
#define __user_swpX_asm(data, addr, res, temp, B) \
__asm__ __volatile__( \
+ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \
+CONFIG_ARM64_PAN) \
" mov %w2, %w1\n" \
"0: ldxr"B" %w1, [%3]\n" \
"1: stxr"B" %w0, %w2, [%3]\n" \
@@ -294,7 +299,9 @@ static void register_insn_emulation_sysctl(struct ctl_table *table)
" .align 3\n" \
" .quad 0b, 3b\n" \
" .quad 1b, 3b\n" \
" .popsection" \
" .popsection\n" \
ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN) \
: "=&r" (res), "+r" (data), "=&r" (temp) \
: "r" (addr), "i" (-EAGAIN), "i" (-EFAULT) \
: "memory")
@@ -504,16 +511,6 @@ ret:
return 0;
}
-static inline void config_sctlr_el1(u32 clear, u32 set)
-{
-u32 val;
-asm volatile("mrs %0, sctlr_el1" : "=r" (val));
-val &= ~clear;
-val |= set;
-asm volatile("msr sctlr_el1, %0" : : "r" (val));
-}
static int cp15_barrier_set_hw_mode(bool enable)
{
if (enable)
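
For orientation, the net effect of the two ALTERNATIVE()s above is to
bracket the exclusive-load/store sequence with PSTATE.PAN writes on CPUs
that have the feature. A sketch of the shape (ours; EL1-only, and
'msr pan, #n' needs an ARMv8.1 assembler, which is why the series encodes
it via the SET_PSTATE_PAN macro instead):

	static inline void pan_bracketed_user_access(void)
	{
		asm volatile("msr pan, #0");	/* patched over the first nop */
		/* ... ldxr/stxr to the user address, as in __user_swpX_asm ... */
		asm volatile("msr pan, #1");	/* patched over the second nop */
	}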

diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c

@@ -30,9 +30,7 @@ extern const struct cpu_operations cpu_psci_ops;
const struct cpu_operations *cpu_ops[NR_CPUS];
static const struct cpu_operations *supported_cpu_ops[] __initconst = {
-#ifdef CONFIG_SMP
&smp_spin_table_ops,
-#endif
&cpu_psci_ops,
NULL,
};

diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c

@@ -21,24 +21,57 @@
#include <linux/types.h>
#include <asm/cpu.h>
#include <asm/cpufeature.h>
+#include <asm/processor.h>
static bool
-has_id_aa64pfr0_feature(const struct arm64_cpu_capabilities *entry)
+feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
{
-u64 val;
-val = read_cpuid(id_aa64pfr0_el1);
-return (val & entry->register_mask) == entry->register_value;
+int val = cpuid_feature_extract_field(reg, entry->field_pos);
+return val >= entry->min_field_value;
}
+#define __ID_FEAT_CHK(reg) \
+static bool __maybe_unused \
+has_##reg##_feature(const struct arm64_cpu_capabilities *entry) \
+{ \
+u64 val; \
+ \
+val = read_cpuid(reg##_el1); \
+return feature_matches(val, entry); \
+}
+__ID_FEAT_CHK(id_aa64pfr0);
+__ID_FEAT_CHK(id_aa64mmfr1);
+__ID_FEAT_CHK(id_aa64isar0);
static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
.capability = ARM64_HAS_SYSREG_GIC_CPUIF,
.matches = has_id_aa64pfr0_feature,
-.register_mask = (0xf << 24),
-.register_value = (1 << 24),
+.field_pos = 24,
+.min_field_value = 1,
},
+#ifdef CONFIG_ARM64_PAN
+{
+.desc = "Privileged Access Never",
+.capability = ARM64_HAS_PAN,
+.matches = has_id_aa64mmfr1_feature,
+.field_pos = 20,
+.min_field_value = 1,
+.enable = cpu_enable_pan,
+},
+#endif /* CONFIG_ARM64_PAN */
+#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
+{
+.desc = "LSE atomic instructions",
+.capability = ARM64_HAS_LSE_ATOMICS,
+.matches = has_id_aa64isar0_feature,
+.field_pos = 20,
+.min_field_value = 2,
+},
+#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
{},
};
@@ -55,9 +88,15 @@ void check_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
pr_info("%s %s\n", info, caps[i].desc);
cpus_set_cap(caps[i].capability);
}
+/* second pass allows enable() to consider interacting capabilities */
+for (i = 0; caps[i].desc; i++) {
+if (cpus_have_cap(caps[i].capability) && caps[i].enable)
+caps[i].enable();
+}
}
void check_local_cpu_features(void)
{
check_cpu_capabilities(arm64_features, "detected feature");
check_cpu_capabilities(arm64_features, "detected feature:");
}
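
The min_field_value scheme leans on the ID register convention that 4-bit
feature fields are signed, so reserved/negative encodings can never pass a
'>=' check. A standalone sketch of the extraction (ours, mirroring what
cpuid_feature_extract_field() must do with field_pos/min_field_value):

	#include <stdio.h>

	/* Extract a 4-bit ID register field as a *signed* value, so the
	 * reserved 0xf encoding reads as -1 rather than 15 and never
	 * satisfies 'val >= min_field_value'. */
	static int feature_extract_field(unsigned long long reg, int field)
	{
		return (int)((long long)(reg << (64 - field - 4)) >> 60);
	}

	int main(void)
	{
		unsigned long long id_aa64mmfr1 = 0x1ULL << 20;	/* PAN == 1 */

		printf("PAN field:    %d\n", feature_extract_field(id_aa64mmfr1, 20));
		printf("reserved 0xf: %d\n", feature_extract_field(0xfULL << 20, 20));
		return 0;
	}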

diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c

@@ -82,7 +82,7 @@ early_param("nodebugmon", early_debug_disable);
static DEFINE_PER_CPU(int, mde_ref_count);
static DEFINE_PER_CPU(int, kde_ref_count);
-void enable_debug_monitors(enum debug_el el)
+void enable_debug_monitors(enum dbg_active_el el)
{
u32 mdscr, enable = 0;
@@ -102,7 +102,7 @@ void enable_debug_monitors(enum debug_el el)
}
}
-void disable_debug_monitors(enum debug_el el)
+void disable_debug_monitors(enum dbg_active_el el)
{
u32 mdscr, disable = 0;

diff --git a/arch/arm64/kernel/efi-stub.c b/arch/arm64/kernel/efi-stub.c

@@ -13,7 +13,7 @@
#include <asm/efi.h>
#include <asm/sections.h>
-efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table,
+efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg,
unsigned long *image_addr,
unsigned long *image_size,
unsigned long *reserve_addr,
@@ -23,21 +23,44 @@ efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table,
{
efi_status_t status;
unsigned long kernel_size, kernel_memsize = 0;
+unsigned long nr_pages;
+void *old_image_addr = (void *)*image_addr;
/* Relocate the image, if required. */
kernel_size = _edata - _text;
if (*image_addr != (dram_base + TEXT_OFFSET)) {
kernel_memsize = kernel_size + (_end - _edata);
-status = efi_low_alloc(sys_table, kernel_memsize + TEXT_OFFSET,
-SZ_2M, reserve_addr);
+/*
+* First, try a straight allocation at the preferred offset.
+* This will work around the issue where, if dram_base == 0x0,
+* efi_low_alloc() refuses to allocate at 0x0 (to prevent the
+* address of the allocation to be mistaken for a FAIL return
+* value or a NULL pointer). It will also ensure that, on
+* platforms where the [dram_base, dram_base + TEXT_OFFSET)
+* interval is partially occupied by the firmware (like on APM
+* Mustang), we can still place the kernel at the address
+* 'dram_base + TEXT_OFFSET'.
+*/
+*image_addr = *reserve_addr = dram_base + TEXT_OFFSET;
+nr_pages = round_up(kernel_memsize, EFI_ALLOC_ALIGN) /
+EFI_PAGE_SIZE;
+status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS,
+EFI_LOADER_DATA, nr_pages,
+(efi_physical_addr_t *)reserve_addr);
if (status != EFI_SUCCESS) {
-pr_efi_err(sys_table, "Failed to relocate kernel\n");
-return status;
+kernel_memsize += TEXT_OFFSET;
+status = efi_low_alloc(sys_table_arg, kernel_memsize,
+SZ_2M, reserve_addr);
+if (status != EFI_SUCCESS) {
+pr_efi_err(sys_table_arg, "Failed to relocate kernel\n");
+return status;
+}
+*image_addr = *reserve_addr + TEXT_OFFSET;
}
-memcpy((void *)*reserve_addr + TEXT_OFFSET, (void *)*image_addr,
-kernel_size);
-*image_addr = *reserve_addr + TEXT_OFFSET;
-*reserve_size = kernel_memsize + TEXT_OFFSET;
+memcpy((void *)*image_addr, old_image_addr, kernel_size);
+*reserve_size = kernel_memsize;
}
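
The control flow is easier to see in isolation: try a fixed allocation at
dram_base + TEXT_OFFSET first, and only fall back to an "anywhere low"
allocation (padded by TEXT_OFFSET) when that fails. A self-contained model
(ours; the allocator hooks are stand-ins for the EFI boot services, and
0x80000 is arm64's default TEXT_OFFSET):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define TEXT_OFFSET	0x80000
	#define SZ_2M		0x200000

	/* Hypothetical allocator hooks; this toy version always fails the
	 * fixed-address request so the fallback path runs. */
	static bool alloc_fixed(uint64_t addr, uint64_t size)
	{
		(void)addr;
		(void)size;
		return false;
	}

	static bool alloc_low(uint64_t size, uint64_t align, uint64_t *out)
	{
		(void)size;
		*out = align;		/* pretend: lowest aligned hole */
		return true;
	}

	/* Two-step policy: preferred address first (also sidesteps a low
	 * allocator that refuses to return address 0), then anywhere low. */
	static bool place_kernel(uint64_t dram_base, uint64_t memsize,
				 uint64_t *image_addr)
	{
		uint64_t reserve;

		if (alloc_fixed(dram_base + TEXT_OFFSET, memsize)) {
			*image_addr = dram_base + TEXT_OFFSET;
			return true;
		}

		if (!alloc_low(memsize + TEXT_OFFSET, SZ_2M, &reserve))
			return false;

		*image_addr = reserve + TEXT_OFFSET;
		return true;
	}

	int main(void)
	{
		uint64_t image;

		if (place_kernel(0x0, 16 << 20, &image))
			printf("image placed at %#llx\n",
			       (unsigned long long)image);
		return 0;
	}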

diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S

@@ -116,41 +116,34 @@
*/
.endm
-.macro kernel_exit, el, ret = 0
+.macro kernel_exit, el
ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
.if \el == 0
ct_user_enter
ldr x23, [sp, #S_SP] // load return stack pointer
msr sp_el0, x23
#ifdef CONFIG_ARM64_ERRATUM_845719
-#undef SEQUENCE_ORG
-#undef SEQUENCE_ALT
+alternative_if_not ARM64_WORKAROUND_845719
+nop
+nop
#ifdef CONFIG_PID_IN_CONTEXTIDR
-#define SEQUENCE_ORG "nop ; nop ; nop"
-#define SEQUENCE_ALT "tbz x22, #4, 1f ; mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:"
-#else
-#define SEQUENCE_ORG "nop ; nop"
-#define SEQUENCE_ALT "tbz x22, #4, 1f ; msr contextidr_el1, xzr; 1:"
+nop
#endif
-alternative_insn SEQUENCE_ORG, SEQUENCE_ALT, ARM64_WORKAROUND_845719
+alternative_else
+tbz x22, #4, 1f
+#ifdef CONFIG_PID_IN_CONTEXTIDR
+mrs x29, contextidr_el1
+msr contextidr_el1, x29
+#else
+msr contextidr_el1, xzr
+#endif
+1:
+alternative_endif
#endif
.endif
msr elr_el1, x21 // set up the return data
msr spsr_el1, x22
-.if \ret
-ldr x1, [sp, #S_X1] // preserve x0 (syscall return)
-.else
ldp x0, x1, [sp, #16 * 0]
-.endif
ldp x2, x3, [sp, #16 * 1]
ldp x4, x5, [sp, #16 * 2]
ldp x6, x7, [sp, #16 * 3]
@@ -613,22 +606,21 @@ ENDPROC(cpu_switch_to)
*/
ret_fast_syscall:
disable_irq // disable interrupts
+str x0, [sp, #S_X0] // returned x0
ldr x1, [tsk, #TI_FLAGS] // re-check for syscall tracing
and x2, x1, #_TIF_SYSCALL_WORK
cbnz x2, ret_fast_syscall_trace
and x2, x1, #_TIF_WORK_MASK
-cbnz x2, fast_work_pending
+cbnz x2, work_pending
enable_step_tsk x1, x2
-kernel_exit 0, ret = 1
+kernel_exit 0
ret_fast_syscall_trace:
enable_irq // enable interrupts
-b __sys_trace_return
+b __sys_trace_return_skipped // we already saved x0
/*
* Ok, we need to do extra processing, enter the slow path.
*/
-fast_work_pending:
-str x0, [sp, #S_X0] // returned x0
work_pending:
tbnz x1, #TIF_NEED_RESCHED, work_resched
/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
@@ -652,7 +644,7 @@ ret_to_user:
cbnz x2, work_pending
enable_step_tsk x1, x2
no_work_pending:
-kernel_exit 0, ret = 0
+kernel_exit 0
ENDPROC(ret_to_user)
/*

diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c

@@ -158,6 +158,7 @@ void fpsimd_thread_switch(struct task_struct *next)
void fpsimd_flush_thread(void)
{
memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
+fpsimd_flush_task_state(current);
set_thread_flag(TIF_FOREIGN_FPSTATE);
}

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S

@@ -62,13 +62,8 @@
/*
* Initial memory map attributes.
*/
-#ifndef CONFIG_SMP
-#define PTE_FLAGS PTE_TYPE_PAGE | PTE_AF
-#define PMD_FLAGS PMD_TYPE_SECT | PMD_SECT_AF
-#else
#define PTE_FLAGS PTE_TYPE_PAGE | PTE_AF | PTE_SHARED
#define PMD_FLAGS PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S
-#endif
#ifdef CONFIG_ARM64_64K_PAGES
#define MM_MMUFLAGS PTE_ATTRINDX(MT_NORMAL) | PTE_FLAGS
@@ -574,7 +569,6 @@ ENTRY(__boot_cpu_mode)
.long BOOT_CPU_MODE_EL1
.popsection
-#ifdef CONFIG_SMP
/*
* This provides a "holding pen" to hold all secondary cores until
* we're ready for them to initialise.
@@ -622,7 +616,6 @@ ENTRY(__secondary_switched)
mov x29, #0
b secondary_start_kernel
ENDPROC(__secondary_switched)
-#endif /* CONFIG_SMP */
/*
* Enable the MMU.
@@ -641,5 +634,13 @@ __enable_mmu:
isb
msr sctlr_el1, x0
isb
+/*
+* Invalidate the local I-cache so that any instructions fetched
+* speculatively from the PoC are discarded, since they may have
+* been dynamically patched at the PoU.
+*/
+ic iallu
+dsb nsh
+isb
br x27
ENDPROC(__enable_mmu)

diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c

@@ -156,7 +156,7 @@ static void write_wb_reg(int reg, int n, u64 val)
* Convert a breakpoint privilege level to the corresponding exception
* level.
*/
-static enum debug_el debug_exception_level(int privilege)
+static enum dbg_active_el debug_exception_level(int privilege)
{
switch (privilege) {
case AARCH64_BREAKPOINT_EL0:
@@ -230,7 +230,7 @@ static int hw_breakpoint_control(struct perf_event *bp,
struct perf_event **slots;
struct debug_info *debug_info = &current->thread.debug;
int i, max_slots, ctrl_reg, val_reg, reg_enable;
-enum debug_el dbg_el = debug_exception_level(info->ctrl.privilege);
+enum dbg_active_el dbg_el = debug_exception_level(info->ctrl.privilege);
u32 ctrl;
if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
@@ -537,7 +537,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
* exception level at the register level.
* This is used when single-stepping after a breakpoint exception.
*/
-static void toggle_bp_registers(int reg, enum debug_el el, int enable)
+static void toggle_bp_registers(int reg, enum dbg_active_el el, int enable)
{
int i, max_slots, privilege;
u32 ctrl;

diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c

@@ -101,9 +101,8 @@ static void __kprobes *patch_map(void *addr, int fixmap)
return addr;
BUG_ON(!page);
-set_fixmap(fixmap, page_to_phys(page));
-return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
+return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
+(uintaddr & ~PAGE_MASK));
}
static void __kprobes patch_unmap(int fixmap)

diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c

@@ -33,9 +33,7 @@ unsigned long irq_err_count;
int arch_show_interrupts(struct seq_file *p, int prec)
{
-#ifdef CONFIG_SMP
show_ipi_list(p, prec);
-#endif
seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
return 0;
}

diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c

@@ -235,13 +235,13 @@ static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
static struct break_hook kgdb_brkpt_hook = {
.esr_mask = 0xffffffff,
-.esr_val = DBG_ESR_VAL_BRK(KGDB_DYN_DBG_BRK_IMM),
+.esr_val = (u32)ESR_ELx_VAL_BRK64(KGDB_DYN_DBG_BRK_IMM),
.fn = kgdb_brk_fn
};
static struct break_hook kgdb_compiled_brkpt_hook = {
.esr_mask = 0xffffffff,
-.esr_val = DBG_ESR_VAL_BRK(KGDB_COMPILED_DBG_BRK_IMM),
+.esr_val = (u32)ESR_ELx_VAL_BRK64(KGDB_COMPILED_DBG_BRK_IMM),
.fn = kgdb_compiled_brk_fn
};
@@ -328,9 +328,9 @@ void kgdb_arch_exit(void)
*/
struct kgdb_arch arch_kgdb_ops = {
.gdb_bpt_instr = {
-KGDB_DYN_BRK_INS_BYTE0,
-KGDB_DYN_BRK_INS_BYTE1,
-KGDB_DYN_BRK_INS_BYTE2,
-KGDB_DYN_BRK_INS_BYTE3,
+KGDB_DYN_BRK_INS_BYTE(0),
+KGDB_DYN_BRK_INS_BYTE(1),
+KGDB_DYN_BRK_INS_BYTE(2),
+KGDB_DYN_BRK_INS_BYTE(3),
}
};

diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c

@@ -38,6 +38,19 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
return res->start;
}
+/**
+* pcibios_enable_device - Enable I/O and memory.
+* @dev: PCI device to be enabled
+* @mask: bitmask of BARs to enable
+*/
+int pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+if (pci_has_flag(PCI_PROBE_ONLY))
+return 0;
+return pci_enable_resources(dev, mask);
+}
/*
* Try to assign the IRQ number from DT when adding a new device
*/
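
For context: PCI_PROBE_ONLY is a global flag, so the check above only
takes effect once something sets it. A hedged fragment of how a host
bridge setup path might opt in (the devicetree property name here is
illustrative, not part of this series):

	/* leave BARs exactly as the firmware assigned them */
	if (of_property_read_bool(np, "linux,pci-probe-only"))
		pci_add_flags(PCI_PROBE_ONLY);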

diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
new file mode 100644

@@ -0,0 +1,196 @@
/*
* arm64 callchain support
*
* Copyright (C) 2015 ARM Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>
struct frame_tail {
struct frame_tail __user *fp;
unsigned long lr;
} __attribute__((packed));
/*
* Get the return address for a single stackframe and return a pointer to the
* next frame tail.
*/
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
struct perf_callchain_entry *entry)
{
struct frame_tail buftail;
unsigned long err;
/* Also check accessibility of one struct frame_tail beyond */
if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
return NULL;
pagefault_disable();
err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
pagefault_enable();
if (err)
return NULL;
perf_callchain_store(entry, buftail.lr);
/*
* Frame pointers should strictly progress back up the stack
* (towards higher addresses).
*/
if (tail >= buftail.fp)
return NULL;
return buftail.fp;
}
#ifdef CONFIG_COMPAT
/*
* The registers we're interested in are at the end of the variable
* length saved register structure. The fp points at the end of this
* structure so the address of this struct is:
* (struct compat_frame_tail *)(xxx->fp)-1
*
* This code has been adapted from the ARM OProfile support.
*/
struct compat_frame_tail {
compat_uptr_t fp; /* a (struct compat_frame_tail *) in compat mode */
u32 sp;
u32 lr;
} __attribute__((packed));
static struct compat_frame_tail __user *
compat_user_backtrace(struct compat_frame_tail __user *tail,
struct perf_callchain_entry *entry)
{
struct compat_frame_tail buftail;
unsigned long err;
/* Also check accessibility of one struct frame_tail beyond */
if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
return NULL;
pagefault_disable();
err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
pagefault_enable();
if (err)
return NULL;
perf_callchain_store(entry, buftail.lr);
/*
* Frame pointers should strictly progress back up the stack
* (towards higher addresses).
*/
if (tail + 1 >= (struct compat_frame_tail __user *)
compat_ptr(buftail.fp))
return NULL;
return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
}
#endif /* CONFIG_COMPAT */
void perf_callchain_user(struct perf_callchain_entry *entry,
struct pt_regs *regs)
{
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
/* We don't support guest os callchain now */
return;
}
perf_callchain_store(entry, regs->pc);
if (!compat_user_mode(regs)) {
/* AARCH64 mode */
struct frame_tail __user *tail;
tail = (struct frame_tail __user *)regs->regs[29];
while (entry->nr < PERF_MAX_STACK_DEPTH &&
tail && !((unsigned long)tail & 0xf))
tail = user_backtrace(tail, entry);
} else {
#ifdef CONFIG_COMPAT
/* AARCH32 compat mode */
struct compat_frame_tail __user *tail;
tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
tail && !((unsigned long)tail & 0x3))
tail = compat_user_backtrace(tail, entry);
#endif
}
}
/*
* Gets called by walk_stackframe() for every stackframe. This will be called
* whilst unwinding the stackframe and is like a subroutine return so we use
* the PC.
*/
static int callchain_trace(struct stackframe *frame, void *data)
{
struct perf_callchain_entry *entry = data;
perf_callchain_store(entry, frame->pc);
return 0;
}
void perf_callchain_kernel(struct perf_callchain_entry *entry,
struct pt_regs *regs)
{
struct stackframe frame;
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
/* We don't support guest os callchain now */
return;
}
frame.fp = regs->regs[29];
frame.sp = regs->sp;
frame.pc = regs->pc;
walk_stackframe(&frame, callchain_trace, entry);
}
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
return perf_guest_cbs->get_guest_ip();
return instruction_pointer(regs);
}
unsigned long perf_misc_flags(struct pt_regs *regs)
{
int misc = 0;
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
if (perf_guest_cbs->is_user_mode())
misc |= PERF_RECORD_MISC_GUEST_USER;
else
misc |= PERF_RECORD_MISC_GUEST_KERNEL;
} else {
if (user_mode(regs))
misc |= PERF_RECORD_MISC_USER;
else
misc |= PERF_RECORD_MISC_KERNEL;
}
return misc;
}
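
The user_backtrace() walk above relies on the AArch64 frame record layout:
x29 points at a {previous-fp, return-address} pair, and records must
strictly ascend the stack. A userspace sketch of the same walk (ours; build
with -fno-omit-frame-pointer on AArch64):

	#include <stdio.h>

	/* An AArch64 frame record, mirroring 'struct frame_tail' above. */
	struct frame_record {
		struct frame_record *fp;
		unsigned long lr;
	};

	static void __attribute__((noinline)) show_backtrace(void)
	{
		struct frame_record *fr = __builtin_frame_address(0);

		while (fr && !((unsigned long)fr & 0xf)) {
			printf("return address: %#lx\n", fr->lr);
			/* Frame pointers must strictly progress back up the
			 * stack, exactly the check user_backtrace() makes. */
			if (fr->fp <= fr)
				break;
			fr = fr->fp;
		}
	}

	static void __attribute__((noinline)) middle(void)
	{
		show_backtrace();
	}

	int main(void)
	{
		middle();
		return 0;
	}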

diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c

@@ -25,7 +25,7 @@
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/export.h>
-#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -36,7 +36,6 @@
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>
-#include <asm/stacktrace.h>
/*
* ARMv8 supports a maximum of 32 events.
@@ -78,6 +77,16 @@ EXPORT_SYMBOL_GPL(perf_num_counters);
#define CACHE_OP_UNSUPPORTED 0xFFFF
+#define PERF_MAP_ALL_UNSUPPORTED \
+[0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED
+#define PERF_CACHE_MAP_ALL_UNSUPPORTED \
+[0 ... C(MAX) - 1] = { \
+[0 ... C(OP_MAX) - 1] = { \
+[0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED, \
+}, \
+}
static int
armpmu_map_cache_event(const unsigned (*cache_map)
[PERF_COUNT_HW_CACHE_MAX]
@@ -435,10 +444,8 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
unsigned int i, irqs;
struct platform_device *pmu_device = armpmu->plat_device;
-if (!pmu_device) {
-pr_err("no PMU device registered\n");
+if (!pmu_device)
return -ENODEV;
-}
irqs = min(pmu_device->num_resources, num_possible_cpus());
if (!irqs) {
@@ -703,118 +710,28 @@ enum armv8_pmuv3_perf_types {
/* PMUv3 HW events mapping. */
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
+PERF_MAP_ALL_UNSUPPORTED,
[PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
[PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
-[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
-[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
-[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
-[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};
static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
-[C(L1D)] = {
-[C(OP_READ)] = {
-[C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
-[C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
-},
-[C(OP_WRITE)] = {
-[C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
-[C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
-},
-[C(OP_PREFETCH)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-},
-[C(L1I)] = {
-[C(OP_READ)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-[C(OP_WRITE)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-[C(OP_PREFETCH)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-},
-[C(LL)] = {
-[C(OP_READ)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-[C(OP_WRITE)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-[C(OP_PREFETCH)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-},
-[C(DTLB)] = {
-[C(OP_READ)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-[C(OP_WRITE)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-[C(OP_PREFETCH)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-},
-[C(ITLB)] = {
-[C(OP_READ)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-[C(OP_WRITE)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-[C(OP_PREFETCH)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-},
-[C(BPU)] = {
-[C(OP_READ)] = {
-[C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
-[C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
-},
-[C(OP_WRITE)] = {
-[C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
-[C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
-},
-[C(OP_PREFETCH)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-},
-[C(NODE)] = {
-[C(OP_READ)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-[C(OP_WRITE)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-[C(OP_PREFETCH)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-},
+PERF_CACHE_MAP_ALL_UNSUPPORTED,
+[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
+[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
+[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
+[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
+[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
+[C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
+[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
};
/*
@@ -1337,7 +1254,7 @@ static int armpmu_device_probe(struct platform_device *pdev)
}
for_each_possible_cpu(cpu)
-if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
+if (dn == of_cpu_device_node_get(cpu))
break;
if (cpu >= nr_cpu_ids) {
@@ -1415,180 +1332,3 @@ static int __init init_hw_perf_events(void)
}
early_initcall(init_hw_perf_events);
/*
* Callchain handling code.
*/
struct frame_tail {
struct frame_tail __user *fp;
unsigned long lr;
} __attribute__((packed));
/*
* Get the return address for a single stackframe and return a pointer to the
* next frame tail.
*/
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
struct perf_callchain_entry *entry)
{
struct frame_tail buftail;
unsigned long err;
/* Also check accessibility of one struct frame_tail beyond */
if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
return NULL;
pagefault_disable();
err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
pagefault_enable();
if (err)
return NULL;
perf_callchain_store(entry, buftail.lr);
/*
* Frame pointers should strictly progress back up the stack
* (towards higher addresses).
*/
if (tail >= buftail.fp)
return NULL;
return buftail.fp;
}
#ifdef CONFIG_COMPAT
/*
* The registers we're interested in are at the end of the variable
* length saved register structure. The fp points at the end of this
* structure so the address of this struct is:
* (struct compat_frame_tail *)(xxx->fp)-1
*
* This code has been adapted from the ARM OProfile support.
*/
struct compat_frame_tail {
compat_uptr_t fp; /* a (struct compat_frame_tail *) in compat mode */
u32 sp;
u32 lr;
} __attribute__((packed));
static struct compat_frame_tail __user *
compat_user_backtrace(struct compat_frame_tail __user *tail,
struct perf_callchain_entry *entry)
{
struct compat_frame_tail buftail;
unsigned long err;
/* Also check accessibility of one struct frame_tail beyond */
if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
return NULL;
pagefault_disable();
err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
pagefault_enable();
if (err)
return NULL;
perf_callchain_store(entry, buftail.lr);
/*
* Frame pointers should strictly progress back up the stack
* (towards higher addresses).
*/
if (tail + 1 >= (struct compat_frame_tail __user *)
compat_ptr(buftail.fp))
return NULL;
return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
}
#endif /* CONFIG_COMPAT */
void perf_callchain_user(struct perf_callchain_entry *entry,
struct pt_regs *regs)
{
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
/* We don't support guest os callchain now */
return;
}
perf_callchain_store(entry, regs->pc);
if (!compat_user_mode(regs)) {
/* AARCH64 mode */
struct frame_tail __user *tail;
tail = (struct frame_tail __user *)regs->regs[29];
while (entry->nr < PERF_MAX_STACK_DEPTH &&
tail && !((unsigned long)tail & 0xf))
tail = user_backtrace(tail, entry);
} else {
#ifdef CONFIG_COMPAT
/* AARCH32 compat mode */
struct compat_frame_tail __user *tail;
tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
tail && !((unsigned long)tail & 0x3))
tail = compat_user_backtrace(tail, entry);
#endif
}
}
/*
* Gets called by walk_stackframe() for every stackframe. This will be called
* whilst unwinding the stackframe and is like a subroutine return so we use
* the PC.
*/
static int callchain_trace(struct stackframe *frame, void *data)
{
struct perf_callchain_entry *entry = data;
perf_callchain_store(entry, frame->pc);
return 0;
}
void perf_callchain_kernel(struct perf_callchain_entry *entry,
struct pt_regs *regs)
{
struct stackframe frame;
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
/* We don't support guest os callchain now */
return;
}
frame.fp = regs->regs[29];
frame.sp = regs->sp;
frame.pc = regs->pc;
walk_stackframe(&frame, callchain_trace, entry);
}
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
return perf_guest_cbs->get_guest_ip();
return instruction_pointer(regs);
}
unsigned long perf_misc_flags(struct pt_regs *regs)
{
int misc = 0;
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
if (perf_guest_cbs->is_user_mode())
misc |= PERF_RECORD_MISC_GUEST_USER;
else
misc |= PERF_RECORD_MISC_GUEST_KERNEL;
} else {
if (user_mode(regs))
misc |= PERF_RECORD_MISC_USER;
else
misc |= PERF_RECORD_MISC_KERNEL;
}
return misc;
}
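
The PERF_CACHE_MAP_ALL_UNSUPPORTED rewrite works because designated
initializers that appear later override earlier ones, and GCC's range
designators can fill a whole array first. A tiny demonstration (ours; the
'...' range designator is a GCC/Clang extension):

	#include <stdio.h>

	#define N 3

	/* Fill the whole table with a default, then override just the
	 * supported entries -- exactly how the condensed map above works. */
	static const int cache_map[N][N] = {
		[0 ... N - 1] = { [0 ... N - 1] = -1 },
		[1][2] = 42,
	};

	int main(void)
	{
		printf("%d %d\n", cache_map[0][0], cache_map[1][2]); /* -1 42 */
		return 0;
	}

GCC warns about the override with -Woverride-init, which is why such tables
are usually built with that warning disabled for the file.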

diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c

@@ -110,8 +110,6 @@ free_mem:
return ret;
}
-#ifdef CONFIG_SMP
static int __init cpu_psci_cpu_init(unsigned int cpu)
{
return 0;
@@ -193,7 +191,6 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
return -ETIMEDOUT;
}
#endif
-#endif
static int psci_suspend_finisher(unsigned long index)
{
@@ -228,7 +225,6 @@ const struct cpu_operations cpu_psci_ops = {
.cpu_init_idle = cpu_psci_cpu_init_idle,
.cpu_suspend = cpu_psci_cpu_suspend,
#endif
-#ifdef CONFIG_SMP
.cpu_init = cpu_psci_cpu_init,
.cpu_prepare = cpu_psci_cpu_prepare,
.cpu_boot = cpu_psci_cpu_boot,
@@ -237,6 +233,5 @@ const struct cpu_operations cpu_psci_ops = {
.cpu_die = cpu_psci_cpu_die,
.cpu_kill = cpu_psci_cpu_kill,
#endif
-#endif
};

diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c

@@ -826,6 +826,30 @@ static int compat_vfp_set(struct task_struct *target,
return ret;
}
+static int compat_tls_get(struct task_struct *target,
+const struct user_regset *regset, unsigned int pos,
+unsigned int count, void *kbuf, void __user *ubuf)
+{
+compat_ulong_t tls = (compat_ulong_t)target->thread.tp_value;
+return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
+}
+static int compat_tls_set(struct task_struct *target,
+const struct user_regset *regset, unsigned int pos,
+unsigned int count, const void *kbuf,
+const void __user *ubuf)
+{
+int ret;
+compat_ulong_t tls;
+ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
+if (ret)
+return ret;
+target->thread.tp_value = tls;
+return ret;
+}
static const struct user_regset aarch32_regsets[] = {
[REGSET_COMPAT_GPR] = {
.core_note_type = NT_PRSTATUS,
@@ -850,6 +874,64 @@ static const struct user_regset_view user_aarch32_view = {
.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};
+static const struct user_regset aarch32_ptrace_regsets[] = {
+[REGSET_GPR] = {
+.core_note_type = NT_PRSTATUS,
+.n = COMPAT_ELF_NGREG,
+.size = sizeof(compat_elf_greg_t),
+.align = sizeof(compat_elf_greg_t),
+.get = compat_gpr_get,
+.set = compat_gpr_set
+},
+[REGSET_FPR] = {
+.core_note_type = NT_ARM_VFP,
+.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
+.size = sizeof(compat_ulong_t),
+.align = sizeof(compat_ulong_t),
+.get = compat_vfp_get,
+.set = compat_vfp_set
+},
+[REGSET_TLS] = {
+.core_note_type = NT_ARM_TLS,
+.n = 1,
+.size = sizeof(compat_ulong_t),
+.align = sizeof(compat_ulong_t),
+.get = compat_tls_get,
+.set = compat_tls_set,
+},
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+[REGSET_HW_BREAK] = {
+.core_note_type = NT_ARM_HW_BREAK,
+.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
+.size = sizeof(u32),
+.align = sizeof(u32),
+.get = hw_break_get,
+.set = hw_break_set,
+},
+[REGSET_HW_WATCH] = {
+.core_note_type = NT_ARM_HW_WATCH,
+.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
+.size = sizeof(u32),
+.align = sizeof(u32),
+.get = hw_break_get,
+.set = hw_break_set,
+},
+#endif
+[REGSET_SYSTEM_CALL] = {
+.core_note_type = NT_ARM_SYSTEM_CALL,
+.n = 1,
+.size = sizeof(int),
+.align = sizeof(int),
+.get = system_call_get,
+.set = system_call_set,
+},
+};
+static const struct user_regset_view user_aarch32_ptrace_view = {
+.name = "aarch32", .e_machine = EM_ARM,
+.regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
+};
static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
compat_ulong_t __user *ret)
{
@@ -1109,8 +1191,16 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
-if (is_compat_thread(task_thread_info(task)))
+/*
+* Core dumping of 32-bit tasks or compat ptrace requests must use the
+* user_aarch32_view compatible with arm32. Native ptrace requests on
+* 32-bit children use an extended user_aarch32_ptrace_view to allow
+* access to the TLS register.
+*/
+if (is_compat_task())
return &user_aarch32_view;
+else if (is_compat_thread(task_thread_info(task)))
+return &user_aarch32_ptrace_view;
#endif
return &user_aarch64_view;
}
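
From the tracer's side, the new REGSET_TLS entry shows up through the
regset ptrace calls. A minimal userspace sketch (ours) of a 64-bit debugger
reading a ptrace-stopped 32-bit tracee's TLS register:

	#include <elf.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/uio.h>

	/* NT_ARM_TLS selects the TLS regset; the iovec describes the
	 * caller's buffer, as with any PTRACE_GETREGSET request. */
	static long read_compat_tls(pid_t pid, unsigned int *tls)
	{
		struct iovec iov = {
			.iov_base = tls,
			.iov_len  = sizeof(*tls),
		};

		return ptrace(PTRACE_GETREGSET, pid,
			      (void *)(long)NT_ARM_TLS, &iov);
	}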

diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c

@@ -62,7 +62,6 @@
#include <asm/traps.h>
#include <asm/memblock.h>
#include <asm/efi.h>
-#include <asm/virt.h>
#include <asm/xen/hypervisor.h>
unsigned long elf_hwcap __read_mostly;
@@ -130,7 +129,6 @@ bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
}
struct mpidr_hash mpidr_hash;
-#ifdef CONFIG_SMP
/**
* smp_build_mpidr_hash - Pre-compute shifts required at each affinity
* level in order to build a linear index from an
@@ -196,35 +194,11 @@ static void __init smp_build_mpidr_hash(void)
pr_warn("Large number of MPIDR hash buckets detected\n");
__flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
}
-#endif
-static void __init hyp_mode_check(void)
-{
-if (is_hyp_mode_available())
-pr_info("CPU: All CPU(s) started at EL2\n");
-else if (is_hyp_mode_mismatched())
-WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
-"CPU: CPUs started in inconsistent modes");
-else
-pr_info("CPU: All CPU(s) started at EL1\n");
-}
-void __init do_post_cpus_up_work(void)
-{
-hyp_mode_check();
-apply_alternatives_all();
-}
-#ifdef CONFIG_UP_LATE_INIT
-void __init up_late_init(void)
-{
-do_post_cpus_up_work();
-}
-#endif /* CONFIG_UP_LATE_INIT */
static void __init setup_processor(void)
{
-u64 features, block;
+u64 features;
+s64 block;
u32 cwg;
int cls;
@@ -254,8 +228,8 @@ static void __init setup_processor(void)
* for non-negative values. Negative values are reserved.
*/
features = read_cpuid(ID_AA64ISAR0_EL1);
-block = (features >> 4) & 0xf;
-if (!(block & 0x8)) {
+block = cpuid_feature_extract_field(features, 4);
+if (block > 0) {
switch (block) {
default:
case 2:
@@ -267,26 +241,36 @@ static void __init setup_processor(void)
}
}
-block = (features >> 8) & 0xf;
-if (block && !(block & 0x8))
+if (cpuid_feature_extract_field(features, 8) > 0)
elf_hwcap |= HWCAP_SHA1;
-block = (features >> 12) & 0xf;
-if (block && !(block & 0x8))
+if (cpuid_feature_extract_field(features, 12) > 0)
elf_hwcap |= HWCAP_SHA2;
-block = (features >> 16) & 0xf;
-if (block && !(block & 0x8))
+if (cpuid_feature_extract_field(features, 16) > 0)
elf_hwcap |= HWCAP_CRC32;
+block = cpuid_feature_extract_field(features, 20);
+if (block > 0) {
+switch (block) {
+default:
+case 2:
+elf_hwcap |= HWCAP_ATOMICS;
+case 1:
+/* RESERVED */
+case 0:
+break;
+}
+}
#ifdef CONFIG_COMPAT
/*
* ID_ISAR5_EL1 carries similar information as above, but pertaining to
-* the Aarch32 32-bit execution state.
+* the AArch32 32-bit execution state.
*/
features = read_cpuid(ID_ISAR5_EL1);
-block = (features >> 4) & 0xf;
-if (!(block & 0x8)) {
+block = cpuid_feature_extract_field(features, 4);
+if (block > 0) {
switch (block) {
default:
case 2:
@@ -298,16 +282,13 @@ static void __init setup_processor(void)
}
}
-block = (features >> 8) & 0xf;
-if (block && !(block & 0x8))
+if (cpuid_feature_extract_field(features, 8) > 0)
compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;
-block = (features >> 12) & 0xf;
-if (block && !(block & 0x8))
+if (cpuid_feature_extract_field(features, 12) > 0)
compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;
-block = (features >> 16) & 0xf;
-if (block && !(block & 0x8))
+if (cpuid_feature_extract_field(features, 16) > 0)
compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
#endif
}
@@ -404,10 +385,8 @@ void __init setup_arch(char **cmdline_p)
xen_early_init();
cpu_read_bootcpu_ops();
-#ifdef CONFIG_SMP
smp_init_cpus();
smp_build_mpidr_hash();
-#endif
#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
@@ -426,8 +405,13 @@ void __init setup_arch(char **cmdline_p)
static int __init arm64_device_init(void)
{
-of_iommu_init();
-of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+if (of_have_populated_dt()) {
+of_iommu_init();
+of_platform_populate(NULL, of_default_bus_match_table,
+NULL, NULL);
+} else if (acpi_disabled) {
+pr_crit("Device tree not populated\n");
+}
return 0;
}
arch_initcall_sync(arm64_device_init);
@@ -455,6 +439,7 @@ static const char *hwcap_str[] = {
"sha1",
"sha2",
"crc32",
"atomics",
NULL
};
@@ -507,9 +492,7 @@ static int c_show(struct seq_file *m, void *v)
* online processors, looking for lines beginning with
* "processor". Give glibc what it expects.
*/
-#ifdef CONFIG_SMP
seq_printf(m, "processor\t: %d\n", i);
-#endif
/*
* Dump out the common processor features in a single line.

diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S

@@ -82,7 +82,6 @@ ENTRY(__cpu_suspend_enter)
str x2, [x0, #CPU_CTX_SP]
ldr x1, =sleep_save_sp
ldr x1, [x1, #SLEEP_SAVE_SP_VIRT]
-#ifdef CONFIG_SMP
mrs x7, mpidr_el1
ldr x9, =mpidr_hash
ldr x10, [x9, #MPIDR_HASH_MASK]
@@ -94,7 +93,6 @@ ENTRY(__cpu_suspend_enter)
ldp w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)]
compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10
add x1, x1, x8, lsl #3
-#endif
bl __cpu_suspend_save
/*
* Grab suspend finisher in x20 and its argument in x19
@@ -135,6 +133,14 @@ ENTRY(cpu_resume_mmu)
ldr x3, =cpu_resume_after_mmu
msr sctlr_el1, x0 // restore sctlr_el1
isb
+/*
+* Invalidate the local I-cache so that any instructions fetched
+* speculatively from the PoC are discarded, since they may have
+* been dynamically patched at the PoU.
+*/
+ic iallu
+dsb nsh
+isb
br x3 // global jump to virtual address
ENDPROC(cpu_resume_mmu)
.popsection
@@ -151,7 +157,6 @@ ENDPROC(cpu_resume_after_mmu)
ENTRY(cpu_resume)
bl el2_setup // if in EL2 drop to EL1 cleanly
-#ifdef CONFIG_SMP
mrs x1, mpidr_el1
adrp x8, mpidr_hash
add x8, x8, #:lo12:mpidr_hash // x8 = struct mpidr_hash phys address
@@ -161,9 +166,6 @@ ENTRY(cpu_resume)
ldp w5, w6, [x8, #(MPIDR_HASH_SHIFTS + 8)]
compute_mpidr_hash x7, x3, x4, x5, x6, x1, x2
/* x7 contains hash index, let's use it to grab context pointer */
-#else
-mov x7, xzr
-#endif
ldr_l x0, sleep_save_sp + SLEEP_SAVE_SP_PHYS
ldr x0, [x0, x7, lsl #3]
/* load sp from context */

diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c

@@ -52,6 +52,7 @@
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
+#include <asm/virt.h>
#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>
@@ -310,10 +311,22 @@ void cpu_die(void)
}
#endif
+static void __init hyp_mode_check(void)
+{
+if (is_hyp_mode_available())
+pr_info("CPU: All CPU(s) started at EL2\n");
+else if (is_hyp_mode_mismatched())
+WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
+"CPU: CPUs started in inconsistent modes");
+else
+pr_info("CPU: All CPU(s) started at EL1\n");
+}
void __init smp_cpus_done(unsigned int max_cpus)
{
pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
-do_post_cpus_up_work();
+hyp_mode_check();
+apply_alternatives_all();
}
void __init smp_prepare_boot_cpu(void)

diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c

@@ -42,7 +42,6 @@
#include <asm/thread_info.h>
#include <asm/stacktrace.h>
-#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
struct stackframe frame;
@@ -62,7 +61,6 @@ unsigned long profile_pc(struct pt_regs *regs)
return frame.pc;
}
EXPORT_SYMBOL(profile_pc);
-#endif
void __init time_init(void)
{

diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c

@@ -300,6 +300,6 @@ void __init init_cpu_topology(void)
* Discard anything that was parsed if we hit an error so we
* don't use partial information.
*/
-if (parse_dt_topology())
+if (of_have_populated_dt() && parse_dt_topology())
reset_cpu_topology();
}

diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c

@@ -17,6 +17,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/bug.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
@@ -32,8 +33,10 @@
#include <linux/syscalls.h>
#include <asm/atomic.h>
+#include <asm/bug.h>
+#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/insn.h>
#include <asm/traps.h>
#include <asm/stacktrace.h>
#include <asm/exception.h>
@@ -52,11 +55,12 @@ int show_unhandled_signals = 1;
* Dump out the contents of some memory nicely...
*/
static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
-unsigned long top)
+unsigned long top, bool compat)
{
unsigned long first;
mm_segment_t fs;
int i;
+unsigned int width = compat ? 4 : 8;
/*
* We need to switch to kernel mode so that we can use __get_user
@@ -75,13 +79,22 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
memset(str, ' ', sizeof(str));
str[sizeof(str) - 1] = '\0';
-for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
+for (p = first, i = 0; i < (32 / width)
+&& p < top; i++, p += width) {
if (p >= bottom && p < top) {
-unsigned int val;
-if (__get_user(val, (unsigned int *)p) == 0)
-sprintf(str + i * 9, " %08x", val);
-else
-sprintf(str + i * 9, " ????????");
+unsigned long val;
+if (width == 8) {
+if (__get_user(val, (unsigned long *)p) == 0)
+sprintf(str + i * 17, " %016lx", val);
+else
+sprintf(str + i * 17, " ????????????????");
+} else {
+if (__get_user(val, (unsigned int *)p) == 0)
+sprintf(str + i * 9, " %08lx", val);
+else
+sprintf(str + i * 9, " ????????");
+}
}
}
}
printk("%s%04lx:%s\n", lvl, first & 0xffff, str);
@@ -95,7 +108,7 @@ static void dump_backtrace_entry(unsigned long where, unsigned long stack)
print_ip_sym(where);
if (in_exception_text(where))
dump_mem("", "Exception stack", stack,
-stack + sizeof(struct pt_regs));
+stack + sizeof(struct pt_regs), false);
}
static void dump_instr(const char *lvl, struct pt_regs *regs)
@@ -179,11 +192,7 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
#else
#define S_PREEMPT ""
#endif
-#ifdef CONFIG_SMP
#define S_SMP " SMP"
-#else
-#define S_SMP ""
-#endif
static int __die(const char *str, int err, struct thread_info *thread,
struct pt_regs *regs)
@@ -207,7 +216,8 @@ static int __die(const char *str, int err, struct thread_info *thread,
if (!user_mode(regs) || in_interrupt()) {
dump_mem(KERN_EMERG, "Stack: ", regs->sp,
-THREAD_SIZE + (unsigned long)task_stack_page(tsk));
+THREAD_SIZE + (unsigned long)task_stack_page(tsk),
+compat_user_mode(regs));
dump_backtrace(regs, tsk);
dump_instr(KERN_EMERG, regs);
}
@@ -459,7 +469,63 @@ void __pgd_error(const char *file, int line, unsigned long val)
pr_crit("%s:%d: bad pgd %016lx.\n", file, line, val);
}
+/* GENERIC_BUG traps */
+int is_valid_bugaddr(unsigned long addr)
+{
+/*
+* bug_handler() only called for BRK #BUG_BRK_IMM.
+* So the answer is trivial -- any spurious instances with no
+* bug table entry will be rejected by report_bug() and passed
+* back to the debug-monitors code and handled as a fatal
+* unexpected debug exception.
+*/
+return 1;
+}
+static int bug_handler(struct pt_regs *regs, unsigned int esr)
+{
+if (user_mode(regs))
+return DBG_HOOK_ERROR;
+switch (report_bug(regs->pc, regs)) {
+case BUG_TRAP_TYPE_BUG:
+die("Oops - BUG", regs, 0);
+break;
+case BUG_TRAP_TYPE_WARN:
+/* Ideally, report_bug() should backtrace for us... but no. */
+dump_backtrace(regs, NULL);
+break;
+default:
+/* unknown/unrecognised bug trap type */
+return DBG_HOOK_ERROR;
+}
+/* If thread survives, skip over the BUG instruction and continue: */
+regs->pc += AARCH64_INSN_SIZE; /* skip BRK and resume */
+return DBG_HOOK_HANDLED;
+}
+static struct break_hook bug_break_hook = {
+.esr_val = 0xf2000000 | BUG_BRK_IMM,
+.esr_mask = 0xffffffff,
+.fn = bug_handler,
+};
+/*
+* Initial handler for AArch64 BRK exceptions
+* This handler is only used until debug_traps_init().
+*/
+int __init early_brk64(unsigned long addr, unsigned int esr,
+struct pt_regs *regs)
+{
+return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
+}
+/* This registration must happen early, before debug_traps_init(). */
void __init trap_init(void)
{
-return;
+register_break_hook(&bug_break_hook);
}
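
For orientation, the BUG() this hooks into is now just a BRK instruction
whose immediate the handler above matches via esr_val/esr_mask. A sketch of
the expansion (ours; the 0x800 immediate is an assumption standing in for
BUG_BRK_IMM, and the real BUG() macro also emits a bug-table entry for
report_bug() to look up):

	/* Executing this raises a debug exception whose ESR encodes the
	 * immediate; in the kernel, bug_handler() resolves it through the
	 * generic bug table. Run in userspace it simply raises SIGTRAP. */
	static inline void sketch_bug(void)
	{
		asm volatile("brk #0x800");	/* assumption: BUG_BRK_IMM == 0x800 */
	}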