Merge branch 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm
Pull ARM updates from Russell King:
 "Bigger items included in this update are:

   - A series of updates from Arnd for ARM randconfig build failures
   - Updates from Dmitry for StrongARM SA-1100 to move IRQ handling to
     drivers/irqchip/
   - Move ARMs SP804 timer to drivers/clocksource/
   - Perf updates from Mark Rutland in preparation to move the ARM perf
     code into drivers/ so it can be shared with ARM64.
   - MCPM updates from Nicolas
   - Add support for taking platform serial number from DT
   - Re-implement Keystone2 physical address space switch to conform to
     architecture requirements
   - Clean up ARMv7 LPAE code, which goes in hand with the Keystone2
     changes.
   - L2C cleanups to avoid unlocking caches if we're prevented by the
     secure support to unlock.
   - Avoid cleaning a potentially dirty cache containing stale data on
     CPU initialisation
   - Add ARM-only entry point for secondary startup (for machines that
     can only call into a Thumb kernel in ARM mode).  Same thing is also
     done for the resume entry point.
   - Provide arch_irqs_disabled via asm-generic
   - Enlarge ARMv7M vector table
   - Always use BFD linker for VDSO, as gold doesn't accept some of the
     options we need.
   - Fix an incorrect BSYM (for Thumb symbols) usage, and convert all
     BSYM compiler macros to a "badr" (for branch address).
   - Shut up compiler warnings provoked by our cmpxchg() implementation.
   - Ensure bad xchg sizes fail to link"

* 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm: (75 commits)
  ARM: Fix build if CLKDEV_LOOKUP is not configured
  ARM: fix new BSYM() usage introduced via for-arm-soc branch
  ARM: 8383/1: nommu: avoid deprecated source register on mov
  ARM: 8391/1: l2c: add options to overwrite prefetching behavior
  ARM: 8390/1: irqflags: Get arch_irqs_disabled from asm-generic
  ARM: 8387/1: arm/mm/dma-mapping.c: Add arm_coherent_dma_mmap
  ARM: 8388/1: tcm: Don't crash when TCM banks are protected by TrustZone
  ARM: 8384/1: VDSO: force use of BFD linker
  ARM: 8385/1: VDSO: group link options
  ARM: cmpxchg: avoid warnings from macro-ized cmpxchg() implementations
  ARM: remove __bad_xchg definition
  ARM: 8369/1: ARMv7M: define size of vector table for Vybrid
  ARM: 8382/1: clocksource: make ARM_TIMER_SP804 depend on GENERIC_SCHED_CLOCK
  ARM: 8366/1: move Dual-Timer SP804 driver to drivers/clocksource
  ARM: 8365/1: introduce sp804_timer_disable and remove arm_timer.h inclusion
  ARM: 8364/1: fix BE32 module loading
  ARM: 8360/1: add secondary_startup_arm prototype in header file
  ARM: 8359/1: correct secondary_startup_arm mode
  ARM: proc-v7: sanitise and document registers around errata
  ARM: proc-v7: clean up MIDR access
  ...
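One item above deserves a quick illustration: the BSYM()-to-badr conversion is about ARM/Thumb interworking. On a Thumb-2 kernel, an address that will later land in pc (say, a return address preloaded into lr before jumping through a function pointer) must have bit 0 set so execution resumes in Thumb state; BSYM() OR'ed that bit into symbol expressions, and the new badr assembler macro does the same for adr-style address computations. A minimal C sketch of the convention (illustrative only, not code from this merge):

	#include <stdint.h>

	/* hypothetical helper: bit 0 of an interworking branch target
	 * selects the instruction set -- 0 = ARM, 1 = Thumb */
	static inline uintptr_t interwork_addr(uintptr_t sym, int thumb)
	{
		return thumb ? (sym | 1u) : sym;
	}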
@@ -34,6 +34,7 @@ obj-$(CONFIG_CPU_IDLE)		+= cpuidle.o
 obj-$(CONFIG_ISA_DMA_API)	+= dma.o
 obj-$(CONFIG_FIQ)		+= fiq.o fiqasm.o
 obj-$(CONFIG_MODULES)		+= armksyms.o module.o
+obj-$(CONFIG_ARM_MODULE_PLTS)	+= module-plts.o
 obj-$(CONFIG_ISA_DMA)		+= dma-isa.o
 obj-$(CONFIG_PCI)		+= bios32.o isa.o
 obj-$(CONFIG_ARM_CPU_SUSPEND)	+= sleep.o suspend.o

@@ -70,7 +71,9 @@ obj-$(CONFIG_CPU_PJ4)		+= pj4-cp0.o
 obj-$(CONFIG_CPU_PJ4B)		+= pj4-cp0.o
 obj-$(CONFIG_IWMMXT)		+= iwmmxt.o
 obj-$(CONFIG_PERF_EVENTS)	+= perf_regs.o perf_callchain.o
-obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o perf_event_cpu.o
+obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o \
+				   perf_event_xscale.o perf_event_v6.o \
+				   perf_event_v7.o
 CFLAGS_pj4-cp0.o		:= -marm
 AFLAGS_iwmmxt.o			:= -Wa,-mcpu=iwmmxt
 obj-$(CONFIG_ARM_CPU_TOPOLOGY)	+= topology.o
@@ -40,7 +40,7 @@
 #ifdef CONFIG_MULTI_IRQ_HANDLER
	ldr	r1, =handle_arch_irq
	mov	r0, sp
-	adr	lr, BSYM(9997f)
+	badr	lr, 9997f
	ldr	pc, [r1]
 #else
	arch_irq_handler_default
@@ -273,7 +273,7 @@ __und_svc:
	str	r4, [sp, #S_PC]
	orr	r0, r9, r0, lsl #16
 #endif
-	adr	r9, BSYM(__und_svc_finish)
+	badr	r9, __und_svc_finish
	mov	r2, r4
	bl	call_fpe

@@ -469,7 +469,7 @@ __und_usr:
	@ instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
-	adr	r9, BSYM(ret_from_exception)
+	badr	r9, ret_from_exception

	@ IRQs must be enabled before attempting to read the instruction from
	@ user space since that could cause a page/translation fault if the
@@ -486,7 +486,7 @@ __und_usr:
	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
	@ r4 = PC value for the faulting instruction
	@ lr = 32-bit undefined instruction function
-	adr	lr, BSYM(__und_usr_fault_32)
+	badr	lr, __und_usr_fault_32
	b	call_fpe

 __und_usr_thumb:
@@ -522,7 +522,7 @@ ARM_BE8(rev16	r0, r0)		@ little endian instruction
	add	r2, r2, #2		@ r2 is PC + 2, make it PC + 4
	str	r2, [sp, #S_PC]		@ it's a 2x16bit instr, update
	orr	r0, r0, r5, lsl #16
-	adr	lr, BSYM(__und_usr_fault_32)
+	badr	lr, __und_usr_fault_32
	@ r0 = the two 16-bit Thumb instructions which caused the exception
	@ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
	@ r4 = PC value for the first 16-bit Thumb instruction
@@ -716,7 +716,7 @@ __und_usr_fault_32:
 __und_usr_fault_16:
	mov	r1, #2
1:	mov	r0, sp
-	adr	lr, BSYM(ret_from_exception)
+	badr	lr, ret_from_exception
	b	__und_fault
 ENDPROC(__und_usr_fault_32)
 ENDPROC(__und_usr_fault_16)
@@ -90,7 +90,7 @@ ENTRY(ret_from_fork)
	bl	schedule_tail
	cmp	r5, #0
	movne	r0, r4
-	adrne	lr, BSYM(1f)
+	badrne	lr, 1f
	retne	r5
1:	get_thread_info tsk
	b	ret_slow_syscall
@@ -198,7 +198,7 @@ local_restart:
	bne	__sys_trace

	cmp	scno, #NR_syscalls		@ check upper syscall limit
-	adr	lr, BSYM(ret_fast_syscall)	@ return address
+	badr	lr, ret_fast_syscall		@ return address
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine

	add	r1, sp, #S_OFF
@@ -233,7 +233,7 @@ __sys_trace:
	add	r0, sp, #S_OFF
	bl	syscall_trace_enter

-	adr	lr, BSYM(__sys_trace_return)	@ return address
+	badr	lr, __sys_trace_return		@ return address
	mov	scno, r0			@ syscall number (possibly new)
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
@@ -87,7 +87,7 @@

1:	mcount_get_lr	r1			@ lr of instrumented func
	mcount_adjust_addr	r0, lr		@ instrumented function
-	adr	lr, BSYM(2f)
+	badr	lr, 2f
	mov	pc, r2
2:	mcount_exit
	.endm
@@ -117,9 +117,14 @@ ENTRY(__switch_to)
 ENDPROC(__switch_to)

	.data
-	.align	8
+#if CONFIG_CPU_V7M_NUM_IRQ <= 112
+	.align	9
+#else
+	.align	10
+#endif

/*
- * Vector table (64 words => 256 bytes natural alignment)
+ * Vector table (Natural alignment need to be ensured)
 */
ENTRY(vector_table)
	.long	0			@ 0 - Reset stack pointer
@@ -138,6 +143,6 @@ ENTRY(vector_table)
	.long	__invalid_entry		@ 13 - Reserved
	.long	__pendsv_entry		@ 14 - PendSV
	.long	__invalid_entry		@ 15 - SysTick
-	.rept	64 - 16
-	.long	__irq_entry		@ 16..64 - External Interrupts
+	.rept	CONFIG_CPU_V7M_NUM_IRQ
+	.long	__irq_entry		@ External Interrupts
	.endr
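The alignment arithmetic behind the new #if is worth a line of explanation: the v7-M vector table holds 16 architected system entries plus CONFIG_CPU_V7M_NUM_IRQ external ones, 4 bytes each, and (assumption about the ARMv7-M VTOR alignment rule) the table must be aligned to its size rounded up to a power of two. A hedged C sketch of the computation:

	/* (16 + 112) * 4 = 512 bytes fits .align 9 (2^9); more IRQs need
	 * .align 10 (2^10, covering up to 240 external interrupts). */
	static unsigned int v7m_vector_align(unsigned int num_irq)
	{
		unsigned int bytes = (16 + num_irq) * 4;
		unsigned int align = 1;

		while (align < bytes)		/* round up to a power of two */
			align <<= 1;
		return align;
	}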
@@ -46,7 +46,7 @@ ENTRY(stext)
	.arm
ENTRY(stext)

- THUMB(	adr	r9, BSYM(1f)	)	@ Kernel is always entered in ARM.
+ THUMB(	badr	r9, 1f		)	@ Kernel is always entered in ARM.
 THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)
@@ -77,13 +77,13 @@ ENTRY(stext)
	orr	r6, r6, #(1 << MPU_RSR_EN)	@ Set region enabled bit
	bl	__setup_mpu
#endif
-	ldr	r13, =__mmap_switched		@ address to jump to after
-						@ initialising sctlr
-	adr	lr, BSYM(1f)			@ return (PIC) address
+	badr	lr, 1f				@ return (PIC) address
	ldr	r12, [r10, #PROCINFO_INITFUNC]
	add	r12, r12, r10
	ret	r12
-1:	b	__after_proc_init
+1:	bl	__after_proc_init
+	b	__mmap_switched
ENDPROC(stext)

#ifdef CONFIG_SMP
@@ -106,8 +106,7 @@ ENTRY(secondary_startup)
	movs	r10, r5				@ invalid processor?
	beq	__error_p			@ yes, error 'p'

-	adr	r4, __secondary_data
-	ldmia	r4, {r7, r12}
+	ldr	r7, __secondary_data

#ifdef CONFIG_ARM_MPU
	/* Use MPU region info supplied by __cpu_up */
@@ -115,23 +114,19 @@ ENTRY(secondary_startup)
	bl	__setup_mpu			@ Initialize the MPU
#endif

-	adr	lr, BSYM(__after_proc_init)	@ return address
-	mov	r13, r12			@ __secondary_switched address
+	badr	lr, 1f				@ return (PIC) address
	ldr	r12, [r10, #PROCINFO_INITFUNC]
	add	r12, r12, r10
	ret	r12
-ENDPROC(secondary_startup)
-
-ENTRY(__secondary_switched)
-	ldr	sp, [r7, #8]			@ set up the stack pointer
+1:	bl	__after_proc_init
+	ldr	sp, [r7, #12]			@ set up the stack pointer
	mov	fp, #0
	b	secondary_start_kernel
-ENDPROC(__secondary_switched)
+ENDPROC(secondary_startup)

	.type	__secondary_data, %object
__secondary_data:
	.long	secondary_data
-	.long	__secondary_switched
#endif /* CONFIG_SMP */

/*
@@ -164,7 +159,7 @@ __after_proc_init:
#endif
	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
#endif /* CONFIG_CPU_CP15 */
-	ret	r13
+	ret	lr
ENDPROC(__after_proc_init)
	.ltorg
@@ -80,7 +80,7 @@
ENTRY(stext)
 ARM_BE8(setend	be )			@ ensure we are in BE8 mode

- THUMB(	adr	r9, BSYM(1f)	)	@ Kernel is always entered in ARM.
+ THUMB(	badr	r9, 1f		)	@ Kernel is always entered in ARM.
 THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)
@@ -131,13 +131,30 @@ ENTRY(stext)
	 * The following calls CPU specific code in a position independent
	 * manner.  See arch/arm/mm/proc-*.S for details.  r10 = base of
	 * xxx_proc_info structure selected by __lookup_processor_type
-	 * above.  On return, the CPU will be ready for the MMU to be
-	 * turned on, and r0 will hold the CPU control register value.
+	 * above.
+	 *
+	 * The processor init function will be called with:
+	 *  r1 - machine type
+	 *  r2 - boot data (atags/dt) pointer
+	 *  r4 - translation table base (low word)
+	 *  r5 - translation table base (high word, if LPAE)
+	 *  r8 - translation table base 1 (pfn if LPAE)
+	 *  r9 - cpuid
+	 *  r13 - virtual address for __enable_mmu -> __turn_mmu_on
+	 *
+	 * On return, the CPU will be ready for the MMU to be turned on,
+	 * r0 will hold the CPU control register value, r1, r2, r4, and
+	 * r9 will be preserved.  r5 will also be preserved if LPAE.
	 */
	ldr	r13, =__mmap_switched		@ address to jump to after
						@ mmu has been enabled
-	adr	lr, BSYM(1f)			@ return (PIC) address
+	badr	lr, 1f				@ return (PIC) address
+#ifdef CONFIG_ARM_LPAE
+	mov	r5, #0				@ high TTBR0
+	mov	r8, r4, lsr #12			@ TTBR1 is swapper_pg_dir pfn
+#else
+	mov	r8, r4				@ set TTBR1 to swapper_pg_dir
+#endif
	ldr	r12, [r10, #PROCINFO_INITFUNC]
	add	r12, r12, r10
	ret	r12
@@ -158,7 +175,7 @@ ENDPROC(stext)
 *
 * Returns:
 *  r0, r3, r5-r7 corrupted
- *  r4 = page table (see ARCH_PGD_SHIFT in asm/memory.h)
+ *  r4 = physical page table address
 */
__create_page_tables:
	pgtbl	r4, r8				@ page table address
@@ -333,7 +350,6 @@ __create_page_tables:
#endif
#ifdef CONFIG_ARM_LPAE
	sub	r4, r4, #0x1000		@ point to the PGD table
-	mov	r4, r4, lsr #ARCH_PGD_SHIFT
#endif
	ret	lr
ENDPROC(__create_page_tables)
@@ -346,9 +362,9 @@ __turn_mmu_on_loc:

#if defined(CONFIG_SMP)
	.text
-ENTRY(secondary_startup_arm)
	.arm
- THUMB(	adr	r9, BSYM(1f)	)	@ Kernel is entered in ARM.
+ENTRY(secondary_startup_arm)
+ THUMB(	badr	r9, 1f		)	@ Kernel is entered in ARM.
 THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)
@@ -381,10 +397,10 @@ ENTRY(secondary_startup)
	adr	r4, __secondary_data
	ldmia	r4, {r5, r7, r12}		@ address to jump to after
	sub	lr, r4, r5			@ mmu has been enabled
-	ldr	r4, [r7, lr]			@ get secondary_data.pgdir
-	add	r7, r7, #4
-	ldr	r8, [r7, lr]			@ get secondary_data.swapper_pg_dir
-	adr	lr, BSYM(__enable_mmu)		@ return address
+	add	r3, r7, lr
+	ldrd	r4, [r3, #0]			@ get secondary_data.pgdir
+	ldr	r8, [r3, #8]			@ get secondary_data.swapper_pg_dir
+	badr	lr, __enable_mmu		@ return address
	mov	r13, r12			@ __secondary_switched address
	ldr	r12, [r10, #PROCINFO_INITFUNC]
	add	r12, r12, r10			@ initialise processor
@@ -397,7 +413,7 @@ ENDPROC(secondary_startup_arm)
 * r6  = &secondary_data
 */
ENTRY(__secondary_switched)
-	ldr	sp, [r7, #4]			@ get secondary_data.stack
+	ldr	sp, [r7, #12]			@ get secondary_data.stack
	mov	fp, #0
	b	secondary_start_kernel
ENDPROC(__secondary_switched)
@@ -416,12 +432,14 @@ __secondary_data:
/*
 * Setup common bits before finally enabling the MMU.  Essentially
 * this is just loading the page table pointer and domain access
- * registers.
+ * registers.  All these registers need to be preserved by the
+ * processor setup function (or set in the case of r0)
 *
 *  r0  = cp#15 control register
 *  r1  = machine ID
 *  r2  = atags or dtb pointer
- *  r4  = page table (see ARCH_PGD_SHIFT in asm/memory.h)
+ *  r4  = TTBR pointer (low word)
+ *  r5  = TTBR pointer (high word if LPAE)
 *  r9  = processor ID
 *  r13 = *virtual* address to jump to upon completion
 */
@@ -440,7 +458,9 @@ __enable_mmu:
#ifdef CONFIG_CPU_ICACHE_DISABLE
	bic	r0, r0, #CR_I
#endif
-#ifndef CONFIG_ARM_LPAE
+#ifdef CONFIG_ARM_LPAE
+	mcrr	p15, 0, r4, r5, c2		@ load TTBR0
+#else
	mov	r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
arch/arm/kernel/module-plts.c (new file, 183 lines)
@@ -0,0 +1,183 @@
/*
 * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/module.h>

#include <asm/cache.h>
#include <asm/opcodes.h>

#define PLT_ENT_STRIDE		L1_CACHE_BYTES
#define PLT_ENT_COUNT		(PLT_ENT_STRIDE / sizeof(u32))
#define PLT_ENT_SIZE		(sizeof(struct plt_entries) / PLT_ENT_COUNT)

#ifdef CONFIG_THUMB2_KERNEL
#define PLT_ENT_LDR		__opcode_to_mem_thumb32(0xf8dff000 | \
						(PLT_ENT_STRIDE - 4))
#else
#define PLT_ENT_LDR		__opcode_to_mem_arm(0xe59ff000 | \
						(PLT_ENT_STRIDE - 8))
#endif

struct plt_entries {
	u32	ldr[PLT_ENT_COUNT];
	u32	lit[PLT_ENT_COUNT];
};

static bool in_init(const struct module *mod, u32 addr)
{
	return addr - (u32)mod->module_init < mod->init_size;
}

u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
{
	struct plt_entries *plt, *plt_end;
	int c, *count;

	if (in_init(mod, loc)) {
		plt = (void *)mod->arch.init_plt->sh_addr;
		plt_end = (void *)plt + mod->arch.init_plt->sh_size;
		count = &mod->arch.init_plt_count;
	} else {
		plt = (void *)mod->arch.core_plt->sh_addr;
		plt_end = (void *)plt + mod->arch.core_plt->sh_size;
		count = &mod->arch.core_plt_count;
	}

	/* Look for an existing entry pointing to 'val' */
	for (c = *count; plt < plt_end; c -= PLT_ENT_COUNT, plt++) {
		int i;

		if (!c) {
			/* Populate a new set of entries */
			*plt = (struct plt_entries){
				{ [0 ... PLT_ENT_COUNT - 1] = PLT_ENT_LDR, },
				{ val, }
			};
			++*count;
			return (u32)plt->ldr;
		}
		for (i = 0; i < PLT_ENT_COUNT; i++) {
			if (!plt->lit[i]) {
				plt->lit[i] = val;
				++*count;
			}
			if (plt->lit[i] == val)
				return (u32)&plt->ldr[i];
		}
	}
	BUG();
}

static int duplicate_rel(Elf32_Addr base, const Elf32_Rel *rel, int num,
			 u32 mask)
{
	u32 *loc1, *loc2;
	int i;

	for (i = 0; i < num; i++) {
		if (rel[i].r_info != rel[num].r_info)
			continue;

		/*
		 * Identical relocation types against identical symbols can
		 * still result in different PLT entries if the addend in the
		 * place is different. So resolve the target of the relocation
		 * to compare the values.
		 */
		loc1 = (u32 *)(base + rel[i].r_offset);
		loc2 = (u32 *)(base + rel[num].r_offset);
		if (((*loc1 ^ *loc2) & mask) == 0)
			return 1;
	}
	return 0;
}

/* Count how many PLT entries we may need */
static unsigned int count_plts(Elf32_Addr base, const Elf32_Rel *rel, int num)
{
	unsigned int ret = 0;
	int i;

	/*
	 * Sure, this is order(n^2), but it's usually short, and not
	 * time critical
	 */
	for (i = 0; i < num; i++)
		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_ARM_CALL:
		case R_ARM_PC24:
		case R_ARM_JUMP24:
			if (!duplicate_rel(base, rel, i,
					   __opcode_to_mem_arm(0x00ffffff)))
				ret++;
			break;
#ifdef CONFIG_THUMB2_KERNEL
		case R_ARM_THM_CALL:
		case R_ARM_THM_JUMP24:
			if (!duplicate_rel(base, rel, i,
					   __opcode_to_mem_thumb32(0x07ff2fff)))
				ret++;
#endif
		}
	return ret;
}

int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
			      char *secstrings, struct module *mod)
{
	unsigned long core_plts = 0, init_plts = 0;
	Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;

	/*
	 * To store the PLTs, we expand the .text section for core module code
	 * and the .init.text section for initialization code.
	 */
	for (s = sechdrs; s < sechdrs_end; ++s)
		if (strcmp(".core.plt", secstrings + s->sh_name) == 0)
			mod->arch.core_plt = s;
		else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
			mod->arch.init_plt = s;

	if (!mod->arch.core_plt || !mod->arch.init_plt) {
		pr_err("%s: sections missing\n", mod->name);
		return -ENOEXEC;
	}

	for (s = sechdrs + 1; s < sechdrs_end; ++s) {
		const Elf32_Rel *rels = (void *)ehdr + s->sh_offset;
		int numrels = s->sh_size / sizeof(Elf32_Rel);
		Elf32_Shdr *dstsec = sechdrs + s->sh_info;

		if (s->sh_type != SHT_REL)
			continue;

		if (strstr(secstrings + s->sh_name, ".init"))
			init_plts += count_plts(dstsec->sh_addr, rels, numrels);
		else
			core_plts += count_plts(dstsec->sh_addr, rels, numrels);
	}

	mod->arch.core_plt->sh_type = SHT_NOBITS;
	mod->arch.core_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.core_plt->sh_addralign = L1_CACHE_BYTES;
	mod->arch.core_plt->sh_size = round_up(core_plts * PLT_ENT_SIZE,
					       sizeof(struct plt_entries));
	mod->arch.core_plt_count = 0;

	mod->arch.init_plt->sh_type = SHT_NOBITS;
	mod->arch.init_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.init_plt->sh_addralign = L1_CACHE_BYTES;
	mod->arch.init_plt->sh_size = round_up(init_plts * PLT_ENT_SIZE,
					       sizeof(struct plt_entries));
	mod->arch.init_plt_count = 0;
	pr_debug("%s: core.plt=%x, init.plt=%x\n", __func__,
		 mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size);
	return 0;
}
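The layout trick in module-plts.c: each struct plt_entries packs PLT_ENT_COUNT "ldr pc, [pc, #imm]" slots followed by PLT_ENT_COUNT literal slots, so ldr[i] sits exactly PLT_ENT_STRIDE bytes before lit[i]. Since the ARM PC reads as the instruction's address plus 8 (plus 4 on Thumb-2), the encoded load offset is PLT_ENT_STRIDE - 8 (or - 4). A sketch of that address arithmetic, assuming a 64-byte L1_CACHE_BYTES:

	#include <stdint.h>

	#define PLT_ENT_STRIDE 64	/* assumption: L1_CACHE_BYTES == 64 */

	static uint32_t plt_literal_addr(uint32_t ldr_addr)
	{
		uint32_t pc = ldr_addr + 8;		/* ARM PC bias */
		return pc + (PLT_ENT_STRIDE - 8);	/* == ldr_addr + PLT_ENT_STRIDE */
	}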
@@ -40,7 +40,12 @@
#ifdef CONFIG_MMU
void *module_alloc(unsigned long size)
{
-	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+	void *p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
				GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
				__builtin_return_address(0));
+	if (!IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || p)
+		return p;
+	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
+				GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
+				__builtin_return_address(0));
}
@@ -110,6 +115,20 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
			offset -= 0x04000000;

		offset += sym->st_value - loc;

+		/*
+		 * Route through a PLT entry if 'offset' exceeds the
+		 * supported range. Note that 'offset + loc + 8'
+		 * contains the absolute jump target, i.e.,
+		 * @sym + addend, corrected for the +8 PC bias.
+		 */
+		if (IS_ENABLED(CONFIG_ARM_MODULE_PLTS) &&
+		    (offset <= (s32)0xfe000000 ||
+		     offset >= (s32)0x02000000))
+			offset = get_module_plt(module, loc,
+						offset + loc + 8)
+				 - loc - 8;

		if (offset <= (s32)0xfe000000 ||
		    offset >= (s32)0x02000000) {
			pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
@@ -203,6 +222,17 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
			offset -= 0x02000000;
		offset += sym->st_value - loc;

+		/*
+		 * Route through a PLT entry if 'offset' exceeds the
+		 * supported range.
+		 */
+		if (IS_ENABLED(CONFIG_ARM_MODULE_PLTS) &&
+		    (offset <= (s32)0xff000000 ||
+		     offset >= (s32)0x01000000))
+			offset = get_module_plt(module, loc,
+						offset + loc + 4)
+				 - loc - 4;

		if (offset <= (s32)0xff000000 ||
		    offset >= (s32)0x01000000) {
			pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
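The magic constants in the new range checks are just the reach of the branch encodings: ARM B/BL carries a signed 24-bit word offset, roughly +/-32 MB, and 0x02000000 is 2^25 while (s32)0xfe000000 is -2^25. A hedged sketch of the test (illustrative, not patch code):

	#include <stdbool.h>
	#include <stdint.h>

	/* true if an ARM B/BL can encode this byte offset */
	static bool arm_call_in_range(int32_t offset)
	{
		return offset > (int32_t)0xfe000000 &&	/* above -2^25 */
		       offset < (int32_t)0x02000000;	/* below +2^25 */
	}

When the offset is out of range, get_module_plt() hands back a veneer inside the module (guaranteed reachable), and the branch is re-pointed at that veneer instead.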
arch/arm/kernel/module.lds (new file, 4 lines)
@@ -0,0 +1,4 @@
SECTIONS {
	.core.plt : { BYTE(0) }
	.init.plt : { BYTE(0) }
}
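A note on the BYTE(0) placeholders (a reading of the code above, not something the patch states): they exist only so the linker emits .core.plt and .init.plt sections into every module -- module_frob_arch_sections() fails with "sections missing" when either is absent -- after which that function retypes them to SHT_NOBITS and sizes them from the counted relocations, so the single byte never occupies memory.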
@@ -11,12 +11,18 @@
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/bitmap.h>
+#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>

+#include <asm/cputype.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>

@@ -229,6 +235,10 @@ armpmu_add(struct perf_event *event, int flags)
	int idx;
	int err = 0;

+	/* An event following a process won't be stopped earlier */
+	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
+		return -ENOENT;
+
	perf_pmu_disable(event->pmu);

	/* If we don't have a space for the counter then finish early. */
@@ -344,20 +354,12 @@ static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
	armpmu->free_irq(armpmu);
-	pm_runtime_put_sync(&armpmu->plat_device->dev);
}

static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
-	int err;
-	struct platform_device *pmu_device = armpmu->plat_device;
-
-	if (!pmu_device)
-		return -ENODEV;
-
-	pm_runtime_get_sync(&pmu_device->dev);
-	err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
+	int err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
	if (err) {
		armpmu_release_hardware(armpmu);
		return err;
@@ -454,6 +456,17 @@ static int armpmu_event_init(struct perf_event *event)
	int err = 0;
	atomic_t *active_events = &armpmu->active_events;

+	/*
+	 * Reject CPU-affine events for CPUs that are of a different class to
+	 * that which this PMU handles. Process-following events (where
+	 * event->cpu == -1) can be migrated between CPUs, and thus we have to
+	 * reject them later (in armpmu_add) if they're scheduled on a
+	 * different class of CPU.
+	 */
+	if (event->cpu != -1 &&
+	    !cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
+		return -ENOENT;
+
	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;
@@ -489,6 +502,10 @@ static void armpmu_enable(struct pmu *pmu)
	struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

+	/* For task-bound events we may be called on other CPUs */
+	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
+		return;
+
	if (enabled)
		armpmu->start(armpmu);
}
@@ -496,35 +513,26 @@ static void armpmu_enable(struct pmu *pmu)
static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);

+	/* For task-bound events we may be called on other CPUs */
+	if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
+		return;
+
	armpmu->stop(armpmu);
}

-#ifdef CONFIG_PM
-static int armpmu_runtime_resume(struct device *dev)
+/*
+ * In heterogeneous systems, events are specific to a particular
+ * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
+ * the same microarchitecture.
+ */
+static int armpmu_filter_match(struct perf_event *event)
{
-	struct arm_pmu_platdata *plat = dev_get_platdata(dev);
-
-	if (plat && plat->runtime_resume)
-		return plat->runtime_resume(dev);
-
-	return 0;
+	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	unsigned int cpu = smp_processor_id();
+	return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
}

-static int armpmu_runtime_suspend(struct device *dev)
-{
-	struct arm_pmu_platdata *plat = dev_get_platdata(dev);
-
-	if (plat && plat->runtime_suspend)
-		return plat->runtime_suspend(dev);
-
-	return 0;
-}
-#endif
-
-const struct dev_pm_ops armpmu_dev_pm_ops = {
-	SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL)
-};
-
static void armpmu_init(struct arm_pmu *armpmu)
{
	atomic_set(&armpmu->active_events, 0);
@@ -539,15 +547,349 @@ static void armpmu_init(struct arm_pmu *armpmu)
		.start		= armpmu_start,
		.stop		= armpmu_stop,
		.read		= armpmu_read,
+		.filter_match	= armpmu_filter_match,
	};
}

int armpmu_register(struct arm_pmu *armpmu, int type)
{
	armpmu_init(armpmu);
-	pm_runtime_enable(&armpmu->plat_device->dev);
	pr_info("enabled with %s PMU driver, %d counters available\n",
		armpmu->name, armpmu->num_events);
	return perf_pmu_register(&armpmu->pmu, armpmu->name, type);
}
+
+/* Set at runtime when we know what CPU type we are. */
+static struct arm_pmu *__oprofile_cpu_pmu;
+
+/*
+ * Despite the names, these two functions are CPU-specific and are used
+ * by the OProfile/perf code.
+ */
+const char *perf_pmu_name(void)
+{
+	if (!__oprofile_cpu_pmu)
+		return NULL;
+
+	return __oprofile_cpu_pmu->name;
+}
+EXPORT_SYMBOL_GPL(perf_pmu_name);
+
+int perf_num_counters(void)
+{
+	int max_events = 0;
+
+	if (__oprofile_cpu_pmu != NULL)
+		max_events = __oprofile_cpu_pmu->num_events;
+
+	return max_events;
+}
+EXPORT_SYMBOL_GPL(perf_num_counters);
+
+static void cpu_pmu_enable_percpu_irq(void *data)
+{
+	int irq = *(int *)data;
+
+	enable_percpu_irq(irq, IRQ_TYPE_NONE);
+}
+
+static void cpu_pmu_disable_percpu_irq(void *data)
+{
+	int irq = *(int *)data;
+
+	disable_percpu_irq(irq);
+}
+
+static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
+{
+	int i, irq, irqs;
+	struct platform_device *pmu_device = cpu_pmu->plat_device;
+	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
+
+	irqs = min(pmu_device->num_resources, num_possible_cpus());
+
+	irq = platform_get_irq(pmu_device, 0);
+	if (irq >= 0 && irq_is_percpu(irq)) {
+		on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
+		free_percpu_irq(irq, &hw_events->percpu_pmu);
+	} else {
+		for (i = 0; i < irqs; ++i) {
+			int cpu = i;
+
+			if (cpu_pmu->irq_affinity)
+				cpu = cpu_pmu->irq_affinity[i];
+
+			if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
+				continue;
+			irq = platform_get_irq(pmu_device, i);
+			if (irq >= 0)
+				free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
+		}
+	}
+}
+
+static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
+{
+	int i, err, irq, irqs;
+	struct platform_device *pmu_device = cpu_pmu->plat_device;
+	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
+
+	if (!pmu_device)
+		return -ENODEV;
+
+	irqs = min(pmu_device->num_resources, num_possible_cpus());
+	if (irqs < 1) {
+		pr_warn_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
+		return 0;
+	}
+
+	irq = platform_get_irq(pmu_device, 0);
+	if (irq >= 0 && irq_is_percpu(irq)) {
+		err = request_percpu_irq(irq, handler, "arm-pmu",
+					 &hw_events->percpu_pmu);
+		if (err) {
+			pr_err("unable to request IRQ%d for ARM PMU counters\n",
+			       irq);
+			return err;
+		}
+		on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
+	} else {
+		for (i = 0; i < irqs; ++i) {
+			int cpu = i;
+
+			err = 0;
+			irq = platform_get_irq(pmu_device, i);
+			if (irq < 0)
+				continue;
+
+			if (cpu_pmu->irq_affinity)
+				cpu = cpu_pmu->irq_affinity[i];
+
+			/*
+			 * If we have a single PMU interrupt that we can't shift,
+			 * assume that we're running on a uniprocessor machine and
+			 * continue. Otherwise, continue without this interrupt.
+			 */
+			if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
+				pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
+					irq, cpu);
+				continue;
+			}
+
+			err = request_irq(irq, handler,
+					  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
+					  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
+			if (err) {
+				pr_err("unable to request IRQ%d for ARM PMU counters\n",
+				       irq);
+				return err;
+			}
+
+			cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * PMU hardware loses all context when a CPU goes offline.
+ * When a CPU is hotplugged back in, since some hardware registers are
+ * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
+ * junk values out of them.
+ */
+static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
+			  void *hcpu)
+{
+	int cpu = (unsigned long)hcpu;
+	struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb);
+
+	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
+		return NOTIFY_DONE;
+
+	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
+		return NOTIFY_DONE;
+
+	if (pmu->reset)
+		pmu->reset(pmu);
+	else
+		return NOTIFY_DONE;
+
+	return NOTIFY_OK;
+}
+
+static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	int err;
+	int cpu;
+	struct pmu_hw_events __percpu *cpu_hw_events;
+
+	cpu_hw_events = alloc_percpu(struct pmu_hw_events);
+	if (!cpu_hw_events)
+		return -ENOMEM;
+
+	cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify;
+	err = register_cpu_notifier(&cpu_pmu->hotplug_nb);
+	if (err)
+		goto out_hw_events;
+
+	for_each_possible_cpu(cpu) {
+		struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
+		raw_spin_lock_init(&events->pmu_lock);
+		events->percpu_pmu = cpu_pmu;
+	}
+
+	cpu_pmu->hw_events	= cpu_hw_events;
+	cpu_pmu->request_irq	= cpu_pmu_request_irq;
+	cpu_pmu->free_irq	= cpu_pmu_free_irq;
+
+	/* Ensure the PMU has sane values out of reset. */
+	if (cpu_pmu->reset)
+		on_each_cpu_mask(&cpu_pmu->supported_cpus, cpu_pmu->reset,
+			 cpu_pmu, 1);
+
+	/* If no interrupts available, set the corresponding capability flag */
+	if (!platform_get_irq(cpu_pmu->plat_device, 0))
+		cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+
+	return 0;
+
+out_hw_events:
+	free_percpu(cpu_hw_events);
+	return err;
+}
+
+static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
+{
+	unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
+	free_percpu(cpu_pmu->hw_events);
+}
+
+/*
+ * CPU PMU identification and probing.
+ */
+static int probe_current_pmu(struct arm_pmu *pmu,
+			     const struct pmu_probe_info *info)
+{
+	int cpu = get_cpu();
+	unsigned int cpuid = read_cpuid_id();
+	int ret = -ENODEV;
+
+	pr_info("probing PMU on CPU %d\n", cpu);
+
+	for (; info->init != NULL; info++) {
+		if ((cpuid & info->mask) != info->cpuid)
+			continue;
+		ret = info->init(pmu);
+		break;
+	}
+
+	put_cpu();
+	return ret;
+}
+
+static int of_pmu_irq_cfg(struct arm_pmu *pmu)
+{
+	int i, irq, *irqs;
+	struct platform_device *pdev = pmu->plat_device;
+
+	/* Don't bother with PPIs; they're already affine */
+	irq = platform_get_irq(pdev, 0);
+	if (irq >= 0 && irq_is_percpu(irq))
+		return 0;
+
+	irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
+	if (!irqs)
+		return -ENOMEM;
+
+	for (i = 0; i < pdev->num_resources; ++i) {
+		struct device_node *dn;
+		int cpu;
+
+		dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity",
+				      i);
+		if (!dn) {
+			pr_warn("Failed to parse %s/interrupt-affinity[%d]\n",
+				of_node_full_name(pdev->dev.of_node), i);
+			break;
+		}
+
+		for_each_possible_cpu(cpu)
+			if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
+				break;
+
+		of_node_put(dn);
+		if (cpu >= nr_cpu_ids) {
+			pr_warn("Failed to find logical CPU for %s\n",
+				dn->name);
+			break;
+		}
+
+		irqs[i] = cpu;
+		cpumask_set_cpu(cpu, &pmu->supported_cpus);
+	}
+
+	if (i == pdev->num_resources) {
+		pmu->irq_affinity = irqs;
+	} else {
+		kfree(irqs);
+		cpumask_setall(&pmu->supported_cpus);
+	}
+
+	return 0;
+}
+
+int arm_pmu_device_probe(struct platform_device *pdev,
+			 const struct of_device_id *of_table,
+			 const struct pmu_probe_info *probe_table)
+{
+	const struct of_device_id *of_id;
+	const int (*init_fn)(struct arm_pmu *);
+	struct device_node *node = pdev->dev.of_node;
+	struct arm_pmu *pmu;
+	int ret = -ENODEV;
+
+	pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
+	if (!pmu) {
+		pr_info("failed to allocate PMU device!\n");
+		return -ENOMEM;
+	}
+
+	if (!__oprofile_cpu_pmu)
+		__oprofile_cpu_pmu = pmu;
+
+	pmu->plat_device = pdev;
+
+	if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
+		init_fn = of_id->data;
+
+		ret = of_pmu_irq_cfg(pmu);
+		if (!ret)
+			ret = init_fn(pmu);
+	} else {
+		ret = probe_current_pmu(pmu, probe_table);
+		cpumask_setall(&pmu->supported_cpus);
+	}
+
+	if (ret) {
+		pr_info("failed to probe PMU!\n");
+		goto out_free;
+	}
+
+	ret = cpu_pmu_init(pmu);
+	if (ret)
+		goto out_free;
+
+	ret = armpmu_register(pmu, -1);
+	if (ret)
+		goto out_destroy;
+
+	return 0;
+
+out_destroy:
+	cpu_pmu_destroy(pmu);
+out_free:
+	pr_info("failed to register PMU devices!\n");
+	kfree(pmu);
+	return ret;
+}
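For context on the filter_match hook added above: the generic perf core was taught (elsewhere in this series) to consult an optional per-PMU filter before scheduling an event on a CPU. Roughly, and hedged as a paraphrase of the era's kernel/events/core.c rather than code from this diff:

	/* paraphrase of the core helper; returns nonzero if the event
	 * may be scheduled on the current CPU */
	static inline int pmu_filter_match(struct perf_event *event)
	{
		struct pmu *pmu = event->pmu;

		return pmu->filter_match ? pmu->filter_match(event) : 1;
	}

This lets a big.LITTLE PMU decline events on the wrong cluster without the core code knowing anything about microarchitectures.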
@@ -1,421 +0,0 @@ (entire file deleted)
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */
#define pr_fmt(fmt) "CPU PMU: " fmt

#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/cputype.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *cpu_pmu;

/*
 * Despite the names, these two functions are CPU-specific and are used
 * by the OProfile/perf code.
 */
const char *perf_pmu_name(void)
{
	if (!cpu_pmu)
		return NULL;

	return cpu_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
	int max_events = 0;

	if (cpu_pmu != NULL)
		max_events = cpu_pmu->num_events;

	return max_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);

/* Include the PMU-specific implementations. */
#include "perf_event_xscale.c"
#include "perf_event_v6.c"
#include "perf_event_v7.c"

static void cpu_pmu_enable_percpu_irq(void *data)
{
	int irq = *(int *)data;

	enable_percpu_irq(irq, IRQ_TYPE_NONE);
}

static void cpu_pmu_disable_percpu_irq(void *data)
{
	int irq = *(int *)data;

	disable_percpu_irq(irq);
}

static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
{
	int i, irq, irqs;
	struct platform_device *pmu_device = cpu_pmu->plat_device;
	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;

	irqs = min(pmu_device->num_resources, num_possible_cpus());

	irq = platform_get_irq(pmu_device, 0);
	if (irq >= 0 && irq_is_percpu(irq)) {
		on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
		free_percpu_irq(irq, &hw_events->percpu_pmu);
	} else {
		for (i = 0; i < irqs; ++i) {
			int cpu = i;

			if (cpu_pmu->irq_affinity)
				cpu = cpu_pmu->irq_affinity[i];

			if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
				continue;
			irq = platform_get_irq(pmu_device, i);
			if (irq >= 0)
				free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
		}
	}
}

static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
{
	int i, err, irq, irqs;
	struct platform_device *pmu_device = cpu_pmu->plat_device;
	struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;

	if (!pmu_device)
		return -ENODEV;

	irqs = min(pmu_device->num_resources, num_possible_cpus());
	if (irqs < 1) {
		pr_warn_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n");
		return 0;
	}

	irq = platform_get_irq(pmu_device, 0);
	if (irq >= 0 && irq_is_percpu(irq)) {
		err = request_percpu_irq(irq, handler, "arm-pmu",
					 &hw_events->percpu_pmu);
		if (err) {
			pr_err("unable to request IRQ%d for ARM PMU counters\n",
			       irq);
			return err;
		}
		on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
	} else {
		for (i = 0; i < irqs; ++i) {
			int cpu = i;

			err = 0;
			irq = platform_get_irq(pmu_device, i);
			if (irq < 0)
				continue;

			if (cpu_pmu->irq_affinity)
				cpu = cpu_pmu->irq_affinity[i];

			/*
			 * If we have a single PMU interrupt that we can't shift,
			 * assume that we're running on a uniprocessor machine and
			 * continue. Otherwise, continue without this interrupt.
			 */
			if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
				pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
					irq, cpu);
				continue;
			}

			err = request_irq(irq, handler,
					  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
					  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
			if (err) {
				pr_err("unable to request IRQ%d for ARM PMU counters\n",
				       irq);
				return err;
			}

			cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
		}
	}

	return 0;
}

/*
 * PMU hardware loses all context when a CPU goes offline.
 * When a CPU is hotplugged back in, since some hardware registers are
 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
 * junk values out of them.
 */
static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
			  void *hcpu)
{
	struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb);

	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
		return NOTIFY_DONE;

	if (pmu->reset)
		pmu->reset(pmu);
	else
		return NOTIFY_DONE;

	return NOTIFY_OK;
}

static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
	int err;
	int cpu;
	struct pmu_hw_events __percpu *cpu_hw_events;

	cpu_hw_events = alloc_percpu(struct pmu_hw_events);
	if (!cpu_hw_events)
		return -ENOMEM;

	cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify;
	err = register_cpu_notifier(&cpu_pmu->hotplug_nb);
	if (err)
		goto out_hw_events;

	for_each_possible_cpu(cpu) {
		struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
		raw_spin_lock_init(&events->pmu_lock);
		events->percpu_pmu = cpu_pmu;
	}

	cpu_pmu->hw_events	= cpu_hw_events;
	cpu_pmu->request_irq	= cpu_pmu_request_irq;
	cpu_pmu->free_irq	= cpu_pmu_free_irq;

	/* Ensure the PMU has sane values out of reset. */
	if (cpu_pmu->reset)
		on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);

	/* If no interrupts available, set the corresponding capability flag */
	if (!platform_get_irq(cpu_pmu->plat_device, 0))
		cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

	return 0;

out_hw_events:
	free_percpu(cpu_hw_events);
	return err;
}

static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
	unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
	free_percpu(cpu_pmu->hw_events);
}

/*
 * PMU platform driver and devicetree bindings.
 */
static const struct of_device_id cpu_pmu_of_device_ids[] = {
	{.compatible = "arm,cortex-a17-pmu",	.data = armv7_a17_pmu_init},
	{.compatible = "arm,cortex-a15-pmu",	.data = armv7_a15_pmu_init},
	{.compatible = "arm,cortex-a12-pmu",	.data = armv7_a12_pmu_init},
	{.compatible = "arm,cortex-a9-pmu",	.data = armv7_a9_pmu_init},
	{.compatible = "arm,cortex-a8-pmu",	.data = armv7_a8_pmu_init},
	{.compatible = "arm,cortex-a7-pmu",	.data = armv7_a7_pmu_init},
	{.compatible = "arm,cortex-a5-pmu",	.data = armv7_a5_pmu_init},
	{.compatible = "arm,arm11mpcore-pmu",	.data = armv6mpcore_pmu_init},
	{.compatible = "arm,arm1176-pmu",	.data = armv6_1176_pmu_init},
	{.compatible = "arm,arm1136-pmu",	.data = armv6_1136_pmu_init},
	{.compatible = "qcom,krait-pmu",	.data = krait_pmu_init},
	{.compatible = "qcom,scorpion-pmu",	.data = scorpion_pmu_init},
	{.compatible = "qcom,scorpion-mp-pmu",	.data = scorpion_mp_pmu_init},
	{},
};

static struct platform_device_id cpu_pmu_plat_device_ids[] = {
	{.name = "arm-pmu"},
	{.name = "armv6-pmu"},
	{.name = "armv7-pmu"},
	{.name = "xscale-pmu"},
	{},
};

static const struct pmu_probe_info pmu_probe_table[] = {
	ARM_PMU_PROBE(ARM_CPU_PART_ARM1136, armv6_1136_pmu_init),
	ARM_PMU_PROBE(ARM_CPU_PART_ARM1156, armv6_1156_pmu_init),
	ARM_PMU_PROBE(ARM_CPU_PART_ARM1176, armv6_1176_pmu_init),
	ARM_PMU_PROBE(ARM_CPU_PART_ARM11MPCORE, armv6mpcore_pmu_init),
	ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A8, armv7_a8_pmu_init),
	ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, armv7_a9_pmu_init),
	XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V1, xscale1pmu_init),
	XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V2, xscale2pmu_init),
	{ /* sentinel value */ }
};

/*
 * CPU PMU identification and probing.
 */
static int probe_current_pmu(struct arm_pmu *pmu)
{
	int cpu = get_cpu();
	unsigned int cpuid = read_cpuid_id();
	int ret = -ENODEV;
	const struct pmu_probe_info *info;

	pr_info("probing PMU on CPU %d\n", cpu);

	for (info = pmu_probe_table; info->init != NULL; info++) {
		if ((cpuid & info->mask) != info->cpuid)
			continue;
		ret = info->init(pmu);
		break;
	}

	put_cpu();
	return ret;
}

static int of_pmu_irq_cfg(struct platform_device *pdev)
{
	int i, irq;
	int *irqs;

	/* Don't bother with PPIs; they're already affine */
	irq = platform_get_irq(pdev, 0);
	if (irq >= 0 && irq_is_percpu(irq))
		return 0;

	irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
	if (!irqs)
		return -ENOMEM;

	for (i = 0; i < pdev->num_resources; ++i) {
		struct device_node *dn;
		int cpu;

		dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity",
				      i);
		if (!dn) {
			pr_warn("Failed to parse %s/interrupt-affinity[%d]\n",
				of_node_full_name(pdev->dev.of_node), i);
			break;
		}

		for_each_possible_cpu(cpu)
			if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
				break;

		of_node_put(dn);
		if (cpu >= nr_cpu_ids) {
			pr_warn("Failed to find logical CPU for %s\n",
				dn->name);
			break;
		}

		irqs[i] = cpu;
	}

	if (i == pdev->num_resources)
		cpu_pmu->irq_affinity = irqs;
	else
		kfree(irqs);

	return 0;
}

static int cpu_pmu_device_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	const int (*init_fn)(struct arm_pmu *);
	struct device_node *node = pdev->dev.of_node;
	struct arm_pmu *pmu;
	int ret = -ENODEV;

	if (cpu_pmu) {
		pr_info("attempt to register multiple PMU devices!\n");
		return -ENOSPC;
	}

	pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
	if (!pmu) {
		pr_info("failed to allocate PMU device!\n");
		return -ENOMEM;
	}

	cpu_pmu = pmu;
	cpu_pmu->plat_device = pdev;

	if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) {
		init_fn = of_id->data;

		ret = of_pmu_irq_cfg(pdev);
		if (!ret)
			ret = init_fn(pmu);
	} else {
		ret = probe_current_pmu(pmu);
	}

	if (ret) {
		pr_info("failed to probe PMU!\n");
		goto out_free;
	}

	ret = cpu_pmu_init(cpu_pmu);
	if (ret)
		goto out_free;

	ret = armpmu_register(cpu_pmu, -1);
	if (ret)
		goto out_destroy;

	return 0;

out_destroy:
	cpu_pmu_destroy(cpu_pmu);
out_free:
	pr_info("failed to register PMU devices!\n");
	kfree(pmu);
	return ret;
}

static struct platform_driver cpu_pmu_driver = {
	.driver		= {
		.name	= "arm-pmu",
		.pm	= &armpmu_dev_pm_ops,
		.of_match_table = cpu_pmu_of_device_ids,
	},
	.probe		= cpu_pmu_device_probe,
	.id_table	= cpu_pmu_plat_device_ids,
};

static int __init register_pmu_driver(void)
{
	return platform_driver_register(&cpu_pmu_driver);
}
device_initcall(register_pmu_driver);
@@ -31,6 +31,14 @@
 */

#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)

+#include <asm/cputype.h>
+#include <asm/irq_regs.h>
+#include <asm/pmu.h>
+
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
enum armv6_perf_types {
	ARMV6_PERFCTR_ICACHE_MISS	= 0x0,
	ARMV6_PERFCTR_IBUF_STALL	= 0x1,
@@ -543,24 +551,39 @@ static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)

	return 0;
}
-#else
-static int armv6_1136_pmu_init(struct arm_pmu *cpu_pmu)
+
+static struct of_device_id armv6_pmu_of_device_ids[] = {
+	{.compatible = "arm,arm11mpcore-pmu",	.data = armv6mpcore_pmu_init},
+	{.compatible = "arm,arm1176-pmu",	.data = armv6_1176_pmu_init},
+	{.compatible = "arm,arm1136-pmu",	.data = armv6_1136_pmu_init},
+	{ /* sentinel value */ }
+};
+
+static const struct pmu_probe_info armv6_pmu_probe_table[] = {
+	ARM_PMU_PROBE(ARM_CPU_PART_ARM1136, armv6_1136_pmu_init),
+	ARM_PMU_PROBE(ARM_CPU_PART_ARM1156, armv6_1156_pmu_init),
+	ARM_PMU_PROBE(ARM_CPU_PART_ARM1176, armv6_1176_pmu_init),
+	ARM_PMU_PROBE(ARM_CPU_PART_ARM11MPCORE, armv6mpcore_pmu_init),
+	{ /* sentinel value */ }
+};
+
+static int armv6_pmu_device_probe(struct platform_device *pdev)
{
-	return -ENODEV;
+	return arm_pmu_device_probe(pdev, armv6_pmu_of_device_ids,
+				    armv6_pmu_probe_table);
}

-static int armv6_1156_pmu_init(struct arm_pmu *cpu_pmu)
-{
-	return -ENODEV;
-}
+static struct platform_driver armv6_pmu_driver = {
+	.driver		= {
+		.name	= "armv6-pmu",
+		.of_match_table = armv6_pmu_of_device_ids,
+	},
+	.probe		= armv6_pmu_device_probe,
+};

-static int armv6_1176_pmu_init(struct arm_pmu *cpu_pmu)
+static int __init register_armv6_pmu_driver(void)
{
-	return -ENODEV;
-}
-
-static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
-{
-	return -ENODEV;
+	return platform_driver_register(&armv6_pmu_driver);
}
+device_initcall(register_armv6_pmu_driver);
#endif /* CONFIG_CPU_V6 || CONFIG_CPU_V6K */
@@ -19,9 +19,15 @@
#ifdef CONFIG_CPU_V7

#include <asm/cp15.h>
+#include <asm/cputype.h>
+#include <asm/irq_regs.h>
+#include <asm/pmu.h>
#include <asm/vfp.h>
#include "../vfp/vfpinstr.h"

+#include <linux/of.h>
+#include <linux/platform_device.h>
+
/*
 * Common ARMv7 event types
 *
@@ -1056,15 +1062,22 @@ static void armv7pmu_init(struct arm_pmu *cpu_pmu)
	cpu_pmu->max_period	= (1LLU << 32) - 1;
};

-static u32 armv7_read_num_pmnc_events(void)
+static void armv7_read_num_pmnc_events(void *info)
{
-	u32 nb_cnt;
+	int *nb_cnt = info;

	/* Read the nb of CNTx counters supported from PMNC */
-	nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
+	*nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;

-	/* Add the CPU cycles counter and return */
-	return nb_cnt + 1;
+	/* Add the CPU cycles counter */
+	*nb_cnt += 1;
}

+static int armv7_probe_num_events(struct arm_pmu *arm_pmu)
+{
+	return smp_call_function_any(&arm_pmu->supported_cpus,
+				     armv7_read_num_pmnc_events,
+				     &arm_pmu->num_events, 1);
+}
+
static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
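A note on the probing change above: num_events must now be read via smp_call_function_any() because, with heterogeneous CPUs, PMCR has to be read on a CPU this PMU actually covers. What armv7_read_num_pmnc_events() extracts is the PMCR.N field; a hedged standalone sketch of the arithmetic (ARMV7_PMNC_N_SHIFT is 11 and ARMV7_PMNC_N_MASK is 0x1f in this file):

	#include <stdint.h>

	/* N = bits [15:11] of PMCR counts the event counters; the cycle
	 * counter is separate, hence the + 1 */
	static int armv7_num_events(uint32_t pmcr)
	{
		return ((pmcr >> 11) & 0x1f) + 1;
	}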
@@ -1072,8 +1085,7 @@ static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
|
||||
armv7pmu_init(cpu_pmu);
|
||||
cpu_pmu->name = "armv7_cortex_a8";
|
||||
cpu_pmu->map_event = armv7_a8_map_event;
|
||||
cpu_pmu->num_events = armv7_read_num_pmnc_events();
|
||||
return 0;
|
||||
return armv7_probe_num_events(cpu_pmu);
|
||||
}
|
||||
|
||||
static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
|
||||
@@ -1081,8 +1093,7 @@ static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
|
||||
armv7pmu_init(cpu_pmu);
|
||||
cpu_pmu->name = "armv7_cortex_a9";
|
||||
cpu_pmu->map_event = armv7_a9_map_event;
|
||||
cpu_pmu->num_events = armv7_read_num_pmnc_events();
|
||||
return 0;
|
||||
return armv7_probe_num_events(cpu_pmu);
|
||||
}
|
||||
|
||||
static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
|
||||
@@ -1090,8 +1101,7 @@ static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
|
||||
armv7pmu_init(cpu_pmu);
|
||||
cpu_pmu->name = "armv7_cortex_a5";
|
||||
cpu_pmu->map_event = armv7_a5_map_event;
|
||||
cpu_pmu->num_events = armv7_read_num_pmnc_events();
|
||||
return 0;
|
||||
return armv7_probe_num_events(cpu_pmu);
|
||||
}
|
||||
|
||||
static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
|
||||
@@ -1099,9 +1109,8 @@ static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
|
||||
armv7pmu_init(cpu_pmu);
|
||||
cpu_pmu->name = "armv7_cortex_a15";
|
||||
cpu_pmu->map_event = armv7_a15_map_event;
|
||||
cpu_pmu->num_events = armv7_read_num_pmnc_events();
|
||||
cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
|
||||
return 0;
|
||||
return armv7_probe_num_events(cpu_pmu);
|
||||
}
|
||||
|
||||
static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
|
||||
@@ -1109,9 +1118,8 @@ static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
|
||||
armv7pmu_init(cpu_pmu);
|
||||
cpu_pmu->name = "armv7_cortex_a7";
|
||||
cpu_pmu->map_event = armv7_a7_map_event;
|
||||
cpu_pmu->num_events = armv7_read_num_pmnc_events();
|
||||
cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
|
||||
return 0;
|
||||
return armv7_probe_num_events(cpu_pmu);
|
||||
}
|
||||
|
||||
static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
|
||||
@@ -1119,16 +1127,15 @@ static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
|
||||
armv7pmu_init(cpu_pmu);
|
||||
cpu_pmu->name = "armv7_cortex_a12";
|
||||
cpu_pmu->map_event = armv7_a12_map_event;
|
||||
cpu_pmu->num_events = armv7_read_num_pmnc_events();
|
||||
cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
|
||||
return 0;
|
||||
return armv7_probe_num_events(cpu_pmu);
|
||||
}
|
||||
|
||||
static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
|
||||
{
|
||||
armv7_a12_pmu_init(cpu_pmu);
|
||||
int ret = armv7_a12_pmu_init(cpu_pmu);
|
||||
cpu_pmu->name = "armv7_cortex_a17";
|
||||
return 0;
|
||||
return ret;
|
||||
}
 
 /*
@@ -1508,14 +1515,13 @@ static int krait_pmu_init(struct arm_pmu *cpu_pmu)
 		cpu_pmu->map_event = krait_map_event_no_branch;
 	else
 		cpu_pmu->map_event = krait_map_event;
-	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
 	cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
 	cpu_pmu->reset		= krait_pmu_reset;
 	cpu_pmu->enable		= krait_pmu_enable_event;
 	cpu_pmu->disable	= krait_pmu_disable_event;
 	cpu_pmu->get_event_idx	= krait_pmu_get_event_idx;
 	cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx;
-	return 0;
+	return armv7_probe_num_events(cpu_pmu);
 }
 
 /*
@@ -1833,13 +1839,12 @@ static int scorpion_pmu_init(struct arm_pmu *cpu_pmu)
 	armv7pmu_init(cpu_pmu);
 	cpu_pmu->name		= "armv7_scorpion";
 	cpu_pmu->map_event	= scorpion_map_event;
-	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
 	cpu_pmu->reset		= scorpion_pmu_reset;
 	cpu_pmu->enable		= scorpion_pmu_enable_event;
 	cpu_pmu->disable	= scorpion_pmu_disable_event;
 	cpu_pmu->get_event_idx	= scorpion_pmu_get_event_idx;
 	cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
-	return 0;
+	return armv7_probe_num_events(cpu_pmu);
 }
 
 static int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
@@ -1847,62 +1852,52 @@ static int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
 	armv7pmu_init(cpu_pmu);
 	cpu_pmu->name		= "armv7_scorpion_mp";
 	cpu_pmu->map_event	= scorpion_map_event;
-	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
 	cpu_pmu->reset		= scorpion_pmu_reset;
 	cpu_pmu->enable		= scorpion_pmu_enable_event;
 	cpu_pmu->disable	= scorpion_pmu_disable_event;
 	cpu_pmu->get_event_idx	= scorpion_pmu_get_event_idx;
 	cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
-	return 0;
-}
-#else
-static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
-{
-	return -ENODEV;
-}
-
-static inline int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
-{
-	return -ENODEV;
-}
-
-static inline int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
-{
-	return -ENODEV;
-}
-
-static inline int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
-{
-	return -ENODEV;
-}
-
-static inline int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
-{
-	return -ENODEV;
-}
-
-static inline int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
-{
-	return -ENODEV;
-}
-
-static inline int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
-{
-	return -ENODEV;
-}
-
-static inline int krait_pmu_init(struct arm_pmu *cpu_pmu)
-{
-	return -ENODEV;
-}
-
-static inline int scorpion_pmu_init(struct arm_pmu *cpu_pmu)
-{
-	return -ENODEV;
-}
-
-static inline int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
-{
-	return -ENODEV;
+	return armv7_probe_num_events(cpu_pmu);
 }
+
+static const struct of_device_id armv7_pmu_of_device_ids[] = {
+	{.compatible = "arm,cortex-a17-pmu",	.data = armv7_a17_pmu_init},
+	{.compatible = "arm,cortex-a15-pmu",	.data = armv7_a15_pmu_init},
+	{.compatible = "arm,cortex-a12-pmu",	.data = armv7_a12_pmu_init},
+	{.compatible = "arm,cortex-a9-pmu",	.data = armv7_a9_pmu_init},
+	{.compatible = "arm,cortex-a8-pmu",	.data = armv7_a8_pmu_init},
+	{.compatible = "arm,cortex-a7-pmu",	.data = armv7_a7_pmu_init},
+	{.compatible = "arm,cortex-a5-pmu",	.data = armv7_a5_pmu_init},
+	{.compatible = "qcom,krait-pmu",	.data = krait_pmu_init},
+	{.compatible = "qcom,scorpion-pmu",	.data = scorpion_pmu_init},
+	{.compatible = "qcom,scorpion-mp-pmu",	.data = scorpion_mp_pmu_init},
+	{},
+};
+
+static const struct pmu_probe_info armv7_pmu_probe_table[] = {
+	ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A8, armv7_a8_pmu_init),
+	ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, armv7_a9_pmu_init),
+	{ /* sentinel value */ }
+};
+
+
+static int armv7_pmu_device_probe(struct platform_device *pdev)
+{
+	return arm_pmu_device_probe(pdev, armv7_pmu_of_device_ids,
+				    armv7_pmu_probe_table);
+}
+
+static struct platform_driver armv7_pmu_driver = {
+	.driver		= {
+		.name	= "armv7-pmu",
+		.of_match_table = armv7_pmu_of_device_ids,
+	},
+	.probe		= armv7_pmu_device_probe,
+};
+
+static int __init register_armv7_pmu_driver(void)
+{
+	return platform_driver_register(&armv7_pmu_driver);
+}
+device_initcall(register_armv7_pmu_driver);
 #endif	/* CONFIG_CPU_V7 */
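
With that, CONFIG_CPU_V7 builds carry their own platform driver and the old #else stubs disappear: binding happens either through the OF table (each of_device_id carries the variant's init function in .data) or, on non-DT boards, through the MIDR-keyed probe table. A sketch of the .data dispatch idiom on its own (demo_* names are hypothetical; of_match_node() is the real API, and struct arm_pmu is in scope via asm/pmu.h here):

#include <linux/of.h>

typedef int (*demo_init_fn)(struct arm_pmu *pmu);

static int demo_dispatch(struct device_node *node, struct arm_pmu *pmu)
{
	const struct of_device_id *match;

	/* Find the table entry whose compatible string matches the node */
	match = of_match_node(armv7_pmu_of_device_ids, node);
	if (!match || !match->data)
		return -ENODEV;

	/* .data smuggles the per-variant init function */
	return ((demo_init_fn)match->data)(pmu);
}
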
@@ -13,6 +13,14 @@
  */
 
 #ifdef CONFIG_CPU_XSCALE
+
+#include <asm/cputype.h>
+#include <asm/irq_regs.h>
+#include <asm/pmu.h>
+
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
 enum xscale_perf_types {
 	XSCALE_PERFCTR_ICACHE_MISS		= 0x00,
 	XSCALE_PERFCTR_ICACHE_NO_DELIVER	= 0x01,
@@ -740,14 +748,28 @@ static int xscale2pmu_init(struct arm_pmu *cpu_pmu)
 
 	return 0;
 }
-#else
-static inline int xscale1pmu_init(struct arm_pmu *cpu_pmu)
-{
-	return -ENODEV;
-}
 
-static inline int xscale2pmu_init(struct arm_pmu *cpu_pmu)
-{
-	return -ENODEV;
-}
+static const struct pmu_probe_info xscale_pmu_probe_table[] = {
+	XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V1, xscale1pmu_init),
+	XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V2, xscale2pmu_init),
+	{ /* sentinel value */ }
+};
+
+static int xscale_pmu_device_probe(struct platform_device *pdev)
+{
+	return arm_pmu_device_probe(pdev, NULL, xscale_pmu_probe_table);
+}
+
+static struct platform_driver xscale_pmu_driver = {
+	.driver		= {
+		.name	= "xscale-pmu",
+	},
+	.probe		= xscale_pmu_device_probe,
+};
+
+static int __init register_xscale_pmu_driver(void)
+{
+	return platform_driver_register(&xscale_pmu_driver);
+}
+device_initcall(register_xscale_pmu_driver);
 #endif	/* CONFIG_CPU_XSCALE */
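
XScale machines have no DT, so the probe above passes a NULL OF table and relies entirely on matching the MIDR against the pmu_probe_info entries. The fallback in outline, as a standalone sketch (struct demo_probe is an illustrative stand-in, not the kernel's pmu_probe_info layout; read_cpuid_id() is the real MIDR accessor):

#include <asm/cputype.h>

struct demo_probe {
	unsigned int cpuid;	/* expected MIDR bits */
	unsigned int mask;	/* which MIDR bits to compare */
	int (*init)(struct arm_pmu *pmu);
};

static int demo_probe_pmu(const struct demo_probe *table,
			  struct arm_pmu *pmu)
{
	unsigned int cpuid = read_cpuid_id();

	/* Walk until the sentinel entry (NULL init) */
	for (; table->init; table++)
		if ((cpuid & table->mask) == table->cpuid)
			return table->init(pmu);

	return -ENODEV;
}
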
@@ -75,8 +75,7 @@ __setup("fpe=", fpe_setup);
 
 extern void init_default_cache_policy(unsigned long);
 extern void paging_init(const struct machine_desc *desc);
-extern void early_paging_init(const struct machine_desc *,
-			      struct proc_info_list *);
+extern void early_paging_init(const struct machine_desc *);
 extern void sanity_check_meminfo(void);
 extern enum reboot_mode reboot_mode;
 extern void setup_dma_zone(const struct machine_desc *desc);
@@ -93,6 +92,9 @@ unsigned int __atags_pointer __initdata;
 unsigned int system_rev;
 EXPORT_SYMBOL(system_rev);
 
+const char *system_serial;
+EXPORT_SYMBOL(system_serial);
+
 unsigned int system_serial_low;
 EXPORT_SYMBOL(system_serial_low);
 
@@ -839,8 +841,25 @@ arch_initcall(customize_machine);
 
 static int __init init_machine_late(void)
 {
+	struct device_node *root;
+	int ret;
+
 	if (machine_desc->init_late)
 		machine_desc->init_late();
+
+	root = of_find_node_by_path("/");
+	if (root) {
+		ret = of_property_read_string(root, "serial-number",
+					      &system_serial);
+		if (ret)
+			system_serial = NULL;
+	}
+
+	if (!system_serial)
+		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
+					  system_serial_high,
+					  system_serial_low);
+
 	return 0;
 }
 late_initcall(init_machine_late);
@@ -936,7 +955,9 @@ void __init setup_arch(char **cmdline_p)
 
 	parse_early_param();
 
-	early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
+#ifdef CONFIG_MMU
+	early_paging_init(mdesc);
+#endif
 	setup_dma_zone(mdesc);
 	sanity_check_meminfo();
 	arm_memblock_init(mdesc);
@@ -1109,8 +1130,7 @@ static int c_show(struct seq_file *m, void *v)
 
 	seq_printf(m, "Hardware\t: %s\n", machine_name);
 	seq_printf(m, "Revision\t: %04x\n", system_rev);
-	seq_printf(m, "Serial\t\t: %08x%08x\n",
-		   system_serial_high, system_serial_low);
+	seq_printf(m, "Serial\t\t: %s\n", system_serial);
 
 	return 0;
 }
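
User-visible effect of the setup.c hunks: the Serial line in /proc/cpuinfo now carries the DT serial-number string when the root node provides one, falling back to the old %08x%08x rendering of system_serial_high/low otherwise. A throwaway userspace check (hypothetical demo, plain POSIX C):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/cpuinfo", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "Serial", 6))
			fputs(line, stdout);	/* e.g. "Serial\t\t: <string>" */
	fclose(f);
	return 0;
}
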
@@ -81,7 +81,7 @@ ENTRY(__cpu_suspend)
 	mov	r1, r4			@ size of save block
 	add	r0, sp, #8		@ pointer to save block
 	bl	__cpu_suspend_save
-	adr	lr, BSYM(cpu_suspend_abort)
+	badr	lr, cpu_suspend_abort
 	ldmfd	sp!, {r0, pc}		@ call suspend fn
 ENDPROC(__cpu_suspend)
 	.ltorg
@@ -122,7 +122,7 @@ ENDPROC(cpu_resume_after_mmu)
 #ifdef CONFIG_MMU
 	.arm
 ENTRY(cpu_resume_arm)
- THUMB(	adr	r9, BSYM(1f)	)	@ Kernel is entered in ARM.
+ THUMB(	badr	r9, 1f		)	@ Kernel is entered in ARM.
 THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)
@@ -86,9 +86,11 @@ void __init smp_set_ops(struct smp_operations *ops)
 
 static unsigned long get_arch_pgd(pgd_t *pgd)
 {
-	phys_addr_t pgdir = virt_to_idmap(pgd);
-	BUG_ON(pgdir & ARCH_PGD_MASK);
-	return pgdir >> ARCH_PGD_SHIFT;
+#ifdef CONFIG_ARM_LPAE
+	return __phys_to_pfn(virt_to_phys(pgd));
+#else
+	return virt_to_phys(pgd);
+#endif
 }
 
 int __cpu_up(unsigned int cpu, struct task_struct *idle)
@@ -108,7 +110,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 #endif
 
 #ifdef CONFIG_MMU
-	secondary_data.pgdir = get_arch_pgd(idmap_pgd);
+	secondary_data.pgdir = virt_to_phys(idmap_pgd);
 	secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
 #endif
 	sync_cache_w(&secondary_data);
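
The LPAE branch hands the swapper pgd over as a page-frame number because a physical address above 4GB cannot ride in a 32-bit slot, while a PFN can; the idmap pgd stays a plain physical address since the identity-mapped region the secondary boots through is reachable below 4GB by construction. The arithmetic, as a hedged standalone sketch (not kernel code; PAGE_SHIFT is 12 on ARM):

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
	/* A page-aligned physical address above 4GB, as LPAE allows */
	uint64_t pgd_phys = 0x880000000ULL;
	uint32_t pfn = (uint32_t)(pgd_phys >> PAGE_SHIFT);

	/* The PFN fits a 32-bit register; the address itself would not */
	assert(((uint64_t)pfn << PAGE_SHIFT) == pgd_phys);
	return 0;
}
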
@@ -17,6 +17,9 @@
 #include <asm/mach/map.h>
 #include <asm/memory.h>
 #include <asm/system_info.h>
+#include <asm/traps.h>
+
+#define TCMTR_FORMAT_MASK	0xe0000000U
 
 static struct gen_pool *tcm_pool;
 static bool dtcm_present;
@@ -175,6 +178,77 @@ static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks,
 	return 0;
 }
 
+/*
+ * When we are running in the non-secure world and the secure world
+ * has not explicitly given us access to the TCM we will get an
+ * undefined error when reading the TCM region register in the
+ * setup_tcm_bank function (above).
+ *
+ * There are two variants of this register read that we need to trap,
+ * the read for the data TCM and the read for the instruction TCM:
+ *  c0370628:	ee196f11	mrc	15, 0, r6, cr9, cr1, {0}
+ *  c0370674:	ee196f31	mrc	15, 0, r6, cr9, cr1, {1}
+ *
+ * Our undef hook mask explicitly matches all fields of the encoded
+ * instruction other than the destination register.  The mask also
+ * only allows operand 2 to have the values 0 or 1.
+ *
+ * The undefined hook is defined as __init and __initdata, and therefore
+ * must be removed before tcm_init returns.
+ *
+ * In this particular case (MRC with ARM condition code ALways) the
+ * Thumb-2 and ARM instruction encoding are identical, so this hook
+ * will work on a Thumb-2 kernel.
+ *
+ * See A8.8.107, DDI0406C_C ARM Architecture Reference Manual, Encoding
+ * T1/A1 for the bit-by-bit details.
+ *
+ *  mrc   p15, 0, XX, c9, c1, 0
+ *  mrc   p15, 0, XX, c9, c1, 1
+ *   |  |  |   |   |   |   |  +---- opc2           0|1 = 000|001
+ *   |  |  |   |   |   |   +------- CRm              0 = 0001
+ *   |  |  |   |   |   +----------- CRn              0 = 1001
+ *   |  |  |   |   +--------------- Rt               ? = ????
+ *   |  |  |   +------------------- opc1             0 = 000
+ *   |  |  +----------------------- coproc          15 = 1111
+ *   |  +-------------------------- condition ALways = 1110
+ *   +----------------------------- instruction    MRC = 1110
+ *
+ * Encoding this as per A8.8.107 of DDI0406C, Encoding T1/A1, yields:
+ *  1111 1111 1111 1111 0000 1111 1101 1111 Required Mask
+ *  1110 1110 0001 1001 ???? 1111 0001 0001 mrc p15, 0, XX, c9, c1, 0
+ *  1110 1110 0001 1001 ???? 1111 0011 0001 mrc p15, 0, XX, c9, c1, 1
+ *  [  ] [  ] [  ]|[  ] [  ] [  ] [  ]|[  ]
+ *   |    |    | |  |    |    |    | |  +--- CRm
+ *   |    |    | |  |    |    |    | +----- SBO
+ *   |    |    | |  |    |    |    +------- opc2
+ *   |    |    | |  |    |    +----------- coproc
+ *   |    |    | |  |    +---------------- Rt
+ *   |    |    | |  +--------------------- CRn
+ *   |    |    | +------------------------- SBO
+ *   |    |    +--------------------------- opc1
+ *   |    +------------------------------- instruction
+ *   +------------------------------------ condition
+ */
+#define TCM_REGION_READ_MASK	0xffff0fdf
+#define TCM_REGION_READ_INSTR	0xee190f11
+#define DEST_REG_SHIFT		12
+#define DEST_REG_MASK		0xf
+
+static int __init tcm_handler(struct pt_regs *regs, unsigned int instr)
+{
+	regs->uregs[(instr >> DEST_REG_SHIFT) & DEST_REG_MASK] = 0;
+	regs->ARM_pc += 4;
+	return 0;
+}
+
+static struct undef_hook tcm_hook __initdata = {
+	.instr_mask	= TCM_REGION_READ_MASK,
+	.instr_val	= TCM_REGION_READ_INSTR,
+	.cpsr_mask	= MODE_MASK,
+	.cpsr_val	= SVC_MODE,
+	.fn		= tcm_handler
+};
+
 /*
  * This initializes the TCM memory
  */
@@ -204,9 +278,18 @@ void __init tcm_init(void)
 	}
 
 	tcm_status = read_cpuid_tcmstatus();
+
+	/*
+	 * This code only supports v6-compatible TCMTR implementations.
+	 */
+	if (tcm_status & TCMTR_FORMAT_MASK)
+		return;
+
 	dtcm_banks = (tcm_status >> 16) & 0x03;
 	itcm_banks = (tcm_status & 0x03);
+
+	register_undef_hook(&tcm_hook);
+
 	/* Values greater than 2 for D/ITCM banks are "reserved" */
 	if (dtcm_banks > 2)
 		dtcm_banks = 0;
@@ -218,7 +301,7 @@ void __init tcm_init(void)
 	for (i = 0; i < dtcm_banks; i++) {
 		ret = setup_tcm_bank(0, i, dtcm_banks, &dtcm_end);
 		if (ret)
-			return;
+			goto unregister;
 	}
 	/* This means you compiled more code than fits into DTCM */
 	if (dtcm_code_sz > (dtcm_end - DTCM_OFFSET)) {
@@ -227,6 +310,12 @@ void __init tcm_init(void)
 			dtcm_code_sz, (dtcm_end - DTCM_OFFSET));
 		goto no_dtcm;
 	}
+	/*
+	 * This means that the DTCM sizes were 0 or the DTCM banks
+	 * were inaccessible due to TrustZone configuration.
+	 */
+	if (!(dtcm_end - DTCM_OFFSET))
+		goto no_dtcm;
 	dtcm_res.end = dtcm_end - 1;
 	request_resource(&iomem_resource, &dtcm_res);
 	dtcm_iomap[0].length = dtcm_end - DTCM_OFFSET;
@@ -250,15 +339,21 @@ no_dtcm:
 	for (i = 0; i < itcm_banks; i++) {
 		ret = setup_tcm_bank(1, i, itcm_banks, &itcm_end);
 		if (ret)
-			return;
+			goto unregister;
 	}
 	/* This means you compiled more code than fits into ITCM */
 	if (itcm_code_sz > (itcm_end - ITCM_OFFSET)) {
 		pr_info("CPU ITCM: %u bytes of code compiled to "
 			"ITCM but only %lu bytes of ITCM present\n",
 			itcm_code_sz, (itcm_end - ITCM_OFFSET));
-		return;
+		goto unregister;
 	}
+	/*
+	 * This means that the ITCM sizes were 0 or the ITCM banks
+	 * were inaccessible due to TrustZone configuration.
	 */
+	if (!(itcm_end - ITCM_OFFSET))
+		goto unregister;
 	itcm_res.end = itcm_end - 1;
 	request_resource(&iomem_resource, &itcm_res);
 	itcm_iomap[0].length = itcm_end - ITCM_OFFSET;
@@ -275,6 +370,9 @@ no_dtcm:
 		pr_info("CPU ITCM: %u bytes of code compiled to ITCM but no "
 			"ITCM banks present in CPU\n", itcm_code_sz);
 	}
+
+unregister:
+	unregister_undef_hook(&tcm_hook);
 }
 
 /*
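
The hook's mask/value pair in action: a standalone demo that classifies opcodes against TCM_REGION_READ_MASK/INSTR and pulls out the destination register the handler zeroes. Plain C, compile-and-run; the constants mirror the ones added above, and the first two words are the exact opcodes from the comment:

#include <stdint.h>
#include <stdio.h>

#define TCM_REGION_READ_MASK	0xffff0fdfU
#define TCM_REGION_READ_INSTR	0xee190f11U
#define DEST_REG_SHIFT		12
#define DEST_REG_MASK		0xfU

int main(void)
{
	/* DTCM read, ITCM read, and an opc2=2 variant that must not match */
	uint32_t instrs[] = { 0xee196f11, 0xee196f31, 0xee190f51 };

	for (int i = 0; i < 3; i++) {
		uint32_t in = instrs[i];

		if ((in & TCM_REGION_READ_MASK) == TCM_REGION_READ_INSTR)
			printf("%08x traps, Rt = r%u\n", in,
			       (in >> DEST_REG_SHIFT) & DEST_REG_MASK);
		else
			printf("%08x not matched\n", in);
	}
	return 0;
}
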
@@ -749,14 +749,6 @@ late_initcall(arm_mrc_hook_init);
 
 #endif
 
-void __bad_xchg(volatile void *ptr, int size)
-{
-	pr_err("xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n",
-	       __builtin_return_address(0), ptr, size);
-	BUG();
-}
-EXPORT_SYMBOL(__bad_xchg);
-
 /*
  * A data abort trap was taken, but we did not handle the instruction.
  * Try to abort the user program, or panic if it was the kernel.
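
For context on the removal: per the merge summary, unsupported xchg sizes now fail at link time instead of BUG()ing at runtime, by leaving the fallback symbol deliberately undefined so any surviving reference breaks the build. The idiom in miniature (names hypothetical, not the kernel's __xchg; the real implementation uses inline asm rather than __sync builtins):

/* Declared but never defined anywhere */
extern unsigned long __demo_bad_xchg(volatile void *ptr, int size);

#define demo_xchg(ptr, new) ({						\
	unsigned long __ret;						\
	switch (sizeof(*(ptr))) {					\
	case 4:								\
		__ret = __sync_lock_test_and_set((ptr), (new));		\
		break;							\
	default:							\
		/* Dead code for supported sizes, so the symbol is	\
		 * never referenced; a bad size leaves an undefined	\
		 * reference and the link fails.			\
		 */							\
		__ret = __demo_bad_xchg((ptr), sizeof(*(ptr)));		\
		break;							\
	}								\
	__ret;								\
})
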