Merge branch 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm
Pull ARM updates from Russell King:
 "Included in this update:

   - perf updates from Will Deacon:

     The main changes are callchain stability fixes from Jean Pihet and
     event mapping and PMU name rework from Mark Rutland.  The latter is
     preparatory work for enabling some code re-use with arm64 in the
     future.

   - updates for nommu from Uwe Kleine-König:

     Two different fixes for the same problem making some ARM nommu
     configurations not boot since 3.6-rc1.  The problem is that
     user_addr_max returned the biggest available RAM address, which
     makes some copy_from_user variants fail to read from XIP memory.

   - deprecate the legacy OMAP DMA API, in preparation for its removal.
     The popular drivers have been converted over, leaving a very small
     number of rarely used drivers, which hopefully can be converted
     during the next cycle with a bit more visibility (and hopefully
     people popping out of the woodwork to help test).

   - more tweaks for BE systems, particularly with the kernel image
     format.  In connection with this, I've cleaned up the way we
     generate the linker script for the decompressor.

   - removal of hard-coded assumptions about the kernel stack size,
     making everything depend on the value of THREAD_SIZE_ORDER.

   - MCPM updates from Nicolas Pitre.

   - make it easier to do proper CPU part number checks (which should
     always include the vendor field).

   - assembly code optimisation: use the "bx" instruction when
     returning from a function on ARMv6+ rather than "mov pc, reg".

   - save the last kernel misaligned fault location and report it via
     the procfs alignment file.

   - clean up the way we create the initial stack frame, which is a
     repeated pattern in several different locations.

   - support for 8-byte get_user(), needed for some DRM implementations.

   - MCS locking from Will Deacon.

   - save and restore a few more Cortex-A9 registers (for errata
     workarounds).

   - fix various aspects of the SWP emulation, and the ELF hwcap for
     the SWP instruction.

   - update the LPAE logic for pte_write and pmd_write to make it more
     correct.

   - support for Broadcom Brahma15 CPU cores.

   - ARM assembly crypto updates from Ard Biesheuvel"

* 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm: (53 commits)
  ARM: add comments to the early page table remap code
  ARM: 8122/1: smp_scu: enable SCU standby support
  ARM: 8121/1: smp_scu: use macro for SCU enable bit
  ARM: 8120/1: crypto: sha512: add ARM NEON implementation
  ARM: 8119/1: crypto: sha1: add ARM NEON implementation
  ARM: 8118/1: crypto: sha1: make use of common SHA-1 structures
  ARM: 8113/1: remove remaining definitions of PLAT_PHYS_OFFSET from <mach/memory.h>
  ARM: 8111/1: Enable erratum 798181 for Broadcom Brahma-B15
  ARM: 8110/1: do CPU-specific init for Broadcom Brahma15 cores
  ARM: 8109/1: mm: Modify pte_write and pmd_write logic for LPAE
  ARM: 8108/1: mm: Introduce {pte,pmd}_isset and {pte,pmd}_isclear
  ARM: hwcap: disable HWCAP_SWP if the CPU advertises it has exclusives
  ARM: SWP emulation: only initialise on ARMv7 CPUs
  ARM: SWP emulation: always enable when SMP is enabled
  ARM: 8103/1: save/restore Cortex-A9 CP15 registers on suspend/resume
  ARM: 8098/1: mcs lock: implement wfe-based polling for MCS locking
  ARM: 8091/2: add get_user() support for 8 byte types
  ARM: 8097/1: unistd.h: relocate comments back to place
  ARM: 8096/1: Describe required sort order for textofs-y (TEXT_OFFSET)
  ARM: 8090/1: add revision info for PL310 errata 588369 and 727915
  ...
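A note on the "bx"-on-return change, since it accounts for most of the hunks below: every "mov pc, reg" becomes "ret reg" (or a conditional "reteq"/"retne"/"reths" variant), funnelling returns through a new assembler macro instead of a raw instruction. The macro body is not part of this excerpt; the following is a minimal sketch of the idea, with the per-condition-code generation omitted — treat the exact shape as an assumption:

	.macro	ret, reg
#if __LINUX_ARM_ARCH__ < 6
	mov	pc, \reg		@ pre-ARMv6: bx is not guaranteed to exist
#else
	.ifeqs	"\reg", "lr"
	bx	\reg			@ ARMv6+: bx lr is a predictable function return
	.else
	mov	pc, \reg		@ other registers keep the mov pc form
	.endif
#endif
	.endm

On ARMv6 and later, "bx lr" lets the core's return-stack predictor recognise the return, which "mov pc, lr" does not.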
@@ -90,7 +90,7 @@ ENTRY(printascii)
 ldrneb r1, [r0], #1
 teqne r1, #0
 bne 1b
-mov pc, lr
+ret lr
 ENDPROC(printascii)
 
 ENTRY(printch)
@@ -105,7 +105,7 @@ ENTRY(debug_ll_addr)
 addruart r2, r3, ip
 str r2, [r0]
 str r3, [r1]
-mov pc, lr
+ret lr
 ENDPROC(debug_ll_addr)
 #endif
 
@@ -116,7 +116,7 @@ ENTRY(printascii)
 mov r0, #0x04 @ SYS_WRITE0
 ARM( svc #0x123456 )
 THUMB( svc #0xab )
-mov pc, lr
+ret lr
 ENDPROC(printascii)
 
 ENTRY(printch)
@@ -125,14 +125,14 @@ ENTRY(printch)
 mov r0, #0x03 @ SYS_WRITEC
 ARM( svc #0x123456 )
 THUMB( svc #0xab )
-mov pc, lr
+ret lr
 ENDPROC(printch)
 
 ENTRY(debug_ll_addr)
 mov r2, #0
 str r2, [r0]
 str r2, [r1]
-mov pc, lr
+ret lr
 ENDPROC(debug_ll_addr)
 
 #endif

@@ -224,7 +224,7 @@ svc_preempt:
 1: bl preempt_schedule_irq @ irq en/disable is done inside
 ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
 tst r0, #_TIF_NEED_RESCHED
-moveq pc, r8 @ go again
+reteq r8 @ go again
 b 1b
 #endif
 
@@ -490,7 +490,7 @@ ENDPROC(__und_usr)
 .pushsection .fixup, "ax"
 .align 2
 4: str r4, [sp, #S_PC] @ retry current instruction
-mov pc, r9
+ret r9
 .popsection
 .pushsection __ex_table,"a"
 .long 1b, 4b
@@ -552,7 +552,7 @@ call_fpe:
 #endif
 tst r0, #0x08000000 @ only CDP/CPRT/LDC/STC have bit 27
 tstne r0, #0x04000000 @ bit 26 set on both ARM and Thumb-2
-moveq pc, lr
+reteq lr
 and r8, r0, #0x00000f00 @ mask out CP number
 THUMB( lsr r8, r8, #8 )
 mov r7, #1
@@ -571,33 +571,33 @@ call_fpe:
 THUMB( add pc, r8 )
 nop
 
-movw_pc lr @ CP#0
+ret.w lr @ CP#0
 W(b) do_fpe @ CP#1 (FPE)
 W(b) do_fpe @ CP#2 (FPE)
-movw_pc lr @ CP#3
+ret.w lr @ CP#3
 #ifdef CONFIG_CRUNCH
 b crunch_task_enable @ CP#4 (MaverickCrunch)
 b crunch_task_enable @ CP#5 (MaverickCrunch)
 b crunch_task_enable @ CP#6 (MaverickCrunch)
 #else
-movw_pc lr @ CP#4
-movw_pc lr @ CP#5
-movw_pc lr @ CP#6
+ret.w lr @ CP#4
+ret.w lr @ CP#5
+ret.w lr @ CP#6
 #endif
-movw_pc lr @ CP#7
-movw_pc lr @ CP#8
-movw_pc lr @ CP#9
+ret.w lr @ CP#7
+ret.w lr @ CP#8
+ret.w lr @ CP#9
 #ifdef CONFIG_VFP
 W(b) do_vfp @ CP#10 (VFP)
 W(b) do_vfp @ CP#11 (VFP)
 #else
-movw_pc lr @ CP#10 (VFP)
-movw_pc lr @ CP#11 (VFP)
+ret.w lr @ CP#10 (VFP)
+ret.w lr @ CP#11 (VFP)
 #endif
-movw_pc lr @ CP#12
-movw_pc lr @ CP#13
-movw_pc lr @ CP#14 (Debug)
-movw_pc lr @ CP#15 (Control)
+ret.w lr @ CP#12
+ret.w lr @ CP#13
+ret.w lr @ CP#14 (Debug)
+ret.w lr @ CP#15 (Control)
 
 #ifdef NEED_CPU_ARCHITECTURE
 .align 2
@@ -649,7 +649,7 @@ ENTRY(fp_enter)
 .popsection
 
 ENTRY(no_fp)
-mov pc, lr
+ret lr
 ENDPROC(no_fp)
 
 __und_usr_fault_32:
@@ -745,7 +745,7 @@ ENDPROC(__switch_to)
 #ifdef CONFIG_ARM_THUMB
 bx \reg
 #else
-mov pc, \reg
+ret \reg
 #endif
 .endm
 
@@ -837,7 +837,7 @@ kuser_cmpxchg64_fixup:
 #if __LINUX_ARM_ARCH__ < 6
 bcc kuser_cmpxchg32_fixup
 #endif
-mov pc, lr
+ret lr
 .previous
 
 #else
@@ -905,7 +905,7 @@ kuser_cmpxchg32_fixup:
 subs r8, r4, r7
 rsbcss r8, r8, #(2b - 1b)
 strcs r7, [sp, #S_PC]
-mov pc, lr
+ret lr
 .previous
 
 #else

@@ -8,6 +8,7 @@
 * published by the Free Software Foundation.
 */
 
+#include <asm/assembler.h>
 #include <asm/unistd.h>
 #include <asm/ftrace.h>
 #include <asm/unwind.h>
@@ -88,7 +89,7 @@ ENTRY(ret_from_fork)
 cmp r5, #0
 movne r0, r4
 adrne lr, BSYM(1f)
-movne pc, r5
+retne r5
 1: get_thread_info tsk
 b ret_slow_syscall
 ENDPROC(ret_from_fork)
@@ -290,7 +291,7 @@ ENDPROC(ftrace_graph_caller_old)
 
 .macro mcount_exit
 ldmia sp!, {r0-r3, ip, lr}
-mov pc, ip
+ret ip
 .endm
 
 ENTRY(__gnu_mcount_nc)
@@ -298,7 +299,7 @@ UNWIND(.fnstart)
 #ifdef CONFIG_DYNAMIC_FTRACE
 mov ip, lr
 ldmia sp!, {lr}
-mov pc, ip
+ret ip
 #else
 __mcount
 #endif
@@ -333,12 +334,12 @@ return_to_handler:
 bl ftrace_return_to_handler
 mov lr, r0 @ r0 has real ret addr
 ldmia sp!, {r0-r3}
-mov pc, lr
+ret lr
 #endif
 
 ENTRY(ftrace_stub)
 .Lftrace_stub:
-mov pc, lr
+ret lr
 ENDPROC(ftrace_stub)
 
 #endif /* CONFIG_FUNCTION_TRACER */
@@ -561,7 +562,7 @@ sys_mmap2:
 streq r5, [sp, #4]
 beq sys_mmap_pgoff
 mov r0, #-EINVAL
-mov pc, lr
+ret lr
 #else
 str r5, [sp, #4]
 b sys_mmap_pgoff

@@ -240,12 +240,6 @@
 movs pc, lr @ return & move spsr_svc into cpsr
 .endm
 
-@
-@ 32-bit wide "mov pc, reg"
-@
-.macro movw_pc, reg
-mov pc, \reg
-.endm
 #else /* CONFIG_THUMB2_KERNEL */
 .macro svc_exit, rpsr, irq = 0
 .if \irq != 0
@@ -304,14 +298,6 @@
 movs pc, lr @ return & move spsr_svc into cpsr
 .endm
 #endif /* ifdef CONFIG_CPU_V7M / else */
-
-@
-@ 32-bit wide "mov pc, reg"
-@
-.macro movw_pc, reg
-mov pc, \reg
-nop
-.endm
 #endif /* !CONFIG_THUMB2_KERNEL */
 
 /*

@@ -32,7 +32,7 @@ ENTRY(__set_fiq_regs)
 ldr lr, [r0]
 msr cpsr_c, r1 @ return to SVC mode
 mov r0, r0 @ avoid hazard prior to ARMv4
-mov pc, lr
+ret lr
 ENDPROC(__set_fiq_regs)
 
 ENTRY(__get_fiq_regs)
@@ -45,5 +45,5 @@ ENTRY(__get_fiq_regs)
 str lr, [r0]
 msr cpsr_c, r1 @ return to SVC mode
 mov r0, r0 @ avoid hazard prior to ARMv4
-mov pc, lr
+ret lr
 ENDPROC(__get_fiq_regs)

@@ -10,6 +10,7 @@
 * published by the Free Software Foundation.
 *
 */
+#include <asm/assembler.h>
 
 #define ATAG_CORE 0x54410001
 #define ATAG_CORE_SIZE ((2*4 + 3*4) >> 2)
@@ -61,10 +62,10 @@ __vet_atags:
 cmp r5, r6
 bne 1f
 
-2: mov pc, lr @ atag/dtb pointer is ok
+2: ret lr @ atag/dtb pointer is ok
 
 1: mov r2, #0
-mov pc, lr
+ret lr
 ENDPROC(__vet_atags)
 
 /*
@@ -162,7 +163,7 @@ __lookup_processor_type:
 cmp r5, r6
 blo 1b
 mov r5, #0 @ unknown processor
-2: mov pc, lr
+2: ret lr
 ENDPROC(__lookup_processor_type)
 
 /*

@@ -82,7 +82,7 @@ ENTRY(stext)
 adr lr, BSYM(1f) @ return (PIC) address
 ARM( add pc, r10, #PROCINFO_INITFUNC )
 THUMB( add r12, r10, #PROCINFO_INITFUNC )
-THUMB( mov pc, r12 )
+THUMB( ret r12 )
 1: b __after_proc_init
 ENDPROC(stext)
 
@@ -119,7 +119,7 @@ ENTRY(secondary_startup)
 mov r13, r12 @ __secondary_switched address
 ARM( add pc, r10, #PROCINFO_INITFUNC )
 THUMB( add r12, r10, #PROCINFO_INITFUNC )
-THUMB( mov pc, r12 )
+THUMB( ret r12 )
 ENDPROC(secondary_startup)
 
 ENTRY(__secondary_switched)
@@ -164,7 +164,7 @@ __after_proc_init:
 #endif
 mcr p15, 0, r0, c1, c0, 0 @ write control reg
 #endif /* CONFIG_CPU_CP15 */
-mov pc, r13
+ret r13
 ENDPROC(__after_proc_init)
 .ltorg
 
@@ -254,7 +254,7 @@ ENTRY(__setup_mpu)
 orr r0, r0, #CR_M @ Set SCTRL.M (MPU on)
 mcr p15, 0, r0, c1, c0, 0 @ Enable MPU
 isb
-mov pc,lr
+ret lr
 ENDPROC(__setup_mpu)
 #endif
 #include "head-common.S"

@@ -140,7 +140,7 @@ ENTRY(stext)
 mov r8, r4 @ set TTBR1 to swapper_pg_dir
 ARM( add pc, r10, #PROCINFO_INITFUNC )
 THUMB( add r12, r10, #PROCINFO_INITFUNC )
-THUMB( mov pc, r12 )
+THUMB( ret r12 )
 1: b __enable_mmu
 ENDPROC(stext)
 .ltorg
@@ -335,7 +335,7 @@ __create_page_tables:
 sub r4, r4, #0x1000 @ point to the PGD table
 mov r4, r4, lsr #ARCH_PGD_SHIFT
 #endif
-mov pc, lr
+ret lr
 ENDPROC(__create_page_tables)
 .ltorg
 .align
@@ -383,7 +383,7 @@ ENTRY(secondary_startup)
 ARM( add pc, r10, #PROCINFO_INITFUNC ) @ initialise processor
 @ (return control reg)
 THUMB( add r12, r10, #PROCINFO_INITFUNC )
-THUMB( mov pc, r12 )
+THUMB( ret r12 )
 ENDPROC(secondary_startup)
 
 /*
@@ -468,7 +468,7 @@ ENTRY(__turn_mmu_on)
 instr_sync
 mov r3, r3
 mov r3, r13
-mov pc, r3
+ret r3
 __turn_mmu_on_end:
 ENDPROC(__turn_mmu_on)
 .popsection
@@ -487,7 +487,7 @@ __fixup_smp:
 orr r4, r4, #0x0000b000
 orr r4, r4, #0x00000020 @ val 0x4100b020
 teq r3, r4 @ ARM 11MPCore?
-moveq pc, lr @ yes, assume SMP
+reteq lr @ yes, assume SMP
 
 mrc p15, 0, r0, c0, c0, 5 @ read MPIDR
 and r0, r0, #0xc0000000 @ multiprocessing extensions and
@@ -500,7 +500,7 @@ __fixup_smp:
 orr r4, r4, #0x0000c000
 orr r4, r4, #0x00000090
 teq r3, r4 @ Check for ARM Cortex-A9
-movne pc, lr @ Not ARM Cortex-A9,
+retne lr @ Not ARM Cortex-A9,
 
 @ If a future SoC *does* use 0x0 as the PERIPH_BASE, then the
 @ below address check will need to be #ifdef'd or equivalent
@@ -512,7 +512,7 @@ __fixup_smp:
 ARM_BE8(rev r0, r0) @ byteswap if big endian
 and r0, r0, #0x3 @ number of CPUs
 teq r0, #0x0 @ is 1?
-movne pc, lr
+retne lr
 
 __fixup_smp_on_up:
 adr r0, 1f
@@ -539,7 +539,7 @@ smp_on_up:
 .text
 __do_fixup_smp_on_up:
 cmp r4, r5
-movhs pc, lr
+reths lr
 ldmia r4!, {r0, r6}
 ARM( str r6, [r0, r3] )
 THUMB( add r0, r0, r3 )
@@ -672,7 +672,7 @@ ARM_BE8(rev16 ip, ip)
 2: cmp r4, r5
 ldrcc r7, [r4], #4 @ use branch for delay slot
 bcc 1b
-mov pc, lr
+ret lr
 #endif
 ENDPROC(__fixup_a_pv_table)
 

@@ -99,7 +99,7 @@ ENTRY(__hyp_stub_install_secondary)
 * immediately.
 */
 compare_cpu_mode_with_primary r4, r5, r6, r7
-movne pc, lr
+retne lr
 
 /*
 * Once we have given up on one CPU, we do not try to install the
@@ -111,7 +111,7 @@ ENTRY(__hyp_stub_install_secondary)
 */
 
 cmp r4, #HYP_MODE
-movne pc, lr @ give up if the CPU is not in HYP mode
+retne lr @ give up if the CPU is not in HYP mode
 
 /*
 * Configure HSCTLR to set correct exception endianness/instruction set
@@ -201,7 +201,7 @@ ENDPROC(__hyp_get_vectors)
 @ fall through
 ENTRY(__hyp_set_vectors)
 __HVC(0)
-mov pc, lr
+ret lr
 ENDPROC(__hyp_set_vectors)
 
 #ifndef ZIMAGE

@@ -100,7 +100,7 @@ ENTRY(iwmmxt_task_enable)
 get_thread_info r10
 #endif
 4: dec_preempt_count r10, r3
-mov pc, r9 @ normal exit from exception
+ret r9 @ normal exit from exception
 
 concan_save:
 
@@ -144,7 +144,7 @@ concan_dump:
 wstrd wR15, [r1, #MMX_WR15]
 
 2: teq r0, #0 @ anything to load?
-moveq pc, lr @ if not, return
+reteq lr @ if not, return
 
 concan_load:
 
@@ -177,10 +177,10 @@ concan_load:
 @ clear CUP/MUP (only if r1 != 0)
 teq r1, #0
 mov r2, #0
-moveq pc, lr
+reteq lr
 
 tmcr wCon, r2
-mov pc, lr
+ret lr
 
 /*
 * Back up Concan regs to save area and disable access to them
@@ -266,7 +266,7 @@ ENTRY(iwmmxt_task_copy)
 mov r3, lr @ preserve return address
 bl concan_dump
 msr cpsr_c, ip @ restore interrupt mode
-mov pc, r3
+ret r3
 
 /*
 * Restore Concan state from given memory address
@@ -302,7 +302,7 @@ ENTRY(iwmmxt_task_restore)
 mov r3, lr @ preserve return address
 bl concan_load
 msr cpsr_c, ip @ restore interrupt mode
-mov pc, r3
+ret r3
 
 /*
 * Concan handling on task switch
@@ -324,7 +324,7 @@ ENTRY(iwmmxt_task_switch)
 add r3, r0, #TI_IWMMXT_STATE @ get next task Concan save area
 ldr r2, [r2] @ get current Concan owner
 teq r2, r3 @ next task owns it?
-movne pc, lr @ no: leave Concan disabled
+retne lr @ no: leave Concan disabled
 
 1: @ flip Concan access
 XSC(eor r1, r1, #0x3)
@@ -351,7 +351,7 @@ ENTRY(iwmmxt_task_release)
 eors r0, r0, r1 @ if equal...
 streq r0, [r3] @ then clear ownership
 msr cpsr_c, r2 @ restore interrupts
-mov pc, lr
+ret lr
 
 .data
 concan_owner:

@@ -560,11 +560,16 @@ user_backtrace(struct frame_tail __user *tail,
 struct perf_callchain_entry *entry)
 {
 struct frame_tail buftail;
+unsigned long err;
 
 /* Also check accessibility of one struct frame_tail beyond */
 if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
 return NULL;
-if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
+
+pagefault_disable();
+err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
+pagefault_enable();
+
+if (err)
 return NULL;
 
 perf_callchain_store(entry, buftail.lr);
@@ -590,6 +595,10 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 }
 
 perf_callchain_store(entry, regs->ARM_pc);
+
+if (!current->mm)
+return;
+
 tail = (struct frame_tail __user *)regs->ARM_fp - 1;
 
 while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
@@ -621,10 +630,7 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 return;
 }
 
-fr.fp = regs->ARM_fp;
-fr.sp = regs->ARM_sp;
-fr.lr = regs->ARM_lr;
-fr.pc = regs->ARM_pc;
+arm_get_current_stackframe(regs, &fr);
 walk_stackframe(&fr, callchain_trace, entry);
 }
 

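The perf_callchain_kernel() hunk above (and the time.c/unwind.c hunks further down) replace the open-coded fp/sp/lr/pc copy with arm_get_current_stackframe(). The helper's definition is not part of this excerpt; a sketch of what it plausibly does, assuming the struct stackframe fields used here and the frame_pointer() accessor seen in the traps.c hunks:

/* Assumed shape of the helper; the real one lives in an ARM header. */
static inline void arm_get_current_stackframe(struct pt_regs *regs,
					      struct stackframe *frame)
{
	frame->fp = frame_pointer(regs);	/* ARM_fp, or the Thumb-2 frame register */
	frame->sp = regs->ARM_sp;
	frame->lr = regs->ARM_lr;
	frame->pc = regs->ARM_pc;
}

Centralising this means callers no longer hard-code ARM_fp, which is what lets the Thumb-2 frame-pointer handling be fixed in one place.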
@@ -233,14 +233,17 @@ static struct of_device_id cpu_pmu_of_device_ids[] = {
 {.compatible = "arm,cortex-a7-pmu", .data = armv7_a7_pmu_init},
 {.compatible = "arm,cortex-a5-pmu", .data = armv7_a5_pmu_init},
 {.compatible = "arm,arm11mpcore-pmu", .data = armv6mpcore_pmu_init},
-{.compatible = "arm,arm1176-pmu", .data = armv6pmu_init},
-{.compatible = "arm,arm1136-pmu", .data = armv6pmu_init},
+{.compatible = "arm,arm1176-pmu", .data = armv6_1176_pmu_init},
+{.compatible = "arm,arm1136-pmu", .data = armv6_1136_pmu_init},
 {.compatible = "qcom,krait-pmu", .data = krait_pmu_init},
 {},
 };
 
 static struct platform_device_id cpu_pmu_plat_device_ids[] = {
 {.name = "arm-pmu"},
+{.name = "armv6-pmu"},
+{.name = "armv7-pmu"},
+{.name = "xscale-pmu"},
 {},
 };
 
@@ -250,40 +253,43 @@ static struct platform_device_id cpu_pmu_plat_device_ids[] = {
 static int probe_current_pmu(struct arm_pmu *pmu)
 {
 int cpu = get_cpu();
-unsigned long implementor = read_cpuid_implementor();
-unsigned long part_number = read_cpuid_part_number();
 int ret = -ENODEV;
 
 pr_info("probing PMU on CPU %d\n", cpu);
 
+switch (read_cpuid_part()) {
 /* ARM Ltd CPUs. */
-if (implementor == ARM_CPU_IMP_ARM) {
-switch (part_number) {
-case ARM_CPU_PART_ARM1136:
-case ARM_CPU_PART_ARM1156:
-case ARM_CPU_PART_ARM1176:
-ret = armv6pmu_init(pmu);
-break;
-case ARM_CPU_PART_ARM11MPCORE:
-ret = armv6mpcore_pmu_init(pmu);
-break;
-case ARM_CPU_PART_CORTEX_A8:
-ret = armv7_a8_pmu_init(pmu);
-break;
-case ARM_CPU_PART_CORTEX_A9:
-ret = armv7_a9_pmu_init(pmu);
-break;
-}
-/* Intel CPUs [xscale]. */
-} else if (implementor == ARM_CPU_IMP_INTEL) {
-switch (xscale_cpu_arch_version()) {
-case ARM_CPU_XSCALE_ARCH_V1:
-ret = xscale1pmu_init(pmu);
-break;
-case ARM_CPU_XSCALE_ARCH_V2:
-ret = xscale2pmu_init(pmu);
-break;
+case ARM_CPU_PART_ARM1136:
+ret = armv6_1136_pmu_init(pmu);
+break;
+case ARM_CPU_PART_ARM1156:
+ret = armv6_1156_pmu_init(pmu);
+break;
+case ARM_CPU_PART_ARM1176:
+ret = armv6_1176_pmu_init(pmu);
+break;
+case ARM_CPU_PART_ARM11MPCORE:
+ret = armv6mpcore_pmu_init(pmu);
+break;
+case ARM_CPU_PART_CORTEX_A8:
+ret = armv7_a8_pmu_init(pmu);
+break;
+case ARM_CPU_PART_CORTEX_A9:
+ret = armv7_a9_pmu_init(pmu);
+break;
+
+default:
+if (read_cpuid_implementor() == ARM_CPU_IMP_INTEL) {
+switch (xscale_cpu_arch_version()) {
+case ARM_CPU_XSCALE_ARCH_V1:
+ret = xscale1pmu_init(pmu);
+break;
+case ARM_CPU_XSCALE_ARCH_V2:
+ret = xscale2pmu_init(pmu);
+break;
 }
 }
+break;
+}
 
 put_cpu();

@@ -65,13 +65,11 @@ enum armv6_counters {
 * accesses/misses in hardware.
 */
 static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
+PERF_MAP_ALL_UNSUPPORTED,
 [PERF_COUNT_HW_CPU_CYCLES] = ARMV6_PERFCTR_CPU_CYCLES,
 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6_PERFCTR_INSTR_EXEC,
-[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
-[PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC,
 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6_PERFCTR_BR_MISPREDICT,
-[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6_PERFCTR_IBUF_STALL,
 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV6_PERFCTR_LSU_FULL_STALL,
 };
@@ -79,116 +77,31 @@ static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
 static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 [PERF_COUNT_HW_CACHE_OP_MAX]
 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
-[C(L1D)] = {
-/*
- * The performance counters don't differentiate between read
- * and write accesses/misses so this isn't strictly correct,
- * but it's the best we can do. Writes and reads get
- * combined.
- */
-[C(OP_READ)] = {
-[C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
-[C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS,
-},
-[C(OP_WRITE)] = {
-[C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
-[C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS,
-},
-[C(OP_PREFETCH)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-},
-[C(L1I)] = {
-[C(OP_READ)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS,
-},
-[C(OP_WRITE)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-[C(OP_PREFETCH)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-},
-[C(LL)] = {
-[C(OP_READ)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-[C(OP_WRITE)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-[C(OP_PREFETCH)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-},
-[C(DTLB)] = {
-/*
- * The ARM performance counters can count micro DTLB misses,
- * micro ITLB misses and main TLB misses. There isn't an event
- * for TLB misses, so use the micro misses here and if users
- * want the main TLB misses they can use a raw counter.
- */
-[C(OP_READ)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS,
-},
-[C(OP_WRITE)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS,
-},
-[C(OP_PREFETCH)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-},
-[C(ITLB)] = {
-[C(OP_READ)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS,
-},
-[C(OP_WRITE)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS,
-},
-[C(OP_PREFETCH)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-},
-[C(BPU)] = {
-[C(OP_READ)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-[C(OP_WRITE)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-[C(OP_PREFETCH)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-},
-[C(NODE)] = {
-[C(OP_READ)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-[C(OP_WRITE)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-[C(OP_PREFETCH)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-},
+PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+/*
+ * The performance counters don't differentiate between read and write
+ * accesses/misses so this isn't strictly correct, but it's the best we
+ * can do. Writes and reads get combined.
+ */
+[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
+[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS,
+[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
+[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS,
+
+[C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS,
+
+/*
+ * The ARM performance counters can count micro DTLB misses, micro ITLB
+ * misses and main TLB misses. There isn't an event for TLB misses, so
+ * use the micro misses here and if users want the main TLB misses they
+ * can use a raw counter.
+ */
+[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS,
+[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS,
+
+[C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS,
+[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS,
 };
 
 enum armv6mpcore_perf_types {
@@ -220,13 +133,11 @@ enum armv6mpcore_perf_types {
 * accesses/misses in hardware.
 */
 static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
+PERF_MAP_ALL_UNSUPPORTED,
 [PERF_COUNT_HW_CPU_CYCLES] = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
-[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
-[PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC,
 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
-[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6MPCORE_PERFCTR_IBUF_STALL,
 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV6MPCORE_PERFCTR_LSU_FULL_STALL,
 };
@@ -234,114 +145,26 @@ static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
 static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 [PERF_COUNT_HW_CACHE_OP_MAX]
 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
-[C(L1D)] = {
-[C(OP_READ)] = {
-[C(RESULT_ACCESS)] =
-ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS,
-[C(RESULT_MISS)] =
-ARMV6MPCORE_PERFCTR_DCACHE_RDMISS,
-},
-[C(OP_WRITE)] = {
-[C(RESULT_ACCESS)] =
-ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS,
-[C(RESULT_MISS)] =
-ARMV6MPCORE_PERFCTR_DCACHE_WRMISS,
-},
-[C(OP_PREFETCH)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-},
-[C(L1I)] = {
-[C(OP_READ)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
-},
-[C(OP_WRITE)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-[C(OP_PREFETCH)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-},
-[C(LL)] = {
-[C(OP_READ)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-[C(OP_WRITE)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-[C(OP_PREFETCH)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-},
-[C(DTLB)] = {
-/*
- * The ARM performance counters can count micro DTLB misses,
- * micro ITLB misses and main TLB misses. There isn't an event
- * for TLB misses, so use the micro misses here and if users
- * want the main TLB misses they can use a raw counter.
- */
-[C(OP_READ)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS,
-},
-[C(OP_WRITE)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS,
-},
-[C(OP_PREFETCH)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-},
-[C(ITLB)] = {
-[C(OP_READ)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS,
-},
-[C(OP_WRITE)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS,
-},
-[C(OP_PREFETCH)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-},
-[C(BPU)] = {
-[C(OP_READ)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-[C(OP_WRITE)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-[C(OP_PREFETCH)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-},
-[C(NODE)] = {
-[C(OP_READ)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-[C(OP_WRITE)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-[C(OP_PREFETCH)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-},
+PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS,
+[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DCACHE_RDMISS,
+[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS,
+[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DCACHE_WRMISS,
+
+[C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
+
+/*
+ * The ARM performance counters can count micro DTLB misses, micro ITLB
+ * misses and main TLB misses. There isn't an event for TLB misses, so
+ * use the micro misses here and if users want the main TLB misses they
+ * can use a raw counter.
+ */
+[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS,
+[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS,
+
+[C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS,
+[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS,
 };
 
 static inline unsigned long
@@ -653,9 +476,8 @@ static int armv6_map_event(struct perf_event *event)
 &armv6_perf_cache_map, 0xFF);
 }
 
-static int armv6pmu_init(struct arm_pmu *cpu_pmu)
+static void armv6pmu_init(struct arm_pmu *cpu_pmu)
 {
-cpu_pmu->name = "v6";
 cpu_pmu->handle_irq = armv6pmu_handle_irq;
 cpu_pmu->enable = armv6pmu_enable_event;
 cpu_pmu->disable = armv6pmu_disable_event;
@@ -667,7 +489,26 @@ static int armv6pmu_init(struct arm_pmu *cpu_pmu)
 cpu_pmu->map_event = armv6_map_event;
 cpu_pmu->num_events = 3;
 cpu_pmu->max_period = (1LLU << 32) - 1;
-return 0;
 }
 
+static int armv6_1136_pmu_init(struct arm_pmu *cpu_pmu)
+{
+armv6pmu_init(cpu_pmu);
+cpu_pmu->name = "armv6_1136";
+return 0;
+}
+
+static int armv6_1156_pmu_init(struct arm_pmu *cpu_pmu)
+{
+armv6pmu_init(cpu_pmu);
+cpu_pmu->name = "armv6_1156";
+return 0;
+}
+
+static int armv6_1176_pmu_init(struct arm_pmu *cpu_pmu)
+{
+armv6pmu_init(cpu_pmu);
+cpu_pmu->name = "armv6_1176";
+return 0;
+}
 
@@ -687,7 +528,7 @@ static int armv6mpcore_map_event(struct perf_event *event)
 
 static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
 {
-cpu_pmu->name = "v6mpcore";
+cpu_pmu->name = "armv6_11mpcore";
 cpu_pmu->handle_irq = armv6pmu_handle_irq;
 cpu_pmu->enable = armv6pmu_enable_event;
 cpu_pmu->disable = armv6mpcore_pmu_disable_event;
@@ -703,7 +544,17 @@ static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
 return 0;
 }
 #else
-static int armv6pmu_init(struct arm_pmu *cpu_pmu)
+static int armv6_1136_pmu_init(struct arm_pmu *cpu_pmu)
 {
 return -ENODEV;
 }
+
+static int armv6_1156_pmu_init(struct arm_pmu *cpu_pmu)
+{
+return -ENODEV;
+}
+
+static int armv6_1176_pmu_init(struct arm_pmu *cpu_pmu)
+{
+return -ENODEV;
+}

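The three event-map rewrites above (and the xscale one below) depend on PERF_MAP_ALL_UNSUPPORTED and PERF_CACHE_MAP_ALL_UNSUPPORTED, which pre-fill every table slot as unsupported so the maps only need to name real events. The macro bodies are not in this excerpt; a sketch using GCC's range designators, with the bodies assumed:

#define PERF_MAP_ALL_UNSUPPORTED					\
	[0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED

#define PERF_CACHE_MAP_ALL_UNSUPPORTED					\
	[0 ... C(MAX) - 1] = {						\
		[0 ... C(OP_MAX) - 1] = {				\
			[0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED, \
		},							\
	}

Because later designated initializers override earlier ones, each map can start from "everything unsupported" and then list just the counters the CPU actually implements.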
File diff suppressed because it is too large
@@ -48,118 +48,31 @@ enum xscale_counters {
 };
 
 static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
+PERF_MAP_ALL_UNSUPPORTED,
 [PERF_COUNT_HW_CPU_CYCLES] = XSCALE_PERFCTR_CCNT,
 [PERF_COUNT_HW_INSTRUCTIONS] = XSCALE_PERFCTR_INSTRUCTION,
-[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
-[PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XSCALE_PERFCTR_BRANCH,
 [PERF_COUNT_HW_BRANCH_MISSES] = XSCALE_PERFCTR_BRANCH_MISS,
-[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = XSCALE_PERFCTR_ICACHE_NO_DELIVER,
-[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
 };
 
 static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 [PERF_COUNT_HW_CACHE_OP_MAX]
 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
-[C(L1D)] = {
-[C(OP_READ)] = {
-[C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS,
-[C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS,
-},
-[C(OP_WRITE)] = {
-[C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS,
-[C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS,
-},
-[C(OP_PREFETCH)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-},
-[C(L1I)] = {
-[C(OP_READ)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS,
-},
-[C(OP_WRITE)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-[C(OP_PREFETCH)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-},
-[C(LL)] = {
-[C(OP_READ)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-[C(OP_WRITE)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-[C(OP_PREFETCH)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-},
-[C(DTLB)] = {
-[C(OP_READ)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS,
-},
-[C(OP_WRITE)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS,
-},
-[C(OP_PREFETCH)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-},
-[C(ITLB)] = {
-[C(OP_READ)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS,
-},
-[C(OP_WRITE)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS,
-},
-[C(OP_PREFETCH)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-},
-[C(BPU)] = {
-[C(OP_READ)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-[C(OP_WRITE)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-[C(OP_PREFETCH)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-},
-[C(NODE)] = {
-[C(OP_READ)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-[C(OP_WRITE)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-[C(OP_PREFETCH)] = {
-[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
-[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
-},
-},
+PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS,
+[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS,
+[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS,
+[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS,
+
+[C(L1I)][C(OP_READ)][C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS,
+
+[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS,
+[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS,
+
+[C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS,
+[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS,
 };
 
 #define XSCALE_PMU_ENABLE 0x001
@@ -442,7 +355,7 @@ static int xscale_map_event(struct perf_event *event)
 
 static int xscale1pmu_init(struct arm_pmu *cpu_pmu)
 {
-cpu_pmu->name = "xscale1";
+cpu_pmu->name = "armv5_xscale1";
 cpu_pmu->handle_irq = xscale1pmu_handle_irq;
 cpu_pmu->enable = xscale1pmu_enable_event;
 cpu_pmu->disable = xscale1pmu_disable_event;
@@ -812,7 +725,7 @@ static inline void xscale2pmu_write_counter(struct perf_event *event, u32 val)
 
 static int xscale2pmu_init(struct arm_pmu *cpu_pmu)
 {
-cpu_pmu->name = "xscale2";
+cpu_pmu->name = "armv5_xscale2";
 cpu_pmu->handle_irq = xscale2pmu_handle_irq;
 cpu_pmu->enable = xscale2pmu_enable_event;
 cpu_pmu->disable = xscale2pmu_disable_event;

@@ -3,6 +3,7 @@
 */
 
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 #include <asm/kexec.h>
 
 .align 3 /* not needed for this code, but keeps fncpy() happy */
@@ -59,7 +60,7 @@ ENTRY(relocate_new_kernel)
 mov r0,#0
 ldr r1,kexec_mach_type
 ldr r2,kexec_boot_atags
-ARM( mov pc, lr )
+ARM( ret lr )
 THUMB( bx lr )
 
 .align

@@ -393,19 +393,34 @@ static void __init cpuid_init_hwcaps(void)
 elf_hwcap |= HWCAP_LPAE;
 }
 
-static void __init feat_v6_fixup(void)
+static void __init elf_hwcap_fixup(void)
 {
-int id = read_cpuid_id();
-
-if ((id & 0xff0f0000) != 0x41070000)
-return;
+unsigned id = read_cpuid_id();
+unsigned sync_prim;
 
 /*
 * HWCAP_TLS is available only on 1136 r1p0 and later,
 * see also kuser_get_tls_init.
 */
-if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
+if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
+((id >> 20) & 3) == 0) {
 elf_hwcap &= ~HWCAP_TLS;
+return;
+}
+
+/* Verify if CPUID scheme is implemented */
+if ((id & 0x000f0000) != 0x000f0000)
+return;
+
+/*
+ * If the CPU supports LDREX/STREX and LDREXB/STREXB,
+ * avoid advertising SWP; it may not be atomic with
+ * multiprocessing cores.
+ */
+sync_prim = ((read_cpuid_ext(CPUID_EXT_ISAR3) >> 8) & 0xf0) |
+((read_cpuid_ext(CPUID_EXT_ISAR4) >> 20) & 0x0f);
+if (sync_prim >= 0x13)
+elf_hwcap &= ~HWCAP_SWP;
 }
 
 /*
@@ -609,7 +624,7 @@ static void __init setup_processor(void)
 #endif
 erratum_a15_798181_init();
 
-feat_v6_fixup();
+elf_hwcap_fixup();
 
 cacheid_init();
 cpu_init();

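The new HWCAP_SWP test in elf_hwcap_fixup() above packs two CPUID fields into one byte before comparing. Restated as a standalone check (bit positions copied from the hunk; the helper name and the field reading in the comment are our interpretation, not from the source):

#include <stdbool.h>
#include <stdint.h>

/* The ID_ISAR3 synchronisation-primitive nibble goes in the high half
 * and the ID_ISAR4 fractional nibble in the low half; 0x13 or higher
 * is read as "full LDREX/STREX family present", in which case SWP is
 * dropped from the hwcaps since it may not be atomic alongside
 * exclusives on multiprocessor systems. */
static bool cpu_has_full_exclusives(uint32_t isar3, uint32_t isar4)
{
	uint32_t sync_prim = ((isar3 >> 8) & 0xf0) |
			     ((isar4 >> 20) & 0x0f);
	return sync_prim >= 0x13;
}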
@@ -107,7 +107,7 @@ ENTRY(cpu_resume_mmu)
 instr_sync
 mov r0, r0
 mov r0, r0
-mov pc, r3 @ jump to virtual address
+ret r3 @ jump to virtual address
 ENDPROC(cpu_resume_mmu)
 .popsection
 cpu_resume_after_mmu:

@@ -17,6 +17,8 @@
 #include <asm/cputype.h>
 
 #define SCU_CTRL 0x00
+#define SCU_ENABLE (1 << 0)
+#define SCU_STANDBY_ENABLE (1 << 5)
 #define SCU_CONFIG 0x04
 #define SCU_CPU_STATUS 0x08
 #define SCU_INVALIDATE 0x0c
@@ -50,10 +52,16 @@ void scu_enable(void __iomem *scu_base)
 
 scu_ctrl = readl_relaxed(scu_base + SCU_CTRL);
 /* already enabled? */
-if (scu_ctrl & 1)
+if (scu_ctrl & SCU_ENABLE)
 return;
 
-scu_ctrl |= 1;
+scu_ctrl |= SCU_ENABLE;
+
+/* Cortex-A9 earlier than r2p0 has no standby bit in SCU */
+if ((read_cpuid_id() & 0xff0ffff0) == 0x410fc090 &&
+(read_cpuid_id() & 0x00f0000f) >= 0x00200000)
+scu_ctrl |= SCU_STANDBY_ENABLE;
+
 writel_relaxed(scu_ctrl, scu_base + SCU_CTRL);
 
 /*

@@ -92,15 +92,19 @@ void erratum_a15_798181_init(void)
 unsigned int midr = read_cpuid_id();
 unsigned int revidr = read_cpuid(CPUID_REVIDR);
 
-/* Cortex-A15 r0p0..r3p2 w/o ECO fix affected */
-if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2 ||
-(revidr & 0x210) == 0x210) {
-return;
-}
-if (revidr & 0x10)
-erratum_a15_798181_handler = erratum_a15_798181_partial;
-else
+/* Brahma-B15 r0p0..r0p2 affected
+ * Cortex-A15 r0p0..r3p2 w/o ECO fix affected */
+if ((midr & 0xff0ffff0) == 0x420f00f0 && midr <= 0x420f00f2)
 erratum_a15_798181_handler = erratum_a15_798181_broadcast;
+else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr <= 0x413fc0f2 &&
+(revidr & 0x210) != 0x210) {
+if (revidr & 0x10)
+erratum_a15_798181_handler =
+erratum_a15_798181_partial;
+else
+erratum_a15_798181_handler =
+erratum_a15_798181_broadcast;
+}
 }
 #endif
 

@@ -27,6 +27,7 @@
 #include <linux/perf_event.h>
 
 #include <asm/opcodes.h>
+#include <asm/system_info.h>
 #include <asm/traps.h>
 #include <asm/uaccess.h>
 
@@ -266,6 +267,9 @@ static struct undef_hook swp_hook = {
 */
 static int __init swp_emulation_init(void)
 {
+if (cpu_architecture() < CPU_ARCH_ARMv7)
+return 0;
+
 #ifdef CONFIG_PROC_FS
 if (!proc_create("cpu/swp_emulation", S_IRUGO, NULL, &proc_status_fops))
 return -ENOMEM;

@@ -50,10 +50,7 @@ unsigned long profile_pc(struct pt_regs *regs)
 if (!in_lock_functions(regs->ARM_pc))
 return regs->ARM_pc;
 
-frame.fp = regs->ARM_fp;
-frame.sp = regs->ARM_sp;
-frame.lr = regs->ARM_lr;
-frame.pc = regs->ARM_pc;
+arm_get_current_stackframe(regs, &frame);
 do {
 int ret = unwind_frame(&frame);
 if (ret < 0)

@@ -31,11 +31,13 @@
 #include <asm/exception.h>
 #include <asm/unistd.h>
 #include <asm/traps.h>
+#include <asm/ptrace.h>
 #include <asm/unwind.h>
 #include <asm/tls.h>
 #include <asm/system_misc.h>
+#include <asm/opcodes.h>
 
 
 static const char *handler[]= {
 "prefetch abort",
 "data abort",
@@ -184,7 +186,7 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 tsk = current;
 
 if (regs) {
-fp = regs->ARM_fp;
+fp = frame_pointer(regs);
 mode = processor_mode(regs);
 } else if (tsk != current) {
 fp = thread_saved_fp(tsk);
@@ -719,7 +721,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 dump_instr("", regs);
 if (user_mode(regs)) {
 __show_regs(regs);
-c_backtrace(regs->ARM_fp, processor_mode(regs));
+c_backtrace(frame_pointer(regs), processor_mode(regs));
 }
 }
 #endif

@@ -479,12 +479,10 @@ void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 tsk = current;
 
 if (regs) {
-frame.fp = regs->ARM_fp;
-frame.sp = regs->ARM_sp;
-frame.lr = regs->ARM_lr;
+arm_get_current_stackframe(regs, &frame);
 /* PC might be corrupted, use LR in that case. */
-frame.pc = kernel_text_address(regs->ARM_pc)
-? regs->ARM_pc : regs->ARM_lr;
+if (!kernel_text_address(regs->ARM_pc))
+frame.pc = regs->ARM_lr;
 } else if (tsk == current) {
 frame.fp = (unsigned long)__builtin_frame_address(0);
 frame.sp = current_sp;

@@ -318,7 +318,6 @@ SECTIONS
 _end = .;
 
 STABS_DEBUG
-.comment 0 : { *(.comment) }
 }
 
 /*