Merge branch 'devel-stable' into for-next
Conflicts:
	arch/arm/Makefile
	arch/arm/include/asm/glue-proc.h

arch/arm/kernel/Makefile
@@ -15,7 +15,7 @@ CFLAGS_REMOVE_return_address.o = -pg

 # Object file lists.

-obj-y := elf.o entry-armv.o entry-common.o irq.o opcodes.o \
+obj-y := elf.o entry-common.o irq.o opcodes.o \
	   process.o ptrace.o return_address.o sched_clock.o \
	   setup.o signal.o stacktrace.o sys_arm.o time.o traps.o

@@ -23,6 +23,12 @@ obj-$(CONFIG_ATAGS) += atags_parse.o
 obj-$(CONFIG_ATAGS_PROC) += atags_proc.o
 obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += atags_compat.o

+ifeq ($(CONFIG_CPU_V7M),y)
+obj-y += entry-v7m.o
+else
+obj-y += entry-armv.o
+endif
+
 obj-$(CONFIG_OC_ETM) += etm.o
 obj-$(CONFIG_CPU_IDLE) += cpuidle.o
 obj-$(CONFIG_ISA_DMA_API) += dma.o

@@ -32,7 +38,10 @@ obj-$(CONFIG_ARTHUR) += arthur.o
 obj-$(CONFIG_ISA_DMA) += dma-isa.o
 obj-$(CONFIG_PCI) += bios32.o isa.o
 obj-$(CONFIG_ARM_CPU_SUSPEND) += sleep.o suspend.o
-obj-$(CONFIG_SMP) += smp.o smp_tlb.o
+obj-$(CONFIG_SMP) += smp.o
+ifdef CONFIG_MMU
+obj-$(CONFIG_SMP) += smp_tlb.o
+endif
 obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o
 obj-$(CONFIG_HAVE_ARM_TWD) += smp_twd.o
 obj-$(CONFIG_ARM_ARCH_TIMER) += arch_timer.o

@@ -82,6 +91,9 @@ obj-$(CONFIG_DEBUG_LL) += debug.o
 obj-$(CONFIG_EARLY_PRINTK) += early_printk.o

 obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o
-obj-$(CONFIG_ARM_PSCI) += psci.o
+ifeq ($(CONFIG_ARM_PSCI),y)
+obj-y += psci.o
+obj-$(CONFIG_SMP) += psci_smp.o
+endif

 extra-y := $(head-y) vmlinux.lds

arch/arm/kernel/asm-offsets.c
@@ -23,6 +23,7 @@
 #include <asm/thread_info.h>
 #include <asm/memory.h>
 #include <asm/procinfo.h>
+#include <asm/suspend.h>
 #include <asm/hardware/cache-l2x0.h>
 #include <linux/kbuild.h>

@@ -144,6 +145,11 @@ int main(void)
 #endif
 #ifdef MULTI_CACHE
   DEFINE(CACHE_FLUSH_KERN_ALL,	offsetof(struct cpu_cache_fns, flush_kern_all));
 #endif
+#ifdef CONFIG_ARM_CPU_SUSPEND
+  DEFINE(SLEEP_SAVE_SP_SZ,	sizeof(struct sleep_save_sp));
+  DEFINE(SLEEP_SAVE_SP_PHYS,	offsetof(struct sleep_save_sp, save_ptr_stash_phys));
+  DEFINE(SLEEP_SAVE_SP_VIRT,	offsetof(struct sleep_save_sp, save_ptr_stash));
+#endif
   BLANK();
   DEFINE(DMA_BIDIRECTIONAL,	DMA_BIDIRECTIONAL);
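
The SLEEP_SAVE_SP_* constants are generated so that sleep.S can address struct sleep_save_sp by byte offset from assembly. A minimal sketch of the asm-offsets idiom behind DEFINE(), assuming the usual include/linux/kbuild.h definition (shown for orientation, not verbatim from this diff):

/* DEFINE() emits a marker into the compiler's assembly output; kbuild
 * post-processes those markers into #defines in generated/asm-offsets.h,
 * so assembly such as sleep.S can use SLEEP_SAVE_SP_PHYS directly.
 */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))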

arch/arm/kernel/entry-common.S
@@ -350,6 +350,9 @@ ENDPROC(ftrace_stub)

	.align	5
 ENTRY(vector_swi)
+#ifdef CONFIG_CPU_V7M
+	v7m_exception_entry
+#else
	sub	sp, sp, #S_FRAME_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
 ARM(	add	r8, sp, #S_PC		)

@@ -360,6 +363,7 @@ ENTRY(vector_swi)
	str	lr, [sp, #S_PC]			@ Save calling PC
	str	r8, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
+#endif
	zero_fp

 #ifdef CONFIG_ALIGNMENT_TRAP

arch/arm/kernel/entry-header.S
@@ -5,6 +5,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/errno.h>
 #include <asm/thread_info.h>
+#include <asm/v7m.h>

 @ Bad Abort numbers
 @ -----------------

@@ -44,6 +45,116 @@
 #endif
	.endm
+
+#ifdef CONFIG_CPU_V7M
+/*
+ * ARMv7-M exception entry/exit macros.
+ *
+ * xPSR, ReturnAddress(), LR (R14), R12, R3, R2, R1, and R0 are
+ * automatically saved on the current stack (32 bytes) before
+ * switching to the exception stack (SP_main).
+ *
+ * If the exception is taken while in user mode, SP_main is
+ * empty. Otherwise, SP_main is aligned to 64 bit automatically
+ * (CCR.STKALIGN set).
+ *
+ * Linux assumes that the interrupts are disabled when entering an
+ * exception handler and it may BUG if this is not the case. Interrupts
+ * are disabled during entry and re-enabled in the exit macro.
+ *
+ * v7m_exception_slow_exit is used when returning from SVC or PendSV.
+ * When returning to kernel mode, we don't return from exception.
+ */
+	.macro	v7m_exception_entry
+	@ determine the location of the registers saved by the core during
+	@ exception entry. Depending on the mode the cpu was in when the
+	@ exception happened, that is either on the main or the process stack.
+	@ Bit 2 of EXC_RETURN stored in the lr register specifies which stack
+	@ was used.
+	tst	lr, #EXC_RET_STACK_MASK
+	mrsne	r12, psp
+	moveq	r12, sp
+
+	@ we cannot rely on r0-r3 and r12 matching the value saved in the
+	@ exception frame because of tail-chaining. So these have to be
+	@ reloaded.
+	ldmia	r12!, {r0-r3}
+
+	@ Linux expects to have irqs off. Do it here before taking stack space
+	cpsid	i
+
+	sub	sp, #S_FRAME_SIZE-S_IP
+	stmdb	sp!, {r0-r11}
+
+	@ load saved r12, lr, return address and xPSR.
+	@ r0-r7 are used for signals and never touched from now on. Clobbering
+	@ r8-r12 is OK.
+	mov	r9, r12
+	ldmia	r9!, {r8, r10-r12}
+
+	@ calculate the original stack pointer value.
+	@ r9 currently points to the memory location just above the auto saved
+	@ xPSR.
+	@ The cpu might automatically 8-byte align the stack. Bit 9
+	@ of the saved xPSR specifies if stack aligning took place. In this case
+	@ another 32-bit value is included in the stack.
+
+	tst	r12, V7M_xPSR_FRAMEPTRALIGN
+	addne	r9, r9, #4
+
+	@ store saved r12 using str to have a register to hold the base for stm
+	str	r8, [sp, #S_IP]
+	add	r8, sp, #S_SP
+	@ store r13-r15, xPSR
+	stmia	r8!, {r9-r12}
+	@ store old_r0
+	str	r0, [r8]
+	.endm
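
For orientation, the hardware-saved frame that v7m_exception_entry walks can be pictured as a C struct. This is a hypothetical illustration only; the kernel addresses these slots through pt_regs offsets such as S_IP and S_PC, not through a struct like this:

/* What ARMv7-M hardware pushes before vectoring, lowest address first.
 * r0-r3/r12 are reloaded from here because the live registers may be
 * stale after tail-chaining; bit 9 of the saved xPSR records whether
 * the core inserted one padding word to 8-byte align this frame.
 */
struct v7m_basic_frame {
	u32	r0, r1, r2, r3;
	u32	r12;
	u32	lr;			/* R14 at the point of exception */
	u32	return_address;		/* where the interrupted code resumes */
	u32	xpsr;			/* bit 9 = V7M_xPSR_FRAMEPTRALIGN */
};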
+
+/*
+ * PENDSV and SVCALL are configured to have the same exception
+ * priorities. As a kernel thread runs at SVCALL execution priority it
+ * can never be preempted and so we will never have to return to a
+ * kernel thread here.
+ */
+	.macro	v7m_exception_slow_exit ret_r0
+	cpsid	i
+	ldr	lr, =EXC_RET_THREADMODE_PROCESSSTACK
+
+	@ read original r12, sp, lr, pc and xPSR
+	add	r12, sp, #S_IP
+	ldmia	r12, {r1-r5}
+
+	@ an exception frame is always 8-byte aligned. To tell the hardware if
+	@ the sp to be restored is aligned or not, set bit 9 of the saved xPSR
+	@ accordingly.
+	tst	r2, #4
+	subne	r2, r2, #4
+	orrne	r5, V7M_xPSR_FRAMEPTRALIGN
+	biceq	r5, V7M_xPSR_FRAMEPTRALIGN
+
+	@ write basic exception frame
+	stmdb	r2!, {r1, r3-r5}
+	ldmia	sp, {r1, r3-r5}
+	.if	\ret_r0
+	stmdb	r2!, {r0, r3-r5}
+	.else
+	stmdb	r2!, {r1, r3-r5}
+	.endif
+
+	@ restore process sp
+	msr	psp, r2
+
+	@ restore original r4-r11
+	ldmia	sp!, {r0-r11}
+
+	@ restore main sp
+	add	sp, sp, #S_FRAME_SIZE-S_IP
+
+	cpsie	i
+	bx	lr
+	.endm
+#endif	/* CONFIG_CPU_V7M */
+
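
The bit-9 handling in v7m_exception_slow_exit is easier to follow in C. A hedged sketch of the same logic (the helper name is hypothetical; V7M_xPSR_FRAMEPTRALIGN comes from asm/v7m.h):

/* On exception return the core adds 4 to SP when xPSR bit 9 is set,
 * so a 4-byte-aligned SP is stepped down by 4 and the bit set instead;
 * an already 8-byte-aligned SP gets the bit cleared.
 */
static u32 v7m_fixup_frame_sp(u32 sp, u32 *xpsr)
{
	if (sp & 4) {
		sp -= 4;
		*xpsr |= V7M_xPSR_FRAMEPTRALIGN;
	} else {
		*xpsr &= ~V7M_xPSR_FRAMEPTRALIGN;
	}
	return sp;
}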
 @
 @ Store/load the USER SP and LR registers by switching to the SYS
 @ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not

@@ -165,6 +276,18 @@
	rfeia	sp!
	.endm

+#ifdef CONFIG_CPU_V7M
+	/*
+	 * Note we don't need to do clrex here as clearing the local monitor is
+	 * part of each exception entry and exit sequence.
+	 */
+	.macro	restore_user_regs, fast = 0, offset = 0
+	.if	\offset
+	add	sp, #\offset
+	.endif
+	v7m_exception_slow_exit ret_r0 = \fast
+	.endm
+#else	/* ifdef CONFIG_CPU_V7M */
	.macro	restore_user_regs, fast = 0, offset = 0
	clrex					@ clear the exclusive monitor
	mov	r2, sp

@@ -181,6 +304,7 @@
	add	sp, sp, #S_FRAME_SIZE - S_SP
	movs	pc, lr				@ return & move spsr_svc into cpsr
	.endm
+#endif	/* ifdef CONFIG_CPU_V7M / else */

	.macro	get_thread_info, rd
	mov	\rd, sp

arch/arm/kernel/entry-v7m.S (new file, 143 lines)
@@ -0,0 +1,143 @@
+/*
+ * linux/arch/arm/kernel/entry-v7m.S
+ *
+ * Copyright (C) 2008 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Low-level vector interface routines for the ARMv7-M architecture
+ */
+#include <asm/memory.h>
+#include <asm/glue.h>
+#include <asm/thread_notify.h>
+#include <asm/v7m.h>
+
+#include <mach/entry-macro.S>
+
+#include "entry-header.S"
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+#error "CONFIG_TRACE_IRQFLAGS not supported on the current ARMv7M implementation"
+#endif
+
+__invalid_entry:
+	v7m_exception_entry
+	adr	r0, strerr
+	mrs	r1, ipsr
+	mov	r2, lr
+	bl	printk
+	mov	r0, sp
+	bl	show_regs
+1:	b	1b
+ENDPROC(__invalid_entry)
+
+strerr:	.asciz	"\nUnhandled exception: IPSR = %08lx LR = %08lx\n"
+
+	.align	2
+__irq_entry:
+	v7m_exception_entry
+
+	@
+	@ Invoke the IRQ handler
+	@
+	mrs	r0, ipsr
+	ldr	r1, =V7M_xPSR_EXCEPTIONNO
+	and	r0, r1
+	sub	r0, #16
+	mov	r1, sp
+	stmdb	sp!, {lr}
+	@ routine called with r0 = irq number, r1 = struct pt_regs *
+	bl	nvic_do_IRQ
+
+	pop	{lr}
+	@
+	@ Check for any pending work if returning to user
+	@
+	ldr	r1, =BASEADDR_V7M_SCB
+	ldr	r0, [r1, V7M_SCB_ICSR]
+	tst	r0, V7M_SCB_ICSR_RETTOBASE
+	beq	2f
+
+	get_thread_info tsk
+	ldr	r2, [tsk, #TI_FLAGS]
+	tst	r2, #_TIF_WORK_MASK
+	beq	2f			@ no work pending
+	mov	r0, #V7M_SCB_ICSR_PENDSVSET
+	str	r0, [r1, V7M_SCB_ICSR]	@ raise PendSV
+
+2:
+	@ registers r0-r3 and r12 are automatically restored on exception
+	@ return. r4-r7 were not clobbered in v7m_exception_entry so for
+	@ correctness they don't need to be restored. So only r8-r11 must be
+	@ restored here. The easiest way to do so is to restore r0-r7, too.
+	ldmia	sp!, {r0-r11}
+	add	sp, #S_FRAME_SIZE-S_IP
+	cpsie	i
+	bx	lr
+ENDPROC(__irq_entry)
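
The IRQ-number computation in __irq_entry (mask IPSR, subtract 16) corresponds to the following hypothetical C helper. ARMv7-M reserves exception numbers 0-15 for architectural exceptions, so external interrupt n arrives as exception n + 16:

/* Derive the NVIC IRQ number passed to nvic_do_IRQ() from IPSR,
 * assuming V7M_xPSR_EXCEPTIONNO masks the exception-number field.
 */
static inline int v7m_irq_from_ipsr(u32 ipsr)
{
	return (int)(ipsr & V7M_xPSR_EXCEPTIONNO) - 16;
}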
+
+__pendsv_entry:
+	v7m_exception_entry
+
+	ldr	r1, =BASEADDR_V7M_SCB
+	mov	r0, #V7M_SCB_ICSR_PENDSVCLR
+	str	r0, [r1, V7M_SCB_ICSR]	@ clear PendSV
+
+	@ execute the pending work, including reschedule
+	get_thread_info tsk
+	mov	why, #0
+	b	ret_to_user
+ENDPROC(__pendsv_entry)
+
+/*
+ * Register switch for ARMv7-M processors.
+ * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
+ * previous and next are guaranteed not to be the same.
+ */
+ENTRY(__switch_to)
+	.fnstart
+	.cantunwind
+	add	ip, r1, #TI_CPU_SAVE
+	stmia	ip!, {r4 - r11}		@ Store most regs on stack
+	str	sp, [ip], #4
+	str	lr, [ip], #4
+	mov	r5, r0
+	add	r4, r2, #TI_CPU_SAVE
+	ldr	r0, =thread_notify_head
+	mov	r1, #THREAD_NOTIFY_SWITCH
+	bl	atomic_notifier_call_chain
+	mov	ip, r4
+	mov	r0, r5
+	ldmia	ip!, {r4 - r11}		@ Load all regs saved previously
+	ldr	sp, [ip]
+	ldr	pc, [ip, #4]!
+	.fnend
+ENDPROC(__switch_to)
+
+	.data
+	.align	8
+/*
+ * Vector table (64 words => 256 bytes natural alignment)
+ */
+ENTRY(vector_table)
+	.long	0			@ 0 - Reset stack pointer
+	.long	__invalid_entry		@ 1 - Reset
+	.long	__invalid_entry		@ 2 - NMI
+	.long	__invalid_entry		@ 3 - HardFault
+	.long	__invalid_entry		@ 4 - MemManage
+	.long	__invalid_entry		@ 5 - BusFault
+	.long	__invalid_entry		@ 6 - UsageFault
+	.long	__invalid_entry		@ 7 - Reserved
+	.long	__invalid_entry		@ 8 - Reserved
+	.long	__invalid_entry		@ 9 - Reserved
+	.long	__invalid_entry		@ 10 - Reserved
+	.long	vector_swi		@ 11 - SVCall
+	.long	__invalid_entry		@ 12 - Debug Monitor
+	.long	__invalid_entry		@ 13 - Reserved
+	.long	__pendsv_entry		@ 14 - PendSV
+	.long	__invalid_entry		@ 15 - SysTick
+	.rept	64 - 16
+	.long	__irq_entry		@ 16..63 - External Interrupts
+	.endr

arch/arm/kernel/head-nommu.S
@@ -17,8 +17,12 @@
 #include <asm/assembler.h>
 #include <asm/ptrace.h>
 #include <asm/asm-offsets.h>
+#include <asm/memory.h>
 #include <asm/cp15.h>
 #include <asm/thread_info.h>
+#include <asm/v7m.h>
+#include <asm/mpu.h>
+#include <asm/page.h>

 /*
  * Kernel startup entry point.

@@ -50,20 +54,85 @@ ENTRY(stext)

	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
						@ and irqs disabled
+#if defined(CONFIG_CPU_CP15)
	mrc	p15, 0, r9, c0, c0		@ get processor id
+#elif defined(CONFIG_CPU_V7M)
+	ldr	r9, =BASEADDR_V7M_SCB
+	ldr	r9, [r9, V7M_SCB_CPUID]
+#else
+	ldr	r9, =CONFIG_PROCESSOR_ID
+#endif
	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
	movs	r10, r5				@ invalid processor (r5=0)?
	beq	__error_p			@ yes, error 'p'

+#ifdef CONFIG_ARM_MPU
+	/* Calculate the size of a region covering just the kernel */
+	ldr	r5, =PHYS_OFFSET		@ Region start: PHYS_OFFSET
+	ldr	r6, =(_end)			@ Cover whole kernel
+	sub	r6, r6, r5			@ Minimum size of region to map
+	clz	r6, r6				@ Region size must be 2^N...
+	rsb	r6, r6, #31			@ ...so round up region size
+	lsl	r6, r6, #MPU_RSR_SZ		@ Put size in right field
+	orr	r6, r6, #(1 << MPU_RSR_EN)	@ Set region enabled bit
+	bl	__setup_mpu
+#endif
+	ldr	r13, =__mmap_switched		@ address to jump to after
+						@ initialising sctlr
+	adr	lr, BSYM(1f)			@ return (PIC) address
 ARM(	add	pc, r10, #PROCINFO_INITFUNC	)
 THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
 THUMB(	mov	pc, r12				)
+1:	b	__after_proc_init
 ENDPROC(stext)
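
The clz/rsb/lsl/orr sequence that builds r6 above mirrors this hypothetical C helper. PMSAv7 regions must be power-of-two sized, and the DRSR SZ field encodes log2(region size) - 1:

/* Round the kernel image up to a power of two and encode it in DRSR,
 * exactly as the assembly does: SZ = 31 - clz(size), so the region
 * spans 2^(SZ+1) >= size bytes.
 */
static u32 kernel_region_drsr(u32 phys_start, u32 kernel_end)
{
	u32 size = kernel_end - phys_start;
	u32 sz = 31 - __builtin_clz(size);	/* clz r6, r6; rsb r6, r6, #31 */

	return (sz << MPU_RSR_SZ) | (1 << MPU_RSR_EN);	/* lsl; orr */
}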
+
+#ifdef CONFIG_SMP
+	__CPUINIT
+ENTRY(secondary_startup)
+	/*
+	 * Common entry point for secondary CPUs.
+	 *
+	 * Ensure that we're in SVC mode, and IRQs are disabled.  Lookup
+	 * the processor type - there is no need to check the machine type
+	 * as it has already been validated by the primary processor.
+	 */
+	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
+#ifndef CONFIG_CPU_CP15
+	ldr	r9, =CONFIG_PROCESSOR_ID
+#else
+	mrc	p15, 0, r9, c0, c0		@ get processor id
+#endif
+	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
+	movs	r10, r5				@ invalid processor?
+	beq	__error_p			@ yes, error 'p'
+
+	adr	r4, __secondary_data
+	ldmia	r4, {r7, r12}
+
+#ifdef CONFIG_ARM_MPU
+	/* Use MPU region info supplied by __cpu_up */
+	ldr	r6, [r7]			@ get secondary_data.mpu_szr
+	bl	__setup_mpu			@ Initialize the MPU
+#endif
+
+	adr	lr, BSYM(__after_proc_init)	@ return address
+	mov	r13, r12			@ __secondary_switched address
+ ARM(	add	pc, r10, #PROCINFO_INITFUNC	)
+ THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
+ THUMB(	mov	pc, r12				)
+ENDPROC(secondary_startup)
+
+ENTRY(__secondary_switched)
+	ldr	sp, [r7, #8]			@ set up the stack pointer
+	mov	fp, #0
+	b	secondary_start_kernel
+ENDPROC(__secondary_switched)
+
+	.type	__secondary_data, %object
+__secondary_data:
+	.long	secondary_data
+	.long	__secondary_switched
+#endif /* CONFIG_SMP */

 /*
  * Set the Control Register and Read the process ID.

@@ -95,10 +164,97 @@ __after_proc_init:
 #endif
	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
 #endif /* CONFIG_CPU_CP15 */

-	b	__mmap_switched			@ clear the BSS and jump
-						@ to start_kernel
+	mov	pc, r13
 ENDPROC(__after_proc_init)
	.ltorg

+#ifdef CONFIG_ARM_MPU
+
+/* Set which MPU region should be programmed */
+.macro set_region_nr tmp, rgnr
+	mov	\tmp, \rgnr			@ Use static region numbers
+	mcr	p15, 0, \tmp, c6, c2, 0		@ Write RGNR
+.endm
+
+/* Setup a single MPU region, either D or I side (D-side for unified) */
+.macro setup_region bar, acr, sr, side = MPU_DATA_SIDE
+	mcr	p15, 0, \bar, c6, c1, (0 + \side)	@ I/DRBAR
+	mcr	p15, 0, \acr, c6, c1, (4 + \side)	@ I/DRACR
+	mcr	p15, 0, \sr, c6, c1, (2 + \side)	@ I/DRSR
+.endm
+
+/*
+ * Setup the MPU and initial MPU Regions. We create the following regions:
+ * Region 0: Use this for probing the MPU details, so leave disabled.
+ * Region 1: Background region - covers the whole of RAM as strongly ordered
+ * Region 2: Normal, Shared, cacheable for RAM. From PHYS_OFFSET, size from r6
+ * Region 3: Normal, shared, inaccessible from PL0 to protect the vectors page
+ *
+ * r6: Value to be written to DRSR (and IRSR if required) for MPU_RAM_REGION
+ */
+
+ENTRY(__setup_mpu)
+
+	/* Probe for v7 PMSA compliance */
+	mrc	p15, 0, r0, c0, c1, 4		@ Read ID_MMFR0
+	and	r0, r0, #(MMFR0_PMSA)		@ PMSA field
+	teq	r0, #(MMFR0_PMSAv7)		@ PMSA v7
+	bne	__error_p			@ Fail: ARM_MPU on NOT v7 PMSA
+
+	/* Determine whether the D/I-side memory map is unified. We set the
+	 * flags here and continue to use them for the rest of this function */
+	mrc	p15, 0, r0, c0, c0, 4		@ MPUIR
+	ands	r5, r0, #MPUIR_DREGION_SZMASK	@ 0 size d region => No MPU
+	beq	__error_p			@ Fail: ARM_MPU and no MPU
+	tst	r0, #MPUIR_nU			@ MPUIR_nU = 0 for unified
+
+	/* Setup second region first to free up r6 */
+	set_region_nr r0, #MPU_RAM_REGION
+	isb
+	/* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */
+	ldr	r0, =PHYS_OFFSET		@ RAM starts at PHYS_OFFSET
+	ldr	r5, =(MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL)
+
+	setup_region r0, r5, r6, MPU_DATA_SIDE	@ PHYS_OFFSET, shared, enabled
+	beq	1f				@ Memory-map not unified
+	setup_region r0, r5, r6, MPU_INSTR_SIDE	@ PHYS_OFFSET, shared, enabled
+1:	isb
+
+	/* First/background region */
+	set_region_nr r0, #MPU_BG_REGION
+	isb
+	/* Execute Never, strongly ordered, inaccessible to PL0, rw PL1 */
+	mov	r0, #0				@ BG region starts at 0x0
+	ldr	r5, =(MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0NA)
+	mov	r6, #MPU_RSR_ALL_MEM		@ 4GB region, enabled
+
+	setup_region r0, r5, r6, MPU_DATA_SIDE	@ 0x0, BG region, enabled
+	beq	2f				@ Memory-map not unified
+	setup_region r0, r5, r6, MPU_INSTR_SIDE	@ 0x0, BG region, enabled
+2:	isb
+
+	/* Vectors region */
+	set_region_nr r0, #MPU_VECTORS_REGION
+	isb
+	/* Shared, inaccessible to PL0, rw PL1 */
+	mov	r0, #CONFIG_VECTORS_BASE	@ Cover from VECTORS_BASE
+	ldr	r5, =(MPU_AP_PL1RW_PL0NA | MPU_RGN_NORMAL)
+	/* Writing N to bits 5:1 (RSR_SZ) --> region size 2^(N+1) */
+	mov	r6, #(((PAGE_SHIFT - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN)
+
+	setup_region r0, r5, r6, MPU_DATA_SIDE	@ VECTORS_BASE, PL0 NA, enabled
+	beq	3f				@ Memory-map not unified
+	setup_region r0, r5, r6, MPU_INSTR_SIDE	@ VECTORS_BASE, PL0 NA, enabled
+3:	isb
+
+	/* Enable the MPU */
+	mrc	p15, 0, r0, c1, c0, 0		@ Read SCTLR
+	bic	r0, r0, #CR_BR			@ Disable the 'default mem-map'
+	orr	r0, r0, #CR_M			@ Set SCTLR.M (MPU on)
+	mcr	p15, 0, r0, c1, c0, 0		@ Enable MPU
+	isb
+	mov	pc, lr
+ENDPROC(__setup_mpu)
+#endif
 #include "head-common.S"

arch/arm/kernel/head.S
@@ -156,7 +156,7 @@ ENDPROC(stext)
 *
 * Returns:
 *	r0, r3, r5-r7 corrupted
-*	r4 = physical page table address
+*	r4 = page table (see ARCH_PGD_SHIFT in asm/memory.h)
 */
 __create_page_tables:
	pgtbl	r4, r8				@ page table address

@@ -331,6 +331,7 @@ __create_page_tables:
 #endif
 #ifdef CONFIG_ARM_LPAE
	sub	r4, r4, #0x1000		@ point to the PGD table
+	mov	r4, r4, lsr #ARCH_PGD_SHIFT
 #endif
	mov	pc, lr
 ENDPROC(__create_page_tables)

@@ -408,7 +409,7 @@ __secondary_data:
 * r0  = cp#15 control register
 * r1  = machine ID
 * r2  = atags or dtb pointer
-* r4  = page table pointer
+* r4  = page table (see ARCH_PGD_SHIFT in asm/memory.h)
 * r9  = processor ID
 * r13 = *virtual* address to jump to upon completion
 */

@@ -427,10 +428,7 @@ __enable_mmu:
 #ifdef CONFIG_CPU_ICACHE_DISABLE
	bic	r0, r0, #CR_I
 #endif
-#ifdef CONFIG_ARM_LPAE
-	mov	r5, #0
-	mcrr	p15, 0, r4, r5, c2		@ load TTBR0
-#else
+#ifndef CONFIG_ARM_LPAE
	mov	r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \

arch/arm/kernel/hyp-stub.S
@@ -153,6 +153,13 @@ THUMB(	orr	r7, #(1 << 30)	)	@ HSCTLR.TE
	mrc	p15, 4, r7, c14, c1, 0	@ CNTHCTL
	orr	r7, r7, #3		@ PL1PCEN | PL1PCTEN
	mcr	p15, 4, r7, c14, c1, 0	@ CNTHCTL
+	mov	r7, #0
+	mcrr	p15, 4, r7, r7, c14	@ CNTVOFF
+
+	@ Disable virtual timer in case it was counting
+	mrc	p15, 0, r7, c14, c3, 1	@ CNTV_CTL
+	bic	r7, #1			@ Clear ENABLE
+	mcr	p15, 0, r7, c14, c3, 1	@ CNTV_CTL
 1:
 #endif

arch/arm/kernel/psci.c
@@ -158,7 +158,7 @@ static const struct of_device_id psci_of_match[] __initconst = {
	{},
 };

-static int __init psci_init(void)
+void __init psci_init(void)
 {
	struct device_node *np;
	const char *method;

@@ -166,7 +166,7 @@ static int __init psci_init(void)
	np = of_find_matching_node(NULL, psci_of_match);
	if (!np)
-		return 0;
+		return;

	pr_info("probing function IDs from device-tree\n");

@@ -206,6 +206,5 @@ static int __init psci_init(void)
 out_put_node:
	of_node_put(np);
-	return 0;
+	return;
 }
-early_initcall(psci_init);

arch/arm/kernel/psci_smp.c (new file, 84 lines)
@@ -0,0 +1,84 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2012 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/init.h>
+#include <linux/irqchip/arm-gic.h>
+#include <linux/smp.h>
+#include <linux/of.h>
+
+#include <asm/psci.h>
+#include <asm/smp_plat.h>
+
+/*
+ * psci_smp assumes that the following is true about PSCI:
+ *
+ * cpu_suspend	Suspend the execution on a CPU
+ * @state	we don't currently describe affinity levels, so just pass 0.
+ * @entry_point	the first instruction to be executed on return
+ * returns 0	success, < 0 on failure
+ *
+ * cpu_off	Power down a CPU
+ * @state	we don't currently describe affinity levels, so just pass 0.
+ * no return on successful call
+ *
+ * cpu_on	Power up a CPU
+ * @cpuid	cpuid of target CPU, as from MPIDR
+ * @entry_point	the first instruction to be executed on return
+ * returns 0	success, < 0 on failure
+ *
+ * migrate	Migrate the context to a different CPU
+ * @cpuid	cpuid of target CPU, as from MPIDR
+ * returns 0	success, < 0 on failure
+ *
+ */
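
These four calls are reached through psci_ops. A sketch of the operations table, with the shape assumed from asm/psci.h of this era (shown only for orientation, not part of this diff):

struct psci_power_state {
	u16	id;
	u8	type;			/* e.g. PSCI_POWER_STATE_TYPE_POWER_DOWN */
	u8	affinity_level;
};

struct psci_operations {
	int (*cpu_suspend)(struct psci_power_state state, unsigned long entry_point);
	int (*cpu_off)(struct psci_power_state state);
	int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
	int (*migrate)(unsigned long cpuid);
};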
+
+extern void secondary_startup(void);
+
+static int __cpuinit psci_boot_secondary(unsigned int cpu,
+					 struct task_struct *idle)
+{
+	if (psci_ops.cpu_on)
+		return psci_ops.cpu_on(cpu_logical_map(cpu),
+				       __pa(secondary_startup));
+	return -ENODEV;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+void __ref psci_cpu_die(unsigned int cpu)
+{
+	const struct psci_power_state ps = {
+		.type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
+	};
+
+	if (psci_ops.cpu_off)
+		psci_ops.cpu_off(ps);
+
+	/* We should never return */
+	panic("psci: cpu %d failed to shutdown\n", cpu);
+}
+#endif
+
+bool __init psci_smp_available(void)
+{
+	/* is cpu_on available at least? */
+	return (psci_ops.cpu_on != NULL);
+}
+
+struct smp_operations __initdata psci_smp_ops = {
+	.smp_boot_secondary	= psci_boot_secondary,
+#ifdef CONFIG_HOTPLUG_CPU
+	.cpu_die		= psci_cpu_die,
+#endif
+};

arch/arm/kernel/setup.c
@@ -37,6 +37,7 @@
 #include <asm/cputype.h>
 #include <asm/elf.h>
 #include <asm/procinfo.h>
+#include <asm/psci.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/smp_plat.h>

@@ -128,7 +129,9 @@ struct stack {
	u32 und[3];
 } ____cacheline_aligned;

+#ifndef CONFIG_CPU_V7M
 static struct stack stacks[NR_CPUS];
+#endif

 char elf_platform[ELF_PLATFORM_SIZE];
 EXPORT_SYMBOL(elf_platform);

@@ -207,7 +210,7 @@ static const char *proc_arch[] = {
	"5TEJ",
	"6TEJ",
	"7",
-	"?(11)",
+	"7M",
	"?(12)",
	"?(13)",
	"?(14)",

@@ -216,6 +219,12 @@ static const char *proc_arch[] = {
	"?(17)",
 };

+#ifdef CONFIG_CPU_V7M
+static int __get_cpu_architecture(void)
+{
+	return CPU_ARCH_ARMv7M;
+}
+#else
 static int __get_cpu_architecture(void)
 {
	int cpu_arch;

@@ -248,6 +257,7 @@ static int __get_cpu_architecture(void)

	return cpu_arch;
 }
+#endif

 int __pure cpu_architecture(void)
 {

@@ -293,7 +303,9 @@ static void __init cacheid_init(void)
 {
	unsigned int arch = cpu_architecture();

-	if (arch >= CPU_ARCH_ARMv6) {
+	if (arch == CPU_ARCH_ARMv7M) {
+		cacheid = 0;
+	} else if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */

@@ -355,7 +367,7 @@ void __init early_print(const char *str, ...)

 static void __init cpuid_init_hwcaps(void)
 {
-	unsigned int divide_instrs;
+	unsigned int divide_instrs, vmsa;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

@@ -368,6 +380,11 @@ static void __init cpuid_init_hwcaps(void)
	case 1:
		elf_hwcap |= HWCAP_IDIVT;
	}
+
+	/* LPAE implies atomic ldrd/strd instructions */
+	vmsa = (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf) >> 0;
+	if (vmsa >= 5)
+		elf_hwcap |= HWCAP_LPAE;
 }

 static void __init feat_v6_fixup(void)

@@ -392,6 +409,7 @@ static void __init feat_v6_fixup(void)
 */
 void notrace cpu_init(void)
 {
+#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

@@ -442,6 +460,7 @@ void notrace cpu_init(void)
	  "I" (offsetof(struct stack, und[0])),
	  PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	: "r14");
+#endif
 }

 u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

@@ -466,6 +485,72 @@ void __init smp_setup_processor_id(void)
	printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr);
 }
+
+struct mpidr_hash mpidr_hash;
+#ifdef CONFIG_SMP
+/**
+ * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
+ *			  level in order to build a linear index from an
+ *			  MPIDR value. Resulting algorithm is a collision
+ *			  free hash carried out through shifting and ORing
+ */
+static void __init smp_build_mpidr_hash(void)
+{
+	u32 i, affinity;
+	u32 fs[3], bits[3], ls, mask = 0;
+	/*
+	 * Pre-scan the list of MPIDRS and filter out bits that do
+	 * not contribute to affinity levels, ie they never toggle.
+	 */
+	for_each_possible_cpu(i)
+		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
+	pr_debug("mask of set bits 0x%x\n", mask);
+	/*
+	 * Find and stash the last and first bit set at all affinity levels to
+	 * check how many bits are required to represent them.
+	 */
+	for (i = 0; i < 3; i++) {
+		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
+		/*
+		 * Find the MSB and LSB positions to determine how many
+		 * bits are required to express the affinity level.
+		 */
+		ls = fls(affinity);
+		fs[i] = affinity ? ffs(affinity) - 1 : 0;
+		bits[i] = ls - fs[i];
+	}
+	/*
+	 * An index can be created from the MPIDR by isolating the
+	 * significant bits at each affinity level and by shifting
+	 * them in order to compress the 24-bit value space to a
+	 * compressed set of values. This is equivalent to hashing
+	 * the MPIDR through shifting and ORing. It is a collision free
+	 * hash though not minimal since some levels might contain a number
+	 * of CPUs that is not an exact power of 2 and their bit
+	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
+	 */
+	mpidr_hash.shift_aff[0] = fs[0];
+	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
+	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
+						(bits[1] + bits[0]);
+	mpidr_hash.mask = mask;
+	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
+	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
+		mpidr_hash.shift_aff[0],
+		mpidr_hash.shift_aff[1],
+		mpidr_hash.shift_aff[2],
+		mpidr_hash.mask,
+		mpidr_hash.bits);
+	/*
+	 * 4x is an arbitrary value used to warn on a hash table much bigger
+	 * than expected on most systems.
+	 */
+	if (mpidr_hash_size() > 4 * num_possible_cpus())
+		pr_warn("Large number of MPIDR hash buckets detected\n");
+	sync_cache_w(&mpidr_hash);
+}
+#endif
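
A worked example: on a hypothetical two-cluster system with MPIDRs {0x000, 0x001, 0x100, 0x101}, the toggling-bit mask is 0x101, giving fs = {0, 0, 0} and bits = {1, 1, 0}, hence shift_aff = {0, 7, 14} and a 2-bit hash. The four MPIDRs map to the dense indices 0-3:

/* Hypothetical specialization of the hash for the example above:
 * hash(0x000) = 0, hash(0x001) = 1, hash(0x100) = 2, hash(0x101) = 3.
 */
static u32 mpidr_hash_example(u32 mpidr)
{
	u32 m = mpidr & 0x101;			/* mpidr_hash.mask */

	return ((m & 0xff) >> 0) |		/* aff0 >> shift_aff[0] */
	       ((m & 0xff00) >> 7);		/* aff1 >> shift_aff[1]; aff2 empty */
}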

 static void __init setup_processor(void)
 {
	struct proc_info_list *list;

@@ -803,10 +888,17 @@ void __init setup_arch(char **cmdline_p)
	unflatten_device_tree();

	arm_dt_init_cpu_maps();
+	psci_init();
 #ifdef CONFIG_SMP
	if (is_smp()) {
-		smp_set_ops(mdesc->smp);
+		if (!mdesc->smp_init || !mdesc->smp_init()) {
+			if (psci_smp_available())
+				smp_set_ops(&psci_smp_ops);
+			else if (mdesc->smp)
+				smp_set_ops(mdesc->smp);
+		}
		smp_init_cpus();
+		smp_build_mpidr_hash();
	}
 #endif

@@ -879,6 +971,7 @@ static const char *hwcap_str[] = {
	"vfpv4",
	"idiva",
	"idivt",
+	"lpae",
	NULL
 };

arch/arm/kernel/signal.c
@@ -392,14 +392,19 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
	if (ksig->ka.sa.sa_flags & SA_SIGINFO)
		idx += 3;

+	/*
+	 * Put the sigreturn code on the stack no matter which return
+	 * mechanism we use in order to remain ABI compliant
+	 */
	if (__put_user(sigreturn_codes[idx], rc) ||
	    __put_user(sigreturn_codes[idx+1], rc+1))
		return 1;

-	if (cpsr & MODE32_BIT) {
+	if ((cpsr & MODE32_BIT) && !IS_ENABLED(CONFIG_ARM_MPU)) {
		/*
		 * 32-bit code can use the new high-page
-		 * signal return code support.
+		 * signal return code support except when the MPU has
+		 * protected the vectors page from PL0
		 */
		retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
	} else {

arch/arm/kernel/sleep.S
@@ -6,6 +6,49 @@
 #include <asm/glue-proc.h>
	.text

+/*
+ * Implementation of MPIDR hash algorithm through shifting
+ * and OR'ing.
+ *
+ * @dst: register containing hash result
+ * @rs0: register containing affinity level 0 bit shift
+ * @rs1: register containing affinity level 1 bit shift
+ * @rs2: register containing affinity level 2 bit shift
+ * @mpidr: register containing MPIDR value
+ * @mask: register containing MPIDR mask
+ *
+ * Pseudo C-code:
+ *
+ *u32 dst;
+ *
+ *compute_mpidr_hash(u32 rs0, u32 rs1, u32 rs2, u32 mpidr, u32 mask) {
+ *	u32 aff0, aff1, aff2;
+ *	u32 mpidr_masked = mpidr & mask;
+ *	aff0 = mpidr_masked & 0xff;
+ *	aff1 = mpidr_masked & 0xff00;
+ *	aff2 = mpidr_masked & 0xff0000;
+ *	dst = (aff0 >> rs0 | aff1 >> rs1 | aff2 >> rs2);
+ *}
+ * Input registers: rs0, rs1, rs2, mpidr, mask
+ * Output register: dst
+ * Note: input and output registers must be disjoint register sets
+ *	 (eg: a macro instance with mpidr = r1 and dst = r1 is invalid)
+ */
+	.macro compute_mpidr_hash dst, rs0, rs1, rs2, mpidr, mask
+	and	\mpidr, \mpidr, \mask		@ mask out MPIDR bits
+	and	\dst, \mpidr, #0xff		@ mask = aff0
+ ARM(	mov	\dst, \dst, lsr \rs0	)	@ dst = aff0 >> rs0
+ THUMB(	lsr	\dst, \dst, \rs0	)
+	and	\mask, \mpidr, #0xff00		@ mask = aff1
+ ARM(	orr	\dst, \dst, \mask, lsr \rs1	)	@ dst |= (aff1 >> rs1)
+ THUMB(	lsr	\mask, \mask, \rs1	)
+ THUMB(	orr	\dst, \dst, \mask	)
+	and	\mask, \mpidr, #0xff0000	@ mask = aff2
+ ARM(	orr	\dst, \dst, \mask, lsr \rs2	)	@ dst |= (aff2 >> rs2)
+ THUMB(	lsr	\mask, \mask, \rs2	)
+ THUMB(	orr	\dst, \dst, \mask	)
+	.endm

 /*
 * Save CPU state for a suspend. This saves the CPU general purpose
 * registers, and allocates space on the kernel stack to save the CPU

@@ -29,12 +72,18 @@ ENTRY(__cpu_suspend)
	mov	r1, r4			@ size of save block
	mov	r2, r5			@ virtual SP
	ldr	r3, =sleep_save_sp
-#ifdef CONFIG_SMP
-	ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
-	ALT_UP(mov lr, #0)
-	and	lr, lr, #15
+	ldr	r3, [r3, #SLEEP_SAVE_SP_VIRT]
+	ALT_SMP(mrc p15, 0, r9, c0, c0, 5)
+	ALT_UP_B(1f)
+	ldr	r8, =mpidr_hash
+	/*
+	 * This ldmia relies on the memory layout of the mpidr_hash
+	 * struct mpidr_hash.
+	 */
+	ldmia	r8, {r4-r7}	@ r4 = mpidr mask (r5,r6,r7) = l[0,1,2] shifts
+	compute_mpidr_hash	lr, r5, r6, r7, r9, r4
	add	r3, r3, lr, lsl #2
-#endif
+1:
	bl	__cpu_suspend_save
	adr	lr, BSYM(cpu_suspend_abort)
	ldmfd	sp!, {r0, pc}		@ call suspend fn

@@ -81,15 +130,23 @@ ENDPROC(cpu_resume_after_mmu)
	.data
	.align
 ENTRY(cpu_resume)
-#ifdef CONFIG_SMP
-	adr	r0, sleep_save_sp
-	ALT_SMP(mrc p15, 0, r1, c0, c0, 5)
-	ALT_UP(mov r1, #0)
-	and	r1, r1, #15
-	ldr	r0, [r0, r1, lsl #2]	@ stack phys addr
-#else
-	ldr	r0, sleep_save_sp	@ stack phys addr
-#endif
+	mov	r1, #0
+	ALT_SMP(mrc p15, 0, r0, c0, c0, 5)
+	ALT_UP_B(1f)
+	adr	r2, mpidr_hash_ptr
+	ldr	r3, [r2]
+	add	r2, r2, r3		@ r2 = struct mpidr_hash phys address
+	/*
+	 * This ldmia relies on the memory layout of the mpidr_hash
+	 * struct mpidr_hash.
+	 */
+	ldmia	r2, { r3-r6 }	@ r3 = mpidr mask (r4,r5,r6) = l[0,1,2] shifts
+	compute_mpidr_hash	r1, r4, r5, r6, r0, r3
+1:
+	adr	r0, _sleep_save_sp
+	ldr	r0, [r0, #SLEEP_SAVE_SP_PHYS]
+	ldr	r0, [r0, r1, lsl #2]

	setmode	PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1  @ set SVC, irqs off
	@ load phys pgd, stack, resume fn
 ARM(	ldmia	r0!, {r1, sp, pc}	)

@@ -98,7 +155,11 @@ THUMB(	mov	sp, r2	)
 THUMB(	bx	r3	)
 ENDPROC(cpu_resume)

-sleep_save_sp:
-	.rept	CONFIG_NR_CPUS
-	.long	0				@ preserve stack phys ptr here
-	.endr
+	.align 2
+mpidr_hash_ptr:
+	.long	mpidr_hash - .			@ mpidr_hash struct offset
+
+	.type	sleep_save_sp, #object
+ENTRY(sleep_save_sp)
+_sleep_save_sp:
+	.space	SLEEP_SAVE_SP_SZ		@ struct sleep_save_sp

arch/arm/kernel/smp.c
@@ -45,6 +45,7 @@
 #include <asm/smp_plat.h>
 #include <asm/virt.h>
 #include <asm/mach/arch.h>
+#include <asm/mpu.h>

 /*
  * as from 2.5, kernels no longer have an init_tasks structure

@@ -78,6 +79,13 @@ void __init smp_set_ops(struct smp_operations *ops)
	smp_ops = *ops;
 };

+static unsigned long get_arch_pgd(pgd_t *pgd)
+{
+	phys_addr_t pgdir = virt_to_phys(pgd);
+	BUG_ON(pgdir & ARCH_PGD_MASK);
+	return pgdir >> ARCH_PGD_SHIFT;
+}
+
 int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
 {
	int ret;

@@ -87,8 +95,14 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
-	secondary_data.pgdir = virt_to_phys(idmap_pgd);
-	secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
+#ifdef CONFIG_ARM_MPU
+	secondary_data.mpu_rgn_szr = mpu_rgn_info.rgns[MPU_RAM_REGION].drsr;
+#endif
+
+#ifdef CONFIG_MMU
+	secondary_data.pgdir = get_arch_pgd(idmap_pgd);
+	secondary_data.swapper_pg_dir = get_arch_pgd(swapper_pg_dir);
+#endif
	__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
	outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));

@@ -112,9 +126,8 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

-	secondary_data.stack = NULL;
-	secondary_data.pgdir = 0;
-
+	memset(&secondary_data, 0, sizeof(secondary_data));
	return ret;
 }

arch/arm/kernel/suspend.c
@@ -1,15 +1,54 @@
 #include <linux/init.h>
+#include <linux/slab.h>

+#include <asm/cacheflush.h>
 #include <asm/idmap.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/memory.h>
+#include <asm/smp_plat.h>
 #include <asm/suspend.h>
 #include <asm/tlbflush.h>

 extern int __cpu_suspend(unsigned long, int (*)(unsigned long));
 extern void cpu_resume_mmu(void);

+#ifdef CONFIG_MMU
+/*
+ * Hide the first two arguments to __cpu_suspend - these are an implementation
+ * detail which platform code shouldn't have to know about.
+ */
+int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+{
+	struct mm_struct *mm = current->active_mm;
+	int ret;
+
+	if (!idmap_pgd)
+		return -EINVAL;
+
+	/*
+	 * Provide a temporary page table with an identity mapping for
+	 * the MMU-enable code, required for resuming.  On successful
+	 * resume (indicated by a zero return code), we need to switch
+	 * back to the correct page tables.
+	 */
+	ret = __cpu_suspend(arg, fn);
+	if (ret == 0) {
+		cpu_switch_mm(mm->pgd, mm);
+		local_flush_bp_all();
+		local_flush_tlb_all();
+	}
+
+	return ret;
+}
+#else
+int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+{
+	return __cpu_suspend(arg, fn);
+}
+#define	idmap_pgd	NULL
+#endif
+
 /*
 * This is called by __cpu_suspend() to save the state, and do whatever
 * flushing is required to ensure that when the CPU goes to sleep we have

@@ -47,30 +86,19 @@ void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr)
		virt_to_phys(save_ptr) + sizeof(*save_ptr));
 }

-/*
- * Hide the first two arguments to __cpu_suspend - these are an implementation
- * detail which platform code shouldn't have to know about.
- */
-int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+extern struct sleep_save_sp sleep_save_sp;
+
+static int cpu_suspend_alloc_sp(void)
 {
-	struct mm_struct *mm = current->active_mm;
-	int ret;
+	void *ctx_ptr;
+	/* ctx_ptr is an array of physical addresses */
+	ctx_ptr = kcalloc(mpidr_hash_size(), sizeof(u32), GFP_KERNEL);

-	if (!idmap_pgd)
-		return -EINVAL;
-
-	/*
-	 * Provide a temporary page table with an identity mapping for
-	 * the MMU-enable code, required for resuming.  On successful
-	 * resume (indicated by a zero return code), we need to switch
-	 * back to the correct page tables.
-	 */
-	ret = __cpu_suspend(arg, fn);
-	if (ret == 0) {
-		cpu_switch_mm(mm->pgd, mm);
-		local_flush_bp_all();
-		local_flush_tlb_all();
-	}
-
-	return ret;
+	if (WARN_ON(!ctx_ptr))
+		return -ENOMEM;
+	sleep_save_sp.save_ptr_stash = ctx_ptr;
+	sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr);
+	sync_cache_w(&sleep_save_sp);
+	return 0;
 }
+early_initcall(cpu_suspend_alloc_sp);
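
Together with the sleep.S changes above, the flow is: cpu_suspend_alloc_sp() publishes a physical array sized by mpidr_hash_size(), __cpu_suspend stores each CPU's context pointer in the slot named by its MPIDR hash, and cpu_resume fetches it back with the MMU still off. A hypothetical C rendering of that indexing (the real lookups are done in assembly):

/* Each CPU's saved-context pointer lives in the slot selected by the
 * MPIDR hash, so a resuming CPU can find it knowing only its own MPIDR.
 * mpidr_hash() here stands in for the compute_mpidr_hash asm macro.
 */
static u32 *save_slot(struct sleep_save_sp *ssp, u32 mpidr)
{
	return &ssp->save_ptr_stash[mpidr_hash(mpidr)];
}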

arch/arm/kernel/traps.c
@@ -812,6 +812,7 @@ static void __init kuser_get_tls_init(unsigned long vectors)

 void __init early_trap_init(void *vectors_base)
 {
+#ifndef CONFIG_CPU_V7M
	unsigned long vectors = (unsigned long)vectors_base;
	extern char __stubs_start[], __stubs_end[];
	extern char __vectors_start[], __vectors_end[];

@@ -843,4 +844,11 @@ void __init early_trap_init(void *vectors_base)

	flush_icache_range(vectors, vectors + PAGE_SIZE);
	modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
+#else /* ifndef CONFIG_CPU_V7M */
+	/*
+	 * on V7-M there is no need to copy the vector table to a dedicated
+	 * memory area. The address is configurable and so a table in the
+	 * kernel image can be used.
+	 */
+#endif
 }