Merge branches 'fixes' and 'misc' into for-linus
@@ -18,6 +18,7 @@
 */

#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/root_dev.h>
@@ -91,8 +92,6 @@ __tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#ifdef CONFIG_BLK_DEV_RAM
static int __init parse_tag_ramdisk(const struct tag *tag)
{
	extern int rd_size, rd_image_start, rd_prompt, rd_doload;

	rd_image_start = tag->u.ramdisk.start;
	rd_doload = (tag->u.ramdisk.flags & 1) == 0;
	rd_prompt = (tag->u.ramdisk.flags & 2) == 0;
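
The two flag tests above read inverted: a set bit suppresses the behaviour. A minimal C sketch of the same decoding with named masks (the bit values come from the assignments above; the mask names are illustrative, not from the kernel):

	#define RAMDISK_FLAG_NO_LOAD	(1 << 0)	/* set: do not load the ramdisk */
	#define RAMDISK_FLAG_NO_PROMPT	(1 << 1)	/* set: do not prompt for it */

	static void decode_ramdisk_flags(unsigned int flags, int *doload, int *prompt)
	{
		*doload = !(flags & RAMDISK_FLAG_NO_LOAD);
		*prompt = !(flags & RAMDISK_FLAG_NO_PROMPT);
	}
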
@@ -458,10 +458,14 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
	int nr, busnr;

	for (nr = busnr = 0; nr < hw->nr_controllers; nr++) {
		sys = kzalloc(sizeof(struct pci_sys_data), GFP_KERNEL);
		if (WARN(!sys, "PCI: unable to allocate sys data!"))
		struct pci_host_bridge *bridge;

		bridge = pci_alloc_host_bridge(sizeof(struct pci_sys_data));
		if (WARN(!bridge, "PCI: unable to allocate bridge!"))
			break;

		sys = pci_host_bridge_priv(bridge);

		sys->busnr = busnr;
		sys->swizzle = hw->swizzle;
		sys->map_irq = hw->map_irq;
@@ -473,34 +477,44 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
		ret = hw->setup(nr, sys);

		if (ret > 0) {
			struct pci_host_bridge *host_bridge;

			ret = pcibios_init_resource(nr, sys, hw->io_optional);
			if (ret) {
				kfree(sys);
				pci_free_host_bridge(bridge);
				break;
			}

			bridge->map_irq = pcibios_map_irq;
			bridge->swizzle_irq = pcibios_swizzle;

			if (hw->scan)
				sys->bus = hw->scan(nr, sys);
			else
				sys->bus = pci_scan_root_bus_msi(parent,
					sys->busnr, hw->ops, sys,
					&sys->resources, hw->msi_ctrl);
				ret = hw->scan(nr, bridge);
			else {
				list_splice_init(&sys->resources,
						 &bridge->windows);
				bridge->dev.parent = parent;
				bridge->sysdata = sys;
				bridge->busnr = sys->busnr;
				bridge->ops = hw->ops;
				bridge->msi = hw->msi_ctrl;
				bridge->align_resource =
						hw->align_resource;

			if (WARN(!sys->bus, "PCI: unable to scan bus!")) {
				kfree(sys);
				ret = pci_scan_root_bus_bridge(bridge);
			}

			if (WARN(ret < 0, "PCI: unable to scan bus!")) {
				pci_free_host_bridge(bridge);
				break;
			}

			sys->bus = bridge->bus;

			busnr = sys->bus->busn_res.end + 1;

			list_add(&sys->node, head);

			host_bridge = pci_find_host_bridge(sys->bus);
			host_bridge->align_resource = hw->align_resource;
		} else {
			kfree(sys);
			pci_free_host_bridge(bridge);
			if (ret < 0)
				break;
		}
@@ -519,8 +533,6 @@ void pci_common_init_dev(struct device *parent, struct hw_pci *hw)
	if (hw->postinit)
		hw->postinit();

	pci_fixup_irqs(pcibios_swizzle, pcibios_map_irq);

	list_for_each_entry(sys, &head, node) {
		struct pci_bus *bus = sys->bus;
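
The hunks above convert the ARM PCI code from kzalloc() + pci_scan_root_bus_msi() to the generic host-bridge API; supplying bridge->map_irq and bridge->swizzle_irq is also what makes the global pci_fixup_irqs() pass redundant. A minimal sketch of that allocation pattern for a hypothetical driver (the foo_* names and the private struct are illustrative; the pci_* calls are the kernel API used above):

	#include <linux/pci.h>
	#include <linux/slab.h>

	struct foo_pci {			/* hypothetical per-bridge state */
		int busnr;
	};

	static int foo_probe(struct device *parent, struct pci_ops *ops,
			     struct list_head *resources)
	{
		struct pci_host_bridge *bridge;
		struct foo_pci *foo;
		int ret;

		/* Bridge and private data are allocated in one step. */
		bridge = pci_alloc_host_bridge(sizeof(*foo));
		if (!bridge)
			return -ENOMEM;
		foo = pci_host_bridge_priv(bridge);

		list_splice_init(resources, &bridge->windows);
		bridge->dev.parent = parent;
		bridge->sysdata = foo;
		bridge->busnr = 0;
		bridge->ops = ops;

		/* Scans the bus and sets bridge->bus on success. */
		ret = pci_scan_root_bus_bridge(bridge);
		if (ret < 0) {
			pci_free_host_bridge(bridge);
			return ret;
		}

		pci_bus_add_devices(bridge->bus);
		return 0;
	}
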
@@ -798,7 +798,10 @@ ENTRY(__switch_to)
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	ldr	r7, [r2, #TI_TASK]
	ldr	r8, =__stack_chk_guard
	ldr	r7, [r7, #TSK_STACK_CANARY]
	.if (TSK_STACK_CANARY > IMM12_MASK)
	add	r7, r7, #TSK_STACK_CANARY & ~IMM12_MASK
	.endif
	ldr	r7, [r7, #TSK_STACK_CANARY & IMM12_MASK]
#endif
#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
@@ -27,6 +27,14 @@

#include "entry-header.S"

saved_psr	.req	r8
#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
saved_pc	.req	r9
#define TRACE(x...) x
#else
saved_pc	.req	lr
#define TRACE(x...)
#endif

	.align	5
#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING))
@@ -141,16 +149,17 @@ ENTRY(vector_swi)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	r8, spsr			@ called from non-FIQ mode, so ok.
	str	lr, [sp, #S_PC]			@ Save calling PC
	str	r8, [sp, #S_PSR]		@ Save CPSR
	mrs	saved_psr, spsr			@ called from non-FIQ mode, so ok.
 TRACE(	mov	saved_pc, lr		)
	str	saved_pc, [sp, #S_PC]		@ Save calling PC
	str	saved_psr, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
#endif
	zero_fp
	alignment_trap r10, ip, __cr_alignment
	enable_irq
	ct_user_exit
	get_thread_info tsk
	asm_trace_hardirqs_on save=0
	enable_irq_notrace
	ct_user_exit save=0

	/*
	 * Get the system call number.
@@ -163,11 +172,11 @@ ENTRY(vector_swi)
	 * value to determine if it is an EABI or an old ABI call.
	 */
#ifdef CONFIG_ARM_THUMB
	tst	r8, #PSR_T_BIT
	tst	saved_psr, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
 USER(	ldreq	r10, [lr, #-4]		)	@ get SWI instruction
 USER(	ldreq	r10, [saved_pc, #-4]	)	@ get SWI instruction
#else
 USER(	ldr	r10, [lr, #-4]		)	@ get SWI instruction
 USER(	ldr	r10, [saved_pc, #-4]	)	@ get SWI instruction
#endif
 ARM_BE8(rev	r10, r10)			@ little endian instruction

@@ -178,15 +187,17 @@ ENTRY(vector_swi)
	 */
#elif defined(CONFIG_ARM_THUMB)
	/* Legacy ABI only, possibly thumb mode. */
	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
	tst	saved_psr, #PSR_T_BIT		@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
 USER(	ldreq	scno, [lr, #-4]		)
 USER(	ldreq	scno, [saved_pc, #-4]	)

#else
	/* Legacy ABI only. */
 USER(	ldr	scno, [lr, #-4]		)	@ get SWI instruction
 USER(	ldr	scno, [saved_pc, #-4]	)	@ get SWI instruction
#endif

	/* saved_psr and saved_pc are now dead */

	uaccess_disable tbl

	adr	tbl, sys_call_table		@ load syscall table pointer
@@ -205,6 +216,12 @@ ENTRY(vector_swi)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#endif
	get_thread_info tsk
	/*
	 * Reload the registers that may have been corrupted on entry to
	 * the syscall assembly (by tracing or context tracking.)
	 */
 TRACE(	ldmia	sp, {r0 - r3}		)

local_restart:
	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
@@ -234,8 +251,9 @@ local_restart:
	 * current task.
	 */
9001:
	sub	lr, lr, #4
	sub	lr, saved_pc, #4
	str	lr, [sp, #S_PC]
	get_thread_info tsk
	b	ret_fast_syscall
#endif
ENDPROC(vector_swi)
@@ -92,12 +92,95 @@
2:	mcount_exit
.endm

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS

.macro __ftrace_regs_caller

	sub	sp, sp, #8	@ space for PC and CPSR OLD_R0,
				@ OLD_R0 will overwrite previous LR

	add	ip, sp, #12	@ move in IP the value of SP as it was
				@ before the push {lr} of the mcount mechanism

	str	lr, [sp, #0]	@ store LR instead of PC

	ldr	lr, [sp, #8]	@ get previous LR

	str	r0, [sp, #8]	@ write r0 as OLD_R0 over previous LR

	stmdb	sp!, {ip, lr}
	stmdb	sp!, {r0-r11, lr}

	@ stack content at this point:
	@ 0 4 48 52 56 60 64 68 72
	@ R0 | R1 | ... | LR | SP + 4 | previous LR | LR | PSR | OLD_R0 |

	mov	r3, sp				@ struct pt_regs*

	ldr	r2, =function_trace_op
	ldr	r2, [r2]			@ pointer to the current
						@ function tracing op

	ldr	r1, [sp, #S_LR]			@ lr of instrumented func

	ldr	lr, [sp, #S_PC]			@ get LR

	mcount_adjust_addr	r0, lr		@ instrumented function

	.globl ftrace_regs_call
ftrace_regs_call:
	bl	ftrace_stub

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl ftrace_graph_regs_call
ftrace_graph_regs_call:
	mov	r0, r0
#endif

	@ pop saved regs
	ldmia	sp!, {r0-r12}			@ restore r0 through r12
	ldr	ip, [sp, #8]			@ restore PC
	ldr	lr, [sp, #4]			@ restore LR
	ldr	sp, [sp, #0]			@ restore SP
	mov	pc, ip				@ return
.endm

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.macro __ftrace_graph_regs_caller

	sub	r0, fp, #4		@ lr of instrumented routine (parent)

	@ called from __ftrace_regs_caller
	ldr	r1, [sp, #S_PC]		@ instrumented routine (func)
	mcount_adjust_addr	r1, r1

	mov	r2, fp			@ frame pointer
	bl	prepare_ftrace_return

	@ pop registers saved in ftrace_regs_caller
	ldmia	sp!, {r0-r12}		@ restore r0 through r12
	ldr	ip, [sp, #8]		@ restore PC
	ldr	lr, [sp, #4]		@ restore LR
	ldr	sp, [sp, #0]		@ restore SP
	mov	pc, ip			@ return

.endm
#endif
#endif

.macro __ftrace_caller suffix
	mcount_enter

	mcount_get_lr	r1		@ lr of instrumented func
	mcount_adjust_addr	r0, lr	@ instrumented function

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	ldr	r2, =function_trace_op
	ldr	r2, [r2]		@ pointer to the current
					@ function tracing op
	mov	r3, #0			@ regs is NULL
#endif

	.globl ftrace_call\suffix
ftrace_call\suffix:
	bl	ftrace_stub
@@ -212,6 +295,15 @@ UNWIND(.fnstart)
	__ftrace_caller
UNWIND(.fnend)
ENDPROC(ftrace_caller)

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
ENTRY(ftrace_regs_caller)
UNWIND(.fnstart)
	__ftrace_regs_caller
UNWIND(.fnend)
ENDPROC(ftrace_regs_caller)
#endif

#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -220,6 +312,14 @@ UNWIND(.fnstart)
	__ftrace_graph_caller
UNWIND(.fnend)
ENDPROC(ftrace_graph_caller)

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
ENTRY(ftrace_graph_regs_caller)
UNWIND(.fnstart)
	__ftrace_graph_regs_caller
UNWIND(.fnend)
ENDPROC(ftrace_graph_regs_caller)
#endif
#endif

.purgem mcount_enter
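
The __ftrace_regs_caller trampoline above builds a struct pt_regs on the stack so that a registered callback can inspect (and modify) the traced function's register state. A sketch of the consumer side, assuming the 4.13-era callback signature that takes pt_regs (my_ops/my_callback are illustrative names, not kernel symbols):

	#include <linux/ftrace.h>

	/* Invoked at the entry of every traced function. */
	static void my_callback(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op, struct pt_regs *regs)
	{
		/* regs is only valid because of FTRACE_OPS_FL_SAVE_REGS. */
		pr_info("traced %pS called from %pS, r0=%lx\n",
			(void *)ip, (void *)parent_ip, regs->ARM_r0);
	}

	static struct ftrace_ops my_ops = {
		.func	= my_callback,
		.flags	= FTRACE_OPS_FL_SAVE_REGS,
	};
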
@@ -141,6 +141,15 @@ int ftrace_update_ftrace_func(ftrace_func_t func)

	ret = ftrace_modify_code(pc, 0, new, false);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (!ret) {
		pc = (unsigned long)&ftrace_regs_call;
		new = ftrace_call_replace(pc, (unsigned long)func);

		ret = ftrace_modify_code(pc, 0, new, false);
	}
#endif

#ifdef CONFIG_OLD_MCOUNT
	if (!ret) {
		pc = (unsigned long)&ftrace_call_old;
@@ -159,11 +168,29 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace(rec);

	new = ftrace_call_replace(ip, adjust_address(rec, addr));

	return ftrace_modify_code(rec->ip, old, new, true);
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	unsigned long new, old;
	unsigned long ip = rec->ip;

	old = ftrace_call_replace(ip, adjust_address(rec, old_addr));

	new = ftrace_call_replace(ip, adjust_address(rec, addr));

	return ftrace_modify_code(rec->ip, old, new, true);
}

#endif

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
@@ -231,6 +258,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
extern unsigned long ftrace_graph_call;
extern unsigned long ftrace_graph_call_old;
extern void ftrace_graph_caller_old(void);
extern unsigned long ftrace_graph_regs_call;
extern void ftrace_graph_regs_caller(void);

static int __ftrace_modify_caller(unsigned long *callsite,
				  void (*func) (void), bool enable)
@@ -253,6 +282,14 @@ static int ftrace_modify_graph_caller(bool enable)
					     ftrace_graph_caller,
					     enable);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	if (!ret)
		ret = __ftrace_modify_caller(&ftrace_graph_regs_call,
					     ftrace_graph_regs_caller,
					     enable);
#endif


#ifdef CONFIG_OLD_MCOUNT
	if (!ret)
		ret = __ftrace_modify_caller(&ftrace_graph_call_old,
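
The new ftrace_modify_call() is what lets the ftrace core retarget an already-patched call site from one trampoline to another, e.g. when an ops that needs registers attaches to a function that is already being traced. A sketch of how such an ops might be attached to a single function with the standard API (the address and ops are illustrative):

	#include <linux/ftrace.h>

	static int attach_probe(unsigned long func_addr, struct ftrace_ops *ops)
	{
		int ret;

		/* Trace only this one location instead of every function. */
		ret = ftrace_set_filter_ip(ops, func_addr, 0, 0);
		if (ret)
			return ret;

		/* On this architecture the core may now rewrite the call
		 * site via ftrace_make_call()/ftrace_modify_call(). */
		return register_ftrace_function(ops);
	}
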
@@ -1090,7 +1090,7 @@ static int __init arch_hw_breakpoint_init(void)
	 * driven low on this core and there isn't an architected way to
	 * determine that.
	 */
	get_online_cpus();
	cpus_read_lock();
	register_undef_hook(&debug_reg_hook);

	/*
@@ -1098,15 +1098,16 @@ static int __init arch_hw_breakpoint_init(void)
	 * assume that a halting debugger will leave the world in a nice state
	 * for us.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "arm/hw_breakpoint:online",
				dbg_reset_online, NULL);
	ret = cpuhp_setup_state_cpuslocked(CPUHP_AP_ONLINE_DYN,
					   "arm/hw_breakpoint:online",
					   dbg_reset_online, NULL);
	unregister_undef_hook(&debug_reg_hook);
	if (WARN_ON(ret < 0) || !cpumask_empty(&debug_err_mask)) {
		core_num_brps = 0;
		core_num_wrps = 0;
		if (ret > 0)
			cpuhp_remove_state_nocalls(ret);
		put_online_cpus();
			cpuhp_remove_state_nocalls_cpuslocked(ret);
		cpus_read_unlock();
		return 0;
	}

@@ -1124,7 +1125,7 @@ static int __init arch_hw_breakpoint_init(void)
			TRAP_HWBKPT, "watchpoint debug exception");
	hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
			TRAP_HWBKPT, "breakpoint debug exception");
	put_online_cpus();
	cpus_read_unlock();

	/* Register PM notifiers. */
	pm_init();
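
This is part of the 4.13 hotplug-locking rework: get_online_cpus()/put_online_cpus() become cpus_read_lock()/cpus_read_unlock(), and work done inside that region must use the *_cpuslocked variants so the hotplug state machine does not try to take the lock a second time. A minimal sketch of the pattern, assuming a hypothetical dbg_online callback:

	#include <linux/cpu.h>
	#include <linux/cpuhotplug.h>

	static int dbg_online(unsigned int cpu)		/* illustrative callback */
	{
		return 0;
	}

	static int __init setup_example(void)
	{
		int ret;

		cpus_read_lock();	/* we hold the hotplug lock ourselves */

		/* _cpuslocked: caller already holds cpus_read_lock() */
		ret = cpuhp_setup_state_cpuslocked(CPUHP_AP_ONLINE_DYN,
						   "example:online",
						   dbg_online, NULL);
		if (ret > 0)		/* dynamic state: an id is returned */
			cpuhp_remove_state_nocalls_cpuslocked(ret);

		cpus_read_unlock();
		return ret < 0 ? ret : 0;
	}
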
@@ -124,5 +124,5 @@ void __kprobes patch_text(void *addr, unsigned int insn)
		.insn = insn,
	};

	stop_machine(patch_text_stop_machine, &patch, NULL);
	stop_machine_cpuslocked(patch_text_stop_machine, &patch, NULL);
}
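
Same locking rework: stop_machine() takes the hotplug lock internally, while stop_machine_cpuslocked() asserts that the caller already holds it, which is the case for patch_text()'s kprobes callers. A sketch of the calling convention (the patching callback is illustrative):

	#include <linux/cpu.h>
	#include <linux/stop_machine.h>

	static int do_patch(void *data)		/* runs with all CPUs stopped */
	{
		/* ... poke kernel text here ... */
		return 0;
	}

	static void patch_example(void *data)
	{
		cpus_read_lock();
		/* caller holds the hotplug lock, so use the _cpuslocked form */
		stop_machine_cpuslocked(do_patch, data, NULL);
		cpus_read_unlock();
	}
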
@@ -552,7 +552,7 @@ static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
	return 0;
}

static struct of_device_id armv6_pmu_of_device_ids[] = {
static const struct of_device_id armv6_pmu_of_device_ids[] = {
	{.compatible = "arm,arm11mpcore-pmu", .data = armv6mpcore_pmu_init},
	{.compatible = "arm,arm1176-pmu", .data = armv6_1176_pmu_init},
	{.compatible = "arm,arm1136-pmu", .data = armv6_1136_pmu_init},
@@ -123,10 +123,10 @@ void __show_regs(struct pt_regs *regs)

	print_symbol("PC is at %s\n", instruction_pointer(regs));
	print_symbol("LR is at %s\n", regs->ARM_lr);
	printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
	       "sp : %08lx ip : %08lx fp : %08lx\n",
		regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
		regs->ARM_sp, regs->ARM_ip, regs->ARM_fp);
	printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n",
	       regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr);
	printk("sp : %08lx ip : %08lx fp : %08lx\n",
	       regs->ARM_sp, regs->ARM_ip, regs->ARM_fp);
	printk("r10: %08lx r9 : %08lx r8 : %08lx\n",
		regs->ARM_r10, regs->ARM_r9,
		regs->ARM_r8);
@@ -404,9 +404,17 @@ static unsigned long sigpage_addr(const struct mm_struct *mm,
static struct page *signal_page;
extern struct page *get_signal_page(void);

static int sigpage_mremap(const struct vm_special_mapping *sm,
			  struct vm_area_struct *new_vma)
{
	current->mm->context.sigpage = new_vma->vm_start;
	return 0;
}

static const struct vm_special_mapping sigpage_mapping = {
	.name = "[sigpage]",
	.pages = &signal_page,
	.mremap = sigpage_mremap,
};

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
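
The new sigpage_mremap() hook keeps mm->context.sigpage valid when userspace moves the mapping (checkpoint/restore does exactly that). The vm_special_mapping is wired up when the page is installed; a sketch of that step, modelled on (but not copied from) the arch_setup_additional_pages() body that follows, with addr obtained elsewhere:

	static int install_sigpage(struct mm_struct *mm, unsigned long addr)
	{
		struct vm_area_struct *vma;

		/* Creates a VMA backed by signal_page and attaches the
		 * sigpage_mapping ops, including the .mremap hook above. */
		vma = _install_special_mapping(mm, addr, PAGE_SIZE,
					       VM_READ | VM_EXEC |
					       VM_MAYREAD | VM_MAYEXEC,
					       &sigpage_mapping);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		mm->context.sigpage = addr;
		return 0;
	}
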
@@ -555,8 +555,7 @@ static DEFINE_RAW_SPINLOCK(stop_lock);
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
	if (system_state <= SYSTEM_RUNNING) {
		raw_spin_lock(&stop_lock);
		pr_crit("CPU%u: stopping\n", cpu);
		dump_stack();
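
The simplified test relies on the ordering of enum system_states: everything up to and including SYSTEM_RUNNING counts as early or normal operation. Roughly, from the include/linux/kernel.h of that era (SYSTEM_SCHEDULING had just been introduced; check your tree):

	enum system_states {
		SYSTEM_BOOTING,
		SYSTEM_SCHEDULING,	/* new in this cycle */
		SYSTEM_RUNNING,
		SYSTEM_HALT,
		SYSTEM_POWER_OFF,
		SYSTEM_RESTART,
	};

so system_state <= SYSTEM_RUNNING covers the old two-way comparison plus the new intermediate state.
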
@@ -403,7 +403,7 @@ out:
	WARN(err, "twd_local_timer_of_register failed (%d)\n", err);
	return err;
}
CLOCKSOURCE_OF_DECLARE(arm_twd_a9, "arm,cortex-a9-twd-timer", twd_local_timer_of_register);
CLOCKSOURCE_OF_DECLARE(arm_twd_a5, "arm,cortex-a5-twd-timer", twd_local_timer_of_register);
CLOCKSOURCE_OF_DECLARE(arm_twd_11mp, "arm,arm11mp-twd-timer", twd_local_timer_of_register);
TIMER_OF_DECLARE(arm_twd_a9, "arm,cortex-a9-twd-timer", twd_local_timer_of_register);
TIMER_OF_DECLARE(arm_twd_a5, "arm,cortex-a5-twd-timer", twd_local_timer_of_register);
TIMER_OF_DECLARE(arm_twd_11mp, "arm,arm11mp-twd-timer", twd_local_timer_of_register);
#endif
@@ -171,6 +171,7 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	__save_stack_trace(tsk, trace, 1);
}
EXPORT_SYMBOL(save_stack_trace_tsk);

void save_stack_trace(struct stack_trace *trace)
{
@@ -120,6 +120,6 @@ void __init time_init(void)
#ifdef CONFIG_COMMON_CLK
		of_clk_init(NULL);
#endif
		clocksource_probe();
		timer_probe();
	}
}
@@ -11,6 +11,7 @@
 * for more details.
 */

#include <linux/arch_topology.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
@@ -44,77 +45,6 @@
 * to run the rebalance_domains for all idle cores and the cpu_capacity can be
 * updated during this sequence.
 */
static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
static DEFINE_MUTEX(cpu_scale_mutex);

unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
	return per_cpu(cpu_scale, cpu);
}

static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
{
	per_cpu(cpu_scale, cpu) = capacity;
}

#ifdef CONFIG_PROC_SYSCTL
static ssize_t cpu_capacity_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sprintf(buf, "%lu\n",
		       arch_scale_cpu_capacity(NULL, cpu->dev.id));
}

static ssize_t cpu_capacity_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	int this_cpu = cpu->dev.id, i;
	unsigned long new_capacity;
	ssize_t ret;

	if (count) {
		ret = kstrtoul(buf, 0, &new_capacity);
		if (ret)
			return ret;
		if (new_capacity > SCHED_CAPACITY_SCALE)
			return -EINVAL;

		mutex_lock(&cpu_scale_mutex);
		for_each_cpu(i, &cpu_topology[this_cpu].core_sibling)
			set_capacity_scale(i, new_capacity);
		mutex_unlock(&cpu_scale_mutex);
	}

	return count;
}

static DEVICE_ATTR_RW(cpu_capacity);

static int register_cpu_capacity_sysctl(void)
{
	int i;
	struct device *cpu;

	for_each_possible_cpu(i) {
		cpu = get_cpu_device(i);
		if (!cpu) {
			pr_err("%s: too early to get CPU%d device!\n",
			       __func__, i);
			continue;
		}
		device_create_file(cpu, &dev_attr_cpu_capacity);
	}

	return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);
#endif

#ifdef CONFIG_OF
struct cpu_efficiency {
@@ -143,145 +73,6 @@ static unsigned long *__cpu_capacity;

static unsigned long middle_capacity = 1;
static bool cap_from_dt = true;
static u32 *raw_capacity;
static bool cap_parsing_failed;
static u32 capacity_scale;

static int __init parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
	int ret = 1;
	u32 cpu_capacity;

	if (cap_parsing_failed)
		return !ret;

	ret = of_property_read_u32(cpu_node,
				   "capacity-dmips-mhz",
				   &cpu_capacity);
	if (!ret) {
		if (!raw_capacity) {
			raw_capacity = kcalloc(num_possible_cpus(),
					       sizeof(*raw_capacity),
					       GFP_KERNEL);
			if (!raw_capacity) {
				pr_err("cpu_capacity: failed to allocate memory for raw capacities\n");
				cap_parsing_failed = true;
				return !ret;
			}
		}
		capacity_scale = max(cpu_capacity, capacity_scale);
		raw_capacity[cpu] = cpu_capacity;
		pr_debug("cpu_capacity: %s cpu_capacity=%u (raw)\n",
			 cpu_node->full_name, raw_capacity[cpu]);
	} else {
		if (raw_capacity) {
			pr_err("cpu_capacity: missing %s raw capacity\n",
			       cpu_node->full_name);
			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		}
		cap_parsing_failed = true;
		kfree(raw_capacity);
	}

	return !ret;
}

static void normalize_cpu_capacity(void)
{
	u64 capacity;
	int cpu;

	if (!raw_capacity || cap_parsing_failed)
		return;

	pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
	mutex_lock(&cpu_scale_mutex);
	for_each_possible_cpu(cpu) {
		capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
			/ capacity_scale;
		set_capacity_scale(cpu, capacity);
		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
			 cpu, arch_scale_cpu_capacity(NULL, cpu));
	}
	mutex_unlock(&cpu_scale_mutex);
}

#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static bool cap_parsing_done;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);

static int
init_cpu_capacity_callback(struct notifier_block *nb,
			   unsigned long val,
			   void *data)
{
	struct cpufreq_policy *policy = data;
	int cpu;

	if (cap_parsing_failed || cap_parsing_done)
		return 0;

	switch (val) {
	case CPUFREQ_NOTIFY:
		pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
			 cpumask_pr_args(policy->related_cpus),
			 cpumask_pr_args(cpus_to_visit));
		cpumask_andnot(cpus_to_visit,
			       cpus_to_visit,
			       policy->related_cpus);
		for_each_cpu(cpu, policy->related_cpus) {
			raw_capacity[cpu] = arch_scale_cpu_capacity(NULL, cpu) *
					    policy->cpuinfo.max_freq / 1000UL;
			capacity_scale = max(raw_capacity[cpu], capacity_scale);
		}
		if (cpumask_empty(cpus_to_visit)) {
			normalize_cpu_capacity();
			kfree(raw_capacity);
			pr_debug("cpu_capacity: parsing done\n");
			cap_parsing_done = true;
			schedule_work(&parsing_done_work);
		}
	}
	return 0;
}

static struct notifier_block init_cpu_capacity_notifier = {
	.notifier_call = init_cpu_capacity_callback,
};

static int __init register_cpufreq_notifier(void)
{
	if (cap_parsing_failed)
		return -EINVAL;

	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {
		pr_err("cpu_capacity: failed to allocate memory for cpus_to_visit\n");
		return -ENOMEM;
	}
	cpumask_copy(cpus_to_visit, cpu_possible_mask);

	return cpufreq_register_notifier(&init_cpu_capacity_notifier,
					 CPUFREQ_POLICY_NOTIFIER);
}
core_initcall(register_cpufreq_notifier);

static void parsing_done_workfn(struct work_struct *work)
{
	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
				    CPUFREQ_POLICY_NOTIFIER);
}

#else
static int __init free_raw_capacity(void)
{
	kfree(raw_capacity);

	return 0;
}
core_initcall(free_raw_capacity);
#endif

/*
 * Iterate all CPUs' descriptor in DT and compute the efficiency
@@ -320,7 +111,7 @@ static void __init parse_dt_topology(void)
			continue;
		}

		if (parse_cpu_capacity(cn, cpu)) {
		if (topology_parse_cpu_capacity(cn, cpu)) {
			of_node_put(cn);
			continue;
		}
@@ -368,8 +159,8 @@ static void __init parse_dt_topology(void)
	middle_capacity = ((max_capacity / 3)
			   >> (SCHED_CAPACITY_SHIFT-1)) + 1;

	if (cap_from_dt && !cap_parsing_failed)
		normalize_cpu_capacity();
	if (cap_from_dt)
		topology_normalize_cpu_scale();
}

/*
@@ -382,10 +173,10 @@ static void update_cpu_capacity(unsigned int cpu)
	if (!cpu_capacity(cpu) || cap_from_dt)
		return;

	set_capacity_scale(cpu, cpu_capacity(cpu) / middle_capacity);
	topology_set_cpu_scale(cpu, cpu_capacity(cpu) / middle_capacity);

	pr_info("CPU%u: update cpu_capacity %lu\n",
		cpu, arch_scale_cpu_capacity(NULL, cpu));
		cpu, topology_get_cpu_scale(NULL, cpu));
}

#else
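
The topology.c deletions above move the cpu_scale bookkeeping into the shared driver (drivers/base/arch_topology.c); the arch code now only calls the generic helpers. A sketch of the replacement flow, assuming the 4.13-era <linux/arch_topology.h> signatures (check your tree):

	#include <linux/arch_topology.h>
	#include <linux/of.h>

	/* Parse capacity-dmips-mhz for each CPU node, then rescale so the
	 * biggest CPU ends up at SCHED_CAPACITY_SCALE (1024). */
	static void __init example_parse_capacities(void)
	{
		struct device_node *cn;
		int cpu;

		for_each_possible_cpu(cpu) {
			cn = of_get_cpu_node(cpu, NULL);
			if (!cn)
				continue;
			if (!topology_parse_cpu_capacity(cn, cpu))
				pr_warn("no capacity-dmips-mhz for CPU%d\n",
					cpu);
			of_node_put(cn);
		}
		topology_normalize_cpu_scale();
	}
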
@@ -54,8 +54,26 @@ static const struct vm_special_mapping vdso_data_mapping = {
	.pages = &vdso_data_page,
};

static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
	unsigned long vdso_size;

	/* without VVAR page */
	vdso_size = (vdso_total_pages - 1) << PAGE_SHIFT;

	if (vdso_size != new_size)
		return -EINVAL;

	current->mm->context.vdso = new_vma->vm_start;

	return 0;
}

static struct vm_special_mapping vdso_text_mapping __ro_after_init = {
	.name = "[vdso]",
	.mremap = vdso_mremap,
};

struct elfinfo {
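
Both mremap hooks in this commit exist mainly so checkpoint/restore tools can relocate these special mappings; without them the kernel would lose track of where the sigpage/vDSO went. A user-space sketch of the operation the hook services (the addresses are illustrative; the size must match the vDSO text exactly or vdso_mremap() above returns -EINVAL):

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <sys/mman.h>

	/* old_addr/size would be read from the [vdso] line of
	 * /proc/self/maps before calling this. */
	static void *move_vdso(void *old_addr, size_t size, void *new_addr)
	{
		void *p = mremap(old_addr, size, size,
				 MREMAP_MAYMOVE | MREMAP_FIXED, new_addr);
		if (p == MAP_FAILED)
			perror("mremap");
		return p;
	}
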