Merge commit 'v3.7-rc1' into stable/for-linus-3.7
* commit 'v3.7-rc1': (10892 commits)
  Linux 3.7-rc1
  x86, boot: Explicitly include autoconf.h for hostprogs
  perf: Fix UAPI fallout
  ARM: config: make sure that platforms are ordered by option string
  ARM: config: sort select statements alphanumerically
  UAPI: (Scripted) Disintegrate include/linux/byteorder
  UAPI: (Scripted) Disintegrate include/linux
  UAPI: Unexport linux/blk_types.h
  UAPI: Unexport part of linux/ppp-comp.h
  perf: Handle new rbtree implementation
  procfs: don't need a PATH_MAX allocation to hold a string representation of an int
  vfs: embed struct filename inside of names_cache allocation if possible
  audit: make audit_inode take struct filename
  vfs: make path_openat take a struct filename pointer
  vfs: turn do_path_lookup into wrapper around struct filename variant
  audit: allow audit code to satisfy getname requests from its names_list
  vfs: define struct filename and have getname() return it
  btrfs: Fix compilation with user namespace support enabled
  userns: Fix posix_acl_file_xattr_userns gid conversion
  userns: Properly print bluetooth socket uids
  ...
@@ -56,6 +56,8 @@
#include <asm/ftrace.h>
#include <asm/percpu.h>
#include <asm/asm.h>
#include <asm/rcu.h>
#include <asm/smap.h>
#include <linux/err.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
@@ -68,25 +70,51 @@
.section .entry.text, "ax"

#ifdef CONFIG_FUNCTION_TRACER

#ifdef CC_USING_FENTRY
# define function_hook __fentry__
#else
# define function_hook mcount
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(mcount)

ENTRY(function_hook)
retq
END(mcount)
END(function_hook)

/* skip is set if stack has been adjusted */
.macro ftrace_caller_setup skip=0
MCOUNT_SAVE_FRAME \skip

/* Load the ftrace_ops into the 3rd parameter */
leaq function_trace_op, %rdx

/* Load ip into the first parameter */
movq RIP(%rsp), %rdi
subq $MCOUNT_INSN_SIZE, %rdi
/* Load the parent_ip into the second parameter */
#ifdef CC_USING_FENTRY
movq SS+16(%rsp), %rsi
#else
movq 8(%rbp), %rsi
#endif
.endm

ENTRY(ftrace_caller)
/* Check if tracing was disabled (quick check) */
cmpl $0, function_trace_stop
jne ftrace_stub

MCOUNT_SAVE_FRAME

movq 0x38(%rsp), %rdi
movq 8(%rbp), %rsi
subq $MCOUNT_INSN_SIZE, %rdi
ftrace_caller_setup
/* regs go into 4th parameter (but make it NULL) */
movq $0, %rcx

GLOBAL(ftrace_call)
call ftrace_stub

MCOUNT_RESTORE_FRAME
ftrace_return:

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
GLOBAL(ftrace_graph_call)
@@ -97,8 +125,78 @@ GLOBAL(ftrace_stub)
retq
END(ftrace_caller)

ENTRY(ftrace_regs_caller)
/* Save the current flags before compare (in SS location)*/
pushfq

/* Check if tracing was disabled (quick check) */
cmpl $0, function_trace_stop
jne ftrace_restore_flags

/* skip=8 to skip flags saved in SS */
ftrace_caller_setup 8

/* Save the rest of pt_regs */
movq %r15, R15(%rsp)
movq %r14, R14(%rsp)
movq %r13, R13(%rsp)
movq %r12, R12(%rsp)
movq %r11, R11(%rsp)
movq %r10, R10(%rsp)
movq %rbp, RBP(%rsp)
movq %rbx, RBX(%rsp)
/* Copy saved flags */
movq SS(%rsp), %rcx
movq %rcx, EFLAGS(%rsp)
/* Kernel segments */
movq $__KERNEL_DS, %rcx
movq %rcx, SS(%rsp)
movq $__KERNEL_CS, %rcx
movq %rcx, CS(%rsp)
/* Stack - skipping return address */
leaq SS+16(%rsp), %rcx
movq %rcx, RSP(%rsp)

/* regs go into 4th parameter */
leaq (%rsp), %rcx

GLOBAL(ftrace_regs_call)
call ftrace_stub

/* Copy flags back to SS, to restore them */
movq EFLAGS(%rsp), %rax
movq %rax, SS(%rsp)

/* Handlers can change the RIP */
movq RIP(%rsp), %rax
movq %rax, SS+8(%rsp)

/* restore the rest of pt_regs */
movq R15(%rsp), %r15
movq R14(%rsp), %r14
movq R13(%rsp), %r13
movq R12(%rsp), %r12
movq R10(%rsp), %r10
movq RBP(%rsp), %rbp
movq RBX(%rsp), %rbx

/* skip=8 to skip flags saved in SS */
MCOUNT_RESTORE_FRAME 8

/* Restore flags */
popfq

jmp ftrace_return
ftrace_restore_flags:
popfq
jmp ftrace_stub

END(ftrace_regs_caller)


#else /* ! CONFIG_DYNAMIC_FTRACE */
ENTRY(mcount)

ENTRY(function_hook)
cmpl $0, function_trace_stop
jne ftrace_stub

@@ -119,8 +217,12 @@ GLOBAL(ftrace_stub)
trace:
MCOUNT_SAVE_FRAME

movq 0x38(%rsp), %rdi
movq RIP(%rsp), %rdi
#ifdef CC_USING_FENTRY
movq SS+16(%rsp), %rsi
#else
movq 8(%rbp), %rsi
#endif
subq $MCOUNT_INSN_SIZE, %rdi

call *ftrace_trace_function
@@ -128,20 +230,22 @@ trace:
MCOUNT_RESTORE_FRAME

jmp ftrace_stub
END(mcount)
END(function_hook)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
cmpl $0, function_trace_stop
jne ftrace_stub

MCOUNT_SAVE_FRAME

#ifdef CC_USING_FENTRY
leaq SS+16(%rsp), %rdi
movq $0, %rdx /* No framepointers needed */
#else
leaq 8(%rbp), %rdi
movq 0x38(%rsp), %rsi
movq (%rbp), %rdx
#endif
movq RIP(%rsp), %rsi
subq $MCOUNT_INSN_SIZE, %rsi

call prepare_ftrace_return
@@ -342,15 +446,15 @@ ENDPROC(native_usergs_sysret64)
.macro SAVE_ARGS_IRQ
cld
/* start from rbp in pt_regs and jump over */
movq_cfi rdi, RDI-RBP
movq_cfi rsi, RSI-RBP
movq_cfi rdx, RDX-RBP
movq_cfi rcx, RCX-RBP
movq_cfi rax, RAX-RBP
movq_cfi r8, R8-RBP
movq_cfi r9, R9-RBP
movq_cfi r10, R10-RBP
movq_cfi r11, R11-RBP
movq_cfi rdi, (RDI-RBP)
movq_cfi rsi, (RSI-RBP)
movq_cfi rdx, (RDX-RBP)
movq_cfi rcx, (RCX-RBP)
movq_cfi rax, (RAX-RBP)
movq_cfi r8, (R8-RBP)
movq_cfi r9, (R9-RBP)
movq_cfi r10, (R10-RBP)
movq_cfi r11, (R11-RBP)

/* Save rbp so that we can unwind from get_irq_regs() */
movq_cfi rbp, 0
@@ -384,7 +488,7 @@ ENDPROC(native_usergs_sysret64)
.endm

ENTRY(save_rest)
PARTIAL_FRAME 1 REST_SKIP+8
PARTIAL_FRAME 1 (REST_SKIP+8)
movq 5*8+16(%rsp), %r11 /* save return address */
movq_cfi rbx, RBX+16
movq_cfi rbp, RBP+16
@@ -440,7 +544,7 @@ ENTRY(ret_from_fork)

LOCK ; btr $TIF_FORK,TI_flags(%r8)

pushq_cfi kernel_eflags(%rip)
pushq_cfi $0x0002
popfq_cfi # reset kernel eflags

call schedule_tail # rdi: 'prev' task parameter
@@ -450,7 +554,7 @@ ENTRY(ret_from_fork)
RESTORE_REST

testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
jz retint_restore_args
jz 1f

testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
jnz int_ret_from_sys_call
@@ -458,6 +562,14 @@ ENTRY(ret_from_fork)
RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
jmp ret_from_sys_call # go to the SYSRET fastpath

1:
subq $REST_SKIP, %rsp # leave space for volatiles
CFI_ADJUST_CFA_OFFSET REST_SKIP
movq %rbp, %rdi
call *%rbx
movl $0, RAX(%rsp)
RESTORE_REST
jmp int_ret_from_sys_call
CFI_ENDPROC
END(ret_from_fork)

@@ -465,7 +577,8 @@ END(ret_from_fork)
* System call entry. Up to 6 arguments in registers are supported.
*
* SYSCALL does not save anything on the stack and does not change the
* stack pointer.
* stack pointer. However, it does mask the flags register for us, so
* CLD and CLAC are not needed.
*/

/*
@@ -565,7 +678,7 @@ sysret_careful:
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
pushq_cfi %rdi
call schedule
SCHEDULE_USER
popq_cfi %rdi
jmp sysret_check

@@ -678,7 +791,7 @@ int_careful:
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
pushq_cfi %rdi
call schedule
SCHEDULE_USER
popq_cfi %rdi
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
@@ -757,7 +870,6 @@ ENTRY(stub_execve)
PARTIAL_FRAME 0
SAVE_REST
FIXUP_TOP_OF_STACK %r11
movq %rsp, %rcx
call sys_execve
RESTORE_TOP_OF_STACK %r11
movq %rax,RAX(%rsp)
@@ -807,8 +919,7 @@ ENTRY(stub_x32_execve)
PARTIAL_FRAME 0
SAVE_REST
FIXUP_TOP_OF_STACK %r11
movq %rsp, %rcx
call sys32_execve
call compat_sys_execve
RESTORE_TOP_OF_STACK %r11
movq %rax,RAX(%rsp)
RESTORE_REST
@@ -884,6 +995,7 @@ END(interrupt)
*/
.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
ASM_CLAC
XCPT_FRAME
addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */
interrupt do_IRQ
@@ -974,7 +1086,7 @@ retint_careful:
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_NONE)
pushq_cfi %rdi
call schedule
SCHEDULE_USER
popq_cfi %rdi
GET_THREAD_INFO(%rcx)
DISABLE_INTERRUPTS(CLBR_NONE)
@@ -1023,6 +1135,7 @@ END(common_interrupt)
*/
.macro apicinterrupt num sym do_sym
ENTRY(\sym)
ASM_CLAC
INTR_FRAME
pushq_cfi $~(\num)
.Lcommon_\sym:
@@ -1077,6 +1190,7 @@ apicinterrupt IRQ_WORK_VECTOR \
*/
.macro zeroentry sym do_sym
ENTRY(\sym)
ASM_CLAC
INTR_FRAME
PARAVIRT_ADJUST_EXCEPTION_FRAME
pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
@@ -1094,6 +1208,7 @@ END(\sym)

.macro paranoidzeroentry sym do_sym
ENTRY(\sym)
ASM_CLAC
INTR_FRAME
PARAVIRT_ADJUST_EXCEPTION_FRAME
pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
@@ -1112,6 +1227,7 @@ END(\sym)
#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
.macro paranoidzeroentry_ist sym do_sym ist
ENTRY(\sym)
ASM_CLAC
INTR_FRAME
PARAVIRT_ADJUST_EXCEPTION_FRAME
pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
@@ -1131,6 +1247,7 @@ END(\sym)

.macro errorentry sym do_sym
ENTRY(\sym)
ASM_CLAC
XCPT_FRAME
PARAVIRT_ADJUST_EXCEPTION_FRAME
subq $ORIG_RAX-R15, %rsp
@@ -1149,6 +1266,7 @@ END(\sym)
/* error code is on the stack already */
.macro paranoiderrorentry sym do_sym
ENTRY(\sym)
ASM_CLAC
XCPT_FRAME
PARAVIRT_ADJUST_EXCEPTION_FRAME
subq $ORIG_RAX-R15, %rsp
@@ -1206,52 +1324,6 @@ bad_gs:
jmp 2b
.previous

ENTRY(kernel_thread_helper)
pushq $0 # fake return address
CFI_STARTPROC
/*
* Here we are in the child and the registers are set as they were
* at kernel_thread() invocation in the parent.
*/
call *%rsi
# exit
mov %eax, %edi
call do_exit
ud2 # padding for call trace
CFI_ENDPROC
END(kernel_thread_helper)

/*
* execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
*
* C extern interface:
* extern long execve(const char *name, char **argv, char **envp)
*
* asm input arguments:
* rdi: name, rsi: argv, rdx: envp
*
* We want to fallback into:
* extern long sys_execve(const char *name, char **argv,char **envp, struct pt_regs *regs)
*
* do_sys_execve asm fallback arguments:
* rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
*/
ENTRY(kernel_execve)
CFI_STARTPROC
FAKE_STACK_FRAME $0
SAVE_ALL
movq %rsp,%rcx
call sys_execve
movq %rax, RAX(%rsp)
RESTORE_REST
testq %rax,%rax
je int_ret_from_sys_call
RESTORE_ARGS
UNFAKE_STACK_FRAME
ret
CFI_ENDPROC
END(kernel_execve)

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(call_softirq)
CFI_STARTPROC
@@ -1449,7 +1521,7 @@ paranoid_userspace:
paranoid_schedule:
TRACE_IRQS_ON
ENABLE_INTERRUPTS(CLBR_ANY)
call schedule
SCHEDULE_USER
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF
jmp paranoid_userspace