Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 asm updates from Ingo Molnar:
 "The main changes in this development cycle were:

   - a large number of call stack dumping/printing improvements: higher
     robustness, better cross-context dumping, improved output, etc.
     (Josh Poimboeuf)

   - vDSO getcpu() performance improvement for future Intel CPUs with
     the RDPID instruction (Andy Lutomirski)

   - add two new Intel AVX512 features and the CPUID support
     infrastructure for it: AVX512IFMA and AVX512VBMI. (Gayatri
     Kammela, He Chen)

   - more copy-user unification (Borislav Petkov)

   - entry code assembly macro simplifications (Alexander Kuleshov)

   - vDSO C/R support improvements (Dmitry Safonov)

   - misc fixes and cleanups (Borislav Petkov, Paul Bolle)"

* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (40 commits)
  scripts/decode_stacktrace.sh: Fix address line detection on x86
  x86/boot/64: Use defines for page size
  x86/dumpstack: Make stack name tags more comprehensible
  selftests/x86: Add test_vdso to test getcpu()
  x86/vdso: Use RDPID in preference to LSL when available
  x86/dumpstack: Handle NULL stack pointer in show_trace_log_lvl()
  x86/cpufeatures: Enable new AVX512 cpu features
  x86/cpuid: Provide get_scattered_cpuid_leaf()
  x86/cpuid: Cleanup cpuid_regs definitions
  x86/copy_user: Unify the code by removing the 64-bit asm _copy_*_user() variants
  x86/unwind: Ensure stack grows down
  x86/vdso: Set vDSO pointer only after success
  x86/prctl/uapi: Remove #ifdef for CHECKPOINT_RESTORE
  x86/unwind: Detect bad stack return address
  x86/dumpstack: Warn on stack recursion
  x86/unwind: Warn on bad frame pointer
  x86/decoder: Use stderr if insn sanity test fails
  x86/decoder: Use stdout if insn decoder test is successful
  mm/page_alloc: Remove kernel address exposure in free_reserved_area()
  x86/dumpstack: Remove raw stack dump
  ...
arch/x86/include/asm/cpufeatures.h
@@ -227,6 +227,7 @@
 #define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */
 #define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */
 #define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
+#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */
 #define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
 #define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
 #define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
@@ -280,8 +281,10 @@
 #define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */

 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */
+#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
 #define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */
 #define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */
+#define X86_FEATURE_RDPID (16*32+ 22) /* RDPID instruction */

 /* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */
 #define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */
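Each flag above encodes its position as (word * 32 + bit): the word selects a 32-bit entry in the kernel's capability bitmap and the bit selects the flag within that word. A minimal user-space sketch of that decoding, with x86_capability[] and cpu_has() as illustrative stand-ins rather than the kernel's real accessors:

#include <stdio.h>

/* Same (word*32 + bit) encoding as the defines above. */
#define X86_FEATURE_AVX512IFMA	( 9*32+21)
#define X86_FEATURE_AVX512VBMI	(16*32+ 1)
#define X86_FEATURE_RDPID	(16*32+22)

/* Illustrative stand-in for the kernel's per-CPU capability words. */
static unsigned int x86_capability[20];

static int cpu_has(int feature)
{
	return (x86_capability[feature / 32] >> (feature % 32)) & 1;
}

int main(void)
{
	x86_capability[16] |= 1u << 22;	/* pretend CPUID reported RDPID */

	printf("RDPID -> word %d, bit %d, set: %d\n",
	       X86_FEATURE_RDPID / 32, X86_FEATURE_RDPID % 32,
	       cpu_has(X86_FEATURE_RDPID));
	return 0;
}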
arch/x86/include/asm/kdebug.h
@@ -21,7 +21,6 @@ enum die_val {
 	DIE_NMIUNKNOWN,
 };

-extern void printk_address(unsigned long address);
 extern void die(const char *, struct pt_regs *,long);
 extern int __must_check __die(const char *, struct pt_regs *, long);
 extern void show_stack_regs(struct pt_regs *regs);
arch/x86/include/asm/processor.h
@@ -137,6 +137,17 @@ struct cpuinfo_x86 {
 	u32 microcode;
 };

+struct cpuid_regs {
+	u32 eax, ebx, ecx, edx;
+};
+
+enum cpuid_regs_idx {
+	CPUID_EAX = 0,
+	CPUID_EBX,
+	CPUID_ECX,
+	CPUID_EDX,
+};
+
 #define X86_VENDOR_INTEL 0
 #define X86_VENDOR_CYRIX 1
 #define X86_VENDOR_AMD 2
@@ -178,6 +189,9 @@ extern void identify_secondary_cpu(struct cpuinfo_x86 *);
 extern void print_cpu_info(struct cpuinfo_x86 *);
 void print_cpu_msr(struct cpuinfo_x86 *);
 extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
+extern u32 get_scattered_cpuid_leaf(unsigned int level,
+				    unsigned int sub_leaf,
+				    enum cpuid_regs_idx reg);
 extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
 extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);
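The new struct cpuid_regs and enum cpuid_regs_idx mirror the four CPUID output registers, and get_scattered_cpuid_leaf() exposes synthesized ("scattered") leaf data by (level, sub-leaf, register). A user-space sketch of the same register layout, querying leaf 7 directly; cpuid_count() here is a local helper of the same name, not the kernel's:

#include <stdint.h>
#include <stdio.h>

struct cpuid_regs {
	uint32_t eax, ebx, ecx, edx;
};

enum cpuid_regs_idx { CPUID_EAX = 0, CPUID_EBX, CPUID_ECX, CPUID_EDX };

/* Execute CPUID for the given leaf/sub-leaf. */
static void cpuid_count(uint32_t leaf, uint32_t sub, struct cpuid_regs *r)
{
	asm volatile("cpuid"
		     : "=a" (r->eax), "=b" (r->ebx), "=c" (r->ecx), "=d" (r->edx)
		     : "a" (leaf), "c" (sub));
}

int main(void)
{
	struct cpuid_regs r;

	cpuid_count(0x7, 0, &r);			/* extended feature flags */
	printf("AVX512VBMI: %u\n", (r.ecx >> 1) & 1);	/* CPUID.7.0:ECX[1] */
	printf("RDPID:      %u\n", (r.ecx >> 22) & 1);	/* CPUID.7.0:ECX[22] */
	return 0;
}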
arch/x86/include/asm/stacktrace.h
@@ -30,8 +30,7 @@ bool in_task_stack(unsigned long *stack, struct task_struct *task,
 int get_stack_info(unsigned long *stack, struct task_struct *task,
		    struct stack_info *info, unsigned long *visit_mask);

-void stack_type_str(enum stack_type type, const char **begin,
-		    const char **end);
+const char *stack_type_name(enum stack_type type);

 static inline bool on_stack(struct stack_info *info, void *addr, size_t len)
 {
@@ -43,8 +42,6 @@ static inline bool on_stack(struct stack_info *info, void *addr, size_t len)
 		addr + len > begin && addr + len <= end);
 }

-extern int kstack_depth_to_print;
-
 #ifdef CONFIG_X86_32
 #define STACKSLOTS_PER_LINE 8
 #else
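on_stack() accepts an object only when it lies entirely inside one stack's [begin, end) window; the four comparisons reject objects that start before, end past, or straddle the boundary. A freestanding sketch of the same interval test, with struct stack_info reduced to its bounds:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct stack_info {
	void *begin, *end;
};

static bool on_stack(struct stack_info *info, void *addr, size_t len)
{
	char *begin = info->begin;
	char *end = info->end;
	char *p = addr;

	/* the object [p, p+len) must fall entirely within [begin, end) */
	return (p >= begin && p < end &&
		p + len > begin && p + len <= end);
}

int main(void)
{
	char stack[256];
	struct stack_info info = { stack, stack + sizeof(stack) };

	printf("inside:    %d\n", on_stack(&info, stack + 16, 8));	/* 1 */
	printf("straddles: %d\n", on_stack(&info, stack + 250, 16));	/* 0 */
	return 0;
}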
@@ -86,9 +83,6 @@ get_stack_pointer(struct task_struct *task, struct pt_regs *regs)
 void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
			 unsigned long *stack, char *log_lvl);

 void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
			 unsigned long *sp, char *log_lvl);
-
-extern unsigned int code_bytes;
-
 /* The form of the top of the frame on the stack */
arch/x86/include/asm/unwind.h
@@ -13,6 +13,7 @@ struct unwind_state {
 	int graph_idx;
 #ifdef CONFIG_FRAME_POINTER
 	unsigned long *bp;
+	struct pt_regs *regs;
 #else
 	unsigned long *sp;
 #endif
@@ -47,7 +48,15 @@ unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
 	if (unwind_done(state))
 		return NULL;

-	return state->bp + 1;
+	return state->regs ? &state->regs->ip : state->bp + 1;
 }

+static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state)
+{
+	if (unwind_done(state))
+		return NULL;
+
+	return state->regs;
+}
+
 #else /* !CONFIG_FRAME_POINTER */
@@ -58,6 +67,11 @@ unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
 	return NULL;
 }

+static inline struct pt_regs *unwind_get_entry_regs(struct unwind_state *state)
+{
+	return NULL;
+}
+
 #endif /* CONFIG_FRAME_POINTER */

 #endif /* _ASM_X86_UNWIND_H */
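With frame pointers, each frame stores the caller's %rbp at bp[0] and the return address just above it at bp[1], which is why unwind_get_return_address_ptr() yields state->bp + 1 (or, with this change, &state->regs->ip when the current frame holds entry pt_regs). A user-space sketch of the same bp-chain walk; it assumes GCC/Clang built with -fno-omit-frame-pointer, and the depth cap is only a demo guard:

#include <stdio.h>

static void dump_stack_fp(void)
{
	unsigned long *bp = __builtin_frame_address(0);
	int depth;

	for (depth = 0; bp && depth < 16; depth++) {
		unsigned long ret = bp[1];	/* saved return address */

		if (!ret)
			break;
		printf("  #%d ret=%#lx\n", depth, ret);
		bp = (unsigned long *)bp[0];	/* follow saved frame pointer */
	}
}

int main(void)
{
	dump_stack_fp();
	return 0;
}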
arch/x86/include/asm/vgtod.h
@@ -89,8 +89,13 @@ static inline unsigned int __getcpu(void)
 	 * works on all CPUs.  This is volatile so that it orders
 	 * correctly wrt barrier() and to keep gcc from cleverly
 	 * hoisting it out of the calling function.
+	 *
+	 * If RDPID is available, use it.
 	 */
-	asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
+	alternative_io ("lsl %[p],%[seg]",
+			".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
+			X86_FEATURE_RDPID,
+			[p] "=a" (p), [seg] "r" (__PER_CPU_SEG));

 	return p;
 }
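RDPID reads IA32_TSC_AUX, which the kernel loads with the same (node << 12) | cpu value that the LSL path fetches from the per-CPU segment descriptor limit, so the alternative is a faster drop-in; alternative_io() patches it in only when X86_FEATURE_RDPID is set, since the instruction raises #UD on older CPUs. A user-space sketch decoding that value (assumes RDPID-capable hardware):

#include <stdio.h>

static unsigned long getcpu_rdpid(void)
{
	unsigned long p;

	/* same encoding as the alternative above: RDPID %rax */
	asm volatile(".byte 0xf3,0x0f,0xc7,0xf8" : "=a" (p));
	return p;
}

int main(void)
{
	unsigned int v = (unsigned int)getcpu_rdpid();

	printf("cpu=%u node=%u\n", v & 0xfff, v >> 12);	/* 12-bit CPU, rest node */
	return 0;
}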