Merge branch 'x86/vdso' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into next
Pull x86 vdso updates from Peter Anvin:
 "Vdso cleanups and improvements largely from Andy Lutomirski.  This
  makes the vdso a lot less 'special'"

* 'x86/vdso' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/vdso, build: Make LE access macros clearer, host-safe
  x86/vdso, build: Fix cross-compilation from big-endian architectures
  x86/vdso, build: When vdso2c fails, unlink the output
  x86, vdso: Fix an OOPS accessing the HPET mapping w/o an HPET
  x86, mm: Replace arch_vma_name with vm_ops->name for vsyscalls
  x86, mm: Improve _install_special_mapping and fix x86 vdso naming
  mm, fs: Add vm_ops->name as an alternative to arch_vma_name
  x86, vdso: Fix an OOPS accessing the HPET mapping w/o an HPET
  x86, vdso: Remove vestiges of VDSO_PRELINK and some outdated comments
  x86, vdso: Move the vvar and hpet mappings next to the 64-bit vDSO
  x86, vdso: Move the 32-bit vdso special pages after the text
  x86, vdso: Reimplement vdso.so preparation in build-time C
  x86, vdso: Move syscall and sysenter setup into kernel/cpu/common.c
  x86, vdso: Clean up 32-bit vs 64-bit vdso params
  x86, mm: Ensure correct alignment of the fixmap
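For context before the hunks below: the vDSO is a small ELF shared object the kernel maps into every process so that hot calls such as gettimeofday, time, and getcpu can run without a full kernel entry. A minimal userspace sketch, not part of this series, using the standard getauxval(3) lookup (AT_SYSINFO_EHDR points at the vDSO's ELF header):

/* Locate the vDSO image the kernel mapped into this process. */
#include <stdio.h>
#include <elf.h>
#include <sys/auxv.h>

int main(void)
{
	Elf64_Ehdr *vdso = (Elf64_Ehdr *)getauxval(AT_SYSINFO_EHDR);

	if (vdso)	/* prints "ELF": the vDSO really is a shared object */
		printf("vDSO at %p, magic %.3s\n", (void *)vdso,
		       (char *)vdso->e_ident + 1);
	return 0;
}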
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -20,6 +20,7 @@
 #include <asm/processor.h>
 #include <asm/debugreg.h>
 #include <asm/sections.h>
+#include <asm/vsyscall.h>
 #include <linux/topology.h>
 #include <linux/cpumask.h>
 #include <asm/pgtable.h>
@@ -953,6 +954,38 @@ static void vgetcpu_set_mode(void)
 	else
 		vgetcpu_mode = VGETCPU_LSL;
 }
+
+/* May not be __init: called during resume */
+static void syscall32_cpu_init(void)
+{
+	/* Load these always in case some future AMD CPU supports
+	   SYSENTER from compat mode too. */
+	wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
+	wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
+	wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
+
+	wrmsrl(MSR_CSTAR, ia32_cstar_target);
+}
+#endif
+
+#ifdef CONFIG_X86_32
+void enable_sep_cpu(void)
+{
+	int cpu = get_cpu();
+	struct tss_struct *tss = &per_cpu(init_tss, cpu);
+
+	if (!boot_cpu_has(X86_FEATURE_SEP)) {
+		put_cpu();
+		return;
+	}
+
+	tss->x86_tss.ss1 = __KERNEL_CS;
+	tss->x86_tss.sp1 = sizeof(struct tss_struct) + (unsigned long) tss;
+	wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
+	wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.sp1, 0);
+	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) ia32_sysenter_target, 0);
+	put_cpu();
+}
 #endif
 
 void __init identify_boot_cpu(void)
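A note on the hunk above: on SYSENTER the CPU loads CS from MSR_IA32_SYSENTER_CS, ESP from MSR_IA32_SYSENTER_ESP and EIP from MSR_IA32_SYSENTER_EIP, so enable_sep_cpu() must point ESP at a valid stack top. The sp1 value is plain pointer arithmetic: the address one past the end of the per-cpu TSS, usable as the top of a downward-growing stack. A standalone userspace sketch of that computation (tss_sketch is a made-up stand-in, not struct tss_struct):

#include <stdio.h>

struct tss_sketch {			/* stand-in for struct tss_struct */
	unsigned long fields[64];
};

int main(void)
{
	static struct tss_sketch tss;

	/* Same arithmetic as tss->x86_tss.sp1 above: one byte past the
	 * end of the object, the highest address the stack grows down from. */
	unsigned long sp1 = sizeof(struct tss_sketch) + (unsigned long)&tss;

	printf("tss=%p sp1=%#lx (size %zu)\n",
	       (void *)&tss, sp1, sizeof(struct tss_sketch));
	return 0;
}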
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -74,9 +74,6 @@ static inline void hpet_writel(unsigned int d, unsigned int a)
 static inline void hpet_set_mapping(void)
 {
 	hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
-#ifdef CONFIG_X86_64
-	__set_fixmap(VSYSCALL_HPET, hpet_address, PAGE_KERNEL_VVAR_NOCACHE);
-#endif
 }
 
 static inline void hpet_clear_mapping(void)
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -298,7 +298,8 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
 	}
 
 	if (current->mm->context.vdso)
-		restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
+		restorer = current->mm->context.vdso +
+			selected_vdso32->sym___kernel_sigreturn;
 	else
 		restorer = &frame->retcode;
 	if (ksig->ka.sa.sa_flags & SA_RESTORER)
@@ -361,7 +362,8 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
 	save_altstack_ex(&frame->uc.uc_stack, regs->sp);
 
 	/* Set up to return from userspace. */
-	restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
+	restorer = current->mm->context.vdso +
+		selected_vdso32->sym___kernel_rt_sigreturn;
 	if (ksig->ka.sa.sa_flags & SA_RESTORER)
 		restorer = ksig->ka.sa.sa_restorer;
 	put_user_ex(restorer, &frame->pretcode);
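The two signal hunks replace the old VDSO32_SYMBOL() macro, which resolved symbols against prelink-time addresses, with "vdso base + build-time symbol offset", where the offset is recorded by the new vdso2c tool in the selected_vdso32 image descriptor. A sketch of that pattern (the sym___kernel_* names come from the diff; the struct layout and the resolve helper here are illustrative, not the kernel's exact vdso_image):

#include <stdio.h>

/* Illustrative descriptor: offsets of exported symbols within the vdso
 * image, extracted at build time (vdso2c's job in the real kernel). */
struct vdso_image_sketch {
	unsigned long sym___kernel_sigreturn;
	unsigned long sym___kernel_rt_sigreturn;
};

static void *vdso_sym(void *base, unsigned long offset)
{
	return (char *)base + offset;	/* runtime addr = load addr + offset */
}

int main(void)
{
	struct vdso_image_sketch img = { 0x420, 0x430 };	/* made-up offsets */
	void *base = (void *)0x7ffff7ffa000UL;			/* made-up base */

	printf("sigreturn stub at %p\n",
	       vdso_sym(base, img.sym___kernel_sigreturn));
	return 0;
}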
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -91,7 +91,7 @@ static int addr_to_vsyscall_nr(unsigned long addr)
 {
 	int nr;
 
-	if ((addr & ~0xC00UL) != VSYSCALL_START)
+	if ((addr & ~0xC00UL) != VSYSCALL_ADDR)
 		return -EINVAL;
 
 	nr = (addr & 0xC00UL) >> 10;
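The decode above works because the three legacy vsyscalls (gettimeofday, time, getcpu) sit at 1024-byte intervals inside the single vsyscall page, so bits 10-11 of the address select the number. A standalone sketch (the base constant mirrors the well-known fixed vsyscall address but is illustrative here, not taken from this diff):

#include <stdio.h>

#define VSYSCALL_BASE_SKETCH 0xffffffffff600000UL

static int addr_to_nr(unsigned long addr)
{
	if ((addr & ~0xC00UL) != VSYSCALL_BASE_SKETCH)
		return -1;
	return (int)((addr & 0xC00UL) >> 10);	/* 0, 1, or 2 */
}

int main(void)
{
	printf("%d %d %d\n",
	       addr_to_nr(VSYSCALL_BASE_SKETCH + 0x000),	/* gettimeofday -> 0 */
	       addr_to_nr(VSYSCALL_BASE_SKETCH + 0x400),	/* time -> 1 */
	       addr_to_nr(VSYSCALL_BASE_SKETCH + 0x800));	/* getcpu -> 2 */
	return 0;
}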
@@ -330,24 +330,17 @@ void __init map_vsyscall(void)
 {
 	extern char __vsyscall_page;
 	unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
-	unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
 
-	__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
+	__set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
 		     vsyscall_mode == NATIVE
 		     ? PAGE_KERNEL_VSYSCALL
 		     : PAGE_KERNEL_VVAR);
-	BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
-		     (unsigned long)VSYSCALL_START);
-
-	__set_fixmap(VVAR_PAGE, physaddr_vvar_page, PAGE_KERNEL_VVAR);
-	BUILD_BUG_ON((unsigned long)__fix_to_virt(VVAR_PAGE) !=
-		     (unsigned long)VVAR_ADDRESS);
+	BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
+		     (unsigned long)VSYSCALL_ADDR);
 }
 
 static int __init vsyscall_init(void)
 {
-	BUG_ON(VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE));
-
 	cpu_notifier_register_begin();
 
 	on_each_cpu(cpu_vsyscall_init, NULL, 1);
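The BUILD_BUG_ON above pins the remaining fixmap slot to the ABI-frozen VSYSCALL_ADDR at compile time (the vvar page no longer goes through the fixmap at all; it now lives next to the vDSO). A simplified sketch of the assertion idiom, for readers unfamiliar with it (the kernel's real BUILD_BUG_ON macro is more elaborate than this):

/* Compile-time assertion: a true condition yields a negative array size,
 * which fails to compile; a false one compiles to nothing at runtime. */
#define BUILD_BUG_ON_SKETCH(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

int main(void)
{
	BUILD_BUG_ON_SKETCH(sizeof(unsigned long) < sizeof(void *)); /* ok */
	/* BUILD_BUG_ON_SKETCH(1);  -- would break the build */
	return 0;
}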