Merge branch 'x86/vdso' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into next
Pull x86 vdso updates from Peter Anvin: "Vdso cleanups and improvements largely from Andy Lutomirski. This makes the vdso a lot less 'special'" * 'x86/vdso' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: x86/vdso, build: Make LE access macros clearer, host-safe x86/vdso, build: Fix cross-compilation from big-endian architectures x86/vdso, build: When vdso2c fails, unlink the output x86, vdso: Fix an OOPS accessing the HPET mapping w/o an HPET x86, mm: Replace arch_vma_name with vm_ops->name for vsyscalls x86, mm: Improve _install_special_mapping and fix x86 vdso naming mm, fs: Add vm_ops->name as an alternative to arch_vma_name x86, vdso: Fix an OOPS accessing the HPET mapping w/o an HPET x86, vdso: Remove vestiges of VDSO_PRELINK and some outdated comments x86, vdso: Move the vvar and hpet mappings next to the 64-bit vDSO x86, vdso: Move the 32-bit vdso special pages after the text x86, vdso: Reimplement vdso.so preparation in build-time C x86, vdso: Move syscall and sysenter setup into kernel/cpu/common.c x86, vdso: Clean up 32-bit vs 64-bit vdso params x86, mm: Ensure correct alignment of the fixmap
Tento commit je obsažen v:
@@ -1494,7 +1494,7 @@ static int xen_pgd_alloc(struct mm_struct *mm)
|
||||
page->private = (unsigned long)user_pgd;
|
||||
|
||||
if (user_pgd != NULL) {
|
||||
user_pgd[pgd_index(VSYSCALL_START)] =
|
||||
user_pgd[pgd_index(VSYSCALL_ADDR)] =
|
||||
__pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
|
||||
ret = 0;
|
||||
}
|
||||
@@ -2062,8 +2062,7 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
|
||||
case FIX_KMAP_BEGIN ... FIX_KMAP_END:
|
||||
# endif
|
||||
#else
|
||||
case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
|
||||
case VVAR_PAGE:
|
||||
case VSYSCALL_PAGE:
|
||||
#endif
|
||||
case FIX_TEXT_POKE0:
|
||||
case FIX_TEXT_POKE1:
|
||||
@@ -2104,8 +2103,7 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
|
||||
#ifdef CONFIG_X86_64
|
||||
/* Replicate changes to map the vsyscall page into the user
|
||||
pagetable vsyscall mapping. */
|
||||
if ((idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) ||
|
||||
idx == VVAR_PAGE) {
|
||||
if (idx == VSYSCALL_PAGE) {
|
||||
unsigned long vaddr = __fix_to_virt(idx);
|
||||
set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
|
||||
}
|
||||
|
@@ -525,10 +525,17 @@ char * __init xen_memory_setup(void)
|
||||
static void __init fiddle_vdso(void)
|
||||
{
|
||||
#ifdef CONFIG_X86_32
|
||||
/*
|
||||
* This could be called before selected_vdso32 is initialized, so
|
||||
* just fiddle with both possible images. vdso_image_32_syscall
|
||||
* can't be selected, since it only exists on 64-bit systems.
|
||||
*/
|
||||
u32 *mask;
|
||||
mask = VDSO32_SYMBOL(&vdso32_int80_start, NOTE_MASK);
|
||||
mask = vdso_image_32_int80.data +
|
||||
vdso_image_32_int80.sym_VDSO32_NOTE_MASK;
|
||||
*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
|
||||
mask = VDSO32_SYMBOL(&vdso32_sysenter_start, NOTE_MASK);
|
||||
mask = vdso_image_32_sysenter.data +
|
||||
vdso_image_32_sysenter.sym_VDSO32_NOTE_MASK;
|
||||
*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
|
||||
#endif
|
||||
}
|
||||
|
Odkázat v novém úkolu
Zablokovat Uživatele