Merge branch 'x86/vdso' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into next
Pull x86 vdso updates from Peter Anvin:
 "Vdso cleanups and improvements largely from Andy Lutomirski.  This
  makes the vdso a lot less 'special'."

* 'x86/vdso' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/vdso, build: Make LE access macros clearer, host-safe
  x86/vdso, build: Fix cross-compilation from big-endian architectures
  x86/vdso, build: When vdso2c fails, unlink the output
  x86, vdso: Fix an OOPS accessing the HPET mapping w/o an HPET
  x86, mm: Replace arch_vma_name with vm_ops->name for vsyscalls
  x86, mm: Improve _install_special_mapping and fix x86 vdso naming
  mm, fs: Add vm_ops->name as an alternative to arch_vma_name
  x86, vdso: Fix an OOPS accessing the HPET mapping w/o an HPET
  x86, vdso: Remove vestiges of VDSO_PRELINK and some outdated comments
  x86, vdso: Move the vvar and hpet mappings next to the 64-bit vDSO
  x86, vdso: Move the 32-bit vdso special pages after the text
  x86, vdso: Reimplement vdso.so preparation in build-time C
  x86, vdso: Move syscall and sysenter setup into kernel/cpu/common.c
  x86, vdso: Clean up 32-bit vs 64-bit vdso params
  x86, mm: Ensure correct alignment of the fixmap
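For context, the "mm, fs: Add vm_ops->name as an alternative to arch_vma_name" change in this series lets a VMA name itself through its vm_operations rather than through the global arch_vma_name() hook; the diff below applies that to the vsyscall gate VMA. A minimal sketch of the pattern (the demo_* identifiers are illustrative, not from the series):

	#include <linux/mm.h>

	/* The .name callback is consulted when /proc/<pid>/maps is
	 * generated; the string it returns is printed as the mapping's
	 * name, with no per-arch arch_vma_name() special case needed. */
	static const char *demo_vma_name(struct vm_area_struct *vma)
	{
		return "[demo]";
	}

	static const struct vm_operations_struct demo_vma_ops = {
		.name = demo_vma_name,
	};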
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1055,8 +1055,8 @@ void __init mem_init(void)
 	after_bootmem = 1;
 
 	/* Register memory areas for /proc/kcore */
-	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
-			 VSYSCALL_END - VSYSCALL_START, KCORE_OTHER);
+	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR,
+			PAGE_SIZE, KCORE_OTHER);
 
 	mem_init_print_info(NULL);
 }
@@ -1185,11 +1185,19 @@ int kern_addr_valid(unsigned long addr)
  * covers the 64bit vsyscall page now. 32bit has a real VMA now and does
  * not need special handling anymore:
  */
+static const char *gate_vma_name(struct vm_area_struct *vma)
+{
+	return "[vsyscall]";
+}
+static struct vm_operations_struct gate_vma_ops = {
+	.name = gate_vma_name,
+};
 static struct vm_area_struct gate_vma = {
-	.vm_start	= VSYSCALL_START,
-	.vm_end		= VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
+	.vm_start	= VSYSCALL_ADDR,
+	.vm_end		= VSYSCALL_ADDR + PAGE_SIZE,
 	.vm_page_prot	= PAGE_READONLY_EXEC,
-	.vm_flags	= VM_READ | VM_EXEC
+	.vm_flags	= VM_READ | VM_EXEC,
+	.vm_ops		= &gate_vma_ops,
 };
 
 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
@@ -1218,16 +1226,7 @@ int in_gate_area(struct mm_struct *mm, unsigned long addr)
  */
 int in_gate_area_no_mm(unsigned long addr)
 {
-	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
-}
-
-const char *arch_vma_name(struct vm_area_struct *vma)
-{
-	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
-		return "[vdso]";
-	if (vma == &gate_vma)
-		return "[vsyscall]";
-	return NULL;
+	return (addr & PAGE_MASK) == VSYSCALL_ADDR;
 }
 
 static unsigned long probe_memory_block_size(void)
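The practical effect of the gate_vma_ops hunk above is that the vsyscall page keeps its "[vsyscall]" label in /proc/<pid>/maps even though arch_vma_name() is removed from this file. A quick userspace check, runnable as-is:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char line[512];
		FILE *f = fopen("/proc/self/maps", "r");

		if (!f) {
			perror("fopen");
			return 1;
		}
		/* Print the mapping(s) labeled by the kernel's gate_vma_name(). */
		while (fgets(line, sizeof(line), f))
			if (strstr(line, "[vsyscall]"))
				fputs(line, stdout);
		fclose(f);
		return 0;
	}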