Merge branch 'x86/vdso' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into next
Pull x86 vdso updates from Peter Anvin: "Vdso cleanups and improvements largely from Andy Lutomirski. This makes the vdso a lot less "special"" * 'x86/vdso' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: x86/vdso, build: Make LE access macros clearer, host-safe x86/vdso, build: Fix cross-compilation from big-endian architectures x86/vdso, build: When vdso2c fails, unlink the output x86, vdso: Fix an OOPS accessing the HPET mapping w/o an HPET x86, mm: Replace arch_vma_name with vm_ops->name for vsyscalls x86, mm: Improve _install_special_mapping and fix x86 vdso naming mm, fs: Add vm_ops->name as an alternative to arch_vma_name x86, vdso: Fix an OOPS accessing the HPET mapping w/o an HPET x86, vdso: Remove vestiges of VDSO_PRELINK and some outdated comments x86, vdso: Move the vvar and hpet mappings next to the 64-bit vDSO x86, vdso: Move the 32-bit vdso special pages after the text x86, vdso: Reimplement vdso.so preparation in build-time C x86, vdso: Move syscall and sysenter setup into kernel/cpu/common.c x86, vdso: Clean up 32-bit vs 64-bit vdso params x86, mm: Ensure correct alignment of the fixmap
This commit is contained in:
89
mm/mmap.c
89
mm/mmap.c
@@ -2871,6 +2871,31 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int special_mapping_fault(struct vm_area_struct *vma,
|
||||
struct vm_fault *vmf);
|
||||
|
||||
/*
|
||||
* Having a close hook prevents vma merging regardless of flags.
|
||||
*/
|
||||
static void special_mapping_close(struct vm_area_struct *vma)
|
||||
{
|
||||
}
|
||||
|
||||
static const char *special_mapping_name(struct vm_area_struct *vma)
|
||||
{
|
||||
return ((struct vm_special_mapping *)vma->vm_private_data)->name;
|
||||
}
|
||||
|
||||
static const struct vm_operations_struct special_mapping_vmops = {
|
||||
.close = special_mapping_close,
|
||||
.fault = special_mapping_fault,
|
||||
.name = special_mapping_name,
|
||||
};
|
||||
|
||||
static const struct vm_operations_struct legacy_special_mapping_vmops = {
|
||||
.close = special_mapping_close,
|
||||
.fault = special_mapping_fault,
|
||||
};
|
||||
|
||||
static int special_mapping_fault(struct vm_area_struct *vma,
|
||||
struct vm_fault *vmf)
|
||||
@@ -2886,7 +2911,13 @@ static int special_mapping_fault(struct vm_area_struct *vma,
|
||||
*/
|
||||
pgoff = vmf->pgoff - vma->vm_pgoff;
|
||||
|
||||
for (pages = vma->vm_private_data; pgoff && *pages; ++pages)
|
||||
if (vma->vm_ops == &legacy_special_mapping_vmops)
|
||||
pages = vma->vm_private_data;
|
||||
else
|
||||
pages = ((struct vm_special_mapping *)vma->vm_private_data)->
|
||||
pages;
|
||||
|
||||
for (; pgoff && *pages; ++pages)
|
||||
pgoff--;
|
||||
|
||||
if (*pages) {
|
||||
@@ -2899,30 +2930,11 @@ static int special_mapping_fault(struct vm_area_struct *vma,
|
||||
return VM_FAULT_SIGBUS;
|
||||
}
|
||||
|
||||
/*
|
||||
* Having a close hook prevents vma merging regardless of flags.
|
||||
*/
|
||||
static void special_mapping_close(struct vm_area_struct *vma)
|
||||
{
|
||||
}
|
||||
|
||||
static const struct vm_operations_struct special_mapping_vmops = {
|
||||
.close = special_mapping_close,
|
||||
.fault = special_mapping_fault,
|
||||
};
|
||||
|
||||
/*
|
||||
* Called with mm->mmap_sem held for writing.
|
||||
* Insert a new vma covering the given region, with the given flags.
|
||||
* Its pages are supplied by the given array of struct page *.
|
||||
* The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
|
||||
* The region past the last page supplied will always produce SIGBUS.
|
||||
* The array pointer and the pages it points to are assumed to stay alive
|
||||
* for as long as this mapping might exist.
|
||||
*/
|
||||
struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
|
||||
unsigned long addr, unsigned long len,
|
||||
unsigned long vm_flags, struct page **pages)
|
||||
static struct vm_area_struct *__install_special_mapping(
|
||||
struct mm_struct *mm,
|
||||
unsigned long addr, unsigned long len,
|
||||
unsigned long vm_flags, const struct vm_operations_struct *ops,
|
||||
void *priv)
|
||||
{
|
||||
int ret;
|
||||
struct vm_area_struct *vma;
|
||||
@@ -2939,8 +2951,8 @@ struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
|
||||
vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
|
||||
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
|
||||
|
||||
vma->vm_ops = &special_mapping_vmops;
|
||||
vma->vm_private_data = pages;
|
||||
vma->vm_ops = ops;
|
||||
vma->vm_private_data = priv;
|
||||
|
||||
ret = insert_vm_struct(mm, vma);
|
||||
if (ret)
|
||||
@@ -2957,12 +2969,31 @@ out:
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
/*
|
||||
* Called with mm->mmap_sem held for writing.
|
||||
* Insert a new vma covering the given region, with the given flags.
|
||||
* Its pages are supplied by the given array of struct page *.
|
||||
* The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
|
||||
* The region past the last page supplied will always produce SIGBUS.
|
||||
* The array pointer and the pages it points to are assumed to stay alive
|
||||
* for as long as this mapping might exist.
|
||||
*/
|
||||
struct vm_area_struct *_install_special_mapping(
|
||||
struct mm_struct *mm,
|
||||
unsigned long addr, unsigned long len,
|
||||
unsigned long vm_flags, const struct vm_special_mapping *spec)
|
||||
{
|
||||
return __install_special_mapping(mm, addr, len, vm_flags,
|
||||
&special_mapping_vmops, (void *)spec);
|
||||
}
|
||||
|
||||
int install_special_mapping(struct mm_struct *mm,
|
||||
unsigned long addr, unsigned long len,
|
||||
unsigned long vm_flags, struct page **pages)
|
||||
{
|
||||
struct vm_area_struct *vma = _install_special_mapping(mm,
|
||||
addr, len, vm_flags, pages);
|
||||
struct vm_area_struct *vma = __install_special_mapping(
|
||||
mm, addr, len, vm_flags, &legacy_special_mapping_vmops,
|
||||
(void *)pages);
|
||||
|
||||
return PTR_ERR_OR_ZERO(vma);
|
||||
}
|
||||
|
Reference in New Issue
Block a user