// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sh/kernel/vsyscall/vsyscall.c
 *
 * Copyright (C) 2006 Paul Mundt
 *
 * vDSO randomization
 * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
 */
  10. #include <linux/mm.h>
  11. #include <linux/kernel.h>
  12. #include <linux/init.h>
  13. #include <linux/gfp.h>
  14. #include <linux/module.h>
  15. #include <linux/elf.h>
  16. #include <linux/sched.h>
  17. #include <linux/err.h>
  18. /*
  19. * Should the kernel map a VDSO page into processes and pass its
  20. * address down to glibc upon exec()?
  21. */
  22. unsigned int __read_mostly vdso_enabled = 1;
  23. EXPORT_SYMBOL_GPL(vdso_enabled);
  24. static int __init vdso_setup(char *s)
  25. {
  26. vdso_enabled = simple_strtoul(s, NULL, 0);
  27. return 1;
  28. }
  29. __setup("vdso=", vdso_setup);
  30. /*
  31. * These symbols are defined by vsyscall.o to mark the bounds
  32. * of the ELF DSO images included therein.
  33. */
  34. extern const char vsyscall_trapa_start, vsyscall_trapa_end;
  35. static struct page *syscall_pages[1];
  36. int __init vsyscall_init(void)
  37. {
  38. void *syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
  39. syscall_pages[0] = virt_to_page(syscall_page);
  40. /*
  41. * XXX: Map this page to a fixmap entry if we get around
  42. * to adding the page to ELF core dumps
  43. */
  44. memcpy(syscall_page,
  45. &vsyscall_trapa_start,
  46. &vsyscall_trapa_end - &vsyscall_trapa_start);
  47. return 0;
  48. }
  49. /* Setup a VMA at program startup for the vsyscall page */
  50. int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
  51. {
  52. struct mm_struct *mm = current->mm;
  53. unsigned long addr;
  54. int ret;
  55. if (mmap_write_lock_killable(mm))
  56. return -EINTR;
  57. addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
  58. if (IS_ERR_VALUE(addr)) {
  59. ret = addr;
  60. goto up_fail;
  61. }
  62. ret = install_special_mapping(mm, addr, PAGE_SIZE,
  63. VM_READ | VM_EXEC |
  64. VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
  65. syscall_pages);
  66. if (unlikely(ret))
  67. goto up_fail;
  68. current->mm->context.vdso = (void *)addr;
  69. up_fail:
  70. mmap_write_unlock(mm);
  71. return ret;
  72. }
  73. const char *arch_vma_name(struct vm_area_struct *vma)
  74. {
  75. if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
  76. return "[vdso]";
  77. return NULL;
  78. }