vdso.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2022 Helge Deller <[email protected]>
 *
 * based on arch/s390/kernel/vdso.c which is
 * Copyright IBM Corp. 2008
 * Author(s): Martin Schwidefsky ([email protected])
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/elf.h>
#include <linux/timekeeper_internal.h>
#include <linux/compat.h>
#include <linux/nsproxy.h>
#include <linux/time_namespace.h>
#include <linux/random.h>

#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/cacheflush.h>
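
/* Bounds of the vDSO images, provided by the vDSO linker scripts. */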
extern char vdso32_start, vdso32_end;
extern char vdso64_start, vdso64_end;
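
/*
 * Userspace may move the vDSO with mremap(); track the new base so the
 * kernel (e.g. the signal trampoline setup) still finds it.
 */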
static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *vma)
{
	current->mm->context.vdso_base = vma->vm_start;
	return 0;
}
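
/*
 * Special-mapping descriptors for the 64-bit and 32-bit vDSO images.
 * The .pages arrays are filled in at boot by vdso_init() below.
 */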
#ifdef CONFIG_64BIT
static struct vm_special_mapping vdso64_mapping = {
	.name = "[vdso]",
	.mremap = vdso_mremap,
};
#endif

static struct vm_special_mapping vdso32_mapping = {
	.name = "[vdso]",
	.mremap = vdso_mremap,
};

/*
 * This is called from binfmt_elf: we create the special vma for the
 * vDSO and insert it into the mm struct tree.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm,
				int executable_stack)
{
	unsigned long vdso_text_start, vdso_text_len, map_base;
	struct vm_special_mapping *vdso_mapping;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

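	/* Choose the vDSO image that matches the task's ABI. */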
#ifdef CONFIG_64BIT
	if (!is_compat_task()) {
		vdso_text_len = &vdso64_end - &vdso64_start;
		vdso_mapping = &vdso64_mapping;
	} else
#endif
	{
		vdso_text_len = &vdso32_end - &vdso32_start;
		vdso_mapping = &vdso32_mapping;
	}

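	/*
	 * Mimic mmap ASLR: with randomization enabled, shift the search
	 * base below mm->mmap_base by up to 31 pages.
	 */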
	map_base = mm->mmap_base;
	if (current->flags & PF_RANDOMIZE)
		map_base -= prandom_u32_max(0x20) * PAGE_SIZE;

	vdso_text_start = get_unmapped_area(NULL, map_base, vdso_text_len, 0, 0);
	/* get_unmapped_area() returns a negative errno on failure */
	if (IS_ERR_VALUE(vdso_text_start)) {
		rc = vdso_text_start;
		mmap_write_unlock(mm);
		return rc;
	}

	/* VM_MAYWRITE for COW so gdb can set breakpoints */
	vma = _install_special_mapping(mm, vdso_text_start, vdso_text_len,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_mapping);
	if (IS_ERR(vma)) {
		do_munmap(mm, vdso_text_start, PAGE_SIZE, NULL);
		rc = PTR_ERR(vma);
	} else {
		current->mm->context.vdso_base = vdso_text_start;
		rc = 0;
	}

	mmap_write_unlock(mm);
	return rc;
}
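
/*
 * Build the page array for a vDSO image. The array is NULL-terminated
 * (hence the pages + 1 allocation), as the special-mapping fault
 * handler expects.
 */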
static struct page ** __init vdso_setup_pages(void *start, void *end)
{
	int pages = (end - start) >> PAGE_SHIFT;
	struct page **pagelist;
	int i;

	pagelist = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
	if (!pagelist)
		panic("%s: Cannot allocate page list for VDSO", __func__);
	for (i = 0; i < pages; i++)
		pagelist[i] = virt_to_page(start + i * PAGE_SIZE);
	return pagelist;
}
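
/*
 * Pre-compute the page lists for the built-in vDSO images. The 32-bit
 * list is needed on 32-bit kernels and, via CONFIG_COMPAT, for 32-bit
 * tasks on 64-bit kernels.
 */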
static int __init vdso_init(void)
{
#ifdef CONFIG_64BIT
	vdso64_mapping.pages = vdso_setup_pages(&vdso64_start, &vdso64_end);
#endif
	if (IS_ENABLED(CONFIG_COMPAT) || !IS_ENABLED(CONFIG_64BIT))
		vdso32_mapping.pages = vdso_setup_pages(&vdso32_start, &vdso32_end);

	return 0;
}
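/* Runs at boot, before the first user process can exec. */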
arch_initcall(vdso_init);