vdso.c 4.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Copyright (C) 2015 Imagination Technologies
  4. * Author: Alex Smith <[email protected]>
  5. */
  6. #include <linux/binfmts.h>
  7. #include <linux/elf.h>
  8. #include <linux/err.h>
  9. #include <linux/init.h>
  10. #include <linux/ioport.h>
  11. #include <linux/kernel.h>
  12. #include <linux/mm.h>
  13. #include <linux/random.h>
  14. #include <linux/sched.h>
  15. #include <linux/slab.h>
  16. #include <linux/timekeeper_internal.h>
  17. #include <asm/abi.h>
  18. #include <asm/mips-cps.h>
  19. #include <asm/page.h>
  20. #include <asm/vdso.h>
  21. #include <vdso/helpers.h>
  22. #include <vdso/vsyscall.h>
/* Kernel-provided data used by the VDSO. */
static union mips_vdso_data mips_vdso_data __page_aligned_data;
/* Exported pointer through which timekeeping code updates the VDSO data. */
struct vdso_data *vdso_data = mips_vdso_data.data;

/*
 * Mapping for the VDSO data/GIC pages. The real pages are mapped manually, as
 * what we map and where within the area they are mapped is determined at
 * runtime.
 */
static struct page *no_pages[] = { NULL };
static struct vm_special_mapping vdso_vvar_mapping = {
	.name = "[vvar]",
	.pages = no_pages,
};
  36. static void __init init_vdso_image(struct mips_vdso_image *image)
  37. {
  38. unsigned long num_pages, i;
  39. unsigned long data_pfn;
  40. BUG_ON(!PAGE_ALIGNED(image->data));
  41. BUG_ON(!PAGE_ALIGNED(image->size));
  42. num_pages = image->size / PAGE_SIZE;
  43. data_pfn = __phys_to_pfn(__pa_symbol(image->data));
  44. for (i = 0; i < num_pages; i++)
  45. image->mapping.pages[i] = pfn_to_page(data_pfn + i);
  46. }
/*
 * Initialise the page arrays for every VDSO image built into this kernel:
 * the native image plus, when the corresponding compat ABIs are enabled,
 * the o32 and n32 images.
 */
static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image);

#ifdef CONFIG_MIPS32_O32
	init_vdso_image(&vdso_image_o32);
#endif

#ifdef CONFIG_MIPS32_N32
	init_vdso_image(&vdso_image_n32);
#endif

	return 0;
}
subsys_initcall(init_vdso);
  59. static unsigned long vdso_base(void)
  60. {
  61. unsigned long base = STACK_TOP;
  62. if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
  63. /* Skip the delay slot emulation page */
  64. base += PAGE_SIZE;
  65. }
  66. if (current->flags & PF_RANDOMIZE) {
  67. base += prandom_u32_max(VDSO_RANDOMIZE_SIZE);
  68. base = PAGE_ALIGN(base);
  69. }
  70. return base;
  71. }
/*
 * Map the VDSO into a new process's address space at exec time.
 *
 * Lays out, under mmap_write_lock, up to four pieces:
 *   - the delay slot emulation page at STACK_TOP (FP support only),
 *   - a [vvar] area holding the GIC user page (if a GIC is present)
 *     followed by the VDSO data page,
 *   - the VDSO image itself, immediately after the data page.
 *
 * Returns 0 on success or a negative errno. NOTE(review): on failure after
 * the delay-slot/vvar mappings are installed they are left in place —
 * presumably acceptable since a failed exec tears down the new mm; confirm.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mips_vdso_image *image = current->thread.abi->vdso;
	struct mm_struct *mm = current->mm;
	unsigned long gic_size, vvar_size, size, base, data_addr, vdso_addr, gic_pfn, gic_base;
	struct vm_area_struct *vma;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
		/* Map delay slot emulation page */
		base = mmap_region(NULL, STACK_TOP, PAGE_SIZE,
				   VM_READ | VM_EXEC |
				   VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
				   0, NULL);

		/* mmap_region() returns a negative errno encoded in the address. */
		if (IS_ERR_VALUE(base)) {
			ret = base;
			goto out;
		}
	}

	/*
	 * Determine total area size. This includes the VDSO data itself, the
	 * data page, and the GIC user page if present. Always create a mapping
	 * for the GIC user area if the GIC is present regardless of whether it
	 * is the current clocksource, in case it comes into use later on. We
	 * only map a page even though the total area is 64K, as we only need
	 * the counter registers at the start.
	 */
	gic_size = mips_gic_present() ? PAGE_SIZE : 0;
	vvar_size = gic_size + PAGE_SIZE;
	size = vvar_size + image->size;

	/*
	 * Find a region that's large enough for us to perform the
	 * colour-matching alignment below.
	 */
	if (cpu_has_dc_aliases)
		size += shm_align_mask + 1;

	base = get_unmapped_area(NULL, vdso_base(), size, 0, 0);
	if (IS_ERR_VALUE(base)) {
		ret = base;
		goto out;
	}

	/*
	 * If we suffer from dcache aliasing, ensure that the VDSO data page
	 * mapping is coloured the same as the kernel's mapping of that memory.
	 * This ensures that when the kernel updates the VDSO data userland
	 * will observe it without requiring cache invalidations.
	 */
	if (cpu_has_dc_aliases) {
		base = __ALIGN_MASK(base, shm_align_mask);
		base += ((unsigned long)vdso_data - gic_size) & shm_align_mask;
	}

	/* Layout: [GIC user page (optional)][data page][VDSO image]. */
	data_addr = base + gic_size;
	vdso_addr = data_addr + PAGE_SIZE;

	/* Read-only, non-writable-ever area covering the GIC + data pages. */
	vma = _install_special_mapping(mm, base, vvar_size,
				       VM_READ | VM_MAYREAD,
				       &vdso_vvar_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* Map GIC user page. */
	if (gic_size) {
		gic_base = (unsigned long)mips_gic_base + MIPS_GIC_USER_OFS;
		gic_pfn = PFN_DOWN(__pa(gic_base));

		/* Uncached: the GIC counter is a hardware register block. */
		ret = io_remap_pfn_range(vma, base, gic_pfn, gic_size,
					 pgprot_noncached(vma->vm_page_prot));
		if (ret)
			goto out;
	}

	/* Map data page. */
	ret = remap_pfn_range(vma, data_addr,
			      virt_to_phys(vdso_data) >> PAGE_SHIFT,
			      PAGE_SIZE, vma->vm_page_prot);
	if (ret)
		goto out;

	/* Map VDSO image. */
	vma = _install_special_mapping(mm, vdso_addr, image->size,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
				       &image->mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* Record the image address so signal return trampolines can find it. */
	mm->context.vdso = (void *)vdso_addr;
	ret = 0;

out:
	mmap_write_unlock(mm);
	return ret;
}