mmap.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/export.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mman.h>

unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

#define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
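
/*
 * Worked example (illustrative): COLOUR_ALIGN() rounds the hint up to an
 * shm_align_mask boundary, then adds the cache colour of the file offset.
 * With the "sane caches" mask above (shm_align_mask == PAGE_SIZE - 1), the
 * colour term ((pgoff << PAGE_SHIFT) & shm_align_mask) is always zero, so
 * COLOUR_ALIGN(addr, pgoff) degenerates to PAGE_ALIGN(addr). On a machine
 * with aliasing caches and, say, shm_align_mask == 0xffff and 16 KiB pages,
 * a hint of addr == 0x12345 with pgoff == 3 would give:
 *	((0x12345 + 0xffff) & ~0xffff)	= 0x20000
 *	((3 << 14) & 0xffff)		= 0xc000
 *	COLOUR_ALIGN(0x12345, 3)	= 0x2c000
 */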

enum mmap_allocation_direction {UP, DOWN};

static unsigned long arch_get_unmapped_area_common(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags, enum mmap_allocation_direction dir)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = addr0;
	int do_color_align;
	struct vm_unmapped_area_info info;

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Even MAP_FIXED mappings must reside within TASK_SIZE */
		if (TASK_SIZE - len < addr)
			return -EINVAL;

		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	/* File-backed and shared mappings must honour cache colouring */
	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		/* Honour the hint only if it does not collide with a VMA */
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.length = len;
	info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
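
	/*
	 * Worked example (illustrative): vm_unmapped_area() returns gaps
	 * where (addr & align_mask) == (align_offset & align_mask). With
	 * shm_align_mask == PAGE_SIZE - 1, align_mask is 0 and only page
	 * alignment applies; with an aliasing mask of 0xffff and 16 KiB
	 * pages, align_mask would be PAGE_MASK & 0xffff == 0xc000, forcing
	 * the returned address to share the file offset's cache colour.
	 */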

	if (dir == DOWN) {
		info.flags = VM_UNMAPPED_AREA_TOPDOWN;
		info.low_limit = PAGE_SIZE;
		info.high_limit = mm->mmap_base;
		addr = vm_unmapped_area(&info);

		if (!(addr & ~PAGE_MASK))
			return addr;

		/*
		 * A failed mmap() very likely causes application failure,
		 * so fall back to the bottom-up function here. This scenario
		 * can happen with large stack limits and large mmap()
		 * allocations.
		 */
	}

	info.flags = 0;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	return vm_unmapped_area(&info);
}

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, UP);
}

/*
 * There is no need to export this but sched.h declares the function as
 * extern so making it static here results in an error.
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, DOWN);
}
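
/*
 * Illustrative userspace sketch (not part of this file's build): both entry
 * points above are reached through plain mmap(). A hinted address is
 * page/colour-aligned and honoured only if the range is free; otherwise the
 * gap search between mmap_base and TASK_SIZE decides. The file path used
 * here is hypothetical.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/example", O_RDWR | O_CREAT, 0600); /* hypothetical path */

	if (fd < 0)
		return 1;
	ftruncate(fd, 16384);

	/*
	 * The hint may be moved: the kernel colour/page-aligns it and falls
	 * back to the gap search if [hint, hint + len) is already occupied.
	 */
	void *p = mmap((void *)0x120000000UL, 16384, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, 0);
	if (p != MAP_FAILED)
		printf("mapped at %p\n", p);
	return 0;
}
#endif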

int __virt_addr_valid(volatile void *kaddr)
{
	unsigned long vaddr = (unsigned long)kaddr;

	/* Only the linear-mapped range [PAGE_OFFSET, vm_map_base) qualifies */
	if ((vaddr < PAGE_OFFSET) || (vaddr >= vm_map_base))
		return 0;

	return pfn_valid(PFN_DOWN(PHYSADDR(kaddr)));
}
EXPORT_SYMBOL_GPL(__virt_addr_valid);
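
/*
 * Minimal in-kernel usage sketch (illustrative, not compiled here; assumes
 * <linux/slab.h> and <linux/vmalloc.h>): the range check above is what makes
 * this predicate reject vmalloc() addresses, which live at or above
 * vm_map_base rather than in the linear map.
 */
#if 0
static void virt_addr_valid_demo(void)
{
	void *linear = kmalloc(64, GFP_KERNEL);	/* linear map: expected valid */
	void *mapped = vmalloc(64);		/* >= vm_map_base: expected invalid */

	WARN_ON(linear && !__virt_addr_valid(linear));
	WARN_ON(mapped && __virt_addr_valid(mapped));

	vfree(mapped);
	kfree(linear);
}
#endif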

/*
 * You really shouldn't be using read() or write() on /dev/mem. This might go
 * away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
	/*
	 * Check whether addr is covered by a memory region without the
	 * MEMBLOCK_NOMAP attribute, and whether that region covers the
	 * entire range. In theory, this could lead to false negatives
	 * if the range is covered by distinct but adjacent memory regions
	 * that only differ in other attributes. However, few such
	 * attributes have been defined, and it is debatable whether it
	 * follows that /dev/mem read() calls should be able to traverse
	 * such boundaries.
	 */
	return memblock_is_region_memory(addr, size) && memblock_is_map_memory(addr);
}
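
/*
 * Concrete example of the false negative described above (all values
 * assumed): if memblock holds two adjacent regions [0x00200000, 0x0fffffff]
 * and [0x10000000, 0x1fffffff] that stayed unmerged because some attribute
 * differs, a read of 0x1000 bytes at 0x0ffff800 spans both regions and
 * fails memblock_is_region_memory(), even though every byte is mapped RAM.
 */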

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return !(((pfn << PAGE_SHIFT) + size) & ~(GENMASK_ULL(cpu_pabits, 0)));
}
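
/*
 * Worked example (illustrative, assuming 16 KiB pages and cpu_pabits == 47,
 * i.e. a 48-bit physical address space): GENMASK_ULL(47, 0) is
 * 0x0000ffffffffffff, so a mapping is accepted only if its end,
 * (pfn << PAGE_SHIFT) + size, sets no bit above bit 47. One page at
 * pfn 0x200000000 ends at 0x800000004000 and is accepted; one page at
 * pfn 0x400000000 starts at bit 48 and is rejected.
 */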