/*
 * arch/sh/mm/mmap.c
 *
 * Copyright (C) 2008 - 2009 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <asm/page.h>
#include <asm/processor.h>
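
/*
 * shm_align_mask is the mask used to colour shared mappings so that they
 * do not alias in the data cache.  The default below (PAGE_SIZE - 1, the
 * "Sane caches" case) imposes no constraint beyond page alignment; on
 * SH parts whose D-cache can alias, the CPU setup code is expected to
 * raise this mask (assumption; the mask is only defined and exported here).
 */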
unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

#ifdef CONFIG_MMU
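/*
 * Base page protections for each combination of the VM_READ, VM_WRITE,
 * VM_EXEC and VM_SHARED flags.  Private writable mappings get the
 * copy-on-write protection (PAGE_COPY); only shared writable mappings
 * are mapped truly writable.  DECLARE_VM_GET_PAGE_PROT below emits the
 * generic vm_get_page_prot() helper that indexes this table.
 */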
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_READONLY,
	[VM_WRITE]					= PAGE_COPY,
	[VM_WRITE | VM_READ]				= PAGE_COPY,
	[VM_EXEC]					= PAGE_EXECREAD,
	[VM_EXEC | VM_READ]				= PAGE_EXECREAD,
	[VM_EXEC | VM_WRITE]				= PAGE_COPY,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_READONLY,
	[VM_SHARED | VM_WRITE]				= PAGE_WRITEONLY,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
	[VM_SHARED | VM_EXEC]				= PAGE_EXECREAD,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_EXECREAD,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_RWX,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_RWX
};
DECLARE_VM_GET_PAGE_PROT

/*
 * To avoid cache aliasing, shared mappings are coloured so that the
 * virtual address and the file offset (pgoff) land on the same cache
 * colour within the shm_align_mask window.
 */
static inline unsigned long COLOUR_ALIGN(unsigned long addr,
					 unsigned long pgoff)
{
	unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	return base + off;
}

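/*
 * Bottom-up search for an unmapped area.  For file-backed or MAP_SHARED
 * requests the search is colour-aligned via align_mask/align_offset so
 * that the returned address shares a cache colour with the file offset.
 */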
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_colour_align;
	struct vm_unmapped_area_info info;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

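	/*
	 * No usable hinted address: search bottom-up between
	 * TASK_UNMAPPED_BASE and TASK_SIZE.  align_mask keeps only the
	 * colour bits above the page offset, and align_offset biases the
	 * search so the result matches the colour implied by pgoff.
	 */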
	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}

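/*
 * Top-down variant, used when the mmap area grows down from mm->mmap_base.
 * If the top-down search fails, it falls back to a bottom-up search (see
 * the comment near the end of the function) rather than failing the
 * mmap() outright.
 */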
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_colour_align;
	struct vm_unmapped_area_info info;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

#endif /* CONFIG_MMU */

/*
 * You really shouldn't be using read() or write() on /dev/mem. This
 * might go away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
	if (addr < __MEMORY_START)
		return 0;
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}

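/*
 * Any pfn is considered valid for mmap() of /dev/mem; range checking is
 * only applied to read()/write() via valid_phys_addr_range() above.
 */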
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}