mmap.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/mmap.c
 *
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/types.h>

#include <asm/cpufeature.h>
#include <asm/page.h>
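
/*
 * Default mapping from the VM_READ/VM_WRITE/VM_EXEC/VM_SHARED vm_flags
 * combinations to arm64 page protections. The execute-only entries are
 * upgraded at boot by adjust_protection_map() when Enhanced PAN is present.
 */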
static pgprot_t protection_map[16] __ro_after_init = {
	[VM_NONE] = PAGE_NONE,
	[VM_READ] = PAGE_READONLY,
	[VM_WRITE] = PAGE_READONLY,
	[VM_WRITE | VM_READ] = PAGE_READONLY,
	/* PAGE_EXECONLY if Enhanced PAN */
	[VM_EXEC] = PAGE_READONLY_EXEC,
	[VM_EXEC | VM_READ] = PAGE_READONLY_EXEC,
	[VM_EXEC | VM_WRITE] = PAGE_READONLY_EXEC,
	[VM_EXEC | VM_WRITE | VM_READ] = PAGE_READONLY_EXEC,
	[VM_SHARED] = PAGE_NONE,
	[VM_SHARED | VM_READ] = PAGE_READONLY,
	[VM_SHARED | VM_WRITE] = PAGE_SHARED,
	[VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
	/* PAGE_EXECONLY if Enhanced PAN */
	[VM_SHARED | VM_EXEC] = PAGE_READONLY_EXEC,
	[VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_EXEC,
	[VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_EXEC,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_EXEC
};

/*
 * You really shouldn't be using read() or write() on /dev/mem. This might go
 * away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
	/*
	 * Check whether addr is covered by a memory region without the
	 * MEMBLOCK_NOMAP attribute, and whether that region covers the
	 * entire range. In theory, this could lead to false negatives
	 * if the range is covered by distinct but adjacent memory regions
	 * that only differ in other attributes. However, few such
	 * attributes have been defined, and it is debatable whether it
	 * follows that /dev/mem read() calls should be able to traverse
	 * such boundaries.
	 */
	return memblock_is_region_memory(addr, size) &&
	       memblock_is_map_memory(addr);
}

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return !(((pfn << PAGE_SHIFT) + size) & ~PHYS_MASK);
}

static int __init adjust_protection_map(void)
{
	/*
	 * With Enhanced PAN we can honour the execute-only permissions as
	 * there is no PAN override with such mappings.
	 */
	if (cpus_have_const_cap(ARM64_HAS_EPAN)) {
		protection_map[VM_EXEC] = PAGE_EXECONLY;
		protection_map[VM_EXEC | VM_SHARED] = PAGE_EXECONLY;
	}

	return 0;
}
arch_initcall(adjust_protection_map);
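
/*
 * Illustrative note (not part of the upstream file): with EPAN present, an
 * anonymous mmap() requesting PROT_EXEC alone resolves to PAGE_EXECONLY via
 * the table above, i.e. a genuine execute-only mapping; without EPAN the
 * same request falls back to PAGE_READONLY_EXEC.
 */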

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	pteval_t prot = pgprot_val(protection_map[vm_flags &
				   (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]);
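
	/* PROT_BTI is tracked as VM_ARM64_BTI; mark the page as Guarded (PTE_GP) */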
	if (vm_flags & VM_ARM64_BTI)
		prot |= PTE_GP;

	/*
	 * There are two conditions required for returning a Normal Tagged
	 * memory type: (1) the user requested it via PROT_MTE passed to
	 * mmap() or mprotect() and (2) the corresponding vma supports MTE. We
	 * register (1) as VM_MTE in the vma->vm_flags and (2) as
	 * VM_MTE_ALLOWED. Note that the latter can only be set during the
	 * mmap() call since mprotect() does not accept MAP_* flags.
	 * Checking for VM_MTE only is sufficient since arch_validate_flags()
	 * does not permit (VM_MTE & !VM_MTE_ALLOWED).
	 */
	if (vm_flags & VM_MTE)
		prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED);

	return __pgprot(prot);
}
EXPORT_SYMBOL(vm_get_page_prot);
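
/*
 * Illustrative only (not part of the upstream file): a hedged sketch of how
 * userspace reaches the VM_MTE path above. PROT_MTE comes from the
 * arm64-specific <asm/mman.h> and requires an MTE-capable CPU and kernel;
 * the kernel records it as VM_MTE in vma->vm_flags, which vm_get_page_prot()
 * then turns into PTE_ATTRINDX(MT_NORMAL_TAGGED):
 *
 *	#include <sys/mman.h>
 *	#include <asm/mman.h>		// PROT_MTE
 *
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_MTE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 * Anonymous private mappings also get VM_MTE_ALLOWED, so the resulting
 * memory is Normal Tagged and allocation tags can then be set with the
 * MTE tag instructions (e.g. STG).
 */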