  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2014, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/mm.h>
  6. #include <linux/module.h>
  7. #include <asm/tlbflush.h>
  8. #include <asm/set_memory.h>
/*
 * Masks carried to the apply_to_page_range() callback: for every PTE in
 * the walked range, bits in @clear_mask are cleared and bits in @set_mask
 * are then set (clear runs first, so set_mask wins on overlapping bits).
 */
struct page_change_data {
	pgprot_t set_mask;	/* PTE bits to turn on */
	pgprot_t clear_mask;	/* PTE bits to turn off */
};
  13. static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
  14. {
  15. struct page_change_data *cdata = data;
  16. pte_t pte = *ptep;
  17. pte = clear_pte_bit(pte, cdata->clear_mask);
  18. pte = set_pte_bit(pte, cdata->set_mask);
  19. set_pte_ext(ptep, pte, 0);
  20. return 0;
  21. }
  22. static bool in_range(unsigned long start, unsigned long size,
  23. unsigned long range_start, unsigned long range_end)
  24. {
  25. return start >= range_start && start < range_end &&
  26. size <= range_end - start;
  27. }
  28. /*
  29. * This function assumes that the range is mapped with PAGE_SIZE pages.
  30. */
  31. static int __change_memory_common(unsigned long start, unsigned long size,
  32. pgprot_t set_mask, pgprot_t clear_mask)
  33. {
  34. struct page_change_data data;
  35. int ret;
  36. data.set_mask = set_mask;
  37. data.clear_mask = clear_mask;
  38. ret = apply_to_page_range(&init_mm, start, size, change_page_range,
  39. &data);
  40. flush_tlb_kernel_range(start, start + size);
  41. return ret;
  42. }
/*
 * Validate and normalise a set_memory_*() request, then apply the masks.
 *
 * @addr:       start address; expected to be page-aligned (warns if not)
 * @numpages:   number of PAGE_SIZE pages to change
 * @set_mask:   PTE bits to set on every page
 * @clear_mask: PTE bits to clear on every page
 *
 * Returns 0 on success (including numpages == 0), -EINVAL if the range
 * is not wholly inside module or vmalloc space, otherwise the result of
 * __change_memory_common().
 */
static int change_memory_common(unsigned long addr, int numpages,
pgprot_t set_mask, pgprot_t clear_mask)
{
unsigned long start = addr & PAGE_MASK;
/* NOTE: for a misaligned addr, PAGE_ALIGN() rounds the end up while
 * start rounds down, so size covers one extra page; the WARN below
 * flags such callers rather than failing them. */
unsigned long end = PAGE_ALIGN(addr) + numpages * PAGE_SIZE;
unsigned long size = end - start;
WARN_ON_ONCE(start != addr);
if (!size)
return 0;
/* Only module and vmalloc mappings may have their permissions changed;
 * both are known to be mapped with PAGE_SIZE pages. */
if (!in_range(start, size, MODULES_VADDR, MODULES_END) &&
!in_range(start, size, VMALLOC_START, VMALLOC_END))
return -EINVAL;
return __change_memory_common(start, size, set_mask, clear_mask);
}
  57. int set_memory_ro(unsigned long addr, int numpages)
  58. {
  59. return change_memory_common(addr, numpages,
  60. __pgprot(L_PTE_RDONLY),
  61. __pgprot(0));
  62. }
  63. int set_memory_rw(unsigned long addr, int numpages)
  64. {
  65. return change_memory_common(addr, numpages,
  66. __pgprot(0),
  67. __pgprot(L_PTE_RDONLY));
  68. }
  69. int set_memory_nx(unsigned long addr, int numpages)
  70. {
  71. return change_memory_common(addr, numpages,
  72. __pgprot(L_PTE_XN),
  73. __pgprot(0));
  74. }
  75. int set_memory_x(unsigned long addr, int numpages)
  76. {
  77. return change_memory_common(addr, numpages,
  78. __pgprot(0),
  79. __pgprot(L_PTE_XN));
  80. }
  81. int set_memory_valid(unsigned long addr, int numpages, int enable)
  82. {
  83. if (enable)
  84. return __change_memory_common(addr, PAGE_SIZE * numpages,
  85. __pgprot(L_PTE_VALID),
  86. __pgprot(0));
  87. else
  88. return __change_memory_common(addr, PAGE_SIZE * numpages,
  89. __pgprot(0),
  90. __pgprot(L_PTE_VALID));
  91. }