  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * This file contains pgtable related functions for 64-bit machines.
  4. *
  5. * Derived from arch/ppc64/mm/init.c
  6. * Copyright (C) 1995-1996 Gary Thomas ([email protected])
  7. *
  8. * Modifications by Paul Mackerras (PowerMac) ([email protected])
  9. * and Cort Dougan (PReP) ([email protected])
  10. * Copyright (C) 1996 Paul Mackerras
  11. *
  12. * Derived from "arch/i386/mm/init.c"
  13. * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
  14. *
  15. * Dave Engebretsen <[email protected]>
  16. * Rework for PPC64 port.
  17. */
  18. #include <linux/signal.h>
  19. #include <linux/sched.h>
  20. #include <linux/kernel.h>
  21. #include <linux/errno.h>
  22. #include <linux/string.h>
  23. #include <linux/export.h>
  24. #include <linux/types.h>
  25. #include <linux/mman.h>
  26. #include <linux/mm.h>
  27. #include <linux/swap.h>
  28. #include <linux/stddef.h>
  29. #include <linux/vmalloc.h>
  30. #include <linux/slab.h>
  31. #include <linux/hugetlb.h>
  32. #include <asm/page.h>
  33. #include <asm/mmu_context.h>
  34. #include <asm/mmu.h>
  35. #include <asm/smp.h>
  36. #include <asm/machdep.h>
  37. #include <asm/tlb.h>
  38. #include <asm/processor.h>
  39. #include <asm/cputable.h>
  40. #include <asm/sections.h>
  41. #include <asm/firmware.h>
  42. #include <asm/dma.h>
  43. #include <mm/mmu_decl.h>
#ifdef CONFIG_PPC_BOOK3S_64
/*
 * partition table and process table for ISA 3.0
 */
struct prtb_entry *process_tb;
struct patb_entry *partition_tb;
/*
 * page table size
 *
 * On Book3S-64 the page-table geometry differs between the hash and
 * radix MMUs, so what would normally be compile-time constants are
 * variables here; presumably they are filled in during early MMU
 * setup — TODO confirm against the hash/radix init paths.  They are
 * exported because the pgtable accessor macros expand to them in
 * modules as well as in core kernel code.
 */
/* Bits of the virtual address consumed at each page-table level. */
unsigned long __pte_index_size;
EXPORT_SYMBOL(__pte_index_size);
unsigned long __pmd_index_size;
EXPORT_SYMBOL(__pmd_index_size);
unsigned long __pud_index_size;
EXPORT_SYMBOL(__pud_index_size);
unsigned long __pgd_index_size;
EXPORT_SYMBOL(__pgd_index_size);
/* Index used to select the kmem cache for PUD-level tables. */
unsigned long __pud_cache_index;
EXPORT_SYMBOL(__pud_cache_index);
/* Byte sizes of the tables at each level. */
unsigned long __pte_table_size;
EXPORT_SYMBOL(__pte_table_size);
unsigned long __pmd_table_size;
EXPORT_SYMBOL(__pmd_table_size);
unsigned long __pud_table_size;
EXPORT_SYMBOL(__pud_table_size);
unsigned long __pgd_table_size;
EXPORT_SYMBOL(__pgd_table_size);
/* Software bits OR-ed into non-leaf entries at each level. */
unsigned long __pmd_val_bits;
EXPORT_SYMBOL(__pmd_val_bits);
unsigned long __pud_val_bits;
EXPORT_SYMBOL(__pud_val_bits);
unsigned long __pgd_val_bits;
EXPORT_SYMBOL(__pgd_val_bits);
/* Kernel virtual-address-space layout (start/end of each region). */
unsigned long __kernel_virt_start;
EXPORT_SYMBOL(__kernel_virt_start);
unsigned long __vmalloc_start;
EXPORT_SYMBOL(__vmalloc_start);
unsigned long __vmalloc_end;
EXPORT_SYMBOL(__vmalloc_end);
unsigned long __kernel_io_start;
EXPORT_SYMBOL(__kernel_io_start);
unsigned long __kernel_io_end;
/* Base of the virtually mapped struct page array (sparse vmemmap). */
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
/* PTE-fragment geometry: fragments per page and fragment size shift. */
unsigned long __pte_frag_nr;
EXPORT_SYMBOL(__pte_frag_nr);
unsigned long __pte_frag_size_shift;
EXPORT_SYMBOL(__pte_frag_size_shift);
#endif
  93. #ifndef __PAGETABLE_PUD_FOLDED
  94. /* 4 level page table */
  95. struct page *p4d_page(p4d_t p4d)
  96. {
  97. if (p4d_is_leaf(p4d)) {
  98. if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
  99. VM_WARN_ON(!p4d_huge(p4d));
  100. return pte_page(p4d_pte(p4d));
  101. }
  102. return virt_to_page(p4d_pgtable(p4d));
  103. }
  104. #endif
  105. struct page *pud_page(pud_t pud)
  106. {
  107. if (pud_is_leaf(pud)) {
  108. if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
  109. VM_WARN_ON(!pud_huge(pud));
  110. return pte_page(pud_pte(pud));
  111. }
  112. return virt_to_page(pud_pgtable(pud));
  113. }
  114. /*
  115. * For hugepage we have pfn in the pmd, we use PTE_RPN_SHIFT bits for flags
  116. * For PTE page, we have a PTE_FRAG_SIZE (4K) aligned virtual address.
  117. */
  118. struct page *pmd_page(pmd_t pmd)
  119. {
  120. if (pmd_is_leaf(pmd)) {
  121. /*
  122. * vmalloc_to_page may be called on any vmap address (not only
  123. * vmalloc), and it uses pmd_page() etc., when huge vmap is
  124. * enabled so these checks can't be used.
  125. */
  126. if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
  127. VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd)));
  128. return pte_page(pmd_pte(pmd));
  129. }
  130. return virt_to_page(pmd_page_vaddr(pmd));
  131. }
  132. #ifdef CONFIG_STRICT_KERNEL_RWX
  133. void mark_rodata_ro(void)
  134. {
  135. if (!mmu_has_feature(MMU_FTR_KERNEL_RO)) {
  136. pr_warn("Warning: Unable to mark rodata read only on this CPU.\n");
  137. return;
  138. }
  139. if (radix_enabled())
  140. radix__mark_rodata_ro();
  141. else
  142. hash__mark_rodata_ro();
  143. // mark_initmem_nx() should have already run by now
  144. ptdump_check_wx();
  145. }
  146. void mark_initmem_nx(void)
  147. {
  148. if (radix_enabled())
  149. radix__mark_initmem_nx();
  150. else
  151. hash__mark_initmem_nx();
  152. }
  153. #endif