pgtable_64.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_64_H
#define _ASM_X86_PGTABLE_64_H

#include <linux/const.h>
#include <asm/pgtable_64_types.h>

#ifndef __ASSEMBLY__

/*
 * This file contains the functions and defines necessary to modify and use
 * the x86-64 page table tree.
 */
#include <asm/processor.h>
#include <linux/bitops.h>
#include <linux/threads.h>
#include <asm/fixmap.h>

extern p4d_t level4_kernel_pgt[512];
extern p4d_t level4_ident_pgt[512];
extern pud_t level3_kernel_pgt[512];
extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pmd_t level2_fixmap_pgt[512];
extern pmd_t level2_ident_pgt[512];
extern pte_t level1_fixmap_pgt[512 * FIXMAP_PMD_NUM];
extern pgd_t init_top_pgt[];

#define swapper_pg_dir init_top_pgt

extern void paging_init(void);
static inline void sync_initial_page_table(void) { }
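/*
 * Note: sync_initial_page_table() is a no-op here because on x86-64 the
 * kernel half of the address space is shared by every pgd through
 * init_top_pgt, so there is nothing to copy back (unlike 32-bit, where
 * initial_page_table must be kept in sync with swapper_pg_dir).
 */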
#define pte_ERROR(e)					\
	pr_err("%s:%d: bad pte %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pte_val(e))
#define pmd_ERROR(e)					\
	pr_err("%s:%d: bad pmd %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pmd_val(e))
#define pud_ERROR(e)					\
	pr_err("%s:%d: bad pud %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pud_val(e))

#if CONFIG_PGTABLE_LEVELS >= 5
#define p4d_ERROR(e)					\
	pr_err("%s:%d: bad p4d %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), p4d_val(e))
#endif

#define pgd_ERROR(e)					\
	pr_err("%s:%d: bad pgd %p(%016lx)\n",		\
	       __FILE__, __LINE__, &(e), pgd_val(e))

struct mm_struct;

#define mm_p4d_folded mm_p4d_folded
static inline bool mm_p4d_folded(struct mm_struct *mm)
{
	return !pgtable_l5_enabled();
}
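/*
 * Illustration: with CONFIG_PGTABLE_LEVELS=5 built in but LA57 absent
 * (or disabled) on the booted CPU, pgtable_l5_enabled() is false, the
 * p4d level is folded into the pgd at runtime, and mm_p4d_folded()
 * returns true so generic mm code skips p4d-level accounting.
 */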
void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte);
void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);

static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);
}

static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	native_set_pte(ptep, native_make_pte(0));
}

static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	WRITE_ONCE(*pmdp, pmd);
}

static inline void native_pmd_clear(pmd_t *pmd)
{
	native_set_pmd(pmd, native_make_pmd(0));
}

static inline pte_t native_ptep_get_and_clear(pte_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pte(xchg(&xp->pte, 0));
#else
	/* native_local_ptep_get_and_clear,
	   but duplicated because of cyclic dependency */
	pte_t ret = *xp;
	native_pte_clear(NULL, 0, xp);
	return ret;
#endif
}
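/*
 * Note: the SMP path uses xchg() so that clearing the entry and reading
 * its old value are a single atomic operation; a separate read followed
 * by a write could race with another CPU (or the hardware page walker)
 * setting the Accessed/Dirty bits and silently lose that update. The
 * same reasoning applies to the pmd and pud variants below.
 */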
static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pmd(xchg(&xp->pmd, 0));
#else
	/* native_local_pmdp_get_and_clear,
	   but duplicated because of cyclic dependency */
	pmd_t ret = *xp;
	native_pmd_clear(xp);
	return ret;
#endif
}

static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
	WRITE_ONCE(*pudp, pud);
}

static inline void native_pud_clear(pud_t *pud)
{
	native_set_pud(pud, native_make_pud(0));
}

static inline pud_t native_pudp_get_and_clear(pud_t *xp)
{
#ifdef CONFIG_SMP
	return native_make_pud(xchg(&xp->pud, 0));
#else
	/* native_local_pudp_get_and_clear,
	 * but duplicated because of cyclic dependency
	 */
	pud_t ret = *xp;
	native_pud_clear(xp);
	return ret;
#endif
}

static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	pgd_t pgd;

	if (pgtable_l5_enabled() || !IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) {
		WRITE_ONCE(*p4dp, p4d);
		return;
	}

	pgd = native_make_pgd(native_p4d_val(p4d));
	pgd = pti_set_user_pgtbl((pgd_t *)p4dp, pgd);
	WRITE_ONCE(*p4dp, native_make_p4d(native_pgd_val(pgd)));
}
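/*
 * Note: with 4-level paging the p4d is folded into the pgd, so under
 * PAGE_TABLE_ISOLATION a p4d write is really a top-level pgd write and
 * must be routed through pti_set_user_pgtbl() to keep the shadow user
 * page table in sync. With 5-level paging the p4d sits below the pgd
 * and can be written directly.
 */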
static inline void native_p4d_clear(p4d_t *p4d)
{
	native_set_p4d(p4d, native_make_p4d(0));
}

static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	WRITE_ONCE(*pgdp, pti_set_user_pgtbl(pgdp, pgd));
}

static inline void native_pgd_clear(pgd_t *pgd)
{
	native_set_pgd(pgd, native_make_pgd(0));
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

/* PGD - Level 4 access */

/* PUD - Level 3 access */

/* PMD - Level 2 access */

/* PTE - Level 1 access */
/*
 * Encode and de-code a swap entry
 *
 * |     ...              | 11| 10|  9|8|7|6|5| 4| 3|2| 1|0| <- bit number
 * |     ...              |SW3|SW2|SW1|G|L|D|A|CD|WT|U| W|P| <- bit names
 * | TYPE (59-63) |  ~OFFSET (9-58)  |0|0|X|X| X| E|F|SD|0| <- swp entry
 *
 * G (8) is aliased and used as a PROT_NONE indicator for
 * !present ptes.  We need to start storing swap entries above
 * there.  We also need to avoid using A and D because of an
 * erratum where they can be incorrectly set by hardware on
 * non-present PTEs.
 *
 * Bits 1-4 are not used in the non-present format and are available
 * for the special uses described below:
 *
 * SD (1) in swp entry is used to store the soft-dirty bit, which helps
 * us remember soft dirty over page migration.
 *
 * F (2) in swp entry is used to record when a page-table entry is
 * write-protected by userfaultfd WP support.
 *
 * E (3) in swp entry is used to remember PG_anon_exclusive.
 *
 * Bit 7 in swp entry should be 0 because pmd_present checks not only P,
 * but also L and G.
 *
 * The offset is inverted by a binary not operation to make the high
 * physical bits set.
 */
#define SWP_TYPE_BITS		5

#define SWP_OFFSET_FIRST_BIT	(_PAGE_BIT_PROTNONE + 1)

/* We always extract/encode the offset by shifting it all the way up, and then down again */
#define SWP_OFFSET_SHIFT	(SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

/* Extract the high bits for type */
#define __swp_type(x)			((x).val >> (64 - SWP_TYPE_BITS))

/* Shift up (to get rid of type), then down to get value */
#define __swp_offset(x)			(~(x).val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT)

/*
 * Shift the offset up "too far" by TYPE bits, then down again.
 * The offset is inverted by a binary not operation to make the high
 * physical bits set.
 */
#define __swp_entry(type, offset) ((swp_entry_t) { \
	(~(unsigned long)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
	| ((unsigned long)(type) << (64 - SWP_TYPE_BITS)) })

#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val((pte)) })
#define __pmd_to_swp_entry(pmd)		((swp_entry_t) { pmd_val((pmd)) })
#define __swp_entry_to_pte(x)		(__pte((x).val))
#define __swp_entry_to_pmd(x)		(__pmd((x).val))
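/*
 * Worked example (illustrative only; assumes _PAGE_BIT_PROTNONE == 8,
 * so SWP_OFFSET_FIRST_BIT == 9 and SWP_OFFSET_SHIFT == 14):
 *
 *	swp_entry_t e = __swp_entry(3, 0x1234);
 *
 * encode:	~0x1234UL << 14 >> 5 places ~offset in bits 9-58,
 *		3UL << 59 places the type in bits 59-63.
 * decode:	__swp_type(e)   == e.val >> 59        == 3
 *		__swp_offset(e) == ~e.val << 5 >> 14  == 0x1234
 *
 * __swp_entry() leaves bits 0-8 clear, so the result can never be
 * mistaken for a present or PROT_NONE entry.
 */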
extern int kern_addr_valid(unsigned long addr);
extern void cleanup_highmap(void);

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define PAGE_AGP	PAGE_KERNEL_NOCACHE
#define HAVE_PAGE_AGP	1

/* fs/proc/kcore.c */
#define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
#define kc_offset_to_vaddr(o) ((o) | ~__VIRTUAL_MASK)

#define __HAVE_ARCH_PTE_SAME

#define vmemmap ((struct page *)VMEMMAP_START)

extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);

#define gup_fast_permitted gup_fast_permitted
static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
{
	if (end >> __VIRTUAL_MASK_SHIFT)
		return false;
	return true;
}
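/*
 * Illustration: with 4-level paging __VIRTUAL_MASK_SHIFT is 47, so any
 * range whose end has a bit set at or above bit 47 (a kernel or
 * non-canonical address) is refused here, and get_user_pages_fast()
 * falls back to the slow path, which takes mmap_lock and validates the
 * range properly.
 */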
#include <asm/pgtable-invert.h>

#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PGTABLE_64_H */