/* pgtable-64.h */
  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /*
  3. * Copyright (C) 2012 Regents of the University of California
  4. */
  5. #ifndef _ASM_RISCV_PGTABLE_64_H
  6. #define _ASM_RISCV_PGTABLE_64_H
  7. #include <linux/bits.h>
  8. #include <linux/const.h>
  9. #include <asm/errata_list.h>
/* Runtime paging-mode switches, set during early boot. */
extern bool pgtable_l4_enabled;
extern bool pgtable_l5_enabled;
/* PGD index position for 3-, 4- and 5-level paging (Sv39/Sv48/Sv57). */
#define PGDIR_SHIFT_L3 30
#define PGDIR_SHIFT_L4 39
#define PGDIR_SHIFT_L5 48
#define PGDIR_SIZE_L3 (_AC(1, UL) << PGDIR_SHIFT_L3)
/* Shift selected at runtime from the enabled paging depth. */
#define PGDIR_SHIFT (pgtable_l5_enabled ? PGDIR_SHIFT_L5 : \
		(pgtable_l4_enabled ? PGDIR_SHIFT_L4 : PGDIR_SHIFT_L3))
/* Size of region mapped by a page global directory */
#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE - 1))
/* p4d is folded into pgd in case of 4-level page table */
#define P4D_SHIFT_L3 30
#define P4D_SHIFT_L4 39
#define P4D_SHIFT_L5 39
#define P4D_SHIFT (pgtable_l5_enabled ? P4D_SHIFT_L5 : \
		(pgtable_l4_enabled ? P4D_SHIFT_L4 : P4D_SHIFT_L3))
#define P4D_SIZE (_AC(1, UL) << P4D_SHIFT)
#define P4D_MASK (~(P4D_SIZE - 1))
/* pud is folded into pgd in case of 3-level page table */
#define PUD_SHIFT 30
/* Size of region mapped by a page upper directory entry: 1 GiB */
#define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)
#define PUD_MASK (~(PUD_SIZE - 1))
#define PMD_SHIFT 21
/* Size of region mapped by a page middle directory */
#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE - 1))
/* Page 4th Directory entry: a single opaque word, wrapped for type safety. */
typedef struct {
	unsigned long p4d;
} p4d_t;
#define p4d_val(x) ((x).p4d)
#define __p4d(x) ((p4d_t) { (x) })
#define PTRS_PER_P4D (PAGE_SIZE / sizeof(p4d_t))
/* Page Upper Directory entry: a single opaque word, wrapped for type safety. */
typedef struct {
	unsigned long pud;
} pud_t;
#define pud_val(x) ((x).pud)
#define __pud(x) ((pud_t) { (x) })
#define PTRS_PER_PUD (PAGE_SIZE / sizeof(pud_t))
/* Page Middle Directory entry: a single opaque word, wrapped for type safety. */
typedef struct {
	unsigned long pmd;
} pmd_t;
#define pmd_val(x) ((x).pmd)
#define __pmd(x) ((pmd_t) { (x) })
#define PTRS_PER_PMD (PAGE_SIZE / sizeof(pmd_t))
/*
 * rv64 PTE format:
 * | 63 | 62 61 | 60 54 | 53 10 | 9 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
 *   N    MT      RSV     PFN    reserved for SW  D   A   G   U   X   W   R   V
 */
#define _PAGE_PFN_MASK  GENMASK(53, 10)
/*
 * [62:61] Svpbmt Memory Type definitions:
 *
 *  00 - PMA    Normal Cacheable, No change to implied PMA memory type
 *  01 - NC     Non-cacheable, idempotent, weakly-ordered Main Memory
 *  10 - IO     Non-cacheable, non-idempotent, strongly-ordered I/O memory
 *  11 - Rsvd   Reserved for future standard use
 */
#define _PAGE_NOCACHE_SVPBMT (1UL << 61)
#define _PAGE_IO_SVPBMT (1UL << 62)
#define _PAGE_MTMASK_SVPBMT (_PAGE_NOCACHE_SVPBMT | _PAGE_IO_SVPBMT)
/*
 * [63:59] T-Head Memory Type definitions:
 *
 * 00000 - NC   Weakly-ordered, Non-cacheable, Non-bufferable, Non-shareable, Non-trustable
 * 01110 - PMA  Weakly-ordered, Cacheable, Bufferable, Shareable, Non-trustable
 * 10000 - IO   Strongly-ordered, Non-cacheable, Non-bufferable, Non-shareable, Non-trustable
 */
#define _PAGE_PMA_THEAD ((1UL << 62) | (1UL << 61) | (1UL << 60))
#define _PAGE_NOCACHE_THEAD 0UL
#define _PAGE_IO_THEAD (1UL << 63)
/* Full vendor memory-type field, bits [63:59] */
#define _PAGE_MTMASK_THEAD (_PAGE_PMA_THEAD | _PAGE_IO_THEAD | (1UL << 59))
/*
 * Return the PTE memory-type mask for this machine. The value is
 * selected at patch time by the ALT_SVPBMT() alternative (see
 * asm/errata_list.h) — presumably 0 when no memory-type extension
 * is present; confirm against the alternative's definitions.
 */
static inline u64 riscv_page_mtmask(void)
{
	u64 val;
	ALT_SVPBMT(val, _PAGE_MTMASK);
	return val;
}
/*
 * Return the PTE bits encoding "non-cacheable" for this machine,
 * resolved via the ALT_SVPBMT() alternative (see asm/errata_list.h).
 */
static inline u64 riscv_page_nocache(void)
{
	u64 val;
	ALT_SVPBMT(val, _PAGE_NOCACHE);
	return val;
}
/*
 * Return the PTE bits encoding "strongly-ordered I/O" for this machine,
 * resolved via the ALT_SVPBMT() alternative (see asm/errata_list.h).
 */
static inline u64 riscv_page_io(void)
{
	u64 val;
	ALT_SVPBMT(val, _PAGE_IO);
	return val;
}
/* Runtime-resolved memory-type values; note these expand to function calls. */
#define _PAGE_NOCACHE riscv_page_nocache()
#define _PAGE_IO riscv_page_io()
#define _PAGE_MTMASK riscv_page_mtmask()
/* Set of bits to preserve across pte_modify() */
#define _PAGE_CHG_MASK (~(unsigned long)(_PAGE_PRESENT | _PAGE_READ | \
					 _PAGE_WRITE | _PAGE_EXEC | \
					 _PAGE_USER | _PAGE_GLOBAL | \
					 _PAGE_MTMASK))
  112. static inline int pud_present(pud_t pud)
  113. {
  114. return (pud_val(pud) & _PAGE_PRESENT);
  115. }
  116. static inline int pud_none(pud_t pud)
  117. {
  118. return (pud_val(pud) == 0);
  119. }
  120. static inline int pud_bad(pud_t pud)
  121. {
  122. return !pud_present(pud);
  123. }
  124. #define pud_leaf pud_leaf
  125. static inline int pud_leaf(pud_t pud)
  126. {
  127. return pud_present(pud) && (pud_val(pud) & _PAGE_LEAF);
  128. }
  129. static inline int pud_user(pud_t pud)
  130. {
  131. return pud_val(pud) & _PAGE_USER;
  132. }
/* Store @pud into the table slot @pudp; no TLB maintenance is done here. */
static inline void set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
}
/* Wipe the entry at @pudp by writing an all-zero (none) pud. */
static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}
  141. static inline pud_t pfn_pud(unsigned long pfn, pgprot_t prot)
  142. {
  143. return __pud((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
  144. }
/* Extract the page-frame number encoded in @pud. */
static inline unsigned long _pud_pfn(pud_t pud)
{
	return __page_val_to_pfn(pud_val(pud));
}
  149. static inline pmd_t *pud_pgtable(pud_t pud)
  150. {
  151. return (pmd_t *)pfn_to_virt(__page_val_to_pfn(pud_val(pud)));
  152. }
  153. static inline struct page *pud_page(pud_t pud)
  154. {
  155. return pfn_to_page(__page_val_to_pfn(pud_val(pud)));
  156. }
  157. #define mm_p4d_folded mm_p4d_folded
  158. static inline bool mm_p4d_folded(struct mm_struct *mm)
  159. {
  160. if (pgtable_l5_enabled)
  161. return false;
  162. return true;
  163. }
  164. #define mm_pud_folded mm_pud_folded
  165. static inline bool mm_pud_folded(struct mm_struct *mm)
  166. {
  167. if (pgtable_l4_enabled)
  168. return false;
  169. return true;
  170. }
/* Index of the PMD entry covering @addr within its table */
#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
/*
 * Compose a pmd from a page-frame number and protection bits.
 * ALT_THEAD_PMA() is an alternative that may rewrite prot_val in place
 * — presumably converting generic memory-type bits to the T-Head
 * [63:59] encoding; see asm/errata_list.h. Must run before the OR below.
 */
static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t prot)
{
	unsigned long prot_val = pgprot_val(prot);
	ALT_THEAD_PMA(prot_val);
	return __pmd((pfn << _PAGE_PFN_SHIFT) | prot_val);
}
/* Extract the page-frame number encoded in @pmd. */
static inline unsigned long _pmd_pfn(pmd_t pmd)
{
	return __page_val_to_pfn(pmd_val(pmd));
}
/* Build a pmd for @page with protection @prot. */
#define mk_pmd(page, prot) pfn_pmd(page_to_pfn(page), prot)
/* Diagnostics printed when a corrupt table entry is detected. */
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#define p4d_ERROR(e) \
	pr_err("%s:%d: bad p4d %016lx.\n", __FILE__, __LINE__, p4d_val(e))
  189. static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
  190. {
  191. if (pgtable_l4_enabled)
  192. *p4dp = p4d;
  193. else
  194. set_pud((pud_t *)p4dp, (pud_t){ p4d_val(p4d) });
  195. }
  196. static inline int p4d_none(p4d_t p4d)
  197. {
  198. if (pgtable_l4_enabled)
  199. return (p4d_val(p4d) == 0);
  200. return 0;
  201. }
  202. static inline int p4d_present(p4d_t p4d)
  203. {
  204. if (pgtable_l4_enabled)
  205. return (p4d_val(p4d) & _PAGE_PRESENT);
  206. return 1;
  207. }
  208. static inline int p4d_bad(p4d_t p4d)
  209. {
  210. if (pgtable_l4_enabled)
  211. return !p4d_present(p4d);
  212. return 0;
  213. }
  214. static inline void p4d_clear(p4d_t *p4d)
  215. {
  216. if (pgtable_l4_enabled)
  217. set_p4d(p4d, __p4d(0));
  218. }
  219. static inline p4d_t pfn_p4d(unsigned long pfn, pgprot_t prot)
  220. {
  221. return __p4d((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
  222. }
/* Extract the page-frame number encoded in @p4d. */
static inline unsigned long _p4d_pfn(p4d_t p4d)
{
	return __page_val_to_pfn(p4d_val(p4d));
}
  227. static inline pud_t *p4d_pgtable(p4d_t p4d)
  228. {
  229. if (pgtable_l4_enabled)
  230. return (pud_t *)pfn_to_virt(__page_val_to_pfn(p4d_val(p4d)));
  231. return (pud_t *)pud_pgtable((pud_t) { p4d_val(p4d) });
  232. }
/* Virtual address of the table referenced by @p4d, as an unsigned long */
#define p4d_page_vaddr(p4d) ((unsigned long)p4d_pgtable(p4d))
  234. static inline struct page *p4d_page(p4d_t p4d)
  235. {
  236. return pfn_to_page(__page_val_to_pfn(p4d_val(p4d)));
  237. }
/* Index of the PUD entry covering @addr within its table */
#define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
  239. #define pud_offset pud_offset
  240. static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
  241. {
  242. if (pgtable_l4_enabled)
  243. return p4d_pgtable(*p4d) + pud_index(address);
  244. return (pud_t *)p4d;
  245. }
  246. static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
  247. {
  248. if (pgtable_l5_enabled)
  249. *pgdp = pgd;
  250. else
  251. set_p4d((p4d_t *)pgdp, (p4d_t){ pgd_val(pgd) });
  252. }
  253. static inline int pgd_none(pgd_t pgd)
  254. {
  255. if (pgtable_l5_enabled)
  256. return (pgd_val(pgd) == 0);
  257. return 0;
  258. }
  259. static inline int pgd_present(pgd_t pgd)
  260. {
  261. if (pgtable_l5_enabled)
  262. return (pgd_val(pgd) & _PAGE_PRESENT);
  263. return 1;
  264. }
  265. static inline int pgd_bad(pgd_t pgd)
  266. {
  267. if (pgtable_l5_enabled)
  268. return !pgd_present(pgd);
  269. return 0;
  270. }
  271. static inline void pgd_clear(pgd_t *pgd)
  272. {
  273. if (pgtable_l5_enabled)
  274. set_pgd(pgd, __pgd(0));
  275. }
  276. static inline p4d_t *pgd_pgtable(pgd_t pgd)
  277. {
  278. if (pgtable_l5_enabled)
  279. return (p4d_t *)pfn_to_virt(__page_val_to_pfn(pgd_val(pgd)));
  280. return (p4d_t *)p4d_pgtable((p4d_t) { pgd_val(pgd) });
  281. }
/* Virtual address of the table referenced by @pgd, as an unsigned long */
#define pgd_page_vaddr(pgd) ((unsigned long)pgd_pgtable(pgd))
  283. static inline struct page *pgd_page(pgd_t pgd)
  284. {
  285. return pfn_to_page(__page_val_to_pfn(pgd_val(pgd)));
  286. }
  287. #define pgd_page(pgd) pgd_page(pgd)
/* Index of the P4D entry covering @addr within its table */
#define p4d_index(addr) (((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))
  289. #define p4d_offset p4d_offset
  290. static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
  291. {
  292. if (pgtable_l5_enabled)
  293. return pgd_pgtable(*pgd) + p4d_index(address);
  294. return (p4d_t *)pgd;
  295. }
  296. #endif /* _ASM_RISCV_PGTABLE_64_H */