pgtable.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H

#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <asm/processor.h> /* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

struct mm_struct;

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_PPC_BOOK3S
#include <asm/book3s/pgtable.h>
#else
#include <asm/nohash/pgtable.h>
#endif /* !CONFIG_PPC_BOOK3S */

/*
 * Protection used for kernel text. We want the debuggers to be able to
 * set breakpoints anywhere, so don't write protect the kernel text
 * on platforms where such control is possible.
 */
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) || \
        defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX
#endif

/* Make module code happy. We don't make it read-only yet. */
#define PAGE_KERNEL_EXEC PAGE_KERNEL_X

/* Advertise special mapping type for AGP */
#define PAGE_AGP (PAGE_KERNEL_NC)
#define HAVE_PAGE_AGP

#ifndef __ASSEMBLY__

#ifndef MAX_PTRS_PER_PGD
#define MAX_PTRS_PER_PGD PTRS_PER_PGD
#endif

/* Keep these as macros to avoid an include dependency mess */
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))

/*
 * Select all bits except the pfn
 */
static inline pgprot_t pte_pgprot(pte_t pte)
{
        unsigned long pte_flags;

        pte_flags = pte_val(pte) & ~PTE_RPN_MASK;
        return __pgprot(pte_flags);
}

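/*
 * Usage sketch (illustrative, not part of the original header):
 * pte_pgprot() pairs naturally with mk_pte()/pfn_pte() when the
 * protection bits of one PTE need to be applied to a different page,
 * for example:
 *
 *        pte_t new = mk_pte(page, pte_pgprot(old));
 *
 * PTE_RPN_MASK covers the real page number, so clearing it leaves
 * only the protection and status bits behind.
 */
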
#ifndef pmd_page_vaddr
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
        return ((unsigned long)__va(pmd_val(pmd) & ~PMD_MASKED_BITS));
}
#define pmd_page_vaddr pmd_page_vaddr
#endif

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern pgd_t swapper_pg_dir[];

extern void paging_init(void);
void poking_init(void);

extern unsigned long ioremap_bot;
extern const pgprot_t protection_map[16];

/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address. Most 32-bit archs define it as always true (like this)
 * but most 64-bit archs actually perform a test. What should we do here?
 */
#define kern_addr_valid(addr) (1)

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_large(pmd) 0
#endif

/* Can we use this in KVM? */
unsigned long vmalloc_to_phys(void *vmalloc_addr);

void pgtable_cache_add(unsigned int shift);

pte_t *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va);

#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32)
void mark_initmem_nx(void);
#else
static inline void mark_initmem_nx(void) { }
#endif

/*
 * When PTE fragments are in use, PTE_FRAG_NR is defined in the subarch
 * pgtable.h, so it is guaranteed to be visible by the time we get here.
 */
#ifdef PTE_FRAG_NR
static inline void *pte_frag_get(mm_context_t *ctx)
{
        return ctx->pte_frag;
}

static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
        ctx->pte_frag = p;
}
#else
#define PTE_FRAG_NR 1
#define PTE_FRAG_SIZE_SHIFT PAGE_SHIFT
#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT)
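
/*
 * Fallback: without PTE_FRAG_NR from the subarch header, a "fragment"
 * is simply a whole page (PTE_FRAG_NR == 1, PTE_FRAG_SIZE == PAGE_SIZE)
 * and the context does not cache a partially used page, so the
 * accessors below are stubs.
 */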
static inline void *pte_frag_get(mm_context_t *ctx)
{
        return NULL;
}

static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
}
#endif

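/*
 * Generic fallbacks: a "leaf" entry maps a huge page directly rather
 * than pointing at a lower-level page table. Subarch headers that
 * support huge pages at these levels provide their own definitions,
 * which the #ifndef guards below leave in place.
 */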
#ifndef pmd_is_leaf
#define pmd_is_leaf pmd_is_leaf
static inline bool pmd_is_leaf(pmd_t pmd)
{
        return false;
}
#endif

#ifndef pud_is_leaf
#define pud_is_leaf pud_is_leaf
static inline bool pud_is_leaf(pud_t pud)
{
        return false;
}
#endif

#ifndef p4d_is_leaf
#define p4d_is_leaf p4d_is_leaf
static inline bool p4d_is_leaf(p4d_t p4d)
{
        return false;
}
#endif

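/*
 * pmd_pgtable() hands back the software page-table handle for the
 * page-table page referenced by @pmd; pgtable_t is a pointer type
 * here rather than a struct page *, hence the cast of the mapped
 * virtual address.
 */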
#define pmd_pgtable pmd_pgtable
static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
        return (pgtable_t)pmd_page_vaddr(pmd);
}

#ifdef CONFIG_PPC64
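/*
 * On 64-bit, ioremap()ed mappings live in the window between
 * IOREMAP_BASE and IOREMAP_END, so a simple range check identifies
 * them.
 */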
#define is_ioremap_addr is_ioremap_addr
static inline bool is_ioremap_addr(const void *x)
{
        unsigned long addr = (unsigned long)x;

        return addr >= IOREMAP_BASE && addr < IOREMAP_END;
}

struct seq_file;
void arch_report_meminfo(struct seq_file *m);

#endif /* CONFIG_PPC64 */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_H */