pgtable.h
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_CSKY_PGTABLE_H
#define __ASM_CSKY_PGTABLE_H

#include <asm/fixmap.h>
#include <asm/memory.h>
#include <asm/addrspace.h>
#include <abi/pgtable-bits.h>
#include <asm-generic/pgtable-nopmd.h>

#define PGDIR_SHIFT		22
#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))

#define USER_PTRS_PER_PGD	(PAGE_OFFSET/PGDIR_SIZE)
/*
 * C-SKY uses a two-level page-table structure:
 */
#define PTRS_PER_PGD		(PAGE_SIZE / sizeof(pgd_t))
#define PTRS_PER_PMD		1
#define PTRS_PER_PTE		(PAGE_SIZE / sizeof(pte_t))
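/*
 * Resulting geometry (assuming the usual 4 KiB PAGE_SIZE and 4-byte table
 * entries): each of the 1024 PGD entries maps 4 MiB (1 << PGDIR_SHIFT),
 * each PTE page holds 1024 entries, and the folded PMD level contributes
 * nothing. USER_PTRS_PER_PGD is simply the number of PGD slots below
 * PAGE_OFFSET that are available to user space.
 */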
#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

#define pmd_pfn(pmd)	(pmd_phys(pmd) >> PAGE_SHIFT)
#define pmd_page(pmd)	(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#define pte_clear(mm, addr, ptep)	set_pte((ptep), \
	(((unsigned int) addr >= PAGE_OFFSET) ? __pte(_PAGE_GLOBAL) : __pte(0)))
#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_pfn(x)	((unsigned long)((x).pte_low >> PAGE_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((unsigned long long)(pfn) << PAGE_SHIFT) \
				| pgprot_val(prot))

#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

#define pte_page(x)			pfn_to_page(pte_pfn(x))
#define __mk_pte(page_nr, pgprot)	__pte(((page_nr) << PAGE_SHIFT) | \
					pgprot_val(pgprot))
/*
 * The C-SKY MMU only provides VALID and DIRTY bits in hardware, so these
 * two bits are used to emulate PRESENT, READ, WRITE, EXEC, MODIFIED and
 * ACCESSED.
 */
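/*
 * In practice (see the pte_mk* helpers below): a PTE only becomes
 * hardware-VALID once it is both readable and ACCESSED, and only becomes
 * hardware-DIRTY once it is both writable and MODIFIED. The remaining
 * transitions are expected to be resolved via page faults and TLB refill
 * handling (not part of this header); that is the usual scheme for MMUs
 * without hardware-managed accessed/dirty bits.
 */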
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED)

#define PAGE_NONE	__pgprot(_PAGE_PROT_NONE)
#define PAGE_READ	__pgprot(_PAGE_BASE | _PAGE_READ | \
				_CACHE_CACHED)
#define PAGE_WRITE	__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE | \
				_CACHE_CACHED)
#define PAGE_SHARED	PAGE_WRITE

#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_VALID | \
				_PAGE_WRITE | _PAGE_DIRTY | _PAGE_MODIFIED | \
				_PAGE_GLOBAL | \
				_CACHE_CACHED)

#define _PAGE_IOREMAP	(_PAGE_BASE | _PAGE_READ | _PAGE_VALID | \
				_PAGE_WRITE | _PAGE_DIRTY | _PAGE_MODIFIED | \
				_PAGE_GLOBAL | \
				_CACHE_UNCACHED | _PAGE_SO)

#define _PAGE_CHG_MASK	(~(unsigned long) \
				(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _CACHE_MASK | _PAGE_GLOBAL))

#define MAX_SWAPFILES_CHECK() \
		BUILD_BUG_ON(MAX_SWAPFILES_SHIFT != 5)
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

extern void load_pgd(unsigned long pg_dir);
extern pte_t invalid_pte_table[PTRS_PER_PTE];

static inline void set_pte(pte_t *p, pte_t pte)
{
	*p = pte;
#if defined(CONFIG_CPU_NEED_TLBSYNC)
	dcache_wb_line((u32)p);
#endif
	/* prevent out-of-order execution */
	smp_mb();
}

#define set_pte_at(mm, addr, ptep, pteval)	set_pte(ptep, pteval)
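/*
 * Note on the dcache_wb_line() calls in set_pte()/set_pmd()/pmd_clear():
 * on cores that select CONFIG_CPU_NEED_TLBSYNC, the updated table entry
 * is written back from the data cache so that a later TLB refill, which
 * reads the page tables from memory, observes the new value. This
 * rationale is inferred from the config name and the code; it is not
 * spelled out in this header.
 */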
static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	unsigned long ptr;

	ptr = pmd_val(pmd);

	return __va(ptr);
}

#define pmd_phys(pmd)	pmd_val(pmd)
static inline void set_pmd(pmd_t *p, pmd_t pmd)
{
	*p = pmd;
#if defined(CONFIG_CPU_NEED_TLBSYNC)
	dcache_wb_line((u32)p);
#endif
	/* prevent speculative execution */
	smp_mb();
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == __pa(invalid_pte_table);
}

#define pmd_bad(pmd)	(pmd_val(pmd) & ~PAGE_MASK)

static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) != __pa(invalid_pte_table));
}

static inline void pmd_clear(pmd_t *p)
{
	pmd_val(*p) = (__pa(invalid_pte_table));
#if defined(CONFIG_CPU_NEED_TLBSYNC)
	dcache_wb_line((u32)p);
#endif
}
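/*
 * An "empty" PMD entry is not zero: it points at invalid_pte_table, a
 * page of invalid PTEs, presumably so a TLB refill through an unmapped
 * PGD slot always finds a table to walk. That is why pmd_none() and
 * pmd_present() compare against __pa(invalid_pte_table) rather than
 * testing for zero.
 */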
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_read(pte_t pte)
{
	return pte.pte_low & _PAGE_READ;
}

static inline int pte_write(pte_t pte)
{
	return (pte).pte_low & _PAGE_WRITE;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte).pte_low & _PAGE_MODIFIED;
}

static inline int pte_young(pte_t pte)
{
	return (pte).pte_low & _PAGE_ACCESSED;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED|_PAGE_DIRTY);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED|_PAGE_VALID);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) |= _PAGE_VALID;
	return pte;
}
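/*
 * These helpers implement the bit emulation described above: the software
 * MODIFIED/ACCESSED bits track Linux's dirty/young state, while the
 * hardware DIRTY/VALID bits are only set once the page is also writable
 * (pte_mkwrite/pte_mkdirty) or readable (pte_mkyoung), and are cleared
 * again by pte_wrprotect/pte_mkclean/pte_mkold.
 */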
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

/*
 * Macro to mark a page protection value as "uncacheable". Note that
 * "protection" is really a misnomer here, as the value also contains the
 * memory attribute bits, dirty bits, and various other bits.
 */
#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED | _PAGE_SO;

	return __pgprot(prot);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}
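/*
 * Both variants are uncached; the difference is that pgprot_noncached()
 * additionally sets _PAGE_SO (strongly ordered, appropriate for device
 * registers), while pgprot_writecombine() leaves it clear so accesses
 * may still be merged or reordered.
 */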
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
		     (pgprot_val(newprot)));
}
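/*
 * pte_modify() keeps the bits preserved by _PAGE_CHG_MASK (notably the
 * PFN and the MODIFIED/ACCESSED/DIRTY/VALID state) and takes the
 * protection, cacheability and global bits from newprot.
 */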
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init(void);

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *pte);

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr)	(1)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
	remap_pfn_range(vma, vaddr, pfn, size, prot)

#endif /* __ASM_CSKY_PGTABLE_H */