/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_PGTABLE_H
#define _ASM_IA64_PGTABLE_H

/*
 * This file contains the functions and defines necessary to modify and use
 * the IA-64 page table tree.
 *
 * This hopefully works with any (fixed) IA-64 page-size, as defined
 * in <asm/page.h>.
 *
 * Copyright (C) 1998-2005 Hewlett-Packard Co
 *	David Mosberger-Tang <[email protected]>
 */

#include <asm/mman.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/types.h>

#define IA64_MAX_PHYS_BITS	50	/* max. number of physical address bits (architected) */
/*
 * First, define the various bits in a PTE.  Note that the PTE format
 * matches the VHPT short format, the first doubleword of the VHPT long
 * format, and the first doubleword of the TLB insertion format.
 */
#define _PAGE_P_BIT		0
#define _PAGE_A_BIT		5
#define _PAGE_D_BIT		6

#define _PAGE_P			(1 << _PAGE_P_BIT)	/* page present bit */
#define _PAGE_MA_WB		(0x0 <<  2)	/* write back memory attribute */
#define _PAGE_MA_UC		(0x4 <<  2)	/* uncacheable memory attribute */
#define _PAGE_MA_UCE		(0x5 <<  2)	/* UC exported attribute */
#define _PAGE_MA_WC		(0x6 <<  2)	/* write coalescing memory attribute */
#define _PAGE_MA_NAT		(0x7 <<  2)	/* not-a-thing attribute */
#define _PAGE_MA_MASK		(0x7 <<  2)
#define _PAGE_PL_0		(0 <<  7)	/* privilege level 0 (kernel) */
#define _PAGE_PL_1		(1 <<  7)	/* privilege level 1 (unused) */
#define _PAGE_PL_2		(2 <<  7)	/* privilege level 2 (unused) */
#define _PAGE_PL_3		(3 <<  7)	/* privilege level 3 (user) */
#define _PAGE_PL_MASK		(3 <<  7)
#define _PAGE_AR_R		(0 <<  9)	/* read only */
#define _PAGE_AR_RX		(1 <<  9)	/* read & execute */
#define _PAGE_AR_RW		(2 <<  9)	/* read & write */
#define _PAGE_AR_RWX		(3 <<  9)	/* read, write & execute */
#define _PAGE_AR_R_RW		(4 <<  9)	/* read / read & write */
#define _PAGE_AR_RX_RWX		(5 <<  9)	/* read & exec / read, write & exec */
#define _PAGE_AR_RWX_RW		(6 <<  9)	/* read, write & exec / read & write */
#define _PAGE_AR_X_RX		(7 <<  9)	/* exec & promote / read & exec */
#define _PAGE_AR_MASK		(7 <<  9)
#define _PAGE_AR_SHIFT		9
#define _PAGE_A			(1 << _PAGE_A_BIT)	/* page accessed bit */
#define _PAGE_D			(1 << _PAGE_D_BIT)	/* page dirty bit */
#define _PAGE_PPN_MASK		(((__IA64_UL(1) << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL)
#define _PAGE_ED		(__IA64_UL(1) << 52)	/* exception deferral */
#define _PAGE_PROTNONE		(__IA64_UL(1) << 63)

#define _PFN_MASK		_PAGE_PPN_MASK
/* Mask of bits which may be changed by pte_modify(); the odd bits are there for _PAGE_PROTNONE */
#define _PAGE_CHG_MASK	(_PAGE_P | _PAGE_PROTNONE | _PAGE_PL_MASK | _PAGE_AR_MASK | _PAGE_ED)

#define _PAGE_SIZE_4K	12
#define _PAGE_SIZE_8K	13
#define _PAGE_SIZE_16K	14
#define _PAGE_SIZE_64K	16
#define _PAGE_SIZE_256K	18
#define _PAGE_SIZE_1M	20
#define _PAGE_SIZE_4M	22
#define _PAGE_SIZE_16M	24
#define _PAGE_SIZE_64M	26
#define _PAGE_SIZE_256M	28
#define _PAGE_SIZE_1G	30
#define _PAGE_SIZE_4G	32

#define __ACCESS_BITS		_PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_MA_WB
#define __DIRTY_BITS_NO_ED	_PAGE_A | _PAGE_P | _PAGE_D | _PAGE_MA_WB
#define __DIRTY_BITS		_PAGE_ED | __DIRTY_BITS_NO_ED
/*
 * How many pointers a page table level holds, expressed as a shift:
 */
#define PTRS_PER_PTD_SHIFT	(PAGE_SHIFT-3)

/*
 * Definitions for fourth level:
 */
#define PTRS_PER_PTE	(__IA64_UL(1) << (PTRS_PER_PTD_SHIFT))

/*
 * Definitions for third level:
 *
 * PMD_SHIFT determines the size of the area a third-level page table
 * can map.
 */
#define PMD_SHIFT	(PAGE_SHIFT + (PTRS_PER_PTD_SHIFT))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PTRS_PER_PMD	(1UL << (PTRS_PER_PTD_SHIFT))

#if CONFIG_PGTABLE_LEVELS == 4
/*
 * Definitions for second level:
 *
 * PUD_SHIFT determines the size of the area a second-level page table
 * can map.
 */
#define PUD_SHIFT	(PMD_SHIFT + (PTRS_PER_PTD_SHIFT))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PTRS_PER_PUD	(1UL << (PTRS_PER_PTD_SHIFT))
#endif

/*
 * Definitions for first level:
 *
 * PGDIR_SHIFT determines what a first-level page table entry can map.
 */
#if CONFIG_PGTABLE_LEVELS == 4
#define PGDIR_SHIFT		(PUD_SHIFT + (PTRS_PER_PTD_SHIFT))
#else
#define PGDIR_SHIFT		(PMD_SHIFT + (PTRS_PER_PTD_SHIFT))
#endif
#define PGDIR_SIZE		(__IA64_UL(1) << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))
#define PTRS_PER_PGD_SHIFT	PTRS_PER_PTD_SHIFT
#define PTRS_PER_PGD		(1UL << PTRS_PER_PGD_SHIFT)
#define USER_PTRS_PER_PGD	(5*PTRS_PER_PGD/8)	/* regions 0-4 are user regions */
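
/*
 * Worked example (editor's sketch, assuming the common PAGE_SHIFT == 14,
 * i.e. 16KB pages): PTRS_PER_PTD_SHIFT == 11, so every level holds 2048
 * pointers.  Then PMD_SHIFT == 14 + 11 == 25 (each PMD entry maps 32MB),
 * PUD_SHIFT == 36 (64GB per PUD entry) with four levels, and PGDIR_SHIFT
 * is 47 with CONFIG_PGTABLE_LEVELS == 4, or 36 with three levels.
 */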
/*
 * All the normal masks have the "page accessed" bits on, as any time
 * they are used, the page is accessed. They are cleared only by the
 * page-out routines.
 */
#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_A)
#define PAGE_SHARED	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
#define PAGE_READONLY	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
#define PAGE_COPY	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
#define PAGE_COPY_EXEC	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define PAGE_GATE	__pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
#define PAGE_KERNEL	__pgprot(__DIRTY_BITS  | _PAGE_PL_0 | _PAGE_AR_RWX)
#define PAGE_KERNELRX	__pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
#define PAGE_KERNEL_UC	__pgprot(__DIRTY_BITS  | _PAGE_PL_0 | _PAGE_AR_RWX | \
				 _PAGE_MA_UC)
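
/*
 * Worked example (editor's illustration): PAGE_READONLY expands to
 * __ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R, i.e. _PAGE_ED (bit 52),
 * _PAGE_A (0x20), _PAGE_P (0x1), _PAGE_MA_WB (0) and _PAGE_PL_3 (0x180),
 * giving the raw pgprot value 0x00100000000001a1.
 */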
# ifndef __ASSEMBLY__

#include <linux/sched/mm.h>	/* for mm_struct */
#include <linux/bitops.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
/*
 * Next come the mappings that determine how mmap() protection bits
 * (PROT_EXEC, PROT_READ, PROT_WRITE, PROT_NONE) get implemented.  The
 * _P version gets used for private mappings, the _S version for
 * mappings made with MAP_SHARED.  In a private mapping, we do a
 * copy-on-write if a task attempts to write to the page.
 */
	/* xwr */
#define pgd_ERROR(e)	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
#if CONFIG_PGTABLE_LEVELS == 4
#define pud_ERROR(e)	printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#endif
#define pmd_ERROR(e)	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pte_ERROR(e)	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))

/*
 * Some definitions to translate between mem_map, PTEs, and page addresses:
 */
/* Quick test to see if ADDR is a (potentially) valid physical address. */
static inline long
ia64_phys_addr_valid (unsigned long addr)
{
	return (addr & (local_cpu_data->unimpl_pa_mask)) == 0;
}
/*
 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
 * memory.  For the return value to be meaningful, ADDR must be >=
 * PAGE_OFFSET.  This operation can be relatively expensive (e.g.,
 * require a hash-, or multi-level tree-lookup or something of that
 * sort) but it guarantees to return TRUE only if accessing the page
 * at that address does not cause an error.  Note that there may be
 * addresses for which kern_addr_valid() returns FALSE even though an
 * access would not cause an error (e.g., this is typically true for
 * memory mapped I/O regions).
 *
 * XXX Need to implement this for IA-64.
 */
#define kern_addr_valid(addr)	(1)
/*
 * Now come the defines and routines to manage and access the three- or
 * four-level page table (depending on CONFIG_PGTABLE_LEVELS).
 */
#define VMALLOC_START	(RGN_BASE(RGN_GATE) + 0x200000000UL)
#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_SPARSEMEM_VMEMMAP)
/* SPARSEMEM_VMEMMAP uses half of vmalloc... */
# define VMALLOC_END		(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 10)))
# define vmemmap		((struct page *)VMALLOC_END)
#else
# define VMALLOC_END		(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
#endif
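
/*
 * Worked example (editor's sketch, again assuming PAGE_SHIFT == 14):
 * without VMEMMAP, VMALLOC_END sits 1UL << (4*14 - 9) == 1 << 47 bytes
 * (128TB) above the gate-region base; with SPARSEMEM_VMEMMAP that is
 * halved to 1 << 46 (64TB), and the upper half of the area holds the
 * virtual memmap.
 */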
/* fs/proc/kcore.c */
#define	kc_vaddr_to_offset(v) ((v) - RGN_BASE(RGN_GATE))
#define	kc_offset_to_vaddr(o) ((o) + RGN_BASE(RGN_GATE))

#define RGN_MAP_SHIFT (PGDIR_SHIFT + PTRS_PER_PGD_SHIFT - 3)
#define RGN_MAP_LIMIT	((1UL << RGN_MAP_SHIFT) - PAGE_SIZE)	/* per region addr limit */

/*
 * Conversion functions: convert page frame number (pfn) and a protection value to a page
 * table entry (pte).
 */
#define pfn_pte(pfn, pgprot) \
({ pte_t __pte; pte_val(__pte) = ((pfn) << PAGE_SHIFT) | pgprot_val(pgprot); __pte; })

/* Extract pfn from pte. */
#define pte_pfn(_pte)		((pte_val(_pte) & _PFN_MASK) >> PAGE_SHIFT)

#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

/* This takes a physical page address that is used by the remapping functions */
#define mk_pte_phys(physpage, pgprot) \
({ pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); __pte; })

#define pte_modify(_pte, newprot) \
	(__pte((pte_val(_pte) & ~_PAGE_CHG_MASK) | (pgprot_val(newprot) & _PAGE_CHG_MASK)))
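
/*
 * Usage sketch (editor's illustration with made-up values): build a
 * user-readable PTE for page frame 0x1234 and round-trip the pfn:
 *
 *	pte_t pte = pfn_pte(0x1234UL, PAGE_READONLY);
 *	BUG_ON(pte_pfn(pte) != 0x1234UL);
 *
 * pte_modify() then swaps in new protection bits while preserving the
 * pfn and the other bits outside _PAGE_CHG_MASK:
 *
 *	pte = pte_modify(pte, PAGE_SHARED);	(now writable, same pfn)
 */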
#define pte_none(pte)			(!pte_val(pte))
#define pte_present(pte)		(pte_val(pte) & (_PAGE_P | _PAGE_PROTNONE))
#define pte_clear(mm,addr,pte)		(pte_val(*(pte)) = 0UL)
/* pte_page() returns the "struct page *" corresponding to the PTE: */
#define pte_page(pte)			virt_to_page(((pte_val(pte) & _PFN_MASK) + PAGE_OFFSET))

#define pmd_none(pmd)			(!pmd_val(pmd))
#define pmd_bad(pmd)			(!ia64_phys_addr_valid(pmd_val(pmd)))
#define pmd_present(pmd)		(pmd_val(pmd) != 0UL)
#define pmd_clear(pmdp)			(pmd_val(*(pmdp)) = 0UL)
#define pmd_page_vaddr(pmd)		((unsigned long) __va(pmd_val(pmd) & _PFN_MASK))
#define pmd_pfn(pmd)			((pmd_val(pmd) & _PFN_MASK) >> PAGE_SHIFT)
#define pmd_page(pmd)			virt_to_page((pmd_val(pmd) + PAGE_OFFSET))

#define pud_none(pud)			(!pud_val(pud))
#define pud_bad(pud)			(!ia64_phys_addr_valid(pud_val(pud)))
#define pud_present(pud)		(pud_val(pud) != 0UL)
#define pud_clear(pudp)			(pud_val(*(pudp)) = 0UL)
#define pud_pgtable(pud)		((pmd_t *) __va(pud_val(pud) & _PFN_MASK))
#define pud_page(pud)			virt_to_page((pud_val(pud) + PAGE_OFFSET))

#if CONFIG_PGTABLE_LEVELS == 4
#define p4d_none(p4d)			(!p4d_val(p4d))
#define p4d_bad(p4d)			(!ia64_phys_addr_valid(p4d_val(p4d)))
#define p4d_present(p4d)		(p4d_val(p4d) != 0UL)
#define p4d_clear(p4dp)			(p4d_val(*(p4dp)) = 0UL)
#define p4d_pgtable(p4d)		((pud_t *) __va(p4d_val(p4d) & _PFN_MASK))
#define p4d_page(p4d)			virt_to_page((p4d_val(p4d) + PAGE_OFFSET))
#endif
/*
 * The following have defined behavior only if pte_present() is true.
 * (pte_write() tests for AR values 2..6, i.e. every access-rights
 * encoding that permits writes.)
 */
#define pte_write(pte)	((unsigned) (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) - 2) <= 4)
#define pte_exec(pte)		((pte_val(pte) & _PAGE_AR_RX) != 0)
#define pte_dirty(pte)		((pte_val(pte) & _PAGE_D) != 0)
#define pte_young(pte)		((pte_val(pte) & _PAGE_A) != 0)
/*
 * Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the 2nd bit in the
 * access rights:
 */
#define pte_wrprotect(pte)	(__pte(pte_val(pte) & ~_PAGE_AR_RW))
#define pte_mkwrite(pte)	(__pte(pte_val(pte) | _PAGE_AR_RW))
#define pte_mkold(pte)		(__pte(pte_val(pte) & ~_PAGE_A))
#define pte_mkyoung(pte)	(__pte(pte_val(pte) | _PAGE_A))
#define pte_mkclean(pte)	(__pte(pte_val(pte) & ~_PAGE_D))
#define pte_mkdirty(pte)	(__pte(pte_val(pte) | _PAGE_D))
#define pte_mkhuge(pte)		(__pte(pte_val(pte)))
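
/*
 * Worked example (editor's illustration): the AR field of _PAGE_AR_RWX is
 * 3 (binary 011); pte_wrprotect() clears the _PAGE_AR_RW bit (value 2),
 * leaving 1 (binary 001) == _PAGE_AR_RX.  Likewise _PAGE_AR_RW (010)
 * becomes _PAGE_AR_R (000), and pte_mkwrite() reverses both transitions
 * by setting that same bit.
 */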
/*
 * Because ia64's Icache and Dcache are not coherent (on a cpu), we need to
 * sync the icache and dcache when we insert a *new* executable page.
 * __ia64_sync_icache_dcache() checks the PG_arch_1 bit and flushes the
 * icache if necessary.
 *
 * set_pte() is also called by the kernel, but we can expect that the kernel
 * flushes the icache explicitly if necessary.
 */
#define pte_present_exec_user(pte)\
	((pte_val(pte) & (_PAGE_P | _PAGE_PL_MASK | _PAGE_AR_RX)) == \
		(_PAGE_P | _PAGE_PL_3 | _PAGE_AR_RX))

extern void __ia64_sync_icache_dcache(pte_t pteval);

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	/* page is present && page is user && page is executable
	 * && (page swapin or new page or page migration
	 *	|| copy_on_write with page copying.)
	 */
	if (pte_present_exec_user(pteval) &&
	    (!pte_present(*ptep) ||
	     pte_pfn(*ptep) != pte_pfn(pteval)))
		/* load_module() calls flush_icache_range() explicitly */
		__ia64_sync_icache_dcache(pteval);
	*ptep = pteval;
}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

/*
 * Make page protection values cacheable, uncacheable, or write-
 * combining.  Note that "protection" is really a misnomer here as the
 * protection value contains the memory attribute bits, dirty bits, and
 * various other bits as well.
 */
#define pgprot_cacheable(prot)		__pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WB)
#define pgprot_noncached(prot)		__pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_UC)
#define pgprot_writecombine(prot)	__pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WC)

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

static inline unsigned long
pgd_index (unsigned long address)
{
	unsigned long region = address >> 61;
	unsigned long l1index = (address >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1);

	return (region << (PAGE_SHIFT - 6)) | l1index;
}
#define pgd_index pgd_index
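
/*
 * Worked example (editor's sketch, PAGE_SHIFT == 14 so PTRS_PER_PGD ==
 * 2048): the top three bits of the pgd index select the region, leaving
 * PTRS_PER_PGD >> 3 == 256 first-level slots per region; the region is
 * shifted left by PAGE_SHIFT - 6 == 8.  A region-1 user address thus
 * yields pgd_index() == (1 << 8) | l1index.
 */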
/*
 * In the kernel's mapped region we know everything is in region number 5, so
 * as an optimisation its PGD already points to the area for that region.
 * However, this also means that we cannot use pgd_index() and we must
 * never add the region here.
 */
#define pgd_offset_k(addr) \
	(init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))

/* Look up a pgd entry in the gate area.  On IA-64, the gate-area
   resides in the kernel-mapped segment, hence we use pgd_offset_k()
   here.  */
#define pgd_offset_gate(mm, addr)	pgd_offset_k(addr)
/* atomic versions of some of the PTE manipulations: */
static inline int
ptep_test_and_clear_young (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_A_BIT, ptep);
#else
	pte_t pte = *ptep;
	if (!pte_young(pte))
		return 0;
	set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return 1;
#endif
}

static inline pte_t
ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	return __pte(xchg((long *) ptep, 0));
#else
	pte_t pte = *ptep;
	pte_clear(mm, addr, ptep);
	return pte;
#endif
}

static inline void
ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	unsigned long new, old;

	do {
		old = pte_val(*ptep);
		new = pte_val(pte_wrprotect(__pte (old)));
	} while (cmpxchg((unsigned long *) ptep, old, new) != old);
#else
	pte_t old_pte = *ptep;

	set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
#endif
}
static inline int
pte_same (pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#define update_mmu_cache(vma, address, ptep) do { } while (0)

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init (void);
/*
 * Note: The macros below rely on the fact that MAX_SWAPFILES_SHIFT <= number of
 *	 bits in the swap-type field of the swap pte.  It would be nice to
 *	 enforce that, but we can't easily include <linux/swap.h> here.
 *	 (Of course, better still would be to define MAX_SWAPFILES_SHIFT here...).
 *
 * Format of swap pte:
 *	bit   0   : present bit (must be zero)
 *	bits  1- 7: swap-type
 *	bits  8-62: swap offset
 *	bit  63   : _PAGE_PROTNONE bit
 */
#define __swp_type(entry)		(((entry).val >> 1) & 0x7f)
#define __swp_offset(entry)		(((entry).val << 1) >> 9)
#define __swp_entry(type,offset)	((swp_entry_t) { ((type) << 1) | ((long) (offset) << 8) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
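
/*
 * Round-trip sketch (editor's illustration with made-up values):
 * __swp_entry(5, 0x1000) yields val == (5 << 1) | (0x1000L << 8);
 * __swp_type() then recovers 5 and __swp_offset() recovers 0x1000 (its
 * "<< 1 ... >> 9" sequence also discards the _PAGE_PROTNONE bit 63).
 * __swp_entry() leaves bits 0 and 63 clear, so pte_present() is false
 * for a swap pte, as required.
 */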
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
extern struct page *zero_page_memmap_ptr;
#define ZERO_PAGE(vaddr) (zero_page_memmap_ptr)

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

#ifdef CONFIG_HUGETLB_PAGE
#define HUGETLB_PGDIR_SHIFT	(HPAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define HUGETLB_PGDIR_SIZE	(__IA64_UL(1) << HUGETLB_PGDIR_SHIFT)
#define HUGETLB_PGDIR_MASK	(~(HUGETLB_PGDIR_SIZE-1))
#endif
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Update PTEP with ENTRY, which is guaranteed to be a less
 * restrictive PTE.  That is, ENTRY may have the ACCESSED, DIRTY, and
 * WRITABLE bits turned on, when the value at PTEP did not.  The
 * WRITABLE bit may only be turned on if SAFELY_WRITABLE is TRUE.
 *
 * SAFELY_WRITABLE is TRUE if we can update the value at PTEP without
 * having to worry about races.  On SMP machines, there are only two
 * cases where this is true:
 *
 *	(1) *PTEP has the PRESENT bit turned OFF
 *	(2) ENTRY has the DIRTY bit turned ON
 *
 * On ia64, we could implement this routine with a cmpxchg()-loop
 * which ORs in the _PAGE_A/_PAGE_D bit if they're set in ENTRY.
 * However, like on x86, we can get a more streamlined version by
 * observing that it is OK to drop ACCESSED bit updates when
 * SAFELY_WRITABLE is FALSE.  Besides being rare, all that would do is
 * result in an extra Access-bit fault, which would then turn on the
 * ACCESSED bit in the low-level fault handler (iaccess_bit or
 * daccess_bit in ivt.S).
 */
#ifdef CONFIG_SMP
# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
({									\
	int __changed = !pte_same(*(__ptep), __entry);			\
	if (__changed && __safely_writable) {				\
		set_pte(__ptep, __entry);				\
		flush_tlb_page(__vma, __addr);				\
	}								\
	__changed;							\
})
#else
# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
({									\
	int __changed = !pte_same(*(__ptep), __entry);			\
	if (__changed) {						\
		set_pte_at((__vma)->vm_mm, (__addr), __ptep, __entry);	\
		flush_tlb_page(__vma, __addr);				\
	}								\
	__changed;							\
})
#endif

# endif /* !__ASSEMBLY__ */
/*
 * Identity-mapped regions use a large page size.  We'll call such large pages
 * "granules".  If you can think of a better name that's unambiguous, let me
 * know...
 */
#if defined(CONFIG_IA64_GRANULE_64MB)
# define IA64_GRANULE_SHIFT	_PAGE_SIZE_64M
#elif defined(CONFIG_IA64_GRANULE_16MB)
# define IA64_GRANULE_SHIFT	_PAGE_SIZE_16M
#endif
#define IA64_GRANULE_SIZE	(1 << IA64_GRANULE_SHIFT)
/*
 * log2() of the page size we use to map the kernel image (IA64_TR_KERNEL):
 */
#define KERNEL_TR_PAGE_SHIFT	_PAGE_SIZE_64M
#define KERNEL_TR_PAGE_SIZE	(1 << KERNEL_TR_PAGE_SHIFT)

/* These tell get_user_pages() that the first gate page is accessible from user-level.  */
#define FIXADDR_USER_START	GATE_ADDR
#ifdef HAVE_BUGGY_SEGREL
# define FIXADDR_USER_END	(GATE_ADDR + 2*PAGE_SIZE)
#else
# define FIXADDR_USER_END	(GATE_ADDR + 2*PERCPU_PAGE_SIZE)
#endif

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#define __HAVE_ARCH_PGD_OFFSET_GATE

#if CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#endif
#include <asm-generic/pgtable-nop4d.h>

#endif /* _ASM_IA64_PGTABLE_H */