/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

/*
 * page table flags for software walked/managed MMUv3 (ARC700) and MMUv4 (HS)
 * These correspond to the corresponding bits in the TLB
 */
  9. #ifndef _ASM_ARC_PGTABLE_BITS_ARCV2_H
  10. #define _ASM_ARC_PGTABLE_BITS_ARCV2_H
  11. #ifdef CONFIG_ARC_CACHE_PAGES
  12. #define _PAGE_CACHEABLE (1 << 0) /* Cached (H) */
  13. #else
  14. #define _PAGE_CACHEABLE 0
  15. #endif
  16. #define _PAGE_EXECUTE (1 << 1) /* User Execute (H) */
  17. #define _PAGE_WRITE (1 << 2) /* User Write (H) */
  18. #define _PAGE_READ (1 << 3) /* User Read (H) */
  19. #define _PAGE_ACCESSED (1 << 4) /* Accessed (s) */
  20. #define _PAGE_DIRTY (1 << 5) /* Modified (s) */
  21. #define _PAGE_SPECIAL (1 << 6)
  22. #define _PAGE_GLOBAL (1 << 8) /* ASID agnostic (H) */
  23. #define _PAGE_PRESENT (1 << 9) /* PTE/TLB Valid (H) */
  24. #ifdef CONFIG_ARC_MMU_V4
  25. #define _PAGE_HW_SZ (1 << 10) /* Normal/super (H) */
  26. #else
  27. #define _PAGE_HW_SZ 0
  28. #endif
  29. /* Defaults for every user page */
  30. #define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
  31. /* Set of bits not changed in pte_modify */
  32. #define _PAGE_CHG_MASK (PAGE_MASK_PHYS | _PAGE_ACCESSED | _PAGE_DIRTY | \
  33. _PAGE_SPECIAL)
  34. /* More Abbrevaited helpers */
  35. #define PAGE_U_NONE __pgprot(___DEF)
  36. #define PAGE_U_R __pgprot(___DEF | _PAGE_READ)
  37. #define PAGE_U_W_R __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE)
  38. #define PAGE_U_X_R __pgprot(___DEF | _PAGE_READ | _PAGE_EXECUTE)
  39. #define PAGE_U_X_W_R __pgprot(___DEF \
  40. | _PAGE_READ | _PAGE_WRITE | _PAGE_EXECUTE)
  41. #define PAGE_KERNEL __pgprot(___DEF | _PAGE_GLOBAL \
  42. | _PAGE_READ | _PAGE_WRITE | _PAGE_EXECUTE)
  43. #define PAGE_SHARED PAGE_U_W_R
  44. #define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE))
/*
 * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
 *
 * Certain cases have 1:1 mapping
 * e.g. __P101 means VM_READ, VM_EXEC and !VM_SHARED
 * which directly corresponds to PAGE_U_X_R
 *
 * Other rules which cause the divergence from 1:1 mapping
 *
 * 1. Although ARC700 can do exclusive execute/write protection (meaning R
 *    can be tracked independently of X/W unlike some other CPUs), still to
 *    keep things consistent with other archs:
 *      -Write implies Read:   W => R
 *      -Execute implies Read: X => R
 *
 * 2. Pvt Writable doesn't have Write Enabled initially: Pvt-W => !W
 *    This is to enable COW mechanism
 */

	/* xwr */
  64. #ifndef __ASSEMBLY__
  65. #define pte_write(pte) (pte_val(pte) & _PAGE_WRITE)
  66. #define pte_dirty(pte) (pte_val(pte) & _PAGE_DIRTY)
  67. #define pte_young(pte) (pte_val(pte) & _PAGE_ACCESSED)
  68. #define pte_special(pte) (pte_val(pte) & _PAGE_SPECIAL)
  69. #define PTE_BIT_FUNC(fn, op) \
  70. static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
  71. PTE_BIT_FUNC(mknotpresent, &= ~(_PAGE_PRESENT));
  72. PTE_BIT_FUNC(wrprotect, &= ~(_PAGE_WRITE));
  73. PTE_BIT_FUNC(mkwrite, |= (_PAGE_WRITE));
  74. PTE_BIT_FUNC(mkclean, &= ~(_PAGE_DIRTY));
  75. PTE_BIT_FUNC(mkdirty, |= (_PAGE_DIRTY));
  76. PTE_BIT_FUNC(mkold, &= ~(_PAGE_ACCESSED));
  77. PTE_BIT_FUNC(mkyoung, |= (_PAGE_ACCESSED));
  78. PTE_BIT_FUNC(mkspecial, |= (_PAGE_SPECIAL));
  79. PTE_BIT_FUNC(mkhuge, |= (_PAGE_HW_SZ));
  80. static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
  81. {
  82. return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
  83. }
  84. static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
  85. pte_t *ptep, pte_t pteval)
  86. {
  87. set_pte(ptep, pteval);
  88. }
  89. void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
  90. pte_t *ptep);
  91. /* Encode swap {type,off} tuple into PTE
  92. * We reserve 13 bits for 5-bit @type, keeping bits 12-5 zero, ensuring that
  93. * PAGE_PRESENT is zero in a PTE holding swap "identifier"
  94. */
  95. #define __swp_entry(type, off) ((swp_entry_t) \
  96. { ((type) & 0x1f) | ((off) << 13) })
  97. /* Decode a PTE containing swap "identifier "into constituents */
  98. #define __swp_type(pte_lookalike) (((pte_lookalike).val) & 0x1f)
  99. #define __swp_offset(pte_lookalike) ((pte_lookalike).val >> 13)
  100. #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
  101. #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
  102. #define kern_addr_valid(addr) (1)
  103. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  104. #include <asm/hugepage.h>
  105. #endif
  106. #endif /* __ASSEMBLY__ */
  107. #endif