kernel-pgtable.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Kernel page table mapping
 *
 * Copyright (C) 2015 ARM Ltd.
 */
#ifndef __ASM_KERNEL_PGTABLE_H
#define __ASM_KERNEL_PGTABLE_H

#include <asm/boot.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sparsemem.h>

/*
 * The linear mapping and the start of memory are both 2M aligned (per
 * the arm64 booting.txt requirements). Hence we can use section mapping
 * with 4K (section size = 2M) but not with 16K (section size = 32M) or
 * 64K (section size = 512M).
 */
#ifdef CONFIG_ARM64_4K_PAGES
#define ARM64_KERNEL_USES_PMD_MAPS	1
#else
#define ARM64_KERNEL_USES_PMD_MAPS	0
#endif

/*
 * The idmap and swapper page tables need some space reserved in the kernel
 * image. Both require pgd, pud (4 levels only) and pmd tables to (section)
 * map the kernel. With the 64K page configuration, swapper and idmap need to
 * map to pte level. The swapper also maps the FDT (see __create_page_tables
 * for more information). Note that the number of ID map translation levels
 * could be increased on the fly if system RAM is out of reach for the default
 * VA range, so pages required to map highest possible PA are reserved in all
 * cases.
 */
#if ARM64_KERNEL_USES_PMD_MAPS
#define SWAPPER_PGTABLE_LEVELS	(CONFIG_PGTABLE_LEVELS - 1)
#else
#define SWAPPER_PGTABLE_LEVELS	(CONFIG_PGTABLE_LEVELS)
#endif
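
/*
 * For illustration (the exact values depend on the chosen configuration):
 * with a 4K granule and CONFIG_PGTABLE_LEVELS == 4, the kernel is mapped
 * with PMD section entries, so the swapper only needs pgd, pud and pmd
 * tables (3 levels); with a 64K granule and CONFIG_PGTABLE_LEVELS == 3, no
 * section maps are used and tables are needed all the way down to the pte
 * level (also 3 levels).
 */
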
/*
 * If KASLR is enabled, then an offset K is added to the kernel address
 * space. The bottom 21 bits of this offset are zero to guarantee 2MB
 * alignment for PA and VA.
 *
 * For each pagetable level of the swapper, we know that the shift will
 * be larger than 21 (for the 4KB granule case we use section maps thus
 * the smallest shift is actually 30) thus there is the possibility that
 * KASLR can increase the number of pagetable entries by 1, so we make
 * room for this extra entry.
 *
 * Note KASLR cannot increase the number of required entries for a level
 * by more than one because it increments both the virtual start and end
 * addresses equally (the extra entry comes from the case where the end
 * address is just pushed over a boundary and the start address isn't).
 */
#ifdef CONFIG_RANDOMIZE_BASE
#define EARLY_KASLR	(1)
#else
#define EARLY_KASLR	(0)
#endif

#define EARLY_ENTRIES(vstart, vend, shift, add) \
	((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1 + add)
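
/*
 * For illustration, assuming a 4K granule where SWAPPER_TABLE_SHIFT is
 * PUD_SHIFT (30): a 2M-aligned kernel image that fits within a single 1G
 * region needs ((vend - 1) >> 30) - (vstart >> 30) + 1 = 1 entry at that
 * level, or 2 if it happens to straddle a 1G boundary. Passing
 * add = EARLY_KASLR = 1 reserves one further entry for the case where the
 * random offset pushes only the end address across another boundary.
 */
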
#define EARLY_PGDS(vstart, vend, add) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT, add))

#if SWAPPER_PGTABLE_LEVELS > 3
#define EARLY_PUDS(vstart, vend, add) (EARLY_ENTRIES(vstart, vend, PUD_SHIFT, add))
#else
#define EARLY_PUDS(vstart, vend, add) (0)
#endif

#if SWAPPER_PGTABLE_LEVELS > 2
#define EARLY_PMDS(vstart, vend, add) (EARLY_ENTRIES(vstart, vend, SWAPPER_TABLE_SHIFT, add))
#else
#define EARLY_PMDS(vstart, vend, add) (0)
#endif

#define EARLY_PAGES(vstart, vend, add) (1	/* PGDIR page */				\
	+ EARLY_PGDS((vstart), (vend), add)	/* each PGDIR needs a next level page table */	\
	+ EARLY_PUDS((vstart), (vend), add)	/* each PUD needs a next level page table */	\
	+ EARLY_PMDS((vstart), (vend), add))	/* each PMD needs a next level page table */
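
/*
 * Rough worked example, assuming a 4K granule, CONFIG_PGTABLE_LEVELS == 4,
 * CONFIG_RANDOMIZE_BASE (EARLY_KASLR == 1) and an image that crosses no 512G
 * or 1G boundary: EARLY_PAGES(KIMAGE_VADDR, _end, 1) evaluates to
 * 1 (pgd) + 2 (pud tables) + 0 + 2 (pmd tables) = 5, so INIT_DIR_SIZE below
 * reserves 5 * PAGE_SIZE = 20K for the early swapper tables.
 */
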
#define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR, _end, EARLY_KASLR))

/* the initial ID map may need two extra pages if it needs to be extended */
#if VA_BITS < 48
#define INIT_IDMAP_DIR_SIZE	((INIT_IDMAP_DIR_PAGES + 2) * PAGE_SIZE)
#else
#define INIT_IDMAP_DIR_SIZE	(INIT_IDMAP_DIR_PAGES * PAGE_SIZE)
#endif
#define INIT_IDMAP_DIR_PAGES	EARLY_PAGES(KIMAGE_VADDR, _end + MAX_FDT_SIZE + SWAPPER_BLOCK_SIZE, 1)
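
/*
 * Note that INIT_IDMAP_DIR_SIZE above references INIT_IDMAP_DIR_PAGES before
 * its #define, and INIT_IDMAP_DIR_PAGES in turn uses SWAPPER_BLOCK_SIZE,
 * which is only defined below. The preprocessor expands macros at the point
 * of use rather than at the point of definition, so this ordering is
 * harmless as long as nothing uses these macros earlier in the file.
 */
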
/* Initial memory map size */
#if ARM64_KERNEL_USES_PMD_MAPS
#define SWAPPER_BLOCK_SHIFT	PMD_SHIFT
#define SWAPPER_BLOCK_SIZE	PMD_SIZE
#define SWAPPER_TABLE_SHIFT	PUD_SHIFT
#else
#define SWAPPER_BLOCK_SHIFT	PAGE_SHIFT
#define SWAPPER_BLOCK_SIZE	PAGE_SIZE
#define SWAPPER_TABLE_SHIFT	PMD_SHIFT
#endif
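
/*
 * For example: with 4K pages the early kernel mapping uses 2M PMD block
 * entries (SWAPPER_BLOCK_SIZE == PMD_SIZE) and the table level above the
 * blocks is the PUD; with 16K or 64K pages it falls back to page-granular
 * mappings (SWAPPER_BLOCK_SIZE == PAGE_SIZE) and the level above the ptes
 * is the PMD.
 */
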
/*
 * Initial memory map attributes.
 */
#define SWAPPER_PTE_FLAGS	(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define SWAPPER_PMD_FLAGS	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)

#if ARM64_KERNEL_USES_PMD_MAPS
#define SWAPPER_RW_MMUFLAGS	(PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS)
#define SWAPPER_RX_MMUFLAGS	(SWAPPER_RW_MMUFLAGS | PMD_SECT_RDONLY)
#else
#define SWAPPER_RW_MMUFLAGS	(PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)
#define SWAPPER_RX_MMUFLAGS	(SWAPPER_RW_MMUFLAGS | PTE_RDONLY)
#endif

/*
 * To make optimal use of block mappings when laying out the linear
 * mapping, round down the base of physical memory to a size that can
 * be mapped efficiently, i.e., either PUD_SIZE (4k granule) or PMD_SIZE
 * (64k granule), or a multiple that can be mapped using contiguous bits
 * in the page tables: 32 * PMD_SIZE (16k granule)
 */
#if defined(CONFIG_ARM64_4K_PAGES)
#define ARM64_MEMSTART_SHIFT		PUD_SHIFT
#elif defined(CONFIG_ARM64_16K_PAGES)
#define ARM64_MEMSTART_SHIFT		CONT_PMD_SHIFT
#else
#define ARM64_MEMSTART_SHIFT		PMD_SHIFT
#endif
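
/*
 * With the shift values implied by each granule, this rounds the base of
 * RAM down to 1G for 4K (PUD_SHIFT == 30), 1G for 16K (CONT_PMD_SHIFT ==
 * 25 + 5 == 30, i.e. 32 contiguous 32M PMD blocks) and 512M for 64K
 * (PMD_SHIFT == 29).
 */
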
/*
 * sparsemem vmemmap imposes an additional requirement on the alignment of
 * memstart_addr, because the base of the vmemmap region corresponds
 * directly to the start of physical memory and therefore needs to appear
 * sufficiently aligned in the virtual address space.
 */
#if ARM64_MEMSTART_SHIFT < SECTION_SIZE_BITS
#define ARM64_MEMSTART_ALIGN	(1UL << SECTION_SIZE_BITS)
#else
#define ARM64_MEMSTART_ALIGN	(1UL << ARM64_MEMSTART_SHIFT)
#endif
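
/*
 * In other words, ARM64_MEMSTART_ALIGN is simply the larger of the
 * block-mapping rounding chosen above and the sparsemem section size, so
 * that memstart_addr never ends up in the middle of a section.
 */
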
#endif /* __ASM_KERNEL_PGTABLE_H */