/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/mmu_context.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_MMU_CONTEXT_H
#define __ASM_MMU_CONTEXT_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>

#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
#include <asm/sysreg.h>
#include <asm/tlbflush.h>

extern bool rodata_full;
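
/*
 * With CONFIG_PID_IN_CONTEXTIDR, publish the incoming task's PID in
 * CONTEXTIDR_EL1 so that external trace and debug tools can identify the
 * running context; otherwise this is a no-op.
 */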
static inline void contextidr_thread_switch(struct task_struct *next)
{
        if (!IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR))
                return;

        write_sysreg(task_pid_nr(next), contextidr_el1);
        isb();
}

/*
 * Set TTBR0 to reserved_pg_dir. No translations will be possible via TTBR0.
 */
static inline void cpu_set_reserved_ttbr0(void)
{
        unsigned long ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));

        write_sysreg(ttbr, ttbr0_el1);
        isb();
}
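
/*
 * cpu_do_switch_mm() performs the low-level TTBR0_EL1/ASID update;
 * cpu_switch_mm() wraps it, installing the reserved tables first so that
 * speculative walks never see a stale TTBR/ASID combination while the
 * switch is in progress. The TTBR1 tables (swapper_pg_dir) must never be
 * installed in TTBR0, hence the BUG_ON().
 */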
void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);

static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
{
        BUG_ON(pgd == swapper_pg_dir);
        cpu_set_reserved_ttbr0();
        cpu_do_switch_mm(virt_to_phys(pgd), mm);
}

/*
 * TCR.T0SZ value to use when the ID map is active. Usually equals
 * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
 * physical memory, in which case it will be smaller.
 */
extern int idmap_t0sz;

/*
 * Ensure TCR.T0SZ is set to the provided value.
 */
static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
{
        unsigned long tcr = read_sysreg(tcr_el1);

        if ((tcr & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET == t0sz)
                return;

        tcr &= ~TCR_T0SZ_MASK;
        tcr |= t0sz << TCR_T0SZ_OFFSET;
        write_sysreg(tcr, tcr_el1);
        isb();
}
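
/*
 * cpu_set_default_tcr_t0sz() restores T0SZ for the runtime user VA size
 * (vabits_actual); cpu_set_idmap_tcr_t0sz() sets the (possibly larger)
 * input address range needed while the identity map is installed in TTBR0.
 */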
#define cpu_set_default_tcr_t0sz()      __cpu_set_tcr_t0sz(TCR_T0SZ(vabits_actual))
#define cpu_set_idmap_tcr_t0sz()        __cpu_set_tcr_t0sz(idmap_t0sz)

/*
 * Remove the idmap from TTBR0_EL1 and install the pgd of the active mm.
 *
 * The idmap lives in the same VA range as userspace, but uses global entries
 * and may use a different TCR_EL1.T0SZ. To avoid issues resulting from
 * speculative TLB fetches, we must temporarily install the reserved page
 * tables while we invalidate the TLBs and set up the correct TCR_EL1.T0SZ.
 *
 * If current is not a user task, the mm covers the TTBR1_EL1 page tables,
 * which should not be installed in TTBR0_EL1. In this case we can leave the
 * reserved page tables in place.
 */
static inline void cpu_uninstall_idmap(void)
{
        struct mm_struct *mm = current->active_mm;

        cpu_set_reserved_ttbr0();
        local_flush_tlb_all();
        cpu_set_default_tcr_t0sz();

        if (mm != &init_mm && !system_uses_ttbr0_pan())
                cpu_switch_mm(mm->pgd, mm);
}
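
/*
 * Install the given identity-map pgd in TTBR0_EL1 (via its linear-map
 * alias), with the TLBs invalidated and T0SZ set to cover the idmap.
 * cpu_install_idmap() installs the kernel's own idmap_pg_dir.
 */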
static inline void __cpu_install_idmap(pgd_t *idmap)
{
        cpu_set_reserved_ttbr0();
        local_flush_tlb_all();
        cpu_set_idmap_tcr_t0sz();

        cpu_switch_mm(lm_alias(idmap), &init_mm);
}

static inline void cpu_install_idmap(void)
{
        __cpu_install_idmap(idmap_pg_dir);
}

/*
 * Load our new page tables. A strict BBM approach requires that we ensure that
 * TLBs are free of any entries that may overlap with the global mappings we are
 * about to install.
 *
 * For a real hibernate/resume/kexec cycle TTBR0 currently points to a zero
 * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI runtime
 * services), while for a userspace-driven test_resume cycle it points to
 * userspace page tables (and we must point it at a zero page ourselves).
 *
 * We change T0SZ as part of installing the idmap. This is undone by
 * cpu_uninstall_idmap() in __cpu_suspend_exit().
 */
static inline void cpu_install_ttbr0(phys_addr_t ttbr0, unsigned long t0sz)
{
        cpu_set_reserved_ttbr0();
        local_flush_tlb_all();
        __cpu_set_tcr_t0sz(t0sz);

        /* avoid cpu_switch_mm() and its SW-PAN and CNP interactions */
        write_sysreg(ttbr0, ttbr0_el1);
        isb();
}

/*
 * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
 * avoiding the possibility of conflicting TLB entries being allocated.
 */
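/*
 * The swap is performed while executing from the identity map:
 * idmap_cpu_replace_ttbr1 is called through its physical address, so the
 * instructions doing the replacement are fetched via TTBR0 (the idmap) and
 * are unaffected by TTBR1_EL1 changing underneath them.
 */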
static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
{
        typedef void (ttbr_replace_func)(phys_addr_t);
        extern ttbr_replace_func idmap_cpu_replace_ttbr1;
        ttbr_replace_func *replace_phys;

        /* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
        phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));

        if (system_supports_cnp() && !WARN_ON(pgdp != lm_alias(swapper_pg_dir))) {
                /*
                 * cpu_replace_ttbr1() is used first while only the boot CPU
                 * is up (i.e. the cpufeature framework is not up yet), and
                 * later only when CNP is enabled via cpufeature's enable()
                 * callback. We also rely on the cpu_hwcap bit being set
                 * before the enable() function is called.
                 */
                ttbr1 |= TTBR_CNP_BIT;
        }

        replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);

        __cpu_install_idmap(idmap);
        replace_phys(ttbr1);
        cpu_uninstall_idmap();
}

/*
 * It would be nice to return ASIDs back to the allocator, but unfortunately
 * that introduces a race with a generation rollover where we could erroneously
 * free an ASID allocated in a future generation. We could work around this by
 * freeing the ASID from the context of the dying mm (e.g. in arch_exit_mmap),
 * but we'd then need to make sure that we didn't dirty any TLBs afterwards.
 * Setting a reserved TTBR0 or EPD0 would work, but it all gets ugly when you
 * take CPU migration into account.
 */
void check_and_switch_context(struct mm_struct *mm);
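
/*
 * A new mm starts with context.id of 0 ("no ASID yet"), so the first call to
 * check_and_switch_context() allocates one. The pinned count tracks
 * arm64_mm_context_get()/put() users that need the ASID to remain stable.
 */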
#define init_new_context(tsk, mm) init_new_context(tsk, mm)
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        atomic64_set(&mm->context.id, 0);
        refcount_set(&mm->context.pinned, 0);
        return 0;
}
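
/*
 * With software TTBR0 PAN, the kernel runs with the reserved tables in
 * TTBR0_EL1 and only installs the task's real user tables around uaccess
 * and on return to userspace. update_saved_ttbr0() refreshes the value
 * stashed in thread_info for that purpose (using reserved_pg_dir for
 * kernel-only mms).
 */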
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void update_saved_ttbr0(struct task_struct *tsk,
                                      struct mm_struct *mm)
{
        u64 ttbr;

        if (!system_uses_ttbr0_pan())
                return;

        if (mm == &init_mm)
                ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
        else
                ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << 48;

        WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
}
#else
static inline void update_saved_ttbr0(struct task_struct *tsk,
                                      struct mm_struct *mm)
{
}
#endif

#define enter_lazy_tlb enter_lazy_tlb
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
        /*
         * We don't actually care about the ttbr0 mapping, so point it at the
         * zero page.
         */
        update_saved_ttbr0(tsk, &init_mm);
}

static inline void __switch_mm(struct mm_struct *next)
{
        /*
         * init_mm.pgd does not contain any user mappings and it is always
         * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
         */
        if (next == &init_mm) {
                cpu_set_reserved_ttbr0();
                return;
        }

        check_and_switch_context(next);
}

static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
          struct task_struct *tsk)
{
        if (prev != next)
                __switch_mm(next);

        /*
         * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
         * value may not have been initialised yet (activate_mm caller) or the
         * ASID has changed since the last run (following the context switch
         * of another thread of the same process).
         */
        update_saved_ttbr0(tsk, next);
}
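
/*
 * On systems where only a subset of CPUs can run 32-bit EL0 tasks
 * (arm64_mismatched_32bit_el0), restrict compat tasks to those CPUs;
 * everything else may run on any possible CPU.
 */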
static inline const struct cpumask *
task_cpu_possible_mask(struct task_struct *p)
{
        if (!static_branch_unlikely(&arm64_mismatched_32bit_el0))
                return cpu_possible_mask;

        if (!is_compat_thread(task_thread_info(p)))
                return cpu_possible_mask;

        return system_32bit_el0_cpumask();
}
#define task_cpu_possible_mask  task_cpu_possible_mask
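
/* Out-of-line helpers provided by the arm64 ASID/context management code. */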
void verify_cpu_asid_bits(void);
void post_ttbr_update_workaround(void);

unsigned long arm64_mm_context_get(struct mm_struct *mm);
void arm64_mm_context_put(struct mm_struct *mm);

#include <asm-generic/mmu_context.h>

#endif /* !__ASSEMBLY__ */

#endif /* !__ASM_MMU_CONTEXT_H */