mmu_context.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cputhreads.h>

/*
 * Most of the context management is out of line
 */
#define init_new_context init_new_context
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
#define destroy_context destroy_context
extern void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct mm_iommu_table_group_mem_t;

extern bool mm_iommu_preregistered(struct mm_struct *mm);
extern long mm_iommu_new(struct mm_struct *mm,
		unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua,
		unsigned long entries, unsigned long dev_hpa,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_put(struct mm_struct *mm,
		struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_init(struct mm_struct *mm);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
		unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
		unsigned int pageshift, unsigned long *size);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
#else
static inline bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
		unsigned int pageshift, unsigned long *size)
{
	return false;
}

static inline void mm_iommu_init(struct mm_struct *mm) { }
#endif
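
/*
 * Illustrative sketch only (not part of this header): the SPAPR TCE
 * VFIO/KVM path preregisters guest memory with mm_iommu_new(), then
 * resolves userspace addresses while holding a "mapped" reference:
 *
 *	mem = mm_iommu_lookup(mm, ua, 1UL << pageshift);
 *	if (mem && !mm_iommu_mapped_inc(mem)) {
 *		ret = mm_iommu_ua_to_hpa(mem, ua, pageshift, &hpa);
 *		...
 *		mm_iommu_mapped_dec(mem);
 *	}
 *
 * and eventually drops the region with mm_iommu_put(). The real calling
 * sequence lives in the VFIO/KVM TCE code, not here.
 */
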
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);

#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next);
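
/*
 * On Book3S 64, Radix switches contexts by programming a new PID,
 * while Hash reloads the SLB for the incoming mm.
 */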
static inline void switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next,
				      struct task_struct *tsk)
{
	if (radix_enabled())
		return radix__switch_mmu_context(prev, next);
	return switch_slb(tsk, next);
}

extern int hash__alloc_context_id(void);
void __init hash__reserve_context_id(int id);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }

#ifdef CONFIG_PPC_64S_HASH_MMU
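/*
 * With the Hash MMU a single context id only covers
 * MAX_EA_BITS_PER_CONTEXT bits of effective address, so larger address
 * spaces are backed by extra context ids, one per chunk, kept in
 * mm->context.extended_id[].
 */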
static inline int alloc_extended_context(struct mm_struct *mm,
					 unsigned long ea)
{
	int context_id;

	int index = ea >> MAX_EA_BITS_PER_CONTEXT;

	context_id = hash__alloc_context_id();
	if (context_id < 0)
		return context_id;

	VM_WARN_ON(mm->context.extended_id[index]);
	mm->context.extended_id[index] = context_id;
	return context_id;
}

static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
{
	int context_id;

	context_id = get_user_context(&mm->context, ea);
	if (!context_id)
		return true;
	return false;
}
#endif
#else
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
extern unsigned long __init_new_context(void);
extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);

static inline int alloc_extended_context(struct mm_struct *mm,
					 unsigned long ea)
{
	/* Should never be called on non-Book3S 64 */
	WARN_ON(1);
	return -ENOMEM;
}

static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
{
	return false;
}
#endif
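
/*
 * Grant/revoke this mm's access to a coprocessor type (an ACOP bit),
 * used by the icswx coprocessor support code.
 */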
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);

#ifdef CONFIG_PPC_BOOK3S_64
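/*
 * active_cpus counts CPUs (and, via the copro hooks below, nest MMU
 * users) that may hold translations for this mm; the flush code uses
 * it to choose between local and global TLB invalidations.
 */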
static inline void inc_mm_active_cpus(struct mm_struct *mm)
{
	atomic_inc(&mm->context.active_cpus);
}

static inline void dec_mm_active_cpus(struct mm_struct *mm)
{
	atomic_dec(&mm->context.active_cpus);
}

static inline void mm_context_add_copro(struct mm_struct *mm)
{
	/*
	 * If any copro is in use, increment the active CPU count
	 * in order to force TLB invalidations to be global, so that
	 * they propagate to the Nest MMU.
	 */
	if (atomic_inc_return(&mm->context.copros) == 1)
		inc_mm_active_cpus(mm);
}

static inline void mm_context_remove_copro(struct mm_struct *mm)
{
	int c;

	/*
	 * When removing the last copro, we need to broadcast a global
	 * flush of the full mm, as the next TLBI may be local and the
	 * nMMU and/or PSL need to be cleaned up.
	 *
	 * Both the 'copros' and 'active_cpus' counts are looked at in
	 * flush_all_mm() to determine the scope (local/global) of the
	 * TLBIs, so we need to flush first before decrementing
	 * 'copros'. If this API is used by several callers for the
	 * same context, it can lead to over-flushing. It's hopefully
	 * not common enough to be a problem.
	 *
	 * Skip on hash, as we don't know how to do the proper flush
	 * for the time being. Invalidations will remain global if
	 * used on hash. Note that we can't drop 'copros' either, as
	 * it could make some invalidations local with no flush
	 * in-between.
	 */
	if (radix_enabled()) {
		flush_all_mm(mm);

		c = atomic_dec_if_positive(&mm->context.copros);
		/* Detect imbalance between add and remove */
		WARN_ON(c < 0);

		if (c == 0)
			dec_mm_active_cpus(mm);
	}
}

/*
 * The vas_windows counter tracks the number of open windows in the mm
 * context. During context switch, use this counter to clear the
 * foreign real address mapping (CP_ABORT) for any thread / process
 * that intends to use COPY/PASTE. When a process closes all of its
 * windows, disable CP_ABORT, which is expensive to run.
 *
 * For user context, register a copro so that TLBIs are seen by the
 * nest MMU. mm_context_add/remove_vas_window() are used only for user
 * space windows.
 */
static inline void mm_context_add_vas_window(struct mm_struct *mm)
{
	atomic_inc(&mm->context.vas_windows);
	mm_context_add_copro(mm);
}

static inline void mm_context_remove_vas_window(struct mm_struct *mm)
{
	int v;

	mm_context_remove_copro(mm);
	v = atomic_dec_if_positive(&mm->context.vas_windows);

	/* Detect imbalance between add and remove */
	WARN_ON(v < 0);
}
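
/*
 * Illustrative pairing only (the real callers live in the VAS driver):
 *
 *	mm_context_add_vas_window(current->mm);		on window open
 *	...
 *	mm_context_remove_vas_window(current->mm);	on window close
 *
 * so that CP_ABORT is issued on context switch only while at least one
 * window remains open.
 */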
#else
static inline void inc_mm_active_cpus(struct mm_struct *mm) { }
static inline void dec_mm_active_cpus(struct mm_struct *mm) { }
static inline void mm_context_add_copro(struct mm_struct *mm) { }
static inline void mm_context_remove_copro(struct mm_struct *mm) { }
#endif
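
/*
 * Process-scoped invalidations for the H_RPT_INVALIDATE hcall used with
 * nested KVM guests; stubbed out when KVM HV or Radix support is not
 * built in.
 */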
#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
void do_h_rpt_invalidate_prt(unsigned long pid, unsigned long lpid,
			     unsigned long type, unsigned long pg_sizes,
			     unsigned long start, unsigned long end);
#else
static inline void do_h_rpt_invalidate_prt(unsigned long pid,
					    unsigned long lpid,
					    unsigned long type,
					    unsigned long pg_sizes,
					    unsigned long start,
					    unsigned long end) { }
#endif

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
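
/*
 * Defining switch_mm_irqs_off tells the generic code that this
 * architecture supplies a real irqs-off variant instead of falling
 * back to switch_mm().
 */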
#define switch_mm_irqs_off switch_mm_irqs_off

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	switch_mm_irqs_off(prev, next, current);
}

/* We don't currently use enter_lazy_tlb() for anything */
#ifdef CONFIG_PPC_BOOK3E_64
#define enter_lazy_tlb enter_lazy_tlb
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
	get_paca()->pgd = NULL;
}
#endif

extern void arch_exit_mmap(struct mm_struct *mm);
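
/*
 * If user space unmaps the VDSO (CRIU does this on restore, for
 * instance), forget its base address so we don't keep using a stale
 * mapping for signal trampolines.
 */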
static inline void arch_unmap(struct mm_struct *mm,
			      unsigned long start, unsigned long end)
{
	unsigned long vdso_base = (unsigned long)mm->context.vdso;

	if (start <= vdso_base && vdso_base < end)
		mm->context.vdso = NULL;
}

#ifdef CONFIG_PPC_MEM_KEYS
bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
			       bool execute, bool foreign);
void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm);
#else /* CONFIG_PPC_MEM_KEYS */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
					     bool write, bool execute, bool foreign)
{
	/* by default, allow everything */
	return true;
}

#define pkey_mm_init(mm)
#define arch_dup_pkeys(oldmm, mm)

static inline u64 pte_to_hpte_pkey_bits(u64 pteflags, unsigned long flags)
{
	return 0x0UL;
}

#endif /* CONFIG_PPC_MEM_KEYS */
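
/*
 * Called at fork() time to copy the parent's protection key state into
 * the child mm.
 */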
static inline int arch_dup_mmap(struct mm_struct *oldmm,
				struct mm_struct *mm)
{
	arch_dup_pkeys(oldmm, mm);
	return 0;
}

#include <asm-generic/mmu_context.h>

#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */