mmu_context.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ALPHA_MMU_CONTEXT_H
#define __ALPHA_MMU_CONTEXT_H

/*
 * get a new mmu context..
 *
 * Copyright (C) 1996, Linus Torvalds
 */

#include <linux/mm_types.h>
#include <linux/sched.h>

#include <asm/machvec.h>
#include <asm/compiler.h>
#include <asm-generic/mm_hooks.h>

/*
 * Force a context reload. This is needed when we change the page
 * table pointer or when we update the ASN of the current process.
 */

/* Don't get into trouble with dueling __EXTERN_INLINEs. */
#ifndef __EXTERN_INLINE
#include <asm/io.h>
#endif

static inline unsigned long
__reload_thread(struct pcb_struct *pcb)
{
        register unsigned long a0 __asm__("$16");
        register unsigned long v0 __asm__("$0");

        a0 = virt_to_phys(pcb);
        __asm__ __volatile__(
                "call_pal %2 #__reload_thread"
                : "=r"(v0), "=r"(a0)
                : "i"(PAL_swpctx), "r"(a0)
                : "$1", "$22", "$23", "$24", "$25");

        return v0;
}
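/*
 * Usage note (editor's sketch, not from the original file): the PCB
 * handed to __reload_thread() is the one embedded in the thread's
 * thread_info, so a caller that has just rewritten its own pcb.asn or
 * pcb.ptbr fields would typically make the change take effect with a
 * call along the lines shown below.  The call site is illustrative only.
 *
 *      __reload_thread(&task_thread_info(current)->pcb);
 */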
/*
 * The maximum ASN's the processor supports. On the EV4 this is 63
 * but the PAL-code doesn't actually use this information. On the
 * EV5 this is 127, and EV6 has 255.
 *
 * On the EV4, the ASNs are more-or-less useless anyway, as they are
 * only used as an icache tag, not for TB entries. On the EV5 and EV6,
 * ASN's also validate the TB entries, and thus make a lot more sense.
 *
 * The EV4 ASN's don't even match the architecture manual, ugh. And
 * I quote: "If a processor implements address space numbers (ASNs),
 * and the old PTE has the Address Space Match (ASM) bit clear (ASNs
 * in use) and the Valid bit set, then entries can also effectively be
 * made coherent by assigning a new, unused ASN to the currently
 * running process and not reusing the previous ASN before calling the
 * appropriate PALcode routine to invalidate the translation buffer (TB)".
 *
 * In short, the EV4 has a "kind of" ASN capability, but it doesn't actually
 * work correctly and can thus not be used (explaining the lack of PAL-code
 * support).
 */
#define EV4_MAX_ASN 63
#define EV5_MAX_ASN 127
#define EV6_MAX_ASN 255

#ifdef CONFIG_ALPHA_GENERIC
# define MAX_ASN        (alpha_mv.max_asn)
#else
# ifdef CONFIG_ALPHA_EV4
#  define MAX_ASN       EV4_MAX_ASN
# elif defined(CONFIG_ALPHA_EV5)
#  define MAX_ASN       EV5_MAX_ASN
# else
#  define MAX_ASN       EV6_MAX_ASN
# endif
#endif

/*
 * cpu_last_asn(processor):
 * 63                                            0
 * +-------------+----------------+--------------+
 * | asn version | this processor | hardware asn |
 * +-------------+----------------+--------------+
 */

#include <asm/smp.h>
#ifdef CONFIG_SMP
#define cpu_last_asn(cpuid)     (cpu_data[cpuid].last_asn)
#else
extern unsigned long last_asn;
#define cpu_last_asn(cpuid)     last_asn
#endif /* CONFIG_SMP */

#define WIDTH_HARDWARE_ASN      8
#define ASN_FIRST_VERSION       (1UL << WIDTH_HARDWARE_ASN)
#define HARDWARE_ASN_MASK       ((1UL << WIDTH_HARDWARE_ASN) - 1)
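/*
 * Illustrative only (editor's sketch, not part of the original header):
 * given the layout above, a saved context value splits into its ASN
 * version and its hardware ASN with the masks just defined.  The helper
 * names are hypothetical; e.g. a context of 0x305 has hardware ASN 0x05
 * and version bits 0x300.
 */
static inline unsigned long example_asn_version(unsigned long ctx)
{
        /* Bits above the hardware ASN field hold the version counter. */
        return ctx & ~HARDWARE_ASN_MASK;
}

static inline unsigned long example_hardware_asn(unsigned long ctx)
{
        /* Low WIDTH_HARDWARE_ASN bits are what ends up in pcb.asn. */
        return ctx & HARDWARE_ASN_MASK;
}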
/*
 * NOTE! The way this is set up, the high bits of the "asn_cache" (and
 * the "mm->context") are the ASN _version_ code. A version of 0 is
 * always considered invalid, so to invalidate another process you only
 * need to do "p->mm->context = 0".
 *
 * If we need more ASN's than the processor has, we invalidate the old
 * user TLB's (tbiap()) and start a new ASN version. That will automatically
 * force a new asn for any other processes the next time they want to
 * run.
 */

#ifndef __EXTERN_INLINE
#define __EXTERN_INLINE extern inline
#define __MMU_EXTERN_INLINE
#endif

extern inline unsigned long
__get_new_mm_context(struct mm_struct *mm, long cpu)
{
        unsigned long asn = cpu_last_asn(cpu);
        unsigned long next = asn + 1;

        if ((asn & HARDWARE_ASN_MASK) >= MAX_ASN) {
                tbiap();
                imb();
                next = (asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION;
        }
        cpu_last_asn(cpu) = next;
        return next;
}
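/*
 * Worked example of the wrap above (editor's note, assuming an EV6
 * where MAX_ASN is 255): if cpu_last_asn() currently holds 0x2ff
 * (version bits 0x200, hardware ASN 0xff), the next allocation flushes
 * the user TLB with tbiap(), bumps the version, and returns 0x300.
 * Every other mm's saved context now carries stale version bits, so it
 * is pushed through __get_new_mm_context() the next time it runs.
 */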
__EXTERN_INLINE void
ev5_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
              struct task_struct *next)
{
        /* Check if our ASN is of an older version, and thus invalid. */
        unsigned long asn;
        unsigned long mmc;
        long cpu = smp_processor_id();

#ifdef CONFIG_SMP
        cpu_data[cpu].asn_lock = 1;
        barrier();
#endif
        asn = cpu_last_asn(cpu);
        mmc = next_mm->context[cpu];
        if ((mmc ^ asn) & ~HARDWARE_ASN_MASK) {
                mmc = __get_new_mm_context(next_mm, cpu);
                next_mm->context[cpu] = mmc;
        }
#ifdef CONFIG_SMP
        else
                cpu_data[cpu].need_new_asn = 1;
#endif

        /* Always update the PCB ASN.  Another thread may have allocated
           a new mm->context (via flush_tlb_mm) without the ASN serial
           number wrapping.  We have no way to detect when this is needed.  */
        task_thread_info(next)->pcb.asn = mmc & HARDWARE_ASN_MASK;
}

__EXTERN_INLINE void
ev4_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
              struct task_struct *next)
{
        /* As described, ASN's are broken for TLB usage.  But we can
           optimize for switching between threads -- if the mm is
           unchanged from current we needn't flush.  */
        /* ??? May not be needed because EV4 PALcode recognizes that
           ASN's are broken and does a tbiap itself on swpctx, under
           the "Must set ASN or flush" rule.  At least this is true
           for a 1992 SRM, reports Joseph Martin ([email protected]).
           I'm going to leave this here anyway, just to Be Sure.  -- r~  */
        if (prev_mm != next_mm)
                tbiap();

        /* Do continue to allocate ASNs, because we can still use them
           to avoid flushing the icache.  */
        ev5_switch_mm(prev_mm, next_mm, next);
}
extern void __load_new_mm_context(struct mm_struct *);

#ifdef CONFIG_SMP
#define check_mmu_context()                                     \
do {                                                            \
        int cpu = smp_processor_id();                           \
        cpu_data[cpu].asn_lock = 0;                             \
        barrier();                                              \
        if (cpu_data[cpu].need_new_asn) {                       \
                struct mm_struct * mm = current->active_mm;     \
                cpu_data[cpu].need_new_asn = 0;                 \
                if (!mm->context[cpu])                          \
                        __load_new_mm_context(mm);              \
        }                                                       \
} while(0)
#else
#define check_mmu_context()  do { } while(0)
#endif
__EXTERN_INLINE void
ev5_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
{
        __load_new_mm_context(next_mm);
}

__EXTERN_INLINE void
ev4_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
{
        __load_new_mm_context(next_mm);
        tbiap();
}

#ifdef CONFIG_ALPHA_GENERIC
# define switch_mm(a,b,c)       alpha_mv.mv_switch_mm((a),(b),(c))
# define activate_mm(x,y)       alpha_mv.mv_activate_mm((x),(y))
#else
# ifdef CONFIG_ALPHA_EV4
#  define switch_mm(a,b,c)      ev4_switch_mm((a),(b),(c))
#  define activate_mm(x,y)      ev4_activate_mm((x),(y))
# else
#  define switch_mm(a,b,c)      ev5_switch_mm((a),(b),(c))
#  define activate_mm(x,y)      ev5_activate_mm((x),(y))
# endif
#endif
#define init_new_context init_new_context
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        int i;

        for_each_online_cpu(i)
                mm->context[i] = 0;
        if (tsk != current)
                task_thread_info(tsk)->pcb.ptbr
                  = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
        return 0;
}

#define enter_lazy_tlb enter_lazy_tlb
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
        task_thread_info(tsk)->pcb.ptbr
          = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
}
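/*
 * Editor's note on the PTBR value set by the two helpers above: mm->pgd
 * is a kernel-virtual pointer inside the identity map, so subtracting
 * IDENT_ADDR gives the physical address of the page-table root, and
 * shifting right by PAGE_SHIFT turns that into the page frame number
 * the PCB's ptbr field expects.  With Alpha's 8 KB pages (PAGE_SHIFT
 * of 13) and a hypothetical pgd at physical 0x1234000, the stored
 * value would be 0x1234000 >> 13 = 0x91a.
 */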
#include <asm-generic/mmu_context.h>

#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
#undef __MMU_EXTERN_INLINE
#endif

#endif /* __ALPHA_MMU_CONTEXT_H */