tlbflush.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _M68K_TLBFLUSH_H
#define _M68K_TLBFLUSH_H

#ifdef CONFIG_MMU
#ifndef CONFIG_SUN3

#include <asm/current.h>
#include <asm/mcfmmu.h>
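
/* Flush the ATC/TLB entry for a single kernel page at @addr. */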
static inline void flush_tlb_kernel_page(void *addr)
{
        if (CPU_IS_COLDFIRE) {
                mmu_write(MMUOR, MMUOR_CNL);
        } else if (CPU_IS_040_OR_060) {
                set_fc(SUPER_DATA);
                __asm__ __volatile__(".chip 68040\n\t"
                                     "pflush (%0)\n\t"
                                     ".chip 68k"
                                     : : "a" (addr));
                set_fc(USER_DATA);
        } else if (CPU_IS_020_OR_030)
                __asm__ __volatile__("pflush #4,#4,(%0)" : : "a" (addr));
}

/*
 * flush all user-space atc entries.
 */
static inline void __flush_tlb(void)
{
        if (CPU_IS_COLDFIRE) {
                mmu_write(MMUOR, MMUOR_CNL);
        } else if (CPU_IS_040_OR_060) {
                __asm__ __volatile__(".chip 68040\n\t"
                                     "pflushan\n\t"
                                     ".chip 68k");
        } else if (CPU_IS_020_OR_030) {
                __asm__ __volatile__("pflush #0,#4");
        }
}
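
/* '040/'060 helper: flush the ATC entry that matches @addr. */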
static inline void __flush_tlb040_one(unsigned long addr)
{
        __asm__ __volatile__(".chip 68040\n\t"
                             "pflush (%0)\n\t"
                             ".chip 68k"
                             : : "a" (addr));
}
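
/* Flush the ATC entry for a single (user) page at @addr. */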
static inline void __flush_tlb_one(unsigned long addr)
{
        if (CPU_IS_COLDFIRE)
                mmu_write(MMUOR, MMUOR_CNL);
        else if (CPU_IS_040_OR_060)
                __flush_tlb040_one(addr);
        else if (CPU_IS_020_OR_030)
                __asm__ __volatile__("pflush #0,#4,(%0)" : : "a" (addr));
}
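
/* flush_tlb() only needs to flush user-space entries. */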
#define flush_tlb() __flush_tlb()

/*
 * flush all atc entries (both kernel and user-space entries).
 */
static inline void flush_tlb_all(void)
{
        if (CPU_IS_COLDFIRE) {
                mmu_write(MMUOR, MMUOR_CNL);
        } else if (CPU_IS_040_OR_060) {
                __asm__ __volatile__(".chip 68040\n\t"
                                     "pflusha\n\t"
                                     ".chip 68k");
        } else if (CPU_IS_020_OR_030) {
                __asm__ __volatile__("pflusha");
        }
}
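
/* Flush all user entries, but only if @mm is the currently active address space. */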
static inline void flush_tlb_mm(struct mm_struct *mm)
{
        if (mm == current->active_mm)
                __flush_tlb();
}
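
/*
 * Flush the ATC entry for one user page in @vma's address space.
 *
 * A sketch of the typical caller pattern in the generic mm code (not
 * code from this file; the names shown are the generic page-table API):
 *
 *      set_pte_at(vma->vm_mm, addr, ptep, pte);
 *      flush_tlb_page(vma, addr);
 */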
static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
        if (vma->vm_mm == current->active_mm)
                __flush_tlb_one(addr);
}
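
/* No ranged flush on these CPUs: drop all user entries if @vma's mm is active. */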
static inline void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
{
        if (vma->vm_mm == current->active_mm)
                __flush_tlb();
}
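
/* Flushing a range of kernel pages falls back to a full ATC flush. */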
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        flush_tlb_all();
}

#else /* CONFIG_SUN3 */

/* Reserved PMEGs. */
extern char sun3_reserved_pmeg[SUN3_PMEGS_NUM];
/* Per-PMEG bookkeeping: mapped virtual address, allocation flag, owning context. */
extern unsigned long pmeg_vaddr[SUN3_PMEGS_NUM];
extern unsigned char pmeg_alloc[SUN3_PMEGS_NUM];
extern unsigned char pmeg_ctx[SUN3_PMEGS_NUM];

/* Flush all userspace mappings one by one...  (why no flush command,
   sun?) */
static inline void flush_tlb_all(void)
{
        unsigned long addr;
        unsigned char ctx, oldctx;

        oldctx = sun3_get_context();
        for (addr = 0x00000000; addr < TASK_SIZE; addr += SUN3_PMEG_SIZE) {
                for (ctx = 0; ctx < 8; ctx++) {
                        sun3_put_context(ctx);
                        sun3_put_segmap(addr, SUN3_INVALID_PMEG);
                }
        }
        sun3_put_context(oldctx);

        /* erase all of the userspace pmeg maps, we've clobbered them
           all anyway */
        for (addr = 0; addr < SUN3_INVALID_PMEG; addr++) {
                if (pmeg_alloc[addr] == 1) {
                        pmeg_alloc[addr] = 0;
                        pmeg_ctx[addr] = 0;
                        pmeg_vaddr[addr] = 0;
                }
        }
}

/* Clear user TLB entries within the context named in mm */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
        unsigned char oldctx;
        unsigned char seg;
        unsigned long i;

        oldctx = sun3_get_context();
        sun3_put_context(mm->context);

        for (i = 0; i < TASK_SIZE; i += SUN3_PMEG_SIZE) {
                seg = sun3_get_segmap(i);
                if (seg == SUN3_INVALID_PMEG)
                        continue;

                sun3_put_segmap(i, SUN3_INVALID_PMEG);
                pmeg_alloc[seg] = 0;
                pmeg_ctx[seg] = 0;
                pmeg_vaddr[seg] = 0;
        }

        sun3_put_context(oldctx);
}

/* Flush a single TLB page.  In this case, we're limited to flushing a
   single PMEG */
static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long addr)
{
        unsigned char oldctx;
        unsigned char i;

        oldctx = sun3_get_context();
        sun3_put_context(vma->vm_mm->context);
        addr &= ~SUN3_PMEG_MASK;
        if ((i = sun3_get_segmap(addr)) != SUN3_INVALID_PMEG) {
                pmeg_alloc[i] = 0;
                pmeg_ctx[i] = 0;
                pmeg_vaddr[i] = 0;
                sun3_put_segmap(addr, SUN3_INVALID_PMEG);
        }
        sun3_put_context(oldctx);
}

/* Flush a range of pages from TLB. */
static inline void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned char seg, oldctx;

        start &= ~SUN3_PMEG_MASK;

        oldctx = sun3_get_context();
        sun3_put_context(mm->context);

        while (start < end) {
                if ((seg = sun3_get_segmap(start)) == SUN3_INVALID_PMEG)
                        goto next;
                if (pmeg_ctx[seg] == mm->context) {
                        pmeg_alloc[seg] = 0;
                        pmeg_ctx[seg] = 0;
                        pmeg_vaddr[seg] = 0;
                }
                sun3_put_segmap(start, SUN3_INVALID_PMEG);
next:
                start += SUN3_PMEG_SIZE;
        }
        /* restore the context that was live on entry */
        sun3_put_context(oldctx);
}
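
/* No ranged kernel flush on Sun3: fall back to flush_tlb_all(). */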
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        flush_tlb_all();
}

/* Flush kernel page from TLB. */
static inline void flush_tlb_kernel_page(unsigned long addr)
{
        sun3_put_segmap(addr & ~(SUN3_PMEG_SIZE - 1), SUN3_INVALID_PMEG);
}

#endif /* CONFIG_SUN3 */

#else /* !CONFIG_MMU */
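
/*
 * Without an MMU there is no TLB to maintain; none of these hooks should
 * ever be reached, so they all BUG().
 */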

/*
 * flush all user-space atc entries.
 */
static inline void __flush_tlb(void)
{
        BUG();
}

static inline void __flush_tlb_one(unsigned long addr)
{
        BUG();
}

#define flush_tlb() __flush_tlb()

/*
 * flush all atc entries (both kernel and user-space entries).
 */
static inline void flush_tlb_all(void)
{
        BUG();
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
        BUG();
}

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
        BUG();
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
{
        BUG();
}

static inline void flush_tlb_kernel_page(unsigned long addr)
{
        BUG();
}

#endif /* CONFIG_MMU */

#endif /* _M68K_TLBFLUSH_H */