// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>

#include <asm/mmu_context.h>
#include <asm/setup.h>

/*
 * One C-SKY MMU TLB entry contains two PFN/page entries, i.e.:
 * 1 VPN -> 2 PFNs
 */
#define TLB_ENTRY_SIZE		(PAGE_SIZE * 2)
#define TLB_ENTRY_SIZE_MASK	(PAGE_MASK << 1)
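
/*
 * With 4 KiB pages (an assumption for this example), TLB_ENTRY_SIZE is
 * 8 KiB and TLB_ENTRY_SIZE_MASK is ~0x1fff, so a range such as
 * [0x1000, 0x3001) widens to [0x0000, 0x4000) and is walked by the
 * flush loops below in two 2*PAGE_SIZE steps.
 */

/* Drop every TLB entry, for all ASIDs. */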
void flush_tlb_all(void)
{
	tlb_invalid_all();
}
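
/*
 * Flush all of @mm's user mappings. With hardware TLBI, "tlbi.asids"
 * invalidates every entry tagged with the mm's ASID; cores without
 * TLBI fall back to a full TLB invalidate.
 */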
void flush_tlb_mm(struct mm_struct *mm)
{
#ifdef CONFIG_CPU_HAS_TLBI
	sync_is();
	asm volatile(
		"tlbi.asids %0	\n"
		"sync.i		\n"
		:
		: "r" (cpu_asid(mm))
		: "memory");
#else
	tlb_invalid_all();
#endif
}

/*
 * The MMU operation registers can only invalidate TLB entries in the
 * jTLB; the I-uTLB and D-uTLB are invalidated by a change of the ASID
 * field in entryhi. When the old and new ASIDs are equal, the macro
 * first writes a dummy ASID (oldpid + 1) so that the final write of
 * oldpid is a real change.
 */
#ifndef CONFIG_CPU_HAS_TLBI
#define restore_asid_inv_utlb(oldpid, newpid) \
do { \
	if (oldpid == newpid) \
		write_mmu_entryhi(oldpid + 1); \
	write_mmu_entryhi(oldpid); \
} while (0)
#endif
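
/*
 * Invalidate @vma's user mappings in [start, end). Addresses are
 * rounded out to TLB_ENTRY_SIZE because each jTLB entry maps a pair
 * of pages, which is also why the loops below step by 2*PAGE_SIZE.
 */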
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	unsigned long newpid = cpu_asid(vma->vm_mm);

	start &= TLB_ENTRY_SIZE_MASK;
	end   += TLB_ENTRY_SIZE - 1;
	end   &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
	sync_is();
	while (start < end) {
		asm volatile(
			"tlbi.vas %0	\n"
			:
			: "r" (start | newpid)
			: "memory");
		start += 2*PAGE_SIZE;
	}
	asm volatile("sync.i\n");
#else
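	/*
	 * No hardware TLBI: walk the range one TLB entry at a time,
	 * probe the jTLB for a matching entry, and invalidate it by
	 * index when one is found. IRQs stay off so an interrupt
	 * cannot clobber entryhi/index between probe and invalidate.
	 */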
	{
	unsigned long flags, oldpid;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	while (start < end) {
		int idx;

		write_mmu_entryhi(start | newpid);
		start += 2*PAGE_SIZE;
		tlb_probe();
		idx = read_mmu_index();
		if (idx >= 0)
			tlb_invalid_indexed();
	}
	restore_asid_inv_utlb(oldpid, newpid);
	local_irq_restore(flags);
	}
#endif
}
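
/*
 * Flush kernel mappings in [start, end). Unlike "tlbi.vas" above,
 * "tlbi.vaas" is passed the virtual address alone, with no ASID
 * encoded; the non-TLBI fallback simply probes with whatever ASID is
 * currently live.
 */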
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= TLB_ENTRY_SIZE_MASK;
	end   += TLB_ENTRY_SIZE - 1;
	end   &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
	sync_is();
	while (start < end) {
		asm volatile(
			"tlbi.vaas %0	\n"
			:
			: "r" (start)
			: "memory");
		start += 2*PAGE_SIZE;
	}
	asm volatile("sync.i\n");
#else
	{
	unsigned long flags, oldpid;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	while (start < end) {
		int idx;

		write_mmu_entryhi(start | oldpid);
		start += 2*PAGE_SIZE;
		tlb_probe();
		idx = read_mmu_index();
		if (idx >= 0)
			tlb_invalid_indexed();
	}
	restore_asid_inv_utlb(oldpid, oldpid);
	local_irq_restore(flags);
	}
#endif
}
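
/*
 * Flush the single TLB-entry-sized mapping covering @addr in @vma's
 * address space.
 */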
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	int newpid = cpu_asid(vma->vm_mm);

	addr &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
	sync_is();
	asm volatile(
		"tlbi.vas %0	\n"
		"sync.i		\n"
		:
		: "r" (addr | newpid)
		: "memory");
#else
	{
	int oldpid, idx;
	unsigned long flags;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	write_mmu_entryhi(addr | newpid);
	tlb_probe();
	idx = read_mmu_index();
	if (idx >= 0)
		tlb_invalid_indexed();

	restore_asid_inv_utlb(oldpid, newpid);
	local_irq_restore(flags);
	}
#endif
}
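
/*
 * Flush whatever mapping covers @addr, matched by virtual address
 * alone (no ASID); exported for use by modules.
 */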
void flush_tlb_one(unsigned long addr)
{
	addr &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
	sync_is();
	asm volatile(
		"tlbi.vaas %0	\n"
		"sync.i		\n"
		:
		: "r" (addr)
		: "memory");
#else
	{
	int oldpid, idx;
	unsigned long flags;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	write_mmu_entryhi(addr | oldpid);
	tlb_probe();
	idx = read_mmu_index();
	if (idx >= 0)
		tlb_invalid_indexed();

	restore_asid_inv_utlb(oldpid, oldpid);
	local_irq_restore(flags);
	}
#endif
}
EXPORT_SYMBOL(flush_tlb_one);