/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>

/*
 * Flush all TLB entries on the local CPU.
 */
static inline void __tlb_flush_local(void)
{
	asm volatile("ptlb" : : : "memory");
}

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_idte(unsigned long asce)
{
	unsigned long opt;

	opt = IDTE_PTOA;
	if (MACHINE_HAS_TLB_GUEST)
		opt |= IDTE_GUEST_ASCE;
	/* Global TLB flush for the mm */
	asm volatile("idte 0,%1,%0" : : "a" (opt), "a" (asce) : "cc");
}
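
/*
 * IDTE is only usable when MACHINE_HAS_IDTE is set; the callers
 * below (__tlb_flush_mm() and __tlb_flush_kernel()) check for it
 * and fall back to __tlb_flush_global() otherwise.
 */
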
/*
 * Flush all TLB entries on all CPUs.
 */
static inline void __tlb_flush_global(void)
{
	unsigned int dummy = 0;

	/* csp (compare and swap and purge) purges the TLBs of all CPUs */
	csp(&dummy, 0, 0);
}

/*
 * Flush TLB entries for a specific mm on all CPUs (if gmap is in
 * use, this involves multiple ASCEs!).
 */
static inline void __tlb_flush_mm(struct mm_struct *mm)
{
	unsigned long gmap_asce;

	/*
	 * If the machine has IDTE, we prefer a per-mm flush on all
	 * CPUs over a local flush, even if the mm only ever ran on
	 * the local CPU.
	 */
	preempt_disable();
	atomic_inc(&mm->context.flush_count);
	/* Reset the TLB flush mask */
	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
	barrier();
	gmap_asce = READ_ONCE(mm->context.gmap_asce);
	if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
		if (gmap_asce)
			__tlb_flush_idte(gmap_asce);
		__tlb_flush_idte(mm->context.asce);
	} else {
		/* Global TLB flush */
		__tlb_flush_global();
	}
	atomic_dec(&mm->context.flush_count);
	preempt_enable();
}
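
/*
 * The flush_count increment above pairs with the context-switch
 * path: a CPU concurrently attaching this mm re-adds itself to
 * mm_cpumask() and can wait for flush_count to drain before relying
 * on its TLB entries, so the cpumask reset cannot lose a racing CPU.
 * See asm/mmu_context.h for the other half of this handshake.
 */
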
/*
 * Flush all TLB entries for the kernel address space.
 */
static inline void __tlb_flush_kernel(void)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(init_mm.context.asce);
	else
		__tlb_flush_global();
}

/*
 * Flush the mm's TLB entries only if a flush was deferred, i.e. if
 * mm->context.flush_mm was set.
 */
static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
{
	spin_lock(&mm->context.lock);
	if (mm->context.flush_mm) {
		mm->context.flush_mm = 0;
		__tlb_flush_mm(mm);
	}
	spin_unlock(&mm->context.lock);
}

/*
 * TLB flushing:
 *  flush_tlb() - flushes the current mm struct TLBs
 *  flush_tlb_all() - flushes all processes' TLBs
 *  flush_tlb_mm(mm) - flushes the specified mm context's TLBs
 *  flush_tlb_page(vma, vmaddr) - flushes one page
 *  flush_tlb_range(vma, start, end) - flushes a range of pages
 *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
 */
/*
 * flush_tlb_mm goes together with ptep_set_wrprotect for the
 * copy_page_range operation, and flush_tlb_range is related to
 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
 * ptep_get_and_clear do not flush the TLBs directly if the mm has
 * only one user. At the end of the update, flush_tlb_mm and
 * flush_tlb_range need to perform the flush.
 */
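
/*
 * Illustrative sequence, assuming the usual copy_page_range() caller:
 *
 *	ptep_set_wrprotect(src_mm, addr, ptep);
 *		- no direct flush, may only set mm->context.flush_mm
 *	... more PTE updates ...
 *	flush_tlb_mm(src_mm);
 *		- performs the deferred flush via __tlb_flush_mm_lazy()
 */
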
#define flush_tlb()				do { } while (0)
#define flush_tlb_all()				do { } while (0)
#define flush_tlb_page(vma, addr)		do { } while (0)
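
/*
 * flush_tlb(), flush_tlb_all() and flush_tlb_page() can be no-ops
 * here because page-granular invalidation is done directly by the
 * IPTE/IDTE-based ptep and pmdp primitives (see asm/pgtable.h), and
 * full flushes go through the lazy mm scheme below.
 */
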
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	__tlb_flush_mm_lazy(mm);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__tlb_flush_mm_lazy(vma->vm_mm);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	__tlb_flush_kernel();
}
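
/*
 * Note that start and end are intentionally unused: IDTE flushes by
 * ASCE rather than by range, so any kernel range flush purges the
 * TLB entries of the whole kernel address space.
 */
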
#endif /* _S390_TLBFLUSH_H */