/* arch/parisc/include/asm/tlbflush.h */
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef _PARISC_TLBFLUSH_H
  3. #define _PARISC_TLBFLUSH_H
  4. /* TLB flushing routines.... */
  5. #include <linux/mm.h>
  6. #include <linux/sched.h>
  7. #include <asm/mmu_context.h>
  8. extern void flush_tlb_all(void);
  9. extern void flush_tlb_all_local(void *);
  10. #define smp_flush_tlb_all() flush_tlb_all()
  11. int __flush_tlb_range(unsigned long sid,
  12. unsigned long start, unsigned long end);
  13. #define flush_tlb_range(vma, start, end) \
  14. __flush_tlb_range((vma)->vm_mm->context.space_id, start, end)
  15. #define flush_tlb_kernel_range(start, end) \
  16. __flush_tlb_range(0, start, end)
  17. /*
  18. * flush_tlb_mm()
  19. *
  20. * The code to switch to a new context is NOT valid for processes
  21. * which play with the space id's. Thus, we have to preserve the
  22. * space and just flush the entire tlb. However, the compilers,
  23. * dynamic linker, etc, do not manipulate space id's, so there
  24. * could be a significant performance benefit in switching contexts
  25. * and not flushing the whole tlb.
  26. */
  27. static inline void flush_tlb_mm(struct mm_struct *mm)
  28. {
  29. BUG_ON(mm == &init_mm); /* Should never happen */
  30. #if 1 || defined(CONFIG_SMP)
  31. /* Except for very small threads, flushing the whole TLB is
  32. * faster than using __flush_tlb_range. The pdtlb and pitlb
  33. * instructions are very slow because of the TLB broadcast.
  34. * It might be faster to do local range flushes on all CPUs
  35. * on PA 2.0 systems.
  36. */
  37. flush_tlb_all();
  38. #else
  39. /* FIXME: currently broken, causing space id and protection ids
  40. * to go out of sync, resulting in faults on userspace accesses.
  41. * This approach needs further investigation since running many
  42. * small applications (e.g., GCC testsuite) is faster on HP-UX.
  43. */
  44. if (mm) {
  45. if (mm->context != 0)
  46. free_sid(mm->context);
  47. mm->context = alloc_sid();
  48. if (mm == current->active_mm)
  49. load_context(mm->context);
  50. }
  51. #endif
  52. }
  53. static inline void flush_tlb_page(struct vm_area_struct *vma,
  54. unsigned long addr)
  55. {
  56. purge_tlb_entries(vma->vm_mm, addr);
  57. }
  58. #endif