/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PARISC_MMU_CONTEXT_H
#define __PARISC_MMU_CONTEXT_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <asm-generic/mm_hooks.h>

/* on PA-RISC, we actually have enough contexts to justify an
 * allocator for them. prumpf */
extern unsigned long alloc_sid(void);
extern void free_sid(unsigned long);
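
/* Allocate a fresh space id for each new user address space. */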
#define init_new_context init_new_context
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        BUG_ON(atomic_read(&mm->mm_users) != 1);

        mm->context.space_id = alloc_sid();
        return 0;
}
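
/* Give the space id back to the allocator when the mm is torn down. */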
#define destroy_context destroy_context
static inline void
destroy_context(struct mm_struct *mm)
{
        free_sid(mm->context.space_id);
        mm->context.space_id = 0;
}
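
/*
 * Convert a space id into the value that gets loaded into a
 * protection-id register. The low bit of a PID register is the
 * write-disable (WD) bit, so the id is shifted to leave it clear.
 */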
static inline unsigned long __space_to_prot(mm_context_t context)
{
#if SPACEID_SHIFT == 0
        return context.space_id << 1;
#else
        return context.space_id >> (SPACEID_SHIFT - 1);
#endif
}
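
/*
 * Install a context: the space id goes into the user space register
 * (SR_USER) and the matching protection id into cr8 (PID1).
 */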
static inline void load_context(mm_context_t context)
{
        mtsp(context.space_id, SR_USER);
        mtctl(__space_to_prot(context), 8);
}
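
/*
 * The low-level mm switch: cr25 gets the physical address of the new
 * page directory for the TLB miss handlers, then the new space and
 * protection ids are loaded. Interrupts must already be off.
 */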
static inline void switch_mm_irqs_off(struct mm_struct *prev,
                struct mm_struct *next, struct task_struct *tsk)
{
        if (prev != next) {
#ifdef CONFIG_TLB_PTLOCK
                /* put physical address of page_table_lock in cr28 (tr4)
                   for TLB faults */
                spinlock_t *pgd_lock = &next->page_table_lock;
                mtctl(__pa(__ldcw_align(&pgd_lock->rlock.raw_lock)), 28);
#endif
                mtctl(__pa(next->pgd), 25);
                load_context(next->context);
        }
}
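
/* IRQ-safe wrapper around switch_mm_irqs_off(). */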
static inline void switch_mm(struct mm_struct *prev,
                struct mm_struct *next, struct task_struct *tsk)
{
        unsigned long flags;

        if (prev == next)
                return;

        local_irq_save(flags);
        switch_mm_irqs_off(prev, next, tsk);
        local_irq_restore(flags);
}
#define switch_mm_irqs_off switch_mm_irqs_off
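
/*
 * Defining each hook as a macro of its own name tells the generic
 * headers to skip their fallback for it; e.g. the generic code only
 * does "#define switch_mm_irqs_off switch_mm" when the arch has not
 * defined switch_mm_irqs_off itself.
 */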
#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
        /*
         * activate_mm() is our one chance to allocate a space id
         * for a new mm created in the exec path. There's also
         * some lazy tlb stuff, which is currently dead code, but
         * we only allocate a space id if one hasn't been allocated
         * already, so we should be OK.
         */
        BUG_ON(next == &init_mm);       /* Should never happen */

        if (next->context.space_id == 0)
                next->context.space_id = alloc_sid();

        switch_mm(prev, next, current);
}
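
/*
 * Pick up the generic no-op versions of the hooks not overridden
 * above, such as enter_lazy_tlb() and deactivate_mm().
 */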
#include <asm-generic/mmu_context.h>

#endif /* __PARISC_MMU_CONTEXT_H */