asid.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ASM_ASID_H
#define __ASM_ASM_ASID_H

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
struct asid_info
{
        /* Current ASID generation, bumped on rollover */
        atomic64_t              generation;
        /* Bitmap tracking which ASIDs are in use */
        unsigned long           *map;
        /* ASID currently active on each CPU */
        atomic64_t __percpu     *active;
        /* ASID reserved per CPU across a rollover */
        u64 __percpu            *reserved;
        /* Size of an ASID in bits */
        u32                     bits;
        /* Lock protecting the structure */
        raw_spinlock_t          lock;
        /* Which CPU requires context flush on next call */
        cpumask_t               flush_pending;
        /* Number of ASIDs allocated per context (shift value) */
        unsigned int            ctxt_shift;
        /* Callback to locally flush the context. */
        void                    (*flush_cpu_ctxt_cb)(void);
};
#define NUM_ASIDS(info)         (1UL << ((info)->bits))
#define NUM_CTXT_ASIDS(info)    (NUM_ASIDS(info) >> (info)->ctxt_shift)
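/*
 * Illustrative arithmetic (example values, not mandated by this header):
 * if an architecture registers bits = 16 and two ASIDs per context
 * (ctxt_shift = 1), NUM_ASIDS(info) is 1UL << 16 = 65536 and
 * NUM_CTXT_ASIDS(info) is 65536 >> 1 = 32768 contexts per generation.
 */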
#define active_asid(info, cpu)  *per_cpu_ptr((info)->active, cpu)
void asid_new_context(struct asid_info *info, atomic64_t *pasid,
                      unsigned int cpu, struct mm_struct *mm);
/*
 * Check that the ASID is still valid for the context. If not, generate
 * a new ASID.
 *
 * @pasid: Pointer to the current ASID batch
 * @cpu: current CPU ID. Must have been acquired through get_cpu()
 */
static inline void asid_check_context(struct asid_info *info,
                                      atomic64_t *pasid, unsigned int cpu,
                                      struct mm_struct *mm)
{
        u64 asid, old_active_asid;

        asid = atomic64_read(pasid);

        /*
         * The memory ordering here is subtle.
         * If our active_asid is non-zero and the ASID matches the current
         * generation, then we update the active_asid entry with a relaxed
         * cmpxchg. Racing with a concurrent rollover means that either:
         *
         * - We get a zero back from the cmpxchg and end up waiting on the
         *   lock. Taking the lock synchronises with the rollover and so
         *   we are forced to see the updated generation.
         *
         * - We get a valid ASID back from the cmpxchg, which means the
         *   relaxed xchg in flush_context will treat us as reserved
         *   because atomic RmWs are totally ordered for a given location.
         */
        old_active_asid = atomic64_read(&active_asid(info, cpu));
        if (old_active_asid &&
            !((asid ^ atomic64_read(&info->generation)) >> info->bits) &&
            atomic64_cmpxchg_relaxed(&active_asid(info, cpu),
                                     old_active_asid, asid))
                return;

        asid_new_context(info, pasid, cpu, mm);
}
int asid_allocator_init(struct asid_info *info,
                        u32 bits, unsigned int asid_per_ctxt,
                        void (*flush_cpu_ctxt_cb)(void));

#endif
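
A minimal sketch of how an architecture back-end might wire up this allocator, assuming a 16-bit ASID space with one ASID per context and that asid_allocator_init() returns 0 on success. Only struct asid_info, asid_allocator_init(), and asid_check_context() come from this header; asid_flush_cpu_ctxt(), asids_init(), check_and_switch_context(), local_flush_tlb_all(), and mm->context.id are architecture-specific names used purely for illustration.

static struct asid_info asid_info;

/* Hypothetical callback: invalidate the local TLB when a rollover occurs. */
static void asid_flush_cpu_ctxt(void)
{
        local_flush_tlb_all();
}

static int __init asids_init(void)
{
        /* 16-bit ASIDs, one ASID handed out per context. */
        if (asid_allocator_init(&asid_info, 16, 1, asid_flush_cpu_ctxt))
                panic("Failed to initialize ASID allocator\n");
        return 0;
}
early_initcall(asids_init);

/*
 * Context-switch hook: per the comment above asid_check_context(), the CPU
 * ID must be obtained through get_cpu() so the task cannot migrate while
 * the per-CPU active/reserved entries are examined.
 */
static void check_and_switch_context(struct mm_struct *mm)
{
        unsigned int cpu = get_cpu();

        asid_check_context(&asid_info, &mm->context.id, cpu, mm);
        put_cpu();
}

On the fast path asid_check_context() performs only atomic reads plus a relaxed cmpxchg; asid_new_context() and the spinlock are reached only when the ASID's generation no longer matches, i.e. after a rollover.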