// SPDX-License-Identifier: GPL-2.0
/*
 * VMID allocator.
 *
 * Based on the arm64 ASID allocator algorithm.
 * Please refer to arch/arm64/mm/context.c for detailed
 * comments on the algorithm.
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/bitfield.h>
#include <linux/bitops.h>

#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>

unsigned int kvm_arm_vmid_bits;
static DEFINE_RAW_SPINLOCK(cpu_vmid_lock);

static atomic64_t vmid_generation;
static unsigned long *vmid_map;

static DEFINE_PER_CPU(atomic64_t, active_vmids);
static DEFINE_PER_CPU(u64, reserved_vmids);

#define VMID_MASK		(~GENMASK(kvm_arm_vmid_bits - 1, 0))
#define VMID_FIRST_VERSION	(1UL << kvm_arm_vmid_bits)

#define NUM_USER_VMIDS		VMID_FIRST_VERSION
#define vmid2idx(vmid)		((vmid) & ~VMID_MASK)
#define idx2vmid(idx)		vmid2idx(idx)
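
/*
 * Each value stored in kvm_vmid->id (and in active_vmids) packs two
 * fields: the hardware VMID in the low kvm_arm_vmid_bits bits, extracted
 * with vmid2idx(), and a generation number in the bits above it. A VMID
 * is only usable while its generation matches the global vmid_generation;
 * vmid_gen_match() below performs that check.
 */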

/*
 * As VMID #0 is always reserved, the value below (whose VMID field is
 * zero) will never be handed out by the allocator and can therefore be
 * treated as invalid. It is used to set active_vmids on vCPU schedule
 * out.
 */
#define VMID_ACTIVE_INVALID	VMID_FIRST_VERSION

#define vmid_gen_match(vmid) \
	(!(((vmid) ^ atomic64_read(&vmid_generation)) >> kvm_arm_vmid_bits))
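
/*
 * Rollover: called with cpu_vmid_lock held when the current generation
 * has no free VMIDs left. Start a new generation by clearing the VMID
 * bitmap, preserving each CPU's live (or previously reserved) VMID as
 * reserved, and then broadcasting TLB flush + I-cache invalidation over
 * the inner shareable domain.
 */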
static void flush_context(void)
{
	int cpu;
	u64 vmid;

	bitmap_clear(vmid_map, 0, NUM_USER_VMIDS);

	for_each_possible_cpu(cpu) {
		vmid = atomic64_xchg_relaxed(&per_cpu(active_vmids, cpu), 0);

		/* Preserve reserved VMID */
		if (vmid == 0)
			vmid = per_cpu(reserved_vmids, cpu);
		__set_bit(vmid2idx(vmid), vmid_map);
		per_cpu(reserved_vmids, cpu) = vmid;
	}

	/*
	 * Unlike the ASID allocator, we expect less frequent rollover for
	 * VMIDs. Hence, instead of marking the CPU as flush_pending and
	 * issuing a local context invalidation on the next context-switch,
	 * we broadcast TLB flush + I-cache invalidation over the inner
	 * shareable domain on rollover.
	 */
	kvm_call_hyp(__kvm_flush_vm_context);
}
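
/*
 * Check whether @vmid is currently reserved by any CPU; if so, move those
 * reservations over to @newvmid (the same VMID in the current generation)
 * and return true.
 */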
static bool check_update_reserved_vmid(u64 vmid, u64 newvmid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved VMIDs looking for a match
	 * and update to use newvmid (i.e. the same VMID in the current
	 * generation).
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_vmids, cpu) == vmid) {
			hit = true;
			per_cpu(reserved_vmids, cpu) = newvmid;
		}
	}

	return hit;
}
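
/*
 * Allocation slow path, called with cpu_vmid_lock held. Try to revalidate
 * the VMID already held by @kvm_vmid in the current generation; failing
 * that, pick a free slot from the bitmap. If the bitmap is exhausted,
 * bump the generation and roll over via flush_context() before retrying.
 */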
static u64 new_vmid(struct kvm_vmid *kvm_vmid)
{
	static u32 cur_idx = 1;
	u64 vmid = atomic64_read(&kvm_vmid->id);
	u64 generation = atomic64_read(&vmid_generation);

	if (vmid != 0) {
		u64 newvmid = generation | (vmid & ~VMID_MASK);

		if (check_update_reserved_vmid(vmid, newvmid)) {
			atomic64_set(&kvm_vmid->id, newvmid);
			return newvmid;
		}

		if (!__test_and_set_bit(vmid2idx(vmid), vmid_map)) {
			atomic64_set(&kvm_vmid->id, newvmid);
			return newvmid;
		}
	}

	vmid = find_next_zero_bit(vmid_map, NUM_USER_VMIDS, cur_idx);
	if (vmid != NUM_USER_VMIDS)
		goto set_vmid;

	/* We're out of VMIDs, so increment the global generation count */
	generation = atomic64_add_return_relaxed(VMID_FIRST_VERSION,
						 &vmid_generation);
	flush_context();

	/* We have more VMIDs than CPUs, so this will always succeed */
	vmid = find_next_zero_bit(vmid_map, NUM_USER_VMIDS, 1);

set_vmid:
	__set_bit(vmid, vmid_map);
	cur_idx = vmid;
	vmid = idx2vmid(vmid) | generation;
	atomic64_set(&kvm_vmid->id, vmid);
	return vmid;
}

/* Called from vCPU sched out with preemption disabled */
void kvm_arm_vmid_clear_active(void)
{
	atomic64_set(this_cpu_ptr(&active_vmids), VMID_ACTIVE_INVALID);
}
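
/*
 * Make sure @kvm_vmid holds a VMID valid for the current generation and
 * publish it as this CPU's active VMID. Expected to be called with
 * preemption disabled before entering the guest (for example on the vCPU
 * run path, roughly kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid); the
 * exact call site lives outside this file). The lock-free cmpxchg below
 * is the fast path for a still-valid VMID; otherwise we fall back to the
 * locked slow path and possibly allocate a new VMID.
 */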
void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid)
{
	unsigned long flags;
	u64 vmid, old_active_vmid;

	vmid = atomic64_read(&kvm_vmid->id);

	/*
	 * Please refer to the comments in check_and_switch_context() in
	 * arch/arm64/mm/context.c.
	 *
	 * Unlike the ASID allocator, we set active_vmids to
	 * VMID_ACTIVE_INVALID on vCPU schedule out to avoid
	 * reserving the VMID space needlessly on rollover.
	 * Hence we explicitly check for "!= 0" here to handle
	 * synchronization with a concurrent rollover.
	 */
	old_active_vmid = atomic64_read(this_cpu_ptr(&active_vmids));
	if (old_active_vmid != 0 && vmid_gen_match(vmid) &&
	    0 != atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_vmids),
					  old_active_vmid, vmid))
		return;

	raw_spin_lock_irqsave(&cpu_vmid_lock, flags);

	/* Check that our VMID belongs to the current generation. */
	vmid = atomic64_read(&kvm_vmid->id);
	if (!vmid_gen_match(vmid))
		vmid = new_vmid(kvm_vmid);

	atomic64_set(this_cpu_ptr(&active_vmids), vmid);

	raw_spin_unlock_irqrestore(&cpu_vmid_lock, flags);
}

/*
 * Initialize the VMID allocator
 */
int kvm_arm_vmid_alloc_init(void)
{
	kvm_arm_vmid_bits = kvm_get_vmid_bits();

	/*
	 * Expect allocation after rollover to fail if we don't have
	 * at least one more VMID than CPUs. VMID #0 is always reserved.
	 */
	WARN_ON(NUM_USER_VMIDS - 1 <= num_possible_cpus());
	atomic64_set(&vmid_generation, VMID_FIRST_VERSION);
	vmid_map = kcalloc(BITS_TO_LONGS(NUM_USER_VMIDS),
			   sizeof(*vmid_map), GFP_KERNEL);
	if (!vmid_map)
		return -ENOMEM;

	return 0;
}
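
/*
 * Free the VMID bitmap allocated by kvm_arm_vmid_alloc_init().
 */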
void kvm_arm_vmid_alloc_free(void)
{
	kfree(vmid_map);
}