vgic-v3.c
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_asm.h>

#include "vgic.h"

static bool group0_trap;
static bool group1_trap;
static bool common_trap;
static bool dir_trap;
static bool gicv4_enable;
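
/*
 * Request an underflow maintenance interrupt (ICH_HCR_EL2.UIE) so that
 * the List Registers can be refilled once they are (almost) empty.
 */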
void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
{
        struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;

        cpuif->vgic_hcr |= ICH_HCR_UIE;
}
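
/*
 * An LR signals an EOI maintenance interrupt when it has been invalidated
 * (no pending/active state left), carries the EOI bit and does not map a
 * hardware interrupt.
 */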
static bool lr_signals_eoi_mi(u64 lr_val)
{
        return !(lr_val & ICH_LR_STATE) && (lr_val & ICH_LR_EOI) &&
               !(lr_val & ICH_LR_HW);
}
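
/*
 * Fold the List Register state back into the software model after a guest
 * exit: update the active/pending state of each vgic_irq and notify irqfds
 * about EOI'd level-triggered interrupts.
 */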
void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
        u32 model = vcpu->kvm->arch.vgic.vgic_model;
        int lr;

        DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

        cpuif->vgic_hcr &= ~ICH_HCR_UIE;

        for (lr = 0; lr < cpuif->used_lrs; lr++) {
                u64 val = cpuif->vgic_lr[lr];
                u32 intid, cpuid;
                struct vgic_irq *irq;
                bool is_v2_sgi = false;
                bool deactivated;

                cpuid = val & GICH_LR_PHYSID_CPUID;
                cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;

                if (model == KVM_DEV_TYPE_ARM_VGIC_V3) {
                        intid = val & ICH_LR_VIRTUAL_ID_MASK;
                } else {
                        intid = val & GICH_LR_VIRTUALID;
                        is_v2_sgi = vgic_irq_is_sgi(intid);
                }

                /* Notify fds when the guest EOI'ed a level-triggered IRQ */
                if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
                        kvm_notify_acked_irq(vcpu->kvm, 0,
                                             intid - VGIC_NR_PRIVATE_IRQS);

                irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
                if (!irq)       /* An LPI could have been unmapped. */
                        continue;

                raw_spin_lock(&irq->irq_lock);

                /* Always preserve the active bit, note deactivation */
                deactivated = irq->active && !(val & ICH_LR_ACTIVE_BIT);
                irq->active = !!(val & ICH_LR_ACTIVE_BIT);

                if (irq->active && is_v2_sgi)
                        irq->active_source = cpuid;

                /* Edge is the only case where we preserve the pending bit */
                if (irq->config == VGIC_CONFIG_EDGE &&
                    (val & ICH_LR_PENDING_BIT)) {
                        irq->pending_latch = true;

                        if (is_v2_sgi)
                                irq->source |= (1 << cpuid);
                }

                /*
                 * Clear soft pending state when level irqs have been acked.
                 */
                if (irq->config == VGIC_CONFIG_LEVEL && !(val & ICH_LR_STATE))
                        irq->pending_latch = false;

                /* Handle resampling for mapped interrupts if required */
                vgic_irq_handle_resampling(irq, deactivated, val & ICH_LR_PENDING_BIT);

                raw_spin_unlock(&irq->irq_lock);
                vgic_put_irq(vcpu->kvm, irq);
        }

        cpuif->used_lrs = 0;
}

/* Requires the irq to be locked already */
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
        u32 model = vcpu->kvm->arch.vgic.vgic_model;
        u64 val = irq->intid;
        bool allow_pending = true, is_v2_sgi;

        is_v2_sgi = (vgic_irq_is_sgi(irq->intid) &&
                     model == KVM_DEV_TYPE_ARM_VGIC_V2);

        if (irq->active) {
                val |= ICH_LR_ACTIVE_BIT;
                if (is_v2_sgi)
                        val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
                if (vgic_irq_is_multi_sgi(irq)) {
                        allow_pending = false;
                        val |= ICH_LR_EOI;
                }
        }

        if (irq->hw && !vgic_irq_needs_resampling(irq)) {
                val |= ICH_LR_HW;
                val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
                /*
                 * Never set pending+active on a HW interrupt, as the
                 * pending state is kept at the physical distributor
                 * level.
                 */
                if (irq->active)
                        allow_pending = false;
        } else {
                if (irq->config == VGIC_CONFIG_LEVEL) {
                        val |= ICH_LR_EOI;

                        /*
                         * Software resampling doesn't work very well
                         * if we allow P+A, so let's not do that.
                         */
                        if (irq->active)
                                allow_pending = false;
                }
        }

        if (allow_pending && irq_is_pending(irq)) {
                val |= ICH_LR_PENDING_BIT;

                if (irq->config == VGIC_CONFIG_EDGE)
                        irq->pending_latch = false;

                if (vgic_irq_is_sgi(irq->intid) &&
                    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
                        u32 src = ffs(irq->source);

                        if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
                                           irq->intid))
                                return;

                        val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
                        irq->source &= ~(1 << (src - 1));

                        if (irq->source) {
                                irq->pending_latch = true;
                                val |= ICH_LR_EOI;
                        }
                }
        }

        /*
         * Level-triggered mapped IRQs are special because we only observe
         * rising edges as input to the VGIC. We therefore lower the line
         * level here, so that we can take new virtual IRQs. See
         * vgic_v3_fold_lr_state for more info.
         */
        if (vgic_irq_is_mapped_level(irq) && (val & ICH_LR_PENDING_BIT))
                irq->line_level = false;

        if (irq->group)
                val |= ICH_LR_GROUP;

        val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT;

        vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
}

void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
        vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = 0;
}
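
/*
 * Translate the generic vgic_vmcr representation into the ICH_VMCR_EL2
 * layout stored in the shadow CPU interface state.
 */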
void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
        u32 model = vcpu->kvm->arch.vgic.vgic_model;
        u32 vmcr;

        if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
                vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) &
                        ICH_VMCR_ACK_CTL_MASK;
                vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) &
                        ICH_VMCR_FIQ_EN_MASK;
        } else {
                /*
                 * When emulating GICv3 on GICv3 with SRE==1, the VFIQEn
                 * bit is RES1 and the VAckCtl bit is RES0.
                 */
                vmcr = ICH_VMCR_FIQ_EN_MASK;
        }

        vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
        vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
        vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
        vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
        vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
        vmcr |= (vmcrp->grpen0 << ICH_VMCR_ENG0_SHIFT) & ICH_VMCR_ENG0_MASK;
        vmcr |= (vmcrp->grpen1 << ICH_VMCR_ENG1_SHIFT) & ICH_VMCR_ENG1_MASK;

        cpu_if->vgic_vmcr = vmcr;
}
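
/*
 * Inverse of vgic_v3_set_vmcr(): decode the shadow ICH_VMCR_EL2 value back
 * into the generic vgic_vmcr representation.
 */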
void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
        u32 model = vcpu->kvm->arch.vgic.vgic_model;
        u32 vmcr;

        vmcr = cpu_if->vgic_vmcr;

        if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
                vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >>
                        ICH_VMCR_ACK_CTL_SHIFT;
                vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >>
                        ICH_VMCR_FIQ_EN_SHIFT;
        } else {
                /*
                 * When emulating GICv3 on GICv3 with SRE==1, the VFIQEn
                 * bit is RES1 and the VAckCtl bit is RES0.
                 */
                vmcrp->fiqen = 1;
                vmcrp->ackctl = 0;
        }

        vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
        vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT;
        vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
        vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
        vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
        vmcrp->grpen0 = (vmcr & ICH_VMCR_ENG0_MASK) >> ICH_VMCR_ENG0_SHIFT;
        vmcrp->grpen1 = (vmcr & ICH_VMCR_ENG1_MASK) >> ICH_VMCR_ENG1_SHIFT;
}

#define INITIAL_PENDBASER_VALUE \
        (GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb) | \
         GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, SameAsInner) | \
         GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable))
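
/*
 * Reset the per-vCPU GICv3 state to its architectural reset values and
 * enable the virtual CPU interface, applying the trap bits selected at
 * probe time.
 */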
void vgic_v3_enable(struct kvm_vcpu *vcpu)
{
        struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;

        /*
         * By forcing VMCR to zero, the GIC will restore the binary
         * points to their reset values. Anything else resets to zero
         * anyway.
         */
        vgic_v3->vgic_vmcr = 0;

        /*
         * If we are emulating a GICv3, we do it in a non-GICv2-compatible
         * way, so we force SRE to 1 to demonstrate this to the guest.
         * Also, we don't support any form of IRQ/FIQ bypass.
         * This goes with the spec allowing the value to be RAO/WI.
         */
        if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
                vgic_v3->vgic_sre = (ICC_SRE_EL1_DIB |
                                     ICC_SRE_EL1_DFB |
                                     ICC_SRE_EL1_SRE);
                vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE;
        } else {
                vgic_v3->vgic_sre = 0;
        }

        vcpu->arch.vgic_cpu.num_id_bits = (kvm_vgic_global_state.ich_vtr_el2 &
                                           ICH_VTR_ID_BITS_MASK) >>
                                           ICH_VTR_ID_BITS_SHIFT;
        vcpu->arch.vgic_cpu.num_pri_bits = ((kvm_vgic_global_state.ich_vtr_el2 &
                                            ICH_VTR_PRI_BITS_MASK) >>
                                            ICH_VTR_PRI_BITS_SHIFT) + 1;

        /* Get the show on the road... */
        vgic_v3->vgic_hcr = ICH_HCR_EN;
        if (group0_trap)
                vgic_v3->vgic_hcr |= ICH_HCR_TALL0;
        if (group1_trap)
                vgic_v3->vgic_hcr |= ICH_HCR_TALL1;
        if (common_trap)
                vgic_v3->vgic_hcr |= ICH_HCR_TC;
        if (dir_trap)
                vgic_v3->vgic_hcr |= ICH_HCR_TDIR;
}
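
/*
 * Read the pending state of a single LPI from the guest's pending table,
 * propagate it to the in-kernel pending_latch (queueing the interrupt as
 * needed) and clear the bit in guest memory once it has been consumed.
 */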
int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
{
        struct kvm_vcpu *vcpu;
        int byte_offset, bit_nr;
        gpa_t pendbase, ptr;
        bool status;
        u8 val;
        int ret;
        unsigned long flags;

retry:
        vcpu = irq->target_vcpu;
        if (!vcpu)
                return 0;

        pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

        byte_offset = irq->intid / BITS_PER_BYTE;
        bit_nr = irq->intid % BITS_PER_BYTE;
        ptr = pendbase + byte_offset;

        ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
        if (ret)
                return ret;

        status = val & (1 << bit_nr);

        raw_spin_lock_irqsave(&irq->irq_lock, flags);
        if (irq->target_vcpu != vcpu) {
                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                goto retry;
        }
        irq->pending_latch = status;
        vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

        if (status) {
                /* clear consumed data */
                val &= ~(1 << bit_nr);
                ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
                if (ret)
                        return ret;
        }
        return 0;
}

/*
 * The deactivation of the doorbell interrupt will trigger the
 * unmapping of the associated vPE.
 */
static void unmap_all_vpes(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        int i;

        for (i = 0; i < dist->its_vm.nr_vpes; i++)
                free_irq(dist->its_vm.vpes[i]->irq, kvm_get_vcpu(kvm, i));
}

static void map_all_vpes(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        int i;

        for (i = 0; i < dist->its_vm.nr_vpes; i++)
                WARN_ON(vgic_v4_request_vpe_irq(kvm_get_vcpu(kvm, i),
                                                dist->its_vm.vpes[i]->irq));
}

/**
 * vgic_v3_save_pending_tables - Save the pending tables into guest RAM
 * @kvm: kvm handle
 *
 * The kvm lock and all vcpu locks must be held.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int vgic_v3_save_pending_tables(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct vgic_irq *irq;
        gpa_t last_ptr = ~(gpa_t)0;
        bool vlpi_avail = false;
        int ret = 0;
        u8 val;

        if (unlikely(!vgic_initialized(kvm)))
                return -ENXIO;

        /*
         * A preparation for getting any VLPI states.
         * The above vgic initialized check also ensures that the allocation
         * and enabling of the doorbells have already been done.
         */
        if (kvm_vgic_global_state.has_gicv4_1) {
                unmap_all_vpes(kvm);
                vlpi_avail = true;
        }

        list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
                int byte_offset, bit_nr;
                struct kvm_vcpu *vcpu;
                gpa_t pendbase, ptr;
                bool is_pending;
                bool stored;

                vcpu = irq->target_vcpu;
                if (!vcpu)
                        continue;

                pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

                byte_offset = irq->intid / BITS_PER_BYTE;
                bit_nr = irq->intid % BITS_PER_BYTE;
                ptr = pendbase + byte_offset;

                if (ptr != last_ptr) {
                        ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
                        if (ret)
                                goto out;
                        last_ptr = ptr;
                }

                stored = val & (1U << bit_nr);

                is_pending = irq->pending_latch;
                if (irq->hw && vlpi_avail)
                        vgic_v4_get_vlpi_state(irq, &is_pending);

                if (stored == is_pending)
                        continue;

                if (is_pending)
                        val |= 1 << bit_nr;
                else
                        val &= ~(1 << bit_nr);

                ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
                if (ret)
                        goto out;
        }

out:
        if (vlpi_avail)
                map_all_vpes(kvm);

        return ret;
}

/**
 * vgic_v3_rdist_overlap - check if a region overlaps with any
 * existing redistributor region
 *
 * @kvm: kvm handle
 * @base: base of the region
 * @size: size of region
 *
 * Return: true if there is an overlap
 */
bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size)
{
        struct vgic_dist *d = &kvm->arch.vgic;
        struct vgic_redist_region *rdreg;

        list_for_each_entry(rdreg, &d->rd_regions, list) {
                if ((base + size > rdreg->base) &&
                    (base < rdreg->base + vgic_v3_rd_region_size(kvm, rdreg)))
                        return true;
        }
        return false;
}

/*
 * Check for overlapping regions and for regions crossing the end of memory
 * for base addresses which have already been set.
 */
bool vgic_v3_check_base(struct kvm *kvm)
{
        struct vgic_dist *d = &kvm->arch.vgic;
        struct vgic_redist_region *rdreg;

        if (!IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) &&
            d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE < d->vgic_dist_base)
                return false;

        list_for_each_entry(rdreg, &d->rd_regions, list) {
                size_t sz = vgic_v3_rd_region_size(kvm, rdreg);

                if (vgic_check_iorange(kvm, VGIC_ADDR_UNDEF,
                                       rdreg->base, SZ_64K, sz))
                        return false;
        }

        if (IS_VGIC_ADDR_UNDEF(d->vgic_dist_base))
                return true;

        return !vgic_v3_rdist_overlap(kvm, d->vgic_dist_base,
                                      KVM_VGIC_V3_DIST_SIZE);
}

/**
 * vgic_v3_rdist_free_slot - Look up registered rdist regions and identify one
 * which has free space to put a new rdist region.
 *
 * @rd_regions: redistributor region list head
 *
 * A redistributor region maps n redistributors, n = region size / (2 x 64kB).
 * The stride between redistributors is 0 and regions are filled in index
 * order.
 *
 * Return: the redist region handle, if any, that has space to map a new rdist
 * region.
 */
struct vgic_redist_region *vgic_v3_rdist_free_slot(struct list_head *rd_regions)
{
        struct vgic_redist_region *rdreg;

        list_for_each_entry(rdreg, rd_regions, list) {
                if (!vgic_v3_redist_region_full(rdreg))
                        return rdreg;
        }
        return NULL;
}
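
/*
 * Return the registered redistributor region with the given index, or NULL
 * if none exists.
 */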
struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
                                                           u32 index)
{
        struct list_head *rd_regions = &kvm->arch.vgic.rd_regions;
        struct vgic_redist_region *rdreg;

        list_for_each_entry(rdreg, rd_regions, list) {
                if (rdreg->index == index)
                        return rdreg;
        }
        return NULL;
}
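
/*
 * Perform the final checks before the VM is run: all distributor and
 * redistributor base addresses must have been set, the regions must not
 * overlap, and userspace must have initialized the vgic.
 */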
int vgic_v3_map_resources(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct kvm_vcpu *vcpu;
        unsigned long c;

        kvm_for_each_vcpu(c, vcpu, kvm) {
                struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

                if (IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr)) {
                        kvm_debug("vcpu %ld redistributor base not set\n", c);
                        return -ENXIO;
                }
        }

        if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base)) {
                kvm_debug("Need to set vgic distributor addresses first\n");
                return -ENXIO;
        }

        if (!vgic_v3_check_base(kvm)) {
                kvm_debug("VGIC redist and dist frames overlap\n");
                return -EINVAL;
        }

        /*
         * For a VGICv3 we require the userland to explicitly initialize
         * the VGIC before we need to use it.
         */
        if (!vgic_initialized(kvm))
                return -EBUSY;

        if (kvm_vgic_global_state.has_gicv4_1)
                vgic_v4_configure_vsgis(kvm);

        return 0;
}

DEFINE_STATIC_KEY_FALSE(vgic_v3_cpuif_trap);

static int __init early_group0_trap_cfg(char *buf)
{
        return strtobool(buf, &group0_trap);
}
early_param("kvm-arm.vgic_v3_group0_trap", early_group0_trap_cfg);

static int __init early_group1_trap_cfg(char *buf)
{
        return strtobool(buf, &group1_trap);
}
early_param("kvm-arm.vgic_v3_group1_trap", early_group1_trap_cfg);

static int __init early_common_trap_cfg(char *buf)
{
        return strtobool(buf, &common_trap);
}
early_param("kvm-arm.vgic_v3_common_trap", early_common_trap_cfg);

static int __init early_gicv4_enable(char *buf)
{
        return strtobool(buf, &gicv4_enable);
}
early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);

static const struct midr_range broken_seis[] = {
        MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM),
        MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM),
        MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_PRO),
        MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_PRO),
        MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_MAX),
        MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_MAX),
        {},
};
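
/*
 * True when the CPU advertises SEIS in ICH_VTR_EL2 but is one of the
 * implementations listed above with broken locally generated SErrors.
 */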
static bool vgic_v3_broken_seis(void)
{
        return ((kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_SEIS_MASK) &&
                is_midr_in_range_list(read_cpuid_id(), broken_seis));
}

/**
 * vgic_v3_probe - probe for a VGICv3 compatible interrupt controller
 * @info: pointer to the GIC description
 *
 * Return: 0 if the VGICv3 has been probed successfully, an error code
 * otherwise
 */
int vgic_v3_probe(const struct gic_kvm_info *info)
{
        u64 ich_vtr_el2 = kvm_call_hyp_ret(__vgic_v3_get_gic_config);
        bool has_v2;
        int ret;

        has_v2 = ich_vtr_el2 >> 63;
        ich_vtr_el2 = (u32)ich_vtr_el2;

        /*
         * The ListRegs field is 5 bits, but there is an architectural
         * maximum of 16 list registers. Just ignore bit 4...
         */
        kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1;
        kvm_vgic_global_state.can_emulate_gicv2 = false;
        kvm_vgic_global_state.ich_vtr_el2 = ich_vtr_el2;

        /* GICv4 support? */
        if (info->has_v4) {
                kvm_vgic_global_state.has_gicv4 = gicv4_enable;
                kvm_vgic_global_state.has_gicv4_1 = info->has_v4_1 && gicv4_enable;
                kvm_info("GICv4%s support %sabled\n",
                         kvm_vgic_global_state.has_gicv4_1 ? ".1" : "",
                         gicv4_enable ? "en" : "dis");
        }

        kvm_vgic_global_state.vcpu_base = 0;

        if (!info->vcpu.start) {
                kvm_info("GICv3: no GICV resource entry\n");
        } else if (!has_v2) {
                pr_warn(FW_BUG "CPU interface incapable of MMIO access\n");
        } else if (!PAGE_ALIGNED(info->vcpu.start)) {
                pr_warn("GICV physical address 0x%llx not page aligned\n",
                        (unsigned long long)info->vcpu.start);
        } else if (kvm_get_mode() != KVM_MODE_PROTECTED) {
                kvm_vgic_global_state.vcpu_base = info->vcpu.start;
                kvm_vgic_global_state.can_emulate_gicv2 = true;
                ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
                if (ret) {
                        kvm_err("Cannot register GICv2 KVM device.\n");
                        return ret;
                }
                kvm_info("vgic-v2@%llx\n", info->vcpu.start);
        }
        ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V3);
        if (ret) {
                kvm_err("Cannot register GICv3 KVM device.\n");
                kvm_unregister_device_ops(KVM_DEV_TYPE_ARM_VGIC_V2);
                return ret;
        }

        if (kvm_vgic_global_state.vcpu_base == 0)
                kvm_info("disabling GICv2 emulation\n");

        if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_30115)) {
                group0_trap = true;
                group1_trap = true;
        }

        if (vgic_v3_broken_seis()) {
                kvm_info("GICv3 with broken locally generated SEI\n");

                kvm_vgic_global_state.ich_vtr_el2 &= ~ICH_VTR_SEIS_MASK;
                group0_trap = true;
                group1_trap = true;
                if (ich_vtr_el2 & ICH_VTR_TDS_MASK)
                        dir_trap = true;
                else
                        common_trap = true;
        }

        if (group0_trap || group1_trap || common_trap || dir_trap) {
                kvm_info("GICv3 sysreg trapping enabled ([%s%s%s%s], reduced performance)\n",
                         group0_trap ? "G0" : "",
                         group1_trap ? "G1" : "",
                         common_trap ? "C" : "",
                         dir_trap ? "D" : "");
                static_branch_enable(&vgic_v3_cpuif_trap);
        }

        kvm_vgic_global_state.vctrl_base = NULL;
        kvm_vgic_global_state.type = VGIC_V3;
        kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS;

        return 0;
}
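
/*
 * Restore the GICv3 CPU interface state (VMCR and APRs) when the vCPU is
 * scheduled in, activate the system register traps on VHE and load any
 * GICv4 state.
 */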
void vgic_v3_load(struct kvm_vcpu *vcpu)
{
        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

        if (likely(!is_protected_kvm_enabled()))
                kvm_call_hyp(__vgic_v3_restore_vmcr_aprs, cpu_if);

        if (has_vhe())
                __vgic_v3_activate_traps(cpu_if);

        WARN_ON(vgic_v4_load(vcpu));
}
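
/*
 * Counterpart of vgic_v3_load(): save the CPU interface state and
 * deactivate the traps when the vCPU is scheduled out.
 */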
void vgic_v3_put(struct kvm_vcpu *vcpu, bool blocking)
{
        struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

        WARN_ON(vgic_v4_put(vcpu));

        if (likely(!is_protected_kvm_enabled()))
                kvm_call_hyp(__vgic_v3_save_vmcr_aprs, cpu_if);

        if (has_vhe())
                __vgic_v3_deactivate_traps(cpu_if);
}