vgic-v2.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 */

#include <linux/irqchip/arm-gic.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"

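/* Write one GICv2 List Register (GICH_LRn) in the GICH control interface. */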
static inline void vgic_v2_write_lr(int lr, u32 val)
{
	void __iomem *base = kvm_vgic_global_state.vctrl_base;

	writel_relaxed(val, base + GICH_LR0 + (lr * 4));
}

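/* Reset all List Registers to an empty state. */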
void vgic_v2_init_lrs(void)
{
	int i;

	for (i = 0; i < kvm_vgic_global_state.nr_lr; i++)
		vgic_v2_write_lr(i, 0);
}

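/*
 * Enable the underflow maintenance interrupt (GICH_HCR_UIE) so that we get
 * notified when the List Registers drain and more pending interrupts from
 * the ap_list can be injected.
 */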
void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;

	cpuif->vgic_hcr |= GICH_HCR_UIE;
}

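/*
 * An LR signals an EOI maintenance interrupt if it is invalid (no pending or
 * active state), has EOI notification enabled, and is not a HW-mapped
 * interrupt.
 */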
static bool lr_signals_eoi_mi(u32 lr_val)
{
	return !(lr_val & GICH_LR_STATE) && (lr_val & GICH_LR_EOI) &&
	       !(lr_val & GICH_LR_HW);
}

/*
 * transfer the content of the LRs back into the corresponding ap_list:
 * - active bit is transferred as is
 * - pending bit is
 *   - transferred as is in case of edge sensitive IRQs
 *   - set to the line-level (resample time) for level sensitive IRQs
 */
void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_v2_cpu_if *cpuif = &vgic_cpu->vgic_v2;
	int lr;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

	cpuif->vgic_hcr &= ~GICH_HCR_UIE;

	for (lr = 0; lr < vgic_cpu->vgic_v2.used_lrs; lr++) {
		u32 val = cpuif->vgic_lr[lr];
		u32 cpuid, intid = val & GICH_LR_VIRTUALID;
		struct vgic_irq *irq;
		bool deactivated;

		/* Extract the source vCPU id from the LR */
		cpuid = val & GICH_LR_PHYSID_CPUID;
		cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
		cpuid &= 7;

		/* Notify fds when the guest EOI'ed a level-triggered SPI */
		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
			kvm_notify_acked_irq(vcpu->kvm, 0,
					     intid - VGIC_NR_PRIVATE_IRQS);

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);

		raw_spin_lock(&irq->irq_lock);

		/* Always preserve the active bit, note deactivation */
		deactivated = irq->active && !(val & GICH_LR_ACTIVE_BIT);
		irq->active = !!(val & GICH_LR_ACTIVE_BIT);

		if (irq->active && vgic_irq_is_sgi(intid))
			irq->active_source = cpuid;

		/* Edge is the only case where we preserve the pending bit */
		if (irq->config == VGIC_CONFIG_EDGE &&
		    (val & GICH_LR_PENDING_BIT)) {
			irq->pending_latch = true;

			if (vgic_irq_is_sgi(intid))
				irq->source |= (1 << cpuid);
		}

		/*
		 * Clear soft pending state when level irqs have been acked.
		 */
		if (irq->config == VGIC_CONFIG_LEVEL && !(val & GICH_LR_STATE))
			irq->pending_latch = false;

		/* Handle resampling for mapped interrupts if required */
		vgic_irq_handle_resampling(irq, deactivated, val & GICH_LR_PENDING_BIT);

		raw_spin_unlock(&irq->irq_lock);
		vgic_put_irq(vcpu->kvm, irq);
	}

	cpuif->used_lrs = 0;
}

/*
 * Populates the particular LR with the state of a given IRQ:
 * - for an edge sensitive IRQ the pending state is cleared in struct vgic_irq
 * - for a level sensitive IRQ the pending state value is unchanged;
 *   it is dictated directly by the input level
 *
 * If @irq describes an SGI with multiple sources, we choose the
 * lowest-numbered source VCPU and clear that bit in the source bitmap.
 *
 * The irq_lock must be held by the caller.
 */
void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
	u32 val = irq->intid;
	bool allow_pending = true;

	if (irq->active) {
		val |= GICH_LR_ACTIVE_BIT;
		if (vgic_irq_is_sgi(irq->intid))
			val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
		if (vgic_irq_is_multi_sgi(irq)) {
			allow_pending = false;
			val |= GICH_LR_EOI;
		}
	}

	if (irq->group)
		val |= GICH_LR_GROUP1;

	if (irq->hw && !vgic_irq_needs_resampling(irq)) {
		val |= GICH_LR_HW;
		val |= irq->hwintid << GICH_LR_PHYSID_CPUID_SHIFT;
		/*
		 * Never set pending+active on a HW interrupt, as the
		 * pending state is kept at the physical distributor
		 * level.
		 */
		if (irq->active)
			allow_pending = false;
	} else {
		if (irq->config == VGIC_CONFIG_LEVEL) {
			val |= GICH_LR_EOI;

			/*
			 * Software resampling doesn't work very well
			 * if we allow P+A, so let's not do that.
			 */
			if (irq->active)
				allow_pending = false;
		}
	}

	if (allow_pending && irq_is_pending(irq)) {
		val |= GICH_LR_PENDING_BIT;

		if (irq->config == VGIC_CONFIG_EDGE)
			irq->pending_latch = false;

		if (vgic_irq_is_sgi(irq->intid)) {
			u32 src = ffs(irq->source);

			if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
					   irq->intid))
				return;

			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
			irq->source &= ~(1 << (src - 1));

			if (irq->source) {
				irq->pending_latch = true;
				val |= GICH_LR_EOI;
			}
		}
	}

	/*
	 * Level-triggered mapped IRQs are special because we only observe
	 * rising edges as input to the VGIC. We therefore lower the line
	 * level here, so that we can take new virtual IRQs. See
	 * vgic_v2_fold_lr_state for more info.
	 */
	if (vgic_irq_is_mapped_level(irq) && (val & GICH_LR_PENDING_BIT))
		irq->line_level = false;

	/* The GICv2 LR only holds five bits of priority. */
	val |= (irq->priority >> 3) << GICH_LR_PRIORITY_SHIFT;

	vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = val;
}

void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = 0;
}

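/* Pack the generic struct vgic_vmcr into the GICH_VMCR hardware layout. */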
void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	u32 vmcr;

	vmcr = (vmcrp->grpen0 << GICH_VMCR_ENABLE_GRP0_SHIFT) &
		GICH_VMCR_ENABLE_GRP0_MASK;
	vmcr |= (vmcrp->grpen1 << GICH_VMCR_ENABLE_GRP1_SHIFT) &
		GICH_VMCR_ENABLE_GRP1_MASK;
	vmcr |= (vmcrp->ackctl << GICH_VMCR_ACK_CTL_SHIFT) &
		GICH_VMCR_ACK_CTL_MASK;
	vmcr |= (vmcrp->fiqen << GICH_VMCR_FIQ_EN_SHIFT) &
		GICH_VMCR_FIQ_EN_MASK;
	vmcr |= (vmcrp->cbpr << GICH_VMCR_CBPR_SHIFT) &
		GICH_VMCR_CBPR_MASK;
	vmcr |= (vmcrp->eoim << GICH_VMCR_EOI_MODE_SHIFT) &
		GICH_VMCR_EOI_MODE_MASK;
	vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) &
		GICH_VMCR_ALIAS_BINPOINT_MASK;
	vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
		GICH_VMCR_BINPOINT_MASK;
	vmcr |= ((vmcrp->pmr >> GICV_PMR_PRIORITY_SHIFT) <<
		 GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;

	cpu_if->vgic_vmcr = vmcr;
}

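/* Unpack GICH_VMCR back into the generic struct vgic_vmcr representation. */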
void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	u32 vmcr;

	vmcr = cpu_if->vgic_vmcr;

	vmcrp->grpen0 = (vmcr & GICH_VMCR_ENABLE_GRP0_MASK) >>
		GICH_VMCR_ENABLE_GRP0_SHIFT;
	vmcrp->grpen1 = (vmcr & GICH_VMCR_ENABLE_GRP1_MASK) >>
		GICH_VMCR_ENABLE_GRP1_SHIFT;
	vmcrp->ackctl = (vmcr & GICH_VMCR_ACK_CTL_MASK) >>
		GICH_VMCR_ACK_CTL_SHIFT;
	vmcrp->fiqen = (vmcr & GICH_VMCR_FIQ_EN_MASK) >>
		GICH_VMCR_FIQ_EN_SHIFT;
	vmcrp->cbpr = (vmcr & GICH_VMCR_CBPR_MASK) >>
		GICH_VMCR_CBPR_SHIFT;
	vmcrp->eoim = (vmcr & GICH_VMCR_EOI_MODE_MASK) >>
		GICH_VMCR_EOI_MODE_SHIFT;
	vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >>
		GICH_VMCR_ALIAS_BINPOINT_SHIFT;
	vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
		GICH_VMCR_BINPOINT_SHIFT;
	vmcrp->pmr = ((vmcr & GICH_VMCR_PRIMASK_MASK) >>
		      GICH_VMCR_PRIMASK_SHIFT) << GICV_PMR_PRIORITY_SHIFT;
}

void vgic_v2_enable(struct kvm_vcpu *vcpu)
{
	/*
	 * By forcing VMCR to zero, the GIC will restore the binary
	 * points to their reset values. Anything else resets to zero
	 * anyway.
	 */
	vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;

	/* Get the show on the road... */
	vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
}

/* check for overlapping regions and for regions crossing the end of memory */
static bool vgic_v2_check_base(gpa_t dist_base, gpa_t cpu_base)
{
	if (dist_base + KVM_VGIC_V2_DIST_SIZE < dist_base)
		return false;
	if (cpu_base + KVM_VGIC_V2_CPU_SIZE < cpu_base)
		return false;

	if (dist_base + KVM_VGIC_V2_DIST_SIZE <= cpu_base)
		return true;
	if (cpu_base + KVM_VGIC_V2_CPU_SIZE <= dist_base)
		return true;

	return false;
}

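/*
 * Check the distributor/CPU interface base addresses supplied by userspace,
 * initialize the vgic if needed and, unless GICV accesses are trapped, map
 * the host GICV frame into the guest at the CPU interface address.
 */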
int vgic_v2_map_resources(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int ret = 0;

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
		kvm_debug("Need to set vgic cpu and dist addresses first\n");
		return -ENXIO;
	}

	if (!vgic_v2_check_base(dist->vgic_dist_base, dist->vgic_cpu_base)) {
		kvm_debug("VGIC CPU and dist frames overlap\n");
		return -EINVAL;
	}

	/*
	 * Initialize the vgic if this hasn't already been done on demand by
	 * accessing the vgic state from userspace.
	 */
	ret = vgic_init(kvm);
	if (ret) {
		kvm_err("Unable to initialize VGIC dynamic data structures\n");
		return ret;
	}

	if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
		ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
					    kvm_vgic_global_state.vcpu_base,
					    KVM_VGIC_V2_CPU_SIZE, true);
		if (ret) {
			kvm_err("Unable to remap VGIC CPU to VCPU\n");
			return ret;
		}
	}

	return 0;
}

DEFINE_STATIC_KEY_FALSE(vgic_v2_cpuif_trap);

/**
 * vgic_v2_probe - probe for a VGICv2 compatible interrupt controller
 * @info:	pointer to the GIC description
 *
 * Returns 0 if the VGICv2 has been probed successfully, returns an error code
 * otherwise
 */
int vgic_v2_probe(const struct gic_kvm_info *info)
{
	int ret;
	u32 vtr;

	if (is_protected_kvm_enabled()) {
		kvm_err("GICv2 not supported in protected mode\n");
		return -ENXIO;
	}

	if (!info->vctrl.start) {
		kvm_err("GICH not present in the firmware table\n");
		return -ENXIO;
	}

	if (!PAGE_ALIGNED(info->vcpu.start) ||
	    !PAGE_ALIGNED(resource_size(&info->vcpu))) {
		kvm_info("GICV region size/alignment is unsafe, using trapping (reduced performance)\n");

		ret = create_hyp_io_mappings(info->vcpu.start,
					     resource_size(&info->vcpu),
					     &kvm_vgic_global_state.vcpu_base_va,
					     &kvm_vgic_global_state.vcpu_hyp_va);
		if (ret) {
			kvm_err("Cannot map GICV into hyp\n");
			goto out;
		}

		static_branch_enable(&vgic_v2_cpuif_trap);
	}

	ret = create_hyp_io_mappings(info->vctrl.start,
				     resource_size(&info->vctrl),
				     &kvm_vgic_global_state.vctrl_base,
				     &kvm_vgic_global_state.vctrl_hyp);
	if (ret) {
		kvm_err("Cannot map VCTRL into hyp\n");
		goto out;
	}

	vtr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VTR);
	kvm_vgic_global_state.nr_lr = (vtr & 0x3f) + 1;

	ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
	if (ret) {
		kvm_err("Cannot register GICv2 KVM device\n");
		goto out;
	}

	kvm_vgic_global_state.can_emulate_gicv2 = true;
	kvm_vgic_global_state.vcpu_base = info->vcpu.start;
	kvm_vgic_global_state.type = VGIC_V2;
	kvm_vgic_global_state.max_gic_vcpus = VGIC_V2_MAX_CPUS;

	kvm_debug("vgic-v2@%llx\n", info->vctrl.start);

	return 0;
out:
	if (kvm_vgic_global_state.vctrl_base)
		iounmap(kvm_vgic_global_state.vctrl_base);
	if (kvm_vgic_global_state.vcpu_base_va)
		iounmap(kvm_vgic_global_state.vcpu_base_va);
	return ret;
}

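/*
 * Read the used List Registers back on guest exit: LRs flagged empty in
 * GICH_ELRSR only need their state bits cleared in the shadow copy, the
 * others are read back in full, and every used LR is then zeroed in hardware.
 */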
static void save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	u64 used_lrs = cpu_if->used_lrs;
	u64 elrsr;
	int i;

	elrsr = readl_relaxed(base + GICH_ELRSR0);
	if (unlikely(used_lrs > 32))
		elrsr |= ((u64)readl_relaxed(base + GICH_ELRSR1)) << 32;

	for (i = 0; i < used_lrs; i++) {
		if (elrsr & (1UL << i))
			cpu_if->vgic_lr[i] &= ~GICH_LR_STATE;
		else
			cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));

		writel_relaxed(0, base + GICH_LR0 + (i * 4));
	}
}

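/* On return from the guest, save the List Registers and clear GICH_HCR. */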
void vgic_v2_save_state(struct kvm_vcpu *vcpu)
{
	void __iomem *base = kvm_vgic_global_state.vctrl_base;
	u64 used_lrs = vcpu->arch.vgic_cpu.vgic_v2.used_lrs;

	if (!base)
		return;

	if (used_lrs) {
		save_lrs(vcpu, base);
		writel_relaxed(0, base + GICH_HCR);
	}
}

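/* Before entering the guest, restore GICH_HCR and replay the shadow LR copies. */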
void vgic_v2_restore_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	void __iomem *base = kvm_vgic_global_state.vctrl_base;
	u64 used_lrs = cpu_if->used_lrs;
	int i;

	if (!base)
		return;

	if (used_lrs) {
		writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
		for (i = 0; i < used_lrs; i++) {
			writel_relaxed(cpu_if->vgic_lr[i],
				       base + GICH_LR0 + (i * 4));
		}
	}
}

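/* Restore GICH_VMCR and the active priorities (GICH_APR) on vcpu_load. */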
void vgic_v2_load(struct kvm_vcpu *vcpu)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;

	writel_relaxed(cpu_if->vgic_vmcr,
		       kvm_vgic_global_state.vctrl_base + GICH_VMCR);
	writel_relaxed(cpu_if->vgic_apr,
		       kvm_vgic_global_state.vctrl_base + GICH_APR);
}

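/* Save GICH_VMCR and GICH_APR back into the shadow state on vcpu_put. */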
void vgic_v2_put(struct kvm_vcpu *vcpu, bool blocking)
{
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;

	cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
	cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR);
}