vgic-v3-sr.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <[email protected]>
 */

#include <hyp/adjust_pc.h>

#include <linux/compiler.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

#define vtr_to_max_lr_idx(v)	((v) & 0xf)
#define vtr_to_nr_pre_bits(v)	((((u32)(v) >> 26) & 7) + 1)
#define vtr_to_nr_apr_regs(v)	(1 << (vtr_to_nr_pre_bits(v) - 5))
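
/*
 * The List Registers have no indexed accessor; each LR is a separate
 * system register, so reads and writes are dispatched with a switch
 * on the LR number.
 */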
static u64 __gic_v3_get_lr(unsigned int lr)
{
        switch (lr & 0xf) {
        case 0:
                return read_gicreg(ICH_LR0_EL2);
        case 1:
                return read_gicreg(ICH_LR1_EL2);
        case 2:
                return read_gicreg(ICH_LR2_EL2);
        case 3:
                return read_gicreg(ICH_LR3_EL2);
        case 4:
                return read_gicreg(ICH_LR4_EL2);
        case 5:
                return read_gicreg(ICH_LR5_EL2);
        case 6:
                return read_gicreg(ICH_LR6_EL2);
        case 7:
                return read_gicreg(ICH_LR7_EL2);
        case 8:
                return read_gicreg(ICH_LR8_EL2);
        case 9:
                return read_gicreg(ICH_LR9_EL2);
        case 10:
                return read_gicreg(ICH_LR10_EL2);
        case 11:
                return read_gicreg(ICH_LR11_EL2);
        case 12:
                return read_gicreg(ICH_LR12_EL2);
        case 13:
                return read_gicreg(ICH_LR13_EL2);
        case 14:
                return read_gicreg(ICH_LR14_EL2);
        case 15:
                return read_gicreg(ICH_LR15_EL2);
        }

        unreachable();
}

static void __gic_v3_set_lr(u64 val, int lr)
{
        switch (lr & 0xf) {
        case 0:
                write_gicreg(val, ICH_LR0_EL2);
                break;
        case 1:
                write_gicreg(val, ICH_LR1_EL2);
                break;
        case 2:
                write_gicreg(val, ICH_LR2_EL2);
                break;
        case 3:
                write_gicreg(val, ICH_LR3_EL2);
                break;
        case 4:
                write_gicreg(val, ICH_LR4_EL2);
                break;
        case 5:
                write_gicreg(val, ICH_LR5_EL2);
                break;
        case 6:
                write_gicreg(val, ICH_LR6_EL2);
                break;
        case 7:
                write_gicreg(val, ICH_LR7_EL2);
                break;
        case 8:
                write_gicreg(val, ICH_LR8_EL2);
                break;
        case 9:
                write_gicreg(val, ICH_LR9_EL2);
                break;
        case 10:
                write_gicreg(val, ICH_LR10_EL2);
                break;
        case 11:
                write_gicreg(val, ICH_LR11_EL2);
                break;
        case 12:
                write_gicreg(val, ICH_LR12_EL2);
                break;
        case 13:
                write_gicreg(val, ICH_LR13_EL2);
                break;
        case 14:
                write_gicreg(val, ICH_LR14_EL2);
                break;
        case 15:
                write_gicreg(val, ICH_LR15_EL2);
                break;
        }
}
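
/*
 * Accessors for the active priority registers ICH_AP0Rn_EL2 and
 * ICH_AP1Rn_EL2, which are likewise individual system registers and
 * therefore dispatched by index.
 */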
static void __vgic_v3_write_ap0rn(u32 val, int n)
{
        switch (n) {
        case 0:
                write_gicreg(val, ICH_AP0R0_EL2);
                break;
        case 1:
                write_gicreg(val, ICH_AP0R1_EL2);
                break;
        case 2:
                write_gicreg(val, ICH_AP0R2_EL2);
                break;
        case 3:
                write_gicreg(val, ICH_AP0R3_EL2);
                break;
        }
}

static void __vgic_v3_write_ap1rn(u32 val, int n)
{
        switch (n) {
        case 0:
                write_gicreg(val, ICH_AP1R0_EL2);
                break;
        case 1:
                write_gicreg(val, ICH_AP1R1_EL2);
                break;
        case 2:
                write_gicreg(val, ICH_AP1R2_EL2);
                break;
        case 3:
                write_gicreg(val, ICH_AP1R3_EL2);
                break;
        }
}

static u32 __vgic_v3_read_ap0rn(int n)
{
        u32 val;

        switch (n) {
        case 0:
                val = read_gicreg(ICH_AP0R0_EL2);
                break;
        case 1:
                val = read_gicreg(ICH_AP0R1_EL2);
                break;
        case 2:
                val = read_gicreg(ICH_AP0R2_EL2);
                break;
        case 3:
                val = read_gicreg(ICH_AP0R3_EL2);
                break;
        default:
                unreachable();
        }

        return val;
}

static u32 __vgic_v3_read_ap1rn(int n)
{
        u32 val;

        switch (n) {
        case 0:
                val = read_gicreg(ICH_AP1R0_EL2);
                break;
        case 1:
                val = read_gicreg(ICH_AP1R1_EL2);
                break;
        case 2:
                val = read_gicreg(ICH_AP1R2_EL2);
                break;
        case 3:
                val = read_gicreg(ICH_AP1R3_EL2);
                break;
        default:
                unreachable();
        }

        return val;
}
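
/*
 * Save the guest's List Register state on exit: LRs flagged empty in
 * ICH_ELRSR_EL2 only need their state bits cleared in the shadow copy,
 * the others are read back in full. Every LR is then zeroed, and
 * ICH_HCR_EL2 is written with the enable bit cleared.
 */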
void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if)
{
        u64 used_lrs = cpu_if->used_lrs;

        /*
         * Make sure stores to the GIC via the memory mapped interface
         * are now visible to the system register interface when reading the
         * LRs, and when reading back the VMCR on non-VHE systems.
         */
        if (used_lrs || !has_vhe()) {
                if (!cpu_if->vgic_sre) {
                        dsb(sy);
                        isb();
                }
        }

        if (used_lrs || cpu_if->its_vpe.its_vm) {
                int i;
                u32 elrsr;

                elrsr = read_gicreg(ICH_ELRSR_EL2);

                write_gicreg(cpu_if->vgic_hcr & ~ICH_HCR_EN, ICH_HCR_EL2);

                for (i = 0; i < used_lrs; i++) {
                        if (elrsr & (1 << i))
                                cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
                        else
                                cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);

                        __gic_v3_set_lr(0, i);
                }
        }
}

void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if)
{
        u64 used_lrs = cpu_if->used_lrs;
        int i;

        if (used_lrs || cpu_if->its_vpe.its_vm) {
                write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);

                for (i = 0; i < used_lrs; i++)
                        __gic_v3_set_lr(cpu_if->vgic_lr[i], i);
        }

        /*
         * Ensure that writes to the LRs, and on non-VHE systems ensure that
         * the write to the VMCR in __vgic_v3_activate_traps(), will have
         * reached the (re)distributors. This ensures the guest will read the
         * correct values from the memory-mapped interface.
         */
        if (used_lrs || !has_vhe()) {
                if (!cpu_if->vgic_sre) {
                        isb();
                        dsb(sy);
                }
        }
}

void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if)
{
        /*
         * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
         * Group0 interrupt (as generated in GICv2 mode) to be
         * delivered as a FIQ to the guest, with potentially fatal
         * consequences. So we must make sure that ICC_SRE_EL1 has
         * been actually programmed with the value we want before
         * starting to mess with the rest of the GIC, and VMCR_EL2 in
         * particular. This logic must be called before
         * __vgic_v3_restore_state().
         */
        if (!cpu_if->vgic_sre) {
                write_gicreg(0, ICC_SRE_EL1);
                isb();
                write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);

                if (has_vhe()) {
                        /*
                         * Ensure that the write to the VMCR will have reached
                         * the (re)distributors. This ensures the guest will
                         * read the correct values from the memory-mapped
                         * interface.
                         */
                        isb();
                        dsb(sy);
                }
        }

        /*
         * Prevent the guest from touching the GIC system registers if
         * SRE isn't enabled for GICv3 emulation.
         */
        write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
                     ICC_SRE_EL2);

        /*
         * If we need to trap system registers, we must write
         * ICH_HCR_EL2 anyway, even if no interrupts are being
         * injected.
         */
        if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
            cpu_if->its_vpe.its_vm)
                write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
}

void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if)
{
        u64 val;

        if (!cpu_if->vgic_sre) {
                cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
        }

        val = read_gicreg(ICC_SRE_EL2);
        write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);

        if (!cpu_if->vgic_sre) {
                /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
                isb();
                write_gicreg(1, ICC_SRE_EL1);
        }

        /*
         * If we were trapping system registers, we enabled the VGIC even if
         * no interrupts were being injected, and we disable it again here.
         */
        if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
            cpu_if->its_vpe.its_vm)
                write_gicreg(0, ICH_HCR_EL2);
}

static void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
{
        u64 val;
        u32 nr_pre_bits;

        val = read_gicreg(ICH_VTR_EL2);
        nr_pre_bits = vtr_to_nr_pre_bits(val);

        switch (nr_pre_bits) {
        case 7:
                cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
                cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
                fallthrough;
        case 6:
                cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
                fallthrough;
        default:
                cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
        }

        switch (nr_pre_bits) {
        case 7:
                cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
                cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
                fallthrough;
        case 6:
                cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
                fallthrough;
        default:
                cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
        }
}

static void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if)
{
        u64 val;
        u32 nr_pre_bits;

        val = read_gicreg(ICH_VTR_EL2);
        nr_pre_bits = vtr_to_nr_pre_bits(val);

        switch (nr_pre_bits) {
        case 7:
                __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
                __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
                fallthrough;
        case 6:
                __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
                fallthrough;
        default:
                __vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
        }

        switch (nr_pre_bits) {
        case 7:
                __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
                __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
                fallthrough;
        case 6:
                __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
                fallthrough;
        default:
                __vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
        }
}
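
/* Zero all implemented List Registers at initialisation time. */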
void __vgic_v3_init_lrs(void)
{
        int max_lr_idx = vtr_to_max_lr_idx(read_gicreg(ICH_VTR_EL2));
        int i;

        for (i = 0; i <= max_lr_idx; i++)
                __gic_v3_set_lr(0, i);
}

/*
 * Return the GIC CPU configuration:
 * - [31:0]  ICH_VTR_EL2
 * - [62:32] RES0
 * - [63]    MMIO (GICv2) capable
 */
u64 __vgic_v3_get_gic_config(void)
{
        u64 val, sre = read_gicreg(ICC_SRE_EL1);
        unsigned long flags = 0;

        /*
         * To check whether we have an MMIO-based (GICv2 compatible)
         * CPU interface, we need to disable the system register
         * view. To do that safely, we have to prevent any interrupt
         * from firing (which would be deadly).
         *
         * Note that this only makes sense on VHE, as interrupts are
         * already masked for nVHE as part of the exception entry to
         * EL2.
         */
        if (has_vhe())
                flags = local_daif_save();

        /*
         * Table 11-2 "Permitted ICC_SRE_ELx.SRE settings" indicates
         * that to be able to set ICC_SRE_EL1.SRE to 0, all the
         * interrupt overrides must be set. You've got to love this.
         */
        sysreg_clear_set(hcr_el2, 0, HCR_AMO | HCR_FMO | HCR_IMO);
        isb();
        write_gicreg(0, ICC_SRE_EL1);
        isb();

        val = read_gicreg(ICC_SRE_EL1);

        write_gicreg(sre, ICC_SRE_EL1);
        isb();
        sysreg_clear_set(hcr_el2, HCR_AMO | HCR_FMO | HCR_IMO, 0);
        isb();

        if (has_vhe())
                local_daif_restore(flags);

        val  = (val & ICC_SRE_EL1_SRE) ? 0 : (1ULL << 63);
        val |= read_gicreg(ICH_VTR_EL2);

        return val;
}

static u64 __vgic_v3_read_vmcr(void)
{
        return read_gicreg(ICH_VMCR_EL2);
}

static void __vgic_v3_write_vmcr(u32 vmcr)
{
        write_gicreg(vmcr, ICH_VMCR_EL2);
}

void __vgic_v3_save_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if)
{
        __vgic_v3_save_aprs(cpu_if);
        if (cpu_if->vgic_sre)
                cpu_if->vgic_vmcr = __vgic_v3_read_vmcr();
}

void __vgic_v3_restore_vmcr_aprs(struct vgic_v3_cpu_if *cpu_if)
{
        /*
         * If dealing with a GICv2 emulation on GICv3, VMCR_EL2.VFIQen
         * is dependent on ICC_SRE_EL1.SRE, and we have to perform the
         * VMCR_EL2 save/restore in the world switch.
         */
        if (cpu_if->vgic_sre)
                __vgic_v3_write_vmcr(cpu_if->vgic_vmcr);
        __vgic_v3_restore_aprs(cpu_if);
}

static int __vgic_v3_bpr_min(void)
{
        /* See Pseudocode for VPriorityGroup */
        return 8 - vtr_to_nr_pre_bits(read_gicreg(ICH_VTR_EL2));
}
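
/*
 * Work out which interrupt group a trapped ICC register access targets:
 * the group-0 encodings handled through this helper (IAR0, EOIR0,
 * HPPIR0, AP0Rn) all have CRm == 8, so any other CRm value denotes a
 * group-1 access.
 */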
static int __vgic_v3_get_group(struct kvm_vcpu *vcpu)
{
        u64 esr = kvm_vcpu_get_esr(vcpu);
        u8 crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;

        return crm != 8;
}

#define GICv3_IDLE_PRIORITY	0xff
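
/*
 * Scan the in-use List Registers for the highest-priority interrupt
 * that is pending and whose group is enabled in the VMCR. Returns the
 * LR index (and its value through @lr_val), or -1 if nothing qualifies.
 */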
static int __vgic_v3_highest_priority_lr(struct kvm_vcpu *vcpu, u32 vmcr,
                                         u64 *lr_val)
{
        unsigned int used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
        u8 priority = GICv3_IDLE_PRIORITY;
        int i, lr = -1;

        for (i = 0; i < used_lrs; i++) {
                u64 val = __gic_v3_get_lr(i);
                u8 lr_prio = (val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

                /* Not pending in the state? */
                if ((val & ICH_LR_STATE) != ICH_LR_PENDING_BIT)
                        continue;

                /* Group-0 interrupt, but Group-0 disabled? */
                if (!(val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG0_MASK))
                        continue;

                /* Group-1 interrupt, but Group-1 disabled? */
                if ((val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG1_MASK))
                        continue;

                /* Not the highest priority? */
                if (lr_prio >= priority)
                        continue;

                /* This is a candidate */
                priority = lr_prio;
                *lr_val = val;
                lr = i;
        }

        if (lr == -1)
                *lr_val = ICC_IAR1_EL1_SPURIOUS;

        return lr;
}

static int __vgic_v3_find_active_lr(struct kvm_vcpu *vcpu, int intid,
                                    u64 *lr_val)
{
        unsigned int used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
        int i;

        for (i = 0; i < used_lrs; i++) {
                u64 val = __gic_v3_get_lr(i);

                if ((val & ICH_LR_VIRTUAL_ID_MASK) == intid &&
                    (val & ICH_LR_ACTIVE_BIT)) {
                        *lr_val = val;
                        return i;
                }
        }

        *lr_val = ICC_IAR1_EL1_SPURIOUS;
        return -1;
}

static int __vgic_v3_get_highest_active_priority(void)
{
        u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
        u32 hap = 0;
        int i;

        for (i = 0; i < nr_apr_regs; i++) {
                u32 val;

                /*
                 * The ICH_AP0Rn_EL2 and ICH_AP1Rn_EL2 registers
                 * contain the active priority levels for this VCPU
                 * for the maximum number of supported priority
                 * levels, and we return the full priority level only
                 * if the BPR is programmed to its minimum, otherwise
                 * we return a combination of the priority level and
                 * subpriority, as determined by the setting of the
                 * BPR, but without the full subpriority.
                 */
                val  = __vgic_v3_read_ap0rn(i);
                val |= __vgic_v3_read_ap1rn(i);
                if (!val) {
                        hap += 32;
                        continue;
                }

                return (hap + __ffs(val)) << __vgic_v3_bpr_min();
        }

        return GICv3_IDLE_PRIORITY;
}

static unsigned int __vgic_v3_get_bpr0(u32 vmcr)
{
        return (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
}

static unsigned int __vgic_v3_get_bpr1(u32 vmcr)
{
        unsigned int bpr;

        if (vmcr & ICH_VMCR_CBPR_MASK) {
                bpr = __vgic_v3_get_bpr0(vmcr);
                if (bpr < 7)
                        bpr++;
        } else {
                bpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
        }

        return bpr;
}

/*
 * Convert a priority to a preemption level, taking the relevant BPR
 * into account by zeroing the sub-priority bits.
 */
static u8 __vgic_v3_pri_to_pre(u8 pri, u32 vmcr, int grp)
{
        unsigned int bpr;

        if (!grp)
                bpr = __vgic_v3_get_bpr0(vmcr) + 1;
        else
                bpr = __vgic_v3_get_bpr1(vmcr);

        return pri & (GENMASK(7, 0) << bpr);
}

/*
 * The priority value is independent of any of the BPR values, so we
 * normalize it using the minimal BPR value. This guarantees that no
 * matter what the guest does with its BPR, we can always set/get the
 * same value of a priority.
 */
static void __vgic_v3_set_active_priority(u8 pri, u32 vmcr, int grp)
{
        u8 pre, ap;
        u32 val;
        int apr;

        pre = __vgic_v3_pri_to_pre(pri, vmcr, grp);
        ap = pre >> __vgic_v3_bpr_min();
        apr = ap / 32;

        if (!grp) {
                val = __vgic_v3_read_ap0rn(apr);
                __vgic_v3_write_ap0rn(val | BIT(ap % 32), apr);
        } else {
                val = __vgic_v3_read_ap1rn(apr);
                __vgic_v3_write_ap1rn(val | BIT(ap % 32), apr);
        }
}
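
/*
 * Priority drop: clear the lowest set bit (i.e. the highest active
 * priority) across the AP0Rn/AP1Rn registers and return the priority
 * it corresponds to, rescaled to the full 8-bit range.
 */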
static int __vgic_v3_clear_highest_active_priority(void)
{
        u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
        u32 hap = 0;
        int i;

        for (i = 0; i < nr_apr_regs; i++) {
                u32 ap0, ap1;
                int c0, c1;

                ap0 = __vgic_v3_read_ap0rn(i);
                ap1 = __vgic_v3_read_ap1rn(i);
                if (!ap0 && !ap1) {
                        hap += 32;
                        continue;
                }

                c0 = ap0 ? __ffs(ap0) : 32;
                c1 = ap1 ? __ffs(ap1) : 32;

                /* Always clear the LSB, which is the highest priority */
                if (c0 < c1) {
                        ap0 &= ~BIT(c0);
                        __vgic_v3_write_ap0rn(ap0, i);
                        hap += c0;
                } else {
                        ap1 &= ~BIT(c1);
                        __vgic_v3_write_ap1rn(ap1, i);
                        hap += c1;
                }

                /* Rescale to 8 bits of priority */
                return hap << __vgic_v3_bpr_min();
        }

        return GICv3_IDLE_PRIORITY;
}
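
/*
 * Emulate a read of ICC_IAR{0,1}_EL1: acknowledge the highest-priority
 * pending interrupt of the matching group if it beats both the PMR and
 * the current running priority, mark its LR active, record the new
 * active priority, and return the INTID (or a spurious ID otherwise).
 */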
static void __vgic_v3_read_iar(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u64 lr_val;
        u8 lr_prio, pmr;
        int lr, grp;

        grp = __vgic_v3_get_group(vcpu);

        lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
        if (lr < 0)
                goto spurious;

        if (grp != !!(lr_val & ICH_LR_GROUP))
                goto spurious;

        pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
        lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
        if (pmr <= lr_prio)
                goto spurious;

        if (__vgic_v3_get_highest_active_priority() <= __vgic_v3_pri_to_pre(lr_prio, vmcr, grp))
                goto spurious;

        lr_val &= ~ICH_LR_STATE;
        lr_val |= ICH_LR_ACTIVE_BIT;
        __gic_v3_set_lr(lr_val, lr);
        __vgic_v3_set_active_priority(lr_prio, vmcr, grp);
        vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
        return;

spurious:
        vcpu_set_reg(vcpu, rt, ICC_IAR1_EL1_SPURIOUS);
}
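
/*
 * Deactivate the interrupt held in an LR; for hardware-backed
 * interrupts, also forward the deactivation to the physical GIC
 * through ICC_DIR_EL1.
 */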
static void __vgic_v3_clear_active_lr(int lr, u64 lr_val)
{
        lr_val &= ~ICH_LR_ACTIVE_BIT;
        if (lr_val & ICH_LR_HW) {
                u32 pid;

                pid = (lr_val & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT;
                gic_write_dir(pid);
        }

        __gic_v3_set_lr(lr_val, lr);
}

static void __vgic_v3_bump_eoicount(void)
{
        u32 hcr;

        hcr = read_gicreg(ICH_HCR_EL2);
        hcr += 1 << ICH_HCR_EOIcount_SHIFT;
        write_gicreg(hcr, ICH_HCR_EL2);
}
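
/*
 * Emulate a write to ICC_DIR_EL1: with EOImode == 1 (the write is
 * ignored otherwise), deactivate the interrupt matching the written
 * INTID unless it is an LPI, bumping EOIcount if no matching active LR
 * is found.
 */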
static void __vgic_v3_write_dir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u32 vid = vcpu_get_reg(vcpu, rt);
        u64 lr_val;
        int lr;

        /* EOImode == 0, nothing to be done here */
        if (!(vmcr & ICH_VMCR_EOIM_MASK))
                return;

        /* No deactivate to be performed on an LPI */
        if (vid >= VGIC_MIN_LPI)
                return;

        lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
        if (lr == -1) {
                __vgic_v3_bump_eoicount();
                return;
        }

        __vgic_v3_clear_active_lr(lr, lr_val);
}
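
/*
 * Emulate a write to ICC_EOIR{0,1}_EL1: always perform the priority
 * drop, and additionally deactivate the matching LR when EOImode == 0
 * and the group and priority check out.
 */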
static void __vgic_v3_write_eoir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u32 vid = vcpu_get_reg(vcpu, rt);
        u64 lr_val;
        u8 lr_prio, act_prio;
        int lr, grp;

        grp = __vgic_v3_get_group(vcpu);

        /* Drop priority in any case */
        act_prio = __vgic_v3_clear_highest_active_priority();

        lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
        if (lr == -1) {
                /* Do not bump EOIcount for LPIs that aren't in the LRs */
                if (!(vid >= VGIC_MIN_LPI))
                        __vgic_v3_bump_eoicount();
                return;
        }

        /* EOImode == 1 and not an LPI, nothing to be done here */
        if ((vmcr & ICH_VMCR_EOIM_MASK) && !(vid >= VGIC_MIN_LPI))
                return;

        lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

        /* If priorities or group do not match, the guest has fscked-up. */
        if (grp != !!(lr_val & ICH_LR_GROUP) ||
            __vgic_v3_pri_to_pre(lr_prio, vmcr, grp) != act_prio)
                return;

        /* Let's now perform the deactivation */
        __vgic_v3_clear_active_lr(lr, lr_val);
}

static void __vgic_v3_read_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG0_MASK));
}

static void __vgic_v3_read_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG1_MASK));
}

static void __vgic_v3_write_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u64 val = vcpu_get_reg(vcpu, rt);

        if (val & 1)
                vmcr |= ICH_VMCR_ENG0_MASK;
        else
                vmcr &= ~ICH_VMCR_ENG0_MASK;

        __vgic_v3_write_vmcr(vmcr);
}

static void __vgic_v3_write_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u64 val = vcpu_get_reg(vcpu, rt);

        if (val & 1)
                vmcr |= ICH_VMCR_ENG1_MASK;
        else
                vmcr &= ~ICH_VMCR_ENG1_MASK;

        __vgic_v3_write_vmcr(vmcr);
}

static void __vgic_v3_read_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr0(vmcr));
}

static void __vgic_v3_read_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr1(vmcr));
}

static void __vgic_v3_write_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u64 val = vcpu_get_reg(vcpu, rt);
        u8 bpr_min = __vgic_v3_bpr_min() - 1;

        /* Enforce BPR limiting */
        if (val < bpr_min)
                val = bpr_min;

        val <<= ICH_VMCR_BPR0_SHIFT;
        val &= ICH_VMCR_BPR0_MASK;
        vmcr &= ~ICH_VMCR_BPR0_MASK;
        vmcr |= val;

        __vgic_v3_write_vmcr(vmcr);
}

static void __vgic_v3_write_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u64 val = vcpu_get_reg(vcpu, rt);
        u8 bpr_min = __vgic_v3_bpr_min();

        if (vmcr & ICH_VMCR_CBPR_MASK)
                return;

        /* Enforce BPR limiting */
        if (val < bpr_min)
                val = bpr_min;

        val <<= ICH_VMCR_BPR1_SHIFT;
        val &= ICH_VMCR_BPR1_MASK;
        vmcr &= ~ICH_VMCR_BPR1_MASK;
        vmcr |= val;

        __vgic_v3_write_vmcr(vmcr);
}

static void __vgic_v3_read_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
        u32 val;

        if (!__vgic_v3_get_group(vcpu))
                val = __vgic_v3_read_ap0rn(n);
        else
                val = __vgic_v3_read_ap1rn(n);

        vcpu_set_reg(vcpu, rt, val);
}

static void __vgic_v3_write_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
        u32 val = vcpu_get_reg(vcpu, rt);

        if (!__vgic_v3_get_group(vcpu))
                __vgic_v3_write_ap0rn(val, n);
        else
                __vgic_v3_write_ap1rn(val, n);
}

static void __vgic_v3_read_apxr0(struct kvm_vcpu *vcpu,
                                 u32 vmcr, int rt)
{
        __vgic_v3_read_apxrn(vcpu, rt, 0);
}

static void __vgic_v3_read_apxr1(struct kvm_vcpu *vcpu,
                                 u32 vmcr, int rt)
{
        __vgic_v3_read_apxrn(vcpu, rt, 1);
}

static void __vgic_v3_read_apxr2(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        __vgic_v3_read_apxrn(vcpu, rt, 2);
}

static void __vgic_v3_read_apxr3(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        __vgic_v3_read_apxrn(vcpu, rt, 3);
}

static void __vgic_v3_write_apxr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        __vgic_v3_write_apxrn(vcpu, rt, 0);
}

static void __vgic_v3_write_apxr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        __vgic_v3_write_apxrn(vcpu, rt, 1);
}

static void __vgic_v3_write_apxr2(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        __vgic_v3_write_apxrn(vcpu, rt, 2);
}

static void __vgic_v3_write_apxr3(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        __vgic_v3_write_apxrn(vcpu, rt, 3);
}

static void __vgic_v3_read_hppir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u64 lr_val;
        int lr, lr_grp, grp;

        grp = __vgic_v3_get_group(vcpu);

        lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
        if (lr == -1)
                goto spurious;

        lr_grp = !!(lr_val & ICH_LR_GROUP);
        if (lr_grp != grp)
                lr_val = ICC_IAR1_EL1_SPURIOUS;

spurious:
        vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
}

static void __vgic_v3_read_pmr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        vmcr &= ICH_VMCR_PMR_MASK;
        vmcr >>= ICH_VMCR_PMR_SHIFT;
        vcpu_set_reg(vcpu, rt, vmcr);
}

static void __vgic_v3_write_pmr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u32 val = vcpu_get_reg(vcpu, rt);

        val <<= ICH_VMCR_PMR_SHIFT;
        val &= ICH_VMCR_PMR_MASK;
        vmcr &= ~ICH_VMCR_PMR_MASK;
        vmcr |= val;

        write_gicreg(vmcr, ICH_VMCR_EL2);
}

static void __vgic_v3_read_rpr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u32 val = __vgic_v3_get_highest_active_priority();

        vcpu_set_reg(vcpu, rt, val);
}

static void __vgic_v3_read_ctlr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u32 vtr, val;

        vtr = read_gicreg(ICH_VTR_EL2);
        /* PRIbits */
        val = ((vtr >> 29) & 7) << ICC_CTLR_EL1_PRI_BITS_SHIFT;
        /* IDbits */
        val |= ((vtr >> 23) & 7) << ICC_CTLR_EL1_ID_BITS_SHIFT;
        /* SEIS */
        if (kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_SEIS_MASK)
                val |= BIT(ICC_CTLR_EL1_SEIS_SHIFT);
        /* A3V */
        val |= ((vtr >> 21) & 1) << ICC_CTLR_EL1_A3V_SHIFT;
        /* EOImode */
        val |= ((vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT) << ICC_CTLR_EL1_EOImode_SHIFT;
        /* CBPR */
        val |= (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;

        vcpu_set_reg(vcpu, rt, val);
}

static void __vgic_v3_write_ctlr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u32 val = vcpu_get_reg(vcpu, rt);

        if (val & ICC_CTLR_EL1_CBPR_MASK)
                vmcr |= ICH_VMCR_CBPR_MASK;
        else
                vmcr &= ~ICH_VMCR_CBPR_MASK;

        if (val & ICC_CTLR_EL1_EOImode_MASK)
                vmcr |= ICH_VMCR_EOIM_MASK;
        else
                vmcr &= ~ICH_VMCR_EOIM_MASK;

        write_gicreg(vmcr, ICH_VMCR_EL2);
}
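
/*
 * Main trap handler for guest accesses to the GICv3 CPU interface:
 * decode the trapped system register (or its AArch32 CP15 alias),
 * dispatch to the matching emulation helper, and skip the instruction.
 * Returns 1 if the access was handled, 0 to defer to the rest of KVM.
 */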
int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
        int rt;
        u64 esr;
        u32 vmcr;
        void (*fn)(struct kvm_vcpu *, u32, int);
        bool is_read;
        u32 sysreg;

        esr = kvm_vcpu_get_esr(vcpu);
        if (vcpu_mode_is_32bit(vcpu)) {
                if (!kvm_condition_valid(vcpu)) {
                        __kvm_skip_instr(vcpu);
                        return 1;
                }

                sysreg = esr_cp15_to_sysreg(esr);
        } else {
                sysreg = esr_sys64_to_sysreg(esr);
        }

        is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ;

        switch (sysreg) {
        case SYS_ICC_IAR0_EL1:
        case SYS_ICC_IAR1_EL1:
                if (unlikely(!is_read))
                        return 0;
                fn = __vgic_v3_read_iar;
                break;
        case SYS_ICC_EOIR0_EL1:
        case SYS_ICC_EOIR1_EL1:
                if (unlikely(is_read))
                        return 0;
                fn = __vgic_v3_write_eoir;
                break;
        case SYS_ICC_IGRPEN1_EL1:
                if (is_read)
                        fn = __vgic_v3_read_igrpen1;
                else
                        fn = __vgic_v3_write_igrpen1;
                break;
        case SYS_ICC_BPR1_EL1:
                if (is_read)
                        fn = __vgic_v3_read_bpr1;
                else
                        fn = __vgic_v3_write_bpr1;
                break;
        case SYS_ICC_AP0Rn_EL1(0):
        case SYS_ICC_AP1Rn_EL1(0):
                if (is_read)
                        fn = __vgic_v3_read_apxr0;
                else
                        fn = __vgic_v3_write_apxr0;
                break;
        case SYS_ICC_AP0Rn_EL1(1):
        case SYS_ICC_AP1Rn_EL1(1):
                if (is_read)
                        fn = __vgic_v3_read_apxr1;
                else
                        fn = __vgic_v3_write_apxr1;
                break;
        case SYS_ICC_AP0Rn_EL1(2):
        case SYS_ICC_AP1Rn_EL1(2):
                if (is_read)
                        fn = __vgic_v3_read_apxr2;
                else
                        fn = __vgic_v3_write_apxr2;
                break;
        case SYS_ICC_AP0Rn_EL1(3):
        case SYS_ICC_AP1Rn_EL1(3):
                if (is_read)
                        fn = __vgic_v3_read_apxr3;
                else
                        fn = __vgic_v3_write_apxr3;
                break;
        case SYS_ICC_HPPIR0_EL1:
        case SYS_ICC_HPPIR1_EL1:
                if (unlikely(!is_read))
                        return 0;
                fn = __vgic_v3_read_hppir;
                break;
        case SYS_ICC_IGRPEN0_EL1:
                if (is_read)
                        fn = __vgic_v3_read_igrpen0;
                else
                        fn = __vgic_v3_write_igrpen0;
                break;
        case SYS_ICC_BPR0_EL1:
                if (is_read)
                        fn = __vgic_v3_read_bpr0;
                else
                        fn = __vgic_v3_write_bpr0;
                break;
        case SYS_ICC_DIR_EL1:
                if (unlikely(is_read))
                        return 0;
                fn = __vgic_v3_write_dir;
                break;
        case SYS_ICC_RPR_EL1:
                if (unlikely(!is_read))
                        return 0;
                fn = __vgic_v3_read_rpr;
                break;
        case SYS_ICC_CTLR_EL1:
                if (is_read)
                        fn = __vgic_v3_read_ctlr;
                else
                        fn = __vgic_v3_write_ctlr;
                break;
        case SYS_ICC_PMR_EL1:
                if (is_read)
                        fn = __vgic_v3_read_pmr;
                else
                        fn = __vgic_v3_write_pmr;
                break;
        default:
                return 0;
        }

        vmcr = __vgic_v3_read_vmcr();
        rt = kvm_vcpu_sys_get_rt(vcpu);
        fn(vcpu, vmcr, rt);

        __kvm_skip_instr(vcpu);

        return 1;
}