vgic-sys-reg-v3.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGIC system registers handling functions for AArch64 mode
 */

#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>

#include "vgic/vgic.h"
#include "sys_regs.h"

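/*
 * Userspace view of ICC_CTLR_EL1. Restore is only allowed when the saved
 * configuration fits this hardware: no more priority or ID bits than the
 * vGIC provides, and SEIS/A3V matching what ICH_VTR_EL2 reports, since
 * those bits mirror host capability and cannot be emulated.
 */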
static int set_gic_ctlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 val)
{
	u32 host_pri_bits, host_id_bits, host_seis, host_a3v, seis, a3v;
	struct vgic_cpu *vgic_v3_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);

	/*
	 * Disallow restoring VM state if not supported by this
	 * hardware.
	 */
	host_pri_bits = FIELD_GET(ICC_CTLR_EL1_PRI_BITS_MASK, val) + 1;
	if (host_pri_bits > vgic_v3_cpu->num_pri_bits)
		return -EINVAL;

	vgic_v3_cpu->num_pri_bits = host_pri_bits;

	host_id_bits = FIELD_GET(ICC_CTLR_EL1_ID_BITS_MASK, val);
	if (host_id_bits > vgic_v3_cpu->num_id_bits)
		return -EINVAL;

	vgic_v3_cpu->num_id_bits = host_id_bits;

	host_seis = FIELD_GET(ICH_VTR_SEIS_MASK, kvm_vgic_global_state.ich_vtr_el2);
	seis = FIELD_GET(ICC_CTLR_EL1_SEIS_MASK, val);
	if (host_seis != seis)
		return -EINVAL;

	host_a3v = FIELD_GET(ICH_VTR_A3V_MASK, kvm_vgic_global_state.ich_vtr_el2);
	a3v = FIELD_GET(ICC_CTLR_EL1_A3V_MASK, val);
	if (host_a3v != a3v)
		return -EINVAL;

	/*
	 * Here set VMCR.CTLR in ICC_CTLR_EL1 layout.
	 * The vgic_set_vmcr() will convert to ICH_VMCR layout.
	 */
	vmcr.cbpr = FIELD_GET(ICC_CTLR_EL1_CBPR_MASK, val);
	vmcr.eoim = FIELD_GET(ICC_CTLR_EL1_EOImode_MASK, val);
	vgic_set_vmcr(vcpu, &vmcr);

	return 0;
}

static int get_gic_ctlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 *valp)
{
	struct vgic_cpu *vgic_v3_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_vmcr vmcr;
	u64 val;

	vgic_get_vmcr(vcpu, &vmcr);

	val = 0;
	val |= FIELD_PREP(ICC_CTLR_EL1_PRI_BITS_MASK, vgic_v3_cpu->num_pri_bits - 1);
	val |= FIELD_PREP(ICC_CTLR_EL1_ID_BITS_MASK, vgic_v3_cpu->num_id_bits);
	val |= FIELD_PREP(ICC_CTLR_EL1_SEIS_MASK,
			  FIELD_GET(ICH_VTR_SEIS_MASK,
				    kvm_vgic_global_state.ich_vtr_el2));
	val |= FIELD_PREP(ICC_CTLR_EL1_A3V_MASK,
			  FIELD_GET(ICH_VTR_A3V_MASK, kvm_vgic_global_state.ich_vtr_el2));
	/*
	 * The VMCR.CTLR value is in ICC_CTLR_EL1 layout.
	 * Extract it directly using ICC_CTLR_EL1 reg definitions.
	 */
	val |= FIELD_PREP(ICC_CTLR_EL1_CBPR_MASK, vmcr.cbpr);
	val |= FIELD_PREP(ICC_CTLR_EL1_EOImode_MASK, vmcr.eoim);

	*valp = val;

	return 0;
}

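/*
 * PMR, the BPRs and the group enables all live in the shadow vgic_vmcr;
 * the accessors below simply convert between the architectural
 * ICC_*_EL1 field layout and that structure.
 */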
static int set_gic_pmr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
		       u64 val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	vmcr.pmr = FIELD_GET(ICC_PMR_EL1_MASK, val);
	vgic_set_vmcr(vcpu, &vmcr);

	return 0;
}

static int get_gic_pmr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
		       u64 *val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	*val = FIELD_PREP(ICC_PMR_EL1_MASK, vmcr.pmr);

	return 0;
}

static int set_gic_bpr0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	vmcr.bpr = FIELD_GET(ICC_BPR0_EL1_MASK, val);
	vgic_set_vmcr(vcpu, &vmcr);

	return 0;
}

static int get_gic_bpr0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 *val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	*val = FIELD_PREP(ICC_BPR0_EL1_MASK, vmcr.bpr);

	return 0;
}

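/*
 * When ICC_CTLR_EL1.CBPR is set, BPR1 is an alias of BPR0: the GICv3
 * architecture ignores writes and reads back BPR0 + 1, saturated at 7.
 * BPR1 is therefore only propagated to the VMCR while CBPR is clear.
 */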
static int set_gic_bpr1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	if (!vmcr.cbpr) {
		vmcr.abpr = FIELD_GET(ICC_BPR1_EL1_MASK, val);
		vgic_set_vmcr(vcpu, &vmcr);
	}

	return 0;
}

static int get_gic_bpr1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 *val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	if (!vmcr.cbpr)
		*val = FIELD_PREP(ICC_BPR1_EL1_MASK, vmcr.abpr);
	else
		*val = min((vmcr.bpr + 1), 7U);

	return 0;
}

static int set_gic_grpen0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	vmcr.grpen0 = FIELD_GET(ICC_IGRPEN0_EL1_MASK, val);
	vgic_set_vmcr(vcpu, &vmcr);

	return 0;
}

static int get_gic_grpen0(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 *val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	*val = FIELD_PREP(ICC_IGRPEN0_EL1_MASK, vmcr.grpen0);

	return 0;
}

static int set_gic_grpen1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	vmcr.grpen1 = FIELD_GET(ICC_IGRPEN1_EL1_MASK, val);
	vgic_set_vmcr(vcpu, &vmcr);

	return 0;
}

static int get_gic_grpen1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 *val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);
	*val = FIELD_GET(ICC_IGRPEN1_EL1_MASK, vmcr.grpen1);

	return 0;
}

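/*
 * The active priority registers are not part of the VMCR; they are held
 * directly in the shadow vGICv3 CPU interface state as the ICH_AP0Rn/
 * ICH_AP1Rn images, with the index taken from the low bits of Op2.
 */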
static void set_apr_reg(struct kvm_vcpu *vcpu, u64 val, u8 apr, u8 idx)
{
	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

	if (apr)
		vgicv3->vgic_ap1r[idx] = val;
	else
		vgicv3->vgic_ap0r[idx] = val;
}

static u64 get_apr_reg(struct kvm_vcpu *vcpu, u8 apr, u8 idx)
{
	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

	if (apr)
		return vgicv3->vgic_ap1r[idx];
	else
		return vgicv3->vgic_ap0r[idx];
}

static int set_gic_ap0r(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 val)
{
	u8 idx = r->Op2 & 3;

	if (idx > vgic_v3_max_apr_idx(vcpu))
		return -EINVAL;

	set_apr_reg(vcpu, val, 0, idx);
	return 0;
}

static int get_gic_ap0r(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 *val)
{
	u8 idx = r->Op2 & 3;

	if (idx > vgic_v3_max_apr_idx(vcpu))
		return -EINVAL;

	*val = get_apr_reg(vcpu, 0, idx);
	return 0;
}

static int set_gic_ap1r(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 val)
{
	u8 idx = r->Op2 & 3;

	if (idx > vgic_v3_max_apr_idx(vcpu))
		return -EINVAL;

	set_apr_reg(vcpu, val, 1, idx);
	return 0;
}

static int get_gic_ap1r(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			u64 *val)
{
	u8 idx = r->Op2 & 3;

	if (idx > vgic_v3_max_apr_idx(vcpu))
		return -EINVAL;

	*val = get_apr_reg(vcpu, 1, idx);
	return 0;
}

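/*
 * A vGICv3 guest always runs with the system-register interface enabled,
 * so the only value userspace may restore has ICC_SRE_EL1.SRE set; the
 * write is otherwise ignored, and reads return the fixed vgic_sre value.
 */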
static int set_gic_sre(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
		       u64 val)
{
	/* Validate SRE bit */
	if (!(val & ICC_SRE_EL1_SRE))
		return -EINVAL;

	return 0;
}

static int get_gic_sre(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
		       u64 *val)
{
	struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

	*val = vgicv3->vgic_sre;

	return 0;
}

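/*
 * ICC_*_EL1 registers accessible from userspace, kept in system-register
 * encoding order: get_reg_by_id() binary-searches this table.
 */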
static const struct sys_reg_desc gic_v3_icc_reg_descs[] = {
	{ SYS_DESC(SYS_ICC_PMR_EL1),
	  .set_user = set_gic_pmr, .get_user = get_gic_pmr, },
	{ SYS_DESC(SYS_ICC_BPR0_EL1),
	  .set_user = set_gic_bpr0, .get_user = get_gic_bpr0, },
	{ SYS_DESC(SYS_ICC_AP0R0_EL1),
	  .set_user = set_gic_ap0r, .get_user = get_gic_ap0r, },
	{ SYS_DESC(SYS_ICC_AP0R1_EL1),
	  .set_user = set_gic_ap0r, .get_user = get_gic_ap0r, },
	{ SYS_DESC(SYS_ICC_AP0R2_EL1),
	  .set_user = set_gic_ap0r, .get_user = get_gic_ap0r, },
	{ SYS_DESC(SYS_ICC_AP0R3_EL1),
	  .set_user = set_gic_ap0r, .get_user = get_gic_ap0r, },
	{ SYS_DESC(SYS_ICC_AP1R0_EL1),
	  .set_user = set_gic_ap1r, .get_user = get_gic_ap1r, },
	{ SYS_DESC(SYS_ICC_AP1R1_EL1),
	  .set_user = set_gic_ap1r, .get_user = get_gic_ap1r, },
	{ SYS_DESC(SYS_ICC_AP1R2_EL1),
	  .set_user = set_gic_ap1r, .get_user = get_gic_ap1r, },
	{ SYS_DESC(SYS_ICC_AP1R3_EL1),
	  .set_user = set_gic_ap1r, .get_user = get_gic_ap1r, },
	{ SYS_DESC(SYS_ICC_BPR1_EL1),
	  .set_user = set_gic_bpr1, .get_user = get_gic_bpr1, },
	{ SYS_DESC(SYS_ICC_CTLR_EL1),
	  .set_user = set_gic_ctlr, .get_user = get_gic_ctlr, },
	{ SYS_DESC(SYS_ICC_SRE_EL1),
	  .set_user = set_gic_sre, .get_user = get_gic_sre, },
	{ SYS_DESC(SYS_ICC_IGRPEN0_EL1),
	  .set_user = set_gic_grpen0, .get_user = get_gic_grpen0, },
	{ SYS_DESC(SYS_ICC_IGRPEN1_EL1),
	  .set_user = set_gic_grpen1, .get_user = get_gic_grpen1, },
};

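/*
 * Convert a KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS attribute into the
 * equivalent KVM_REG_ARM64 system-register ID, so that the generic
 * sys_regs uaccess helpers can be reused for lookup and access.
 */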
static u64 attr_to_id(u64 attr)
{
	return ARM64_SYS_REG(FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_OP0_MASK, attr),
			     FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_OP1_MASK, attr),
			     FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_CRN_MASK, attr),
			     FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_CRM_MASK, attr),
			     FIELD_GET(KVM_REG_ARM_VGIC_SYSREG_OP2_MASK, attr));
}

int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	if (get_reg_by_id(attr_to_id(attr->attr), gic_v3_icc_reg_descs,
			  ARRAY_SIZE(gic_v3_icc_reg_descs)))
		return 0;

	return -ENXIO;
}

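/*
 * Entry point for KVM_GET_DEVICE_ATTR/KVM_SET_DEVICE_ATTR on the CPU
 * interface: wrap the attribute in a kvm_one_reg and hand it to the
 * common sys_regs accessors, which dispatch to the get_user/set_user
 * callbacks above.
 */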
int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu,
				struct kvm_device_attr *attr,
				bool is_write)
{
	struct kvm_one_reg reg = {
		.id = attr_to_id(attr->attr),
		.addr = attr->addr,
	};

	if (is_write)
		return kvm_sys_reg_set_user(vcpu, &reg, gic_v3_icc_reg_descs,
					    ARRAY_SIZE(gic_v3_icc_reg_descs));
	else
		return kvm_sys_reg_get_user(vcpu, &reg, gic_v3_icc_reg_descs,
					    ARRAY_SIZE(gic_v3_icc_reg_descs));
}