vgic-mmio.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGIC MMIO handling functions
 */

#include <linux/bitops.h>
#include <linux/bsearch.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"
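
/*
 * Trivial handlers for registers (or register bits) that are reserved or
 * not implemented: read-as-zero, read-as-one and write-ignore.
 */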
unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return 0;
}

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return -1UL;
}

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			unsigned int len, unsigned long val)
{
	/* Ignore */
}

int vgic_mmio_uaccess_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			       unsigned int len, unsigned long val)
{
	/* Ignore */
	return 0;
}
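
/*
 * The IGROUPR registers expose one bit per interrupt, reflecting whether the
 * interrupt is configured as Group 0 (bit clear) or Group 1 (bit set).
 */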
unsigned long vgic_mmio_read_group(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->group)
			value |= BIT(i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}
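
/*
 * Push the current priority and group of a HW-backed SGI down to the
 * GICv4.1 vSGI configuration via its_prop_update_vsgi().
 */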
static void vgic_update_vsgi(struct vgic_irq *irq)
{
	WARN_ON(its_prop_update_vsgi(irq->host_irq, irq->priority, irq->group));
}

void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
			   unsigned int len, unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		irq->group = !!(val & BIT(i));
		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			vgic_update_vsgi(irq);
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		} else {
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		}

		vgic_put_irq(vcpu->kvm, irq);
	}
}

/*
 * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
 * of the enabled bit, so there is only one function for both here.
 */
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->enabled)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}
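
/*
 * GICD_ISENABLER: each set bit enables the corresponding interrupt. HW SGIs
 * are enabled directly at the host level, and mapped level-triggered
 * interrupts resample the physical line before being queued.
 */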
void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			if (!irq->enabled) {
				struct irq_data *data;

				irq->enabled = true;
				data = &irq_to_desc(irq->host_irq)->irq_data;
				while (irqd_irq_disabled(data))
					enable_irq(irq->host_irq);
			}

			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
			vgic_put_irq(vcpu->kvm, irq);

			continue;
		} else if (vgic_irq_is_mapped_level(irq)) {
			bool was_high = irq->line_level;

			/*
			 * We need to update the state of the interrupt because
			 * the guest might have changed the state of the device
			 * while the interrupt was disabled at the VGIC level.
			 */
			irq->line_level = vgic_get_phys_line_level(irq);

			/*
			 * Deactivate the physical interrupt so the GIC will let
			 * us know when it is asserted again.
			 */
			if (!irq->active && was_high && !irq->line_level)
				vgic_irq_set_phys_active(irq, false);
		}

		irq->enabled = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw && vgic_irq_is_sgi(irq->intid) && irq->enabled)
			disable_irq_nosync(irq->host_irq);

		irq->enabled = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

int vgic_uaccess_write_senable(struct kvm_vcpu *vcpu,
			       gpa_t addr, unsigned int len,
			       unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->enabled = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return 0;
}

int vgic_uaccess_write_cenable(struct kvm_vcpu *vcpu,
			       gpa_t addr, unsigned int len,
			       unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->enabled = false;
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return 0;
}
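
/*
 * Common helper behind the guest MMIO and userspace reads of the pending
 * state; is_user selects the GICv3 userspace semantics (pending_latch only,
 * no sampling of the physical line for mapped level interrupts).
 */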
static unsigned long __read_pending(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    bool is_user)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		unsigned long flags;
		bool val;

		/*
		 * When used from userspace with a GICv3 model:
		 *
		 * Pending state of interrupt is latched in pending_latch
		 * variable. Userspace will save and restore pending state
		 * and line_level separately.
		 * Refer to Documentation/virt/kvm/devices/arm-vgic-v3.rst
		 * for handling of ISPENDR and ICPENDR.
		 */
		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			int err;

			val = false;
			err = irq_get_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    &val);
			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
		} else if (!is_user && vgic_irq_is_mapped_level(irq)) {
			val = vgic_get_phys_line_level(irq);
		} else {
			switch (vcpu->kvm->arch.vgic.vgic_model) {
			case KVM_DEV_TYPE_ARM_VGIC_V3:
				if (is_user) {
					val = irq->pending_latch;
					break;
				}
				fallthrough;
			default:
				val = irq_is_pending(irq);
				break;
			}
		}

		value |= ((u32)val << i);
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len)
{
	return __read_pending(vcpu, addr, len, false);
}

unsigned long vgic_uaccess_read_pending(struct kvm_vcpu *vcpu,
					gpa_t addr, unsigned int len)
{
	return __read_pending(vcpu, addr, len, true);
}

static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
{
	return (vgic_irq_is_sgi(irq->intid) &&
		vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2);
}
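
/*
 * GICD_ISPENDR: make the selected interrupts pending. HW SGIs are forwarded
 * to the GIC as an irqchip pending state change; GICv2 SGI bits are
 * write-ignored here.
 */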
void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/* GICD_ISPENDR0 SGI bits are WI */
		if (is_vgic_v2_sgi(vcpu, irq)) {
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			/* HW SGI? Ask the GIC to inject it */
			int err;

			err = irq_set_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    true);
			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);

			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
			vgic_put_irq(vcpu->kvm, irq);

			continue;
		}

		irq->pending_latch = true;
		if (irq->hw)
			vgic_irq_set_phys_active(irq, true);

		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

int vgic_uaccess_write_spending(struct kvm_vcpu *vcpu,
				gpa_t addr, unsigned int len,
				unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->pending_latch = true;

		/*
		 * GICv2 SGIs are terribly broken. We can't restore
		 * the source of the interrupt, so just pick the vcpu
		 * itself as the source...
		 */
		if (is_vgic_v2_sgi(vcpu, irq))
			irq->source |= BIT(vcpu->vcpu_id);

		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return 0;
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
{
	irq->pending_latch = false;

	/*
	 * We don't want the guest to effectively mask the physical
	 * interrupt by doing a write to SPENDR followed by a write to
	 * CPENDR for HW interrupts, so we clear the active state on
	 * the physical side if the virtual interrupt is not active.
	 * This may lead to taking an additional interrupt on the
	 * host, but that should not be a problem as the worst that
	 * can happen is an additional vgic injection. We also clear
	 * the pending state to maintain proper semantics for edge HW
	 * interrupts.
	 */
	vgic_irq_set_phys_pending(irq, false);
	if (!irq->active)
		vgic_irq_set_phys_active(irq, false);
}

void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/* GICD_ICPENDR0 SGI bits are WI */
		if (is_vgic_v2_sgi(vcpu, irq)) {
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			/* HW SGI? Ask the GIC to clear its pending bit */
			int err;

			err = irq_set_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    false);
			WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);

			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
			vgic_put_irq(vcpu->kvm, irq);

			continue;
		}

		if (irq->hw)
			vgic_hw_irq_cpending(vcpu, irq);
		else
			irq->pending_latch = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

int vgic_uaccess_write_cpending(struct kvm_vcpu *vcpu,
				gpa_t addr, unsigned int len,
				unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		/*
		 * More fun with GICv2 SGIs! If we're clearing one of them
		 * from userspace, which source vcpu to clear? Let's not
		 * even think of it, and blow the whole set.
		 */
		if (is_vgic_v2_sgi(vcpu, irq))
			irq->source = 0;

		irq->pending_latch = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return 0;
}

/*
 * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
 * is not queued on some running VCPU's LRs, because then the change to the
 * active state can be overwritten when the VCPU's state is synced coming back
 * from the guest.
 *
 * For shared interrupts as well as GICv3 private interrupts, we have to
 * stop all the VCPUs because interrupts can be migrated while we don't hold
 * the IRQ locks and we don't want to be chasing moving targets.
 *
 * For GICv2 private interrupts we don't have to do anything because
 * userspace accesses to the VGIC state already require all VCPUs to be
 * stopped, and only the VCPU itself can modify its private interrupts
 * active state, which guarantees that the VCPU is not running.
 */
static void vgic_access_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
	    intid >= VGIC_NR_PRIVATE_IRQS)
		kvm_arm_halt_guest(vcpu->kvm);
}

/* See vgic_access_active_prepare */
static void vgic_access_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
	    intid >= VGIC_NR_PRIVATE_IRQS)
		kvm_arm_resume_guest(vcpu->kvm);
}

static unsigned long __vgic_mmio_read_active(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/*
		 * Even for HW interrupts, don't evaluate the HW state as
		 * all the guest is interested in is the virtual state.
		 */
		if (irq->active)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 val;

	mutex_lock(&vcpu->kvm->arch.config_lock);
	vgic_access_active_prepare(vcpu, intid);

	val = __vgic_mmio_read_active(vcpu, addr, len);

	vgic_access_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->arch.config_lock);

	return val;
}

unsigned long vgic_uaccess_read_active(struct kvm_vcpu *vcpu,
				       gpa_t addr, unsigned int len)
{
	return __vgic_mmio_read_active(vcpu, addr, len);
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				      bool active, bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->active = active;
	vgic_irq_set_phys_active(irq, active);
}

static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				    bool active)
{
	unsigned long flags;
	struct kvm_vcpu *requester_vcpu = kvm_get_running_vcpu();

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	if (irq->hw && !vgic_irq_is_sgi(irq->intid)) {
		vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
	} else if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
		/*
		 * GICv4.1 VSGI feature doesn't track an active state,
		 * so let's not kid ourselves, there is nothing we can
		 * do here.
		 */
		irq->active = false;
	} else {
		u32 model = vcpu->kvm->arch.vgic.vgic_model;
		u8 active_source;

		irq->active = active;

		/*
		 * The GICv2 architecture indicates that the source CPUID for
		 * an SGI should be provided during an EOI which implies that
		 * the active state is stored somewhere, but at the same time
		 * this state is not architecturally exposed anywhere and we
		 * have no way of knowing the right source.
		 *
		 * This may lead to a VCPU not being able to receive
		 * additional instances of a particular SGI after migration
		 * for a GICv2 VM on some GIC implementations. Oh well.
		 */
		active_source = (requester_vcpu) ? requester_vcpu->vcpu_id : 0;

		if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
		    active && vgic_irq_is_sgi(irq->intid))
			irq->active_source = active_source;
	}

	if (irq->active)
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
	else
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
}

static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		vgic_mmio_change_active(vcpu, irq, false);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->arch.config_lock);
	vgic_access_active_prepare(vcpu, intid);

	__vgic_mmio_write_cactive(vcpu, addr, len, val);

	vgic_access_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->arch.config_lock);
}

int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	__vgic_mmio_write_cactive(vcpu, addr, len, val);
	return 0;
}

static void __vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		vgic_mmio_change_active(vcpu, irq, true);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->arch.config_lock);
	vgic_access_active_prepare(vcpu, intid);

	__vgic_mmio_write_sactive(vcpu, addr, len, val);

	vgic_access_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->arch.config_lock);
}

int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	__vgic_mmio_write_sactive(vcpu, addr, len, val);
	return 0;
}
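
/*
 * GICD_IPRIORITYR holds one byte per interrupt, so a 'len'-byte access
 * covers 'len' interrupts.
 */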
unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->priority << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

/*
 * We currently don't handle changing the priority of an interrupt that
 * is already pending on a VCPU. If there is a need for this, we would
 * need to make this VCPU exit and re-evaluate the priorities, potentially
 * leading to this interrupt getting presented now to the guest (if it has
 * been masked by the priority mask before).
 */
void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		/* Narrow the priority range to what we actually support */
		irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
		if (irq->hw && vgic_irq_is_sgi(irq->intid))
			vgic_update_vsgi(irq);
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}
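
/*
 * GICD_ICFGR uses two bits per interrupt; bit 1 of each field is set for
 * edge-triggered interrupts and clear for level-triggered ones.
 */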
unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	u32 value = 0;
	int i;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->config == VGIC_CONFIG_EDGE)
			value |= (2U << (i * 2));

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
			    gpa_t addr, unsigned int len,
			    unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq;

		/*
		 * The configuration cannot be changed for SGIs in general,
		 * for PPIs this is IMPLEMENTATION DEFINED. The arch timer
		 * code relies on PPIs being level triggered, so we also
		 * make them read-only here.
		 */
		if (intid + i < VGIC_NR_PRIVATE_IRQS)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (test_bit(i * 2 + 1, &val))
			irq->config = VGIC_CONFIG_EDGE;
		else
			irq->config = VGIC_CONFIG_LEVEL;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}
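
/*
 * Userspace save/restore of the line level of a 32-interrupt window starting
 * at intid. SGIs and interrupts beyond the configured range are skipped.
 */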
u32 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
{
	int i;
	u32 val = 0;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		if (irq->config == VGIC_CONFIG_LEVEL && irq->line_level)
			val |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
				    const u32 val)
{
	int i;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
	unsigned long flags;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;
		bool new_level;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/*
		 * Line level is set irrespective of irq type
		 * (level or edge) to avoid dependency that VM should
		 * restore irq config before line level.
		 */
		new_level = !!(val & (1U << i));
		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->line_level = new_level;
		if (new_level)
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		else
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}
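
/* bsearch() comparator used to locate the register region covering an offset */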
static int match_region(const void *key, const void *elt)
{
	const unsigned int offset = (unsigned long)key;
	const struct vgic_register_region *region = elt;

	if (offset < region->reg_offset)
		return -1;

	if (offset >= region->reg_offset + region->len)
		return 1;

	return 0;
}

const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *regions,
		      int nr_regions, unsigned int offset)
{
	return bsearch((void *)(uintptr_t)offset, regions, nr_regions,
		       sizeof(regions[0]), match_region);
}

void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_vmcr(vcpu, vmcr);
	else
		vgic_v3_set_vmcr(vcpu, vmcr);
}

void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_get_vmcr(vcpu, vmcr);
	else
		vgic_v3_get_vmcr(vcpu, vmcr);
}

/*
 * kvm_mmio_read_buf() returns a value in a format where it can be converted
 * to a byte array and be directly observed as the guest wanted it to appear
 * in memory if it had done the store itself, which is LE for the GIC, as the
 * guest knows the GIC is always LE.
 *
 * We convert this value to the CPUs native format to deal with it as a data
 * value.
 */
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len)
{
	unsigned long data = kvm_mmio_read_buf(val, len);

	switch (len) {
	case 1:
		return data;
	case 2:
		return le16_to_cpu(data);
	case 4:
		return le32_to_cpu(data);
	default:
		return le64_to_cpu(data);
	}
}

/*
 * kvm_mmio_write_buf() expects a value in a format such that if converted to
 * a byte array it is observed as the guest would see it if it could perform
 * the load directly. Since the GIC is LE, and the guest knows this, the
 * guest expects a value in little endian format.
 *
 * We convert the data value from the CPUs native format to LE so that the
 * value is returned in the proper format.
 */
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
				unsigned long data)
{
	switch (len) {
	case 1:
		break;
	case 2:
		data = cpu_to_le16(data);
		break;
	case 4:
		data = cpu_to_le32(data);
		break;
	default:
		data = cpu_to_le64(data);
	}

	kvm_mmio_write_buf(buf, len, data);
}

static
struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
{
	return container_of(dev, struct vgic_io_device, dev);
}
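
/*
 * Check that the access width and alignment match the region's access flags,
 * and that per-IRQ regions are not accessed beyond the number of IRQs the VM
 * has been configured with.
 */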
static bool check_region(const struct kvm *kvm,
			 const struct vgic_register_region *region,
			 gpa_t addr, int len)
{
	int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	switch (len) {
	case sizeof(u8):
		flags = VGIC_ACCESS_8bit;
		break;
	case sizeof(u32):
		flags = VGIC_ACCESS_32bit;
		break;
	case sizeof(u64):
		flags = VGIC_ACCESS_64bit;
		break;
	default:
		return false;
	}

	if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
		if (!region->bits_per_irq)
			return true;

		/* Do we access a non-allocated IRQ? */
		return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
	}

	return false;
}

const struct vgic_register_region *
vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
		     gpa_t addr, int len)
{
	const struct vgic_register_region *region;

	region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
				       addr - iodev->base_addr);
	if (!region || !check_region(vcpu->kvm, region, addr, len))
		return NULL;

	return region;
}

static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
			     gpa_t addr, u32 *val)
{
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region) {
		*val = 0;
		return 0;
	}

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_read)
		*val = region->uaccess_read(r_vcpu, addr, sizeof(u32));
	else
		*val = region->read(r_vcpu, addr, sizeof(u32));

	return 0;
}

static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
			      gpa_t addr, const u32 *val)
{
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region)
		return 0;

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_write)
		return region->uaccess_write(r_vcpu, addr, sizeof(u32), *val);

	region->write(r_vcpu, addr, sizeof(u32), *val);
	return 0;
}

/*
 * Userland access to VGIC registers.
 */
int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
		 bool is_write, int offset, u32 *val)
{
	if (is_write)
		return vgic_uaccess_write(vcpu, dev, offset, val);
	else
		return vgic_uaccess_read(vcpu, dev, offset, val);
}
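
/*
 * Guest MMIO entry points, called from the KVM MMIO bus. Accesses that hit
 * no known register read as zero, and writes to them are silently ignored.
 */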
static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, int len, void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = 0;

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region) {
		memset(val, 0, len);
		return 0;
	}

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_DIST:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_REDIST:
		data = region->read(iodev->redist_vcpu, addr, len);
		break;
	case IODEV_ITS:
		data = region->its_read(vcpu->kvm, iodev->its, addr, len);
		break;
	}

	vgic_data_host_to_mmio_bus(val, len, data);
	return 0;
}

static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			       gpa_t addr, int len, const void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = vgic_data_mmio_bus_to_host(val, len);

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region)
		return 0;

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_DIST:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_REDIST:
		region->write(iodev->redist_vcpu, addr, len, data);
		break;
	case IODEV_ITS:
		region->its_write(vcpu->kvm, iodev->its, addr, len, data);
		break;
	}

	return 0;
}

const struct kvm_io_device_ops kvm_io_gic_ops = {
	.read = dispatch_mmio_read,
	.write = dispatch_mmio_write,
};
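
/*
 * Register the distributor frame as an MMIO device on the KVM IO bus, using
 * the GICv2 or GICv3 register layout depending on the VGIC type.
 */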
int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
			     enum vgic_type type)
{
	struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
	unsigned int len;

	switch (type) {
	case VGIC_V2:
		len = vgic_v2_init_dist_iodev(io_device);
		break;
	case VGIC_V3:
		len = vgic_v3_init_dist_iodev(io_device);
		break;
	default:
		BUG_ON(1);
	}

	io_device->base_addr = dist_base_address;
	io_device->iodev_type = IODEV_DIST;
	io_device->redist_vcpu = NULL;

	return kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
				       len, &io_device->dev);
}