ioapic.c

/*
 * Copyright (C) 2001 MandrakeSoft S.A.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * MandrakeSoft S.A.
 * 43, rue d'Aboukir
 * 75002 Paris - France
 * http://www.linux-mandrake.com/
 * http://www.mandrakesoft.com/
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Yunhong Jiang <[email protected]>
 * Yaozu (Eddie) Dong <[email protected]>
 * Based on Xen 3.1 code.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/nospec.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/current.h>
#include <trace/events/kvm.h>

#include "ioapic.h"
#include "lapic.h"
#include "irq.h"

static int ioapic_service(struct kvm_ioapic *vioapic, int irq,
                          bool line_status);
static void kvm_ioapic_update_eoi_one(struct kvm_vcpu *vcpu,
                                      struct kvm_ioapic *ioapic,
                                      int trigger_mode,
                                      int pin);
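
/*
 * The IOAPIC exposes an indirect register window: the guest writes a
 * register index to IOREGSEL and then accesses the selected register
 * through IOWIN.  Indices 0x10 and up hold the 64-bit redirection
 * table entries, two 32-bit halves per pin.
 */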
static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic)
{
        unsigned long result = 0;

        switch (ioapic->ioregsel) {
        case IOAPIC_REG_VERSION:
                result = ((((IOAPIC_NUM_PINS - 1) & 0xff) << 16)
                          | (IOAPIC_VERSION_ID & 0xff));
                break;

        case IOAPIC_REG_APIC_ID:
        case IOAPIC_REG_ARB_ID:
                result = ((ioapic->id & 0xf) << 24);
                break;

        default:
        {
                u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
                u64 redir_content = ~0ULL;

                if (redir_index < IOAPIC_NUM_PINS) {
                        u32 index = array_index_nospec(
                                redir_index, IOAPIC_NUM_PINS);

                        redir_content = ioapic->redirtbl[index].bits;
                }

                result = (ioapic->ioregsel & 0x1) ?
                        (redir_content >> 32) & 0xffffffff :
                        redir_content & 0xffffffff;
                break;
        }
        }

        return result;
}
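
/*
 * The RTC interrupt (RTC_GSI) is edge-triggered, but its EOIs are tracked
 * anyway: userspace wants to know whether an RTC interrupt was coalesced,
 * because lost RTC ticks cause time drift in guests (notably Windows).
 * rtc_status records which vCPUs were targeted and how many EOIs are
 * still outstanding.
 */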
static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
{
        ioapic->rtc_status.pending_eoi = 0;
        bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_IDS);
}

static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);

static void rtc_status_pending_eoi_check_valid(struct kvm_ioapic *ioapic)
{
        if (WARN_ON(ioapic->rtc_status.pending_eoi < 0))
                kvm_rtc_eoi_tracking_restore_all(ioapic);
}

static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
{
        bool new_val, old_val;
        struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
        struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
        union kvm_ioapic_redirect_entry *e;

        e = &ioapic->redirtbl[RTC_GSI];
        if (!kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
                                 e->fields.dest_id,
                                 kvm_lapic_irq_dest_mode(!!e->fields.dest_mode)))
                return;

        new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector);
        old_val = test_bit(vcpu->vcpu_id, dest_map->map);

        if (new_val == old_val)
                return;

        if (new_val) {
                __set_bit(vcpu->vcpu_id, dest_map->map);
                dest_map->vectors[vcpu->vcpu_id] = e->fields.vector;
                ioapic->rtc_status.pending_eoi++;
        } else {
                __clear_bit(vcpu->vcpu_id, dest_map->map);
                ioapic->rtc_status.pending_eoi--;
                rtc_status_pending_eoi_check_valid(ioapic);
        }
}

void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
{
        struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;

        spin_lock(&ioapic->lock);
        __rtc_irq_eoi_tracking_restore_one(vcpu);
        spin_unlock(&ioapic->lock);
}

static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic)
{
        struct kvm_vcpu *vcpu;
        unsigned long i;

        if (RTC_GSI >= IOAPIC_NUM_PINS)
                return;

        rtc_irq_eoi_tracking_reset(ioapic);
        kvm_for_each_vcpu(i, vcpu, ioapic->kvm)
                __rtc_irq_eoi_tracking_restore_one(vcpu);
}

static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu,
                        int vector)
{
        struct dest_map *dest_map = &ioapic->rtc_status.dest_map;

        /* RTC special handling */
        if (test_bit(vcpu->vcpu_id, dest_map->map) &&
            (vector == dest_map->vectors[vcpu->vcpu_id]) &&
            (test_and_clear_bit(vcpu->vcpu_id,
                                ioapic->rtc_status.dest_map.map))) {
                --ioapic->rtc_status.pending_eoi;
                rtc_status_pending_eoi_check_valid(ioapic);
        }
}

static bool rtc_irq_check_coalesced(struct kvm_ioapic *ioapic)
{
        if (ioapic->rtc_status.pending_eoi > 0)
                return true; /* coalesced */

        return false;
}

static void ioapic_lazy_update_eoi(struct kvm_ioapic *ioapic, int irq)
{
        unsigned long i;
        struct kvm_vcpu *vcpu;
        union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];

        kvm_for_each_vcpu(i, vcpu, ioapic->kvm) {
                if (!kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
                                         entry->fields.dest_id,
                                         entry->fields.dest_mode) ||
                    kvm_apic_pending_eoi(vcpu, entry->fields.vector))
                        continue;

                /*
                 * This vCPU matches the destination and no longer has a
                 * pending EOI for the vector in its LAPIC, so record the
                 * EOI for this vector now.
                 */
                rtc_irq_eoi(ioapic, vcpu, entry->fields.vector);
                break;
        }
}

static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq,
                          int irq_level, bool line_status)
{
        union kvm_ioapic_redirect_entry entry;
        u32 mask = 1 << irq;
        u32 old_irr;
        int edge, ret;

        entry = ioapic->redirtbl[irq];
        edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);

        if (!irq_level) {
                ioapic->irr &= ~mask;
                ret = 1;
                goto out;
        }

        /*
         * AMD SVM AVIC accelerates the EOI write iff the interrupt is
         * edge-triggered, in which case the in-kernel IOAPIC will not be
         * able to receive the EOI.  In this case, we do a lazy update of
         * the pending EOI when trying to set the IOAPIC irq.
         */
        if (edge && kvm_apicv_activated(ioapic->kvm))
                ioapic_lazy_update_eoi(ioapic, irq);

        /*
         * Return 0 for coalesced interrupts; for edge-triggered interrupts,
         * this only happens if a previous edge has not been delivered due
         * to masking.  For level interrupts, the remote_irr field tells
         * us if the interrupt is waiting for an EOI.
         *
         * RTC is special: it is edge-triggered, but userspace likes to know
         * if it has been already ack-ed via EOI because coalesced RTC
         * interrupts lead to time drift in Windows guests.  So we track
         * EOI manually for the RTC interrupt.
         */
        if (irq == RTC_GSI && line_status &&
            rtc_irq_check_coalesced(ioapic)) {
                ret = 0;
                goto out;
        }

        old_irr = ioapic->irr;
        ioapic->irr |= mask;
        if (edge) {
                ioapic->irr_delivered &= ~mask;
                if (old_irr == ioapic->irr) {
                        ret = 0;
                        goto out;
                }
        }

        ret = ioapic_service(ioapic, irq, line_status);

out:
        trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
        return ret;
}
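
/*
 * Replay all pending IRR bits, e.g. after userspace restores IOAPIC
 * state; the RTC EOI tracking is rebuilt from scratch around the replay.
 */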
static void kvm_ioapic_inject_all(struct kvm_ioapic *ioapic, unsigned long irr)
{
        u32 idx;

        rtc_irq_eoi_tracking_reset(ioapic);
        for_each_set_bit(idx, &irr, IOAPIC_NUM_PINS)
                ioapic_set_irq(ioapic, idx, 1, true);

        kvm_rtc_eoi_tracking_restore_all(ioapic);
}
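
/*
 * Recompute which vectors this vCPU must hand to the in-kernel IOAPIC on
 * EOI: level-triggered pins, pins with ack notifiers, and the RTC pin all
 * need the EOI write to be observed here.
 */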
void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors)
{
        struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
        struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
        union kvm_ioapic_redirect_entry *e;
        int index;

        spin_lock(&ioapic->lock);

        /* Make sure we see any missing RTC EOI */
        if (test_bit(vcpu->vcpu_id, dest_map->map))
                __set_bit(dest_map->vectors[vcpu->vcpu_id],
                          ioapic_handled_vectors);

        for (index = 0; index < IOAPIC_NUM_PINS; index++) {
                e = &ioapic->redirtbl[index];
                if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
                    kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index) ||
                    index == RTC_GSI) {
                        u16 dm = kvm_lapic_irq_dest_mode(!!e->fields.dest_mode);

                        if (kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
                                                e->fields.dest_id, dm) ||
                            kvm_apic_pending_eoi(vcpu, e->fields.vector))
                                __set_bit(e->fields.vector,
                                          ioapic_handled_vectors);
                }
        }
        spin_unlock(&ioapic->lock);
}

void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
        if (!ioapic_in_kernel(kvm))
                return;
        kvm_make_scan_ioapic_request(kvm);
}

static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
{
        unsigned index;
        bool mask_before, mask_after;
        union kvm_ioapic_redirect_entry *e;
        int old_remote_irr, old_delivery_status, old_dest_id, old_dest_mode;
        DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);

        switch (ioapic->ioregsel) {
        case IOAPIC_REG_VERSION:
                /* Writes are ignored. */
                break;

        case IOAPIC_REG_APIC_ID:
                ioapic->id = (val >> 24) & 0xf;
                break;

        case IOAPIC_REG_ARB_ID:
                break;

        default:
                index = (ioapic->ioregsel - 0x10) >> 1;

                if (index >= IOAPIC_NUM_PINS)
                        return;
                index = array_index_nospec(index, IOAPIC_NUM_PINS);
                e = &ioapic->redirtbl[index];
                mask_before = e->fields.mask;
                /* Preserve read-only fields */
                old_remote_irr = e->fields.remote_irr;
                old_delivery_status = e->fields.delivery_status;
                old_dest_id = e->fields.dest_id;
                old_dest_mode = e->fields.dest_mode;
                if (ioapic->ioregsel & 1) {
                        e->bits &= 0xffffffff;
                        e->bits |= (u64) val << 32;
                } else {
                        e->bits &= ~0xffffffffULL;
                        e->bits |= (u32) val;
                }
                e->fields.remote_irr = old_remote_irr;
                e->fields.delivery_status = old_delivery_status;

                /*
                 * Some OSes (Linux, Xen) assume that Remote IRR bit will
                 * be cleared by IOAPIC hardware when the entry is configured
                 * as edge-triggered. This behavior is used to simulate an
                 * explicit EOI on IOAPICs that don't have the EOI register.
                 */
                if (e->fields.trig_mode == IOAPIC_EDGE_TRIG)
                        e->fields.remote_irr = 0;

                mask_after = e->fields.mask;
                if (mask_before != mask_after)
                        kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
                if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
                    && ioapic->irr & (1 << index))
                        ioapic_service(ioapic, index, false);
                if (e->fields.delivery_mode == APIC_DM_FIXED) {
                        struct kvm_lapic_irq irq;

                        irq.vector = e->fields.vector;
                        irq.delivery_mode = e->fields.delivery_mode << 8;
                        irq.dest_mode =
                                kvm_lapic_irq_dest_mode(!!e->fields.dest_mode);
                        irq.level = false;
                        irq.trig_mode = e->fields.trig_mode;
                        irq.shorthand = APIC_DEST_NOSHORT;
                        irq.dest_id = e->fields.dest_id;
                        irq.msi_redir_hint = false;
                        bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
                        kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
                                                 vcpu_bitmap);
                        if (old_dest_mode != e->fields.dest_mode ||
                            old_dest_id != e->fields.dest_id) {
                                /*
                                 * Update vcpu_bitmap with vcpus specified in
                                 * the previous request as well. This is done to
                                 * keep ioapic_handled_vectors synchronized.
                                 */
                                irq.dest_id = old_dest_id;
                                irq.dest_mode =
                                        kvm_lapic_irq_dest_mode(
                                                !!old_dest_mode);
                                kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
                                                         vcpu_bitmap);
                        }
                        kvm_make_scan_ioapic_request_mask(ioapic->kvm,
                                                          vcpu_bitmap);
                } else {
                        kvm_make_scan_ioapic_request(ioapic->kvm);
                }
                break;
        }
}
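
/*
 * Deliver the interrupt described by a redirection entry to the local
 * APIC(s).  Returns -1 if the entry is masked or, for a level-triggered
 * pin, still awaiting an EOI; otherwise the result of the delivery.
 */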
static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status)
{
        union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
        struct kvm_lapic_irq irqe;
        int ret;

        if (entry->fields.mask ||
            (entry->fields.trig_mode == IOAPIC_LEVEL_TRIG &&
             entry->fields.remote_irr))
                return -1;

        irqe.dest_id = entry->fields.dest_id;
        irqe.vector = entry->fields.vector;
        irqe.dest_mode = kvm_lapic_irq_dest_mode(!!entry->fields.dest_mode);
        irqe.trig_mode = entry->fields.trig_mode;
        irqe.delivery_mode = entry->fields.delivery_mode << 8;
        irqe.level = 1;
        irqe.shorthand = APIC_DEST_NOSHORT;
        irqe.msi_redir_hint = false;

        if (irqe.trig_mode == IOAPIC_EDGE_TRIG)
                ioapic->irr_delivered |= 1 << irq;

        if (irq == RTC_GSI && line_status) {
                /*
                 * pending_eoi cannot ever become negative (see
                 * rtc_status_pending_eoi_check_valid) and the caller
                 * ensures that it is only called if it is zero, namely
                 * if rtc_irq_check_coalesced returns false.
                 */
                BUG_ON(ioapic->rtc_status.pending_eoi != 0);
                ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
                                               &ioapic->rtc_status.dest_map);
                ioapic->rtc_status.pending_eoi = (ret < 0 ? 0 : ret);
        } else
                ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);

        if (ret && irqe.trig_mode == IOAPIC_LEVEL_TRIG)
                entry->fields.remote_irr = 1;

        return ret;
}

int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
                       int level, bool line_status)
{
        int ret, irq_level;

        BUG_ON(irq < 0 || irq >= IOAPIC_NUM_PINS);

        spin_lock(&ioapic->lock);
        irq_level = __kvm_irq_line_state(&ioapic->irq_states[irq],
                                         irq_source_id, level);
        ret = ioapic_set_irq(ioapic, irq, irq_level, line_status);
        spin_unlock(&ioapic->lock);

        return ret;
}

void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id)
{
        int i;

        spin_lock(&ioapic->lock);
        for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++)
                __clear_bit(irq_source_id, &ioapic->irq_states[i]);
        spin_unlock(&ioapic->lock);
}
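
/*
 * Delayed work used to throttle an interrupt storm: re-deliver any
 * still-asserted level-triggered interrupts whose Remote IRR has been
 * cleared, shortly after the EOI (HZ / 100) instead of immediately.
 */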
static void kvm_ioapic_eoi_inject_work(struct work_struct *work)
{
        int i;
        struct kvm_ioapic *ioapic = container_of(work, struct kvm_ioapic,
                                                 eoi_inject.work);
        spin_lock(&ioapic->lock);
        for (i = 0; i < IOAPIC_NUM_PINS; i++) {
                union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];

                if (ent->fields.trig_mode != IOAPIC_LEVEL_TRIG)
                        continue;

                if (ioapic->irr & (1 << i) && !ent->fields.remote_irr)
                        ioapic_service(ioapic, i, false);
        }
        spin_unlock(&ioapic->lock);
}

#define IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT 10000

static void kvm_ioapic_update_eoi_one(struct kvm_vcpu *vcpu,
                                      struct kvm_ioapic *ioapic,
                                      int trigger_mode,
                                      int pin)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[pin];

        /*
         * We drop the lock while calling the ack notifiers because ack
         * notifier callbacks for assigned devices call into the IOAPIC
         * recursively.  Since remote_irr is cleared only after the
         * notifiers are called, a vector delivered while the lock is
         * dropped will be put into the IRR and delivered once the ack
         * notifier returns.
         */
        spin_unlock(&ioapic->lock);
        kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, pin);
        spin_lock(&ioapic->lock);

        if (trigger_mode != IOAPIC_LEVEL_TRIG ||
            kvm_lapic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)
                return;

        ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
        ent->fields.remote_irr = 0;
        if (!ent->fields.mask && (ioapic->irr & (1 << pin))) {
                ++ioapic->irq_eoi[pin];
                if (ioapic->irq_eoi[pin] == IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT) {
                        /*
                         * Real hardware does not deliver the interrupt
                         * immediately during eoi broadcast, and this
                         * lets a buggy guest make slow progress
                         * even if it does not correctly handle a
                         * level-triggered interrupt.  Emulate this
                         * behavior if we detect an interrupt storm.
                         */
                        schedule_delayed_work(&ioapic->eoi_inject, HZ / 100);
                        ioapic->irq_eoi[pin] = 0;
                        trace_kvm_ioapic_delayed_eoi_inj(ent->bits);
                } else {
                        ioapic_service(ioapic, pin, false);
                }
        } else {
                ioapic->irq_eoi[pin] = 0;
        }
}

void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode)
{
        int i;
        struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;

        spin_lock(&ioapic->lock);
        rtc_irq_eoi(ioapic, vcpu, vector);
        for (i = 0; i < IOAPIC_NUM_PINS; i++) {
                union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];

                if (ent->fields.vector != vector)
                        continue;
                kvm_ioapic_update_eoi_one(vcpu, ioapic, trigger_mode, i);
        }
        spin_unlock(&ioapic->lock);
}

static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev)
{
        return container_of(dev, struct kvm_ioapic, dev);
}

static inline int ioapic_in_range(struct kvm_ioapic *ioapic, gpa_t addr)
{
        return ((addr >= ioapic->base_address &&
                 (addr < ioapic->base_address + IOAPIC_MEM_LENGTH)));
}

static int ioapic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
                            gpa_t addr, int len, void *val)
{
        struct kvm_ioapic *ioapic = to_ioapic(this);
        u32 result;

        if (!ioapic_in_range(ioapic, addr))
                return -EOPNOTSUPP;

        ASSERT(!(addr & 0xf)); /* check alignment */

        addr &= 0xff;
        spin_lock(&ioapic->lock);
        switch (addr) {
        case IOAPIC_REG_SELECT:
                result = ioapic->ioregsel;
                break;

        case IOAPIC_REG_WINDOW:
                result = ioapic_read_indirect(ioapic);
                break;

        default:
                result = 0;
                break;
        }
        spin_unlock(&ioapic->lock);

        switch (len) {
        case 8:
                *(u64 *) val = result;
                break;
        case 1:
        case 2:
        case 4:
                memcpy(val, (char *)&result, len);
                break;
        default:
                printk(KERN_WARNING "ioapic: wrong length %d\n", len);
        }
        return 0;
}

static int ioapic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
                             gpa_t addr, int len, const void *val)
{
        struct kvm_ioapic *ioapic = to_ioapic(this);
        u32 data;

        if (!ioapic_in_range(ioapic, addr))
                return -EOPNOTSUPP;

        ASSERT(!(addr & 0xf)); /* check alignment */

        switch (len) {
        case 8:
        case 4:
                data = *(u32 *) val;
                break;
        case 2:
                data = *(u16 *) val;
                break;
        case 1:
                data = *(u8 *) val;
                break;
        default:
                printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
                return 0;
        }

        addr &= 0xff;
        spin_lock(&ioapic->lock);
        switch (addr) {
        case IOAPIC_REG_SELECT:
                ioapic->ioregsel = data & 0xFF; /* 8-bit register */
                break;

        case IOAPIC_REG_WINDOW:
                ioapic_write_indirect(ioapic, data);
                break;

        default:
                break;
        }
        spin_unlock(&ioapic->lock);
        return 0;
}

static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
{
        int i;

        cancel_delayed_work_sync(&ioapic->eoi_inject);
        for (i = 0; i < IOAPIC_NUM_PINS; i++)
                ioapic->redirtbl[i].fields.mask = 1;
        ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS;
        ioapic->ioregsel = 0;
        ioapic->irr = 0;
        ioapic->irr_delivered = 0;
        ioapic->id = 0;
        memset(ioapic->irq_eoi, 0x00, sizeof(ioapic->irq_eoi));
        rtc_irq_eoi_tracking_reset(ioapic);
}

static const struct kvm_io_device_ops ioapic_mmio_ops = {
        .read     = ioapic_mmio_read,
        .write    = ioapic_mmio_write,
};

int kvm_ioapic_init(struct kvm *kvm)
{
        struct kvm_ioapic *ioapic;
        int ret;

        ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL_ACCOUNT);
        if (!ioapic)
                return -ENOMEM;
        spin_lock_init(&ioapic->lock);
        INIT_DELAYED_WORK(&ioapic->eoi_inject, kvm_ioapic_eoi_inject_work);
        kvm->arch.vioapic = ioapic;
        kvm_ioapic_reset(ioapic);
        kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
        ioapic->kvm = kvm;
        mutex_lock(&kvm->slots_lock);
        ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, ioapic->base_address,
                                      IOAPIC_MEM_LENGTH, &ioapic->dev);
        mutex_unlock(&kvm->slots_lock);
        if (ret < 0) {
                kvm->arch.vioapic = NULL;
                kfree(ioapic);
        }

        return ret;
}

void kvm_ioapic_destroy(struct kvm *kvm)
{
        struct kvm_ioapic *ioapic = kvm->arch.vioapic;

        if (!ioapic)
                return;

        cancel_delayed_work_sync(&ioapic->eoi_inject);
        mutex_lock(&kvm->slots_lock);
        kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
        mutex_unlock(&kvm->slots_lock);
        kvm->arch.vioapic = NULL;
        kfree(ioapic);
}
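
/*
 * When saving IOAPIC state, bits for edge-triggered interrupts that have
 * already been delivered are cleared from the reported IRR; otherwise a
 * later restore would spuriously re-inject them via kvm_ioapic_inject_all().
 */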
void kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
        struct kvm_ioapic *ioapic = kvm->arch.vioapic;

        spin_lock(&ioapic->lock);
        memcpy(state, ioapic, sizeof(struct kvm_ioapic_state));
        state->irr &= ~ioapic->irr_delivered;
        spin_unlock(&ioapic->lock);
}

void kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
        struct kvm_ioapic *ioapic = kvm->arch.vioapic;

        spin_lock(&ioapic->lock);
        memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
        ioapic->irr = 0;
        ioapic->irr_delivered = 0;
        kvm_make_scan_ioapic_request(kvm);
        kvm_ioapic_inject_all(ioapic, state->irr);
        spin_unlock(&ioapic->lock);
}