events_base.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Xen event channels
  4. *
  5. * Xen models interrupts with abstract event channels. Because each
  6. * domain gets 1024 event channels, but NR_IRQ is not that large, we
  7. * must dynamically map irqs<->event channels. The event channels
  8. * interface with the rest of the kernel by defining a xen interrupt
  9. * chip. When an event is received, it is mapped to an irq and sent
  10. * through the normal interrupt processing path.
  11. *
  12. * There are four kinds of events which can be mapped to an event
  13. * channel:
  14. *
  15. * 1. Inter-domain notifications. This includes all the virtual
  16. * device events, since they're driven by front-ends in another domain
  17. * (typically dom0).
  18. * 2. VIRQs, typically used for timers. These are per-cpu events.
  19. * 3. IPIs.
  20. * 4. PIRQs - Hardware interrupts.
  21. *
  22. * Jeremy Fitzhardinge <[email protected]>, XenSource Inc, 2007
  23. */
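/*
 * Illustrative sketch (editorial addition, not part of events_base.c):
 * how a typical frontend driver consumes this layer, using only the
 * declarations from <xen/events.h> and <linux/interrupt.h> that this file
 * itself includes. example_isr(), example_connect() and the evtchn/dev_id
 * arguments are placeholders.
 */
static irqreturn_t example_isr(int irq, void *dev_id)
{
	/* Consume whatever ring work the remote end signalled. */
	return IRQ_HANDLED;
}

static int example_connect(evtchn_port_t evtchn, void *dev_id)
{
	int irq;

	/* Map the Xen event channel onto a normal Linux irq with a handler. */
	irq = bind_evtchn_to_irqhandler(evtchn, example_isr, 0,
					"example", dev_id);
	if (irq < 0)
		return irq;

	/* Kick the remote end (e.g. the backend) through the same irq. */
	notify_remote_via_irq(irq);

	/* Teardown would use unbind_from_irqhandler(irq, dev_id). */
	return irq;
}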
  24. #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
  25. #include <linux/linkage.h>
  26. #include <linux/interrupt.h>
  27. #include <linux/irq.h>
  28. #include <linux/moduleparam.h>
  29. #include <linux/string.h>
  30. #include <linux/memblock.h>
  31. #include <linux/slab.h>
  32. #include <linux/irqnr.h>
  33. #include <linux/pci.h>
  34. #include <linux/rcupdate.h>
  35. #include <linux/spinlock.h>
  36. #include <linux/cpuhotplug.h>
  37. #include <linux/atomic.h>
  38. #include <linux/ktime.h>
  39. #ifdef CONFIG_X86
  40. #include <asm/desc.h>
  41. #include <asm/ptrace.h>
  42. #include <asm/idtentry.h>
  43. #include <asm/irq.h>
  44. #include <asm/io_apic.h>
  45. #include <asm/i8259.h>
  46. #include <asm/xen/cpuid.h>
  47. #include <asm/xen/pci.h>
  48. #endif
  49. #include <asm/sync_bitops.h>
  50. #include <asm/xen/hypercall.h>
  51. #include <asm/xen/hypervisor.h>
  52. #include <xen/page.h>
  53. #include <xen/xen.h>
  54. #include <xen/hvm.h>
  55. #include <xen/xen-ops.h>
  56. #include <xen/events.h>
  57. #include <xen/interface/xen.h>
  58. #include <xen/interface/event_channel.h>
  59. #include <xen/interface/hvm/hvm_op.h>
  60. #include <xen/interface/hvm/params.h>
  61. #include <xen/interface/physdev.h>
  62. #include <xen/interface/sched.h>
  63. #include <xen/interface/vcpu.h>
  64. #include <xen/xenbus.h>
  65. #include <asm/hw_irq.h>
  66. #include "events_internal.h"
  67. #undef MODULE_PARAM_PREFIX
  68. #define MODULE_PARAM_PREFIX "xen."
  69. /* Interrupt types. */
  70. enum xen_irq_type {
  71. IRQT_UNBOUND = 0,
  72. IRQT_PIRQ,
  73. IRQT_VIRQ,
  74. IRQT_IPI,
  75. IRQT_EVTCHN
  76. };
  77. /*
  78. * Packed IRQ information:
  79. * type - enum xen_irq_type
  80. * event channel - irq->event channel mapping
  81. * cpu - cpu this event channel is bound to
  82. * index - type-specific information:
  83. * PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
  84. * guest, or GSI (real passthrough IRQ) of the device.
  85. * VIRQ - virq number
  86. * IPI - IPI vector
  87. * EVTCHN -
  88. */
  89. struct irq_info {
  90. struct list_head list;
  91. struct list_head eoi_list;
  92. struct rcu_work rwork;
  93. short refcnt;
  94. u8 spurious_cnt;
  95. u8 is_accounted;
  96. short type; /* type: IRQT_* */
  97. u8 mask_reason; /* Why is event channel masked */
  98. #define EVT_MASK_REASON_EXPLICIT 0x01
  99. #define EVT_MASK_REASON_TEMPORARY 0x02
  100. #define EVT_MASK_REASON_EOI_PENDING 0x04
  101. u8 is_active; /* Is event just being handled? */
  102. unsigned irq;
  103. evtchn_port_t evtchn; /* event channel */
  104. unsigned short cpu; /* cpu bound */
  105. unsigned short eoi_cpu; /* EOI must happen on this cpu-1 */
  106. unsigned int irq_epoch; /* If eoi_cpu valid: irq_epoch of event */
  107. u64 eoi_time; /* Time in jiffies when to EOI. */
  108. raw_spinlock_t lock;
  109. union {
  110. unsigned short virq;
  111. enum ipi_vector ipi;
  112. struct {
  113. unsigned short pirq;
  114. unsigned short gsi;
  115. unsigned char vector;
  116. unsigned char flags;
  117. uint16_t domid;
  118. } pirq;
  119. struct xenbus_device *interdomain;
  120. } u;
  121. };
  122. #define PIRQ_NEEDS_EOI (1 << 0)
  123. #define PIRQ_SHAREABLE (1 << 1)
  124. #define PIRQ_MSI_GROUP (1 << 2)
  125. static uint __read_mostly event_loop_timeout = 2;
  126. module_param(event_loop_timeout, uint, 0644);
  127. static uint __read_mostly event_eoi_delay = 10;
  128. module_param(event_eoi_delay, uint, 0644);
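/*
 * Editorial note (not in the original source): with MODULE_PARAM_PREFIX set
 * to "xen." above, these tunables are normally given as
 * xen.event_loop_timeout=<jiffies> and xen.event_eoi_delay=<jiffies> on the
 * kernel command line; the 0644 mode also makes them writable at runtime,
 * typically under /sys/module/xen/parameters/.
 */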
  129. const struct evtchn_ops *evtchn_ops;
  130. /*
  131. * This lock protects updates to the following mapping and reference-count
  132. * arrays. The lock does not need to be acquired to read the mapping tables.
  133. */
  134. static DEFINE_MUTEX(irq_mapping_update_lock);
  135. /*
  136. * Lock hierarchy:
  137. *
  138. * irq_mapping_update_lock
  139. * IRQ-desc lock
  140. * percpu eoi_list_lock
  141. * irq_info->lock
  142. */
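/*
 * Illustrative sketch (editorial addition): when more than one of the locks
 * above is needed, they must be taken in the documented order, e.g. the
 * mapping mutex before a per-channel info->lock:
 *
 *	mutex_lock(&irq_mapping_update_lock);
 *	raw_spin_lock_irqsave(&info->lock, flags);
 *	...
 *	raw_spin_unlock_irqrestore(&info->lock, flags);
 *	mutex_unlock(&irq_mapping_update_lock);
 */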
  143. static LIST_HEAD(xen_irq_list_head);
  144. /* IRQ <-> VIRQ mapping. */
  145. static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
  146. /* IRQ <-> IPI mapping */
  147. static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
  148. /* Event channel distribution data */
  149. static atomic_t channels_on_cpu[NR_CPUS];
  150. static int **evtchn_to_irq;
  151. #ifdef CONFIG_X86
  152. static unsigned long *pirq_eoi_map;
  153. #endif
  154. static bool (*pirq_needs_eoi)(unsigned irq);
  155. #define EVTCHN_ROW(e) (e / (PAGE_SIZE/sizeof(**evtchn_to_irq)))
  156. #define EVTCHN_COL(e) (e % (PAGE_SIZE/sizeof(**evtchn_to_irq)))
  157. #define EVTCHN_PER_ROW (PAGE_SIZE / sizeof(**evtchn_to_irq))
  158. /* Xen will never allocate port zero for any purpose. */
  159. #define VALID_EVTCHN(chn) ((chn) != 0)
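/*
 * Worked example (editorial addition, assuming 4 KiB pages and 4-byte int):
 * each row of evtchn_to_irq holds PAGE_SIZE / sizeof(int) = 1024 ports, so
 * EVTCHN_PER_ROW is 1024. Port 2500 then maps to EVTCHN_ROW(2500) == 2 and
 * EVTCHN_COL(2500) == 452, since 2 * 1024 + 452 == 2500.
 */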
  160. static struct irq_info *legacy_info_ptrs[NR_IRQS_LEGACY];
  161. static struct irq_chip xen_dynamic_chip;
  162. static struct irq_chip xen_lateeoi_chip;
  163. static struct irq_chip xen_percpu_chip;
  164. static struct irq_chip xen_pirq_chip;
  165. static void enable_dynirq(struct irq_data *data);
  166. static void disable_dynirq(struct irq_data *data);
  167. static DEFINE_PER_CPU(unsigned int, irq_epoch);
  168. static void clear_evtchn_to_irq_row(int *evtchn_row)
  169. {
  170. unsigned col;
  171. for (col = 0; col < EVTCHN_PER_ROW; col++)
  172. WRITE_ONCE(evtchn_row[col], -1);
  173. }
  174. static void clear_evtchn_to_irq_all(void)
  175. {
  176. unsigned row;
  177. for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) {
  178. if (evtchn_to_irq[row] == NULL)
  179. continue;
  180. clear_evtchn_to_irq_row(evtchn_to_irq[row]);
  181. }
  182. }
  183. static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq)
  184. {
  185. unsigned row;
  186. unsigned col;
  187. int *evtchn_row;
  188. if (evtchn >= xen_evtchn_max_channels())
  189. return -EINVAL;
  190. row = EVTCHN_ROW(evtchn);
  191. col = EVTCHN_COL(evtchn);
  192. if (evtchn_to_irq[row] == NULL) {
  193. /* Unallocated irq entries return -1 anyway */
  194. if (irq == -1)
  195. return 0;
  196. evtchn_row = (int *) __get_free_pages(GFP_KERNEL, 0);
  197. if (evtchn_row == NULL)
  198. return -ENOMEM;
  199. clear_evtchn_to_irq_row(evtchn_row);
  200. /*
  201. * We've prepared an empty row for the mapping. If a different
  202. * thread was faster inserting it, we can drop ours.
  203. */
  204. if (cmpxchg(&evtchn_to_irq[row], NULL, evtchn_row) != NULL)
  205. free_page((unsigned long) evtchn_row);
  206. }
  207. WRITE_ONCE(evtchn_to_irq[row][col], irq);
  208. return 0;
  209. }
  210. int get_evtchn_to_irq(evtchn_port_t evtchn)
  211. {
  212. if (evtchn >= xen_evtchn_max_channels())
  213. return -1;
  214. if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
  215. return -1;
  216. return READ_ONCE(evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]);
  217. }
  218. /* Get info for IRQ */
  219. static struct irq_info *info_for_irq(unsigned irq)
  220. {
  221. if (irq < nr_legacy_irqs())
  222. return legacy_info_ptrs[irq];
  223. else
  224. return irq_get_chip_data(irq);
  225. }
  226. static void set_info_for_irq(unsigned int irq, struct irq_info *info)
  227. {
  228. if (irq < nr_legacy_irqs())
  229. legacy_info_ptrs[irq] = info;
  230. else
  231. irq_set_chip_data(irq, info);
  232. }
  233. /* Per CPU channel accounting */
  234. static void channels_on_cpu_dec(struct irq_info *info)
  235. {
  236. if (!info->is_accounted)
  237. return;
  238. info->is_accounted = 0;
  239. if (WARN_ON_ONCE(info->cpu >= nr_cpu_ids))
  240. return;
  241. WARN_ON_ONCE(!atomic_add_unless(&channels_on_cpu[info->cpu], -1, 0));
  242. }
  243. static void channels_on_cpu_inc(struct irq_info *info)
  244. {
  245. if (WARN_ON_ONCE(info->cpu >= nr_cpu_ids))
  246. return;
  247. if (WARN_ON_ONCE(!atomic_add_unless(&channels_on_cpu[info->cpu], 1,
  248. INT_MAX)))
  249. return;
  250. info->is_accounted = 1;
  251. }
  252. static void delayed_free_irq(struct work_struct *work)
  253. {
  254. struct irq_info *info = container_of(to_rcu_work(work), struct irq_info,
  255. rwork);
  256. unsigned int irq = info->irq;
  257. /* Remove the info pointer only now, with no potential users left. */
  258. set_info_for_irq(irq, NULL);
  259. kfree(info);
  260. /* Legacy IRQ descriptors are managed by the arch. */
  261. if (irq >= nr_legacy_irqs())
  262. irq_free_desc(irq);
  263. }
  264. /* Constructors for packed IRQ information. */
  265. static int xen_irq_info_common_setup(struct irq_info *info,
  266. unsigned irq,
  267. enum xen_irq_type type,
  268. evtchn_port_t evtchn,
  269. unsigned short cpu)
  270. {
  271. int ret;
  272. BUG_ON(info->type != IRQT_UNBOUND && info->type != type);
  273. info->type = type;
  274. info->irq = irq;
  275. info->evtchn = evtchn;
  276. info->cpu = cpu;
  277. info->mask_reason = EVT_MASK_REASON_EXPLICIT;
  278. raw_spin_lock_init(&info->lock);
  279. ret = set_evtchn_to_irq(evtchn, irq);
  280. if (ret < 0)
  281. return ret;
  282. irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);
  283. return xen_evtchn_port_setup(evtchn);
  284. }
  285. static int xen_irq_info_evtchn_setup(unsigned irq,
  286. evtchn_port_t evtchn,
  287. struct xenbus_device *dev)
  288. {
  289. struct irq_info *info = info_for_irq(irq);
  290. int ret;
  291. ret = xen_irq_info_common_setup(info, irq, IRQT_EVTCHN, evtchn, 0);
  292. info->u.interdomain = dev;
  293. if (dev)
  294. atomic_inc(&dev->event_channels);
  295. return ret;
  296. }
  297. static int xen_irq_info_ipi_setup(unsigned cpu,
  298. unsigned irq,
  299. evtchn_port_t evtchn,
  300. enum ipi_vector ipi)
  301. {
  302. struct irq_info *info = info_for_irq(irq);
  303. info->u.ipi = ipi;
  304. per_cpu(ipi_to_irq, cpu)[ipi] = irq;
  305. return xen_irq_info_common_setup(info, irq, IRQT_IPI, evtchn, 0);
  306. }
  307. static int xen_irq_info_virq_setup(unsigned cpu,
  308. unsigned irq,
  309. evtchn_port_t evtchn,
  310. unsigned virq)
  311. {
  312. struct irq_info *info = info_for_irq(irq);
  313. info->u.virq = virq;
  314. per_cpu(virq_to_irq, cpu)[virq] = irq;
  315. return xen_irq_info_common_setup(info, irq, IRQT_VIRQ, evtchn, 0);
  316. }
  317. static int xen_irq_info_pirq_setup(unsigned irq,
  318. evtchn_port_t evtchn,
  319. unsigned pirq,
  320. unsigned gsi,
  321. uint16_t domid,
  322. unsigned char flags)
  323. {
  324. struct irq_info *info = info_for_irq(irq);
  325. info->u.pirq.pirq = pirq;
  326. info->u.pirq.gsi = gsi;
  327. info->u.pirq.domid = domid;
  328. info->u.pirq.flags = flags;
  329. return xen_irq_info_common_setup(info, irq, IRQT_PIRQ, evtchn, 0);
  330. }
  331. static void xen_irq_info_cleanup(struct irq_info *info)
  332. {
  333. set_evtchn_to_irq(info->evtchn, -1);
  334. xen_evtchn_port_remove(info->evtchn, info->cpu);
  335. info->evtchn = 0;
  336. channels_on_cpu_dec(info);
  337. }
  338. /*
  339. * Accessors for packed IRQ information.
  340. */
  341. evtchn_port_t evtchn_from_irq(unsigned irq)
  342. {
  343. const struct irq_info *info = NULL;
  344. if (likely(irq < nr_irqs))
  345. info = info_for_irq(irq);
  346. if (!info)
  347. return 0;
  348. return info->evtchn;
  349. }
  350. unsigned int irq_from_evtchn(evtchn_port_t evtchn)
  351. {
  352. return get_evtchn_to_irq(evtchn);
  353. }
  354. EXPORT_SYMBOL_GPL(irq_from_evtchn);
  355. int irq_from_virq(unsigned int cpu, unsigned int virq)
  356. {
  357. return per_cpu(virq_to_irq, cpu)[virq];
  358. }
  359. static enum ipi_vector ipi_from_irq(unsigned irq)
  360. {
  361. struct irq_info *info = info_for_irq(irq);
  362. BUG_ON(info == NULL);
  363. BUG_ON(info->type != IRQT_IPI);
  364. return info->u.ipi;
  365. }
  366. static unsigned virq_from_irq(unsigned irq)
  367. {
  368. struct irq_info *info = info_for_irq(irq);
  369. BUG_ON(info == NULL);
  370. BUG_ON(info->type != IRQT_VIRQ);
  371. return info->u.virq;
  372. }
  373. static unsigned pirq_from_irq(unsigned irq)
  374. {
  375. struct irq_info *info = info_for_irq(irq);
  376. BUG_ON(info == NULL);
  377. BUG_ON(info->type != IRQT_PIRQ);
  378. return info->u.pirq.pirq;
  379. }
  380. static enum xen_irq_type type_from_irq(unsigned irq)
  381. {
  382. return info_for_irq(irq)->type;
  383. }
  384. static unsigned cpu_from_irq(unsigned irq)
  385. {
  386. return info_for_irq(irq)->cpu;
  387. }
  388. unsigned int cpu_from_evtchn(evtchn_port_t evtchn)
  389. {
  390. int irq = get_evtchn_to_irq(evtchn);
  391. unsigned ret = 0;
  392. if (irq != -1)
  393. ret = cpu_from_irq(irq);
  394. return ret;
  395. }
  396. static void do_mask(struct irq_info *info, u8 reason)
  397. {
  398. unsigned long flags;
  399. raw_spin_lock_irqsave(&info->lock, flags);
  400. if (!info->mask_reason)
  401. mask_evtchn(info->evtchn);
  402. info->mask_reason |= reason;
  403. raw_spin_unlock_irqrestore(&info->lock, flags);
  404. }
  405. static void do_unmask(struct irq_info *info, u8 reason)
  406. {
  407. unsigned long flags;
  408. raw_spin_lock_irqsave(&info->lock, flags);
  409. info->mask_reason &= ~reason;
  410. if (!info->mask_reason)
  411. unmask_evtchn(info->evtchn);
  412. raw_spin_unlock_irqrestore(&info->lock, flags);
  413. }
  414. #ifdef CONFIG_X86
  415. static bool pirq_check_eoi_map(unsigned irq)
  416. {
  417. return test_bit(pirq_from_irq(irq), pirq_eoi_map);
  418. }
  419. #endif
  420. static bool pirq_needs_eoi_flag(unsigned irq)
  421. {
  422. struct irq_info *info = info_for_irq(irq);
  423. BUG_ON(info->type != IRQT_PIRQ);
  424. return info->u.pirq.flags & PIRQ_NEEDS_EOI;
  425. }
  426. static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu,
  427. bool force_affinity)
  428. {
  429. int irq = get_evtchn_to_irq(evtchn);
  430. struct irq_info *info = info_for_irq(irq);
  431. BUG_ON(irq == -1);
  432. if (IS_ENABLED(CONFIG_SMP) && force_affinity) {
  433. struct irq_data *data = irq_get_irq_data(irq);
  434. irq_data_update_affinity(data, cpumask_of(cpu));
  435. irq_data_update_effective_affinity(data, cpumask_of(cpu));
  436. }
  437. xen_evtchn_port_bind_to_cpu(evtchn, cpu, info->cpu);
  438. channels_on_cpu_dec(info);
  439. info->cpu = cpu;
  440. channels_on_cpu_inc(info);
  441. }
  442. /**
  443. * notify_remote_via_irq - send event to remote end of event channel via irq
  444. * @irq: irq of event channel to send event to
  445. *
  446. * Unlike notify_remote_via_evtchn(), this is safe to use across
  447. * save/restore. Notifications on a broken connection are silently
  448. * dropped.
  449. */
  450. void notify_remote_via_irq(int irq)
  451. {
  452. evtchn_port_t evtchn = evtchn_from_irq(irq);
  453. if (VALID_EVTCHN(evtchn))
  454. notify_remote_via_evtchn(evtchn);
  455. }
  456. EXPORT_SYMBOL_GPL(notify_remote_via_irq);
  457. struct lateeoi_work {
  458. struct delayed_work delayed;
  459. spinlock_t eoi_list_lock;
  460. struct list_head eoi_list;
  461. };
  462. static DEFINE_PER_CPU(struct lateeoi_work, lateeoi);
  463. static void lateeoi_list_del(struct irq_info *info)
  464. {
  465. struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu);
  466. unsigned long flags;
  467. spin_lock_irqsave(&eoi->eoi_list_lock, flags);
  468. list_del_init(&info->eoi_list);
  469. spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
  470. }
  471. static void lateeoi_list_add(struct irq_info *info)
  472. {
  473. struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu);
  474. struct irq_info *elem;
  475. u64 now = get_jiffies_64();
  476. unsigned long delay;
  477. unsigned long flags;
  478. if (now < info->eoi_time)
  479. delay = info->eoi_time - now;
  480. else
  481. delay = 1;
  482. spin_lock_irqsave(&eoi->eoi_list_lock, flags);
  483. elem = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
  484. eoi_list);
  485. if (!elem || info->eoi_time < elem->eoi_time) {
  486. list_add(&info->eoi_list, &eoi->eoi_list);
  487. mod_delayed_work_on(info->eoi_cpu, system_wq,
  488. &eoi->delayed, delay);
  489. } else {
  490. list_for_each_entry_reverse(elem, &eoi->eoi_list, eoi_list) {
  491. if (elem->eoi_time <= info->eoi_time)
  492. break;
  493. }
  494. list_add(&info->eoi_list, &elem->eoi_list);
  495. }
  496. spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
  497. }
  498. static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
  499. {
  500. evtchn_port_t evtchn;
  501. unsigned int cpu;
  502. unsigned int delay = 0;
  503. evtchn = info->evtchn;
  504. if (!VALID_EVTCHN(evtchn) || !list_empty(&info->eoi_list))
  505. return;
  506. if (spurious) {
  507. struct xenbus_device *dev = info->u.interdomain;
  508. unsigned int threshold = 1;
  509. if (dev && dev->spurious_threshold)
  510. threshold = dev->spurious_threshold;
  511. if ((1 << info->spurious_cnt) < (HZ << 2)) {
  512. if (info->spurious_cnt != 0xFF)
  513. info->spurious_cnt++;
  514. }
  515. if (info->spurious_cnt > threshold) {
  516. delay = 1 << (info->spurious_cnt - 1 - threshold);
  517. if (delay > HZ)
  518. delay = HZ;
  519. if (!info->eoi_time)
  520. info->eoi_cpu = smp_processor_id();
  521. info->eoi_time = get_jiffies_64() + delay;
  522. if (dev)
  523. atomic_add(delay, &dev->jiffies_eoi_delayed);
  524. }
  525. if (dev)
  526. atomic_inc(&dev->spurious_events);
  527. } else {
  528. info->spurious_cnt = 0;
  529. }
  530. cpu = info->eoi_cpu;
  531. if (info->eoi_time &&
  532. (info->irq_epoch == per_cpu(irq_epoch, cpu) || delay)) {
  533. lateeoi_list_add(info);
  534. return;
  535. }
  536. info->eoi_time = 0;
  537. /* is_active hasn't been reset yet, do it now. */
  538. smp_store_release(&info->is_active, 0);
  539. do_unmask(info, EVT_MASK_REASON_EOI_PENDING);
  540. }
  541. static void xen_irq_lateeoi_worker(struct work_struct *work)
  542. {
  543. struct lateeoi_work *eoi;
  544. struct irq_info *info;
  545. u64 now = get_jiffies_64();
  546. unsigned long flags;
  547. eoi = container_of(to_delayed_work(work), struct lateeoi_work, delayed);
  548. rcu_read_lock();
  549. while (true) {
  550. spin_lock_irqsave(&eoi->eoi_list_lock, flags);
  551. info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
  552. eoi_list);
  553. if (info == NULL)
  554. break;
  555. if (now < info->eoi_time) {
  556. mod_delayed_work_on(info->eoi_cpu, system_wq,
  557. &eoi->delayed,
  558. info->eoi_time - now);
  559. break;
  560. }
  561. list_del_init(&info->eoi_list);
  562. spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
  563. info->eoi_time = 0;
  564. xen_irq_lateeoi_locked(info, false);
  565. }
  566. spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
  567. rcu_read_unlock();
  568. }
  569. static void xen_cpu_init_eoi(unsigned int cpu)
  570. {
  571. struct lateeoi_work *eoi = &per_cpu(lateeoi, cpu);
  572. INIT_DELAYED_WORK(&eoi->delayed, xen_irq_lateeoi_worker);
  573. spin_lock_init(&eoi->eoi_list_lock);
  574. INIT_LIST_HEAD(&eoi->eoi_list);
  575. }
  576. void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags)
  577. {
  578. struct irq_info *info;
  579. rcu_read_lock();
  580. info = info_for_irq(irq);
  581. if (info)
  582. xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS);
  583. rcu_read_unlock();
  584. }
  585. EXPORT_SYMBOL_GPL(xen_irq_lateeoi);
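/*
 * Illustrative sketch (editorial addition, not part of events_base.c): the
 * lateeoi contract. A handler bound through one of the *_lateeoi variants
 * keeps the event masked until it signals completion with xen_irq_lateeoi();
 * passing XEN_EOI_FLAG_SPURIOUS lets the core throttle misbehaving event
 * sources. example_backend_isr() and example_work_pending() are placeholders.
 */
static bool example_work_pending(void *dev_id);		/* placeholder */

static irqreturn_t example_backend_isr(int irq, void *dev_id)
{
	unsigned int eoi_flags = 0;

	if (example_work_pending(dev_id)) {
		/* ... process the request ring here ... */
	} else {
		eoi_flags |= XEN_EOI_FLAG_SPURIOUS;	/* nothing to do */
	}

	xen_irq_lateeoi(irq, eoi_flags);		/* allow further events */
	return IRQ_HANDLED;
}
/*
 * Bound with e.g.:
 *	bind_evtchn_to_irqhandler_lateeoi(evtchn, example_backend_isr, 0,
 *					  "example-backend", dev_id);
 */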
  586. static void xen_irq_init(unsigned irq)
  587. {
  588. struct irq_info *info;
  589. info = kzalloc(sizeof(*info), GFP_KERNEL);
  590. if (info == NULL)
  591. panic("Unable to allocate metadata for IRQ%d\n", irq);
  592. info->type = IRQT_UNBOUND;
  593. info->refcnt = -1;
  594. INIT_RCU_WORK(&info->rwork, delayed_free_irq);
  595. set_info_for_irq(irq, info);
  596. /*
  597. * Interrupt affinity setting can be immediate. No point
  598. * in delaying it until an interrupt is handled.
  599. */
  600. irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
  601. INIT_LIST_HEAD(&info->eoi_list);
  602. list_add_tail(&info->list, &xen_irq_list_head);
  603. }
  604. static int __must_check xen_allocate_irqs_dynamic(int nvec)
  605. {
  606. int i, irq = irq_alloc_descs(-1, 0, nvec, -1);
  607. if (irq >= 0) {
  608. for (i = 0; i < nvec; i++)
  609. xen_irq_init(irq + i);
  610. }
  611. return irq;
  612. }
  613. static inline int __must_check xen_allocate_irq_dynamic(void)
  614. {
  615. return xen_allocate_irqs_dynamic(1);
  616. }
  617. static int __must_check xen_allocate_irq_gsi(unsigned gsi)
  618. {
  619. int irq;
  620. /*
  621. * A PV guest has no concept of a GSI (since it has no ACPI
  622. * nor access to/knowledge of the physical APICs). Therefore
  623. * all IRQs are dynamically allocated from the entire IRQ
  624. * space.
  625. */
  626. if (xen_pv_domain() && !xen_initial_domain())
  627. return xen_allocate_irq_dynamic();
  628. /* Legacy IRQ descriptors are already allocated by the arch. */
  629. if (gsi < nr_legacy_irqs())
  630. irq = gsi;
  631. else
  632. irq = irq_alloc_desc_at(gsi, -1);
  633. xen_irq_init(irq);
  634. return irq;
  635. }
  636. static void xen_free_irq(unsigned irq)
  637. {
  638. struct irq_info *info = info_for_irq(irq);
  639. if (WARN_ON(!info))
  640. return;
  641. if (!list_empty(&info->eoi_list))
  642. lateeoi_list_del(info);
  643. list_del(&info->list);
  644. WARN_ON(info->refcnt > 0);
  645. queue_rcu_work(system_wq, &info->rwork);
  646. }
  647. static void xen_evtchn_close(evtchn_port_t port)
  648. {
  649. struct evtchn_close close;
  650. close.port = port;
  651. if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
  652. BUG();
  653. }
  654. /* Not called for lateeoi events. */
  655. static void event_handler_exit(struct irq_info *info)
  656. {
  657. smp_store_release(&info->is_active, 0);
  658. clear_evtchn(info->evtchn);
  659. }
  660. static void pirq_query_unmask(int irq)
  661. {
  662. struct physdev_irq_status_query irq_status;
  663. struct irq_info *info = info_for_irq(irq);
  664. BUG_ON(info->type != IRQT_PIRQ);
  665. irq_status.irq = pirq_from_irq(irq);
  666. if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
  667. irq_status.flags = 0;
  668. info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
  669. if (irq_status.flags & XENIRQSTAT_needs_eoi)
  670. info->u.pirq.flags |= PIRQ_NEEDS_EOI;
  671. }
  672. static void eoi_pirq(struct irq_data *data)
  673. {
  674. struct irq_info *info = info_for_irq(data->irq);
  675. evtchn_port_t evtchn = info ? info->evtchn : 0;
  676. struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
  677. int rc = 0;
  678. if (!VALID_EVTCHN(evtchn))
  679. return;
  680. event_handler_exit(info);
  681. if (pirq_needs_eoi(data->irq)) {
  682. rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
  683. WARN_ON(rc);
  684. }
  685. }
  686. static void mask_ack_pirq(struct irq_data *data)
  687. {
  688. disable_dynirq(data);
  689. eoi_pirq(data);
  690. }
  691. static unsigned int __startup_pirq(unsigned int irq)
  692. {
  693. struct evtchn_bind_pirq bind_pirq;
  694. struct irq_info *info = info_for_irq(irq);
  695. evtchn_port_t evtchn = evtchn_from_irq(irq);
  696. int rc;
  697. BUG_ON(info->type != IRQT_PIRQ);
  698. if (VALID_EVTCHN(evtchn))
  699. goto out;
  700. bind_pirq.pirq = pirq_from_irq(irq);
  701. /* NB. We are happy to share unless we are probing. */
  702. bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
  703. BIND_PIRQ__WILL_SHARE : 0;
  704. rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
  705. if (rc != 0) {
  706. pr_warn("Failed to obtain physical IRQ %d\n", irq);
  707. return 0;
  708. }
  709. evtchn = bind_pirq.port;
  710. pirq_query_unmask(irq);
  711. rc = set_evtchn_to_irq(evtchn, irq);
  712. if (rc)
  713. goto err;
  714. info->evtchn = evtchn;
  715. bind_evtchn_to_cpu(evtchn, 0, false);
  716. rc = xen_evtchn_port_setup(evtchn);
  717. if (rc)
  718. goto err;
  719. out:
  720. do_unmask(info, EVT_MASK_REASON_EXPLICIT);
  721. eoi_pirq(irq_get_irq_data(irq));
  722. return 0;
  723. err:
  724. pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc);
  725. xen_evtchn_close(evtchn);
  726. return 0;
  727. }
  728. static unsigned int startup_pirq(struct irq_data *data)
  729. {
  730. return __startup_pirq(data->irq);
  731. }
  732. static void shutdown_pirq(struct irq_data *data)
  733. {
  734. unsigned int irq = data->irq;
  735. struct irq_info *info = info_for_irq(irq);
  736. evtchn_port_t evtchn = evtchn_from_irq(irq);
  737. BUG_ON(info->type != IRQT_PIRQ);
  738. if (!VALID_EVTCHN(evtchn))
  739. return;
  740. do_mask(info, EVT_MASK_REASON_EXPLICIT);
  741. xen_evtchn_close(evtchn);
  742. xen_irq_info_cleanup(info);
  743. }
  744. static void enable_pirq(struct irq_data *data)
  745. {
  746. enable_dynirq(data);
  747. }
  748. static void disable_pirq(struct irq_data *data)
  749. {
  750. disable_dynirq(data);
  751. }
  752. int xen_irq_from_gsi(unsigned gsi)
  753. {
  754. struct irq_info *info;
  755. list_for_each_entry(info, &xen_irq_list_head, list) {
  756. if (info->type != IRQT_PIRQ)
  757. continue;
  758. if (info->u.pirq.gsi == gsi)
  759. return info->irq;
  760. }
  761. return -1;
  762. }
  763. EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
  764. static void __unbind_from_irq(unsigned int irq)
  765. {
  766. evtchn_port_t evtchn = evtchn_from_irq(irq);
  767. struct irq_info *info = info_for_irq(irq);
  768. if (info->refcnt > 0) {
  769. info->refcnt--;
  770. if (info->refcnt != 0)
  771. return;
  772. }
  773. if (VALID_EVTCHN(evtchn)) {
  774. unsigned int cpu = cpu_from_irq(irq);
  775. struct xenbus_device *dev;
  776. xen_evtchn_close(evtchn);
  777. switch (type_from_irq(irq)) {
  778. case IRQT_VIRQ:
  779. per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1;
  780. break;
  781. case IRQT_IPI:
  782. per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1;
  783. break;
  784. case IRQT_EVTCHN:
  785. dev = info->u.interdomain;
  786. if (dev)
  787. atomic_dec(&dev->event_channels);
  788. break;
  789. default:
  790. break;
  791. }
  792. xen_irq_info_cleanup(info);
  793. }
  794. xen_free_irq(irq);
  795. }
  796. /*
  797. * Do not make any assumptions regarding the relationship between the
  798. * IRQ number returned here and the Xen pirq argument.
  799. *
  800. * Note: We don't assign an event channel until the irq actually started
  801. * up. Return an existing irq if we've already got one for the gsi.
  802. *
  803. * Shareable implies level triggered, not shareable implies edge
  804. * triggered here.
  805. */
  806. int xen_bind_pirq_gsi_to_irq(unsigned gsi,
  807. unsigned pirq, int shareable, char *name)
  808. {
  809. int irq;
  810. struct physdev_irq irq_op;
  811. int ret;
  812. mutex_lock(&irq_mapping_update_lock);
  813. irq = xen_irq_from_gsi(gsi);
  814. if (irq != -1) {
  815. pr_info("%s: returning irq %d for gsi %u\n",
  816. __func__, irq, gsi);
  817. goto out;
  818. }
  819. irq = xen_allocate_irq_gsi(gsi);
  820. if (irq < 0)
  821. goto out;
  822. irq_op.irq = irq;
  823. irq_op.vector = 0;
  824. /* Only the privileged domain can do this. For non-priv, the pcifront
  825. * driver provides a PCI bus that does the call to do exactly
  826. * this in the priv domain. */
  827. if (xen_initial_domain() &&
  828. HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
  829. xen_free_irq(irq);
  830. irq = -ENOSPC;
  831. goto out;
  832. }
  833. ret = xen_irq_info_pirq_setup(irq, 0, pirq, gsi, DOMID_SELF,
  834. shareable ? PIRQ_SHAREABLE : 0);
  835. if (ret < 0) {
  836. __unbind_from_irq(irq);
  837. irq = ret;
  838. goto out;
  839. }
  840. pirq_query_unmask(irq);
  841. /* We try to use the handler with the appropriate semantic for the
  842. * type of interrupt: if the interrupt is an edge triggered
  843. * interrupt we use handle_edge_irq.
  844. *
  845. * On the other hand if the interrupt is level triggered we use
  846. * handle_fasteoi_irq like the native code does for this kind of
  847. * interrupts.
  848. *
  849. * Depending on the Xen version, pirq_needs_eoi might return true
  850. * not only for level triggered interrupts but for edge triggered
  851. * interrupts too. In any case Xen always honors the eoi mechanism,
  852. * not injecting any more pirqs of the same kind if the first one
  853. * hasn't received an eoi yet. Therefore using the fasteoi handler
  854. * is the right choice either way.
  855. */
  856. if (shareable)
  857. irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
  858. handle_fasteoi_irq, name);
  859. else
  860. irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
  861. handle_edge_irq, name);
  862. out:
  863. mutex_unlock(&irq_mapping_update_lock);
  864. return irq;
  865. }
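/*
 * Illustrative sketch (editorial addition): roughly how a dom0 caller (such
 * as the x86 PCI/ACPI glue) is expected to use the helper above. The gsi,
 * pirq and trigger arguments and the "example-gsi" name are placeholders;
 * afterwards the returned irq is requested like any other Linux irq.
 */
static int example_register_gsi(unsigned int gsi, unsigned int pirq,
				bool level_triggered)
{
	/* Shareable implies level triggered (see the comment above). */
	return xen_bind_pirq_gsi_to_irq(gsi, pirq, level_triggered ? 1 : 0,
					"example-gsi");
}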
  866. #ifdef CONFIG_PCI_MSI
  867. int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
  868. {
  869. int rc;
  870. struct physdev_get_free_pirq op_get_free_pirq;
  871. op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
  872. rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);
  873. WARN_ONCE(rc == -ENOSYS,
  874. "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");
  875. return rc ? -1 : op_get_free_pirq.pirq;
  876. }
  877. int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
  878. int pirq, int nvec, const char *name, domid_t domid)
  879. {
  880. int i, irq, ret;
  881. mutex_lock(&irq_mapping_update_lock);
  882. irq = xen_allocate_irqs_dynamic(nvec);
  883. if (irq < 0)
  884. goto out;
  885. for (i = 0; i < nvec; i++) {
  886. irq_set_chip_and_handler_name(irq + i, &xen_pirq_chip, handle_edge_irq, name);
  887. ret = xen_irq_info_pirq_setup(irq + i, 0, pirq + i, 0, domid,
  888. i == 0 ? 0 : PIRQ_MSI_GROUP);
  889. if (ret < 0)
  890. goto error_irq;
  891. }
  892. ret = irq_set_msi_desc(irq, msidesc);
  893. if (ret < 0)
  894. goto error_irq;
  895. out:
  896. mutex_unlock(&irq_mapping_update_lock);
  897. return irq;
  898. error_irq:
  899. while (nvec--)
  900. __unbind_from_irq(irq + nvec);
  901. mutex_unlock(&irq_mapping_update_lock);
  902. return ret;
  903. }
  904. #endif
  905. int xen_destroy_irq(int irq)
  906. {
  907. struct physdev_unmap_pirq unmap_irq;
  908. struct irq_info *info = info_for_irq(irq);
  909. int rc = -ENOENT;
  910. mutex_lock(&irq_mapping_update_lock);
  911. /*
  912. * If trying to remove a vector in a MSI group different
  913. * than the first one skip the PIRQ unmap unless this vector
  914. * is the first one in the group.
  915. */
  916. if (xen_initial_domain() && !(info->u.pirq.flags & PIRQ_MSI_GROUP)) {
  917. unmap_irq.pirq = info->u.pirq.pirq;
  918. unmap_irq.domid = info->u.pirq.domid;
  919. rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
  920. /* If another domain quits without making the pci_disable_msix
  921. * call, the Xen hypervisor takes care of freeing the PIRQs
  922. * (free_domain_pirqs).
  923. */
  924. if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF))
  925. pr_info("domain %d does not have %d anymore\n",
  926. info->u.pirq.domid, info->u.pirq.pirq);
  927. else if (rc) {
  928. pr_warn("unmap irq failed %d\n", rc);
  929. goto out;
  930. }
  931. }
  932. xen_free_irq(irq);
  933. out:
  934. mutex_unlock(&irq_mapping_update_lock);
  935. return rc;
  936. }
  937. int xen_irq_from_pirq(unsigned pirq)
  938. {
  939. int irq;
  940. struct irq_info *info;
  941. mutex_lock(&irq_mapping_update_lock);
  942. list_for_each_entry(info, &xen_irq_list_head, list) {
  943. if (info->type != IRQT_PIRQ)
  944. continue;
  945. irq = info->irq;
  946. if (info->u.pirq.pirq == pirq)
  947. goto out;
  948. }
  949. irq = -1;
  950. out:
  951. mutex_unlock(&irq_mapping_update_lock);
  952. return irq;
  953. }
  954. int xen_pirq_from_irq(unsigned irq)
  955. {
  956. return pirq_from_irq(irq);
  957. }
  958. EXPORT_SYMBOL_GPL(xen_pirq_from_irq);
  959. static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip,
  960. struct xenbus_device *dev)
  961. {
  962. int irq;
  963. int ret;
  964. if (evtchn >= xen_evtchn_max_channels())
  965. return -ENOMEM;
  966. mutex_lock(&irq_mapping_update_lock);
  967. irq = get_evtchn_to_irq(evtchn);
  968. if (irq == -1) {
  969. irq = xen_allocate_irq_dynamic();
  970. if (irq < 0)
  971. goto out;
  972. irq_set_chip_and_handler_name(irq, chip,
  973. handle_edge_irq, "event");
  974. ret = xen_irq_info_evtchn_setup(irq, evtchn, dev);
  975. if (ret < 0) {
  976. __unbind_from_irq(irq);
  977. irq = ret;
  978. goto out;
  979. }
  980. /*
  981. * New interdomain events are initially bound to vCPU0. This
  982. * is required to setup the event channel in the first
  983. * place and also important for UP guests because the
  984. * affinity setting is not invoked on them so nothing would
  985. * bind the channel.
  986. */
  987. bind_evtchn_to_cpu(evtchn, 0, false);
  988. } else {
  989. struct irq_info *info = info_for_irq(irq);
  990. WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
  991. }
  992. out:
  993. mutex_unlock(&irq_mapping_update_lock);
  994. return irq;
  995. }
  996. int bind_evtchn_to_irq(evtchn_port_t evtchn)
  997. {
  998. return bind_evtchn_to_irq_chip(evtchn, &xen_dynamic_chip, NULL);
  999. }
  1000. EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
  1001. int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn)
  1002. {
  1003. return bind_evtchn_to_irq_chip(evtchn, &xen_lateeoi_chip, NULL);
  1004. }
  1005. EXPORT_SYMBOL_GPL(bind_evtchn_to_irq_lateeoi);
  1006. static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
  1007. {
  1008. struct evtchn_bind_ipi bind_ipi;
  1009. evtchn_port_t evtchn;
  1010. int ret, irq;
  1011. mutex_lock(&irq_mapping_update_lock);
  1012. irq = per_cpu(ipi_to_irq, cpu)[ipi];
  1013. if (irq == -1) {
  1014. irq = xen_allocate_irq_dynamic();
  1015. if (irq < 0)
  1016. goto out;
  1017. irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
  1018. handle_percpu_irq, "ipi");
  1019. bind_ipi.vcpu = xen_vcpu_nr(cpu);
  1020. if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
  1021. &bind_ipi) != 0)
  1022. BUG();
  1023. evtchn = bind_ipi.port;
  1024. ret = xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
  1025. if (ret < 0) {
  1026. __unbind_from_irq(irq);
  1027. irq = ret;
  1028. goto out;
  1029. }
  1030. /*
  1031. * Force the affinity mask to the target CPU so proc shows
  1032. * the correct target.
  1033. */
  1034. bind_evtchn_to_cpu(evtchn, cpu, true);
  1035. } else {
  1036. struct irq_info *info = info_for_irq(irq);
  1037. WARN_ON(info == NULL || info->type != IRQT_IPI);
  1038. }
  1039. out:
  1040. mutex_unlock(&irq_mapping_update_lock);
  1041. return irq;
  1042. }
  1043. static int bind_interdomain_evtchn_to_irq_chip(struct xenbus_device *dev,
  1044. evtchn_port_t remote_port,
  1045. struct irq_chip *chip)
  1046. {
  1047. struct evtchn_bind_interdomain bind_interdomain;
  1048. int err;
  1049. bind_interdomain.remote_dom = dev->otherend_id;
  1050. bind_interdomain.remote_port = remote_port;
  1051. err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
  1052. &bind_interdomain);
  1053. return err ? : bind_evtchn_to_irq_chip(bind_interdomain.local_port,
  1054. chip, dev);
  1055. }
  1056. int bind_interdomain_evtchn_to_irq_lateeoi(struct xenbus_device *dev,
  1057. evtchn_port_t remote_port)
  1058. {
  1059. return bind_interdomain_evtchn_to_irq_chip(dev, remote_port,
  1060. &xen_lateeoi_chip);
  1061. }
  1062. EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq_lateeoi);
  1063. static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn)
  1064. {
  1065. struct evtchn_status status;
  1066. evtchn_port_t port;
  1067. int rc = -ENOENT;
  1068. memset(&status, 0, sizeof(status));
  1069. for (port = 0; port < xen_evtchn_max_channels(); port++) {
  1070. status.dom = DOMID_SELF;
  1071. status.port = port;
  1072. rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
  1073. if (rc < 0)
  1074. continue;
  1075. if (status.status != EVTCHNSTAT_virq)
  1076. continue;
  1077. if (status.u.virq == virq && status.vcpu == xen_vcpu_nr(cpu)) {
  1078. *evtchn = port;
  1079. break;
  1080. }
  1081. }
  1082. return rc;
  1083. }
  1084. /**
  1085. * xen_evtchn_nr_channels - number of usable event channel ports
  1086. *
  1087. * This may be less than the maximum supported by the current
  1088. * hypervisor ABI. Use xen_evtchn_max_channels() for the maximum
  1089. * supported.
  1090. */
  1091. unsigned xen_evtchn_nr_channels(void)
  1092. {
  1093. return evtchn_ops->nr_channels();
  1094. }
  1095. EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels);
  1096. int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
  1097. {
  1098. struct evtchn_bind_virq bind_virq;
  1099. evtchn_port_t evtchn = 0;
  1100. int irq, ret;
  1101. mutex_lock(&irq_mapping_update_lock);
  1102. irq = per_cpu(virq_to_irq, cpu)[virq];
  1103. if (irq == -1) {
  1104. irq = xen_allocate_irq_dynamic();
  1105. if (irq < 0)
  1106. goto out;
  1107. if (percpu)
  1108. irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
  1109. handle_percpu_irq, "virq");
  1110. else
  1111. irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
  1112. handle_edge_irq, "virq");
  1113. bind_virq.virq = virq;
  1114. bind_virq.vcpu = xen_vcpu_nr(cpu);
  1115. ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
  1116. &bind_virq);
  1117. if (ret == 0)
  1118. evtchn = bind_virq.port;
  1119. else {
  1120. if (ret == -EEXIST)
  1121. ret = find_virq(virq, cpu, &evtchn);
  1122. BUG_ON(ret < 0);
  1123. }
  1124. ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
  1125. if (ret < 0) {
  1126. __unbind_from_irq(irq);
  1127. irq = ret;
  1128. goto out;
  1129. }
  1130. /*
  1131. * Force the affinity mask for percpu interrupts so proc
  1132. * shows the correct target.
  1133. */
  1134. bind_evtchn_to_cpu(evtchn, cpu, percpu);
  1135. } else {
  1136. struct irq_info *info = info_for_irq(irq);
  1137. WARN_ON(info == NULL || info->type != IRQT_VIRQ);
  1138. }
  1139. out:
  1140. mutex_unlock(&irq_mapping_update_lock);
  1141. return irq;
  1142. }
  1143. static void unbind_from_irq(unsigned int irq)
  1144. {
  1145. mutex_lock(&irq_mapping_update_lock);
  1146. __unbind_from_irq(irq);
  1147. mutex_unlock(&irq_mapping_update_lock);
  1148. }
  1149. static int bind_evtchn_to_irqhandler_chip(evtchn_port_t evtchn,
  1150. irq_handler_t handler,
  1151. unsigned long irqflags,
  1152. const char *devname, void *dev_id,
  1153. struct irq_chip *chip)
  1154. {
  1155. int irq, retval;
  1156. irq = bind_evtchn_to_irq_chip(evtchn, chip, NULL);
  1157. if (irq < 0)
  1158. return irq;
  1159. retval = request_irq(irq, handler, irqflags, devname, dev_id);
  1160. if (retval != 0) {
  1161. unbind_from_irq(irq);
  1162. return retval;
  1163. }
  1164. return irq;
  1165. }
  1166. int bind_evtchn_to_irqhandler(evtchn_port_t evtchn,
  1167. irq_handler_t handler,
  1168. unsigned long irqflags,
  1169. const char *devname, void *dev_id)
  1170. {
  1171. return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags,
  1172. devname, dev_id,
  1173. &xen_dynamic_chip);
  1174. }
  1175. EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
  1176. int bind_evtchn_to_irqhandler_lateeoi(evtchn_port_t evtchn,
  1177. irq_handler_t handler,
  1178. unsigned long irqflags,
  1179. const char *devname, void *dev_id)
  1180. {
  1181. return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags,
  1182. devname, dev_id,
  1183. &xen_lateeoi_chip);
  1184. }
  1185. EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler_lateeoi);
  1186. static int bind_interdomain_evtchn_to_irqhandler_chip(
  1187. struct xenbus_device *dev, evtchn_port_t remote_port,
  1188. irq_handler_t handler, unsigned long irqflags,
  1189. const char *devname, void *dev_id, struct irq_chip *chip)
  1190. {
  1191. int irq, retval;
  1192. irq = bind_interdomain_evtchn_to_irq_chip(dev, remote_port, chip);
  1193. if (irq < 0)
  1194. return irq;
  1195. retval = request_irq(irq, handler, irqflags, devname, dev_id);
  1196. if (retval != 0) {
  1197. unbind_from_irq(irq);
  1198. return retval;
  1199. }
  1200. return irq;
  1201. }
  1202. int bind_interdomain_evtchn_to_irqhandler_lateeoi(struct xenbus_device *dev,
  1203. evtchn_port_t remote_port,
  1204. irq_handler_t handler,
  1205. unsigned long irqflags,
  1206. const char *devname,
  1207. void *dev_id)
  1208. {
  1209. return bind_interdomain_evtchn_to_irqhandler_chip(dev,
  1210. remote_port, handler, irqflags, devname,
  1211. dev_id, &xen_lateeoi_chip);
  1212. }
  1213. EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler_lateeoi);
  1214. int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
  1215. irq_handler_t handler,
  1216. unsigned long irqflags, const char *devname, void *dev_id)
  1217. {
  1218. int irq, retval;
  1219. irq = bind_virq_to_irq(virq, cpu, irqflags & IRQF_PERCPU);
  1220. if (irq < 0)
  1221. return irq;
  1222. retval = request_irq(irq, handler, irqflags, devname, dev_id);
  1223. if (retval != 0) {
  1224. unbind_from_irq(irq);
  1225. return retval;
  1226. }
  1227. return irq;
  1228. }
  1229. EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
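/*
 * Illustrative sketch (editorial addition): binding a per-cpu VIRQ, as
 * mentioned in the header comment (VIRQs are typically timers). The flags
 * roughly mirror what a timer binding would use; example_virq_isr() and
 * example_bind_timer_virq() are placeholders.
 */
static irqreturn_t example_virq_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_bind_timer_virq(unsigned int cpu)
{
	/* IRQF_PERCPU makes bind_virq_to_irq() pick the per-cpu chip. */
	return bind_virq_to_irqhandler(VIRQ_TIMER, cpu, example_virq_isr,
				       IRQF_PERCPU | IRQF_TIMER |
				       IRQF_NOBALANCING,
				       "example-timer", NULL);
}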
  1230. int bind_ipi_to_irqhandler(enum ipi_vector ipi,
  1231. unsigned int cpu,
  1232. irq_handler_t handler,
  1233. unsigned long irqflags,
  1234. const char *devname,
  1235. void *dev_id)
  1236. {
  1237. int irq, retval;
  1238. irq = bind_ipi_to_irq(ipi, cpu);
  1239. if (irq < 0)
  1240. return irq;
  1241. irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME | IRQF_EARLY_RESUME;
  1242. retval = request_irq(irq, handler, irqflags, devname, dev_id);
  1243. if (retval != 0) {
  1244. unbind_from_irq(irq);
  1245. return retval;
  1246. }
  1247. return irq;
  1248. }
  1249. void unbind_from_irqhandler(unsigned int irq, void *dev_id)
  1250. {
  1251. struct irq_info *info = info_for_irq(irq);
  1252. if (WARN_ON(!info))
  1253. return;
  1254. free_irq(irq, dev_id);
  1255. unbind_from_irq(irq);
  1256. }
  1257. EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
  1258. /**
  1259. * xen_set_irq_priority() - set an event channel priority.
  1260. * @irq:irq bound to an event channel.
  1261. * @priority: priority between XEN_IRQ_PRIORITY_MAX and XEN_IRQ_PRIORITY_MIN.
  1262. */
  1263. int xen_set_irq_priority(unsigned irq, unsigned priority)
  1264. {
  1265. struct evtchn_set_priority set_priority;
  1266. set_priority.port = evtchn_from_irq(irq);
  1267. set_priority.priority = priority;
  1268. return HYPERVISOR_event_channel_op(EVTCHNOP_set_priority,
  1269. &set_priority);
  1270. }
  1271. EXPORT_SYMBOL_GPL(xen_set_irq_priority);
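/*
 * Illustrative sketch (editorial addition): requesting maximum priority for
 * an already bound event channel. XEN_IRQ_PRIORITY_MAX is assumed to come
 * from <xen/events.h>; the hypercall only has an effect when the FIFO event
 * channel ABI is in use.
 */
static void example_boost_irq(unsigned int irq)
{
	if (xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX))
		pr_warn("could not raise priority of irq %u\n", irq);
}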
  1272. int evtchn_make_refcounted(evtchn_port_t evtchn)
  1273. {
  1274. int irq = get_evtchn_to_irq(evtchn);
  1275. struct irq_info *info;
  1276. if (irq == -1)
  1277. return -ENOENT;
  1278. info = info_for_irq(irq);
  1279. if (!info)
  1280. return -ENOENT;
  1281. WARN_ON(info->refcnt != -1);
  1282. info->refcnt = 1;
  1283. return 0;
  1284. }
  1285. EXPORT_SYMBOL_GPL(evtchn_make_refcounted);
  1286. int evtchn_get(evtchn_port_t evtchn)
  1287. {
  1288. int irq;
  1289. struct irq_info *info;
  1290. int err = -ENOENT;
  1291. if (evtchn >= xen_evtchn_max_channels())
  1292. return -EINVAL;
  1293. mutex_lock(&irq_mapping_update_lock);
  1294. irq = get_evtchn_to_irq(evtchn);
  1295. if (irq == -1)
  1296. goto done;
  1297. info = info_for_irq(irq);
  1298. if (!info)
  1299. goto done;
  1300. err = -EINVAL;
  1301. if (info->refcnt <= 0 || info->refcnt == SHRT_MAX)
  1302. goto done;
  1303. info->refcnt++;
  1304. err = 0;
  1305. done:
  1306. mutex_unlock(&irq_mapping_update_lock);
  1307. return err;
  1308. }
  1309. EXPORT_SYMBOL_GPL(evtchn_get);
  1310. void evtchn_put(evtchn_port_t evtchn)
  1311. {
  1312. int irq = get_evtchn_to_irq(evtchn);
  1313. if (WARN_ON(irq == -1))
  1314. return;
  1315. unbind_from_irq(irq);
  1316. }
  1317. EXPORT_SYMBOL_GPL(evtchn_put);
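/*
 * Illustrative sketch (editorial addition): the refcounting scheme used by
 * userspace-facing drivers such as the evtchn device. The owner switches a
 * bound channel to refcounted mode once with evtchn_make_refcounted(); other
 * users then pin the port with evtchn_get()/evtchn_put() around each use.
 * example_pin_port() is a placeholder.
 */
static int example_pin_port(evtchn_port_t evtchn)
{
	int ret;

	ret = evtchn_get(evtchn);	/* fails unless the port is refcounted */
	if (ret)
		return ret;

	/* ... the port cannot be torn down while the reference is held ... */

	evtchn_put(evtchn);		/* last put unbinds the underlying irq */
	return 0;
}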
  1318. void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
  1319. {
  1320. int irq;
  1321. #ifdef CONFIG_X86
  1322. if (unlikely(vector == XEN_NMI_VECTOR)) {
  1323. int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, xen_vcpu_nr(cpu),
  1324. NULL);
  1325. if (rc < 0)
  1326. printk(KERN_WARNING "Sending nmi to CPU%d failed (rc:%d)\n", cpu, rc);
  1327. return;
  1328. }
  1329. #endif
  1330. irq = per_cpu(ipi_to_irq, cpu)[vector];
  1331. BUG_ON(irq < 0);
  1332. notify_remote_via_irq(irq);
  1333. }
  1334. struct evtchn_loop_ctrl {
  1335. ktime_t timeout;
  1336. unsigned count;
  1337. bool defer_eoi;
  1338. };
  1339. void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
  1340. {
  1341. int irq;
  1342. struct irq_info *info;
  1343. struct xenbus_device *dev;
  1344. irq = get_evtchn_to_irq(port);
  1345. if (irq == -1)
  1346. return;
  1347. /*
  1348. * Check for timeout every 256 events.
  1349. * We are setting the timeout value only after the first 256
  1350. * events in order to not hurt the common case of few loop
  1351. * iterations. The 256 is basically an arbitrary value.
  1352. *
  1353. * In case we are hitting the timeout we need to defer all further
  1354. * EOIs in order to ensure to leave the event handling loop rather
  1355. * sooner than later.
  1356. */
  1357. if (!ctrl->defer_eoi && !(++ctrl->count & 0xff)) {
  1358. ktime_t kt = ktime_get();
  1359. if (!ctrl->timeout) {
  1360. kt = ktime_add_ms(kt,
  1361. jiffies_to_msecs(event_loop_timeout));
  1362. ctrl->timeout = kt;
  1363. } else if (kt > ctrl->timeout) {
  1364. ctrl->defer_eoi = true;
  1365. }
  1366. }
  1367. info = info_for_irq(irq);
  1368. if (xchg_acquire(&info->is_active, 1))
  1369. return;
  1370. dev = (info->type == IRQT_EVTCHN) ? info->u.interdomain : NULL;
  1371. if (dev)
  1372. atomic_inc(&dev->events);
  1373. if (ctrl->defer_eoi) {
  1374. info->eoi_cpu = smp_processor_id();
  1375. info->irq_epoch = __this_cpu_read(irq_epoch);
  1376. info->eoi_time = get_jiffies_64() + event_eoi_delay;
  1377. }
  1378. generic_handle_irq(irq);
  1379. }
  1380. int xen_evtchn_do_upcall(void)
  1381. {
  1382. struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
  1383. int ret = vcpu_info->evtchn_upcall_pending ? IRQ_HANDLED : IRQ_NONE;
  1384. int cpu = smp_processor_id();
  1385. struct evtchn_loop_ctrl ctrl = { 0 };
  1386. /*
  1387. * When closing an event channel the associated IRQ must not be freed
  1388. * until all cpus have left the event handling loop. This is ensured
  1389. * by taking the rcu_read_lock() while handling events, as freeing of
  1390. * the IRQ is handled via queue_rcu_work() _after_ closing the event
  1391. * channel.
  1392. */
  1393. rcu_read_lock();
  1394. do {
  1395. vcpu_info->evtchn_upcall_pending = 0;
  1396. xen_evtchn_handle_events(cpu, &ctrl);
  1397. BUG_ON(!irqs_disabled());
  1398. virt_rmb(); /* Hypervisor can set upcall pending. */
  1399. } while (vcpu_info->evtchn_upcall_pending);
  1400. rcu_read_unlock();
  1401. /*
  1402. * Increment irq_epoch only now to defer EOIs only for
  1403. * xen_irq_lateeoi() invocations occurring from inside the loop
  1404. * above.
  1405. */
  1406. __this_cpu_inc(irq_epoch);
  1407. return ret;
  1408. }
  1409. EXPORT_SYMBOL_GPL(xen_evtchn_do_upcall);
/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(evtchn_port_t evtchn, int irq)
{
	struct irq_info *info = info_for_irq(irq);

	if (WARN_ON(!info))
		return;

	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	mutex_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(get_evtchn_to_irq(evtchn) != -1);
	/* Expect irq to have been bound before,
	   so there should be a proper type */
	BUG_ON(info->type == IRQT_UNBOUND);

	(void)xen_irq_info_evtchn_setup(irq, evtchn, NULL);

	mutex_unlock(&irq_mapping_update_lock);

	bind_evtchn_to_cpu(evtchn, info->cpu, false);

	/* Unmask the event channel. */
	enable_irq(irq);
}
/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int xen_rebind_evtchn_to_cpu(struct irq_info *info, unsigned int tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	if (!VALID_EVTCHN(evtchn))
		return -1;

	if (!xen_support_evtchn_rebind())
		return -1;

	/* Send future instances of this interrupt to the target vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = xen_vcpu_nr(tcpu);

	/*
	 * Mask the event while changing the VCPU binding to prevent
	 * it being delivered on an unexpected VCPU.
	 */
	do_mask(info, EVT_MASK_REASON_TEMPORARY);

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu, false);

	do_unmask(info, EVT_MASK_REASON_TEMPORARY);

	return 0;
}
/*
 * Find the CPU within @dest mask which has the least number of channels
 * assigned. This is not precise as the per cpu counts can be modified
 * concurrently.
 */
static unsigned int select_target_cpu(const struct cpumask *dest)
{
	unsigned int cpu, best_cpu = UINT_MAX, minch = UINT_MAX;

	for_each_cpu_and(cpu, dest, cpu_online_mask) {
		unsigned int curch = atomic_read(&channels_on_cpu[cpu]);

		if (curch < minch) {
			minch = curch;
			best_cpu = cpu;
		}
	}

	/*
	 * Catch the unlikely case that dest contains no online CPUs. Can't
	 * recurse.
	 */
	if (best_cpu == UINT_MAX)
		return select_target_cpu(cpu_online_mask);

	return best_cpu;
}
static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
			    bool force)
{
	unsigned int tcpu = select_target_cpu(dest);
	int ret;

	ret = xen_rebind_evtchn_to_cpu(info_for_irq(data->irq), tcpu);
	if (!ret)
		irq_data_update_effective_affinity(data, cpumask_of(tcpu));

	return ret;
}

static void enable_dynirq(struct irq_data *data)
{
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	if (VALID_EVTCHN(evtchn))
		do_unmask(info, EVT_MASK_REASON_EXPLICIT);
}

static void disable_dynirq(struct irq_data *data)
{
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	if (VALID_EVTCHN(evtchn))
		do_mask(info, EVT_MASK_REASON_EXPLICIT);
}

static void ack_dynirq(struct irq_data *data)
{
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	if (VALID_EVTCHN(evtchn))
		event_handler_exit(info);
}

static void mask_ack_dynirq(struct irq_data *data)
{
	disable_dynirq(data);
	ack_dynirq(data);
}

static void lateeoi_ack_dynirq(struct irq_data *data)
{
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	if (VALID_EVTCHN(evtchn)) {
		do_mask(info, EVT_MASK_REASON_EOI_PENDING);
		/*
		 * Don't call event_handler_exit().
		 * Need to keep is_active non-zero in order to ignore re-raised
		 * events after cpu affinity changes while a lateeoi is pending.
		 */
		clear_evtchn(evtchn);
	}
}

static void lateeoi_mask_ack_dynirq(struct irq_data *data)
{
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	if (VALID_EVTCHN(evtchn)) {
		do_mask(info, EVT_MASK_REASON_EXPLICIT);
		event_handler_exit(info);
	}
}

static int retrigger_dynirq(struct irq_data *data)
{
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	if (!VALID_EVTCHN(evtchn))
		return 0;

	do_mask(info, EVT_MASK_REASON_TEMPORARY);
	set_evtchn(evtchn);
	do_unmask(info, EVT_MASK_REASON_TEMPORARY);

	return 1;
}
static void restore_pirqs(void)
{
	int pirq, rc, irq, gsi;
	struct physdev_map_pirq map_irq;
	struct irq_info *info;

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;

		pirq = info->u.pirq.pirq;
		gsi = info->u.pirq.gsi;
		irq = info->irq;

		/* save/restore of PT devices doesn't work, so at this point the
		   only devices present are GSI based emulated devices */
		if (!gsi)
			continue;

		map_irq.domid = DOMID_SELF;
		map_irq.type = MAP_PIRQ_TYPE_GSI;
		map_irq.index = gsi;
		map_irq.pirq = pirq;

		rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
		if (rc) {
			pr_warn("xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
				gsi, irq, pirq, rc);
			xen_free_irq(irq);
			continue;
		}

		printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);

		__startup_pirq(irq);
	}
}
static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	evtchn_port_t evtchn;
	int virq, irq;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = xen_vcpu_nr(cpu);
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		(void)xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
		/* The affinity mask is still valid */
		bind_evtchn_to_cpu(evtchn, cpu, false);
	}
}

static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	evtchn_port_t evtchn;
	int ipi, irq;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = xen_vcpu_nr(cpu);
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		(void)xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
		/* The affinity mask is still valid */
		bind_evtchn_to_cpu(evtchn, cpu, false);
	}
}
/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	struct irq_info *info = info_for_irq(irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	if (VALID_EVTCHN(evtchn))
		event_handler_exit(info);
}
EXPORT_SYMBOL(xen_clear_irq_pending);

void xen_set_irq_pending(int irq)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}

/* Poll waiting for an irq to become pending with timeout.  In the usual case,
 * the irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq_timeout(int irq, u64 timeout)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = timeout;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}
EXPORT_SYMBOL(xen_poll_irq_timeout);

/* Poll waiting for an irq to become pending.  In the usual case, the
 * irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	xen_poll_irq_timeout(irq, 0 /* no timeout */);
}
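/*
 * Minimal sketch of the intended poll pattern (the Xen pv spinlock code uses
 * the same sequence): clear any stale pending state, re-check the wakeup
 * condition, and only then block in the hypervisor. The condition callback
 * below is hypothetical.
 */
static void __maybe_unused example_poll_until(int irq, bool (*done)(void *arg),
					      void *arg)
{
	while (!done(arg)) {
		xen_clear_irq_pending(irq);
		if (done(arg))
			break;
		/* Returns once the bound event channel becomes pending. */
		xen_poll_irq(irq);
	}
}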
/* Check whether the IRQ line is shared with other guests. */
int xen_test_irq_shared(int irq)
{
	struct irq_info *info = info_for_irq(irq);
	struct physdev_irq_status_query irq_status;

	if (WARN_ON(!info))
		return -ENOENT;

	irq_status.irq = info->u.pirq.pirq;

	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		return 0;

	return !(irq_status.flags & XENIRQSTAT_shared);
}
EXPORT_SYMBOL_GPL(xen_test_irq_shared);

void xen_irq_resume(void)
{
	unsigned int cpu;
	struct irq_info *info;

	/* New event-channel space is not 'live' yet. */
	xen_evtchn_resume();

	/* No IRQ <-> event-channel mappings. */
	list_for_each_entry(info, &xen_irq_list_head, list) {
		/* Zap event-channel binding */
		info->evtchn = 0;
		/* Adjust accounting */
		channels_on_cpu_dec(info);
	}

	clear_evtchn_to_irq_all();

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}

	restore_pirqs();
}
static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name = "xen-dyn",

	.irq_disable = disable_dynirq,
	.irq_mask = disable_dynirq,
	.irq_unmask = enable_dynirq,

	.irq_ack = ack_dynirq,
	.irq_mask_ack = mask_ack_dynirq,

	.irq_set_affinity = set_affinity_irq,
	.irq_retrigger = retrigger_dynirq,
};

static struct irq_chip xen_lateeoi_chip __read_mostly = {
	/* The chip name needs to contain "xen-dyn" for irqbalance to work. */
	.name = "xen-dyn-lateeoi",

	.irq_disable = disable_dynirq,
	.irq_mask = disable_dynirq,
	.irq_unmask = enable_dynirq,

	.irq_ack = lateeoi_ack_dynirq,
	.irq_mask_ack = lateeoi_mask_ack_dynirq,

	.irq_set_affinity = set_affinity_irq,
	.irq_retrigger = retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
	.name = "xen-pirq",

	.irq_startup = startup_pirq,
	.irq_shutdown = shutdown_pirq,
	.irq_enable = enable_pirq,
	.irq_disable = disable_pirq,

	.irq_mask = disable_dynirq,
	.irq_unmask = enable_dynirq,

	.irq_ack = eoi_pirq,
	.irq_eoi = eoi_pirq,
	.irq_mask_ack = mask_ack_pirq,

	.irq_set_affinity = set_affinity_irq,
	.irq_retrigger = retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
	.name = "xen-percpu",

	.irq_disable = disable_dynirq,
	.irq_mask = disable_dynirq,
	.irq_unmask = enable_dynirq,

	.irq_ack = ack_dynirq,
};
#ifdef CONFIG_X86
#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_setup_callback_vector(void)
{
	uint64_t callback_via;

	if (xen_have_vector_callback) {
		callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
		if (xen_set_callback_via(callback_via)) {
			pr_err("Request for Xen HVM callback vector failed\n");
			xen_have_vector_callback = false;
		}
	}
}
/*
 * Set up per-vCPU vector-type callbacks. If this setup is unavailable,
 * fall back to the global vector-type callback.
 */
static __init void xen_init_setup_upcall_vector(void)
{
	if (!xen_have_vector_callback)
		return;

	if ((cpuid_eax(xen_cpuid_base() + 4) & XEN_HVM_CPUID_UPCALL_VECTOR) &&
	    !xen_set_upcall_vector(0))
		xen_percpu_upcall = true;
	else if (xen_feature(XENFEAT_hvm_callback_vector))
		xen_setup_callback_vector();
	else
		xen_have_vector_callback = false;
}

int xen_set_upcall_vector(unsigned int cpu)
{
	int rc;
	xen_hvm_evtchn_upcall_vector_t op = {
		.vector = HYPERVISOR_CALLBACK_VECTOR,
		.vcpu = per_cpu(xen_vcpu_id, cpu),
	};

	rc = HYPERVISOR_hvm_op(HVMOP_set_evtchn_upcall_vector, &op);
	if (rc)
		return rc;

	/* Trick toolstack to think we are enlightened. */
	if (!cpu)
		rc = xen_set_callback_via(1);

	return rc;
}
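/*
 * Minimal sketch of how the per-vCPU upcall vector is expected to be
 * reprogrammed when a CPU comes online, assuming the percpu mode was chosen
 * in xen_init_setup_upcall_vector(); the real hook lives in the x86 Xen HVM
 * bringup code and the callback name here is hypothetical.
 */
static int __maybe_unused example_upcall_cpu_online(unsigned int cpu)
{
	if (!xen_percpu_upcall)
		return 0;

	return xen_set_upcall_vector(cpu);
}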
static __init void xen_alloc_callback_vector(void)
{
	if (!xen_have_vector_callback)
		return;

	pr_info("Xen HVM callback vector for event delivery is enabled\n");
	alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_xen_hvm_callback);
}
#else
void xen_setup_callback_vector(void) {}
static inline void xen_init_setup_upcall_vector(void) {}
int xen_set_upcall_vector(unsigned int cpu) { return 0; }
static inline void xen_alloc_callback_vector(void) {}
#endif /* CONFIG_XEN_PVHVM */
#endif /* CONFIG_X86 */
bool xen_fifo_events = true;
module_param_named(fifo_events, xen_fifo_events, bool, 0);

static int xen_evtchn_cpu_prepare(unsigned int cpu)
{
	int ret = 0;

	xen_cpu_init_eoi(cpu);

	if (evtchn_ops->percpu_init)
		ret = evtchn_ops->percpu_init(cpu);

	return ret;
}

static int xen_evtchn_cpu_dead(unsigned int cpu)
{
	int ret = 0;

	if (evtchn_ops->percpu_deinit)
		ret = evtchn_ops->percpu_deinit(cpu);

	return ret;
}

void __init xen_init_IRQ(void)
{
	int ret = -EINVAL;
	evtchn_port_t evtchn;

	if (xen_fifo_events)
		ret = xen_evtchn_fifo_init();
	if (ret < 0) {
		xen_evtchn_2l_init();
		xen_fifo_events = false;
	}

	xen_cpu_init_eoi(smp_processor_id());

	cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE,
				  "xen/evtchn:prepare",
				  xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead);

	evtchn_to_irq = kcalloc(EVTCHN_ROW(xen_evtchn_max_channels()),
				sizeof(*evtchn_to_irq), GFP_KERNEL);
	BUG_ON(!evtchn_to_irq);

	/* No event channels are 'live' right now. */
	for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++)
		mask_evtchn(evtchn);

	pirq_needs_eoi = pirq_needs_eoi_flag;

#ifdef CONFIG_X86
	if (xen_pv_domain()) {
		if (xen_initial_domain())
			pci_xen_initial_domain();
	}
	xen_init_setup_upcall_vector();
	xen_alloc_callback_vector();

	if (xen_hvm_domain()) {
		native_init_IRQ();
		/* pci_xen_hvm_init must be called after native_init_IRQ so that
		 * __acpi_register_gsi can point at the right function */
		pci_xen_hvm_init();
	} else {
		int rc;
		struct physdev_pirq_eoi_gmfn eoi_gmfn;

		pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
		eoi_gmfn.gmfn = virt_to_gfn(pirq_eoi_map);
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
		if (rc != 0) {
			free_page((unsigned long) pirq_eoi_map);
			pirq_eoi_map = NULL;
		} else
			pirq_needs_eoi = pirq_check_eoi_map;
	}
#endif
}
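/*
 * Minimal end-to-end sketch of the classic (non-lateeoi) event-channel API
 * exported by this file: bind an already-allocated interdomain port to a
 * handler, notify the remote end, then tear the binding down again. The
 * handler, device name and lifecycle helper below are hypothetical.
 */
static irqreturn_t example_evtchn_handler(int irq, void *dev_id)
{
	/* The event has already been acked/masked as needed by the xen-dyn chip. */
	return IRQ_HANDLED;
}

static int __maybe_unused example_evtchn_lifecycle(evtchn_port_t port)
{
	int irq;

	irq = bind_evtchn_to_irqhandler(port, example_evtchn_handler, 0,
					"example-frontend", NULL);
	if (irq < 0)
		return irq;

	/* Kick the other end of the channel. */
	notify_remote_via_irq(irq);

	unbind_from_irqhandler(irq, NULL);
	return 0;
}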