// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright The Asahi Linux Contributors
 *
 * Based on irq-lpc32xx:
 *   Copyright 2015-2016 Vladimir Zapolskiy <[email protected]>
 * Based on irq-bcm2836:
 *   Copyright 2015 Broadcom
 */

/*
 * AIC is a fairly simple interrupt controller with the following features:
 *
 * - 896 level-triggered hardware IRQs
 *   - Single mask bit per IRQ
 *   - Per-IRQ affinity setting
 *   - Automatic masking on event delivery (auto-ack)
 *   - Software triggering (ORed with hw line)
 * - 2 per-CPU IPIs (meant as "self" and "other", but they are
 *   interchangeable if not symmetric)
 * - Automatic prioritization (single event/ack register per CPU, lower IRQs =
 *   higher priority)
 * - Automatic masking on ack
 * - Default "this CPU" register view and explicit per-CPU views
 *
 * In addition, this driver also handles FIQs, as these are routed to the same
 * IRQ vector. These are used for Fast IPIs, the ARMv8 timer IRQs, and
 * performance counters (TODO).
 *
 * Implementation notes:
 *
 * - This driver creates two IRQ domains, one for HW IRQs and internal FIQs,
 *   and one for IPIs.
 * - Since Linux needs more than 2 IPIs, we implement a software IRQ controller
 *   and funnel all IPIs into one per-CPU IPI (the second "self" IPI is unused).
 * - FIQ hwirq numbers are assigned after true hwirqs, and are per-cpu.
 * - DT bindings use 3-cell form (like GIC):
 *   - <0 nr flags> - hwirq #nr
 *   - <1 nr flags> - FIQ #nr
 *     - nr=0  Physical HV timer
 *     - nr=1  Virtual HV timer
 *     - nr=2  Physical guest timer
 *     - nr=3  Virtual guest timer
 */
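
/*
 * Purely illustrative example of the 3-cell form described above (the hwirq
 * number is made up, not taken from any real SoC): a consumer node would use
 * the constants from <dt-bindings/interrupt-controller/apple-aic.h>, e.g.
 *
 *   interrupts = <AIC_IRQ 0x2a IRQ_TYPE_LEVEL_HIGH>,
 *                <AIC_FIQ AIC_TMR_GUEST_VIRT IRQ_TYPE_LEVEL_HIGH>;
 */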

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/cpuhotplug.h>
#include <linux/io.h>
#include <linux/irqchip.h>
#include <linux/irqchip/arm-vgic-info.h>
#include <linux/irqdomain.h>
#include <linux/jump_label.h>
#include <linux/limits.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <asm/apple_m1_pmu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

#include <dt-bindings/interrupt-controller/apple-aic.h>

/*
 * AIC v1 registers (MMIO)
 */

#define AIC_INFO		0x0004
#define AIC_INFO_NR_IRQ		GENMASK(15, 0)

#define AIC_CONFIG		0x0010

#define AIC_WHOAMI		0x2000
#define AIC_EVENT		0x2004
#define AIC_EVENT_DIE		GENMASK(31, 24)
#define AIC_EVENT_TYPE		GENMASK(23, 16)
#define AIC_EVENT_NUM		GENMASK(15, 0)

#define AIC_EVENT_TYPE_FIQ	0 /* Software use */
#define AIC_EVENT_TYPE_IRQ	1
#define AIC_EVENT_TYPE_IPI	4
#define AIC_EVENT_IPI_OTHER	1
#define AIC_EVENT_IPI_SELF	2

#define AIC_IPI_SEND		0x2008
#define AIC_IPI_ACK		0x200c
#define AIC_IPI_MASK_SET	0x2024
#define AIC_IPI_MASK_CLR	0x2028

#define AIC_IPI_SEND_CPU(cpu)	BIT(cpu)

#define AIC_IPI_OTHER		BIT(0)
#define AIC_IPI_SELF		BIT(31)

#define AIC_TARGET_CPU		0x3000

#define AIC_CPU_IPI_SET(cpu)	(0x5008 + ((cpu) << 7))
#define AIC_CPU_IPI_CLR(cpu)	(0x500c + ((cpu) << 7))
#define AIC_CPU_IPI_MASK_SET(cpu)	(0x5024 + ((cpu) << 7))
#define AIC_CPU_IPI_MASK_CLR(cpu)	(0x5028 + ((cpu) << 7))

#define AIC_MAX_IRQ		0x400

/*
 * AIC v2 registers (MMIO)
 */

#define AIC2_VERSION		0x0000
#define AIC2_VERSION_VER	GENMASK(7, 0)

#define AIC2_INFO1		0x0004
#define AIC2_INFO1_NR_IRQ	GENMASK(15, 0)
#define AIC2_INFO1_LAST_DIE	GENMASK(27, 24)

#define AIC2_INFO2		0x0008

#define AIC2_INFO3		0x000c
#define AIC2_INFO3_MAX_IRQ	GENMASK(15, 0)
#define AIC2_INFO3_MAX_DIE	GENMASK(27, 24)

#define AIC2_RESET		0x0010
#define AIC2_RESET_RESET	BIT(0)

#define AIC2_CONFIG		0x0014
#define AIC2_CONFIG_ENABLE	BIT(0)
#define AIC2_CONFIG_PREFER_PCPU	BIT(28)

#define AIC2_TIMEOUT		0x0028
#define AIC2_CLUSTER_PRIO	0x0030
#define AIC2_DELAY_GROUPS	0x0100

#define AIC2_IRQ_CFG		0x2000

/*
 * AIC2 registers are laid out like this, starting at AIC2_IRQ_CFG:
 *
 * Repeat for each die:
 *   IRQ_CFG: u32 * MAX_IRQS
 *   SW_SET: u32 * (MAX_IRQS / 32)
 *   SW_CLR: u32 * (MAX_IRQS / 32)
 *   MASK_SET: u32 * (MAX_IRQS / 32)
 *   MASK_CLR: u32 * (MAX_IRQS / 32)
 *   HW_STATE: u32 * (MAX_IRQS / 32)
 *
 * This is followed by a set of event registers, each 16K page aligned.
 * The first one is the AP event register we will use. Unfortunately,
 * the actual implemented die count is not specified anywhere in the
 * capability registers, so we have to explicitly specify the event
 * register as a second reg entry in the device tree to remain
 * forward-compatible.
 */

#define AIC2_IRQ_CFG_TARGET	GENMASK(3, 0)
#define AIC2_IRQ_CFG_DELAY_IDX	GENMASK(7, 5)

#define MASK_REG(x)		(4 * ((x) >> 5))
#define MASK_BIT(x)		BIT((x) & GENMASK(4, 0))
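
/*
 * Worked example (for illustration only): hw IRQ 70 lives in the third
 * 32-bit mask word, so MASK_REG(70) = 4 * (70 >> 5) = 8 bytes into the
 * MASK_SET/MASK_CLR block, and MASK_BIT(70) = BIT(70 & 0x1f) = BIT(6).
 */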

/*
 * IMP-DEF sysregs that control FIQ sources
 */

/* IPI request registers */
#define SYS_IMP_APL_IPI_RR_LOCAL_EL1	sys_reg(3, 5, 15, 0, 0)
#define SYS_IMP_APL_IPI_RR_GLOBAL_EL1	sys_reg(3, 5, 15, 0, 1)
#define IPI_RR_CPU			GENMASK(7, 0)
/* Cluster only used for the GLOBAL register */
#define IPI_RR_CLUSTER			GENMASK(23, 16)
#define IPI_RR_TYPE			GENMASK(29, 28)
#define IPI_RR_IMMEDIATE		0
#define IPI_RR_RETRACT			1
#define IPI_RR_DEFERRED			2
#define IPI_RR_NOWAKE			3

/* IPI status register */
#define SYS_IMP_APL_IPI_SR_EL1		sys_reg(3, 5, 15, 1, 1)
#define IPI_SR_PENDING			BIT(0)

/* Guest timer FIQ enable register */
#define SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2	sys_reg(3, 5, 15, 1, 3)
#define VM_TMR_FIQ_ENABLE_V		BIT(0)
#define VM_TMR_FIQ_ENABLE_P		BIT(1)

/* Deferred IPI countdown register */
#define SYS_IMP_APL_IPI_CR_EL1		sys_reg(3, 5, 15, 3, 1)

/* Uncore PMC control register */
#define SYS_IMP_APL_UPMCR0_EL1		sys_reg(3, 7, 15, 0, 4)
#define UPMCR0_IMODE			GENMASK(18, 16)
#define UPMCR0_IMODE_OFF		0
#define UPMCR0_IMODE_AIC		2
#define UPMCR0_IMODE_HALT		3
#define UPMCR0_IMODE_FIQ		4

/* Uncore PMC status register */
#define SYS_IMP_APL_UPMSR_EL1		sys_reg(3, 7, 15, 6, 4)
#define UPMSR_IACT			BIT(0)

/* MPIDR fields */
#define MPIDR_CPU(x)			MPIDR_AFFINITY_LEVEL(x, 0)
#define MPIDR_CLUSTER(x)		MPIDR_AFFINITY_LEVEL(x, 1)

#define AIC_IRQ_HWIRQ(die, irq)	(FIELD_PREP(AIC_EVENT_DIE, die) | \
				 FIELD_PREP(AIC_EVENT_TYPE, AIC_EVENT_TYPE_IRQ) | \
				 FIELD_PREP(AIC_EVENT_NUM, irq))
#define AIC_FIQ_HWIRQ(x)	(FIELD_PREP(AIC_EVENT_TYPE, AIC_EVENT_TYPE_FIQ) | \
				 FIELD_PREP(AIC_EVENT_NUM, x))
#define AIC_HWIRQ_IRQ(x)	FIELD_GET(AIC_EVENT_NUM, x)
#define AIC_HWIRQ_DIE(x)	FIELD_GET(AIC_EVENT_DIE, x)
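
/*
 * Worked example (for illustration only): AIC_IRQ_HWIRQ(1, 42) packs die 1
 * into bits [31:24], event type AIC_EVENT_TYPE_IRQ (1) into bits [23:16] and
 * IRQ number 42 into bits [15:0], yielding hwirq 0x0101002a.
 */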

#define AIC_NR_FIQ		6
#define AIC_NR_SWIPI		32

/*
 * FIQ hwirq index definitions: FIQ sources use the DT binding defines
 * directly, except that timers are special. At the irqchip level, the
 * two timer types are represented by their access method: _EL0 registers
 * or _EL02 registers. In the DT binding, the timers are represented
 * by their purpose (HV or guest). This mapping is for when the kernel is
 * running at EL2 (with VHE). When the kernel is running at EL1, the
 * mapping differs and aic_irq_domain_translate() performs the remapping.
 */

#define AIC_TMR_EL0_PHYS	AIC_TMR_HV_PHYS
#define AIC_TMR_EL0_VIRT	AIC_TMR_HV_VIRT
#define AIC_TMR_EL02_PHYS	AIC_TMR_GUEST_PHYS
#define AIC_TMR_EL02_VIRT	AIC_TMR_GUEST_VIRT

static DEFINE_STATIC_KEY_TRUE(use_fast_ipi);

struct aic_info {
	int version;

	/* Register offsets */
	u32 event;
	u32 target_cpu;
	u32 irq_cfg;
	u32 sw_set;
	u32 sw_clr;
	u32 mask_set;
	u32 mask_clr;

	u32 die_stride;

	/* Features */
	bool fast_ipi;
};

static const struct aic_info aic1_info = {
	.version	= 1,

	.event		= AIC_EVENT,
	.target_cpu	= AIC_TARGET_CPU,
};

static const struct aic_info aic1_fipi_info = {
	.version	= 1,

	.event		= AIC_EVENT,
	.target_cpu	= AIC_TARGET_CPU,

	.fast_ipi	= true,
};

static const struct aic_info aic2_info = {
	.version	= 2,

	.irq_cfg	= AIC2_IRQ_CFG,

	.fast_ipi	= true,
};

static const struct of_device_id aic_info_match[] = {
	{
		.compatible = "apple,t8103-aic",
		.data = &aic1_fipi_info,
	},
	{
		.compatible = "apple,aic",
		.data = &aic1_info,
	},
	{
		.compatible = "apple,aic2",
		.data = &aic2_info,
	},
	{}
};

struct aic_irq_chip {
	void __iomem *base;
	void __iomem *event;
	struct irq_domain *hw_domain;
	struct irq_domain *ipi_domain;
	struct {
		cpumask_t aff;
	} *fiq_aff[AIC_NR_FIQ];

	int nr_irq;
	int max_irq;
	int nr_die;
	int max_die;

	struct aic_info info;
};
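
/*
 * Note: only the event/target_cpu/irq_cfg offsets in aic_irq_chip::info come
 * straight from the static aic*_info tables above; the SW_SET/SW_CLR/
 * MASK_SET/MASK_CLR offsets and die_stride are filled in at probe time by
 * aic_of_ic_init(), based on the discovered max_irq.
 */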

static DEFINE_PER_CPU(uint32_t, aic_fiq_unmasked);

static DEFINE_PER_CPU(atomic_t, aic_vipi_flag);
static DEFINE_PER_CPU(atomic_t, aic_vipi_enable);

static struct aic_irq_chip *aic_irqc;

static void aic_handle_ipi(struct pt_regs *regs);

static u32 aic_ic_read(struct aic_irq_chip *ic, u32 reg)
{
	return readl_relaxed(ic->base + reg);
}

static void aic_ic_write(struct aic_irq_chip *ic, u32 reg, u32 val)
{
	writel_relaxed(val, ic->base + reg);
}

/*
 * IRQ irqchip
 */

static void aic_irq_mask(struct irq_data *d)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);

	u32 off = AIC_HWIRQ_DIE(hwirq) * ic->info.die_stride;
	u32 irq = AIC_HWIRQ_IRQ(hwirq);

	aic_ic_write(ic, ic->info.mask_set + off + MASK_REG(irq), MASK_BIT(irq));
}

static void aic_irq_unmask(struct irq_data *d)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);

	u32 off = AIC_HWIRQ_DIE(hwirq) * ic->info.die_stride;
	u32 irq = AIC_HWIRQ_IRQ(hwirq);

	aic_ic_write(ic, ic->info.mask_clr + off + MASK_REG(irq), MASK_BIT(irq));
}

static void aic_irq_eoi(struct irq_data *d)
{
	/*
	 * Reading the interrupt reason automatically acknowledges and masks
	 * the IRQ, so we just unmask it here if needed.
	 */
	if (!irqd_irq_masked(d))
		aic_irq_unmask(d);
}

static void __exception_irq_entry aic_handle_irq(struct pt_regs *regs)
{
	struct aic_irq_chip *ic = aic_irqc;
	u32 event, type, irq;

	do {
		/*
		 * We cannot use a relaxed read here, as reads from DMA buffers
		 * need to be ordered after the IRQ fires.
		 */
		event = readl(ic->event + ic->info.event);
		type = FIELD_GET(AIC_EVENT_TYPE, event);
		irq = FIELD_GET(AIC_EVENT_NUM, event);

		if (type == AIC_EVENT_TYPE_IRQ)
			generic_handle_domain_irq(aic_irqc->hw_domain, event);
		else if (type == AIC_EVENT_TYPE_IPI && irq == 1)
			aic_handle_ipi(regs);
		else if (event != 0)
			pr_err_ratelimited("Unknown IRQ event %d, %d\n", type, irq);
	} while (event);

	/*
	 * vGIC maintenance interrupts end up here too, so we need to check
	 * for them separately. This should never trigger if KVM is working
	 * properly, because it will have already taken care of clearing it
	 * on guest exit before this handler runs.
	 */
	if (is_kernel_in_hyp_mode() && (read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EN) &&
	    read_sysreg_s(SYS_ICH_MISR_EL2) != 0) {
		pr_err_ratelimited("vGIC IRQ fired and not handled by KVM, disabling.\n");
		sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EN, 0);
	}
}

static int aic_irq_set_affinity(struct irq_data *d,
				const struct cpumask *mask_val, bool force)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
	int cpu;

	BUG_ON(!ic->info.target_cpu);

	if (force)
		cpu = cpumask_first(mask_val);
	else
		cpu = cpumask_any_and(mask_val, cpu_online_mask);

	aic_ic_write(ic, ic->info.target_cpu + AIC_HWIRQ_IRQ(hwirq) * 4, BIT(cpu));
	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK;
}

static int aic_irq_set_type(struct irq_data *d, unsigned int type)
{
	/*
	 * Some IRQs (e.g. MSIs) implicitly have edge semantics, and we don't
	 * have a way to find out the type of any given IRQ, so just allow both.
	 */
	return (type == IRQ_TYPE_LEVEL_HIGH || type == IRQ_TYPE_EDGE_RISING) ? 0 : -EINVAL;
}

static struct irq_chip aic_chip = {
	.name = "AIC",
	.irq_mask = aic_irq_mask,
	.irq_unmask = aic_irq_unmask,
	.irq_eoi = aic_irq_eoi,
	.irq_set_affinity = aic_irq_set_affinity,
	.irq_set_type = aic_irq_set_type,
};

static struct irq_chip aic2_chip = {
	.name = "AIC2",
	.irq_mask = aic_irq_mask,
	.irq_unmask = aic_irq_unmask,
	.irq_eoi = aic_irq_eoi,
	.irq_set_type = aic_irq_set_type,
};
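
/*
 * Note that aic2_chip has no .irq_set_affinity: this driver only programs
 * per-IRQ TARGET_CPU routing on AIC1 (ic->info.target_cpu is left at 0 for
 * AIC2, and aic_irq_set_affinity() would BUG() on it), so on AIC2 the
 * controller's own IRQ distribution is left untouched.
 */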

/*
 * FIQ irqchip
 */

static unsigned long aic_fiq_get_idx(struct irq_data *d)
{
	return AIC_HWIRQ_IRQ(irqd_to_hwirq(d));
}

static void aic_fiq_set_mask(struct irq_data *d)
{
	/* Only the guest timers have real mask bits, unfortunately. */
	switch (aic_fiq_get_idx(d)) {
	case AIC_TMR_EL02_PHYS:
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, VM_TMR_FIQ_ENABLE_P, 0);
		isb();
		break;
	case AIC_TMR_EL02_VIRT:
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, VM_TMR_FIQ_ENABLE_V, 0);
		isb();
		break;
	default:
		break;
	}
}

static void aic_fiq_clear_mask(struct irq_data *d)
{
	switch (aic_fiq_get_idx(d)) {
	case AIC_TMR_EL02_PHYS:
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, 0, VM_TMR_FIQ_ENABLE_P);
		isb();
		break;
	case AIC_TMR_EL02_VIRT:
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, 0, VM_TMR_FIQ_ENABLE_V);
		isb();
		break;
	default:
		break;
	}
}

static void aic_fiq_mask(struct irq_data *d)
{
	aic_fiq_set_mask(d);
	__this_cpu_and(aic_fiq_unmasked, ~BIT(aic_fiq_get_idx(d)));
}

static void aic_fiq_unmask(struct irq_data *d)
{
	aic_fiq_clear_mask(d);
	__this_cpu_or(aic_fiq_unmasked, BIT(aic_fiq_get_idx(d)));
}

static void aic_fiq_eoi(struct irq_data *d)
{
	/* We mask to ack (where we can), so we need to unmask at EOI. */
	if (__this_cpu_read(aic_fiq_unmasked) & BIT(aic_fiq_get_idx(d)))
		aic_fiq_clear_mask(d);
}
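
/*
 * A timer is considered to be firing when it is enabled, its interrupt is
 * not masked (IMASK clear), and its interrupt status bit is set, i.e. when
 * the architectural condition for asserting the timer interrupt is met.
 */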

#define TIMER_FIRING(x)							\
	(((x) & (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_MASK |	\
		 ARCH_TIMER_CTRL_IT_STAT)) ==				\
	 (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT))

static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
{
	/*
	 * It would be really nice if we had a system register that lets us get
	 * the FIQ source state without having to peek down into sources...
	 * but such a register does not seem to exist.
	 *
	 * So, we have these potential sources to test for:
	 *  - Fast IPIs (not yet used)
	 *  - The 4 timers (CNTP, CNTV for each of HV and guest)
	 *  - Per-core PMCs (not yet supported)
	 *  - Per-cluster uncore PMCs (not yet supported)
	 *
	 * Since not dealing with any of these results in a FIQ storm,
	 * we check for everything here, even things we don't support yet.
	 */

	if (read_sysreg_s(SYS_IMP_APL_IPI_SR_EL1) & IPI_SR_PENDING) {
		if (static_branch_likely(&use_fast_ipi)) {
			aic_handle_ipi(regs);
		} else {
			pr_err_ratelimited("Fast IPI fired. Acking.\n");
			write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
		}
	}

	if (TIMER_FIRING(read_sysreg(cntp_ctl_el0)))
		generic_handle_domain_irq(aic_irqc->hw_domain,
					  AIC_FIQ_HWIRQ(AIC_TMR_EL0_PHYS));

	if (TIMER_FIRING(read_sysreg(cntv_ctl_el0)))
		generic_handle_domain_irq(aic_irqc->hw_domain,
					  AIC_FIQ_HWIRQ(AIC_TMR_EL0_VIRT));

	if (is_kernel_in_hyp_mode()) {
		uint64_t enabled = read_sysreg_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2);

		if ((enabled & VM_TMR_FIQ_ENABLE_P) &&
		    TIMER_FIRING(read_sysreg_s(SYS_CNTP_CTL_EL02)))
			generic_handle_domain_irq(aic_irqc->hw_domain,
						  AIC_FIQ_HWIRQ(AIC_TMR_EL02_PHYS));

		if ((enabled & VM_TMR_FIQ_ENABLE_V) &&
		    TIMER_FIRING(read_sysreg_s(SYS_CNTV_CTL_EL02)))
			generic_handle_domain_irq(aic_irqc->hw_domain,
						  AIC_FIQ_HWIRQ(AIC_TMR_EL02_VIRT));
	}

	if (read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & PMCR0_IACT) {
		int irq;

		if (cpumask_test_cpu(smp_processor_id(),
				     &aic_irqc->fiq_aff[AIC_CPU_PMU_P]->aff))
			irq = AIC_CPU_PMU_P;
		else
			irq = AIC_CPU_PMU_E;
		generic_handle_domain_irq(aic_irqc->hw_domain,
					  AIC_FIQ_HWIRQ(irq));
	}

	if (FIELD_GET(UPMCR0_IMODE, read_sysreg_s(SYS_IMP_APL_UPMCR0_EL1)) == UPMCR0_IMODE_FIQ &&
	    (read_sysreg_s(SYS_IMP_APL_UPMSR_EL1) & UPMSR_IACT)) {
		/* Same story with uncore PMCs */
		pr_err_ratelimited("Uncore PMC FIQ fired. Masking.\n");
		sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE,
				   FIELD_PREP(UPMCR0_IMODE, UPMCR0_IMODE_OFF));
	}
}

static int aic_fiq_set_type(struct irq_data *d, unsigned int type)
{
	return (type == IRQ_TYPE_LEVEL_HIGH) ? 0 : -EINVAL;
}

static struct irq_chip fiq_chip = {
	.name = "AIC-FIQ",
	.irq_mask = aic_fiq_mask,
	.irq_unmask = aic_fiq_unmask,
	.irq_ack = aic_fiq_set_mask,
	.irq_eoi = aic_fiq_eoi,
	.irq_set_type = aic_fiq_set_type,
};

/*
 * Main IRQ domain
 */

static int aic_irq_domain_map(struct irq_domain *id, unsigned int irq,
			      irq_hw_number_t hw)
{
	struct aic_irq_chip *ic = id->host_data;
	u32 type = FIELD_GET(AIC_EVENT_TYPE, hw);
	struct irq_chip *chip = &aic_chip;

	if (ic->info.version == 2)
		chip = &aic2_chip;

	if (type == AIC_EVENT_TYPE_IRQ) {
		irq_domain_set_info(id, irq, hw, chip, id->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
	} else {
		int fiq = FIELD_GET(AIC_EVENT_NUM, hw);

		switch (fiq) {
		case AIC_CPU_PMU_P:
		case AIC_CPU_PMU_E:
			irq_set_percpu_devid_partition(irq, &ic->fiq_aff[fiq]->aff);
			break;
		default:
			irq_set_percpu_devid(irq);
			break;
		}

		irq_domain_set_info(id, irq, hw, &fiq_chip, id->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
	}

	return 0;
}

static int aic_irq_domain_translate(struct irq_domain *id,
				    struct irq_fwspec *fwspec,
				    unsigned long *hwirq,
				    unsigned int *type)
{
	struct aic_irq_chip *ic = id->host_data;
	u32 *args;
	u32 die = 0;

	if (fwspec->param_count < 3 || fwspec->param_count > 4 ||
	    !is_of_node(fwspec->fwnode))
		return -EINVAL;

	args = &fwspec->param[1];

	if (fwspec->param_count == 4) {
		die = args[0];
		args++;
	}

	switch (fwspec->param[0]) {
	case AIC_IRQ:
		if (die >= ic->nr_die)
			return -EINVAL;
		if (args[0] >= ic->nr_irq)
			return -EINVAL;
		*hwirq = AIC_IRQ_HWIRQ(die, args[0]);
		break;
	case AIC_FIQ:
		if (die != 0)
			return -EINVAL;
		if (args[0] >= AIC_NR_FIQ)
			return -EINVAL;
		*hwirq = AIC_FIQ_HWIRQ(args[0]);

		/*
		 * In EL1 the non-redirected registers are the guest's,
		 * not EL2's, so remap the hwirqs to match.
		 */
		if (!is_kernel_in_hyp_mode()) {
			switch (args[0]) {
			case AIC_TMR_GUEST_PHYS:
				*hwirq = AIC_FIQ_HWIRQ(AIC_TMR_EL0_PHYS);
				break;
			case AIC_TMR_GUEST_VIRT:
				*hwirq = AIC_FIQ_HWIRQ(AIC_TMR_EL0_VIRT);
				break;
			case AIC_TMR_HV_PHYS:
			case AIC_TMR_HV_VIRT:
				return -ENOENT;
			default:
				break;
			}
		}
		break;
	default:
		return -EINVAL;
	}

	*type = args[1] & IRQ_TYPE_SENSE_MASK;

	return 0;
}

static int aic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	unsigned int type = IRQ_TYPE_NONE;
	struct irq_fwspec *fwspec = arg;
	irq_hw_number_t hwirq;
	int i, ret;

	ret = aic_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = aic_irq_domain_map(domain, virq + i, hwirq + i);
		if (ret)
			return ret;
	}

	return 0;
}

static void aic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

		irq_set_handler(virq + i, NULL);
		irq_domain_reset_irq_data(d);
	}
}

static const struct irq_domain_ops aic_irq_domain_ops = {
	.translate	= aic_irq_domain_translate,
	.alloc		= aic_irq_domain_alloc,
	.free		= aic_irq_domain_free,
};

/*
 * IPI irqchip
 */
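
/*
 * Software IPI multiplexing: Linux needs more IPIs than the two hardware
 * IPIs AIC provides, so the AIC_NR_SWIPI virtual IPIs are multiplexed over a
 * single per-CPU hardware IPI. Each vIPI is one bit in the per-CPU
 * aic_vipi_flag (pending) and aic_vipi_enable (unmasked) words; a hardware
 * IPI (fast sysreg-based or AIC MMIO-based) is only raised when a flag bit
 * goes from clear to set while that vIPI is enabled, and aic_handle_ipi()
 * demultiplexes the flag bits back into individual IPI handlers.
 */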

static void aic_ipi_send_fast(int cpu)
{
	u64 mpidr = cpu_logical_map(cpu);
	u64 my_mpidr = read_cpuid_mpidr();
	u64 cluster = MPIDR_CLUSTER(mpidr);
	u64 idx = MPIDR_CPU(mpidr);

	if (MPIDR_CLUSTER(my_mpidr) == cluster)
		write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx),
			       SYS_IMP_APL_IPI_RR_LOCAL_EL1);
	else
		write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx) | FIELD_PREP(IPI_RR_CLUSTER, cluster),
			       SYS_IMP_APL_IPI_RR_GLOBAL_EL1);
	isb();
}

static void aic_ipi_mask(struct irq_data *d)
{
	u32 irq_bit = BIT(irqd_to_hwirq(d));

	/* No specific ordering requirements needed here. */
	atomic_andnot(irq_bit, this_cpu_ptr(&aic_vipi_enable));
}

static void aic_ipi_unmask(struct irq_data *d)
{
	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
	u32 irq_bit = BIT(irqd_to_hwirq(d));

	atomic_or(irq_bit, this_cpu_ptr(&aic_vipi_enable));

	/*
	 * The atomic_or() above must complete before the atomic_read()
	 * below to avoid racing aic_ipi_send_mask().
	 */
	smp_mb__after_atomic();

	/*
	 * If a pending vIPI was unmasked, raise a HW IPI to ourselves.
	 * No barriers needed here since this is a self-IPI.
	 */
	if (atomic_read(this_cpu_ptr(&aic_vipi_flag)) & irq_bit) {
		if (static_branch_likely(&use_fast_ipi))
			aic_ipi_send_fast(smp_processor_id());
		else
			aic_ic_write(ic, AIC_IPI_SEND, AIC_IPI_SEND_CPU(smp_processor_id()));
	}
}

static void aic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
{
	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
	u32 irq_bit = BIT(irqd_to_hwirq(d));
	u32 send = 0;
	int cpu;
	unsigned long pending;

	for_each_cpu(cpu, mask) {
		/*
		 * This sequence is the mirror of the one in aic_ipi_unmask();
		 * see the comment there. Additionally, release semantics
		 * ensure that the vIPI flag set is ordered after any shared
		 * memory accesses that precede it. This therefore also pairs
		 * with the atomic_fetch_andnot in aic_handle_ipi().
		 */
		pending = atomic_fetch_or_release(irq_bit, per_cpu_ptr(&aic_vipi_flag, cpu));

		/*
		 * The atomic_fetch_or_release() above must complete before the
		 * atomic_read() below to avoid racing aic_ipi_unmask().
		 */
		smp_mb__after_atomic();

		if (!(pending & irq_bit) &&
		    (atomic_read(per_cpu_ptr(&aic_vipi_enable, cpu)) & irq_bit)) {
			if (static_branch_likely(&use_fast_ipi))
				aic_ipi_send_fast(cpu);
			else
				send |= AIC_IPI_SEND_CPU(cpu);
		}
	}

	/*
	 * The flag writes must complete before the physical IPI is issued
	 * to another CPU. This is implied by the control dependency on
	 * the result of atomic_read_acquire() above, which is itself
	 * already ordered after the vIPI flag write.
	 */
	if (send)
		aic_ic_write(ic, AIC_IPI_SEND, send);
}

static struct irq_chip ipi_chip = {
	.name = "AIC-IPI",
	.irq_mask = aic_ipi_mask,
	.irq_unmask = aic_ipi_unmask,
	.ipi_send_mask = aic_ipi_send_mask,
};

/*
 * IPI IRQ domain
 */

static void aic_handle_ipi(struct pt_regs *regs)
{
	int i;
	unsigned long enabled, firing;

	/*
	 * Ack the IPI. We need to order this after the AIC event read, but
	 * that is enforced by normal MMIO ordering guarantees.
	 *
	 * For the Fast IPI case, this needs to be ordered before the vIPI
	 * handling below, so we need to isb();
	 */
	if (static_branch_likely(&use_fast_ipi)) {
		write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
		isb();
	} else {
		aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_OTHER);
	}

	/*
	 * The mask read does not need to be ordered. Only we can change
	 * our own mask anyway, so no races are possible here, as long as
	 * we are properly in the interrupt handler (which is covered by
	 * the barrier that is part of the top-level AIC handler's readl()).
	 */
	enabled = atomic_read(this_cpu_ptr(&aic_vipi_enable));

	/*
	 * Clear the IPIs we are about to handle. This pairs with the
	 * atomic_fetch_or_release() in aic_ipi_send_mask(), and needs to be
	 * ordered after the aic_ic_write() above (to avoid dropping vIPIs) and
	 * before IPI handling code (to avoid races handling vIPIs before they
	 * are signaled). The former is taken care of by the release semantics
	 * of the write portion, while the latter is taken care of by the
	 * acquire semantics of the read portion.
	 */
	firing = atomic_fetch_andnot(enabled, this_cpu_ptr(&aic_vipi_flag)) & enabled;

	for_each_set_bit(i, &firing, AIC_NR_SWIPI)
		generic_handle_domain_irq(aic_irqc->ipi_domain, i);

	/*
	 * No ordering needed here; at worst this just changes the timing of
	 * when the next IPI will be delivered.
	 */
	if (!static_branch_likely(&use_fast_ipi))
		aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
}

static int aic_ipi_alloc(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs, void *args)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_set_percpu_devid(virq + i);
		irq_domain_set_info(d, virq + i, i, &ipi_chip, d->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
	}

	return 0;
}

static void aic_ipi_free(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs)
{
	/* Not freeing IPIs */
}

static const struct irq_domain_ops aic_ipi_domain_ops = {
	.alloc = aic_ipi_alloc,
	.free = aic_ipi_free,
};

static int __init aic_init_smp(struct aic_irq_chip *irqc, struct device_node *node)
{
	struct irq_domain *ipi_domain;
	int base_ipi;

	ipi_domain = irq_domain_create_linear(irqc->hw_domain->fwnode, AIC_NR_SWIPI,
					      &aic_ipi_domain_ops, irqc);
	if (WARN_ON(!ipi_domain))
		return -ENODEV;

	ipi_domain->flags |= IRQ_DOMAIN_FLAG_IPI_SINGLE;
	irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI);

	base_ipi = __irq_domain_alloc_irqs(ipi_domain, -1, AIC_NR_SWIPI,
					   NUMA_NO_NODE, NULL, false, NULL);

	if (WARN_ON(!base_ipi)) {
		irq_domain_remove(ipi_domain);
		return -ENODEV;
	}

	set_smp_ipi_range(base_ipi, AIC_NR_SWIPI);

	irqc->ipi_domain = ipi_domain;

	return 0;
}

static int aic_init_cpu(unsigned int cpu)
{
	/* Mask all hard-wired per-CPU IRQ/FIQ sources */

	/* Pending Fast IPI FIQs */
	write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);

	/* Timer FIQs */
	sysreg_clear_set(cntp_ctl_el0, 0, ARCH_TIMER_CTRL_IT_MASK);
	sysreg_clear_set(cntv_ctl_el0, 0, ARCH_TIMER_CTRL_IT_MASK);

	/* EL2-only (VHE mode) IRQ sources */
	if (is_kernel_in_hyp_mode()) {
		/* Guest timers */
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2,
				   VM_TMR_FIQ_ENABLE_V | VM_TMR_FIQ_ENABLE_P, 0);

		/* vGIC maintenance IRQ */
		sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EN, 0);
	}

	/* PMC FIQ */
	sysreg_clear_set_s(SYS_IMP_APL_PMCR0_EL1, PMCR0_IMODE | PMCR0_IACT,
			   FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_OFF));

	/* Uncore PMC FIQ */
	sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE,
			   FIELD_PREP(UPMCR0_IMODE, UPMCR0_IMODE_OFF));

	/* Commit all of the above */
	isb();

	if (aic_irqc->info.version == 1) {
		/*
		 * Make sure the kernel's idea of logical CPU order is the same as AIC's
		 * If we ever end up with a mismatch here, we will have to introduce
		 * a mapping table similar to what other irqchip drivers do.
		 */
		WARN_ON(aic_ic_read(aic_irqc, AIC_WHOAMI) != smp_processor_id());

		/*
		 * Always keep IPIs unmasked at the hardware level (except auto-masking
		 * by AIC during processing). We manage masks at the vIPI level.
		 * These registers only exist on AICv1, AICv2 always uses fast IPIs.
		 */
		aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_SELF | AIC_IPI_OTHER);
		if (static_branch_likely(&use_fast_ipi)) {
			aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF | AIC_IPI_OTHER);
		} else {
			aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF);
			aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
		}
	}

	/* Initialize the local mask state */
	__this_cpu_write(aic_fiq_unmasked, 0);

	return 0;
}

static struct gic_kvm_info vgic_info __initdata = {
	.type			= GIC_V3,
	.no_maint_irq_mask	= true,
	.no_hw_deactivation	= true,
};

static void build_fiq_affinity(struct aic_irq_chip *ic, struct device_node *aff)
{
	int i, n;
	u32 fiq;

	if (of_property_read_u32(aff, "apple,fiq-index", &fiq) ||
	    WARN_ON(fiq >= AIC_NR_FIQ) || ic->fiq_aff[fiq])
		return;

	n = of_property_count_elems_of_size(aff, "cpus", sizeof(u32));
	if (WARN_ON(n < 0))
		return;

	ic->fiq_aff[fiq] = kzalloc(sizeof(*ic->fiq_aff[fiq]), GFP_KERNEL);
	if (!ic->fiq_aff[fiq])
		return;

	for (i = 0; i < n; i++) {
		struct device_node *cpu_node;
		u32 cpu_phandle;
		int cpu;

		if (of_property_read_u32_index(aff, "cpus", i, &cpu_phandle))
			continue;

		cpu_node = of_find_node_by_phandle(cpu_phandle);
		if (WARN_ON(!cpu_node))
			continue;

		cpu = of_cpu_node_to_id(cpu_node);
		of_node_put(cpu_node);
		if (WARN_ON(cpu < 0))
			continue;

		cpumask_set_cpu(cpu, &ic->fiq_aff[fiq]->aff);
	}
}

static int __init aic_of_ic_init(struct device_node *node, struct device_node *parent)
{
	int i, die;
	u32 off, start_off;
	void __iomem *regs;
	struct aic_irq_chip *irqc;
	struct device_node *affs;
	const struct of_device_id *match;

	regs = of_iomap(node, 0);
	if (WARN_ON(!regs))
		return -EIO;

	irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
	if (!irqc) {
		iounmap(regs);
		return -ENOMEM;
	}

	irqc->base = regs;

	match = of_match_node(aic_info_match, node);
	if (!match)
		goto err_unmap;

	irqc->info = *(struct aic_info *)match->data;

	aic_irqc = irqc;

	switch (irqc->info.version) {
	case 1: {
		u32 info;

		info = aic_ic_read(irqc, AIC_INFO);
		irqc->nr_irq = FIELD_GET(AIC_INFO_NR_IRQ, info);
		irqc->max_irq = AIC_MAX_IRQ;
		irqc->nr_die = irqc->max_die = 1;

		off = start_off = irqc->info.target_cpu;
		off += sizeof(u32) * irqc->max_irq; /* TARGET_CPU */

		irqc->event = irqc->base;

		break;
	}
	case 2: {
		u32 info1, info3;

		info1 = aic_ic_read(irqc, AIC2_INFO1);
		info3 = aic_ic_read(irqc, AIC2_INFO3);

		irqc->nr_irq = FIELD_GET(AIC2_INFO1_NR_IRQ, info1);
		irqc->max_irq = FIELD_GET(AIC2_INFO3_MAX_IRQ, info3);
		irqc->nr_die = FIELD_GET(AIC2_INFO1_LAST_DIE, info1) + 1;
		irqc->max_die = FIELD_GET(AIC2_INFO3_MAX_DIE, info3);

		off = start_off = irqc->info.irq_cfg;
		off += sizeof(u32) * irqc->max_irq; /* IRQ_CFG */

		irqc->event = of_iomap(node, 1);
		if (WARN_ON(!irqc->event))
			goto err_unmap;

		break;
	}
	}

	irqc->info.sw_set = off;
	off += sizeof(u32) * (irqc->max_irq >> 5); /* SW_SET */
	irqc->info.sw_clr = off;
	off += sizeof(u32) * (irqc->max_irq >> 5); /* SW_CLR */
	irqc->info.mask_set = off;
	off += sizeof(u32) * (irqc->max_irq >> 5); /* MASK_SET */
	irqc->info.mask_clr = off;
	off += sizeof(u32) * (irqc->max_irq >> 5); /* MASK_CLR */
	off += sizeof(u32) * (irqc->max_irq >> 5); /* HW_STATE */

	if (irqc->info.fast_ipi)
		static_branch_enable(&use_fast_ipi);
	else
		static_branch_disable(&use_fast_ipi);

	irqc->info.die_stride = off - start_off;

	irqc->hw_domain = irq_domain_create_tree(of_node_to_fwnode(node),
						 &aic_irq_domain_ops, irqc);
	if (WARN_ON(!irqc->hw_domain))
		goto err_unmap;

	irq_domain_update_bus_token(irqc->hw_domain, DOMAIN_BUS_WIRED);

	if (aic_init_smp(irqc, node))
		goto err_remove_domain;

	affs = of_get_child_by_name(node, "affinities");
	if (affs) {
		struct device_node *chld;

		for_each_child_of_node(affs, chld)
			build_fiq_affinity(irqc, chld);
	}
	of_node_put(affs);

	set_handle_irq(aic_handle_irq);
	set_handle_fiq(aic_handle_fiq);

	off = 0;
	for (die = 0; die < irqc->nr_die; die++) {
		for (i = 0; i < BITS_TO_U32(irqc->nr_irq); i++)
			aic_ic_write(irqc, irqc->info.mask_set + off + i * 4, U32_MAX);
		for (i = 0; i < BITS_TO_U32(irqc->nr_irq); i++)
			aic_ic_write(irqc, irqc->info.sw_clr + off + i * 4, U32_MAX);
		if (irqc->info.target_cpu)
			for (i = 0; i < irqc->nr_irq; i++)
				aic_ic_write(irqc, irqc->info.target_cpu + off + i * 4, 1);
		off += irqc->info.die_stride;
	}

	if (irqc->info.version == 2) {
		u32 config = aic_ic_read(irqc, AIC2_CONFIG);

		config |= AIC2_CONFIG_ENABLE;
		aic_ic_write(irqc, AIC2_CONFIG, config);
	}

	if (!is_kernel_in_hyp_mode())
		pr_info("Kernel running in EL1, mapping interrupts");

	if (static_branch_likely(&use_fast_ipi))
		pr_info("Using Fast IPIs");

	cpuhp_setup_state(CPUHP_AP_IRQ_APPLE_AIC_STARTING,
			  "irqchip/apple-aic/ipi:starting",
			  aic_init_cpu, NULL);

	vgic_set_kvm_info(&vgic_info);

	pr_info("Initialized with %d/%d IRQs * %d/%d die(s), %d FIQs, %d vIPIs",
		irqc->nr_irq, irqc->max_irq, irqc->nr_die, irqc->max_die, AIC_NR_FIQ, AIC_NR_SWIPI);

	return 0;

err_remove_domain:
	irq_domain_remove(irqc->hw_domain);
err_unmap:
	if (irqc->event && irqc->event != irqc->base)
		iounmap(irqc->event);
	iounmap(irqc->base);
	kfree(irqc);
	return -ENODEV;
}

IRQCHIP_DECLARE(apple_aic, "apple,aic", aic_of_ic_init);
IRQCHIP_DECLARE(apple_aic2, "apple,aic2", aic_of_ic_init);