irq-gic-v3.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <[email protected]>
 */

#define pr_fmt(fmt)	"GICv3: " fmt

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <trace/hooks/gic_v3.h>
#include <trace/hooks/gic.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-common.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/irq-partition-percpu.h>

#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>

#include "irq-gic-common.h"

#define GICD_INT_NMI_PRI	(GICD_INT_DEF_PRI & ~0x80)

#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996	(1ULL << 0)
#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539	(1ULL << 1)
#define FLAGS_WORKAROUND_MTK_GICR_SAVE		(1ULL << 2)
#define FLAGS_WORKAROUND_ASR_ERRATUM_8601001	(1ULL << 3)

#define GIC_IRQ_TYPE_PARTITION	(GIC_IRQ_TYPE_LPI + 1)

struct redist_region {
	void __iomem		*redist_base;
	phys_addr_t		phys_base;
	bool			single_redist;
};

static struct gic_chip_data_v3 gic_data __read_mostly;
static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
static DEFINE_STATIC_KEY_FALSE(gic_arm64_2941627_erratum);

#define GIC_ID_NR	(1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
#define GIC_LINE_NR	min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
#define GIC_ESPI_NR	GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)

/*
 * The behaviours of RPR and PMR registers differ depending on the value of
 * SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the
 * distributor and redistributors depends on whether security is enabled in the
 * GIC.
 *
 * When security is enabled, non-secure priority values from the (re)distributor
 * are presented to the GIC CPUIF as follows:
 *     (GIC_(R)DIST_PRI[irq] >> 1) | 0x80;
 *
 * If SCR_EL3.FIQ == 1, the values written to/read from PMR and RPR at non-secure
 * EL1 are subject to a similar operation thus matching the priorities presented
 * from the (re)distributor when security is enabled. When SCR_EL3.FIQ == 0,
 * these values are unchanged by the GIC.
 *
 * see GICv3/GICv4 Architecture Specification (IHI0069D):
 * - section 4.8.1 Non-secure accesses to register fields for Secure interrupt
 *   priorities.
 * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1
 *   interrupt.
 */
static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);

/*
 * Global static key controlling whether an update to PMR allowing more
 * interrupts requires to be propagated to the redistributor (DSB SY).
 * And this needs to be exported for modules to be able to enable
 * interrupts...
 */
DEFINE_STATIC_KEY_FALSE(gic_pmr_sync);
EXPORT_SYMBOL(gic_pmr_sync);

DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities);
EXPORT_SYMBOL(gic_nonsecure_priorities);

/*
 * When the Non-secure world has access to group 0 interrupts (as a
 * consequence of SCR_EL3.FIQ == 0), reading the ICC_RPR_EL1 register will
 * return the Distributor's view of the interrupt priority.
 *
 * When GIC security is enabled (GICD_CTLR.DS == 0), the interrupt priority
 * written by software is moved to the Non-secure range by the Distributor.
 *
 * If both are true (which is when gic_nonsecure_priorities gets enabled),
 * we need to shift down the priority programmed by software to match it
 * against the value returned by ICC_RPR_EL1.
 */
#define GICD_INT_RPR_PRI(priority)					\
	({								\
		u32 __priority = (priority);				\
		if (static_branch_unlikely(&gic_nonsecure_priorities))	\
			__priority = 0x80 | (__priority >> 1);		\
									\
		__priority;						\
	})

/* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */
static refcount_t *ppi_nmi_refs;

static struct gic_kvm_info gic_v3_kvm_info __initdata;
static DEFINE_PER_CPU(bool, has_rss);

#define MPIDR_RS(mpidr)			(((mpidr) & 0xF0UL) >> 4)
#define gic_data_rdist()		(this_cpu_ptr(gic_data.rdists.rdist))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_sgi_base()	(gic_data_rdist_rd_base() + SZ_64K)

/* Our default, arbitrary priority value. Linux only uses one anyway. */
#define DEFAULT_PMR_VALUE	0xf0

enum gic_intid_range {
	SGI_RANGE,
	PPI_RANGE,
	SPI_RANGE,
	EPPI_RANGE,
	ESPI_RANGE,
	LPI_RANGE,
	__INVALID_RANGE__
};
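/*
 * INTID space as carved up by the architecture: SGIs are 0-15, PPIs
 * 16-31, SPIs 32-1019, extended PPIs start at EPPI_BASE_INTID (1056),
 * extended SPIs at ESPI_BASE_INTID (4096), and LPIs run from 8192 up
 * to GENMASK(23, 0).
 */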
static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq)
{
	switch (hwirq) {
	case 0 ... 15:
		return SGI_RANGE;
	case 16 ... 31:
		return PPI_RANGE;
	case 32 ... 1019:
		return SPI_RANGE;
	case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 63):
		return EPPI_RANGE;
	case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 1023):
		return ESPI_RANGE;
	case 8192 ... GENMASK(23, 0):
		return LPI_RANGE;
	default:
		return __INVALID_RANGE__;
	}
}

static enum gic_intid_range get_intid_range(struct irq_data *d)
{
	return __get_intid_range(d->hwirq);
}

static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}

static inline bool gic_irq_in_rdist(struct irq_data *d)
{
	switch (get_intid_range(d)) {
	case SGI_RANGE:
	case PPI_RANGE:
	case EPPI_RANGE:
		return true;
	default:
		return false;
	}
}

static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	switch (get_intid_range(d)) {
	case SGI_RANGE:
	case PPI_RANGE:
	case EPPI_RANGE:
		/* SGI+PPI -> SGI_base for this CPU */
		return gic_data_rdist_sgi_base();
	case SPI_RANGE:
	case ESPI_RANGE:
		/* SPI -> dist_base */
		return gic_data.dist_base;
	default:
		return NULL;
	}
}
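/*
 * The RWP bit in GICD_CTLR/GICR_CTLR is set by hardware while a
 * register write is still propagating; poll it (with a ~1s timeout)
 * until the (re)distributor has digested the change.
 */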
static void gic_do_wait_for_rwp(void __iomem *base, u32 bit)
{
	u32 count = 1000000;	/* 1s! */

	while (readl_relaxed(base + GICD_CTLR) & bit) {
		count--;
		if (!count) {
			pr_err_ratelimited("RWP timeout, gone fishing\n");
			return;
		}
		cpu_relax();
		udelay(1);
	}
}

/* Wait for completion of a distributor change */
void gic_v3_dist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data.dist_base, GICD_CTLR_RWP);
}
EXPORT_SYMBOL_GPL(gic_v3_dist_wait_for_rwp);

/* Wait for completion of a redistributor change */
static void gic_redist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data_rdist_rd_base(), GICR_CTLR_RWP);
}

#ifdef CONFIG_ARM64
static u64 __maybe_unused gic_read_iar(void)
{
	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154))
		return gic_read_iar_cavium_thunderx();
	else
		return gic_read_iar_common();
}
#endif
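/*
 * Wake up (or put to sleep) this CPU's redistributor through the
 * GICR_WAKER handshake: flip ProcessorSleep, then wait for
 * ChildrenAsleep to reflect the new state.
 */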
static void gic_enable_redist(bool enable)
{
	void __iomem *rbase;
	u32 count = 1000000;	/* 1s! */
	u32 val;

	if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996)
		return;

	rbase = gic_data_rdist_rd_base();

	val = readl_relaxed(rbase + GICR_WAKER);
	if (enable)
		/* Wake up this CPU redistributor */
		val &= ~GICR_WAKER_ProcessorSleep;
	else
		val |= GICR_WAKER_ProcessorSleep;
	writel_relaxed(val, rbase + GICR_WAKER);

	if (!enable) {		/* Check that GICR_WAKER is writeable */
		val = readl_relaxed(rbase + GICR_WAKER);
		if (!(val & GICR_WAKER_ProcessorSleep))
			return;	/* No PM support in this redistributor */
	}

	while (--count) {
		val = readl_relaxed(rbase + GICR_WAKER);
		if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep))
			break;
		cpu_relax();
		udelay(1);
	}
	if (!count)
		pr_err_ratelimited("redistributor failed to %s...\n",
				   enable ? "wakeup" : "sleep");
}

/*
 * Routines to disable, enable, EOI and route interrupts
 */
static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index)
{
	switch (get_intid_range(d)) {
	case SGI_RANGE:
	case PPI_RANGE:
	case SPI_RANGE:
		*index = d->hwirq;
		return offset;
	case EPPI_RANGE:
		/*
		 * Contrary to the ESPI range, the EPPI range is contiguous
		 * to the PPI range in the registers, so let's adjust the
		 * displacement accordingly. Consistency is overrated.
		 */
		*index = d->hwirq - EPPI_BASE_INTID + 32;
		return offset;
	case ESPI_RANGE:
		*index = d->hwirq - ESPI_BASE_INTID;
		switch (offset) {
		case GICD_ISENABLER:
			return GICD_ISENABLERnE;
		case GICD_ICENABLER:
			return GICD_ICENABLERnE;
		case GICD_ISPENDR:
			return GICD_ISPENDRnE;
		case GICD_ICPENDR:
			return GICD_ICPENDRnE;
		case GICD_ISACTIVER:
			return GICD_ISACTIVERnE;
		case GICD_ICACTIVER:
			return GICD_ICACTIVERnE;
		case GICD_IPRIORITYR:
			return GICD_IPRIORITYRnE;
		case GICD_ICFGR:
			return GICD_ICFGRnE;
		case GICD_IROUTER:
			return GICD_IROUTERnE;
		default:
			break;
		}
		break;
	default:
		break;
	}

	WARN_ON(1);
	*index = d->hwirq;
	return offset;
}
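/*
 * The {IS,IC}{ENABLER,PENDR,ACTIVER} registers pack one bit per INTID,
 * 32 INTIDs per 32bit word: pick the right word from 'index' and
 * either test (peek) or write (poke) the corresponding bit.
 */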
static int gic_peek_irq(struct irq_data *d, u32 offset)
{
	void __iomem *base;
	u32 index, mask;

	offset = convert_offset_index(d, offset, &index);
	mask = 1 << (index % 32);

	if (gic_irq_in_rdist(d))
		base = gic_data_rdist_sgi_base();
	else
		base = gic_data.dist_base;

	return !!(readl_relaxed(base + offset + (index / 32) * 4) & mask);
}

static void gic_poke_irq(struct irq_data *d, u32 offset)
{
	void __iomem *base;
	u32 index, mask;

	offset = convert_offset_index(d, offset, &index);
	mask = 1 << (index % 32);

	if (gic_irq_in_rdist(d))
		base = gic_data_rdist_sgi_base();
	else
		base = gic_data.dist_base;

	writel_relaxed(mask, base + offset + (index / 32) * 4);
}

static void gic_mask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ICENABLER);
	if (gic_irq_in_rdist(d))
		gic_redist_wait_for_rwp();
	else
		gic_v3_dist_wait_for_rwp();
}

static void gic_eoimode1_mask_irq(struct irq_data *d)
{
	gic_mask_irq(d);
	/*
	 * When masking a forwarded interrupt, make sure it is
	 * deactivated as well.
	 *
	 * This ensures that an interrupt that is getting
	 * disabled/masked will not get "stuck", because there is
	 * no one to deactivate it (guest is being terminated).
	 */
	if (irqd_is_forwarded_to_vcpu(d))
		gic_poke_irq(d, GICD_ICACTIVER);
}

static void gic_unmask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ISENABLER);
}

static inline bool gic_supports_nmi(void)
{
	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
	       static_branch_likely(&supports_pseudo_nmis);
}

static int gic_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool val)
{
	u32 reg;

	if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
		return -EINVAL;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		reg = val ? GICD_ISPENDR : GICD_ICPENDR;
		break;
	case IRQCHIP_STATE_ACTIVE:
		reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
		break;
	case IRQCHIP_STATE_MASKED:
		if (val) {
			gic_mask_irq(d);
			return 0;
		}
		reg = GICD_ISENABLER;
		break;
	default:
		return -EINVAL;
	}

	gic_poke_irq(d, reg);
	return 0;
}

static int gic_irq_get_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool *val)
{
	if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
		return -EINVAL;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		*val = gic_peek_irq(d, GICD_ISPENDR);
		break;
	case IRQCHIP_STATE_ACTIVE:
		*val = gic_peek_irq(d, GICD_ISACTIVER);
		break;
	case IRQCHIP_STATE_MASKED:
		*val = !gic_peek_irq(d, GICD_ISENABLER);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
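/* Priority registers hold one byte per INTID, hence the byte access */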
static void gic_irq_set_prio(struct irq_data *d, u8 prio)
{
	void __iomem *base = gic_dist_base(d);
	u32 offset, index;

	offset = convert_offset_index(d, GICD_IPRIORITYR, &index);

	writeb_relaxed(prio, base + offset + index);
}

static u32 __gic_get_ppi_index(irq_hw_number_t hwirq)
{
	switch (__get_intid_range(hwirq)) {
	case PPI_RANGE:
		return hwirq - 16;
	case EPPI_RANGE:
		return hwirq - EPPI_BASE_INTID + 16;
	default:
		unreachable();
	}
}

static u32 gic_get_ppi_index(struct irq_data *d)
{
	return __gic_get_ppi_index(d->hwirq);
}
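/*
 * PPIs are per-CPU, so a given PPI can be configured as NMI on several
 * CPUs at once; ppi_nmi_refs[] counts those users so that the flow
 * handler is only switched on the first setup and the last teardown.
 */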
static int gic_irq_nmi_setup(struct irq_data *d)
{
	struct irq_desc *desc = irq_to_desc(d->irq);

	if (!gic_supports_nmi())
		return -EINVAL;

	if (gic_peek_irq(d, GICD_ISENABLER)) {
		pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
		return -EINVAL;
	}

	/*
	 * A secondary irq_chip should be in charge of LPI request,
	 * it should not be possible to get there
	 */
	if (WARN_ON(gic_irq(d) >= 8192))
		return -EINVAL;

	/* desc lock should already be held */
	if (gic_irq_in_rdist(d)) {
		u32 idx = gic_get_ppi_index(d);

		/* Setting up PPI as NMI, only switch handler for first NMI */
		if (!refcount_inc_not_zero(&ppi_nmi_refs[idx])) {
			refcount_set(&ppi_nmi_refs[idx], 1);
			desc->handle_irq = handle_percpu_devid_fasteoi_nmi;
		}
	} else {
		desc->handle_irq = handle_fasteoi_nmi;
	}

	gic_irq_set_prio(d, GICD_INT_NMI_PRI);

	return 0;
}

static void gic_irq_nmi_teardown(struct irq_data *d)
{
	struct irq_desc *desc = irq_to_desc(d->irq);

	if (WARN_ON(!gic_supports_nmi()))
		return;

	if (gic_peek_irq(d, GICD_ISENABLER)) {
		pr_err("Cannot clear NMI property of enabled IRQ %u\n", d->irq);
		return;
	}

	/*
	 * A secondary irq_chip should be in charge of LPI request,
	 * it should not be possible to get there
	 */
	if (WARN_ON(gic_irq(d) >= 8192))
		return;

	/* desc lock should already be held */
	if (gic_irq_in_rdist(d)) {
		u32 idx = gic_get_ppi_index(d);

		/* Tearing down NMI, only switch handler for last NMI */
		if (refcount_dec_and_test(&ppi_nmi_refs[idx]))
			desc->handle_irq = handle_percpu_devid_irq;
	} else {
		desc->handle_irq = handle_fasteoi_irq;
	}

	gic_irq_set_prio(d, GICD_INT_DEF_PRI);
}

static bool gic_arm64_erratum_2941627_needed(struct irq_data *d)
{
	enum gic_intid_range range;

	if (!static_branch_unlikely(&gic_arm64_2941627_erratum))
		return false;

	range = get_intid_range(d);

	/*
	 * The workaround is needed if the IRQ is an SPI and
	 * the target cpu is different from the one we are
	 * executing on.
	 */
	return (range == SPI_RANGE || range == ESPI_RANGE) &&
	       !cpumask_test_cpu(raw_smp_processor_id(),
				 irq_data_get_effective_affinity_mask(d));
}

static void gic_eoi_irq(struct irq_data *d)
{
	write_gicreg(gic_irq(d), ICC_EOIR1_EL1);
	isb();

	if (gic_arm64_erratum_2941627_needed(d)) {
		/*
		 * Make sure the GIC stream deactivate packet
		 * issued by ICC_EOIR1_EL1 has completed before
		 * deactivating through GICD_ICACTIVER.
		 */
		dsb(sy);
		gic_poke_irq(d, GICD_ICACTIVER);
	}
}

static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
	/*
	 * No need to deactivate an LPI, or an interrupt that
	 * is getting forwarded to a vcpu.
	 */
	if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
		return;

	if (!gic_arm64_erratum_2941627_needed(d))
		gic_write_dir(gic_irq(d));
	else
		gic_poke_irq(d, GICD_ICACTIVER);
}

static int gic_set_type(struct irq_data *d, unsigned int type)
{
	enum gic_intid_range range;
	unsigned int irq = gic_irq(d);
	void __iomem *base;
	u32 offset, index;
	int ret;

	range = get_intid_range(d);

	/* Interrupt configuration for SGIs can't be changed */
	if (range == SGI_RANGE)
		return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;

	/* SPIs have restrictions on the supported types */
	if ((range == SPI_RANGE || range == ESPI_RANGE) &&
	    type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	if (gic_irq_in_rdist(d))
		base = gic_data_rdist_sgi_base();
	else
		base = gic_data.dist_base;

	offset = convert_offset_index(d, GICD_ICFGR, &index);

	ret = gic_configure_irq(index, type, base + offset, NULL);
	if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) {
		/* Misconfigured PPIs are usually not fatal */
		pr_warn("GIC: PPI INTID%d is secure or misconfigured\n", irq);
		ret = 0;
	}

	return ret;
}

static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
	if (get_intid_range(d) == SGI_RANGE)
		return -EINVAL;

	if (vcpu)
		irqd_set_forwarded_to_vcpu(d);
	else
		irqd_clr_forwarded_to_vcpu(d);
	return 0;
}
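/*
 * Pack a CPU's MPIDR affinity fields into the Aff3.Aff2.Aff1.Aff0
 * layout used by GICD_IROUTER (Aff3 in bits [39:32], Aff2/Aff1/Aff0
 * in bits [23:0]).
 */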
static u64 gic_cpu_to_affinity(int cpu)
{
	u64 mpidr = cpu_logical_map(cpu);
	u64 aff;

	/* ASR8601 needs to have its affinities shifted down... */
	if (unlikely(gic_data.flags & FLAGS_WORKAROUND_ASR_ERRATUM_8601001))
		mpidr = (MPIDR_AFFINITY_LEVEL(mpidr, 1) |
			 (MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8));

	aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	return aff;
}

static void gic_deactivate_unhandled(u32 irqnr)
{
	if (static_branch_likely(&supports_deactivate_key)) {
		if (irqnr < 8192)
			gic_write_dir(irqnr);
	} else {
		write_gicreg(irqnr, ICC_EOIR1_EL1);
		isb();
	}
}

/*
 * Follow a read of the IAR with any HW maintenance that needs to happen prior
 * to invoking the relevant IRQ handler. We must do two things:
 *
 * (1) Ensure instruction ordering between a read of IAR and subsequent
 *     instructions in the IRQ handler using an ISB.
 *
 *     It is possible for the IAR to report an IRQ which was signalled *after*
 *     the CPU took an IRQ exception as multiple interrupts can race to be
 *     recognized by the GIC, earlier interrupts could be withdrawn, and/or
 *     later interrupts could be prioritized by the GIC.
 *
 *     For devices which are tightly coupled to the CPU, such as PMUs, a
 *     context synchronization event is necessary to ensure that system
 *     register state is not stale, as these may have been indirectly written
 *     *after* exception entry.
 *
 * (2) Deactivate the interrupt when EOI mode 1 is in use.
 */
static inline void gic_complete_ack(u32 irqnr)
{
	if (static_branch_likely(&supports_deactivate_key))
		write_gicreg(irqnr, ICC_EOIR1_EL1);

	isb();
}

static bool gic_rpr_is_nmi_prio(void)
{
	if (!gic_supports_nmi())
		return false;

	return unlikely(gic_read_rpr() == GICD_INT_RPR_PRI(GICD_INT_NMI_PRI));
}
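/* INTIDs 1020-1023 are reserved by the architecture (1023 is the spurious INTID) */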
static bool gic_irqnr_is_special(u32 irqnr)
{
	return irqnr >= 1020 && irqnr <= 1023;
}

static void __gic_handle_irq(u32 irqnr, struct pt_regs *regs)
{
	if (gic_irqnr_is_special(irqnr))
		return;

	gic_complete_ack(irqnr);

	if (generic_handle_domain_irq(gic_data.domain, irqnr)) {
		WARN_ONCE(true, "Unexpected interrupt (irqnr %u)\n", irqnr);
		gic_deactivate_unhandled(irqnr);
	}
}

static void __gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
{
	if (gic_irqnr_is_special(irqnr))
		return;

	gic_complete_ack(irqnr);

	if (generic_handle_domain_nmi(gic_data.domain, irqnr)) {
		WARN_ONCE(true, "Unexpected pseudo-NMI (irqnr %u)\n", irqnr);
		gic_deactivate_unhandled(irqnr);
	}
}

/*
 * An exception has been taken from a context with IRQs enabled, and this could
 * be an IRQ or an NMI.
 *
 * The entry code called us with DAIF.IF set to keep NMIs masked. We must clear
 * DAIF.IF (and update ICC_PMR_EL1 to mask regular IRQs) prior to returning,
 * after handling any NMI but before handling any IRQ.
 *
 * The entry code has performed IRQ entry, and if an NMI is detected we must
 * perform NMI entry/exit around invoking the handler.
 */
static void __gic_handle_irq_from_irqson(struct pt_regs *regs)
{
	bool is_nmi;
	u32 irqnr;

	irqnr = gic_read_iar();

	is_nmi = gic_rpr_is_nmi_prio();

	if (is_nmi) {
		nmi_enter();
		__gic_handle_nmi(irqnr, regs);
		nmi_exit();
	}

	if (gic_prio_masking_enabled()) {
		gic_pmr_mask_irqs();
		gic_arch_enable_irqs();
	}

	if (!is_nmi)
		__gic_handle_irq(irqnr, regs);
}

/*
 * An exception has been taken from a context with IRQs disabled, which can only
 * be an NMI.
 *
 * The entry code called us with DAIF.IF set to keep NMIs masked. We must leave
 * DAIF.IF (and ICC_PMR_EL1) unchanged.
 *
 * The entry code has performed NMI entry.
 */
static void __gic_handle_irq_from_irqsoff(struct pt_regs *regs)
{
	u64 pmr;
	u32 irqnr;

	/*
	 * We were in a context with IRQs disabled. However, the
	 * entry code has set PMR to a value that allows any
	 * interrupt to be acknowledged, and not just NMIs. This can
	 * lead to surprising effects if the NMI has been retired in
	 * the meantime and an IRQ is pending. The IRQ would then be
	 * taken in NMI context, something that nobody wants to debug
	 * twice.
	 *
	 * Until we sort this, drop PMR again to a level that will
	 * actually only allow NMIs before reading IAR, and then
	 * restore it to what it was.
	 */
	pmr = gic_read_pmr();
	gic_pmr_mask_irqs();
	isb();
	irqnr = gic_read_iar();
	gic_write_pmr(pmr);

	__gic_handle_nmi(irqnr, regs);
}

static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	if (unlikely(gic_supports_nmi() && !interrupts_enabled(regs)))
		__gic_handle_irq_from_irqsoff(regs);
	else
		__gic_handle_irq_from_irqson(regs);
}
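/* ICC_CTLR_EL1.PRIbits encodes the number of implemented priority bits, minus one */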
static u32 gic_get_pribits(void)
{
	u32 pribits;

	pribits = gic_read_ctlr();
	pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
	pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
	pribits++;

	return pribits;
}

static bool gic_has_group0(void)
{
	u32 val;
	u32 old_pmr;

	old_pmr = gic_read_pmr();

	/*
	 * Let's find out if Group0 is under control of EL3 or not by
	 * setting the highest possible, non-zero priority in PMR.
	 *
	 * If SCR_EL3.FIQ is set, the priority gets shifted down in
	 * order for the CPU interface to set bit 7, and keep the
	 * actual priority in the non-secure range. In the process, it
	 * loses the least significant bit and the actual priority
	 * becomes 0x80. Reading it back returns 0, indicating that
	 * we don't have access to Group0.
	 */
	gic_write_pmr(BIT(8 - gic_get_pribits()));
	val = gic_read_pmr();

	gic_write_pmr(old_pmr);

	return val != 0;
}
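/*
 * Bring the distributor to a known state: disabled, all (E)SPIs made
 * non-secure Group-1 with default priority/config, then re-enabled
 * with affinity routing, everything targeting the boot CPU.
 */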
void gic_v3_dist_init(void)
{
	unsigned int i;
	u64 affinity;
	void __iomem *base = gic_data.dist_base;
	u32 val;

	/* Disable the distributor */
	writel_relaxed(0, base + GICD_CTLR);
	gic_v3_dist_wait_for_rwp();

	/*
	 * Configure SPIs as non-secure Group-1. This will only matter
	 * if the GIC only has a single security state. This will not
	 * do the right thing if the kernel is running in secure mode,
	 * but that's not the intended use case anyway.
	 */
	for (i = 32; i < GIC_LINE_NR; i += 32)
		writel_relaxed(~0, base + GICD_IGROUPR + i / 8);

	/* Extended SPI range, not handled by the GICv2/GICv3 common code */
	for (i = 0; i < GIC_ESPI_NR; i += 32) {
		writel_relaxed(~0U, base + GICD_ICENABLERnE + i / 8);
		writel_relaxed(~0U, base + GICD_ICACTIVERnE + i / 8);
	}

	for (i = 0; i < GIC_ESPI_NR; i += 32)
		writel_relaxed(~0U, base + GICD_IGROUPRnE + i / 8);

	for (i = 0; i < GIC_ESPI_NR; i += 16)
		writel_relaxed(0, base + GICD_ICFGRnE + i / 4);

	for (i = 0; i < GIC_ESPI_NR; i += 4)
		writel_relaxed(GICD_INT_DEF_PRI_X4, base + GICD_IPRIORITYRnE + i);

	/* Now do the common stuff */
	gic_dist_config(base, GIC_LINE_NR, NULL);

	val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
	if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
		pr_info("Enabling SGIs without active state\n");
		val |= GICD_CTLR_nASSGIreq;
	}

	/* Enable distributor with ARE, Group1, and wait for it to drain */
	writel_relaxed(val, base + GICD_CTLR);
	gic_v3_dist_wait_for_rwp();

	/*
	 * Set all global interrupts to the boot CPU only. ARE must be
	 * enabled.
	 */
	affinity = gic_cpu_to_affinity(smp_processor_id());
	for (i = 32; i < GIC_LINE_NR; i++) {
		trace_android_vh_gic_v3_affinity_init(i, GICD_IROUTER, &affinity);
		gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
	}

	for (i = 0; i < GIC_ESPI_NR; i++) {
		trace_android_vh_gic_v3_affinity_init(i, GICD_IROUTERnE, &affinity);
		gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8);
	}
}
EXPORT_SYMBOL_GPL(gic_v3_dist_init);

static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
{
	int ret = -ENODEV;
	int i;

	for (i = 0; i < gic_data.nr_redist_regions; i++) {
		void __iomem *ptr = gic_data.redist_regions[i].redist_base;
		u64 typer;
		u32 reg;

		reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
		if (reg != GIC_PIDR2_ARCH_GICv3 &&
		    reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
			pr_warn("No redistributor present @%p\n", ptr);
			break;
		}

		do {
			typer = gic_read_typer(ptr + GICR_TYPER);
			ret = fn(gic_data.redist_regions + i, ptr);
			if (!ret)
				return 0;

			if (gic_data.redist_regions[i].single_redist)
				break;

			if (gic_data.redist_stride) {
				ptr += gic_data.redist_stride;
			} else {
				ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
				if (typer & GICR_TYPER_VLPIS)
					ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
			}
		} while (!(typer & GICR_TYPER_LAST));
	}

	return ret ? -ENODEV : 0;
}

static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
{
	unsigned long mpidr;
	u64 typer;
	u32 aff;

	/*
	 * Convert affinity to a 32bit value that can be matched to
	 * GICR_TYPER bits [63:32].
	 */
	mpidr = gic_cpu_to_affinity(smp_processor_id());
	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	typer = gic_read_typer(ptr + GICR_TYPER);
	if ((typer >> 32) == aff) {
		u64 offset = ptr - region->redist_base;

		raw_spin_lock_init(&gic_data_rdist()->rd_lock);
		gic_data_rdist_rd_base() = ptr;
		gic_data_rdist()->phys_base = region->phys_base + offset;

		pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
			smp_processor_id(), mpidr,
			(int)(region - gic_data.redist_regions),
			&gic_data_rdist()->phys_base);
		return 0;
	}

	/* Try next one */
	return 1;
}

static int gic_populate_rdist(void)
{
	if (gic_iterate_rdists(__gic_populate_rdist) == 0)
		return 0;

	/* We couldn't even deal with ourselves... */
	WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
	     smp_processor_id(),
	     (unsigned long)cpu_logical_map(smp_processor_id()));
	return -ENODEV;
}

static int __gic_update_rdist_properties(struct redist_region *region,
					 void __iomem *ptr)
{
	u64 typer = gic_read_typer(ptr + GICR_TYPER);
	u32 ctlr = readl_relaxed(ptr + GICR_CTLR);

	/* Boot-time cleanup */
	if ((typer & GICR_TYPER_VLPIS) && (typer & GICR_TYPER_RVPEID)) {
		u64 val;

		/* Deactivate any present vPE */
		val = gicr_read_vpendbaser(ptr + SZ_128K + GICR_VPENDBASER);
		if (val & GICR_VPENDBASER_Valid)
			gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
					      ptr + SZ_128K + GICR_VPENDBASER);

		/* Mark the VPE table as invalid */
		val = gicr_read_vpropbaser(ptr + SZ_128K + GICR_VPROPBASER);
		val &= ~GICR_VPROPBASER_4_1_VALID;
		gicr_write_vpropbaser(val, ptr + SZ_128K + GICR_VPROPBASER);
	}

	gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);

	/*
	 * TYPER.RVPEID implies some form of DirectLPI, no matter what the
	 * doc says... :-/ And CTLR.IR implies another subset of DirectLPI
	 * that the ITS driver can make use of for LPIs (and not VLPIs).
	 *
	 * These are 3 different ways to express the same thing, depending
	 * on the revision of the architecture and its relaxations over
	 * time. Just group them under the 'direct_lpi' banner.
	 */
	gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID);
	gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) |
					   !!(ctlr & GICR_CTLR_IR) |
					   gic_data.rdists.has_rvpeid);
	gic_data.rdists.has_vpend_valid_dirty &= !!(typer & GICR_TYPER_DIRTY);

	/* Detect non-sensical configurations */
	if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) {
		gic_data.rdists.has_direct_lpi = false;
		gic_data.rdists.has_vlpis = false;
		gic_data.rdists.has_rvpeid = false;
	}

	gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr);

	return 1;
}

static void gic_update_rdist_properties(void)
{
	gic_data.ppi_nr = UINT_MAX;
	gic_iterate_rdists(__gic_update_rdist_properties);
	if (WARN_ON(gic_data.ppi_nr == UINT_MAX))
		gic_data.ppi_nr = 0;
	pr_info("GICv3 features: %d PPIs%s%s\n",
		gic_data.ppi_nr,
		gic_data.has_rss ? ", RSS" : "",
		gic_data.rdists.has_direct_lpi ? ", DirectLPI" : "");

	if (gic_data.rdists.has_vlpis)
		pr_info("GICv4 features: %s%s%s\n",
			gic_data.rdists.has_direct_lpi ? "DirectLPI " : "",
			gic_data.rdists.has_rvpeid ? "RVPEID " : "",
			gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : "");
}

/* Check whether it's single security state view */
static inline bool gic_dist_security_disabled(void)
{
	return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
}
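/* Per-CPU initialisation of the GIC CPU interface system registers */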
static void gic_cpu_sys_reg_init(void)
{
	int i, cpu = smp_processor_id();
	u64 mpidr = gic_cpu_to_affinity(cpu);
	u64 need_rss = MPIDR_RS(mpidr);
	bool group0;
	u32 pribits;

	/*
	 * Need to check that the SRE bit has actually been set. If
	 * not, it means that SRE is disabled at EL2. We're going to
	 * die painfully, and there is nothing we can do about it.
	 *
	 * Kindly inform the luser.
	 */
	if (!gic_enable_sre())
		pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");

	pribits = gic_get_pribits();

	group0 = gic_has_group0();

	/* Set priority mask register */
	if (!gic_prio_masking_enabled()) {
		write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
	} else if (gic_supports_nmi()) {
		/*
		 * Mismatch configuration with boot CPU, the system is likely
		 * to die as interrupt masking will not work properly on all
		 * CPUs
		 *
		 * The boot CPU calls this function before enabling NMI support,
		 * and as a result we'll never see this warning in the boot path
		 * for that CPU.
		 */
		if (static_branch_unlikely(&gic_nonsecure_priorities))
			WARN_ON(!group0 || gic_dist_security_disabled());
		else
			WARN_ON(group0 && !gic_dist_security_disabled());
	}

	/*
	 * Some firmwares hand over to the kernel with the BPR changed from
	 * its reset value (and with a value large enough to prevent
	 * any pre-emptive interrupts from working at all). Writing a zero
	 * to BPR restores its reset value.
	 */
	gic_write_bpr1(0);

	if (static_branch_likely(&supports_deactivate_key)) {
		/* EOI drops priority only (mode 1) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
	} else {
		/* EOI deactivates interrupt too (mode 0) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
	}

	/* Always whack Group0 before Group1 */
	if (group0) {
		switch (pribits) {
		case 8:
		case 7:
			write_gicreg(0, ICC_AP0R3_EL1);
			write_gicreg(0, ICC_AP0R2_EL1);
			fallthrough;
		case 6:
			write_gicreg(0, ICC_AP0R1_EL1);
			fallthrough;
		case 5:
		case 4:
			write_gicreg(0, ICC_AP0R0_EL1);
		}

		isb();
	}

	switch (pribits) {
	case 8:
	case 7:
		write_gicreg(0, ICC_AP1R3_EL1);
		write_gicreg(0, ICC_AP1R2_EL1);
		fallthrough;
	case 6:
		write_gicreg(0, ICC_AP1R1_EL1);
		fallthrough;
	case 5:
	case 4:
		write_gicreg(0, ICC_AP1R0_EL1);
	}

	isb();

	/* ... and let's hit the road... */
	gic_write_grpen1(1);

	/* Keep the RSS capability status in per_cpu variable */
	per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);

	/* Check that all the CPUs are capable of sending SGIs to other CPUs */
	for_each_online_cpu(i) {
		bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);

		need_rss |= MPIDR_RS(gic_cpu_to_affinity(i));
		if (need_rss && (!have_rss))
			pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
				cpu, (unsigned long)mpidr,
				i, (unsigned long)gic_cpu_to_affinity(i));
	}

	/*
	 * GIC spec says, when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0,
	 * writing ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED
	 * UNPREDICTABLE choice of:
	 * - The write is ignored.
	 * - The RS field is treated as 0.
	 */
	if (need_rss && (!gic_data.has_rss))
		pr_crit_once("RSS is required but GICD doesn't support it\n");
}

static bool gicv3_nolpi;
static int __init gicv3_nolpi_cfg(char *buf)
{
	return strtobool(buf, &gicv3_nolpi);
}
early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg);

static int gic_dist_supports_lpis(void)
{
	return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) &&
		!!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) &&
		!gicv3_nolpi);
}

void gic_v3_cpu_init(void)
{
	void __iomem *rbase;
	int i;

	/* Register ourselves with the rest of the world */
	if (gic_populate_rdist())
		return;

	gic_enable_redist(true);

	WARN((gic_data.ppi_nr > 16 || GIC_ESPI_NR != 0) &&
	     !(gic_read_ctlr() & ICC_CTLR_EL1_ExtRange),
	     "Distributor has extended ranges, but CPU%d doesn't\n",
	     smp_processor_id());

	rbase = gic_data_rdist_sgi_base();

	/* Configure SGIs/PPIs as non-secure Group-1 */
	for (i = 0; i < gic_data.ppi_nr + 16; i += 32)
		writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8);

	gic_cpu_config(rbase, gic_data.ppi_nr + 16, gic_redist_wait_for_rwp);

	/* initialise system registers */
	gic_cpu_sys_reg_init();
}
EXPORT_SYMBOL_GPL(gic_v3_cpu_init);

#ifdef CONFIG_SMP
#define MPIDR_TO_SGI_RS(mpidr)		(MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT)
#define MPIDR_TO_SGI_CLUSTER_ID(mpidr)	((mpidr) & ~0xFUL)

static int gic_starting_cpu(unsigned int cpu)
{
	gic_v3_cpu_init();

	if (gic_dist_supports_lpis())
		its_cpu_init();

	return 0;
}
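/*
 * Build the 16bit target list for ICC_SGI1R_EL1: one bit per Aff0
 * value within the current cluster, stopping at the first CPU in the
 * mask that belongs to a different cluster.
 */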
static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
				   unsigned long cluster_id)
{
	int next_cpu, cpu = *base_cpu;
	unsigned long mpidr;
	u16 tlist = 0;

	mpidr = gic_cpu_to_affinity(cpu);

	while (cpu < nr_cpu_ids) {
		tlist |= 1 << (mpidr & 0xf);

		next_cpu = cpumask_next(cpu, mask);
		if (next_cpu >= nr_cpu_ids)
			goto out;
		cpu = next_cpu;

		mpidr = gic_cpu_to_affinity(cpu);

		if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) {
			cpu--;
			goto out;
		}
	}
out:
	*base_cpu = cpu;
	return tlist;
}

#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
	(MPIDR_AFFINITY_LEVEL(cluster_id, level) \
		<< ICC_SGI1R_AFFINITY_## level ##_SHIFT)
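/* Assemble an ICC_SGI1R_EL1 value for one cluster's target list and fire the SGI */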
static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
{
	u64 val;

	val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3)	|
	       MPIDR_TO_SGI_AFFINITY(cluster_id, 2)	|
	       irq << ICC_SGI1R_SGI_ID_SHIFT		|
	       MPIDR_TO_SGI_AFFINITY(cluster_id, 1)	|
	       MPIDR_TO_SGI_RS(cluster_id)		|
	       tlist << ICC_SGI1R_TARGET_LIST_SHIFT);

	pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
	gic_write_sgi1r(val);
}

static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
{
	int cpu;

	if (WARN_ON(d->hwirq >= 16))
		return;

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	dsb(ishst);

	for_each_cpu(cpu, mask) {
		u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(gic_cpu_to_affinity(cpu));
		u16 tlist;

		tlist = gic_compute_target_list(&cpu, mask, cluster_id);
		gic_send_sgi(cluster_id, tlist, d->hwirq);
	}

	/* Force the above writes to ICC_SGI1R_EL1 to be executed */
	isb();
}

static void __init gic_smp_init(void)
{
	struct irq_fwspec sgi_fwspec = {
		.fwnode		= gic_data.fwnode,
		.param_count	= 1,
	};
	int base_sgi;

	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
				  "irqchip/arm/gicv3:starting",
				  gic_starting_cpu, NULL);

	/* Register all 8 non-secure SGIs */
	base_sgi = __irq_domain_alloc_irqs(gic_data.domain, -1, 8,
					   NUMA_NO_NODE, &sgi_fwspec,
					   false, NULL);
	if (WARN_ON(base_sgi <= 0))
		return;

	set_smp_ipi_range(base_sgi, 8);
}
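/*
 * SPI/ESPI routing is changed by rewriting the corresponding
 * GICD_IROUTER(nE) register, with the interrupt masked across the
 * update if it was enabled.
 */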
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu;
	u32 offset, index;
	void __iomem *reg;
	int enabled;
	u64 val;

	if (force)
		cpu = cpumask_first(mask_val);
	else
		cpu = cpumask_any_and(mask_val, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	if (gic_irq_in_rdist(d))
		return -EINVAL;

	/* If interrupt was enabled, disable it first */
	enabled = gic_peek_irq(d, GICD_ISENABLER);
	if (enabled)
		gic_mask_irq(d);

	offset = convert_offset_index(d, GICD_IROUTER, &index);
	reg = gic_dist_base(d) + offset + (index * 8);
	val = gic_cpu_to_affinity(cpu);

	trace_android_rvh_gic_v3_set_affinity(d, mask_val, &val, force, gic_dist_base(d),
					      gic_data.redist_regions[0].redist_base,
					      gic_data.redist_stride);
	gic_write_irouter(val, reg);

	/*
	 * If the interrupt was enabled, enable it again. Otherwise,
	 * just wait for the distributor to have digested our changes.
	 */
	if (enabled)
		gic_unmask_irq(d);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK_DONE;
}
#else
#define gic_set_affinity	NULL
#define gic_ipi_send_mask	NULL
#define gic_smp_init()		do { } while (0)
#endif
static int gic_retrigger(struct irq_data *data)
{
	return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true);
}

#ifdef CONFIG_CPU_PM
static int gic_cpu_pm_notifier(struct notifier_block *self,
			       unsigned long cmd, void *v)
{
	if (cmd == CPU_PM_EXIT) {
		if (gic_dist_security_disabled())
			gic_enable_redist(true);
		gic_cpu_sys_reg_init();
	} else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
		gic_write_grpen1(0);
		gic_enable_redist(false);
	}
	return NOTIFY_OK;
}

static struct notifier_block gic_cpu_pm_notifier_block = {
	.notifier_call = gic_cpu_pm_notifier,
};

static void gic_cpu_pm_init(void)
{
	cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
}
#else
static inline void gic_cpu_pm_init(void) { }
#endif /* CONFIG_CPU_PM */

#ifdef CONFIG_PM
void gic_v3_resume(void)
{
	trace_android_vh_gic_resume(&gic_data);
}
EXPORT_SYMBOL_GPL(gic_v3_resume);

static int gic_v3_suspend(void)
{
	trace_android_vh_gic_v3_suspend(&gic_data);
	return 0;
}

static struct syscore_ops gic_syscore_ops = {
	.resume = gic_v3_resume,
	.suspend = gic_v3_suspend,
};

static void gic_syscore_init(void)
{
	register_syscore_ops(&gic_syscore_ops);
}
#else
static inline void gic_syscore_init(void) { }
void gic_v3_resume(void) { }
static int gic_v3_suspend(void) { return 0; }
#endif

static struct irq_chip gic_chip = {
	.name			= "GICv3",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_set_affinity	= gic_set_affinity,
	.irq_retrigger		= gic_retrigger,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.irq_nmi_setup		= gic_irq_nmi_setup,
	.irq_nmi_teardown	= gic_irq_nmi_teardown,
	.ipi_send_mask		= gic_ipi_send_mask,
	.flags			= IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};

static struct irq_chip gic_eoimode1_chip = {
	.name			= "GICv3",
	.irq_mask		= gic_eoimode1_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoimode1_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_set_affinity	= gic_set_affinity,
	.irq_retrigger		= gic_retrigger,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.irq_set_vcpu_affinity	= gic_irq_set_vcpu_affinity,
	.irq_nmi_setup		= gic_irq_nmi_setup,
	.irq_nmi_teardown	= gic_irq_nmi_teardown,
	.ipi_send_mask		= gic_ipi_send_mask,
	.flags			= IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};

static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hw)
{
	struct irq_chip *chip = &gic_chip;
	struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq));

	if (static_branch_likely(&supports_deactivate_key))
		chip = &gic_eoimode1_chip;

	switch (__get_intid_range(hw)) {
	case SGI_RANGE:
	case PPI_RANGE:
	case EPPI_RANGE:
		irq_set_percpu_devid(irq);
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
		break;
	case SPI_RANGE:
	case ESPI_RANGE:
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		irq_set_probe(irq);
		irqd_set_single_target(irqd);
		break;
	case LPI_RANGE:
		if (!gic_dist_supports_lpis())
			return -EPERM;
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		break;
	default:
		return -EPERM;
	}

	/* Prevents SW retriggers which mess up the ACK/EOI ordering */
	irqd_set_handle_enforce_irqctx(irqd);
	return 0;
}
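/*
 * Decode an irq_fwspec into (hwirq, type): a single cell below 16 is
 * an SGI, DT uses the 3-cell binding (0=SPI, 1=PPI, 2=ESPI, 3=EPPI,
 * plus the LPI/partition special types), and other fwnodes (e.g. ACPI)
 * pass a pre-translated (INTID, type) pair.
 */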
static int gic_irq_domain_translate(struct irq_domain *d,
				    struct irq_fwspec *fwspec,
				    unsigned long *hwirq,
				    unsigned int *type)
{
	if (fwspec->param_count == 1 && fwspec->param[0] < 16) {
		*hwirq = fwspec->param[0];
		*type = IRQ_TYPE_EDGE_RISING;
		return 0;
	}

	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count < 3)
			return -EINVAL;

		switch (fwspec->param[0]) {
		case 0:			/* SPI */
			*hwirq = fwspec->param[1] + 32;
			break;
		case 1:			/* PPI */
			*hwirq = fwspec->param[1] + 16;
			break;
		case 2:			/* ESPI */
			*hwirq = fwspec->param[1] + ESPI_BASE_INTID;
			break;
		case 3:			/* EPPI */
			*hwirq = fwspec->param[1] + EPPI_BASE_INTID;
			break;
		case GIC_IRQ_TYPE_LPI:	/* LPI */
			*hwirq = fwspec->param[1];
			break;
		case GIC_IRQ_TYPE_PARTITION:
			*hwirq = fwspec->param[1];
			if (fwspec->param[1] >= 16)
				*hwirq += EPPI_BASE_INTID - 16;
			else
				*hwirq += 16;
			break;
		default:
			return -EINVAL;
		}

		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;

		/*
		 * Make it clear that broken DTs are... broken.
		 * Partitioned PPIs are an unfortunate exception.
		 */
		WARN_ON(*type == IRQ_TYPE_NONE &&
			fwspec->param[0] != GIC_IRQ_TYPE_PARTITION);
		return 0;
	}

	if (is_fwnode_irqchip(fwspec->fwnode)) {
		if (fwspec->param_count != 2)
			return -EINVAL;

		if (fwspec->param[0] < 16) {
			pr_err(FW_BUG "Illegal GSI%d translation request\n",
			       fwspec->param[0]);
			return -EINVAL;
		}

		*hwirq = fwspec->param[0];
		*type = fwspec->param[1];

		WARN_ON(*type == IRQ_TYPE_NONE);
		return 0;
	}

	return -EINVAL;
}
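
/*
 * Allocate nr_irqs consecutive virq/hwirq pairs: translate the fwspec
 * once, then map each interrupt in turn.
 */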
static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	struct irq_fwspec *fwspec = arg;

	ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
		if (ret)
			return ret;
	}

	return 0;
}

static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

		irq_set_handler(virq + i, NULL);
		irq_domain_reset_irq_data(d);
	}
}
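
/*
 * A partitioned PPI is a DT-described (E)PPI whose fourth cell carries
 * a non-null partition phandle; it can only exist if PPI partitions
 * were declared at boot.
 */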
static bool fwspec_is_partitioned_ppi(struct irq_fwspec *fwspec,
				      irq_hw_number_t hwirq)
{
	enum gic_intid_range range;

	if (!gic_data.ppi_descs)
		return false;

	if (!is_of_node(fwspec->fwnode))
		return false;

	if (fwspec->param_count < 4 || !fwspec->param[3])
		return false;

	range = __get_intid_range(hwirq);
	if (range != PPI_RANGE && range != EPPI_RANGE)
		return false;

	return true;
}

static int gic_irq_domain_select(struct irq_domain *d,
				 struct irq_fwspec *fwspec,
				 enum irq_domain_bus_token bus_token)
{
	unsigned int type, ret, ppi_idx;
	irq_hw_number_t hwirq;

	/* Not for us */
	if (fwspec->fwnode != d->fwnode)
		return 0;

	/* If this is not DT, then we have a single domain */
	if (!is_of_node(fwspec->fwnode))
		return 1;

	ret = gic_irq_domain_translate(d, fwspec, &hwirq, &type);
	if (WARN_ON_ONCE(ret))
		return 0;

	if (!fwspec_is_partitioned_ppi(fwspec, hwirq))
		return d == gic_data.domain;

	/*
	 * If this is a PPI and we have a 4th (non-null) parameter,
	 * then we need to match the partition domain.
	 */
	ppi_idx = __gic_get_ppi_index(hwirq);
	return d == partition_get_domain(gic_data.ppi_descs[ppi_idx]);
}

static const struct irq_domain_ops gic_irq_domain_ops = {
	.translate = gic_irq_domain_translate,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
	.select = gic_irq_domain_select,
};
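
/*
 * Translate a partitioned PPI: resolve the partition phandle from the
 * fourth cell and let the partition layer map (PPI, partition) onto a
 * hwirq within the partition domain.
 */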
static int partition_domain_translate(struct irq_domain *d,
				      struct irq_fwspec *fwspec,
				      unsigned long *hwirq,
				      unsigned int *type)
{
	unsigned long ppi_intid;
	struct device_node *np;
	unsigned int ppi_idx;
	int ret;

	if (!gic_data.ppi_descs)
		return -ENOMEM;

	np = of_find_node_by_phandle(fwspec->param[3]);
	if (WARN_ON(!np))
		return -EINVAL;

	ret = gic_irq_domain_translate(d, fwspec, &ppi_intid, type);
	if (WARN_ON_ONCE(ret))
		return 0;

	ppi_idx = __gic_get_ppi_index(ppi_intid);
	ret = partition_translate_id(gic_data.ppi_descs[ppi_idx],
				     of_node_to_fwnode(np));
	if (ret < 0)
		return ret;

	*hwirq = ret;
	*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}

static const struct irq_domain_ops partition_domain_ops = {
	.translate = partition_domain_translate,
	.select = gic_irq_domain_select,
};

static bool gic_enable_quirk_msm8996(void *data)
{
	struct gic_chip_data_v3 *d = data;

	d->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996;

	return true;
}

static bool gic_enable_quirk_mtk_gicr(void *data)
{
	struct gic_chip_data_v3 *d = data;

	d->flags |= FLAGS_WORKAROUND_MTK_GICR_SAVE;

	return true;
}

static bool gic_enable_quirk_cavium_38539(void *data)
{
	struct gic_chip_data_v3 *d = data;

	d->flags |= FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539;

	return true;
}

static bool gic_enable_quirk_hip06_07(void *data)
{
	struct gic_chip_data_v3 *d = data;

	/*
	 * HIP06 GICD_IIDR clashes with GIC-600 product number (despite
	 * not being an actual ARM implementation). The saving grace is
	 * that GIC-600 doesn't have ESPI, so nothing to do in that case.
	 * HIP07 doesn't even have a proper IIDR, and still pretends to
	 * have ESPI. In both cases, put them right.
	 */
	if (d->rdists.gicd_typer & GICD_TYPER_ESPI) {
		/* Zero both ESPI and the RES0 field next to it... */
		d->rdists.gicd_typer &= ~GENMASK(9, 8);
		return true;
	}

	return false;
}

static bool gic_enable_quirk_arm64_2941627(void *data)
{
	static_branch_enable(&gic_arm64_2941627_erratum);
	return true;
}

static bool gic_enable_quirk_asr8601(void *data)
{
	struct gic_chip_data_v3 *d = data;

	d->flags |= FLAGS_WORKAROUND_ASR_ERRATUM_8601001;

	return true;
}

static const struct gic_quirk gic_quirks[] = {
	{
		.desc = "GICv3: Qualcomm MSM8996 broken firmware",
		.compatible = "qcom,msm8996-gic-v3",
		.init = gic_enable_quirk_msm8996,
	},
	{
		.desc = "GICv3: Mediatek Chromebook GICR save problem",
		.property = "mediatek,broken-save-restore-fw",
		.init = gic_enable_quirk_mtk_gicr,
	},
	{
		.desc = "GICv3: ASR erratum 8601001",
		.compatible = "asr,asr8601-gic-v3",
		.init = gic_enable_quirk_asr8601,
	},
	{
		.desc = "GICv3: HIP06 erratum 161010803",
		.iidr = 0x0204043b,
		.mask = 0xffffffff,
		.init = gic_enable_quirk_hip06_07,
	},
	{
		.desc = "GICv3: HIP07 erratum 161010803",
		.iidr = 0x00000000,
		.mask = 0xffffffff,
		.init = gic_enable_quirk_hip06_07,
	},
	{
		/*
		 * Reserved register accesses generate a Synchronous
		 * External Abort. This erratum applies to:
		 * - ThunderX: CN88xx
		 * - OCTEON TX: CN83xx, CN81xx
		 * - OCTEON TX2: CN93xx, CN96xx, CN98xx, CNF95xx*
		 */
		.desc = "GICv3: Cavium erratum 38539",
		.iidr = 0xa000034c,
		.mask = 0xe8f00fff,
		.init = gic_enable_quirk_cavium_38539,
	},
	{
		/* GIC-700: 2941627 workaround - IP variant [0,1] */
		.desc = "GICv3: ARM64 erratum 2941627",
		.iidr = 0x0400043b,
		.mask = 0xff0e0fff,
		.init = gic_enable_quirk_arm64_2941627,
	},
	{
		/* GIC-700: 2941627 workaround - IP variant [2] */
		.desc = "GICv3: ARM64 erratum 2941627",
		.iidr = 0x0402043b,
		.mask = 0xff0f0fff,
		.init = gic_enable_quirk_arm64_2941627,
	},
	{
	}
};
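
/*
 * Enable pseudo-NMI support, provided priority masking is in use and
 * the firmware does not suffer from the MTK GICR save/restore problem.
 */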
static void gic_enable_nmi_support(void)
{
	int i;

	if (!gic_prio_masking_enabled())
		return;

	if (gic_data.flags & FLAGS_WORKAROUND_MTK_GICR_SAVE) {
		pr_warn("Skipping NMI enable due to firmware issues\n");
		return;
	}

	ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL);
	if (!ppi_nmi_refs)
		return;

	for (i = 0; i < gic_data.ppi_nr; i++)
		refcount_set(&ppi_nmi_refs[i], 0);

	/*
	 * Linux itself doesn't use 1:N distribution, so has no need to
	 * set PMHE. The only reason to have it set is if EL3 requires it
	 * (and we can't change it).
	 */
	if (gic_read_ctlr() & ICC_CTLR_EL1_PMHE_MASK)
		static_branch_enable(&gic_pmr_sync);

	pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n",
		static_branch_unlikely(&gic_pmr_sync) ? "forced" : "relaxed");

	/*
	 * How priority values are used by the GIC depends on two things:
	 * the security state of the GIC (controlled by the GICD_CTRL.DS bit)
	 * and if Group 0 interrupts can be delivered to Linux in the non-secure
	 * world as FIQs (controlled by the SCR_EL3.FIQ bit). These affect the
	 * ICC_PMR_EL1 register and the priority that software assigns to
	 * interrupts:
	 *
	 * GICD_CTRL.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Group 1 priority
	 * -----------------------------------------------------------
	 *      1       |      -      |  unchanged  |    unchanged
	 * -----------------------------------------------------------
	 *      0       |      1      | non-secure  |   non-secure
	 * -----------------------------------------------------------
	 *      0       |      0      |  unchanged  |   non-secure
	 *
	 * where non-secure means that the value is right-shifted by one and the
	 * MSB bit set, to make it fit in the non-secure priority range.
	 *
	 * In the first two cases, where ICC_PMR_EL1 and the interrupt priority
	 * are both either modified or unchanged, we can use the same set of
	 * priorities.
	 *
	 * In the last case, where only the interrupt priorities are modified to
	 * be in the non-secure range, we use a different PMR value to mask IRQs
	 * and the rest of the values that we use remain unchanged.
	 */
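
	/*
	 * Worked example (assuming 8 implemented priority bits): a priority
	 * of 0x80 in the "unchanged" view becomes (0x80 >> 1) | 0x80 = 0xc0
	 * once squeezed into the non-secure range.
	 */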
	if (gic_has_group0() && !gic_dist_security_disabled())
		static_branch_enable(&gic_nonsecure_priorities);

	static_branch_enable(&supports_pseudo_nmis);

	if (static_branch_likely(&supports_deactivate_key))
		gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI;
	else
		gic_chip.flags |= IRQCHIP_SUPPORTS_NMI;
}
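
/*
 * Common init path for both DT and ACPI probing: record the distributor
 * and redistributor regions, apply quirks, create the IRQ domain, then
 * bring up the distributor, the boot CPU's interface, SMP/PM hooks and,
 * where available, the ITS or GICv2m MSI layer.
 */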
static int __init gic_init_bases(void __iomem *dist_base,
				 struct redist_region *rdist_regs,
				 u32 nr_redist_regions,
				 u64 redist_stride,
				 struct fwnode_handle *handle)
{
	u32 typer;
	int err;

	if (!is_hyp_mode_available())
		static_branch_disable(&supports_deactivate_key);

	if (static_branch_likely(&supports_deactivate_key))
		pr_info("GIC: Using split EOI/Deactivate mode\n");

	gic_data.fwnode = handle;
	gic_data.dist_base = dist_base;
	gic_data.redist_regions = rdist_regs;
	gic_data.nr_redist_regions = nr_redist_regions;
	gic_data.redist_stride = redist_stride;

	/*
	 * Find out how many interrupts are supported.
	 */
	typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
	gic_data.rdists.gicd_typer = typer;

	gic_enable_quirks(readl_relaxed(gic_data.dist_base + GICD_IIDR),
			  gic_quirks, &gic_data);

	pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32);
	pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR);

	/*
	 * ThunderX1 explodes on reading GICD_TYPER2, in violation of the
	 * architecture spec (which says that reserved registers are RES0).
	 */
	if (!(gic_data.flags & FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539))
		gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2);

	gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
						 &gic_data);
	gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
	gic_data.rdists.has_rvpeid = true;
	gic_data.rdists.has_vlpis = true;
	gic_data.rdists.has_direct_lpi = true;
	gic_data.rdists.has_vpend_valid_dirty = true;

	if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
		err = -ENOMEM;
		goto out_free;
	}

	irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);

	gic_data.has_rss = !!(typer & GICD_TYPER_RSS);

	if (typer & GICD_TYPER_MBIS) {
		err = mbi_init(handle, gic_data.domain);
		if (err)
			pr_err("Failed to initialize MBIs\n");
	}

	set_handle_irq(gic_handle_irq);

	gic_update_rdist_properties();

	gic_v3_dist_init();
	gic_v3_cpu_init();
	gic_smp_init();
	gic_cpu_pm_init();
	gic_syscore_init();

	if (gic_dist_supports_lpis()) {
		its_init(handle, &gic_data.rdists, gic_data.domain);
		its_cpu_init();
		its_lpi_memreserve_init();
	} else {
		if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
			gicv2m_init(handle, gic_data.domain);
	}

	gic_enable_nmi_support();

	return 0;

out_free:
	if (gic_data.domain)
		irq_domain_remove(gic_data.domain);
	free_percpu(gic_data.rdists.rdist);
	return err;
}
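
/* Check GICD_PIDR2 to confirm this really is a GICv3/v4 distributor. */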
static int __init gic_validate_dist_version(void __iomem *dist_base)
{
	u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;

	if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4)
		return -ENODEV;

	return 0;
}

/* Create all possible partitions at boot time */
static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
{
	struct device_node *parts_node, *child_part;
	int part_idx = 0, i;
	int nr_parts;
	struct partition_affinity *parts;

	parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
	if (!parts_node)
		return;

	gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL);
	if (!gic_data.ppi_descs)
		goto out_put_node;

	nr_parts = of_get_child_count(parts_node);
	if (!nr_parts)
		goto out_put_node;

	parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL);
	if (WARN_ON(!parts))
		goto out_put_node;

	for_each_child_of_node(parts_node, child_part) {
		struct partition_affinity *part;
		int n;

		part = &parts[part_idx];

		part->partition_id = of_node_to_fwnode(child_part);

		pr_info("GIC: PPI partition %pOFn[%d] { ",
			child_part, part_idx);

		n = of_property_count_elems_of_size(child_part, "affinity",
						    sizeof(u32));
		WARN_ON(n <= 0);

		for (i = 0; i < n; i++) {
			int err, cpu;
			u32 cpu_phandle;
			struct device_node *cpu_node;

			err = of_property_read_u32_index(child_part, "affinity",
							 i, &cpu_phandle);
			if (WARN_ON(err))
				continue;

			cpu_node = of_find_node_by_phandle(cpu_phandle);
			if (WARN_ON(!cpu_node))
				continue;

			cpu = of_cpu_node_to_id(cpu_node);
			if (WARN_ON(cpu < 0)) {
				of_node_put(cpu_node);
				continue;
			}

			pr_cont("%pOF[%d] ", cpu_node, cpu);

			cpumask_set_cpu(cpu, &part->mask);
			of_node_put(cpu_node);
		}

		pr_cont("}\n");
		part_idx++;
	}

	for (i = 0; i < gic_data.ppi_nr; i++) {
		unsigned int irq;
		struct partition_desc *desc;
		struct irq_fwspec ppi_fwspec = {
			.fwnode = gic_data.fwnode,
			.param_count = 3,
			.param = {
				[0] = GIC_IRQ_TYPE_PARTITION,
				[1] = i,
				[2] = IRQ_TYPE_NONE,
			},
		};

		irq = irq_create_fwspec_mapping(&ppi_fwspec);
		if (WARN_ON(!irq))
			continue;
		desc = partition_create_desc(gic_data.fwnode, parts, nr_parts,
					     irq, &partition_domain_ops);
		if (WARN_ON(!desc))
			continue;

		gic_data.ppi_descs[i] = desc;
	}

out_put_node:
	of_node_put(parts_node);
}
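
/*
 * Gather what KVM needs from DT: the maintenance interrupt and, when
 * present, the GICV region that follows the GICD/GICC/GICH and
 * redistributor regions in the node's "reg" list.
 */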
static void __init gic_of_setup_kvm_info(struct device_node *node)
{
	int ret;
	struct resource r;
	u32 gicv_idx;

	gic_v3_kvm_info.type = GIC_V3;

	gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
	if (!gic_v3_kvm_info.maint_irq)
		return;

	if (of_property_read_u32(node, "#redistributor-regions",
				 &gicv_idx))
		gicv_idx = 1;

	gicv_idx += 3;	/* Also skip GICD, GICC, GICH */
	ret = of_address_to_resource(node, gicv_idx, &r);
	if (!ret)
		gic_v3_kvm_info.vcpu = r;

	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
	vgic_set_kvm_info(&gic_v3_kvm_info);
}

static void gic_request_region(resource_size_t base, resource_size_t size,
			       const char *name)
{
	if (!request_mem_region(base, size, name))
		pr_warn_once(FW_BUG "%s region %pa has overlapping address\n",
			     name, &base);
}
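
/*
 * Resolve, claim and map one register region of a DT node, returning an
 * ERR_PTR-encoded cookie on failure.
 */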
static void __iomem *gic_of_iomap(struct device_node *node, int idx,
				  const char *name, struct resource *res)
{
	void __iomem *base;
	int ret;

	ret = of_address_to_resource(node, idx, res);
	if (ret)
		return IOMEM_ERR_PTR(ret);

	gic_request_region(res->start, resource_size(res), name);
	base = of_iomap(node, idx);

	return base ?: IOMEM_ERR_PTR(-ENOMEM);
}
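
/*
 * DT probe entry point: map and validate the distributor, map every
 * redistributor region, apply DT-based quirks, then hand over to
 * gic_init_bases().
 */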
static int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
	void __iomem *dist_base;
	struct redist_region *rdist_regs;
	struct resource res;
	u64 redist_stride;
	u32 nr_redist_regions;
	int err, i;

	dist_base = gic_of_iomap(node, 0, "GICD", &res);
	if (IS_ERR(dist_base)) {
		pr_err("%pOF: unable to map gic dist registers\n", node);
		return PTR_ERR(dist_base);
	}

	err = gic_validate_dist_version(dist_base);
	if (err) {
		pr_err("%pOF: no distributor detected, giving up\n", node);
		goto out_unmap_dist;
	}

	if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
		nr_redist_regions = 1;

	rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs),
			     GFP_KERNEL);
	if (!rdist_regs) {
		err = -ENOMEM;
		goto out_unmap_dist;
	}

	for (i = 0; i < nr_redist_regions; i++) {
		rdist_regs[i].redist_base = gic_of_iomap(node, 1 + i, "GICR", &res);
		if (IS_ERR(rdist_regs[i].redist_base)) {
			pr_err("%pOF: couldn't map region %d\n", node, i);
			err = -ENODEV;
			goto out_unmap_rdist;
		}
		rdist_regs[i].phys_base = res.start;
	}

	if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
		redist_stride = 0;

	gic_enable_of_quirks(node, gic_quirks, &gic_data);

	err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions,
			     redist_stride, &node->fwnode);
	if (err)
		goto out_unmap_rdist;

	gic_populate_ppi_partitions(node);

	if (static_branch_likely(&supports_deactivate_key))
		gic_of_setup_kvm_info(node);
	return 0;

out_unmap_rdist:
	for (i = 0; i < nr_redist_regions; i++)
		if (rdist_regs[i].redist_base && !IS_ERR(rdist_regs[i].redist_base))
			iounmap(rdist_regs[i].redist_base);
	kfree(rdist_regs);
out_unmap_dist:
	iounmap(dist_base);
	return err;
}

IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);

#ifdef CONFIG_ACPI
static struct
{
	void __iomem *dist_base;
	struct redist_region *redist_regs;
	u32 nr_redist_regions;
	bool single_redist;
	int enabled_rdists;
	u32 maint_irq;
	int maint_irq_mode;
	phys_addr_t vcpu_base;
} acpi_data __initdata;
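
/* Record one mapped redistributor region in the next free acpi_data slot. */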
static void __init
gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
{
	static int count = 0;

	acpi_data.redist_regs[count].phys_base = phys_base;
	acpi_data.redist_regs[count].redist_base = redist_base;
	acpi_data.redist_regs[count].single_redist = acpi_data.single_redist;
	count++;
}

static int __init
gic_acpi_parse_madt_redist(union acpi_subtable_headers *header,
			   const unsigned long end)
{
	struct acpi_madt_generic_redistributor *redist =
			(struct acpi_madt_generic_redistributor *)header;
	void __iomem *redist_base;

	redist_base = ioremap(redist->base_address, redist->length);
	if (!redist_base) {
		pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
		return -ENOMEM;
	}
	gic_request_region(redist->base_address, redist->length, "GICR");

	gic_acpi_register_redist(redist->base_address, redist_base);
	return 0;
}

static int __init
gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header,
			 const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
				(struct acpi_madt_generic_interrupt *)header;
	u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
	u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
	void __iomem *redist_base;

	/* A GICC entry with !ACPI_MADT_ENABLED is unusable, so skip it */
	if (!(gicc->flags & ACPI_MADT_ENABLED))
		return 0;

	redist_base = ioremap(gicc->gicr_base_address, size);
	if (!redist_base)
		return -ENOMEM;
	gic_request_region(gicc->gicr_base_address, size, "GICR");

	gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
	return 0;
}
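
/*
 * Map the redistributor bases, either from GICR subtables or, for
 * single-redistributor layouts, from the per-CPU GICC entries.
 */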
static int __init gic_acpi_collect_gicr_base(void)
{
	acpi_tbl_entry_handler redist_parser;
	enum acpi_madt_type type;

	if (acpi_data.single_redist) {
		type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
		redist_parser = gic_acpi_parse_madt_gicc;
	} else {
		type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
		redist_parser = gic_acpi_parse_madt_redist;
	}

	/* Collect redistributor base addresses in GICR entries */
	if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
		return 0;

	pr_info("No valid GICR entries exist\n");
	return -ENODEV;
}

static int __init gic_acpi_match_gicr(union acpi_subtable_headers *header,
				      const unsigned long end)
{
	/* Subtable presence means that redist exists, that's it */
	return 0;
}

static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header,
				      const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
				(struct acpi_madt_generic_interrupt *)header;

	/*
	 * If the GICC entry is enabled and has a valid GICR base address,
	 * then the GICR base is presented via GICC.
	 */
	if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) {
		acpi_data.enabled_rdists++;
		return 0;
	}

	/*
	 * It is perfectly valid for firmware to pass a disabled GICC entry;
	 * the driver should not treat it as an error. Skip the entry rather
	 * than failing the probe.
	 */
	if (!(gicc->flags & ACPI_MADT_ENABLED))
		return 0;

	return -ENODEV;
}

static int __init gic_acpi_count_gicr_regions(void)
{
	int count;

	/*
	 * Count how many redistributor regions we have. Mixing the two
	 * descriptions is not allowed: GICR and GICC subtables have to be
	 * mutually exclusive.
	 */
	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
				      gic_acpi_match_gicr, 0);
	if (count > 0) {
		acpi_data.single_redist = false;
		return count;
	}

	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      gic_acpi_match_gicc, 0);
	if (count > 0) {
		acpi_data.single_redist = true;
		count = acpi_data.enabled_rdists;
	}

	return count;
}

static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
					   struct acpi_probe_entry *ape)
{
	struct acpi_madt_generic_distributor *dist;
	int count;

	dist = (struct acpi_madt_generic_distributor *)header;
	if (dist->version != ape->driver_data)
		return false;

	/* We need to do that exercise anyway, the sooner the better */
	count = gic_acpi_count_gicr_regions();
	if (count <= 0)
		return false;

	acpi_data.nr_redist_regions = count;
	return true;
}

static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *header,
						const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
		(struct acpi_madt_generic_interrupt *)header;
	int maint_irq_mode;
	static bool first_madt = true;

	/* Skip unusable CPUs */
	if (!(gicc->flags & ACPI_MADT_ENABLED))
		return 0;

	maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
		ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;

	if (first_madt) {
		first_madt = false;

		acpi_data.maint_irq = gicc->vgic_interrupt;
		acpi_data.maint_irq_mode = maint_irq_mode;
		acpi_data.vcpu_base = gicc->gicv_base_address;

		return 0;
	}

	/*
	 * The maintenance interrupt and GICV must be the same for every CPU.
	 */
	if ((acpi_data.maint_irq != gicc->vgic_interrupt) ||
	    (acpi_data.maint_irq_mode != maint_irq_mode) ||
	    (acpi_data.vcpu_base != gicc->gicv_base_address))
		return -EINVAL;

	return 0;
}

static bool __init gic_acpi_collect_virt_info(void)
{
	int count;

	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      gic_acpi_parse_virt_madt_gicc, 0);

	return (count > 0);
}

#define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K)
#define ACPI_GICV2_VCTRL_MEM_SIZE (SZ_4K)
#define ACPI_GICV2_VCPU_MEM_SIZE (SZ_8K)
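
/*
 * ACPI counterpart of gic_of_setup_kvm_info(): gather the maintenance
 * interrupt and GICV base from the MADT and publish them to KVM.
 */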
static void __init gic_acpi_setup_kvm_info(void)
{
	int irq;

	if (!gic_acpi_collect_virt_info()) {
		pr_warn("Unable to get hardware information used for virtualization\n");
		return;
	}

	gic_v3_kvm_info.type = GIC_V3;

	irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
				acpi_data.maint_irq_mode,
				ACPI_ACTIVE_HIGH);
	if (irq <= 0)
		return;

	gic_v3_kvm_info.maint_irq = irq;

	if (acpi_data.vcpu_base) {
		struct resource *vcpu = &gic_v3_kvm_info.vcpu;

		vcpu->flags = IORESOURCE_MEM;
		vcpu->start = acpi_data.vcpu_base;
		vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
	}

	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
	vgic_set_kvm_info(&gic_v3_kvm_info);
}

static struct fwnode_handle *gsi_domain_handle;

static struct fwnode_handle *gic_v3_get_gsi_domain_id(u32 gsi)
{
	return gsi_domain_handle;
}
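
/*
 * ACPI probe entry point: map the distributor described by the MADT,
 * collect the redistributor regions counted earlier, allocate a fwnode
 * for the GSI domain and hand over to gic_init_bases().
 */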
static int __init
gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
{
	struct acpi_madt_generic_distributor *dist;
	size_t size;
	int i, err;

	/* Get distributor base address */
	dist = (struct acpi_madt_generic_distributor *)header;
	acpi_data.dist_base = ioremap(dist->base_address,
				      ACPI_GICV3_DIST_MEM_SIZE);
	if (!acpi_data.dist_base) {
		pr_err("Unable to map GICD registers\n");
		return -ENOMEM;
	}
	gic_request_region(dist->base_address, ACPI_GICV3_DIST_MEM_SIZE, "GICD");

	err = gic_validate_dist_version(acpi_data.dist_base);
	if (err) {
		pr_err("No distributor detected at @%p, giving up\n",
		       acpi_data.dist_base);
		goto out_dist_unmap;
	}

	size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions;
	acpi_data.redist_regs = kzalloc(size, GFP_KERNEL);
	if (!acpi_data.redist_regs) {
		err = -ENOMEM;
		goto out_dist_unmap;
	}

	err = gic_acpi_collect_gicr_base();
	if (err)
		goto out_redist_unmap;

	gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
	if (!gsi_domain_handle) {
		err = -ENOMEM;
		goto out_redist_unmap;
	}

	err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs,
			     acpi_data.nr_redist_regions, 0, gsi_domain_handle);
	if (err)
		goto out_fwhandle_free;

	acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, gic_v3_get_gsi_domain_id);

	if (static_branch_likely(&supports_deactivate_key))
		gic_acpi_setup_kvm_info();

	return 0;

out_fwhandle_free:
	irq_domain_free_fwnode(gsi_domain_handle);
out_redist_unmap:
	for (i = 0; i < acpi_data.nr_redist_regions; i++)
		if (acpi_data.redist_regs[i].redist_base)
			iounmap(acpi_data.redist_regs[i].redist_base);
	kfree(acpi_data.redist_regs);
out_dist_unmap:
	iounmap(acpi_data.dist_base);
	return err;
}

IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3,
		     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4,
		     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE,
		     gic_acpi_init);
#endif