  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
  4. * Copyright (C) 2005-2006 Thomas Gleixner
  5. *
  6. * This file contains driver APIs to the irq subsystem.
  7. */
  8. #define pr_fmt(fmt) "genirq: " fmt
  9. #include <linux/irq.h>
  10. #include <linux/kthread.h>
  11. #include <linux/module.h>
  12. #include <linux/random.h>
  13. #include <linux/interrupt.h>
  14. #include <linux/irqdomain.h>
  15. #include <linux/slab.h>
  16. #include <linux/sched.h>
  17. #include <linux/sched/rt.h>
  18. #include <linux/sched/task.h>
  19. #include <linux/sched/isolation.h>
  20. #include <uapi/linux/sched/types.h>
  21. #include <linux/task_work.h>
  22. #include "internals.h"
  23. #if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
  24. DEFINE_STATIC_KEY_FALSE(force_irqthreads_key);
  25. static int __init setup_forced_irqthreads(char *arg)
  26. {
  27. static_branch_enable(&force_irqthreads_key);
  28. return 0;
  29. }
  30. early_param("threadirqs", setup_forced_irqthreads);
  31. #endif
  32. static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
  33. {
  34. struct irq_data *irqd = irq_desc_get_irq_data(desc);
  35. bool inprogress;
  36. do {
  37. unsigned long flags;
  38. /*
  39. * Wait until we're out of the critical section. This might
  40. * give the wrong answer due to the lack of memory barriers.
  41. */
  42. while (irqd_irq_inprogress(&desc->irq_data))
  43. cpu_relax();
  44. /* Ok, that indicated we're done: double-check carefully. */
  45. raw_spin_lock_irqsave(&desc->lock, flags);
  46. inprogress = irqd_irq_inprogress(&desc->irq_data);
  47. /*
  48. * If requested and supported, check at the chip whether it
  49. * is in flight at the hardware level, i.e. already pending
  50. * in a CPU and waiting for service and acknowledge.
  51. */
  52. if (!inprogress && sync_chip) {
  53. /*
  54. * Ignore the return code. inprogress is only updated
  55. * when the chip supports it.
  56. */
  57. __irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
  58. &inprogress);
  59. }
  60. raw_spin_unlock_irqrestore(&desc->lock, flags);
  61. /* Oops, that failed? */
  62. } while (inprogress);
  63. }
  64. /**
  65. * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
  66. * @irq: interrupt number to wait for
  67. *
  68. * This function waits for any pending hard IRQ handlers for this
  69. * interrupt to complete before returning. If you use this
  70. * function while holding a resource the IRQ handler may need you
  71. * will deadlock. It does not take associated threaded handlers
  72. * into account.
  73. *
  74. * Do not use this for shutdown scenarios where you must be sure
  75. * that all parts (hardirq and threaded handler) have completed.
  76. *
  77. * Returns: false if a threaded handler is active.
  78. *
  79. * This function may be called - with care - from IRQ context.
  80. *
  81. * It does not check whether there is an interrupt in flight at the
  82. * hardware level, but not serviced yet, as this might deadlock when
  83. * called with interrupts disabled and the target CPU of the interrupt
  84. * is the current CPU.
  85. */
  86. bool synchronize_hardirq(unsigned int irq)
  87. {
  88. struct irq_desc *desc = irq_to_desc(irq);
  89. if (desc) {
  90. __synchronize_hardirq(desc, false);
  91. return !atomic_read(&desc->threads_active);
  92. }
  93. return true;
  94. }
  95. EXPORT_SYMBOL(synchronize_hardirq);
  96. /**
  97. * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
  98. * @irq: interrupt number to wait for
  99. *
  100. * This function waits for any pending IRQ handlers for this interrupt
  101. * to complete before returning. If you use this function while
  102. * holding a resource the IRQ handler may need you will deadlock.
  103. *
  104. * Can only be called from preemptible code as it might sleep when
  105. * an interrupt thread is associated to @irq.
  106. *
  107. * It optionally makes sure (when the irq chip supports that method)
  108. * that the interrupt is not pending in any CPU and waiting for
  109. * service.
  110. */
  111. void synchronize_irq(unsigned int irq)
  112. {
  113. struct irq_desc *desc = irq_to_desc(irq);
  114. if (desc) {
  115. __synchronize_hardirq(desc, true);
  116. /*
  117. * We made sure that no hardirq handler is
  118. * running. Now verify that no threaded handlers are
  119. * active.
  120. */
  121. wait_event(desc->wait_for_threads,
  122. !atomic_read(&desc->threads_active));
  123. }
  124. }
  125. EXPORT_SYMBOL(synchronize_irq);
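/*
 * Usage sketch (not part of this file; the foo_* names are hypothetical):
 * a driver that tears down state touched by its handler typically stops
 * the hardware source first and then calls synchronize_irq() so that a
 * handler still running on another CPU finishes before the state goes
 * away. Must be called from preemptible context, as it may sleep when a
 * threaded handler is attached.
 *
 *	static void foo_quiesce(struct foo_dev *fd)
 *	{
 *		foo_hw_mask_irqs(fd);		// no new interrupts from the device
 *		synchronize_irq(fd->irq);	// wait for handlers already in flight
 *		kfree(fd->ring);		// handler can no longer observe this
 *	}
 */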
  126. #ifdef CONFIG_SMP
  127. cpumask_var_t irq_default_affinity;
  128. static bool __irq_can_set_affinity(struct irq_desc *desc)
  129. {
  130. if (!desc || !irqd_can_balance(&desc->irq_data) ||
  131. !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
  132. return false;
  133. return true;
  134. }
  135. /**
  136. * irq_can_set_affinity - Check if the affinity of a given irq can be set
  137. * @irq: Interrupt to check
  138. *
  139. */
  140. int irq_can_set_affinity(unsigned int irq)
  141. {
  142. return __irq_can_set_affinity(irq_to_desc(irq));
  143. }
  144. /**
   145. * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
  146. * @irq: Interrupt to check
  147. *
  148. * Like irq_can_set_affinity() above, but additionally checks for the
  149. * AFFINITY_MANAGED flag.
  150. */
  151. bool irq_can_set_affinity_usr(unsigned int irq)
  152. {
  153. struct irq_desc *desc = irq_to_desc(irq);
  154. return __irq_can_set_affinity(desc) &&
  155. !irqd_affinity_is_managed(&desc->irq_data);
  156. }
  157. /**
  158. * irq_set_thread_affinity - Notify irq threads to adjust affinity
  159. * @desc: irq descriptor which has affinity changed
  160. *
  161. * We just set IRQTF_AFFINITY and delegate the affinity setting
   162. * to the interrupt thread itself. We cannot call
  163. * set_cpus_allowed_ptr() here as we hold desc->lock and this
  164. * code can be called from hard interrupt context.
  165. */
  166. void irq_set_thread_affinity(struct irq_desc *desc)
  167. {
  168. struct irqaction *action;
  169. for_each_action_of_desc(desc, action)
  170. if (action->thread)
  171. set_bit(IRQTF_AFFINITY, &action->thread_flags);
  172. }
  173. #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
  174. static void irq_validate_effective_affinity(struct irq_data *data)
  175. {
  176. const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
  177. struct irq_chip *chip = irq_data_get_irq_chip(data);
  178. if (!cpumask_empty(m))
  179. return;
  180. pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
  181. chip->name, data->irq);
  182. }
  183. #else
  184. static inline void irq_validate_effective_affinity(struct irq_data *data) { }
  185. #endif
  186. int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
  187. bool force)
  188. {
  189. struct irq_desc *desc = irq_data_to_desc(data);
  190. struct irq_chip *chip = irq_data_get_irq_chip(data);
  191. const struct cpumask *prog_mask;
  192. int ret;
  193. static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
  194. static struct cpumask tmp_mask;
  195. if (!chip || !chip->irq_set_affinity)
  196. return -EINVAL;
  197. raw_spin_lock(&tmp_mask_lock);
  198. /*
  199. * If this is a managed interrupt and housekeeping is enabled on
  200. * it check whether the requested affinity mask intersects with
  201. * a housekeeping CPU. If so, then remove the isolated CPUs from
  202. * the mask and just keep the housekeeping CPU(s). This prevents
  203. * the affinity setter from routing the interrupt to an isolated
  204. * CPU to avoid that I/O submitted from a housekeeping CPU causes
  205. * interrupts on an isolated one.
  206. *
  207. * If the masks do not intersect or include online CPU(s) then
  208. * keep the requested mask. The isolated target CPUs are only
  209. * receiving interrupts when the I/O operation was submitted
  210. * directly from them.
  211. *
  212. * If all housekeeping CPUs in the affinity mask are offline, the
  213. * interrupt will be migrated by the CPU hotplug code once a
  214. * housekeeping CPU which belongs to the affinity mask comes
  215. * online.
  216. */
  217. if (irqd_affinity_is_managed(data) &&
  218. housekeeping_enabled(HK_TYPE_MANAGED_IRQ)) {
  219. const struct cpumask *hk_mask;
  220. hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ);
  221. cpumask_and(&tmp_mask, mask, hk_mask);
  222. if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
  223. prog_mask = mask;
  224. else
  225. prog_mask = &tmp_mask;
  226. } else {
  227. prog_mask = mask;
  228. }
  229. /*
  230. * Make sure we only provide online CPUs to the irqchip,
  231. * unless we are being asked to force the affinity (in which
  232. * case we do as we are told).
  233. */
  234. cpumask_and(&tmp_mask, prog_mask, cpu_online_mask);
  235. if (!force && !cpumask_empty(&tmp_mask))
  236. ret = chip->irq_set_affinity(data, &tmp_mask, force);
  237. else if (force)
  238. ret = chip->irq_set_affinity(data, mask, force);
  239. else
  240. ret = -EINVAL;
  241. raw_spin_unlock(&tmp_mask_lock);
  242. switch (ret) {
  243. case IRQ_SET_MASK_OK:
  244. case IRQ_SET_MASK_OK_DONE:
  245. cpumask_copy(desc->irq_common_data.affinity, mask);
  246. fallthrough;
  247. case IRQ_SET_MASK_OK_NOCOPY:
  248. irq_validate_effective_affinity(data);
  249. irq_set_thread_affinity(desc);
  250. ret = 0;
  251. }
  252. return ret;
  253. }
  254. EXPORT_SYMBOL_GPL(irq_do_set_affinity);
  255. #ifdef CONFIG_GENERIC_PENDING_IRQ
  256. static inline int irq_set_affinity_pending(struct irq_data *data,
  257. const struct cpumask *dest)
  258. {
  259. struct irq_desc *desc = irq_data_to_desc(data);
  260. irqd_set_move_pending(data);
  261. irq_copy_pending(desc, dest);
  262. return 0;
  263. }
  264. #else
  265. static inline int irq_set_affinity_pending(struct irq_data *data,
  266. const struct cpumask *dest)
  267. {
  268. return -EBUSY;
  269. }
  270. #endif
  271. static int irq_try_set_affinity(struct irq_data *data,
  272. const struct cpumask *dest, bool force)
  273. {
  274. int ret = irq_do_set_affinity(data, dest, force);
  275. /*
  276. * In case that the underlying vector management is busy and the
  277. * architecture supports the generic pending mechanism then utilize
  278. * this to avoid returning an error to user space.
  279. */
  280. if (ret == -EBUSY && !force)
  281. ret = irq_set_affinity_pending(data, dest);
  282. return ret;
  283. }
  284. static bool irq_set_affinity_deactivated(struct irq_data *data,
  285. const struct cpumask *mask, bool force)
  286. {
  287. struct irq_desc *desc = irq_data_to_desc(data);
  288. /*
  289. * Handle irq chips which can handle affinity only in activated
  290. * state correctly
  291. *
  292. * If the interrupt is not yet activated, just store the affinity
  293. * mask and do not call the chip driver at all. On activation the
  294. * driver has to make sure anyway that the interrupt is in a
  295. * usable state so startup works.
  296. */
  297. if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) ||
  298. irqd_is_activated(data) || !irqd_affinity_on_activate(data))
  299. return false;
  300. cpumask_copy(desc->irq_common_data.affinity, mask);
  301. irq_data_update_effective_affinity(data, mask);
  302. irqd_set(data, IRQD_AFFINITY_SET);
  303. return true;
  304. }
  305. int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
  306. bool force)
  307. {
  308. struct irq_chip *chip = irq_data_get_irq_chip(data);
  309. struct irq_desc *desc = irq_data_to_desc(data);
  310. int ret = 0;
  311. if (!chip || !chip->irq_set_affinity)
  312. return -EINVAL;
  313. if (irq_set_affinity_deactivated(data, mask, force))
  314. return 0;
  315. if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
  316. ret = irq_try_set_affinity(data, mask, force);
  317. } else {
  318. irqd_set_move_pending(data);
  319. irq_copy_pending(desc, mask);
  320. }
  321. if (desc->affinity_notify) {
  322. kref_get(&desc->affinity_notify->kref);
  323. if (!schedule_work(&desc->affinity_notify->work)) {
  324. /* Work was already scheduled, drop our extra ref */
  325. kref_put(&desc->affinity_notify->kref,
  326. desc->affinity_notify->release);
  327. }
  328. }
  329. irqd_set(data, IRQD_AFFINITY_SET);
  330. return ret;
  331. }
  332. /**
  333. * irq_update_affinity_desc - Update affinity management for an interrupt
  334. * @irq: The interrupt number to update
  335. * @affinity: Pointer to the affinity descriptor
  336. *
  337. * This interface can be used to configure the affinity management of
  338. * interrupts which have been allocated already.
  339. *
  340. * There are certain limitations on when it may be used - attempts to use it
   341. * when the kernel is configured for generic IRQ reservation mode (in
  342. * config GENERIC_IRQ_RESERVATION_MODE) will fail, as it may conflict with
  343. * managed/non-managed interrupt accounting. In addition, attempts to use it on
  344. * an interrupt which is already started or which has already been configured
  345. * as managed will also fail, as these mean invalid init state or double init.
  346. */
  347. int irq_update_affinity_desc(unsigned int irq,
  348. struct irq_affinity_desc *affinity)
  349. {
  350. struct irq_desc *desc;
  351. unsigned long flags;
  352. bool activated;
  353. int ret = 0;
  354. /*
  355. * Supporting this with the reservation scheme used by x86 needs
  356. * some more thought. Fail it for now.
  357. */
  358. if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
  359. return -EOPNOTSUPP;
  360. desc = irq_get_desc_buslock(irq, &flags, 0);
  361. if (!desc)
  362. return -EINVAL;
  363. /* Requires the interrupt to be shut down */
  364. if (irqd_is_started(&desc->irq_data)) {
  365. ret = -EBUSY;
  366. goto out_unlock;
  367. }
  368. /* Interrupts which are already managed cannot be modified */
  369. if (irqd_affinity_is_managed(&desc->irq_data)) {
  370. ret = -EBUSY;
  371. goto out_unlock;
  372. }
  373. /*
  374. * Deactivate the interrupt. That's required to undo
  375. * anything an earlier activation has established.
  376. */
  377. activated = irqd_is_activated(&desc->irq_data);
  378. if (activated)
  379. irq_domain_deactivate_irq(&desc->irq_data);
  380. if (affinity->is_managed) {
  381. irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED);
  382. irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN);
  383. }
  384. cpumask_copy(desc->irq_common_data.affinity, &affinity->mask);
  385. /* Restore the activation state */
  386. if (activated)
  387. irq_domain_activate_irq(&desc->irq_data, false);
  388. out_unlock:
  389. irq_put_desc_busunlock(desc, flags);
  390. return ret;
  391. }
  392. static int __irq_set_affinity(unsigned int irq, const struct cpumask *mask,
  393. bool force)
  394. {
  395. struct irq_desc *desc = irq_to_desc(irq);
  396. unsigned long flags;
  397. int ret;
  398. if (!desc)
  399. return -EINVAL;
  400. raw_spin_lock_irqsave(&desc->lock, flags);
  401. ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
  402. raw_spin_unlock_irqrestore(&desc->lock, flags);
  403. return ret;
  404. }
  405. /**
  406. * irq_set_affinity - Set the irq affinity of a given irq
  407. * @irq: Interrupt to set affinity
  408. * @cpumask: cpumask
  409. *
  410. * Fails if cpumask does not contain an online CPU
  411. */
  412. int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
  413. {
  414. return __irq_set_affinity(irq, cpumask, false);
  415. }
  416. EXPORT_SYMBOL_GPL(irq_set_affinity);
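/*
 * Usage sketch (hypothetical helper): steer a queue interrupt to the
 * CPUs of a given NUMA node. irq_set_affinity() fails with -EINVAL if
 * the mask does not contain an online CPU; irq_force_affinity() below
 * skips that check for low level CPU hotplug code.
 *
 *	static int foo_pin_queue_irq(unsigned int irq, int node)
 *	{
 *		return irq_set_affinity(irq, cpumask_of_node(node));
 *	}
 */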
  417. /**
  418. * irq_force_affinity - Force the irq affinity of a given irq
  419. * @irq: Interrupt to set affinity
  420. * @cpumask: cpumask
  421. *
  422. * Same as irq_set_affinity, but without checking the mask against
  423. * online cpus.
  424. *
  425. * Solely for low level cpu hotplug code, where we need to make per
  426. * cpu interrupts affine before the cpu becomes online.
  427. */
  428. int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
  429. {
  430. return __irq_set_affinity(irq, cpumask, true);
  431. }
  432. EXPORT_SYMBOL_GPL(irq_force_affinity);
  433. int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
  434. bool setaffinity)
  435. {
  436. unsigned long flags;
  437. struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
  438. if (!desc)
  439. return -EINVAL;
  440. desc->affinity_hint = m;
  441. irq_put_desc_unlock(desc, flags);
  442. if (m && setaffinity)
  443. __irq_set_affinity(irq, m, false);
  444. return 0;
  445. }
  446. EXPORT_SYMBOL_GPL(__irq_apply_affinity_hint);
  447. static void irq_affinity_notify(struct work_struct *work)
  448. {
  449. struct irq_affinity_notify *notify =
  450. container_of(work, struct irq_affinity_notify, work);
  451. struct irq_desc *desc = irq_to_desc(notify->irq);
  452. cpumask_var_t cpumask;
  453. unsigned long flags;
  454. if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
  455. goto out;
  456. raw_spin_lock_irqsave(&desc->lock, flags);
  457. if (irq_move_pending(&desc->irq_data))
  458. irq_get_pending(cpumask, desc);
  459. else
  460. cpumask_copy(cpumask, desc->irq_common_data.affinity);
  461. raw_spin_unlock_irqrestore(&desc->lock, flags);
  462. notify->notify(notify, cpumask);
  463. free_cpumask_var(cpumask);
  464. out:
  465. kref_put(&notify->kref, notify->release);
  466. }
  467. /**
  468. * irq_set_affinity_notifier - control notification of IRQ affinity changes
  469. * @irq: Interrupt for which to enable/disable notification
  470. * @notify: Context for notification, or %NULL to disable
  471. * notification. Function pointers must be initialised;
  472. * the other fields will be initialised by this function.
  473. *
  474. * Must be called in process context. Notification may only be enabled
  475. * after the IRQ is allocated and must be disabled before the IRQ is
  476. * freed using free_irq().
  477. */
  478. int
  479. irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
  480. {
  481. struct irq_desc *desc = irq_to_desc(irq);
  482. struct irq_affinity_notify *old_notify;
  483. unsigned long flags;
  484. /* The release function is promised process context */
  485. might_sleep();
  486. if (!desc || desc->istate & IRQS_NMI)
  487. return -EINVAL;
  488. /* Complete initialisation of *notify */
  489. if (notify) {
  490. notify->irq = irq;
  491. kref_init(&notify->kref);
  492. INIT_WORK(&notify->work, irq_affinity_notify);
  493. }
  494. raw_spin_lock_irqsave(&desc->lock, flags);
  495. old_notify = desc->affinity_notify;
  496. desc->affinity_notify = notify;
  497. raw_spin_unlock_irqrestore(&desc->lock, flags);
  498. if (old_notify) {
  499. if (cancel_work_sync(&old_notify->work)) {
  500. /* Pending work had a ref, put that one too */
  501. kref_put(&old_notify->kref, old_notify->release);
  502. }
  503. kref_put(&old_notify->kref, old_notify->release);
  504. }
  505. return 0;
  506. }
  507. EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
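/*
 * Usage sketch (hypothetical driver): embed the notifier in the driver's
 * private data, fill in ->notify and ->release, then register it. It must
 * be unregistered (by passing NULL) before free_irq().
 *
 *	static void foo_affinity_notify(struct irq_affinity_notify *notify,
 *					const cpumask_var_t mask)
 *	{
 *		struct foo_dev *fd = container_of(notify, struct foo_dev,
 *						  affinity_notify);
 *
 *		foo_rebalance_queues(fd, mask);
 *	}
 *
 *	static void foo_affinity_release(struct kref *ref)
 *	{
 *		// nothing to free: the notifier is embedded in foo_dev
 *	}
 *
 *	fd->affinity_notify.notify = foo_affinity_notify;
 *	fd->affinity_notify.release = foo_affinity_release;
 *	err = irq_set_affinity_notifier(fd->irq, &fd->affinity_notify);
 */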
  508. #ifndef CONFIG_AUTO_IRQ_AFFINITY
  509. /*
  510. * Generic version of the affinity autoselector.
  511. */
  512. int irq_setup_affinity(struct irq_desc *desc)
  513. {
  514. struct cpumask *set = irq_default_affinity;
  515. int ret, node = irq_desc_get_node(desc);
  516. static DEFINE_RAW_SPINLOCK(mask_lock);
  517. static struct cpumask mask;
  518. /* Excludes PER_CPU and NO_BALANCE interrupts */
  519. if (!__irq_can_set_affinity(desc))
  520. return 0;
  521. raw_spin_lock(&mask_lock);
  522. /*
  523. * Preserve the managed affinity setting and a userspace affinity
  524. * setup, but make sure that one of the targets is online.
  525. */
  526. if (irqd_affinity_is_managed(&desc->irq_data) ||
  527. irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
  528. if (cpumask_intersects(desc->irq_common_data.affinity,
  529. cpu_online_mask))
  530. set = desc->irq_common_data.affinity;
  531. else
  532. irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
  533. }
  534. cpumask_and(&mask, cpu_online_mask, set);
  535. if (cpumask_empty(&mask))
  536. cpumask_copy(&mask, cpu_online_mask);
  537. if (node != NUMA_NO_NODE) {
  538. const struct cpumask *nodemask = cpumask_of_node(node);
  539. /* make sure at least one of the cpus in nodemask is online */
  540. if (cpumask_intersects(&mask, nodemask))
  541. cpumask_and(&mask, &mask, nodemask);
  542. }
  543. ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
  544. raw_spin_unlock(&mask_lock);
  545. return ret;
  546. }
  547. #else
  548. /* Wrapper for ALPHA specific affinity selector magic */
  549. int irq_setup_affinity(struct irq_desc *desc)
  550. {
  551. return irq_select_affinity(irq_desc_get_irq(desc));
  552. }
  553. #endif /* CONFIG_AUTO_IRQ_AFFINITY */
  554. #endif /* CONFIG_SMP */
  555. /**
  556. * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
  557. * @irq: interrupt number to set affinity
  558. * @vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
  559. * specific data for percpu_devid interrupts
  560. *
  561. * This function uses the vCPU specific data to set the vCPU
  562. * affinity for an irq. The vCPU specific data is passed from
  563. * outside, such as KVM. One example code path is as below:
  564. * KVM -> IOMMU -> irq_set_vcpu_affinity().
  565. */
  566. int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
  567. {
  568. unsigned long flags;
  569. struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
  570. struct irq_data *data;
  571. struct irq_chip *chip;
  572. int ret = -ENOSYS;
  573. if (!desc)
  574. return -EINVAL;
  575. data = irq_desc_get_irq_data(desc);
  576. do {
  577. chip = irq_data_get_irq_chip(data);
  578. if (chip && chip->irq_set_vcpu_affinity)
  579. break;
  580. #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
  581. data = data->parent_data;
  582. #else
  583. data = NULL;
  584. #endif
  585. } while (data);
  586. if (data)
  587. ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
  588. irq_put_desc_unlock(desc, flags);
  589. return ret;
  590. }
  591. EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
  592. void __disable_irq(struct irq_desc *desc)
  593. {
  594. if (!desc->depth++)
  595. irq_disable(desc);
  596. }
  597. static int __disable_irq_nosync(unsigned int irq)
  598. {
  599. unsigned long flags;
  600. struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
  601. if (!desc)
  602. return -EINVAL;
  603. __disable_irq(desc);
  604. irq_put_desc_busunlock(desc, flags);
  605. return 0;
  606. }
  607. /**
  608. * disable_irq_nosync - disable an irq without waiting
  609. * @irq: Interrupt to disable
  610. *
  611. * Disable the selected interrupt line. Disables and Enables are
  612. * nested.
  613. * Unlike disable_irq(), this function does not ensure existing
  614. * instances of the IRQ handler have completed before returning.
  615. *
  616. * This function may be called from IRQ context.
  617. */
  618. void disable_irq_nosync(unsigned int irq)
  619. {
  620. __disable_irq_nosync(irq);
  621. }
  622. EXPORT_SYMBOL(disable_irq_nosync);
  623. /**
  624. * disable_irq - disable an irq and wait for completion
  625. * @irq: Interrupt to disable
  626. *
  627. * Disable the selected interrupt line. Enables and Disables are
  628. * nested.
  629. * This function waits for any pending IRQ handlers for this interrupt
  630. * to complete before returning. If you use this function while
  631. * holding a resource the IRQ handler may need you will deadlock.
  632. *
  633. * This function may be called - with care - from IRQ context.
  634. */
  635. void disable_irq(unsigned int irq)
  636. {
  637. if (!__disable_irq_nosync(irq))
  638. synchronize_irq(irq);
  639. }
  640. EXPORT_SYMBOL(disable_irq);
  641. /**
  642. * disable_hardirq - disables an irq and waits for hardirq completion
  643. * @irq: Interrupt to disable
  644. *
  645. * Disable the selected interrupt line. Enables and Disables are
  646. * nested.
  647. * This function waits for any pending hard IRQ handlers for this
  648. * interrupt to complete before returning. If you use this function while
  649. * holding a resource the hard IRQ handler may need you will deadlock.
  650. *
  651. * When used to optimistically disable an interrupt from atomic context
  652. * the return value must be checked.
  653. *
  654. * Returns: false if a threaded handler is active.
  655. *
  656. * This function may be called - with care - from IRQ context.
  657. */
  658. bool disable_hardirq(unsigned int irq)
  659. {
  660. if (!__disable_irq_nosync(irq))
  661. return synchronize_hardirq(irq);
  662. return false;
  663. }
  664. EXPORT_SYMBOL_GPL(disable_hardirq);
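/*
 * Usage sketch: when used optimistically from atomic context the return
 * value must be checked, since a threaded handler may still be running
 * even though the line is now disabled (hypothetical fallback shown):
 *
 *	if (!disable_hardirq(fd->irq)) {
 *		// threaded handler still active, defer the teardown
 *		schedule_work(&fd->teardown_work);
 *		return;
 *	}
 */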
  665. /**
  666. * disable_nmi_nosync - disable an nmi without waiting
  667. * @irq: Interrupt to disable
  668. *
  669. * Disable the selected interrupt line. Disables and enables are
  670. * nested.
  671. * The interrupt to disable must have been requested through request_nmi.
  672. * Unlike disable_nmi(), this function does not ensure existing
  673. * instances of the IRQ handler have completed before returning.
  674. */
  675. void disable_nmi_nosync(unsigned int irq)
  676. {
  677. disable_irq_nosync(irq);
  678. }
  679. void __enable_irq(struct irq_desc *desc)
  680. {
  681. switch (desc->depth) {
  682. case 0:
  683. err_out:
  684. WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
  685. irq_desc_get_irq(desc));
  686. break;
  687. case 1: {
  688. if (desc->istate & IRQS_SUSPENDED)
  689. goto err_out;
  690. /* Prevent probing on this irq: */
  691. irq_settings_set_noprobe(desc);
  692. /*
  693. * Call irq_startup() not irq_enable() here because the
  694. * interrupt might be marked NOAUTOEN. So irq_startup()
  695. * needs to be invoked when it gets enabled the first
  696. * time. If it was already started up, then irq_startup()
  697. * will invoke irq_enable() under the hood.
  698. */
  699. irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
  700. break;
  701. }
  702. default:
  703. desc->depth--;
  704. }
  705. }
  706. /**
  707. * enable_irq - enable handling of an irq
  708. * @irq: Interrupt to enable
  709. *
  710. * Undoes the effect of one call to disable_irq(). If this
  711. * matches the last disable, processing of interrupts on this
  712. * IRQ line is re-enabled.
  713. *
  714. * This function may be called from IRQ context only when
  715. * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
  716. */
  717. void enable_irq(unsigned int irq)
  718. {
  719. unsigned long flags;
  720. struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
  721. if (!desc)
  722. return;
  723. if (WARN(!desc->irq_data.chip,
  724. KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
  725. goto out;
  726. __enable_irq(desc);
  727. out:
  728. irq_put_desc_busunlock(desc, flags);
  729. }
  730. EXPORT_SYMBOL(enable_irq);
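/*
 * Usage sketch: disable_irq()/enable_irq() nest, so a driver can bracket
 * a reconfiguration that the handler must not observe (hypothetical
 * names). Do not call disable_irq() while holding a lock the handler
 * takes, as waiting for a running handler would then deadlock.
 *
 *	disable_irq(fd->irq);		// waits for running handlers
 *	foo_reprogram_hw(fd);		// handler cannot run here
 *	enable_irq(fd->irq);		// re-enabled on the last matching call
 */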
  731. /**
  732. * enable_nmi - enable handling of an nmi
  733. * @irq: Interrupt to enable
  734. *
  735. * The interrupt to enable must have been requested through request_nmi.
  736. * Undoes the effect of one call to disable_nmi(). If this
  737. * matches the last disable, processing of interrupts on this
  738. * IRQ line is re-enabled.
  739. */
  740. void enable_nmi(unsigned int irq)
  741. {
  742. enable_irq(irq);
  743. }
  744. static int set_irq_wake_real(unsigned int irq, unsigned int on)
  745. {
  746. struct irq_desc *desc = irq_to_desc(irq);
  747. int ret = -ENXIO;
  748. if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
  749. return 0;
  750. if (desc->irq_data.chip->irq_set_wake)
  751. ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
  752. return ret;
  753. }
  754. /**
  755. * irq_set_irq_wake - control irq power management wakeup
  756. * @irq: interrupt to control
  757. * @on: enable/disable power management wakeup
  758. *
  759. * Enable/disable power management wakeup mode, which is
  760. * disabled by default. Enables and disables must match,
  761. * just as they match for non-wakeup mode support.
  762. *
  763. * Wakeup mode lets this IRQ wake the system from sleep
  764. * states like "suspend to RAM".
  765. *
  766. * Note: irq enable/disable state is completely orthogonal
  767. * to the enable/disable state of irq wake. An irq can be
  768. * disabled with disable_irq() and still wake the system as
  769. * long as the irq has wake enabled. If this does not hold,
  770. * then the underlying irq chip and the related driver need
  771. * to be investigated.
  772. */
  773. int irq_set_irq_wake(unsigned int irq, unsigned int on)
  774. {
  775. unsigned long flags;
  776. struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
  777. int ret = 0;
  778. if (!desc)
  779. return -EINVAL;
  780. /* Don't use NMIs as wake up interrupts please */
  781. if (desc->istate & IRQS_NMI) {
  782. ret = -EINVAL;
  783. goto out_unlock;
  784. }
  785. /* wakeup-capable irqs can be shared between drivers that
  786. * don't need to have the same sleep mode behaviors.
  787. */
  788. if (on) {
  789. if (desc->wake_depth++ == 0) {
  790. ret = set_irq_wake_real(irq, on);
  791. if (ret)
  792. desc->wake_depth = 0;
  793. else
  794. irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
  795. }
  796. } else {
  797. if (desc->wake_depth == 0) {
  798. WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
  799. } else if (--desc->wake_depth == 0) {
  800. ret = set_irq_wake_real(irq, on);
  801. if (ret)
  802. desc->wake_depth = 1;
  803. else
  804. irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
  805. }
  806. }
  807. out_unlock:
  808. irq_put_desc_busunlock(desc, flags);
  809. return ret;
  810. }
  811. EXPORT_SYMBOL(irq_set_irq_wake);
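/*
 * Usage sketch (hypothetical suspend/resume callbacks): drivers normally
 * use the enable_irq_wake()/disable_irq_wake() wrappers around this
 * function, and the calls must balance just like disable/enable.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_dev *fd = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(fd->irq);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_dev *fd = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(fd->irq);
 *		return 0;
 *	}
 */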
  812. /*
  813. * Internal function that tells the architecture code whether a
  814. * particular irq has been exclusively allocated or is available
  815. * for driver use.
  816. */
  817. int can_request_irq(unsigned int irq, unsigned long irqflags)
  818. {
  819. unsigned long flags;
  820. struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
  821. int canrequest = 0;
  822. if (!desc)
  823. return 0;
  824. if (irq_settings_can_request(desc)) {
  825. if (!desc->action ||
  826. irqflags & desc->action->flags & IRQF_SHARED)
  827. canrequest = 1;
  828. }
  829. irq_put_desc_unlock(desc, flags);
  830. return canrequest;
  831. }
  832. int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
  833. {
  834. struct irq_chip *chip = desc->irq_data.chip;
  835. int ret, unmask = 0;
  836. if (!chip || !chip->irq_set_type) {
  837. /*
  838. * IRQF_TRIGGER_* but the PIC does not support multiple
  839. * flow-types?
  840. */
  841. pr_debug("No set_type function for IRQ %d (%s)\n",
  842. irq_desc_get_irq(desc),
  843. chip ? (chip->name ? : "unknown") : "unknown");
  844. return 0;
  845. }
  846. if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
  847. if (!irqd_irq_masked(&desc->irq_data))
  848. mask_irq(desc);
  849. if (!irqd_irq_disabled(&desc->irq_data))
  850. unmask = 1;
  851. }
  852. /* Mask all flags except trigger mode */
  853. flags &= IRQ_TYPE_SENSE_MASK;
  854. ret = chip->irq_set_type(&desc->irq_data, flags);
  855. switch (ret) {
  856. case IRQ_SET_MASK_OK:
  857. case IRQ_SET_MASK_OK_DONE:
  858. irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
  859. irqd_set(&desc->irq_data, flags);
  860. fallthrough;
  861. case IRQ_SET_MASK_OK_NOCOPY:
  862. flags = irqd_get_trigger_type(&desc->irq_data);
  863. irq_settings_set_trigger_mask(desc, flags);
  864. irqd_clear(&desc->irq_data, IRQD_LEVEL);
  865. irq_settings_clr_level(desc);
  866. if (flags & IRQ_TYPE_LEVEL_MASK) {
  867. irq_settings_set_level(desc);
  868. irqd_set(&desc->irq_data, IRQD_LEVEL);
  869. }
  870. ret = 0;
  871. break;
  872. default:
  873. pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n",
  874. flags, irq_desc_get_irq(desc), chip->irq_set_type);
  875. }
  876. if (unmask)
  877. unmask_irq(desc);
  878. return ret;
  879. }
  880. #ifdef CONFIG_HARDIRQS_SW_RESEND
  881. int irq_set_parent(int irq, int parent_irq)
  882. {
  883. unsigned long flags;
  884. struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
  885. if (!desc)
  886. return -EINVAL;
  887. desc->parent_irq = parent_irq;
  888. irq_put_desc_unlock(desc, flags);
  889. return 0;
  890. }
  891. EXPORT_SYMBOL_GPL(irq_set_parent);
  892. #endif
  893. /*
  894. * Default primary interrupt handler for threaded interrupts. Is
  895. * assigned as primary handler when request_threaded_irq is called
  896. * with handler == NULL. Useful for oneshot interrupts.
  897. */
  898. static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
  899. {
  900. return IRQ_WAKE_THREAD;
  901. }
  902. /*
  903. * Primary handler for nested threaded interrupts. Should never be
  904. * called.
  905. */
  906. static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
  907. {
  908. WARN(1, "Primary handler called for nested irq %d\n", irq);
  909. return IRQ_NONE;
  910. }
  911. static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
  912. {
  913. WARN(1, "Secondary action handler called for irq %d\n", irq);
  914. return IRQ_NONE;
  915. }
  916. static int irq_wait_for_interrupt(struct irqaction *action)
  917. {
  918. for (;;) {
  919. set_current_state(TASK_INTERRUPTIBLE);
  920. if (kthread_should_stop()) {
  921. /* may need to run one last time */
  922. if (test_and_clear_bit(IRQTF_RUNTHREAD,
  923. &action->thread_flags)) {
  924. __set_current_state(TASK_RUNNING);
  925. return 0;
  926. }
  927. __set_current_state(TASK_RUNNING);
  928. return -1;
  929. }
  930. if (test_and_clear_bit(IRQTF_RUNTHREAD,
  931. &action->thread_flags)) {
  932. __set_current_state(TASK_RUNNING);
  933. return 0;
  934. }
  935. schedule();
  936. }
  937. }
  938. /*
  939. * Oneshot interrupts keep the irq line masked until the threaded
   940. * handler has finished. Unmask if the interrupt has not been disabled and
  941. * is marked MASKED.
  942. */
  943. static void irq_finalize_oneshot(struct irq_desc *desc,
  944. struct irqaction *action)
  945. {
  946. if (!(desc->istate & IRQS_ONESHOT) ||
  947. action->handler == irq_forced_secondary_handler)
  948. return;
  949. again:
  950. chip_bus_lock(desc);
  951. raw_spin_lock_irq(&desc->lock);
  952. /*
   953. * Implausible though it may be, we need to protect against
  954. * the following scenario:
  955. *
   956. * The thread finishes before the hard interrupt handler on the
   957. * other CPU. If we then unmask the irq line, the interrupt can
   958. * come in again, mask the line and leave due to IRQS_INPROGRESS,
   959. * so the irq line stays masked forever.
  960. *
  961. * This also serializes the state of shared oneshot handlers
  962. * versus "desc->threads_oneshot |= action->thread_mask;" in
  963. * irq_wake_thread(). See the comment there which explains the
  964. * serialization.
  965. */
  966. if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
  967. raw_spin_unlock_irq(&desc->lock);
  968. chip_bus_sync_unlock(desc);
  969. cpu_relax();
  970. goto again;
  971. }
  972. /*
  973. * Now check again, whether the thread should run. Otherwise
  974. * we would clear the threads_oneshot bit of this thread which
  975. * was just set.
  976. */
  977. if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
  978. goto out_unlock;
  979. desc->threads_oneshot &= ~action->thread_mask;
  980. if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
  981. irqd_irq_masked(&desc->irq_data))
  982. unmask_threaded_irq(desc);
  983. out_unlock:
  984. raw_spin_unlock_irq(&desc->lock);
  985. chip_bus_sync_unlock(desc);
  986. }
  987. #ifdef CONFIG_SMP
  988. /*
  989. * Check whether we need to change the affinity of the interrupt thread.
  990. */
  991. static void
  992. irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
  993. {
  994. cpumask_var_t mask;
  995. bool valid = true;
  996. if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
  997. return;
  998. /*
   999. * In case we are out of memory, set IRQTF_AFFINITY again and
   1000. * retry the next time around.
  1001. */
  1002. if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
  1003. set_bit(IRQTF_AFFINITY, &action->thread_flags);
  1004. return;
  1005. }
  1006. raw_spin_lock_irq(&desc->lock);
  1007. /*
  1008. * This code is triggered unconditionally. Check the affinity
  1009. * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
  1010. */
  1011. if (cpumask_available(desc->irq_common_data.affinity)) {
  1012. const struct cpumask *m;
  1013. m = irq_data_get_effective_affinity_mask(&desc->irq_data);
  1014. cpumask_copy(mask, m);
  1015. } else {
  1016. valid = false;
  1017. }
  1018. raw_spin_unlock_irq(&desc->lock);
  1019. if (valid)
  1020. set_cpus_allowed_ptr(current, mask);
  1021. free_cpumask_var(mask);
  1022. }
  1023. #else
  1024. static inline void
  1025. irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
  1026. #endif
  1027. /*
  1028. * Interrupts which are not explicitly requested as threaded
  1029. * interrupts rely on the implicit bh/preempt disable of the hard irq
  1030. * context. So we need to disable bh here to avoid deadlocks and other
  1031. * side effects.
  1032. */
  1033. static irqreturn_t
  1034. irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
  1035. {
  1036. irqreturn_t ret;
  1037. local_bh_disable();
  1038. if (!IS_ENABLED(CONFIG_PREEMPT_RT))
  1039. local_irq_disable();
  1040. ret = action->thread_fn(action->irq, action->dev_id);
  1041. if (ret == IRQ_HANDLED)
  1042. atomic_inc(&desc->threads_handled);
  1043. irq_finalize_oneshot(desc, action);
  1044. if (!IS_ENABLED(CONFIG_PREEMPT_RT))
  1045. local_irq_enable();
  1046. local_bh_enable();
  1047. return ret;
  1048. }
  1049. /*
  1050. * Interrupts explicitly requested as threaded interrupts want to be
   1051. * preemptible - many of them need to sleep and wait for slow buses to
  1052. * complete.
  1053. */
  1054. static irqreturn_t irq_thread_fn(struct irq_desc *desc,
  1055. struct irqaction *action)
  1056. {
  1057. irqreturn_t ret;
  1058. ret = action->thread_fn(action->irq, action->dev_id);
  1059. if (ret == IRQ_HANDLED)
  1060. atomic_inc(&desc->threads_handled);
  1061. irq_finalize_oneshot(desc, action);
  1062. return ret;
  1063. }
  1064. static void wake_threads_waitq(struct irq_desc *desc)
  1065. {
  1066. if (atomic_dec_and_test(&desc->threads_active))
  1067. wake_up(&desc->wait_for_threads);
  1068. }
  1069. static void irq_thread_dtor(struct callback_head *unused)
  1070. {
  1071. struct task_struct *tsk = current;
  1072. struct irq_desc *desc;
  1073. struct irqaction *action;
  1074. if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
  1075. return;
  1076. action = kthread_data(tsk);
  1077. pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
  1078. tsk->comm, tsk->pid, action->irq);
  1079. desc = irq_to_desc(action->irq);
  1080. /*
  1081. * If IRQTF_RUNTHREAD is set, we need to decrement
  1082. * desc->threads_active and wake possible waiters.
  1083. */
  1084. if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
  1085. wake_threads_waitq(desc);
  1086. /* Prevent a stale desc->threads_oneshot */
  1087. irq_finalize_oneshot(desc, action);
  1088. }
  1089. static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
  1090. {
  1091. struct irqaction *secondary = action->secondary;
  1092. if (WARN_ON_ONCE(!secondary))
  1093. return;
  1094. raw_spin_lock_irq(&desc->lock);
  1095. __irq_wake_thread(desc, secondary);
  1096. raw_spin_unlock_irq(&desc->lock);
  1097. }
  1098. /*
   1099. * Internal function to notify that an interrupt thread is ready.
  1100. */
  1101. static void irq_thread_set_ready(struct irq_desc *desc,
  1102. struct irqaction *action)
  1103. {
  1104. set_bit(IRQTF_READY, &action->thread_flags);
  1105. wake_up(&desc->wait_for_threads);
  1106. }
  1107. /*
   1108. * Internal function to wake up an interrupt thread and wait until it is
  1109. * ready.
  1110. */
  1111. static void wake_up_and_wait_for_irq_thread_ready(struct irq_desc *desc,
  1112. struct irqaction *action)
  1113. {
  1114. if (!action || !action->thread)
  1115. return;
  1116. wake_up_process(action->thread);
  1117. wait_event(desc->wait_for_threads,
  1118. test_bit(IRQTF_READY, &action->thread_flags));
  1119. }
  1120. /*
  1121. * Interrupt handler thread
  1122. */
  1123. static int irq_thread(void *data)
  1124. {
  1125. struct callback_head on_exit_work;
  1126. struct irqaction *action = data;
  1127. struct irq_desc *desc = irq_to_desc(action->irq);
  1128. irqreturn_t (*handler_fn)(struct irq_desc *desc,
  1129. struct irqaction *action);
  1130. irq_thread_set_ready(desc, action);
  1131. sched_set_fifo(current);
  1132. if (force_irqthreads() && test_bit(IRQTF_FORCED_THREAD,
  1133. &action->thread_flags))
  1134. handler_fn = irq_forced_thread_fn;
  1135. else
  1136. handler_fn = irq_thread_fn;
  1137. init_task_work(&on_exit_work, irq_thread_dtor);
  1138. task_work_add(current, &on_exit_work, TWA_NONE);
  1139. irq_thread_check_affinity(desc, action);
  1140. while (!irq_wait_for_interrupt(action)) {
  1141. irqreturn_t action_ret;
  1142. irq_thread_check_affinity(desc, action);
  1143. action_ret = handler_fn(desc, action);
  1144. if (action_ret == IRQ_WAKE_THREAD)
  1145. irq_wake_secondary(desc, action);
  1146. wake_threads_waitq(desc);
  1147. }
  1148. /*
  1149. * This is the regular exit path. __free_irq() is stopping the
  1150. * thread via kthread_stop() after calling
  1151. * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
  1152. * oneshot mask bit can be set.
  1153. */
  1154. task_work_cancel(current, irq_thread_dtor);
  1155. return 0;
  1156. }
  1157. /**
  1158. * irq_wake_thread - wake the irq thread for the action identified by dev_id
  1159. * @irq: Interrupt line
  1160. * @dev_id: Device identity for which the thread should be woken
  1161. *
  1162. */
  1163. void irq_wake_thread(unsigned int irq, void *dev_id)
  1164. {
  1165. struct irq_desc *desc = irq_to_desc(irq);
  1166. struct irqaction *action;
  1167. unsigned long flags;
  1168. if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
  1169. return;
  1170. raw_spin_lock_irqsave(&desc->lock, flags);
  1171. for_each_action_of_desc(desc, action) {
  1172. if (action->dev_id == dev_id) {
  1173. if (action->thread)
  1174. __irq_wake_thread(desc, action);
  1175. break;
  1176. }
  1177. }
  1178. raw_spin_unlock_irqrestore(&desc->lock, flags);
  1179. }
  1180. EXPORT_SYMBOL_GPL(irq_wake_thread);
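/*
 * Usage sketch (hypothetical): irq_wake_thread() lets code outside the
 * primary handler kick the threaded handler registered for a dev_id,
 * e.g. from a watchdog that noticed work the hardware failed to signal.
 * The dev_id must match the one passed to request_threaded_irq().
 *
 *	static void foo_watchdog(struct timer_list *t)
 *	{
 *		struct foo_dev *fd = from_timer(fd, t, watchdog);
 *
 *		if (foo_hw_work_pending(fd))
 *			irq_wake_thread(fd->irq, fd);
 *	}
 */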
  1181. static int irq_setup_forced_threading(struct irqaction *new)
  1182. {
  1183. if (!force_irqthreads())
  1184. return 0;
  1185. if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
  1186. return 0;
  1187. /*
  1188. * No further action required for interrupts which are requested as
  1189. * threaded interrupts already
  1190. */
  1191. if (new->handler == irq_default_primary_handler)
  1192. return 0;
  1193. new->flags |= IRQF_ONESHOT;
  1194. /*
  1195. * Handle the case where we have a real primary handler and a
  1196. * thread handler. We force thread them as well by creating a
  1197. * secondary action.
  1198. */
  1199. if (new->handler && new->thread_fn) {
  1200. /* Allocate the secondary action */
  1201. new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
  1202. if (!new->secondary)
  1203. return -ENOMEM;
  1204. new->secondary->handler = irq_forced_secondary_handler;
  1205. new->secondary->thread_fn = new->thread_fn;
  1206. new->secondary->dev_id = new->dev_id;
  1207. new->secondary->irq = new->irq;
  1208. new->secondary->name = new->name;
  1209. }
  1210. /* Deal with the primary handler */
  1211. set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
  1212. new->thread_fn = new->handler;
  1213. new->handler = irq_default_primary_handler;
  1214. return 0;
  1215. }
  1216. static int irq_request_resources(struct irq_desc *desc)
  1217. {
  1218. struct irq_data *d = &desc->irq_data;
  1219. struct irq_chip *c = d->chip;
  1220. return c->irq_request_resources ? c->irq_request_resources(d) : 0;
  1221. }
  1222. static void irq_release_resources(struct irq_desc *desc)
  1223. {
  1224. struct irq_data *d = &desc->irq_data;
  1225. struct irq_chip *c = d->chip;
  1226. if (c->irq_release_resources)
  1227. c->irq_release_resources(d);
  1228. }
  1229. static bool irq_supports_nmi(struct irq_desc *desc)
  1230. {
  1231. struct irq_data *d = irq_desc_get_irq_data(desc);
  1232. #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
  1233. /* Only IRQs directly managed by the root irqchip can be set as NMI */
  1234. if (d->parent_data)
  1235. return false;
  1236. #endif
  1237. /* Don't support NMIs for chips behind a slow bus */
  1238. if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
  1239. return false;
  1240. return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
  1241. }
  1242. static int irq_nmi_setup(struct irq_desc *desc)
  1243. {
  1244. struct irq_data *d = irq_desc_get_irq_data(desc);
  1245. struct irq_chip *c = d->chip;
  1246. return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
  1247. }
  1248. static void irq_nmi_teardown(struct irq_desc *desc)
  1249. {
  1250. struct irq_data *d = irq_desc_get_irq_data(desc);
  1251. struct irq_chip *c = d->chip;
  1252. if (c->irq_nmi_teardown)
  1253. c->irq_nmi_teardown(d);
  1254. }
  1255. static int
  1256. setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
  1257. {
  1258. struct task_struct *t;
  1259. if (!secondary) {
  1260. t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
  1261. new->name);
  1262. } else {
  1263. t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
  1264. new->name);
  1265. }
  1266. if (IS_ERR(t))
  1267. return PTR_ERR(t);
  1268. /*
1269. * Keep the reference to the task struct even if the thread
1270. * dies, so that the interrupt code never references an
1271. * already freed task_struct.
  1272. */
  1273. new->thread = get_task_struct(t);
  1274. /*
  1275. * Tell the thread to set its affinity. This is
1276. * important for shared interrupt handlers as we do
1277. * not invoke setup_affinity() for the secondary
1278. * handlers, because everything is already set up. Even for
1279. * interrupts marked with IRQF_NOBALANCING this is
1280. * correct as we want the thread to move to the cpu(s)
1281. * on which the requesting code placed the interrupt.
  1282. */
  1283. set_bit(IRQTF_AFFINITY, &new->thread_flags);
  1284. return 0;
  1285. }
  1286. /*
  1287. * Internal function to register an irqaction - typically used to
  1288. * allocate special interrupts that are part of the architecture.
  1289. *
  1290. * Locking rules:
  1291. *
  1292. * desc->request_mutex Provides serialization against a concurrent free_irq()
  1293. * chip_bus_lock Provides serialization for slow bus operations
  1294. * desc->lock Provides serialization against hard interrupts
  1295. *
  1296. * chip_bus_lock and desc->lock are sufficient for all other management and
  1297. * interrupt related functions. desc->request_mutex solely serializes
  1298. * request/free_irq().
  1299. */
  1300. static int
  1301. __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
  1302. {
  1303. struct irqaction *old, **old_ptr;
  1304. unsigned long flags, thread_mask = 0;
  1305. int ret, nested, shared = 0;
  1306. if (!desc)
  1307. return -EINVAL;
  1308. if (desc->irq_data.chip == &no_irq_chip)
  1309. return -ENOSYS;
  1310. if (!try_module_get(desc->owner))
  1311. return -ENODEV;
  1312. new->irq = irq;
  1313. /*
  1314. * If the trigger type is not specified by the caller,
  1315. * then use the default for this interrupt.
  1316. */
  1317. if (!(new->flags & IRQF_TRIGGER_MASK))
  1318. new->flags |= irqd_get_trigger_type(&desc->irq_data);
  1319. /*
  1320. * Check whether the interrupt nests into another interrupt
  1321. * thread.
  1322. */
  1323. nested = irq_settings_is_nested_thread(desc);
  1324. if (nested) {
  1325. if (!new->thread_fn) {
  1326. ret = -EINVAL;
  1327. goto out_mput;
  1328. }
  1329. /*
  1330. * Replace the primary handler which was provided from
  1331. * the driver for non nested interrupt handling by the
  1332. * dummy function which warns when called.
  1333. */
  1334. new->handler = irq_nested_primary_handler;
  1335. } else {
  1336. if (irq_settings_can_thread(desc)) {
  1337. ret = irq_setup_forced_threading(new);
  1338. if (ret)
  1339. goto out_mput;
  1340. }
  1341. }
  1342. /*
  1343. * Create a handler thread when a thread function is supplied
  1344. * and the interrupt does not nest into another interrupt
  1345. * thread.
  1346. */
  1347. if (new->thread_fn && !nested) {
  1348. ret = setup_irq_thread(new, irq, false);
  1349. if (ret)
  1350. goto out_mput;
  1351. if (new->secondary) {
  1352. ret = setup_irq_thread(new->secondary, irq, true);
  1353. if (ret)
  1354. goto out_thread;
  1355. }
  1356. }
  1357. /*
  1358. * Drivers are often written to work w/o knowledge about the
  1359. * underlying irq chip implementation, so a request for a
  1360. * threaded irq without a primary hard irq context handler
  1361. * requires the ONESHOT flag to be set. Some irq chips like
  1362. * MSI based interrupts are per se one shot safe. Check the
  1363. * chip flags, so we can avoid the unmask dance at the end of
  1364. * the threaded handler for those.
  1365. */
  1366. if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
  1367. new->flags &= ~IRQF_ONESHOT;
  1368. /*
  1369. * Protects against a concurrent __free_irq() call which might wait
  1370. * for synchronize_hardirq() to complete without holding the optional
  1371. * chip bus lock and desc->lock. Also protects against handing out
  1372. * a recycled oneshot thread_mask bit while it's still in use by
  1373. * its previous owner.
  1374. */
  1375. mutex_lock(&desc->request_mutex);
  1376. /*
  1377. * Acquire bus lock as the irq_request_resources() callback below
  1378. * might rely on the serialization or the magic power management
1379. * functions which are abusing the irq_bus_lock() callback.
  1380. */
  1381. chip_bus_lock(desc);
  1382. /* First installed action requests resources. */
  1383. if (!desc->action) {
  1384. ret = irq_request_resources(desc);
  1385. if (ret) {
  1386. pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
  1387. new->name, irq, desc->irq_data.chip->name);
  1388. goto out_bus_unlock;
  1389. }
  1390. }
  1391. /*
  1392. * The following block of code has to be executed atomically
  1393. * protected against a concurrent interrupt and any of the other
  1394. * management calls which are not serialized via
  1395. * desc->request_mutex or the optional bus lock.
  1396. */
  1397. raw_spin_lock_irqsave(&desc->lock, flags);
  1398. old_ptr = &desc->action;
  1399. old = *old_ptr;
  1400. if (old) {
  1401. /*
  1402. * Can't share interrupts unless both agree to and are
  1403. * the same type (level, edge, polarity). So both flag
  1404. * fields must have IRQF_SHARED set and the bits which
  1405. * set the trigger type must match. Also all must
  1406. * agree on ONESHOT.
  1407. * Interrupt lines used for NMIs cannot be shared.
  1408. */
  1409. unsigned int oldtype;
  1410. if (desc->istate & IRQS_NMI) {
  1411. pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
  1412. new->name, irq, desc->irq_data.chip->name);
  1413. ret = -EINVAL;
  1414. goto out_unlock;
  1415. }
  1416. /*
1417. * If nobody has set the trigger configuration before, inherit
  1418. * the one provided by the requester.
  1419. */
  1420. if (irqd_trigger_type_was_set(&desc->irq_data)) {
  1421. oldtype = irqd_get_trigger_type(&desc->irq_data);
  1422. } else {
  1423. oldtype = new->flags & IRQF_TRIGGER_MASK;
  1424. irqd_set_trigger_type(&desc->irq_data, oldtype);
  1425. }
  1426. if (!((old->flags & new->flags) & IRQF_SHARED) ||
  1427. (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
  1428. ((old->flags ^ new->flags) & IRQF_ONESHOT))
  1429. goto mismatch;
  1430. /* All handlers must agree on per-cpuness */
  1431. if ((old->flags & IRQF_PERCPU) !=
  1432. (new->flags & IRQF_PERCPU))
  1433. goto mismatch;
  1434. /* add new interrupt at end of irq queue */
  1435. do {
  1436. /*
  1437. * Or all existing action->thread_mask bits,
  1438. * so we can find the next zero bit for this
  1439. * new action.
  1440. */
  1441. thread_mask |= old->thread_mask;
  1442. old_ptr = &old->next;
  1443. old = *old_ptr;
  1444. } while (old);
  1445. shared = 1;
  1446. }
  1447. /*
  1448. * Setup the thread mask for this irqaction for ONESHOT. For
  1449. * !ONESHOT irqs the thread mask is 0 so we can avoid a
  1450. * conditional in irq_wake_thread().
  1451. */
  1452. if (new->flags & IRQF_ONESHOT) {
  1453. /*
1454. * Unlikely to have 32 or 64 irqs sharing one line,
  1455. * but who knows.
  1456. */
  1457. if (thread_mask == ~0UL) {
  1458. ret = -EBUSY;
  1459. goto out_unlock;
  1460. }
  1461. /*
  1462. * The thread_mask for the action is or'ed to
1463. * desc->threads_active to indicate that the
  1464. * IRQF_ONESHOT thread handler has been woken, but not
  1465. * yet finished. The bit is cleared when a thread
  1466. * completes. When all threads of a shared interrupt
  1467. * line have completed desc->threads_active becomes
  1468. * zero and the interrupt line is unmasked. See
1469. * handle.c:__irq_wake_thread() for further information.
  1470. *
  1471. * If no thread is woken by primary (hard irq context)
  1472. * interrupt handlers, then desc->threads_active is
  1473. * also checked for zero to unmask the irq line in the
  1474. * affected hard irq flow handlers
  1475. * (handle_[fasteoi|level]_irq).
  1476. *
  1477. * The new action gets the first zero bit of
  1478. * thread_mask assigned. See the loop above which or's
  1479. * all existing action->thread_mask bits.
  1480. */
  1481. new->thread_mask = 1UL << ffz(thread_mask);
  1482. } else if (new->handler == irq_default_primary_handler &&
  1483. !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
  1484. /*
  1485. * The interrupt was requested with handler = NULL, so
  1486. * we use the default primary handler for it. But it
  1487. * does not have the oneshot flag set. In combination
  1488. * with level interrupts this is deadly, because the
  1489. * default primary handler just wakes the thread, then
1490. * the irq line is reenabled, but the device still
  1491. * has the level irq asserted. Rinse and repeat....
  1492. *
  1493. * While this works for edge type interrupts, we play
  1494. * it safe and reject unconditionally because we can't
  1495. * say for sure which type this interrupt really
  1496. * has. The type flags are unreliable as the
  1497. * underlying chip implementation can override them.
  1498. */
  1499. pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n",
  1500. new->name, irq);
  1501. ret = -EINVAL;
  1502. goto out_unlock;
  1503. }
  1504. if (!shared) {
1505. /* Setup the type (level, edge, polarity) if configured: */
  1506. if (new->flags & IRQF_TRIGGER_MASK) {
  1507. ret = __irq_set_trigger(desc,
  1508. new->flags & IRQF_TRIGGER_MASK);
  1509. if (ret)
  1510. goto out_unlock;
  1511. }
  1512. /*
  1513. * Activate the interrupt. That activation must happen
  1514. * independently of IRQ_NOAUTOEN. request_irq() can fail
  1515. * and the callers are supposed to handle
  1516. * that. enable_irq() of an interrupt requested with
  1517. * IRQ_NOAUTOEN is not supposed to fail. The activation
1518. * keeps it in shutdown mode, it merely associates
  1519. * resources if necessary and if that's not possible it
  1520. * fails. Interrupts which are in managed shutdown mode
  1521. * will simply ignore that activation request.
  1522. */
  1523. ret = irq_activate(desc);
  1524. if (ret)
  1525. goto out_unlock;
  1526. desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
  1527. IRQS_ONESHOT | IRQS_WAITING);
  1528. irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
  1529. if (new->flags & IRQF_PERCPU) {
  1530. irqd_set(&desc->irq_data, IRQD_PER_CPU);
  1531. irq_settings_set_per_cpu(desc);
  1532. if (new->flags & IRQF_NO_DEBUG)
  1533. irq_settings_set_no_debug(desc);
  1534. }
  1535. if (noirqdebug)
  1536. irq_settings_set_no_debug(desc);
  1537. if (new->flags & IRQF_ONESHOT)
  1538. desc->istate |= IRQS_ONESHOT;
  1539. /* Exclude IRQ from balancing if requested */
  1540. if (new->flags & IRQF_NOBALANCING) {
  1541. irq_settings_set_no_balancing(desc);
  1542. irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
  1543. }
  1544. if (!(new->flags & IRQF_NO_AUTOEN) &&
  1545. irq_settings_can_autoenable(desc)) {
  1546. irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
  1547. } else {
  1548. /*
  1549. * Shared interrupts do not go well with disabling
  1550. * auto enable. The sharing interrupt might request
  1551. * it while it's still disabled and then wait for
  1552. * interrupts forever.
  1553. */
  1554. WARN_ON_ONCE(new->flags & IRQF_SHARED);
  1555. /* Undo nested disables: */
  1556. desc->depth = 1;
  1557. }
  1558. } else if (new->flags & IRQF_TRIGGER_MASK) {
  1559. unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
  1560. unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
  1561. if (nmsk != omsk)
  1562. /* hope the handler works with current trigger mode */
  1563. pr_warn("irq %d uses trigger mode %u; requested %u\n",
  1564. irq, omsk, nmsk);
  1565. }
  1566. *old_ptr = new;
  1567. irq_pm_install_action(desc, new);
  1568. /* Reset broken irq detection when installing new handler */
  1569. desc->irq_count = 0;
  1570. desc->irqs_unhandled = 0;
  1571. /*
  1572. * Check whether we disabled the irq via the spurious handler
  1573. * before. Reenable it and give it another chance.
  1574. */
  1575. if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
  1576. desc->istate &= ~IRQS_SPURIOUS_DISABLED;
  1577. __enable_irq(desc);
  1578. }
  1579. raw_spin_unlock_irqrestore(&desc->lock, flags);
  1580. chip_bus_sync_unlock(desc);
  1581. mutex_unlock(&desc->request_mutex);
  1582. irq_setup_timings(desc, new);
  1583. wake_up_and_wait_for_irq_thread_ready(desc, new);
  1584. wake_up_and_wait_for_irq_thread_ready(desc, new->secondary);
  1585. register_irq_proc(irq, desc);
  1586. new->dir = NULL;
  1587. register_handler_proc(irq, new);
  1588. return 0;
  1589. mismatch:
  1590. if (!(new->flags & IRQF_PROBE_SHARED)) {
  1591. pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
  1592. irq, new->flags, new->name, old->flags, old->name);
  1593. #ifdef CONFIG_DEBUG_SHIRQ
  1594. dump_stack();
  1595. #endif
  1596. }
  1597. ret = -EBUSY;
  1598. out_unlock:
  1599. raw_spin_unlock_irqrestore(&desc->lock, flags);
  1600. if (!desc->action)
  1601. irq_release_resources(desc);
  1602. out_bus_unlock:
  1603. chip_bus_sync_unlock(desc);
  1604. mutex_unlock(&desc->request_mutex);
  1605. out_thread:
  1606. if (new->thread) {
  1607. struct task_struct *t = new->thread;
  1608. new->thread = NULL;
  1609. kthread_stop(t);
  1610. put_task_struct(t);
  1611. }
  1612. if (new->secondary && new->secondary->thread) {
  1613. struct task_struct *t = new->secondary->thread;
  1614. new->secondary->thread = NULL;
  1615. kthread_stop(t);
  1616. put_task_struct(t);
  1617. }
  1618. out_mput:
  1619. module_put(desc->owner);
  1620. return ret;
  1621. }
  1622. /*
  1623. * Internal function to unregister an irqaction - used to free
  1624. * regular and special interrupts that are part of the architecture.
  1625. */
  1626. static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
  1627. {
  1628. unsigned irq = desc->irq_data.irq;
  1629. struct irqaction *action, **action_ptr;
  1630. unsigned long flags;
  1631. WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
  1632. mutex_lock(&desc->request_mutex);
  1633. chip_bus_lock(desc);
  1634. raw_spin_lock_irqsave(&desc->lock, flags);
  1635. /*
  1636. * There can be multiple actions per IRQ descriptor, find the right
  1637. * one based on the dev_id:
  1638. */
  1639. action_ptr = &desc->action;
  1640. for (;;) {
  1641. action = *action_ptr;
  1642. if (!action) {
  1643. WARN(1, "Trying to free already-free IRQ %d\n", irq);
  1644. raw_spin_unlock_irqrestore(&desc->lock, flags);
  1645. chip_bus_sync_unlock(desc);
  1646. mutex_unlock(&desc->request_mutex);
  1647. return NULL;
  1648. }
  1649. if (action->dev_id == dev_id)
  1650. break;
  1651. action_ptr = &action->next;
  1652. }
  1653. /* Found it - now remove it from the list of entries: */
  1654. *action_ptr = action->next;
  1655. irq_pm_remove_action(desc, action);
  1656. /* If this was the last handler, shut down the IRQ line: */
  1657. if (!desc->action) {
  1658. irq_settings_clr_disable_unlazy(desc);
  1659. /* Only shutdown. Deactivate after synchronize_hardirq() */
  1660. irq_shutdown(desc);
  1661. }
  1662. #ifdef CONFIG_SMP
  1663. /* make sure affinity_hint is cleaned up */
  1664. if (WARN_ON_ONCE(desc->affinity_hint))
  1665. desc->affinity_hint = NULL;
  1666. #endif
  1667. raw_spin_unlock_irqrestore(&desc->lock, flags);
  1668. /*
  1669. * Drop bus_lock here so the changes which were done in the chip
  1670. * callbacks above are synced out to the irq chips which hang
  1671. * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
  1672. *
  1673. * Aside of that the bus_lock can also be taken from the threaded
  1674. * handler in irq_finalize_oneshot() which results in a deadlock
  1675. * because kthread_stop() would wait forever for the thread to
  1676. * complete, which is blocked on the bus lock.
  1677. *
  1678. * The still held desc->request_mutex() protects against a
  1679. * concurrent request_irq() of this irq so the release of resources
  1680. * and timing data is properly serialized.
  1681. */
  1682. chip_bus_sync_unlock(desc);
  1683. unregister_handler_proc(irq, action);
  1684. /*
  1685. * Make sure it's not being used on another CPU and if the chip
  1686. * supports it also make sure that there is no (not yet serviced)
  1687. * interrupt in flight at the hardware level.
  1688. */
  1689. __synchronize_hardirq(desc, true);
  1690. #ifdef CONFIG_DEBUG_SHIRQ
  1691. /*
  1692. * It's a shared IRQ -- the driver ought to be prepared for an IRQ
  1693. * event to happen even now it's being freed, so let's make sure that
  1694. * is so by doing an extra call to the handler ....
  1695. *
  1696. * ( We do this after actually deregistering it, to make sure that a
  1697. * 'real' IRQ doesn't run in parallel with our fake. )
  1698. */
  1699. if (action->flags & IRQF_SHARED) {
  1700. local_irq_save(flags);
  1701. action->handler(irq, dev_id);
  1702. local_irq_restore(flags);
  1703. }
  1704. #endif
  1705. /*
  1706. * The action has already been removed above, but the thread writes
1707. * its oneshot mask bit when it completes. The request_mutex is
1708. * held across this, which prevents __setup_irq() from handing out
1709. * the same bit to a newly requested action.
  1710. */
  1711. if (action->thread) {
  1712. kthread_stop(action->thread);
  1713. put_task_struct(action->thread);
  1714. if (action->secondary && action->secondary->thread) {
  1715. kthread_stop(action->secondary->thread);
  1716. put_task_struct(action->secondary->thread);
  1717. }
  1718. }
  1719. /* Last action releases resources */
  1720. if (!desc->action) {
  1721. /*
  1722. * Reacquire bus lock as irq_release_resources() might
  1723. * require it to deallocate resources over the slow bus.
  1724. */
  1725. chip_bus_lock(desc);
  1726. /*
  1727. * There is no interrupt on the fly anymore. Deactivate it
  1728. * completely.
  1729. */
  1730. raw_spin_lock_irqsave(&desc->lock, flags);
  1731. irq_domain_deactivate_irq(&desc->irq_data);
  1732. raw_spin_unlock_irqrestore(&desc->lock, flags);
  1733. irq_release_resources(desc);
  1734. chip_bus_sync_unlock(desc);
  1735. irq_remove_timings(desc);
  1736. }
  1737. mutex_unlock(&desc->request_mutex);
  1738. irq_chip_pm_put(&desc->irq_data);
  1739. module_put(desc->owner);
  1740. kfree(action->secondary);
  1741. return action;
  1742. }
  1743. /**
  1744. * free_irq - free an interrupt allocated with request_irq
  1745. * @irq: Interrupt line to free
  1746. * @dev_id: Device identity to free
  1747. *
  1748. * Remove an interrupt handler. The handler is removed and if the
  1749. * interrupt line is no longer in use by any driver it is disabled.
  1750. * On a shared IRQ the caller must ensure the interrupt is disabled
  1751. * on the card it drives before calling this function. The function
  1752. * does not return until any executing interrupts for this IRQ
  1753. * have completed.
  1754. *
  1755. * This function must not be called from interrupt context.
  1756. *
  1757. * Returns the devname argument passed to request_irq.
  1758. */
  1759. const void *free_irq(unsigned int irq, void *dev_id)
  1760. {
  1761. struct irq_desc *desc = irq_to_desc(irq);
  1762. struct irqaction *action;
  1763. const char *devname;
  1764. if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
  1765. return NULL;
  1766. #ifdef CONFIG_SMP
  1767. if (WARN_ON(desc->affinity_notify))
  1768. desc->affinity_notify = NULL;
  1769. #endif
  1770. action = __free_irq(desc, dev_id);
  1771. if (!action)
  1772. return NULL;
  1773. devname = action->name;
  1774. kfree(action);
  1775. return devname;
  1776. }
  1777. EXPORT_SYMBOL(free_irq);
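/*
 * Illustrative sketch, not part of this file: request/free pairing. The
 * value returned by free_irq() is the devname string that was passed to
 * request_irq(); the foo_* names are hypothetical.
 *
 *    ret = request_irq(foo->irq, foo_interrupt, IRQF_SHARED, "foo", foo);
 *    ...
 *    // Teardown: quiesce the device first, then release the line.
 *    const void *name = free_irq(foo->irq, foo);  // name == "foo"
 */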
  1778. /* This function must be called with desc->lock held */
  1779. static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
  1780. {
  1781. const char *devname = NULL;
  1782. desc->istate &= ~IRQS_NMI;
  1783. if (!WARN_ON(desc->action == NULL)) {
  1784. irq_pm_remove_action(desc, desc->action);
  1785. devname = desc->action->name;
  1786. unregister_handler_proc(irq, desc->action);
  1787. kfree(desc->action);
  1788. desc->action = NULL;
  1789. }
  1790. irq_settings_clr_disable_unlazy(desc);
  1791. irq_shutdown_and_deactivate(desc);
  1792. irq_release_resources(desc);
  1793. irq_chip_pm_put(&desc->irq_data);
  1794. module_put(desc->owner);
  1795. return devname;
  1796. }
  1797. const void *free_nmi(unsigned int irq, void *dev_id)
  1798. {
  1799. struct irq_desc *desc = irq_to_desc(irq);
  1800. unsigned long flags;
  1801. const void *devname;
  1802. if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
  1803. return NULL;
  1804. if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
  1805. return NULL;
  1806. /* NMI still enabled */
  1807. if (WARN_ON(desc->depth == 0))
  1808. disable_nmi_nosync(irq);
  1809. raw_spin_lock_irqsave(&desc->lock, flags);
  1810. irq_nmi_teardown(desc);
  1811. devname = __cleanup_nmi(irq, desc);
  1812. raw_spin_unlock_irqrestore(&desc->lock, flags);
  1813. return devname;
  1814. }
  1815. /**
  1816. * request_threaded_irq - allocate an interrupt line
  1817. * @irq: Interrupt line to allocate
  1818. * @handler: Function to be called when the IRQ occurs.
  1819. * Primary handler for threaded interrupts.
  1820. * If handler is NULL and thread_fn != NULL
  1821. * the default primary handler is installed.
  1822. * @thread_fn: Function called from the irq handler thread
  1823. * If NULL, no irq thread is created
  1824. * @irqflags: Interrupt type flags
  1825. * @devname: An ascii name for the claiming device
  1826. * @dev_id: A cookie passed back to the handler function
  1827. *
  1828. * This call allocates interrupt resources and enables the
  1829. * interrupt line and IRQ handling. From the point this
  1830. * call is made your handler function may be invoked. Since
  1831. * your handler function must clear any interrupt the board
  1832. * raises, you must take care both to initialise your hardware
  1833. * and to set up the interrupt handler in the right order.
  1834. *
  1835. * If you want to set up a threaded irq handler for your device
  1836. * then you need to supply @handler and @thread_fn. @handler is
  1837. * still called in hard interrupt context and has to check
  1838. * whether the interrupt originates from the device. If yes it
  1839. * needs to disable the interrupt on the device and return
  1840. * IRQ_WAKE_THREAD which will wake up the handler thread and run
  1841. * @thread_fn. This split handler design is necessary to support
  1842. * shared interrupts.
  1843. *
  1844. * Dev_id must be globally unique. Normally the address of the
  1845. * device data structure is used as the cookie. Since the handler
  1846. * receives this value it makes sense to use it.
  1847. *
  1848. * If your interrupt is shared you must pass a non NULL dev_id
  1849. * as this is required when freeing the interrupt.
  1850. *
  1851. * Flags:
  1852. *
  1853. * IRQF_SHARED Interrupt is shared
  1854. * IRQF_TRIGGER_* Specify active edge(s) or level
  1855. * IRQF_ONESHOT Run thread_fn with interrupt line masked
  1856. */
  1857. int request_threaded_irq(unsigned int irq, irq_handler_t handler,
  1858. irq_handler_t thread_fn, unsigned long irqflags,
  1859. const char *devname, void *dev_id)
  1860. {
  1861. struct irqaction *action;
  1862. struct irq_desc *desc;
  1863. int retval;
  1864. if (irq == IRQ_NOTCONNECTED)
  1865. return -ENOTCONN;
  1866. /*
  1867. * Sanity-check: shared interrupts must pass in a real dev-ID,
  1868. * otherwise we'll have trouble later trying to figure out
  1869. * which interrupt is which (messes up the interrupt freeing
  1870. * logic etc).
  1871. *
  1872. * Also shared interrupts do not go well with disabling auto enable.
  1873. * The sharing interrupt might request it while it's still disabled
  1874. * and then wait for interrupts forever.
  1875. *
  1876. * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
  1877. * it cannot be set along with IRQF_NO_SUSPEND.
  1878. */
  1879. if (((irqflags & IRQF_SHARED) && !dev_id) ||
  1880. ((irqflags & IRQF_SHARED) && (irqflags & IRQF_NO_AUTOEN)) ||
  1881. (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
  1882. ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
  1883. return -EINVAL;
  1884. desc = irq_to_desc(irq);
  1885. if (!desc)
  1886. return -EINVAL;
  1887. if (!irq_settings_can_request(desc) ||
  1888. WARN_ON(irq_settings_is_per_cpu_devid(desc)))
  1889. return -EINVAL;
  1890. if (!handler) {
  1891. if (!thread_fn)
  1892. return -EINVAL;
  1893. handler = irq_default_primary_handler;
  1894. }
  1895. action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
  1896. if (!action)
  1897. return -ENOMEM;
  1898. action->handler = handler;
  1899. action->thread_fn = thread_fn;
  1900. action->flags = irqflags;
  1901. action->name = devname;
  1902. action->dev_id = dev_id;
  1903. retval = irq_chip_pm_get(&desc->irq_data);
  1904. if (retval < 0) {
  1905. kfree(action);
  1906. return retval;
  1907. }
  1908. retval = __setup_irq(irq, desc, action);
  1909. if (retval) {
  1910. irq_chip_pm_put(&desc->irq_data);
  1911. kfree(action->secondary);
  1912. kfree(action);
  1913. }
  1914. #ifdef CONFIG_DEBUG_SHIRQ_FIXME
  1915. if (!retval && (irqflags & IRQF_SHARED)) {
  1916. /*
  1917. * It's a shared IRQ -- the driver ought to be prepared for it
  1918. * to happen immediately, so let's make sure....
  1919. * We disable the irq to make sure that a 'real' IRQ doesn't
  1920. * run in parallel with our fake.
  1921. */
  1922. unsigned long flags;
  1923. disable_irq(irq);
  1924. local_irq_save(flags);
  1925. handler(irq, dev_id);
  1926. local_irq_restore(flags);
  1927. enable_irq(irq);
  1928. }
  1929. #endif
  1930. return retval;
  1931. }
  1932. EXPORT_SYMBOL(request_threaded_irq);
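/*
 * Illustrative sketch, not part of this file: the split handler pattern
 * described in the kerneldoc above. foo_quick_check() runs in hard
 * interrupt context, quiets the device and returns IRQ_WAKE_THREAD so
 * that foo_thread_fn() runs in the interrupt thread. All foo_* names
 * are hypothetical.
 *
 *    static irqreturn_t foo_quick_check(int irq, void *dev_id)
 *    {
 *        struct foo_dev *foo = dev_id;
 *
 *        if (!foo_hw_irq_pending(foo))
 *            return IRQ_NONE;        // not ours (shared line)
 *        foo_hw_mask_irq(foo);       // stop the device from re-raising it
 *        return IRQ_WAKE_THREAD;
 *    }
 *
 *    static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *    {
 *        struct foo_dev *foo = dev_id;
 *
 *        foo_process_events(foo);    // may sleep
 *        foo_hw_unmask_irq(foo);
 *        return IRQ_HANDLED;
 *    }
 *
 *    ret = request_threaded_irq(foo->irq, foo_quick_check, foo_thread_fn,
 *                               IRQF_SHARED, "foo", foo);
 */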
  1933. /**
  1934. * request_any_context_irq - allocate an interrupt line
  1935. * @irq: Interrupt line to allocate
  1936. * @handler: Function to be called when the IRQ occurs.
  1937. * Threaded handler for threaded interrupts.
  1938. * @flags: Interrupt type flags
  1939. * @name: An ascii name for the claiming device
  1940. * @dev_id: A cookie passed back to the handler function
  1941. *
  1942. * This call allocates interrupt resources and enables the
  1943. * interrupt line and IRQ handling. It selects either a
  1944. * hardirq or threaded handling method depending on the
  1945. * context.
  1946. *
  1947. * On failure, it returns a negative value. On success,
  1948. * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
  1949. */
  1950. int request_any_context_irq(unsigned int irq, irq_handler_t handler,
  1951. unsigned long flags, const char *name, void *dev_id)
  1952. {
  1953. struct irq_desc *desc;
  1954. int ret;
  1955. if (irq == IRQ_NOTCONNECTED)
  1956. return -ENOTCONN;
  1957. desc = irq_to_desc(irq);
  1958. if (!desc)
  1959. return -EINVAL;
  1960. if (irq_settings_is_nested_thread(desc)) {
  1961. ret = request_threaded_irq(irq, NULL, handler,
  1962. flags, name, dev_id);
  1963. return !ret ? IRQC_IS_NESTED : ret;
  1964. }
  1965. ret = request_irq(irq, handler, flags, name, dev_id);
  1966. return !ret ? IRQC_IS_HARDIRQ : ret;
  1967. }
  1968. EXPORT_SYMBOL_GPL(request_any_context_irq);
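/*
 * Illustrative sketch, not part of this file: because a successful call
 * returns IRQC_IS_HARDIRQ or IRQC_IS_NESTED rather than 0, callers need
 * to test for a negative value. The foo_* names are hypothetical.
 *
 *    ret = request_any_context_irq(foo->irq, foo_handler, 0, "foo", foo);
 *    if (ret < 0)
 *        return ret;
 *    // ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED; both indicate success.
 */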
  1969. /**
  1970. * request_nmi - allocate an interrupt line for NMI delivery
  1971. * @irq: Interrupt line to allocate
  1972. * @handler: Function to be called when the IRQ occurs.
  1973. * Threaded handler for threaded interrupts.
  1974. * @irqflags: Interrupt type flags
  1975. * @name: An ascii name for the claiming device
  1976. * @dev_id: A cookie passed back to the handler function
  1977. *
  1978. * This call allocates interrupt resources and enables the
  1979. * interrupt line and IRQ handling. It sets up the IRQ line
  1980. * to be handled as an NMI.
  1981. *
  1982. * An interrupt line delivering NMIs cannot be shared and IRQ handling
  1983. * cannot be threaded.
  1984. *
1985. * Interrupt lines requested for NMI delivery must produce per-cpu
1986. * interrupts and must have automatic enabling disabled.
  1987. *
  1988. * Dev_id must be globally unique. Normally the address of the
  1989. * device data structure is used as the cookie. Since the handler
  1990. * receives this value it makes sense to use it.
  1991. *
1992. * If the interrupt line cannot be used to deliver NMIs, the function
1993. * will fail and return a negative value.
  1994. */
  1995. int request_nmi(unsigned int irq, irq_handler_t handler,
  1996. unsigned long irqflags, const char *name, void *dev_id)
  1997. {
  1998. struct irqaction *action;
  1999. struct irq_desc *desc;
  2000. unsigned long flags;
  2001. int retval;
  2002. if (irq == IRQ_NOTCONNECTED)
  2003. return -ENOTCONN;
2004. /* NMIs cannot be shared and cannot be used for polling */
  2005. if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
  2006. return -EINVAL;
  2007. if (!(irqflags & IRQF_PERCPU))
  2008. return -EINVAL;
  2009. if (!handler)
  2010. return -EINVAL;
  2011. desc = irq_to_desc(irq);
  2012. if (!desc || (irq_settings_can_autoenable(desc) &&
  2013. !(irqflags & IRQF_NO_AUTOEN)) ||
  2014. !irq_settings_can_request(desc) ||
  2015. WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
  2016. !irq_supports_nmi(desc))
  2017. return -EINVAL;
  2018. action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
  2019. if (!action)
  2020. return -ENOMEM;
  2021. action->handler = handler;
  2022. action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
  2023. action->name = name;
  2024. action->dev_id = dev_id;
  2025. retval = irq_chip_pm_get(&desc->irq_data);
  2026. if (retval < 0)
  2027. goto err_out;
  2028. retval = __setup_irq(irq, desc, action);
  2029. if (retval)
  2030. goto err_irq_setup;
  2031. raw_spin_lock_irqsave(&desc->lock, flags);
  2032. /* Setup NMI state */
  2033. desc->istate |= IRQS_NMI;
  2034. retval = irq_nmi_setup(desc);
  2035. if (retval) {
  2036. __cleanup_nmi(irq, desc);
  2037. raw_spin_unlock_irqrestore(&desc->lock, flags);
  2038. return -EINVAL;
  2039. }
  2040. raw_spin_unlock_irqrestore(&desc->lock, flags);
  2041. return 0;
  2042. err_irq_setup:
  2043. irq_chip_pm_put(&desc->irq_data);
  2044. err_out:
  2045. kfree(action);
  2046. return retval;
  2047. }
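/*
 * Illustrative sketch, not part of this file: requesting an NMI. Per the
 * checks above the line cannot be shared, must not auto-enable and the
 * request has to carry IRQF_PERCPU. enable_nmi() and disable_nmi_nosync()
 * are assumed to be the matching enable/disable helpers from earlier in
 * this file; foo_* names are hypothetical.
 *
 *    ret = request_nmi(irq, foo_nmi_handler,
 *                      IRQF_PERCPU | IRQF_NO_AUTOEN, "foo-nmi", foo);
 *    if (ret)
 *        return ret;
 *    enable_nmi(irq);
 *    ...
 *    disable_nmi_nosync(irq);
 *    free_nmi(irq, foo);
 */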
  2048. void enable_percpu_irq(unsigned int irq, unsigned int type)
  2049. {
  2050. unsigned int cpu = smp_processor_id();
  2051. unsigned long flags;
  2052. struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
  2053. if (!desc)
  2054. return;
  2055. /*
  2056. * If the trigger type is not specified by the caller, then
  2057. * use the default for this interrupt.
  2058. */
  2059. type &= IRQ_TYPE_SENSE_MASK;
  2060. if (type == IRQ_TYPE_NONE)
  2061. type = irqd_get_trigger_type(&desc->irq_data);
  2062. if (type != IRQ_TYPE_NONE) {
  2063. int ret;
  2064. ret = __irq_set_trigger(desc, type);
  2065. if (ret) {
  2066. WARN(1, "failed to set type for IRQ%d\n", irq);
  2067. goto out;
  2068. }
  2069. }
  2070. irq_percpu_enable(desc, cpu);
  2071. out:
  2072. irq_put_desc_unlock(desc, flags);
  2073. }
  2074. EXPORT_SYMBOL_GPL(enable_percpu_irq);
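/*
 * Illustrative sketch, not part of this file: a per-CPU interrupt is
 * only enabled on the calling CPU, so this is typically done from a CPU
 * hotplug "starting" callback (or via on_each_cpu()). foo_irq is a
 * hypothetical Linux irq number.
 *
 *    static int foo_cpu_starting(unsigned int cpu)
 *    {
 *        enable_percpu_irq(foo_irq, IRQ_TYPE_NONE);  // keep the default trigger
 *        return 0;
 *    }
 *
 *    static int foo_cpu_dying(unsigned int cpu)
 *    {
 *        disable_percpu_irq(foo_irq);
 *        return 0;
 *    }
 */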
  2075. void enable_percpu_nmi(unsigned int irq, unsigned int type)
  2076. {
  2077. enable_percpu_irq(irq, type);
  2078. }
  2079. /**
  2080. * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
  2081. * @irq: Linux irq number to check for
  2082. *
  2083. * Must be called from a non migratable context. Returns the enable
  2084. * state of a per cpu interrupt on the current cpu.
  2085. */
  2086. bool irq_percpu_is_enabled(unsigned int irq)
  2087. {
  2088. unsigned int cpu = smp_processor_id();
  2089. struct irq_desc *desc;
  2090. unsigned long flags;
  2091. bool is_enabled;
  2092. desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
  2093. if (!desc)
  2094. return false;
  2095. is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
  2096. irq_put_desc_unlock(desc, flags);
  2097. return is_enabled;
  2098. }
  2099. EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
  2100. void disable_percpu_irq(unsigned int irq)
  2101. {
  2102. unsigned int cpu = smp_processor_id();
  2103. unsigned long flags;
  2104. struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
  2105. if (!desc)
  2106. return;
  2107. irq_percpu_disable(desc, cpu);
  2108. irq_put_desc_unlock(desc, flags);
  2109. }
  2110. EXPORT_SYMBOL_GPL(disable_percpu_irq);
  2111. void disable_percpu_nmi(unsigned int irq)
  2112. {
  2113. disable_percpu_irq(irq);
  2114. }
  2115. /*
  2116. * Internal function to unregister a percpu irqaction.
  2117. */
  2118. static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
  2119. {
  2120. struct irq_desc *desc = irq_to_desc(irq);
  2121. struct irqaction *action;
  2122. unsigned long flags;
  2123. WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
  2124. if (!desc)
  2125. return NULL;
  2126. raw_spin_lock_irqsave(&desc->lock, flags);
  2127. action = desc->action;
  2128. if (!action || action->percpu_dev_id != dev_id) {
  2129. WARN(1, "Trying to free already-free IRQ %d\n", irq);
  2130. goto bad;
  2131. }
  2132. if (!cpumask_empty(desc->percpu_enabled)) {
  2133. WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
  2134. irq, cpumask_first(desc->percpu_enabled));
  2135. goto bad;
  2136. }
  2137. /* Found it - now remove it from the list of entries: */
  2138. desc->action = NULL;
  2139. desc->istate &= ~IRQS_NMI;
  2140. raw_spin_unlock_irqrestore(&desc->lock, flags);
  2141. unregister_handler_proc(irq, action);
  2142. irq_chip_pm_put(&desc->irq_data);
  2143. module_put(desc->owner);
  2144. return action;
  2145. bad:
  2146. raw_spin_unlock_irqrestore(&desc->lock, flags);
  2147. return NULL;
  2148. }
  2149. /**
  2150. * remove_percpu_irq - free a per-cpu interrupt
  2151. * @irq: Interrupt line to free
  2152. * @act: irqaction for the interrupt
  2153. *
  2154. * Used to remove interrupts statically setup by the early boot process.
  2155. */
  2156. void remove_percpu_irq(unsigned int irq, struct irqaction *act)
  2157. {
  2158. struct irq_desc *desc = irq_to_desc(irq);
  2159. if (desc && irq_settings_is_per_cpu_devid(desc))
  2160. __free_percpu_irq(irq, act->percpu_dev_id);
  2161. }
  2162. /**
  2163. * free_percpu_irq - free an interrupt allocated with request_percpu_irq
  2164. * @irq: Interrupt line to free
  2165. * @dev_id: Device identity to free
  2166. *
  2167. * Remove a percpu interrupt handler. The handler is removed, but
  2168. * the interrupt line is not disabled. This must be done on each
  2169. * CPU before calling this function. The function does not return
  2170. * until any executing interrupts for this IRQ have completed.
  2171. *
  2172. * This function must not be called from interrupt context.
  2173. */
  2174. void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
  2175. {
  2176. struct irq_desc *desc = irq_to_desc(irq);
  2177. if (!desc || !irq_settings_is_per_cpu_devid(desc))
  2178. return;
  2179. chip_bus_lock(desc);
  2180. kfree(__free_percpu_irq(irq, dev_id));
  2181. chip_bus_sync_unlock(desc);
  2182. }
  2183. EXPORT_SYMBOL_GPL(free_percpu_irq);
  2184. void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
  2185. {
  2186. struct irq_desc *desc = irq_to_desc(irq);
  2187. if (!desc || !irq_settings_is_per_cpu_devid(desc))
  2188. return;
  2189. if (WARN_ON(!(desc->istate & IRQS_NMI)))
  2190. return;
  2191. kfree(__free_percpu_irq(irq, dev_id));
  2192. }
  2193. /**
  2194. * setup_percpu_irq - setup a per-cpu interrupt
  2195. * @irq: Interrupt line to setup
  2196. * @act: irqaction for the interrupt
  2197. *
  2198. * Used to statically setup per-cpu interrupts in the early boot process.
  2199. */
  2200. int setup_percpu_irq(unsigned int irq, struct irqaction *act)
  2201. {
  2202. struct irq_desc *desc = irq_to_desc(irq);
  2203. int retval;
  2204. if (!desc || !irq_settings_is_per_cpu_devid(desc))
  2205. return -EINVAL;
  2206. retval = irq_chip_pm_get(&desc->irq_data);
  2207. if (retval < 0)
  2208. return retval;
  2209. retval = __setup_irq(irq, desc, act);
  2210. if (retval)
  2211. irq_chip_pm_put(&desc->irq_data);
  2212. return retval;
  2213. }
  2214. /**
  2215. * __request_percpu_irq - allocate a percpu interrupt line
  2216. * @irq: Interrupt line to allocate
  2217. * @handler: Function to be called when the IRQ occurs.
  2218. * @flags: Interrupt type flags (IRQF_TIMER only)
  2219. * @devname: An ascii name for the claiming device
  2220. * @dev_id: A percpu cookie passed back to the handler function
  2221. *
  2222. * This call allocates interrupt resources and enables the
  2223. * interrupt on the local CPU. If the interrupt is supposed to be
  2224. * enabled on other CPUs, it has to be done on each CPU using
  2225. * enable_percpu_irq().
  2226. *
  2227. * Dev_id must be globally unique. It is a per-cpu variable, and
  2228. * the handler gets called with the interrupted CPU's instance of
  2229. * that variable.
  2230. */
  2231. int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
  2232. unsigned long flags, const char *devname,
  2233. void __percpu *dev_id)
  2234. {
  2235. struct irqaction *action;
  2236. struct irq_desc *desc;
  2237. int retval;
  2238. if (!dev_id)
  2239. return -EINVAL;
  2240. desc = irq_to_desc(irq);
  2241. if (!desc || !irq_settings_can_request(desc) ||
  2242. !irq_settings_is_per_cpu_devid(desc))
  2243. return -EINVAL;
  2244. if (flags && flags != IRQF_TIMER)
  2245. return -EINVAL;
  2246. action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
  2247. if (!action)
  2248. return -ENOMEM;
  2249. action->handler = handler;
  2250. action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
  2251. action->name = devname;
  2252. action->percpu_dev_id = dev_id;
  2253. retval = irq_chip_pm_get(&desc->irq_data);
  2254. if (retval < 0) {
  2255. kfree(action);
  2256. return retval;
  2257. }
  2258. retval = __setup_irq(irq, desc, action);
  2259. if (retval) {
  2260. irq_chip_pm_put(&desc->irq_data);
  2261. kfree(action);
  2262. }
  2263. return retval;
  2264. }
  2265. EXPORT_SYMBOL_GPL(__request_percpu_irq);
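/*
 * Illustrative sketch, not part of this file: requesting a per-CPU
 * interrupt through the flag-less request_percpu_irq() wrapper with a
 * percpu dev_id, then enabling it on each CPU. The foo_* names are
 * hypothetical.
 *
 *    static DEFINE_PER_CPU(struct foo_pcpu, foo_pcpu_data);
 *
 *    ret = request_percpu_irq(irq, foo_percpu_handler, "foo", &foo_pcpu_data);
 *    if (ret)
 *        return ret;
 *    // The handler gets the interrupted CPU's &foo_pcpu_data instance.
 *    // Enable on each CPU, e.g. from a cpuhp "starting" callback:
 *    enable_percpu_irq(irq, IRQ_TYPE_NONE);
 */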
  2266. /**
  2267. * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
  2268. * @irq: Interrupt line to allocate
  2269. * @handler: Function to be called when the IRQ occurs.
  2270. * @name: An ascii name for the claiming device
  2271. * @dev_id: A percpu cookie passed back to the handler function
  2272. *
  2273. * This call allocates interrupt resources for a per CPU NMI. Per CPU NMIs
  2274. * have to be setup on each CPU by calling prepare_percpu_nmi() before
  2275. * being enabled on the same CPU by using enable_percpu_nmi().
  2276. *
  2277. * Dev_id must be globally unique. It is a per-cpu variable, and
  2278. * the handler gets called with the interrupted CPU's instance of
  2279. * that variable.
  2280. *
2281. * Interrupt lines requested for NMI delivery should have automatic
2282. * enabling disabled.
  2283. *
2284. * If the interrupt line cannot be used to deliver NMIs, the function
2285. * will fail and return a negative value.
  2286. */
  2287. int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
  2288. const char *name, void __percpu *dev_id)
  2289. {
  2290. struct irqaction *action;
  2291. struct irq_desc *desc;
  2292. unsigned long flags;
  2293. int retval;
  2294. if (!handler)
  2295. return -EINVAL;
  2296. desc = irq_to_desc(irq);
  2297. if (!desc || !irq_settings_can_request(desc) ||
  2298. !irq_settings_is_per_cpu_devid(desc) ||
  2299. irq_settings_can_autoenable(desc) ||
  2300. !irq_supports_nmi(desc))
  2301. return -EINVAL;
  2302. /* The line cannot already be NMI */
  2303. if (desc->istate & IRQS_NMI)
  2304. return -EINVAL;
  2305. action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
  2306. if (!action)
  2307. return -ENOMEM;
  2308. action->handler = handler;
  2309. action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
  2310. | IRQF_NOBALANCING;
  2311. action->name = name;
  2312. action->percpu_dev_id = dev_id;
  2313. retval = irq_chip_pm_get(&desc->irq_data);
  2314. if (retval < 0)
  2315. goto err_out;
  2316. retval = __setup_irq(irq, desc, action);
  2317. if (retval)
  2318. goto err_irq_setup;
  2319. raw_spin_lock_irqsave(&desc->lock, flags);
  2320. desc->istate |= IRQS_NMI;
  2321. raw_spin_unlock_irqrestore(&desc->lock, flags);
  2322. return 0;
  2323. err_irq_setup:
  2324. irq_chip_pm_put(&desc->irq_data);
  2325. err_out:
  2326. kfree(action);
  2327. return retval;
  2328. }
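/*
 * Illustrative sketch, not part of this file: per-CPU NMI bring-up. The
 * line is requested once, then every CPU prepares and enables its own
 * copy from non-preemptible context. The foo_* names are hypothetical.
 *
 *    ret = request_percpu_nmi(irq, foo_nmi_handler, "foo-nmi", &foo_pcpu_data);
 *    if (ret)
 *        return ret;
 *
 *    // On each CPU, e.g. from a cpuhp "starting" callback:
 *    ret = prepare_percpu_nmi(irq);
 *    if (!ret)
 *        enable_percpu_nmi(irq, IRQ_TYPE_NONE);
 */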
  2329. /**
  2330. * prepare_percpu_nmi - performs CPU local setup for NMI delivery
  2331. * @irq: Interrupt line to prepare for NMI delivery
  2332. *
  2333. * This call prepares an interrupt line to deliver NMI on the current CPU,
  2334. * before that interrupt line gets enabled with enable_percpu_nmi().
  2335. *
  2336. * As a CPU local operation, this should be called from non-preemptible
  2337. * context.
  2338. *
2339. * If the interrupt line cannot be used to deliver NMIs, the function
2340. * will fail and return a negative value.
  2341. */
  2342. int prepare_percpu_nmi(unsigned int irq)
  2343. {
  2344. unsigned long flags;
  2345. struct irq_desc *desc;
  2346. int ret = 0;
  2347. WARN_ON(preemptible());
  2348. desc = irq_get_desc_lock(irq, &flags,
  2349. IRQ_GET_DESC_CHECK_PERCPU);
  2350. if (!desc)
  2351. return -EINVAL;
  2352. if (WARN(!(desc->istate & IRQS_NMI),
  2353. KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
  2354. irq)) {
  2355. ret = -EINVAL;
  2356. goto out;
  2357. }
  2358. ret = irq_nmi_setup(desc);
  2359. if (ret) {
  2360. pr_err("Failed to setup NMI delivery: irq %u\n", irq);
  2361. goto out;
  2362. }
  2363. out:
  2364. irq_put_desc_unlock(desc, flags);
  2365. return ret;
  2366. }
  2367. /**
  2368. * teardown_percpu_nmi - undoes NMI setup of IRQ line
  2369. * @irq: Interrupt line from which CPU local NMI configuration should be
  2370. * removed
  2371. *
  2372. * This call undoes the setup done by prepare_percpu_nmi().
  2373. *
  2374. * IRQ line should not be enabled for the current CPU.
  2375. *
  2376. * As a CPU local operation, this should be called from non-preemptible
  2377. * context.
  2378. */
  2379. void teardown_percpu_nmi(unsigned int irq)
  2380. {
  2381. unsigned long flags;
  2382. struct irq_desc *desc;
  2383. WARN_ON(preemptible());
  2384. desc = irq_get_desc_lock(irq, &flags,
  2385. IRQ_GET_DESC_CHECK_PERCPU);
  2386. if (!desc)
  2387. return;
  2388. if (WARN_ON(!(desc->istate & IRQS_NMI)))
  2389. goto out;
  2390. irq_nmi_teardown(desc);
  2391. out:
  2392. irq_put_desc_unlock(desc, flags);
  2393. }
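/*
 * Illustrative sketch, not part of this file: the CPU-local teardown
 * mirrors the bring-up order, again from non-preemptible context on the
 * CPU in question:
 *
 *    disable_percpu_nmi(irq);
 *    teardown_percpu_nmi(irq);
 */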
  2394. int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
  2395. bool *state)
  2396. {
  2397. struct irq_chip *chip;
  2398. int err = -EINVAL;
  2399. do {
  2400. chip = irq_data_get_irq_chip(data);
  2401. if (WARN_ON_ONCE(!chip))
  2402. return -ENODEV;
  2403. if (chip->irq_get_irqchip_state)
  2404. break;
  2405. #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
  2406. data = data->parent_data;
  2407. #else
  2408. data = NULL;
  2409. #endif
  2410. } while (data);
  2411. if (data)
  2412. err = chip->irq_get_irqchip_state(data, which, state);
  2413. return err;
  2414. }
  2415. /**
2416. * irq_get_irqchip_state - returns the irqchip state of an interrupt.
  2417. * @irq: Interrupt line that is forwarded to a VM
  2418. * @which: One of IRQCHIP_STATE_* the caller wants to know about
  2419. * @state: a pointer to a boolean where the state is to be stored
  2420. *
  2421. * This call snapshots the internal irqchip state of an
  2422. * interrupt, returning into @state the bit corresponding to
2423. * @which.
  2424. *
  2425. * This function should be called with preemption disabled if the
  2426. * interrupt controller has per-cpu registers.
  2427. */
  2428. int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
  2429. bool *state)
  2430. {
  2431. struct irq_desc *desc;
  2432. struct irq_data *data;
  2433. unsigned long flags;
  2434. int err = -EINVAL;
  2435. desc = irq_get_desc_buslock(irq, &flags, 0);
  2436. if (!desc)
  2437. return err;
  2438. data = irq_desc_get_irq_data(desc);
  2439. err = __irq_get_irqchip_state(data, which, state);
  2440. irq_put_desc_busunlock(desc, flags);
  2441. return err;
  2442. }
  2443. EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
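/*
 * Illustrative sketch, not part of this file: checking whether an
 * interrupt forwarded to a guest is still pending at the irqchip level.
 *
 *    bool pending;
 *
 *    err = irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
 *    if (!err && pending)
 *        ...    // hardware still has the interrupt latched
 */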
  2444. /**
  2445. * irq_set_irqchip_state - set the state of a forwarded interrupt.
  2446. * @irq: Interrupt line that is forwarded to a VM
  2447. * @which: State to be restored (one of IRQCHIP_STATE_*)
  2448. * @val: Value corresponding to @which
  2449. *
  2450. * This call sets the internal irqchip state of an interrupt,
  2451. * depending on the value of @which.
  2452. *
  2453. * This function should be called with migration disabled if the
  2454. * interrupt controller has per-cpu registers.
  2455. */
  2456. int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
  2457. bool val)
  2458. {
  2459. struct irq_desc *desc;
  2460. struct irq_data *data;
  2461. struct irq_chip *chip;
  2462. unsigned long flags;
  2463. int err = -EINVAL;
  2464. desc = irq_get_desc_buslock(irq, &flags, 0);
  2465. if (!desc)
  2466. return err;
  2467. data = irq_desc_get_irq_data(desc);
  2468. do {
  2469. chip = irq_data_get_irq_chip(data);
  2470. if (WARN_ON_ONCE(!chip)) {
  2471. err = -ENODEV;
  2472. goto out_unlock;
  2473. }
  2474. if (chip->irq_set_irqchip_state)
  2475. break;
  2476. #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
  2477. data = data->parent_data;
  2478. #else
  2479. data = NULL;
  2480. #endif
  2481. } while (data);
  2482. if (data)
  2483. err = chip->irq_set_irqchip_state(data, which, val);
  2484. out_unlock:
  2485. irq_put_desc_busunlock(desc, flags);
  2486. return err;
  2487. }
  2488. EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
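/*
 * Illustrative sketch, not part of this file: re-injecting a previously
 * saved pending bit, e.g. when a vCPU's interrupt state is restored.
 *
 *    err = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, true);
 */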
  2489. /**
  2490. * irq_has_action - Check whether an interrupt is requested
  2491. * @irq: The linux irq number
  2492. *
  2493. * Returns: A snapshot of the current state
  2494. */
  2495. bool irq_has_action(unsigned int irq)
  2496. {
  2497. bool res;
  2498. rcu_read_lock();
  2499. res = irq_desc_has_action(irq_to_desc(irq));
  2500. rcu_read_unlock();
  2501. return res;
  2502. }
  2503. EXPORT_SYMBOL_GPL(irq_has_action);
  2504. /**
  2505. * irq_check_status_bit - Check whether bits in the irq descriptor status are set
  2506. * @irq: The linux irq number
  2507. * @bitmask: The bitmask to evaluate
  2508. *
  2509. * Returns: True if one of the bits in @bitmask is set
  2510. */
  2511. bool irq_check_status_bit(unsigned int irq, unsigned int bitmask)
  2512. {
  2513. struct irq_desc *desc;
  2514. bool res = false;
  2515. rcu_read_lock();
  2516. desc = irq_to_desc(irq);
  2517. if (desc)
  2518. res = !!(desc->status_use_accessors & bitmask);
  2519. rcu_read_unlock();
  2520. return res;
  2521. }
  2522. EXPORT_SYMBOL_GPL(irq_check_status_bit);