/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2016 Cavium, Inc.
 */

#include <linux/of_address.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/bitops.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/of.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-ciu2-defs.h>
#include <asm/octeon/cvmx-ciu3-defs.h>

static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror);
static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror);
static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock);
static DEFINE_PER_CPU(unsigned int, octeon_irq_ciu3_idt_ip2);
static DEFINE_PER_CPU(unsigned int, octeon_irq_ciu3_idt_ip3);
static DEFINE_PER_CPU(struct octeon_ciu3_info *, octeon_ciu3_info);
#define CIU3_MBOX_PER_CORE 10

/*
 * The 8 most significant bits of the intsn identify the interrupt major block.
 * Each major block might use its own interrupt domain. Thus 256 domains are
 * needed.
 */
#define MAX_CIU3_DOMAINS	256
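
/*
 * Illustrative note (assuming the 20-bit intsn encoding used by CIU3):
 * an intsn of 0x3b000 falls in major block 0x3b (0x3b000 >> 12), so all
 * intsns sharing those top 8 bits can be served by one per-block domain.
 */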
typedef irq_hw_number_t (*octeon_ciu3_intsn2hw_t)(struct irq_domain *, unsigned int);

/* Information for each ciu3 in the system */
struct octeon_ciu3_info {
	u64			ciu3_addr;
	int			node;
	struct irq_domain	*domain[MAX_CIU3_DOMAINS];
	octeon_ciu3_intsn2hw_t	intsn2hw[MAX_CIU3_DOMAINS];
};

/* Each ciu3 in the system uses its own data (one ciu3 per node) */
static struct octeon_ciu3_info	*octeon_ciu3_info_per_node[4];

struct octeon_irq_ciu_domain_data {
	int num_sum;	/* number of sum registers (2 or 3). */
};

/* Register offsets from ciu3_addr */
#define CIU3_CONST		0x220
#define CIU3_IDT_CTL(_idt)	((_idt) * 8 + 0x110000)
#define CIU3_IDT_PP(_idt, _idx)	((_idt) * 32 + (_idx) * 8 + 0x120000)
#define CIU3_IDT_IO(_idt)	((_idt) * 8 + 0x130000)
#define CIU3_DEST_PP_INT(_pp_ip) ((_pp_ip) * 8 + 0x200000)
#define CIU3_DEST_IO_INT(_io)	((_io) * 8 + 0x210000)
#define CIU3_ISC_CTL(_intsn)	((_intsn) * 8 + 0x80000000)
#define CIU3_ISC_W1C(_intsn)	((_intsn) * 8 + 0x90000000)
#define CIU3_ISC_W1S(_intsn)	((_intsn) * 8 + 0xa0000000)
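
/*
 * A quick sanity check of the arithmetic (illustrative values):
 *   CIU3_IDT_CTL(1)       = 1 * 8 + 0x110000           = 0x110008
 *   CIU3_IDT_PP(1, 0)     = 1 * 32 + 0 * 8 + 0x120000  = 0x120020
 *   CIU3_ISC_W1C(0x3b000) = 0x3b000 * 8 + 0x90000000   = 0x901d8000
 * Each result is an offset that is added to ciu3_addr to form the CSR
 * address.
 */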
static __read_mostly int octeon_irq_ciu_to_irq[8][64];

struct octeon_ciu_chip_data {
	union {
		struct {		/* only used for ciu3 */
			u64 ciu3_addr;
			unsigned int intsn;
		};
		struct {		/* only used for ciu/ciu2 */
			u8 line;
			u8 bit;
		};
	};
	int gpio_line;
	int current_cpu;	/* Next CPU expected to take this irq */
	int ciu_node;		/* NUMA node number of the CIU */
};

struct octeon_core_chip_data {
	struct mutex core_irq_mutex;
	bool current_en;
	bool desired_en;
	u8 bit;
};

#define MIPS_CORE_IRQ_LINES 8

static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES];
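
/*
 * Bind a Linux irq number to a (line, bit) position in the CIU summary
 * registers: allocate the per-irq chip data, install the flow handler,
 * and record the reverse line/bit -> irq lookup used by the dispatchers.
 */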
static int octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line,
				      struct irq_chip *chip,
				      irq_flow_handler_t handler)
{
	struct octeon_ciu_chip_data *cd;

	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd)
		return -ENOMEM;

	irq_set_chip_and_handler(irq, chip, handler);
	cd->line = line;
	cd->bit = bit;
	cd->gpio_line = gpio_line;

	irq_set_chip_data(irq, cd);
	octeon_irq_ciu_to_irq[line][bit] = irq;
	return 0;
}

static void octeon_irq_free_cd(struct irq_domain *d, unsigned int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);

	irq_set_chip_data(irq, NULL);
	kfree(cd);
}

static int octeon_irq_force_ciu_mapping(struct irq_domain *domain,
					int irq, int line, int bit)
{
	struct device_node *of_node;
	int ret;

	of_node = irq_domain_get_of_node(domain);
	if (!of_node)
		return -EINVAL;
	ret = irq_alloc_desc_at(irq, of_node_to_nid(of_node));
	if (ret < 0)
		return ret;

	return irq_domain_associate(domain, irq, line << 6 | bit);
}
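
/*
 * Translate between Linux CPU numbers and hardware core ids. On a UP
 * kernel there is only the executing core, so both maps collapse to it.
 */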
static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
	return cpu_logical_map(cpu);
#else
	return cvmx_get_core_num();
#endif
}

static int octeon_cpu_for_coreid(int coreid)
{
#ifdef CONFIG_SMP
	return cpu_number_map(coreid);
#else
	return smp_processor_id();
#endif
}

static void octeon_irq_core_ack(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	unsigned int bit = cd->bit;

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	if (bit < 2)
		clear_c0_cause(0x100 << bit);
}

static void octeon_irq_core_eoi(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	set_c0_status(0x100 << cd->bit);
}

static void octeon_irq_core_set_enable_local(void *arg)
{
	struct irq_data *data = arg;
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	unsigned int mask = 0x100 << cd->bit;

	/*
	 * Interrupts are already disabled, so these are atomic.
	 */
	if (cd->desired_en)
		set_c0_status(mask);
	else
		clear_c0_status(mask);
}

static void octeon_irq_core_disable(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	cd->desired_en = false;
}

static void octeon_irq_core_enable(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	cd->desired_en = true;
}

static void octeon_irq_core_bus_lock(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	mutex_lock(&cd->core_irq_mutex);
}

static void octeon_irq_core_bus_sync_unlock(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	if (cd->desired_en != cd->current_en) {
		on_each_cpu(octeon_irq_core_set_enable_local, data, 1);
		cd->current_en = cd->desired_en;
	}

	mutex_unlock(&cd->core_irq_mutex);
}

static struct irq_chip octeon_irq_chip_core = {
	.name = "Core",
	.irq_enable = octeon_irq_core_enable,
	.irq_disable = octeon_irq_core_disable,
	.irq_ack = octeon_irq_core_ack,
	.irq_eoi = octeon_irq_core_eoi,
	.irq_bus_lock = octeon_irq_core_bus_lock,
	.irq_bus_sync_unlock = octeon_irq_core_bus_sync_unlock,

	.irq_cpu_online = octeon_irq_core_eoi,
	.irq_cpu_offline = octeon_irq_core_ack,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static void __init octeon_irq_init_core(void)
{
	int i;
	int irq;
	struct octeon_core_chip_data *cd;

	for (i = 0; i < MIPS_CORE_IRQ_LINES; i++) {
		cd = &octeon_irq_core_chip_data[i];
		cd->current_en = false;
		cd->desired_en = false;
		cd->bit = i;
		mutex_init(&cd->core_irq_mutex);

		irq = OCTEON_IRQ_SW0 + i;
		irq_set_chip_data(irq, cd);
		irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
					 handle_percpu_irq);
	}
}
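
/*
 * Pick the CPU that should receive the next occurrence of this irq.
 * With more than one CPU in the affinity mask, the target round-robins
 * through the online CPUs of the mask, starting after current_cpu.
 */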
static int next_cpu_for_irq(struct irq_data *data)
{
#ifdef CONFIG_SMP
	int cpu;
	const struct cpumask *mask = irq_data_get_affinity_mask(data);
	int weight = cpumask_weight(mask);
	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);

	if (weight > 1) {
		cpu = cd->current_cpu;
		for (;;) {
			cpu = cpumask_next(cpu, mask);
			if (cpu >= nr_cpu_ids) {
				cpu = -1;
				continue;
			} else if (cpumask_test_cpu(cpu, cpu_online_mask)) {
				break;
			}
		}
	} else if (weight == 1) {
		cpu = cpumask_first(mask);
	} else {
		cpu = smp_processor_id();
	}
	cd->current_cpu = cpu;
	return cpu;
#else
	return smp_processor_id();
#endif
}
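
/*
 * Enable the irq on the CPU chosen by next_cpu_for_irq(). Chips without
 * the EN*_W1{S,C} registers need a read-modify-write of the whole enable
 * mask, so a per-CPU mirror plus spinlock keeps it consistent.
 */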
static void octeon_irq_ciu_enable(struct irq_data *data)
{
	int cpu = next_cpu_for_irq(data);
	int coreid = octeon_coreid_for_cpu(cpu);
	unsigned long *pen;
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);

	cd = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd->line == 0) {
		pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
	} else {
		pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}

static void octeon_irq_ciu_enable_local(struct irq_data *data)
{
	unsigned long *pen;
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);

	cd = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd->line == 0) {
		pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
	} else {
		pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}

static void octeon_irq_ciu_disable_local(struct irq_data *data)
{
	unsigned long *pen;
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);

	cd = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd->line == 0) {
		pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
		__clear_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
	} else {
		pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
		__clear_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}

static void octeon_irq_ciu_disable_all(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int cpu;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock;

	cd = irq_data_get_irq_chip_data(data);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);

		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		if (cd->line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		raw_spin_lock_irqsave(lock, flags);
		__clear_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		if (cd->line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(lock, flags);
	}
}

static void octeon_irq_ciu_enable_all(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int cpu;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock;

	cd = irq_data_get_irq_chip_data(data);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);

		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		if (cd->line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		raw_spin_lock_irqsave(lock, flags);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		if (cd->line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(lock, flags);
	}
}

/*
 * Enable the irq on the next core in the affinity set for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu_enable_v2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	/*
	 * Called under the desc lock, so these should never get out
	 * of sync.
	 */
	if (cd->line == 0) {
		int index = octeon_coreid_for_cpu(cpu) * 2;

		set_bit(cd->bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	} else {
		int index = octeon_coreid_for_cpu(cpu) * 2 + 1;

		set_bit(cd->bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}

/*
 * Enable the irq in the sum2 registers.
 */
static void octeon_irq_ciu_enable_sum2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	int index = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
}

/*
 * Disable the irq in the sum2 registers.
 */
static void octeon_irq_ciu_disable_local_sum2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	int index = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
}

static void octeon_irq_ciu_ack_sum2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	int index = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	cvmx_write_csr(CVMX_CIU_SUM2_PPX_IP4(index), mask);
}

static void octeon_irq_ciu_disable_all_sum2(struct irq_data *data)
{
	int cpu;
	struct octeon_ciu_chip_data *cd;
	u64 mask;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);

		cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(coreid), mask);
	}
}

/*
 * Enable the irq on the current CPU for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
{
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		int index = cvmx_get_core_num() * 2;

		set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	} else {
		int index = cvmx_get_core_num() * 2 + 1;

		set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}

static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
{
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		int index = cvmx_get_core_num() * 2;

		clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	} else {
		int index = cvmx_get_core_num() * 2 + 1;

		clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
}

/*
 * Write to the W1C bit in CVMX_CIU_INTX_SUM0 to clear the irq.
 */
static void octeon_irq_ciu_ack(struct irq_data *data)
{
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		int index = cvmx_get_core_num() * 2;

		cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
	} else {
		cvmx_write_csr(CVMX_CIU_INT_SUM1, mask);
	}
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu_disable_all_v2(struct irq_data *data)
{
	int cpu;
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2;

			clear_bit(cd->bit,
				  &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
		}
	} else {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;

			clear_bit(cd->bit,
				  &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
		}
	}
}

/*
 * Enable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
{
	int cpu;
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2;

			set_bit(cd->bit,
				&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
		}
	} else {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;

			set_bit(cd->bit,
				&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
		}
	}
}

static int octeon_irq_ciu_set_type(struct irq_data *data, unsigned int t)
{
	irqd_set_trigger_type(data, t);

	if (t & IRQ_TYPE_EDGE_BOTH)
		irq_set_handler_locked(data, handle_edge_irq);
	else
		irq_set_handler_locked(data, handle_level_irq);

	return IRQ_SET_MASK_OK;
}
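
/*
 * Program the GPIO input for the requested trigger: int_type selects
 * edge vs. level detection and rx_xor inverts the pin for the
 * falling-edge/active-low variants; fil_cnt/fil_sel set the input
 * glitch filter.
 */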
static void octeon_irq_gpio_setup(struct irq_data *data)
{
	union cvmx_gpio_bit_cfgx cfg;
	struct octeon_ciu_chip_data *cd;
	u32 t = irqd_get_trigger_type(data);

	cd = irq_data_get_irq_chip_data(data);

	cfg.u64 = 0;
	cfg.s.int_en = 1;
	cfg.s.int_type = (t & IRQ_TYPE_EDGE_BOTH) != 0;
	cfg.s.rx_xor = (t & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) != 0;

	/* 140 nS glitch filter */
	cfg.s.fil_cnt = 7;
	cfg.s.fil_sel = 3;

	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), cfg.u64);
}

static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu_enable_v2(data);
}

static void octeon_irq_ciu_enable_gpio(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu_enable(data);
}

static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t)
{
	irqd_set_trigger_type(data, t);
	octeon_irq_gpio_setup(data);

	if (t & IRQ_TYPE_EDGE_BOTH)
		irq_set_handler_locked(data, handle_edge_irq);
	else
		irq_set_handler_locked(data, handle_level_irq);

	return IRQ_SET_MASK_OK;
}

static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);

	octeon_irq_ciu_disable_all_v2(data);
}

static void octeon_irq_ciu_disable_gpio(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);

	octeon_irq_ciu_disable_all(data);
}

static void octeon_irq_ciu_gpio_ack(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;
	u64 mask;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->gpio_line);

	cvmx_write_csr(CVMX_GPIO_INT_CLR, mask);
}

#ifdef CONFIG_SMP
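/*
 * Called while a CPU is going offline: if the irq could target the
 * dying CPU, rewrite its affinity so it no longer does.
 */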
static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
{
	int cpu = smp_processor_id();
	cpumask_t new_affinity;
	const struct cpumask *mask = irq_data_get_affinity_mask(data);

	if (!cpumask_test_cpu(cpu, mask))
		return;

	if (cpumask_weight(mask) > 1) {
		/*
		 * The irq has multi-CPU affinity, so just remove this
		 * CPU from the affinity set.
		 */
		cpumask_copy(&new_affinity, mask);
		cpumask_clear_cpu(cpu, &new_affinity);
	} else {
		/* Otherwise, put it on the lowest numbered online CPU. */
		cpumask_clear(&new_affinity);
		cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
	}
	irq_set_affinity_locked(data, &new_affinity, false);
}

static int octeon_irq_ciu_set_affinity(struct irq_data *data,
				       const struct cpumask *dest, bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	unsigned long *pen;
	raw_spinlock_t *lock;

	cd = irq_data_get_irq_chip_data(data);

	/*
	 * For non-v2 CIU, we will allow only single CPU affinity.
	 * This removes the need to do locking in the .ack/.eoi
	 * functions.
	 */
	if (cpumask_weight(dest) != 1)
		return -EINVAL;

	if (!enable_one)
		return 0;

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);

		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		raw_spin_lock_irqsave(lock, flags);

		if (cd->line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = false;
			__set_bit(cd->bit, pen);
		} else {
			__clear_bit(cd->bit, pen);
		}
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();

		if (cd->line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);

		raw_spin_unlock_irqrestore(lock, flags);
	}
	return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
					  const struct cpumask *dest,
					  bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	if (!enable_one)
		return 0;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << cd->bit;

	if (cd->line == 0) {
		for_each_online_cpu(cpu) {
			unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
			int index = octeon_coreid_for_cpu(cpu) * 2;

			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				enable_one = false;
				set_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
			} else {
				clear_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
			}
		}
	} else {
		for_each_online_cpu(cpu) {
			unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;

			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				enable_one = false;
				set_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
			} else {
				clear_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
			}
		}
	}
	return 0;
}

static int octeon_irq_ciu_set_affinity_sum2(struct irq_data *data,
					    const struct cpumask *dest,
					    bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	if (!enable_one)
		return 0;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << cd->bit;

	for_each_online_cpu(cpu) {
		int index = octeon_coreid_for_cpu(cpu);

		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = false;
			cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
		} else {
			cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
		}
	}
	return 0;
}
#endif

static unsigned int edge_startup(struct irq_data *data)
{
	/* ack any pending edge-irq at startup, so there is
	 * an _edge_ to fire on when the event reappears.
	 */
	data->chip->irq_ack(data);
	data->chip->irq_enable(data);
	return 0;
}

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu_v2 = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu_v2_edge = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_ack = octeon_irq_ciu_ack,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu_sum2 = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_sum2,
	.irq_disable = octeon_irq_ciu_disable_all_sum2,
	.irq_mask = octeon_irq_ciu_disable_local_sum2,
	.irq_unmask = octeon_irq_ciu_enable_sum2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu_sum2_edge = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_sum2,
	.irq_disable = octeon_irq_ciu_disable_all_sum2,
	.irq_ack = octeon_irq_ciu_ack_sum2,
	.irq_mask = octeon_irq_ciu_disable_local_sum2,
	.irq_unmask = octeon_irq_ciu_enable_sum2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu_edge = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_ack = octeon_irq_ciu_ack,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

/* The mbox versions don't do any affinity or round-robin. */
static struct irq_chip octeon_irq_chip_ciu_mbox_v2 = {
	.name = "CIU-M",
	.irq_enable = octeon_irq_ciu_enable_all_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_ack = octeon_irq_ciu_disable_local_v2,
	.irq_eoi = octeon_irq_ciu_enable_local_v2,

	.irq_cpu_online = octeon_irq_ciu_enable_local_v2,
	.irq_cpu_offline = octeon_irq_ciu_disable_local_v2,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static struct irq_chip octeon_irq_chip_ciu_mbox = {
	.name = "CIU-M",
	.irq_enable = octeon_irq_ciu_enable_all,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_ack = octeon_irq_ciu_disable_local,
	.irq_eoi = octeon_irq_ciu_enable_local,

	.irq_cpu_online = octeon_irq_ciu_enable_local,
	.irq_cpu_offline = octeon_irq_ciu_disable_local,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static struct irq_chip octeon_irq_chip_ciu_gpio_v2 = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu_enable_gpio_v2,
	.irq_disable = octeon_irq_ciu_disable_gpio_v2,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};

static struct irq_chip octeon_irq_chip_ciu_gpio = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu_enable_gpio,
	.irq_disable = octeon_irq_ciu_disable_gpio,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};

/*
 * Watchdog interrupts are special. They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu_wd_enable(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int coreid = data->irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	int cpu = octeon_cpu_for_coreid(coreid);
	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);

	raw_spin_lock_irqsave(lock, flags);
	pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
	__set_bit(coreid, pen);
	/*
	 * Must be visible to octeon_irq_ip{2,3}_ciu() before enabling
	 * the irq.
	 */
	wmb();
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
	raw_spin_unlock_irqrestore(lock, flags);
}

/*
 * Watchdog interrupts are special. They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu1_wd_enable_v2(struct irq_data *data)
{
	int coreid = data->irq - OCTEON_IRQ_WDOG0;
	int cpu = octeon_cpu_for_coreid(coreid);

	set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(coreid * 2 + 1), 1ull << coreid);
}

static struct irq_chip octeon_irq_chip_ciu_wd_v2 = {
	.name = "CIU-W",
	.irq_enable = octeon_irq_ciu1_wd_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_local_v2,
};

static struct irq_chip octeon_irq_chip_ciu_wd = {
	.name = "CIU-W",
	.irq_enable = octeon_irq_ciu_wd_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable_local,
};
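
/*
 * Which CIU sources latch as edges rather than levels. Edge sources
 * get an edge flow handler and an .irq_ack so the latched bit is
 * cleared (see octeon_irq_ciu_map() below).
 */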
static bool octeon_irq_ciu_is_edge(unsigned int line, unsigned int bit)
{
	bool edge = false;

	if (line == 0)
		switch (bit) {
		case 48 ... 49:	/* GMX DRP */
		case 50:	/* IPD_DRP */
		case 52 ... 55:	/* Timers */
		case 58:	/* MPI */
			edge = true;
			break;
		default:
			break;
		}
	else /* line == 1 */
		switch (bit) {
		case 47:	/* PTP */
			edge = true;
			break;
		default:
			break;
		}
	return edge;
}

struct octeon_irq_gpio_domain_data {
	unsigned int base_hwirq;
};
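
/*
 * Translate a two-cell GPIO interrupt specifier: cell 0 is the pin
 * (0-15) and becomes the hwirq; cell 1 carries the standard DT trigger
 * flags (1 rising, 2 falling, 4 high, 8 low).
 */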
static int octeon_irq_gpio_xlat(struct irq_domain *d,
				struct device_node *node,
				const u32 *intspec,
				unsigned int intsize,
				unsigned long *out_hwirq,
				unsigned int *out_type)
{
	unsigned int type;
	unsigned int pin;
	unsigned int trigger;

	if (irq_domain_get_of_node(d) != node)
		return -EINVAL;

	if (intsize < 2)
		return -EINVAL;

	pin = intspec[0];
	if (pin >= 16)
		return -EINVAL;

	trigger = intspec[1];

	switch (trigger) {
	case 1:
		type = IRQ_TYPE_EDGE_RISING;
		break;
	case 2:
		type = IRQ_TYPE_EDGE_FALLING;
		break;
	case 4:
		type = IRQ_TYPE_LEVEL_HIGH;
		break;
	case 8:
		type = IRQ_TYPE_LEVEL_LOW;
		break;
	default:
		pr_err("Error: (%pOFn) Invalid irq trigger specification: %x\n",
		       node,
		       trigger);
		type = IRQ_TYPE_LEVEL_LOW;
		break;
	}
	*out_type = type;
	*out_hwirq = pin;

	return 0;
}
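
/*
 * CIU interrupt specifiers are (sum register, bit) pairs; they are
 * packed into a single hwirq number as (ciu << 6) | bit.
 */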
static int octeon_irq_ciu_xlat(struct irq_domain *d,
			       struct device_node *node,
			       const u32 *intspec,
			       unsigned int intsize,
			       unsigned long *out_hwirq,
			       unsigned int *out_type)
{
	unsigned int ciu, bit;
	struct octeon_irq_ciu_domain_data *dd = d->host_data;

	ciu = intspec[0];
	bit = intspec[1];

	if (ciu >= dd->num_sum || bit > 63)
		return -EINVAL;

	*out_hwirq = (ciu << 6) | bit;
	*out_type = 0;

	return 0;
}

static struct irq_chip *octeon_irq_ciu_chip;
static struct irq_chip *octeon_irq_ciu_chip_edge;
static struct irq_chip *octeon_irq_gpio_chip;

static int octeon_irq_ciu_map(struct irq_domain *d,
			      unsigned int virq, irq_hw_number_t hw)
{
	int rv;
	unsigned int line = hw >> 6;
	unsigned int bit = hw & 63;
	struct octeon_irq_ciu_domain_data *dd = d->host_data;

	if (line >= dd->num_sum || octeon_irq_ciu_to_irq[line][bit] != 0)
		return -EINVAL;

	if (line == 2) {
		if (octeon_irq_ciu_is_edge(line, bit))
			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
				&octeon_irq_chip_ciu_sum2_edge,
				handle_edge_irq);
		else
			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
				&octeon_irq_chip_ciu_sum2,
				handle_level_irq);
	} else {
		if (octeon_irq_ciu_is_edge(line, bit))
			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
				octeon_irq_ciu_chip_edge,
				handle_edge_irq);
		else
			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
				octeon_irq_ciu_chip,
				handle_level_irq);
	}
	return rv;
}

static int octeon_irq_gpio_map(struct irq_domain *d,
			       unsigned int virq, irq_hw_number_t hw)
{
	struct octeon_irq_gpio_domain_data *gpiod = d->host_data;
	unsigned int line, bit;
	int r;

	line = (hw + gpiod->base_hwirq) >> 6;
	bit = (hw + gpiod->base_hwirq) & 63;
	if (line >= ARRAY_SIZE(octeon_irq_ciu_to_irq) ||
	    octeon_irq_ciu_to_irq[line][bit] != 0)
		return -EINVAL;

	/*
	 * Default to handle_level_irq. If the DT contains a different
	 * trigger type, it will call the irq_set_type callback and
	 * the handler gets updated.
	 */
	r = octeon_irq_set_ciu_mapping(virq, line, bit, hw,
				       octeon_irq_gpio_chip, handle_level_irq);
	return r;
}

static const struct irq_domain_ops octeon_irq_domain_ciu_ops = {
	.map = octeon_irq_ciu_map,
	.unmap = octeon_irq_free_cd,
	.xlate = octeon_irq_ciu_xlat,
};

static const struct irq_domain_ops octeon_irq_domain_gpio_ops = {
	.map = octeon_irq_gpio_map,
	.unmap = octeon_irq_free_cd,
	.xlate = octeon_irq_gpio_xlat,
};
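
/*
 * First-level dispatch for the IP2/IP3/IP4 CPU interrupt lines: read
 * the summary register, mask it with the enabled set, and hand the
 * highest pending bit to do_IRQ() via the line/bit -> irq table.
 */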
  1110. static void octeon_irq_ip2_ciu(void)
  1111. {
  1112. const unsigned long core_id = cvmx_get_core_num();
  1113. u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2));
  1114. ciu_sum &= __this_cpu_read(octeon_irq_ciu0_en_mirror);
  1115. if (likely(ciu_sum)) {
  1116. int bit = fls64(ciu_sum) - 1;
  1117. int irq = octeon_irq_ciu_to_irq[0][bit];
  1118. if (likely(irq))
  1119. do_IRQ(irq);
  1120. else
  1121. spurious_interrupt();
  1122. } else {
  1123. spurious_interrupt();
  1124. }
  1125. }
  1126. static void octeon_irq_ip3_ciu(void)
  1127. {
  1128. u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1);
  1129. ciu_sum &= __this_cpu_read(octeon_irq_ciu1_en_mirror);
  1130. if (likely(ciu_sum)) {
  1131. int bit = fls64(ciu_sum) - 1;
  1132. int irq = octeon_irq_ciu_to_irq[1][bit];
  1133. if (likely(irq))
  1134. do_IRQ(irq);
  1135. else
  1136. spurious_interrupt();
  1137. } else {
  1138. spurious_interrupt();
  1139. }
  1140. }
  1141. static void octeon_irq_ip4_ciu(void)
  1142. {
  1143. int coreid = cvmx_get_core_num();
  1144. u64 ciu_sum = cvmx_read_csr(CVMX_CIU_SUM2_PPX_IP4(coreid));
  1145. u64 ciu_en = cvmx_read_csr(CVMX_CIU_EN2_PPX_IP4(coreid));
  1146. ciu_sum &= ciu_en;
  1147. if (likely(ciu_sum)) {
  1148. int bit = fls64(ciu_sum) - 1;
  1149. int irq = octeon_irq_ciu_to_irq[2][bit];
  1150. if (likely(irq))
  1151. do_IRQ(irq);
  1152. else
  1153. spurious_interrupt();
  1154. } else {
  1155. spurious_interrupt();
  1156. }
  1157. }
  1158. static bool octeon_irq_use_ip4;
  1159. static void octeon_irq_local_enable_ip4(void *arg)
  1160. {
  1161. set_c0_status(STATUSF_IP4);
  1162. }
  1163. static void octeon_irq_ip4_mask(void)
  1164. {
  1165. clear_c0_status(STATUSF_IP4);
  1166. spurious_interrupt();
  1167. }
  1168. static void (*octeon_irq_ip2)(void);
  1169. static void (*octeon_irq_ip3)(void);
  1170. static void (*octeon_irq_ip4)(void);
  1171. void (*octeon_irq_setup_secondary)(void);
  1172. void octeon_irq_set_ip4_handler(octeon_irq_ip4_handler_t h)
  1173. {
  1174. octeon_irq_ip4 = h;
  1175. octeon_irq_use_ip4 = true;
  1176. on_each_cpu(octeon_irq_local_enable_ip4, NULL, 1);
  1177. }
  1178. static void octeon_irq_percpu_enable(void)
  1179. {
  1180. irq_cpu_online();
  1181. }
  1182. static void octeon_irq_init_ciu_percpu(void)
  1183. {
  1184. int coreid = cvmx_get_core_num();
  1185. __this_cpu_write(octeon_irq_ciu0_en_mirror, 0);
  1186. __this_cpu_write(octeon_irq_ciu1_en_mirror, 0);
  1187. wmb();
  1188. raw_spin_lock_init(this_cpu_ptr(&octeon_irq_ciu_spinlock));
  1189. /*
  1190. * Disable All CIU Interrupts. The ones we need will be
  1191. * enabled later. Read the SUM register so we know the write
  1192. * completed.
  1193. */
  1194. cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0);
  1195. cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
  1196. cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
  1197. cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
  1198. cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2)));
  1199. }
  1200. static void octeon_irq_init_ciu2_percpu(void)
  1201. {
  1202. u64 regx, ipx;
  1203. int coreid = cvmx_get_core_num();
  1204. u64 base = CVMX_CIU2_EN_PPX_IP2_WRKQ(coreid);
  1205. /*
  1206. * Disable All CIU2 Interrupts. The ones we need will be
  1207. * enabled later. Read the SUM register so we know the write
  1208. * completed.
  1209. *
  1210. * There are 9 registers and 3 IPX levels with strides 0x1000
  1211. * and 0x200 respectively. Use loops to clear them.
  1212. */
  1213. for (regx = 0; regx <= 0x8000; regx += 0x1000) {
  1214. for (ipx = 0; ipx <= 0x400; ipx += 0x200)
  1215. cvmx_write_csr(base + regx + ipx, 0);
  1216. }
  1217. cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(coreid));
  1218. }
  1219. static void octeon_irq_setup_secondary_ciu(void)
  1220. {
  1221. octeon_irq_init_ciu_percpu();
  1222. octeon_irq_percpu_enable();
  1223. /* Enable the CIU lines */
  1224. set_c0_status(STATUSF_IP3 | STATUSF_IP2);
  1225. if (octeon_irq_use_ip4)
  1226. set_c0_status(STATUSF_IP4);
  1227. else
  1228. clear_c0_status(STATUSF_IP4);
  1229. }
  1230. static void octeon_irq_setup_secondary_ciu2(void)
  1231. {
  1232. octeon_irq_init_ciu2_percpu();
  1233. octeon_irq_percpu_enable();
  1234. /* Enable the CIU lines */
  1235. set_c0_status(STATUSF_IP3 | STATUSF_IP2);
  1236. if (octeon_irq_use_ip4)
  1237. set_c0_status(STATUSF_IP4);
  1238. else
  1239. clear_c0_status(STATUSF_IP4);
  1240. }
static int __init octeon_irq_init_ciu(
	struct device_node *ciu_node, struct device_node *parent)
{
	int i, r;
	struct irq_chip *chip;
	struct irq_chip *chip_edge;
	struct irq_chip *chip_mbox;
	struct irq_chip *chip_wd;
	struct irq_domain *ciu_domain = NULL;
	struct octeon_irq_ciu_domain_data *dd;

	dd = kzalloc(sizeof(*dd), GFP_KERNEL);
	if (!dd)
		return -ENOMEM;

	octeon_irq_init_ciu_percpu();
	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;

	octeon_irq_ip2 = octeon_irq_ip2_ciu;
	octeon_irq_ip3 = octeon_irq_ip3_ciu;
	if ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) &&
	    !OCTEON_IS_MODEL(OCTEON_CN63XX)) {
		octeon_irq_ip4 = octeon_irq_ip4_ciu;
		dd->num_sum = 3;
		octeon_irq_use_ip4 = true;
	} else {
		octeon_irq_ip4 = octeon_irq_ip4_mask;
		dd->num_sum = 2;
		octeon_irq_use_ip4 = false;
	}
	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
	    OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) {
		chip = &octeon_irq_chip_ciu_v2;
		chip_edge = &octeon_irq_chip_ciu_v2_edge;
		chip_mbox = &octeon_irq_chip_ciu_mbox_v2;
		chip_wd = &octeon_irq_chip_ciu_wd_v2;
		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2;
	} else {
		chip = &octeon_irq_chip_ciu;
		chip_edge = &octeon_irq_chip_ciu_edge;
		chip_mbox = &octeon_irq_chip_ciu_mbox;
		chip_wd = &octeon_irq_chip_ciu_wd;
		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio;
	}
	octeon_irq_ciu_chip = chip;
	octeon_irq_ciu_chip_edge = chip_edge;

	/* Mips internal */
	octeon_irq_init_core();

	ciu_domain = irq_domain_add_tree(
		ciu_node, &octeon_irq_domain_ciu_ops, dd);
	irq_set_default_host(ciu_domain);

	/* CIU_0 */
	for (i = 0; i < 16; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);
		if (r)
			goto err;
	}

	r = irq_alloc_desc_at(OCTEON_IRQ_MBOX0, -1);
	if (r < 0) {
		pr_err("Failed to allocate desc for %s\n", "OCTEON_IRQ_MBOX0");
		goto err;
	}
	r = octeon_irq_set_ciu_mapping(
		OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq);
	if (r)
		goto err;
	r = irq_alloc_desc_at(OCTEON_IRQ_MBOX1, -1);
	if (r < 0) {
		pr_err("Failed to allocate desc for %s\n", "OCTEON_IRQ_MBOX1");
		goto err;
	}
	r = octeon_irq_set_ciu_mapping(
		OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq);
	if (r)
		goto err;

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
		if (r)
			goto err;
	}
	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);
		if (r)
			goto err;
	}

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45);
	if (r)
		goto err;

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
	if (r)
		goto err;

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);
		if (r)
			goto err;
	}

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59);
	if (r)
		goto err;

	r = irq_alloc_descs(OCTEON_IRQ_WDOG0, OCTEON_IRQ_WDOG0, 16, -1);
	if (r < 0) {
		pr_err("Failed to allocate desc for %s\n", "OCTEON_IRQ_WDOGx");
		goto err;
	}

	/* CIU_1 */
	for (i = 0; i < 16; i++) {
		r = octeon_irq_set_ciu_mapping(
			i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd,
			handle_level_irq);
		if (r)
			goto err;
	}

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	if (octeon_irq_use_ip4)
		set_c0_status(STATUSF_IP4);
	else
		clear_c0_status(STATUSF_IP4);

	return 0;
err:
	kfree(dd);	/* Don't leak the domain data on failure. */
	return r;
}
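
/*
 * The GPIO controller is a child of the CIU. Its "interrupts"
 * property gives the base hwirq (one or two cells, depending on the
 * parent's #interrupt-cells), from which the 16 GPIO lines are
 * mapped in a linear domain.
 */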
static int __init octeon_irq_init_gpio(
	struct device_node *gpio_node, struct device_node *parent)
{
	struct octeon_irq_gpio_domain_data *gpiod;
	u32 interrupt_cells;
	unsigned int base_hwirq;
	int r;

	r = of_property_read_u32(parent, "#interrupt-cells", &interrupt_cells);
	if (r)
		return r;

	if (interrupt_cells == 1) {
		u32 v;

		r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v);
		if (r) {
			pr_warn("No \"interrupts\" property.\n");
			return r;
		}
		base_hwirq = v;
	} else if (interrupt_cells == 2) {
		u32 v0, v1;

		r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v0);
		if (r) {
			pr_warn("No \"interrupts\" property.\n");
			return r;
		}
		r = of_property_read_u32_index(gpio_node, "interrupts", 1, &v1);
		if (r) {
			pr_warn("No \"interrupts\" property.\n");
			return r;
		}
		base_hwirq = (v0 << 6) | v1;
	} else {
		pr_warn("Bad \"#interrupt-cells\" property: %u\n",
			interrupt_cells);
		return -EINVAL;
	}

	gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
	if (gpiod) {
		/* gpio domain host_data is the base hwirq number. */
		gpiod->base_hwirq = base_hwirq;
		irq_domain_add_linear(
			gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod);
	} else {
		pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
		return -ENOMEM;
	}

	/*
	 * Clear the OF_POPULATED flag that was set by of_irq_init()
	 * so that all GPIO devices will be probed.
	 */
	of_node_clear_flag(gpio_node, OF_POPULATED);

	return 0;
}

/*
 * Watchdog interrupts are special. They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu2_wd_enable(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = data->irq - OCTEON_IRQ_WDOG0;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
		(0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);
}
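
/*
 * The CIU2 enable registers come in write-one-to-set (W1S) and
 * write-one-to-clear (W1C) flavours, one block per line at a 0x1000
 * stride, so enabling or disabling a source is a single CSR write
 * with no read-modify-write race.
 */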
static void octeon_irq_ciu2_enable(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int cpu = next_cpu_for_irq(data);
	int coreid = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
		(0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);
}

static void octeon_irq_ciu2_enable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
		(0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);
}

static void octeon_irq_ciu2_disable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) +
		(0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);
}

static void octeon_irq_ciu2_ack(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);
}

static void octeon_irq_ciu2_disable_all(struct irq_data *data)
{
	int cpu;
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	for_each_online_cpu(cpu) {
		u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
			octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd->line);
		cvmx_write_csr(en_addr, mask);
	}
}
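
/*
 * Mailbox interrupts are per-CPU by construction: each core has its
 * own enable bits in the IP3 MBOX W1S/W1C registers, so the "all"
 * variants walk every online CPU while the "local" variants touch
 * only the current core.
 */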
static void octeon_irq_ciu2_mbox_enable_all(struct irq_data *data)
{
	int cpu;
	u64 mask;

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);

	for_each_online_cpu(cpu) {
		u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(
			octeon_coreid_for_cpu(cpu));
		cvmx_write_csr(en_addr, mask);
	}
}

static void octeon_irq_ciu2_mbox_disable_all(struct irq_data *data)
{
	int cpu;
	u64 mask;

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);

	for_each_online_cpu(cpu) {
		u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(
			octeon_coreid_for_cpu(cpu));
		cvmx_write_csr(en_addr, mask);
	}
}

static void octeon_irq_ciu2_mbox_enable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
	en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(coreid);
	cvmx_write_csr(en_addr, mask);
}

static void octeon_irq_ciu2_mbox_disable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
	en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(coreid);
	cvmx_write_csr(en_addr, mask);
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu2_set_affinity(struct irq_data *data,
					const struct cpumask *dest, bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	if (!enable_one)
		return 0;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << cd->bit;

	/* Enable the source on the first CPU in dest, clear it everywhere else. */
	for_each_online_cpu(cpu) {
		u64 en_addr;
		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = false;
			en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(
				octeon_coreid_for_cpu(cpu)) +
				(0x1000ull * cd->line);
		} else {
			en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
				octeon_coreid_for_cpu(cpu)) +
				(0x1000ull * cd->line);
		}
		cvmx_write_csr(en_addr, mask);
	}

	return 0;
}
#endif

static void octeon_irq_ciu2_enable_gpio(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu2_enable(data);
}

static void octeon_irq_ciu2_disable_gpio(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);

	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);
	octeon_irq_ciu2_disable_all(data);
}
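
/*
 * irq_chip implementations for the CIU2 level, edge, mailbox,
 * watchdog and GPIO flavours of interrupt source.
 */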
static struct irq_chip octeon_irq_chip_ciu2 = {
	.name = "CIU2",
	.irq_enable = octeon_irq_ciu2_enable,
	.irq_disable = octeon_irq_ciu2_disable_all,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu2_edge = {
	.name = "CIU2-E",
	.irq_enable = octeon_irq_ciu2_enable,
	.irq_disable = octeon_irq_ciu2_disable_all,
	.irq_ack = octeon_irq_ciu2_ack,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu2_mbox = {
	.name = "CIU2-M",
	.irq_enable = octeon_irq_ciu2_mbox_enable_all,
	.irq_disable = octeon_irq_ciu2_mbox_disable_all,
	.irq_ack = octeon_irq_ciu2_mbox_disable_local,
	.irq_eoi = octeon_irq_ciu2_mbox_enable_local,
	.irq_cpu_online = octeon_irq_ciu2_mbox_enable_local,
	.irq_cpu_offline = octeon_irq_ciu2_mbox_disable_local,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static struct irq_chip octeon_irq_chip_ciu2_wd = {
	.name = "CIU2-W",
	.irq_enable = octeon_irq_ciu2_wd_enable,
	.irq_disable = octeon_irq_ciu2_disable_all,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable_local,
};

static struct irq_chip octeon_irq_chip_ciu2_gpio = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu2_enable_gpio,
	.irq_disable = octeon_irq_ciu2_disable_gpio,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};
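
/*
 * Device-tree interrupt specifiers for CIU2 are (line, bit) pairs;
 * they are packed into a single hwirq as (line << 6) | bit.
 */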
static int octeon_irq_ciu2_xlat(struct irq_domain *d,
				struct device_node *node,
				const u32 *intspec,
				unsigned int intsize,
				unsigned long *out_hwirq,
				unsigned int *out_type)
{
	unsigned int ciu, bit;

	ciu = intspec[0];
	bit = intspec[1];

	*out_hwirq = (ciu << 6) | bit;
	*out_type = 0;

	return 0;
}

static bool octeon_irq_ciu2_is_edge(unsigned int line, unsigned int bit)
{
	bool edge = false;

	if (line == 3) /* MIO */
		switch (bit) {
		case 2:	 /* IPD_DRP */
		case 8 ... 11: /* Timers */
		case 48: /* PTP */
			edge = true;
			break;
		default:
			break;
		}
	else if (line == 6) /* PKT */
		switch (bit) {
		case 52 ... 53: /* ILK_DRP */
		case 8 ... 12:	/* GMX_DRP */
			edge = true;
			break;
		default:
			break;
		}
	return edge;
}

static int octeon_irq_ciu2_map(struct irq_domain *d,
			       unsigned int virq, irq_hw_number_t hw)
{
	unsigned int line = hw >> 6;
	unsigned int bit = hw & 63;

	/*
	 * Don't map irq if it is reserved for GPIO.
	 * (Line 7 is reserved for the GPIO lines.)
	 */
	if (line == 7)
		return 0;

	if (line > 7 || octeon_irq_ciu_to_irq[line][bit] != 0)
		return -EINVAL;

	if (octeon_irq_ciu2_is_edge(line, bit))
		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
					   &octeon_irq_chip_ciu2_edge,
					   handle_edge_irq);
	else
		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
					   &octeon_irq_chip_ciu2,
					   handle_level_irq);

	return 0;
}

static const struct irq_domain_ops octeon_irq_domain_ciu2_ops = {
	.map = octeon_irq_ciu2_map,
	.unmap = octeon_irq_free_cd,
	.xlate = octeon_irq_ciu2_xlat,
};
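
/*
 * IP2 dispatch for CIU2 is a two-level decode: the per-core SUM
 * register says which of the eight lines is pending, then that
 * line's SRC register (at a 0x1000 stride) gives the source bit.
 */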
static void octeon_irq_ciu2(void)
{
	int line;
	int bit;
	int irq;
	u64 src_reg, src, sum;
	const unsigned long core_id = cvmx_get_core_num();

	sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(core_id)) & 0xfful;

	if (unlikely(!sum))
		goto spurious;

	line = fls64(sum) - 1;
	src_reg = CVMX_CIU2_SRC_PPX_IP2_WRKQ(core_id) + (0x1000 * line);
	src = cvmx_read_csr(src_reg);

	if (unlikely(!src))
		goto spurious;

	bit = fls64(src) - 1;
	irq = octeon_irq_ciu_to_irq[line][bit];
	if (unlikely(!irq))
		goto spurious;

	do_IRQ(irq);
	goto out;

spurious:
	spurious_interrupt();
out:
	/*
	 * CN68XX pass 1.x has an erratum where accessing the ACK
	 * registers can stop interrupts from propagating.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
	else
		cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP2(core_id));
	return;
}
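
/*
 * IP3 dispatch for CIU2: the four mailbox sources sit in the top
 * bits of the IP3 SUM register, hence the shift by 60.
 */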
static void octeon_irq_ciu2_mbox(void)
{
	int line;

	const unsigned long core_id = cvmx_get_core_num();
	u64 sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP3(core_id)) >> 60;

	if (unlikely(!sum))
		goto spurious;

	line = fls64(sum) - 1;

	do_IRQ(OCTEON_IRQ_MBOX0 + line);
	goto out;

spurious:
	spurious_interrupt();
out:
	/*
	 * CN68XX pass 1.x has an erratum where accessing the ACK
	 * registers can stop interrupts from propagating.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
	else
		cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP3(core_id));
	return;
}

static int __init octeon_irq_init_ciu2(
	struct device_node *ciu_node, struct device_node *parent)
{
	int i, r;
	struct irq_domain *ciu_domain = NULL;

	octeon_irq_init_ciu2_percpu();
	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2;

	octeon_irq_gpio_chip = &octeon_irq_chip_ciu2_gpio;
	octeon_irq_ip2 = octeon_irq_ciu2;
	octeon_irq_ip3 = octeon_irq_ciu2_mbox;
	octeon_irq_ip4 = octeon_irq_ip4_mask;

	/* Mips internal */
	octeon_irq_init_core();

	ciu_domain = irq_domain_add_tree(
		ciu_node, &octeon_irq_domain_ciu2_ops, NULL);
	irq_set_default_host(ciu_domain);

	/* CIU2 */
	for (i = 0; i < 64; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i);
		if (r)
			goto err;
	}

	for (i = 0; i < 32; i++) {
		r = octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0,
			&octeon_irq_chip_ciu2_wd, handle_level_irq);
		if (r)
			goto err;
	}

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8);
		if (r)
			goto err;
	}

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i);
		if (r)
			goto err;
	}

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8);
		if (r)
			goto err;
	}

	irq_set_chip_and_handler(OCTEON_IRQ_MBOX0,
				 &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX1,
				 &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX2,
				 &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX3,
				 &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	clear_c0_status(STATUSF_IP4);
	return 0;
err:
	return r;
}
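
/*
 * The CIB blocks are simple secondary controllers: one RAW register
 * latches events and one EN register gates them; a raw spinlock
 * serializes the read-modify-write of EN.
 */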
struct octeon_irq_cib_host_data {
	raw_spinlock_t lock;
	u64 raw_reg;
	u64 en_reg;
	int max_bits;
};

struct octeon_irq_cib_chip_data {
	struct octeon_irq_cib_host_data *host_data;
	int bit;
};

static void octeon_irq_cib_enable(struct irq_data *data)
{
	unsigned long flags;
	u64 en;
	struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
	struct octeon_irq_cib_host_data *host_data = cd->host_data;

	raw_spin_lock_irqsave(&host_data->lock, flags);
	en = cvmx_read_csr(host_data->en_reg);
	en |= 1ull << cd->bit;
	cvmx_write_csr(host_data->en_reg, en);
	raw_spin_unlock_irqrestore(&host_data->lock, flags);
}

static void octeon_irq_cib_disable(struct irq_data *data)
{
	unsigned long flags;
	u64 en;
	struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
	struct octeon_irq_cib_host_data *host_data = cd->host_data;

	raw_spin_lock_irqsave(&host_data->lock, flags);
	en = cvmx_read_csr(host_data->en_reg);
	en &= ~(1ull << cd->bit);
	cvmx_write_csr(host_data->en_reg, en);
	raw_spin_unlock_irqrestore(&host_data->lock, flags);
}

static int octeon_irq_cib_set_type(struct irq_data *data, unsigned int t)
{
	irqd_set_trigger_type(data, t);
	return IRQ_SET_MASK_OK;
}

static struct irq_chip octeon_irq_chip_cib = {
	.name = "CIB",
	.irq_enable = octeon_irq_cib_enable,
	.irq_disable = octeon_irq_cib_disable,
	.irq_mask = octeon_irq_cib_disable,
	.irq_unmask = octeon_irq_cib_enable,
	.irq_set_type = octeon_irq_cib_set_type,
};

static int octeon_irq_cib_xlat(struct irq_domain *d,
			       struct device_node *node,
			       const u32 *intspec,
			       unsigned int intsize,
			       unsigned long *out_hwirq,
			       unsigned int *out_type)
{
	unsigned int type = 0;

	if (intsize == 2)
		type = intspec[1];

	switch (type) {
	case 0: /* unofficial value, but we might as well let it work. */
	case 4: /* official value for level triggering. */
		*out_type = IRQ_TYPE_LEVEL_HIGH;
		break;
	case 1: /* official value for edge triggering. */
		*out_type = IRQ_TYPE_EDGE_RISING;
		break;
	default: /* Nothing else is acceptable. */
		return -EINVAL;
	}

	*out_hwirq = intspec[0];

	return 0;
}

static int octeon_irq_cib_map(struct irq_domain *d,
			      unsigned int virq, irq_hw_number_t hw)
{
	struct octeon_irq_cib_host_data *host_data = d->host_data;
	struct octeon_irq_cib_chip_data *cd;

	if (hw >= host_data->max_bits) {
		pr_err("ERROR: %s mapping %u is too big!\n",
		       irq_domain_get_of_node(d)->name, (unsigned)hw);
		return -EINVAL;
	}

	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd)
		return -ENOMEM;

	cd->host_data = host_data;
	cd->bit = hw;

	irq_set_chip_and_handler(virq, &octeon_irq_chip_cib,
				 handle_simple_irq);
	irq_set_chip_data(virq, cd);
	return 0;
}

static const struct irq_domain_ops octeon_irq_domain_cib_ops = {
	.map = octeon_irq_cib_map,
	.unmap = octeon_irq_free_cd,
	.xlate = octeon_irq_cib_xlat,
};

/* Chain to real handler. */
static irqreturn_t octeon_irq_cib_handler(int my_irq, void *data)
{
	u64 en;
	u64 raw;
	u64 bits;
	int i;
	int irq;
	struct irq_domain *cib_domain = data;
	struct octeon_irq_cib_host_data *host_data = cib_domain->host_data;

	en = cvmx_read_csr(host_data->en_reg);
	raw = cvmx_read_csr(host_data->raw_reg);

	bits = en & raw;

	for (i = 0; i < host_data->max_bits; i++) {
		if ((bits & 1ull << i) == 0)
			continue;
		irq = irq_find_mapping(cib_domain, i);
		if (!irq) {
			unsigned long flags;

			pr_err("ERROR: CIB bit %d@%llx IRQ unhandled, disabling\n",
			       i, host_data->raw_reg);
			raw_spin_lock_irqsave(&host_data->lock, flags);
			en = cvmx_read_csr(host_data->en_reg);
			en &= ~(1ull << i);
			cvmx_write_csr(host_data->en_reg, en);
			cvmx_write_csr(host_data->raw_reg, 1ull << i);
			raw_spin_unlock_irqrestore(&host_data->lock, flags);
		} else {
			struct irq_desc *desc = irq_to_desc(irq);
			struct irq_data *irq_data = irq_desc_get_irq_data(desc);
			/* If edge, acknowledge the bit we will be sending. */
			if (irqd_get_trigger_type(irq_data) &
			    IRQ_TYPE_EDGE_BOTH)
				cvmx_write_csr(host_data->raw_reg, 1ull << i);
			generic_handle_irq_desc(desc);
		}
	}

	return IRQ_HANDLED;
}

static int __init octeon_irq_init_cib(struct device_node *ciu_node,
				      struct device_node *parent)
{
	const __be32 *addr;
	u32 val;
	struct octeon_irq_cib_host_data *host_data;
	int parent_irq;
	int r;
	struct irq_domain *cib_domain;

	parent_irq = irq_of_parse_and_map(ciu_node, 0);
	if (!parent_irq) {
		pr_err("ERROR: Couldn't acquire parent_irq for %pOFn\n",
		       ciu_node);
		return -EINVAL;
	}

	host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
	if (!host_data)
		return -ENOMEM;
	raw_spin_lock_init(&host_data->lock);

	addr = of_get_address(ciu_node, 0, NULL, NULL);
	if (!addr) {
		pr_err("ERROR: Couldn't acquire reg(0) %pOFn\n", ciu_node);
		kfree(host_data);	/* don't leak on the error path */
		return -EINVAL;
	}
	host_data->raw_reg = (u64)phys_to_virt(
		of_translate_address(ciu_node, addr));

	addr = of_get_address(ciu_node, 1, NULL, NULL);
	if (!addr) {
		pr_err("ERROR: Couldn't acquire reg(1) %pOFn\n", ciu_node);
		kfree(host_data);
		return -EINVAL;
	}
	host_data->en_reg = (u64)phys_to_virt(
		of_translate_address(ciu_node, addr));

	r = of_property_read_u32(ciu_node, "cavium,max-bits", &val);
	if (r) {
		pr_err("ERROR: Couldn't read cavium,max-bits from %pOFn\n",
		       ciu_node);
		kfree(host_data);
		return r;
	}
	host_data->max_bits = val;

	cib_domain = irq_domain_add_linear(ciu_node, host_data->max_bits,
					   &octeon_irq_domain_cib_ops,
					   host_data);
	if (!cib_domain) {
		pr_err("ERROR: Couldn't irq_domain_add_linear()\n");
		kfree(host_data);
		return -ENOMEM;
	}

	cvmx_write_csr(host_data->en_reg, 0); /* disable all IRQs */
	cvmx_write_csr(host_data->raw_reg, ~0); /* ack any outstanding */

	r = request_irq(parent_irq, octeon_irq_cib_handler,
			IRQF_NO_THREAD, "cib", cib_domain);
	if (r) {
		pr_err("request_irq cib failed %d\n", r);
		return r;
	}
	pr_info("CIB interrupt controller probed: %llx %d\n",
		host_data->raw_reg, host_data->max_bits);
	return 0;
}
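
/*
 * CIU3 identifies each source by a 20-bit "intsn"; bits 19..12
 * select the major block and the low bits the source within it.
 * The xlat callback validates the intsn against the "implemented"
 * bit in its ISC_CTL register before accepting the mapping.
 */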
int octeon_irq_ciu3_xlat(struct irq_domain *d,
			 struct device_node *node,
			 const u32 *intspec,
			 unsigned int intsize,
			 unsigned long *out_hwirq,
			 unsigned int *out_type)
{
	struct octeon_ciu3_info *ciu3_info = d->host_data;
	unsigned int hwirq, type, intsn_major;
	union cvmx_ciu3_iscx_ctl isc;

	if (intsize < 2)
		return -EINVAL;
	hwirq = intspec[0];
	type = intspec[1];

	if (hwirq >= (1 << 20))
		return -EINVAL;

	intsn_major = hwirq >> 12;
	switch (intsn_major) {
	case 0x04: /* Software handled separately. */
		return -EINVAL;
	default:
		break;
	}

	isc.u64 = cvmx_read_csr(ciu3_info->ciu3_addr + CIU3_ISC_CTL(hwirq));
	if (!isc.s.imp)
		return -EINVAL;

	switch (type) {
	case 4: /* official value for level triggering. */
		*out_type = IRQ_TYPE_LEVEL_HIGH;
		break;
	case 0: /* unofficial value, but we might as well let it work. */
	case 1: /* official value for edge triggering. */
		*out_type = IRQ_TYPE_EDGE_RISING;
		break;
	default: /* Nothing else is acceptable. */
		return -EINVAL;
	}

	*out_hwirq = hwirq;

	return 0;
}
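
/*
 * (Re)target a CIU3 source: clear its enable via the W1C register,
 * then rewrite ISC_CTL with the delivery table (IDT) of the chosen
 * CPU. The trailing read flushes the write before returning.
 */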
void octeon_irq_ciu3_enable(struct irq_data *data)
{
	int cpu;
	union cvmx_ciu3_iscx_ctl isc_ctl;
	union cvmx_ciu3_iscx_w1c isc_w1c;
	u64 isc_ctl_addr;
	struct octeon_ciu_chip_data *cd;

	cpu = next_cpu_for_irq(data);

	cd = irq_data_get_irq_chip_data(data);

	isc_w1c.u64 = 0;
	isc_w1c.s.en = 1;
	cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64);

	isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn);
	isc_ctl.u64 = 0;
	isc_ctl.s.en = 1;
	isc_ctl.s.idt = per_cpu(octeon_irq_ciu3_idt_ip2, cpu);
	cvmx_write_csr(isc_ctl_addr, isc_ctl.u64);
	cvmx_read_csr(isc_ctl_addr);
}

void octeon_irq_ciu3_disable(struct irq_data *data)
{
	u64 isc_ctl_addr;
	union cvmx_ciu3_iscx_w1c isc_w1c;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);

	isc_w1c.u64 = 0;
	isc_w1c.s.en = 1;

	isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn);
	cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64);
	cvmx_write_csr(isc_ctl_addr, 0);
	cvmx_read_csr(isc_ctl_addr);
}

void octeon_irq_ciu3_ack(struct irq_data *data)
{
	u64 isc_w1c_addr;
	union cvmx_ciu3_iscx_w1c isc_w1c;
	struct octeon_ciu_chip_data *cd;
	u32 trigger_type = irqd_get_trigger_type(data);

	/*
	 * We use a single irq_chip, so we have to do nothing to ack a
	 * level interrupt.
	 */
	if (!(trigger_type & IRQ_TYPE_EDGE_BOTH))
		return;

	cd = irq_data_get_irq_chip_data(data);

	isc_w1c.u64 = 0;
	isc_w1c.s.raw = 1;

	isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn);
	cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
	cvmx_read_csr(isc_w1c_addr);
}

void octeon_irq_ciu3_mask(struct irq_data *data)
{
	union cvmx_ciu3_iscx_w1c isc_w1c;
	u64 isc_w1c_addr;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);

	isc_w1c.u64 = 0;
	isc_w1c.s.en = 1;

	isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn);
	cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
	cvmx_read_csr(isc_w1c_addr);
}

void octeon_irq_ciu3_mask_ack(struct irq_data *data)
{
	union cvmx_ciu3_iscx_w1c isc_w1c;
	u64 isc_w1c_addr;
	struct octeon_ciu_chip_data *cd;
	u32 trigger_type = irqd_get_trigger_type(data);

	cd = irq_data_get_irq_chip_data(data);

	isc_w1c.u64 = 0;
	isc_w1c.s.en = 1;

	/*
	 * We use a single irq_chip, so only ack an edge (!level)
	 * interrupt.
	 */
	if (trigger_type & IRQ_TYPE_EDGE_BOTH)
		isc_w1c.s.raw = 1;

	isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn);
	cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
	cvmx_read_csr(isc_w1c_addr);
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu3_set_affinity(struct irq_data *data,
					const struct cpumask *dest, bool force)
{
	union cvmx_ciu3_iscx_ctl isc_ctl;
	union cvmx_ciu3_iscx_w1c isc_w1c;
	u64 isc_ctl_addr;
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);

	if (!cpumask_subset(dest, cpumask_of_node(cd->ciu_node)))
		return -EINVAL;

	if (!enable_one)
		return IRQ_SET_MASK_OK;

	cpu = cpumask_first(dest);
	if (cpu >= nr_cpu_ids)
		cpu = smp_processor_id();
	cd->current_cpu = cpu;

	isc_w1c.u64 = 0;
	isc_w1c.s.en = 1;
	cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64);

	isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn);
	isc_ctl.u64 = 0;
	isc_ctl.s.en = 1;
	isc_ctl.s.idt = per_cpu(octeon_irq_ciu3_idt_ip2, cpu);
	cvmx_write_csr(isc_ctl_addr, isc_ctl.u64);
	cvmx_read_csr(isc_ctl_addr);

	return IRQ_SET_MASK_OK;
}
#endif

static struct irq_chip octeon_irq_chip_ciu3 = {
	.name = "CIU3",
	.irq_startup = edge_startup,
	.irq_enable = octeon_irq_ciu3_enable,
	.irq_disable = octeon_irq_ciu3_disable,
	.irq_ack = octeon_irq_ciu3_ack,
	.irq_mask = octeon_irq_ciu3_mask,
	.irq_mask_ack = octeon_irq_ciu3_mask_ack,
	.irq_unmask = octeon_irq_ciu3_enable,
	.irq_set_type = octeon_irq_ciu_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu3_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

int octeon_irq_ciu3_mapx(struct irq_domain *d, unsigned int virq,
			 irq_hw_number_t hw, struct irq_chip *chip)
{
	struct octeon_ciu3_info *ciu3_info = d->host_data;
	struct octeon_ciu_chip_data *cd = kzalloc_node(sizeof(*cd), GFP_KERNEL,
						       ciu3_info->node);
	if (!cd)
		return -ENOMEM;
	cd->intsn = hw;
	cd->current_cpu = -1;
	cd->ciu3_addr = ciu3_info->ciu3_addr;
	cd->ciu_node = ciu3_info->node;
	irq_set_chip_and_handler(virq, chip, handle_edge_irq);
	irq_set_chip_data(virq, cd);

	return 0;
}

static int octeon_irq_ciu3_map(struct irq_domain *d,
			       unsigned int virq, irq_hw_number_t hw)
{
	return octeon_irq_ciu3_mapx(d, virq, hw, &octeon_irq_chip_ciu3);
}

static const struct irq_domain_ops octeon_dflt_domain_ciu3_ops = {
	.map = octeon_irq_ciu3_map,
	.unmap = octeon_irq_free_cd,
	.xlate = octeon_irq_ciu3_xlat,
};
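
/*
 * IP2 dispatch for CIU3: the per-core DEST_PP_INT register reports
 * the winning intsn directly; the major block (intsn >> 12) selects
 * which irq_domain handles it.
 */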
static void octeon_irq_ciu3_ip2(void)
{
	union cvmx_ciu3_destx_pp_int dest_pp_int;
	struct octeon_ciu3_info *ciu3_info;
	u64 ciu3_addr;

	ciu3_info = __this_cpu_read(octeon_ciu3_info);
	ciu3_addr = ciu3_info->ciu3_addr;

	dest_pp_int.u64 = cvmx_read_csr(ciu3_addr + CIU3_DEST_PP_INT(3 * cvmx_get_local_core_num()));

	if (likely(dest_pp_int.s.intr)) {
		irq_hw_number_t intsn = dest_pp_int.s.intsn;
		irq_hw_number_t hw;
		struct irq_domain *domain;
		/* Get the domain to use from the major block */
		int block = intsn >> 12;
		int ret;

		domain = ciu3_info->domain[block];
		if (ciu3_info->intsn2hw[block])
			hw = ciu3_info->intsn2hw[block](domain, intsn);
		else
			hw = intsn;

		irq_enter();
		ret = generic_handle_domain_irq(domain, hw);
		irq_exit();

		if (ret < 0) {
			union cvmx_ciu3_iscx_w1c isc_w1c;
			u64 isc_w1c_addr = ciu3_addr + CIU3_ISC_W1C(intsn);

			isc_w1c.u64 = 0;
			isc_w1c.s.en = 1;

			cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
			cvmx_read_csr(isc_w1c_addr);
			spurious_interrupt();
		}
	} else {
		spurious_interrupt();
	}
}

/*
 * 10 mailboxes per core, numbered from zero; the base mailbox for a
 * core is core * 10.
 */
static unsigned int octeon_irq_ciu3_base_mbox_intsn(int core)
{
	/* SW (mbox) are 0x04 in bits 12..19 */
	return 0x04000 + CIU3_MBOX_PER_CORE * core;
}

static unsigned int octeon_irq_ciu3_mbox_intsn_for_core(int core, unsigned int mbox)
{
	return octeon_irq_ciu3_base_mbox_intsn(core) + mbox;
}

static unsigned int octeon_irq_ciu3_mbox_intsn_for_cpu(int cpu, unsigned int mbox)
{
	int local_core = octeon_coreid_for_cpu(cpu) & 0x3f;

	return octeon_irq_ciu3_mbox_intsn_for_core(local_core, mbox);
}

static void octeon_irq_ciu3_mbox(void)
{
	union cvmx_ciu3_destx_pp_int dest_pp_int;
	struct octeon_ciu3_info *ciu3_info;
	u64 ciu3_addr;
	int core = cvmx_get_local_core_num();

	ciu3_info = __this_cpu_read(octeon_ciu3_info);
	ciu3_addr = ciu3_info->ciu3_addr;

	dest_pp_int.u64 = cvmx_read_csr(ciu3_addr + CIU3_DEST_PP_INT(1 + 3 * core));

	if (likely(dest_pp_int.s.intr)) {
		irq_hw_number_t intsn = dest_pp_int.s.intsn;
		int mbox = intsn - octeon_irq_ciu3_base_mbox_intsn(core);

		if (likely(mbox >= 0 && mbox < CIU3_MBOX_PER_CORE)) {
			do_IRQ(mbox + OCTEON_IRQ_MBOX0);
		} else {
			union cvmx_ciu3_iscx_w1c isc_w1c;
			u64 isc_w1c_addr = ciu3_addr + CIU3_ISC_W1C(intsn);

			isc_w1c.u64 = 0;
			isc_w1c.s.en = 1;
			cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
			cvmx_read_csr(isc_w1c_addr);
			spurious_interrupt();
		}
	} else {
		spurious_interrupt();
	}
}
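
/*
 * An IPI is raised by setting the RAW bit of the destination CPU's
 * mailbox ISC through the W1S register; the read back flushes the
 * write.
 */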
void octeon_ciu3_mbox_send(int cpu, unsigned int mbox)
{
	struct octeon_ciu3_info *ciu3_info;
	unsigned int intsn;
	union cvmx_ciu3_iscx_w1s isc_w1s;
	u64 isc_w1s_addr;

	if (WARN_ON_ONCE(mbox >= CIU3_MBOX_PER_CORE))
		return;

	intsn = octeon_irq_ciu3_mbox_intsn_for_cpu(cpu, mbox);
	ciu3_info = per_cpu(octeon_ciu3_info, cpu);
	isc_w1s_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1S(intsn);

	isc_w1s.u64 = 0;
	isc_w1s.s.raw = 1;

	cvmx_write_csr(isc_w1s_addr, isc_w1s.u64);
	cvmx_read_csr(isc_w1s_addr);
}

static void octeon_irq_ciu3_mbox_set_enable(struct irq_data *data, int cpu, bool en)
{
	struct octeon_ciu3_info *ciu3_info;
	unsigned int intsn;
	u64 isc_ctl_addr, isc_w1c_addr;
	union cvmx_ciu3_iscx_ctl isc_ctl;
	unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;

	intsn = octeon_irq_ciu3_mbox_intsn_for_cpu(cpu, mbox);
	ciu3_info = per_cpu(octeon_ciu3_info, cpu);
	isc_w1c_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1C(intsn);
	isc_ctl_addr = ciu3_info->ciu3_addr + CIU3_ISC_CTL(intsn);
	isc_ctl.u64 = 0;
	isc_ctl.s.en = 1;

	cvmx_write_csr(isc_w1c_addr, isc_ctl.u64);
	cvmx_write_csr(isc_ctl_addr, 0);
	if (en) {
		unsigned int idt = per_cpu(octeon_irq_ciu3_idt_ip3, cpu);

		isc_ctl.u64 = 0;
		isc_ctl.s.en = 1;
		isc_ctl.s.idt = idt;
		cvmx_write_csr(isc_ctl_addr, isc_ctl.u64);
	}
	cvmx_read_csr(isc_ctl_addr);
}

static void octeon_irq_ciu3_mbox_enable(struct irq_data *data)
{
	int cpu;
	unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;

	WARN_ON(mbox >= CIU3_MBOX_PER_CORE);

	for_each_online_cpu(cpu)
		octeon_irq_ciu3_mbox_set_enable(data, cpu, true);
}

static void octeon_irq_ciu3_mbox_disable(struct irq_data *data)
{
	int cpu;
	unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;

	WARN_ON(mbox >= CIU3_MBOX_PER_CORE);

	for_each_online_cpu(cpu)
		octeon_irq_ciu3_mbox_set_enable(data, cpu, false);
}

static void octeon_irq_ciu3_mbox_ack(struct irq_data *data)
{
	struct octeon_ciu3_info *ciu3_info;
	unsigned int intsn;
	u64 isc_w1c_addr;
	union cvmx_ciu3_iscx_w1c isc_w1c;
	unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;

	intsn = octeon_irq_ciu3_mbox_intsn_for_core(cvmx_get_local_core_num(), mbox);

	isc_w1c.u64 = 0;
	isc_w1c.s.raw = 1;

	ciu3_info = __this_cpu_read(octeon_ciu3_info);
	isc_w1c_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1C(intsn);
	cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
	cvmx_read_csr(isc_w1c_addr);
}

static void octeon_irq_ciu3_mbox_cpu_online(struct irq_data *data)
{
	octeon_irq_ciu3_mbox_set_enable(data, smp_processor_id(), true);
}

static void octeon_irq_ciu3_mbox_cpu_offline(struct irq_data *data)
{
	octeon_irq_ciu3_mbox_set_enable(data, smp_processor_id(), false);
}
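
/*
 * Per-CPU CIU3 bring-up: claim this core's four interrupt delivery
 * tables (IDTs), aim the IP2 and IP3 tables at this core, leave the
 * IP4 and spare tables untargeted (PP mask 0), and quiesce the
 * core's mailbox sources.
 */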
static int octeon_irq_ciu3_alloc_resources(struct octeon_ciu3_info *ciu3_info)
{
	u64 b = ciu3_info->ciu3_addr;
	int idt_ip2, idt_ip3, idt_ip4;
	int unused_idt2;
	int core = cvmx_get_local_core_num();
	int i;

	__this_cpu_write(octeon_ciu3_info, ciu3_info);

	/*
	 * 4 idt per core starting from 1 because zero is reserved.
	 * Base idt per core is 4 * core + 1
	 */
	idt_ip2 = core * 4 + 1;
	idt_ip3 = core * 4 + 2;
	idt_ip4 = core * 4 + 3;
	unused_idt2 = core * 4 + 4;
	__this_cpu_write(octeon_irq_ciu3_idt_ip2, idt_ip2);
	__this_cpu_write(octeon_irq_ciu3_idt_ip3, idt_ip3);

	/* ip2 interrupts for this CPU */
	cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip2), 0);
	cvmx_write_csr(b + CIU3_IDT_PP(idt_ip2, 0), 1ull << core);
	cvmx_write_csr(b + CIU3_IDT_IO(idt_ip2), 0);

	/* ip3 interrupts for this CPU */
	cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip3), 1);
	cvmx_write_csr(b + CIU3_IDT_PP(idt_ip3, 0), 1ull << core);
	cvmx_write_csr(b + CIU3_IDT_IO(idt_ip3), 0);

	/* ip4 interrupts for this CPU */
	cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip4), 2);
	cvmx_write_csr(b + CIU3_IDT_PP(idt_ip4, 0), 0);
	cvmx_write_csr(b + CIU3_IDT_IO(idt_ip4), 0);

	cvmx_write_csr(b + CIU3_IDT_CTL(unused_idt2), 0);
	cvmx_write_csr(b + CIU3_IDT_PP(unused_idt2, 0), 0);
	cvmx_write_csr(b + CIU3_IDT_IO(unused_idt2), 0);

	for (i = 0; i < CIU3_MBOX_PER_CORE; i++) {
		unsigned int intsn = octeon_irq_ciu3_mbox_intsn_for_core(core, i);

		cvmx_write_csr(b + CIU3_ISC_W1C(intsn), 2);
		cvmx_write_csr(b + CIU3_ISC_CTL(intsn), 0);
	}

	return 0;
}

static void octeon_irq_setup_secondary_ciu3(void)
{
	struct octeon_ciu3_info *ciu3_info;

	ciu3_info = octeon_ciu3_info_per_node[cvmx_get_node_num()];
	octeon_irq_ciu3_alloc_resources(ciu3_info);
	irq_cpu_online();

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	if (octeon_irq_use_ip4)
		set_c0_status(STATUSF_IP4);
	else
		clear_c0_status(STATUSF_IP4);
}

static struct irq_chip octeon_irq_chip_ciu3_mbox = {
	.name = "CIU3-M",
	.irq_enable = octeon_irq_ciu3_mbox_enable,
	.irq_disable = octeon_irq_ciu3_mbox_disable,
	.irq_ack = octeon_irq_ciu3_mbox_ack,
	.irq_cpu_online = octeon_irq_ciu3_mbox_cpu_online,
	.irq_cpu_offline = octeon_irq_ciu3_mbox_cpu_offline,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static int __init octeon_irq_init_ciu3(struct device_node *ciu_node,
				       struct device_node *parent)
{
	int i;
	int node;
	struct irq_domain *domain;
	struct octeon_ciu3_info *ciu3_info;
	const __be32 *zero_addr;
	u64 base_addr;
	union cvmx_ciu3_const consts;

	node = 0; /* of_node_to_nid(ciu_node); */
	ciu3_info = kzalloc_node(sizeof(*ciu3_info), GFP_KERNEL, node);

	if (!ciu3_info)
		return -ENOMEM;

	zero_addr = of_get_address(ciu_node, 0, NULL, NULL);
	if (WARN_ON(!zero_addr)) {
		kfree(ciu3_info);	/* don't leak on the error path */
		return -EINVAL;
	}

	base_addr = of_translate_address(ciu_node, zero_addr);
	base_addr = (u64)phys_to_virt(base_addr);

	ciu3_info->ciu3_addr = base_addr;
	ciu3_info->node = node;

	consts.u64 = cvmx_read_csr(base_addr + CIU3_CONST);

	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu3;

	octeon_irq_ip2 = octeon_irq_ciu3_ip2;
	octeon_irq_ip3 = octeon_irq_ciu3_mbox;
	octeon_irq_ip4 = octeon_irq_ip4_mask;

	if (node == cvmx_get_node_num()) {
		/* Mips internal */
		octeon_irq_init_core();

		/* Only do per CPU things if it is the CIU of the boot node. */
		i = irq_alloc_descs_from(OCTEON_IRQ_MBOX0, 8, node);
		WARN_ON(i < 0);

		for (i = 0; i < 8; i++)
			irq_set_chip_and_handler(i + OCTEON_IRQ_MBOX0,
						 &octeon_irq_chip_ciu3_mbox,
						 handle_percpu_irq);
	}

	/*
	 * Initialize all domains to use the default domain. Specific major
	 * blocks will overwrite the default domain as needed.
	 */
	domain = irq_domain_add_tree(ciu_node, &octeon_dflt_domain_ciu3_ops,
				     ciu3_info);
	for (i = 0; i < MAX_CIU3_DOMAINS; i++)
		ciu3_info->domain[i] = domain;

	octeon_ciu3_info_per_node[node] = ciu3_info;

	if (node == cvmx_get_node_num()) {
		/* Only do per CPU things if it is the CIU of the boot node. */
		octeon_irq_ciu3_alloc_resources(ciu3_info);
		if (node == 0)
			irq_set_default_host(domain);

		octeon_irq_use_ip4 = false;

		/* Enable the CIU lines */
		set_c0_status(STATUSF_IP2 | STATUSF_IP3);
		clear_c0_status(STATUSF_IP4);
	}

	return 0;
}

static struct of_device_id ciu_types[] __initdata = {
	{.compatible = "cavium,octeon-3860-ciu", .data = octeon_irq_init_ciu},
	{.compatible = "cavium,octeon-3860-gpio", .data = octeon_irq_init_gpio},
	{.compatible = "cavium,octeon-6880-ciu2", .data = octeon_irq_init_ciu2},
	{.compatible = "cavium,octeon-7890-ciu3", .data = octeon_irq_init_ciu3},
	{.compatible = "cavium,octeon-7130-cib", .data = octeon_irq_init_cib},
	{}
};

void __init arch_init_irq(void)
{
#ifdef CONFIG_SMP
	/* Set the default affinity to the boot cpu. */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif
	of_irq_init(ciu_types);
}
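
/*
 * Top-level dispatcher: service pending CP0 interrupt lines in fixed
 * priority order (IP2, then IP3, then IP4, then the remaining MIPS
 * core interrupts) until none remain asserted.
 */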
asmlinkage void plat_irq_dispatch(void)
{
	unsigned long cop0_cause;
	unsigned long cop0_status;

	while (1) {
		cop0_cause = read_c0_cause();
		cop0_status = read_c0_status();
		cop0_cause &= cop0_status;
		cop0_cause &= ST0_IM;

		if (cop0_cause & STATUSF_IP2)
			octeon_irq_ip2();
		else if (cop0_cause & STATUSF_IP3)
			octeon_irq_ip3();
		else if (cop0_cause & STATUSF_IP4)
			octeon_irq_ip4();
		else if (cop0_cause)
			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
		else
			break;
	}
}

#ifdef CONFIG_HOTPLUG_CPU

void octeon_fixup_irqs(void)
{
	irq_cpu_offline();
}

#endif /* CONFIG_HOTPLUG_CPU */
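
/*
 * Look up the CIU3 irq_domain that services a given major block on a
 * given node; exported for drivers that map their own CIU3 sources.
 */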
struct irq_domain *octeon_irq_get_block_domain(int node, uint8_t block)
{
	struct octeon_ciu3_info *ciu3_info;

	ciu3_info = octeon_ciu3_info_per_node[node & CVMX_NODE_MASK];
	return ciu3_info->domain[block];
}
EXPORT_SYMBOL(octeon_irq_get_block_domain);