// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#define pr_fmt(fmt) "gic-router: %s: " fmt, __func__

#include <linux/bits.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqnr.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/io.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/tracepoint.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <trace/hooks/gic_v3.h>
#include <trace/events/irq.h>

#include "irq_internals.h"

#define NUM_CLASS_CPUS			NR_CPUS
#define GIC_INTERRUPT_ROUTING_MODE	BIT(31)
#define GICD_ICLAR2			0xE008
#define GICD_SETCLASSR			0x28
#define GICD_TYPER_1_OF_N		BIT(25)
#define GICR_CTLR_DPG1NS		BIT(25)
#define MAX_IRQS			1020U
#define GIC_V3_NAME			"GICv3"

struct gic_intr_routing_data {
	struct irq_chip *gic_chip;
	cpumask_t gic_routing_class0_cpus;
	cpumask_t gic_routing_class1_cpus;
	cpumask_t class0_active_cpus;
	enum cpuhp_state gic_affinity_cpuhp_state;
	bool gic_is_virtual;
	bool gic_supports_1_of_N;
	bool gic_1_of_N_init_done;
	atomic_t abort_balancing;
	atomic_t affinity_initialized;
	void __iomem *rbase;
	u64 redist_stride;
	int dpg1ns_init;
};

static struct gic_intr_routing_data gic_routing_data;
static DEFINE_SPINLOCK(gic_class_lock);
static DEFINE_SPINLOCK(gic_init_lock);
static DECLARE_BITMAP(active_gic_class0, MAX_IRQS);
static DECLARE_BITMAP(active_gic_class1, MAX_IRQS);
static DECLARE_BITMAP(gic_class_initialized, MAX_IRQS);
static DECLARE_BITMAP(gic_saved_class0, MAX_IRQS);

static void affinity_initialize_workfn(struct work_struct *work);
static DECLARE_DELAYED_WORK(affinity_initialize_work, affinity_initialize_workfn);

struct gic_quirk {
	const char *desc;
	bool (*init)(void __iomem *base);
	u32 iidr;
	u32 mask;
};
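
/*
 * GICD_TYPER.No1N (bit 25) is set when the distributor does *not*
 * implement 1-of-N SPI routing, hence the negation below.
 */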
static bool gicd_typer_1_of_N_supported(void __iomem *base)
{
	return !(readl_relaxed(base + GICD_TYPER) & GICD_TYPER_1_OF_N);
}

static bool gic_enable_virtual_1_of_N(void __iomem *base)
{
	gic_routing_data.gic_is_virtual = true;
	gic_routing_data.gic_supports_1_of_N =
		gicd_typer_1_of_N_supported(base);
	return true;
}

static bool gic_enable_1_of_N(void __iomem *base)
{
	gic_routing_data.gic_supports_1_of_N =
		gicd_typer_1_of_N_supported(base);
	return true;
}
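
/*
 * Implementations are matched on GICD_IIDR, with the variant and
 * revision fields masked off so any stepping of a given GIC matches.
 */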
static const struct gic_quirk gic_quirks[] = {
	{
		.desc = "Virtual GIC",
		.init = gic_enable_virtual_1_of_N,
		.iidr = 0x47000070,
		.mask = 0xff000fff,
	},
	{
		/* GIC 600 */
		.desc = "Physical GIC",
		.iidr = 0x0200043b,
		.mask = 0xff000fff,
		.init = gic_enable_1_of_N,
	},
	{
		/* GIC 700 */
		.desc = "Physical GIC",
		.iidr = 0x0400043b,
		.mask = 0xff000fff,
		.init = gic_enable_1_of_N,
	},
	{
	}
};
static void qcom_gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
				   void __iomem *data)
{
	for (; quirks->desc; quirks++) {
		if (quirks->iidr != (quirks->mask & iidr))
			continue;
		if (quirks->init(data))
			pr_info("QGIC: enabling affinity routing for %s\n",
				quirks->desc);
	}
}
/*
 * Check whether a class update is needed.
 *
 * The hypervisor sets the initial class of every irq to class 0. So,
 * on a virtual GIC, skip the first class update for a SPI whose irq
 * affinity already maps to class 0. For subsequent updates, compare
 * against the current class setting and skip the update if nothing
 * has changed.
 */
static bool gic_need_class_update(u32 irq, bool is_class0, bool is_class1)
{
	pr_debug("initialized: %d is_class0: %d test-class0: %d is_class1: %d test-class1: %d\n",
		 test_bit(irq, gic_class_initialized), is_class0,
		 test_bit(irq, active_gic_class0),
		 is_class1, test_bit(irq, active_gic_class1));

	if (gic_routing_data.gic_is_virtual &&
	    !test_bit(irq, gic_class_initialized) &&
	    is_class0 && !is_class1)
		return false;

	if (test_bit(irq, gic_class_initialized) &&
	    (is_class0 == !!test_bit(irq, active_gic_class0)) &&
	    (is_class1 == !!test_bit(irq, active_gic_class1)))
		return false;

	return true;
}
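
/*
 * On the virtual GIC the class is programmed through the
 * hypervisor-provided GICD_SETCLASSR register: bits [12:0] carry the
 * INTID, bit 30 selects class 0 and bit 31 selects class 1.
 */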
void gic_do_class_update_virtual(void __iomem *base, u32 hwirq,
				 bool is_class0, bool is_class1)
{
	void __iomem *reg = base + GICD_SETCLASSR;
	u32 val = hwirq & GENMASK(12, 0);

	if (is_class0)
		val |= BIT(30);
	if (is_class1)
		val |= BIT(31);
	pr_debug("Set class of hwirq: %u class: %#x\n", hwirq, val);
	writel_relaxed(val, reg);
}
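
/*
 * On the physical GIC each SPI has a two-bit class field in the
 * GICD_ICLAR2 register block, 16 SPIs per 32-bit register, so the
 * read-modify-write below is serialized with gic_class_lock.
 */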
void gic_do_class_update_physical(void __iomem *base, u32 irq,
				  bool is_class0, bool is_class1)
{
	void __iomem *reg = base + GICD_ICLAR2 + (irq / 16) * 4;
	int val, offset, class_bits_val = 0;

	if (is_class0)
		class_bits_val = 0x2;
	if (is_class1)
		class_bits_val |= 0x1;
	spin_lock(&gic_class_lock);
	val = readl_relaxed(reg);
	/* 16 SPIs per register: two class bits per SPI */
	offset = (irq % 16) << 1;
	val &= ~(0x3 << offset);
	val |= class_bits_val << offset;
	writel_relaxed(val, reg);
	spin_unlock(&gic_class_lock);
	pr_debug("Set class of hwirq: %u class: %#x\n", (irq + 32), val);
}
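
/*
 * Dispatch to the right programming path. Note that the virtual path
 * takes an absolute INTID (hence the "+ 32"), while the physical path
 * indexes SPIs from 0.
 */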
void gic_do_class_update(void __iomem *base, u32 irq, bool is_class0,
			 bool is_class1)
{
	if (gic_routing_data.gic_is_virtual)
		gic_do_class_update_virtual(base, irq + 32, is_class0,
					    is_class1);
	else
		gic_do_class_update_physical(base, irq, is_class0,
					     is_class1);
}
/* IRQ Balancing Design
 *
 * 1. At module load time, queue a work item (affinity_initialize_work)
 *    to set InterruptRoutingMode for all SPIs.
 *
 * 1.1. In addition, set the class for all SPIs based on the current
 *      affinity mask. For example, assume class 0 contains CPUs 0-3
 *      and class 1 contains CPUs 4-7.
 *
 *      a. All of the affinity masks below map to class 0:
 *         0xf, 0x3, 0x7
 *
 *      b. Similarly, the affinity masks below map to class 1:
 *         0xf0, 0x30, 0x70
 *
 *      c. Any affinity mask containing CPUs from both classes maps to
 *         both classes:
 *         0x11, 0x31, 0x13, 0xff
 *
 *    InterruptRoutingMode and class are not set in the following
 *    scenarios:
 *
 *      a. The affinity mask contains a single CPU.
 *         Note: for an irq where only a single CPU of a multi-CPU mask
 *         is online, the affinity mask still contains the originally
 *         set mask, so the class and IRM settings are retained for
 *         such irqs.
 *
 *      b. Broken affinity, i.e. all CPUs in the affinity mask have
 *         gone offline. The class is retained in this case, but the
 *         IRM setting is cleared.
 *
 * 2. Hotplug behavior
 *
 * 2.1. When all CPUs of class 0 go offline, a snapshot of the irqs in
 *      class 0 is taken.
 *
 * 2.2. When the first CPU of class 0 comes back online, the affinity
 *      mask of every irq in class 0 is set to all class 0 CPUs.
 *
 *      Note: we do not spread Gold (class 1) irqs back after an
 *      all-gold-cores hotplug. This matches the behavior of current
 *      irq balancers; the functionality can be added later if needed.
 *
 * 3. Unhandled corner cases:
 *
 * 3.1. An irq whose affinity mask is a subset of the class 0 CPUs is
 *      not spread if only the CPUs of that mask go offline and come
 *      back online.
 *
 * 3.2. A class 0 irq whose affinity is broken, and whose new effective
 *      affinity CPU (CPU4 in our example) goes offline, is not spread
 *      back to the class 0 CPUs once those CPUs come back online. This
 *      is not a problem where some constraint guarantees CPU4 is never
 *      hotplugged.
 */
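
/*
 * Restricted vendor hook into the GICv3 set_affinity path. When a
 * SPI's affinity spans a full class, OR GIC_INTERRUPT_ROUTING_MODE
 * (the Interrupt_Routing_Mode bit of GICD_IROUTERn) into the routing
 * value through *affinity so the hardware picks a target CPU (1 of N),
 * and program the matching class bits in the distributor.
 */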
static void trace_gic_v3_set_affinity(void *unused, struct irq_data *d,
				      const struct cpumask *mask_val, u64 *affinity,
				      bool force, void __iomem *base,
				      void __iomem *rbase, u64 redist_stride)
{
	const struct cpumask *cpu_affinity = mask_val;
	bool is_class0 = false, is_class1 = false;
	u32 irq = d->hwirq - 32;
	bool need_class_update = false;
	const struct cpumask *current_affinity = irq_data_get_affinity_mask(d);
	struct cpumask all_cpus;
	int cpu;
	u32 gicr_ctlr_val;
	void __iomem *cpu_gicr_ctlr_addr;

	if (d->hwirq < 32 || d->hwirq >= MAX_IRQS)
		return;

	pr_debug("irq: %lu mask: %*pb current affinity: %*pb\n",
		 d->hwirq, cpumask_pr_args(cpu_affinity),
		 cpumask_pr_args(current_affinity));

	if (!gic_routing_data.gic_1_of_N_init_done) {
		spin_lock(&gic_init_lock);
		if (!gic_routing_data.gic_1_of_N_init_done) {
			qcom_gic_enable_quirks(readl_relaxed(base + GICD_IIDR),
					       gic_quirks, base);
			WRITE_ONCE(gic_routing_data.gic_chip, d->chip);
			/* Order readers of .gic_chip */
			smp_wmb();
			WRITE_ONCE(gic_routing_data.gic_1_of_N_init_done,
				   true);
		}
		spin_unlock(&gic_init_lock);
	}

	if (!gic_routing_data.gic_supports_1_of_N)
		return;

	gic_routing_data.rbase = rbase;
	gic_routing_data.redist_stride = redist_stride;

	/*
	 * Set the DPG1NS bit to 0 for all online cores
	 * and to 1 for all offline cores.
	 */
	if (!gic_routing_data.gic_is_virtual && !gic_routing_data.dpg1ns_init) {
		for_each_possible_cpu(cpu) {
			cpu_gicr_ctlr_addr = rbase + (cpu * redist_stride) + GICR_CTLR;
			gicr_ctlr_val = readl_relaxed(cpu_gicr_ctlr_addr);
			if (!cpu_online(cpu))
				writel_relaxed(gicr_ctlr_val | GICR_CTLR_DPG1NS,
					       cpu_gicr_ctlr_addr);
			else
				writel_relaxed(gicr_ctlr_val & ~(GICR_CTLR_DPG1NS),
					       cpu_gicr_ctlr_addr);
		}
		gic_routing_data.dpg1ns_init = 1;
	}

	cpu = smp_processor_id();
	if (cpumask_subset(current_affinity,
			   &gic_routing_data.gic_routing_class0_cpus)) {
		if (!cpumask_intersects(cpu_online_mask,
					&gic_routing_data.gic_routing_class0_cpus) &&
		    !cpu_online(cpu) &&
		    cpumask_test_cpu(cpu,
				     &gic_routing_data.gic_routing_class0_cpus)) {
			pr_debug("Affinity broken class 0 irq: %lu\n", d->hwirq);
			return;
		}
	}

	if (cpumask_subset(current_affinity,
			   &gic_routing_data.gic_routing_class1_cpus)) {
		if (!cpumask_intersects(cpu_online_mask,
					&gic_routing_data.gic_routing_class1_cpus) &&
		    !cpu_online(cpu) &&
		    cpumask_test_cpu(cpu,
				     &gic_routing_data.gic_routing_class1_cpus)) {
			pr_debug("Affinity broken class 1 irq: %lu\n", d->hwirq);
			return;
		}
	}

	cpumask_or(&all_cpus, &gic_routing_data.gic_routing_class0_cpus,
		   &gic_routing_data.gic_routing_class1_cpus);
	if (!cpumask_subset(cpu_affinity, &gic_routing_data.gic_routing_class0_cpus) &&
	    !cpumask_equal(&gic_routing_data.gic_routing_class0_cpus, cpu_affinity) &&
	    !cpumask_equal(&gic_routing_data.gic_routing_class1_cpus, cpu_affinity) &&
	    !cpumask_equal(&all_cpus, cpu_affinity)) {
		pr_debug("irq: %lu has subset affinity, skip class setting\n", d->hwirq);
		goto clear_class;
	}

	if (cpumask_any_and(cpu_affinity, cpu_online_mask) >= nr_cpu_ids)
		cpu_affinity = cpu_online_mask;

	if (cpumask_subset(cpu_affinity,
			   &gic_routing_data.gic_routing_class0_cpus)) {
		is_class0 = true;
	} else if (cpumask_subset(cpu_affinity,
				  &gic_routing_data.gic_routing_class1_cpus)) {
		is_class1 = true;
	} else {
		is_class1 = is_class0 = true;
	}

	if (!(is_class0 || is_class1))
		goto clear_class;

	*affinity |= GIC_INTERRUPT_ROUTING_MODE;

	need_class_update = gic_need_class_update(irq, is_class0, is_class1);
	spin_lock(&gic_class_lock);
	set_bit(irq, gic_class_initialized);
	if (is_class0)
		set_bit(irq, active_gic_class0);
	else
		clear_bit(irq, active_gic_class0);
	if (is_class1)
		set_bit(irq, active_gic_class1);
	else
		clear_bit(irq, active_gic_class1);
	spin_unlock(&gic_class_lock);

	if (need_class_update)
		gic_do_class_update(base, irq, is_class0, is_class1);
	return;

clear_class:
	spin_lock(&gic_class_lock);
	clear_bit(irq, active_gic_class0);
	clear_bit(irq, active_gic_class1);
	spin_unlock(&gic_class_lock);
}
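
/*
 * Match the descriptor's irq_chip against the GICv3 chip: by pointer
 * once the chip is known, by name ("GICv3") before that.
 */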
static bool is_gic_chip(struct irq_desc *desc, struct irq_chip *gic_chip)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(data);

	if (!chip)
		return false;

	if (gic_chip)
		return (gic_chip == chip);
	else
		return !strcmp(chip->name, GIC_V3_NAME);
}
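
/*
 * A SPI needs its affinity (re)applied if it belongs to the GIC and,
 * when a class 0 snapshot is pending, if it was exclusively in
 * class 0.
 */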
static bool need_affinity_setting(struct irq_desc *desc,
				  struct irq_chip *gic_chip,
				  bool check_saved_class)
{
	bool need_affinity;
	struct irq_data *data = irq_desc_get_irq_data(desc);
	u32 irq = data->hwirq - 32;

	if (data->hwirq < 32 || data->hwirq >= MAX_IRQS)
		return false;

	need_affinity = is_gic_chip(desc, gic_chip);
	if (!need_affinity)
		return false;

	if (check_saved_class &&
	    !bitmap_empty(gic_saved_class0, MAX_IRQS)) {
		spin_lock(&gic_class_lock);
		need_affinity = test_bit(irq, active_gic_class0) &&
				!test_bit(irq, active_gic_class1);
		spin_unlock(&gic_class_lock);
	}
	return need_affinity;
}
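
/*
 * Walk all irq descriptors and (re)apply affinity for GIC SPIs. Per
 * the design notes above, this runs once shortly after module load and
 * again when class 0 comes back online after a full-class hotplug.
 */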
static void affinity_initialize_workfn(struct work_struct *work)
{
	struct irq_chip *gic_chip = NULL;
	struct irq_desc *desc;
	unsigned long flags;
	int i, err;
	bool affinity_setting = false;
	cpumask_t affinity = { CPU_BITS_NONE };
	struct irq_data *d;

	if (READ_ONCE(gic_routing_data.gic_1_of_N_init_done)) {
		/* Order .gic_1_of_N_init_done and .gic_chip read */
		smp_rmb();
		gic_chip = READ_ONCE(gic_routing_data.gic_chip);
	}

	for (i = 1; i < nr_irqs; i++) {
		if (atomic_add_return(0,
				      &gic_routing_data.abort_balancing))
			return;
		/* .abort_balancing read before affinity setting */
		smp_mb__after_atomic();

		local_irq_save(flags);
		rcu_read_lock();
		desc = irq_to_desc(i);
		if (!desc)
			goto out_rcu_lock;

		raw_spin_lock(&desc->lock);
		d = irq_desc_get_irq_data(desc);
		affinity_setting = need_affinity_setting(desc, gic_chip, true);
		if (!bitmap_empty(gic_saved_class0, MAX_IRQS))
			cpumask_copy(&affinity,
				     &gic_routing_data.gic_routing_class0_cpus);
		else
			cpumask_copy(&affinity, desc->irq_common_data.affinity);

		if (affinity_setting) {
			if (cpumask_any_and(&affinity, cpu_online_mask) >=
			    nr_cpu_ids)
				cpumask_copy(&affinity, cpu_online_mask);
			err = irq_do_set_affinity(d, &affinity, false);
			if (err)
				pr_warn_ratelimited("IRQ%u: affinity initialize failed(%d).\n",
						    d->irq, err);
		}
		raw_spin_unlock(&desc->lock);
out_rcu_lock:
		rcu_read_unlock();
		local_irq_restore(flags);
	}

	/* All affinity settings complete before .affinity_initialized = 1 */
	smp_mb__before_atomic();
	atomic_set(&gic_routing_data.affinity_initialized, 1);
}
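
/*
 * CPU hotplug online callback: clear DPG1NS so this PE can again be
 * selected as a 1-of-N target, and when the first class 0 CPU comes
 * back, schedule the affinity restore work.
 */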
static int gic_affinity_cpu_online(unsigned int cpu)
{
	u32 gicr_ctlr_val;
	void __iomem *cpu_gicr_ctlr_addr;

	if (!gic_routing_data.gic_is_virtual && gic_routing_data.dpg1ns_init) {
		cpu_gicr_ctlr_addr = gic_routing_data.rbase +
			(cpu * gic_routing_data.redist_stride) + GICR_CTLR;
		gicr_ctlr_val = readl_relaxed(cpu_gicr_ctlr_addr);
		writel_relaxed(gicr_ctlr_val & ~(GICR_CTLR_DPG1NS), cpu_gicr_ctlr_addr);
	}

	if (cpumask_test_cpu(cpu,
			     &gic_routing_data.gic_routing_class0_cpus)) {
		if (cpumask_empty(&gic_routing_data.class0_active_cpus))
			/*
			 * Use a sane delay (matches existing irq
			 * balancers' delay)
			 */
			schedule_delayed_work(&affinity_initialize_work,
					      msecs_to_jiffies(5000));
		cpumask_set_cpu(cpu,
				&gic_routing_data.class0_active_cpus);
	}
	return 0;
}
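
/*
 * CPU hotplug offline callback: set DPG1NS so 1-of-N interrupts are no
 * longer delivered to this PE. When the last class 0 CPU goes down,
 * quiesce the initialize work and snapshot the irqs that were
 * exclusively in class 0, so they can be restored on the way back up.
 */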
static int gic_affinity_cpu_offline(unsigned int cpu)
{
	unsigned long flags;
	u32 gicr_ctlr_val;
	void __iomem *cpu_gicr_ctlr_addr;

	if (!gic_routing_data.gic_is_virtual && gic_routing_data.dpg1ns_init) {
		cpu_gicr_ctlr_addr = gic_routing_data.rbase +
			(cpu * gic_routing_data.redist_stride) + GICR_CTLR;
		gicr_ctlr_val = readl_relaxed(cpu_gicr_ctlr_addr);
		writel_relaxed(gicr_ctlr_val | GICR_CTLR_DPG1NS, cpu_gicr_ctlr_addr);
	}

	if (!cpumask_test_cpu(cpu,
			      &gic_routing_data.gic_routing_class0_cpus))
		return 0;

	cpumask_clear_cpu(cpu, &gic_routing_data.class0_active_cpus);
	if (cpumask_empty(&gic_routing_data.class0_active_cpus)) {
		/* Use a RmW op to get the current value */
		if (!atomic_add_return(0,
				       &gic_routing_data.affinity_initialized)) {
			flush_delayed_work(&affinity_initialize_work);
			/*
			 * Flush the initial work before the
			 * gic_saved_class0 update below.
			 */
			smp_mb();
		} else {
			atomic_set(&gic_routing_data.abort_balancing, 1);
			/* .abort_balancing write before cancel work */
			smp_mb__after_atomic();
			cancel_delayed_work_sync(&affinity_initialize_work);
			/* .abort_balancing write after cancel work */
			smp_mb__before_atomic();
			atomic_set(&gic_routing_data.abort_balancing, 0);
		}
		spin_lock_irqsave(&gic_class_lock, flags);
		bitmap_andnot(gic_saved_class0,
			      active_gic_class0,
			      active_gic_class1, MAX_IRQS);
		spin_unlock_irqrestore(&gic_class_lock, flags);
	}
	return 0;
}
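
/*
 * irq_handler_entry tracepoint callback. With 1-of-N routing the
 * hardware may deliver a SPI to any CPU of its class, so the recorded
 * effective affinity can go stale; resync it with the affinity mask
 * and flag the irq thread (IRQTF_AFFINITY) to follow.
 */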
void gic_irq_handler_entry_notifier(void *ignore, int irq,
				    struct irqaction *action)
{
	struct irq_chip *gic_chip = NULL;
	struct irq_desc *desc;
	struct irq_data *data;
	u32 hwirq, hwirq_bitpos;
	const struct cpumask *effective_affinity;

	if (!action->thread_fn)
		return;

	desc = irq_to_desc(irq);
	if (!desc)
		return;

	data = irq_desc_get_irq_data(desc);
	hwirq = data->hwirq;
	if (hwirq < 32 || hwirq >= MAX_IRQS)
		return;

	if (READ_ONCE(gic_routing_data.gic_1_of_N_init_done)) {
		/* Order .gic_1_of_N_init_done and .gic_chip read */
		smp_rmb();
		gic_chip = READ_ONCE(gic_routing_data.gic_chip);
	}

	if (!is_gic_chip(desc, gic_chip))
		return;

	hwirq_bitpos = hwirq - 32;
	if (!test_bit(hwirq_bitpos, active_gic_class0) &&
	    !test_bit(hwirq_bitpos, active_gic_class1))
		return;

	if (raw_spin_trylock(&desc->lock)) {
		effective_affinity =
			irq_data_get_effective_affinity_mask(data);
		if (!cpumask_equal(effective_affinity,
				   desc->irq_common_data.affinity)) {
			pr_debug("Update effective affinity %u mask: %*pb irq: %d\n",
				 hwirq,
				 cpumask_pr_args(desc->irq_common_data.affinity),
				 irq);
			irq_data_update_effective_affinity(
				data, desc->irq_common_data.affinity);
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
		}
		raw_spin_unlock(&desc->lock);
	}
}
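
/*
 * Read the class CPU lists from the devicetree, then register the
 * vendor hook, the tracepoint and the hotplug callbacks. A
 * hypothetical node (the CPU numbers are illustrative, not mandated by
 * this driver):
 *
 *	gic-intr-routing {
 *		compatible = "qcom,gic-intr-routing";
 *		qcom,gic-class0-cpus = <0 1 2 3>;
 *		qcom,gic-class1-cpus = <4 5 6 7>;
 *	};
 */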
static int gic_intr_routing_probe(struct platform_device *pdev)
{
	int i, cpus_len;
	int rc = 0;
	u32 class0_cpus[NUM_CLASS_CPUS] = {0};
	u32 class1_cpus[NUM_CLASS_CPUS] = {0};

	cpus_len = of_property_read_variable_u32_array(
			pdev->dev.of_node,
			"qcom,gic-class0-cpus",
			class0_cpus, 0, NUM_CLASS_CPUS);
	for (i = 0; i < cpus_len; i++)
		if (class0_cpus[i] < num_possible_cpus())
			cpumask_set_cpu(class0_cpus[i],
					&gic_routing_data.gic_routing_class0_cpus);

	cpus_len = of_property_read_variable_u32_array(
			pdev->dev.of_node,
			"qcom,gic-class1-cpus",
			class1_cpus, 0, NUM_CLASS_CPUS);
	for (i = 0; i < cpus_len; i++)
		if (class1_cpus[i] < num_possible_cpus())
			cpumask_set_cpu(class1_cpus[i],
					&gic_routing_data.gic_routing_class1_cpus);

	register_trace_android_rvh_gic_v3_set_affinity(
			trace_gic_v3_set_affinity, NULL);
	register_trace_irq_handler_entry(gic_irq_handler_entry_notifier, NULL);

	rc = cpuhp_setup_state(
			CPUHP_AP_ONLINE_DYN, "qcom/gic_affinity_setting:online",
			gic_affinity_cpu_online, gic_affinity_cpu_offline);
	if (rc < 0) {
		pr_err("Failed to register CPUHP state: qcom/gic_affinity_setting:online\n");
		return rc;
	}
	gic_routing_data.gic_affinity_cpuhp_state = rc;

	pr_info("GIC Interrupt Routing Driver Registered\n");
	return 0;
}
static const struct of_device_id gic_intr_routing_of_match[] = {
	{ .compatible = "qcom,gic-intr-routing" },
	{}
};
MODULE_DEVICE_TABLE(of, gic_intr_routing_of_match);

static struct platform_driver gic_intr_routing_driver = {
	.probe = gic_intr_routing_probe,
	.driver = {
		.name = "gic_intr_routing",
		.of_match_table = gic_intr_routing_of_match,
	},
};
module_platform_driver(gic_intr_routing_driver);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. GIC Interrupt Routing Driver");
MODULE_LICENSE("GPL");