  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 2015 Imagination Technologies Ltd
  4. * Author: Qais Yousef <[email protected]>
  5. *
  6. * This file contains driver APIs to the IPI subsystem.
  7. */
  8. #define pr_fmt(fmt) "genirq/ipi: " fmt
  9. #include <linux/irqdomain.h>
  10. #include <linux/irq.h>
/**
 * irq_reserve_ipi() - Setup an IPI to destination cpumask
 * @domain:	IPI domain
 * @dest:	cpumask of CPUs which can receive the IPI
 *
 * Allocate a virq that can be used to send IPI to any CPU in dest mask.
 *
 * Return: Linux IRQ number on success or error code on failure
 */
int irq_reserve_ipi(struct irq_domain *domain,
			     const struct cpumask *dest)
{
	unsigned int nr_irqs, offset;
	struct irq_data *data;
	int virq, i;

	if (!domain || !irq_domain_is_ipi(domain)) {
		pr_warn("Reservation on a non IPI domain\n");
		return -EINVAL;
	}

	if (!cpumask_subset(dest, cpu_possible_mask)) {
		pr_warn("Reservation is not in possible_cpu_mask\n");
		return -EINVAL;
	}

	nr_irqs = cpumask_weight(dest);
	if (!nr_irqs) {
		pr_warn("Reservation for empty destination mask\n");
		return -EINVAL;
	}

	if (irq_domain_is_ipi_single(domain)) {
		/*
		 * If the underlying implementation uses a single HW irq on
		 * all cpus then we only need a single Linux irq number for
		 * it. We have no restrictions vs. the destination mask. The
		 * underlying implementation can deal with holes nicely.
		 */
		nr_irqs = 1;
		offset = 0;
	} else {
		unsigned int next;

		/*
		 * The IPI requires a separate HW irq on each CPU. We require
		 * that the destination mask is consecutive. If an
		 * implementation needs to support holes, it can reserve
		 * several IPI ranges.
		 */
		offset = cpumask_first(dest);
		/*
		 * Find a hole and if found look for another set bit after the
		 * hole. For now we don't support this scenario.
		 */
		next = cpumask_next_zero(offset, dest);
		if (next < nr_cpu_ids)
			next = cpumask_next(next, dest);
		if (next < nr_cpu_ids) {
			pr_warn("Destination mask has holes\n");
			return -EINVAL;
		}
	}

	virq = irq_domain_alloc_descs(-1, nr_irqs, 0, NUMA_NO_NODE, NULL);
	if (virq <= 0) {
		pr_warn("Can't reserve IPI, failed to alloc descs\n");
		return -ENOMEM;
	}

	virq = __irq_domain_alloc_irqs(domain, virq, nr_irqs, NUMA_NO_NODE,
				       (void *) dest, true, NULL);

	if (virq <= 0) {
		pr_warn("Can't reserve IPI, failed to alloc hw irqs\n");
		goto free_descs;
	}

	/*
	 * Record the target mask and the offset of the first CPU on every
	 * allocated virq, and pin the IPIs: they must never be migrated by
	 * irq balancing.
	 */
	for (i = 0; i < nr_irqs; i++) {
		data = irq_get_irq_data(virq + i);
		cpumask_copy(data->common->affinity, dest);
		data->common->ipi_offset = offset;
		irq_set_status_flags(virq + i, IRQ_NO_BALANCING);
	}
	return virq;

free_descs:
	/* Hwirq allocation failed; release the descriptors reserved above. */
	irq_free_descs(virq, nr_irqs);
	return -EBUSY;
}
/**
 * irq_destroy_ipi() - unreserve an IPI that was previously allocated
 * @irq:	Linux IRQ number to be destroyed
 * @dest:	cpumask of CPUs which should have the IPI removed
 *
 * The IPIs allocated with irq_reserve_ipi() are returned to the system
 * destroying all virqs associated with them.
 *
 * Return: %0 on success or error code on failure.
 */
int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
{
	struct irq_data *data = irq_get_irq_data(irq);
	const struct cpumask *ipimask;
	struct irq_domain *domain;
	unsigned int nr_irqs;

	if (!irq || !data)
		return -EINVAL;

	domain = data->domain;
	if (WARN_ON(domain == NULL))
		return -EINVAL;

	if (!irq_domain_is_ipi(domain)) {
		pr_warn("Trying to destroy a non IPI domain!\n");
		return -EINVAL;
	}

	ipimask = irq_data_get_affinity_mask(data);
	if (!ipimask || WARN_ON(!cpumask_subset(dest, ipimask)))
		/*
		 * Must be destroying a subset of CPUs to which this IPI
		 * was set up to target
		 */
		return -EINVAL;

	if (irq_domain_is_ipi_per_cpu(domain)) {
		/*
		 * Per-cpu IPIs own one consecutive virq per CPU starting at
		 * ipi_offset; translate the first CPU in @dest to its virq
		 * and free one irq per requested CPU.
		 */
		irq = irq + cpumask_first(dest) - data->common->ipi_offset;
		nr_irqs = cpumask_weight(dest);
	} else {
		/* Single HW irq shared by all CPUs: exactly one virq. */
		nr_irqs = 1;
	}

	irq_domain_free_irqs(irq, nr_irqs);
	return 0;
}
/**
 * ipi_get_hwirq - Get the hwirq associated with an IPI to a CPU
 * @irq:	Linux IRQ number
 * @cpu:	the target CPU
 *
 * When dealing with coprocessors IPI, we need to inform the coprocessor of
 * the hwirq it needs to use to receive and send IPIs.
 *
 * Return: hwirq value on success or INVALID_HWIRQ on failure.
 */
irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu)
{
	struct irq_data *data = irq_get_irq_data(irq);
	const struct cpumask *ipimask;

	if (!data || cpu >= nr_cpu_ids)
		return INVALID_HWIRQ;

	/* @cpu must be one of the CPUs this IPI was reserved for. */
	ipimask = irq_data_get_affinity_mask(data);
	if (!ipimask || !cpumask_test_cpu(cpu, ipimask))
		return INVALID_HWIRQ;

	/*
	 * Get the real hardware irq number if the underlying implementation
	 * uses a separate irq per cpu. If the underlying implementation uses
	 * a single hardware irq for all cpus then the IPI send mechanism
	 * needs to take care of the cpu destinations.
	 */
	if (irq_domain_is_ipi_per_cpu(data->domain))
		data = irq_get_irq_data(irq + cpu - data->common->ipi_offset);

	return data ? irqd_to_hwirq(data) : INVALID_HWIRQ;
}
EXPORT_SYMBOL_GPL(ipi_get_hwirq);
  162. static int ipi_send_verify(struct irq_chip *chip, struct irq_data *data,
  163. const struct cpumask *dest, unsigned int cpu)
  164. {
  165. const struct cpumask *ipimask;
  166. if (!chip || !data)
  167. return -EINVAL;
  168. if (!chip->ipi_send_single && !chip->ipi_send_mask)
  169. return -EINVAL;
  170. if (cpu >= nr_cpu_ids)
  171. return -EINVAL;
  172. ipimask = irq_data_get_affinity_mask(data);
  173. if (!ipimask)
  174. return -EINVAL;
  175. if (dest) {
  176. if (!cpumask_subset(dest, ipimask))
  177. return -EINVAL;
  178. } else {
  179. if (!cpumask_test_cpu(cpu, ipimask))
  180. return -EINVAL;
  181. }
  182. return 0;
  183. }
/**
 * __ipi_send_single - send an IPI to a target Linux SMP CPU
 * @desc:	pointer to irq_desc of the IRQ
 * @cpu:	destination CPU, must in the destination mask passed to
 *		irq_reserve_ipi()
 *
 * This function is for architecture or core code to speed up IPI sending. Not
 * usable from driver code.
 *
 * Return: %0 on success or negative error number on failure.
 */
int __ipi_send_single(struct irq_desc *desc, unsigned int cpu)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(data);

#ifdef DEBUG
	/*
	 * Minimise the overhead by omitting the checks for Linux SMP IPIs.
	 * Since the callers should be arch or core code which is generally
	 * trusted, only check for errors when debugging.
	 */
	if (WARN_ON_ONCE(ipi_send_verify(chip, data, NULL, cpu)))
		return -EINVAL;
#endif
	/* No single-CPU method: fall back to sending a one-CPU mask. */
	if (!chip->ipi_send_single) {
		chip->ipi_send_mask(data, cpumask_of(cpu));
		return 0;
	}

	/* FIXME: Store this information in irqdata flags */
	if (irq_domain_is_ipi_per_cpu(data->domain) &&
	    cpu != data->common->ipi_offset) {
		/* use the correct data for that cpu */
		unsigned irq = data->irq + cpu - data->common->ipi_offset;

		data = irq_get_irq_data(irq);
	}
	chip->ipi_send_single(data, cpu);
	return 0;
}
/**
 * __ipi_send_mask - send an IPI to target Linux SMP CPU(s)
 * @desc:	pointer to irq_desc of the IRQ
 * @dest:	dest CPU(s), must be a subset of the mask passed to
 *		irq_reserve_ipi()
 *
 * This function is for architecture or core code to speed up IPI sending. Not
 * usable from driver code.
 *
 * Return: %0 on success or negative error number on failure.
 */
int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	unsigned int cpu;

#ifdef DEBUG
	/*
	 * Minimise the overhead by omitting the checks for Linux SMP IPIs.
	 * Since the callers should be arch or core code which is generally
	 * trusted, only check for errors when debugging.
	 */
	if (WARN_ON_ONCE(ipi_send_verify(chip, data, dest, 0)))
		return -EINVAL;
#endif
	/* Prefer the chip's native mask method when it has one. */
	if (chip->ipi_send_mask) {
		chip->ipi_send_mask(data, dest);
		return 0;
	}

	if (irq_domain_is_ipi_per_cpu(data->domain)) {
		/*
		 * Per-cpu IPI: each target CPU has its own virq; look up the
		 * matching irq_data before each single send.
		 */
		unsigned int base = data->irq;

		for_each_cpu(cpu, dest) {
			unsigned irq = base + cpu - data->common->ipi_offset;

			data = irq_get_irq_data(irq);
			chip->ipi_send_single(data, cpu);
		}
	} else {
		/* Single shared HW irq: reuse the same data for every CPU. */
		for_each_cpu(cpu, dest)
			chip->ipi_send_single(data, cpu);
	}
	return 0;
}
  264. /**
  265. * ipi_send_single - Send an IPI to a single CPU
  266. * @virq: Linux IRQ number from irq_reserve_ipi()
  267. * @cpu: destination CPU, must in the destination mask passed to
  268. * irq_reserve_ipi()
  269. *
  270. * Return: %0 on success or negative error number on failure.
  271. */
  272. int ipi_send_single(unsigned int virq, unsigned int cpu)
  273. {
  274. struct irq_desc *desc = irq_to_desc(virq);
  275. struct irq_data *data = desc ? irq_desc_get_irq_data(desc) : NULL;
  276. struct irq_chip *chip = data ? irq_data_get_irq_chip(data) : NULL;
  277. if (WARN_ON_ONCE(ipi_send_verify(chip, data, NULL, cpu)))
  278. return -EINVAL;
  279. return __ipi_send_single(desc, cpu);
  280. }
  281. EXPORT_SYMBOL_GPL(ipi_send_single);
  282. /**
  283. * ipi_send_mask - Send an IPI to target CPU(s)
  284. * @virq: Linux IRQ number from irq_reserve_ipi()
  285. * @dest: dest CPU(s), must be a subset of the mask passed to
  286. * irq_reserve_ipi()
  287. *
  288. * Return: %0 on success or negative error number on failure.
  289. */
  290. int ipi_send_mask(unsigned int virq, const struct cpumask *dest)
  291. {
  292. struct irq_desc *desc = irq_to_desc(virq);
  293. struct irq_data *data = desc ? irq_desc_get_irq_data(desc) : NULL;
  294. struct irq_chip *chip = data ? irq_data_get_irq_chip(data) : NULL;
  295. if (WARN_ON_ONCE(ipi_send_verify(chip, data, dest, 0)))
  296. return -EINVAL;
  297. return __ipi_send_mask(desc, dest);
  298. }
  299. EXPORT_SYMBOL_GPL(ipi_send_mask);