// SPDX-License-Identifier: GPL-2.0
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

#include <xen/events.h>
#include <xen/hvc-console.h>

#include "xen-ops.h"
#include "smp.h"
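
/*
 * Per-CPU bookkeeping for the bindings set up below: the Linux irq
 * number (-1 while unbound) and the kasprintf()-allocated name handed
 * to the bind helpers.
 */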
static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);

/*
 * Reschedule call back.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
	inc_irq_stat(irq_resched_count);
	scheduler_ipi();

	return IRQ_HANDLED;
}
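
/*
 * Tear down the per-CPU IPI/VIRQ bindings for @cpu.  Safe to call on a
 * partially initialised CPU: each binding is only released if its irq
 * is valid, which is what lets xen_smp_intr_init() use this as its
 * error path.
 */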
void xen_smp_intr_free(unsigned int cpu)
{
	kfree(per_cpu(xen_resched_irq, cpu).name);
	per_cpu(xen_resched_irq, cpu).name = NULL;
	if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
		per_cpu(xen_resched_irq, cpu).irq = -1;
	}
	kfree(per_cpu(xen_callfunc_irq, cpu).name);
	per_cpu(xen_callfunc_irq, cpu).name = NULL;
	if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
		per_cpu(xen_callfunc_irq, cpu).irq = -1;
	}
	kfree(per_cpu(xen_debug_irq, cpu).name);
	per_cpu(xen_debug_irq, cpu).name = NULL;
	if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
		per_cpu(xen_debug_irq, cpu).irq = -1;
	}
	kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
	per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
	if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
				       NULL);
		per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
	}
}
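
/*
 * Bind the per-CPU IPIs (reschedule, call-function and
 * call-function-single) plus, where applicable, the debug VIRQ for
 * @cpu.  Any failure unwinds the bindings made so far via
 * xen_smp_intr_free().
 */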
int xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	char *resched_name, *callfunc_name, *debug_name;

	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
	per_cpu(xen_resched_irq, cpu).name = resched_name;
	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
				    cpu,
				    xen_reschedule_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    resched_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_resched_irq, cpu).irq = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
	per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
				    cpu,
				    xen_call_function_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfunc_irq, cpu).irq = rc;
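
	/* The debug VIRQ is only bound when using 2-level event channels. */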
	if (!xen_fifo_events) {
		debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
		per_cpu(xen_debug_irq, cpu).name = debug_name;
		rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu,
					     xen_debug_interrupt,
					     IRQF_PERCPU | IRQF_NOBALANCING,
					     debug_name, NULL);
		if (rc < 0)
			goto fail;
		per_cpu(xen_debug_irq, cpu).irq = rc;
	}

	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
	per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
				    cpu,
				    xen_call_function_single_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;

	return 0;

 fail:
	xen_smp_intr_free(cpu);
	return rc;
}
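
/*
 * Called once all CPUs have been brought up.  HVM guests still go
 * through the native path; PV guests only need the logical package
 * count recalculated.
 */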
void __init xen_smp_cpus_done(unsigned int max_cpus)
{
	if (xen_hvm_domain())
		native_smp_cpus_done(max_cpus);
	else
		calculate_max_logical_packages();
}
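
/* Ask @cpu to run through the scheduler by raising its reschedule IPI. */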
void xen_smp_send_reschedule(int cpu)
{
	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}
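
/* Deliver @vector to every online CPU in @mask, one at a time. */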
static void __xen_send_IPI_mask(const struct cpumask *mask,
				int vector)
{
	unsigned cpu;

	for_each_cpu_and(cpu, mask, cpu_online_mask)
		xen_send_IPI_one(cpu, vector);
}
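
/*
 * Send the call-function IPI to @mask.  If any target vCPU is currently
 * preempted by the hypervisor, yield this vCPU so the target gets a
 * chance to process the request sooner.
 */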
void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
	int cpu;

	__xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

	/* Make sure other vcpus get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (xen_vcpu_stolen(cpu)) {
			HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
			break;
		}
	}
}

void xen_smp_send_call_function_single_ipi(int cpu)
{
	__xen_send_IPI_mask(cpumask_of(cpu),
			    XEN_CALL_FUNCTION_SINGLE_VECTOR);
}
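
/*
 * Translate a native x86 IPI vector number into the corresponding Xen
 * IPI vector, or -1 if there is no Xen equivalent.
 */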
static inline int xen_map_vector(int vector)
{
	int xen_vector;

	switch (vector) {
	case RESCHEDULE_VECTOR:
		xen_vector = XEN_RESCHEDULE_VECTOR;
		break;
	case CALL_FUNCTION_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_VECTOR;
		break;
	case CALL_FUNCTION_SINGLE_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
		break;
	case IRQ_WORK_VECTOR:
		xen_vector = XEN_IRQ_WORK_VECTOR;
		break;
#ifdef CONFIG_X86_64
	case NMI_VECTOR:
	case APIC_DM_NMI: /* Some use that instead of NMI_VECTOR */
		xen_vector = XEN_NMI_VECTOR;
		break;
#endif
	default:
		xen_vector = -1;
		printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
			vector);
	}

	return xen_vector;
}
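
/* Native-vector entry points: translate first, then fan the IPI out. */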
void xen_send_IPI_mask(const struct cpumask *mask,
		       int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(mask, xen_vector);
}

void xen_send_IPI_all(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(cpu_online_mask, xen_vector);
}

void xen_send_IPI_self(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		xen_send_IPI_one(smp_processor_id(), xen_vector);
}
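
/*
 * As xen_send_IPI_mask(), but never signal the sending CPU itself.
 * With a single online CPU there is nobody else to signal, so bail out
 * early.
 */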
void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
				  int vector)
{
	unsigned cpu;
	unsigned int this_cpu = smp_processor_id();
	int xen_vector = xen_map_vector(vector);

	if (!(num_online_cpus() > 1) || (xen_vector < 0))
		return;

	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		if (this_cpu == cpu)
			continue;

		xen_send_IPI_one(cpu, xen_vector);
	}
}

void xen_send_IPI_allbutself(int vector)
{
	xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
}
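
/* The call-function IPIs land here: run the generic smp machinery. */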
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_interrupt();
	inc_irq_stat(irq_call_count);

	return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);

	return IRQ_HANDLED;
}