ipi.c

// SPDX-License-Identifier: GPL-2.0

#include <linux/cpumask.h>
#include <linux/smp.h>
#include <asm/io_apic.h>

#include "local.h"

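/*
 * Static key gating whether IPIs may be sent with the APIC shorthands
 * (all / all-but-self) instead of iterating over a cpumask. It is flipped
 * by apic_smt_update() below.
 */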
DEFINE_STATIC_KEY_FALSE(apic_use_ipi_shorthand);

#ifdef CONFIG_SMP
static int apic_ipi_shorthand_off __ro_after_init;

static __init int apic_ipi_shorthand(char *str)
{
	get_option(&str, &apic_ipi_shorthand_off);
	return 1;
}
__setup("no_ipi_broadcast=", apic_ipi_shorthand);

static int __init print_ipi_mode(void)
{
	pr_info("IPI shorthand broadcast: %s\n",
		apic_ipi_shorthand_off ? "disabled" : "enabled");
	return 0;
}
late_initcall(print_ipi_mode);
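
/*
 * Re-evaluate whether the IPI shorthands may be used, based on the
 * command line switch and the current set of online/booted CPUs.
 */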
void apic_smt_update(void)
{
	/*
	 * Do not switch to broadcast mode if:
	 * - Disabled on the command line
	 * - Only a single CPU is online
	 * - Not all present CPUs have been at least booted once
	 *
	 * The latter is important as the local APIC might be in some
	 * random state and a broadcast might cause havoc. That's
	 * especially true for NMI broadcasting.
	 */
	if (apic_ipi_shorthand_off || num_online_cpus() == 1 ||
	    !cpumask_equal(cpu_present_mask, &cpus_booted_once_mask)) {
		static_branch_disable(&apic_use_ipi_shorthand);
	} else {
		static_branch_enable(&apic_use_ipi_shorthand);
	}
}
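
/*
 * Send @vector to all online CPUs except the calling one, preferring the
 * all-but-self shorthand when the static key allows it.
 */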
void apic_send_IPI_allbutself(unsigned int vector)
{
	if (num_online_cpus() < 2)
		return;

	if (static_branch_likely(&apic_use_ipi_shorthand))
		apic->send_IPI_allbutself(vector);
	else
		apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
}

/*
 * Send a 'reschedule' IPI to another CPU. It goes straight through and
 * wastes no time serializing anything. Worst case is that we lose a
 * reschedule ...
 */
void native_smp_send_reschedule(int cpu)
{
	if (unlikely(cpu_is_offline(cpu))) {
		WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
		return;
	}
	apic->send_IPI(cpu, RESCHEDULE_VECTOR);
}
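
/* Send the call function single IPI (CALL_FUNCTION_SINGLE_VECTOR) to one CPU. */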
void native_send_call_func_single_ipi(int cpu)
{
	apic->send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
}
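
/*
 * Send the call function IPI to all CPUs in @mask. If the mask covers all
 * online CPUs, possibly except the sending one, use the 'all' or
 * 'all-but-self' shorthand; otherwise fall back to a mask based send.
 */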
void native_send_call_func_ipi(const struct cpumask *mask)
{
	if (static_branch_likely(&apic_use_ipi_shorthand)) {
		unsigned int cpu = smp_processor_id();

		if (!cpumask_or_equal(mask, cpumask_of(cpu), cpu_online_mask))
			goto sendmask;

		if (cpumask_test_cpu(cpu, mask))
			apic->send_IPI_all(CALL_FUNCTION_VECTOR);
		else if (num_online_cpus() > 1)
			apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
		return;
	}

sendmask:
	apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
}

#endif /* CONFIG_SMP */
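
/*
 * __prepare_ICR2() builds the destination field for the upper half of the
 * ICR (APIC_ICR2); __xapic_wait_icr_idle() spins until the previous IPI has
 * left the delivery-pending state.
 */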
static inline int __prepare_ICR2(unsigned int mask)
{
	return SET_XAPIC_DEST_FIELD(mask);
}

static inline void __xapic_wait_icr_idle(void)
{
	while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY)
		cpu_relax();
}
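
/*
 * Send an IPI using one of the ICR shorthands (self, all including self,
 * all excluding self). No destination needs to be programmed into ICR2.
 */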
void __default_send_IPI_shortcut(unsigned int shortcut, int vector)
{
	/*
	 * Subtle. In the case of the 'never do double writes' workaround
	 * we have to lock out interrupts to be safe. As we don't care
	 * about the value read we use an atomic rmw access to avoid
	 * costly cli/sti. Otherwise we use an even cheaper single atomic
	 * write to the APIC.
	 */
	unsigned int cfg;

	/*
	 * Wait for idle.
	 */
	if (unlikely(vector == NMI_VECTOR))
		safe_apic_wait_icr_idle();
	else
		__xapic_wait_icr_idle();

	/*
	 * No need to touch the target chip field. Also the destination
	 * mode is ignored when a shorthand is used.
	 */
	cfg = __prepare_ICR(shortcut, vector, 0);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	native_apic_mem_write(APIC_ICR, cfg);
}

/*
 * This is used to send an IPI with no shorthand notation (the destination is
 * specified in bits 56 to 63 of the ICR).
 */
void __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
{
	unsigned long cfg;

	/*
	 * Wait for idle.
	 */
	if (unlikely(vector == NMI_VECTOR))
		safe_apic_wait_icr_idle();
	else
		__xapic_wait_icr_idle();

	/*
	 * prepare target chip field
	 */
	cfg = __prepare_ICR2(mask);
	native_apic_mem_write(APIC_ICR2, cfg);

	/*
	 * program the ICR
	 */
	cfg = __prepare_ICR(0, vector, dest);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	native_apic_mem_write(APIC_ICR, cfg);
}
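
/*
 * Send @vector to a single CPU in physical destination mode. Interrupts are
 * disabled so the ICR2/ICR write pair cannot be interrupted.
 */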
void default_send_IPI_single_phys(int cpu, int vector)
{
	unsigned long flags;

	local_irq_save(flags);
	__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
				      vector, APIC_DEST_PHYSICAL);
	local_irq_restore(flags);
}

void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
{
	unsigned long query_cpu;
	unsigned long flags;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead.
	 * - mbligh
	 */
	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
					   int vector)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int query_cpu;
	unsigned long flags;

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
				query_cpu), vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

/*
 * Helper function for APICs which insist on cpumasks
 */
void default_send_IPI_single(int cpu, int vector)
{
	apic->send_IPI_mask(cpumask_of(cpu), vector);
}
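
/*
 * Thin wrappers around __default_send_IPI_shortcut() for the all-but-self,
 * all-including-self and self destinations.
 */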
void default_send_IPI_allbutself(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
}

void default_send_IPI_all(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_ALLINC, vector);
}

void default_send_IPI_self(int vector)
{
	__default_send_IPI_shortcut(APIC_DEST_SELF, vector);
}

#ifdef CONFIG_X86_32

void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
					    int vector)
{
	unsigned long flags;
	unsigned int query_cpu;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so I do a unicast to each CPU instead. This
	 * should be modified to do 1 message per cluster ID - mbligh
	 */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask)
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, APIC_DEST_LOGICAL);
	local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
					      int vector)
{
	unsigned long flags;
	unsigned int query_cpu;
	unsigned int this_cpu = smp_processor_id();

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu == this_cpu)
			continue;
		__default_send_IPI_dest_field(
			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
			vector, APIC_DEST_LOGICAL);
	}
	local_irq_restore(flags);
}

/*
 * This is only used on smaller machines.
 */
void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
{
	unsigned long mask = cpumask_bits(cpumask)[0];
	unsigned long flags;

	if (!mask)
		return;

	local_irq_save(flags);
	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
	__default_send_IPI_dest_field(mask, vector, APIC_DEST_LOGICAL);
	local_irq_restore(flags);
}

/* must come after the send_IPI functions above for inlining */
static int convert_apicid_to_cpu(int apic_id)
{
	int i;

	for_each_possible_cpu(i) {
		if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
			return i;
	}
	return -1;
}
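
/*
 * Determine the current CPU number from the hardware APIC ID rather than
 * from per-CPU state. Falls back to CPU 0 if there is no APIC or the ID
 * cannot be mapped to a CPU.
 */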
int safe_smp_processor_id(void)
{
	int apicid, cpuid;

	if (!boot_cpu_has(X86_FEATURE_APIC))
		return 0;

	apicid = hard_smp_processor_id();
	if (apicid == BAD_APICID)
		return 0;

	cpuid = convert_apicid_to_cpu(apicid);

	return cpuid >= 0 ? cpuid : 0;
}
#endif