smp.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * SMP initialisation and IPI support
 * Based on arch/arm64/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/cpu.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/profile.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/irq_work.h>

#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>

enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_CPU_CRASH_STOP,
	IPI_IRQ_WORK,
	IPI_TIMER,
	IPI_MAX
};

unsigned long __cpuid_to_hartid_map[NR_CPUS] __ro_after_init = {
	[0 ... NR_CPUS-1] = INVALID_HARTID
};
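
/*
 * CPU 0 is always the boot hart: record the hart ID that the early boot
 * code saved in boot_cpu_hartid, before any other CPU bring-up runs.
 */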
void __init smp_setup_processor_id(void)
{
	cpuid_to_hartid_map(0) = boot_cpu_hartid;
}

/* A collection of single-bit IPI messages. */
static struct {
	unsigned long stats[IPI_MAX] ____cacheline_aligned;
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;
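
/*
 * Reverse lookup of the cpuid -> hartid table. Hart IDs need not be
 * contiguous or start at zero, so a linear scan is required.
 */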
int riscv_hartid_to_cpuid(unsigned long hartid)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		if (cpuid_to_hartid_map(i) == hartid)
			return i;

	return -ENOENT;
}
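
/* Lets the generic CPU device code match a logical CPU to its hart ID. */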
bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return phys_id == cpuid_to_hartid_map(cpu);
}
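
/*
 * Handler for IPI_CPU_STOP: mark this CPU offline and park it in a
 * wait-for-interrupt loop from which it never returns.
 */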
static void ipi_stop(void)
{
	set_cpu_online(smp_processor_id(), false);
	while (1)
		wait_for_interrupt();
}

#ifdef CONFIG_KEXEC_CORE
static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0);
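
/*
 * Handler for IPI_CPU_CRASH_STOP: save this CPU's registers for the
 * crash dump, signal the panicking CPU via waiting_for_crash_ipi, and
 * spin with interrupts disabled, using the hotplug stop method when
 * one is available.
 */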
static inline void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
{
	crash_save_cpu(regs, cpu);

	atomic_dec(&waiting_for_crash_ipi);

	local_irq_disable();

#ifdef CONFIG_HOTPLUG_CPU
	if (cpu_has_hotplug(cpu))
		cpu_ops[cpu]->cpu_stop();
#endif

	for (;;)
		wait_for_interrupt();
}
#else
static inline void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
{
	unreachable();
}
#endif
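
/*
 * IPI delivery is pluggable: the platform backend (such as the SBI
 * firmware interface, or a direct CLINT driver on M-mode kernels)
 * registers its inject/clear callbacks here during early init.
 */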
static const struct riscv_ipi_ops *ipi_ops __ro_after_init;

void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops)
{
	ipi_ops = ops;
}
EXPORT_SYMBOL_GPL(riscv_set_ipi_ops);
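
/*
 * Acknowledge a pending IPI: let the backend clear its interrupt
 * source, then clear the supervisor software-interrupt pending bit.
 */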
void riscv_clear_ipi(void)
{
	if (ipi_ops && ipi_ops->ipi_clear)
		ipi_ops->ipi_clear();

	csr_clear(CSR_IP, IE_SIE);
}
EXPORT_SYMBOL_GPL(riscv_clear_ipi);
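
/*
 * The memory barriers pair with the xchg() in handle_IPI(): the message
 * bits must be visible to the target CPU before the interrupt fires.
 */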
static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
{
	int cpu;

	smp_mb__before_atomic();
	for_each_cpu(cpu, mask)
		set_bit(op, &ipi_data[cpu].bits);
	smp_mb__after_atomic();

	if (ipi_ops && ipi_ops->ipi_inject)
		ipi_ops->ipi_inject(mask);
	else
		pr_warn("SMP: IPI inject method not available\n");
}

static void send_ipi_single(int cpu, enum ipi_message_type op)
{
	smp_mb__before_atomic();
	set_bit(op, &ipi_data[cpu].bits);
	smp_mb__after_atomic();

	if (ipi_ops && ipi_ops->ipi_inject)
		ipi_ops->ipi_inject(cpumask_of(cpu));
	else
		pr_warn("SMP: IPI inject method not available\n");
}
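
/* Self-IPI so that irq_work callbacks run in hard interrupt context. */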
#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	send_ipi_single(smp_processor_id(), IPI_IRQ_WORK);
}
#endif
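
/*
 * IPI entry point, called from the software-interrupt handler. Pending
 * messages are consumed with an atomic exchange and processed until no
 * bits remain, since new IPIs may arrive while earlier ones are being
 * handled.
 */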
void handle_IPI(struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	unsigned long *pending_ipis = &ipi_data[cpu].bits;
	unsigned long *stats = ipi_data[cpu].stats;

	riscv_clear_ipi();

	while (true) {
		unsigned long ops;

		/* Order bit clearing and data access. */
		mb();

		ops = xchg(pending_ipis, 0);
		if (ops == 0)
			return;

		if (ops & (1 << IPI_RESCHEDULE)) {
			stats[IPI_RESCHEDULE]++;
			scheduler_ipi();
		}

		if (ops & (1 << IPI_CALL_FUNC)) {
			stats[IPI_CALL_FUNC]++;
			generic_smp_call_function_interrupt();
		}

		if (ops & (1 << IPI_CPU_STOP)) {
			stats[IPI_CPU_STOP]++;
			ipi_stop();
		}

		if (ops & (1 << IPI_CPU_CRASH_STOP)) {
			ipi_cpu_crash_stop(cpu, get_irq_regs());
		}

		if (ops & (1 << IPI_IRQ_WORK)) {
			stats[IPI_IRQ_WORK]++;
			irq_work_run();
		}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
		if (ops & (1 << IPI_TIMER)) {
			stats[IPI_TIMER]++;
			tick_receive_broadcast();
		}
#endif
		BUG_ON((ops >> IPI_MAX) != 0);

		/* Order data access and bit testing. */
		mb();
	}
}

static const char * const ipi_names[] = {
	[IPI_RESCHEDULE]	= "Rescheduling interrupts",
	[IPI_CALL_FUNC]		= "Function call interrupts",
	[IPI_CPU_STOP]		= "CPU stop interrupts",
	[IPI_CPU_CRASH_STOP]	= "CPU stop (for crash dump) interrupts",
	[IPI_IRQ_WORK]		= "IRQ work interrupts",
	[IPI_TIMER]		= "Timer broadcast interrupts",
};
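
/* Dump per-CPU IPI counts; backs the IPI lines in /proc/interrupts. */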
void show_ipi_stats(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < IPI_MAX; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10lu ", ipi_data[cpu].stats[i]);
		seq_printf(p, " %s\n", ipi_names[i]);
	}
}

void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
	send_ipi_mask(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_single(cpu, IPI_CALL_FUNC);
}
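
/*
 * Relay a timer tick to CPUs whose local clock event device is shut
 * down in a deep idle state.
 */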
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	send_ipi_mask(mask, IPI_TIMER);
}
#endif
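
/*
 * Stop all other CPUs, e.g. on reboot or panic: send IPI_CPU_STOP to
 * every other online CPU, then poll for up to one second for them to
 * drop out of the online mask.
 */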
void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_online_cpus() > 1) {
		cpumask_t mask;

		cpumask_copy(&mask, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &mask);

		if (system_state <= SYSTEM_RUNNING)
			pr_crit("SMP: stopping secondary CPUs\n");
		send_ipi_mask(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
			cpumask_pr_args(cpu_online_mask));
}

#ifdef CONFIG_KEXEC_CORE
/*
 * The number of CPUs online, not counting this CPU (which may not be
 * fully online and so not counted in num_online_cpus()).
 */
static inline unsigned int num_other_online_cpus(void)
{
	unsigned int this_cpu_online = cpu_online(smp_processor_id());

	return num_online_cpus() - this_cpu_online;
}
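
/*
 * Crash-dump variant of smp_send_stop(): secondaries save their
 * register state before parking, and completion is tracked with
 * waiting_for_crash_ipi rather than the online mask.
 */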
void crash_smp_send_stop(void)
{
	static int cpus_stopped;
	cpumask_t mask;
	unsigned long timeout;

	/*
	 * This function can be called twice in the panic path, but it must
	 * only take effect once.
	 */
	if (cpus_stopped)
		return;

	cpus_stopped = 1;

	/*
	 * If this CPU is the only one alive at this point in time, online or
	 * not, there are no stop messages to be sent around, so just back out.
	 */
	if (num_other_online_cpus() == 0)
		return;

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);

	atomic_set(&waiting_for_crash_ipi, num_other_online_cpus());

	pr_crit("SMP: stopping secondary CPUs\n");
	send_ipi_mask(&mask, IPI_CPU_CRASH_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while ((atomic_read(&waiting_for_crash_ipi) > 0) && timeout--)
		udelay(1);

	if (atomic_read(&waiting_for_crash_ipi) > 0)
		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
			cpumask_pr_args(&mask));
}

bool smp_crash_stop_failed(void)
{
	return (atomic_read(&waiting_for_crash_ipi) > 0);
}
#endif

void smp_send_reschedule(int cpu)
{
	send_ipi_single(cpu, IPI_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);