// SPDX-License-Identifier: GPL-2.0

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/irq_work.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/seq_file.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/sections.h>
#include <asm/mmu_context.h>

#ifdef CONFIG_CPU_HAS_FPU
#include <abi/fpu.h>
#endif
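
/*
 * IPI message types; each one is a bit position in the per-cpu
 * pending mask below.
 */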
enum ipi_message_type {
	IPI_EMPTY,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_IRQ_WORK,
	IPI_MAX
};
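
/*
 * Per-cpu IPI state. 'bits' is written by remote senders while 'stats'
 * is only touched by the owning CPU; ____cacheline_aligned keeps the
 * two on separate cache lines.
 */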
struct ipi_data_struct {
	unsigned long bits ____cacheline_aligned;
	unsigned long stats[IPI_MAX] ____cacheline_aligned;
};
static DEFINE_PER_CPU(struct ipi_data_struct, ipi_data);
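
/*
 * Per-cpu IPI handler: atomically fetch and clear the pending mask,
 * dispatch every message type found in it, and loop until no new
 * bits arrive.
 */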
static irqreturn_t handle_ipi(int irq, void *dev)
{
	unsigned long *stats = this_cpu_ptr(&ipi_data)->stats;

	while (true) {
		unsigned long ops;

		ops = xchg(&this_cpu_ptr(&ipi_data)->bits, 0);
		if (ops == 0)
			return IRQ_HANDLED;

		if (ops & (1 << IPI_RESCHEDULE)) {
			stats[IPI_RESCHEDULE]++;
			scheduler_ipi();
		}

		if (ops & (1 << IPI_CALL_FUNC)) {
			stats[IPI_CALL_FUNC]++;
			generic_smp_call_function_interrupt();
		}

		if (ops & (1 << IPI_IRQ_WORK)) {
			stats[IPI_IRQ_WORK]++;
			irq_work_run();
		}

		BUG_ON((ops >> IPI_MAX) != 0);
	}

	return IRQ_HANDLED;
}
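
/*
 * The platform's interrupt-controller driver registers the function
 * that actually triggers an IPI, plus the per-cpu IRQ number; only
 * the first registration takes effect.
 */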
static void (*send_arch_ipi)(const struct cpumask *mask);

static int ipi_irq;
void __init set_send_ipi(void (*func)(const struct cpumask *mask), int irq)
{
	if (send_arch_ipi)
		return;

	send_arch_ipi = func;
	ipi_irq = irq;
}
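
/*
 * Mark 'operation' pending on every target CPU, then fire the arch
 * IPI. The smp_mb() orders the set_bit() stores before the trigger
 * and pairs with the xchg() in handle_ipi().
 */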
static void
send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
	int i;

	for_each_cpu(i, to_whom)
		set_bit(operation, &per_cpu_ptr(&ipi_data, i)->bits);

	smp_mb();
	send_arch_ipi(to_whom);
}

static const char * const ipi_names[] = {
	[IPI_EMPTY]		= "Empty interrupts",
	[IPI_RESCHEDULE]	= "Rescheduling interrupts",
	[IPI_CALL_FUNC]		= "Function call interrupts",
	[IPI_IRQ_WORK]		= "Irq work interrupts",
};
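
/* Show the per-cpu IPI counters in the /proc/interrupts listing. */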
int arch_show_interrupts(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < IPI_MAX; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10lu ",
				   per_cpu_ptr(&ipi_data, cpu)->stats[i]);
		seq_printf(p, " %s\n", ipi_names[i]);
	}
	return 0;
}

void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}

static void ipi_stop(void *unused)
{
	while (1);
}

void smp_send_stop(void)
{
	on_each_cpu(ipi_stop, NULL, 1);
}

void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	send_ipi_message(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

void __init smp_prepare_boot_cpu(void)
{
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
}

static int ipi_dummy_dev;
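
/*
 * Hook the registered per-cpu IPI interrupt up to handle_ipi();
 * ipi_dummy_dev merely supplies the non-NULL dev_id token that
 * request_percpu_irq() expects.
 */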
void __init setup_smp_ipi(void)
{
	int rc;

	if (ipi_irq == 0)
		return;

	rc = request_percpu_irq(ipi_irq, handle_ipi, "IPI Interrupt",
				&ipi_dummy_dev);
	if (rc)
		panic("%s IRQ request failed\n", __func__);

	enable_percpu_irq(ipi_irq, 0);
}
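
/*
 * Walk the devicetree cpu nodes and mark each available CPU whose
 * hwid fits below NR_CPUS as possible and present.
 */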
void __init setup_smp(void)
{
	struct device_node *node = NULL;
	unsigned int cpu;

	for_each_of_cpu_node(node) {
		if (!of_device_is_available(node))
			continue;

		cpu = of_get_cpu_hwid(node, 0);
		if (cpu >= NR_CPUS)
			continue;

		set_cpu_possible(cpu, true);
		set_cpu_present(cpu, true);
	}
}
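
/*
 * Boot-parameter mailbox for secondary CPUs: the boot CPU fills these
 * in and flushes them to memory before releasing a secondary, which
 * reads them on its way into csky_start_secondary().
 */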
extern void _start_smp_secondary(void);

volatile unsigned int secondary_hint;
volatile unsigned int secondary_hint2;
volatile unsigned int secondary_ccr;
volatile unsigned int secondary_stack;
volatile unsigned int secondary_msa1;
volatile unsigned int secondary_pgd;
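
/*
 * Boot one secondary CPU: stage its stack and control-register values
 * in the mailbox above, then either kick it with an IPI (if it is
 * already out of reset) or release it from reset, and spin until it
 * marks itself online.
 */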
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	unsigned long mask = 1 << cpu;

	secondary_stack =
		(unsigned int) task_stack_page(tidle) + THREAD_SIZE - 8;
	secondary_hint = mfcr("cr31");
	secondary_hint2 = mfcr("cr<21, 1>");
	secondary_ccr = mfcr("cr18");
	secondary_msa1 = read_mmu_msa1();
	secondary_pgd = mfcr("cr<29, 15>");

	/*
	 * Because the other CPUs are still held in reset, flush this
	 * data out of the cache so the secondary CPUs can use it in
	 * csky_start_secondary().
	 */
	mtcr("cr17", 0x22);

	if (mask & mfcr("cr<29, 0>")) {
		send_arch_ipi(cpumask_of(cpu));
	} else {
		/* Enable cpu in SMP reset ctrl reg */
		mask |= mfcr("cr<29, 0>");
		mtcr("cr<29, 0>", mask);
	}

	/* Wait for the cpu to come online */
	while (!cpu_online(cpu));

	secondary_stack = 0;

	return 0;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}
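
/*
 * C entry point of a secondary CPU: restore the control registers
 * staged by __cpu_up(), set up MMU/FPU state, adopt init_mm, and go
 * online before entering the idle loop.
 */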
void csky_start_secondary(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	mtcr("cr31", secondary_hint);
	mtcr("cr<21, 1>", secondary_hint2);
	mtcr("cr18", secondary_ccr);
	mtcr("vbr", vec_base);

	flush_tlb_all();
	write_mmu_pagemask(0);

#ifdef CONFIG_CPU_HAS_FPU
	init_fpu();
#endif

	enable_percpu_irq(ipi_irq, 0);

	mmget(mm);
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);

	pr_info("CPU%u Online: %s...\n", cpu, __func__);

	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

#ifdef CONFIG_HOTPLUG_CPU
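/*
 * Shut down the calling CPU: take it offline, migrate its IRQs away
 * and drop it from every task's mm cpumask.
 */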
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	set_cpu_online(cpu, false);

	irq_migrate_all_off_this_cpu();

	clear_tasks_mm_cpumask(cpu);

	return 0;
}
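
/*
 * Runs on the CPU requesting the unplug; waits via cpu_wait_death()
 * for the dying CPU to report its death before declaring shutdown
 * complete.
 */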
void __cpu_die(unsigned int cpu)
{
	if (!cpu_wait_death(cpu, 5)) {
		pr_crit("CPU%u: shutdown failed\n", cpu);
		return;
	}
	pr_notice("CPU%u: shutdown\n", cpu);
}
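
/*
 * Final resting loop of an offlined CPU: report death, idle until
 * __cpu_up() publishes a fresh secondary_stack, then restart through
 * csky_start_secondary() on that stack.
 */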
void arch_cpu_idle_dead(void)
{
	idle_task_exit();

	cpu_report_death();

	while (!secondary_stack)
		arch_cpu_idle();

	local_irq_disable();

	asm volatile(
		"mov	sp, %0\n"
		"mov	r8, %0\n"
		"jmpi	csky_start_secondary"
		:
		: "r" (secondary_stack));
}
#endif