smpboot.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * SMP initialisation and IPI support
 * Based on arch/arm64/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/arch_topology.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/mm.h>
#include <asm/cpu_ops.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/sbi.h>
#include <asm/smp.h>

#include "head.h"
static DECLARE_COMPLETION(cpu_running);
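
/* The boot CPU needs no extra preparation here, so this hook is a no-op. */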
void __init smp_prepare_boot_cpu(void)
{
}
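
/*
 * Record the boot CPU's topology and NUMA information, then let each
 * remaining possible CPU prepare itself via its cpu_ops; CPUs that
 * prepare successfully are marked present.
 */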
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int cpuid;
	int ret;
	unsigned int curr_cpuid;

	init_cpu_topology();

	curr_cpuid = smp_processor_id();
	store_cpu_topology(curr_cpuid);
	numa_store_cpu_info(curr_cpuid);
	numa_add_cpu(curr_cpuid);

	/* This covers the non-SMP use case mandated by the "nosmp" option */
	if (max_cpus == 0)
		return;

	for_each_possible_cpu(cpuid) {
		if (cpuid == curr_cpuid)
			continue;
		if (cpu_ops[cpuid]->cpu_prepare) {
			ret = cpu_ops[cpuid]->cpu_prepare(cpuid);
			if (ret)
				continue;
		}
		set_cpu_present(cpuid, true);
		numa_store_cpu_info(cpuid);
	}
}
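
/*
 * Walk the devicetree cpu nodes: the boot hart keeps logical cpuid 0,
 * every other hart with a valid hartid gets the next free cpuid and has
 * its NUMA node recorded, and is then marked possible with cpu_ops set.
 */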
void __init setup_smp(void)
{
	struct device_node *dn;
	unsigned long hart;
	bool found_boot_cpu = false;
	int cpuid = 1;
	int rc;

	cpu_set_ops(0);

	for_each_of_cpu_node(dn) {
		rc = riscv_of_processor_hartid(dn, &hart);
		if (rc < 0)
			continue;

		if (hart == cpuid_to_hartid_map(0)) {
			BUG_ON(found_boot_cpu);
			found_boot_cpu = true;
			early_map_cpu_to_node(0, of_node_to_nid(dn));
			continue;
		}
		if (cpuid >= NR_CPUS) {
			pr_warn("Invalid cpuid [%d] for hartid [%lu]\n",
				cpuid, hart);
			continue;
		}

		cpuid_to_hartid_map(cpuid) = hart;
		early_map_cpu_to_node(cpuid, of_node_to_nid(dn));
		cpuid++;
	}

	BUG_ON(!found_boot_cpu);

	if (cpuid > nr_cpu_ids)
		pr_warn("Total number of cpus [%d] is greater than nr_cpus option value [%d]\n",
			cpuid, nr_cpu_ids);

	for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) {
		if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID) {
			cpu_set_ops(cpuid);
			set_cpu_possible(cpuid, true);
		}
	}
}
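
/*
 * Kick a secondary CPU through its cpu_ops start method (typically the
 * SBI HSM extension, or the older spinwait method).
 */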
static int start_secondary_cpu(int cpu, struct task_struct *tidle)
{
	if (cpu_ops[cpu]->cpu_start)
		return cpu_ops[cpu]->cpu_start(cpu, tidle);

	return -EOPNOTSUPP;
}
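
/*
 * Called by the CPU hotplug core to bring a secondary CPU online: start
 * the hart and wait for it to report in from smp_callin().
 */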
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int ret = 0;

	tidle->thread_info.cpu = cpu;

	ret = start_secondary_cpu(cpu, tidle);
	if (!ret) {
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_crit("CPU%u: failed to start\n", cpu);
	}

	return ret;
}
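
/* All secondaries are up; nothing further to do here. */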
void __init smp_cpus_done(unsigned int max_cpus)
{
}

/*
 * C entry point for a secondary processor.
 */
asmlinkage __visible void smp_callin(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int curr_cpuid = smp_processor_id();

	riscv_clear_ipi();

	/* All kernel threads share the same mm context. */
	mmgrab(mm);
	current->active_mm = mm;

	store_cpu_topology(curr_cpuid);
	notify_cpu_starting(curr_cpuid);
	numa_add_cpu(curr_cpuid);
	set_cpu_online(curr_cpuid, true);

	/*
	 * Remote TLB flushes are ignored while the CPU is offline, so emit
	 * a local TLB flush right now just in case.
	 */
	local_flush_tlb_all();

	complete(&cpu_running);

	/*
	 * Disable preemption before enabling interrupts, so we don't try to
	 * schedule a CPU that hasn't actually started yet.
	 */
	local_irq_enable();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}