smp.c

/*
 * Copyright (C) 2014 Stefan Kristiansson <[email protected]>
 * Copyright (C) 2017 Stafford Horne <[email protected]>
 *
 * Based on arm64 and arc implementations
 * Copyright (C) 2013 ARM Ltd.
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2.  This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <asm/cpuinfo.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/time.h>

static void (*smp_cross_call)(const struct cpumask *, unsigned int);

unsigned long secondary_release = -1;
struct thread_info *secondary_thread_info;

enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
};

static DEFINE_SPINLOCK(boot_lock);
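
/*
 * Release one secondary CPU: publish its CPU number in secondary_release
 * (which the secondary's early start-up code is presumed to poll) and kick
 * it with an IPI_WAKEUP.  boot_lock serialises bring-up so only one
 * secondary is released at a time.
 */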
static void boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	secondary_release = cpu;
	smp_cross_call(cpumask_of(cpu), IPI_WAKEUP);

	/*
	 * now the secondary core is starting up, let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);
}

void __init smp_prepare_boot_cpu(void)
{
}
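
/*
 * Walk the device tree "cpu" nodes and mark every hardware CPU id that
 * fits below NR_CPUS as possible.
 */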
void __init smp_init_cpus(void)
{
	struct device_node *cpu;
	u32 cpu_id;

	for_each_of_cpu_node(cpu) {
		cpu_id = of_get_cpu_hwid(cpu, 0);
		if (cpu_id < NR_CPUS)
			set_cpu_possible(cpu_id, true);
	}
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for_each_possible_cpu(cpu) {
		if (cpu < max_cpus)
			set_cpu_present(cpu, true);
	}
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

static DECLARE_COMPLETION(cpu_running);
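
/*
 * Bring one secondary CPU online.  The secondary signals cpu_running from
 * secondary_start_kernel(); if it does not do so within one second the
 * bring-up is reported as failed.  Counter synchronisation with the new
 * CPU is done once it is up.
 */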
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	if (smp_cross_call == NULL) {
		pr_warn("CPU%u: failed to start, IPI controller missing",
			cpu);
		return -EIO;
	}

	secondary_thread_info = task_thread_info(idle);
	current_pgd[cpu] = init_mm.pgd;

	boot_secondary(cpu, idle);
	if (!wait_for_completion_timeout(&cpu_running,
					msecs_to_jiffies(1000))) {
		pr_crit("CPU%u: failed to start\n", cpu);
		return -EIO;
	}
	synchronise_count_master(cpu);

	return 0;
}
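
/*
 * Entry point for a secondary CPU once the low-level start-up code hands
 * over: runs on the init_mm address space, initialises per-CPU state and
 * clock events, then tells the boot CPU it is running before entering the
 * idle loop.
 */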
asmlinkage __init void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	pr_info("CPU%u: Booted secondary processor\n", cpu);

	setup_cpuinfo();
	openrisc_clockevent_init();

	notify_cpu_starting(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue
	 */
	complete(&cpu_running);

	synchronise_count_slave(cpu);
	set_cpu_online(cpu, true);

	local_irq_enable();
	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
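
/*
 * Demultiplex an incoming IPI.  IPI_WAKEUP needs no handling here; the
 * interrupt itself is presumed sufficient to bring the target CPU out of
 * its wait state.
 */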
void handle_IPI(unsigned int ipi_msg)
{
	unsigned int cpu = smp_processor_id();

	switch (ipi_msg) {
	case IPI_WAKEUP:
		break;

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;

	case IPI_CALL_FUNC_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;

	default:
		WARN(1, "CPU%u: Unknown IPI message 0x%x\n", cpu, ipi_msg);
		break;
	}
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}
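
/*
 * Stop handler used by smp_send_stop(): mark the CPU offline, disable
 * interrupts and, if the power management unit is present, put the core
 * into doze mode; otherwise spin forever.
 */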
static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	/* CPU Doze */
	if (mfspr(SPR_UPR) & SPR_UPR_PMP)
		mtspr(SPR_PMR, mfspr(SPR_PMR) | SPR_PMR_DME);
	/* If that didn't work, infinite loop */
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}
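
/*
 * Register the cross-call function used to raise IPIs, presumably from the
 * platform's IPI/interrupt controller driver; __cpu_up() refuses to start
 * secondaries until this has been set.
 */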
void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	smp_cross_call = fn;
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

/* TLB flush operations - Performed on each CPU */

static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *info)
{
	struct mm_struct *mm = (struct mm_struct *)info;

	local_flush_tlb_mm(mm);
}
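
/*
 * Flush the TLB entries for an mm on every CPU in cmask.  If the local CPU
 * is the only one in the mask the flush is done directly, avoiding the
 * cost of an IPI broadcast.
 */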
static void smp_flush_tlb_mm(struct cpumask *cmask, struct mm_struct *mm)
{
	unsigned int cpuid;

	if (cpumask_empty(cmask))
		return;

	cpuid = get_cpu();

	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
		/* local cpu is the only cpu present in cpumask */
		local_flush_tlb_mm(mm);
	} else {
		on_each_cpu_mask(cmask, ipi_flush_tlb_mm, mm, 1);
	}
	put_cpu();
}

struct flush_tlb_data {
	unsigned long addr1;
	unsigned long addr2;
};

static inline void ipi_flush_tlb_page(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(NULL, fd->addr1);
}

static inline void ipi_flush_tlb_range(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(NULL, fd->addr1, fd->addr2);
}
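
/*
 * Flush a virtual address range on every CPU in cmask.  A range that fits
 * in a single page is downgraded to a page flush; as above, an IPI is only
 * sent when CPUs other than the local one are in the mask.
 */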
static void smp_flush_tlb_range(const struct cpumask *cmask, unsigned long start,
				unsigned long end)
{
	unsigned int cpuid;

	if (cpumask_empty(cmask))
		return;

	cpuid = get_cpu();

	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
		/* local cpu is the only cpu present in cpumask */
		if ((end - start) <= PAGE_SIZE)
			local_flush_tlb_page(NULL, start);
		else
			local_flush_tlb_range(NULL, start, end);
	} else {
		struct flush_tlb_data fd;

		fd.addr1 = start;
		fd.addr2 = end;

		if ((end - start) <= PAGE_SIZE)
			on_each_cpu_mask(cmask, ipi_flush_tlb_page, &fd, 1);
		else
			on_each_cpu_mask(cmask, ipi_flush_tlb_range, &fd, 1);
	}
	put_cpu();
}
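
/*
 * Generic flush_tlb_* entry points used by the core MM code; each one
 * targets only the CPUs that have run the mm in question, except
 * flush_tlb_all() and the vma-less flush_tlb_range() case, which go to
 * all online CPUs.
 */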
void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	smp_flush_tlb_mm(mm_cpumask(mm), mm);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	smp_flush_tlb_range(mm_cpumask(vma->vm_mm), uaddr, uaddr + PAGE_SIZE);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	const struct cpumask *cmask = vma ? mm_cpumask(vma->vm_mm)
					  : cpu_online_mask;

	smp_flush_tlb_range(cmask, start, end);
}

/* Instruction cache invalidate - performed on each cpu */
static void ipi_icache_page_inv(void *arg)
{
	struct page *page = arg;

	local_icache_page_inv(page);
}
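
/*
 * Invalidate the instruction cache lines for a page on every online CPU.
 * Exported so that modules can use it as well.
 */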
void smp_icache_page_inv(struct page *page)
{
	on_each_cpu(ipi_icache_page_inv, page, 1);
}
EXPORT_SYMBOL(smp_icache_page_inv);