// SPDX-License-Identifier: GPL-2.0-only
/*
 * SMP support for Hexagon
 *
 * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
 */
  7. #include <linux/err.h>
  8. #include <linux/errno.h>
  9. #include <linux/kernel.h>
  10. #include <linux/init.h>
  11. #include <linux/interrupt.h>
  12. #include <linux/module.h>
  13. #include <linux/percpu.h>
  14. #include <linux/sched/mm.h>
  15. #include <linux/smp.h>
  16. #include <linux/spinlock.h>
  17. #include <linux/cpu.h>
  18. #include <linux/mm_types.h>
  19. #include <asm/time.h> /* timer_interrupt */
  20. #include <asm/hexagon_vm.h>
  21. #define BASE_IPI_IRQ 26
/*
 * cpu_possible_mask needs to be filled out prior to setup_per_cpu_areas
 * (which is prior to any of our smp_prepare_cpu crap), in order to set
 * up the... per_cpu areas.
 */
/*
 * Per-CPU pending-IPI bitmask.  Each set bit index is an
 * enum ipi_message_type value; bits are set by send_ipi() and
 * atomically drained (xchg to 0) by handle_ipi().
 */
struct ipi_data {
	unsigned long bits;
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data);
/*
 * Dispatch every IPI message encoded in *ops (a drained snapshot of a
 * CPU's ipi_data bitmask).  Scans set bits in ascending order and runs
 * the handler for each recognized message; unrecognized bits are
 * silently skipped.
 *
 * NOTE(review): the scan starts at msg+1 with msg == 0, so bit 0 is
 * never examined — presumably message value 0 is a NOP; confirm
 * against enum ipi_message_type.
 */
static inline void __handle_ipi(unsigned long *ops, struct ipi_data *ipi,
int cpu)
{
	unsigned long msg = 0;
	do {
		/* Find the next pending message strictly above the last one. */
		msg = find_next_bit(ops, BITS_PER_LONG, msg+1);
		switch (msg) {
		case IPI_TIMER:
			ipi_timer();
			break;
		case IPI_CALL_FUNC:
			generic_smp_call_function_interrupt();
			break;
		case IPI_CPU_STOP:
			/*
			 * call vmstop()
			 */
			__vmstop();
			break;
		case IPI_RESCHEDULE:
			scheduler_ipi();
			break;
		}
	} while (msg < BITS_PER_LONG); /* find_next_bit returns BITS_PER_LONG when empty */
}
  56. /* Used for IPI call from other CPU's to unmask int */
  57. void smp_vm_unmask_irq(void *info)
  58. {
  59. __vmintop_locen((long) info);
  60. }
  61. /*
  62. * This is based on Alpha's IPI stuff.
  63. * Supposed to take (int, void*) as args now.
  64. * Specifically, first arg is irq, second is the irq_desc.
  65. */
  66. irqreturn_t handle_ipi(int irq, void *desc)
  67. {
  68. int cpu = smp_processor_id();
  69. struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
  70. unsigned long ops;
  71. while ((ops = xchg(&ipi->bits, 0)) != 0)
  72. __handle_ipi(&ops, ipi, cpu);
  73. return IRQ_HANDLED;
  74. }
  75. void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
  76. {
  77. unsigned long flags;
  78. unsigned long cpu;
  79. unsigned long retval;
  80. local_irq_save(flags);
  81. for_each_cpu(cpu, cpumask) {
  82. struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
  83. set_bit(msg, &ipi->bits);
  84. /* Possible barrier here */
  85. retval = __vmintop_post(BASE_IPI_IRQ+cpu);
  86. if (retval != 0) {
  87. printk(KERN_ERR "interrupt %ld not configured?\n",
  88. BASE_IPI_IRQ+cpu);
  89. }
  90. }
  91. local_irq_restore(flags);
  92. }
/* Nothing to do for the boot CPU on Hexagon; required arch hook. */
void __init smp_prepare_boot_cpu(void)
{
}
/*
 * Entry point for a secondary CPU started via __vmstart().
 *
 * interrupts should already be disabled from the VM
 * SP should already be correct; need to set THREADINFO_REG
 * to point to current thread info
 *
 * Sequence: derive thread_info from SP, install it in the dedicated
 * register, adopt init_mm, claim this CPU's IPI line, register the
 * per-CPU clock device, then mark the CPU online and enter the idle
 * loop.  Ordering matters: notify_cpu_starting() before
 * set_cpu_online(), local_irq_enable() only after both.
 */
void start_secondary(void)
{
	unsigned long thread_ptr;
	unsigned int cpu, irq;

	/* Calculate thread_info pointer from stack pointer */
	__asm__ __volatile__(
		"%0 = SP;\n"
		: "=r" (thread_ptr)
	);

	/* thread_info sits at the base of the THREAD_SIZE-aligned stack */
	thread_ptr = thread_ptr & ~(THREAD_SIZE-1);

	__asm__ __volatile__(
		QUOTED_THREADINFO_REG " = %0;\n"
		:
		: "r" (thread_ptr)
	);

	/* Set the memory struct */
	mmgrab(&init_mm);
	current->active_mm = &init_mm;

	cpu = smp_processor_id();
	/* Each CPU owns its private IPI line: BASE_IPI_IRQ + cpu */
	irq = BASE_IPI_IRQ + cpu;
	if (request_irq(irq, handle_ipi, IRQF_TRIGGER_RISING, "ipi_handler",
		NULL))
		pr_err("Failed to request irq %u (ipi_handler)\n", irq);

	/* Register the clock_event dummy */
	setup_percpu_clockdev();

	printk(KERN_INFO "%s cpu %d\n", __func__, current_thread_info()->cpu);

	notify_cpu_starting(cpu);

	set_cpu_online(cpu, true);
	local_irq_enable();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
  132. /*
  133. * called once for each present cpu
  134. * apparently starts up the CPU and then
  135. * maintains control until "cpu_online(cpu)" is set.
  136. */
  137. int __cpu_up(unsigned int cpu, struct task_struct *idle)
  138. {
  139. struct thread_info *thread = (struct thread_info *)idle->stack;
  140. void *stack_start;
  141. thread->cpu = cpu;
  142. /* Boot to the head. */
  143. stack_start = ((void *) thread) + THREAD_SIZE;
  144. __vmstart(start_secondary, stack_start);
  145. while (!cpu_online(cpu))
  146. barrier();
  147. return 0;
  148. }
/* All secondaries are up; nothing further to do on Hexagon. */
void __init smp_cpus_done(unsigned int max_cpus)
{
}
  152. void __init smp_prepare_cpus(unsigned int max_cpus)
  153. {
  154. int i, irq = BASE_IPI_IRQ;
  155. /*
  156. * should eventually have some sort of machine
  157. * descriptor that has this stuff
  158. */
  159. /* Right now, let's just fake it. */
  160. for (i = 0; i < max_cpus; i++)
  161. set_cpu_present(i, true);
  162. /* Also need to register the interrupts for IPI */
  163. if (max_cpus > 1) {
  164. if (request_irq(irq, handle_ipi, IRQF_TRIGGER_RISING,
  165. "ipi_handler", NULL))
  166. pr_err("Failed to request irq %d (ipi_handler)\n", irq);
  167. }
  168. }
  169. void smp_send_reschedule(int cpu)
  170. {
  171. send_ipi(cpumask_of(cpu), IPI_RESCHEDULE);
  172. }
  173. void smp_send_stop(void)
  174. {
  175. struct cpumask targets;
  176. cpumask_copy(&targets, cpu_online_mask);
  177. cpumask_clear_cpu(smp_processor_id(), &targets);
  178. send_ipi(&targets, IPI_CPU_STOP);
  179. }
  180. void arch_send_call_function_single_ipi(int cpu)
  181. {
  182. send_ipi(cpumask_of(cpu), IPI_CALL_FUNC);
  183. }
  184. void arch_send_call_function_ipi_mask(const struct cpumask *mask)
  185. {
  186. send_ipi(mask, IPI_CALL_FUNC);
  187. }
  188. void smp_start_cpus(void)
  189. {
  190. int i;
  191. for (i = 0; i < NR_CPUS; i++)
  192. set_cpu_possible(i, true);
  193. }