platsmp.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2002 ARM Ltd.
 * All Rights Reserved
 *
 * This code is specific to the hardware found on ARM Realview and
 * Versatile Express platforms where the CPUs are unable to be individually
 * woken, and where there is no way to hot-unplug CPUs. Real platforms
 * should not copy this code.
 */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/smp.h>

#include <asm/cacheflush.h>
#include <asm/smp_plat.h>

#include "platsmp.h"

/*
 * versatile_cpu_release controls the release of CPUs from the holding
 * pen in headsmp.S, which exists because we are not always able to
 * control the release of individual CPUs from the board firmware.
 * Production platforms do not need this.
 */
volatile int versatile_cpu_release = -1;

/*
 * Write versatile_cpu_release in a way that is guaranteed to be visible to
 * all observers, irrespective of whether they're taking part in coherency
 * or not. This is necessary for the hotplug code to work reliably.
 */
static void versatile_write_cpu_release(int val)
{
	versatile_cpu_release = val;
	/* Order the store above before the cache maintenance below. */
	smp_wmb();
	/*
	 * Clean the cacheline holding the value so that secondary CPUs
	 * polling it from the holding pen with their caches still
	 * disabled observe the update.
	 */
	sync_cache_w(&versatile_cpu_release);
}

/*
 * versatile_lock exists to avoid running the loops_per_jiffy delay loop
 * calibrations on the secondary CPU while the requesting CPU is using
 * the limited-bandwidth bus - which affects the calibration value.
 * Production platforms do not need this.
 */
static DEFINE_RAW_SPINLOCK(versatile_lock);

void versatile_secondary_init(unsigned int cpu)
{
	/*
	 * let the primary processor know we're out of the
	 * pen, then head off into the C entry point
	 */
	versatile_write_cpu_release(-1);

	/*
	 * Synchronise with the boot thread.
	 */
	raw_spin_lock(&versatile_lock);
	raw_spin_unlock(&versatile_lock);
}

int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one.
	 */
	raw_spin_lock(&versatile_lock);

	/*
	 * This is really belt and braces; we hold unintended secondary
	 * CPUs in the holding pen until we're ready for them. However,
	 * since we haven't sent them a soft interrupt, they shouldn't
	 * be there.
	 */
	versatile_write_cpu_release(cpu_logical_map(cpu));

	/*
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */
	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
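
	/*
	 * Wait up to one second for the secondary CPU to signal that it
	 * has left the holding pen, which it does by writing -1 back to
	 * versatile_cpu_release in versatile_secondary_init().
	 */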
	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (versatile_cpu_release == -1)
			break;

		udelay(10);
	}

	/*
	 * Now the secondary core is starting up, let it run its
	 * calibrations, then wait for it to finish.
	 */
	raw_spin_unlock(&versatile_lock);

	/*
	 * If the release value was never cleared, the secondary CPU
	 * failed to leave the holding pen within the timeout.
	 */
	return versatile_cpu_release != -1 ? -ENOSYS : 0;
}
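
/*
 * Usage sketch (not part of the original file): these hooks are normally
 * wired into a machine port through struct smp_operations, as the RealView
 * and Versatile Express ports do. The "foo" names and the "vendor,foo-smp"
 * enable-method string below are hypothetical; a real port would also
 * install the physical address of the secondary entry point in its
 * smp_prepare_cpus hook so the boot monitor knows where to branch.
 */
static void __init foo_smp_prepare_cpus(unsigned int max_cpus)
{
	/*
	 * Assumption: platform-specific code would write the secondary
	 * startup address to the system-wide flags register here.
	 */
}

static const struct smp_operations foo_smp_ops __initconst = {
	.smp_prepare_cpus	= foo_smp_prepare_cpus,
	.smp_secondary_init	= versatile_secondary_init,
	.smp_boot_secondary	= versatile_boot_secondary,
};
CPU_METHOD_OF_DECLARE(foo_smp, "vendor,foo-smp", &foo_smp_ops);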