crash.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Architecture specific (PPC64) functions for kexec based crash dumps.
 *
 * Copyright (C) 2005, IBM Corp.
 *
 * Created by: Haren Myneni
 */

#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/export.h>
#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/types.h>

#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/kexec.h>
#include <asm/smp.h>
#include <asm/setjmp.h>
#include <asm/debug.h>
#include <asm/interrupt.h>

/*
 * The primary CPU waits a while for all secondary CPUs to enter. This is to
 * avoid sending an IPI if the secondary CPUs are entering
 * crash_kexec_secondary on their own (e.g. via a system reset).
 *
 * The secondary timeout has to be longer than the primary. Both timeouts are
 * in milliseconds.
 */
#define PRIMARY_TIMEOUT		500
#define SECONDARY_TIMEOUT	1000

#define IPI_TIMEOUT		10000
#define REAL_MODE_TIMEOUT	10000

static int time_to_dump;

/*
 * In case of system reset, secondary CPUs enter crash_kexec_secondary on
 * their own, without an IPI having to be sent. So, record whether the crash
 * came in via system reset to avoid sending a redundant IPI.
 */
static int is_via_system_reset;

/*
 * crash_wake_offline should be set to 1 by platforms that intend to wake
 * up offline cpus prior to jumping to a kdump kernel. Currently powernv
 * sets it to 1, since we want to avoid unexpected behaviour when an
 * offline CPU wakes up due to something like an HMI (malfunction error),
 * which propagates to all threads.
 */
int crash_wake_offline;

#define CRASH_HANDLER_MAX 3
/* List of shutdown handles */
static crash_shutdown_t crash_shutdown_handles[CRASH_HANDLER_MAX];
static DEFINE_SPINLOCK(crash_handlers_lock);

static unsigned long crash_shutdown_buf[JMP_BUF_LEN];
static int crash_shutdown_cpu = -1;
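
/*
 * Temporary debugger fault hook, active while crash code runs with
 * setjmp() armed: if a fault hits the CPU driving the crash, longjmp()
 * back out so the dump can still make progress.
 */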
static int handle_fault(struct pt_regs *regs)
{
	if (crash_shutdown_cpu == smp_processor_id())
		longjmp(crash_shutdown_buf, 1);
	return 0;
}

#ifdef CONFIG_SMP

static atomic_t cpus_in_crash;
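
/*
 * Entered by every other CPU on the way into the crash path, whether via
 * the crash IPI or a system reset: save this CPU's register state once,
 * mark it as arrived, then spin until the crashing CPU starts the dump.
 */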
void crash_ipi_callback(struct pt_regs *regs)
{
	static cpumask_t cpus_state_saved = CPU_MASK_NONE;

	int cpu = smp_processor_id();

	hard_irq_disable();
	if (!cpumask_test_cpu(cpu, &cpus_state_saved)) {
		crash_save_cpu(regs, cpu);
		cpumask_set_cpu(cpu, &cpus_state_saved);
	}

	atomic_inc(&cpus_in_crash);
	smp_mb__after_atomic();

	/*
	 * Spin until the crashing CPU signals that the kdump boot is
	 * starting; this makes sure all CPUs are stopped before the dump.
	 */
	while (!time_to_dump)
		cpu_relax();

	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(1, 1);

#ifdef CONFIG_PPC64
	kexec_smp_wait();
#else
	for (;;);	/* FIXME */
#endif

	/* NOTREACHED */
}
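
/*
 * Runs on the crashing CPU to bring all other CPUs into
 * crash_ipi_callback(): first by IPI, and as a fallback by asking the
 * operator to trigger a system reset.
 */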
static void crash_kexec_prepare_cpus(void)
{
	unsigned int msecs;
	volatile unsigned int ncpus = num_online_cpus() - 1;	/* Excluding the panic cpu */
	volatile int tries = 0;
	int (*old_handler)(struct pt_regs *regs);

	printk(KERN_EMERG "Sending IPI to other CPUs\n");

	if (crash_wake_offline)
		ncpus = num_present_cpus() - 1;

	/*
	 * If we came in via system reset, the secondaries enter
	 * crash_kexec_secondary() on their own, so just wait a while for
	 * them in that case. Otherwise, send an IPI to all other CPUs.
	 */
	if (is_via_system_reset)
		mdelay(PRIMARY_TIMEOUT);
	else
		crash_send_ipi(crash_ipi_callback);

	smp_wmb();

again:
	/*
	 * FIXME: Until we have a way to stop other CPUs reliably, the
	 * crash CPU sends an IPI and waits for the other CPUs to respond.
	 */
	msecs = IPI_TIMEOUT;
	while ((atomic_read(&cpus_in_crash) < ncpus) && (--msecs > 0))
		mdelay(1);

	/* Would it be better to replace the trap vector here? */

	if (atomic_read(&cpus_in_crash) >= ncpus) {
		printk(KERN_EMERG "IPI complete\n");
		return;
	}

	printk(KERN_EMERG "ERROR: %d cpu(s) not responding\n",
		ncpus - atomic_read(&cpus_in_crash));

	/*
	 * If we have a panic timeout set then we can't wait indefinitely
	 * for someone to activate system reset. We also give up on the
	 * second time through if system reset fails to work.
	 */
	if ((panic_timeout > 0) || (tries > 0))
		return;

	/*
	 * A system reset will cause all CPUs to take an 0x100 exception.
	 * The primary CPU returns here via setjmp, and the secondary
	 * CPUs re-execute the crash_kexec_secondary path.
	 */
	old_handler = __debugger;
	__debugger = handle_fault;
	crash_shutdown_cpu = smp_processor_id();

	if (setjmp(crash_shutdown_buf) == 0) {
		printk(KERN_EMERG "Activate system reset (dumprestart) to stop other cpu(s)\n");

		/*
		 * A system reset will force all CPUs to execute the
		 * crash code again. We need to reset cpus_in_crash so we
		 * wait for everyone to do this.
		 */
		atomic_set(&cpus_in_crash, 0);
		smp_mb();

		while (atomic_read(&cpus_in_crash) < ncpus)
			cpu_relax();
	}

	crash_shutdown_cpu = -1;
	__debugger = old_handler;

	tries++;
	goto again;
}

/*
 * This function will be called by secondary CPUs, either in response to
 * the crash IPI or directly from a system reset exception.
 */
void crash_kexec_secondary(struct pt_regs *regs)
{
	unsigned long flags;
	int msecs = SECONDARY_TIMEOUT;

	local_irq_save(flags);

	/* Wait for the primary crash CPU to signal its progress */
	while (crashing_cpu < 0) {
		if (--msecs < 0) {
			/* No response, kdump image may not have been loaded */
			local_irq_restore(flags);
			return;
		}

		mdelay(1);
	}

	crash_ipi_callback(regs);
}

#else	/* ! CONFIG_SMP */

static void crash_kexec_prepare_cpus(void)
{
	/*
	 * move the secondaries to us so that we can copy
	 * the new kernel 0-0x100 safely
	 *
	 * do this if kexec in setup.c ?
	 */
#ifdef CONFIG_PPC64
	smp_release_cpus();
#else
	/* FIXME */
#endif
}

void crash_kexec_secondary(struct pt_regs *regs)
{
}

#endif	/* CONFIG_SMP */

/* wait for all the CPUs to hit real mode but timeout if they don't come in */
#if defined(CONFIG_SMP) && defined(CONFIG_PPC64)
static void __maybe_unused crash_kexec_wait_realmode(int cpu)
{
	unsigned int msecs;
	int i;

	msecs = REAL_MODE_TIMEOUT;
	for (i = 0; i < nr_cpu_ids && msecs > 0; i++) {
		if (i == cpu)
			continue;

		while (paca_ptrs[i]->kexec_state < KEXEC_STATE_REAL_MODE) {
			barrier();
			if (!cpu_possible(i) || !cpu_online(i) || (msecs <= 0))
				break;
			msecs--;
			mdelay(1);
		}
	}
	mb();
}
#else
static inline void crash_kexec_wait_realmode(int cpu) {}
#endif	/* CONFIG_SMP && CONFIG_PPC64 */
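
/*
 * Entry point on the crashing CPU for the prepare phase: note which CPU
 * is crashing, disable interrupts, and round up the other CPUs.
 */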
void crash_kexec_prepare(void)
{
	/* Avoid hardlocking with an unresponsive CPU holding logbuf_lock */
	printk_deferred_enter();

	/*
	 * This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means stopping other cpus in
	 * an SMP system.
	 * The kernel is broken so disable interrupts.
	 */
	hard_irq_disable();

	/*
	 * Make a note of the crashing cpu. Will be used in machine_kexec
	 * such that another IPI will not be sent.
	 */
	crashing_cpu = smp_processor_id();

	crash_kexec_prepare_cpus();
}

/*
 * Register a function to be called on shutdown. Only use this if you
 * can't reset your device in the second kernel.
 */
int crash_shutdown_register(crash_shutdown_t handler)
{
	unsigned int i, rc;

	spin_lock(&crash_handlers_lock);
	for (i = 0 ; i < CRASH_HANDLER_MAX; i++)
		if (!crash_shutdown_handles[i]) {
			/* Insert handle at first empty entry */
			crash_shutdown_handles[i] = handler;
			rc = 0;
			break;
		}

	if (i == CRASH_HANDLER_MAX) {
		printk(KERN_ERR "Crash shutdown handles full, not registered.\n");
		rc = 1;
	}

	spin_unlock(&crash_handlers_lock);
	return rc;
}
EXPORT_SYMBOL(crash_shutdown_register);
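
/*
 * Illustrative usage sketch (my_dev_crash_quiesce is a hypothetical driver
 * function, not part of this file): a driver whose device cannot be reset
 * from the kdump kernel registers a handler so the device is quiesced
 * before the new kernel boots.
 *
 *	static void my_dev_crash_quiesce(void)
 *	{
 *		... quiesce in-flight DMA so the kdump kernel starts cleanly ...
 *	}
 *
 *	crash_shutdown_register(my_dev_crash_quiesce);
 *	...
 *	crash_shutdown_unregister(my_dev_crash_quiesce);
 */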

int crash_shutdown_unregister(crash_shutdown_t handler)
{
	unsigned int i, rc;

	spin_lock(&crash_handlers_lock);
	for (i = 0 ; i < CRASH_HANDLER_MAX; i++)
		if (crash_shutdown_handles[i] == handler)
			break;

	if (i == CRASH_HANDLER_MAX) {
		printk(KERN_ERR "Crash shutdown handle not found\n");
		rc = 1;
	} else {
		/* Shift handles down */
		for (; i < (CRASH_HANDLER_MAX - 1); i++)
			crash_shutdown_handles[i] =
				crash_shutdown_handles[i+1];

		/*
		 * Reset last entry to NULL now that it has been shifted down,
		 * this will allow new handles to be added here.
		 */
		crash_shutdown_handles[i] = NULL;
		rc = 0;
	}

	spin_unlock(&crash_handlers_lock);
	return rc;
}
EXPORT_SYMBOL(crash_shutdown_unregister);
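
/*
 * Default machine_crash_shutdown() for PPC64: stop the other CPUs, save
 * the crashing CPU's registers, mask interrupts, and run any registered
 * shutdown handlers under fault protection before jumping to the kdump
 * kernel.
 */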
void default_machine_crash_shutdown(struct pt_regs *regs)
{
	unsigned int i;
	int (*old_handler)(struct pt_regs *regs);

	if (TRAP(regs) == INTERRUPT_SYSTEM_RESET)
		is_via_system_reset = 1;

	crash_smp_send_stop();

	crash_save_cpu(regs, crashing_cpu);

	time_to_dump = 1;

	crash_kexec_wait_realmode(crashing_cpu);

	machine_kexec_mask_interrupts();

	/*
	 * Call registered shutdown routines safely. Swap out
	 * __debugger_fault_handler, and replace on exit.
	 */
	old_handler = __debugger_fault_handler;
	__debugger_fault_handler = handle_fault;
	crash_shutdown_cpu = smp_processor_id();
	for (i = 0; i < CRASH_HANDLER_MAX && crash_shutdown_handles[i]; i++) {
		if (setjmp(crash_shutdown_buf) == 0) {
			/*
			 * Insert syncs and delay to ensure
			 * instructions in the dangerous region don't
			 * leak away from this protected region.
			 */
			asm volatile("sync; isync");
			/* dangerous region */
			crash_shutdown_handles[i]();
			asm volatile("sync; isync");
		}
	}
	crash_shutdown_cpu = -1;
	__debugger_fault_handler = old_handler;

	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(1, 0);
}