// SPDX-License-Identifier: GPL-2.0-only
/*
 * machine_kexec.c for kexec
 * Created by <[email protected]> on Thu Oct 12 15:15:06 2006
 */
#include <linux/compiler.h>
#include <linux/kexec.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/libfdt.h>

#include <asm/cacheflush.h>
#include <asm/page.h>

extern const unsigned char relocate_new_kernel[];
extern const size_t relocate_new_kernel_size;

extern unsigned long kexec_start_address;
extern unsigned long kexec_indirection_page;

static unsigned long reboot_code_buffer;

#ifdef CONFIG_SMP
static void (*relocated_kexec_smp_wait)(void *);

atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
void (*_crash_smp_send_stop)(void) = NULL;
#endif

void (*_machine_kexec_shutdown)(void) = NULL;
void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL;

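/* Dump the kexec image layout (type, entry point, segments) at pr_debug level. */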
static void kexec_image_info(const struct kimage *kimage)
{
	unsigned long i;

	pr_debug("kexec kimage info:\n");
	pr_debug("  type:        %d\n", kimage->type);
	pr_debug("  start:       %lx\n", kimage->start);
	pr_debug("  head:        %lx\n", kimage->head);
	pr_debug("  nr_segments: %lu\n", kimage->nr_segments);

	for (i = 0; i < kimage->nr_segments; i++) {
		pr_debug("  segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
			i,
			kimage->segment[i].mem,
			kimage->segment[i].mem + kimage->segment[i].memsz,
			(unsigned long)kimage->segment[i].memsz,
			(unsigned long)kimage->segment[i].memsz / PAGE_SIZE);
	}
}

#ifdef CONFIG_UHI_BOOT

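/*
 * Locate a flat device tree among the image segments and pass it to the
 * new kernel through kexec_args[] (-2 in the first argument flags a
 * UHI/DTB boot, the second carries the FDT address).
 */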
static int uhi_machine_kexec_prepare(struct kimage *kimage)
{
	int i;

	/*
	 * If no DTB file is passed to the new kernel, the kexec tool
	 * creates a flat device tree that holds the modified command
	 * line for the new kernel.
	 */
	for (i = 0; i < kimage->nr_segments; i++) {
		struct fdt_header fdt;

		if (kimage->segment[i].memsz <= sizeof(fdt))
			continue;

		if (copy_from_user(&fdt, kimage->segment[i].buf, sizeof(fdt)))
			continue;

		if (fdt_check_header(&fdt))
			continue;

		kexec_args[0] = -2;
		kexec_args[1] = (unsigned long)
			phys_to_virt((unsigned long)kimage->segment[i].mem);
		break;
	}

	return 0;
}

int (*_machine_kexec_prepare)(struct kimage *) = uhi_machine_kexec_prepare;
#else
int (*_machine_kexec_prepare)(struct kimage *) = NULL;
#endif /* CONFIG_UHI_BOOT */

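/*
 * Prepare the image at load time: on SMP, bail out unless the platform
 * provides a way to relocate the secondary CPUs, then run the optional
 * board-specific hook.
 */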
int
machine_kexec_prepare(struct kimage *kimage)
{
#ifdef CONFIG_SMP
	if (!kexec_nonboot_cpu_func())
		return -EINVAL;
#endif

	kexec_image_info(kimage);

	if (_machine_kexec_prepare)
		return _machine_kexec_prepare(kimage);

	return 0;
}

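/* Nothing to undo on MIPS when an image is unloaded. */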
void
machine_kexec_cleanup(struct kimage *kimage)
{
}

#ifdef CONFIG_SMP
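/*
 * IPI callback for the secondary CPUs: mark the CPU offline, then spin
 * with interrupts disabled until the boot CPU signals that the reboot
 * code is in place, and jump into it via kexec_reboot().
 */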
static void kexec_shutdown_secondary(void *param)
{
	int cpu = smp_processor_id();

	if (!cpu_online(cpu))
		return;

	/* We won't be sent IPIs any more. */
	set_cpu_online(cpu, false);

	local_irq_disable();
	while (!atomic_read(&kexec_ready_to_reboot))
		cpu_relax();

	kexec_reboot();

	/* NOTREACHED */
}
#endif

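/*
 * Normal (non-crash) kexec path: run the optional platform shutdown hook,
 * then park every secondary CPU and wait until only this CPU is online.
 */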
void
machine_shutdown(void)
{
	if (_machine_kexec_shutdown)
		_machine_kexec_shutdown();

#ifdef CONFIG_SMP
	smp_call_function(kexec_shutdown_secondary, NULL, 0);

	while (num_online_cpus() > 1) {
		cpu_relax();
		mdelay(1);
	}
#endif
}

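/* Crash (kdump) path: let the platform override the default crash shutdown. */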
void
machine_crash_shutdown(struct pt_regs *regs)
{
	if (_machine_crash_shutdown)
		_machine_crash_shutdown(regs);
	else
		default_machine_crash_shutdown(regs);
}

#ifdef CONFIG_SMP
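/*
 * Send a secondary CPU into the relocated kexec_smp_wait loop after making
 * sure its icache sees the copied relocation code.
 */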
void kexec_nonboot_cpu_jump(void)
{
	local_flush_icache_range((unsigned long)relocated_kexec_smp_wait,
				 reboot_code_buffer + relocate_new_kernel_size);

	relocated_kexec_smp_wait(NULL);
}
#endif

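/*
 * Final jump into the relocation code: the boot CPU runs the copied
 * relocate_new_kernel, while secondary CPUs are handed over to the
 * platform's non-boot CPU handler.
 */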
void kexec_reboot(void)
{
	void (*do_kexec)(void) __noreturn;

	/*
	 * We know we were online, and there will be no incoming IPIs at
	 * this point. Mark online again before rebooting so that the crash
	 * analysis tool will see us correctly.
	 */
	set_cpu_online(smp_processor_id(), true);

	/* Ensure remote CPUs observe that we're online before rebooting. */
	smp_mb__after_atomic();

#ifdef CONFIG_SMP
	if (smp_processor_id() > 0) {
		/*
		 * Secondary CPUs cannot simply cpu_relax() or wait here:
		 * the kexec'd kernel may be SMP (kdump kernels usually are
		 * not), so hand them over to the platform's non-boot CPU
		 * handler.
		 */
		kexec_nonboot_cpu();

		/* NOTREACHED */
	}
#endif

	/*
	 * Make sure we get correct instructions written by the
	 * machine_kexec() CPU.
	 */
	local_flush_icache_range(reboot_code_buffer,
				 reboot_code_buffer + relocate_new_kernel_size);

	do_kexec = (void *)reboot_code_buffer;
	do_kexec();
}

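/*
 * Copy relocate_new_kernel into the control page, convert the kimage page
 * list to virtual addresses and hand control to kexec_reboot().
 */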
void
machine_kexec(struct kimage *image)
{
	unsigned long entry;
	unsigned long *ptr;

	reboot_code_buffer =
		(unsigned long)page_address(image->control_code_page);

	kexec_start_address =
		(unsigned long) phys_to_virt(image->start);

	if (image->type == KEXEC_TYPE_DEFAULT) {
		kexec_indirection_page =
			(unsigned long) phys_to_virt(image->head & PAGE_MASK);
	} else {
		kexec_indirection_page = (unsigned long)&image->head;
	}

	memcpy((void *)reboot_code_buffer, relocate_new_kernel,
	       relocate_new_kernel_size);

	/*
	 * The generic kexec code builds a page list with physical
	 * addresses. They are directly accessible through KSEG0 (or
	 * CKSEG0/XKPHYS on a 64-bit system), hence the phys_to_virt()
	 * calls.
	 */
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
	     ptr = (entry & IND_INDIRECTION) ?
		     phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
		if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
		    *ptr & IND_DESTINATION)
			*ptr = (unsigned long) phys_to_virt(*ptr);
	}

	/* Mark offline BEFORE disabling local irq. */
	set_cpu_online(smp_processor_id(), false);

	/* We do not want to be bothered by interrupts from here on. */
	local_irq_disable();

	printk("Will call new kernel at %08lx\n", image->start);
	printk("Bye ...\n");

	/* Make reboot code buffer available to the boot CPU. */
	__flush_cache_all();

#ifdef CONFIG_SMP
	/* All secondary CPUs may now jump into the relocated kexec_smp_wait loop. */
	relocated_kexec_smp_wait = reboot_code_buffer +
		(void *)(kexec_smp_wait - relocate_new_kernel);

	smp_wmb();
	atomic_set(&kexec_ready_to_reboot, 1);
#endif
	kexec_reboot();
}