machine_kexec_file.c

// SPDX-License-Identifier: GPL-2.0
/*
 * kexec_file for arm64
 *
 * Copyright (C) 2018 Linaro Limited
 * Author: AKASHI Takahiro <[email protected]>
 *
 * Most code is derived from arm64 port of kexec-tools
 */
#define pr_fmt(fmt) "kexec_file: " fmt

#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
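
/* Image loaders supported by kexec_file_load() on arm64 (Image format only). */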
const struct kexec_file_ops * const kexec_file_loaders[] = {
	&kexec_image_ops,
	NULL
};
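
/*
 * Free the per-image buffers allocated at load time (the device tree copy
 * and the ELF core headers), then run the generic cleanup.
 */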
int arch_kimage_file_post_load_cleanup(struct kimage *image)
{
	kvfree(image->arch.dtb);
	image->arch.dtb = NULL;

	vfree(image->elf_headers);
	image->elf_headers = NULL;
	image->elf_headers_sz = 0;

	return kexec_image_post_load_cleanup_default(image);
}
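
/*
 * Build ELF core headers describing system RAM, with the crashkernel
 * reservation(s) excluded, for use by the crash dump kernel.
 */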
static int prepare_elf_headers(void **addr, unsigned long *sz)
{
	struct crash_mem *cmem;
	unsigned int nr_ranges;
	int ret;
	u64 i;
	phys_addr_t start, end;

	nr_ranges = 2; /* for exclusion of crashkernel region */
	for_each_mem_range(i, &start, &end)
		nr_ranges++;

	cmem = kmalloc(struct_size(cmem, ranges, nr_ranges), GFP_KERNEL);
	if (!cmem)
		return -ENOMEM;

	cmem->max_nr_ranges = nr_ranges;
	cmem->nr_ranges = 0;
	for_each_mem_range(i, &start, &end) {
		cmem->ranges[cmem->nr_ranges].start = start;
		cmem->ranges[cmem->nr_ranges].end = end - 1;
		cmem->nr_ranges++;
	}

	/* Exclude crashkernel region */
	ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		goto out;

	if (crashk_low_res.end) {
		ret = crash_exclude_mem_range(cmem, crashk_low_res.start,
					      crashk_low_res.end);
		if (ret)
			goto out;
	}

	ret = crash_prepare_elf64_headers(cmem, true, addr, sz);

out:
	kfree(cmem);
	return ret;
}

/*
 * Tries to add the initrd and DTB to the image. If it is not possible to
 * find valid locations, this function will undo changes to the image and
 * return non-zero.
 */
int load_other_segments(struct kimage *image,
			unsigned long kernel_load_addr,
			unsigned long kernel_size,
			char *initrd, unsigned long initrd_len,
			char *cmdline)
{
	struct kexec_buf kbuf;
	void *headers, *dtb = NULL;
	unsigned long headers_sz, initrd_load_addr = 0, dtb_len,
		      orig_segments = image->nr_segments;
	int ret = 0;

	kbuf.image = image;
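	/*
	 * The same kexec_buf is reused for every segment below; the
	 * per-segment fields are refilled before each kexec_add_buffer().
	 */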
	/* don't allocate anything below the kernel */
	kbuf.buf_min = kernel_load_addr + kernel_size;

	/* load elf core header */
	if (image->type == KEXEC_TYPE_CRASH) {
		ret = prepare_elf_headers(&headers, &headers_sz);
		if (ret) {
			pr_err("Preparing elf core header failed\n");
			goto out_err;
		}

		kbuf.buffer = headers;
		kbuf.bufsz = headers_sz;
		kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
		kbuf.memsz = headers_sz;
		kbuf.buf_align = SZ_64K; /* largest supported page size */
		kbuf.buf_max = ULONG_MAX;
		kbuf.top_down = true;

		ret = kexec_add_buffer(&kbuf);
		if (ret) {
			vfree(headers);
			goto out_err;
		}
		image->elf_headers = headers;
		image->elf_load_addr = kbuf.mem;
		image->elf_headers_sz = headers_sz;

		pr_debug("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
			 image->elf_load_addr, kbuf.bufsz, kbuf.memsz);
	}

	/* load initrd */
	if (initrd) {
		kbuf.buffer = initrd;
		kbuf.bufsz = initrd_len;
		kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
		kbuf.memsz = initrd_len;
		kbuf.buf_align = 0;
		/* within 1GB-aligned window of up to 32GB in size */
		kbuf.buf_max = round_down(kernel_load_addr, SZ_1G)
						+ (unsigned long)SZ_1G * 32;
		kbuf.top_down = false;

		ret = kexec_add_buffer(&kbuf);
		if (ret)
			goto out_err;
		initrd_load_addr = kbuf.mem;

		pr_debug("Loaded initrd at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
			 initrd_load_addr, kbuf.bufsz, kbuf.memsz);
	}

	/* load dtb */
	dtb = of_kexec_alloc_and_setup_fdt(image, initrd_load_addr,
					   initrd_len, cmdline, 0);
	if (!dtb) {
		pr_err("Preparing for new dtb failed\n");
		ret = -EINVAL;
		goto out_err;
	}

	/* trim it */
	fdt_pack(dtb);

	dtb_len = fdt_totalsize(dtb);
	kbuf.buffer = dtb;
	kbuf.bufsz = dtb_len;
	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
	kbuf.memsz = dtb_len;
	/* not across 2MB boundary */
	kbuf.buf_align = SZ_2M;
	kbuf.buf_max = ULONG_MAX;
	kbuf.top_down = true;

	ret = kexec_add_buffer(&kbuf);
	if (ret)
		goto out_err;

	image->arch.dtb = dtb;
	image->arch.dtb_mem = kbuf.mem;

	pr_debug("Loaded dtb at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		 kbuf.mem, kbuf.bufsz, kbuf.memsz);

	return 0;
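
/* Error path: drop any segments added above and free the DTB buffer. */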
out_err:
	image->nr_segments = orig_segments;
	kvfree(dtb);
	return ret;
}