file_load_64.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * ppc64 code to implement the kexec_file_load syscall
 *
 * Copyright (C) 2004 Adam Litke ([email protected])
 * Copyright (C) 2004 IBM Corp.
 * Copyright (C) 2004,2005 Milton D Miller II, IBM Corporation
 * Copyright (C) 2005 R Sharada ([email protected])
 * Copyright (C) 2006 Mohan Kumar M ([email protected])
 * Copyright (C) 2020 IBM Corporation
 *
 * Based on kexec-tools' kexec-ppc64.c, kexec-elf-rel-ppc64.c, fs2dt.c.
 * Heavily modified for the kernel by
 * Hari Bathini, IBM Corporation.
 */

#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/of_device.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/setup.h>
#include <asm/drmem.h>
#include <asm/firmware.h>
#include <asm/kexec_ranges.h>
#include <asm/crashdump-ppc64.h>

struct umem_info {
	u64 *buf;		/* data buffer for usable-memory property */
	u32 size;		/* size allocated for the data buffer */
	u32 max_entries;	/* maximum no. of entries */
	u32 idx;		/* index of current entry */

	/* usable memory ranges to look up */
	unsigned int nr_ranges;
	const struct crash_mem_range *ranges;
};

const struct kexec_file_ops * const kexec_file_loaders[] = {
	&kexec_elf64_ops,
	NULL
};

/**
 * get_exclude_memory_ranges - Get exclude memory ranges. This list includes
 *                             regions like opal/rtas, tce-table, initrd,
 *                             kernel, htab which should be avoided while
 *                             setting up kexec load segments.
 * @mem_ranges:                Range list to add the memory ranges to.
 *
 * Returns 0 on success, negative errno on error.
 */
static int get_exclude_memory_ranges(struct crash_mem **mem_ranges)
{
	int ret;

	ret = add_tce_mem_ranges(mem_ranges);
	if (ret)
		goto out;

	ret = add_initrd_mem_range(mem_ranges);
	if (ret)
		goto out;

	ret = add_htab_mem_range(mem_ranges);
	if (ret)
		goto out;

	ret = add_kernel_mem_range(mem_ranges);
	if (ret)
		goto out;

	ret = add_rtas_mem_range(mem_ranges);
	if (ret)
		goto out;

	ret = add_opal_mem_range(mem_ranges);
	if (ret)
		goto out;

	ret = add_reserved_mem_ranges(mem_ranges);
	if (ret)
		goto out;

	/* exclude memory ranges should be sorted for easy lookup */
	sort_memory_ranges(*mem_ranges, true);
out:
	if (ret)
		pr_err("Failed to setup exclude memory ranges\n");
	return ret;
}

/**
 * get_usable_memory_ranges - Get usable memory ranges. This list includes
 *                            regions like crashkernel, opal/rtas & tce-table,
 *                            that kdump kernel could use.
 * @mem_ranges:               Range list to add the memory ranges to.
 *
 * Returns 0 on success, negative errno on error.
 */
static int get_usable_memory_ranges(struct crash_mem **mem_ranges)
{
	int ret;

	/*
	 * Early boot failure observed on guests when low memory (first memory
	 * block?) is not added to usable memory. So, add [0, crashk_res.end]
	 * instead of [crashk_res.start, crashk_res.end] to workaround it.
	 * Also, crashed kernel's memory must be added to the reserve map to
	 * prevent the kdump kernel from using it.
	 */
	ret = add_mem_range(mem_ranges, 0, crashk_res.end + 1);
	if (ret)
		goto out;

	ret = add_rtas_mem_range(mem_ranges);
	if (ret)
		goto out;

	ret = add_opal_mem_range(mem_ranges);
	if (ret)
		goto out;

	ret = add_tce_mem_ranges(mem_ranges);
out:
	if (ret)
		pr_err("Failed to setup usable memory ranges\n");
	return ret;
}

/**
 * get_crash_memory_ranges - Get crash memory ranges. This list includes
 *                           first/crashing kernel's memory regions that
 *                           would be exported via an elfcore.
 * @mem_ranges:              Range list to add the memory ranges to.
 *
 * Returns 0 on success, negative errno on error.
 */
static int get_crash_memory_ranges(struct crash_mem **mem_ranges)
{
	phys_addr_t base, end;
	struct crash_mem *tmem;
	u64 i;
	int ret;

	for_each_mem_range(i, &base, &end) {
		u64 size = end - base;

		/* Skip backup memory region, which needs a separate entry */
		if (base == BACKUP_SRC_START) {
			if (size > BACKUP_SRC_SIZE) {
				base = BACKUP_SRC_END + 1;
				size -= BACKUP_SRC_SIZE;
			} else
				continue;
		}

		ret = add_mem_range(mem_ranges, base, size);
		if (ret)
			goto out;

		/* Try merging adjacent ranges before reallocation attempt */
		if ((*mem_ranges)->nr_ranges == (*mem_ranges)->max_nr_ranges)
			sort_memory_ranges(*mem_ranges, true);
	}

	/* Reallocate memory ranges if there is no space to split ranges */
	tmem = *mem_ranges;
	if (tmem && (tmem->nr_ranges == tmem->max_nr_ranges)) {
		tmem = realloc_mem_ranges(mem_ranges);
		if (!tmem)
			goto out;
	}

	/* Exclude crashkernel region */
	ret = crash_exclude_mem_range(tmem, crashk_res.start, crashk_res.end);
	if (ret)
		goto out;

	/*
	 * FIXME: For now, stay in parity with kexec-tools but if RTAS/OPAL
	 *        regions are exported to save their context at the time of
	 *        crash, they should actually be backed up just like the
	 *        first 64K bytes of memory.
	 */
	ret = add_rtas_mem_range(mem_ranges);
	if (ret)
		goto out;

	ret = add_opal_mem_range(mem_ranges);
	if (ret)
		goto out;

	/* create a separate program header for the backup region */
	ret = add_mem_range(mem_ranges, BACKUP_SRC_START, BACKUP_SRC_SIZE);
	if (ret)
		goto out;

	sort_memory_ranges(*mem_ranges, false);
out:
	if (ret)
		pr_err("Failed to setup crash memory ranges\n");
	return ret;
}

/**
 * get_reserved_memory_ranges - Get reserve memory ranges. This list includes
 *                              memory regions that should be added to the
 *                              memory reserve map to ensure the region is
 *                              protected from any mischief.
 * @mem_ranges:                 Range list to add the memory ranges to.
 *
 * Returns 0 on success, negative errno on error.
 */
static int get_reserved_memory_ranges(struct crash_mem **mem_ranges)
{
	int ret;

	ret = add_rtas_mem_range(mem_ranges);
	if (ret)
		goto out;

	ret = add_tce_mem_ranges(mem_ranges);
	if (ret)
		goto out;

	ret = add_reserved_mem_ranges(mem_ranges);
out:
	if (ret)
		pr_err("Failed to setup reserved memory ranges\n");
	return ret;
}

/**
 * __locate_mem_hole_top_down - Looks top down for a large enough memory hole
 *                              in the memory regions between buf_min & buf_max
 *                              for the buffer. If found, sets kbuf->mem.
 * @kbuf:                       Buffer contents and memory parameters.
 * @buf_min:                    Minimum address for the buffer.
 * @buf_max:                    Maximum address for the buffer.
 *
 * Returns 0 on success, negative errno on error.
 */
static int __locate_mem_hole_top_down(struct kexec_buf *kbuf,
				      u64 buf_min, u64 buf_max)
{
	int ret = -EADDRNOTAVAIL;
	phys_addr_t start, end;
	u64 i;

	for_each_mem_range_rev(i, &start, &end) {
		/*
		 * memblock uses [start, end) convention while it is
		 * [start, end] here. Fix the off-by-one to have the
		 * same convention.
		 */
		end -= 1;

		if (start > buf_max)
			continue;

		/* Memory hole not found */
		if (end < buf_min)
			break;

		/* Adjust memory region based on the given range */
		if (start < buf_min)
			start = buf_min;
		if (end > buf_max)
			end = buf_max;

		start = ALIGN(start, kbuf->buf_align);
		if (start < end && (end - start + 1) >= kbuf->memsz) {
			/* Suitable memory range found. Set kbuf->mem */
			kbuf->mem = ALIGN_DOWN(end - kbuf->memsz + 1,
					       kbuf->buf_align);
			ret = 0;
			break;
		}
	}

	return ret;
}

/**
 * locate_mem_hole_top_down_ppc64 - Skip special memory regions to find a
 *                                  suitable buffer with top down approach.
 * @kbuf:                           Buffer contents and memory parameters.
 * @buf_min:                        Minimum address for the buffer.
 * @buf_max:                        Maximum address for the buffer.
 * @emem:                           Exclude memory ranges.
 *
 * Returns 0 on success, negative errno on error.
 */
static int locate_mem_hole_top_down_ppc64(struct kexec_buf *kbuf,
					  u64 buf_min, u64 buf_max,
					  const struct crash_mem *emem)
{
	int i, ret = 0, err = -EADDRNOTAVAIL;
	u64 start, end, tmin, tmax;

	tmax = buf_max;
	for (i = (emem->nr_ranges - 1); i >= 0; i--) {
		start = emem->ranges[i].start;
		end = emem->ranges[i].end;

		if (start > tmax)
			continue;

		if (end < tmax) {
			tmin = (end < buf_min ? buf_min : end + 1);
			ret = __locate_mem_hole_top_down(kbuf, tmin, tmax);
			if (!ret)
				return 0;
		}

		tmax = start - 1;

		if (tmax < buf_min) {
			ret = err;
			break;
		}
		ret = 0;
	}

	if (!ret) {
		tmin = buf_min;
		ret = __locate_mem_hole_top_down(kbuf, tmin, tmax);
	}
	return ret;
}

/**
 * __locate_mem_hole_bottom_up - Looks bottom up for a large enough memory hole
 *                               in the memory regions between buf_min & buf_max
 *                               for the buffer. If found, sets kbuf->mem.
 * @kbuf:                        Buffer contents and memory parameters.
 * @buf_min:                     Minimum address for the buffer.
 * @buf_max:                     Maximum address for the buffer.
 *
 * Returns 0 on success, negative errno on error.
 */
static int __locate_mem_hole_bottom_up(struct kexec_buf *kbuf,
				       u64 buf_min, u64 buf_max)
{
	int ret = -EADDRNOTAVAIL;
	phys_addr_t start, end;
	u64 i;

	for_each_mem_range(i, &start, &end) {
		/*
		 * memblock uses [start, end) convention while it is
		 * [start, end] here. Fix the off-by-one to have the
		 * same convention.
		 */
		end -= 1;

		if (end < buf_min)
			continue;

		/* Memory hole not found */
		if (start > buf_max)
			break;

		/* Adjust memory region based on the given range */
		if (start < buf_min)
			start = buf_min;
		if (end > buf_max)
			end = buf_max;

		start = ALIGN(start, kbuf->buf_align);
		if (start < end && (end - start + 1) >= kbuf->memsz) {
			/* Suitable memory range found. Set kbuf->mem */
			kbuf->mem = start;
			ret = 0;
			break;
		}
	}

	return ret;
}

/**
 * locate_mem_hole_bottom_up_ppc64 - Skip special memory regions to find a
 *                                   suitable buffer with bottom up approach.
 * @kbuf:                            Buffer contents and memory parameters.
 * @buf_min:                         Minimum address for the buffer.
 * @buf_max:                         Maximum address for the buffer.
 * @emem:                            Exclude memory ranges.
 *
 * Returns 0 on success, negative errno on error.
 */
static int locate_mem_hole_bottom_up_ppc64(struct kexec_buf *kbuf,
					   u64 buf_min, u64 buf_max,
					   const struct crash_mem *emem)
{
	int i, ret = 0, err = -EADDRNOTAVAIL;
	u64 start, end, tmin, tmax;

	tmin = buf_min;
	for (i = 0; i < emem->nr_ranges; i++) {
		start = emem->ranges[i].start;
		end = emem->ranges[i].end;

		if (end < tmin)
			continue;

		if (start > tmin) {
			tmax = (start > buf_max ? buf_max : start - 1);
			ret = __locate_mem_hole_bottom_up(kbuf, tmin, tmax);
			if (!ret)
				return 0;
		}

		tmin = end + 1;

		if (tmin > buf_max) {
			ret = err;
			break;
		}
		ret = 0;
	}

	if (!ret) {
		tmax = buf_max;
		ret = __locate_mem_hole_bottom_up(kbuf, tmin, tmax);
	}
	return ret;
}

/**
 * check_realloc_usable_mem - Reallocate buffer if it can't accommodate entries
 * @um_info:                  Usable memory buffer and ranges info.
 * @cnt:                      No. of entries to accommodate.
 *
 * Frees up the old buffer if memory reallocation fails.
 *
 * Returns buffer on success, NULL on error.
 */
static u64 *check_realloc_usable_mem(struct umem_info *um_info, int cnt)
{
	u32 new_size;
	u64 *tbuf;

	if ((um_info->idx + cnt) <= um_info->max_entries)
		return um_info->buf;

	new_size = um_info->size + MEM_RANGE_CHUNK_SZ;
	tbuf = krealloc(um_info->buf, new_size, GFP_KERNEL);
	if (tbuf) {
		um_info->buf = tbuf;
		um_info->size = new_size;
		um_info->max_entries = (um_info->size / sizeof(u64));
	}

	return tbuf;
}

/**
 * add_usable_mem - Add the usable memory ranges within the given memory range
 *                  to the buffer
 * @um_info:        Usable memory buffer and ranges info.
 * @base:           Base address of memory range to look for.
 * @end:            End address of memory range to look for.
 *
 * Returns 0 on success, negative errno on error.
 */
static int add_usable_mem(struct umem_info *um_info, u64 base, u64 end)
{
	u64 loc_base, loc_end;
	bool add;
	int i;

	for (i = 0; i < um_info->nr_ranges; i++) {
		add = false;
		loc_base = um_info->ranges[i].start;
		loc_end = um_info->ranges[i].end;
		if (loc_base >= base && loc_end <= end)
			add = true;
		else if (base < loc_end && end > loc_base) {
			if (loc_base < base)
				loc_base = base;
			if (loc_end > end)
				loc_end = end;
			add = true;
		}

		if (add) {
			if (!check_realloc_usable_mem(um_info, 2))
				return -ENOMEM;

			um_info->buf[um_info->idx++] = cpu_to_be64(loc_base);
			um_info->buf[um_info->idx++] =
					cpu_to_be64(loc_end - loc_base + 1);
		}
	}

	return 0;
}

/**
 * kdump_setup_usable_lmb - This is a callback function that gets called by
 *                          walk_drmem_lmbs for every LMB to set its
 *                          usable memory ranges.
 * @lmb:                    LMB info.
 * @usm:                    linux,drconf-usable-memory property value.
 * @data:                   Pointer to usable memory buffer and ranges info.
 *
 * Returns 0 on success, negative errno on error.
 */
static int kdump_setup_usable_lmb(struct drmem_lmb *lmb, const __be32 **usm,
				  void *data)
{
	struct umem_info *um_info;
	int tmp_idx, ret;
	u64 base, end;

	/*
	 * kdump load isn't supported on kernels already booted with
	 * linux,drconf-usable-memory property.
	 */
	if (*usm) {
		pr_err("linux,drconf-usable-memory property already exists!");
		return -EINVAL;
	}

	um_info = data;
	tmp_idx = um_info->idx;
	if (!check_realloc_usable_mem(um_info, 1))
		return -ENOMEM;

	um_info->idx++;
	base = lmb->base_addr;
	end = base + drmem_lmb_size() - 1;
	ret = add_usable_mem(um_info, base, end);
	if (!ret) {
		/*
		 * Update the no. of ranges added. Two entries (base & size)
		 * for every range added.
		 */
		um_info->buf[tmp_idx] =
				cpu_to_be64((um_info->idx - tmp_idx - 1) / 2);
	}

	return ret;
}

#define NODE_PATH_LEN		256
/**
 * add_usable_mem_property - Add usable memory property for the given
 *                           memory node.
 * @fdt:                     Flattened device tree for the kdump kernel.
 * @dn:                      Memory node.
 * @um_info:                 Usable memory buffer and ranges info.
 *
 * Returns 0 on success, negative errno on error.
 */
static int add_usable_mem_property(void *fdt, struct device_node *dn,
				   struct umem_info *um_info)
{
	int n_mem_addr_cells, n_mem_size_cells, node;
	char path[NODE_PATH_LEN];
	int i, len, ranges, ret;
	const __be32 *prop;
	u64 base, end;

	of_node_get(dn);

	if (snprintf(path, NODE_PATH_LEN, "%pOF", dn) > (NODE_PATH_LEN - 1)) {
		pr_err("Buffer (%d) too small for memory node: %pOF\n",
		       NODE_PATH_LEN, dn);
		return -EOVERFLOW;
	}
	pr_debug("Memory node path: %s\n", path);

	/* Now that we know the path, find its offset in kdump kernel's fdt */
	node = fdt_path_offset(fdt, path);
	if (node < 0) {
		pr_err("Malformed device tree: error reading %s\n", path);
		ret = -EINVAL;
		goto out;
	}

	/* Get the address & size cells */
	n_mem_addr_cells = of_n_addr_cells(dn);
	n_mem_size_cells = of_n_size_cells(dn);
	pr_debug("address cells: %d, size cells: %d\n", n_mem_addr_cells,
		 n_mem_size_cells);

	um_info->idx = 0;
	if (!check_realloc_usable_mem(um_info, 2)) {
		ret = -ENOMEM;
		goto out;
	}

	prop = of_get_property(dn, "reg", &len);
	if (!prop || len <= 0) {
		ret = 0;
		goto out;
	}

	/*
	 * "reg" property represents sequence of (addr,size) tuples
	 * each representing a memory range.
	 */
	ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

	for (i = 0; i < ranges; i++) {
		base = of_read_number(prop, n_mem_addr_cells);
		prop += n_mem_addr_cells;
		end = base + of_read_number(prop, n_mem_size_cells) - 1;
		prop += n_mem_size_cells;

		ret = add_usable_mem(um_info, base, end);
		if (ret)
			goto out;
	}

	/*
	 * No kdump kernel usable memory found in this memory node.
	 * Write (0,0) tuple in linux,usable-memory property for
	 * this region to be ignored.
	 */
	if (um_info->idx == 0) {
		um_info->buf[0] = 0;
		um_info->buf[1] = 0;
		um_info->idx = 2;
	}

	ret = fdt_setprop(fdt, node, "linux,usable-memory", um_info->buf,
			  (um_info->idx * sizeof(u64)));

out:
	of_node_put(dn);
	return ret;
}

/**
 * update_usable_mem_fdt - Updates kdump kernel's fdt with linux,usable-memory
 *                         and linux,drconf-usable-memory DT properties as
 *                         appropriate to restrict its memory usage.
 * @fdt:                   Flattened device tree for the kdump kernel.
 * @usable_mem:            Usable memory ranges for kdump kernel.
 *
 * Returns 0 on success, negative errno on error.
 */
static int update_usable_mem_fdt(void *fdt, struct crash_mem *usable_mem)
{
	struct umem_info um_info;
	struct device_node *dn;
	int node, ret = 0;

	if (!usable_mem) {
		pr_err("Usable memory ranges for kdump kernel not found\n");
		return -ENOENT;
	}

	node = fdt_path_offset(fdt, "/ibm,dynamic-reconfiguration-memory");
	if (node == -FDT_ERR_NOTFOUND)
		pr_debug("No dynamic reconfiguration memory found\n");
	else if (node < 0) {
		pr_err("Malformed device tree: error reading /ibm,dynamic-reconfiguration-memory.\n");
		return -EINVAL;
	}

	um_info.buf = NULL;
	um_info.size = 0;
	um_info.max_entries = 0;
	um_info.idx = 0;
	/* Memory ranges to look up */
	um_info.ranges = &(usable_mem->ranges[0]);
	um_info.nr_ranges = usable_mem->nr_ranges;

	dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (dn) {
		ret = walk_drmem_lmbs(dn, &um_info, kdump_setup_usable_lmb);
		of_node_put(dn);
		if (ret) {
			pr_err("Could not setup linux,drconf-usable-memory property for kdump\n");
			goto out;
		}

		ret = fdt_setprop(fdt, node, "linux,drconf-usable-memory",
				  um_info.buf, (um_info.idx * sizeof(u64)));
		if (ret) {
			pr_err("Failed to update fdt with linux,drconf-usable-memory property");
			goto out;
		}
	}

	/*
	 * Walk through each memory node and set linux,usable-memory property
	 * for the corresponding node in kdump kernel's fdt.
	 */
	for_each_node_by_type(dn, "memory") {
		ret = add_usable_mem_property(fdt, dn, &um_info);
		if (ret) {
			pr_err("Failed to set linux,usable-memory property for %s node",
			       dn->full_name);
			of_node_put(dn);
			goto out;
		}
	}

out:
	kfree(um_info.buf);
	return ret;
}

/**
 * load_backup_segment - Locate a memory hole to place the backup region.
 * @image:               Kexec image.
 * @kbuf:                Buffer contents and memory parameters.
 *
 * Returns 0 on success, negative errno on error.
 */
static int load_backup_segment(struct kimage *image, struct kexec_buf *kbuf)
{
	void *buf;
	int ret;

	/*
	 * Setup a source buffer for backup segment.
	 *
	 * A source buffer has no meaning for backup region as data will
	 * be copied from backup source, after crash, in the purgatory.
	 * But as load segment code doesn't recognize such segments,
	 * setup a dummy source buffer to keep it happy for now.
	 */
	buf = vzalloc(BACKUP_SRC_SIZE);
	if (!buf)
		return -ENOMEM;

	kbuf->buffer = buf;
	kbuf->mem = KEXEC_BUF_MEM_UNKNOWN;
	kbuf->bufsz = kbuf->memsz = BACKUP_SRC_SIZE;
	kbuf->top_down = false;

	ret = kexec_add_buffer(kbuf);
	if (ret) {
		vfree(buf);
		return ret;
	}

	image->arch.backup_buf = buf;
	image->arch.backup_start = kbuf->mem;
	return 0;
}

/**
 * update_backup_region_phdr - Update backup region's offset for the core to
 *                             export the region appropriately.
 * @image:                     Kexec image.
 * @ehdr:                      ELF core header.
 *
 * Assumes an exclusive program header is setup for the backup region
 * in the ELF headers.
 *
 * Returns nothing.
 */
static void update_backup_region_phdr(struct kimage *image, Elf64_Ehdr *ehdr)
{
	Elf64_Phdr *phdr;
	unsigned int i;

	phdr = (Elf64_Phdr *)(ehdr + 1);
	for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
		if (phdr->p_paddr == BACKUP_SRC_START) {
			phdr->p_offset = image->arch.backup_start;
			pr_debug("Backup region offset updated to 0x%lx\n",
				 image->arch.backup_start);
			return;
		}
	}
}

/**
 * load_elfcorehdr_segment - Setup crash memory ranges and initialize elfcorehdr
 *                           segment needed to load kdump kernel.
 * @image:                   Kexec image.
 * @kbuf:                    Buffer contents and memory parameters.
 *
 * Returns 0 on success, negative errno on error.
 */
static int load_elfcorehdr_segment(struct kimage *image, struct kexec_buf *kbuf)
{
	struct crash_mem *cmem = NULL;
	unsigned long headers_sz;
	void *headers = NULL;
	int ret;

	ret = get_crash_memory_ranges(&cmem);
	if (ret)
		goto out;

	/* Setup elfcorehdr segment */
	ret = crash_prepare_elf64_headers(cmem, false, &headers, &headers_sz);
	if (ret) {
		pr_err("Failed to prepare elf headers for the core\n");
		goto out;
	}

	/* Fix the offset for backup region in the ELF header */
	update_backup_region_phdr(image, headers);

	kbuf->buffer = headers;
	kbuf->mem = KEXEC_BUF_MEM_UNKNOWN;
	kbuf->bufsz = kbuf->memsz = headers_sz;
	kbuf->top_down = false;

	ret = kexec_add_buffer(kbuf);
	if (ret) {
		vfree(headers);
		goto out;
	}

	image->elf_load_addr = kbuf->mem;
	image->elf_headers_sz = headers_sz;
	image->elf_headers = headers;
out:
	kfree(cmem);
	return ret;
}

/**
 * load_crashdump_segments_ppc64 - Initialize the additional segments needed
 *                                 to load kdump kernel.
 * @image:                         Kexec image.
 * @kbuf:                          Buffer contents and memory parameters.
 *
 * Returns 0 on success, negative errno on error.
 */
int load_crashdump_segments_ppc64(struct kimage *image,
				  struct kexec_buf *kbuf)
{
	int ret;

	/* Load backup segment - first 64K bytes of the crashing kernel */
	ret = load_backup_segment(image, kbuf);
	if (ret) {
		pr_err("Failed to load backup segment\n");
		return ret;
	}
	pr_debug("Loaded the backup region at 0x%lx\n", kbuf->mem);

	/* Load elfcorehdr segment - to export crashing kernel's vmcore */
	ret = load_elfcorehdr_segment(image, kbuf);
	if (ret) {
		pr_err("Failed to load elfcorehdr segment\n");
		return ret;
	}
	pr_debug("Loaded elf core header at 0x%lx, bufsz=0x%lx memsz=0x%lx\n",
		 image->elf_load_addr, kbuf->bufsz, kbuf->memsz);

	return 0;
}

/**
 * setup_purgatory_ppc64 - initialize PPC64 specific purgatory's global
 *                         variables and call setup_purgatory() to initialize
 *                         common global variable.
 * @image:                 kexec image.
 * @slave_code:            Slave code for the purgatory.
 * @fdt:                   Flattened device tree for the next kernel.
 * @kernel_load_addr:      Address where the kernel is loaded.
 * @fdt_load_addr:         Address where the flattened device tree is loaded.
 *
 * Returns 0 on success, negative errno on error.
 */
int setup_purgatory_ppc64(struct kimage *image, const void *slave_code,
			  const void *fdt, unsigned long kernel_load_addr,
			  unsigned long fdt_load_addr)
{
	struct device_node *dn = NULL;
	int ret;

	ret = setup_purgatory(image, slave_code, fdt, kernel_load_addr,
			      fdt_load_addr);
	if (ret)
		goto out;

	if (image->type == KEXEC_TYPE_CRASH) {
		u32 my_run_at_load = 1;

		/*
		 * Tell relocatable kernel to run at load address
		 * via the word meant for that at 0x5c.
		 */
		ret = kexec_purgatory_get_set_symbol(image, "run_at_load",
						     &my_run_at_load,
						     sizeof(my_run_at_load),
						     false);
		if (ret)
			goto out;
	}

	/* Tell purgatory where to look for backup region */
	ret = kexec_purgatory_get_set_symbol(image, "backup_start",
					     &image->arch.backup_start,
					     sizeof(image->arch.backup_start),
					     false);
	if (ret)
		goto out;

	/* Setup OPAL base & entry values */
	dn = of_find_node_by_path("/ibm,opal");
	if (dn) {
		u64 val;

		of_property_read_u64(dn, "opal-base-address", &val);
		ret = kexec_purgatory_get_set_symbol(image, "opal_base", &val,
						     sizeof(val), false);
		if (ret)
			goto out;

		of_property_read_u64(dn, "opal-entry-address", &val);
		ret = kexec_purgatory_get_set_symbol(image, "opal_entry", &val,
						     sizeof(val), false);
	}
out:
	if (ret)
		pr_err("Failed to setup purgatory symbols");
	of_node_put(dn);
	return ret;
}

/**
 * kexec_extra_fdt_size_ppc64 - Return the estimated additional size needed to
 *                              setup FDT for kexec/kdump kernel.
 * @image:                      kexec image being loaded.
 *
 * Returns the estimated extra size needed for kexec/kdump kernel FDT.
 */
unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image)
{
	u64 usm_entries;

	if (image->type != KEXEC_TYPE_CRASH)
		return 0;

	/*
	 * For kdump kernel, account for linux,usable-memory and
	 * linux,drconf-usable-memory properties. Get an approximate on the
	 * number of usable memory entries and use for FDT size estimation.
	 */
	usm_entries = ((memblock_end_of_DRAM() / drmem_lmb_size()) +
		       (2 * (resource_size(&crashk_res) / drmem_lmb_size())));
	return (unsigned int)(usm_entries * sizeof(u64));
}

/**
 * add_node_props - Reads node properties from device node structure and add
 *                  them to fdt.
 * @fdt:            Flattened device tree of the kernel
 * @node_offset:    offset of the node to add a property at
 * @dn:             device node pointer
 *
 * Returns 0 on success, negative errno on error.
 */
static int add_node_props(void *fdt, int node_offset, const struct device_node *dn)
{
	int ret = 0;
	struct property *pp;

	if (!dn)
		return -EINVAL;

	for_each_property_of_node(dn, pp) {
		ret = fdt_setprop(fdt, node_offset, pp->name, pp->value, pp->length);
		if (ret < 0) {
			pr_err("Unable to add %s property: %s\n", pp->name, fdt_strerror(ret));
			return ret;
		}
	}
	return ret;
}

/**
 * update_cpus_node - Update cpus node of flattened device tree using of_root
 *                    device node.
 * @fdt:              Flattened device tree of the kernel.
 *
 * Returns 0 on success, negative errno on error.
 */
static int update_cpus_node(void *fdt)
{
	struct device_node *cpus_node, *dn;
	int cpus_offset, cpus_subnode_offset, ret = 0;

	cpus_offset = fdt_path_offset(fdt, "/cpus");
	if (cpus_offset < 0 && cpus_offset != -FDT_ERR_NOTFOUND) {
		pr_err("Malformed device tree: error reading /cpus node: %s\n",
		       fdt_strerror(cpus_offset));
		return cpus_offset;
	}

	if (cpus_offset > 0) {
		ret = fdt_del_node(fdt, cpus_offset);
		if (ret < 0) {
			pr_err("Error deleting /cpus node: %s\n", fdt_strerror(ret));
			return -EINVAL;
		}
	}

	/* Add cpus node to fdt */
	cpus_offset = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"), "cpus");
	if (cpus_offset < 0) {
		pr_err("Error creating /cpus node: %s\n", fdt_strerror(cpus_offset));
		return -EINVAL;
	}

	/* Add cpus node properties */
	cpus_node = of_find_node_by_path("/cpus");
	ret = add_node_props(fdt, cpus_offset, cpus_node);
	of_node_put(cpus_node);
	if (ret < 0)
		return ret;

	/* Loop through all subnodes of cpus and add them to fdt */
	for_each_node_by_type(dn, "cpu") {
		cpus_subnode_offset = fdt_add_subnode(fdt, cpus_offset, dn->full_name);
		if (cpus_subnode_offset < 0) {
			pr_err("Unable to add %s subnode: %s\n", dn->full_name,
			       fdt_strerror(cpus_subnode_offset));
			ret = cpus_subnode_offset;
			goto out;
		}

		ret = add_node_props(fdt, cpus_subnode_offset, dn);
		if (ret < 0)
			goto out;
	}
out:
	of_node_put(dn);
	return ret;
}
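
/**
 * copy_property - Copy a property from the live device tree node to the
 *                 corresponding node in the fdt of the kernel being loaded.
 *                 If the property no longer exists in the live tree, it is
 *                 deleted from the fdt.
 * @fdt:           Flattened device tree of the kernel being loaded.
 * @node_offset:   Offset of the fdt node to update.
 * @dn:            Live device tree node to read the property from.
 * @propname:      Name of the property to copy.
 *
 * Returns 0 on success, a negative libfdt error code on error.
 */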
static int copy_property(void *fdt, int node_offset, const struct device_node *dn,
			 const char *propname)
{
	const void *prop, *fdtprop;
	int len = 0, fdtlen = 0;

	prop = of_get_property(dn, propname, &len);
	fdtprop = fdt_getprop(fdt, node_offset, propname, &fdtlen);

	if (fdtprop && !prop)
		return fdt_delprop(fdt, node_offset, propname);
	else if (prop)
		return fdt_setprop(fdt, node_offset, propname, prop, len);
	else
		return -FDT_ERR_NOTFOUND;
}
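
/**
 * update_pci_dma_nodes - For each PCI node carrying the given DMA window
 *                        property in the live device tree, copy ibm,dma-window
 *                        and that property into the matching node of the fdt
 *                        of the kernel being loaded. Does nothing unless
 *                        running in an LPAR.
 * @fdt:                  Flattened device tree of the kernel being loaded.
 * @dmapropname:          Name of the DMA window property to copy.
 *
 * Returns 0 on success, a negative libfdt error code on error.
 */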
static int update_pci_dma_nodes(void *fdt, const char *dmapropname)
{
	struct device_node *dn;
	int pci_offset, root_offset, ret = 0;

	if (!firmware_has_feature(FW_FEATURE_LPAR))
		return 0;

	root_offset = fdt_path_offset(fdt, "/");
	for_each_node_with_property(dn, dmapropname) {
		pci_offset = fdt_subnode_offset(fdt, root_offset, of_node_full_name(dn));
		if (pci_offset < 0)
			continue;

		ret = copy_property(fdt, pci_offset, dn, "ibm,dma-window");
		if (ret < 0)
			break;
		ret = copy_property(fdt, pci_offset, dn, dmapropname);
		if (ret < 0)
			break;
	}

	return ret;
}

/**
 * setup_new_fdt_ppc64 - Update the flattened device-tree of the kernel
 *                       being loaded.
 * @image:               kexec image being loaded.
 * @fdt:                 Flattened device tree for the next kernel.
 * @initrd_load_addr:    Address where the next initrd will be loaded.
 * @initrd_len:          Size of the next initrd, or 0 if there will be none.
 * @cmdline:             Command line for the next kernel, or NULL if there
 *                       will be none.
 *
 * Returns 0 on success, negative errno on error.
 */
int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
			unsigned long initrd_load_addr,
			unsigned long initrd_len, const char *cmdline)
{
	struct crash_mem *umem = NULL, *rmem = NULL;
	int i, nr_ranges, ret;

	/*
	 * Restrict memory usage for kdump kernel by setting up
	 * usable memory ranges and memory reserve map.
	 */
	if (image->type == KEXEC_TYPE_CRASH) {
		ret = get_usable_memory_ranges(&umem);
		if (ret)
			goto out;

		ret = update_usable_mem_fdt(fdt, umem);
		if (ret) {
			pr_err("Error setting up usable-memory property for kdump kernel\n");
			goto out;
		}

		/*
		 * Ensure we don't touch crashed kernel's memory except the
		 * first 64K of RAM, which will be backed up.
		 */
		ret = fdt_add_mem_rsv(fdt, BACKUP_SRC_END + 1,
				      crashk_res.start - BACKUP_SRC_SIZE);
		if (ret) {
			pr_err("Error reserving crash memory: %s\n",
			       fdt_strerror(ret));
			goto out;
		}

		/* Ensure backup region is not used by kdump/capture kernel */
		ret = fdt_add_mem_rsv(fdt, image->arch.backup_start,
				      BACKUP_SRC_SIZE);
		if (ret) {
			pr_err("Error reserving memory for backup: %s\n",
			       fdt_strerror(ret));
			goto out;
		}
	}

	/* Update cpus nodes information to account for hotplug CPUs. */
	ret = update_cpus_node(fdt);
	if (ret < 0)
		goto out;

#define DIRECT64_PROPNAME "linux,direct64-ddr-window-info"
#define DMA64_PROPNAME "linux,dma64-ddr-window-info"
	ret = update_pci_dma_nodes(fdt, DIRECT64_PROPNAME);
	if (ret < 0)
		goto out;

	ret = update_pci_dma_nodes(fdt, DMA64_PROPNAME);
	if (ret < 0)
		goto out;
#undef DMA64_PROPNAME
#undef DIRECT64_PROPNAME

	/* Update memory reserve map */
	ret = get_reserved_memory_ranges(&rmem);
	if (ret)
		goto out;

	nr_ranges = rmem ? rmem->nr_ranges : 0;
	for (i = 0; i < nr_ranges; i++) {
		u64 base, size;

		base = rmem->ranges[i].start;
		size = rmem->ranges[i].end - base + 1;
		ret = fdt_add_mem_rsv(fdt, base, size);
		if (ret) {
			pr_err("Error updating memory reserve map: %s\n",
			       fdt_strerror(ret));
			goto out;
		}
	}

out:
	kfree(rmem);
	kfree(umem);
	return ret;
}

/**
 * arch_kexec_locate_mem_hole - Skip special memory regions like rtas, opal,
 *                              tce-table, reserved-ranges & such (exclude
 *                              memory ranges) as they can't be used for kexec
 *                              segment buffer. Sets kbuf->mem when a suitable
 *                              memory hole is found.
 * @kbuf:                       Buffer contents and memory parameters.
 *
 * Assumes minimum of PAGE_SIZE alignment for kbuf->memsz & kbuf->buf_align.
 *
 * Returns 0 on success, negative errno on error.
 */
int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf)
{
	struct crash_mem **emem;
	u64 buf_min, buf_max;
	int ret;

	/* Look up the exclude ranges list while locating the memory hole */
	emem = &(kbuf->image->arch.exclude_ranges);
	if (!(*emem) || ((*emem)->nr_ranges == 0)) {
		pr_warn("No exclude range list. Using the default locate mem hole method\n");
		return kexec_locate_mem_hole(kbuf);
	}

	buf_min = kbuf->buf_min;
	buf_max = kbuf->buf_max;
	/* Segments for kdump kernel should be within crashkernel region */
	if (kbuf->image->type == KEXEC_TYPE_CRASH) {
		buf_min = (buf_min < crashk_res.start ?
			   crashk_res.start : buf_min);
		buf_max = (buf_max > crashk_res.end ?
			   crashk_res.end : buf_max);
	}

	if (buf_min > buf_max) {
		pr_err("Invalid buffer min and/or max values\n");
		return -EINVAL;
	}

	if (kbuf->top_down)
		ret = locate_mem_hole_top_down_ppc64(kbuf, buf_min, buf_max,
						     *emem);
	else
		ret = locate_mem_hole_bottom_up_ppc64(kbuf, buf_min, buf_max,
						      *emem);

	/* Add the buffer allocated to the exclude list for the next lookup */
	if (!ret) {
		add_mem_range(emem, kbuf->mem, kbuf->memsz);
		sort_memory_ranges(*emem, true);
	} else {
		pr_err("Failed to locate memory buffer of size %lu\n",
		       kbuf->memsz);
	}
	return ret;
}

/**
 * arch_kexec_kernel_image_probe - Does additional handling needed to setup
 *                                 kexec segments.
 * @image:                         kexec image being loaded.
 * @buf:                           Buffer pointing to elf data.
 * @buf_len:                       Length of the buffer.
 *
 * Returns 0 on success, negative errno on error.
 */
int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
				  unsigned long buf_len)
{
	int ret;

	/* Get exclude memory ranges needed for setting up kexec segments */
	ret = get_exclude_memory_ranges(&(image->arch.exclude_ranges));
	if (ret) {
		pr_err("Failed to setup exclude memory ranges for buffer lookup\n");
		return ret;
	}

	return kexec_image_probe_default(image, buf, buf_len);
}

/**
 * arch_kimage_file_post_load_cleanup - Frees up all the allocations done
 *                                      while loading the image.
 * @image:                              kexec image being loaded.
 *
 * Returns 0 on success, negative errno on error.
 */
int arch_kimage_file_post_load_cleanup(struct kimage *image)
{
	kfree(image->arch.exclude_ranges);
	image->arch.exclude_ranges = NULL;

	vfree(image->arch.backup_buf);
	image->arch.backup_buf = NULL;

	vfree(image->elf_headers);
	image->elf_headers = NULL;
	image->elf_headers_sz = 0;

	kvfree(image->arch.fdt);
	image->arch.fdt = NULL;

	return kexec_image_post_load_cleanup_default(image);
}