initramfs.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/async.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/dirent.h>
#include <linux/syscalls.h>
#include <linux/utime.h>
#include <linux/file.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/namei.h>
#include <linux/init_syscalls.h>
#include <linux/task_work.h>
#include <linux/umh.h>
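
/*
 * Checksum state for "070702" (newc with checksum) archives: xwrite()
 * accumulates io_csum over every byte it writes so do_copy() can compare
 * it against the checksum recorded in the cpio header.
 */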
static __initdata bool csum_present;
static __initdata u32 io_csum;

static ssize_t __init xwrite(struct file *file, const unsigned char *p,
		size_t count, loff_t *pos)
{
	ssize_t out = 0;

	/* sys_write only can write MAX_RW_COUNT aka 2G-4K bytes at most */
	while (count) {
		ssize_t rv = kernel_write(file, p, count, pos);

		if (rv < 0) {
			if (rv == -EINTR || rv == -EAGAIN)
				continue;
			return out ? out : rv;
		} else if (rv == 0)
			break;

		if (csum_present) {
			ssize_t i;

			for (i = 0; i < rv; i++)
				io_csum += p[i];
		}

		p += rv;
		out += rv;
		count -= rv;
	}

	return out;
}
static __initdata char *message;
static void __init error(char *x)
{
	if (!message)
		message = x;
}

static void panic_show_mem(const char *fmt, ...)
{
	va_list args;

	show_mem(0, NULL);
	va_start(args, fmt);
	panic(fmt, args);
	va_end(args);
}
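
/*
 * Hard links in the archive share the same (major, minor, ino) tuple.  The
 * hash below remembers the first name seen for each tuple so that later
 * entries with nlink >= 2 can be recreated with init_link() instead of a
 * second copy of the data.
 */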
/* link hash */

#define N_ALIGN(len) ((((len) + 1) & ~3) + 2)

static __initdata struct hash {
	int ino, minor, major;
	umode_t mode;
	struct hash *next;
	char name[N_ALIGN(PATH_MAX)];
} *head[32];

static inline int hash(int major, int minor, int ino)
{
	unsigned long tmp = ino + minor + (major << 3);
	tmp += tmp >> 5;
	return tmp & 31;
}

static char __init *find_link(int major, int minor, int ino,
			      umode_t mode, char *name)
{
	struct hash **p, *q;

	for (p = head + hash(major, minor, ino); *p; p = &(*p)->next) {
		if ((*p)->ino != ino)
			continue;
		if ((*p)->minor != minor)
			continue;
		if ((*p)->major != major)
			continue;
		if (((*p)->mode ^ mode) & S_IFMT)
			continue;
		return (*p)->name;
	}
	q = kmalloc(sizeof(struct hash), GFP_KERNEL);
	if (!q)
		panic_show_mem("can't allocate link hash entry");
	q->major = major;
	q->minor = minor;
	q->ino = ino;
	q->mode = mode;
	strcpy(q->name, name);
	q->next = NULL;
	*p = q;
	return NULL;
}

static void __init free_hash(void)
{
	struct hash **p, *q;

	for (p = head; p < head + 32; p++) {
		while (*p) {
			q = *p;
			*p = q->next;
			kfree(q);
		}
	}
}
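
/*
 * With CONFIG_INITRAMFS_PRESERVE_MTIME, files and special nodes get their
 * mtime restored as they are created, while directory timestamps are queued
 * on dir_list and applied by dir_utime() after extraction, so that creating
 * entries inside a directory cannot clobber its mtime.  Without the option
 * these helpers collapse to no-ops.
 */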
#ifdef CONFIG_INITRAMFS_PRESERVE_MTIME
static void __init do_utime(char *filename, time64_t mtime)
{
	struct timespec64 t[2] = { { .tv_sec = mtime }, { .tv_sec = mtime } };

	init_utimes(filename, t);
}

static void __init do_utime_path(const struct path *path, time64_t mtime)
{
	struct timespec64 t[2] = { { .tv_sec = mtime }, { .tv_sec = mtime } };

	vfs_utimes(path, t);
}

static __initdata LIST_HEAD(dir_list);

struct dir_entry {
	struct list_head list;
	time64_t mtime;
	char name[];
};

static void __init dir_add(const char *name, time64_t mtime)
{
	size_t nlen = strlen(name) + 1;
	struct dir_entry *de;

	de = kmalloc(sizeof(struct dir_entry) + nlen, GFP_KERNEL);
	if (!de)
		panic_show_mem("can't allocate dir_entry buffer");
	INIT_LIST_HEAD(&de->list);
	strscpy(de->name, name, nlen);
	de->mtime = mtime;
	list_add(&de->list, &dir_list);
}

static void __init dir_utime(void)
{
	struct dir_entry *de, *tmp;

	list_for_each_entry_safe(de, tmp, &dir_list, list) {
		list_del(&de->list);
		do_utime(de->name, de->mtime);
		kfree(de);
	}
}
#else
static void __init do_utime(char *filename, time64_t mtime) {}
static void __init do_utime_path(const struct path *path, time64_t mtime) {}
static void __init dir_add(const char *name, time64_t mtime) {}
static void __init dir_utime(void) {}
#endif
static __initdata time64_t mtime;

/* cpio header parsing */

static __initdata unsigned long ino, major, minor, nlink;
static __initdata umode_t mode;
static __initdata unsigned long body_len, name_len;
static __initdata uid_t uid;
static __initdata gid_t gid;
static __initdata unsigned rdev;
static __initdata u32 hdr_csum;
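
/*
 * A newc header is the 6-byte magic followed by 13 fixed-width 8-character
 * hexadecimal fields (ino, mode, uid, gid, nlink, mtime, filesize, major,
 * minor, rmajor, rminor, namesize, checksum), 110 bytes in total.
 */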
static void __init parse_header(char *s)
{
	unsigned long parsed[13];
	char buf[9];
	int i;

	buf[8] = '\0';
	for (i = 0, s += 6; i < 13; i++, s += 8) {
		memcpy(buf, s, 8);
		parsed[i] = simple_strtoul(buf, NULL, 16);
	}
	ino = parsed[0];
	mode = parsed[1];
	uid = parsed[2];
	gid = parsed[3];
	nlink = parsed[4];
	mtime = parsed[5]; /* breaks in y2106 */
	body_len = parsed[6];
	major = parsed[7];
	minor = parsed[8];
	rdev = new_encode_dev(MKDEV(parsed[9], parsed[10]));
	name_len = parsed[11];
	hdr_csum = parsed[12];
}
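
/*
 * The unpacker is a small state machine driven by write_buffer(): each state
 * handler consumes bytes from the current input chunk via eat(), picks the
 * next state, and returns nonzero once it can make no further progress with
 * the bytes at hand.
 */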
/* FSM */

static __initdata enum state {
	Start,
	Collect,
	GotHeader,
	SkipIt,
	GotName,
	CopyFile,
	GotSymlink,
	Reset
} state, next_state;

static __initdata char *victim;
static unsigned long byte_count __initdata;
static __initdata loff_t this_header, next_header;

static inline void __init eat(unsigned n)
{
	victim += n;
	this_header += n;
	byte_count -= n;
}

static __initdata char *collected;
static long remains __initdata;
static __initdata char *collect;

static void __init read_into(char *buf, unsigned size, enum state next)
{
	if (byte_count >= size) {
		collected = victim;
		eat(size);
		state = next;
	} else {
		collect = collected = buf;
		remains = size;
		next_state = next;
		state = Collect;
	}
}
static __initdata char *header_buf, *symlink_buf, *name_buf;

static int __init do_start(void)
{
	read_into(header_buf, 110, GotHeader);
	return 0;
}

static int __init do_collect(void)
{
	unsigned long n = remains;

	if (byte_count < n)
		n = byte_count;
	memcpy(collect, victim, n);
	eat(n);
	collect += n;
	if ((remains -= n) != 0)
		return 1;
	state = next_state;
	return 0;
}
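
/*
 * Validate the cpio magic: "070701" is plain newc, "070702" is newc with a
 * per-file checksum, and the legacy "070707" format is rejected with a hint
 * to use the -H newc option.
 */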
static int __init do_header(void)
{
	if (!memcmp(collected, "070701", 6)) {
		csum_present = false;
	} else if (!memcmp(collected, "070702", 6)) {
		csum_present = true;
	} else {
		if (memcmp(collected, "070707", 6) == 0)
			error("incorrect cpio method used: use -H newc option");
		else
			error("no cpio magic");
		return 1;
	}
	parse_header(collected);
	next_header = this_header + N_ALIGN(name_len) + body_len;
	next_header = (next_header + 3) & ~3;
	state = SkipIt;
	if (name_len <= 0 || name_len > PATH_MAX)
		return 0;
	if (S_ISLNK(mode)) {
		if (body_len > PATH_MAX)
			return 0;
		collect = collected = symlink_buf;
		remains = N_ALIGN(name_len) + body_len;
		next_state = GotSymlink;
		state = Collect;
		return 0;
	}
	if (S_ISREG(mode) || !body_len)
		read_into(name_buf, N_ALIGN(name_len), GotName);
	return 0;
}
static int __init do_skip(void)
{
	if (this_header + byte_count < next_header) {
		eat(byte_count);
		return 1;
	} else {
		eat(next_header - this_header);
		state = next_state;
		return 0;
	}
}

static int __init do_reset(void)
{
	while (byte_count && *victim == '\0')
		eat(1);
	if (byte_count && (this_header & 3))
		error("broken padding");
	return 1;
}
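
/*
 * If the target path already exists but has a different file type than the
 * archive entry, remove it first so the new entry can be created cleanly.
 */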
static void __init clean_path(char *path, umode_t fmode)
{
	struct kstat st;

	if (!init_stat(path, &st, AT_SYMLINK_NOFOLLOW) &&
	    (st.mode ^ fmode) & S_IFMT) {
		if (S_ISDIR(st.mode))
			init_rmdir(path);
		else
			init_unlink(path);
	}
}

static int __init maybe_link(void)
{
	if (nlink >= 2) {
		char *old = find_link(major, minor, ino, mode, collected);

		if (old) {
			clean_path(collected, 0);
			return (init_link(old, collected) < 0) ? -1 : 1;
		}
	}
	return 0;
}
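
/*
 * GotName: the full name has been collected.  "TRAILER!!!" marks the end of
 * an archive; otherwise create the entry according to its mode.  Regular
 * files switch the machine into CopyFile so the body can be streamed out.
 */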
static __initdata struct file *wfile;
static __initdata loff_t wfile_pos;

static int __init do_name(void)
{
	state = SkipIt;
	next_state = Reset;
	if (strcmp(collected, "TRAILER!!!") == 0) {
		free_hash();
		return 0;
	}
	clean_path(collected, mode);
	if (S_ISREG(mode)) {
		int ml = maybe_link();

		if (ml >= 0) {
			int openflags = O_WRONLY|O_CREAT;

			if (ml != 1)
				openflags |= O_TRUNC;
			wfile = filp_open(collected, openflags, mode);
			if (IS_ERR(wfile))
				return 0;
			wfile_pos = 0;
			io_csum = 0;

			vfs_fchown(wfile, uid, gid);
			vfs_fchmod(wfile, mode);
			if (body_len)
				vfs_truncate(&wfile->f_path, body_len);
			state = CopyFile;
		}
	} else if (S_ISDIR(mode)) {
		init_mkdir(collected, mode);
		init_chown(collected, uid, gid, 0);
		init_chmod(collected, mode);
		dir_add(collected, mtime);
	} else if (S_ISBLK(mode) || S_ISCHR(mode) ||
		   S_ISFIFO(mode) || S_ISSOCK(mode)) {
		if (maybe_link() == 0) {
			init_mknod(collected, mode, rdev);
			init_chown(collected, uid, gid, 0);
			init_chmod(collected, mode);
			do_utime(collected, mtime);
		}
	}
	return 0;
}
static int __init do_copy(void)
{
	if (byte_count >= body_len) {
		if (xwrite(wfile, victim, body_len, &wfile_pos) != body_len)
			error("write error");

		do_utime_path(&wfile->f_path, mtime);
		fput(wfile);
		if (csum_present && io_csum != hdr_csum)
			error("bad data checksum");
		eat(body_len);
		state = SkipIt;
		return 0;
	} else {
		if (xwrite(wfile, victim, byte_count, &wfile_pos) != byte_count)
			error("write error");
		body_len -= byte_count;
		eat(byte_count);
		return 1;
	}
}

static int __init do_symlink(void)
{
	collected[N_ALIGN(name_len) + body_len] = '\0';
	clean_path(collected, 0);
	init_symlink(collected + N_ALIGN(name_len), collected);
	init_chown(collected, uid, gid, AT_SYMLINK_NOFOLLOW);
	do_utime(collected, mtime);
	state = SkipIt;
	next_state = Reset;
	return 0;
}
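
/*
 * One handler per state; write_buffer() keeps dispatching until a handler
 * signals that the current input chunk is exhausted, then reports how many
 * bytes were consumed.
 */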
static __initdata int (*actions[])(void) = {
	[Start]		= do_start,
	[Collect]	= do_collect,
	[GotHeader]	= do_header,
	[SkipIt]	= do_skip,
	[GotName]	= do_name,
	[CopyFile]	= do_copy,
	[GotSymlink]	= do_symlink,
	[Reset]		= do_reset,
};

static long __init write_buffer(char *buf, unsigned long len)
{
	byte_count = len;
	victim = buf;

	while (!actions[state]())
		;
	return len - byte_count;
}

static long __init flush_buffer(void *bufv, unsigned long len)
{
	char *buf = (char *) bufv;
	long written;
	long origLen = len;

	if (message)
		return -1;
	while ((written = write_buffer(buf, len)) < len && !message) {
		char c = buf[written];

		if (c == '0') {
			buf += written;
			len -= written;
			state = Start;
		} else if (c == 0) {
			buf += written;
			len -= written;
			state = Reset;
		} else
			error("junk within compressed archive");
	}
	return origLen;
}
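
/*
 * Walk the blob at buf, which may be a concatenation of uncompressed cpio
 * archives, zero padding and compressed archives, unpacking each piece in
 * turn.  Returns NULL on success or an error message.
 */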
static unsigned long my_inptr __initdata; /* index of next byte to be processed in inbuf */

#include <linux/decompress/generic.h>

static char * __init unpack_to_rootfs(char *buf, unsigned long len)
{
	long written;
	decompress_fn decompress;
	const char *compress_name;
	static __initdata char msg_buf[64];

	header_buf = kmalloc(110, GFP_KERNEL);
	symlink_buf = kmalloc(PATH_MAX + N_ALIGN(PATH_MAX) + 1, GFP_KERNEL);
	name_buf = kmalloc(N_ALIGN(PATH_MAX), GFP_KERNEL);

	if (!header_buf || !symlink_buf || !name_buf)
		panic_show_mem("can't allocate buffers");

	state = Start;
	this_header = 0;
	message = NULL;
	while (!message && len) {
		loff_t saved_offset = this_header;

		if (*buf == '0' && !(this_header & 3)) {
			state = Start;
			written = write_buffer(buf, len);
			buf += written;
			len -= written;
			continue;
		}
		if (!*buf) {
			buf++;
			len--;
			this_header++;
			continue;
		}
		this_header = 0;
		decompress = decompress_method(buf, len, &compress_name);
		pr_debug("Detected %s compressed data\n", compress_name);
		if (decompress) {
			int res = decompress(buf, len, NULL, flush_buffer, NULL,
					     &my_inptr, error);
			if (res)
				error("decompressor failed");
		} else if (compress_name) {
			if (!message) {
				snprintf(msg_buf, sizeof msg_buf,
					 "compression method %s not configured",
					 compress_name);
				message = msg_buf;
			}
		} else
			error("invalid magic at start of compressed archive");
		if (state != Reset)
			error("junk at the end of compressed archive");
		this_header = saved_offset + my_inptr;
		buf += my_inptr;
		len -= my_inptr;
	}
	dir_utime();
	kfree(name_buf);
	kfree(symlink_buf);
	kfree(header_buf);
	return message;
}
static int __initdata do_retain_initrd;

static int __init retain_initrd_param(char *str)
{
	if (*str)
		return 0;
	do_retain_initrd = 1;
	return 1;
}
__setup("retain_initrd", retain_initrd_param);

#ifdef CONFIG_ARCH_HAS_KEEPINITRD
static int __init keepinitrd_setup(char *__unused)
{
	do_retain_initrd = 1;
	return 1;
}
__setup("keepinitrd", keepinitrd_setup);
#endif

static bool __initdata initramfs_async = true;
static int __init initramfs_async_setup(char *str)
{
	strtobool(str, &initramfs_async);
	return 1;
}
__setup("initramfs_async=", initramfs_async_setup);

extern char __initramfs_start[];
extern unsigned long __initramfs_size;
#include <linux/initrd.h>
#include <linux/kexec.h>
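
/*
 * Reserve the physical pages backing the external initrd (as discovered by
 * the bootloader or device tree) so the page allocator cannot hand them out
 * before the archive has been unpacked.
 */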
void __init reserve_initrd_mem(void)
{
	phys_addr_t start;
	unsigned long size;

	/* Ignore the virtual address computed during device tree parsing */
	initrd_start = initrd_end = 0;

	if (!phys_initrd_size)
		return;
	/*
	 * Round the memory region to page boundaries as per free_initrd_mem()
	 * This allows us to detect whether the pages overlapping the initrd
	 * are in use, but more importantly, reserves the entire set of pages
	 * as we don't want these pages allocated for other purposes.
	 */
	start = round_down(phys_initrd_start, PAGE_SIZE);
	size = phys_initrd_size + (phys_initrd_start - start);
	size = round_up(size, PAGE_SIZE);

	if (!memblock_is_region_memory(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region",
		       (u64)start, size);
		goto disable;
	}

	if (memblock_is_region_reserved(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region\n",
		       (u64)start, size);
		goto disable;
	}

	memblock_reserve(start, size);

	/* Now convert initrd to virtual addresses */
	initrd_start = (unsigned long)__va(phys_initrd_start);
	initrd_end = initrd_start + phys_initrd_size;
	initrd_below_start_ok = 1;

	return;
disable:
	pr_cont(" - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}
void __weak __init free_initrd_mem(unsigned long start, unsigned long end)
{
#ifdef CONFIG_ARCH_KEEP_MEMBLOCK
	unsigned long aligned_start = ALIGN_DOWN(start, PAGE_SIZE);
	unsigned long aligned_end = ALIGN(end, PAGE_SIZE);

	memblock_free((void *)aligned_start, aligned_end - aligned_start);
#endif

	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
			"initrd");
}

#ifdef CONFIG_KEXEC_CORE
static bool __init kexec_free_initrd(void)
{
	unsigned long crashk_start = (unsigned long)__va(crashk_res.start);
	unsigned long crashk_end   = (unsigned long)__va(crashk_res.end);

	/*
	 * If the initrd region is overlapped with crashkernel reserved region,
	 * free only memory that is not part of crashkernel region.
	 */
	if (initrd_start >= crashk_end || initrd_end <= crashk_start)
		return false;

	/*
	 * Initialize the initrd memory region since the kexec boot does not
	 * do so.
	 */
	memset((void *)initrd_start, 0, initrd_end - initrd_start);
	if (initrd_start < crashk_start)
		free_initrd_mem(initrd_start, crashk_start);
	if (initrd_end > crashk_end)
		free_initrd_mem(crashk_end, initrd_end);
	return true;
}
#else
static inline bool kexec_free_initrd(void)
{
	return false;
}
#endif /* CONFIG_KEXEC_CORE */
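
/*
 * The image handed over as an external initrd is not a cpio archive.  Unpack
 * the built-in initramfs instead and save the initrd contents verbatim to
 * /initrd.image so it can be loaded into a RAM disk later in boot.
 */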
#ifdef CONFIG_BLK_DEV_RAM
static void __init populate_initrd_image(char *err)
{
	ssize_t written;
	struct file *file;
	loff_t pos = 0;

	unpack_to_rootfs(__initramfs_start, __initramfs_size);

	printk(KERN_INFO "rootfs image is not initramfs (%s); looks like an initrd\n",
			err);
	file = filp_open("/initrd.image", O_WRONLY | O_CREAT, 0700);
	if (IS_ERR(file))
		return;

	written = xwrite(file, (char *)initrd_start, initrd_end - initrd_start,
			&pos);
	if (written != initrd_end - initrd_start)
		pr_err("/initrd.image: incomplete write (%zd != %ld)\n",
		       written, initrd_end - initrd_start);
	fput(file);
}
#endif /* CONFIG_BLK_DEV_RAM */
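
/*
 * Unpacking is kicked off from populate_rootfs() as async work in its own
 * exclusive domain; wait_for_initramfs() is the synchronization point for
 * code that needs the rootfs contents before the worker has finished.
 */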
static void __init do_populate_rootfs(void *unused, async_cookie_t cookie)
{
	/* Load the built in initramfs */
	char *err = unpack_to_rootfs(__initramfs_start, __initramfs_size);
	if (err)
		panic_show_mem("%s", err); /* Failed to decompress INTERNAL initramfs */

	if (!initrd_start || IS_ENABLED(CONFIG_INITRAMFS_FORCE))
		goto done;

	if (IS_ENABLED(CONFIG_BLK_DEV_RAM))
		printk(KERN_INFO "Trying to unpack rootfs image as initramfs...\n");
	else
		printk(KERN_INFO "Unpacking initramfs...\n");

	err = unpack_to_rootfs((char *)initrd_start, initrd_end - initrd_start);
	if (err) {
#ifdef CONFIG_BLK_DEV_RAM
		populate_initrd_image(err);
#else
		printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err);
#endif
	}

done:
	/*
	 * If the initrd region is overlapped with crashkernel reserved region,
	 * free only memory that is not part of crashkernel region.
	 */
	if (!do_retain_initrd && initrd_start && !kexec_free_initrd())
		free_initrd_mem(initrd_start, initrd_end);

	initrd_start = 0;
	initrd_end = 0;

	flush_delayed_fput();
	task_work_run();
}
static ASYNC_DOMAIN_EXCLUSIVE(initramfs_domain);
static async_cookie_t initramfs_cookie;

void wait_for_initramfs(void)
{
	if (!initramfs_cookie) {
		/*
		 * Something before rootfs_initcall wants to access
		 * the filesystem/initramfs. Probably a bug. Make a
		 * note, avoid deadlocking the machine, and let the
		 * caller's access fail as it used to.
		 */
		pr_warn_once("wait_for_initramfs() called before rootfs_initcalls\n");
		return;
	}
	async_synchronize_cookie_domain(initramfs_cookie + 1, &initramfs_domain);
}
EXPORT_SYMBOL_GPL(wait_for_initramfs);

static int __init populate_rootfs(void)
{
	initramfs_cookie = async_schedule_domain(do_populate_rootfs, NULL,
						 &initramfs_domain);
	usermodehelper_enable();
	if (!initramfs_async)
		wait_for_initramfs();
	return 0;
}
rootfs_initcall(populate_rootfs);