fdt.c 35 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Functions for working with the Flattened Device Tree data format
  4. *
  5. * Copyright 2009 Benjamin Herrenschmidt, IBM Corp
  6. * [email protected]
  7. */
  8. #define pr_fmt(fmt) "OF: fdt: " fmt
  9. #include <linux/crash_dump.h>
  10. #include <linux/crc32.h>
  11. #include <linux/kernel.h>
  12. #include <linux/initrd.h>
  13. #include <linux/memblock.h>
  14. #include <linux/mutex.h>
  15. #include <linux/of.h>
  16. #include <linux/of_fdt.h>
  17. #include <linux/of_reserved_mem.h>
  18. #include <linux/sizes.h>
  19. #include <linux/string.h>
  20. #include <linux/errno.h>
  21. #include <linux/slab.h>
  22. #include <linux/libfdt.h>
  23. #include <linux/debugfs.h>
  24. #include <linux/serial_core.h>
  25. #include <linux/sysfs.h>
  26. #include <linux/random.h>
  27. #include <asm/setup.h> /* for COMMAND_LINE_SIZE */
  28. #include <asm/page.h>
  29. #include "of_private.h"
  30. /*
  31. * of_fdt_limit_memory - limit the number of regions in the /memory node
  32. * @limit: maximum entries
  33. *
  34. * Adjust the flattened device tree to have at most 'limit' number of
  35. * memory entries in the /memory node. This function may be called
  36. * any time after initial_boot_param is set.
  37. */
  38. void __init of_fdt_limit_memory(int limit)
  39. {
  40. int memory;
  41. int len;
  42. const void *val;
  43. int nr_address_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
  44. int nr_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
  45. const __be32 *addr_prop;
  46. const __be32 *size_prop;
  47. int root_offset;
  48. int cell_size;
  49. root_offset = fdt_path_offset(initial_boot_params, "/");
  50. if (root_offset < 0)
  51. return;
  52. addr_prop = fdt_getprop(initial_boot_params, root_offset,
  53. "#address-cells", NULL);
  54. if (addr_prop)
  55. nr_address_cells = fdt32_to_cpu(*addr_prop);
  56. size_prop = fdt_getprop(initial_boot_params, root_offset,
  57. "#size-cells", NULL);
  58. if (size_prop)
  59. nr_size_cells = fdt32_to_cpu(*size_prop);
  60. cell_size = sizeof(uint32_t)*(nr_address_cells + nr_size_cells);
  61. memory = fdt_path_offset(initial_boot_params, "/memory");
  62. if (memory > 0) {
  63. val = fdt_getprop(initial_boot_params, memory, "reg", &len);
  64. if (len > limit*cell_size) {
  65. len = limit*cell_size;
  66. pr_debug("Limiting number of entries to %d\n", limit);
  67. fdt_setprop(initial_boot_params, memory, "reg", val,
  68. len);
  69. }
  70. }
  71. }
  72. static bool of_fdt_device_is_available(const void *blob, unsigned long node)
  73. {
  74. const char *status = fdt_getprop(blob, node, "status", NULL);
  75. if (!status)
  76. return true;
  77. if (!strcmp(status, "ok") || !strcmp(status, "okay"))
  78. return true;
  79. return false;
  80. }
  81. static void *unflatten_dt_alloc(void **mem, unsigned long size,
  82. unsigned long align)
  83. {
  84. void *res;
  85. *mem = PTR_ALIGN(*mem, align);
  86. res = *mem;
  87. *mem += size;
  88. return res;
  89. }
/*
 * Convert the flat properties of the node at @offset into struct property
 * entries chained onto @np, carving storage out of *@mem.  With @dryrun
 * set, only the allocation bookkeeping is performed so callers can size
 * the buffer on a first pass; no pointers are written.
 */
static void populate_properties(const void *blob,
				int offset,
				void **mem,
				struct device_node *np,
				const char *nodename,
				bool dryrun)
{
	struct property *pp, **pprev = NULL;
	int cur;
	bool has_name = false;

	pprev = &np->properties;
	for (cur = fdt_first_property_offset(blob, offset);
	     cur >= 0;
	     cur = fdt_next_property_offset(blob, cur)) {
		const __be32 *val;
		const char *pname;
		u32 sz;

		val = fdt_getprop_by_offset(blob, cur, &pname, &sz);
		if (!val) {
			pr_warn("Cannot locate property at 0x%x\n", cur);
			continue;
		}

		if (!pname) {
			pr_warn("Cannot find property name at 0x%x\n", cur);
			continue;
		}

		if (!strcmp(pname, "name"))
			has_name = true;

		/* Allocate even on dryrun so both passes size identically. */
		pp = unflatten_dt_alloc(mem, sizeof(struct property),
					__alignof__(struct property));
		if (dryrun)
			continue;

		/* We accept flattened tree phandles either in
		 * ePAPR-style "phandle" properties, or the
		 * legacy "linux,phandle" properties.  If both
		 * appear and have different values, things
		 * will get weird. Don't do that.
		 */
		if (!strcmp(pname, "phandle") ||
		    !strcmp(pname, "linux,phandle")) {
			if (!np->phandle)
				np->phandle = be32_to_cpup(val);
		}

		/* And we process the "ibm,phandle" property
		 * used in pSeries dynamic device tree
		 * stuff
		 */
		if (!strcmp(pname, "ibm,phandle"))
			np->phandle = be32_to_cpup(val);

		/* Name and value point straight into the FDT blob. */
		pp->name   = (char *)pname;
		pp->length = sz;
		pp->value  = (__be32 *)val;
		*pprev     = pp;
		pprev      = &pp->next;
	}

	/* With version 0x10 we may not have the name property,
	 * recreate it here from the unit name if absent
	 */
	if (!has_name) {
		const char *p = nodename, *ps = p, *pa = NULL;
		int len;

		/* ps: start of last path component, pa: last '@' (unit addr) */
		while (*p) {
			if ((*p) == '@')
				pa = p;
			else if ((*p) == '/')
				ps = p + 1;
			p++;
		}

		/* No '@' after the last '/': the whole component is the name */
		if (pa < ps)
			pa = p;
		len = (pa - ps) + 1;

		/* Property struct and its string value share one allocation */
		pp = unflatten_dt_alloc(mem, sizeof(struct property) + len,
					__alignof__(struct property));
		if (!dryrun) {
			pp->name   = "name";
			pp->length = len;
			pp->value  = pp + 1;
			*pprev     = pp;
			memcpy(pp->value, ps, len - 1);
			((char *)pp->value)[len - 1] = 0;
			pr_debug("fixed up name for %s -> %s\n",
				 nodename, (char *)pp->value);
		}
	}
}
  175. static int populate_node(const void *blob,
  176. int offset,
  177. void **mem,
  178. struct device_node *dad,
  179. struct device_node **pnp,
  180. bool dryrun)
  181. {
  182. struct device_node *np;
  183. const char *pathp;
  184. int len;
  185. pathp = fdt_get_name(blob, offset, &len);
  186. if (!pathp) {
  187. *pnp = NULL;
  188. return len;
  189. }
  190. len++;
  191. np = unflatten_dt_alloc(mem, sizeof(struct device_node) + len,
  192. __alignof__(struct device_node));
  193. if (!dryrun) {
  194. char *fn;
  195. of_node_init(np);
  196. np->full_name = fn = ((char *)np) + sizeof(*np);
  197. memcpy(fn, pathp, len);
  198. if (dad != NULL) {
  199. np->parent = dad;
  200. np->sibling = dad->child;
  201. dad->child = np;
  202. }
  203. }
  204. populate_properties(blob, offset, mem, np, pathp, dryrun);
  205. if (!dryrun) {
  206. np->name = of_get_property(np, "name", NULL);
  207. if (!np->name)
  208. np->name = "<NULL>";
  209. }
  210. *pnp = np;
  211. return 0;
  212. }
  213. static void reverse_nodes(struct device_node *parent)
  214. {
  215. struct device_node *child, *next;
  216. /* In-depth first */
  217. child = parent->child;
  218. while (child) {
  219. reverse_nodes(child);
  220. child = child->sibling;
  221. }
  222. /* Reverse the nodes in the child list */
  223. child = parent->child;
  224. parent->child = NULL;
  225. while (child) {
  226. next = child->sibling;
  227. child->sibling = parent->child;
  228. parent->child = child;
  229. child = next;
  230. }
  231. }
  232. /**
  233. * unflatten_dt_nodes - Alloc and populate a device_node from the flat tree
  234. * @blob: The parent device tree blob
  235. * @mem: Memory chunk to use for allocating device nodes and properties
  236. * @dad: Parent struct device_node
  237. * @nodepp: The device_node tree created by the call
  238. *
  239. * Return: The size of unflattened device tree or error code
  240. */
static int unflatten_dt_nodes(const void *blob,
			      void *mem,
			      struct device_node *dad,
			      struct device_node **nodepp)
{
	struct device_node *root;
	int offset = 0, depth = 0, initial_depth = 0;
#define FDT_MAX_DEPTH	64
	/* Parent at each depth while walking; nps[depth] is current parent. */
	struct device_node *nps[FDT_MAX_DEPTH];
	void *base = mem;
	bool dryrun = !base;	/* NULL @mem: sizing pass, write nothing */
	int ret;

	if (nodepp)
		*nodepp = NULL;

	/*
	 * We're unflattening device sub-tree if @dad is valid. There are
	 * possibly multiple nodes in the first level of depth. We need
	 * set @depth to 1 to make fdt_next_node() happy as it bails
	 * immediately when negative @depth is found. Otherwise, the device
	 * nodes except the first one won't be unflattened successfully.
	 */
	if (dad)
		depth = initial_depth = 1;

	root = dad;
	nps[depth] = dad;

	for (offset = 0;
	     offset >= 0 && depth >= initial_depth;
	     offset = fdt_next_node(blob, offset, &depth)) {
		/* Too-deep nodes are skipped rather than treated as fatal. */
		if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH - 1))
			continue;

		/* Without sysfs (OF_KOBJ), disabled nodes need not be kept. */
		if (!IS_ENABLED(CONFIG_OF_KOBJ) &&
		    !of_fdt_device_is_available(blob, offset))
			continue;

		ret = populate_node(blob, offset, &mem, nps[depth],
				    &nps[depth+1], dryrun);
		if (ret < 0)
			return ret;

		/* First real node becomes the caller-visible tree root. */
		if (!dryrun && nodepp && !*nodepp)
			*nodepp = nps[depth+1];
		if (!dryrun && !root)
			root = nps[depth+1];
	}

	if (offset < 0 && offset != -FDT_ERR_NOTFOUND) {
		pr_err("Error %d processing FDT\n", offset);
		return -EINVAL;
	}

	/*
	 * Reverse the child list. Some drivers assumes node order matches .dts
	 * node order
	 */
	if (!dryrun)
		reverse_nodes(root);

	/* Number of bytes consumed from @mem (the size when sizing). */
	return mem - base;
}
  295. /**
  296. * __unflatten_device_tree - create tree of device_nodes from flat blob
  297. * @blob: The blob to expand
  298. * @dad: Parent device node
  299. * @mynodes: The device_node tree created by the call
  300. * @dt_alloc: An allocator that provides a virtual address to memory
  301. * for the resulting tree
  302. * @detached: if true set OF_DETACHED on @mynodes
  303. *
  304. * unflattens a device-tree, creating the tree of struct device_node. It also
  305. * fills the "name" and "type" pointers of the nodes so the normal device-tree
  306. * walking functions can be used.
  307. *
  308. * Return: NULL on failure or the memory chunk containing the unflattened
  309. * device tree on success.
  310. */
void *__unflatten_device_tree(const void *blob,
			      struct device_node *dad,
			      struct device_node **mynodes,
			      void *(*dt_alloc)(u64 size, u64 align),
			      bool detached)
{
	int size;
	void *mem;
	int ret;

	if (mynodes)
		*mynodes = NULL;

	pr_debug(" -> unflatten_device_tree()\n");

	if (!blob) {
		pr_debug("No device tree pointer\n");
		return NULL;
	}

	pr_debug("Unflattening device tree:\n");
	pr_debug("magic: %08x\n", fdt_magic(blob));
	pr_debug("size: %08x\n", fdt_totalsize(blob));
	pr_debug("version: %08x\n", fdt_version(blob));

	if (fdt_check_header(blob)) {
		pr_err("Invalid device tree blob header\n");
		return NULL;
	}

	/* First pass, scan for size */
	size = unflatten_dt_nodes(blob, NULL, dad, NULL);
	if (size <= 0)
		return NULL;

	size = ALIGN(size, 4);
	pr_debug(" size is %d, allocating...\n", size);

	/* Allocate memory for the expanded device tree */
	mem = dt_alloc(size + 4, __alignof__(struct device_node));
	if (!mem)
		return NULL;

	memset(mem, 0, size);

	/* Sentinel past the end so overruns by the second pass are caught. */
	*(__be32 *)(mem + size) = cpu_to_be32(0xdeadbeef);

	pr_debug(" unflattening %p...\n", mem);

	/* Second pass, do actual unflattening */
	ret = unflatten_dt_nodes(blob, mem, dad, mynodes);

	if (be32_to_cpup(mem + size) != 0xdeadbeef)
		pr_warn("End of tree marker overwritten: %08x\n",
			be32_to_cpup(mem + size));

	if (ret <= 0)
		return NULL;

	if (detached && mynodes && *mynodes) {
		of_node_set_flag(*mynodes, OF_DETACHED);
		pr_debug("unflattened tree is detached\n");
	}

	pr_debug(" <- unflatten_device_tree()\n");
	return mem;
}
EXPORT_SYMBOL_GPL(__unflatten_device_tree);
  363. static void *kernel_tree_alloc(u64 size, u64 align)
  364. {
  365. return kzalloc(size, GFP_KERNEL);
  366. }
  367. static DEFINE_MUTEX(of_fdt_unflatten_mutex);
  368. /**
  369. * of_fdt_unflatten_tree - create tree of device_nodes from flat blob
  370. * @blob: Flat device tree blob
  371. * @dad: Parent device node
  372. * @mynodes: The device tree created by the call
  373. *
  374. * unflattens the device-tree passed by the firmware, creating the
  375. * tree of struct device_node. It also fills the "name" and "type"
  376. * pointers of the nodes so the normal device-tree walking functions
  377. * can be used.
  378. *
  379. * Return: NULL on failure or the memory chunk containing the unflattened
  380. * device tree on success.
  381. */
  382. void *of_fdt_unflatten_tree(const unsigned long *blob,
  383. struct device_node *dad,
  384. struct device_node **mynodes)
  385. {
  386. void *mem;
  387. mutex_lock(&of_fdt_unflatten_mutex);
  388. mem = __unflatten_device_tree(blob, dad, mynodes, &kernel_tree_alloc,
  389. true);
  390. mutex_unlock(&of_fdt_unflatten_mutex);
  391. return mem;
  392. }
  393. EXPORT_SYMBOL_GPL(of_fdt_unflatten_tree);
  394. /* Everything below here references initial_boot_params directly. */
  395. int __initdata dt_root_addr_cells;
  396. int __initdata dt_root_size_cells;
  397. void *initial_boot_params __ro_after_init;
  398. #ifdef CONFIG_OF_EARLY_FLATTREE
  399. static u32 of_fdt_crc32;
  400. static int __init early_init_dt_reserve_memory(phys_addr_t base,
  401. phys_addr_t size, bool nomap)
  402. {
  403. if (nomap) {
  404. /*
  405. * If the memory is already reserved (by another region), we
  406. * should not allow it to be marked nomap, but don't worry
  407. * if the region isn't memory as it won't be mapped.
  408. */
  409. if (memblock_overlaps_region(&memblock.memory, base, size) &&
  410. memblock_is_region_reserved(base, size))
  411. return -EBUSY;
  412. return memblock_mark_nomap(base, size);
  413. }
  414. return memblock_reserve(base, size);
  415. }
  416. /*
  417. * __reserved_mem_reserve_reg() - reserve all memory described in 'reg' property
  418. */
static int __init __reserved_mem_reserve_reg(unsigned long node,
					     const char *uname)
{
	/* Byte length of one (address, size) pair using the root cell counts. */
	int t_len = (dt_root_addr_cells + dt_root_size_cells) * sizeof(__be32);
	phys_addr_t base, size;
	int len;
	const __be32 *prop;
	int first = 1;
	bool nomap;

	prop = of_get_flat_dt_prop(node, "reg", &len);
	if (!prop)
		return -ENOENT;

	/* "reg" must contain a whole number of (address, size) pairs. */
	if (len && len % t_len != 0) {
		pr_err("Reserved memory: invalid reg property in '%s', skipping node.\n",
		       uname);
		return -EINVAL;
	}

	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;

	while (len >= t_len) {
		/* dt_mem_next_cell() advances @prop past each consumed cell. */
		base = dt_mem_next_cell(dt_root_addr_cells, &prop);
		size = dt_mem_next_cell(dt_root_size_cells, &prop);

		if (size &&
		    early_init_dt_reserve_memory(base, size, nomap) == 0)
			pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
				 uname, &base, (unsigned long)(size / SZ_1M));
		else
			pr_err("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
			       uname, &base, (unsigned long)(size / SZ_1M));

		len -= t_len;
		/* Only the first range is registered with the rmem core. */
		if (first) {
			fdt_reserved_mem_save_node(node, uname, base, size);
			first = 0;
		}
	}
	return 0;
}
  455. /*
  456. * __reserved_mem_check_root() - check if #size-cells, #address-cells provided
  457. * in /reserved-memory matches the values supported by the current implementation,
  458. * also check if ranges property has been provided
  459. */
  460. static int __init __reserved_mem_check_root(unsigned long node)
  461. {
  462. const __be32 *prop;
  463. prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
  464. if (!prop || be32_to_cpup(prop) != dt_root_size_cells)
  465. return -EINVAL;
  466. prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
  467. if (!prop || be32_to_cpup(prop) != dt_root_addr_cells)
  468. return -EINVAL;
  469. prop = of_get_flat_dt_prop(node, "ranges", NULL);
  470. if (!prop)
  471. return -EINVAL;
  472. return 0;
  473. }
  474. /*
  475. * fdt_scan_reserved_mem() - scan a single FDT node for reserved memory
  476. */
  477. static int __init fdt_scan_reserved_mem(void)
  478. {
  479. int node, child;
  480. const void *fdt = initial_boot_params;
  481. node = fdt_path_offset(fdt, "/reserved-memory");
  482. if (node < 0)
  483. return -ENODEV;
  484. if (__reserved_mem_check_root(node) != 0) {
  485. pr_err("Reserved memory: unsupported node format, ignoring\n");
  486. return -EINVAL;
  487. }
  488. fdt_for_each_subnode(child, fdt, node) {
  489. const char *uname;
  490. int err;
  491. if (!of_fdt_device_is_available(fdt, child))
  492. continue;
  493. uname = fdt_get_name(fdt, child, NULL);
  494. err = __reserved_mem_reserve_reg(child, uname);
  495. if (err == -ENOENT && of_get_flat_dt_prop(child, "size", NULL))
  496. fdt_reserved_mem_save_node(child, uname, 0, 0);
  497. }
  498. return 0;
  499. }
  500. /*
  501. * fdt_reserve_elfcorehdr() - reserves memory for elf core header
  502. *
  503. * This function reserves the memory occupied by an elf core header
  504. * described in the device tree. This region contains all the
  505. * information about primary kernel's core image and is used by a dump
  506. * capture kernel to access the system memory on primary kernel.
  507. */
  508. static void __init fdt_reserve_elfcorehdr(void)
  509. {
  510. if (!IS_ENABLED(CONFIG_CRASH_DUMP) || !elfcorehdr_size)
  511. return;
  512. if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
  513. pr_warn("elfcorehdr is overlapped\n");
  514. return;
  515. }
  516. memblock_reserve(elfcorehdr_addr, elfcorehdr_size);
  517. pr_info("Reserving %llu KiB of memory at 0x%llx for elfcorehdr\n",
  518. elfcorehdr_size >> 10, elfcorehdr_addr);
  519. }
  520. /**
  521. * early_init_fdt_scan_reserved_mem() - create reserved memory regions
  522. *
  523. * This function grabs memory from early allocator for device exclusive use
  524. * defined in device tree structures. It should be called by arch specific code
  525. * once the early allocator (i.e. memblock) has been fully activated.
  526. */
  527. void __init early_init_fdt_scan_reserved_mem(void)
  528. {
  529. int n;
  530. u64 base, size;
  531. if (!initial_boot_params)
  532. return;
  533. /* Process header /memreserve/ fields */
  534. for (n = 0; ; n++) {
  535. fdt_get_mem_rsv(initial_boot_params, n, &base, &size);
  536. if (!size)
  537. break;
  538. memblock_reserve(base, size);
  539. }
  540. fdt_scan_reserved_mem();
  541. fdt_reserve_elfcorehdr();
  542. fdt_init_reserved_mem();
  543. }
  544. /**
  545. * early_init_fdt_reserve_self() - reserve the memory used by the FDT blob
  546. */
  547. void __init early_init_fdt_reserve_self(void)
  548. {
  549. if (!initial_boot_params)
  550. return;
  551. /* Reserve the dtb region */
  552. memblock_reserve(__pa(initial_boot_params),
  553. fdt_totalsize(initial_boot_params));
  554. }
  555. /**
  556. * of_scan_flat_dt - scan flattened tree blob and call callback on each.
  557. * @it: callback function
  558. * @data: context data pointer
  559. *
  560. * This function is used to scan the flattened device-tree, it is
  561. * used to extract the memory information at boot before we can
  562. * unflatten the tree
  563. */
  564. int __init of_scan_flat_dt(int (*it)(unsigned long node,
  565. const char *uname, int depth,
  566. void *data),
  567. void *data)
  568. {
  569. const void *blob = initial_boot_params;
  570. const char *pathp;
  571. int offset, rc = 0, depth = -1;
  572. if (!blob)
  573. return 0;
  574. for (offset = fdt_next_node(blob, -1, &depth);
  575. offset >= 0 && depth >= 0 && !rc;
  576. offset = fdt_next_node(blob, offset, &depth)) {
  577. pathp = fdt_get_name(blob, offset, NULL);
  578. rc = it(offset, pathp, depth, data);
  579. }
  580. return rc;
  581. }
  582. /**
  583. * of_scan_flat_dt_subnodes - scan sub-nodes of a node call callback on each.
  584. * @parent: parent node
  585. * @it: callback function
  586. * @data: context data pointer
  587. *
  588. * This function is used to scan sub-nodes of a node.
  589. */
  590. int __init of_scan_flat_dt_subnodes(unsigned long parent,
  591. int (*it)(unsigned long node,
  592. const char *uname,
  593. void *data),
  594. void *data)
  595. {
  596. const void *blob = initial_boot_params;
  597. int node;
  598. fdt_for_each_subnode(node, blob, parent) {
  599. const char *pathp;
  600. int rc;
  601. pathp = fdt_get_name(blob, node, NULL);
  602. rc = it(node, pathp, data);
  603. if (rc)
  604. return rc;
  605. }
  606. return 0;
  607. }
  608. /**
  609. * of_get_flat_dt_subnode_by_name - get the subnode by given name
  610. *
  611. * @node: the parent node
  612. * @uname: the name of subnode
  613. * @return offset of the subnode, or -FDT_ERR_NOTFOUND if there is none
  614. */
  615. int __init of_get_flat_dt_subnode_by_name(unsigned long node, const char *uname)
  616. {
  617. return fdt_subnode_offset(initial_boot_params, node, uname);
  618. }
  619. /*
  620. * of_get_flat_dt_root - find the root node in the flat blob
  621. */
  622. unsigned long __init of_get_flat_dt_root(void)
  623. {
  624. return 0;
  625. }
  626. /*
  627. * of_get_flat_dt_prop - Given a node in the flat blob, return the property ptr
  628. *
  629. * This function can be used within scan_flattened_dt callback to get
  630. * access to properties
  631. */
  632. const void *__init of_get_flat_dt_prop(unsigned long node, const char *name,
  633. int *size)
  634. {
  635. return fdt_getprop(initial_boot_params, node, name, size);
  636. }
  637. /**
  638. * of_fdt_is_compatible - Return true if given node from the given blob has
  639. * compat in its compatible list
  640. * @blob: A device tree blob
  641. * @node: node to test
  642. * @compat: compatible string to compare with compatible list.
  643. *
  644. * Return: a non-zero value on match with smaller values returned for more
  645. * specific compatible values.
  646. */
  647. static int of_fdt_is_compatible(const void *blob,
  648. unsigned long node, const char *compat)
  649. {
  650. const char *cp;
  651. int cplen;
  652. unsigned long l, score = 0;
  653. cp = fdt_getprop(blob, node, "compatible", &cplen);
  654. if (cp == NULL)
  655. return 0;
  656. while (cplen > 0) {
  657. score++;
  658. if (of_compat_cmp(cp, compat, strlen(compat)) == 0)
  659. return score;
  660. l = strlen(cp) + 1;
  661. cp += l;
  662. cplen -= l;
  663. }
  664. return 0;
  665. }
  666. /**
  667. * of_flat_dt_is_compatible - Return true if given node has compat in compatible list
  668. * @node: node to test
  669. * @compat: compatible string to compare with compatible list.
  670. */
  671. int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
  672. {
  673. return of_fdt_is_compatible(initial_boot_params, node, compat);
  674. }
  675. /*
  676. * of_flat_dt_match - Return true if node matches a list of compatible values
  677. */
  678. static int __init of_flat_dt_match(unsigned long node, const char *const *compat)
  679. {
  680. unsigned int tmp, score = 0;
  681. if (!compat)
  682. return 0;
  683. while (*compat) {
  684. tmp = of_fdt_is_compatible(initial_boot_params, node, *compat);
  685. if (tmp && (score == 0 || (tmp < score)))
  686. score = tmp;
  687. compat++;
  688. }
  689. return score;
  690. }
  691. /*
  692. * of_get_flat_dt_phandle - Given a node in the flat blob, return the phandle
  693. */
  694. uint32_t __init of_get_flat_dt_phandle(unsigned long node)
  695. {
  696. return fdt_get_phandle(initial_boot_params, node);
  697. }
  698. const char * __init of_flat_dt_get_machine_name(void)
  699. {
  700. const char *name;
  701. unsigned long dt_root = of_get_flat_dt_root();
  702. name = of_get_flat_dt_prop(dt_root, "model", NULL);
  703. if (!name)
  704. name = of_get_flat_dt_prop(dt_root, "compatible", NULL);
  705. return name;
  706. }
  707. /**
  708. * of_flat_dt_match_machine - Iterate match tables to find matching machine.
  709. *
  710. * @default_match: A machine specific ptr to return in case of no match.
  711. * @get_next_compat: callback function to return next compatible match table.
  712. *
  713. * Iterate through machine match tables to find the best match for the machine
  714. * compatible string in the FDT.
  715. */
const void * __init of_flat_dt_match_machine(const void *default_match,
		const void * (*get_next_compat)(const char * const**))
{
	const void *data = NULL;
	const void *best_data = default_match;
	const char *const *compat;
	unsigned long dt_root;
	/* ~1 starts above every achievable score; lower score = more specific */
	unsigned int best_score = ~1, score = 0;

	dt_root = of_get_flat_dt_root();
	while ((data = get_next_compat(&compat))) {
		score = of_flat_dt_match(dt_root, compat);
		if (score > 0 && score < best_score) {
			best_data = data;
			best_score = score;
		}
	}
	if (!best_data) {
		const char *prop;
		int size;

		pr_err("\n unrecognized device tree list:\n[ ");

		prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
		if (prop) {
			/* Dump every entry of the root "compatible" list. */
			while (size > 0) {
				printk("'%s' ", prop);
				size -= strlen(prop) + 1;
				prop += strlen(prop) + 1;
			}
		}
		printk("]\n\n");
		return NULL;
	}

	pr_info("Machine model: %s\n", of_flat_dt_get_machine_name());

	return best_data;
}
  750. static void __early_init_dt_declare_initrd(unsigned long start,
  751. unsigned long end)
  752. {
  753. /* ARM64 would cause a BUG to occur here when CONFIG_DEBUG_VM is
  754. * enabled since __va() is called too early. ARM64 does make use
  755. * of phys_initrd_start/phys_initrd_size so we can skip this
  756. * conversion.
  757. */
  758. if (!IS_ENABLED(CONFIG_ARM64)) {
  759. initrd_start = (unsigned long)__va(start);
  760. initrd_end = (unsigned long)__va(end);
  761. initrd_below_start_ok = 1;
  762. }
  763. }
  764. /**
  765. * early_init_dt_check_for_initrd - Decode initrd location from flat tree
  766. * @node: reference to node containing initrd location ('chosen')
  767. */
  768. static void __init early_init_dt_check_for_initrd(unsigned long node)
  769. {
  770. u64 start, end;
  771. int len;
  772. const __be32 *prop;
  773. if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
  774. return;
  775. pr_debug("Looking for initrd properties... ");
  776. prop = of_get_flat_dt_prop(node, "linux,initrd-start", &len);
  777. if (!prop)
  778. return;
  779. start = of_read_number(prop, len/4);
  780. prop = of_get_flat_dt_prop(node, "linux,initrd-end", &len);
  781. if (!prop)
  782. return;
  783. end = of_read_number(prop, len/4);
  784. if (start > end)
  785. return;
  786. __early_init_dt_declare_initrd(start, end);
  787. phys_initrd_start = start;
  788. phys_initrd_size = end - start;
  789. pr_debug("initrd_start=0x%llx initrd_end=0x%llx\n", start, end);
  790. }
  791. /**
  792. * early_init_dt_check_for_elfcorehdr - Decode elfcorehdr location from flat
  793. * tree
  794. * @node: reference to node containing elfcorehdr location ('chosen')
  795. */
  796. static void __init early_init_dt_check_for_elfcorehdr(unsigned long node)
  797. {
  798. const __be32 *prop;
  799. int len;
  800. if (!IS_ENABLED(CONFIG_CRASH_DUMP))
  801. return;
  802. pr_debug("Looking for elfcorehdr property... ");
  803. prop = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len);
  804. if (!prop || (len < (dt_root_addr_cells + dt_root_size_cells)))
  805. return;
  806. elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, &prop);
  807. elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, &prop);
  808. pr_debug("elfcorehdr_start=0x%llx elfcorehdr_size=0x%llx\n",
  809. elfcorehdr_addr, elfcorehdr_size);
  810. }
/*
 * Flat-tree offset of the /chosen node, cached by early_init_dt_scan_chosen()
 * for later use by early_init_dt_check_for_usable_mem_range(). Holds the
 * negative sentinel -FDT_ERR_NOTFOUND until a /chosen node is located.
 */
static unsigned long chosen_node_offset = -FDT_ERR_NOTFOUND;
/*
 * The main usage of linux,usable-memory-range is for the crash dump kernel.
 * Originally there was only ever one usable-memory region; now there may be
 * two: a low region and a high region.
 * To maintain compatibility with existing user-space and older kdump tools,
 * the low region is always the last range of linux,usable-memory-range,
 * if it exists.
 */
  819. #define MAX_USABLE_RANGES 2
/**
 * early_init_dt_check_for_usable_mem_range - Decode usable memory range
 * location from flat tree
 *
 * Parses the "linux,usable-memory-range" property of the cached /chosen
 * node (up to MAX_USABLE_RANGES base/size pairs), caps the memblock view
 * to the first range, then re-adds any remaining ranges.
 */
void __init early_init_dt_check_for_usable_mem_range(void)
{
	struct memblock_region rgn[MAX_USABLE_RANGES] = {0};
	const __be32 *prop, *endp;
	int len, i;
	unsigned long node = chosen_node_offset;

	/* No /chosen node was found earlier; nothing to parse. */
	if ((long)node < 0)
		return;

	pr_debug("Looking for usable-memory-range property... ");

	prop = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
	if (!prop || (len % (dt_root_addr_cells + dt_root_size_cells)))
		return;

	endp = prop + (len / sizeof(__be32));
	for (i = 0; i < MAX_USABLE_RANGES && prop < endp; i++) {
		rgn[i].base = dt_mem_next_cell(dt_root_addr_cells, &prop);
		rgn[i].size = dt_mem_next_cell(dt_root_size_cells, &prop);

		pr_debug("cap_mem_regions[%d]: base=%pa, size=%pa\n",
			 i, &rgn[i].base, &rgn[i].size);
	}

	/* Restrict memory to the first range, then add back the others. */
	memblock_cap_memory_range(rgn[0].base, rgn[0].size);
	for (i = 1; i < MAX_USABLE_RANGES && rgn[i].size; i++)
		memblock_add(rgn[i].base, rgn[i].size);
}
  847. #ifdef CONFIG_SERIAL_EARLYCON
  848. int __init early_init_dt_scan_chosen_stdout(void)
  849. {
  850. int offset;
  851. const char *p, *q, *options = NULL;
  852. int l;
  853. const struct earlycon_id *match;
  854. const void *fdt = initial_boot_params;
  855. int ret;
  856. offset = fdt_path_offset(fdt, "/chosen");
  857. if (offset < 0)
  858. offset = fdt_path_offset(fdt, "/chosen@0");
  859. if (offset < 0)
  860. return -ENOENT;
  861. p = fdt_getprop(fdt, offset, "stdout-path", &l);
  862. if (!p)
  863. p = fdt_getprop(fdt, offset, "linux,stdout-path", &l);
  864. if (!p || !l)
  865. return -ENOENT;
  866. q = strchrnul(p, ':');
  867. if (*q != '\0')
  868. options = q + 1;
  869. l = q - p;
  870. /* Get the node specified by stdout-path */
  871. offset = fdt_path_offset_namelen(fdt, p, l);
  872. if (offset < 0) {
  873. pr_warn("earlycon: stdout-path %.*s not found\n", l, p);
  874. return 0;
  875. }
  876. for (match = __earlycon_table; match < __earlycon_table_end; match++) {
  877. if (!match->compatible[0])
  878. continue;
  879. if (fdt_node_check_compatible(fdt, offset, match->compatible))
  880. continue;
  881. ret = of_setup_earlycon(match, offset, options);
  882. if (!ret || ret == -EALREADY)
  883. return 0;
  884. }
  885. return -ENODEV;
  886. }
  887. #endif
  888. /*
  889. * early_init_dt_scan_root - fetch the top level address and size cells
  890. */
  891. int __init early_init_dt_scan_root(void)
  892. {
  893. const __be32 *prop;
  894. const void *fdt = initial_boot_params;
  895. int node = fdt_path_offset(fdt, "/");
  896. if (node < 0)
  897. return -ENODEV;
  898. dt_root_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
  899. dt_root_addr_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
  900. prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
  901. if (prop)
  902. dt_root_size_cells = be32_to_cpup(prop);
  903. pr_debug("dt_root_size_cells = %x\n", dt_root_size_cells);
  904. prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
  905. if (prop)
  906. dt_root_addr_cells = be32_to_cpup(prop);
  907. pr_debug("dt_root_addr_cells = %x\n", dt_root_addr_cells);
  908. return 0;
  909. }
  910. u64 __init dt_mem_next_cell(int s, const __be32 **cellp)
  911. {
  912. const __be32 *p = *cellp;
  913. *cellp = p + s;
  914. return of_read_number(p, s);
  915. }
/*
 * early_init_dt_scan_memory - Look for and parse memory nodes
 *
 * Walks every top-level subnode of the flat tree and, for each available
 * node with device_type == "memory", registers its "reg" (or overriding
 * "linux,usable-memory") ranges via early_init_dt_add_memory_arch().
 * Returns nonzero if at least one non-empty memory range was found.
 */
int __init early_init_dt_scan_memory(void)
{
	int node, found_memory = 0;
	const void *fdt = initial_boot_params;

	fdt_for_each_subnode(node, fdt, 0) {
		const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
		const __be32 *reg, *endp;
		int l;
		bool hotpluggable;

		/* We are scanning "memory" nodes only */
		if (type == NULL || strcmp(type, "memory") != 0)
			continue;

		/* Skip nodes disabled via their status property. */
		if (!of_fdt_device_is_available(fdt, node))
			continue;

		/* "linux,usable-memory" overrides "reg" when present. */
		reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l);
		if (reg == NULL)
			reg = of_get_flat_dt_prop(node, "reg", &l);
		if (reg == NULL)
			continue;

		endp = reg + (l / sizeof(__be32));
		hotpluggable = of_get_flat_dt_prop(node, "hotpluggable", NULL);

		pr_debug("memory scan node %s, reg size %d,\n",
			 fdt_get_name(fdt, node, NULL), l);

		/* Consume one (base, size) pair per loop iteration. */
		while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
			u64 base, size;

			base = dt_mem_next_cell(dt_root_addr_cells, &reg);
			size = dt_mem_next_cell(dt_root_size_cells, &reg);

			/* Empty ranges are silently skipped. */
			if (size == 0)
				continue;
			pr_debug(" - %llx, %llx\n", base, size);

			early_init_dt_add_memory_arch(base, size);

			found_memory = 1;

			if (!hotpluggable)
				continue;

			if (memblock_mark_hotplug(base, size))
				pr_warn("failed to mark hotplug range 0x%llx - 0x%llx\n",
					base, base + size);
		}
	}
	return found_memory;
}
/*
 * Convert configs to something easy to use in C code
 *
 * overwrite_incoming_cmdline: discard the bootloader-supplied command line
 * read_dt_cmdline:            read "bootargs" from the /chosen node
 * concat_cmdline:             append DT bootargs rather than replacing
 */
#if defined(CONFIG_CMDLINE_FORCE)
static const int overwrite_incoming_cmdline = 1;
static const int read_dt_cmdline;
static const int concat_cmdline;
#elif defined(CONFIG_CMDLINE_EXTEND)
static const int overwrite_incoming_cmdline;
static const int read_dt_cmdline = 1;
static const int concat_cmdline = 1;
#else /* CMDLINE_FROM_BOOTLOADER */
static const int overwrite_incoming_cmdline;
static const int read_dt_cmdline = 1;
static const int concat_cmdline;
#endif

/* Build-time default command line (empty when CONFIG_CMDLINE is unset). */
#ifdef CONFIG_CMDLINE
static const char *config_cmdline = CONFIG_CMDLINE;
#else
static const char *config_cmdline = "";
#endif
/*
 * early_init_dt_scan_chosen - parse the /chosen node of the flat tree
 * @cmdline: COMMAND_LINE_SIZE buffer holding the incoming command line;
 *           rewritten in place according to the CONFIG_CMDLINE policy
 *
 * Caches the /chosen offset, decodes initrd and elfcorehdr locations,
 * feeds "rng-seed" to the RNG (then wipes it from the blob), and builds
 * the final command line from the bootloader string, DT "bootargs" and
 * CONFIG_CMDLINE. Always returns 0.
 */
int __init early_init_dt_scan_chosen(char *cmdline)
{
	int l = 0, node;
	const char *p = NULL;
	const void *rng_seed;
	const void *fdt = initial_boot_params;

	node = fdt_path_offset(fdt, "/chosen");
	if (node < 0)
		node = fdt_path_offset(fdt, "/chosen@0");
	if (node < 0)
		/* Handle the cmdline config options even if no /chosen node */
		goto handle_cmdline;

	chosen_node_offset = node;

	early_init_dt_check_for_initrd(node);
	early_init_dt_check_for_elfcorehdr(node);

	rng_seed = of_get_flat_dt_prop(node, "rng-seed", &l);
	if (rng_seed && l > 0) {
		add_bootloader_randomness(rng_seed, l);

		/* try to clear seed so it won't be found. */
		fdt_nop_property(initial_boot_params, node, "rng-seed");

		/* update CRC check value */
		of_fdt_crc32 = crc32_be(~0, initial_boot_params,
				fdt_totalsize(initial_boot_params));
	}

	/* Put CONFIG_CMDLINE in if forced or if data had nothing in it to start */
	if (overwrite_incoming_cmdline || !cmdline[0])
		strscpy(cmdline, config_cmdline, COMMAND_LINE_SIZE);

	/* Retrieve command line unless forcing */
	if (read_dt_cmdline)
		p = of_get_flat_dt_prop(node, "bootargs", &l);

	if (p != NULL && l > 0) {
		if (concat_cmdline) {
			int cmdline_len;
			int copy_len;

			/* Append a space, then as much of bootargs as fits,
			 * leaving room for the terminating NUL. */
			strlcat(cmdline, " ", COMMAND_LINE_SIZE);
			cmdline_len = strlen(cmdline);
			copy_len = COMMAND_LINE_SIZE - cmdline_len - 1;
			copy_len = min((int)l, copy_len);
			strncpy(cmdline + cmdline_len, p, copy_len);
			cmdline[cmdline_len + copy_len] = '\0';
		} else {
			strscpy(cmdline, p, min(l, COMMAND_LINE_SIZE));
		}
	}

handle_cmdline:
	pr_debug("Command line is: %s\n", (char *)cmdline);

	return 0;
}
/* Lowest physical address memblock may use (arch-overridable). */
#ifndef MIN_MEMBLOCK_ADDR
#define MIN_MEMBLOCK_ADDR	__pa(PAGE_OFFSET)
#endif
/* Highest physical address memblock may use (arch-overridable). */
#ifndef MAX_MEMBLOCK_ADDR
#define MAX_MEMBLOCK_ADDR	((phys_addr_t)~0)
#endif
/*
 * early_init_dt_add_memory_arch - default handler registering one memory
 * range parsed from the device tree
 * @base: physical start address
 * @size: length in bytes
 *
 * Trims the range to page granularity and to the arch window
 * [MIN_MEMBLOCK_ADDR, MAX_MEMBLOCK_ADDR] before handing it to memblock;
 * ranges that vanish entirely after trimming are dropped with a warning.
 */
void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
{
	const u64 phys_offset = MIN_MEMBLOCK_ADDR;

	/* Too small to contain even one page after alignment? Drop it. */
	if (size < PAGE_SIZE - (base & ~PAGE_MASK)) {
		pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
			base, base + size);
		return;
	}

	/* Round the start up to a page boundary, shrinking the range. */
	if (!PAGE_ALIGNED(base)) {
		size -= PAGE_SIZE - (base & ~PAGE_MASK);
		base = PAGE_ALIGN(base);
	}
	size &= PAGE_MASK;

	/* Entirely above the supported physical address space. */
	if (base > MAX_MEMBLOCK_ADDR) {
		pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
			base, base + size);
		return;
	}

	/* Clip any tail that overflows the supported address space. */
	if (base + size - 1 > MAX_MEMBLOCK_ADDR) {
		pr_warn("Ignoring memory range 0x%llx - 0x%llx\n",
			((u64)MAX_MEMBLOCK_ADDR) + 1, base + size);
		size = MAX_MEMBLOCK_ADDR - base + 1;
	}

	/* Entirely below the minimum usable physical address. */
	if (base + size < phys_offset) {
		pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
			base, base + size);
		return;
	}

	/* Clip any head that lies below the minimum usable address. */
	if (base < phys_offset) {
		pr_warn("Ignoring memory range 0x%llx - 0x%llx\n",
			base, phys_offset);
		size -= phys_offset - base;
		base = phys_offset;
	}
	memblock_add(base, size);
}
  1071. static void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
  1072. {
  1073. void *ptr = memblock_alloc(size, align);
  1074. if (!ptr)
  1075. panic("%s: Failed to allocate %llu bytes align=0x%llx\n",
  1076. __func__, size, align);
  1077. return ptr;
  1078. }
  1079. bool __init early_init_dt_verify(void *params)
  1080. {
  1081. if (!params)
  1082. return false;
  1083. /* check device tree validity */
  1084. if (fdt_check_header(params))
  1085. return false;
  1086. /* Setup flat device-tree pointer */
  1087. initial_boot_params = params;
  1088. of_fdt_crc32 = crc32_be(~0, initial_boot_params,
  1089. fdt_totalsize(initial_boot_params));
  1090. return true;
  1091. }
  1092. void __init early_init_dt_scan_nodes(void)
  1093. {
  1094. int rc;
  1095. /* Initialize {size,address}-cells info */
  1096. early_init_dt_scan_root();
  1097. /* Retrieve various information from the /chosen node */
  1098. rc = early_init_dt_scan_chosen(boot_command_line);
  1099. if (rc)
  1100. pr_warn("No chosen node found, continuing without\n");
  1101. /* Setup memory, calling early_init_dt_add_memory_arch */
  1102. early_init_dt_scan_memory();
  1103. /* Handle linux,usable-memory-range property */
  1104. early_init_dt_check_for_usable_mem_range();
  1105. }
  1106. bool __init early_init_dt_scan(void *params)
  1107. {
  1108. bool status;
  1109. status = early_init_dt_verify(params);
  1110. if (!status)
  1111. return false;
  1112. early_init_dt_scan_nodes();
  1113. return true;
  1114. }
/**
 * unflatten_device_tree - create tree of device_nodes from flat blob
 *
 * unflattens the device-tree passed by the firmware, creating the
 * tree of struct device_node. It also fills the "name" and "type"
 * pointers of the nodes so the normal device-tree walking functions
 * can be used.
 */
void __init unflatten_device_tree(void)
{
	__unflatten_device_tree(initial_boot_params, NULL, &of_root,
				early_init_dt_alloc_memory_arch, false);

	/* Get pointer to "/chosen" and "/aliases" nodes for use everywhere */
	of_alias_scan(early_init_dt_alloc_memory_arch);

	/* Hook for the DT unittests' overlay base tree (no-op otherwise). */
	unittest_unflatten_overlay_base();
}
  1131. /**
  1132. * unflatten_and_copy_device_tree - copy and create tree of device_nodes from flat blob
  1133. *
  1134. * Copies and unflattens the device-tree passed by the firmware, creating the
  1135. * tree of struct device_node. It also fills the "name" and "type"
  1136. * pointers of the nodes so the normal device-tree walking functions
  1137. * can be used. This should only be used when the FDT memory has not been
  1138. * reserved such is the case when the FDT is built-in to the kernel init
  1139. * section. If the FDT memory is reserved already then unflatten_device_tree
  1140. * should be used instead.
  1141. */
  1142. void __init unflatten_and_copy_device_tree(void)
  1143. {
  1144. int size;
  1145. void *dt;
  1146. if (!initial_boot_params) {
  1147. pr_warn("No valid device tree found, continuing without\n");
  1148. return;
  1149. }
  1150. size = fdt_totalsize(initial_boot_params);
  1151. dt = early_init_dt_alloc_memory_arch(size,
  1152. roundup_pow_of_two(FDT_V17_SIZE));
  1153. if (dt) {
  1154. memcpy(dt, initial_boot_params, size);
  1155. initial_boot_params = dt;
  1156. }
  1157. unflatten_device_tree();
  1158. }
  1159. #ifdef CONFIG_SYSFS
/*
 * sysfs read handler for /sys/firmware/fdt, exposing the raw blob.
 * Bounds are enforced by sysfs against the bin_attribute size set in
 * of_fdt_raw_init(), so the copy here is unconditional.
 */
static ssize_t of_fdt_raw_read(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr,
			       char *buf, loff_t off, size_t count)
{
	memcpy(buf, initial_boot_params + off, count);
	return count;
}
/*
 * Publish the boot FDT at /sys/firmware/fdt, but only if the blob still
 * matches the CRC recorded at boot (i.e. it has not been modified since).
 */
static int __init of_fdt_raw_init(void)
{
	static struct bin_attribute of_fdt_raw_attr =
		__BIN_ATTR(fdt, S_IRUSR, of_fdt_raw_read, NULL, 0);

	if (!initial_boot_params)
		return 0;

	/* Refuse to expose a blob that no longer matches its boot-time CRC. */
	if (of_fdt_crc32 != crc32_be(~0, initial_boot_params,
				     fdt_totalsize(initial_boot_params))) {
		pr_warn("not creating '/sys/firmware/fdt': CRC check failed\n");
		return 0;
	}

	of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
	return sysfs_create_bin_file(firmware_kobj, &of_fdt_raw_attr);
}
late_initcall(of_fdt_raw_init);
  1182. #endif
  1183. #endif /* CONFIG_OF_EARLY_FLATTREE */